Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/arm64/gtdt.c | 33
-rw-r--r-- drivers/ata/ahci.c | 2
-rw-r--r-- drivers/ata/ahci_brcm.c | 2
-rw-r--r-- drivers/ata/ahci_ceva.c | 2
-rw-r--r-- drivers/ata/ahci_da850.c | 2
-rw-r--r-- drivers/ata/ahci_dm816.c | 2
-rw-r--r-- drivers/ata/ahci_dwc.c | 2
-rw-r--r-- drivers/ata/ahci_imx.c | 4
-rw-r--r-- drivers/ata/ahci_mtk.c | 2
-rw-r--r-- drivers/ata/ahci_mvebu.c | 2
-rw-r--r-- drivers/ata/ahci_platform.c | 2
-rw-r--r-- drivers/ata/ahci_qoriq.c | 2
-rw-r--r-- drivers/ata/ahci_seattle.c | 2
-rw-r--r-- drivers/ata/ahci_st.c | 2
-rw-r--r-- drivers/ata/ahci_sunxi.c | 2
-rw-r--r-- drivers/ata/ahci_tegra.c | 2
-rw-r--r-- drivers/ata/ahci_xgene.c | 4
-rw-r--r-- drivers/ata/libata-acpi.c | 4
-rw-r--r-- drivers/ata/libata-scsi.c | 516
-rw-r--r-- drivers/ata/pata_arasan_cf.c | 2
-rw-r--r-- drivers/ata/pata_ep93xx.c | 2
-rw-r--r-- drivers/ata/pata_falcon.c | 4
-rw-r--r-- drivers/ata/pata_ftide010.c | 2
-rw-r--r-- drivers/ata/pata_gayle.c | 6
-rw-r--r-- drivers/ata/pata_imx.c | 2
-rw-r--r-- drivers/ata/pata_it8213.c | 2
-rw-r--r-- drivers/ata/pata_ixp4xx_cf.c | 2
-rw-r--r-- drivers/ata/pata_mpc52xx.c | 2
-rw-r--r-- drivers/ata/pata_octeon_cf.c | 2
-rw-r--r-- drivers/ata/pata_of_platform.c | 2
-rw-r--r-- drivers/ata/pata_oldpiix.c | 2
-rw-r--r-- drivers/ata/pata_platform.c | 2
-rw-r--r-- drivers/ata/pata_pxa.c | 2
-rw-r--r-- drivers/ata/pata_radisys.c | 2
-rw-r--r-- drivers/ata/pata_rb532_cf.c | 2
-rw-r--r-- drivers/ata/sata_dwc_460ex.c | 2
-rw-r--r-- drivers/ata/sata_fsl.c | 2
-rw-r--r-- drivers/ata/sata_gemini.c | 2
-rw-r--r-- drivers/ata/sata_highbank.c | 12
-rw-r--r-- drivers/ata/sata_mv.c | 2
-rw-r--r-- drivers/ata/sata_rcar.c | 2
-rw-r--r-- drivers/block/brd.c | 66
-rw-r--r-- drivers/block/loop.c | 13
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 14
-rw-r--r-- drivers/block/null_blk/main.c | 9
-rw-r--r-- drivers/block/null_blk/zoned.c | 2
-rw-r--r-- drivers/block/rbd.c | 1
-rw-r--r-- drivers/block/ublk_drv.c | 208
-rw-r--r-- drivers/block/virtio_blk.c | 55
-rw-r--r-- drivers/bluetooth/btintel.c | 5
-rw-r--r-- drivers/char/hw_random/Kconfig | 30
-rw-r--r-- drivers/char/hw_random/Makefile | 2
-rw-r--r-- drivers/char/hw_random/airoha-trng.c | 243
-rw-r--r-- drivers/char/hw_random/atmel-rng.c | 2
-rw-r--r-- drivers/char/hw_random/bcm74110-rng.c | 125
-rw-r--r-- drivers/char/hw_random/cctrng.c | 2
-rw-r--r-- drivers/char/hw_random/core.c | 11
-rw-r--r-- drivers/char/hw_random/exynos-trng.c | 2
-rw-r--r-- drivers/char/hw_random/histb-rng.c | 2
-rw-r--r-- drivers/char/hw_random/ingenic-rng.c | 2
-rw-r--r-- drivers/char/hw_random/ks-sa-rng.c | 2
-rw-r--r-- drivers/char/hw_random/mxc-rnga.c | 2
-rw-r--r-- drivers/char/hw_random/n2-drv.c | 2
-rw-r--r-- drivers/char/hw_random/npcm-rng.c | 2
-rw-r--r-- drivers/char/hw_random/omap-rng.c | 2
-rw-r--r-- drivers/char/hw_random/stm32-rng.c | 78
-rw-r--r-- drivers/char/hw_random/timeriomem-rng.c | 2
-rw-r--r-- drivers/char/hw_random/xgene-rng.c | 2
-rw-r--r-- drivers/char/tpm/tpm-buf.c | 20
-rw-r--r-- drivers/char/tpm/tpm2-cmd.c | 30
-rw-r--r-- drivers/char/tpm/tpm2-sessions.c | 58
-rw-r--r-- drivers/cpufreq/acpi-cpufreq.c | 9
-rw-r--r-- drivers/cpufreq/amd-pstate-ut.c | 6
-rw-r--r-- drivers/cpufreq/amd-pstate.c | 229
-rw-r--r-- drivers/cpufreq/brcmstb-avs-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq-dt.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/davinci-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/imx-cpufreq-dt.c | 2
-rw-r--r-- drivers/cpufreq/imx6q-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 37
-rw-r--r-- drivers/cpufreq/kirkwood-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/loongson3_cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/mediatek-cpufreq-hw.c | 2
-rw-r--r-- drivers/cpufreq/omap-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/pcc-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/qcom-cpufreq-hw.c | 2
-rw-r--r-- drivers/cpufreq/qcom-cpufreq-nvmem.c | 2
-rw-r--r-- drivers/cpufreq/qoriq-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/raspberrypi-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/scpi-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/sun50i-cpufreq-nvmem.c | 2
-rw-r--r-- drivers/cpufreq/tegra186-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/tegra194-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/vexpress-spc-cpufreq.c | 2
-rw-r--r-- drivers/cpuidle/cpuidle-arm.c | 2
-rw-r--r-- drivers/cpuidle/cpuidle-qcom-spm.c | 2
-rw-r--r-- drivers/cpuidle/cpuidle.c | 2
-rw-r--r-- drivers/cpuidle/driver.c | 4
-rw-r--r-- drivers/cpuidle/governors/menu.c | 76
-rw-r--r-- drivers/crypto/Kconfig | 21
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 2
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.c | 58
-rw-r--r-- drivers/crypto/amlogic/amlogic-gxl-core.c | 10
-rw-r--r-- drivers/crypto/aspeed/aspeed-acry.c | 4
-rw-r--r-- drivers/crypto/aspeed/aspeed-hace.c | 2
-rw-r--r-- drivers/crypto/atmel-aes.c | 2
-rw-r--r-- drivers/crypto/atmel-ecc.c | 2
-rw-r--r-- drivers/crypto/atmel-sha.c | 2
-rw-r--r-- drivers/crypto/atmel-sha204a.c | 4
-rw-r--r-- drivers/crypto/atmel-tdes.c | 4
-rw-r--r-- drivers/crypto/axis/artpec6_crypto.c | 2
-rw-r--r-- drivers/crypto/bcm/cipher.c | 7
-rw-r--r-- drivers/crypto/caam/caampkc.c | 11
-rw-r--r-- drivers/crypto/caam/jr.c | 2
-rw-r--r-- drivers/crypto/caam/qi.c | 7
-rw-r--r-- drivers/crypto/cavium/cpt/cptpf_main.c | 6
-rw-r--r-- drivers/crypto/cavium/cpt/cptvf_reqmanager.c | 4
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_lib.c | 2
-rw-r--r-- drivers/crypto/ccp/sp-platform.c | 2
-rw-r--r-- drivers/crypto/ccree/cc_aead.c | 4
-rw-r--r-- drivers/crypto/ccree/cc_cipher.c | 2
-rw-r--r-- drivers/crypto/ccree/cc_driver.c | 2
-rw-r--r-- drivers/crypto/ccree/cc_hash.c | 2
-rw-r--r-- drivers/crypto/chelsio/chcr_algo.c | 2
-rw-r--r-- drivers/crypto/exynos-rng.c | 2
-rw-r--r-- drivers/crypto/gemini/sl3516-ce-core.c | 2
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre.h | 23
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 2
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_main.c | 194
-rw-r--r-- drivers/crypto/hisilicon/qm.c | 166
-rw-r--r-- drivers/crypto/hisilicon/sec/sec_drv.c | 2
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec.h | 26
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.c | 8
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_main.c | 108
-rw-r--r-- drivers/crypto/hisilicon/trng/trng.c | 2
-rw-r--r-- drivers/crypto/hisilicon/zip/zip.h | 18
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_main.c | 153
-rw-r--r-- drivers/crypto/img-hash.c | 2
-rw-r--r-- drivers/crypto/inside-secure/safexcel.c | 2
-rw-r--r-- drivers/crypto/inside-secure/safexcel_hash.c | 2
-rw-r--r-- drivers/crypto/intel/iaa/iaa_crypto_main.c | 10
-rw-r--r-- drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c | 2
-rw-r--r-- drivers/crypto/intel/keembay/keembay-ocs-aes-core.c | 2
-rw-r--r-- drivers/crypto/intel/keembay/keembay-ocs-ecc.c | 2
-rw-r--r-- drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_aer.c | 5
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_common_drv.h | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_dbgfs.c | 13
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c | 10
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c | 18
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c | 4
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_hal.c | 2
-rw-r--r-- drivers/crypto/marvell/Kconfig | 2
-rw-r--r-- drivers/crypto/marvell/cesa/cesa.c | 54
-rw-r--r-- drivers/crypto/marvell/cesa/cipher.c | 24
-rw-r--r-- drivers/crypto/mxs-dcp.c | 22
-rw-r--r-- drivers/crypto/n2_core.c | 4
-rw-r--r-- drivers/crypto/nx/nx-common-pseries.c | 37
-rw-r--r-- drivers/crypto/omap-aes.c | 2
-rw-r--r-- drivers/crypto/omap-des.c | 2
-rw-r--r-- drivers/crypto/omap-sham.c | 2
-rw-r--r-- drivers/crypto/qce/core.c | 2
-rw-r--r-- drivers/crypto/qcom-rng.c | 2
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto.c | 2
-rw-r--r-- drivers/crypto/s5p-sss.c | 2
-rw-r--r-- drivers/crypto/sa2ul.c | 4
-rw-r--r-- drivers/crypto/sahara.c | 2
-rw-r--r-- drivers/crypto/starfive/jh7110-cryp.c | 7
-rw-r--r-- drivers/crypto/starfive/jh7110-rsa.c | 2
-rw-r--r-- drivers/crypto/stm32/stm32-crc32.c | 2
-rw-r--r-- drivers/crypto/stm32/stm32-cryp.c | 2
-rw-r--r-- drivers/crypto/stm32/stm32-hash.c | 2
-rw-r--r-- drivers/crypto/talitos.c | 2
-rw-r--r-- drivers/crypto/tegra/tegra-se-aes.c | 2
-rw-r--r-- drivers/crypto/tegra/tegra-se-main.c | 4
-rw-r--r-- drivers/crypto/virtio/virtio_crypto_akcipher_algs.c | 65
-rw-r--r-- drivers/crypto/xilinx/zynqmp-aes-gcm.c | 2
-rw-r--r-- drivers/crypto/xilinx/zynqmp-sha.c | 2
-rw-r--r-- drivers/firmware/arm_scmi/perf.c | 44
-rw-r--r-- drivers/firmware/google/framebuffer-coreboot.c | 14
-rw-r--r-- drivers/firmware/google/gsmi.c | 6
-rw-r--r-- drivers/firmware/sysfb.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nv.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc21.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc24.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 117
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_state.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c | 11
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 49
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 20
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 9
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 8
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 2
-rw-r--r-- drivers/gpu/drm/bridge/tc358768.c | 21
-rw-r--r-- drivers/gpu/drm/drm_syncobj.c | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tv.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/shmem_utils.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c | 50
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.c | 24
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.h | 4
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_random.h | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/scatterlist.c | 2
-rw-r--r-- drivers/gpu/drm/lib/drm_random.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c | 59
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/fw.c | 11
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 6
-rw-r--r-- drivers/gpu/drm/panthor/panthor_mmu.c | 2
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/ttm_object.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_bo.c | 43
-rw-r--r-- drivers/gpu/drm/xe/xe_bo_evict.c | 20
-rw-r--r-- drivers/gpu/drm/xe/xe_exec.c | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_oa.c | 2
-rw-r--r-- drivers/idle/intel_idle.c | 48
-rw-r--r-- drivers/infiniband/core/addr.c | 2
-rw-r--r-- drivers/infiniband/core/ucma.c | 19
-rw-r--r-- drivers/infiniband/core/uverbs_cmd.c | 8
-rw-r--r-- drivers/infiniband/hw/bnxt_re/main.c | 27
-rw-r--r-- drivers/mailbox/qcom-cpucp-mbox.c | 2
-rw-r--r-- drivers/md/dm-bufio.c | 12
-rw-r--r-- drivers/md/dm-cache-background-tracker.c | 25
-rw-r--r-- drivers/md/dm-cache-background-tracker.h | 8
-rw-r--r-- drivers/md/dm-cache-target.c | 29
-rw-r--r-- drivers/md/dm-clone-target.c | 4
-rw-r--r-- drivers/md/dm-thin.c | 2
-rw-r--r-- drivers/md/dm-zone.c | 4
-rw-r--r-- drivers/md/md-bitmap.c | 1
-rw-r--r-- drivers/md/md.c | 15
-rw-r--r-- drivers/md/md.h | 24
-rw-r--r-- drivers/md/raid0.c | 12
-rw-r--r-- drivers/md/raid1.c | 108
-rw-r--r-- drivers/md/raid10.c | 87
-rw-r--r-- drivers/md/raid5-ppl.c | 2
-rw-r--r-- drivers/md/raid5.c | 17
-rw-r--r-- drivers/md/raid5.h | 2
-rw-r--r-- drivers/media/mc/mc-request.c | 18
-rw-r--r-- drivers/media/rc/lirc_dev.c | 13
-rw-r--r-- drivers/media/test-drivers/vivid/vivid-vid-cap.c | 1
-rw-r--r-- drivers/mmc/core/block.c | 55
-rw-r--r-- drivers/mmc/host/dw_mmc.c | 4
-rw-r--r-- drivers/mmc/host/sunxi-mmc.c | 6
-rw-r--r-- drivers/mtd/tests/oobtest.c | 2
-rw-r--r-- drivers/mtd/tests/pagetest.c | 2
-rw-r--r-- drivers/mtd/tests/subpagetest.c | 2
-rw-r--r-- drivers/net/bonding/bond_main.c | 16
-rw-r--r-- drivers/net/bonding/bond_options.c | 82
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 32
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c | 25
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 4
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.c | 13
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.h | 12
-rw-r--r-- drivers/net/ethernet/vertexcom/mse102x.c | 4
-rw-r--r-- drivers/net/phy/phylink.c | 14
-rw-r--r-- drivers/nvme/host/apple.c | 2
-rw-r--r-- drivers/nvme/host/core.c | 38
-rw-r--r-- drivers/nvme/host/ioctl.c | 21
-rw-r--r-- drivers/nvme/host/multipath.c | 2
-rw-r--r-- drivers/nvme/host/nvme.h | 1
-rw-r--r-- drivers/nvme/host/pci.c | 120
-rw-r--r-- drivers/nvme/host/trace.c | 58
-rw-r--r-- drivers/nvme/host/zns.c | 2
-rw-r--r-- drivers/nvme/target/Makefile | 2
-rw-r--r-- drivers/nvme/target/admin-cmd.c | 288
-rw-r--r-- drivers/nvme/target/configfs.c | 27
-rw-r--r-- drivers/nvme/target/core.c | 64
-rw-r--r-- drivers/nvme/target/fabrics-cmd.c | 7
-rw-r--r-- drivers/nvme/target/nvmet.h | 67
-rw-r--r-- drivers/nvme/target/pr.c | 1156
-rw-r--r-- drivers/nvme/target/trace.c | 108
-rw-r--r-- drivers/nvme/target/zns.c | 21
-rw-r--r-- drivers/perf/Kconfig | 7
-rw-r--r-- drivers/perf/Makefile | 1
-rw-r--r-- drivers/perf/alibaba_uncore_drw_pmu.c | 2
-rw-r--r-- drivers/perf/amlogic/meson_g12_ddr_pmu.c | 2
-rw-r--r-- drivers/perf/arm-cci.c | 2
-rw-r--r-- drivers/perf/arm-ccn.c | 2
-rw-r--r-- drivers/perf/arm-cmn.c | 2
-rw-r--r-- drivers/perf/arm_cspmu/arm_cspmu.c | 2
-rw-r--r-- drivers/perf/arm_dmc620_pmu.c | 2
-rw-r--r-- drivers/perf/arm_dsu_pmu.c | 2
-rw-r--r-- drivers/perf/arm_pmuv3.c | 32
-rw-r--r-- drivers/perf/arm_smmuv3_pmu.c | 2
-rw-r--r-- drivers/perf/arm_spe_pmu.c | 2
-rw-r--r-- drivers/perf/cxl_pmu.c | 9
-rw-r--r-- drivers/perf/dwc_pcie_pmu.c | 16
-rw-r--r-- drivers/perf/fsl_imx8_ddr_perf.c | 2
-rw-r--r-- drivers/perf/fsl_imx9_ddr_perf.c | 7
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c | 2
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c | 2
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_hha_pmu.c | 2
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c | 2
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_pa_pmu.c | 2
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c | 2
-rw-r--r-- drivers/perf/marvell_cn10k_ddr_pmu.c | 2
-rw-r--r-- drivers/perf/marvell_cn10k_tad_pmu.c | 2
-rw-r--r-- drivers/perf/marvell_pem_pmu.c | 425
-rw-r--r-- drivers/perf/qcom_l2_pmu.c | 2
-rw-r--r-- drivers/perf/riscv_pmu_sbi.c | 4
-rw-r--r-- drivers/perf/thunderx2_pmu.c | 2
-rw-r--r-- drivers/perf/xgene_pmu.c | 2
-rw-r--r-- drivers/platform/chrome/cros_ec_chardev.c | 2
-rw-r--r-- drivers/platform/chrome/cros_ec_debugfs.c | 2
-rw-r--r-- drivers/platform/chrome/cros_ec_i2c.c | 2
-rw-r--r-- drivers/platform/chrome/cros_ec_lightbar.c | 2
-rw-r--r-- drivers/platform/chrome/cros_ec_lpc.c | 2
-rw-r--r-- drivers/platform/chrome/cros_ec_sysfs.c | 2
-rw-r--r-- drivers/platform/chrome/cros_ec_typec.c | 3
-rw-r--r-- drivers/platform/chrome/cros_ec_vbc.c | 2
-rw-r--r-- drivers/platform/chrome/cros_hps_i2c.c | 2
-rw-r--r-- drivers/platform/chrome/cros_typec_switch.c | 2
-rw-r--r-- drivers/platform/chrome/cros_usbpd_logger.c | 2
-rw-r--r-- drivers/platform/chrome/cros_usbpd_notify.c | 4
-rw-r--r-- drivers/platform/chrome/wilco_ec/core.c | 2
-rw-r--r-- drivers/platform/chrome/wilco_ec/debugfs.c | 2
-rw-r--r-- drivers/platform/chrome/wilco_ec/telemetry.c | 2
-rw-r--r-- drivers/pmdomain/arm/scmi_perf_domain.c | 3
-rw-r--r-- drivers/pmdomain/core.c | 49
-rw-r--r-- drivers/pmdomain/imx/imx93-blk-ctrl.c | 4
-rw-r--r-- drivers/s390/block/dasd.c | 2
-rw-r--r-- drivers/s390/block/dasd_devmap.c | 2
-rw-r--r-- drivers/s390/block/dasd_diag.c | 15
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 2
-rw-r--r-- drivers/s390/block/dasd_proc.c | 5
-rw-r--r-- drivers/s390/block/dcssblk.c | 18
-rw-r--r-- drivers/s390/char/con3270.c | 4
-rw-r--r-- drivers/s390/char/sclp.h | 18
-rw-r--r-- drivers/s390/char/sclp_cpi_sys.c | 8
-rw-r--r-- drivers/s390/char/sclp_ocf.c | 4
-rw-r--r-- drivers/s390/char/sclp_pci.c | 2
-rw-r--r-- drivers/s390/char/tape_core.c | 16
-rw-r--r-- drivers/s390/char/uvdevice.c | 153
-rw-r--r-- drivers/s390/char/vmlogrdr.c | 4
-rw-r--r-- drivers/s390/char/vmur.c | 2
-rw-r--r-- drivers/s390/cio/ccwgroup.c | 2
-rw-r--r-- drivers/s390/cio/chp.c | 31
-rw-r--r-- drivers/s390/cio/chp.h | 1
-rw-r--r-- drivers/s390/cio/chsc.c | 31
-rw-r--r-- drivers/s390/cio/chsc.h | 16
-rw-r--r-- drivers/s390/cio/cio.c | 6
-rw-r--r-- drivers/s390/cio/cio.h | 2
-rw-r--r-- drivers/s390/cio/cmf.c | 15
-rw-r--r-- drivers/s390/cio/css.c | 6
-rw-r--r-- drivers/s390/cio/device.c | 40
-rw-r--r-- drivers/s390/cio/ioasm.c | 107
-rw-r--r-- drivers/s390/cio/qdio_main.c | 28
-rw-r--r-- drivers/s390/cio/scm.c | 2
-rw-r--r-- drivers/s390/crypto/Makefile | 4
-rw-r--r-- drivers/s390/crypto/pkey_base.c | 14
-rw-r--r-- drivers/s390/crypto/pkey_base.h | 36
-rw-r--r-- drivers/s390/crypto/pkey_cca.c | 5
-rw-r--r-- drivers/s390/crypto/pkey_ep11.c | 1
-rw-r--r-- drivers/s390/crypto/pkey_pckmo.c | 239
-rw-r--r-- drivers/s390/crypto/pkey_sysfs.c | 1
-rw-r--r-- drivers/s390/crypto/pkey_uv.c | 284
-rw-r--r-- drivers/s390/crypto/vfio_ap_ops.c | 45
-rw-r--r-- drivers/s390/crypto/zcrypt_ccamisc.h | 1
-rw-r--r-- drivers/s390/net/netiucv.c | 24
-rw-r--r-- drivers/s390/scsi/zfcp_sysfs.c | 82
-rw-r--r-- drivers/s390/virtio/virtio_ccw.c | 4
-rw-r--r-- drivers/scsi/sd.c | 6
-rw-r--r-- drivers/scsi/sd_zbc.c | 2
-rw-r--r-- drivers/tc/tc.c | 2
-rw-r--r-- drivers/thermal/Makefile | 1
-rw-r--r-- drivers/thermal/gov_bang_bang.c | 15
-rw-r--r-- drivers/thermal/gov_fair_share.c | 20
-rw-r--r-- drivers/thermal/gov_power_allocator.c | 86
-rw-r--r-- drivers/thermal/gov_step_wise.c | 22
-rw-r--r-- drivers/thermal/testing/zone.c | 41
-rw-r--r-- drivers/thermal/thermal_core.c | 883
-rw-r--r-- drivers/thermal/thermal_core.h | 41
-rw-r--r-- drivers/thermal/thermal_debugfs.c | 50
-rw-r--r-- drivers/thermal/thermal_helpers.c | 46
-rw-r--r-- drivers/thermal/thermal_hwmon.c | 5
-rw-r--r-- drivers/thermal/thermal_netlink.c | 253
-rw-r--r-- drivers/thermal/thermal_netlink.h | 34
-rw-r--r-- drivers/thermal/thermal_sysfs.c | 132
-rw-r--r-- drivers/thermal/thermal_thresholds.c | 240
-rw-r--r-- drivers/thermal/thermal_thresholds.h | 19
-rw-r--r-- drivers/thermal/thermal_trip.c | 48
-rw-r--r-- drivers/vdpa/ifcvf/ifcvf_base.c | 2
-rw-r--r-- drivers/vdpa/mlx5/core/mr.c | 8
-rw-r--r-- drivers/vdpa/mlx5/net/mlx5_vnet.c | 21
-rw-r--r-- drivers/vdpa/solidrun/snet_main.c | 14
-rw-r--r-- drivers/vdpa/virtio_pci/vp_vdpa.c | 10
-rw-r--r-- drivers/vfio/group.c | 6
-rw-r--r-- drivers/vfio/virqfd.c | 16
-rw-r--r-- drivers/virt/acrn/irqfd.c | 13
-rw-r--r-- drivers/virt/coco/Kconfig | 2
-rw-r--r-- drivers/virt/coco/Makefile | 1
-rw-r--r-- drivers/virt/coco/arm-cca-guest/Kconfig | 11
-rw-r--r-- drivers/virt/coco/arm-cca-guest/Makefile | 2
-rw-r--r-- drivers/virt/coco/arm-cca-guest/arm-cca-guest.c | 224
-rw-r--r-- drivers/virtio/Kconfig | 12
-rw-r--r-- drivers/virtio/virtio_pci_common.c | 24
-rw-r--r-- drivers/virtio/virtio_pci_common.h | 1
-rw-r--r-- drivers/virtio/virtio_pci_modern.c | 12
-rw-r--r-- drivers/xen/privcmd.c | 28
-rw-r--r-- drivers/xen/xenbus/xenbus_probe.c | 8
432 files changed, 7893 insertions(+), 3160 deletions(-)
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index c0e77c1c8e09..3561553eff8b 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -36,19 +36,25 @@ struct acpi_gtdt_descriptor {
static struct acpi_gtdt_descriptor acpi_gtdt_desc __initdata;
-static inline __init void *next_platform_timer(void *platform_timer)
+static __init bool platform_timer_valid(void *platform_timer)
{
struct acpi_gtdt_header *gh = platform_timer;
- platform_timer += gh->length;
- if (platform_timer < acpi_gtdt_desc.gtdt_end)
- return platform_timer;
+ return (platform_timer >= (void *)(acpi_gtdt_desc.gtdt + 1) &&
+ platform_timer < acpi_gtdt_desc.gtdt_end &&
+ gh->length != 0 &&
+ platform_timer + gh->length <= acpi_gtdt_desc.gtdt_end);
+}
+
+static __init void *next_platform_timer(void *platform_timer)
+{
+ struct acpi_gtdt_header *gh = platform_timer;
- return NULL;
+ return platform_timer + gh->length;
}
#define for_each_platform_timer(_g) \
- for (_g = acpi_gtdt_desc.platform_timer; _g; \
+ for (_g = acpi_gtdt_desc.platform_timer; platform_timer_valid(_g);\
_g = next_platform_timer(_g))
static inline bool is_timer_block(void *platform_timer)
@@ -157,6 +163,7 @@ int __init acpi_gtdt_init(struct acpi_table_header *table,
{
void *platform_timer;
struct acpi_table_gtdt *gtdt;
+ int cnt = 0;
gtdt = container_of(table, struct acpi_table_gtdt, header);
acpi_gtdt_desc.gtdt = gtdt;
@@ -176,12 +183,16 @@ int __init acpi_gtdt_init(struct acpi_table_header *table,
return 0;
}
- platform_timer = (void *)gtdt + gtdt->platform_timer_offset;
- if (platform_timer < (void *)table + sizeof(struct acpi_table_gtdt)) {
+ acpi_gtdt_desc.platform_timer = (void *)gtdt + gtdt->platform_timer_offset;
+ for_each_platform_timer(platform_timer)
+ cnt++;
+
+ if (cnt != gtdt->platform_timer_count) {
+ acpi_gtdt_desc.platform_timer = NULL;
pr_err(FW_BUG "invalid timer data.\n");
return -EINVAL;
}
- acpi_gtdt_desc.platform_timer = platform_timer;
+
if (platform_timer_count)
*platform_timer_count = gtdt->platform_timer_count;
@@ -283,7 +294,7 @@ error:
if (frame->virt_irq > 0)
acpi_unregister_gsi(gtdt_frame->virtual_timer_interrupt);
frame->virt_irq = 0;
- } while (i-- >= 0 && gtdt_frame--);
+ } while (i-- > 0 && gtdt_frame--);
return -EINVAL;
}
@@ -352,7 +363,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
}
irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
- res[2] = (struct resource)DEFINE_RES_IRQ(irq);
+ res[2] = DEFINE_RES_IRQ(irq);
if (irq <= 0) {
pr_warn("failed to map the Watchdog interrupt.\n");
nr_res--;
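
Note on the gtdt.c hunks above: the NULL-returning iterator is replaced by a separate platform_timer_valid() check, so for_each_platform_timer() now stops at the first entry that lies outside the table or has a zero length, and acpi_gtdt_init() cross-checks the number of entries walked against gtdt->platform_timer_count before accepting the table. Below is a minimal user-space sketch of the same bounds-checked walk over length-prefixed records; the record layout is an illustrative stand-in, not the ACPI GTDT structures.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct acpi_gtdt_header: each record
 * starts with its type and its total length in bytes. */
struct rec_header {
	uint8_t type;
	uint8_t length;
};

/* A record is valid only if it lies fully inside [start, end) and
 * has a non-zero length - mirroring platform_timer_valid(). */
static int rec_valid(const uint8_t *rec, const uint8_t *start,
		     const uint8_t *end)
{
	const struct rec_header *h = (const struct rec_header *)rec;

	return rec >= start && rec < end &&
	       h->length != 0 && rec + h->length <= end;
}

int main(void)
{
	/* Two whole records (lengths 4 and 3) followed by a record
	 * whose claimed length runs past the end of the table. */
	uint8_t table[] = { 1, 4, 0xaa, 0xbb, 2, 3, 0xcc, 3, 9 };
	const uint8_t *end = table + sizeof(table);
	const uint8_t *rec;
	int cnt = 0;

	for (rec = table; rec_valid(rec, table, end);
	     rec += ((const struct rec_header *)rec)->length)
		cnt++;

	printf("%d valid records\n", cnt);	/* prints: 2 valid records */
	return 0;
}

As in the patch, a walked count that disagrees with what the table header advertises is the signal to reject the whole table rather than trust a truncated walk.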
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 45f63b09828a..2d3d3d67b4d9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1676,7 +1676,7 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
/*
* If number of MSIs is less than number of ports then Sharing Last
* Message mode could be enforced. In this case assume that advantage
- * of multipe MSIs is negated and use single MSI mode instead.
+ * of multiple MSIs is negated and use single MSI mode instead.
*/
if (n_ports > 1) {
nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 2f16524c2526..ef569eae4ce4 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -571,7 +571,7 @@ static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume);
static struct platform_driver brcm_ahci_driver = {
.probe = brcm_ahci_probe,
- .remove_new = brcm_ahci_remove,
+ .remove = brcm_ahci_remove,
.shutdown = brcm_ahci_shutdown,
.driver = {
.name = DRV_NAME,
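
The .remove_new to .remove renames repeated through the rest of this series are mechanical: the .remove member of struct platform_driver now has the void-returning signature that .remove_new was introduced to stage, so drivers can go back to the canonical member name. A sketch of the pattern on a hypothetical driver (the toy_* names are illustrative, not from this diff):

#include <linux/module.h>
#include <linux/platform_device.h>

static int toy_probe(struct platform_device *pdev)
{
	return 0;
}

/* The remove callback returns void now; errors at remove time
 * cannot be reported, only logged. */
static void toy_remove(struct platform_device *pdev)
{
}

static struct platform_driver toy_driver = {
	.probe  = toy_probe,
	.remove = toy_remove,	/* was: .remove_new = toy_remove, */
	.driver = {
		.name = "toy",
	},
};
module_platform_driver(toy_driver);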
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index 11a2c199a7c2..1ec35778903d 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -402,7 +402,7 @@ MODULE_DEVICE_TABLE(of, ceva_ahci_of_match);
static struct platform_driver ceva_ahci_driver = {
.probe = ceva_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ceva_ahci_of_match,
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 55a6627d5450..ca0924dc5bd2 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -238,7 +238,7 @@ MODULE_DEVICE_TABLE(of, ahci_da850_of_match);
static struct platform_driver ahci_da850_driver = {
.probe = ahci_da850_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_da850_of_match,
diff --git a/drivers/ata/ahci_dm816.c b/drivers/ata/ahci_dm816.c
index 4cb70064fb99..b08547b877a1 100644
--- a/drivers/ata/ahci_dm816.c
+++ b/drivers/ata/ahci_dm816.c
@@ -182,7 +182,7 @@ MODULE_DEVICE_TABLE(of, ahci_dm816_of_match);
static struct platform_driver ahci_dm816_driver = {
.probe = ahci_dm816_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = AHCI_DM816_DRV_NAME,
.of_match_table = ahci_dm816_of_match,
diff --git a/drivers/ata/ahci_dwc.c b/drivers/ata/ahci_dwc.c
index ed263de3fd70..aec6d793f51a 100644
--- a/drivers/ata/ahci_dwc.c
+++ b/drivers/ata/ahci_dwc.c
@@ -478,7 +478,7 @@ MODULE_DEVICE_TABLE(of, ahci_dwc_of_match);
static struct platform_driver ahci_dwc_driver = {
.probe = ahci_dwc_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.shutdown = ahci_platform_shutdown,
.driver = {
.name = DRV_NAME,
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 6f955e9105e8..f01f08048f97 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -511,7 +511,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
if (imxpriv->type == AHCI_IMX6Q || imxpriv->type == AHCI_IMX6QP) {
/*
- * set PHY Paremeters, two steps to configure the GPR13,
+ * set PHY Parameters, two steps to configure the GPR13,
* one write for rest of parameters, mask of first write
* is 0x07ffffff, and the other one write for setting
* the mpll_clk_en.
@@ -1027,7 +1027,7 @@ static SIMPLE_DEV_PM_OPS(ahci_imx_pm_ops, imx_ahci_suspend, imx_ahci_resume);
static struct platform_driver imx_ahci_driver = {
.probe = imx_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = imx_ahci_of_match,
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
index adc851cd5578..7295b9066ae2 100644
--- a/drivers/ata/ahci_mtk.c
+++ b/drivers/ata/ahci_mtk.c
@@ -174,7 +174,7 @@ MODULE_DEVICE_TABLE(of, ahci_of_match);
static struct platform_driver mtk_ahci_driver = {
.probe = mtk_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index f3187351e8a6..8744dae41612 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -245,7 +245,7 @@ MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
static struct platform_driver ahci_mvebu_driver = {
.probe = ahci_mvebu_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.suspend = ahci_mvebu_suspend,
.resume = ahci_mvebu_resume,
.driver = {
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 81fc63f6b008..c18054333f7c 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -96,7 +96,7 @@ MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
static struct platform_driver ahci_driver = {
.probe = ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.shutdown = ahci_platform_shutdown,
.driver = {
.name = DRV_NAME,
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index b1a4e57578e2..30e39885b64e 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -357,7 +357,7 @@ static SIMPLE_DEV_PM_OPS(ahci_qoriq_pm_ops, ahci_platform_suspend,
static struct platform_driver ahci_qoriq_driver = {
.probe = ahci_qoriq_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_qoriq_of_match,
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
index 59f97aa7ac75..3f16c1678402 100644
--- a/drivers/ata/ahci_seattle.c
+++ b/drivers/ata/ahci_seattle.c
@@ -185,7 +185,7 @@ MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
static struct platform_driver ahci_seattle_driver = {
.probe = ahci_seattle_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.acpi_match_table = ahci_acpi_match,
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 79a8b0aa37bf..6b9b4a1dfa15 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -238,7 +238,7 @@ static struct platform_driver st_ahci_driver = {
.of_match_table = st_ahci_match,
},
.probe = st_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
};
module_platform_driver(st_ahci_driver);
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index 58b2683954dd..5d4584570ae0 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -292,7 +292,7 @@ MODULE_DEVICE_TABLE(of, ahci_sunxi_of_match);
static struct platform_driver ahci_sunxi_driver = {
.probe = ahci_sunxi_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_sunxi_of_match,
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
index 8703c2a4658b..44584eed6374 100644
--- a/drivers/ata/ahci_tegra.c
+++ b/drivers/ata/ahci_tegra.c
@@ -608,7 +608,7 @@ deinit_controller:
static struct platform_driver tegra_ahci_driver = {
.probe = tegra_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = tegra_ahci_of_match,
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 81a1d838c0fc..dfbd8c53abcb 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -534,7 +534,7 @@ softreset_retry:
/**
* xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
- * @host: Host that recieved the irq
+ * @host: Host that received the irq
* @irq_masked: HOST_IRQ_STAT value
*
* For hardware with broken edge trigger latch
@@ -859,7 +859,7 @@ disable_resources:
static struct platform_driver xgene_ahci_driver = {
.probe = xgene_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = xgene_ahci_of_match,
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index d36e71f475ab..b7f0bf795521 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -86,7 +86,7 @@ static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
* @dev: ATA device ACPI event occurred (can be NULL)
* @event: ACPI event which occurred
*
- * All ACPI bay / device realted events end up in this function. If
+ * All ACPI bay / device related events end up in this function. If
* the event is port-wide @dev is NULL. If the event is specific to a
* device, @dev points to it.
*
@@ -832,7 +832,7 @@ void ata_acpi_on_resume(struct ata_port *ap)
dev->flags |= ATA_DFLAG_ACPI_PENDING;
}
} else {
- /* SATA _GTF needs to be evaulated after _SDD and
+ /* SATA _GTF needs to be evaluated after _SDD and
* there's no reason to evaluate IDE _GTF early
* without _STM. Clear cache and schedule _GTF.
*/
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index f915e3df57a9..2ce5befd2242 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1334,17 +1334,8 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
*/
static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
- u64 lba = 0;
- u32 len;
-
- lba |= ((u64)(cdb[1] & 0x1f)) << 16;
- lba |= ((u64)cdb[2]) << 8;
- lba |= ((u64)cdb[3]);
-
- len = cdb[4];
-
- *plba = lba;
- *plen = len;
+ *plba = get_unaligned_be24(&cdb[1]) & 0x1fffff;
+ *plen = cdb[4];
}
/**
@@ -1781,15 +1772,10 @@ defer:
return SCSI_MLQUEUE_HOST_BUSY;
}
-struct ata_scsi_args {
- struct ata_device *dev;
- u16 *id;
- struct scsi_cmnd *cmd;
-};
-
/**
* ata_scsi_rbuf_fill - wrapper for SCSI command simulators
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @actor: Callback hook for desired SCSI command simulator
*
* Takes care of the hard work of simulating a SCSI command...
@@ -1802,30 +1788,32 @@ struct ata_scsi_args {
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
- unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
+static void ata_scsi_rbuf_fill(struct ata_device *dev, struct scsi_cmnd *cmd,
+ unsigned int (*actor)(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf))
{
- unsigned int rc;
- struct scsi_cmnd *cmd = args->cmd;
unsigned long flags;
+ unsigned int len;
spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
- rc = actor(args, ata_scsi_rbuf);
- if (rc == 0)
+ len = actor(dev, cmd, ata_scsi_rbuf);
+ if (len) {
sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
+ cmd->result = SAM_STAT_GOOD;
+ if (scsi_bufflen(cmd) > len)
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - len);
+ }
spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
-
- if (rc == 0)
- cmd->result = SAM_STAT_GOOD;
}
/**
- * ata_scsiop_inq_std - Simulate INQUIRY command
- * @args: device IDENTIFY data / SCSI command of interest.
+ * ata_scsiop_inq_std - Simulate standard INQUIRY command
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns standard device identification data associated
@@ -1834,7 +1822,8 @@ static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_std(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
static const u8 versions[] = {
0x00,
@@ -1875,40 +1864,45 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
* Set the SCSI Removable Media Bit (RMB) if the ATA removable media
* device bit (obsolete since ATA-8 ACS) is set.
*/
- if (ata_id_removable(args->id))
+ if (ata_id_removable(dev->id))
hdr[1] |= (1 << 7);
- if (args->dev->class == ATA_DEV_ZAC) {
+ if (dev->class == ATA_DEV_ZAC) {
hdr[0] = TYPE_ZBC;
hdr[2] = 0x7; /* claim SPC-5 version compatibility */
}
- if (args->dev->flags & ATA_DFLAG_CDL)
+ if (dev->flags & ATA_DFLAG_CDL)
hdr[2] = 0xd; /* claim SPC-6 version compatibility */
memcpy(rbuf, hdr, sizeof(hdr));
memcpy(&rbuf[8], "ATA ", 8);
- ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
+ ata_id_string(dev->id, &rbuf[16], ATA_ID_PROD, 16);
/* From SAT, use last 2 words from fw rev unless they are spaces */
- ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4);
+ ata_id_string(dev->id, &rbuf[32], ATA_ID_FW_REV + 2, 4);
if (strncmp(&rbuf[32], "    ", 4) == 0)
- ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
+ ata_id_string(dev->id, &rbuf[32], ATA_ID_FW_REV, 4);
if (rbuf[32] == 0 || rbuf[32] == ' ')
memcpy(&rbuf[32], "n/a ", 4);
- if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC)
+ if (ata_id_zoned_cap(dev->id) || dev->class == ATA_DEV_ZAC)
memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc));
else
memcpy(rbuf + 58, versions, sizeof(versions));
- return 0;
+ /*
+ * Include all 8 possible version descriptors, even if not all of
+ * them are populated.
+ */
+ return 96;
}
/**
* ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns list of inquiry VPD pages available.
@@ -1916,7 +1910,8 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_00(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
int i, num_pages = 0;
static const u8 pages[] = {
@@ -1933,18 +1928,20 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
for (i = 0; i < sizeof(pages); i++) {
if (pages[i] == 0xb6 &&
- !(args->dev->flags & ATA_DFLAG_ZAC))
+ !(dev->flags & ATA_DFLAG_ZAC))
continue;
rbuf[num_pages + 4] = pages[i];
num_pages++;
}
rbuf[3] = num_pages; /* number of supported VPD pages */
- return 0;
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
* ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns ATA device serial number.
@@ -1952,7 +1949,8 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_80(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
static const u8 hdr[] = {
0,
@@ -1962,14 +1960,16 @@ static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
};
memcpy(rbuf, hdr, sizeof(hdr));
- ata_id_string(args->id, (unsigned char *) &rbuf[4],
+ ata_id_string(dev->id, (unsigned char *) &rbuf[4],
ATA_ID_SERNO, ATA_ID_SERNO_LEN);
- return 0;
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
* ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields two logical unit device identification designators:
@@ -1980,7 +1980,8 @@ static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_83(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
const int sat_model_serial_desc_len = 68;
int num;
@@ -1992,7 +1993,7 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
rbuf[num + 0] = 2;
rbuf[num + 3] = ATA_ID_SERNO_LEN;
num += 4;
- ata_id_string(args->id, (unsigned char *) rbuf + num,
+ ata_id_string(dev->id, (unsigned char *) rbuf + num,
ATA_ID_SERNO, ATA_ID_SERNO_LEN);
num += ATA_ID_SERNO_LEN;
@@ -2004,31 +2005,33 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
num += 4;
memcpy(rbuf + num, "ATA ", 8);
num += 8;
- ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
+ ata_id_string(dev->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
ATA_ID_PROD_LEN);
num += ATA_ID_PROD_LEN;
- ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
+ ata_id_string(dev->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
ATA_ID_SERNO_LEN);
num += ATA_ID_SERNO_LEN;
- if (ata_id_has_wwn(args->id)) {
+ if (ata_id_has_wwn(dev->id)) {
/* SAT defined lu world wide name */
/* piv=0, assoc=lu, code_set=binary, designator=NAA */
rbuf[num + 0] = 1;
rbuf[num + 1] = 3;
rbuf[num + 3] = ATA_ID_WWN_LEN;
num += 4;
- ata_id_string(args->id, (unsigned char *) rbuf + num,
+ ata_id_string(dev->id, (unsigned char *) rbuf + num,
ATA_ID_WWN, ATA_ID_WWN_LEN);
num += ATA_ID_WWN_LEN;
}
rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
- return 0;
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
* ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields SAT-specified ATA VPD page.
@@ -2036,7 +2039,8 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_89(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
rbuf[1] = 0x89; /* our page code */
rbuf[2] = (0x238 >> 8); /* page size fixed at 238h */
@@ -2057,13 +2061,25 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
rbuf[56] = ATA_CMD_ID_ATA;
- memcpy(&rbuf[60], &args->id[0], 512);
- return 0;
+ memcpy(&rbuf[60], &dev->id[0], 512);
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b0 - Simulate INQUIRY VPD page B0, Block Limits
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B0h (Block Limits).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b0(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_device *dev = args->dev;
u16 min_io_sectors;
rbuf[1] = 0xb0;
@@ -2076,7 +2092,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
* logical than physical sector size we need to figure out what the
* latter is.
*/
- min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id);
+ min_io_sectors = 1 << ata_id_log2_per_physical_sector(dev->id);
put_unaligned_be16(min_io_sectors, &rbuf[6]);
/*
@@ -2088,7 +2104,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
* that we support some form of unmap - in this case via WRITE SAME
* with the unmap bit set.
*/
- if (ata_id_has_trim(args->id)) {
+ if (ata_id_has_trim(dev->id)) {
u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;
if (dev->quirks & ATA_QUIRK_MAX_TRIM_128M)
@@ -2098,14 +2114,27 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
put_unaligned_be32(1, &rbuf[28]);
}
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b1 - Simulate INQUIRY VPD page B1, Block Device
+ * Characteristics
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B1h (Block Device Characteristics).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b1(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- int form_factor = ata_id_form_factor(args->id);
- int media_rotation_rate = ata_id_rotation_rate(args->id);
- u8 zoned = ata_id_zoned_cap(args->id);
+ int form_factor = ata_id_form_factor(dev->id);
+ int media_rotation_rate = ata_id_rotation_rate(dev->id);
+ u8 zoned = ata_id_zoned_cap(dev->id);
rbuf[1] = 0xb1;
rbuf[3] = 0x3c;
@@ -2115,21 +2144,52 @@ static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
if (zoned)
rbuf[8] = (zoned << 4);
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b2 - Simulate INQUIRY VPD page B2, Logical Block
+ * Provisioning
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B2h (Logical Block Provisioning).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b2(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
/* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */
rbuf[1] = 0xb2;
rbuf[3] = 0x4;
rbuf[5] = 1 << 6; /* TPWS */
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b6 - Simulate INQUIRY VPD page B6, Zoned Block Device
+ * Characteristics
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B6h (Zoned Block Device Characteristics).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b6(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
+ if (!(dev->flags & ATA_DFLAG_ZAC)) {
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 0;
+ }
+
/*
* zbc-r05 SCSI Zoned Block device characteristics VPD page
*/
@@ -2139,21 +2199,39 @@ static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
/*
* URSWRZ bit is only meaningful for host-managed ZAC drives
*/
- if (args->dev->zac_zoned_cap & 1)
+ if (dev->zac_zoned_cap & 1)
rbuf[4] |= 1;
- put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]);
- put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]);
- put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]);
+ put_unaligned_be32(dev->zac_zones_optimal_open, &rbuf[8]);
+ put_unaligned_be32(dev->zac_zones_optimal_nonseq, &rbuf[12]);
+ put_unaligned_be32(dev->zac_zones_max_open, &rbuf[16]);
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b9 - Simulate INQUIRY VPD page B9, Concurrent Positioning
+ * Ranges
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B9h (Concurrent Positioning Ranges).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b9(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_cpr_log *cpr_log = args->dev->cpr_log;
+ struct ata_cpr_log *cpr_log = dev->cpr_log;
u8 *desc = &rbuf[64];
int i;
+ if (!cpr_log) {
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 0;
+ }
+
/* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
rbuf[1] = 0xb9;
put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]);
@@ -2165,7 +2243,58 @@ static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]);
}
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
+}
+
+/**
+ * ata_scsiop_inquiry - Simulate INQUIRY command
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Returns data associated with an INQUIRY command output.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inquiry(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
+{
+ const u8 *scsicmd = cmd->cmnd;
+
+ /* is CmdDt set? */
+ if (scsicmd[1] & 2) {
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 0;
+ }
+
+ /* Is EVPD clear? */
+ if ((scsicmd[1] & 1) == 0)
+ return ata_scsiop_inq_std(dev, cmd, rbuf);
+
+ switch (scsicmd[2]) {
+ case 0x00:
+ return ata_scsiop_inq_00(dev, cmd, rbuf);
+ case 0x80:
+ return ata_scsiop_inq_80(dev, cmd, rbuf);
+ case 0x83:
+ return ata_scsiop_inq_83(dev, cmd, rbuf);
+ case 0x89:
+ return ata_scsiop_inq_89(dev, cmd, rbuf);
+ case 0xb0:
+ return ata_scsiop_inq_b0(dev, cmd, rbuf);
+ case 0xb1:
+ return ata_scsiop_inq_b1(dev, cmd, rbuf);
+ case 0xb2:
+ return ata_scsiop_inq_b2(dev, cmd, rbuf);
+ case 0xb6:
+ return ata_scsiop_inq_b6(dev, cmd, rbuf);
+ case 0xb9:
+ return ata_scsiop_inq_b9(dev, cmd, rbuf);
+ default:
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 0;
+ }
}
/**
@@ -2388,7 +2517,8 @@ static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
/**
* ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate MODE SENSE commands. Assume this is invoked for direct
@@ -2398,10 +2528,10 @@ static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_mode_sense(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_device *dev = args->dev;
- u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
+ u8 *scsicmd = cmd->cmnd, *p = rbuf;
static const u8 sat_blk_desc[] = {
0, 0, 0, 0, /* number of blocks: sat unspecified */
0,
@@ -2466,17 +2596,17 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
break;
case CACHE_MPAGE:
- p += ata_msense_caching(args->id, p, page_control == 1);
+ p += ata_msense_caching(dev->id, p, page_control == 1);
break;
case CONTROL_MPAGE:
- p += ata_msense_control(args->dev, p, spg, page_control == 1);
+ p += ata_msense_control(dev, p, spg, page_control == 1);
break;
case ALL_MPAGES:
p += ata_msense_rw_recovery(p, page_control == 1);
- p += ata_msense_caching(args->id, p, page_control == 1);
- p += ata_msense_control(args->dev, p, spg, page_control == 1);
+ p += ata_msense_caching(dev->id, p, page_control == 1);
+ p += ata_msense_control(dev, p, spg, page_control == 1);
break;
default: /* invalid page code */
@@ -2494,29 +2624,33 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
rbuf[3] = sizeof(sat_blk_desc);
memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
}
- } else {
- put_unaligned_be16(p - rbuf - 2, &rbuf[0]);
- rbuf[3] |= dpofua;
- if (ebd) {
- rbuf[7] = sizeof(sat_blk_desc);
- memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
- }
+
+ return rbuf[0] + 1;
}
- return 0;
+
+ put_unaligned_be16(p - rbuf - 2, &rbuf[0]);
+ rbuf[3] |= dpofua;
+ if (ebd) {
+ rbuf[7] = sizeof(sat_blk_desc);
+ memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
+ }
+
+ return get_unaligned_be16(&rbuf[0]) + 2;
invalid_fld:
- ata_scsi_set_invalid_field(dev, args->cmd, fp, bp);
- return 1;
+ ata_scsi_set_invalid_field(dev, cmd, fp, bp);
+ return 0;
saving_not_supp:
- ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
+ ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x39, 0x0);
/* "Saving parameters not supported" */
- return 1;
+ return 0;
}
/**
* ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate READ CAPACITY commands.
@@ -2524,9 +2658,10 @@ saving_not_supp:
* LOCKING:
* None.
*/
-static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_read_cap(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_device *dev = args->dev;
+ u8 *scsicmd = cmd->cmnd;
u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */
u32 sector_size; /* physical sector size in bytes */
u8 log2_per_phys;
@@ -2536,7 +2671,7 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);
- if (args->cmd->cmnd[0] == READ_CAPACITY) {
+ if (scsicmd[0] == READ_CAPACITY) {
if (last_lba >= 0xffffffffULL)
last_lba = 0xffffffff;
@@ -2551,48 +2686,59 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[5] = sector_size >> (8 * 2);
rbuf[6] = sector_size >> (8 * 1);
rbuf[7] = sector_size;
- } else {
- /* sector count, 64-bit */
- rbuf[0] = last_lba >> (8 * 7);
- rbuf[1] = last_lba >> (8 * 6);
- rbuf[2] = last_lba >> (8 * 5);
- rbuf[3] = last_lba >> (8 * 4);
- rbuf[4] = last_lba >> (8 * 3);
- rbuf[5] = last_lba >> (8 * 2);
- rbuf[6] = last_lba >> (8 * 1);
- rbuf[7] = last_lba;
- /* sector size */
- rbuf[ 8] = sector_size >> (8 * 3);
- rbuf[ 9] = sector_size >> (8 * 2);
- rbuf[10] = sector_size >> (8 * 1);
- rbuf[11] = sector_size;
-
- rbuf[12] = 0;
- rbuf[13] = log2_per_phys;
- rbuf[14] = (lowest_aligned >> 8) & 0x3f;
- rbuf[15] = lowest_aligned;
-
- if (ata_id_has_trim(args->id) &&
- !(dev->quirks & ATA_QUIRK_NOTRIM)) {
- rbuf[14] |= 0x80; /* LBPME */
-
- if (ata_id_has_zero_after_trim(args->id) &&
- dev->quirks & ATA_QUIRK_ZERO_AFTER_TRIM) {
- ata_dev_info(dev, "Enabling discard_zeroes_data\n");
- rbuf[14] |= 0x40; /* LBPRZ */
- }
+ return 8;
+ }
+
+ /*
+ * READ CAPACITY 16 command is defined as a service action
+ * (SERVICE_ACTION_IN_16 command).
+ */
+ if (scsicmd[0] != SERVICE_ACTION_IN_16 ||
+ (scsicmd[1] & 0x1f) != SAI_READ_CAPACITY_16) {
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 0;
+ }
+
+ /* sector count, 64-bit */
+ rbuf[0] = last_lba >> (8 * 7);
+ rbuf[1] = last_lba >> (8 * 6);
+ rbuf[2] = last_lba >> (8 * 5);
+ rbuf[3] = last_lba >> (8 * 4);
+ rbuf[4] = last_lba >> (8 * 3);
+ rbuf[5] = last_lba >> (8 * 2);
+ rbuf[6] = last_lba >> (8 * 1);
+ rbuf[7] = last_lba;
+
+ /* sector size */
+ rbuf[ 8] = sector_size >> (8 * 3);
+ rbuf[ 9] = sector_size >> (8 * 2);
+ rbuf[10] = sector_size >> (8 * 1);
+ rbuf[11] = sector_size;
+
+ if (ata_id_zoned_cap(dev->id) || dev->class == ATA_DEV_ZAC)
+ rbuf[12] = (1 << 4); /* RC_BASIS */
+ rbuf[13] = log2_per_phys;
+ rbuf[14] = (lowest_aligned >> 8) & 0x3f;
+ rbuf[15] = lowest_aligned;
+
+ if (ata_id_has_trim(dev->id) && !(dev->quirks & ATA_QUIRK_NOTRIM)) {
+ rbuf[14] |= 0x80; /* LBPME */
+
+ if (ata_id_has_zero_after_trim(dev->id) &&
+ dev->quirks & ATA_QUIRK_ZERO_AFTER_TRIM) {
+ ata_dev_info(dev, "Enabling discard_zeroes_data\n");
+ rbuf[14] |= 0x40; /* LBPRZ */
}
- if (ata_id_zoned_cap(args->id) ||
- args->dev->class == ATA_DEV_ZAC)
- rbuf[12] = (1 << 4); /* RC_BASIS */
}
- return 0;
+
+ return 16;
}
/**
* ata_scsiop_report_luns - Simulate REPORT LUNS command
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate REPORT LUNS command.
@@ -2600,11 +2746,12 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_report_luns(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
- return 0;
+ return 16;
}
/*
@@ -3312,7 +3459,8 @@ invalid_opcode:
/**
* ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
- * @args: device MAINTENANCE_IN data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields a subset to satisfy scsi_report_opcode()
@@ -3320,17 +3468,21 @@ invalid_opcode:
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_maint_in(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_device *dev = args->dev;
- u8 *cdb = args->cmd->cmnd;
+ u8 *cdb = cmd->cmnd;
u8 supported = 0, cdlp = 0, rwcdlp = 0;
- unsigned int err = 0;
+
+ if ((cdb[1] & 0x1f) != MI_REPORT_SUPPORTED_OPERATION_CODES) {
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 0;
+ }
if (cdb[2] != 1 && cdb[2] != 3) {
ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
- err = 2;
- goto out;
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 0;
}
switch (cdb[3]) {
@@ -3398,11 +3550,12 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
default:
break;
}
-out:
+
/* One command format */
rbuf[0] = rwcdlp;
rbuf[1] = cdlp | supported;
- return err;
+
+ return 4;
}
/**
@@ -4262,78 +4415,26 @@ EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
{
- struct ata_scsi_args args;
const u8 *scsicmd = cmd->cmnd;
u8 tmp8;
- args.dev = dev;
- args.id = dev->id;
- args.cmd = cmd;
-
switch(scsicmd[0]) {
case INQUIRY:
- if (scsicmd[1] & 2) /* is CmdDt set? */
- ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
- else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
- else switch (scsicmd[2]) {
- case 0x00:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
- break;
- case 0x80:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
- break;
- case 0x83:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
- break;
- case 0x89:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
- break;
- case 0xb0:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b0);
- break;
- case 0xb1:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
- break;
- case 0xb2:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
- break;
- case 0xb6:
- if (dev->flags & ATA_DFLAG_ZAC)
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
- else
- ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
- break;
- case 0xb9:
- if (dev->cpr_log)
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b9);
- else
- ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
- break;
- default:
- ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
- break;
- }
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_inquiry);
break;
case MODE_SENSE:
case MODE_SENSE_10:
- ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_mode_sense);
break;
case READ_CAPACITY:
- ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
- break;
-
case SERVICE_ACTION_IN_16:
- if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
- ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
- else
- ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_read_cap);
break;
case REPORT_LUNS:
- ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_report_luns);
break;
case REQUEST_SENSE:
@@ -4361,10 +4462,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
break;
case MAINTENANCE_IN:
- if ((scsicmd[1] & 0x1f) == MI_REPORT_SUPPORTED_OPERATION_CODES)
- ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
- else
- ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_maint_in);
break;
/* all other commands */
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index d0c6924d25b6..514d549286b5 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -964,7 +964,7 @@ MODULE_DEVICE_TABLE(of, arasan_cf_id_table);
static struct platform_driver arasan_cf_driver = {
.probe = arasan_cf_probe,
- .remove_new = arasan_cf_remove,
+ .remove = arasan_cf_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &arasan_cf_pm_ops,
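The .remove_new to .remove renames here (and in the sibling drivers below) track the platform bus change that made the remove callback return void; .remove_new was only a transitional alias for the void-returning variant. A minimal sketch of the resulting driver shape, with illustrative names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;	/* acquire resources, register devices */
}

/* remove callbacks now return void: nothing useful can be done on error */
static void example_remove(struct platform_device *pdev)
{
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,	/* formerly .remove_new */
	.driver	= {
		.name = "example",
	},
};
module_platform_driver(example_driver);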
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index f3f5b2b0ecc9..e8cda988feb5 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -1015,7 +1015,7 @@ static struct platform_driver ep93xx_pata_platform_driver = {
.of_match_table = ep93xx_pata_of_ids,
},
.probe = ep93xx_pata_probe,
- .remove_new = ep93xx_pata_remove,
+ .remove = ep93xx_pata_remove,
};
module_platform_driver(ep93xx_pata_platform_driver);
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 18ceefd176df..334c4eea41ec 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -225,8 +225,8 @@ static void pata_falcon_remove_one(struct platform_device *pdev)
static struct platform_driver pata_falcon_driver = {
.probe = pata_falcon_init_one,
- .remove_new = pata_falcon_remove_one,
- .driver = {
+ .remove = pata_falcon_remove_one,
+ .driver = {
.name = "atari-falcon-ide",
},
};
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 73a9a5109238..c3a8384c3e04 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -557,7 +557,7 @@ static struct platform_driver pata_ftide010_driver = {
.of_match_table = pata_ftide010_of_match,
},
.probe = pata_ftide010_probe,
- .remove_new = pata_ftide010_remove,
+ .remove = pata_ftide010_remove,
};
module_platform_driver(pata_ftide010_driver);
diff --git a/drivers/ata/pata_gayle.c b/drivers/ata/pata_gayle.c
index 94df60ac2307..8602c3889948 100644
--- a/drivers/ata/pata_gayle.c
+++ b/drivers/ata/pata_gayle.c
@@ -202,9 +202,9 @@ static void pata_gayle_remove_one(struct platform_device *pdev)
static struct platform_driver pata_gayle_driver = {
.probe = pata_gayle_init_one,
- .remove_new = pata_gayle_remove_one,
- .driver = {
- .name = "amiga-gayle-ide",
+ .remove = pata_gayle_remove_one,
+ .driver = {
+ .name = "amiga-gayle-ide",
},
};
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index d0aa8fc929b4..b37682b0578f 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -249,7 +249,7 @@ MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);
static struct platform_driver pata_imx_driver = {
.probe = pata_imx_probe,
- .remove_new = pata_imx_remove,
+ .remove = pata_imx_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = imx_pata_dt_ids,
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index b7ac56103c8a..9cbe2132ce59 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -81,7 +81,7 @@ static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev)
int control = 0;
/*
- * See Intel Document 298600-004 for the timing programing rules
+ * See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. The 8213 is a clone so very similar
*/
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 8a9ee828478f..80f6a91acf6f 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -298,7 +298,7 @@ static struct platform_driver ixp4xx_pata_platform_driver = {
.of_match_table = ixp4xx_pata_of_match,
},
.probe = ixp4xx_pata_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
};
module_platform_driver(ixp4xx_pata_platform_driver);
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 3f9258677915..210a63283f62 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -854,7 +854,7 @@ static const struct of_device_id mpc52xx_ata_of_match[] = {
static struct platform_driver mpc52xx_ata_of_platform_driver = {
.probe = mpc52xx_ata_probe,
- .remove_new = mpc52xx_ata_remove,
+ .remove = mpc52xx_ata_remove,
#ifdef CONFIG_PM_SLEEP
.suspend = mpc52xx_ata_suspend,
.resume = mpc52xx_ata_resume,
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 0bb9607e7348..dce24806a052 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -183,7 +183,7 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
reg_tim.s.ale = 0;
/* Not used */
reg_tim.s.page = 0;
- /* Time after IORDY to coninue to assert the data */
+ /* Time after IORDY to continue to assert the data */
reg_tim.s.wait = 0;
/* Time to wait to complete the cycle. */
reg_tim.s.pause = pause;
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 4956f0f5b93f..178b28eff170 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -89,7 +89,7 @@ static struct platform_driver pata_of_platform_driver = {
.of_match_table = pata_of_platform_match,
},
.probe = pata_of_platform_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
};
module_platform_driver(pata_of_platform_driver);
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index dca82d92b004..3d01b7000e41 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -70,7 +70,7 @@ static void oldpiix_set_piomode (struct ata_port *ap, struct ata_device *adev)
int control = 0;
/*
- * See Intel Document 298600-004 for the timing programing rules
+ * See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. Note that the early PIIX does not have the slave
* timing port at 0x44.
*/
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 232c3dad7ee8..87479bc893b2 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -223,7 +223,7 @@ static int pata_platform_probe(struct platform_device *pdev)
static struct platform_driver pata_platform_driver = {
.probe = pata_platform_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 538bd3423d85..434f380114af 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -306,7 +306,7 @@ static void pxa_ata_remove(struct platform_device *pdev)
static struct platform_driver pxa_ata_driver = {
.probe = pxa_ata_probe,
- .remove_new = pxa_ata_remove,
+ .remove = pxa_ata_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 84b001097093..40ef8072c159 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -45,7 +45,7 @@ static void radisys_set_piomode (struct ata_port *ap, struct ata_device *adev)
int control = 0;
/*
- * See Intel Document 298600-004 for the timing programing rules
+ * See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. Note that the early PIIX does not have the slave
* timing port at 0x44. The Radisys is a relative of the PIIX
* but not the same so be careful.
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 0fa253ad7c93..fd81e75c9402 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -164,7 +164,7 @@ static void rb532_pata_driver_remove(struct platform_device *pdev)
static struct platform_driver rb532_pata_platform_driver = {
.probe = rb532_pata_driver_probe,
- .remove_new = rb532_pata_driver_remove,
+ .remove = rb532_pata_driver_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 52f5168e4db5..6e1dd0d9c035 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1240,7 +1240,7 @@ static struct platform_driver sata_dwc_driver = {
.of_match_table = sata_dwc_match,
},
.probe = sata_dwc_probe,
- .remove_new = sata_dwc_remove,
+ .remove = sata_dwc_remove,
};
module_platform_driver(sata_dwc_driver);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 01aa05f4c3f5..87e91a937a44 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1589,7 +1589,7 @@ static struct platform_driver fsl_sata_driver = {
.of_match_table = fsl_sata_match,
},
.probe = sata_fsl_probe,
- .remove_new = sata_fsl_remove,
+ .remove = sata_fsl_remove,
#ifdef CONFIG_PM_SLEEP
.suspend = sata_fsl_suspend,
.resume = sata_fsl_resume,
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index f574e3c3f5b4..d040799bf9cb 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -425,7 +425,7 @@ static struct platform_driver gemini_sata_driver = {
.of_match_table = gemini_sata_of_match,
},
.probe = gemini_sata_probe,
- .remove_new = gemini_sata_remove,
+ .remove = gemini_sata_remove,
};
module_platform_driver(gemini_sata_driver);
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 63ef7bb073ce..b1b40e9551de 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -614,12 +614,12 @@ static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
ahci_highbank_suspend, ahci_highbank_resume);
static struct platform_driver ahci_highbank_driver = {
- .remove_new = ata_platform_remove_one,
- .driver = {
- .name = "highbank-ahci",
- .of_match_table = ahci_of_match,
- .pm = &ahci_highbank_pm_ops,
- },
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = "highbank-ahci",
+ .of_match_table = ahci_of_match,
+ .pm = &ahci_highbank_pm_ops,
+ },
.probe = ahci_highbank_probe,
};
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 05c905827dc5..b8f363370e1a 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4255,7 +4255,7 @@ MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
static struct platform_driver mv_platform_driver = {
.probe = mv_platform_probe,
- .remove_new = mv_platform_remove,
+ .remove = mv_platform_remove,
.suspend = mv_platform_suspend,
.resume = mv_platform_resume,
.driver = {
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index c1469d076880..22820a02d740 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -1009,7 +1009,7 @@ static const struct dev_pm_ops sata_rcar_pm_ops = {
static struct platform_driver sata_rcar_driver = {
.probe = sata_rcar_probe,
- .remove_new = sata_rcar_remove,
+ .remove = sata_rcar_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = sata_rcar_match,
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 2fd1ed101748..5a95671d8151 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -316,8 +316,40 @@ __setup("ramdisk_size=", ramdisk_size);
* (should share code eventually).
*/
static LIST_HEAD(brd_devices);
+static DEFINE_MUTEX(brd_devices_mutex);
static struct dentry *brd_debugfs_dir;
+static struct brd_device *brd_find_or_alloc_device(int i)
+{
+ struct brd_device *brd;
+
+ mutex_lock(&brd_devices_mutex);
+ list_for_each_entry(brd, &brd_devices, brd_list) {
+ if (brd->brd_number == i) {
+ mutex_unlock(&brd_devices_mutex);
+ return ERR_PTR(-EEXIST);
+ }
+ }
+
+ brd = kzalloc(sizeof(*brd), GFP_KERNEL);
+ if (!brd) {
+ mutex_unlock(&brd_devices_mutex);
+ return ERR_PTR(-ENOMEM);
+ }
+ brd->brd_number = i;
+ list_add_tail(&brd->brd_list, &brd_devices);
+ mutex_unlock(&brd_devices_mutex);
+ return brd;
+}
+
+static void brd_free_device(struct brd_device *brd)
+{
+ mutex_lock(&brd_devices_mutex);
+ list_del(&brd->brd_list);
+ mutex_unlock(&brd_devices_mutex);
+ kfree(brd);
+}
+
static int brd_alloc(int i)
{
struct brd_device *brd;
@@ -340,14 +372,9 @@ static int brd_alloc(int i)
BLK_FEAT_NOWAIT,
};
- list_for_each_entry(brd, &brd_devices, brd_list)
- if (brd->brd_number == i)
- return -EEXIST;
- brd = kzalloc(sizeof(*brd), GFP_KERNEL);
- if (!brd)
- return -ENOMEM;
- brd->brd_number = i;
- list_add_tail(&brd->brd_list, &brd_devices);
+ brd = brd_find_or_alloc_device(i);
+ if (IS_ERR(brd))
+ return PTR_ERR(brd);
xa_init(&brd->brd_pages);
@@ -378,8 +405,7 @@ static int brd_alloc(int i)
out_cleanup_disk:
put_disk(disk);
out_free_dev:
- list_del(&brd->brd_list);
- kfree(brd);
+ brd_free_device(brd);
return err;
}
@@ -398,8 +424,7 @@ static void brd_cleanup(void)
del_gendisk(brd->brd_disk);
put_disk(brd->brd_disk);
brd_free_pages(brd);
- list_del(&brd->brd_list);
- kfree(brd);
+ brd_free_device(brd);
}
}
@@ -426,16 +451,6 @@ static int __init brd_init(void)
{
int err, i;
- brd_check_and_reset_par();
-
- brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
-
- for (i = 0; i < rd_nr; i++) {
- err = brd_alloc(i);
- if (err)
- goto out_free;
- }
-
/*
* brd module now has a feature to instantiate underlying device
* structure on-demand, provided that there is an access dev node.
@@ -451,11 +466,18 @@ static int __init brd_init(void)
* dynamically.
*/
+ brd_check_and_reset_par();
+
+ brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
+
if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) {
err = -EIO;
goto out_free;
}
+ for (i = 0; i < rd_nr; i++)
+ brd_alloc(i);
+
pr_info("brd: module loaded\n");
return 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 78a7bb28defe..fe9bb4fb5f1b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -173,7 +173,7 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
static bool lo_bdev_can_use_dio(struct loop_device *lo,
struct block_device *backing_bdev)
{
- unsigned short sb_bsize = bdev_logical_block_size(backing_bdev);
+ unsigned int sb_bsize = bdev_logical_block_size(backing_bdev);
if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
return false;
@@ -786,11 +786,10 @@ static void loop_config_discard(struct loop_device *lo,
* file-backed loop devices: discarded regions read back as zero.
*/
if (S_ISBLK(inode->i_mode)) {
- struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
+ struct block_device *bdev = I_BDEV(inode);
- max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
- granularity = bdev_discard_granularity(I_BDEV(inode)) ?:
- queue_physical_block_size(backingq);
+ max_discard_sectors = bdev_write_zeroes_sectors(bdev);
+ granularity = bdev_discard_granularity(bdev);
/*
* We use punch hole to reclaim the free space used by the
@@ -977,7 +976,7 @@ loop_set_status_from_info(struct loop_device *lo,
return 0;
}
-static unsigned short loop_default_blocksize(struct loop_device *lo,
+static unsigned int loop_default_blocksize(struct loop_device *lo,
struct block_device *backing_bdev)
{
/* In case of direct I/O, match underlying block size */
@@ -986,7 +985,7 @@ static unsigned short loop_default_blocksize(struct loop_device *lo,
return SECTOR_SIZE;
}
-static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize)
+static int loop_reconfigure_limits(struct loop_device *lo, unsigned int bsize)
{
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 223faa9d5ffd..43701b7b10a7 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2701,7 +2701,12 @@ static int mtip_hw_init(struct driver_data *dd)
int rv;
unsigned long timeout, timetaken;
- dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
+ dd->mmio = pcim_iomap_region(dd->pdev, MTIP_ABAR, MTIP_DRV_NAME);
+ if (IS_ERR(dd->mmio)) {
+ dev_err(&dd->pdev->dev, "Unable to request / ioremap PCI region\n");
+ return PTR_ERR(dd->mmio);
+ }
+
mtip_detect_product(dd);
if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
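pcim_iomap_region() replaces the pcim_iomap_regions()/pcim_iomap_table() pair with a single managed call that requests and maps one BAR, returning the mapping or an ERR_PTR. A minimal sketch of the pattern, with an illustrative region name:

static void __iomem *example_map_bar(struct pci_dev *pdev, int bar)
{
	void __iomem *mmio;

	/* request + ioremap the BAR in one devres-managed call */
	mmio = pcim_iomap_region(pdev, bar, "example");
	if (IS_ERR(mmio))
		dev_err(&pdev->dev, "failed to map BAR %d\n", bar);

	return mmio;
}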
@@ -3710,13 +3715,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
goto iomap_err;
}
- /* Map BAR5 to memory. */
- rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
- if (rv < 0) {
- dev_err(&pdev->dev, "Unable to map regions\n");
- goto iomap_err;
- }
-
rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rv) {
dev_warn(&pdev->dev, "64-bit DMA enable failed\n");
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 2f0431e42c49..3c3d8d200abb 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1638,10 +1638,9 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static void null_queue_rqs(struct request **rqlist)
+static void null_queue_rqs(struct rq_list *rqlist)
{
- struct request *requeue_list = NULL;
- struct request **requeue_lastp = &requeue_list;
+ struct rq_list requeue_list = {};
struct blk_mq_queue_data bd = { };
blk_status_t ret;
@@ -1651,8 +1650,8 @@ static void null_queue_rqs(struct request **rqlist)
bd.rq = rq;
ret = null_queue_rq(rq->mq_hctx, &bd);
if (ret != BLK_STS_OK)
- rq_list_add_tail(&requeue_lastp, rq);
- } while (!rq_list_empty(*rqlist));
+ rq_list_add_tail(&requeue_list, rq);
+ } while (!rq_list_empty(rqlist));
*rqlist = requeue_list;
}
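This hunk converts the open-coded request list (a struct request ** head plus a last-pointer) to the struct rq_list type. A minimal sketch of the consumption pattern used here and in the virtio-blk hunk further below; handle_one() is a hypothetical per-request handler:

static void example_queue_rqs(struct rq_list *rqlist)
{
	struct rq_list requeue_list = {};	/* empty-initialized list */
	struct request *rq;

	while ((rq = rq_list_pop(rqlist))) {
		if (handle_one(rq) != BLK_STS_OK)
			rq_list_add_tail(&requeue_list, rq);
	}

	/* hand any failed requests back for the caller to requeue */
	*rqlist = requeue_list;
}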
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 9bc768b2ca56..0d5f9bf95229 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -166,7 +166,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
lim->features |= BLK_FEAT_ZONED;
lim->chunk_sectors = dev->zone_size_sects;
- lim->max_zone_append_sectors = dev->zone_append_max_sectors;
+ lim->max_hw_zone_append_sectors = dev->zone_append_max_sectors;
lim->max_open_zones = dev->zone_max_open;
lim->max_active_zones = dev->zone_max_active;
return 0;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 9c8b19a22c2a..ac421dbeeb11 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -7284,6 +7284,7 @@ static ssize_t do_rbd_remove(const char *buf, size_t count)
*/
blk_mq_freeze_queue(rbd_dev->disk->queue);
blk_mark_disk_dead(rbd_dev->disk);
+ blk_mq_unfreeze_queue(rbd_dev->disk->queue);
}
del_gendisk(rbd_dev->disk);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 6ba2c1dd1d87..c6d18cd8af44 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -60,7 +60,12 @@
| UBLK_F_UNPRIVILEGED_DEV \
| UBLK_F_CMD_IOCTL_ENCODE \
| UBLK_F_USER_COPY \
- | UBLK_F_ZONED)
+ | UBLK_F_ZONED \
+ | UBLK_F_USER_RECOVERY_FAIL_IO)
+
+#define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
+ | UBLK_F_USER_RECOVERY_REISSUE \
+ | UBLK_F_USER_RECOVERY_FAIL_IO)
/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL \
@@ -143,6 +148,7 @@ struct ublk_queue {
bool force_abort;
bool timeout;
bool canceling;
+ bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
unsigned short nr_io_ready; /* how many ios setup */
spinlock_t cancel_lock;
struct ublk_device *dev;
@@ -179,8 +185,7 @@ struct ublk_device {
unsigned int nr_queues_ready;
unsigned int nr_privileged_daemon;
- struct work_struct quiesce_work;
- struct work_struct stop_work;
+ struct work_struct nosrv_work;
};
/* header of ublk_params */
@@ -664,30 +669,69 @@ static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
return ublk_get_queue(ub, q_id)->io_cmd_buf;
}
+static inline int __ublk_queue_cmd_buf_size(int depth)
+{
+ return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
+}
+
static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
{
struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
- return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
- PAGE_SIZE);
+ return __ublk_queue_cmd_buf_size(ubq->q_depth);
+}
+
+static int ublk_max_cmd_buf_size(void)
+{
+ return __ublk_queue_cmd_buf_size(UBLK_MAX_QUEUE_DEPTH);
+}
+
+/*
+ * Should I/O outstanding to the ublk server when it exits be reissued?
+ * If not, outstanding I/O will get errors.
+ */
+static inline bool ublk_nosrv_should_reissue_outstanding(struct ublk_device *ub)
+{
+ return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
+ (ub->dev_info.flags & UBLK_F_USER_RECOVERY_REISSUE);
+}
+
+/*
+ * Should I/O issued while there is no ublk server be queued? If not,
+ * I/O issued while there is no ublk server will get errors.
+ */
+static inline bool ublk_nosrv_dev_should_queue_io(struct ublk_device *ub)
+{
+ return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
+ !(ub->dev_info.flags & UBLK_F_USER_RECOVERY_FAIL_IO);
}
-static inline bool ublk_queue_can_use_recovery_reissue(
- struct ublk_queue *ubq)
+/*
+ * Same as ublk_nosrv_dev_should_queue_io, but uses a queue-local copy
+ * of the device flags for smaller cache footprint - better for fast
+ * paths.
+ */
+static inline bool ublk_nosrv_should_queue_io(struct ublk_queue *ubq)
{
return (ubq->flags & UBLK_F_USER_RECOVERY) &&
- (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
+ !(ubq->flags & UBLK_F_USER_RECOVERY_FAIL_IO);
}
-static inline bool ublk_queue_can_use_recovery(
- struct ublk_queue *ubq)
+/*
+ * Should ublk devices be stopped (i.e. no recovery possible) when the
+ * ublk server exits? If not, devices can be used again by a future
+ * incarnation of a ublk server via the start_recovery/end_recovery
+ * commands.
+ */
+static inline bool ublk_nosrv_should_stop_dev(struct ublk_device *ub)
{
- return ubq->flags & UBLK_F_USER_RECOVERY;
+ return !(ub->dev_info.flags & UBLK_F_USER_RECOVERY);
}
-static inline bool ublk_can_use_recovery(struct ublk_device *ub)
+static inline bool ublk_dev_in_recoverable_state(struct ublk_device *ub)
{
- return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
+ return ub->dev_info.state == UBLK_S_DEV_QUIESCED ||
+ ub->dev_info.state == UBLK_S_DEV_FAIL_IO;
}
static void ublk_free_disk(struct gendisk *disk)
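The renamed helpers encode how the recovery flags map to device behavior when the ublk server goes away. A hedged summary of that mapping as a sketch; the enum and function are illustrative, derived from the helpers above:

enum nosrv_behavior { NOSRV_STOP_DEV, NOSRV_QUEUE_IO, NOSRV_FAIL_IO };

static enum nosrv_behavior example_nosrv_behavior(u64 flags)
{
	if (!(flags & UBLK_F_USER_RECOVERY))
		return NOSRV_STOP_DEV;	/* ublk_nosrv_should_stop_dev() */
	if (flags & UBLK_F_USER_RECOVERY_FAIL_IO)
		return NOSRV_FAIL_IO;	/* new UBLK_S_DEV_FAIL_IO path */
	return NOSRV_QUEUE_IO;		/* quiesce and wait for recovery;
					 * UBLK_F_USER_RECOVERY_REISSUE
					 * additionally requeues in-flight I/O
					 */
}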
@@ -1063,7 +1107,7 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
{
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
- if (ublk_queue_can_use_recovery_reissue(ubq))
+ if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
blk_mq_requeue_request(req, false);
else
ublk_put_req_ref(ubq, req);
@@ -1091,7 +1135,7 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
struct request *rq)
{
/* We cannot process this rq so just requeue it. */
- if (ublk_queue_can_use_recovery(ubq))
+ if (ublk_nosrv_dev_should_queue_io(ubq->dev))
blk_mq_requeue_request(rq, false);
else
blk_mq_end_request(rq, BLK_STS_IOERR);
@@ -1236,10 +1280,7 @@ static enum blk_eh_timer_return ublk_timeout(struct request *rq)
struct ublk_device *ub = ubq->dev;
if (ublk_abort_requests(ub, ubq)) {
- if (ublk_can_use_recovery(ub))
- schedule_work(&ub->quiesce_work);
- else
- schedule_work(&ub->stop_work);
+ schedule_work(&ub->nosrv_work);
}
return BLK_EH_DONE;
}
@@ -1254,6 +1295,10 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *rq = bd->rq;
blk_status_t res;
+ if (unlikely(ubq->fail_io)) {
+ return BLK_STS_TARGET;
+ }
+
/* fill iod to slot in io cmd buffer */
res = ublk_setup_iod(ubq, rq);
if (unlikely(res != BLK_STS_OK))
@@ -1268,7 +1313,7 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
* Note: force_abort is guaranteed to be seen because it is set
* before request queue is unquiesced.
*/
- if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
+ if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
return BLK_STS_IOERR;
if (unlikely(ubq->canceling)) {
@@ -1322,7 +1367,7 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct ublk_device *ub = filp->private_data;
size_t sz = vma->vm_end - vma->vm_start;
- unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
+ unsigned max_sz = ublk_max_cmd_buf_size();
unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
int q_id, ret = 0;
@@ -1489,10 +1534,7 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
ublk_cancel_cmd(ubq, io, issue_flags);
if (need_schedule) {
- if (ublk_can_use_recovery(ub))
- schedule_work(&ub->quiesce_work);
- else
- schedule_work(&ub->stop_work);
+ schedule_work(&ub->nosrv_work);
}
}
@@ -1555,20 +1597,6 @@ static void __ublk_quiesce_dev(struct ublk_device *ub)
ub->dev_info.state = UBLK_S_DEV_QUIESCED;
}
-static void ublk_quiesce_work_fn(struct work_struct *work)
-{
- struct ublk_device *ub =
- container_of(work, struct ublk_device, quiesce_work);
-
- mutex_lock(&ub->mutex);
- if (ub->dev_info.state != UBLK_S_DEV_LIVE)
- goto unlock;
- __ublk_quiesce_dev(ub);
- unlock:
- mutex_unlock(&ub->mutex);
- ublk_cancel_dev(ub);
-}
-
static void ublk_unquiesce_dev(struct ublk_device *ub)
{
int i;
@@ -1597,7 +1625,7 @@ static void ublk_stop_dev(struct ublk_device *ub)
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_DEAD)
goto unlock;
- if (ublk_can_use_recovery(ub)) {
+ if (ublk_nosrv_dev_should_queue_io(ub)) {
if (ub->dev_info.state == UBLK_S_DEV_LIVE)
__ublk_quiesce_dev(ub);
ublk_unquiesce_dev(ub);
@@ -1617,6 +1645,37 @@ static void ublk_stop_dev(struct ublk_device *ub)
ublk_cancel_dev(ub);
}
+static void ublk_nosrv_work(struct work_struct *work)
+{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, nosrv_work);
+ int i;
+
+ if (ublk_nosrv_should_stop_dev(ub)) {
+ ublk_stop_dev(ub);
+ return;
+ }
+
+ mutex_lock(&ub->mutex);
+ if (ub->dev_info.state != UBLK_S_DEV_LIVE)
+ goto unlock;
+
+ if (ublk_nosrv_dev_should_queue_io(ub)) {
+ __ublk_quiesce_dev(ub);
+ } else {
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ ublk_get_queue(ub, i)->fail_io = true;
+ }
+ blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ }
+
+ unlock:
+ mutex_unlock(&ub->mutex);
+ ublk_cancel_dev(ub);
+}
+
/* device can only be started after all IOs are ready */
static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
{
@@ -2130,14 +2189,6 @@ static int ublk_add_chdev(struct ublk_device *ub)
return ret;
}
-static void ublk_stop_work_fn(struct work_struct *work)
-{
- struct ublk_device *ub =
- container_of(work, struct ublk_device, stop_work);
-
- ublk_stop_dev(ub);
-}
-
/* align max io buffer size with PAGE_SIZE */
static void ublk_align_max_io_size(struct ublk_device *ub)
{
@@ -2162,8 +2213,7 @@ static int ublk_add_tag_set(struct ublk_device *ub)
static void ublk_remove(struct ublk_device *ub)
{
ublk_stop_dev(ub);
- cancel_work_sync(&ub->stop_work);
- cancel_work_sync(&ub->quiesce_work);
+ cancel_work_sync(&ub->nosrv_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
ublk_put_device(ub);
ublks_added--;
@@ -2229,7 +2279,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
lim.features |= BLK_FEAT_ZONED;
lim.max_active_zones = p->max_active_zones;
lim.max_open_zones = p->max_open_zones;
- lim.max_zone_append_sectors = p->max_zone_append_sectors;
+ lim.max_hw_zone_append_sectors = p->max_zone_append_sectors;
}
if (ub->params.basic.attrs & UBLK_ATTR_VOLATILE_CACHE) {
@@ -2372,6 +2422,19 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
return -EPERM;
+ /* forbid nonsense combinations of recovery flags */
+ switch (info.flags & UBLK_F_ALL_RECOVERY_FLAGS) {
+ case 0:
+ case UBLK_F_USER_RECOVERY:
+ case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_REISSUE):
+ case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_FAIL_IO):
+ break;
+ default:
+ pr_warn("%s: invalid recovery flags %llx\n", __func__,
+ info.flags & UBLK_F_ALL_RECOVERY_FLAGS);
+ return -EINVAL;
+ }
+
/*
* unprivileged device can't be trusted, but RECOVERY and
* RECOVERY_REISSUE still may hang error handling, so can't
@@ -2424,8 +2487,7 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
goto out_unlock;
mutex_init(&ub->mutex);
spin_lock_init(&ub->lock);
- INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
- INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
+ INIT_WORK(&ub->nosrv_work, ublk_nosrv_work);
ret = ublk_alloc_dev_number(ub, header->dev_id);
if (ret < 0)
@@ -2560,9 +2622,7 @@ static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
ublk_stop_dev(ub);
- cancel_work_sync(&ub->stop_work);
- cancel_work_sync(&ub->quiesce_work);
-
+ cancel_work_sync(&ub->nosrv_work);
return 0;
}
@@ -2699,7 +2759,7 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
int i;
mutex_lock(&ub->mutex);
- if (!ublk_can_use_recovery(ub))
+ if (ublk_nosrv_should_stop_dev(ub))
goto out_unlock;
if (!ub->nr_queues_ready)
goto out_unlock;
@@ -2710,14 +2770,18 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
* and related io_uring ctx is freed so file struct of /dev/ublkcX is
* released.
*
+ * and one of the following holds
+ *
* (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
* (a)has quiesced request queue
* (b)has requeued every inflight rqs whose io_flags is ACTIVE
* (c)has requeued/aborted every inflight rqs whose io_flags is NOT ACTIVE
* (d)has completed/canceled all ioucmds owned by the dying process
+ *
+ * (3) UBLK_S_DEV_FAIL_IO is set, which means the queue is not
+ * quiesced, but all I/O is being immediately errored
*/
- if (test_bit(UB_STATE_OPEN, &ub->state) ||
- ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+ if (test_bit(UB_STATE_OPEN, &ub->state) || !ublk_dev_in_recoverable_state(ub)) {
ret = -EBUSY;
goto out_unlock;
}
@@ -2741,6 +2805,7 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ublksrv_pid = (int)header->data[0];
int ret = -EINVAL;
+ int i;
pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
@@ -2752,21 +2817,32 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
mutex_lock(&ub->mutex);
- if (!ublk_can_use_recovery(ub))
+ if (ublk_nosrv_should_stop_dev(ub))
goto out_unlock;
- if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+ if (!ublk_dev_in_recoverable_state(ub)) {
ret = -EBUSY;
goto out_unlock;
}
ub->dev_info.ublksrv_pid = ublksrv_pid;
pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
__func__, ublksrv_pid, header->dev_id);
- blk_mq_unquiesce_queue(ub->ub_disk->queue);
- pr_devel("%s: queue unquiesced, dev id %d.\n",
- __func__, header->dev_id);
- blk_mq_kick_requeue_list(ub->ub_disk->queue);
- ub->dev_info.state = UBLK_S_DEV_LIVE;
+
+ if (ublk_nosrv_dev_should_queue_io(ub)) {
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
+ blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ pr_devel("%s: queue unquiesced, dev id %d.\n",
+ __func__, header->dev_id);
+ blk_mq_kick_requeue_list(ub->ub_disk->queue);
+ } else {
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ ublk_get_queue(ub, i)->fail_io = false;
+ }
+ blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ }
+
ret = 0;
out_unlock:
mutex_unlock(&ub->mutex);
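Both the nosrv path and end_recovery flip the per-queue fail_io flags under a queue quiesce so that ublk_queue_rq() can never observe a half-updated state. A minimal sketch of that pattern, factored into an illustrative helper:

static void example_set_fail_io(struct ublk_device *ub, bool fail)
{
	int i;

	blk_mq_quiesce_queue(ub->ub_disk->queue);
	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
		ublk_get_queue(ub, i)->fail_io = fail;
	blk_mq_unquiesce_queue(ub->ub_disk->queue);
}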
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 194417abc105..c0cdba71f436 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -471,18 +471,18 @@ static bool virtblk_prep_rq_batch(struct request *req)
return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
}
-static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
- struct request **rqlist)
+static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
+ struct rq_list *rqlist)
{
+ struct request *req;
unsigned long flags;
- int err;
bool kick;
spin_lock_irqsave(&vq->lock, flags);
- while (!rq_list_empty(*rqlist)) {
- struct request *req = rq_list_pop(rqlist);
+ while ((req = rq_list_pop(rqlist))) {
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ int err;
err = virtblk_add_req(vq->vq, vbr);
if (err) {
@@ -495,37 +495,32 @@ static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
kick = virtqueue_kick_prepare(vq->vq);
spin_unlock_irqrestore(&vq->lock, flags);
- return kick;
+ if (kick)
+ virtqueue_notify(vq->vq);
}
-static void virtio_queue_rqs(struct request **rqlist)
+static void virtio_queue_rqs(struct rq_list *rqlist)
{
- struct request *req, *next, *prev = NULL;
- struct request *requeue_list = NULL;
-
- rq_list_for_each_safe(rqlist, req, next) {
- struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
- bool kick;
-
- if (!virtblk_prep_rq_batch(req)) {
- rq_list_move(rqlist, &requeue_list, req, prev);
- req = prev;
- if (!req)
- continue;
- }
+ struct rq_list submit_list = { };
+ struct rq_list requeue_list = { };
+ struct virtio_blk_vq *vq = NULL;
+ struct request *req;
- if (!next || req->mq_hctx != next->mq_hctx) {
- req->rq_next = NULL;
- kick = virtblk_add_req_batch(vq, rqlist);
- if (kick)
- virtqueue_notify(vq->vq);
+ while ((req = rq_list_pop(rqlist))) {
+ struct virtio_blk_vq *this_vq = get_virtio_blk_vq(req->mq_hctx);
- *rqlist = next;
- prev = NULL;
- } else
- prev = req;
+ if (vq && vq != this_vq)
+ virtblk_add_req_batch(vq, &submit_list);
+ vq = this_vq;
+
+ if (virtblk_prep_rq_batch(req))
+ rq_list_add_tail(&submit_list, req);
+ else
+ rq_list_add_tail(&requeue_list, req);
}
+ if (vq)
+ virtblk_add_req_batch(vq, &submit_list);
*rqlist = requeue_list;
}
@@ -784,7 +779,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
wg, v);
return -ENODEV;
}
- lim->max_zone_append_sectors = v;
+ lim->max_hw_zone_append_sectors = v;
dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
return 0;
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 438b92967bc3..30a32ebbcc68 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -3288,13 +3288,12 @@ static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
case INTEL_TLV_TEST_EXCEPTION:
/* Generate devcoredump from exception */
if (!hci_devcd_init(hdev, skb->len)) {
- hci_devcd_append(hdev, skb);
+ hci_devcd_append(hdev, skb_clone(skb, GFP_ATOMIC));
hci_devcd_complete(hdev);
} else {
bt_dev_err(hdev, "Failed to generate devcoredump");
- kfree_skb(skb);
}
- return 0;
+ break;
default:
bt_dev_err(hdev, "Invalid exception type %02X", tlv->val[0]);
}
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index b51d9e243f35..17854f052386 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -50,7 +50,7 @@ config HW_RANDOM_INTEL
config HW_RANDOM_AMD
tristate "AMD HW Random Number Generator support"
- depends on (X86 || PPC_MAPLE || COMPILE_TEST)
+ depends on (X86 || COMPILE_TEST)
depends on PCI && HAS_IOPORT_MAP
default HW_RANDOM
help
@@ -62,6 +62,19 @@ config HW_RANDOM_AMD
If unsure, say Y.
+config HW_RANDOM_AIROHA
+ tristate "Airoha True HW Random Number Generator support"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the True Random Number
+ Generator hardware found on Airoha SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called airoha-trng.
+
+ If unsure, say Y.
+
config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
depends on (ARCH_AT91 || COMPILE_TEST)
@@ -99,9 +112,22 @@ config HW_RANDOM_BCM2835
If unsure, say Y.
+config HW_RANDOM_BCM74110
+ tristate "Broadcom BCM74110 Random Number Generator support"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on the Broadcom BCM74110 SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bcm74110-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_IPROC_RNG200
tristate "Broadcom iProc/STB RNG200 support"
- depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
+ depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BCMBCA || ARCH_BRCMSTB || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the RNG200
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 01f012eab440..b9132b3f5d21 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -8,6 +8,7 @@ rng-core-y := core.o
obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
+obj-$(CONFIG_HW_RANDOM_AIROHA) += airoha-trng.o
obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
obj-$(CONFIG_HW_RANDOM_BA431) += ba431-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
obj-$(CONFIG_HW_RANDOM_HISTB) += histb-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
+obj-$(CONFIG_HW_RANDOM_BCM74110) += bcm74110-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
diff --git a/drivers/char/hw_random/airoha-trng.c b/drivers/char/hw_random/airoha-trng.c
new file mode 100644
index 000000000000..1dbfa9505c21
--- /dev/null
+++ b/drivers/char/hw_random/airoha-trng.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Christian Marangi */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+
+#define TRNG_IP_RDY 0x800
+#define CNT_TRANS GENMASK(15, 8)
+#define SAMPLE_RDY BIT(0)
+#define TRNG_NS_SEK_AND_DAT_EN 0x804
+#define RNG_EN BIT(31) /* referenced as ring_en */
+#define RAW_DATA_EN BIT(16)
+#define TRNG_HEALTH_TEST_SW_RST 0x808
+#define SW_RST BIT(0) /* Active High */
+#define TRNG_INTR_EN 0x818
+#define INTR_MASK BIT(16)
+#define CONTINUOUS_HEALTH_INITR_EN BIT(2)
+#define SW_STARTUP_INITR_EN BIT(1)
+#define RST_STARTUP_INITR_EN BIT(0)
+/* Notice that Health Tests are done only out of Reset and with RNG_EN */
+#define TRNG_HEALTH_TEST_STATUS 0x824
+#define CONTINUOUS_HEALTH_AP_TEST_FAIL BIT(23)
+#define CONTINUOUS_HEALTH_RC_TEST_FAIL BIT(22)
+#define SW_STARTUP_TEST_DONE BIT(21)
+#define SW_STARTUP_AP_TEST_FAIL BIT(20)
+#define SW_STARTUP_RC_TEST_FAIL BIT(19)
+#define RST_STARTUP_TEST_DONE BIT(18)
+#define RST_STARTUP_AP_TEST_FAIL BIT(17)
+#define RST_STARTUP_RC_TEST_FAIL BIT(16)
+#define RAW_DATA_VALID BIT(7)
+
+#define TRNG_RAW_DATA_OUT 0x828
+
+#define TRNG_CNT_TRANS_VALID 0x80
+#define BUSY_LOOP_SLEEP 10
+#define BUSY_LOOP_TIMEOUT (BUSY_LOOP_SLEEP * 10000)
+
+struct airoha_trng {
+ void __iomem *base;
+ struct hwrng rng;
+ struct device *dev;
+
+ struct completion rng_op_done;
+};
+
+static int airoha_trng_irq_mask(struct airoha_trng *trng)
+{
+ u32 val;
+
+ val = readl(trng->base + TRNG_INTR_EN);
+ val |= INTR_MASK;
+ writel(val, trng->base + TRNG_INTR_EN);
+
+ return 0;
+}
+
+static int airoha_trng_irq_unmask(struct airoha_trng *trng)
+{
+ u32 val;
+
+ val = readl(trng->base + TRNG_INTR_EN);
+ val &= ~INTR_MASK;
+ writel(val, trng->base + TRNG_INTR_EN);
+
+ return 0;
+}
+
+static int airoha_trng_init(struct hwrng *rng)
+{
+ struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
+ int ret;
+ u32 val;
+
+ val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
+ val |= RNG_EN;
+ writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
+
+ /* Set out of SW Reset */
+ airoha_trng_irq_unmask(trng);
+ writel(0, trng->base + TRNG_HEALTH_TEST_SW_RST);
+
+ ret = wait_for_completion_timeout(&trng->rng_op_done, BUSY_LOOP_TIMEOUT);
+ if (ret <= 0) {
+ dev_err(trng->dev, "Timeout waiting for Health Check\n");
+ airoha_trng_irq_mask(trng);
+ return -ENODEV;
+ }
+
+ /* Check if Health Test Failed */
+ val = readl(trng->base + TRNG_HEALTH_TEST_STATUS);
+ if (val & (RST_STARTUP_AP_TEST_FAIL | RST_STARTUP_RC_TEST_FAIL)) {
+ dev_err(trng->dev, "Health Check fail: %s test fail\n",
+ val & RST_STARTUP_AP_TEST_FAIL ? "AP" : "RC");
+ return -ENODEV;
+ }
+
+ /* Check if IP is ready */
+ ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val,
+ val & SAMPLE_RDY, 10, 1000);
+ if (ret < 0) {
+ dev_err(trng->dev, "Timeout waiting for IP ready");
+ return -ENODEV;
+ }
+
+ /* CNT_TRANS must be 0x80 for IP to be considered ready */
+ ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val,
+ FIELD_GET(CNT_TRANS, val) == TRNG_CNT_TRANS_VALID,
+ 10, 1000);
+ if (ret < 0) {
+ dev_err(trng->dev, "Timeout waiting for IP ready");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void airoha_trng_cleanup(struct hwrng *rng)
+{
+ struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
+ u32 val;
+
+ val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
+ val &= ~RNG_EN;
+ writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
+
+ /* Put it in SW Reset */
+ writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST);
+}
+
+static int airoha_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
+ u32 *data = buf;
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(trng->base + TRNG_HEALTH_TEST_STATUS, status,
+ status & RAW_DATA_VALID, 10, 1000);
+ if (ret < 0) {
+ dev_err(trng->dev, "Timeout waiting for TRNG RAW Data valid\n");
+ return ret;
+ }
+
+ *data = readl(trng->base + TRNG_RAW_DATA_OUT);
+
+ return 4;
+}
+
+static irqreturn_t airoha_trng_irq(int irq, void *priv)
+{
+ struct airoha_trng *trng = (struct airoha_trng *)priv;
+
+ airoha_trng_irq_mask(trng);
+ /* Just complete the task, we will read the value later */
+ complete(&trng->rng_op_done);
+
+ return IRQ_HANDLED;
+}
+
+static int airoha_trng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct airoha_trng *trng;
+ int irq, ret;
+ u32 val;
+
+ trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ trng->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(trng->base))
+ return PTR_ERR(trng->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ airoha_trng_irq_mask(trng);
+ ret = devm_request_irq(&pdev->dev, irq, airoha_trng_irq, 0,
+ pdev->name, (void *)trng);
+ if (ret) {
+ dev_err(dev, "Can't get interrupt working.\n");
+ return ret;
+ }
+
+ init_completion(&trng->rng_op_done);
+
+ /* Enable interrupt for SW reset Health Check */
+ val = readl(trng->base + TRNG_INTR_EN);
+ val |= RST_STARTUP_INITR_EN;
+ writel(val, trng->base + TRNG_INTR_EN);
+
+ /* Set output to raw data */
+ val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
+ val |= RAW_DATA_EN;
+ writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
+
+ /* Put it in SW Reset */
+ writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST);
+
+ trng->dev = dev;
+ trng->rng.name = pdev->name;
+ trng->rng.init = airoha_trng_init;
+ trng->rng.cleanup = airoha_trng_cleanup;
+ trng->rng.read = airoha_trng_read;
+
+ ret = devm_hwrng_register(dev, &trng->rng);
+ if (ret) {
+ dev_err(dev, "failed to register rng device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id airoha_trng_of_match[] = {
+ { .compatible = "airoha,en7581-trng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, airoha_trng_of_match);
+
+static struct platform_driver airoha_trng_driver = {
+ .driver = {
+ .name = "airoha-trng",
+ .of_match_table = airoha_trng_of_match,
+ },
+ .probe = airoha_trng_probe,
+};
+
+module_platform_driver(airoha_trng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("Airoha True Random Number Generator driver");
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index e9157255f851..143406bc6939 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -216,7 +216,7 @@ MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids);
static struct platform_driver atmel_trng_driver = {
.probe = atmel_trng_probe,
- .remove_new = atmel_trng_remove,
+ .remove = atmel_trng_remove,
.driver = {
.name = "atmel-trng",
.pm = pm_ptr(&atmel_trng_pm_ops),
diff --git a/drivers/char/hw_random/bcm74110-rng.c b/drivers/char/hw_random/bcm74110-rng.c
new file mode 100644
index 000000000000..5c64148e91f1
--- /dev/null
+++ b/drivers/char/hw_random/bcm74110-rng.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Broadcom
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/hw_random.h>
+
+#define HOST_REV_ID 0x00
+#define HOST_FIFO_DEPTH 0x04
+#define HOST_FIFO_COUNT 0x08
+#define HOST_FIFO_THRESHOLD 0x0c
+#define HOST_FIFO_DATA 0x10
+
+#define HOST_FIFO_COUNT_MASK 0xffff
+
+/* Delay range in microseconds */
+#define FIFO_DELAY_MIN_US 3
+#define FIFO_DELAY_MAX_US 7
+#define FIFO_DELAY_MAX_COUNT 10
+
+struct bcm74110_priv {
+ void __iomem *base;
+};
+
+static inline int bcm74110_rng_fifo_count(void __iomem *mem)
+{
+ return readl_relaxed(mem) & HOST_FIFO_COUNT_MASK;
+}
+
+static int bcm74110_rng_read(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ struct bcm74110_priv *priv = (struct bcm74110_priv *)rng->priv;
+ void __iomem *fc_addr = priv->base + HOST_FIFO_COUNT;
+ void __iomem *fd_addr = priv->base + HOST_FIFO_DATA;
+ unsigned underrun_count = 0;
+ u32 max_words = max / sizeof(u32);
+ u32 num_words;
+ unsigned i;
+
+ /*
+ * We need to check how many words are available in the RNG FIFO. If
+ * there aren't any, we need to wait for some to become available.
+ */
+ while ((num_words = bcm74110_rng_fifo_count(fc_addr)) == 0) {
+ if (!wait)
+ return 0;
+ /*
+ * As a precaution, limit how long we wait. If the FIFO doesn't
+ * refill within the allotted time, return 0 (=no data) to the
+ * caller.
+ */
+ if (likely(underrun_count < FIFO_DELAY_MAX_COUNT))
+ usleep_range(FIFO_DELAY_MIN_US, FIFO_DELAY_MAX_US);
+ else
+ return 0;
+ underrun_count++;
+ }
+ if (num_words > max_words)
+ num_words = max_words;
+
+ /* Bail early if we run out of random numbers unexpectedly */
+ for (i = 0; i < num_words && bcm74110_rng_fifo_count(fc_addr) > 0; i++)
+ ((u32 *)buf)[i] = readl_relaxed(fd_addr);
+
+ return i * sizeof(u32);
+}
+
+static struct hwrng bcm74110_hwrng = {
+ .read = bcm74110_rng_read,
+};
+
+static int bcm74110_rng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm74110_priv *priv;
+ int rc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ bcm74110_hwrng.name = pdev->name;
+ bcm74110_hwrng.priv = (unsigned long)priv;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ rc = devm_hwrng_register(dev, &bcm74110_hwrng);
+ if (rc)
+ dev_err(dev, "hwrng registration failed (%d)\n", rc);
+ else
+ dev_info(dev, "hwrng registered\n");
+
+ return rc;
+}
+
+static const struct of_device_id bcm74110_rng_match[] = {
+ { .compatible = "brcm,bcm74110-rng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm74110_rng_match);
+
+static struct platform_driver bcm74110_rng_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = bcm74110_rng_match,
+ },
+ .probe = bcm74110_rng_probe,
+};
+module_platform_driver(bcm74110_rng_driver);
+
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("BCM 74110 Random Number Generator (RNG) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
index 4c50efc46483..4db198849695 100644
--- a/drivers/char/hw_random/cctrng.c
+++ b/drivers/char/hw_random/cctrng.c
@@ -653,7 +653,7 @@ static struct platform_driver cctrng_driver = {
.pm = &cctrng_pm,
},
.probe = cctrng_probe,
- .remove_new = cctrng_remove,
+ .remove = cctrng_remove,
};
module_platform_driver(cctrng_driver);
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 57c51efa5613..018316f54621 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -181,8 +181,15 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int present;
BUG_ON(!mutex_is_locked(&reading_mutex));
- if (rng->read)
- return rng->read(rng, (void *)buffer, size, wait);
+ if (rng->read) {
+ int err;
+
+ err = rng->read(rng, buffer, size, wait);
+ if (WARN_ON_ONCE(err > 0 && err > size))
+ err = size;
+
+ return err;
+ }
if (rng->data_present)
present = rng->data_present(rng, wait);
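The core now warns and clamps when a driver's ->read() claims more bytes than were requested, so callbacks must bound their return value by @max. A minimal sketch of a compliant read callback; the struct and register offset are illustrative:

struct example_rng {
	struct hwrng rng;
	void __iomem *base;
};

static int example_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	struct example_rng *priv = container_of(rng, struct example_rng, rng);

	/* never report more than @max bytes back to the core */
	if (max < sizeof(u32))
		return 0;

	*(u32 *)buf = readl(priv->base + 0x10 /* illustrative data reg */);
	return sizeof(u32);
}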
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
index 9f039fddaee3..02e207c09e81 100644
--- a/drivers/char/hw_random/exynos-trng.c
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -335,7 +335,7 @@ static struct platform_driver exynos_trng_driver = {
.of_match_table = exynos_trng_dt_match,
},
.probe = exynos_trng_probe,
- .remove_new = exynos_trng_remove,
+ .remove = exynos_trng_remove,
};
module_platform_driver(exynos_trng_driver);
diff --git a/drivers/char/hw_random/histb-rng.c b/drivers/char/hw_random/histb-rng.c
index f652e1135e4b..1b91e88cc4c0 100644
--- a/drivers/char/hw_random/histb-rng.c
+++ b/drivers/char/hw_random/histb-rng.c
@@ -89,7 +89,7 @@ depth_show(struct device *dev, struct device_attribute *attr, char *buf)
struct histb_rng_priv *priv = dev_get_drvdata(dev);
void __iomem *base = priv->base;
- return sprintf(buf, "%d\n", histb_rng_get_depth(base));
+ return sprintf(buf, "%u\n", histb_rng_get_depth(base));
}
static ssize_t
diff --git a/drivers/char/hw_random/ingenic-rng.c b/drivers/char/hw_random/ingenic-rng.c
index 2f9b6483c4a1..bbfd662d25a6 100644
--- a/drivers/char/hw_random/ingenic-rng.c
+++ b/drivers/char/hw_random/ingenic-rng.c
@@ -132,7 +132,7 @@ MODULE_DEVICE_TABLE(of, ingenic_rng_of_match);
static struct platform_driver ingenic_rng_driver = {
.probe = ingenic_rng_probe,
- .remove_new = ingenic_rng_remove,
+ .remove = ingenic_rng_remove,
.driver = {
.name = "ingenic-rng",
.of_match_table = ingenic_rng_of_match,
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index 36c34252b4f6..d8fd8a354482 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -261,7 +261,7 @@ static struct platform_driver ks_sa_rng_driver = {
.of_match_table = ks_sa_rng_dt_match,
},
.probe = ks_sa_rng_probe,
- .remove_new = ks_sa_rng_remove,
+ .remove = ks_sa_rng_remove,
};
module_platform_driver(ks_sa_rng_driver);
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index f01eb95bee31..e3fcb8bcc29b 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -188,7 +188,7 @@ static struct platform_driver mxc_rnga_driver = {
.of_match_table = mxc_rnga_of_match,
},
.probe = mxc_rnga_probe,
- .remove_new = mxc_rnga_remove,
+ .remove = mxc_rnga_remove,
};
module_platform_driver(mxc_rnga_driver);
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 1b49e3a86d57..ea6d5599242f 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -858,7 +858,7 @@ static struct platform_driver n2rng_driver = {
.of_match_table = n2rng_match,
},
.probe = n2rng_probe,
- .remove_new = n2rng_remove,
+ .remove = n2rng_remove,
};
module_platform_driver(n2rng_driver);
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
index bce8c4829a1f..9ff00f096f38 100644
--- a/drivers/char/hw_random/npcm-rng.c
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -176,7 +176,7 @@ static struct platform_driver npcm_rng_driver = {
.of_match_table = of_match_ptr(rng_dt_id),
},
.probe = npcm_rng_probe,
- .remove_new = npcm_rng_remove,
+ .remove = npcm_rng_remove,
};
module_platform_driver(npcm_rng_driver);
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 4914a8720e58..5e8b50f15db7 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -558,7 +558,7 @@ static struct platform_driver omap_rng_driver = {
.of_match_table = of_match_ptr(omap_rng_of_match),
},
.probe = omap_rng_probe,
- .remove_new = omap_rng_remove,
+ .remove = omap_rng_remove,
};
module_platform_driver(omap_rng_driver);
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 9d041a67c295..98edbe796bc5 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/io.h>
@@ -49,6 +50,7 @@
struct stm32_rng_data {
uint max_clock_rate;
+ uint nb_clock;
u32 cr;
u32 nscr;
u32 htcr;
@@ -72,7 +74,7 @@ struct stm32_rng_private {
struct hwrng rng;
struct device *dev;
void __iomem *base;
- struct clk *clk;
+ struct clk_bulk_data *clk_bulk;
struct reset_control *rst;
struct stm32_rng_config pm_conf;
const struct stm32_rng_data *data;
@@ -266,7 +268,7 @@ static uint stm32_rng_clock_freq_restrain(struct hwrng *rng)
unsigned long clock_rate = 0;
uint clock_div = 0;
- clock_rate = clk_get_rate(priv->clk);
+ clock_rate = clk_get_rate(priv->clk_bulk[0].clk);
/*
* Get the exponent to apply on the CLKDIV field in RNG_CR register
@@ -276,7 +278,7 @@ static uint stm32_rng_clock_freq_restrain(struct hwrng *rng)
while ((clock_rate >> clock_div) > priv->data->max_clock_rate)
clock_div++;
- pr_debug("RNG clk rate : %lu\n", clk_get_rate(priv->clk) >> clock_div);
+ pr_debug("RNG clk rate : %lu\n", clk_get_rate(priv->clk_bulk[0].clk) >> clock_div);
return clock_div;
}
@@ -288,7 +290,7 @@ static int stm32_rng_init(struct hwrng *rng)
int err;
u32 reg;
- err = clk_prepare_enable(priv->clk);
+ err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@@ -328,7 +330,7 @@ static int stm32_rng_init(struct hwrng *rng)
(!(reg & RNG_CR_CONDRST)),
10, 50000);
if (err) {
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
dev_err(priv->dev, "%s: timeout %x!\n", __func__, reg);
return -EINVAL;
}
@@ -356,12 +358,13 @@ static int stm32_rng_init(struct hwrng *rng)
reg & RNG_SR_DRDY,
10, 100000);
if (err || (reg & ~RNG_SR_DRDY)) {
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
dev_err(priv->dev, "%s: timeout:%x SR: %x!\n", __func__, err, reg);
+
return -EINVAL;
}
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@@ -379,7 +382,8 @@ static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev)
reg = readl_relaxed(priv->base + RNG_CR);
reg &= ~RNG_CR_RNGEN;
writel_relaxed(reg, priv->base + RNG_CR);
- clk_disable_unprepare(priv->clk);
+
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@@ -389,7 +393,7 @@ static int __maybe_unused stm32_rng_suspend(struct device *dev)
struct stm32_rng_private *priv = dev_get_drvdata(dev);
int err;
- err = clk_prepare_enable(priv->clk);
+ err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@@ -403,7 +407,7 @@ static int __maybe_unused stm32_rng_suspend(struct device *dev)
writel_relaxed(priv->pm_conf.cr, priv->base + RNG_CR);
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@@ -414,7 +418,7 @@ static int __maybe_unused stm32_rng_runtime_resume(struct device *dev)
int err;
u32 reg;
- err = clk_prepare_enable(priv->clk);
+ err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@@ -434,7 +438,7 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
int err;
u32 reg;
- err = clk_prepare_enable(priv->clk);
+ err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@@ -462,7 +466,7 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
reg & ~RNG_CR_CONDRST, 10, 100000);
if (err) {
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
dev_err(priv->dev, "%s: timeout:%x CR: %x!\n", __func__, err, reg);
return -EINVAL;
}
@@ -472,7 +476,7 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
writel_relaxed(reg, priv->base + RNG_CR);
}
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@@ -484,9 +488,19 @@ static const struct dev_pm_ops __maybe_unused stm32_rng_pm_ops = {
stm32_rng_resume)
};
+static const struct stm32_rng_data stm32mp25_rng_data = {
+ .has_cond_reset = true,
+ .max_clock_rate = 48000000,
+ .nb_clock = 2,
+ .cr = 0x00F00D00,
+ .nscr = 0x2B5BB,
+ .htcr = 0x969D,
+};
+
static const struct stm32_rng_data stm32mp13_rng_data = {
.has_cond_reset = true,
.max_clock_rate = 48000000,
+ .nb_clock = 1,
.cr = 0x00F00D00,
.nscr = 0x2B5BB,
.htcr = 0x969D,
@@ -494,11 +508,16 @@ static const struct stm32_rng_data stm32mp13_rng_data = {
static const struct stm32_rng_data stm32_rng_data = {
.has_cond_reset = false,
- .max_clock_rate = 3000000,
+ .max_clock_rate = 48000000,
+ .nb_clock = 1,
};
static const struct of_device_id stm32_rng_match[] = {
{
+ .compatible = "st,stm32mp25-rng",
+ .data = &stm32mp25_rng_data,
+ },
+ {
.compatible = "st,stm32mp13-rng",
.data = &stm32mp13_rng_data,
},
@@ -516,6 +535,7 @@ static int stm32_rng_probe(struct platform_device *ofdev)
struct device_node *np = ofdev->dev.of_node;
struct stm32_rng_private *priv;
struct resource *res;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -525,10 +545,6 @@ static int stm32_rng_probe(struct platform_device *ofdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- priv->clk = devm_clk_get(&ofdev->dev, NULL);
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
-
priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
if (!IS_ERR(priv->rst)) {
reset_control_assert(priv->rst);
@@ -551,6 +567,28 @@ static int stm32_rng_probe(struct platform_device *ofdev)
priv->rng.read = stm32_rng_read;
priv->rng.quality = 900;
+ if (!priv->data->nb_clock || priv->data->nb_clock > 2)
+ return -EINVAL;
+
+ ret = devm_clk_bulk_get_all(dev, &priv->clk_bulk);
+ if (ret != priv->data->nb_clock)
+ return dev_err_probe(dev, -EINVAL, "Failed to get clocks: %d\n", ret);
+
+ if (priv->data->nb_clock == 2) {
+ const char *id = priv->clk_bulk[1].id;
+ struct clk *clk = priv->clk_bulk[1].clk;
+
+ if (!priv->clk_bulk[0].id || !priv->clk_bulk[1].id)
+ return dev_err_probe(dev, -EINVAL, "Missing clock name\n");
+
+ if (strcmp(priv->clk_bulk[0].id, "core")) {
+ priv->clk_bulk[1].id = priv->clk_bulk[0].id;
+ priv->clk_bulk[1].clk = priv->clk_bulk[0].clk;
+ priv->clk_bulk[0].id = id;
+ priv->clk_bulk[0].clk = clk;
+ }
+ }
+
pm_runtime_set_autosuspend_delay(dev, 100);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
@@ -565,7 +603,7 @@ static struct platform_driver stm32_rng_driver = {
.of_match_table = stm32_rng_match,
},
.probe = stm32_rng_probe,
- .remove_new = stm32_rng_remove,
+ .remove = stm32_rng_remove,
};
module_platform_driver(stm32_rng_driver);
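The clk_bulk conversion above relies on devm_clk_bulk_get_all() returning the clocks in devicetree order and then reordering so the "core" clock always lands at index 0, since stm32_rng_clock_freq_restrain() reads clk_bulk[0]. A minimal sketch of that pattern, assuming a two-clock binding whose rate-defining clock is named "core" (example_get_clocks() is a hypothetical helper, not part of the driver):

static int example_get_clocks(struct device *dev, struct clk_bulk_data **out)
{
	struct clk_bulk_data *clks;
	int n;

	/* Fetch every clock listed in the DT node, in DT order. */
	n = devm_clk_bulk_get_all(dev, &clks);
	if (n < 1)
		return n < 0 ? n : -EINVAL;

	/* Guarantee clks[0] is the "core" clock before any rate is read. */
	if (n == 2 && clks[0].id && clks[1].id && strcmp(clks[0].id, "core"))
		swap(clks[0], clks[1]);

	*out = clks;
	return n;
}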
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 65b8260339f5..7174bfccc7b3 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -193,7 +193,7 @@ static struct platform_driver timeriomem_rng_driver = {
.of_match_table = timeriomem_rng_match,
},
.probe = timeriomem_rng_probe,
- .remove_new = timeriomem_rng_remove,
+ .remove = timeriomem_rng_remove,
};
module_platform_driver(timeriomem_rng_driver);
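This hunk, like the many identical ones throughout this series, completes the platform-driver remove-callback conversion: the transitional .remove_new member is renamed back to .remove, which now takes the void-returning form. A sketch of the resulting shape (foo_probe and foo_remove are hypothetical placeholders):

static void foo_remove(struct platform_device *pdev)
{
	/* release what probe acquired; no error code can be returned */
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name = "foo",
	},
};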
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index 642d13519464..39acaa503fec 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -375,7 +375,7 @@ MODULE_DEVICE_TABLE(of, xgene_rng_of_match);
static struct platform_driver xgene_rng_driver = {
.probe = xgene_rng_probe,
- .remove_new = xgene_rng_remove,
+ .remove = xgene_rng_remove,
.driver = {
.name = "xgene-rng",
.of_match_table = xgene_rng_of_match,
diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c
index cad0048bcc3c..e49a19fea3bd 100644
--- a/drivers/char/tpm/tpm-buf.c
+++ b/drivers/char/tpm/tpm-buf.c
@@ -147,6 +147,26 @@ void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
EXPORT_SYMBOL_GPL(tpm_buf_append_u32);
/**
+ * tpm_buf_append_handle() - Add a handle
+ * @chip: &tpm_chip instance
+ * @buf: &tpm_buf instance
+ * @handle: a TPM object handle
+ *
+ * Add a handle to the buffer, and increase the count tracking the number of
+ * handles in the command buffer. Works only for command buffers.
+ */
+void tpm_buf_append_handle(struct tpm_chip *chip, struct tpm_buf *buf, u32 handle)
+{
+ if (buf->flags & TPM_BUF_TPM2B) {
+ dev_err(&chip->dev, "Invalid buffer type (TPM2B)\n");
+ return;
+ }
+
+ tpm_buf_append_u32(buf, handle);
+ buf->handles++;
+}
+
+/**
* tpm_buf_read() - Read from a TPM buffer
* @buf: &tpm_buf instance
* @offset: offset within the buffer
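A hypothetical usage sketch for the new helper (chip and pcr_idx are assumed to be in scope): tpm_buf_append_handle() appends the u32 and also bumps buf->handles, which the session-appending helpers later use to locate the start of the sessions area.

	struct tpm_buf buf;
	int rc;

	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
	if (rc)
		return rc;

	tpm_buf_append_handle(chip, &buf, pcr_idx);	/* appends handle, handles++ */
	/* ... append the authorization session and command parameters ... */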
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 1e856259219e..dfdcbd009720 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -14,6 +14,10 @@
#include "tpm.h"
#include <crypto/hash_info.h>
+static bool disable_pcr_integrity;
+module_param(disable_pcr_integrity, bool, 0444);
+MODULE_PARM_DESC(disable_pcr_integrity, "Disable integrity protection of TPM2_PCR_Extend");
+
static struct tpm2_hash tpm2_hash_map[] = {
{HASH_ALGO_SHA1, TPM_ALG_SHA1},
{HASH_ALGO_SHA256, TPM_ALG_SHA256},
@@ -232,18 +236,26 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
int rc;
int i;
- rc = tpm2_start_auth_session(chip);
- if (rc)
- return rc;
+ if (!disable_pcr_integrity) {
+ rc = tpm2_start_auth_session(chip);
+ if (rc)
+ return rc;
+ }
rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
if (rc) {
- tpm2_end_auth_session(chip);
+ if (!disable_pcr_integrity)
+ tpm2_end_auth_session(chip);
return rc;
}
- tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
- tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);
+ if (!disable_pcr_integrity) {
+ tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
+ tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);
+ } else {
+ tpm_buf_append_handle(chip, &buf, pcr_idx);
+ tpm_buf_append_auth(chip, &buf, 0, NULL, 0);
+ }
tpm_buf_append_u32(&buf, chip->nr_allocated_banks);
@@ -253,9 +265,11 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
chip->allocated_banks[i].digest_size);
}
- tpm_buf_fill_hmac_session(chip, &buf);
+ if (!disable_pcr_integrity)
+ tpm_buf_fill_hmac_session(chip, &buf);
rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value");
- rc = tpm_buf_check_hmac_response(chip, &buf, rc);
+ if (!disable_pcr_integrity)
+ rc = tpm_buf_check_hmac_response(chip, &buf, rc);
tpm_buf_destroy(&buf);
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index 0739830904b2..b0f13c8ea79c 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -237,9 +237,7 @@ void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
#endif
if (!tpm2_chip_auth(chip)) {
- tpm_buf_append_u32(buf, handle);
- /* count the number of handles in the upper bits of flags */
- buf->handles++;
+ tpm_buf_append_handle(chip, buf, handle);
return;
}
@@ -272,6 +270,31 @@ void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
}
EXPORT_SYMBOL_GPL(tpm_buf_append_name);
+void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf,
+ u8 attributes, u8 *passphrase, int passphrase_len)
+{
+ /* offset tells us where the sessions area begins */
+ int offset = buf->handles * 4 + TPM_HEADER_SIZE;
+ u32 len = 9 + passphrase_len;
+
+ if (tpm_buf_length(buf) != offset) {
+ /* not the first session so update the existing length */
+ len += get_unaligned_be32(&buf->data[offset]);
+ put_unaligned_be32(len, &buf->data[offset]);
+ } else {
+ tpm_buf_append_u32(buf, len);
+ }
+ /* auth handle */
+ tpm_buf_append_u32(buf, TPM2_RS_PW);
+ /* nonce */
+ tpm_buf_append_u16(buf, 0);
+ /* attributes */
+ tpm_buf_append_u8(buf, 0);
+ /* passphrase */
+ tpm_buf_append_u16(buf, passphrase_len);
+ tpm_buf_append(buf, passphrase, passphrase_len);
+}
+
/**
* tpm_buf_append_hmac_session() - Append a TPM session element
* @chip: the TPM chip structure
@@ -309,26 +332,8 @@ void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
#endif
if (!tpm2_chip_auth(chip)) {
- /* offset tells us where the sessions area begins */
- int offset = buf->handles * 4 + TPM_HEADER_SIZE;
- u32 len = 9 + passphrase_len;
-
- if (tpm_buf_length(buf) != offset) {
- /* not the first session so update the existing length */
- len += get_unaligned_be32(&buf->data[offset]);
- put_unaligned_be32(len, &buf->data[offset]);
- } else {
- tpm_buf_append_u32(buf, len);
- }
- /* auth handle */
- tpm_buf_append_u32(buf, TPM2_RS_PW);
- /* nonce */
- tpm_buf_append_u16(buf, 0);
- /* attributes */
- tpm_buf_append_u8(buf, 0);
- /* passphrase */
- tpm_buf_append_u16(buf, passphrase_len);
- tpm_buf_append(buf, passphrase, passphrase_len);
+ tpm_buf_append_auth(chip, buf, attributes, passphrase,
+ passphrase_len);
return;
}
@@ -948,10 +953,13 @@ static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key)
		/* Deduce TPM interference from the name change: */
dev_err(&chip->dev, "null key integrity check failed\n");
tpm2_flush_context(chip, tmp_null_key);
- chip->flags |= TPM_CHIP_FLAG_DISABLE;
err:
- return rc ? -ENODEV : 0;
+ if (rc) {
+ chip->flags |= TPM_CHIP_FLAG_DISABLE;
+ rc = -ENODEV;
+ }
+ return rc;
}
/**
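For reference, the sessions area that tpm_buf_append_auth() emits for a plain password authorization, reconstructed from the appends above (all fields big-endian):

/*
 *   u32 authorizationSize = 9 + passphrase_len
 *   u32 sessionHandle     = TPM2_RS_PW
 *   u16 nonce.size        = 0
 *   u8  sessionAttributes = 0
 *   u16 hmac.size         = passphrase_len
 *   u8  hmac.buffer[]     = passphrase bytes
 */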
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 0f04feb6cafa..c9ebacf5c88e 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -73,20 +73,17 @@ static unsigned int acpi_pstate_strict;
static bool boost_state(unsigned int cpu)
{
- u32 lo, hi;
u64 msr;
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
case X86_VENDOR_ZHAOXIN:
- rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
- msr = lo | ((u64)hi << 32);
+ rdmsrl_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
- rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
- msr = lo | ((u64)hi << 32);
+ rdmsrl_on_cpu(cpu, MSR_K7_HWCR, &msr);
return !(msr & MSR_K7_HWCR_CPB_DIS);
}
return false;
@@ -1028,7 +1025,7 @@ static struct platform_driver acpi_cpufreq_platdrv = {
.driver = {
.name = "acpi-cpufreq",
},
- .remove_new = acpi_cpufreq_remove,
+ .remove = acpi_cpufreq_remove,
};
static int __init acpi_cpufreq_init(void)
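The boost_state() change swaps the split 32-bit MSR read for the 64-bit helper; the two forms below are equivalent, which is why no behavior changes:

	/* before: compose the value from two halves */
	u32 lo, hi;
	rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
	msr = lo | ((u64)hi << 32);

	/* after: one 64-bit read */
	u64 msr;
	rdmsrl_on_cpu(cpu, MSR_K7_HWCR, &msr);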
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index f66701514d90..a261d7300951 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -227,10 +227,10 @@ static void amd_pstate_ut_check_freq(u32 index)
goto skip_test;
}
- if (cpudata->min_freq != policy->min) {
+ if (cpudata->lowest_nonlinear_freq != policy->min) {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
- pr_err("%s cpu%d cpudata_min_freq=%d policy_min=%d, they should be equal!\n",
- __func__, cpu, cpudata->min_freq, policy->min);
+ pr_err("%s cpu%d cpudata_lowest_nonlinear_freq=%d policy_min=%d, they should be equal!\n",
+ __func__, cpu, cpudata->lowest_nonlinear_freq, policy->min);
goto skip_test;
}
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index b63863f77c67..f834cc8205e2 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -233,7 +233,7 @@ static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
return index;
}
-static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+static void msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
u32 des_perf, u32 max_perf, bool fast_switch)
{
if (fast_switch)
@@ -243,7 +243,7 @@ static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
READ_ONCE(cpudata->cppc_req_cached));
}
-DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
+DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
u32 min_perf, u32 des_perf,
@@ -306,11 +306,17 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
return ret;
}
-static inline int pstate_enable(bool enable)
+static inline int msr_cppc_enable(bool enable)
{
int ret, cpu;
unsigned long logical_proc_id_mask = 0;
+ /*
+	 * MSR_AMD_CPPC_ENABLE is write-once; once set, it cannot be cleared.

+ */
+ if (!enable)
+ return 0;
+
if (enable == cppc_enabled)
return 0;
@@ -332,7 +338,7 @@ static inline int pstate_enable(bool enable)
return 0;
}
-static int cppc_enable(bool enable)
+static int shmem_cppc_enable(bool enable)
{
int cpu, ret = 0;
struct cppc_perf_ctrls perf_ctrls;
@@ -359,14 +365,14 @@ static int cppc_enable(bool enable)
return ret;
}
-DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);
+DEFINE_STATIC_CALL(amd_pstate_cppc_enable, msr_cppc_enable);
-static inline int amd_pstate_enable(bool enable)
+static inline int amd_pstate_cppc_enable(bool enable)
{
- return static_call(amd_pstate_enable)(enable);
+ return static_call(amd_pstate_cppc_enable)(enable);
}
-static int pstate_init_perf(struct amd_cpudata *cpudata)
+static int msr_init_perf(struct amd_cpudata *cpudata)
{
u64 cap1;
@@ -385,7 +391,7 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
return 0;
}
-static int cppc_init_perf(struct amd_cpudata *cpudata)
+static int shmem_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
@@ -420,14 +426,14 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
return ret;
}
-DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);
+DEFINE_STATIC_CALL(amd_pstate_init_perf, msr_init_perf);
static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
{
return static_call(amd_pstate_init_perf)(cpudata);
}
-static void cppc_update_perf(struct amd_cpudata *cpudata,
+static void shmem_update_perf(struct amd_cpudata *cpudata,
u32 min_perf, u32 des_perf,
u32 max_perf, bool fast_switch)
{
@@ -527,9 +533,28 @@ cpufreq_policy_put:
cpufreq_cpu_put(policy);
}
-static int amd_pstate_verify(struct cpufreq_policy_data *policy)
+static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
{
- cpufreq_verify_within_cpu_limits(policy);
+ /*
+	 * Initialize the lower frequency limit (i.e. policy->min) with
+	 * lowest_nonlinear_frequency, which is the most energy-efficient
+	 * frequency. Override the initial value set by the cpufreq core and
+	 * amd-pstate qos_requests.
+ */
+ if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) {
+ struct cpufreq_policy *policy = cpufreq_cpu_get(policy_data->cpu);
+ struct amd_cpudata *cpudata;
+
+ if (!policy)
+ return -EINVAL;
+
+ cpudata = policy->driver_data;
+ policy_data->min = cpudata->lowest_nonlinear_freq;
+ cpufreq_cpu_put(policy);
+ }
+
+ cpufreq_verify_within_cpu_limits(policy_data);
+ pr_debug("policy_max =%d, policy_min=%d\n", policy_data->max, policy_data->min);
return 0;
}
@@ -665,34 +690,12 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
{
struct amd_cpudata *cpudata = policy->driver_data;
- struct cppc_perf_ctrls perf_ctrls;
- u32 highest_perf, nominal_perf, nominal_freq, max_freq;
+ u32 nominal_freq, max_freq;
int ret = 0;
- highest_perf = READ_ONCE(cpudata->highest_perf);
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
nominal_freq = READ_ONCE(cpudata->nominal_freq);
max_freq = READ_ONCE(cpudata->max_freq);
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
- u64 value = READ_ONCE(cpudata->cppc_req_cached);
-
- value &= ~GENMASK_ULL(7, 0);
- value |= on ? highest_perf : nominal_perf;
- WRITE_ONCE(cpudata->cppc_req_cached, value);
-
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- } else {
- perf_ctrls.max_perf = on ? highest_perf : nominal_perf;
- ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
- if (ret) {
- cpufreq_cpu_release(policy);
- pr_debug("Failed to set max perf on CPU:%d. ret:%d\n",
- cpudata->cpu, ret);
- return ret;
- }
- }
-
if (on)
policy->cpuinfo.max_freq = max_freq;
else if (policy->cpuinfo.max_freq > nominal_freq * 1000)
@@ -1001,7 +1004,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
policy->fast_switch_possible = true;
ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
- FREQ_QOS_MIN, policy->cpuinfo.min_freq);
+ FREQ_QOS_MIN, FREQ_QOS_MIN_DEFAULT_VALUE);
if (ret < 0) {
dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
goto free_cpudata1;
@@ -1045,7 +1048,7 @@ static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
{
int ret;
- ret = amd_pstate_enable(true);
+ ret = amd_pstate_cppc_enable(true);
if (ret)
pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
@@ -1056,7 +1059,7 @@ static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
{
int ret;
- ret = amd_pstate_enable(false);
+ ret = amd_pstate_cppc_enable(false);
if (ret)
pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
@@ -1189,25 +1192,41 @@ static ssize_t show_energy_performance_preference(
static void amd_pstate_driver_cleanup(void)
{
- amd_pstate_enable(false);
+ amd_pstate_cppc_enable(false);
cppc_state = AMD_PSTATE_DISABLE;
current_pstate_driver = NULL;
}
+static int amd_pstate_set_driver(int mode_idx)
+{
+ if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
+ cppc_state = mode_idx;
+ if (cppc_state == AMD_PSTATE_DISABLE)
+ pr_info("driver is explicitly disabled\n");
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ current_pstate_driver = &amd_pstate_epp_driver;
+
+ if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
+ current_pstate_driver = &amd_pstate_driver;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static int amd_pstate_register_driver(int mode)
{
int ret;
- if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED)
- current_pstate_driver = &amd_pstate_driver;
- else if (mode == AMD_PSTATE_ACTIVE)
- current_pstate_driver = &amd_pstate_epp_driver;
- else
- return -EINVAL;
+ ret = amd_pstate_set_driver(mode);
+ if (ret)
+ return ret;
cppc_state = mode;
- ret = amd_pstate_enable(true);
+ ret = amd_pstate_cppc_enable(true);
if (ret) {
pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n",
ret);
@@ -1485,6 +1504,8 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
WRITE_ONCE(cpudata->cppc_cap1_cached, value);
}
+ current_pstate_driver->adjust_perf = NULL;
+
return 0;
free_cpudata1:
@@ -1507,26 +1528,13 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
+ u32 max_perf, min_perf;
u64 value;
s16 epp;
- if (cpudata->boost_supported && !policy->boost_enabled)
- max_perf = READ_ONCE(cpudata->nominal_perf);
- else
- max_perf = READ_ONCE(cpudata->highest_perf);
+ max_perf = READ_ONCE(cpudata->highest_perf);
min_perf = READ_ONCE(cpudata->lowest_perf);
- max_limit_perf = div_u64(policy->max * max_perf, policy->cpuinfo.max_freq);
- min_limit_perf = div_u64(policy->min * max_perf, policy->cpuinfo.max_freq);
-
- if (min_limit_perf < min_perf)
- min_limit_perf = min_perf;
-
- if (max_limit_perf < min_limit_perf)
- max_limit_perf = min_limit_perf;
-
- WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
- WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
+ amd_pstate_update_min_max_limit(policy);
max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
cpudata->max_limit_perf);
@@ -1535,7 +1543,7 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
value = READ_ONCE(cpudata->cppc_req_cached);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
- min_perf = max_perf;
+ min_perf = min(cpudata->nominal_perf, max_perf);
/* Initial min/max values for CPPC Performance Controls Register */
value &= ~AMD_CPPC_MIN_PERF(~0L);
@@ -1563,12 +1571,6 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
epp = 0;
- /* Set initial EPP value */
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- value &= ~GENMASK_ULL(31, 24);
- value |= (u64)epp << 24;
- }
-
WRITE_ONCE(cpudata->cppc_req_cached, value);
return amd_pstate_set_epp(cpudata, epp);
}
@@ -1605,7 +1607,7 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
u64 value, max_perf;
int ret;
- ret = amd_pstate_enable(true);
+ ret = amd_pstate_cppc_enable(true);
if (ret)
pr_err("failed to enable amd pstate during resume, return %d\n", ret);
@@ -1616,8 +1618,9 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
} else {
perf_ctrls.max_perf = max_perf;
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
+ cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
}
}
@@ -1657,9 +1660,11 @@ static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
} else {
perf_ctrls.desired_perf = 0;
+ perf_ctrls.min_perf = min_perf;
perf_ctrls.max_perf = min_perf;
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
+ cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
}
mutex_unlock(&amd_pstate_limits_lock);
}
@@ -1679,13 +1684,6 @@ static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
return 0;
}
-static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
-{
- cpufreq_verify_within_cpu_limits(policy);
- pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min);
- return 0;
-}
-
static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
@@ -1699,7 +1697,7 @@ static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
cpudata->suspended = true;
	/* disable CPPC in low-level firmware */
- ret = amd_pstate_enable(false);
+ ret = amd_pstate_cppc_enable(false);
if (ret)
pr_err("failed to suspend, return %d\n", ret);
@@ -1741,7 +1739,7 @@ static struct cpufreq_driver amd_pstate_driver = {
static struct cpufreq_driver amd_pstate_epp_driver = {
.flags = CPUFREQ_CONST_LOOPS,
- .verify = amd_pstate_epp_verify_policy,
+ .verify = amd_pstate_verify,
.setpolicy = amd_pstate_epp_set_policy,
.init = amd_pstate_epp_cpu_init,
.exit = amd_pstate_epp_cpu_exit,
@@ -1755,26 +1753,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
.attr = amd_pstate_epp_attr,
};
-static int __init amd_pstate_set_driver(int mode_idx)
-{
- if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
- cppc_state = mode_idx;
- if (cppc_state == AMD_PSTATE_DISABLE)
- pr_info("driver is explicitly disabled\n");
-
- if (cppc_state == AMD_PSTATE_ACTIVE)
- current_pstate_driver = &amd_pstate_epp_driver;
-
- if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
- current_pstate_driver = &amd_pstate_driver;
-
- return 0;
- }
-
- return -EINVAL;
-}
-
-/**
+/*
 * CPPC is not supported on family 17H CPUs with model IDs ranging from 0x10 to 0x2F.
 * Show a debug message that helps check whether the CPU has CPPC support, to aid in
 * diagnosing driver load issues.
*/
@@ -1864,10 +1843,10 @@ static int __init amd_pstate_init(void)
if (cppc_state == AMD_PSTATE_UNDEFINED) {
/* Disable on the following configs by default:
* 1. Undefined platforms
- * 2. Server platforms
+ * 2. Server platforms with CPUs older than Family 0x1A.
*/
if (amd_pstate_acpi_pm_profile_undefined() ||
- amd_pstate_acpi_pm_profile_server()) {
+ (amd_pstate_acpi_pm_profile_server() && boot_cpu_data.x86 < 0x1A)) {
pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
}
@@ -1875,50 +1854,31 @@ static int __init amd_pstate_init(void)
cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE;
}
- switch (cppc_state) {
- case AMD_PSTATE_DISABLE:
+ if (cppc_state == AMD_PSTATE_DISABLE) {
pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
- case AMD_PSTATE_PASSIVE:
- case AMD_PSTATE_ACTIVE:
- case AMD_PSTATE_GUIDED:
- ret = amd_pstate_set_driver(cppc_state);
- if (ret)
- return ret;
- break;
- default:
- return -EINVAL;
}
/* capability check */
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");
- if (cppc_state != AMD_PSTATE_ACTIVE)
- current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
} else {
pr_debug("AMD CPPC shared memory based functionality is supported\n");
- static_call_update(amd_pstate_enable, cppc_enable);
- static_call_update(amd_pstate_init_perf, cppc_init_perf);
- static_call_update(amd_pstate_update_perf, cppc_update_perf);
+ static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable);
+ static_call_update(amd_pstate_init_perf, shmem_init_perf);
+ static_call_update(amd_pstate_update_perf, shmem_update_perf);
}
- if (amd_pstate_prefcore) {
- ret = amd_detect_prefcore(&amd_pstate_prefcore);
- if (ret)
- return ret;
- }
-
- /* enable amd pstate feature */
- ret = amd_pstate_enable(true);
+ ret = amd_pstate_register_driver(cppc_state);
if (ret) {
- pr_err("failed to enable driver mode(%d)\n", cppc_state);
+ pr_err("failed to register with return %d\n", ret);
return ret;
}
- ret = cpufreq_register_driver(current_pstate_driver);
- if (ret) {
- pr_err("failed to register with return %d\n", ret);
- goto disable_driver;
+ if (amd_pstate_prefcore) {
+ ret = amd_detect_prefcore(&amd_pstate_prefcore);
+ if (ret)
+ return ret;
}
dev_root = bus_get_dev_root(&cpu_subsys);
@@ -1935,8 +1895,7 @@ static int __init amd_pstate_init(void)
global_attr_free:
cpufreq_unregister_driver(current_pstate_driver);
-disable_driver:
- amd_pstate_enable(false);
+ amd_pstate_cppc_enable(false);
return ret;
}
device_initcall(amd_pstate_init);
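The msr_*/shmem_* renames above hang off amd-pstate's static-call dispatch: each operation defaults to the MSR backend and is retargeted once at init when only the ACPI CPPC shared-memory interface is available. The pattern, condensed from the hunks above:

DEFINE_STATIC_CALL(amd_pstate_cppc_enable, msr_cppc_enable);

static inline int amd_pstate_cppc_enable(bool enable)
{
	return static_call(amd_pstate_cppc_enable)(enable);
}

/* in amd_pstate_init(), on systems without X86_FEATURE_CPPC: */
static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable);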
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index ea8438550b49..5d03a295a085 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -777,7 +777,7 @@ static struct platform_driver brcm_avs_cpufreq_platdrv = {
.of_match_table = brcm_avs_cpufreq_match,
},
.probe = brcm_avs_cpufreq_probe,
- .remove_new = brcm_avs_cpufreq_remove,
+ .remove = brcm_avs_cpufreq_remove,
};
module_platform_driver(brcm_avs_cpufreq_platdrv);
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 983443396f8f..3a7c3372bda7 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -345,7 +345,7 @@ static struct platform_driver dt_cpufreq_platdrv = {
.name = "cpufreq-dt",
},
.probe = dt_cpufreq_probe,
- .remove_new = dt_cpufreq_remove,
+ .remove = dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f98c9438760c..1a4cae54a01b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1520,7 +1520,7 @@ static int cpufreq_online(unsigned int cpu)
* frequency for longer duration. Hence, a BUG_ON().
*/
BUG_ON(ret);
- pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
+ pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
__func__, policy->cpu, old_freq, policy->cur);
}
}
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 7d2754411d8c..8736be3a06ce 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -145,7 +145,7 @@ static struct platform_driver davinci_cpufreq_driver = {
.driver = {
.name = "cpufreq-davinci",
},
- .remove_new = __exit_p(davinci_cpufreq_remove),
+ .remove = __exit_p(davinci_cpufreq_remove),
};
int __init davinci_cpufreq_init(void)
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 577bb9e2f112..1492c92ffc1a 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -183,7 +183,7 @@ static void imx_cpufreq_dt_remove(struct platform_device *pdev)
static struct platform_driver imx_cpufreq_dt_driver = {
.probe = imx_cpufreq_dt_probe,
- .remove_new = imx_cpufreq_dt_remove,
+ .remove = imx_cpufreq_dt_remove,
.driver = {
.name = "imx-cpufreq-dt",
},
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index c20d3ecc5a81..f3c99f378ad6 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -522,7 +522,7 @@ static struct platform_driver imx6q_cpufreq_platdrv = {
.name = "imx6q-cpufreq",
},
.probe = imx6q_cpufreq_probe,
- .remove_new = imx6q_cpufreq_remove,
+ .remove = imx6q_cpufreq_remove,
};
module_platform_driver(imx6q_cpufreq_platdrv);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cd2ac1ba53d2..b8e2396a708a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1028,26 +1028,29 @@ static void hybrid_update_cpu_capacity_scaling(void)
}
}
-static void __hybrid_init_cpu_capacity_scaling(void)
+static void __hybrid_refresh_cpu_capacity_scaling(void)
{
hybrid_max_perf_cpu = NULL;
hybrid_update_cpu_capacity_scaling();
}
-static void hybrid_init_cpu_capacity_scaling(bool refresh)
+static void hybrid_refresh_cpu_capacity_scaling(void)
{
- bool disable_itmt = false;
+ guard(mutex)(&hybrid_capacity_lock);
- mutex_lock(&hybrid_capacity_lock);
+ __hybrid_refresh_cpu_capacity_scaling();
+}
+static void hybrid_init_cpu_capacity_scaling(bool refresh)
+{
/*
* If hybrid_max_perf_cpu is set at this point, the hybrid CPU capacity
* scaling has been enabled already and the driver is just changing the
* operation mode.
*/
if (refresh) {
- __hybrid_init_cpu_capacity_scaling();
- goto unlock;
+ hybrid_refresh_cpu_capacity_scaling();
+ return;
}
/*
@@ -1056,19 +1059,13 @@ static void hybrid_init_cpu_capacity_scaling(bool refresh)
* do not do that when SMT is in use.
*/
if (hwp_is_hybrid && !sched_smt_active() && arch_enable_hybrid_capacity_scale()) {
- __hybrid_init_cpu_capacity_scaling();
- disable_itmt = true;
- }
-
-unlock:
- mutex_unlock(&hybrid_capacity_lock);
-
- /*
- * Disabling ITMT causes sched domains to be rebuilt to disable asym
- * packing and enable asym capacity.
- */
- if (disable_itmt)
+ hybrid_refresh_cpu_capacity_scaling();
+ /*
+ * Disabling ITMT causes sched domains to be rebuilt to disable asym
+ * packing and enable asym capacity.
+ */
sched_clear_itmt_support();
+ }
}
static bool hybrid_clear_max_perf_cpu(void)
@@ -1404,7 +1401,7 @@ static void intel_pstate_update_limits_for_all(void)
mutex_lock(&hybrid_capacity_lock);
if (hybrid_max_perf_cpu)
- __hybrid_init_cpu_capacity_scaling();
+ __hybrid_refresh_cpu_capacity_scaling();
mutex_unlock(&hybrid_capacity_lock);
}
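hybrid_refresh_cpu_capacity_scaling() above is built on the scope-based guard() helper from <linux/cleanup.h>: the mutex is taken at the declaration and released automatically when the scope ends, so no unlock label is needed on any exit path. Condensed from the hunk above:

static void hybrid_refresh_cpu_capacity_scaling(void)
{
	guard(mutex)(&hybrid_capacity_lock);

	__hybrid_refresh_cpu_capacity_scaling();
}	/* hybrid_capacity_lock is dropped here, on every return path */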
@@ -3658,6 +3655,8 @@ static const struct x86_cpu_id intel_epp_default[] = {
X86_MATCH_VFM(INTEL_ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
X86_MATCH_VFM(INTEL_METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
179, 64, 16)),
X86_MATCH_VFM(INTEL_ARROWLAKE, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index fd20b986d1f2..312f2654d1d5 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -189,7 +189,7 @@ static void kirkwood_cpufreq_remove(struct platform_device *pdev)
static struct platform_driver kirkwood_cpufreq_platform_driver = {
.probe = kirkwood_cpufreq_probe,
- .remove_new = kirkwood_cpufreq_remove,
+ .remove = kirkwood_cpufreq_remove,
.driver = {
.name = "kirkwood-cpufreq",
},
diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c
index 6b5e6798d9a2..61ebebf69455 100644
--- a/drivers/cpufreq/loongson3_cpufreq.c
+++ b/drivers/cpufreq/loongson3_cpufreq.c
@@ -386,7 +386,7 @@ static struct platform_driver loongson3_platform_driver = {
},
.id_table = cpufreq_id_table,
.probe = loongson3_cpufreq_probe,
- .remove_new = loongson3_cpufreq_remove,
+ .remove = loongson3_cpufreq_remove,
};
module_platform_driver(loongson3_platform_driver);
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 8925e096d5b9..f7db5f4ad306 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -344,7 +344,7 @@ MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);
static struct platform_driver mtk_cpufreq_hw_driver = {
.probe = mtk_cpufreq_hw_driver_probe,
- .remove_new = mtk_cpufreq_hw_driver_remove,
+ .remove = mtk_cpufreq_hw_driver_remove,
.driver = {
.name = "mtk-cpufreq-hw",
.of_match_table = mtk_cpufreq_hw_match,
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index de8be0a8932d..106220c0fd11 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -188,7 +188,7 @@ static struct platform_driver omap_cpufreq_platdrv = {
.name = "omap-cpufreq",
},
.probe = omap_cpufreq_probe,
- .remove_new = omap_cpufreq_remove,
+ .remove = omap_cpufreq_remove,
};
module_platform_driver(omap_cpufreq_platdrv);
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 771efbf51a48..ac2e90a65f0c 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -615,7 +615,7 @@ static struct platform_driver pcc_cpufreq_platdrv = {
.driver = {
.name = "pcc-cpufreq",
},
- .remove_new = pcc_cpufreq_remove,
+ .remove = pcc_cpufreq_remove,
};
static int __init pcc_cpufreq_init(void)
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 900d6844c43d..98129565acb8 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -736,7 +736,7 @@ static void qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
static struct platform_driver qcom_cpufreq_hw_driver = {
.probe = qcom_cpufreq_hw_driver_probe,
- .remove_new = qcom_cpufreq_hw_driver_remove,
+ .remove = qcom_cpufreq_hw_driver_remove,
.driver = {
.name = "qcom-cpufreq-hw",
.of_match_table = qcom_cpufreq_hw_match,
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 703308fb891a..08e518c89fc3 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -604,7 +604,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(qcom_cpufreq_pm_ops, qcom_cpufreq_suspend, NULL)
static struct platform_driver qcom_cpufreq_driver = {
.probe = qcom_cpufreq_probe,
- .remove_new = qcom_cpufreq_remove,
+ .remove = qcom_cpufreq_remove,
.driver = {
.name = "qcom-cpufreq-nvmem",
.pm = pm_sleep_ptr(&qcom_cpufreq_pm_ops),
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 3519bf34d397..a37ce051236c 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -296,7 +296,7 @@ static struct platform_driver qoriq_cpufreq_platform_driver = {
.name = "qoriq-cpufreq",
},
.probe = qoriq_cpufreq_probe,
- .remove_new = qoriq_cpufreq_remove,
+ .remove = qoriq_cpufreq_remove,
};
module_platform_driver(qoriq_cpufreq_platform_driver);
diff --git a/drivers/cpufreq/raspberrypi-cpufreq.c b/drivers/cpufreq/raspberrypi-cpufreq.c
index e0705cc9a57d..5050932954e3 100644
--- a/drivers/cpufreq/raspberrypi-cpufreq.c
+++ b/drivers/cpufreq/raspberrypi-cpufreq.c
@@ -85,7 +85,7 @@ static struct platform_driver raspberrypi_cpufreq_driver = {
.name = "raspberrypi-cpufreq",
},
.probe = raspberrypi_cpufreq_probe,
- .remove_new = raspberrypi_cpufreq_remove,
+ .remove = raspberrypi_cpufreq_remove,
};
module_platform_driver(raspberrypi_cpufreq_driver);
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 8d73e6e8be2a..cd89c1b9832c 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -217,7 +217,7 @@ static struct platform_driver scpi_cpufreq_platdrv = {
.name = "scpi-cpufreq",
},
.probe = scpi_cpufreq_probe,
- .remove_new = scpi_cpufreq_remove,
+ .remove = scpi_cpufreq_remove,
};
module_platform_driver(scpi_cpufreq_platdrv);
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 293921acec93..352e1a69a85e 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -283,7 +283,7 @@ static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
static struct platform_driver sun50i_cpufreq_driver = {
.probe = sun50i_cpufreq_nvmem_probe,
- .remove_new = sun50i_cpufreq_nvmem_remove,
+ .remove = sun50i_cpufreq_nvmem_remove,
.driver = {
.name = "sun50i-cpufreq-nvmem",
},
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index 7b8fcfa55038..c7761eb99f3c 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -276,7 +276,7 @@ static struct platform_driver tegra186_cpufreq_platform_driver = {
.of_match_table = tegra186_cpufreq_of_match,
},
.probe = tegra186_cpufreq_probe,
- .remove_new = tegra186_cpufreq_remove,
+ .remove = tegra186_cpufreq_remove,
};
module_platform_driver(tegra186_cpufreq_platform_driver);
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 07ea7ed61b68..9055dd398e7f 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -818,7 +818,7 @@ static struct platform_driver tegra194_ccplex_driver = {
.of_match_table = tegra194_cpufreq_of_match,
},
.probe = tegra194_cpufreq_probe,
- .remove_new = tegra194_cpufreq_remove,
+ .remove = tegra194_cpufreq_remove,
};
module_platform_driver(tegra194_ccplex_driver);
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 3fadf536c429..0f86cdb7ec8a 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -565,7 +565,7 @@ static struct platform_driver ve_spc_cpufreq_platdrv = {
.name = "vexpress-spc-cpufreq",
},
.probe = ve_spc_cpufreq_probe,
- .remove_new = ve_spc_cpufreq_remove,
+ .remove = ve_spc_cpufreq_remove,
};
module_platform_driver(ve_spc_cpufreq_platdrv);
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 7cfb980a357d..caba6f4bb1b7 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -139,7 +139,7 @@ out_kfree_drv:
*
* Initializes arm cpuidle driver for all CPUs, if any CPU fails
* to register cpuidle driver then rollback to cancel all CPUs
- * registeration.
+ * registration.
*/
static int __init arm_idle_init(void)
{
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 1fc9968eae19..3ab240e0e122 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -48,7 +48,7 @@ static int qcom_cpu_spc(struct spm_driver_data *drv)
ret = cpu_suspend(0, qcom_pm_collapse);
/*
* ARM common code executes WFI without calling into our driver and
- * if the SPM mode is not reset, then we may accidently power down the
+ * if the SPM mode is not reset, then we may accidentally power down the
* cpu when we intended only to gate the cpu clock.
* Ensure the state is set to standby before returning.
*/
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 9e418aec1755..06ace16f9e71 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -406,7 +406,7 @@ void cpuidle_reflect(struct cpuidle_device *dev, int index)
* Min polling interval of 10usec is a guess. It is assuming that
* for most users, the time for a single ping-pong workload like
* perf bench pipe would generally complete within 10usec but
- * this is hardware dependant. Actual time can be estimated with
+ * this is hardware dependent. Actual time can be estimated with
*
* perf bench sched pipe -l 10000
*
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index cf5873cc45dc..9bbfa594c442 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -261,7 +261,7 @@ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv)
* @drv: a pointer to a valid struct cpuidle_driver
*
* Register the driver under a lock to prevent concurrent attempts to
- * [un]register the driver from occuring at the same time.
+ * [un]register the driver from occurring at the same time.
*
* Returns 0 on success, a negative error code (returned by
* __cpuidle_register_driver()) otherwise.
@@ -296,7 +296,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_driver);
* @drv: a pointer to a valid struct cpuidle_driver
*
* Unregisters the cpuidle driver under a lock to prevent concurrent attempts
- * to [un]register the driver from occuring at the same time. @drv has to
+ * to [un]register the driver from occurring at the same time. @drv has to
* match the currently registered driver.
*/
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f3c9d49f0f2a..28363bfa3e4c 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -19,7 +19,7 @@
#include "gov.h"
-#define BUCKETS 12
+#define BUCKETS 6
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
@@ -29,12 +29,11 @@
/*
* Concepts and ideas behind the menu governor
*
- * For the menu governor, there are 3 decision factors for picking a C
+ * For the menu governor, there are 2 decision factors for picking a C
* state:
* 1) Energy break even point
- * 2) Performance impact
- * 3) Latency tolerance (from pmqos infrastructure)
- * These three factors are treated independently.
+ * 2) Latency tolerance (from pmqos infrastructure)
+ * These two factors are treated independently.
*
* Energy break even point
* -----------------------
@@ -75,30 +74,6 @@
 * intervals and if the standard deviation of these 8 intervals is below a
* threshold value, we use the average of these intervals as prediction.
*
- * Limiting Performance Impact
- * ---------------------------
- * C states, especially those with large exit latencies, can have a real
- * noticeable impact on workloads, which is not acceptable for most sysadmins,
- * and in addition, less performance has a power price of its own.
- *
- * As a general rule of thumb, menu assumes that the following heuristic
- * holds:
- * The busier the system, the less impact of C states is acceptable
- *
- * This rule-of-thumb is implemented using a performance-multiplier:
- * If the exit latency times the performance multiplier is longer than
- * the predicted duration, the C state is not considered a candidate
- * for selection due to a too high performance impact. So the higher
- * this multiplier is, the longer we need to be idle to pick a deep C
- * state, and thus the less likely a busy CPU will hit such a deep
- * C state.
- *
- * Currently there is only one value determining the factor:
- * 10 points are added for each process that is waiting for IO on this CPU.
- * (This value was experimentally determined.)
- * Utilization is no longer a factor as it was shown that it never contributed
- * significantly to the performance multiplier in the first place.
- *
*/
struct menu_device {
@@ -112,19 +87,10 @@ struct menu_device {
int interval_ptr;
};
-static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
+static inline int which_bucket(u64 duration_ns)
{
int bucket = 0;
- /*
- * We keep two groups of stats; one with no
- * IO pending, one without.
- * This allows us to calculate
- * E(duration)|iowait
- */
- if (nr_iowaiters)
- bucket = BUCKETS/2;
-
if (duration_ns < 10ULL * NSEC_PER_USEC)
return bucket;
if (duration_ns < 100ULL * NSEC_PER_USEC)
@@ -138,19 +104,6 @@ static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
return bucket + 5;
}
-/*
- * Return a multiplier for the exit latency that is intended
- * to take performance requirements into account.
- * The more performance critical we estimate the system
- * to be, the higher this multiplier, and thus the higher
- * the barrier to go to an expensive C state.
- */
-static inline int performance_multiplier(unsigned int nr_iowaiters)
-{
- /* for IO wait tasks (per cpu!) we add 10x each */
- return 1 + 10 * nr_iowaiters;
-}
-
static DEFINE_PER_CPU(struct menu_device, menu_devices);
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
@@ -258,8 +211,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
struct menu_device *data = this_cpu_ptr(&menu_devices);
s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
u64 predicted_ns;
- u64 interactivity_req;
- unsigned int nr_iowaiters;
ktime_t delta, delta_tick;
int i, idx;
@@ -268,8 +219,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
data->needs_update = 0;
}
- nr_iowaiters = nr_iowait_cpu(dev->cpu);
-
/* Find the shortest expected idle interval. */
predicted_ns = get_typical_interval(data) * NSEC_PER_USEC;
if (predicted_ns > RESIDENCY_THRESHOLD_NS) {
@@ -283,7 +232,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
}
data->next_timer_ns = delta;
- data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);
+ data->bucket = which_bucket(data->next_timer_ns);
/* Round up the result for half microseconds. */
timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 +
@@ -301,7 +250,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
*/
data->next_timer_ns = KTIME_MAX;
delta_tick = TICK_NSEC / 2;
- data->bucket = which_bucket(KTIME_MAX, nr_iowaiters);
+ data->bucket = which_bucket(KTIME_MAX);
}
if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
@@ -328,15 +277,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
*/
if (predicted_ns < TICK_NSEC)
predicted_ns = data->next_timer_ns;
- } else {
- /*
- * Use the performance multiplier and the user-configurable
- * latency_req to determine the maximum exit latency.
- */
- interactivity_req = div64_u64(predicted_ns,
- performance_multiplier(nr_iowaiters));
- if (latency_req > interactivity_req)
- latency_req = interactivity_req;
+ } else if (latency_req > predicted_ns) {
+ latency_req = predicted_ns;
}
/*
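With the IO-wait split gone, which_bucket() keys purely on the expected idle duration, and BUCKETS drops from 12 to 6. The resulting mapping, assuming the decade thresholds of the existing code (the 1 ms/10 ms/100 ms steps are elided from the hunk above):

	/* duration < 10 us	-> bucket 0 */
	/* duration < 100 us	-> bucket 1 */
	/* duration < 1 ms	-> bucket 2 */
	/* duration < 10 ms	-> bucket 3 */
	/* duration < 100 ms	-> bucket 4 */
	/* otherwise		-> bucket 5 */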
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 08b1238bcd7b..0a9cdd31cbd9 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -95,6 +95,9 @@ config PKEY
loaded when a CEX crypto card is available.
- A pkey EP11 kernel module (pkey-ep11.ko) which is automatically
loaded when a CEX crypto card is available.
+ - A pkey UV kernel module (pkey-uv.ko) which is automatically
+ loaded when the Ultravisor feature is available within a
+ protected execution environment.
Select this option if you want to enable the kernel and userspace
API for protected key handling.
@@ -152,6 +155,24 @@ config PKEY_PCKMO
this option unless you are sure you never need to derive protected
keys from clear key values directly via PCKMO.
+config PKEY_UV
+ tristate "PKEY UV support handler"
+ depends on PKEY
+ depends on S390_UV_UAPI
+ help
+ This is the PKEY Ultravisor support handler for deriving protected
+ keys from secrets stored within the Ultravisor (UV).
+
+ This module works together with the UV device and supports the
+ retrieval of protected keys from secrets stored within the
+ UV firmware layer. This service is only available within
+ a protected execution guest and thus this module will fail upon
+ modprobe if no protected execution environment is detected.
+
+	  Enable this option if you intend to run this kernel as a KVM
+	  guest with protected execution and you want to use UV-retrievable
+	  secrets via the PKEY API.
+
config CRYPTO_PAES_S390
tristate "PAES cipher algorithms"
depends on S390
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 890664bd5f0f..58a76e2ba64e 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -542,7 +542,7 @@ MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
static struct platform_driver sun4i_ss_driver = {
.probe = sun4i_ss_probe,
- .remove_new = sun4i_ss_remove,
+ .remove = sun4i_ss_remove,
.driver = {
.name = "sun4i-ss",
.pm = &sun4i_ss_pm_ops,
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index e55e58e164db..ec1ffda9ea32 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -1129,7 +1129,7 @@ MODULE_DEVICE_TABLE(of, sun8i_ce_crypto_of_match_table);
static struct platform_driver sun8i_ce_driver = {
.probe = sun8i_ce_probe,
- .remove_new = sun8i_ce_remove,
+ .remove = sun8i_ce_remove,
.driver = {
.name = "sun8i-ce",
.pm = &sun8i_ce_pm_ops,
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 0dbc0220146c..f45685707e0d 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -929,7 +929,7 @@ MODULE_DEVICE_TABLE(of, sun8i_ss_crypto_of_match_table);
static struct platform_driver sun8i_ss_driver = {
.probe = sun8i_ss_probe,
- .remove_new = sun8i_ss_remove,
+ .remove = sun8i_ss_remove,
.driver = {
.name = "sun8i-ss",
.pm = &sun8i_ss_pm_ops,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 6006703fb6d7..ec3ccfa60445 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -653,9 +653,6 @@ static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
crypto4xx_destroy_pdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
crypto4xx_destroy_sdr(core_dev->dev);
- iounmap(core_dev->dev->ce_base);
- kfree(core_dev->dev);
- kfree(core_dev);
}
static u32 get_next_gd(u32 current)
@@ -1333,17 +1330,12 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
static int crypto4xx_probe(struct platform_device *ofdev)
{
int rc;
- struct resource res;
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev;
struct device_node *np;
u32 pvr;
bool is_revb = true;
- rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
- if (rc)
- return -ENODEV;
-
np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto");
if (np) {
mtdcri(SDR0, PPC460EX_SDR0_SRST,
@@ -1374,16 +1366,17 @@ static int crypto4xx_probe(struct platform_device *ofdev)
of_node_put(np);
- core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
+ core_dev = devm_kzalloc(
+ &ofdev->dev, sizeof(struct crypto4xx_core_device), GFP_KERNEL);
if (!core_dev)
return -ENOMEM;
dev_set_drvdata(dev, core_dev);
core_dev->ofdev = ofdev;
- core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
- rc = -ENOMEM;
+ core_dev->dev = devm_kzalloc(
+ &ofdev->dev, sizeof(struct crypto4xx_device), GFP_KERNEL);
if (!core_dev->dev)
- goto err_alloc_dev;
+ return -ENOMEM;
/*
* Older version of 460EX/GT have a hardware bug.
@@ -1402,7 +1395,9 @@ static int crypto4xx_probe(struct platform_device *ofdev)
core_dev->dev->core_dev = core_dev;
core_dev->dev->is_revb = is_revb;
core_dev->device = dev;
- mutex_init(&core_dev->rng_lock);
+ rc = devm_mutex_init(&ofdev->dev, &core_dev->rng_lock);
+ if (rc)
+ return rc;
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
ratelimit_default_init(&core_dev->dev->aead_ratelimit);
@@ -1421,21 +1416,21 @@ static int crypto4xx_probe(struct platform_device *ofdev)
tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
(unsigned long) dev);
- core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
- if (!core_dev->dev->ce_base) {
- dev_err(dev, "failed to of_iomap\n");
- rc = -ENOMEM;
- goto err_iomap;
+ core_dev->dev->ce_base = devm_platform_ioremap_resource(ofdev, 0);
+ if (IS_ERR(core_dev->dev->ce_base)) {
+ dev_err(&ofdev->dev, "failed to ioremap resource");
+ rc = PTR_ERR(core_dev->dev->ce_base);
+ goto err_build_sdr;
}
/* Register for Crypto isr, Crypto Engine IRQ */
core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- rc = request_irq(core_dev->irq, is_revb ?
- crypto4xx_ce_interrupt_handler_revb :
- crypto4xx_ce_interrupt_handler, 0,
- KBUILD_MODNAME, dev);
+ rc = devm_request_irq(&ofdev->dev, core_dev->irq,
+ is_revb ? crypto4xx_ce_interrupt_handler_revb :
+ crypto4xx_ce_interrupt_handler,
+ 0, KBUILD_MODNAME, dev);
if (rc)
- goto err_request_irq;
+ goto err_iomap;
/* need to setup pdr, rdr, gdr and sdr before this */
crypto4xx_hw_init(core_dev->dev);
@@ -1444,26 +1439,17 @@ static int crypto4xx_probe(struct platform_device *ofdev)
rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
ARRAY_SIZE(crypto4xx_alg));
if (rc)
- goto err_start_dev;
+ goto err_iomap;
ppc4xx_trng_probe(core_dev);
return 0;
-err_start_dev:
- free_irq(core_dev->irq, dev);
-err_request_irq:
- irq_dispose_mapping(core_dev->irq);
- iounmap(core_dev->dev->ce_base);
err_iomap:
tasklet_kill(&core_dev->tasklet);
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
crypto4xx_destroy_pdr(core_dev->dev);
- kfree(core_dev->dev);
-err_alloc_dev:
- kfree(core_dev);
-
return rc;
}
@@ -1474,13 +1460,9 @@ static void crypto4xx_remove(struct platform_device *ofdev)
ppc4xx_trng_remove(core_dev);
- free_irq(core_dev->irq, dev);
- irq_dispose_mapping(core_dev->irq);
-
tasklet_kill(&core_dev->tasklet);
/* Un-register with Linux CryptoAPI */
crypto4xx_unregister_alg(core_dev->dev);
- mutex_destroy(&core_dev->rng_lock);
/* Free all allocated memory */
crypto4xx_stop_all(core_dev);
}
@@ -1497,7 +1479,7 @@ static struct platform_driver crypto4xx_driver = {
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
- .remove_new = crypto4xx_remove,
+ .remove = crypto4xx_remove,
};
module_platform_driver(crypto4xx_driver);
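The crypto4xx conversion above works because devm_*-managed resources are torn down automatically, in reverse acquisition order, both when probe fails and when the device unbinds; that is what lets the error labels and most of crypto4xx_remove() shrink. A minimal sketch of the idiom (foo_probe and foo_isr are hypothetical):

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base;
	int irq, rc;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	rc = devm_request_irq(&pdev->dev, irq, foo_isr, 0, KBUILD_MODNAME, pdev);
	if (rc)
		return rc;	/* the mapping above is unwound automatically */

	return 0;
}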
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index f54ab0d0b1e8..1c18a5b8470e 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -240,11 +240,9 @@ static int meson_crypto_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mc);
mc->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mc->base)) {
- err = PTR_ERR(mc->base);
- dev_err(&pdev->dev, "Cannot request MMIO err=%d\n", err);
- return err;
- }
+ if (IS_ERR(mc->base))
+ return PTR_ERR(mc->base);
+
mc->busclk = devm_clk_get(&pdev->dev, "blkmv");
if (IS_ERR(mc->busclk)) {
err = PTR_ERR(mc->busclk);
@@ -322,7 +320,7 @@ MODULE_DEVICE_TABLE(of, meson_crypto_of_match_table);
static struct platform_driver meson_crypto_driver = {
.probe = meson_crypto_probe,
- .remove_new = meson_crypto_remove,
+ .remove = meson_crypto_remove,
.driver = {
.name = "gxl-crypto",
.of_match_table = meson_crypto_of_match_table,
diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c
index b4613bd4ad96..8d1c79aaca07 100644
--- a/drivers/crypto/aspeed/aspeed-acry.c
+++ b/drivers/crypto/aspeed/aspeed-acry.c
@@ -601,8 +601,6 @@ static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
.akcipher.base = {
.encrypt = aspeed_acry_rsa_enc,
.decrypt = aspeed_acry_rsa_dec,
- .sign = aspeed_acry_rsa_dec,
- .verify = aspeed_acry_rsa_enc,
.set_pub_key = aspeed_acry_rsa_set_pub_key,
.set_priv_key = aspeed_acry_rsa_set_priv_key,
.max_size = aspeed_acry_rsa_max_size,
@@ -808,7 +806,7 @@ MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);
static struct platform_driver aspeed_acry_driver = {
.probe = aspeed_acry_probe,
- .remove_new = aspeed_acry_remove,
+ .remove = aspeed_acry_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = aspeed_acry_of_matches,
diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c
index 062f2a66dd23..3fe644bfe037 100644
--- a/drivers/crypto/aspeed/aspeed-hace.c
+++ b/drivers/crypto/aspeed/aspeed-hace.c
@@ -266,7 +266,7 @@ MODULE_DEVICE_TABLE(of, aspeed_hace_of_matches);
static struct platform_driver aspeed_hace_driver = {
.probe = aspeed_hace_probe,
- .remove_new = aspeed_hace_remove,
+ .remove = aspeed_hace_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = aspeed_hace_of_matches,
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 0dd90785db9a..14bf86957d31 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2453,7 +2453,7 @@ static void atmel_aes_remove(struct platform_device *pdev)
static struct platform_driver atmel_aes_driver = {
.probe = atmel_aes_probe,
- .remove_new = atmel_aes_remove,
+ .remove = atmel_aes_remove,
.driver = {
.name = "atmel_aes",
.of_match_table = atmel_aes_dt_ids,
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 590ea984c622..0d48e64d28b1 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -379,7 +379,7 @@ MODULE_DEVICE_TABLE(of, atmel_ecc_dt_ids);
#endif
static const struct i2c_device_id atmel_ecc_id[] = {
- { "atecc508a", 0 },
+ { "atecc508a" },
{ }
};
MODULE_DEVICE_TABLE(i2c, atmel_ecc_id);
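Dropping the trailing 0 is safe because unmentioned fields of a partial initializer are zeroed, so the explicit driver_data carried no information. A sketch with an illustrative table name:

#include <linux/mod_devicetable.h>

static const struct i2c_device_id demo_ids[] = {
	{ "atecc508a" },	/* .driver_data implicitly 0 */
	{ }			/* sentinel */
};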
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8cc57df25778..67a170608566 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2691,7 +2691,7 @@ static void atmel_sha_remove(struct platform_device *pdev)
static struct platform_driver atmel_sha_driver = {
.probe = atmel_sha_probe,
- .remove_new = atmel_sha_remove,
+ .remove = atmel_sha_remove,
.driver = {
.name = "atmel_sha",
.of_match_table = atmel_sha_dt_ids,
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index a02d496f4c41..75bebec2c757 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -202,8 +202,8 @@ static const struct of_device_id atmel_sha204a_dt_ids[] __maybe_unused = {
MODULE_DEVICE_TABLE(of, atmel_sha204a_dt_ids);
static const struct i2c_device_id atmel_sha204a_id[] = {
- { "atsha204", 0 },
- { "atsha204a", 0 },
+ { "atsha204" },
+ { "atsha204a" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, atmel_sha204a_id);
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index dcc2380a5889..de9717e221e4 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -872,7 +872,7 @@ static void atmel_tdes_done_task(unsigned long data)
if (!err)
err = atmel_tdes_crypt_start(dd);
if (!err)
- return; /* DMA started. Not fininishing. */
+ return; /* DMA started. Not finishing. */
}
atmel_tdes_finish_req(dd, err);
@@ -1074,7 +1074,7 @@ static void atmel_tdes_remove(struct platform_device *pdev)
static struct platform_driver atmel_tdes_driver = {
.probe = atmel_tdes_probe,
- .remove_new = atmel_tdes_remove,
+ .remove = atmel_tdes_remove,
.driver = {
.name = "atmel_tdes",
.of_match_table = atmel_tdes_dt_ids,
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 75440ea6206e..1c1f57baef0e 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2975,7 +2975,7 @@ static void artpec6_crypto_remove(struct platform_device *pdev)
static struct platform_driver artpec6_crypto_driver = {
.probe = artpec6_crypto_probe,
- .remove_new = artpec6_crypto_remove,
+ .remove = artpec6_crypto_remove,
.driver = {
.name = "artpec6-crypto",
.of_match_table = artpec6_crypto_of_match,
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 1a3ecd44cbaf..9e6798efbfb7 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -2415,6 +2415,7 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
static int ahash_hmac_init(struct ahash_request *req)
{
+ int ret;
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
@@ -2424,7 +2425,9 @@ static int ahash_hmac_init(struct ahash_request *req)
flow_log("ahash_hmac_init()\n");
/* init the context as a hash */
- ahash_init(req);
+ ret = ahash_init(req);
+ if (ret)
+ return ret;
if (!spu_no_incr_hash(ctx)) {
/* SPU-M can do incr hashing but needs sw for outer HMAC */
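The ahash_hmac_init() fix follows the usual rule that an int-returning crypto op must have its status checked. Condensed to its essentials, with hypothetical names standing in for the driver's own helpers:

#include <crypto/hash.h>

static int demo_inner_init(struct ahash_request *req)
{
	return 0;	/* stand-in for the driver's plain-hash init */
}

static int demo_hmac_init(struct ahash_request *req)
{
	int ret;

	/* The inner init can fail, so its status must propagate. */
	ret = demo_inner_init(req);
	if (ret)
		return ret;

	/* ...HMAC-specific request setup continues here... */
	return 0;
}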
@@ -4704,7 +4707,7 @@ static struct platform_driver bcm_spu_pdriver = {
.of_match_table = of_match_ptr(bcm_spu_dt_ids),
},
.probe = bcm_spu_probe,
- .remove_new = bcm_spu_remove,
+ .remove = bcm_spu_remove,
};
module_platform_driver(bcm_spu_pdriver);
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 887a5f2fb927..cb001aa1de66 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -984,7 +984,7 @@ err:
return -ENOMEM;
}
-static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
+static int caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
struct rsa_key *raw_key)
{
struct caam_rsa_key *rsa_key = &ctx->key;
@@ -994,7 +994,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
if (!rsa_key->p)
- return;
+ return -ENOMEM;
rsa_key->p_sz = p_sz;
rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
@@ -1029,7 +1029,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
rsa_key->priv_form = FORM3;
- return;
+ return 0;
free_dq:
kfree_sensitive(rsa_key->dq);
@@ -1043,6 +1043,7 @@ free_q:
kfree_sensitive(rsa_key->q);
free_p:
kfree_sensitive(rsa_key->p);
+ return -ENOMEM;
}
static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
@@ -1088,7 +1089,9 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
rsa_key->e_sz = raw_key.e_sz;
rsa_key->n_sz = raw_key.n_sz;
- caam_rsa_set_priv_key_form(ctx, &raw_key);
+ ret = caam_rsa_set_priv_key_form(ctx, &raw_key);
+ if (ret)
+ goto err;
return 0;
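The caampkc change is the standard void-to-int conversion for a helper that allocates several buffers: each failed allocation unwinds the earlier ones via goto labels, and the status bubbles up to the caller. A condensed sketch of the shape, with hypothetical names:

#include <linux/errno.h>
#include <linux/slab.h>

struct demo_key {
	void *p, *q;
};

static int demo_alloc_parts(struct demo_key *k, size_t n)
{
	k->p = kzalloc(n, GFP_KERNEL);
	if (!k->p)
		return -ENOMEM;

	k->q = kzalloc(n, GFP_KERNEL);
	if (!k->q)
		goto free_p;

	return 0;

free_p:
	kfree_sensitive(k->p);	/* key material: zeroize before freeing */
	k->p = NULL;
	return -ENOMEM;
}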
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 26eba7de3fb0..9fcdb64084ac 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -819,7 +819,7 @@ static struct platform_driver caam_jr_driver = {
.pm = pm_ptr(&caam_jr_pm_ops),
},
.probe = caam_jr_probe,
- .remove_new = caam_jr_remove,
+ .remove = caam_jr_remove,
.shutdown = caam_jr_remove,
};
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index f6111ee9ed34..7701d00bcb3a 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -733,7 +733,7 @@ static void free_caam_qi_pcpu_netdev(const cpumask_t *cpus)
int caam_qi_init(struct platform_device *caam_pdev)
{
int err, i;
- struct device *ctrldev = &caam_pdev->dev, *qidev;
+ struct device *qidev = &caam_pdev->dev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
cpumask_var_t clean_mask;
@@ -742,8 +742,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
goto fail_cpumask;
- ctrlpriv = dev_get_drvdata(ctrldev);
- qidev = ctrldev;
+ ctrlpriv = dev_get_drvdata(qidev);
/* Initialize the congestion detection */
err = init_cgr(qidev);
@@ -794,7 +793,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
caam_debugfs_qi_init(ctrlpriv);
- err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv);
+ err = devm_add_action_or_reset(qidev, caam_qi_shutdown, qidev);
if (err)
goto fail2;
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 6872ac344001..54de869e5374 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -44,7 +44,7 @@ static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
dev_err(dev, "Cores still busy %llx", coremask);
grp = cpt_read_csr64(cpt->reg_base,
CPTX_PF_EXEC_BUSY(0));
- if (timeout--)
+ if (!timeout--)
break;
udelay(CSR_DELAY);
@@ -302,6 +302,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
ret = do_cpt_init(cpt, mcode);
if (ret) {
+ dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
+ mcode->code, mcode->phys_base);
dev_err(dev, "do_cpt_init failed with ret: %d\n", ret);
goto fw_release;
}
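The cptpf hunk above plugs a leak: mcode->code came from dma_alloc_coherent(), so every failure exit after that allocation must hand the buffer back. A sketch of the pairing, with illustrative names:

#include <linux/dma-mapping.h>

static int demo_init_hw(struct device *dev, void *buf, dma_addr_t phys)
{
	return 0;	/* stand-in for do_cpt_init() */
}

static int demo_load_fw(struct device *dev, size_t size)
{
	dma_addr_t phys;
	void *buf;
	int ret;

	buf = dma_alloc_coherent(dev, size, &phys, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = demo_init_hw(dev, buf, phys);
	if (ret) {
		/* Every exit after the allocation must mirror it exactly. */
		dma_free_coherent(dev, size, buf, phys);
		return ret;
	}
	return 0;
}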
@@ -394,7 +396,7 @@ static void cpt_disable_all_cores(struct cpt_device *cpt)
dev_err(dev, "Cores still busy");
grp = cpt_read_csr64(cpt->reg_base,
CPTX_PF_EXEC_BUSY(0));
- if (timeout--)
+ if (!timeout--)
break;
udelay(CSR_DELAY);
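Both cpt timeout hunks fix an inverted test: with if (timeout--) the loop broke out while budget remained, i.e. after a single poll; if (!timeout--) breaks only once the budget is exhausted. The intended loop, reduced to a sketch with illustrative constants:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

static int demo_wait_idle(void __iomem *busy_reg)
{
	int timeout = 100;		/* poll budget */

	while (readl(busy_reg)) {
		if (!timeout--)		/* budget exhausted: give up */
			return -ETIMEDOUT;
		udelay(10);
	}
	return 0;
}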
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index 153004bdfb5c..fb59bb282455 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -238,7 +238,7 @@ static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
qinfo = &cptvf->cqinfo;
queue = &qinfo->queue[qno];
- /* lock commad queue */
+ /* lock command queue */
spin_lock(&queue->lock);
ent = &queue->qhead->head[queue->idx * qinfo->cmd_size];
memcpy(ent, (void *)cmd, qinfo->cmd_size);
@@ -510,7 +510,7 @@ get_pending_entry:
info->time_in = jiffies;
info->req = req;
- /* Create the CPT_INST_S type command for HW intrepretation */
+ /* Create the CPT_INST_S type command for HW interpretation */
cptinst.s.doneint = true;
cptinst.s.res_addr = (u64)info->comp_baddr;
cptinst.s.tag = 0;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index a5cdc2b48bd6..068265207ddd 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -17,7 +17,7 @@
#define CRYPTO_CTX_SIZE 256
-/* packet inuput ring alignments */
+/* packet input ring alignments */
#define PKTIN_Q_ALIGN_BYTES 16
/* AQM Queue input alignments */
#define AQM_Q_ALIGN_BYTES 32
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index ff6ceb4feee0..3933cac1694d 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -210,7 +210,7 @@ static struct platform_driver sp_platform_driver = {
.of_match_table = sp_of_match,
},
.probe = sp_platform_probe,
- .remove_new = sp_platform_remove,
+ .remove = sp_platform_remove,
#ifdef CONFIG_PM
.suspend = sp_platform_suspend,
.resume = sp_platform_resume,
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 5ef39d682389..81533681f7fb 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -2226,7 +2226,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
memset(areq_ctx, 0, sizeof(*areq_ctx));
- //plaintext is not encryped with rfc4543
+ //plaintext is not encrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
@@ -2277,7 +2277,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
memset(areq_ctx, 0, sizeof(*areq_ctx));
- //plaintext is not decryped with rfc4543
+ //plaintext is not decrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 3fb667a17bbb..d39c067672fd 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -179,7 +179,7 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
}
max_key_buf_size <<= 1;
- /* Alloc fallabck tfm or essiv when key size != 256 bit */
+ /* Alloc fallback tfm or essiv when key size != 256 bit */
ctx_p->fallback_tfm =
crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 9177b54bb0f5..061e68a31c36 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -643,7 +643,7 @@ static struct platform_driver ccree_driver = {
#endif
},
.probe = ccree_probe,
- .remove_new = ccree_remove,
+ .remove = ccree_remove,
};
static int __init ccree_init(void)
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index f418162932fe..d0612bec4d58 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -1577,7 +1577,7 @@ struct cc_hash_template {
/* hash descriptors */
static struct cc_hash_template driver_hash[] = {
- //Asynchronize hash template
+ //Asynchronous hash template
{
.name = "sha1",
.driver_name = "sha1-ccree",
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 177428480c7d..af37477ffd8d 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -1186,7 +1186,7 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
else
bytes = rounddown(bytes, 16);
} else {
- /*CTR mode counter overfloa*/
+ /*CTR mode counter overflow*/
bytes = req->cryptlen - reqctx->processed;
}
err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index 0dd8baf16cb4..2aaa98f9b44e 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -389,7 +389,7 @@ static struct platform_driver exynos_rng_driver = {
.of_match_table = exynos_rng_dt_match,
},
.probe = exynos_rng_probe,
- .remove_new = exynos_rng_remove,
+ .remove = exynos_rng_remove,
};
module_platform_driver(exynos_rng_driver);
diff --git a/drivers/crypto/gemini/sl3516-ce-core.c b/drivers/crypto/gemini/sl3516-ce-core.c
index 1d1a889599bb..f7e0e3fea15c 100644
--- a/drivers/crypto/gemini/sl3516-ce-core.c
+++ b/drivers/crypto/gemini/sl3516-ce-core.c
@@ -528,7 +528,7 @@ MODULE_DEVICE_TABLE(of, sl3516_ce_crypto_of_match_table);
static struct platform_driver sl3516_ce_driver = {
.probe = sl3516_ce_probe,
- .remove_new = sl3516_ce_remove,
+ .remove = sl3516_ce_remove,
.driver = {
.name = "sl3516-crypto",
.pm = &sl3516_ce_pm_ops,
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 9f0b94c8e03d..0f3ddbadbcf9 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -100,6 +100,29 @@ struct hpre_sqe {
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
};
+enum hpre_cap_table_type {
+ QM_RAS_NFE_TYPE = 0x0,
+ QM_RAS_NFE_RESET,
+ QM_RAS_CE_TYPE,
+ HPRE_RAS_NFE_TYPE,
+ HPRE_RAS_NFE_RESET,
+ HPRE_RAS_CE_TYPE,
+ HPRE_CORE_INFO,
+ HPRE_CORE_EN,
+ HPRE_DRV_ALG_BITMAP,
+ HPRE_ALG_BITMAP,
+ HPRE_CORE1_BITMAP_CAP,
+ HPRE_CORE2_BITMAP_CAP,
+ HPRE_CORE3_BITMAP_CAP,
+ HPRE_CORE4_BITMAP_CAP,
+ HPRE_CORE5_BITMAP_CAP,
+ HPRE_CORE6_BITMAP_CAP,
+ HPRE_CORE7_BITMAP_CAP,
+ HPRE_CORE8_BITMAP_CAP,
+ HPRE_CORE9_BITMAP_CAP,
+ HPRE_CORE10_BITMAP_CAP,
+};
+
struct hisi_qp *hpre_create_qp(u8 type);
int hpre_algs_register(struct hisi_qm *qm);
void hpre_algs_unregister(struct hisi_qm *qm);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index c167dbd6c7d6..2a2910261210 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -2006,8 +2006,6 @@ static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
}
static struct akcipher_alg rsa = {
- .sign = hpre_rsa_dec,
- .verify = hpre_rsa_enc,
.encrypt = hpre_rsa_enc,
.decrypt = hpre_rsa_dec,
.set_pub_key = hpre_rsa_setpubkey,
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 6b536ad2ada5..96fde9437b4b 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -13,6 +13,7 @@
#include <linux/uacce.h>
#include "hpre.h"
+#define CAP_FILE_PERMISSION 0444
#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HPRE_CTRL_CNT_CLR_CE 0x301000
#define HPRE_FSM_MAX_CNT 0x301008
@@ -203,7 +204,7 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
{HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E},
{HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E},
{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
- {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
+ {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
{HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
{HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA},
{HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA},
@@ -222,18 +223,27 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
};
-enum hpre_pre_store_cap_idx {
- HPRE_CLUSTER_NUM_CAP_IDX = 0x0,
- HPRE_CORE_ENABLE_BITMAP_CAP_IDX,
- HPRE_DRV_ALG_BITMAP_CAP_IDX,
- HPRE_DEV_ALG_BITMAP_CAP_IDX,
-};
-
-static const u32 hpre_pre_store_caps[] = {
- HPRE_CLUSTER_NUM_CAP,
- HPRE_CORE_ENABLE_BITMAP_CAP,
- HPRE_DRV_ALG_BITMAP_CAP,
- HPRE_DEV_ALG_BITMAP_CAP,
+static const struct hisi_qm_cap_query_info hpre_cap_query_info[] = {
+ {QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C37, 0x7C37},
+ {QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC77, 0x6C77},
+ {QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
+ {HPRE_RAS_NFE_TYPE, "HPRE_RAS_NFE_TYPE ", 0x3130, 0x0, 0x3FFFFE, 0x1FFFC3E},
+ {HPRE_RAS_NFE_RESET, "HPRE_RAS_NFE_RESET ", 0x3134, 0x0, 0x3FFFFE, 0xBFFC3E},
+ {HPRE_RAS_CE_TYPE, "HPRE_RAS_CE_TYPE ", 0x3138, 0x0, 0x1, 0x1},
+ {HPRE_CORE_INFO, "HPRE_CORE_INFO ", 0x313c, 0x0, 0x420802, 0x120A0A},
+ {HPRE_CORE_EN, "HPRE_CORE_EN ", 0x3140, 0x0, 0xF, 0x3FF},
+ {HPRE_DRV_ALG_BITMAP, "HPRE_DRV_ALG_BITMAP ", 0x3144, 0x0, 0x03, 0x27},
+ {HPRE_ALG_BITMAP, "HPRE_ALG_BITMAP ", 0x3148, 0x0, 0x03, 0x7F},
+ {HPRE_CORE1_BITMAP_CAP, "HPRE_CORE1_BITMAP_CAP ", 0x314c, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE2_BITMAP_CAP, "HPRE_CORE2_BITMAP_CAP ", 0x3150, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE3_BITMAP_CAP, "HPRE_CORE3_BITMAP_CAP ", 0x3154, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE4_BITMAP_CAP, "HPRE_CORE4_BITMAP_CAP ", 0x3158, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE5_BITMAP_CAP, "HPRE_CORE5_BITMAP_CAP ", 0x315c, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE6_BITMAP_CAP, "HPRE_CORE6_BITMAP_CAP ", 0x3160, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE7_BITMAP_CAP, "HPRE_CORE7_BITMAP_CAP ", 0x3164, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE8_BITMAP_CAP, "HPRE_CORE8_BITMAP_CAP ", 0x3168, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE9_BITMAP_CAP, "HPRE_CORE9_BITMAP_CAP ", 0x316c, 0x0, 0x10, 0x10},
+ {HPRE_CORE10_BITMAP_CAP, "HPRE_CORE10_BITMAP_CAP ", 0x3170, 0x0, 0x10, 0x10},
};
static const struct hpre_hw_error hpre_hw_errors[] = {
@@ -360,7 +370,7 @@ bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
{
u32 cap_val;
- cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val;
+ cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP].cap_val;
if (alg & cap_val)
return true;
@@ -415,7 +425,7 @@ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
pf_q_num_flag = true;
- return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
+ return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
}
static const struct kernel_param_ops hpre_pf_q_num_ops = {
@@ -503,14 +513,17 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
static int hpre_set_cluster(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- unsigned long offset;
u32 cluster_core_mask;
+ unsigned long offset;
+ u32 hpre_core_info;
u8 clusters_num;
u32 val = 0;
int ret, i;
- cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val;
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_EN].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
offset = i * HPRE_CLSTR_ADDR_INTRVL;
@@ -593,6 +606,9 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
static void hpre_enable_clock_gate(struct hisi_qm *qm)
{
+ unsigned long offset;
+ u8 clusters_num, i;
+ u32 hpre_core_info;
u32 val;
if (qm->ver < QM_HW_V3)
@@ -606,17 +622,26 @@ static void hpre_enable_clock_gate(struct hisi_qm *qm)
val |= HPRE_PEH_CFG_AUTO_GATE_EN;
writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
- val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
- val |= HPRE_CLUSTER_DYN_CTL_EN;
- writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
-
- val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
- val |= HPRE_CORE_GATE_EN;
- writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
+ for (i = 0; i < clusters_num; i++) {
+ offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;
+ val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
+ val |= HPRE_CLUSTER_DYN_CTL_EN;
+ writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
+
+ val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
+ val |= HPRE_CORE_GATE_EN;
+ writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
+ }
}
static void hpre_disable_clock_gate(struct hisi_qm *qm)
{
+ unsigned long offset;
+ u8 clusters_num, i;
+ u32 hpre_core_info;
u32 val;
if (qm->ver < QM_HW_V3)
@@ -630,13 +655,19 @@ static void hpre_disable_clock_gate(struct hisi_qm *qm)
val &= ~HPRE_PEH_CFG_AUTO_GATE_EN;
writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
- val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
- val &= ~HPRE_CLUSTER_DYN_CTL_EN;
- writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
-
- val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
- val &= ~HPRE_CORE_GATE_EN;
- writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
+ for (i = 0; i < clusters_num; i++) {
+ offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;
+ val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
+ val &= ~HPRE_CLUSTER_DYN_CTL_EN;
+ writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
+
+ val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
+ val &= ~HPRE_CORE_GATE_EN;
+ writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
+ }
}
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
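Several hpre hunks replace the dedicated cluster-count capability with fields packed into one HPRE_CORE_INFO word, extracted with the shift/mask pair already recorded in hpre_basic_info. The access pattern, reduced to its essentials (constants illustrative):

#include <linux/bits.h>
#include <linux/types.h>

/* One 32-bit capability word carries several small fields;
 * each field is described by a (shift, unshifted-mask) pair. */
#define DEMO_CLUSTER_NUM_SHIFT	20
#define DEMO_CLUSTER_NUM_MASK	GENMASK(3, 0)	/* 4-bit field */

static inline u8 demo_cluster_num(u32 core_info)
{
	return (core_info >> DEMO_CLUSTER_NUM_SHIFT) & DEMO_CLUSTER_NUM_MASK;
}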
@@ -699,11 +730,14 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
unsigned long offset;
+ u32 hpre_core_info;
u8 clusters_num;
int i;
/* clear clusterX/cluster_ctrl */
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
@@ -995,10 +1029,13 @@ static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
+ u32 hpre_core_info;
u8 clusters_num;
int i, ret;
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
@@ -1041,6 +1078,26 @@ static int hpre_ctrl_debug_init(struct hisi_qm *qm)
return hpre_cluster_debugfs_init(qm);
}
+static int hpre_cap_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ u32 i, size;
+
+ size = qm->cap_tables.qm_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
+ qm->cap_tables.qm_cap_table[i].cap_val);
+
+ size = qm->cap_tables.dev_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
+ qm->cap_tables.dev_cap_table[i].cap_val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hpre_cap_regs);
+
static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs;
@@ -1059,6 +1116,9 @@ static void hpre_dfx_debug_init(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_PF && hpre_regs)
debugfs_create_file("diff_regs", 0444, parent,
qm, &hpre_diff_regs_fops);
+
+ debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
+ qm->debug.debug_root, qm, &hpre_cap_regs_fops);
}
static int hpre_debugfs_init(struct hisi_qm *qm)
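The new cap_regs file is the usual read-only debugfs idiom: a seq_file show() routine plus DEFINE_SHOW_ATTRIBUTE(), which generates the matching _fops by wiring open to single_open(). A minimal standalone version with hypothetical data:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int demo_caps_show(struct seq_file *s, void *unused)
{
	u32 *caps = s->private;	/* handed over at file creation */

	seq_printf(s, "DEMO_CAP = 0x%08x\n", caps[0]);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo_caps);	/* generates demo_caps_fops */

static void demo_debugfs_init(struct dentry *parent, u32 *caps)
{
	/* 0444: capability words are informational, read-only. */
	debugfs_create_file("cap_regs", 0444, parent, caps, &demo_caps_fops);
}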
@@ -1106,26 +1166,33 @@ static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
{
struct hisi_qm_cap_record *hpre_cap;
struct device *dev = &qm->pdev->dev;
+ u32 hpre_core_info;
+ u8 clusters_num;
size_t i, size;
- size = ARRAY_SIZE(hpre_pre_store_caps);
+ size = ARRAY_SIZE(hpre_cap_query_info);
hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
if (!hpre_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
- hpre_cap[i].type = hpre_pre_store_caps[i];
- hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info,
- hpre_pre_store_caps[i], qm->cap_ver);
+ hpre_cap[i].type = hpre_cap_query_info[i].type;
+ hpre_cap[i].name = hpre_cap_query_info[i].name;
+ hpre_cap[i].cap_val = hisi_qm_get_cap_value(qm, hpre_cap_query_info,
+ i, qm->cap_ver);
}
- if (hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val > HPRE_CLUSTERS_NUM_MAX) {
+ hpre_core_info = hpre_cap[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
+ if (clusters_num > HPRE_CLUSTERS_NUM_MAX) {
dev_err(dev, "Device cluster num %u is out of range for driver supports %d!\n",
- hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val, HPRE_CLUSTERS_NUM_MAX);
+ clusters_num, HPRE_CLUSTERS_NUM_MAX);
return -EINVAL;
}
qm->cap_tables.dev_cap_table = hpre_cap;
+ qm->cap_tables.dev_cap_size = size;
return 0;
}
@@ -1172,7 +1239,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}
- alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val;
+ alg_msk = qm->cap_tables.dev_cap_table[HPRE_ALG_BITMAP].cap_val;
ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
if (ret) {
pci_err(pdev, "Failed to set hpre algs!\n");
@@ -1188,10 +1255,13 @@ static int hpre_show_last_regs_init(struct hisi_qm *qm)
int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
struct qm_debug *debug = &qm->debug;
void __iomem *io_base;
+ u32 hpre_core_info;
u8 clusters_num;
int i, j, idx;
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
if (!debug->last_words)
@@ -1231,6 +1301,7 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
struct qm_debug *debug = &qm->debug;
struct pci_dev *pdev = qm->pdev;
void __iomem *io_base;
+ u32 hpre_core_info;
u8 clusters_num;
int i, j, idx;
u32 val;
@@ -1246,7 +1317,9 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
hpre_com_dfx_regs[i].name, debug->last_words[i], val);
}
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
io_base = qm->io_base + hpre_cluster_offsets[i];
for (j = 0; j < cluster_dfx_regs_num; j++) {
@@ -1280,11 +1353,15 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
- u32 nfe;
-
writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
- nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
- writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
+}
+
+static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ u32 nfe_mask;
+
+ nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB);
}
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1298,6 +1375,27 @@ static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
+static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hpre_get_hw_err_status(qm);
+ if (err_status) {
+ if (err_status & qm->err_info.ecc_2bits_mask)
+ qm->err_status.is_dev_ecc_mbit = true;
+ hpre_log_hw_error(qm, err_status);
+
+ if (err_status & qm->err_info.dev_reset_mask) {
+ /* Disable the same error reporting until device is recovered. */
+ hpre_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ }
+ hpre_clear_hw_err_status(qm, err_status);
+ }
+
+ return ACC_ERR_RECOVERED;
+}
+
static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
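hpre_get_err_result() above, like the analogous sec and zip helpers later in this patch, implements one idea: when an error severe enough to require reset fires, its enable bit is masked so the interrupt cannot storm while recovery runs; recoverable errors are simply acknowledged. A generic sketch of that flow (register offsets and return codes illustrative):

#include <linux/errno.h>
#include <linux/io.h>

static int demo_handle_dev_error(void __iomem *base, u32 nfe_enabled,
				 u32 reset_mask)
{
	u32 status = readl(base + 0x00);	/* ERR_STATUS, illustrative */

	if (!status)
		return 0;

	if (status & reset_mask) {
		/* Mask the offending sources until recovery re-enables
		 * them; otherwise the same error keeps re-reporting. */
		writel(nfe_enabled & ~status, base + 0x04); /* NFE_ENABLE */
		return -EIO;			/* caller schedules a reset */
	}

	/* Recoverable: acknowledge and keep reporting enabled. */
	writel(status, base + 0x08);		/* ERR_SOURCE */
	return 0;
}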
@@ -1324,12 +1422,12 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.hw_err_disable = hpre_hw_error_disable,
.get_dev_hw_err_status = hpre_get_hw_err_status,
.clear_dev_hw_err_status = hpre_clear_hw_err_status,
- .log_dev_hw_err = hpre_log_hw_error,
.open_axi_master_ooo = hpre_open_axi_master_ooo,
.open_sva_prefetch = hpre_open_sva_prefetch,
.close_sva_prefetch = hpre_close_sva_prefetch,
.show_last_dfx_regs = hpre_show_last_dfx_regs,
.err_info_init = hpre_err_info_init,
+ .get_err_result = hpre_get_err_result,
};
static int hpre_pf_probe_init(struct hpre *hpre)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 07983af9e3e2..19c1b5d3c954 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -271,12 +271,6 @@ enum vft_type {
SHAPER_VFT,
};
-enum acc_err_result {
- ACC_ERR_NONE,
- ACC_ERR_NEED_RESET,
- ACC_ERR_RECOVERED,
-};
-
enum qm_alg_type {
ALG_TYPE_0,
ALG_TYPE_1,
@@ -307,11 +301,29 @@ enum qm_basic_type {
QM_VF_IRQ_NUM_CAP,
};
-enum qm_pre_store_cap_idx {
- QM_EQ_IRQ_TYPE_CAP_IDX = 0x0,
- QM_AEQ_IRQ_TYPE_CAP_IDX,
- QM_ABN_IRQ_TYPE_CAP_IDX,
- QM_PF2VF_IRQ_TYPE_CAP_IDX,
+enum qm_cap_table_type {
+ QM_CAP_VF = 0x0,
+ QM_AEQE_NUM,
+ QM_SCQE_NUM,
+ QM_EQ_IRQ,
+ QM_AEQ_IRQ,
+ QM_ABNORMAL_IRQ,
+ QM_MB_IRQ,
+ MAX_IRQ_NUM,
+ EXT_BAR_INDEX,
+};
+
+static const struct hisi_qm_cap_query_info qm_cap_query_info[] = {
+ {QM_CAP_VF, "QM_CAP_VF ", 0x3100, 0x0, 0x0, 0x6F01},
+ {QM_AEQE_NUM, "QM_AEQE_NUM ", 0x3104, 0x800, 0x4000800, 0x4000800},
+ {QM_SCQE_NUM, "QM_SCQE_NUM ",
+ 0x3108, 0x4000400, 0x4000400, 0x4000400},
+ {QM_EQ_IRQ, "QM_EQ_IRQ ", 0x310c, 0x10000, 0x10000, 0x10000},
+ {QM_AEQ_IRQ, "QM_AEQ_IRQ ", 0x3110, 0x0, 0x10001, 0x10001},
+ {QM_ABNORMAL_IRQ, "QM_ABNORMAL_IRQ ", 0x3114, 0x0, 0x10003, 0x10003},
+ {QM_MB_IRQ, "QM_MB_IRQ ", 0x3118, 0x0, 0x0, 0x10002},
+ {MAX_IRQ_NUM, "MAX_IRQ_NUM ", 0x311c, 0x10001, 0x40002, 0x40003},
+ {EXT_BAR_INDEX, "EXT_BAR_INDEX ", 0x3120, 0x0, 0x0, 0x14},
};
static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
@@ -344,13 +356,6 @@ static const struct hisi_qm_cap_info qm_basic_info[] = {
{QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
};
-static const u32 qm_pre_store_caps[] = {
- QM_EQ_IRQ_TYPE_CAP,
- QM_AEQ_IRQ_TYPE_CAP,
- QM_ABN_IRQ_TYPE_CAP,
- QM_PF2VF_IRQ_TYPE_CAP,
-};
-
struct qm_mailbox {
__le16 w0;
__le16 queue_num;
@@ -451,6 +456,37 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
static void qm_irqs_unregister(struct hisi_qm *qm);
static int qm_reset_device(struct hisi_qm *qm);
+int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device)
+{
+ struct pci_dev *pdev;
+ u32 n, q_num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL);
+ if (!pdev) {
+ q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
+ pr_info("No device found currently, suppose queue number is %u\n",
+ q_num);
+ } else {
+ if (pdev->revision == QM_HW_V1)
+ q_num = QM_QNUM_V1;
+ else
+ q_num = QM_QNUM_V2;
+
+ pci_dev_put(pdev);
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret || n < QM_MIN_QNUM || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_q_num_set);
static u32 qm_get_hw_error_status(struct hisi_qm *qm)
{
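hisi_qm_q_num_set() is meant to be plugged into a kernel_param_ops, as the hpre/sec/zip pf_q_num_set wrappers in this patch do. Hooked up to a module parameter it looks roughly like this sketch (parameter name and description illustrative; 0xa255 is the SEC PF id from this patch):

#include <linux/hisi_acc_qm.h>
#include <linux/moduleparam.h>

static int demo_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return hisi_qm_q_num_set(val, kp, 0xa255 /* PF PCI device id */);
}

static const struct kernel_param_ops demo_pf_q_num_ops = {
	.set = demo_pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = 64;
module_param_cb(pf_q_num, &demo_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of PF queues (range depends on HW version)");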
@@ -763,6 +799,27 @@ u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
}
EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);
+u32 hisi_qm_get_cap_value(struct hisi_qm *qm,
+ const struct hisi_qm_cap_query_info *info_table,
+ u32 index, bool is_read)
+{
+ u32 val;
+
+ switch (qm->ver) {
+ case QM_HW_V1:
+ return info_table[index].v1_val;
+ case QM_HW_V2:
+ return info_table[index].v2_val;
+ default:
+ if (!is_read)
+ return info_table[index].v3_val;
+
+ val = readl(qm->io_base + info_table[index].offset);
+ return val;
+ }
+}
+EXPORT_SYMBOL_GPL(hisi_qm_get_cap_value);
+
static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
u16 *high_bits, enum qm_basic_type type)
{
@@ -1425,22 +1482,25 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
- u32 error_status, tmp;
-
- /* read err sts */
- tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
- error_status = qm->error_mask & tmp;
+ u32 error_status;
- if (error_status) {
+ error_status = qm_get_hw_error_status(qm);
+ if (error_status & qm->error_mask) {
if (error_status & QM_ECC_MBIT)
qm->err_status.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
- if (error_status & qm->err_info.qm_reset_mask)
+ if (error_status & qm->err_info.qm_reset_mask) {
+ /* Disable the same error reporting until device is recovered. */
+ writel(qm->err_info.nfe & (~error_status),
+ qm->io_base + QM_RAS_NFE_ENABLE);
return ACC_ERR_NEED_RESET;
+ }
+ /* Clear the error source if no reset is needed. */
writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE);
}
return ACC_ERR_RECOVERED;
@@ -3861,30 +3921,12 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
{
- u32 err_sts;
-
- if (!qm->err_ini->get_dev_hw_err_status) {
- dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
+ if (!qm->err_ini->get_err_result) {
+ dev_err(&qm->pdev->dev, "Device doesn't support reset!\n");
return ACC_ERR_NONE;
}
- /* get device hardware error status */
- err_sts = qm->err_ini->get_dev_hw_err_status(qm);
- if (err_sts) {
- if (err_sts & qm->err_info.ecc_2bits_mask)
- qm->err_status.is_dev_ecc_mbit = true;
-
- if (qm->err_ini->log_dev_hw_err)
- qm->err_ini->log_dev_hw_err(qm, err_sts);
-
- if (err_sts & qm->err_info.dev_reset_mask)
- return ACC_ERR_NEED_RESET;
-
- if (qm->err_ini->clear_dev_hw_err_status)
- qm->err_ini->clear_dev_hw_err_status(qm, err_sts);
- }
-
- return ACC_ERR_RECOVERED;
+ return qm->err_ini->get_err_result(qm);
}
static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
@@ -4866,7 +4908,7 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_VF)
return;
- val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
return;
@@ -4883,7 +4925,7 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_VF)
return 0;
- val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
return 0;
@@ -4900,7 +4942,7 @@ static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return;
@@ -4914,7 +4956,7 @@ static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return 0;
@@ -4931,7 +4973,7 @@ static void qm_unregister_aeq_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return;
@@ -4945,7 +4987,7 @@ static int qm_register_aeq_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return 0;
@@ -4963,7 +5005,7 @@ static void qm_unregister_eq_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return;
@@ -4977,7 +5019,7 @@ static int qm_register_eq_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return 0;
@@ -5065,24 +5107,26 @@ static int qm_get_qp_num(struct hisi_qm *qm)
return 0;
}
-static int qm_pre_store_irq_type_caps(struct hisi_qm *qm)
+static int qm_pre_store_caps(struct hisi_qm *qm)
{
struct hisi_qm_cap_record *qm_cap;
struct pci_dev *pdev = qm->pdev;
size_t i, size;
- size = ARRAY_SIZE(qm_pre_store_caps);
+ size = ARRAY_SIZE(qm_cap_query_info);
qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL);
if (!qm_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
- qm_cap[i].type = qm_pre_store_caps[i];
- qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info,
- qm_pre_store_caps[i], qm->cap_ver);
+ qm_cap[i].type = qm_cap_query_info[i].type;
+ qm_cap[i].name = qm_cap_query_info[i].name;
+ qm_cap[i].cap_val = hisi_qm_get_cap_value(qm, qm_cap_query_info,
+ i, qm->cap_ver);
}
qm->cap_tables.qm_cap_table = qm_cap;
+ qm->cap_tables.qm_cap_size = size;
return 0;
}
@@ -5119,8 +5163,8 @@ static int qm_get_hw_caps(struct hisi_qm *qm)
set_bit(cap_info[i].type, &qm->caps);
}
- /* Fetch and save the value of irq type related capability registers */
- return qm_pre_store_irq_type_caps(qm);
+ /* Fetch and save the value of qm capability registers */
+ return qm_pre_store_caps(qm);
}
static int qm_get_pci_res(struct hisi_qm *qm)
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index 9bafcc5aa404..ef0cb733c92c 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -1304,7 +1304,7 @@ MODULE_DEVICE_TABLE(acpi, sec_acpi_match);
static struct platform_driver sec_driver = {
.probe = sec_probe,
- .remove_new = sec_remove,
+ .remove = sec_remove,
.driver = {
.name = "hisi_sec_platform_driver",
.of_match_table = sec_match,
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 410c83712e28..356188bee6fb 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -220,11 +220,27 @@ enum sec_cap_type {
SEC_CORE4_ALG_BITMAP_HIGH,
};
-enum sec_cap_reg_record_idx {
- SEC_DRV_ALG_BITMAP_LOW_IDX = 0x0,
- SEC_DRV_ALG_BITMAP_HIGH_IDX,
- SEC_DEV_ALG_BITMAP_LOW_IDX,
- SEC_DEV_ALG_BITMAP_HIGH_IDX,
+enum sec_cap_table_type {
+ QM_RAS_NFE_TYPE = 0x0,
+ QM_RAS_NFE_RESET,
+ QM_RAS_CE_TYPE,
+ SEC_RAS_NFE_TYPE,
+ SEC_RAS_NFE_RESET,
+ SEC_RAS_CE_TYPE,
+ SEC_CORE_INFO,
+ SEC_CORE_EN,
+ SEC_DRV_ALG_BITMAP_LOW_TB,
+ SEC_DRV_ALG_BITMAP_HIGH_TB,
+ SEC_ALG_BITMAP_LOW,
+ SEC_ALG_BITMAP_HIGH,
+ SEC_CORE1_BITMAP_LOW,
+ SEC_CORE1_BITMAP_HIGH,
+ SEC_CORE2_BITMAP_LOW,
+ SEC_CORE2_BITMAP_HIGH,
+ SEC_CORE3_BITMAP_LOW,
+ SEC_CORE3_BITMAP_HIGH,
+ SEC_CORE4_BITMAP_LOW,
+ SEC_CORE4_BITMAP_HIGH,
};
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 0558f98e221f..ae9ebbb4103d 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -2520,8 +2520,8 @@ int sec_register_to_crypto(struct hisi_qm *qm)
u64 alg_mask;
int ret = 0;
- alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
- SEC_DRV_ALG_BITMAP_LOW_IDX);
+ alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
+ SEC_DRV_ALG_BITMAP_LOW_TB);
mutex_lock(&sec_algs_lock);
if (sec_available_devs) {
@@ -2553,8 +2553,8 @@ void sec_unregister_from_crypto(struct hisi_qm *qm)
{
u64 alg_mask;
- alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
- SEC_DRV_ALG_BITMAP_LOW_IDX);
+ alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
+ SEC_DRV_ALG_BITMAP_LOW_TB);
mutex_lock(&sec_algs_lock);
if (--sec_available_devs)
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index c35533d8930b..8ec5333bb5aa 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -14,9 +14,9 @@
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>
-
#include "sec.h"
+#define CAP_FILE_PERMISSION 0444
#define SEC_VF_NUM 63
#define SEC_QUEUE_NUM_V1 4096
#define PCI_DEVICE_ID_HUAWEI_SEC_PF 0xa255
@@ -167,11 +167,34 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
{SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
};
-static const u32 sec_pre_store_caps[] = {
- SEC_DRV_ALG_BITMAP_LOW,
- SEC_DRV_ALG_BITMAP_HIGH,
- SEC_DEV_ALG_BITMAP_LOW,
- SEC_DEV_ALG_BITMAP_HIGH,
+static const struct hisi_qm_cap_query_info sec_cap_query_info[] = {
+ {QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C77, 0x7C77},
+ {QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC77, 0x6C77},
+ {QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
+ {SEC_RAS_NFE_TYPE, "SEC_RAS_NFE_TYPE ", 0x3130, 0x0, 0x177, 0x60177},
+ {SEC_RAS_NFE_RESET, "SEC_RAS_NFE_RESET ", 0x3134, 0x0, 0x177, 0x177},
+ {SEC_RAS_CE_TYPE, "SEC_RAS_CE_TYPE ", 0x3138, 0x0, 0x88, 0xC088},
+ {SEC_CORE_INFO, "SEC_CORE_INFO ", 0x313c, 0x110404, 0x110404, 0x110404},
+ {SEC_CORE_EN, "SEC_CORE_EN ", 0x3140, 0x17F, 0x17F, 0xF},
+ {SEC_DRV_ALG_BITMAP_LOW_TB, "SEC_DRV_ALG_BITMAP_LOW ",
+ 0x3144, 0x18050CB, 0x18050CB, 0x18670CF},
+ {SEC_DRV_ALG_BITMAP_HIGH_TB, "SEC_DRV_ALG_BITMAP_HIGH ",
+ 0x3148, 0x395C, 0x395C, 0x395C},
+ {SEC_ALG_BITMAP_LOW, "SEC_ALG_BITMAP_LOW ",
+ 0x314c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_ALG_BITMAP_HIGH, "SEC_ALG_BITMAP_HIGH ", 0x3150, 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE1_BITMAP_LOW, "SEC_CORE1_BITMAP_LOW ",
+ 0x3154, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE1_BITMAP_HIGH, "SEC_CORE1_BITMAP_HIGH ", 0x3158, 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE2_BITMAP_LOW, "SEC_CORE2_BITMAP_LOW ",
+ 0x315c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE2_BITMAP_HIGH, "SEC_CORE2_BITMAP_HIGH ", 0x3160, 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE3_BITMAP_LOW, "SEC_CORE3_BITMAP_LOW ",
+ 0x3164, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE3_BITMAP_HIGH, "SEC_CORE3_BITMAP_HIGH ", 0x3168, 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE4_BITMAP_LOW, "SEC_CORE4_BITMAP_LOW ",
+ 0x316c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE4_BITMAP_HIGH, "SEC_CORE4_BITMAP_HIGH ", 0x3170, 0x3FFF, 0x3FFF, 0x3FFF},
};
static const struct qm_dev_alg sec_dev_algs[] = { {
@@ -322,7 +345,7 @@ static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
pf_q_num_flag = true;
- return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
+ return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
}
static const struct kernel_param_ops sec_pf_q_num_ops = {
@@ -838,6 +861,26 @@ static int sec_regs_show(struct seq_file *s, void *unused)
DEFINE_SHOW_ATTRIBUTE(sec_regs);
+static int sec_cap_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ u32 i, size;
+
+ size = qm->cap_tables.qm_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
+ qm->cap_tables.qm_cap_table[i].cap_val);
+
+ size = qm->cap_tables.dev_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
+ qm->cap_tables.dev_cap_table[i].cap_val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(sec_cap_regs);
+
static int sec_core_debug_init(struct hisi_qm *qm)
{
struct dfx_diff_registers *sec_regs = qm->debug.acc_diff_regs;
@@ -872,6 +915,9 @@ static int sec_core_debug_init(struct hisi_qm *qm)
tmp_d, data, &sec_atomic64_ops);
}
+ debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
+ qm->debug.debug_root, qm, &sec_cap_regs_fops);
+
return 0;
}
@@ -1010,11 +1056,15 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm)
static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
- u32 nfe;
-
writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
- nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
- writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
+}
+
+static void sec_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ u32 nfe_mask;
+
+ nfe_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe_mask & (~err_type), qm->io_base + SEC_RAS_NFE_REG);
}
static void sec_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1026,6 +1076,27 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm)
writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
}
+static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = sec_get_hw_err_status(qm);
+ if (err_status) {
+ if (err_status & qm->err_info.ecc_2bits_mask)
+ qm->err_status.is_dev_ecc_mbit = true;
+ sec_log_hw_error(qm, err_status);
+
+ if (err_status & qm->err_info.dev_reset_mask) {
+ /* Disable the same error reporting until device is recovered. */
+ sec_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ }
+ sec_clear_hw_err_status(qm, err_status);
+ }
+
+ return ACC_ERR_RECOVERED;
+}
+
static void sec_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -1052,12 +1123,12 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.hw_err_disable = sec_hw_error_disable,
.get_dev_hw_err_status = sec_get_hw_err_status,
.clear_dev_hw_err_status = sec_clear_hw_err_status,
- .log_dev_hw_err = sec_log_hw_error,
.open_axi_master_ooo = sec_open_axi_master_ooo,
.open_sva_prefetch = sec_open_sva_prefetch,
.close_sva_prefetch = sec_close_sva_prefetch,
.show_last_dfx_regs = sec_show_last_dfx_regs,
.err_info_init = sec_err_info_init,
+ .get_err_result = sec_get_err_result,
};
static int sec_pf_probe_init(struct sec_dev *sec)
@@ -1085,18 +1156,20 @@ static int sec_pre_store_cap_reg(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
size_t i, size;
- size = ARRAY_SIZE(sec_pre_store_caps);
+ size = ARRAY_SIZE(sec_cap_query_info);
sec_cap = devm_kzalloc(&pdev->dev, sizeof(*sec_cap) * size, GFP_KERNEL);
if (!sec_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
- sec_cap[i].type = sec_pre_store_caps[i];
- sec_cap[i].cap_val = hisi_qm_get_hw_info(qm, sec_basic_info,
- sec_pre_store_caps[i], qm->cap_ver);
+ sec_cap[i].type = sec_cap_query_info[i].type;
+ sec_cap[i].name = sec_cap_query_info[i].name;
+ sec_cap[i].cap_val = hisi_qm_get_cap_value(qm, sec_cap_query_info,
+ i, qm->cap_ver);
}
qm->cap_tables.dev_cap_table = sec_cap;
+ qm->cap_tables.dev_cap_size = size;
return 0;
}
@@ -1146,8 +1219,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
hisi_qm_uninit(qm);
return ret;
}
-
- alg_msk = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH_IDX, SEC_DEV_ALG_BITMAP_LOW_IDX);
+ alg_msk = sec_get_alg_bitmap(qm, SEC_ALG_BITMAP_HIGH, SEC_ALG_BITMAP_LOW);
ret = hisi_qm_set_algs(qm, alg_msk, sec_dev_algs, ARRAY_SIZE(sec_dev_algs));
if (ret) {
pci_err(qm->pdev, "Failed to set sec algs!\n");
diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c
index 66c551ecdee8..ac74df4a9471 100644
--- a/drivers/crypto/hisilicon/trng/trng.c
+++ b/drivers/crypto/hisilicon/trng/trng.c
@@ -324,7 +324,7 @@ MODULE_DEVICE_TABLE(acpi, hisi_trng_acpi_match);
static struct platform_driver hisi_trng_driver = {
.probe = hisi_trng_probe,
- .remove_new = hisi_trng_remove,
+ .remove = hisi_trng_remove,
.driver = {
.name = "hisi-trng-v2",
.acpi_match_table = ACPI_PTR(hisi_trng_acpi_match),
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index f2e6da3240ae..2fecf346c3c9 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -81,6 +81,24 @@ struct hisi_zip_sqe {
u32 rsvd1[4];
};
+enum zip_cap_table_type {
+ QM_RAS_NFE_TYPE,
+ QM_RAS_NFE_RESET,
+ QM_RAS_CE_TYPE,
+ ZIP_RAS_NFE_TYPE,
+ ZIP_RAS_NFE_RESET,
+ ZIP_RAS_CE_TYPE,
+ ZIP_CORE_INFO,
+ ZIP_CORE_EN,
+ ZIP_DRV_ALG_BITMAP_TB,
+ ZIP_ALG_BITMAP,
+ ZIP_CORE1_BITMAP,
+ ZIP_CORE2_BITMAP,
+ ZIP_CORE3_BITMAP,
+ ZIP_CORE4_BITMAP,
+ ZIP_CORE5_BITMAP,
+};
+
int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
int hisi_zip_register_to_crypto(struct hisi_qm *qm);
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index d07e47b48be0..9239b251c2d7 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -14,6 +14,7 @@
#include <linux/uacce.h>
#include "zip.h"
+#define CAP_FILE_PERMISSION 0444
#define PCI_DEVICE_ID_HUAWEI_ZIP_PF 0xa250
#define HZIP_QUEUE_NUM_V1 4096
@@ -250,24 +251,22 @@ static struct hisi_qm_cap_info zip_basic_cap_info[] = {
{ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0}
};
-enum zip_pre_store_cap_idx {
- ZIP_CORE_NUM_CAP_IDX = 0x0,
- ZIP_CLUSTER_COMP_NUM_CAP_IDX,
- ZIP_CLUSTER_DECOMP_NUM_CAP_IDX,
- ZIP_DECOMP_ENABLE_BITMAP_IDX,
- ZIP_COMP_ENABLE_BITMAP_IDX,
- ZIP_DRV_ALG_BITMAP_IDX,
- ZIP_DEV_ALG_BITMAP_IDX,
-};
-
-static const u32 zip_pre_store_caps[] = {
- ZIP_CORE_NUM_CAP,
- ZIP_CLUSTER_COMP_NUM_CAP,
- ZIP_CLUSTER_DECOMP_NUM_CAP,
- ZIP_DECOMP_ENABLE_BITMAP,
- ZIP_COMP_ENABLE_BITMAP,
- ZIP_DRV_ALG_BITMAP,
- ZIP_DEV_ALG_BITMAP,
+static const struct hisi_qm_cap_query_info zip_cap_query_info[] = {
+ {QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C57, 0x7C77},
+ {QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC57, 0x6C77},
+ {QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
+ {ZIP_RAS_NFE_TYPE, "ZIP_RAS_NFE_TYPE ", 0x3130, 0x0, 0x7FE, 0x1FFE},
+ {ZIP_RAS_NFE_RESET, "ZIP_RAS_NFE_RESET ", 0x3134, 0x0, 0x7FE, 0x7FE},
+ {ZIP_RAS_CE_TYPE, "ZIP_RAS_CE_TYPE ", 0x3138, 0x0, 0x1, 0x1},
+ {ZIP_CORE_INFO, "ZIP_CORE_INFO ", 0x313C, 0x12080206, 0x12080206, 0x12050203},
+ {ZIP_CORE_EN, "ZIP_CORE_EN ", 0x3140, 0xFC0003, 0xFC0003, 0x1C0003},
+ {ZIP_DRV_ALG_BITMAP_TB, "ZIP_DRV_ALG_BITMAP ", 0x3144, 0x0, 0x0, 0x30},
+ {ZIP_ALG_BITMAP, "ZIP_ALG_BITMAP ", 0x3148, 0xF, 0xF, 0x3F},
+ {ZIP_CORE1_BITMAP, "ZIP_CORE1_BITMAP ", 0x314C, 0x5, 0x5, 0xD5},
+ {ZIP_CORE2_BITMAP, "ZIP_CORE2_BITMAP ", 0x3150, 0x5, 0x5, 0xD5},
+ {ZIP_CORE3_BITMAP, "ZIP_CORE3_BITMAP ", 0x3154, 0xA, 0xA, 0x2A},
+ {ZIP_CORE4_BITMAP, "ZIP_CORE4_BITMAP ", 0x3158, 0xA, 0xA, 0x2A},
+ {ZIP_CORE5_BITMAP, "ZIP_CORE5_BITMAP ", 0x315C, 0xA, 0xA, 0x2A},
};
static const struct debugfs_reg32 hzip_dfx_regs[] = {
@@ -402,7 +401,7 @@ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
pf_q_num_flag = true;
- return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
+ return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
}
static const struct kernel_param_ops pf_q_num_ops = {
@@ -442,7 +441,7 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
{
u32 cap_val;
- cap_val = qm->cap_tables.dev_cap_table[ZIP_DRV_ALG_BITMAP_IDX].cap_val;
+ cap_val = qm->cap_tables.dev_cap_table[ZIP_DRV_ALG_BITMAP_TB].cap_val;
if ((alg & cap_val) == alg)
return true;
@@ -530,6 +529,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
void __iomem *base = qm->io_base;
u32 dcomp_bm, comp_bm;
+ u32 zip_core_en;
/* qm user domain */
writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@@ -567,8 +567,12 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
}
/* let's open all compression/decompression cores */
- dcomp_bm = qm->cap_tables.dev_cap_table[ZIP_DECOMP_ENABLE_BITMAP_IDX].cap_val;
- comp_bm = qm->cap_tables.dev_cap_table[ZIP_COMP_ENABLE_BITMAP_IDX].cap_val;
+
+ zip_core_en = qm->cap_tables.dev_cap_table[ZIP_CORE_EN].cap_val;
+ dcomp_bm = (zip_core_en >> zip_basic_cap_info[ZIP_DECOMP_ENABLE_BITMAP].shift) &
+ zip_basic_cap_info[ZIP_DECOMP_ENABLE_BITMAP].mask;
+ comp_bm = (zip_core_en >> zip_basic_cap_info[ZIP_COMP_ENABLE_BITMAP].shift) &
+ zip_basic_cap_info[ZIP_COMP_ENABLE_BITMAP].mask;
writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL);
/* enable sqc,cqc writeback */
@@ -788,7 +792,12 @@ DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);
static void __iomem *get_zip_core_addr(struct hisi_qm *qm, int core_num)
{
- u32 zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
+ u8 zip_comp_core_num;
+ u32 zip_core_info;
+
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask;
if (core_num < zip_comp_core_num)
return qm->io_base + HZIP_CORE_DFX_BASE +
@@ -803,12 +812,16 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
u32 zip_core_num, zip_comp_core_num;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
+ u32 zip_core_info;
struct dentry *tmp_d;
char buf[HZIP_BUF_SIZE];
int i;
- zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
- zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
+ zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask;
for (i = 0; i < zip_core_num; i++) {
if (i < zip_comp_core_num)
@@ -834,6 +847,26 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
return 0;
}
+static int zip_cap_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ u32 i, size;
+
+ size = qm->cap_tables.qm_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
+ qm->cap_tables.qm_cap_table[i].cap_val);
+
+ size = qm->cap_tables.dev_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
+ qm->cap_tables.dev_cap_table[i].cap_val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(zip_cap_regs);
+
static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
{
struct dfx_diff_registers *hzip_regs = qm->debug.acc_diff_regs;
@@ -854,6 +887,9 @@ static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_PF && hzip_regs)
debugfs_create_file("diff_regs", 0444, tmp_dir,
qm, &hzip_diff_regs_fops);
+
+ debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
+ qm->debug.debug_root, qm, &zip_cap_regs_fops);
}
static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
@@ -912,9 +948,14 @@ debugfs_remove:
/* hisi_zip_debug_regs_clear() - clear the zip debug regs */
static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
{
- u32 zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
+ u32 zip_core_info;
+ u8 zip_core_num;
int i, j;
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
+
/* enable register read_clear bit */
writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
for (i = 0; i < zip_core_num; i++)
@@ -946,10 +987,13 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
struct qm_debug *debug = &qm->debug;
void __iomem *io_base;
+ u32 zip_core_info;
u32 zip_core_num;
int i, j, idx;
- zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num,
sizeof(unsigned int), GFP_KERNEL);
@@ -991,6 +1035,7 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
u32 zip_core_num, zip_comp_core_num;
struct qm_debug *debug = &qm->debug;
char buf[HZIP_BUF_SIZE];
+ u32 zip_core_info;
void __iomem *base;
int i, j, idx;
u32 val;
@@ -1005,8 +1050,11 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
hzip_com_dfx_regs[i].name, debug->last_words[i], val);
}
- zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
- zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
+ zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask;
for (i = 0; i < zip_core_num; i++) {
if (i < zip_comp_core_num)
@@ -1059,11 +1107,15 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
- u32 nfe;
-
writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
- nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
- writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+}
+
+static void hisi_zip_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ u32 nfe_mask;
+
+ nfe_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe_mask & (~err_type), qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
}
static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1093,6 +1145,27 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
qm->io_base + HZIP_CORE_INT_SET);
}
+static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hisi_zip_get_hw_err_status(qm);
+ if (err_status) {
+ if (err_status & qm->err_info.ecc_2bits_mask)
+ qm->err_status.is_dev_ecc_mbit = true;
+ hisi_zip_log_hw_error(qm, err_status);
+
+ if (err_status & qm->err_info.dev_reset_mask) {
+			/* Disable reporting of this error until the device is recovered. */
+ hisi_zip_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ }
+ hisi_zip_clear_hw_err_status(qm, err_status);
+ }
+
+ return ACC_ERR_RECOVERED;
+}
+
static void hisi_zip_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -1120,13 +1193,13 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.hw_err_disable = hisi_zip_hw_error_disable,
.get_dev_hw_err_status = hisi_zip_get_hw_err_status,
.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status,
- .log_dev_hw_err = hisi_zip_log_hw_error,
.open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
.close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
.open_sva_prefetch = hisi_zip_open_sva_prefetch,
.close_sva_prefetch = hisi_zip_close_sva_prefetch,
.show_last_dfx_regs = hisi_zip_show_last_dfx_regs,
.err_info_init = hisi_zip_err_info_init,
+ .get_err_result = hisi_zip_get_err_result,
};
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
@@ -1167,18 +1240,20 @@ static int zip_pre_store_cap_reg(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
size_t i, size;
- size = ARRAY_SIZE(zip_pre_store_caps);
+ size = ARRAY_SIZE(zip_cap_query_info);
zip_cap = devm_kzalloc(&pdev->dev, sizeof(*zip_cap) * size, GFP_KERNEL);
if (!zip_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
- zip_cap[i].type = zip_pre_store_caps[i];
- zip_cap[i].cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- zip_pre_store_caps[i], qm->cap_ver);
+ zip_cap[i].type = zip_cap_query_info[i].type;
+ zip_cap[i].name = zip_cap_query_info[i].name;
+ zip_cap[i].cap_val = hisi_qm_get_cap_value(qm, zip_cap_query_info,
+ i, qm->cap_ver);
}
qm->cap_tables.dev_cap_table = zip_cap;
+ qm->cap_tables.dev_cap_size = size;
return 0;
}
@@ -1230,7 +1305,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}
- alg_msk = qm->cap_tables.dev_cap_table[ZIP_DEV_ALG_BITMAP_IDX].cap_val;
+ alg_msk = qm->cap_tables.dev_cap_table[ZIP_ALG_BITMAP].cap_val;
ret = hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs));
if (ret) {
pci_err(qm->pdev, "Failed to set zip algs!\n");
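The hisi_zip hunks above replace per-capability table lookups with bit-field extraction from a single packed ZIP_CORE_INFO word, using each field's shift and mask. A minimal self-contained sketch of that extraction pattern — the field layout and values below are hypothetical, not the driver's real capability encoding:

    #include <stdint.h>
    #include <stdio.h>

    struct cap_field {
    	uint32_t shift;
    	uint32_t mask;	/* applied after shifting the field down */
    };

    /* Hypothetical layout: bits [7:0] total cores, bits [15:8] compress cores. */
    static const struct cap_field core_num_field = { .shift = 0, .mask = 0xff };
    static const struct cap_field comp_num_field = { .shift = 8, .mask = 0xff };

    static uint32_t cap_extract(uint32_t reg, const struct cap_field *f)
    {
    	return (reg >> f->shift) & f->mask;
    }

    int main(void)
    {
    	uint32_t core_info = 0x0302;	/* 3 cores total, 2 of them compress */

    	printf("cores=%u comp=%u\n",
    	       cap_extract(core_info, &core_num_field),
    	       cap_extract(core_info, &comp_num_field));
    	return 0;
    }

Reading the packed word once and extracting each field from it keeps related capabilities consistent, since they all come from a single register snapshot.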
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 7e93159c3b6b..1dc2378aa88b 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -1084,7 +1084,7 @@ static const struct dev_pm_ops img_hash_pm_ops = {
static struct platform_driver img_hash_driver = {
.probe = img_hash_probe,
- .remove_new = img_hash_remove,
+ .remove = img_hash_remove,
.driver = {
.name = "img-hash-accelerator",
.pm = &img_hash_pm_ops,
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index f5c1912aa564..45758c7aa80e 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1868,7 +1868,7 @@ MODULE_DEVICE_TABLE(of, safexcel_of_match_table);
static struct platform_driver crypto_safexcel = {
.probe = safexcel_probe,
- .remove_new = safexcel_remove,
+ .remove = safexcel_remove,
.driver = {
.name = "crypto-safexcel",
.of_match_table = safexcel_of_match_table,
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index e17577b785c3..f44c08f5f5ec 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -2093,7 +2093,7 @@ static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm)
safexcel_ahash_cra_init(tfm);
ctx->aes = kmalloc(sizeof(*ctx->aes), GFP_KERNEL);
- return PTR_ERR_OR_ZERO(ctx->aes);
+ return ctx->aes == NULL ? -ENOMEM : 0;
}
static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm)
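The safexcel fix above matters because kmalloc() reports failure with a plain NULL pointer, while PTR_ERR_OR_ZERO() only recognizes ERR_PTR()-encoded failures; for a failed allocation it would have returned 0, i.e. success. A user-space sketch of the two conventions, with simplified stand-ins for the kernel helpers:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Simplified versions of the kernel's ERR_PTR()/IS_ERR() helpers. */
    static void *ERR_PTR(long err)
    {
    	return (void *)err;
    }

    static int IS_ERR(const void *p)
    {
    	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    int main(void)
    {
    	void *alloc_failure = NULL;			/* how kmalloc() fails */
    	void *errptr_failure = ERR_PTR(-ENOMEM);	/* ERR_PTR-style failure */

    	/* NULL is outside the ERR_PTR range, so IS_ERR(NULL) is false: */
    	printf("IS_ERR(NULL)=%d IS_ERR(ERR_PTR(-ENOMEM))=%d\n",
    	       IS_ERR(alloc_failure), IS_ERR(errptr_failure));
    	return 0;
    }

Hence the explicit NULL check and -ENOMEM in the corrected return.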
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 237f87000070..8fced88d3d06 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -945,12 +945,22 @@ static inline int check_completion(struct device *dev,
bool only_once)
{
char *op_str = compress ? "compress" : "decompress";
+ int status_checks = 0;
int ret = 0;
while (!comp->status) {
if (only_once)
return -EAGAIN;
cpu_relax();
+ if (status_checks++ >= IAA_COMPLETION_TIMEOUT) {
+ /* Something is wrong with the hw, disable it. */
+ dev_err(dev, "%s completion timed out - "
+ "assuming broken hw, iaa_crypto now DISABLED\n",
+ op_str);
+ iaa_crypto_enabled = false;
+ ret = -ETIMEDOUT;
+ goto out;
+ }
}
if (comp->status != IAX_COMP_SUCCESS) {
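The iaa_crypto change above bounds a previously unbounded busy-wait on a hardware completion record. A hedged user-space sketch of that bounded-polling shape — the budget constant and return convention here are illustrative, not the driver's actual values:

    #include <errno.h>
    #include <stdio.h>

    #define COMPLETION_POLL_BUDGET 1000000	/* hypothetical iteration budget */

    /* Poll a status word, but give up after a fixed number of checks. */
    static int wait_for_completion_status(volatile unsigned int *status)
    {
    	int checks = 0;

    	while (!*status) {
    		if (++checks >= COMPLETION_POLL_BUDGET)
    			return -ETIMEDOUT;	/* assume the hardware is wedged */
    		/* in kernel code a cpu_relax() would sit here */
    	}
    	return 0;
    }

    int main(void)
    {
    	volatile unsigned int status = 1;	/* already completed */

    	printf("ret=%d\n", wait_for_completion_status(&status));
    	return 0;
    }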
diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
index f8a77bff8844..449c6d3ab2db 100644
--- a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
+++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
@@ -1588,7 +1588,7 @@ static const struct of_device_id ixp4xx_crypto_of_match[] = {
static struct platform_driver ixp_crypto_driver = {
.probe = ixp_crypto_probe,
- .remove_new = ixp_crypto_remove,
+ .remove = ixp_crypto_remove,
.driver = {
.name = "ixp4xx_crypto",
.of_match_table = ixp4xx_crypto_of_match,
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
index 9b2d098e5eb2..8a8f6c81e010 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
@@ -1656,7 +1656,7 @@ list_del:
/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_aes_driver = {
.probe = kmb_ocs_aes_probe,
- .remove_new = kmb_ocs_aes_remove,
+ .remove = kmb_ocs_aes_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = kmb_ocs_aes_of_match,
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
index 5e24f2d8affc..59308926399d 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
@@ -991,7 +991,7 @@ static const struct of_device_id kmb_ocs_ecc_of_match[] = {
/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_ecc_driver = {
.probe = kmb_ocs_ecc_probe,
- .remove_new = kmb_ocs_ecc_remove,
+ .remove = kmb_ocs_ecc_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = kmb_ocs_ecc_of_match,
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index e54c79890d44..95dc8979918d 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
@@ -1243,7 +1243,7 @@ list_del:
/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_hcu_driver = {
.probe = kmb_ocs_hcu_probe,
- .remove_new = kmb_ocs_hcu_remove,
+ .remove = kmb_ocs_hcu_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = kmb_ocs_hcu_of_match,
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 78f0ea49254d..9faef33e54bd 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -375,7 +375,7 @@ static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
else
id = -EINVAL;
- if (id < 0 || id > num_objs)
+ if (id < 0 || id >= num_objs)
return NULL;
return fw_objs[id];
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 9fd7ec53b9f3..bbd92c017c28 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -334,7 +334,7 @@ static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
else
id = -EINVAL;
- if (id < 0 || id > num_objs)
+ if (id < 0 || id >= num_objs)
return NULL;
return fw_objs[id];
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index ec7913ab00a2..4cb8bd83f570 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -281,8 +281,11 @@ int adf_init_aer(void)
return -EFAULT;
device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0);
- if (!device_sriov_wq)
+ if (!device_sriov_wq) {
+ destroy_workqueue(device_reset_wq);
+ device_reset_wq = NULL;
return -EFAULT;
+ }
return 0;
}
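The adf_aer fix above completes the init function's error path: when the second workqueue allocation fails, the first one must be torn down or it leaks. A minimal sketch of that paired alloc/unwind pattern — the names are illustrative, and the error code is a stand-in for whatever the caller expects:

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *wq_a;
    static struct workqueue_struct *wq_b;

    static int example_init(void)
    {
    	wq_a = alloc_workqueue("example_wq_a", 0, 0);
    	if (!wq_a)
    		return -ENOMEM;

    	wq_b = alloc_workqueue("example_wq_b", 0, 0);
    	if (!wq_b) {
    		/* unwind the first allocation before failing */
    		destroy_workqueue(wq_a);
    		wq_a = NULL;
    		return -ENOMEM;
    	}

    	return 0;
    }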
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index f7ecabdf7805..eaa6388a6678 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -69,7 +69,6 @@ void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf);
struct list_head *adf_devmgr_get_head(void);
struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id);
-struct adf_accel_dev *adf_devmgr_get_first(void);
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
int adf_devmgr_verify_id(u32 id);
void adf_devmgr_get_num_dev(u32 *num);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
index c42f5c25aabd..4c11ad1ebcf0 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
@@ -22,18 +22,13 @@
void adf_dbgfs_init(struct adf_accel_dev *accel_dev)
{
char name[ADF_DEVICE_NAME_LENGTH];
- void *ret;
/* Create dev top level debugfs entry */
snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
accel_dev->hw_device->dev_class->name,
pci_name(accel_dev->accel_pci_dev.pci_dev));
- ret = debugfs_create_dir(name, NULL);
- if (IS_ERR_OR_NULL(ret))
- return;
-
- accel_dev->debugfs_dir = ret;
+ accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
adf_cfg_dev_dbgfs_add(accel_dev);
}
@@ -59,9 +54,6 @@ EXPORT_SYMBOL_GPL(adf_dbgfs_exit);
*/
void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
{
- if (!accel_dev->debugfs_dir)
- return;
-
if (!accel_dev->is_vf) {
adf_fw_counters_dbgfs_add(accel_dev);
adf_heartbeat_dbgfs_add(accel_dev);
@@ -77,9 +69,6 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
*/
void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
{
- if (!accel_dev->debugfs_dir)
- return;
-
if (!accel_dev->is_vf) {
adf_tl_dbgfs_rm(accel_dev);
adf_cnv_dbgfs_rm(accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
index 96ddd1c419c4..34b9f7731c78 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
@@ -276,16 +276,6 @@ unlock:
}
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
-struct adf_accel_dev *adf_devmgr_get_first(void)
-{
- struct adf_accel_dev *dev = NULL;
-
- if (!list_empty(&accel_table))
- dev = list_first_entry(&accel_table, struct adf_accel_dev,
- list);
- return dev;
-}
-
/**
* adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
* @pci_dev: Pointer to PCI device.
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
index ee0b5079de3e..2e4095c4c12c 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
@@ -42,13 +42,13 @@ struct pm_status_row {
const char *key;
};
-static struct pm_status_row pm_fuse_rows[] = {
+static const struct pm_status_row pm_fuse_rows[] = {
PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM),
PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM_IDLE),
PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_DEEP_PM_IDLE),
};
-static struct pm_status_row pm_info_rows[] = {
+static const struct pm_status_row pm_info_rows[] = {
PM_INFO_REGSET_ENTRY(pm.status, CPM_PM_STATE),
PM_INFO_REGSET_ENTRY(pm.status, PENDING_WP),
PM_INFO_REGSET_ENTRY(pm.status, CURRENT_WP),
@@ -59,7 +59,7 @@ static struct pm_status_row pm_info_rows[] = {
PM_INFO_REGSET_ENTRY(pm.main, THR_VALUE),
};
-static struct pm_status_row pm_ssm_rows[] = {
+static const struct pm_status_row pm_ssm_rows[] = {
PM_INFO_REGSET_ENTRY(ssm.pm_enable, SSM_PM_ENABLE),
PM_INFO_REGSET_ENTRY32(ssm.active_constraint, ACTIVE_CONSTRAINT),
PM_INFO_REGSET_ENTRY(ssm.pm_domain_status, DOMAIN_POWER_GATED),
@@ -83,7 +83,7 @@ static struct pm_status_row pm_ssm_rows[] = {
PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, WCP_MANAGED_COUNT),
};
-static struct pm_status_row pm_log_rows[] = {
+static const struct pm_status_row pm_log_rows[] = {
PM_INFO_REGSET_ENTRY32(event_counters.host_msg, HOST_MSG_EVENT_COUNT),
PM_INFO_REGSET_ENTRY32(event_counters.sys_pm, SYS_PM_EVENT_COUNT),
PM_INFO_REGSET_ENTRY32(event_counters.local_ssm, SSM_EVENT_COUNT),
@@ -91,7 +91,7 @@ static struct pm_status_row pm_log_rows[] = {
PM_INFO_REGSET_ENTRY32(event_counters.unknown, UNKNOWN_EVENT_COUNT),
};
-static struct pm_status_row pm_event_rows[ICP_QAT_NUMBER_OF_PM_EVENTS] = {
+static const struct pm_status_row pm_event_rows[ICP_QAT_NUMBER_OF_PM_EVENTS] = {
PM_INFO_REGSET_ENTRY32(event_log[0], EVENT0),
PM_INFO_REGSET_ENTRY32(event_log[1], EVENT1),
PM_INFO_REGSET_ENTRY32(event_log[2], EVENT2),
@@ -102,14 +102,14 @@ static struct pm_status_row pm_event_rows[ICP_QAT_NUMBER_OF_PM_EVENTS] = {
PM_INFO_REGSET_ENTRY32(event_log[7], EVENT7),
};
-static struct pm_status_row pm_csrs_rows[] = {
+static const struct pm_status_row pm_csrs_rows[] = {
PM_INFO_REGSET_ENTRY32(pm.fw_init, CPM_PM_FW_INIT),
PM_INFO_REGSET_ENTRY32(pm.status, CPM_PM_STATUS),
PM_INFO_REGSET_ENTRY32(pm.main, CPM_PM_MASTER_FW),
PM_INFO_REGSET_ENTRY32(pm.pwrreq, CPM_PM_PWRREQ),
};
-static int pm_scnprint_table(char *buff, struct pm_status_row *table,
+static int pm_scnprint_table(char *buff, const struct pm_status_row *table,
u32 *pm_info_regs, size_t buff_size, int table_len,
bool lowercase)
{
@@ -131,7 +131,7 @@ static int pm_scnprint_table(char *buff, struct pm_status_row *table,
return wr;
}
-static int pm_scnprint_table_upper_keys(char *buff, struct pm_status_row *table,
+static int pm_scnprint_table_upper_keys(char *buff, const struct pm_status_row *table,
u32 *pm_info_regs, size_t buff_size,
int table_len)
{
@@ -139,7 +139,7 @@ static int pm_scnprint_table_upper_keys(char *buff, struct pm_status_row *table,
table_len, false);
}
-static int pm_scnprint_table_lower_keys(char *buff, struct pm_status_row *table,
+static int pm_scnprint_table_lower_keys(char *buff, const struct pm_status_row *table,
u32 *pm_info_regs, size_t buff_size,
int table_len)
{
diff --git a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
index 65bd26b25abc..f93d9cca70ce 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
@@ -90,10 +90,6 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev)
hw_data->get_arb_info(&info);
- /* Reset arbiter configuration */
- for (i = 0; i < ADF_ARB_NUM; i++)
- WRITE_CSR_ARB_SARCONFIG(csr, arb_off, i, 0);
-
/* Unmap worker threads to service arbiters */
for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, 0);
diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
index 317cafa9d11f..ef8a9cf74f0c 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
@@ -163,7 +163,7 @@ int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
return -EINVAL;
}
- /* Sets the accelaration engine context mode to either four or eight */
+ /* Sets the acceleration engine context mode to either four or eight */
csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr = IGNORE_W1C_MASK & csr;
new_csr = (mode == 4) ?
diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
index 78217577aa54..4c25a78ab3ed 100644
--- a/drivers/crypto/marvell/Kconfig
+++ b/drivers/crypto/marvell/Kconfig
@@ -7,7 +7,7 @@ config CRYPTO_DEV_MARVELL
config CRYPTO_DEV_MARVELL_CESA
tristate "Marvell's Cryptographic Engine driver"
- depends on PLAT_ORION || ARCH_MVEBU
+ depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
select CRYPTO_LIB_AES
select CRYPTO_LIB_DES
select CRYPTO_SKCIPHER
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 5fd31ba715c2..fa08f10e6f3f 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -375,7 +375,6 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
struct mv_cesa_engine *engine = &cesa->engines[idx];
- const char *res_name = "sram";
struct resource *res;
engine->pool = of_gen_pool_get(cesa->dev->of_node,
@@ -391,19 +390,7 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
return -ENOMEM;
}
- if (cesa->caps->nengines > 1) {
- if (!idx)
- res_name = "sram0";
- else
- res_name = "sram1";
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- res_name);
- if (!res || resource_size(res) < cesa->sram_size)
- return -EINVAL;
-
- engine->sram = devm_ioremap_resource(cesa->dev, res);
+ engine->sram = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
if (IS_ERR(engine->sram))
return PTR_ERR(engine->sram);
@@ -510,25 +497,21 @@ static int mv_cesa_probe(struct platform_device *pdev)
* if the clock does not exist.
*/
snprintf(res_name, sizeof(res_name), "cesa%u", i);
- engine->clk = devm_clk_get(dev, res_name);
+ engine->clk = devm_clk_get_optional_enabled(dev, res_name);
if (IS_ERR(engine->clk)) {
- engine->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(engine->clk))
- engine->clk = NULL;
+ engine->clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(engine->clk)) {
+ ret = PTR_ERR(engine->clk);
+ goto err_cleanup;
+ }
}
snprintf(res_name, sizeof(res_name), "cesaz%u", i);
- engine->zclk = devm_clk_get(dev, res_name);
- if (IS_ERR(engine->zclk))
- engine->zclk = NULL;
-
- ret = clk_prepare_enable(engine->clk);
- if (ret)
- goto err_cleanup;
-
- ret = clk_prepare_enable(engine->zclk);
- if (ret)
+ engine->zclk = devm_clk_get_optional_enabled(dev, res_name);
+ if (IS_ERR(engine->zclk)) {
+ ret = PTR_ERR(engine->zclk);
goto err_cleanup;
+ }
engine->regs = cesa->regs + CESA_ENGINE_OFF(i);
@@ -570,13 +553,8 @@ static int mv_cesa_probe(struct platform_device *pdev)
return 0;
err_cleanup:
- for (i = 0; i < caps->nengines; i++) {
- clk_disable_unprepare(cesa->engines[i].zclk);
- clk_disable_unprepare(cesa->engines[i].clk);
+ for (i = 0; i < caps->nengines; i++)
mv_cesa_put_sram(pdev, i);
- if (cesa->engines[i].irq > 0)
- irq_set_affinity_hint(cesa->engines[i].irq, NULL);
- }
return ret;
}
@@ -588,12 +566,8 @@ static void mv_cesa_remove(struct platform_device *pdev)
mv_cesa_remove_algs(cesa);
- for (i = 0; i < cesa->caps->nengines; i++) {
- clk_disable_unprepare(cesa->engines[i].zclk);
- clk_disable_unprepare(cesa->engines[i].clk);
+ for (i = 0; i < cesa->caps->nengines; i++)
mv_cesa_put_sram(pdev, i);
- irq_set_affinity_hint(cesa->engines[i].irq, NULL);
- }
}
static const struct platform_device_id mv_cesa_plat_id_table[] = {
@@ -604,7 +578,7 @@ MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);
static struct platform_driver marvell_cesa = {
.probe = mv_cesa_probe,
- .remove_new = mv_cesa_remove,
+ .remove = mv_cesa_remove,
.id_table = mv_cesa_plat_id_table,
.driver = {
.name = "marvell-cesa",
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index 0f37dfd42d85..cf62db50f958 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -489,7 +489,7 @@ static int mv_cesa_des_op(struct skcipher_request *req,
static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -500,7 +500,7 @@ static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -543,7 +543,7 @@ static int mv_cesa_cbc_des_op(struct skcipher_request *req,
static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);
@@ -552,7 +552,7 @@ static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);
@@ -596,7 +596,7 @@ static int mv_cesa_des3_op(struct skcipher_request *req,
static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -608,7 +608,7 @@ static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -649,7 +649,7 @@ static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_CBC |
@@ -661,7 +661,7 @@ static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_CBC |
@@ -725,7 +725,7 @@ static int mv_cesa_aes_op(struct skcipher_request *req,
static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -736,7 +736,7 @@ static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -778,7 +778,7 @@ static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);
@@ -787,7 +787,7 @@ static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index c82775dbb557..d94a26c3541a 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -225,21 +225,22 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
struct skcipher_request *req, int init)
{
- dma_addr_t key_phys = 0;
- dma_addr_t src_phys, dst_phys;
+ dma_addr_t key_phys, src_phys, dst_phys;
struct dcp *sdcp = global_sdcp;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
bool key_referenced = actx->key_referenced;
int ret;
- if (!key_referenced) {
+ if (key_referenced)
+ key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key + AES_KEYSIZE_128,
+ AES_KEYSIZE_128, DMA_TO_DEVICE);
+ else
key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
- ret = dma_mapping_error(sdcp->dev, key_phys);
- if (ret)
- return ret;
- }
+ ret = dma_mapping_error(sdcp->dev, key_phys);
+ if (ret)
+ return ret;
src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
DCP_BUF_SZ, DMA_TO_DEVICE);
@@ -300,7 +301,10 @@ aes_done_run:
err_dst:
dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
- if (!key_referenced)
+ if (key_referenced)
+ dma_unmap_single(sdcp->dev, key_phys, AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
+ else
dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
DMA_TO_DEVICE);
return ret;
@@ -1243,7 +1247,7 @@ MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
static struct platform_driver mxs_dcp_driver = {
.probe = mxs_dcp_probe,
- .remove_new = mxs_dcp_remove,
+ .remove = mxs_dcp_remove,
.driver = {
.name = "mxs-dcp",
.of_match_table = mxs_dcp_dt_ids,
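The mxs-dcp change above maps the key in both branches and, correspondingly, unmaps it with the size that matches whichever mapping was made. The underlying discipline: every dma_map_single() result is checked with dma_mapping_error() before use, and each unmap mirrors the map's size and direction. A minimal sketch of that round trip (dev, buf and len are placeholders):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    static int example_dma_roundtrip(struct device *dev, void *buf, size_t len)
    {
    	dma_addr_t phys;
    	int ret;

    	phys = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    	ret = dma_mapping_error(dev, phys);
    	if (ret)
    		return ret;

    	/* ... hand 'phys' to the hardware here ... */

    	dma_unmap_single(dev, phys, len, DMA_TO_DEVICE);
    	return 0;
    }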
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index b11545cc5cb7..14c302d2db79 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -2119,7 +2119,7 @@ static struct platform_driver n2_crypto_driver = {
.of_match_table = n2_crypto_match,
},
.probe = n2_crypto_probe,
- .remove_new = n2_crypto_remove,
+ .remove = n2_crypto_remove,
};
static const struct of_device_id n2_mau_match[] = {
@@ -2146,7 +2146,7 @@ static struct platform_driver n2_mau_driver = {
.of_match_table = n2_mau_match,
},
.probe = n2_mau_probe,
- .remove_new = n2_mau_remove,
+ .remove = n2_mau_remove,
};
static struct platform_driver * const drivers[] = {
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index 35f2d0d8507e..1660c5cf3641 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -133,7 +133,7 @@ struct nx842_devdata {
};
static struct nx842_devdata __rcu *devdata;
-static DEFINE_SPINLOCK(devdata_mutex);
+static DEFINE_SPINLOCK(devdata_spinlock);
#define NX842_COUNTER_INC(_x) \
static inline void nx842_inc_##_x( \
@@ -750,15 +750,15 @@ static int nx842_OF_upd(struct property *new_prop)
if (!new_devdata)
return -ENOMEM;
- spin_lock_irqsave(&devdata_mutex, flags);
+ spin_lock_irqsave(&devdata_spinlock, flags);
old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
+ lockdep_is_held(&devdata_spinlock));
if (old_devdata)
of_node = old_devdata->dev->of_node;
if (!old_devdata || !of_node) {
pr_err("%s: device is not available\n", __func__);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
kfree(new_devdata);
return -ENODEV;
}
@@ -810,7 +810,7 @@ out:
old_devdata->max_sg_len);
rcu_assign_pointer(devdata, new_devdata);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
dev_set_drvdata(new_devdata->dev, new_devdata);
kfree(old_devdata);
@@ -821,13 +821,13 @@ error_out:
dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
nx842_OF_set_defaults(new_devdata);
rcu_assign_pointer(devdata, new_devdata);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
dev_set_drvdata(new_devdata->dev, new_devdata);
kfree(old_devdata);
} else {
dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
}
if (!ret)
@@ -1045,9 +1045,9 @@ static int nx842_probe(struct vio_dev *viodev,
return -ENOMEM;
}
- spin_lock_irqsave(&devdata_mutex, flags);
+ spin_lock_irqsave(&devdata_spinlock, flags);
old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
+ lockdep_is_held(&devdata_spinlock));
if (old_devdata && old_devdata->vdev != NULL) {
dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
@@ -1062,7 +1062,7 @@ static int nx842_probe(struct vio_dev *viodev,
nx842_OF_set_defaults(new_devdata);
rcu_assign_pointer(devdata, new_devdata);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
kfree(old_devdata);
@@ -1101,7 +1101,7 @@ static int nx842_probe(struct vio_dev *viodev,
return 0;
error_unlock:
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
if (new_devdata)
kfree(new_devdata->counters);
kfree(new_devdata);
@@ -1122,12 +1122,13 @@ static void nx842_remove(struct vio_dev *viodev)
crypto_unregister_alg(&nx842_pseries_alg);
- spin_lock_irqsave(&devdata_mutex, flags);
- old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
of_reconfig_notifier_unregister(&nx842_of_nb);
+
+ spin_lock_irqsave(&devdata_spinlock, flags);
+ old_devdata = rcu_dereference_check(devdata,
+ lockdep_is_held(&devdata_spinlock));
RCU_INIT_POINTER(devdata, NULL);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
dev_set_drvdata(&viodev->dev, NULL);
if (old_devdata)
@@ -1257,11 +1258,11 @@ static void __exit nx842_pseries_exit(void)
crypto_unregister_alg(&nx842_pseries_alg);
- spin_lock_irqsave(&devdata_mutex, flags);
+ spin_lock_irqsave(&devdata_spinlock, flags);
old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
+ lockdep_is_held(&devdata_spinlock));
RCU_INIT_POINTER(devdata, NULL);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
if (old_devdata && old_devdata->dev)
dev_set_drvdata(old_devdata->dev, NULL);
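The nx842 rename above makes the lock's type explicit, and the surrounding code is a textbook RCU-protected pointer update: take the spinlock, dereference the old copy with the lock held, publish the replacement with rcu_assign_pointer(), drop the lock, then synchronize_rcu() before freeing the old copy so no reader can still hold it. A condensed sketch of that pattern with an illustrative payload type:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct devdata {
    	int max_sg_len;
    };

    static struct devdata __rcu *devdata;
    static DEFINE_SPINLOCK(devdata_spinlock);

    static void devdata_update(struct devdata *new)
    {
    	struct devdata *old;
    	unsigned long flags;

    	spin_lock_irqsave(&devdata_spinlock, flags);
    	old = rcu_dereference_check(devdata,
    				    lockdep_is_held(&devdata_spinlock));
    	rcu_assign_pointer(devdata, new);	/* publish the replacement */
    	spin_unlock_irqrestore(&devdata_spinlock, flags);

    	synchronize_rcu();	/* wait out readers of the old copy */
    	kfree(old);
    }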
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index bad1adacbc84..e27b84616743 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1305,7 +1305,7 @@ static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);
static struct platform_driver omap_aes_driver = {
.probe = omap_aes_probe,
- .remove_new = omap_aes_remove,
+ .remove = omap_aes_remove,
.driver = {
.name = "omap-aes",
.pm = &omap_aes_pm_ops,
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 209d3dc03a9b..498cbd585ed1 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -1115,7 +1115,7 @@ static SIMPLE_DEV_PM_OPS(omap_des_pm_ops, omap_des_suspend, omap_des_resume);
static struct platform_driver omap_des_driver = {
.probe = omap_des_probe,
- .remove_new = omap_des_remove,
+ .remove = omap_des_remove,
.driver = {
.name = "omap-des",
.pm = &omap_des_pm_ops,
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 5bcd9ab0f72a..7021481bf027 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2216,7 +2216,7 @@ static void omap_sham_remove(struct platform_device *pdev)
static struct platform_driver omap_sham_driver = {
.probe = omap_sham_probe,
- .remove_new = omap_sham_remove,
+ .remove = omap_sham_remove,
.driver = {
.name = "omap-sham",
.of_match_table = omap_sham_of_match,
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 28b5fd823827..e228a31fe28d 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -299,7 +299,7 @@ MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
static struct platform_driver qce_crypto_driver = {
.probe = qce_crypto_probe,
- .remove_new = qce_crypto_remove,
+ .remove = qce_crypto_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qce_crypto_of_match,
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 09419e79e34c..0685ba122e8a 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -262,7 +262,7 @@ MODULE_DEVICE_TABLE(of, qcom_rng_of_match);
static struct platform_driver qcom_rng_driver = {
.probe = qcom_rng_probe,
- .remove_new = qcom_rng_remove,
+ .remove = qcom_rng_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(qcom_rng_of_match),
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index f74b3c81ba6d..b77bdce8e7fc 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -433,7 +433,7 @@ static void rk_crypto_remove(struct platform_device *pdev)
static struct platform_driver crypto_driver = {
.probe = rk_crypto_probe,
- .remove_new = rk_crypto_remove,
+ .remove = rk_crypto_remove,
.driver = {
.name = "rk3288-crypto",
.pm = &rk_crypto_pm_ops,
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 8b6e3f5c94de..57ab237e899e 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -2335,7 +2335,7 @@ static void s5p_aes_remove(struct platform_device *pdev)
static struct platform_driver s5p_aes_crypto = {
.probe = s5p_aes_probe,
- .remove_new = s5p_aes_remove,
+ .remove = s5p_aes_remove,
.driver = {
.name = "s5p-secss",
.of_match_table = s5p_sss_dt_match,
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 461eca40e878..091612b066f1 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -574,7 +574,7 @@ static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
/* Clear the command label */
memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
- /* Iniialize the command update structure */
+ /* Initialize the command update structure */
memzero_explicit(upd_info, sizeof(*upd_info));
if (cfg->enc_eng_id && cfg->auth_eng_id) {
@@ -2489,7 +2489,7 @@ static void sa_ul_remove(struct platform_device *pdev)
static struct platform_driver sa_ul_driver = {
.probe = sa_ul_probe,
- .remove_new = sa_ul_remove,
+ .remove = sa_ul_remove,
.driver = {
.name = "saul-crypto",
.of_match_table = of_match,
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 96d4af5d48a6..533080b0cddc 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1421,7 +1421,7 @@ static void sahara_remove(struct platform_device *pdev)
static struct platform_driver sahara_driver = {
.probe = sahara_probe,
- .remove_new = sahara_remove,
+ .remove = sahara_remove,
.driver = {
.name = SAHARA_NAME,
.of_match_table = sahara_dt_ids,
diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c
index e4dfed7ee0b0..42114e9364f0 100644
--- a/drivers/crypto/starfive/jh7110-cryp.c
+++ b/drivers/crypto/starfive/jh7110-cryp.c
@@ -151,7 +151,7 @@ static int starfive_cryp_probe(struct platform_device *pdev)
ret = starfive_aes_register_algs();
if (ret)
- goto err_algs_aes;
+ goto err_engine_start;
ret = starfive_hash_register_algs();
if (ret)
@@ -167,8 +167,6 @@ err_algs_rsa:
starfive_hash_unregister_algs();
err_algs_hash:
starfive_aes_unregister_algs();
-err_algs_aes:
- crypto_engine_stop(cryp->engine);
err_engine_start:
crypto_engine_exit(cryp->engine);
err_engine:
@@ -193,7 +191,6 @@ static void starfive_cryp_remove(struct platform_device *pdev)
starfive_hash_unregister_algs();
starfive_rsa_unregister_algs();
- crypto_engine_stop(cryp->engine);
crypto_engine_exit(cryp->engine);
starfive_dma_cleanup(cryp);
@@ -215,7 +212,7 @@ MODULE_DEVICE_TABLE(of, starfive_dt_ids);
static struct platform_driver starfive_cryp_driver = {
.probe = starfive_cryp_probe,
- .remove_new = starfive_cryp_remove,
+ .remove = starfive_cryp_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = starfive_dt_ids,
diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c
index a778c4846025..d109c743f076 100644
--- a/drivers/crypto/starfive/jh7110-rsa.c
+++ b/drivers/crypto/starfive/jh7110-rsa.c
@@ -565,8 +565,6 @@ static void starfive_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct akcipher_alg starfive_rsa = {
.encrypt = starfive_rsa_enc,
.decrypt = starfive_rsa_dec,
- .sign = starfive_rsa_dec,
- .verify = starfive_rsa_enc,
.set_pub_key = starfive_rsa_set_pub_key,
.set_priv_key = starfive_rsa_set_priv_key,
.max_size = starfive_rsa_max_size,
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index e0faddbf8990..de4d0402f133 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -465,7 +465,7 @@ MODULE_DEVICE_TABLE(of, stm32_dt_ids);
static struct platform_driver stm32_crc_driver = {
.probe = stm32_crc_probe,
- .remove_new = stm32_crc_remove,
+ .remove = stm32_crc_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &stm32_crc_pm_ops,
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 937f6dab8955..14c6339c2e43 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -2771,7 +2771,7 @@ static const struct dev_pm_ops stm32_cryp_pm_ops = {
static struct platform_driver stm32_cryp_driver = {
.probe = stm32_cryp_probe,
- .remove_new = stm32_cryp_remove,
+ .remove = stm32_cryp_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &stm32_cryp_pm_ops,
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 351827372ea6..768b27de4737 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -2532,7 +2532,7 @@ static const struct dev_pm_ops stm32_hash_pm_ops = {
static struct platform_driver stm32_hash_driver = {
.probe = stm32_hash_probe,
- .remove_new = stm32_hash_remove,
+ .remove = stm32_hash_remove,
.driver = {
.name = "stm32-hash",
.pm = &stm32_hash_pm_ops,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 511ddcb0efd4..e8c0db687c57 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -3560,7 +3560,7 @@ static struct platform_driver talitos_driver = {
.of_match_table = talitos_match,
},
.probe = talitos_probe,
- .remove_new = talitos_remove,
+ .remove = talitos_remove,
};
module_platform_driver(talitos_driver);
diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
index ae7a0f8435fc..9d130592cc0a 100644
--- a/drivers/crypto/tegra/tegra-se-aes.c
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -1180,8 +1180,6 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
goto out;
} else {
rctx->cryptlen = req->cryptlen - ctx->authsize;
- if (ret)
- goto out;
/* CTR operation */
ret = tegra_ccm_do_ctr(ctx, rctx);
diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c
index f94c0331b148..918c0b10614d 100644
--- a/drivers/crypto/tegra/tegra-se-main.c
+++ b/drivers/crypto/tegra/tegra-se-main.c
@@ -312,7 +312,6 @@ static int tegra_se_probe(struct platform_device *pdev)
ret = tegra_se_host1x_register(se);
if (ret) {
- crypto_engine_stop(se->engine);
crypto_engine_exit(se->engine);
return dev_err_probe(dev, ret, "failed to init host1x params\n");
}
@@ -324,7 +323,6 @@ static void tegra_se_remove(struct platform_device *pdev)
{
struct tegra_se *se = platform_get_drvdata(pdev);
- crypto_engine_stop(se->engine);
crypto_engine_exit(se->engine);
host1x_client_unregister(&se->client);
}
@@ -387,7 +385,7 @@ static struct platform_driver tegra_se_driver = {
.of_match_table = tegra_se_of_match,
},
.probe = tegra_se_probe,
- .remove_new = tegra_se_remove,
+ .remove = tegra_se_remove,
};
static int tegra_se_host1x_probe(struct host1x_device *dev)
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index cb92b7fa99c6..48fee07b7e51 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -83,23 +83,16 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
case VIRTIO_CRYPTO_BADMSG:
error = -EBADMSG;
break;
-
- case VIRTIO_CRYPTO_KEY_REJECTED:
- error = -EKEYREJECTED;
- break;
-
default:
error = -EIO;
break;
}
akcipher_req = vc_akcipher_req->akcipher_req;
- if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY) {
- /* actuall length maybe less than dst buffer */
- akcipher_req->dst_len = len - sizeof(vc_req->status);
- sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
- vc_akcipher_req->dst_buf, akcipher_req->dst_len);
- }
+	/* actual length may be less than dst buffer */
+ akcipher_req->dst_len = len - sizeof(vc_req->status);
+ sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
+ vc_akcipher_req->dst_buf, akcipher_req->dst_len);
virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
}
@@ -230,36 +223,27 @@ static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request
int node = dev_to_node(&vcrypto->vdev->dev);
unsigned long flags;
int ret;
- bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY;
- unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len;
/* out header */
sg_init_one(&outhdr_sg, req_data, sizeof(*req_data));
sgs[num_out++] = &outhdr_sg;
/* src data */
- src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node);
+ src_buf = kcalloc_node(req->src_len, 1, GFP_KERNEL, node);
if (!src_buf)
return -ENOMEM;
- if (verify) {
- /* for verify operation, both src and dst data work as OUT direction */
- sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
- sg_init_one(&srcdata_sg, src_buf, src_len);
- sgs[num_out++] = &srcdata_sg;
- } else {
- sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
- sg_init_one(&srcdata_sg, src_buf, src_len);
- sgs[num_out++] = &srcdata_sg;
+ sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, req->src_len);
+ sg_init_one(&srcdata_sg, src_buf, req->src_len);
+ sgs[num_out++] = &srcdata_sg;
- /* dst data */
- dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
- if (!dst_buf)
- goto free_src;
+ /* dst data */
+ dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
+ if (!dst_buf)
+ goto free_src;
- sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
- sgs[num_out + num_in++] = &dstdata_sg;
- }
+ sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
+ sgs[num_out + num_in++] = &dstdata_sg;
vc_akcipher_req->src_buf = src_buf;
vc_akcipher_req->dst_buf = dst_buf;
@@ -352,16 +336,6 @@ static int virtio_crypto_rsa_decrypt(struct akcipher_request *req)
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_DECRYPT);
}
-static int virtio_crypto_rsa_sign(struct akcipher_request *req)
-{
- return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_SIGN);
-}
-
-static int virtio_crypto_rsa_verify(struct akcipher_request *req)
-{
- return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_VERIFY);
-}
-
static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen,
@@ -524,16 +498,19 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
.algo.base = {
.encrypt = virtio_crypto_rsa_encrypt,
.decrypt = virtio_crypto_rsa_decrypt,
- .sign = virtio_crypto_rsa_sign,
- .verify = virtio_crypto_rsa_verify,
+ /*
+ * Must specify an arbitrary hash algorithm upon
+ * set_{pub,priv}_key (even though it's not used
+ * by encrypt/decrypt) because qemu checks for it.
+ */
.set_pub_key = virtio_crypto_p1pad_rsa_sha1_set_pub_key,
.set_priv_key = virtio_crypto_p1pad_rsa_sha1_set_priv_key,
.max_size = virtio_crypto_rsa_max_size,
.init = virtio_crypto_rsa_init_tfm,
.exit = virtio_crypto_rsa_exit_tfm,
.base = {
- .cra_name = "pkcs1pad(rsa,sha1)",
- .cra_driver_name = "virtio-pkcs1-rsa-with-sha1",
+ .cra_name = "pkcs1pad(rsa)",
+ .cra_driver_name = "virtio-pkcs1-rsa",
.cra_priority = 150,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index 7f0ec6887a39..6e72d9229410 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -438,7 +438,7 @@ MODULE_DEVICE_TABLE(of, zynqmp_aes_dt_ids);
static struct platform_driver zynqmp_aes_driver = {
.probe = zynqmp_aes_aead_probe,
- .remove_new = zynqmp_aes_aead_remove,
+ .remove = zynqmp_aes_aead_remove,
.driver = {
.name = "zynqmp-aes",
.of_match_table = zynqmp_aes_dt_ids,
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 1bcec6f46c9c..580649f9bff8 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -248,7 +248,7 @@ static void zynqmp_sha_remove(struct platform_device *pdev)
static struct platform_driver zynqmp_sha_driver = {
.probe = zynqmp_sha_probe,
- .remove_new = zynqmp_sha_remove,
+ .remove = zynqmp_sha_remove,
.driver = {
.name = "zynqmp-sha3-384",
},
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 2d77b5f40ca7..c7e5a34b254b 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -373,7 +373,7 @@ static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
return 0;
}
-static inline void
+static inline int
process_response_opp(struct device *dev, struct perf_dom_info *dom,
struct scmi_opp *opp, unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels *r)
@@ -386,12 +386,16 @@ process_response_opp(struct device *dev, struct perf_dom_info *dom,
le16_to_cpu(r->opp[loop_idx].transition_latency_us);
ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
- if (ret)
- dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ if (ret) {
+ dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
opp->perf, dom->info.name, ret);
+ return ret;
+ }
+
+ return 0;
}
-static inline void
+static inline int
process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
struct scmi_opp *opp, unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels_v4 *r)
@@ -404,9 +408,11 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
le16_to_cpu(r->opp[loop_idx].transition_latency_us);
ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
- if (ret)
- dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ if (ret) {
+ dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
opp->perf, dom->info.name, ret);
+ return ret;
+ }
	/* Note that PERF v4 always reports five 32-bit words */
opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
@@ -415,13 +421,21 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
GFP_KERNEL);
- if (ret)
+ if (ret) {
dev_warn(dev,
"Failed to add opps_by_idx at %d for %s - ret:%d\n",
opp->level_index, dom->info.name, ret);
+ /* Cleanup by_lvl too */
+ xa_erase(&dom->opps_by_lvl, opp->perf);
+
+ return ret;
+ }
+
hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
}
+
+ return 0;
}
static int
@@ -429,16 +443,22 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
const void *response,
struct scmi_iterator_state *st, void *priv)
{
+ int ret;
struct scmi_opp *opp;
struct scmi_perf_ipriv *p = priv;
- opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
+ opp = &p->perf_dom->opp[p->perf_dom->opp_count];
if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
- process_response_opp(ph->dev, p->perf_dom, opp, st->loop_idx,
- response);
+ ret = process_response_opp(ph->dev, p->perf_dom, opp,
+ st->loop_idx, response);
else
- process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
- response);
+ ret = process_response_opp_v4(ph->dev, p->perf_dom, opp,
+ st->loop_idx, response);
+
+ /* Skip BAD duplicates received from firmware */
+ if (ret)
+ return ret == -EBUSY ? 0 : ret;
+
p->perf_dom->opp_count++;
dev_dbg(ph->dev, "Level %d Power %d Latency %dus Ifreq %d Index %d\n",
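The SCMI perf changes above turn the xa_insert() warnings into hard failures, with one carve-out: xa_insert() returns -EBUSY when an entry already occupies the index, which the iterator treats as a firmware-reported duplicate level and skips rather than aborting. A small sketch of that duplicate-tolerant insert (the helper name is illustrative):

    #include <linux/xarray.h>

    static int store_opp_by_level(struct xarray *xa, unsigned long level,
    			      void *opp)
    {
    	int ret;

    	ret = xa_insert(xa, level, opp, GFP_KERNEL);
    	if (ret == -EBUSY)
    		return 0;	/* duplicate level from firmware: ignore */

    	return ret;	/* 0 on success, -ENOMEM etc. otherwise */
    }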
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index daadd71d8ddd..c68c9f56370f 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
+#include <linux/sysfb.h>
#include "coreboot_table.h"
@@ -36,6 +37,19 @@ static int framebuffer_probe(struct coreboot_device *dev)
.format = NULL,
};
+ /*
+ * On coreboot systems, the advertised LB_TAG_FRAMEBUFFER entry
+ * in the coreboot table should only be used if the payload did
+	 * not pass framebuffer information to the Linux kernel.
+ *
+ * If the global screen_info data has been filled, the Generic
+ * System Framebuffers (sysfb) will already register a platform
+ * device and pass that screen_info as platform_data to a driver
+ * that can scan-out using the system provided framebuffer.
+ */
+ if (sysfb_handles_screen_info())
+ return -ENODEV;
+
if (!fb->physical_address)
return -ENODEV;
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index d304913314e4..24e666d5c3d1 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -918,7 +918,8 @@ static __init int gsmi_init(void)
gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
if (IS_ERR(gsmi_dev.pdev)) {
printk(KERN_ERR "gsmi: unable to register platform device\n");
- return PTR_ERR(gsmi_dev.pdev);
+ ret = PTR_ERR(gsmi_dev.pdev);
+ goto out_unregister;
}
/* SMI access needs to be serialized */
@@ -1056,10 +1057,11 @@ out_err:
gsmi_buf_free(gsmi_dev.name_buf);
kmem_cache_destroy(gsmi_dev.mem_pool);
platform_device_unregister(gsmi_dev.pdev);
- pr_info("gsmi: failed to load: %d\n", ret);
+out_unregister:
#ifdef CONFIG_PM
platform_driver_unregister(&gsmi_driver_info);
#endif
+ pr_info("gsmi: failed to load: %d\n", ret);
return ret;
}
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index a3df782fa687..7c5c03f274b9 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -79,6 +79,25 @@ void sysfb_disable(struct device *dev)
}
EXPORT_SYMBOL_GPL(sysfb_disable);
+/**
+ * sysfb_handles_screen_info() - reports if sysfb handles the global screen_info
+ *
+ * Callers can use sysfb_handles_screen_info() to determine whether the Generic
+ * System Framebuffers (sysfb) can handle the global screen_info data structure
+ * or not. Drivers might need this information to know if they have to set up the
+ * system framebuffer, or if they have to delegate this action to sysfb instead.
+ *
+ * Returns:
+ * True if sysfb handles the global screen_info data structure.
+ */
+bool sysfb_handles_screen_info(void)
+{
+ const struct screen_info *si = &screen_info;
+
+ return !!screen_info_video_type(si);
+}
+EXPORT_SYMBOL_GPL(sysfb_handles_screen_info);
+
#if defined(CONFIG_PCI)
static bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 44819cdba7fb..971419e3a9bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -161,7 +161,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
* When GTT is just an alternative to VRAM make sure that we
* only use it as fallback and still try to fill up VRAM first.
*/
- if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
+ if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
+ !(adev->flags & AMD_IS_APU))
places[c].flags |= TTM_PL_FLAG_FALLBACK;
c++;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index b0a8abc7a8ec..341beec59537 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -35,21 +35,19 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
int fd,
int32_t priority)
{
- struct fd f = fdget(fd);
+ CLASS(fd, f)(fd);
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx_mgr *mgr;
struct amdgpu_ctx *ctx;
uint32_t id;
int r;
- if (!fd_file(f))
+ if (fd_empty(f))
return -EINVAL;
r = amdgpu_file_to_fpriv(fd_file(f), &fpriv);
- if (r) {
- fdput(f);
+ if (r)
return r;
- }
mgr = &fpriv->ctx_mgr;
mutex_lock(&mgr->lock);
@@ -57,7 +55,6 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
amdgpu_ctx_priority_override(ctx, priority);
mutex_unlock(&mgr->lock);
- fdput(f);
return 0;
}
@@ -66,31 +63,25 @@ static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev,
unsigned ctx_id,
int32_t priority)
{
- struct fd f = fdget(fd);
+ CLASS(fd, f)(fd);
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx *ctx;
int r;
- if (!fd_file(f))
+ if (fd_empty(f))
return -EINVAL;
r = amdgpu_file_to_fpriv(fd_file(f), &fpriv);
- if (r) {
- fdput(f);
+ if (r)
return r;
- }
ctx = amdgpu_ctx_get(fpriv, ctx_id);
- if (!ctx) {
- fdput(f);
+ if (!ctx)
return -EINVAL;
- }
amdgpu_ctx_priority_override(ctx, priority);
amdgpu_ctx_put(ctx);
- fdput(f);
-
return 0;
}
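The amdgpu_sched conversion above swaps manual fdget()/fdput() pairing for the scoped fd class: CLASS(fd, f)(fd) acquires the reference and arranges for it to be dropped automatically when f leaves scope, so every early return can lose its fdput() call. A condensed sketch of the resulting shape:

    #include <linux/file.h>

    static int example_use_fd(int fd)
    {
    	CLASS(fd, f)(fd);	/* reference dropped automatically at scope exit */

    	if (fd_empty(f))
    		return -EINVAL;	/* no fdput() needed on this path */

    	/* ... operate on fd_file(f) here ... */

    	return 0;
    }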
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index c76ac0dfe572..7a45f3fdc734 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1124,8 +1124,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
uint64_t *flags)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
- bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
- bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT | AMDGPU_GEM_CREATE_EXT_COHERENT);
+ bool is_vram = bo->tbo.resource &&
+ bo->tbo.resource->mem_type == TTM_PL_VRAM;
+ bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
+ AMDGPU_GEM_CREATE_EXT_COHERENT);
bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
struct amdgpu_vm *vm = mapping->bo_va->base.vm;
@@ -1133,6 +1135,8 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
bool snoop = false;
bool is_local;
+ dma_resv_assert_held(bo->tbo.base.resv);
+
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
@@ -1251,9 +1255,8 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID;
}
- if (bo && bo->tbo.resource)
- gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
- mapping, flags);
+ if ((*flags & AMDGPU_PTE_VALID) && bo)
+ gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
}
static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index a37a6801c9ea..b3175ff676f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -550,7 +550,7 @@ static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes, int pipe)
mes_set_hw_res_1_pkt.header.type = MES_API_TYPE_SCHEDULER;
mes_set_hw_res_1_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1;
mes_set_hw_res_1_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
- mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 100;
+ mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 0xa;
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_set_hw_res_1_pkt, sizeof(mes_set_hw_res_1_pkt),
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
index fb37e354a9d5..1ac730328516 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -247,6 +247,12 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data);
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(7, 7, 0):
+ data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23);
+ WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data);
+ break;
+ }
}
static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 4938e6b340e9..73065a85e0d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -67,8 +67,8 @@ static const struct amd_ip_funcs nv_common_ip_funcs;
/* Navi */
static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs nv_video_codecs_encode = {
@@ -94,8 +94,8 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode = {
/* Sienna Cichlid */
static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs sc_video_codecs_encode = {
@@ -136,8 +136,8 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 = {
/* SRIOV Sienna Cichlid, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8d16dacdc172..307185c0e1b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -90,8 +90,8 @@ static const struct amd_ip_funcs soc15_common_ip_funcs;
/* Vega, Raven, Arcturus */
static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_encode =
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index d30ad7d56def..bba35880badb 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -49,13 +49,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs;
/* SOC21 */
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};
@@ -96,14 +96,14 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 = {
/* SRIOV SOC21, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};
static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = {
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index fd4c3d4f8387..29a848f2466b 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -48,7 +48,7 @@
static const struct amd_ip_funcs soc24_common_ip_funcs;
static const struct amdgpu_video_codec_info vcn_5_0_0_video_codecs_encode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index d39c670f6220..792b2eb6bbac 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -136,15 +136,15 @@ static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[]
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
.max_width = 4096,
- .max_height = 2304,
- .max_pixels_per_frame = 4096 * 2304,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
.max_level = 0,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
.max_width = 4096,
- .max_height = 2304,
- .max_pixels_per_frame = 4096 * 2304,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
.max_level = 0,
},
};
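
All of the encode-capability hunks above raise max_height from 2304 to 4096 (and, for HEVC on Sienna Cichlid, max_width to 8192), so tall portrait streams no longer fail validation on height alone. A hedged sketch of the kind of check a caller would run against such a table (field names mirror the vi.c entries; stream_fits_caps itself is a hypothetical helper):

#include <stdbool.h>
#include <stdint.h>

struct codec_caps {
	uint32_t max_width;
	uint32_t max_height;
	uint32_t max_pixels_per_frame;
};

/* Would a w x h stream fit within these advertised caps? */
static bool stream_fits_caps(const struct codec_caps *caps,
			     uint32_t w, uint32_t h)
{
	return w <= caps->max_width &&
	       h <= caps->max_height &&
	       (uint64_t)w * h <= caps->max_pixels_per_frame;
}

With the old 4096x2304 limit, a rotated 2304x4096 stream failed the height check even though its pixel count was identical to an accepted 4096x2304 one; the raised limits above admit it.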
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 07e9ce99694f..8d97f17ffe66 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -6762,7 +6762,7 @@ create_stream_for_sink(struct drm_connector *connector,
if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
- aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+ aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
}
finish:
@@ -8875,6 +8875,56 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane,
}
}
+static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
+ const struct dm_crtc_state *acrtc_state,
+ const u64 current_ts)
+{
+ struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
+ struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+
+ if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if (pr->config.replay_supported && !pr->replay_feature_enabled)
+ amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
+ else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+ !psr->psr_feature_enabled)
+ if (!aconn->disallow_edp_enter_psr)
+ amdgpu_dm_link_setup_psr(acrtc_state->stream);
+ }
+
+ /* Decrement skip count when SR is enabled and we're doing fast updates. */
+ if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ (psr->psr_feature_enabled || pr->config.replay_supported)) {
+ if (aconn->sr_skip_count > 0)
+ aconn->sr_skip_count--;
+
+ /* Allow SR when skip count is 0. */
+ acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count;
+
+ /*
+ * If sink supports PSR SU/Panel Replay, there is no need to rely on
+ * a vblank event disable request to enable PSR/RP. PSR SU/RP
+ * can be enabled immediately once OS demonstrates an
+ * adequate number of fast atomic commits to notify KMD
+ * of update events. See `vblank_control_worker()`.
+ */
+ if (acrtc_attach->dm_irq_params.allow_sr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+ (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
+ if (pr->replay_feature_enabled && !pr->replay_allow_active)
+ amdgpu_dm_replay_enable(acrtc_state->stream, true);
+ if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
+ !psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
+ amdgpu_dm_psr_enable(acrtc_state->stream);
+ }
+ } else {
+ acrtc_attach->dm_irq_params.allow_sr_entry = false;
+ }
+}
+
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_device *dev,
struct amdgpu_display_manager *dm,
@@ -9028,7 +9078,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* during the PSR-SU was disabled.
*/
if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
- acrtc_attach->dm_irq_params.allow_psr_entry &&
+ acrtc_attach->dm_irq_params.allow_sr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
@@ -9203,9 +9253,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->stream_update.abm_level = &acrtc_state->abm_level;
mutex_lock(&dm->dc_lock);
- if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
- acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if (acrtc_state->stream->link->replay_settings.replay_allow_active)
+ amdgpu_dm_replay_disable(acrtc_state->stream);
+ if (acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+ }
mutex_unlock(&dm->dc_lock);
/*
@@ -9246,57 +9299,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dm_update_pflip_irq_state(drm_to_adev(dev),
acrtc_attach);
- if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
- if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
- !acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
- struct amdgpu_dm_connector *aconn =
- (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
- amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
- } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
- !acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
-
- struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
- acrtc_state->stream->dm_stream_context;
-
- if (!aconn->disallow_edp_enter_psr)
- amdgpu_dm_link_setup_psr(acrtc_state->stream);
- }
- }
-
- /* Decrement skip count when PSR is enabled and we're doing fast updates. */
- if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
- acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
- struct amdgpu_dm_connector *aconn =
- (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
-
- if (aconn->psr_skip_count > 0)
- aconn->psr_skip_count--;
-
- /* Allow PSR when skip count is 0. */
- acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
-
- /*
- * If sink supports PSR SU, there is no need to rely on
- * a vblank event disable request to enable PSR. PSR SU
- * can be enabled immediately once OS demonstrates an
- * adequate number of fast atomic commits to notify KMD
- * of update events. See `vblank_control_worker()`.
- */
- if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
- acrtc_attach->dm_irq_params.allow_psr_entry &&
-#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
- !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
-#endif
- !acrtc_state->stream->link->psr_settings.psr_allow_active &&
- !aconn->disallow_edp_enter_psr &&
- (timestamp_ns -
- acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
- 500000000)
- amdgpu_dm_psr_enable(acrtc_state->stream);
- } else {
- acrtc_attach->dm_irq_params.allow_psr_entry = false;
- }
-
+ amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns);
mutex_unlock(&dm->dc_lock);
}
@@ -12080,7 +12083,7 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
break;
}
- while (j < EDID_LENGTH) {
+ while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) {
struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
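
The amdgpu_dm changes above fold the PSR and Panel Replay entry conditions into one amdgpu_dm_enable_self_refresh() helper: fast updates decrement a per-connector skip count, self-refresh entry is allowed once the count reaches zero, and actual enablement additionally requires 500 ms since the last dirty-rect change. A condensed standalone sketch of that gating logic (plain C, with the driver's state collapsed into one stand-in struct):

#include <stdbool.h>
#include <stdint.h>

struct sr_state {
	int skip_count;		/* fast commits still to observe */
	bool allow_entry;
	uint64_t last_dirty_ns;	/* timestamp of last dirty-rect change */
};

/* Call on each fast update; returns true once self-refresh may enable. */
static bool sr_on_fast_update(struct sr_state *s, uint64_t now_ns)
{
	if (s->skip_count > 0)
		s->skip_count--;
	s->allow_entry = (s->skip_count == 0);
	return s->allow_entry &&
	       (now_ns - s->last_dirty_ns) > 500000000ULL;	/* 500 ms */
}

The 500000000 ns threshold is the same constant carried over from the old open-coded check.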
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 15d4690c74d6..90dfffec33cf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -727,7 +727,7 @@ struct amdgpu_dm_connector {
/* Cached display modes */
struct drm_display_mode freesync_vid_base;
- int psr_skip_count;
+ int sr_skip_count;
bool disallow_edp_enter_psr;
/* Record progress status of MST */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index a2cf2c066a76..288be19db7c1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -266,11 +266,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
* where the SU region is the full hactive*vactive region. See
* fill_dc_dirty_rects().
*/
- if (vblank_work->stream && vblank_work->stream->link) {
+ if (vblank_work->stream && vblank_work->stream->link && vblank_work->acrtc) {
amdgpu_dm_crtc_set_panel_sr_feature(
vblank_work, vblank_work->enable,
- vblank_work->acrtc->dm_irq_params.allow_psr_entry ||
- vblank_work->stream->link->replay_settings.replay_feature_enabled);
+ vblank_work->acrtc->dm_irq_params.allow_sr_entry);
}
if (dm->active_vblank_irq_count == 0) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
index 5c9303241aeb..6a7ecc1e4602 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -33,7 +33,7 @@ struct dm_irq_params {
struct mod_vrr_params vrr_params;
struct dc_stream_state *stream;
int active_planes;
- bool allow_psr_entry;
+ bool allow_sr_entry;
struct mod_freesync_config freesync_config;
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index be8fbb04ad98..c9a6de110b74 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -3122,14 +3122,12 @@ static enum bp_result bios_parser_get_vram_info(
struct dc_vram_info *info)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
- static enum bp_result result = BP_RESULT_BADBIOSTABLE;
+ enum bp_result result = BP_RESULT_BADBIOSTABLE;
struct atom_common_table_header *header;
struct atom_data_revision revision;
// vram info moved to umc_info for DCN4x
- if (dcb->ctx->dce_version >= DCN_VERSION_4_01 &&
- dcb->ctx->dce_version < DCN_VERSION_MAX &&
- info && DATA_TABLES(umc_info)) {
+ if (info && DATA_TABLES(umc_info)) {
header = GET_IMAGE(struct atom_common_table_header,
DATA_TABLES(umc_info));
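
The one-word bios_parser2.c change above (dropping static from the local result) fixes a classic pitfall: a static local is initialized once and then persists across calls, so a value left behind by one invocation leaks into the next. A minimal standalone demonstration of the persistence:

#include <stdio.h>

static int buggy_status(int fail)
{
	static int result = -1;	/* initialized once, persists across calls */

	if (!fail)
		result = 0;	/* only the success path overwrites it */
	return result;
}

int main(void)
{
	printf("%d\n", buggy_status(0));	/* prints 0: success recorded */
	printf("%d\n", buggy_status(1));	/* should fail, prints stale 0 */
	return 0;
}

In the parser the constant differs but the mechanism is identical: a result left over from an earlier call could be returned without the current call ever computing it.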
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 2597e3fd562b..e006f816ff2f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -265,6 +265,9 @@ struct dc_state *dc_state_create_copy(struct dc_state *src_state)
dc_state_copy_internal(new_state, src_state);
#ifdef CONFIG_DRM_AMD_DC_FP
+ new_state->bw_ctx.dml2 = NULL;
+ new_state->bw_ctx.dml2_dc_power_source = NULL;
+
if (src_state->bw_ctx.dml2 &&
!dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) {
dc_state_release(new_state);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index 1cf9015e854a..dd9971867f74 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -8,6 +8,7 @@
#include "dml2_pmo_dcn4_fams2.h"
static const double MIN_VACTIVE_MARGIN_PCT = 0.25; // We need more than non-zero margin because DET buffer granularity can alter vactive latency hiding
+static const double MIN_BLANK_STUTTER_FACTOR = 3.0;
static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = {
// VActive Preferred
@@ -2139,6 +2140,7 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
struct dml2_pmo_instance *pmo = in_out->instance;
bool stutter_period_meets_z8_eco = true;
bool z8_stutter_optimization_too_expensive = false;
+ bool stutter_optimization_too_expensive = false;
double line_time_us, vblank_nom_time_us;
unsigned int i;
@@ -2160,10 +2162,15 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
line_time_us = (double)in_out->base_display_config->display_config.stream_descriptors[i].timing.h_total / (in_out->base_display_config->display_config.stream_descriptors[i].timing.pixel_clock_khz * 1000) * 1000000;
vblank_nom_time_us = line_time_us * in_out->base_display_config->display_config.stream_descriptors[i].timing.vblank_nom;
- if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us) {
+ if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
z8_stutter_optimization_too_expensive = true;
break;
}
+
+ if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
+ stutter_optimization_too_expensive = true;
+ break;
+ }
}
pmo->scratch.pmo_dcn4.num_stutter_candidates = 0;
@@ -2179,7 +2186,7 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
pmo->scratch.pmo_dcn4.z8_vblank_optimizable = false;
}
- if (pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
+ if (!stutter_optimization_too_expensive && pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.num_stutter_candidates] = (unsigned int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us;
pmo->scratch.pmo_dcn4.num_stutter_candidates++;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index ee1bcfaae3e3..80e60ea2d11e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1259,33 +1259,26 @@ static int smu_sw_init(void *handle)
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
- smu->user_dpm_profile.user_workload_mask = 0;
atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
- smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
- smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
- smu->workload_priority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
- smu->workload_priority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
- smu->workload_priority[PP_SMC_POWER_PROFILE_VR] = 4;
- smu->workload_priority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
- smu->workload_priority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
if (smu->is_apu ||
- !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) {
- smu->driver_workload_mask =
- 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
- } else {
- smu->driver_workload_mask =
- 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
- smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
- }
+ !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
+ smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+ else
+ smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
- smu->workload_mask = smu->driver_workload_mask |
- smu->user_dpm_profile.user_workload_mask;
smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
@@ -2355,20 +2348,17 @@ static int smu_switch_power_profile(void *handle,
return -EINVAL;
if (!en) {
- smu->driver_workload_mask &= ~(1 << smu->workload_priority[type]);
+ smu->workload_mask &= ~(1 << smu->workload_prority[type]);
index = fls(smu->workload_mask);
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
} else {
- smu->driver_workload_mask |= (1 << smu->workload_priority[type]);
+ smu->workload_mask |= (1 << smu->workload_prority[type]);
index = fls(smu->workload_mask);
index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
}
- smu->workload_mask = smu->driver_workload_mask |
- smu->user_dpm_profile.user_workload_mask;
-
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
smu_bump_power_profile_mode(smu, workload, 0);
@@ -3059,23 +3049,12 @@ static int smu_set_power_profile_mode(void *handle,
uint32_t param_size)
{
struct smu_context *smu = handle;
- int ret;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
!smu->ppt_funcs->set_power_profile_mode)
return -EOPNOTSUPP;
- if (smu->user_dpm_profile.user_workload_mask &
- (1 << smu->workload_priority[param[param_size]]))
- return 0;
-
- smu->user_dpm_profile.user_workload_mask =
- (1 << smu->workload_priority[param[param_size]]);
- smu->workload_mask = smu->user_dpm_profile.user_workload_mask |
- smu->driver_workload_mask;
- ret = smu_bump_power_profile_mode(smu, param, param_size);
-
- return ret;
+ return smu_bump_power_profile_mode(smu, param, param_size);
}
static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
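
Throughout the revert above, the active profile is derived from the workload bitmask by taking its highest set bit with fls() and using that as an index into workload_setting[]. A standalone sketch of that selection, with a plain-C fls32() standing in for the kernel's fls():

#include <stdint.h>
#include <stdio.h>

#define WORKLOAD_POLICY_MAX 7

/* Find last (most significant) set bit, 1-based; 0 for an empty mask. */
static int fls32(uint32_t x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	static const char * const setting[WORKLOAD_POLICY_MAX] = {
		"BOOTUP_DEFAULT", "FULLSCREEN3D", "POWERSAVING",
		"VIDEO", "VR", "COMPUTE", "CUSTOM",
	};
	uint32_t mask = (1u << 1) | (1u << 3);	/* FULLSCREEN3D and VIDEO */
	int index = fls32(mask);

	index = (index > 0 && index <= WORKLOAD_POLICY_MAX) ? index - 1 : 0;
	printf("active profile: %s\n", setting[index]);	/* VIDEO */
	return 0;
}

Because the bit position doubles as the priority, whichever enabled workload carries the highest number wins, mirroring the index = fls(...) lines in smu_switch_power_profile() above.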
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index d60d9a12a47e..b44a185d07e8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -240,7 +240,6 @@ struct smu_user_dpm_profile {
/* user clock state information */
uint32_t clk_mask[SMU_CLK_COUNT];
uint32_t clk_dependency;
- uint32_t user_workload_mask;
};
#define SMU_TABLE_INIT(tables, table_id, s, a, d) \
@@ -558,8 +557,7 @@ struct smu_context {
bool disable_uclk_switch;
uint32_t workload_mask;
- uint32_t driver_workload_mask;
- uint32_t workload_priority[WORKLOAD_POLICY_MAX];
+ uint32_t workload_prority[WORKLOAD_POLICY_MAX];
uint32_t workload_setting[WORKLOAD_POLICY_MAX];
uint32_t power_profile_mode;
uint32_t default_power_profile_mode;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 31fe512028f4..c0f6b59369b7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1455,6 +1455,7 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
return -EINVAL;
}
+
if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
(smu->smc_fw_version >= 0x360d00)) {
if (size != 10)
@@ -1522,14 +1523,14 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
- smu->workload_mask,
+ 1 << workload_type,
NULL);
if (ret) {
dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
return ret;
}
- smu_cmn_assign_power_profile(smu);
+ smu->power_profile_mode = profile_mode;
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 12223f507977..16af1a329621 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2081,13 +2081,10 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
smu->power_profile_mode);
if (workload_type < 0)
return -EINVAL;
-
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- smu->workload_mask, NULL);
+ 1 << workload_type, NULL);
if (ret)
dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
- else
- smu_cmn_assign_power_profile(smu);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 3b7b2ec8319a..9c3c48297cba 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1786,13 +1786,10 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
smu->power_profile_mode);
if (workload_type < 0)
return -EINVAL;
-
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- smu->workload_mask, NULL);
+ 1 << workload_type, NULL);
if (ret)
dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
- else
- smu_cmn_assign_power_profile(smu);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 952ee22cbc90..1fe020f1f4db 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -1079,7 +1079,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input,
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
- smu->workload_mask,
+ 1 << workload_type,
NULL);
if (ret) {
dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
@@ -1087,7 +1087,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input,
return ret;
}
- smu_cmn_assign_power_profile(smu);
+ smu->power_profile_mode = profile_mode;
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 62316a6707ef..cc0504b063fa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -890,14 +890,14 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
- smu->workload_mask,
+ 1 << workload_type,
NULL);
if (ret) {
dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
return ret;
}
- smu_cmn_assign_power_profile(smu);
+ smu->power_profile_mode = profile_mode;
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 5dd7ceca64fe..d53e162dcd8d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2485,7 +2485,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int workload_type, ret = 0;
- u32 workload_mask;
+ u32 workload_mask, selected_workload_mask;
smu->power_profile_mode = input[size];
@@ -2552,7 +2552,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
return -EINVAL;
- workload_mask = 1 << workload_type;
+ selected_workload_mask = workload_mask = 1 << workload_type;
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
@@ -2567,22 +2567,12 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
workload_mask |= 1 << workload_type;
}
- smu->workload_mask |= workload_mask;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
- smu->workload_mask,
+ workload_mask,
NULL);
- if (!ret) {
- smu_cmn_assign_power_profile(smu);
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) {
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- PP_SMC_POWER_PROFILE_FULLSCREEN3D);
- smu->power_profile_mode = smu->workload_mask & (1 << workload_type)
- ? PP_SMC_POWER_PROFILE_FULLSCREEN3D
- : PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
- }
- }
+ if (!ret)
+ smu->workload_mask = selected_workload_mask;
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 9d0b19419de0..b891a5e0a396 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2499,14 +2499,13 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp
smu->power_profile_mode);
if (workload_type < 0)
return -EINVAL;
-
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- smu->workload_mask, NULL);
+ 1 << workload_type, NULL);
if (ret)
dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
else
- smu_cmn_assign_power_profile(smu);
+ smu->workload_mask = (1 << workload_type);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 8798ebfcea83..84f9b007b59f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -1132,7 +1132,7 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, size = 0, ret = 0;
+ int i, idx, ret = 0, size = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
@@ -1168,7 +1168,8 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
break;
for (i = 0; i < count; i++) {
- ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, idx, &value);
if (ret)
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 1aa13d32ceb2..1e16a281f2dc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1807,11 +1807,12 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
return -EINVAL;
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- smu->workload_mask, NULL);
-
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetWorkloadMask,
+ 1 << workload_type,
+ NULL);
if (!ret)
- smu_cmn_assign_power_profile(smu);
+ smu->workload_mask = 1 << workload_type;
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index bdfc5e617333..91ad434bcdae 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -1138,14 +1138,6 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
return ret;
}
-void smu_cmn_assign_power_profile(struct smu_context *smu)
-{
- uint32_t index;
- index = fls(smu->workload_mask);
- index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
- smu->power_profile_mode = smu->workload_setting[index];
-}
-
bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
struct pci_dev *p = NULL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 8a801e389659..1de685defe85 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -130,8 +130,6 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
int smu_cmn_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state);
-void smu_cmn_assign_power_profile(struct smu_context *smu);
-
/*
* Helper function to make sysfs_emit_at() happy. Align buf to
* the current page boundary and record the offset.
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index 0e8813278a2f..bb1750a3dab0 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -125,6 +125,9 @@
#define TC358768_DSI_CONFW_MODE_CLR (6 << 29)
#define TC358768_DSI_CONFW_ADDR_DSI_CONTROL (0x3 << 24)
+/* TC358768_DSICMD_TX (0x0600) register */
+#define TC358768_DSI_CMDTX_DC_START BIT(0)
+
static const char * const tc358768_supplies[] = {
"vddc", "vddmipi", "vddio"
};
@@ -229,6 +232,21 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
tc358768_write(priv, reg, tmp);
}
+static void tc358768_dsicmd_tx(struct tc358768_priv *priv)
+{
+ u32 val;
+
+ /* start transfer */
+ tc358768_write(priv, TC358768_DSICMD_TX, TC358768_DSI_CMDTX_DC_START);
+ if (priv->error)
+ return;
+
+ /* wait transfer completion */
+ priv->error = regmap_read_poll_timeout(priv->regmap, TC358768_DSICMD_TX, val,
+ (val & TC358768_DSI_CMDTX_DC_START) == 0,
+ 100, 100000);
+}
+
static int tc358768_sw_reset(struct tc358768_priv *priv)
{
/* Assert Reset */
@@ -516,8 +534,7 @@ static ssize_t tc358768_dsi_host_transfer(struct mipi_dsi_host *host,
}
}
- /* start transfer */
- tc358768_write(priv, TC358768_DSICMD_TX, 1);
+ tc358768_dsicmd_tx(priv);
ret = tc358768_clear_error(priv);
if (ret)
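
The new tc358768_dsicmd_tx() helper above stops assuming the command engine finishes instantly: it writes the start bit and then polls TC358768_DSICMD_TX until the hardware clears it, via regmap_read_poll_timeout() with a 100 us interval and a 100 ms timeout. The same wait-until-clear pattern in generic userspace C (read_reg() is a hypothetical register accessor):

#include <errno.h>
#include <stdint.h>
#include <time.h>

extern uint32_t read_reg(uint32_t addr);	/* hypothetical MMIO read */

#define DC_START_BIT 0x1u

/* Poll until the start bit self-clears, or give up after timeout_us. */
static int wait_tx_done(uint32_t reg, unsigned long timeout_us)
{
	struct timespec interval = { .tv_nsec = 100 * 1000 };	/* 100 us */
	unsigned long waited_us = 0;

	while (read_reg(reg) & DC_START_BIT) {
		if (waited_us >= timeout_us)
			return -ETIMEDOUT;
		nanosleep(&interval, NULL);
		waited_us += 100;
	}
	return 0;
}

Checking completion (rather than fire-and-forget) also lets a stuck transfer surface through priv->error and tc358768_clear_error(), as the hunk above shows.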
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 8e3d2d7060f8..4f2ab8a7b50f 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -712,16 +712,14 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
int fd, u32 *handle)
{
struct drm_syncobj *syncobj;
- struct fd f = fdget(fd);
+ CLASS(fd, f)(fd);
int ret;
- if (!fd_file(f))
+ if (fd_empty(f))
return -EINVAL;
- if (fd_file(f)->f_op != &drm_syncobj_file_fops) {
- fdput(f);
+ if (fd_file(f)->f_op != &drm_syncobj_file_fops)
return -EINVAL;
- }
/* take a reference to put in the idr */
syncobj = fd_file(f)->private_data;
@@ -739,7 +737,6 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
} else
drm_syncobj_put(syncobj);
- fdput(f);
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 581844d1db9a..5fee4be64592 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -928,7 +928,7 @@ intel_enable_tv(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(encoder);
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc));
@@ -942,7 +942,7 @@ intel_disable_tv(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(encoder);
intel_de_rmw(display, TV_CTL, TV_ENC_ENABLE, 0);
}
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 1fb6ff77fd89..bb696b29ee2c 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -40,7 +40,7 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
if (i915_gem_object_is_shmem(obj)) {
file = obj->base.filp;
- atomic_long_inc(&file->f_count);
+ get_file(file);
return file;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index 551b0d7974ff..5dc0ccd07636 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -80,6 +80,7 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, s
const struct intel_gsc_cpd_header_v2 *cpd_header = NULL;
const struct intel_gsc_cpd_entry *cpd_entry = NULL;
const struct intel_gsc_manifest_header *manifest;
+ struct intel_uc_fw_ver min_ver = { 0 };
size_t min_size = sizeof(*layout);
int i;
@@ -212,33 +213,46 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, s
}
}
- if (IS_ARROWLAKE(gt->i915)) {
+ /*
+ * ARL SKUs require newer firmwares, but the blob is actually common
+ * across all MTL and ARL SKUs, so we need to do an explicit version check
+ * here rather than using a separate table entry. If a too old version
+ * is found, then just don't use GSC rather than aborting the driver load.
+ * Note that the major number in the GSC FW version is used to indicate
+ * the platform, so we expect it to always be 102 for MTL/ARL binaries.
+ */
+ if (IS_ARROWLAKE_S(gt->i915))
+ min_ver = (struct intel_uc_fw_ver){ 102, 0, 10, 1878 };
+ else if (IS_ARROWLAKE_H(gt->i915) || IS_ARROWLAKE_U(gt->i915))
+ min_ver = (struct intel_uc_fw_ver){ 102, 1, 15, 1926 };
+
+ if (IS_METEORLAKE(gt->i915) && gsc->release.major != 102) {
+ gt_info(gt, "Invalid GSC firmware for MTL/ARL, got %d.%d.%d.%d but need 102.x.x.x",
+ gsc->release.major, gsc->release.minor,
+ gsc->release.patch, gsc->release.build);
+ return -EINVAL;
+ }
+
+ if (min_ver.major) {
bool too_old = false;
- /*
- * ARL requires a newer firmware than MTL did (102.0.10.1878) but the
- * firmware is actually common. So, need to do an explicit version check
- * here rather than using a separate table entry. And if the older
- * MTL-only version is found, then just don't use GSC rather than aborting
- * the driver load.
- */
- if (gsc->release.major < 102) {
+ if (gsc->release.minor < min_ver.minor) {
too_old = true;
- } else if (gsc->release.major == 102) {
- if (gsc->release.minor == 0) {
- if (gsc->release.patch < 10) {
+ } else if (gsc->release.minor == min_ver.minor) {
+ if (gsc->release.patch < min_ver.patch) {
+ too_old = true;
+ } else if (gsc->release.patch == min_ver.patch) {
+ if (gsc->release.build < min_ver.build)
too_old = true;
- } else if (gsc->release.patch == 10) {
- if (gsc->release.build < 1878)
- too_old = true;
- }
}
}
if (too_old) {
- gt_info(gt, "GSC firmware too old for ARL, got %d.%d.%d.%d but need at least 102.0.10.1878",
+ gt_info(gt, "GSC firmware too old for ARL, got %d.%d.%d.%d but need at least %d.%d.%d.%d",
gsc->release.major, gsc->release.minor,
- gsc->release.patch, gsc->release.build);
+ gsc->release.patch, gsc->release.build,
+ min_ver.major, min_ver.minor,
+ min_ver.patch, min_ver.build);
return -EINVAL;
}
}
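
The GSC firmware check above generalizes a hard-coded 102.0.10.1878 floor into a per-SKU min_ver tuple compared field by field: minor first, then patch, then build, with the major number handled separately by the MTL/ARL "must be 102" guard. The same tuple comparison as a standalone helper (the struct mirrors intel_uc_fw_ver; the function is an illustrative stand-in, not the driver's API):

#include <stdbool.h>

struct fw_ver {
	int major, minor, patch, build;
};

/* True if 'got' is older than 'min'; majors assumed already validated. */
static bool fw_too_old(const struct fw_ver *got, const struct fw_ver *min)
{
	if (got->minor != min->minor)
		return got->minor < min->minor;
	if (got->patch != min->patch)
		return got->patch < min->patch;
	return got->build < min->build;
}

Flattening the old nested if-chain this way makes it obvious the comparison is plain lexicographic order over (minor, patch, build).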
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 39f6614a0a99..aa0b1bfb38e0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -540,8 +540,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_LUNARLAKE(i915) (0 && i915)
#define IS_BATTLEMAGE(i915) (0 && i915)
-#define IS_ARROWLAKE(i915) \
- IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL)
+#define IS_ARROWLAKE_H(i915) \
+ IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_H)
+#define IS_ARROWLAKE_U(i915) \
+ IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_U)
+#define IS_ARROWLAKE_S(i915) \
+ IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_S)
#define IS_DG2_G10(i915) \
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(i915) \
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 3c47c625993e..467999249b9a 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -200,8 +200,16 @@ static const u16 subplatform_g12_ids[] = {
INTEL_DG2_G12_IDS(ID),
};
-static const u16 subplatform_arl_ids[] = {
- INTEL_ARL_IDS(ID),
+static const u16 subplatform_arl_h_ids[] = {
+ INTEL_ARL_H_IDS(ID),
+};
+
+static const u16 subplatform_arl_u_ids[] = {
+ INTEL_ARL_U_IDS(ID),
+};
+
+static const u16 subplatform_arl_s_ids[] = {
+ INTEL_ARL_S_IDS(ID),
};
static bool find_devid(u16 id, const u16 *p, unsigned int num)
@@ -261,9 +269,15 @@ static void intel_device_info_subplatform_init(struct drm_i915_private *i915)
} else if (find_devid(devid, subplatform_g12_ids,
ARRAY_SIZE(subplatform_g12_ids))) {
mask = BIT(INTEL_SUBPLATFORM_G12);
- } else if (find_devid(devid, subplatform_arl_ids,
- ARRAY_SIZE(subplatform_arl_ids))) {
- mask = BIT(INTEL_SUBPLATFORM_ARL);
+ } else if (find_devid(devid, subplatform_arl_h_ids,
+ ARRAY_SIZE(subplatform_arl_h_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_ARL_H);
+ } else if (find_devid(devid, subplatform_arl_u_ids,
+ ARRAY_SIZE(subplatform_arl_u_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_ARL_U);
+ } else if (find_devid(devid, subplatform_arl_s_ids,
+ ARRAY_SIZE(subplatform_arl_s_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_ARL_S);
}
GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK);
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 643ff1bf74ee..a9fcaf33df9e 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -128,7 +128,9 @@ enum intel_platform {
#define INTEL_SUBPLATFORM_RPLU 2
/* MTL */
-#define INTEL_SUBPLATFORM_ARL 0
+#define INTEL_SUBPLATFORM_ARL_H 0
+#define INTEL_SUBPLATFORM_ARL_U 1
+#define INTEL_SUBPLATFORM_ARL_S 2
enum intel_ppgtt_type {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 61da4ed9d521..0727492576be 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -4,7 +4,7 @@
* Copyright © 2018 Intel Corporation
*/
-#include <linux/random.h>
+#include <linux/prandom.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
index 05364eca20f7..70330a2e80f2 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.h
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -26,7 +26,7 @@
#define __I915_SELFTESTS_RANDOM_H__
#include <linux/math64.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include "../i915_selftest.h"
diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c
index 805c4bfb85fe..7e59591bbed6 100644
--- a/drivers/gpu/drm/i915/selftests/scatterlist.c
+++ b/drivers/gpu/drm/i915/selftests/scatterlist.c
@@ -22,7 +22,7 @@
*/
#include <linux/prime_numbers.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include "i915_selftest.h"
#include "i915_utils.h"
diff --git a/drivers/gpu/drm/lib/drm_random.h b/drivers/gpu/drm/lib/drm_random.h
index 5543bf0474bc..9f827260a89d 100644
--- a/drivers/gpu/drm/lib/drm_random.h
+++ b/drivers/gpu/drm/lib/drm_random.h
@@ -6,7 +6,7 @@
* be transposed to lib/ at the earliest convenience.
*/
-#include <linux/random.h>
+#include <linux/prandom.h>
#define DRM_RND_STATE_INITIALIZER(seed__) ({ \
struct rnd_state state__; \
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
index 027867c2a8c5..99110ab2f44d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
@@ -992,7 +992,7 @@ r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8
ctrl->data = data;
ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
- if (ret == -EAGAIN && ctrl->retryTimeMs) {
+ if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) {
/*
* Device (likely an eDP panel) isn't ready yet, wait for the time specified
* by GSP before retrying again
@@ -1060,33 +1060,44 @@ r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *ctrl;
u8 size = *psize;
int ret;
+ int retries;
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ for (retries = 0; retries < 3; ++retries) {
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
- ctrl->subDeviceInstance = 0;
- ctrl->displayId = BIT(outp->index);
- ctrl->bAddrOnly = !size;
- ctrl->cmd = type;
- if (ctrl->bAddrOnly) {
- ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE);
- ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE);
- }
- ctrl->addr = addr;
- ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
- memcpy(ctrl->data, data, size);
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(outp->index);
+ ctrl->bAddrOnly = !size;
+ ctrl->cmd = type;
+ if (ctrl->bAddrOnly) {
+ ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE);
+ ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE);
+ }
+ ctrl->addr = addr;
+ ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
+ memcpy(ctrl->data, data, size);
- ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
- if (ret) {
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return ret;
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) {
+ /*
+ * Device (likely an eDP panel) isn't ready yet, wait for the time specified
+ * by GSP before retrying again
+ */
+ nvkm_debug(&disp->engine.subdev,
+ "Waiting %dms for GSP LT panel delay before retrying in AUX\n",
+ ctrl->retryTimeMs);
+ msleep(ctrl->retryTimeMs);
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ } else {
+ memcpy(data, ctrl->data, size);
+ *psize = ctrl->size;
+ ret = ctrl->replyType;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ break;
+ }
}
-
- memcpy(data, ctrl->data, size);
- *psize = ctrl->size;
- ret = ctrl->replyType;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
return ret;
}
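
The r535_dp_aux_xfer() rework above wraps the entire control push in a bounded loop: up to three attempts, where an -EAGAIN or -EBUSY accompanied by a GSP-supplied retryTimeMs sleeps and retries, and any other outcome finishes the transfer. Its skeleton, reduced to plain C (attempt_once() and its out-parameter are hypothetical stand-ins for the GSP control push):

#include <errno.h>
#include <unistd.h>

extern int attempt_once(unsigned int *retry_ms);	/* hypothetical op */

static int bounded_retry(void)
{
	unsigned int retry_ms = 0;
	int ret = -EIO;
	int tries;

	for (tries = 0; tries < 3; tries++) {
		ret = attempt_once(&retry_ms);
		if ((ret == -EAGAIN || ret == -EBUSY) && retry_ms) {
			usleep(retry_ms * 1000);	/* panel not ready */
			continue;
		}
		break;	/* success, or an error that retrying won't fix */
	}
	return ret;
}

The cap on attempts matters: without it, a panel that keeps reporting NV_ERR_NOT_READY would stall the AUX channel indefinitely.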
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
index a1c8545f1249..cac6d64ab67d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
@@ -89,11 +89,6 @@ nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user,
nvkm_falcon_fw_dtor_sigs(fw);
}
- /* after last write to the img, sync dma mappings */
- dma_sync_single_for_device(fw->fw.device->dev,
- fw->fw.phys,
- sg_dma_len(&fw->fw.mem.sgl),
- DMA_TO_DEVICE);
FLCNFW_DBG(fw, "resetting");
fw->func->reset(fw);
@@ -105,6 +100,12 @@ nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user,
goto done;
}
+ /* after last write to the img, sync dma mappings */
+ dma_sync_single_for_device(fw->fw.device->dev,
+ fw->fw.phys,
+ sg_dma_len(&fw->fw.mem.sgl),
+ DMA_TO_DEVICE);
+
ret = fw->func->load(fw);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index cf58f9da9139..d586aea30898 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -78,7 +78,7 @@ r535_rpc_status_to_errno(uint32_t rpc_status)
switch (rpc_status) {
case 0x55: /* NV_ERR_NOT_READY */
case 0x66: /* NV_ERR_TIMEOUT_RETRY */
- return -EAGAIN;
+ return -EBUSY;
case 0x51: /* NV_ERR_NO_MEMORY */
return -ENOMEM;
default:
@@ -601,7 +601,7 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
if (rpc->status) {
ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
- if (PTR_ERR(ret) != -EAGAIN)
+ if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
} else {
ret = repc ? rpc->params : NULL;
@@ -660,7 +660,7 @@ r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc)
if (rpc->status) {
ret = r535_rpc_status_to_errno(rpc->status);
- if (ret != -EAGAIN)
+ if (ret != -EAGAIN && ret != -EBUSY)
nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
object->client->object.handle, object->handle, rpc->cmd, rpc->status);
}
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 7db2edb3374c..0e6f94df690d 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -990,6 +990,8 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
if (!size)
break;
+
+ offset = 0;
}
return panthor_vm_flush_range(vm, start_iova, iova - start_iova);
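
The one-line panthor fix above resets offset to zero after the first scatterlist segment is consumed: the caller's starting offset is meaningful only inside the first entry, and without the reset every later segment would be mapped from the same stale offset, silently skipping its head bytes. A toy walk over segments showing the corrected pattern (assumes offset < segs[0].len):

#include <stddef.h>
#include <stdio.h>

struct seg { size_t len; };

static void map_range(const struct seg *segs, int nsegs,
		      size_t offset, size_t size)
{
	for (int i = 0; i < nsegs && size; i++) {
		size_t avail = segs[i].len - offset;
		size_t chunk = size < avail ? size : avail;

		printf("map seg %d at +%zu, len %zu\n", i, offset, chunk);
		size -= chunk;
		offset = 0;	/* the fix: offset applies to segment 0 only */
	}
}

Dropping the reset is easy to miss because single-segment mappings, the common case, behave identically either way.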
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index f161f40d8ce4..69900138295b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1093,10 +1093,10 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane,
if (!plane->state->fb)
return -EINVAL;
- if (state)
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- new_plane_state->crtc);
- else /* Special case for asynchronous cursor updates. */
+ crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc);
+
+ /* Special case for asynchronous cursor updates. */
+ if (!crtc_state)
crtc_state = plane->crtc->state;
return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 3353e97687d1..a17e62867f3b 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -471,7 +471,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
*/
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
- return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
+ return file_ref_get(&dmabuf->file->f_ref);
}
/**
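
get_dma_buf_unless_doomed() above now goes through file_ref_get(), but the semantics it needs are unchanged: take a reference only if the count has not already dropped to zero, so a file in the middle of its final fput() cannot be resurrected. The classic compare-and-swap form of "increment unless zero" in standalone C11:

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference unless the object is already being torn down. */
static bool ref_get_unless_zero(atomic_long *count)
{
	long old = atomic_load(count);

	while (old != 0) {
		if (atomic_compare_exchange_weak(count, &old, old + 1))
			return true;	/* reference taken */
		/* CAS failed: 'old' was reloaded, loop and re-check */
	}
	return false;	/* already zero: caller must not touch the object */
}

A plain atomic increment would be wrong here, because it could raise the count from 0 back to 1 after teardown had already begun.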
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 63b8d7591253..10d596cb4b40 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1265,6 +1265,8 @@ static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
+ if (WARN_ON(!bo))
+ return -EINVAL;
return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
}
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index e5f51fd23c65..2a093540354e 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -886,8 +886,8 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
if (WARN_ON(!xe_bo_is_pinned(bo)))
return -EINVAL;
- if (WARN_ON(!xe_bo_is_vram(bo)))
- return -EINVAL;
+ if (!xe_bo_is_vram(bo))
+ return 0;
ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
if (ret)
@@ -937,6 +937,7 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
.interruptible = false,
};
struct ttm_resource *new_mem;
+ struct ttm_place *place = &bo->placements[0];
int ret;
xe_bo_assert_held(bo);
@@ -947,9 +948,15 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
if (WARN_ON(!xe_bo_is_pinned(bo)))
return -EINVAL;
- if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm))
+ if (WARN_ON(xe_bo_is_vram(bo)))
+ return -EINVAL;
+
+ if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo)))
return -EINVAL;
+ if (!mem_type_is_vram(place->mem_type))
+ return 0;
+
ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
if (ret)
return ret;
@@ -1719,6 +1726,7 @@ int xe_bo_pin_external(struct xe_bo *bo)
int xe_bo_pin(struct xe_bo *bo)
{
+ struct ttm_place *place = &bo->placements[0];
struct xe_device *xe = xe_bo_device(bo);
int err;
@@ -1749,21 +1757,21 @@ int xe_bo_pin(struct xe_bo *bo)
*/
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
- struct ttm_place *place = &(bo->placements[0]);
-
if (mem_type_is_vram(place->mem_type)) {
xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
-
- spin_lock(&xe->pinned.lock);
- list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
- spin_unlock(&xe->pinned.lock);
}
}
+ if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
+ spin_lock(&xe->pinned.lock);
+ list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
+ spin_unlock(&xe->pinned.lock);
+ }
+
ttm_bo_pin(&bo->ttm);
/*
@@ -1809,23 +1817,18 @@ void xe_bo_unpin_external(struct xe_bo *bo)
void xe_bo_unpin(struct xe_bo *bo)
{
+ struct ttm_place *place = &bo->placements[0];
struct xe_device *xe = xe_bo_device(bo);
xe_assert(xe, !bo->ttm.base.import_attach);
xe_assert(xe, xe_bo_is_pinned(bo));
- if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
- bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
- struct ttm_place *place = &(bo->placements[0]);
-
- if (mem_type_is_vram(place->mem_type)) {
- spin_lock(&xe->pinned.lock);
- xe_assert(xe, !list_empty(&bo->pinned_link));
- list_del_init(&bo->pinned_link);
- spin_unlock(&xe->pinned.lock);
- }
+ if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
+ spin_lock(&xe->pinned.lock);
+ xe_assert(xe, !list_empty(&bo->pinned_link));
+ list_del_init(&bo->pinned_link);
+ spin_unlock(&xe->pinned.lock);
}
-
ttm_bo_unpin(&bo->ttm);
}
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 541b49007d73..8fb2be061003 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -34,14 +34,22 @@ int xe_bo_evict_all(struct xe_device *xe)
u8 id;
int ret;
- if (!IS_DGFX(xe))
- return 0;
-
/* User memory */
- for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
+ for (mem_type = XE_PL_TT; mem_type <= XE_PL_VRAM1; ++mem_type) {
struct ttm_resource_manager *man =
ttm_manager_type(bdev, mem_type);
+ /*
+ * On igpu platforms with flat CCS we need to ensure we save and restore any CCS
+ * state since this state lives inside graphics stolen memory which doesn't survive
+ * hibernation.
+ *
+ * This can be further improved by only evicting objects that we know have actually
+ * used a compression enabled PAT index.
+ */
+ if (mem_type == XE_PL_TT && (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe)))
+ continue;
+
if (man) {
ret = ttm_resource_manager_evict_all(bdev, man);
if (ret)
@@ -125,9 +133,6 @@ int xe_bo_restore_kernel(struct xe_device *xe)
struct xe_bo *bo;
int ret;
- if (!IS_DGFX(xe))
- return 0;
-
spin_lock(&xe->pinned.lock);
for (;;) {
bo = list_first_entry_or_null(&xe->pinned.evicted,
@@ -159,7 +164,6 @@ int xe_bo_restore_kernel(struct xe_device *xe)
* should setup the iosys map.
*/
xe_assert(xe, !iosys_map_is_null(&bo->vmap));
- xe_assert(xe, xe_bo_is_vram(bo));
xe_bo_put(bo);
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 756b492f13b0..31cca938956f 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -203,14 +203,14 @@ retry:
write_locked = false;
}
if (err)
- goto err_syncs;
+ goto err_hw_exec_mode;
if (write_locked) {
err = xe_vm_userptr_pin(vm);
downgrade_write(&vm->lock);
write_locked = false;
if (err)
- goto err_hw_exec_mode;
+ goto err_unlock_list;
}
if (!args->num_batch_buffer) {
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 2804f14f8f29..78823f53d290 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -1206,9 +1206,11 @@ static int xe_oa_release(struct inode *inode, struct file *file)
struct xe_oa_stream *stream = file->private_data;
struct xe_gt *gt = stream->gt;
+ xe_pm_runtime_get(gt_to_xe(gt));
mutex_lock(&gt->oa.gt_lock);
xe_oa_destroy_locked(stream);
mutex_unlock(&gt->oa.gt_lock);
+ xe_pm_runtime_put(gt_to_xe(gt));
/* Release the reference the OA stream kept on the driver */
drm_dev_put(&gt_to_xe(gt)->drm);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 67aebfe0fed6..ac4d8faa3886 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1069,6 +1069,47 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
.enter = NULL }
};
+static struct cpuidle_state gnrd_cstates[] __initdata = {
+ {
+ .name = "C1",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 4,
+ .target_residency = 4,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
+ CPUIDLE_FLAG_INIT_XSTATE |
+ CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
+ .exit_latency = 220,
+ .target_residency = 650,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6P",
+ .desc = "MWAIT 0x21",
+ .flags = MWAIT2flg(0x21) | CPUIDLE_FLAG_TLB_FLUSHED |
+ CPUIDLE_FLAG_INIT_XSTATE |
+ CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
+ .exit_latency = 240,
+ .target_residency = 750,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
static struct cpuidle_state atom_cstates[] __initdata = {
{
.name = "C1E",
@@ -1508,6 +1549,12 @@ static const struct idle_cpu idle_cpu_gnr __initconst = {
.use_acpi = true,
};
+static const struct idle_cpu idle_cpu_gnrd __initconst = {
+ .state_table = gnrd_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_avn __initconst = {
.state_table = avn_cstates,
.disable_promotion_to_c1e = true,
@@ -1593,6 +1640,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &idle_cpu_spr),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &idle_cpu_spr),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &idle_cpu_gnr),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, &idle_cpu_gnrd),
X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &idle_cpu_knl),
X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &idle_cpu_knl),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &idle_cpu_bxt),
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index c4cf26f1d149..be0743dac3ff 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -269,8 +269,6 @@ rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in)
break;
#endif
}
- if (!ret && dev && is_vlan_dev(dev))
- dev = vlan_dev_real_dev(dev);
return ret ? ERR_PTR(ret) : dev;
}
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 5dbb248e9625..02f1666f3cba 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1615,7 +1615,6 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
struct ucma_event *uevent, *tmp;
struct ucma_context *ctx;
LIST_HEAD(event_list);
- struct fd f;
struct ucma_file *cur_file;
int ret = 0;
@@ -1623,21 +1622,17 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
return -EFAULT;
/* Get current fd to protect against it being closed */
- f = fdget(cmd.fd);
- if (!fd_file(f))
+ CLASS(fd, f)(cmd.fd);
+ if (fd_empty(f))
return -ENOENT;
- if (fd_file(f)->f_op != &ucma_fops) {
- ret = -EINVAL;
- goto file_put;
- }
+ if (fd_file(f)->f_op != &ucma_fops)
+ return -EINVAL;
cur_file = fd_file(f)->private_data;
/* Validate current fd and prevent destruction of id. */
ctx = ucma_get_ctx(cur_file, cmd.id);
- if (IS_ERR(ctx)) {
- ret = PTR_ERR(ctx);
- goto file_put;
- }
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
rdma_lock_handler(ctx->cm_id);
/*
@@ -1678,8 +1673,6 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
err_unlock:
rdma_unlock_handler(ctx->cm_id);
ucma_put_ctx(ctx);
-file_put:
- fdput(f);
return ret;
}
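CLASS(fd, f) declares a scope-bound struct fd whose reference is dropped automatically when f goes out of scope, which is what lets the error paths above collapse into plain returns. A minimal sketch of the idiom, assuming a hypothetical my_fops:

#include <linux/file.h>

static int my_lookup(int ufd)
{
	CLASS(fd, f)(ufd);	/* the fd reference is released at scope exit */

	if (fd_empty(f))
		return -ENOENT;
	if (fd_file(f)->f_op != &my_fops)
		return -EINVAL;

	/* ... use fd_file(f)->private_data while f is in scope ... */
	return 0;
}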
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index a4cce360df21..66b02fbf077a 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -584,7 +584,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
if (cmd.fd != -1) {
/* search for file descriptor */
f = fdget(cmd.fd);
- if (!fd_file(f)) {
+ if (fd_empty(f)) {
ret = -EBADF;
goto err_tree_mutex_unlock;
}
@@ -632,8 +632,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
atomic_inc(&xrcd->usecnt);
}
- if (fd_file(f))
- fdput(f);
+ fdput(f);
mutex_unlock(&ibudev->xrcd_tree_mutex);
uobj_finalize_uobj_create(&obj->uobject, attrs);
@@ -648,8 +647,7 @@ err:
uobj_alloc_abort(&obj->uobject, attrs);
err_tree_mutex_unlock:
- if (fd_file(f))
- fdput(f);
+ fdput(f);
mutex_unlock(&ibudev->xrcd_tree_mutex);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 6715c96a3eee..9eb290ec71a8 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -300,9 +300,6 @@ static void bnxt_re_shutdown(struct auxiliary_device *adev)
struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
struct bnxt_re_dev *rdev;
- if (!en_info)
- return;
-
rdev = en_info->rdev;
ib_unregister_device(&rdev->ibdev);
bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
@@ -316,9 +313,6 @@ static void bnxt_re_stop_irq(void *handle)
struct bnxt_qplib_nq *nq;
int indx;
- if (!en_info)
- return;
-
rdev = en_info->rdev;
rcfw = &rdev->rcfw;
@@ -339,9 +333,6 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
struct bnxt_qplib_nq *nq;
int indx, rc;
- if (!en_info)
- return;
-
rdev = en_info->rdev;
msix_ent = rdev->en_dev->msix_entries;
rcfw = &rdev->rcfw;
@@ -1991,10 +1982,6 @@ static void bnxt_re_remove(struct auxiliary_device *adev)
struct bnxt_re_dev *rdev;
mutex_lock(&bnxt_re_mutex);
- if (!en_info) {
- mutex_unlock(&bnxt_re_mutex);
- return;
- }
rdev = en_info->rdev;
if (rdev)
@@ -2025,7 +2012,15 @@ static int bnxt_re_probe(struct auxiliary_device *adev,
auxiliary_set_drvdata(adev, en_info);
rc = bnxt_re_add_device(adev, BNXT_RE_COMPLETE_INIT);
+ if (rc)
+ goto err;
mutex_unlock(&bnxt_re_mutex);
+ return 0;
+
+err:
+ mutex_unlock(&bnxt_re_mutex);
+ kfree(en_info);
+
return rc;
}
@@ -2035,9 +2030,6 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
struct bnxt_en_dev *en_dev;
struct bnxt_re_dev *rdev;
- if (!en_info)
- return 0;
-
rdev = en_info->rdev;
en_dev = en_info->en_dev;
mutex_lock(&bnxt_re_mutex);
@@ -2082,9 +2074,6 @@ static int bnxt_re_resume(struct auxiliary_device *adev)
struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
struct bnxt_re_dev *rdev;
- if (!en_info)
- return 0;
-
mutex_lock(&bnxt_re_mutex);
/* L2 driver may invoke this callback during device recovery, resume or
* reset. Current RoCE driver doesn't recover the device in case of
diff --git a/drivers/mailbox/qcom-cpucp-mbox.c b/drivers/mailbox/qcom-cpucp-mbox.c
index e5437c294803..44f4ed15f818 100644
--- a/drivers/mailbox/qcom-cpucp-mbox.c
+++ b/drivers/mailbox/qcom-cpucp-mbox.c
@@ -138,7 +138,7 @@ static int qcom_cpucp_mbox_probe(struct platform_device *pdev)
return irq;
ret = devm_request_irq(dev, irq, qcom_cpucp_mbox_irq_fn,
- IRQF_TRIGGER_HIGH, "apss_cpucp_mbox", cpucp);
+ IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND, "apss_cpucp_mbox", cpucp);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to register irq: %d\n", irq);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index d478aafa02c9..23e0b71b991e 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -2471,7 +2471,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
int r;
unsigned int num_locks;
struct dm_bufio_client *c;
- char slab_name[27];
+ char slab_name[64];
+ static atomic_t seqno = ATOMIC_INIT(0);
if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
DMERR("%s: block size not specified or is not multiple of 512b", __func__);
@@ -2522,7 +2523,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
(block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
- snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u", block_size);
+ snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u-%u",
+ block_size, atomic_inc_return(&seqno));
c->slab_cache = kmem_cache_create(slab_name, block_size, align,
SLAB_RECLAIM_ACCOUNT, NULL);
if (!c->slab_cache) {
@@ -2531,9 +2533,11 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
}
}
if (aux_size)
- snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u", aux_size);
+ snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u-%u",
+ aux_size, atomic_inc_return(&seqno));
else
- snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer");
+ snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u",
+ atomic_inc_return(&seqno));
c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
0, SLAB_RECLAIM_ACCOUNT, NULL);
if (!c->slab_buffer) {
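The sequence number matters because two bufio clients with the same block size would otherwise ask kmem_cache_create() for identically named caches, which can collide when the caches are exposed through sysfs/debugfs. A minimal sketch of the naming pattern, assuming block_size and align are in scope and with a hypothetical cache name:

static atomic_t seqno = ATOMIC_INIT(0);
char name[64];
struct kmem_cache *cache;

/* append a monotonically increasing suffix so every client is unique */
snprintf(name, sizeof(name), "my_cache-%u-%u",
	 block_size, atomic_inc_return(&seqno));
cache = kmem_cache_create(name, block_size, align,
			  SLAB_RECLAIM_ACCOUNT, NULL);
if (!cache)
	return -ENOMEM;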
diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c
index 9c5308298cf1..f3051bd7d2df 100644
--- a/drivers/md/dm-cache-background-tracker.c
+++ b/drivers/md/dm-cache-background-tracker.c
@@ -11,12 +11,6 @@
#define DM_MSG_PREFIX "dm-background-tracker"
-struct bt_work {
- struct list_head list;
- struct rb_node node;
- struct policy_work work;
-};
-
struct background_tracker {
unsigned int max_work;
atomic_t pending_promotes;
@@ -26,10 +20,10 @@ struct background_tracker {
struct list_head issued;
struct list_head queued;
struct rb_root pending;
-
- struct kmem_cache *work_cache;
};
+struct kmem_cache *btracker_work_cache = NULL;
+
struct background_tracker *btracker_create(unsigned int max_work)
{
struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
@@ -48,12 +42,6 @@ struct background_tracker *btracker_create(unsigned int max_work)
INIT_LIST_HEAD(&b->queued);
b->pending = RB_ROOT;
- b->work_cache = KMEM_CACHE(bt_work, 0);
- if (!b->work_cache) {
- DMERR("couldn't create mempool for background work items");
- kfree(b);
- b = NULL;
- }
return b;
}
@@ -66,10 +54,9 @@ void btracker_destroy(struct background_tracker *b)
BUG_ON(!list_empty(&b->issued));
list_for_each_entry_safe (w, tmp, &b->queued, list) {
list_del(&w->list);
- kmem_cache_free(b->work_cache, w);
+ kmem_cache_free(btracker_work_cache, w);
}
- kmem_cache_destroy(b->work_cache);
kfree(b);
}
EXPORT_SYMBOL_GPL(btracker_destroy);
@@ -180,7 +167,7 @@ static struct bt_work *alloc_work(struct background_tracker *b)
if (max_work_reached(b))
return NULL;
- return kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
+ return kmem_cache_alloc(btracker_work_cache, GFP_NOWAIT);
}
int btracker_queue(struct background_tracker *b,
@@ -203,7 +190,7 @@ int btracker_queue(struct background_tracker *b,
* There was a race, we'll just ignore this second
* bit of work for the same oblock.
*/
- kmem_cache_free(b->work_cache, w);
+ kmem_cache_free(btracker_work_cache, w);
return -EINVAL;
}
@@ -244,7 +231,7 @@ void btracker_complete(struct background_tracker *b,
update_stats(b, &w->work, -1);
rb_erase(&w->node, &b->pending);
list_del(&w->list);
- kmem_cache_free(b->work_cache, w);
+ kmem_cache_free(btracker_work_cache, w);
}
EXPORT_SYMBOL_GPL(btracker_complete);
diff --git a/drivers/md/dm-cache-background-tracker.h b/drivers/md/dm-cache-background-tracker.h
index 5b8f5c667b81..09c8fc59f7bb 100644
--- a/drivers/md/dm-cache-background-tracker.h
+++ b/drivers/md/dm-cache-background-tracker.h
@@ -26,6 +26,14 @@
* protected with a spinlock.
*/
+struct bt_work {
+ struct list_head list;
+ struct rb_node node;
+ struct policy_work work;
+};
+
+extern struct kmem_cache *btracker_work_cache;
+
struct background_work;
struct background_tracker;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 40709310e327..9cb797a561d6 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -10,6 +10,7 @@
#include "dm-bio-record.h"
#include "dm-cache-metadata.h"
#include "dm-io-tracker.h"
+#include "dm-cache-background-tracker.h"
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
@@ -2263,7 +2264,7 @@ static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
/*----------------------------------------------------------------*/
-static struct kmem_cache *migration_cache;
+static struct kmem_cache *migration_cache = NULL;
#define NOT_CORE_OPTION 1
@@ -3361,7 +3362,7 @@ static int cache_iterate_devices(struct dm_target *ti,
static void disable_passdown_if_not_supported(struct cache *cache)
{
struct block_device *origin_bdev = cache->origin_dev->bdev;
- struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
+ struct queue_limits *origin_limits = bdev_limits(origin_bdev);
const char *reason = NULL;
if (!cache->features.discard_passdown)
@@ -3383,7 +3384,7 @@ static void disable_passdown_if_not_supported(struct cache *cache)
static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
struct block_device *origin_bdev = cache->origin_dev->bdev;
- struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
+ struct queue_limits *origin_limits = bdev_limits(origin_bdev);
if (!cache->features.discard_passdown) {
/* No passdown is done so setting own virtual limits */
@@ -3445,22 +3446,36 @@ static int __init dm_cache_init(void)
int r;
migration_cache = KMEM_CACHE(dm_cache_migration, 0);
- if (!migration_cache)
- return -ENOMEM;
+ if (!migration_cache) {
+ r = -ENOMEM;
+ goto err;
+ }
+
+ btracker_work_cache = kmem_cache_create("dm_cache_bt_work",
+ sizeof(struct bt_work), __alignof__(struct bt_work), 0, NULL);
+ if (!btracker_work_cache) {
+ r = -ENOMEM;
+ goto err;
+ }
r = dm_register_target(&cache_target);
if (r) {
- kmem_cache_destroy(migration_cache);
- return r;
+ goto err;
}
return 0;
+
+err:
+ kmem_cache_destroy(migration_cache);
+ kmem_cache_destroy(btracker_work_cache);
+ return r;
}
static void __exit dm_cache_exit(void)
{
dm_unregister_target(&cache_target);
kmem_cache_destroy(migration_cache);
+ kmem_cache_destroy(btracker_work_cache);
}
module_init(dm_cache_init);
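The unwind works because kmem_cache_destroy() is a no-op when passed NULL, so a single error label can tear down whichever caches were actually created. A minimal sketch of that init shape, with hypothetical cache names and sizes:

static struct kmem_cache *cache_a, *cache_b;

static int __init my_init(void)
{
	int r = -ENOMEM;

	cache_a = kmem_cache_create("my_cache_a", 64, 0, 0, NULL);
	if (!cache_a)
		goto err;

	cache_b = kmem_cache_create("my_cache_b", 128, 0, 0, NULL);
	if (!cache_b)
		goto err;

	return 0;

err:
	kmem_cache_destroy(cache_a);	/* both calls are NULL-safe */
	kmem_cache_destroy(cache_b);
	return r;
}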
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 12bbe487a4c8..e956d980672c 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -2020,7 +2020,7 @@ static void clone_resume(struct dm_target *ti)
static void disable_passdown_if_not_supported(struct clone *clone)
{
struct block_device *dest_dev = clone->dest_dev->bdev;
- struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;
+ struct queue_limits *dest_limits = bdev_limits(dest_dev);
const char *reason = NULL;
if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
@@ -2041,7 +2041,7 @@ static void disable_passdown_if_not_supported(struct clone *clone)
static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
{
struct block_device *dest_bdev = clone->dest_dev->bdev;
- struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits;
+ struct queue_limits *dest_limits = bdev_limits(dest_bdev);
if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
/* No passdown is done so we set our own virtual limits */
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 89632ce97760..9095f19a84f3 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2842,7 +2842,7 @@ static void disable_discard_passdown_if_not_supported(struct pool_c *pt)
{
struct pool *pool = pt->pool;
struct block_device *data_bdev = pt->data_dev->bdev;
- struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
+ struct queue_limits *data_limits = bdev_limits(data_bdev);
const char *reason = NULL;
if (!pt->adjusted_pf.discard_passdown)
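bdev_limits() is a small accessor for a block device's queue limits, replacing the open-coded &bdev_get_queue(bdev)->limits chains above. Usage stays a one-liner, assuming a struct block_device *bdev in scope (the discard check is a hypothetical example):

struct queue_limits *lim = bdev_limits(bdev);

if (!lim->max_discard_sectors)
	reason = "discard unsupported";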
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index c0d41c36e06e..20edd3fabbab 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -344,7 +344,7 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
} else {
set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
- lim->max_zone_append_sectors = 0;
+ lim->max_hw_zone_append_sectors = 0;
}
/*
@@ -379,7 +379,7 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
if (!zlim.mapped_nr_seq_zones) {
lim->max_open_zones = 0;
lim->max_active_zones = 0;
- lim->max_zone_append_sectors = 0;
+ lim->max_hw_zone_append_sectors = 0;
lim->zone_write_granularity = 0;
lim->chunk_sectors = 0;
lim->features &= ~BLK_FEAT_ZONED;
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 29da10e6f703..c3a42dd66ce5 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -1285,6 +1285,7 @@ static void bitmap_unplug_async(struct bitmap *bitmap)
queue_work(md_bitmap_wq, &unplug_work.work);
wait_for_completion(&done);
+ destroy_work_on_stack(&unplug_work.work);
}
static void bitmap_unplug(struct mddev *mddev, bool sync)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 67108c397c5a..aebe12b0ee27 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -9784,9 +9784,7 @@ EXPORT_SYMBOL(md_reap_sync_thread);
void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
sysfs_notify_dirent_safe(rdev->sysfs_state);
- wait_event_timeout(rdev->blocked_wait,
- !test_bit(Blocked, &rdev->flags) &&
- !test_bit(BlockedBadBlocks, &rdev->flags),
+ wait_event_timeout(rdev->blocked_wait, !rdev_blocked(rdev),
msecs_to_jiffies(5000));
rdev_dec_pending(rdev, mddev);
}
@@ -9815,6 +9813,17 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
{
struct mddev *mddev = rdev->mddev;
int rv;
+
+ /*
+ * Recording new badblocks for faulty rdev will force unnecessary
+ * super block updating. This is fragile for external management because
+ * userspace daemon may trying to remove this device and deadlock may
+ * occur. This will be probably solved in the mdadm, but it is safer to
+ * avoid it.
+ */
+ if (test_bit(Faulty, &rdev->flags))
+ return 1;
+
if (is_new)
s += rdev->new_data_offset;
else
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 5d2e6bd58e4d..4ba93af36126 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -1002,6 +1002,30 @@ static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
trace_block_bio_remap(bio, disk_devt(mddev->gendisk), sector);
}
+static inline bool rdev_blocked(struct md_rdev *rdev)
+{
+ /*
+ * Blocked will be set by the error handler and cleared by the daemon
+ * after updating the superblock; meanwhile, write IO should be blocked
+ * to prevent reading old data after a power failure.
+ */
+ if (test_bit(Blocked, &rdev->flags))
+ return true;
+
+ /*
+ * A faulty device should not be accessed anymore; there is no need to
+ * wait for its bad blocks to be acknowledged.
+ */
+ if (test_bit(Faulty, &rdev->flags))
+ return false;
+
+ /* rdev is blocked by badblocks. */
+ if (test_bit(BlockedBadBlocks, &rdev->flags))
+ return true;
+
+ return false;
+}
+
#define mddev_add_trace_msg(mddev, fmt, args...) \
do { \
if (!mddev_is_dm(mddev)) \
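Writers that hit a blocked rdev take a pending reference and sleep until the flags clear; md_wait_for_blocked_rdev() drops that reference on wake-up. A minimal sketch of the wait loop the RAID personalities now share, assuming rdev and mddev are in scope:

if (rdev_blocked(rdev)) {
	atomic_inc(&rdev->nr_pending);
	md_wait_for_blocked_rdev(rdev, mddev);	/* drops nr_pending */
	/* re-scan the device set: state may have changed while sleeping */
}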
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 32d587524778..baaf5f8b80ae 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -466,6 +466,12 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
struct bio *split = bio_split(bio,
zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
&mddev->bio_set);
+
+ if (IS_ERR(split)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+ bio_endio(bio);
+ return;
+ }
bio_chain(split, bio);
submit_bio_noacct(bio);
bio = split;
@@ -608,6 +614,12 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
if (sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, sectors, GFP_NOIO,
&mddev->bio_set);
+
+ if (IS_ERR(split)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+ bio_endio(bio);
+ return true;
+ }
bio_chain(split, bio);
raid0_map_submit_bio(mddev, bio);
bio = split;
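bio_split() can now return an ERR_PTR instead of a bio, so every caller has to check before chaining. A minimal sketch of the expected call sequence, assuming the caller owns bio and a bio_set bs:

struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

if (IS_ERR(split)) {
	bio->bi_status = errno_to_blk_status(PTR_ERR(split));
	bio_endio(bio);
	return;
}
bio_chain(split, bio);		/* the remainder completes after the front */
submit_bio_noacct(bio);		/* resubmit the tail */
bio = split;			/* keep working on the first 'sectors' sectors */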
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6c9d24203f39..a5adf08ee174 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1322,7 +1322,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
const enum req_op op = bio_op(bio);
const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
int max_sectors;
- int rdisk;
+ int rdisk, error;
bool r1bio_existed = !!r1_bio;
/*
@@ -1383,6 +1383,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
if (max_sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, max_sectors,
gfp, &conf->bio_split);
+
+ if (IS_ERR(split)) {
+ error = PTR_ERR(split);
+ goto err_handle;
+ }
bio_chain(split, bio);
submit_bio_noacct(bio);
bio = split;
@@ -1410,6 +1415,47 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_private = r1_bio;
mddev_trace_remap(mddev, read_bio, r1_bio->sector);
submit_bio_noacct(read_bio);
+ return;
+
+err_handle:
+ atomic_dec(&mirror->rdev->nr_pending);
+ bio->bi_status = errno_to_blk_status(error);
+ set_bit(R1BIO_Uptodate, &r1_bio->state);
+ raid_end_bio_io(r1_bio);
+}
+
+static bool wait_blocked_rdev(struct mddev *mddev, struct bio *bio)
+{
+ struct r1conf *conf = mddev->private;
+ int disks = conf->raid_disks * 2;
+ int i;
+
+retry:
+ for (i = 0; i < disks; i++) {
+ struct md_rdev *rdev = conf->mirrors[i].rdev;
+
+ if (!rdev)
+ continue;
+
+ /* don't write here until the bad block is acknowledged */
+ if (test_bit(WriteErrorSeen, &rdev->flags) &&
+ rdev_has_badblock(rdev, bio->bi_iter.bi_sector,
+ bio_sectors(bio)) < 0)
+ set_bit(BlockedBadBlocks, &rdev->flags);
+
+ if (rdev_blocked(rdev)) {
+ if (bio->bi_opf & REQ_NOWAIT)
+ return false;
+
+ mddev_add_trace_msg(rdev->mddev, "raid1 wait rdev %d blocked",
+ rdev->raid_disk);
+ atomic_inc(&rdev->nr_pending);
+ md_wait_for_blocked_rdev(rdev, rdev->mddev);
+ goto retry;
+ }
+ }
+
+ return true;
}
static void raid1_write_request(struct mddev *mddev, struct bio *bio,
@@ -1417,9 +1463,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
{
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
- int i, disks;
+ int i, disks, k, error;
unsigned long flags;
- struct md_rdev *blocked_rdev;
int first_clone;
int max_sectors;
bool write_behind = false;
@@ -1457,7 +1502,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
return;
}
- retry_write:
+ if (!wait_blocked_rdev(mddev, bio)) {
+ bio_wouldblock_error(bio);
+ return;
+ }
+
r1_bio = alloc_r1bio(mddev, bio);
r1_bio->sectors = max_write_sectors;
@@ -1473,7 +1522,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
*/
disks = conf->raid_disks * 2;
- blocked_rdev = NULL;
max_sectors = r1_bio->sectors;
for (i = 0; i < disks; i++) {
struct md_rdev *rdev = conf->mirrors[i].rdev;
@@ -1486,11 +1534,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
if (!is_discard && rdev && test_bit(WriteMostly, &rdev->flags))
write_behind = true;
- if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
- atomic_inc(&rdev->nr_pending);
- blocked_rdev = rdev;
- break;
- }
r1_bio->bios[i] = NULL;
if (!rdev || test_bit(Faulty, &rdev->flags)) {
if (i < conf->raid_disks)
@@ -1506,13 +1549,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
&first_bad, &bad_sectors);
- if (is_bad < 0) {
- /* mustn't write here until the bad block is
- * acknowledged*/
- set_bit(BlockedBadBlocks, &rdev->flags);
- blocked_rdev = rdev;
- break;
- }
if (is_bad && first_bad <= r1_bio->sector) {
/* Cannot write here at all */
bad_sectors -= (r1_bio->sector - first_bad);
@@ -1543,27 +1579,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
r1_bio->bios[i] = bio;
}
- if (unlikely(blocked_rdev)) {
- /* Wait for this device to become unblocked */
- int j;
-
- for (j = 0; j < i; j++)
- if (r1_bio->bios[j])
- rdev_dec_pending(conf->mirrors[j].rdev, mddev);
- mempool_free(r1_bio, &conf->r1bio_pool);
- allow_barrier(conf, bio->bi_iter.bi_sector);
-
- if (bio->bi_opf & REQ_NOWAIT) {
- bio_wouldblock_error(bio);
- return;
- }
- mddev_add_trace_msg(mddev, "raid1 wait rdev %d blocked",
- blocked_rdev->raid_disk);
- md_wait_for_blocked_rdev(blocked_rdev, mddev);
- wait_barrier(conf, bio->bi_iter.bi_sector, false);
- goto retry_write;
- }
-
/*
* When using a bitmap, we may call alloc_behind_master_bio below.
* alloc_behind_master_bio allocates a copy of the data payload a page
@@ -1576,6 +1591,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
if (max_sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, max_sectors,
GFP_NOIO, &conf->bio_split);
+
+ if (IS_ERR(split)) {
+ error = PTR_ERR(split);
+ goto err_handle;
+ }
bio_chain(split, bio);
submit_bio_noacct(bio);
bio = split;
@@ -1660,6 +1680,18 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
/* In case raid1d snuck in to freeze_array */
wake_up_barrier(conf);
+ return;
+err_handle:
+ for (k = 0; k < i; k++) {
+ if (r1_bio->bios[k]) {
+ rdev_dec_pending(conf->mirrors[k].rdev, mddev);
+ r1_bio->bios[k] = NULL;
+ }
+ }
+
+ bio->bi_status = errno_to_blk_status(error);
+ set_bit(R1BIO_Uptodate, &r1_bio->state);
+ raid_end_bio_io(r1_bio);
}
static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 862b1fb71d86..18989231791a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1159,6 +1159,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
int slot = r10_bio->read_slot;
struct md_rdev *err_rdev = NULL;
gfp_t gfp = GFP_NOIO;
+ int error;
if (slot >= 0 && r10_bio->devs[slot].rdev) {
/*
@@ -1206,6 +1207,10 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
if (max_sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, max_sectors,
gfp, &conf->bio_split);
+ if (IS_ERR(split)) {
+ error = PTR_ERR(split);
+ goto err_handle;
+ }
bio_chain(split, bio);
allow_barrier(conf);
submit_bio_noacct(bio);
@@ -1236,6 +1241,11 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
mddev_trace_remap(mddev, read_bio, r10_bio->sector);
submit_bio_noacct(read_bio);
return;
+err_handle:
+ atomic_dec(&rdev->nr_pending);
+ bio->bi_status = errno_to_blk_status(error);
+ set_bit(R10BIO_Uptodate, &r10_bio->state);
+ raid_end_bio_io(r10_bio);
}
static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
@@ -1285,9 +1295,9 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
{
- int i;
struct r10conf *conf = mddev->private;
struct md_rdev *blocked_rdev;
+ int i;
retry_wait:
blocked_rdev = NULL;
@@ -1295,40 +1305,36 @@ retry_wait:
struct md_rdev *rdev, *rrdev;
rdev = conf->mirrors[i].rdev;
- rrdev = conf->mirrors[i].replacement;
- if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
- atomic_inc(&rdev->nr_pending);
- blocked_rdev = rdev;
- break;
- }
- if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
- atomic_inc(&rrdev->nr_pending);
- blocked_rdev = rrdev;
- break;
- }
-
- if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
+ if (rdev) {
sector_t dev_sector = r10_bio->devs[i].addr;
/*
* A discard request doesn't care about the write result,
* so it doesn't need to wait for a blocked disk here.
*/
- if (!r10_bio->sectors)
- continue;
-
- if (rdev_has_badblock(rdev, dev_sector,
- r10_bio->sectors) < 0) {
+ if (test_bit(WriteErrorSeen, &rdev->flags) &&
+ r10_bio->sectors &&
+ rdev_has_badblock(rdev, dev_sector,
+ r10_bio->sectors) < 0)
/*
- * Mustn't write here until the bad block
- * is acknowledged
+ * Mustn't write here until the bad
+ * block is acknowledged
*/
- atomic_inc(&rdev->nr_pending);
set_bit(BlockedBadBlocks, &rdev->flags);
+
+ if (rdev_blocked(rdev)) {
blocked_rdev = rdev;
+ atomic_inc(&rdev->nr_pending);
break;
}
}
+
+ rrdev = conf->mirrors[i].replacement;
+ if (rrdev && rdev_blocked(rrdev)) {
+ atomic_inc(&rrdev->nr_pending);
+ blocked_rdev = rrdev;
+ break;
+ }
}
if (unlikely(blocked_rdev)) {
@@ -1347,9 +1353,10 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
struct r10bio *r10_bio)
{
struct r10conf *conf = mddev->private;
- int i;
+ int i, k;
sector_t sectors;
int max_sectors;
+ int error;
if ((mddev_is_clustered(mddev) &&
md_cluster_ops->area_resyncing(mddev, WRITE,
@@ -1482,6 +1489,10 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
if (r10_bio->sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, r10_bio->sectors,
GFP_NOIO, &conf->bio_split);
+ if (IS_ERR(split)) {
+ error = PTR_ERR(split);
+ goto err_handle;
+ }
bio_chain(split, bio);
allow_barrier(conf);
submit_bio_noacct(bio);
@@ -1503,6 +1514,26 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
raid10_write_one_disk(mddev, r10_bio, bio, true, i);
}
one_write_done(r10_bio);
+ return;
+err_handle:
+ for (k = 0; k < i; k++) {
+ int d = r10_bio->devs[k].devnum;
+ struct md_rdev *rdev = conf->mirrors[d].rdev;
+ struct md_rdev *rrdev = conf->mirrors[d].replacement;
+
+ if (r10_bio->devs[k].bio) {
+ rdev_dec_pending(rdev, mddev);
+ r10_bio->devs[k].bio = NULL;
+ }
+ if (r10_bio->devs[k].repl_bio) {
+ rdev_dec_pending(rrdev, mddev);
+ r10_bio->devs[k].repl_bio = NULL;
+ }
+ }
+
+ bio->bi_status = errno_to_blk_status(error);
+ set_bit(R10BIO_Uptodate, &r10_bio->state);
+ raid_end_bio_io(r10_bio);
}
static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
@@ -1644,6 +1675,11 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
if (remainder) {
split_size = stripe_size - remainder;
split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
+ if (IS_ERR(split)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+ bio_endio(bio);
+ return 0;
+ }
bio_chain(split, bio);
allow_barrier(conf);
/* Resend the first split part */
@@ -1654,6 +1690,11 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
if (remainder) {
split_size = bio_sectors(bio) - remainder;
split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
+ if (IS_ERR(split)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+ bio_endio(bio);
+ return 0;
+ }
bio_chain(split, bio);
allow_barrier(conf);
/* Resend the second split part */
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index a70cbec12ed0..37c4da5311ca 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -258,7 +258,7 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
pplhdr->signature = cpu_to_le32(ppl_conf->signature);
- io->seq = atomic64_add_return(1, &ppl_conf->seq);
+ io->seq = atomic64_inc_return(&ppl_conf->seq);
pplhdr->generation = cpu_to_le64(io->seq);
return io;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dc2ea636d173..f09e7677ee9f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4724,14 +4724,13 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (rdev) {
is_bad = rdev_has_badblock(rdev, sh->sector,
RAID5_STRIPE_SECTORS(conf));
- if (s->blocked_rdev == NULL
- && (test_bit(Blocked, &rdev->flags)
- || is_bad < 0)) {
+ if (s->blocked_rdev == NULL) {
if (is_bad < 0)
- set_bit(BlockedBadBlocks,
- &rdev->flags);
- s->blocked_rdev = rdev;
- atomic_inc(&rdev->nr_pending);
+ set_bit(BlockedBadBlocks, &rdev->flags);
+ if (rdev_blocked(rdev)) {
+ s->blocked_rdev = rdev;
+ atomic_inc(&rdev->nr_pending);
+ }
}
}
clear_bit(R5_Insync, &dev->flags);
@@ -7177,6 +7176,8 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
err = mddev_suspend_and_lock(mddev);
if (err)
return err;
+ raid5_quiesce(mddev, true);
+
conf = mddev->private;
if (!conf)
err = -ENODEV;
@@ -7198,6 +7199,8 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
kfree(old_groups);
}
}
+
+ raid5_quiesce(mddev, false);
mddev_unlock_and_resume(mddev);
return err ?: len;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 896ecfc4afa6..d174e586698f 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -633,7 +633,7 @@ struct r5conf {
* two caches.
*/
int active_name;
- char cache_name[2][32];
+ char cache_name[2][48];
struct kmem_cache *slab_cache; /* for allocating stripes */
struct mutex cache_size_mutex; /* Protect changes to cache size */
diff --git a/drivers/media/mc/mc-request.c b/drivers/media/mc/mc-request.c
index e064914c476e..df39c8c11e9a 100644
--- a/drivers/media/mc/mc-request.c
+++ b/drivers/media/mc/mc-request.c
@@ -246,22 +246,21 @@ static const struct file_operations request_fops = {
struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
- struct fd f;
struct media_request *req;
if (!mdev || !mdev->ops ||
!mdev->ops->req_validate || !mdev->ops->req_queue)
return ERR_PTR(-EBADR);
- f = fdget(request_fd);
- if (!fd_file(f))
- goto err_no_req_fd;
+ CLASS(fd, f)(request_fd);
+ if (fd_empty(f))
+ goto err;
if (fd_file(f)->f_op != &request_fops)
- goto err_fput;
+ goto err;
req = fd_file(f)->private_data;
if (req->mdev != mdev)
- goto err_fput;
+ goto err;
/*
* Note: as long as someone has an open filehandle of the request,
@@ -272,14 +271,9 @@ media_request_get_by_fd(struct media_device *mdev, int request_fd)
* before media_request_get() is called.
*/
media_request_get(req);
- fdput(f);
-
return req;
-err_fput:
- fdput(f);
-
-err_no_req_fd:
+err:
dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index f042f3f14afa..a2257dc2f25d 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -815,28 +815,23 @@ void __exit lirc_dev_exit(void)
struct rc_dev *rc_dev_get_from_fd(int fd, bool write)
{
- struct fd f = fdget(fd);
+ CLASS(fd, f)(fd);
struct lirc_fh *fh;
struct rc_dev *dev;
- if (!fd_file(f))
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
- if (fd_file(f)->f_op != &lirc_fops) {
- fdput(f);
+ if (fd_file(f)->f_op != &lirc_fops)
return ERR_PTR(-EINVAL);
- }
- if (write && !(fd_file(f)->f_mode & FMODE_WRITE)) {
- fdput(f);
+ if (write && !(fd_file(f)->f_mode & FMODE_WRITE))
return ERR_PTR(-EPERM);
- }
fh = fd_file(f)->private_data;
dev = fh->rc;
get_device(&dev->dev);
- fdput(f);
return dev;
}
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
index 6a790ac8cbe6..d8b34c194693 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
@@ -10,6 +10,7 @@
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/videodev2.h>
+#include <linux/prandom.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index ef06a4d5d65b..79f6fad97a80 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2501,6 +2501,56 @@ static inline int mmc_blk_readonly(struct mmc_card *card)
!(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
+/*
+ * Search for a declared partitions node for the disk in the mmc-card related
+ * node.
+ *
+ * This permits supporting a partition table defined in DT for the special
+ * case where the partition table is not written on the disk and is expected
+ * to be passed in by the running system.
+ *
+ * For the user disk, the "partitions" node is searched.
+ * For the special HW disks, a "partitions-" node with the appended name is
+ * used, following this conversion table (to adhere to JEDEC naming):
+ * - boot0 -> partitions-boot1
+ * - boot1 -> partitions-boot2
+ * - gp0 -> partitions-gp1
+ * - gp1 -> partitions-gp2
+ * - gp2 -> partitions-gp3
+ * - gp3 -> partitions-gp4
+ */
+static struct fwnode_handle *mmc_blk_get_partitions_node(struct device *mmc_dev,
+ const char *subname)
+{
+ const char *node_name = "partitions";
+
+ if (subname) {
+ mmc_dev = mmc_dev->parent;
+
+ /*
+ * Check if we are allocating a boot0/1 BOOT disk.
+ * In DT we use the JEDEC naming boot1/2.
+ */
+ if (!strcmp(subname, "boot0"))
+ node_name = "partitions-boot1";
+ if (!strcmp(subname, "boot1"))
+ node_name = "partitions-boot2";
+ /*
+ * Check if we are allocating a gp0/1/2/3 GP disk.
+ * In DT we use the JEDEC naming gp1/2/3/4.
+ */
+ if (!strcmp(subname, "gp0"))
+ node_name = "partitions-gp1";
+ if (!strcmp(subname, "gp1"))
+ node_name = "partitions-gp2";
+ if (!strcmp(subname, "gp2"))
+ node_name = "partitions-gp3";
+ if (!strcmp(subname, "gp3"))
+ node_name = "partitions-gp4";
+ }
+
+ return device_get_named_child_node(mmc_dev, node_name);
+}
+
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct device *parent,
sector_t size,
@@ -2509,6 +2559,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
int area_type,
unsigned int part_type)
{
+ struct fwnode_handle *disk_fwnode;
struct mmc_blk_data *md;
int devidx, ret;
char cap_str[10];
@@ -2610,7 +2661,9 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
/* used in ->open, must be set before add_disk: */
if (area_type == MMC_BLK_DATA_AREA_MAIN)
dev_set_drvdata(&card->dev, md);
- ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
+ disk_fwnode = mmc_blk_get_partitions_node(parent, subname);
+ ret = add_disk_fwnode(md->parent, md->disk, mmc_disk_attr_groups,
+ disk_fwnode);
if (ret)
goto err_put_disk;
return md;
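device_get_named_child_node() does the actual lookup: given the mmc device (or its parent for the HW partitions) it returns the firmware node whose name matches the JEDEC-style label. A minimal sketch for the boot0 case, assuming the surrounding probe variables:

struct fwnode_handle *fwnode;

/* boot0 maps to the JEDEC name "boot1" in DT */
fwnode = device_get_named_child_node(mmc_dev->parent, "partitions-boot1");
if (fwnode)
	ret = add_disk_fwnode(parent, disk, groups, fwnode);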
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 41e451235f63..e9f6e4e62290 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2957,8 +2957,8 @@ static int dw_mci_init_slot(struct dw_mci *host)
if (host->use_dma == TRANS_MODE_IDMAC) {
mmc->max_segs = host->ring_size;
mmc->max_blk_size = 65535;
- mmc->max_req_size = DW_MCI_DESC_DATA_LENGTH * host->ring_size;
- mmc->max_seg_size = mmc->max_req_size;
+ mmc->max_seg_size = 0x1000;
+ mmc->max_req_size = mmc->max_seg_size * host->ring_size;
mmc->max_blk_count = mmc->max_req_size / 512;
} else if (host->use_dma == TRANS_MODE_EDMAC) {
mmc->max_segs = 64;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index d3bd0ac99ec4..e0ab5fd635e6 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1191,10 +1191,9 @@ static const struct sunxi_mmc_cfg sun50i_a64_emmc_cfg = {
.needs_new_timings = true,
};
-static const struct sunxi_mmc_cfg sun50i_a100_cfg = {
+static const struct sunxi_mmc_cfg sun50i_h616_cfg = {
.idma_des_size_bits = 16,
.idma_des_shift = 2,
- .clk_delays = NULL,
.can_calibrate = true,
.mask_data0 = true,
.needs_new_timings = true,
@@ -1217,8 +1216,9 @@ static const struct of_device_id sunxi_mmc_of_match[] = {
{ .compatible = "allwinner,sun20i-d1-mmc", .data = &sun20i_d1_cfg },
{ .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun50i-a64-emmc", .data = &sun50i_a64_emmc_cfg },
- { .compatible = "allwinner,sun50i-a100-mmc", .data = &sun50i_a100_cfg },
+ { .compatible = "allwinner,sun50i-a100-mmc", .data = &sun20i_d1_cfg },
{ .compatible = "allwinner,sun50i-a100-emmc", .data = &sun50i_a100_emmc_cfg },
+ { .compatible = "allwinner,sun50i-h616-mmc", .data = &sun50i_h616_cfg },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 13fed398937e..e1ee68f8b8f8 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -17,7 +17,7 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include "mtd_test.h"
diff --git a/drivers/mtd/tests/pagetest.c b/drivers/mtd/tests/pagetest.c
index 8eb40b6e6dfa..6878700d2fc0 100644
--- a/drivers/mtd/tests/pagetest.c
+++ b/drivers/mtd/tests/pagetest.c
@@ -17,7 +17,7 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include "mtd_test.h"
diff --git a/drivers/mtd/tests/subpagetest.c b/drivers/mtd/tests/subpagetest.c
index 05250a080139..f34bbf033c4d 100644
--- a/drivers/mtd/tests/subpagetest.c
+++ b/drivers/mtd/tests/subpagetest.c
@@ -15,7 +15,7 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include "mtd_test.h"
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b1bffd8e9a95..15e0f14d0d49 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1008,6 +1008,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_UP)
bond_hw_addr_flush(bond->dev, old_active->dev);
+
+ bond_slave_ns_maddrs_add(bond, old_active);
}
if (new_active) {
@@ -1024,6 +1026,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
dev_mc_sync(new_active->dev, bond->dev);
netif_addr_unlock_bh(bond->dev);
}
+
+ bond_slave_ns_maddrs_del(bond, new_active);
}
}
@@ -2341,6 +2345,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
bond_compute_features(bond);
bond_set_carrier(bond);
+ /* Needs to be called before bond_select_active_slave(), which will
+ * remove the maddrs if the slave is selected as the active slave.
+ */
+ bond_slave_ns_maddrs_add(bond, new_slave);
+
if (bond_uses_primary(bond)) {
block_netpoll_tx();
bond_select_active_slave(bond);
@@ -2350,7 +2359,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
if (bond_mode_can_use_xmit_hash(bond))
bond_update_slave_arr(bond, NULL);
-
if (!slave_dev->netdev_ops->ndo_bpf ||
!slave_dev->netdev_ops->ndo_xdp_xmit) {
if (bond->xdp_prog) {
@@ -2548,6 +2556,12 @@ static int __bond_release_one(struct net_device *bond_dev,
if (oldcurrent == slave)
bond_change_active_slave(bond, NULL);
+ /* Must be called after bond_change_active_slave() as the slave
+ * might change from an active slave to a backup slave. Then it is
+ * necessary to clear the maddrs on the backup slave.
+ */
+ bond_slave_ns_maddrs_del(bond, slave);
+
if (bond_is_lb(bond)) {
/* Must be called only after the slave has been
* detached from the list and the curr_active_slave
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 95d59a18c022..327b6ecdc77e 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -15,6 +15,7 @@
#include <linux/sched/signal.h>
#include <net/bonding.h>
+#include <net/ndisc.h>
static int bond_option_active_slave_set(struct bonding *bond,
const struct bond_opt_value *newval);
@@ -1234,6 +1235,68 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
}
#if IS_ENABLED(CONFIG_IPV6)
+static bool slave_can_set_ns_maddr(const struct bonding *bond, struct slave *slave)
+{
+ return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+ !bond_is_active_slave(slave) &&
+ slave->dev->flags & IFF_MULTICAST;
+}
+
+static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add)
+{
+ struct in6_addr *targets = bond->params.ns_targets;
+ char slot_maddr[MAX_ADDR_LEN];
+ int i;
+
+ if (!slave_can_set_ns_maddr(bond, slave))
+ return;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
+ if (ipv6_addr_any(&targets[i]))
+ break;
+
+ if (!ndisc_mc_map(&targets[i], slot_maddr, slave->dev, 0)) {
+ if (add)
+ dev_mc_add(slave->dev, slot_maddr);
+ else
+ dev_mc_del(slave->dev, slot_maddr);
+ }
+ }
+}
+
+void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave)
+{
+ if (!bond->params.arp_validate)
+ return;
+ slave_set_ns_maddrs(bond, slave, true);
+}
+
+void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave)
+{
+ if (!bond->params.arp_validate)
+ return;
+ slave_set_ns_maddrs(bond, slave, false);
+}
+
+static void slave_set_ns_maddr(struct bonding *bond, struct slave *slave,
+ struct in6_addr *target, struct in6_addr *slot)
+{
+ char target_maddr[MAX_ADDR_LEN], slot_maddr[MAX_ADDR_LEN];
+
+ if (!bond->params.arp_validate || !slave_can_set_ns_maddr(bond, slave))
+ return;
+
+ /* remove the previous maddr from the slave */
+ if (!ipv6_addr_any(slot) &&
+ !ndisc_mc_map(slot, slot_maddr, slave->dev, 0))
+ dev_mc_del(slave->dev, slot_maddr);
+
+ /* add the new maddr on the slave if the target is set */
+ if (!ipv6_addr_any(target) &&
+ !ndisc_mc_map(target, target_maddr, slave->dev, 0))
+ dev_mc_add(slave->dev, target_maddr);
+}
+
static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
struct in6_addr *target,
unsigned long last_rx)
@@ -1243,8 +1306,10 @@ static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
struct slave *slave;
if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) {
- bond_for_each_slave(bond, slave, iter)
+ bond_for_each_slave(bond, slave, iter) {
slave->target_last_arp_rx[slot] = last_rx;
+ slave_set_ns_maddr(bond, slave, target, &targets[slot]);
+ }
targets[slot] = *target;
}
}
@@ -1296,15 +1361,30 @@ static int bond_option_ns_ip6_targets_set(struct bonding *bond,
{
return -EPERM;
}
+
+static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add) {}
+
+void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave) {}
+
+void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave) {}
#endif
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
+ bool changed = !!bond->params.arp_validate != !!newval->value;
+ struct list_head *iter;
+ struct slave *slave;
+
netdev_dbg(bond->dev, "Setting arp_validate to %s (%llu)\n",
newval->string, newval->value);
bond->params.arp_validate = newval->value;
+ if (changed) {
+ bond_for_each_slave(bond, slave, iter)
+ slave_set_ns_maddrs(bond, slave, !!bond->params.arp_validate);
+ }
+
return 0;
}
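ndisc_mc_map() translates an IPv6 address into the link-layer multicast address its NS/NA traffic will arrive on, and dev_mc_add()/dev_mc_del() keep the slave's multicast filter in sync with it. A minimal sketch of the mapping step, assuming a target address and the slave's net_device:

char maddr[MAX_ADDR_LEN];

if (!ndisc_mc_map(&target, maddr, slave_dev, 0))	/* 0 on success */
	dev_mc_add(slave_dev, maddr);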
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b83df5f94b1f..f1d088168723 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -907,7 +907,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
int i, err = 0, vector = 0, free_vector = 0;
err = request_irq(adapter->msix_entries[vector].vector,
- igb_msix_other, IRQF_NO_THREAD, netdev->name, adapter);
+ igb_msix_other, 0, netdev->name, adapter);
if (err)
goto err_out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index dcfccaaa8d91..92d5cfec3dc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -866,7 +866,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
return 0;
err_rule:
- mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, attr, zone_rule->mh);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
err_mod_hdr:
kfree(attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index d61be26a4df1..3db31cc10719 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -660,7 +660,7 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
while (remaining > 0) {
skb_frag_t *frag = &record->frags[i];
- get_page(skb_frag_page(frag));
+ page_ref_inc(skb_frag_page(frag));
remaining -= skb_frag_size(frag);
info->frags[i++] = *frag;
}
@@ -763,7 +763,7 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
stats = sq->stats;
mlx5e_tx_dma_unmap(sq->pdev, dma);
- put_page(wi->resync_dump_frag_page);
+ page_ref_dec(wi->resync_dump_frag_page);
stats->tls_dump_packets++;
stats->tls_dump_bytes += wi->num_bytes;
}
@@ -816,12 +816,12 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
err_out:
for (; i < info.nr_frags; i++)
- /* The put_page() here undoes the page ref obtained in tx_sync_info_get().
+ /* The page_ref_dec() here undoes the page ref obtained in tx_sync_info_get().
* Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
* released only upon their completions (or in mlx5e_free_txqsq_descs,
* if channel closes).
*/
- put_page(skb_frag_page(&info.frags[i]));
+ page_ref_dec(skb_frag_page(&info.frags[i]));
return MLX5E_KTLS_SYNC_FAIL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index e601324a690a..13a3fa8dc0cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4267,7 +4267,8 @@ void mlx5e_set_xdp_feature(struct net_device *netdev)
struct mlx5e_params *params = &priv->channels.params;
xdp_features_t val;
- if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
+ if (!netdev->netdev_ops->ndo_bpf ||
+ params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
xdp_clear_features_flag(netdev);
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 5bf8318cc48b..1d60465cc2ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -36,6 +36,7 @@
#include "en.h"
#include "en/port.h"
#include "eswitch.h"
+#include "lib/mlx5.h"
static int mlx5e_test_health_info(struct mlx5e_priv *priv)
{
@@ -247,6 +248,9 @@ static int mlx5e_cond_loopback(struct mlx5e_priv *priv)
if (is_mdev_switchdev_mode(priv->mdev))
return -EOPNOTSUPP;
+ if (mlx5_get_sd(priv->mdev))
+ return -EOPNOTSUPP;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f24f91d213f2..8cf61ae8b89d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2527,8 +2527,11 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
- REP_LOADED, REP_REGISTERED) == REP_LOADED)
+ REP_LOADED, REP_REGISTERED) == REP_LOADED) {
+ if (rep_type == REP_ETH)
+ __esw_offloads_unload_rep(esw, rep, REP_IB);
esw->offloads.rep_ops[rep_type]->unload(rep);
+ }
}
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 8505d5e241e1..6e4f8aaf8d2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2105,13 +2105,22 @@ lookup_fte_locked(struct mlx5_flow_group *g,
fte_tmp = NULL;
goto out;
}
+
+ nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+
if (!fte_tmp->node.active) {
+ up_write_ref_node(&fte_tmp->node, false);
+
+ if (take_write)
+ up_write_ref_node(&g->node, false);
+ else
+ up_read_ref_node(&g->node);
+
tree_put_node(&fte_tmp->node, false);
- fte_tmp = NULL;
- goto out;
+
+ return NULL;
}
- nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
if (take_write)
up_write_ref_node(&g->node, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 81a9232a03e1..7db9cab9bedf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -593,9 +593,11 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
kvfree(pool);
}
-static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
+static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec,
+ bool dynamic_vec)
{
struct mlx5_irq_table *table = dev->priv.irq_table;
+ int sf_vec_available = sf_vec;
int num_sf_ctrl;
int err;
@@ -616,6 +618,13 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
num_sf_ctrl = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
MLX5_SFS_PER_CTRL_IRQ);
num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl);
+ if (!dynamic_vec && (num_sf_ctrl + 1) > sf_vec_available) {
+ mlx5_core_dbg(dev,
+ "Not enough IRQs for SFs control and completion pool, required=%d avail=%d\n",
+ num_sf_ctrl + 1, sf_vec_available);
+ return 0;
+ }
+
table->sf_ctrl_pool = irq_pool_alloc(dev, pcif_vec, num_sf_ctrl,
"mlx5_sf_ctrl",
MLX5_EQ_SHARE_IRQ_MIN_CTRL,
@@ -624,9 +633,11 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
err = PTR_ERR(table->sf_ctrl_pool);
goto err_pf;
}
- /* init sf_comp_pool */
+ sf_vec_available -= num_sf_ctrl;
+
+ /* init sf_comp_pool; the remaining vectors are for SF completions */
table->sf_comp_pool = irq_pool_alloc(dev, pcif_vec + num_sf_ctrl,
- sf_vec - num_sf_ctrl, "mlx5_sf_comp",
+ sf_vec_available, "mlx5_sf_comp",
MLX5_EQ_SHARE_IRQ_MIN_COMP,
MLX5_EQ_SHARE_IRQ_MAX_COMP);
if (IS_ERR(table->sf_comp_pool)) {
@@ -715,6 +726,7 @@ int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
int mlx5_irq_table_create(struct mlx5_core_dev *dev)
{
int num_eqs = mlx5_max_eq_cap_get(dev);
+ bool dynamic_vec;
int total_vec;
int pcif_vec;
int req_vec;
@@ -724,21 +736,31 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
if (mlx5_core_is_sf(dev))
return 0;
+ /* PCI PF vector usage is limited by the number of online CPUs, the
+ * device EQs and the PCI MSI-X capability.
+ */
pcif_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1;
pcif_vec = min_t(int, pcif_vec, num_eqs);
+ pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev));
total_vec = pcif_vec;
if (mlx5_sf_max_functions(dev))
total_vec += MLX5_MAX_MSIX_PER_SF * mlx5_sf_max_functions(dev);
total_vec = min_t(int, total_vec, pci_msix_vec_count(dev->pdev));
- pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev));
req_vec = pci_msix_can_alloc_dyn(dev->pdev) ? 1 : total_vec;
n = pci_alloc_irq_vectors(dev->pdev, 1, req_vec, PCI_IRQ_MSIX);
if (n < 0)
return n;
- err = irq_pools_init(dev, total_vec - pcif_vec, pcif_vec);
+ /* Further limit the pools' vectors based on the platform for the non-dynamic case */
+ dynamic_vec = pci_msix_can_alloc_dyn(dev->pdev);
+ if (!dynamic_vec) {
+ pcif_vec = min_t(int, n, pcif_vec);
+ total_vec = min_t(int, n, total_vec);
+ }
+
+ err = irq_pools_init(dev, total_vec - pcif_vec, pcif_vec, dynamic_vec);
if (err)
pci_free_irq_vectors(dev->pdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index d68f0c4e7835..9739bc9867c5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -108,7 +108,12 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (IS_ERR(dwmac->tx_clk))
return PTR_ERR(dwmac->tx_clk);
- clk_prepare_enable(dwmac->tx_clk);
+ ret = clk_prepare_enable(dwmac->tx_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to enable tx_clk\n");
+ return ret;
+ }
/* Check and configure TX clock rate */
rate = clk_get_rate(dwmac->tx_clk);
@@ -119,7 +124,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to set tx_clk\n");
- return ret;
+ goto err_tx_clk_disable;
}
}
}
@@ -133,7 +138,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to set clk_ptp_ref\n");
- return ret;
+ goto err_tx_clk_disable;
}
}
}
@@ -149,12 +154,15 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
}
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret) {
- clk_disable_unprepare(dwmac->tx_clk);
- return ret;
- }
+ if (ret)
+ goto err_tx_clk_disable;
return 0;
+
+err_tx_clk_disable:
+ if (dwmac->data->tx_clk_en)
+ clk_disable_unprepare(dwmac->tx_clk);
+ return ret;
}
static void intel_eth_plat_remove(struct platform_device *pdev)
@@ -162,7 +170,8 @@ static void intel_eth_plat_remove(struct platform_device *pdev)
struct intel_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
stmmac_pltfr_remove(pdev);
- clk_disable_unprepare(dwmac->tx_clk);
+ if (dwmac->data->tx_clk_en)
+ clk_disable_unprepare(dwmac->tx_clk);
}
static struct platform_driver intel_eth_plat_driver = {
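The probe path now follows the usual goto-unwind convention, so every failure after clk_prepare_enable() disables the clock exactly once. A minimal sketch of the shape, assuming a struct clk *clk in scope and a hypothetical do_next_step():

static int my_probe(struct platform_device *pdev)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;	/* nothing to unwind yet */

	ret = do_next_step();	/* hypothetical */
	if (ret)
		goto err_clk_disable;

	return 0;

err_clk_disable:
	clk_disable_unprepare(clk);
	return ret;
}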
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 2a9132d6d743..001857c294fb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -589,9 +589,9 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
plat->mac_interface = priv_plat->phy_mode;
if (priv_plat->mac_wol)
- plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
- else
plat->flags &= ~STMMAC_FLAG_USE_PHY_WOL;
+ else
+ plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
plat->riwt_off = 1;
plat->maxmtu = ETH_DATA_LEN;
plat->host_dma_width = priv_plat->variant->dma_bit_mask;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 5c20ceb164df..fe2fd1bfc904 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -16,6 +16,7 @@
#include <linux/if_hsr.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -411,6 +412,8 @@ static int prueth_perout_enable(void *clockops_data,
struct prueth_emac *emac = clockops_data;
u32 reduction_factor = 0, offset = 0;
struct timespec64 ts;
+ u64 current_cycle;
+ u64 start_offset;
u64 ns_period;
if (!on)
@@ -449,8 +452,14 @@ static int prueth_perout_enable(void *clockops_data,
writel(reduction_factor, emac->prueth->shram.va +
TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);
- writel(0, emac->prueth->shram.va +
- TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
+ current_cycle = icssg_read_time(emac->prueth->shram.va +
+ TIMESYNC_FW_WC_CYCLECOUNT_OFFSET);
+
+	/* Round the current cycle count up to the start of the next second */
+ start_offset = roundup(current_cycle, MSEC_PER_SEC);
+
+ hi_lo_writeq(start_offset, emac->prueth->shram.va +
+ TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
return 0;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 8722bb4a268a..f5c1d473e9f9 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -330,6 +330,18 @@ static inline int prueth_emac_slice(struct prueth_emac *emac)
extern const struct ethtool_ops icssg_ethtool_ops;
extern const struct dev_pm_ops prueth_dev_pm_ops;
+static inline u64 icssg_read_time(const void __iomem *addr)
+{
+ u32 low, high;
+
+ do {
+ high = readl(addr + 4);
+ low = readl(addr);
+ } while (high != readl(addr + 4));
+
+ return low + ((u64)high << 32);
+}
+
/* Classifier helpers */
void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac);
void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac);
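
icssg_read_time() above is the standard high/low re-read loop for sampling a 64-bit free-running counter through two 32-bit registers: if the high word changed between the two reads, a carry propagated mid-sample and the read is retried. A user-space sketch of the same loop against a simulated register pair (no MMIO involved):

#include <stdint.h>
#include <stdio.h>

/* Simulated pair of 32-bit counter halves, as two register reads would see */
static uint32_t reg_lo, reg_hi;

static uint64_t read_time64(void)
{
	uint32_t lo, hi;

	do {
		hi = reg_hi;		/* readl(addr + 4) */
		lo = reg_lo;		/* readl(addr)     */
	} while (hi != reg_hi);		/* retry if a carry hit mid-read */

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	reg_lo = 0xfffffff0u;
	reg_hi = 0x12345678u;
	printf("counter = 0x%llx\n", (unsigned long long)read_time64());
	return 0;
}
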
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index 2c37957478fb..89dc4c401a8d 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -437,13 +437,15 @@ static void mse102x_tx_work(struct work_struct *work)
mse = &mses->mse102x;
while ((txb = skb_dequeue(&mse->txq))) {
+ unsigned int len = max_t(unsigned int, txb->len, ETH_ZLEN);
+
mutex_lock(&mses->lock);
ret = mse102x_tx_pkt_spi(mse, txb, work_timeout);
mutex_unlock(&mses->lock);
if (ret) {
mse->ndev->stats.tx_dropped++;
} else {
- mse->ndev->stats.tx_bytes += txb->len;
+ mse->ndev->stats.tx_bytes += len;
mse->ndev->stats.tx_packets++;
}
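
The fix above accounts tx_bytes at the on-wire length, since the hardware pads frames shorter than the 60-byte Ethernet minimum. The accounting rule in isolation, as a small stand-alone sketch:

#include <stdio.h>

#define ETH_ZLEN 60	/* minimum Ethernet frame length, excluding FCS */

/* On-wire length of a transmitted frame: short frames are padded */
static unsigned int wire_len(unsigned int skb_len)
{
	return skb_len > ETH_ZLEN ? skb_len : ETH_ZLEN;
}

int main(void)
{
	printf("42-byte skb counts as %u bytes\n", wire_len(42));
	printf("1500-byte skb counts as %u bytes\n", wire_len(1500));
	return 0;
}
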
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 4309317de3d1..3e9957b6aa14 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -78,7 +78,7 @@ struct phylink {
unsigned int pcs_neg_mode;
unsigned int pcs_state;
- bool mac_link_dropped;
+ bool link_failed;
bool using_mac_select_pcs;
struct sfp_bus *sfp_bus;
@@ -1475,9 +1475,9 @@ static void phylink_resolve(struct work_struct *w)
cur_link_state = pl->old_link_state;
if (pl->phylink_disable_state) {
- pl->mac_link_dropped = false;
+ pl->link_failed = false;
link_state.link = false;
- } else if (pl->mac_link_dropped) {
+ } else if (pl->link_failed) {
link_state.link = false;
retrigger = true;
} else {
@@ -1572,7 +1572,7 @@ static void phylink_resolve(struct work_struct *w)
phylink_link_up(pl, link_state);
}
if (!link_state.link && retrigger) {
- pl->mac_link_dropped = false;
+ pl->link_failed = false;
queue_work(system_power_efficient_wq, &pl->resolve);
}
mutex_unlock(&pl->state_mutex);
@@ -1835,6 +1835,8 @@ static void phylink_phy_change(struct phy_device *phydev, bool up)
pl->phy_state.pause |= MLO_PAUSE_RX;
pl->phy_state.interface = phydev->interface;
pl->phy_state.link = up;
+ if (!up)
+ pl->link_failed = true;
mutex_unlock(&pl->state_mutex);
phylink_run_resolve(pl);
@@ -2158,7 +2160,7 @@ EXPORT_SYMBOL_GPL(phylink_disconnect_phy);
static void phylink_link_changed(struct phylink *pl, bool up, const char *what)
{
if (!up)
- pl->mac_link_dropped = true;
+ pl->link_failed = true;
phylink_run_resolve(pl);
phylink_dbg(pl, "%s link %s\n", what, up ? "up" : "down");
}
@@ -2792,7 +2794,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
* link will cycle.
*/
if (manual_changed) {
- pl->mac_link_dropped = true;
+ pl->link_failed = true;
phylink_run_resolve(pl);
}
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index b1387dc459a3..7cd1102a8d2c 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -649,7 +649,7 @@ static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
found = apple_nvme_poll_cq(q, &iob);
- if (!rq_list_empty(iob.req_list))
+ if (!rq_list_empty(&iob.req_list))
apple_nvme_complete_batch(&iob);
return found;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 855b42c92284..1a8d32a4a5c3 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -42,6 +42,8 @@ struct nvme_ns_info {
bool is_readonly;
bool is_ready;
bool is_removed;
+ bool is_rotational;
+ bool no_vwc;
};
unsigned int admin_timeout = 60;
@@ -1639,6 +1641,8 @@ static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
info->is_ready = id->nstat & NVME_NSTAT_NRDY;
+ info->is_rotational = id->nsfeat & NVME_NS_ROTATIONAL;
+ info->no_vwc = id->nsfeat & NVME_NS_VWC_NOT_PRESENT;
}
kfree(id);
return ret;
@@ -2185,11 +2189,14 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
ns->head->ids.csi == NVME_CSI_ZNS)
nvme_update_zone_info(ns, &lim, &zi);
- if (ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+ if ((ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT) && !info->no_vwc)
lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
else
lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
+ if (info->is_rotational)
+ lim.features |= BLK_FEAT_ROTATIONAL;
+
/*
* Register a metadata profile for PI, or the plain non-integrity NVMe
* metadata masquerading as Type 0 if supported, otherwise reject block
@@ -3636,6 +3643,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
head->ns_id = info->nsid;
head->ids = info->ids;
head->shared = info->is_shared;
+ head->rotational = info->is_rotational;
ratelimit_state_init(&head->rs_nuse, 5 * HZ, 1);
ratelimit_set_flags(&head->rs_nuse, RATELIMIT_MSG_ON_RELEASE);
kref_init(&head->ref);
@@ -4017,7 +4025,7 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns_info info = { .nsid = nsid };
struct nvme_ns *ns;
- int ret;
+ int ret = 1;
if (nvme_identify_ns_descs(ctrl, &info))
return;
@@ -4034,9 +4042,10 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
* set up a namespace. If not fall back to the legacy version.
*/
if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
- (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS))
+ (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS) ||
+ ctrl->vs >= NVME_VS(2, 0, 0))
ret = nvme_ns_info_from_id_cs_indep(ctrl, &info);
- else
+ if (ret > 0)
ret = nvme_ns_info_from_identify(ctrl, &info);
if (info.is_removed)
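
The hunk above seeds ret with a positive sentinel so the legacy Identify path runs both when the CS-independent path was never attempted and when it returned a positive status. A minimal sketch of that fallback shape, with hypothetical probe functions in place of the Identify calls:

#include <stdio.h>

/* Hypothetical probes: <0 hard error, 0 success, >0 "fall back to legacy" */
static int probe_cs_indep(int supported)
{
	puts("cs-independent identify attempted");
	return supported ? 0 : 1;
}

static int probe_legacy(void)
{
	puts("legacy identify used");
	return 0;
}

static int scan(int use_new_path, int new_path_works)
{
	int ret = 1;	/* sentinel: legacy path not yet ruled out */

	if (use_new_path)
		ret = probe_cs_indep(new_path_works);
	if (ret > 0)	/* never attempted, or a soft failure */
		ret = probe_legacy();
	return ret;
}

int main(void)
{
	scan(0, 0);	/* old controller: legacy only */
	scan(1, 0);	/* new path soft-fails: both run */
	scan(1, 1);	/* new path succeeds: legacy skipped */
	return 0;
}
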
@@ -4895,7 +4904,7 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl)
srcu_idx = srcu_read_lock(&ctrl->srcu);
list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
srcu_read_lock_held(&ctrl->srcu))
- blk_mq_unfreeze_queue(ns->queue);
+ blk_mq_unfreeze_queue_non_owner(ns->queue);
srcu_read_unlock(&ctrl->srcu, srcu_idx);
clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
}
@@ -4940,7 +4949,12 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl)
srcu_idx = srcu_read_lock(&ctrl->srcu);
list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
srcu_read_lock_held(&ctrl->srcu))
- blk_freeze_queue_start(ns->queue);
+ /*
+	 * The typical non_owner use case is the PCI driver, where
+	 * start_freeze is called from the timeout work function but
+	 * unfreeze is done in the reset work context.
+ */
+ blk_freeze_queue_start_non_owner(ns->queue);
srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);
@@ -5036,6 +5050,8 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
+ BUILD_BUG_ON(sizeof(struct nvme_endurance_group_log) != 512);
+ BUILD_BUG_ON(sizeof(struct nvme_rotational_media_log) != 512);
BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512);
@@ -5044,22 +5060,20 @@ static inline void _nvme_check_size(void)
static int __init nvme_core_init(void)
{
+ unsigned int wq_flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS;
int result = -ENOMEM;
_nvme_check_size();
- nvme_wq = alloc_workqueue("nvme-wq",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ nvme_wq = alloc_workqueue("nvme-wq", wq_flags, 0);
if (!nvme_wq)
goto out;
- nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ nvme_reset_wq = alloc_workqueue("nvme-reset-wq", wq_flags, 0);
if (!nvme_reset_wq)
goto destroy_wq;
- nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ nvme_delete_wq = alloc_workqueue("nvme-delete-wq", wq_flags, 0);
if (!nvme_delete_wq)
goto destroy_reset_wq;
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index a96976b22fa7..6522ae16531c 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -114,7 +114,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
static int nvme_map_user_request(struct request *req, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
+ struct io_uring_cmd *ioucmd, unsigned int flags)
{
struct request_queue *q = req->q;
struct nvme_ns *ns = q->queuedata;
@@ -152,8 +152,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
bio_set_dev(bio, bdev);
if (has_metadata) {
- ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len,
- meta_seed);
+ ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len);
if (ret)
goto out_unmap;
}
@@ -170,7 +169,7 @@ out:
static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
- void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
+ void __user *meta_buffer, unsigned meta_len,
u64 *result, unsigned timeout, unsigned int flags)
{
struct nvme_ns *ns = q->queuedata;
@@ -187,7 +186,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
req->timeout = timeout;
if (ubuffer && bufflen) {
ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
- meta_len, meta_seed, NULL, flags);
+ meta_len, NULL, flags);
if (ret)
return ret;
}
@@ -268,7 +267,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.lbatm = cpu_to_le16(io.appmask);
return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
- meta_len, lower_32_bits(io.slba), NULL, 0, 0);
+ meta_len, NULL, 0, 0);
}
static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
@@ -323,7 +322,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
- cmd.metadata_len, 0, &result, timeout, 0);
+ cmd.metadata_len, &result, timeout, 0);
if (status >= 0) {
if (put_user(result, &ucmd->result))
@@ -370,7 +369,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
- cmd.metadata_len, 0, &cmd.result, timeout, flags);
+ cmd.metadata_len, &cmd.result, timeout, flags);
if (status >= 0) {
if (put_user(cmd.result, &ucmd->result))
@@ -402,7 +401,7 @@ struct nvme_uring_cmd_pdu {
static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
struct io_uring_cmd *ioucmd)
{
- return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
+ return io_uring_cmd_to_pdu(ioucmd, struct nvme_uring_cmd_pdu);
}
static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
@@ -507,7 +506,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (d.addr && d.data_len) {
ret = nvme_map_user_request(req, d.addr,
d.data_len, nvme_to_user_ptr(d.metadata),
- d.metadata_len, 0, ioucmd, vec);
+ d.metadata_len, ioucmd, vec);
if (ret)
return ret;
}
@@ -635,8 +634,6 @@ static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
struct nvme_ctrl *ctrl = ns->ctrl;
int ret;
- BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));
-
ret = nvme_uring_cmd_checks(issue_flags);
if (ret)
return ret;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 6a15873055b9..f04cfe3fb936 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -635,8 +635,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
if (head->ids.csi == NVME_CSI_ZNS)
lim.features |= BLK_FEAT_ZONED;
- else
- lim.max_zone_append_sectors = 0;
head->disk = blk_alloc_disk(&lim, ctrl->numa_node);
if (IS_ERR(head->disk))
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 093cb423f536..900719c4c70c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -474,6 +474,7 @@ struct nvme_ns_head {
struct list_head entry;
struct kref ref;
bool shared;
+ bool rotational;
bool passthru_err_log_enabled;
struct nvme_effects_log *effects;
u64 nuse;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4b9fda0b1d9a..5f2e3ad2cc52 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -141,6 +141,7 @@ struct nvme_dev {
struct nvme_ctrl ctrl;
u32 last_ps;
bool hmb;
+ struct sg_table *hmb_sgt;
mempool_t *iod_mempool;
@@ -153,6 +154,7 @@ struct nvme_dev {
/* host memory buffer support: */
u64 host_mem_size;
u32 nr_host_mem_descs;
+ u32 host_mem_descs_size;
dma_addr_t host_mem_descs_dma;
struct nvme_host_mem_buf_desc *host_mem_descs;
void **host_mem_desc_bufs;
@@ -902,11 +904,12 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
+static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist)
{
+ struct request *req;
+
spin_lock(&nvmeq->sq_lock);
- while (!rq_list_empty(*rqlist)) {
- struct request *req = rq_list_pop(rqlist);
+ while ((req = rq_list_pop(rqlist))) {
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
nvme_sq_copy_cmd(nvmeq, &iod->cmd);
@@ -929,33 +932,26 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
}
-static void nvme_queue_rqs(struct request **rqlist)
+static void nvme_queue_rqs(struct rq_list *rqlist)
{
- struct request *req, *next, *prev = NULL;
- struct request *requeue_list = NULL;
-
- rq_list_for_each_safe(rqlist, req, next) {
- struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
-
- if (!nvme_prep_rq_batch(nvmeq, req)) {
- /* detach 'req' and add to remainder list */
- rq_list_move(rqlist, &requeue_list, req, prev);
+ struct rq_list submit_list = { };
+ struct rq_list requeue_list = { };
+ struct nvme_queue *nvmeq = NULL;
+ struct request *req;
- req = prev;
- if (!req)
- continue;
- }
+ while ((req = rq_list_pop(rqlist))) {
+ if (nvmeq && nvmeq != req->mq_hctx->driver_data)
+ nvme_submit_cmds(nvmeq, &submit_list);
+ nvmeq = req->mq_hctx->driver_data;
- if (!next || req->mq_hctx != next->mq_hctx) {
- /* detach rest of list, and submit */
- req->rq_next = NULL;
- nvme_submit_cmds(nvmeq, rqlist);
- *rqlist = next;
- prev = NULL;
- } else
- prev = req;
+ if (nvme_prep_rq_batch(nvmeq, req))
+ rq_list_add_tail(&submit_list, req);
+ else
+ rq_list_add_tail(&requeue_list, req);
}
+ if (nvmeq)
+ nvme_submit_cmds(nvmeq, &submit_list);
*rqlist = requeue_list;
}
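
The rewritten nvme_queue_rqs() walks the incoming list once and flushes the accumulated batch whenever the hardware context changes, so each submission queue lock is taken once per contiguous run. The control flow, sketched over a plain singly linked list (the types and names are illustrative, not the blk-mq rq_list API, and the requeue path is omitted):

#include <stdio.h>
#include <stddef.h>

struct req {
	int queue_id;		/* stand-in for req->mq_hctx->driver_data */
	struct req *next;
};

static void submit_batch(int queue_id, int count)
{
	if (count)
		printf("submit %d request(s) to queue %d\n", count, queue_id);
}

static void queue_rqs(struct req *head)
{
	int cur_queue = -1, batched = 0;

	for (struct req *r = head; r; r = r->next) {
		/* queue changed: flush what was batched so far */
		if (cur_queue >= 0 && r->queue_id != cur_queue) {
			submit_batch(cur_queue, batched);
			batched = 0;
		}
		cur_queue = r->queue_id;
		batched++;	/* assume prep succeeded */
	}
	submit_batch(cur_queue, batched);	/* final flush */
}

int main(void)
{
	struct req c = { 1, NULL }, b = { 0, &c }, a = { 0, &b };

	queue_rqs(&a);
	return 0;
}
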
@@ -1083,7 +1079,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
DEFINE_IO_COMP_BATCH(iob);
if (nvme_poll_cq(nvmeq, &iob)) {
- if (!rq_list_empty(iob.req_list))
+ if (!rq_list_empty(&iob.req_list))
nvme_pci_complete_batch(&iob);
return IRQ_HANDLED;
}
@@ -1951,7 +1947,7 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
return ret;
}
-static void nvme_free_host_mem(struct nvme_dev *dev)
+static void nvme_free_host_mem_multi(struct nvme_dev *dev)
{
int i;
@@ -1966,18 +1962,54 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
kfree(dev->host_mem_desc_bufs);
dev->host_mem_desc_bufs = NULL;
- dma_free_coherent(dev->dev,
- dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
+}
+
+static void nvme_free_host_mem(struct nvme_dev *dev)
+{
+ if (dev->hmb_sgt)
+ dma_free_noncontiguous(dev->dev, dev->host_mem_size,
+ dev->hmb_sgt, DMA_BIDIRECTIONAL);
+ else
+ nvme_free_host_mem_multi(dev);
+
+ dma_free_coherent(dev->dev, dev->host_mem_descs_size,
dev->host_mem_descs, dev->host_mem_descs_dma);
dev->host_mem_descs = NULL;
+ dev->host_mem_descs_size = 0;
dev->nr_host_mem_descs = 0;
}
-static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
+static int nvme_alloc_host_mem_single(struct nvme_dev *dev, u64 size)
+{
+ dev->hmb_sgt = dma_alloc_noncontiguous(dev->dev, size,
+ DMA_BIDIRECTIONAL, GFP_KERNEL, 0);
+ if (!dev->hmb_sgt)
+ return -ENOMEM;
+
+ dev->host_mem_descs = dma_alloc_coherent(dev->dev,
+ sizeof(*dev->host_mem_descs), &dev->host_mem_descs_dma,
+ GFP_KERNEL);
+ if (!dev->host_mem_descs) {
+ dma_free_noncontiguous(dev->dev, dev->host_mem_size,
+ dev->hmb_sgt, DMA_BIDIRECTIONAL);
+ dev->hmb_sgt = NULL;
+ return -ENOMEM;
+ }
+ dev->host_mem_size = size;
+ dev->host_mem_descs_size = sizeof(*dev->host_mem_descs);
+ dev->nr_host_mem_descs = 1;
+
+ dev->host_mem_descs[0].addr =
+ cpu_to_le64(dev->hmb_sgt->sgl->dma_address);
+ dev->host_mem_descs[0].size = cpu_to_le32(size / NVME_CTRL_PAGE_SIZE);
+ return 0;
+}
+
+static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred,
u32 chunk_size)
{
struct nvme_host_mem_buf_desc *descs;
- u32 max_entries, len;
+ u32 max_entries, len, descs_size;
dma_addr_t descs_dma;
int i = 0;
void **bufs;
@@ -1990,8 +2022,9 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
max_entries = dev->ctrl.hmmaxd;
- descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
- &descs_dma, GFP_KERNEL);
+ descs_size = max_entries * sizeof(*descs);
+ descs = dma_alloc_coherent(dev->dev, descs_size, &descs_dma,
+ GFP_KERNEL);
if (!descs)
goto out;
@@ -2020,6 +2053,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
dev->host_mem_size = size;
dev->host_mem_descs = descs;
dev->host_mem_descs_dma = descs_dma;
+ dev->host_mem_descs_size = descs_size;
dev->host_mem_desc_bufs = bufs;
return 0;
@@ -2034,8 +2068,7 @@ out_free_bufs:
kfree(bufs);
out_free_descs:
- dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
- descs_dma);
+ dma_free_coherent(dev->dev, descs_size, descs, descs_dma);
out:
dev->host_mem_descs = NULL;
return -ENOMEM;
@@ -2047,9 +2080,18 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
u64 chunk_size;
+ /*
+ * If there is an IOMMU that can merge pages, try a virtually
+ * non-contiguous allocation for a single segment first.
+ */
+ if (!(PAGE_SIZE & dma_get_merge_boundary(dev->dev))) {
+ if (!nvme_alloc_host_mem_single(dev, preferred))
+ return 0;
+ }
+
/* start big and work our way down */
for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) {
- if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
+ if (!nvme_alloc_host_mem_multi(dev, preferred, chunk_size)) {
if (!min || dev->host_mem_size >= min)
return 0;
nvme_free_host_mem(dev);
@@ -2097,8 +2139,10 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
}
dev_info(dev->ctrl.device,
- "allocated %lld MiB host memory buffer.\n",
- dev->host_mem_size >> ilog2(SZ_1M));
+ "allocated %lld MiB host memory buffer (%u segment%s).\n",
+ dev->host_mem_size >> ilog2(SZ_1M),
+ dev->nr_host_mem_descs,
+ str_plural(dev->nr_host_mem_descs));
}
ret = nvme_set_host_mem(dev, enable_bits);
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 87c437fc070d..ad25ad1e4041 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -228,27 +228,61 @@ static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
static const char *nvme_trace_resv_reg(struct trace_seq *p, u8 *cdw10)
{
+ static const char * const rrega_strs[] = {
+ [0x00] = "register",
+ [0x01] = "unregister",
+ [0x02] = "replace",
+ };
const char *ret = trace_seq_buffer_ptr(p);
u8 rrega = cdw10[0] & 0x7;
u8 iekey = (cdw10[0] >> 3) & 0x1;
u8 ptpl = (cdw10[3] >> 6) & 0x3;
+ const char *rrega_str;
+
+ if (rrega < ARRAY_SIZE(rrega_strs) && rrega_strs[rrega])
+ rrega_str = rrega_strs[rrega];
+ else
+ rrega_str = "reserved";
- trace_seq_printf(p, "rrega=%u, iekey=%u, ptpl=%u",
- rrega, iekey, ptpl);
+ trace_seq_printf(p, "rrega=%u:%s, iekey=%u, ptpl=%u",
+ rrega, rrega_str, iekey, ptpl);
trace_seq_putc(p, 0);
return ret;
}
+static const char * const rtype_strs[] = {
+ [0x00] = "reserved",
+ [0x01] = "write exclusive",
+ [0x02] = "exclusive access",
+ [0x03] = "write exclusive registrants only",
+ [0x04] = "exclusive access registrants only",
+ [0x05] = "write exclusive all registrants",
+ [0x06] = "exclusive access all registrants",
+};
+
static const char *nvme_trace_resv_acq(struct trace_seq *p, u8 *cdw10)
{
+ static const char * const racqa_strs[] = {
+ [0x00] = "acquire",
+ [0x01] = "preempt",
+ [0x02] = "preempt and abort",
+ };
const char *ret = trace_seq_buffer_ptr(p);
u8 racqa = cdw10[0] & 0x7;
u8 iekey = (cdw10[0] >> 3) & 0x1;
u8 rtype = cdw10[1];
+ const char *racqa_str = "reserved";
+ const char *rtype_str = "reserved";
- trace_seq_printf(p, "racqa=%u, iekey=%u, rtype=%u",
- racqa, iekey, rtype);
+ if (racqa < ARRAY_SIZE(racqa_strs) && racqa_strs[racqa])
+ racqa_str = racqa_strs[racqa];
+
+ if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype])
+ rtype_str = rtype_strs[rtype];
+
+ trace_seq_printf(p, "racqa=%u:%s, iekey=%u, rtype=%u:%s",
+ racqa, racqa_str, iekey, rtype, rtype_str);
trace_seq_putc(p, 0);
return ret;
@@ -256,13 +290,25 @@ static const char *nvme_trace_resv_acq(struct trace_seq *p, u8 *cdw10)
static const char *nvme_trace_resv_rel(struct trace_seq *p, u8 *cdw10)
{
+ static const char * const rrela_strs[] = {
+ [0x00] = "release",
+ [0x01] = "clear",
+ };
const char *ret = trace_seq_buffer_ptr(p);
u8 rrela = cdw10[0] & 0x7;
u8 iekey = (cdw10[0] >> 3) & 0x1;
u8 rtype = cdw10[1];
+ const char *rrela_str = "reserved";
+ const char *rtype_str = "reserved";
+
+ if (rrela < ARRAY_SIZE(rrela_strs) && rrela_strs[rrela])
+ rrela_str = rrela_strs[rrela];
+
+ if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype])
+ rtype_str = rtype_strs[rtype];
- trace_seq_printf(p, "rrela=%u, iekey=%u, rtype=%u",
- rrela, iekey, rtype);
+ trace_seq_printf(p, "rrela=%u:%s, iekey=%u, rtype=%u:%s",
+ rrela, rrela_str, iekey, rtype, rtype_str);
trace_seq_putc(p, 0);
return ret;
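
The decoding pattern used throughout these tracepoints (a sparse designated-initializer string table indexed by the field value, with a bounds check and a "reserved" fallback) generalizes to any spec-defined enum. A self-contained sketch:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const rtype_strs[] = {
	[0x01] = "write exclusive",
	[0x02] = "exclusive access",
	/* gaps and out-of-range values both decode as "reserved" */
};

static const char *decode(unsigned int v)
{
	if (v < ARRAY_SIZE(rtype_strs) && rtype_strs[v])
		return rtype_strs[v];
	return "reserved";
}

int main(void)
{
	printf("0x01 -> %s\n", decode(0x01));
	printf("0x00 -> %s\n", decode(0x00));	/* gap in the table */
	printf("0x7f -> %s\n", decode(0x7f));	/* out of range */
	return 0;
}
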
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 9a06f9d98cd6..382949e18c6a 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -111,7 +111,7 @@ void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
lim->features |= BLK_FEAT_ZONED;
lim->max_open_zones = zi->max_open_zones;
lim->max_active_zones = zi->max_active_zones;
- lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
+ lim->max_hw_zone_append_sectors = ns->ctrl->max_zone_append;
lim->chunk_sectors = ns->head->zsze =
nvme_lba_to_sect(ns->head, zi->zone_size);
}
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index c402c44350b2..f2b025bbe10c 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o
obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
- discovery.o io-cmd-file.o io-cmd-bdev.o
+ discovery.o io-cmd-file.o io-cmd-bdev.o pr.o
nvmet-$(CONFIG_NVME_TARGET_DEBUGFS) += debugfs.o
nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 081f0473cd9e..934b401fbc2f 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -71,6 +71,35 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
nvmet_req_complete(req, 0);
}
+static void nvmet_execute_get_supported_log_pages(struct nvmet_req *req)
+{
+ struct nvme_supported_log *logs;
+ u16 status;
+
+ logs = kzalloc(sizeof(*logs), GFP_KERNEL);
+ if (!logs) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ logs->lids[NVME_LOG_SUPPORTED] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_ERROR] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_SMART] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_FW_SLOT] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_CHANGED_NS] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_CMD_EFFECTS] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_ENDURANCE_GROUP] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_ANA] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_FEATURES] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_RMI] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_RESERVATION] = cpu_to_le32(NVME_LIDS_LSUPP);
+
+ status = nvmet_copy_to_sgl(req, 0, logs, sizeof(*logs));
+ kfree(logs);
+out:
+ nvmet_req_complete(req, status);
+}
+
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
struct nvme_smart_log *slog)
{
@@ -130,6 +159,45 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
return NVME_SC_SUCCESS;
}
+static void nvmet_execute_get_log_page_rmi(struct nvmet_req *req)
+{
+ struct nvme_rotational_media_log *log;
+ struct gendisk *disk;
+ u16 status;
+
+ req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
+ req->cmd->get_log_page.lsi));
+ status = nvmet_req_find_ns(req);
+ if (status)
+ goto out;
+
+ if (!req->ns->bdev || bdev_nonrot(req->ns->bdev)) {
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto out;
+ }
+
+ if (req->transfer_len != sizeof(*log)) {
+ status = NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
+ goto out;
+ }
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+	if (!log) {
+		status = NVME_SC_INTERNAL;
+		goto out;
+	}
+
+ log->endgid = req->cmd->get_log_page.lsi;
+ disk = req->ns->bdev->bd_disk;
+ if (disk && disk->ia_ranges)
+ log->numa = cpu_to_le16(disk->ia_ranges->nr_ia_ranges);
+ else
+ log->numa = cpu_to_le16(1);
+
+ status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+ kfree(log);
+out:
+ nvmet_req_complete(req, status);
+}
+
static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
struct nvme_smart_log *log;
@@ -176,6 +244,10 @@ static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
log->iocs[nvme_cmd_read] =
log->iocs[nvme_cmd_flush] =
log->iocs[nvme_cmd_dsm] =
+ log->iocs[nvme_cmd_resv_acquire] =
+ log->iocs[nvme_cmd_resv_register] =
+ log->iocs[nvme_cmd_resv_release] =
+ log->iocs[nvme_cmd_resv_report] =
cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
log->iocs[nvme_cmd_write] =
log->iocs[nvme_cmd_write_zeroes] =
@@ -272,6 +344,49 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
return struct_size(desc, nsids, count);
}
+static void nvmet_execute_get_log_page_endgrp(struct nvmet_req *req)
+{
+ u64 host_reads, host_writes, data_units_read, data_units_written;
+ struct nvme_endurance_group_log *log;
+ u16 status;
+
+ /*
+ * The target driver emulates each endurance group as its own
+ * namespace, reusing the nsid as the endurance group identifier.
+ */
+ req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
+ req->cmd->get_log_page.lsi));
+ status = nvmet_req_find_ns(req);
+ if (status)
+ goto out;
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ if (!req->ns->bdev)
+ goto copy;
+
+ host_reads = part_stat_read(req->ns->bdev, ios[READ]);
+ data_units_read =
+ DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
+ host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
+ data_units_written =
+ DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
+
+ put_unaligned_le64(host_reads, &log->hrc[0]);
+ put_unaligned_le64(data_units_read, &log->dur[0]);
+ put_unaligned_le64(host_writes, &log->hwc[0]);
+ put_unaligned_le64(data_units_written, &log->duw[0]);
+copy:
+ status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+ kfree(log);
+out:
+ nvmet_req_complete(req, status);
+}
+
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
struct nvme_ana_rsp_hdr hdr = { 0, };
@@ -317,12 +432,44 @@ out:
nvmet_req_complete(req, status);
}
+static void nvmet_execute_get_log_page_features(struct nvmet_req *req)
+{
+ struct nvme_supported_features_log *features;
+ u16 status;
+
+ features = kzalloc(sizeof(*features), GFP_KERNEL);
+ if (!features) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ features->fis[NVME_FEAT_NUM_QUEUES] =
+ cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
+ features->fis[NVME_FEAT_KATO] =
+ cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
+ features->fis[NVME_FEAT_ASYNC_EVENT] =
+ cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
+ features->fis[NVME_FEAT_HOST_ID] =
+ cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
+ features->fis[NVME_FEAT_WRITE_PROTECT] =
+ cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
+ features->fis[NVME_FEAT_RESV_MASK] =
+ cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
+
+ status = nvmet_copy_to_sgl(req, 0, features, sizeof(*features));
+ kfree(features);
+out:
+ nvmet_req_complete(req, status);
+}
+
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
return;
switch (req->cmd->get_log_page.lid) {
+ case NVME_LOG_SUPPORTED:
+ return nvmet_execute_get_supported_log_pages(req);
case NVME_LOG_ERROR:
return nvmet_execute_get_log_page_error(req);
case NVME_LOG_SMART:
@@ -338,8 +485,16 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
return nvmet_execute_get_log_changed_ns(req);
case NVME_LOG_CMD_EFFECTS:
return nvmet_execute_get_log_cmd_effects_ns(req);
+ case NVME_LOG_ENDURANCE_GROUP:
+ return nvmet_execute_get_log_page_endgrp(req);
case NVME_LOG_ANA:
return nvmet_execute_get_log_page_ana(req);
+ case NVME_LOG_FEATURES:
+ return nvmet_execute_get_log_page_features(req);
+ case NVME_LOG_RMI:
+ return nvmet_execute_get_log_page_rmi(req);
+ case NVME_LOG_RESERVATION:
+ return nvmet_execute_get_log_page_resv(req);
}
pr_debug("unhandled lid %d on qid %d\n",
req->cmd->get_log_page.lid, req->sq->qid);
@@ -433,7 +588,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
- NVME_CTRL_ONCS_WRITE_ZEROES);
+ NVME_CTRL_ONCS_WRITE_ZEROES |
+ NVME_CTRL_ONCS_RESERVATIONS);
/* XXX: don't report vwc if the underlying device is write through */
id->vwc = NVME_CTRL_VWC_PRESENT;
@@ -467,6 +623,13 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->msdbd = ctrl->ops->msdbd;
+ /*
+	 * The endurance group identifier is 16 bits, so the namespace
+	 * count must not overflow it, since we reuse the nsid as the
+	 * endurance group identifier.
+ */
+ BUILD_BUG_ON(NVMET_MAX_NAMESPACES > USHRT_MAX);
+ id->endgidmax = cpu_to_le16(NVMET_MAX_NAMESPACES);
+
id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
id->anatt = 10; /* random value */
id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
@@ -551,6 +714,21 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
id->nmic = NVME_NS_NMIC_SHARED;
id->anagrpid = cpu_to_le32(req->ns->anagrpid);
+ if (req->ns->pr.enable)
+ id->rescap = NVME_PR_SUPPORT_WRITE_EXCLUSIVE |
+ NVME_PR_SUPPORT_EXCLUSIVE_ACCESS |
+ NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY |
+ NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY |
+ NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS |
+ NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS |
+ NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF;
+
+ /*
+ * Since we don't know any better, every namespace is its own endurance
+ * group.
+ */
+ id->endgid = cpu_to_le16(req->ns->nsid);
+
memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
id->lbaf[0].ds = req->ns->blksize_shift;
@@ -576,7 +754,40 @@ out:
nvmet_req_complete(req, status);
}
-static void nvmet_execute_identify_nslist(struct nvmet_req *req)
+static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
+{
+ u16 min_endgid = le16_to_cpu(req->cmd->identify.cnssid);
+ static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_ns *ns;
+ unsigned long idx;
+ __le16 *list;
+ u16 status;
+ int i = 1;
+
+ list = kzalloc(buf_size, GFP_KERNEL);
+ if (!list) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ if (ns->nsid <= min_endgid)
+ continue;
+
+ list[i++] = cpu_to_le16(ns->nsid);
+ if (i == buf_size / sizeof(__le16))
+ break;
+ }
+
+ list[0] = cpu_to_le16(i - 1);
+ status = nvmet_copy_to_sgl(req, 0, list, buf_size);
+ kfree(list);
+out:
+ nvmet_req_complete(req, status);
+}
+
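
The endurance group list above follows the NVMe identify-list layout: a buffer of 16-bit identifiers in which entry 0 holds the number of valid IDs that follow, listed in ascending order above the requested minimum. A sketch of filling such a buffer, using plain arrays in place of the target's xarray:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BUF_SIZE 4096	/* NVME_IDENTIFY_DATA_SIZE */

static int fill_id_list(uint16_t *list, const uint16_t *ids, int nr_ids,
			uint16_t min_id)
{
	int i = 1;	/* slot 0 is reserved for the count */

	memset(list, 0, BUF_SIZE);
	for (int n = 0; n < nr_ids; n++) {
		if (ids[n] <= min_id)
			continue;
		list[i++] = ids[n];	/* cpu_to_le16() omitted here */
		if (i == BUF_SIZE / (int)sizeof(uint16_t))
			break;
	}
	list[0] = i - 1;	/* number of identifiers returned */
	return i - 1;
}

int main(void)
{
	uint16_t list[BUF_SIZE / sizeof(uint16_t)];
	const uint16_t ids[] = { 1, 2, 3, 7 };

	printf("%d IDs above 2\n", fill_id_list(list, ids, 4, 2));
	return 0;
}
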
+static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
{
static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -606,6 +817,8 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req)
xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
if (ns->nsid <= min_nsid)
continue;
+		if (match_css && ns->csi != req->cmd->identify.csi)
+ continue;
list[i++] = cpu_to_le32(ns->nsid);
if (i == buf_size / sizeof(__le32))
break;
@@ -685,6 +898,56 @@ static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
}
+static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
+{
+ u16 status;
+
+ status = nvmet_req_find_ns(req);
+ if (status)
+ goto out;
+
+ status = nvmet_copy_to_sgl(req, 0, ZERO_PAGE(0),
+ NVME_IDENTIFY_DATA_SIZE);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_id_cs_indep(struct nvmet_req *req)
+{
+ struct nvme_id_ns_cs_indep *id;
+ u16 status;
+
+ status = nvmet_req_find_ns(req);
+ if (status)
+ goto out;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ id->nstat = NVME_NSTAT_NRDY;
+ id->anagrpid = cpu_to_le32(req->ns->anagrpid);
+ id->nmic = NVME_NS_NMIC_SHARED;
+ if (req->ns->readonly)
+ id->nsattr |= NVME_NS_ATTR_RO;
+ if (req->ns->bdev && !bdev_nonrot(req->ns->bdev))
+ id->nsfeat |= NVME_NS_ROTATIONAL;
+ /*
+	 * We need the flush command to flush the file's metadata, so
+	 * report a volatile write cache when the backend is a file,
+	 * even when buffered_io is disabled.
+ */
+ if (req->ns->bdev && !bdev_write_cache(req->ns->bdev))
+ id->nsfeat |= NVME_NS_VWC_NOT_PRESENT;
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+ kfree(id);
+out:
+ nvmet_req_complete(req, status);
+}
+
static void nvmet_execute_identify(struct nvmet_req *req)
{
if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
@@ -698,7 +961,7 @@ static void nvmet_execute_identify(struct nvmet_req *req)
nvmet_execute_identify_ctrl(req);
return;
case NVME_ID_CNS_NS_ACTIVE_LIST:
- nvmet_execute_identify_nslist(req);
+ nvmet_execute_identify_nslist(req, false);
return;
case NVME_ID_CNS_NS_DESC_LIST:
nvmet_execute_identify_desclist(req);
@@ -706,8 +969,8 @@ static void nvmet_execute_identify(struct nvmet_req *req)
case NVME_ID_CNS_CS_NS:
switch (req->cmd->identify.csi) {
case NVME_CSI_NVM:
- /* Not supported */
- break;
+ nvme_execute_identify_ns_nvm(req);
+ return;
case NVME_CSI_ZNS:
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
nvmet_execute_identify_ns_zns(req);
@@ -729,6 +992,15 @@ static void nvmet_execute_identify(struct nvmet_req *req)
break;
}
break;
+ case NVME_ID_CNS_NS_ACTIVE_LIST_CS:
+ nvmet_execute_identify_nslist(req, true);
+ return;
+ case NVME_ID_CNS_NS_CS_INDEP:
+ nvmet_execute_id_cs_indep(req);
+ return;
+ case NVME_ID_CNS_ENDGRP_LIST:
+ nvmet_execute_identify_endgrp_list(req);
+ return;
}
pr_debug("unhandled identify cns %d on qid %d\n",
@@ -861,6 +1133,9 @@ void nvmet_execute_set_features(struct nvmet_req *req)
case NVME_FEAT_WRITE_PROTECT:
status = nvmet_set_feat_write_protect(req);
break;
+ case NVME_FEAT_RESV_MASK:
+ status = nvmet_set_feat_resv_notif_mask(req, cdw11);
+ break;
default:
req->error_loc = offsetof(struct nvme_common_command, cdw10);
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
@@ -959,6 +1234,9 @@ void nvmet_execute_get_features(struct nvmet_req *req)
case NVME_FEAT_WRITE_PROTECT:
status = nvmet_get_feat_write_protect(req);
break;
+ case NVME_FEAT_RESV_MASK:
+ status = nvmet_get_feat_resv_notif_mask(req);
+ break;
default:
req->error_loc =
offsetof(struct nvme_common_command, cdw10);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 685e89b35d33..eeee9e9b854c 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -769,6 +769,32 @@ static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
+static ssize_t nvmet_ns_resv_enable_show(struct config_item *item, char *page)
+{
+ return sysfs_emit(page, "%d\n", to_nvmet_ns(item)->pr.enable);
+}
+
+static ssize_t nvmet_ns_resv_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ bool val;
+
+ if (kstrtobool(page, &val))
+ return -EINVAL;
+
+ mutex_lock(&ns->subsys->lock);
+ if (ns->enabled) {
+ pr_err("the ns:%d is already enabled.\n", ns->nsid);
+ mutex_unlock(&ns->subsys->lock);
+ return -EINVAL;
+ }
+ ns->pr.enable = val;
+ mutex_unlock(&ns->subsys->lock);
+ return count;
+}
+CONFIGFS_ATTR(nvmet_ns_, resv_enable);
+
static struct configfs_attribute *nvmet_ns_attrs[] = {
&nvmet_ns_attr_device_path,
&nvmet_ns_attr_device_nguid,
@@ -777,6 +803,7 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
&nvmet_ns_attr_enable,
&nvmet_ns_attr_buffered_io,
&nvmet_ns_attr_revalidate_size,
+ &nvmet_ns_attr_resv_enable,
#ifdef CONFIG_PCI_P2PDMA
&nvmet_ns_attr_p2pmem,
#endif
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index ed2424f8a396..1f4e9989663b 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -611,6 +611,12 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
if (ret)
goto out_restore_subsys_maxnsid;
+ if (ns->pr.enable) {
+ ret = nvmet_pr_init_ns(ns);
+ if (ret)
+ goto out_remove_from_subsys;
+ }
+
subsys->nr_namespaces++;
nvmet_ns_changed(subsys, ns->nsid);
@@ -620,6 +626,8 @@ out_unlock:
mutex_unlock(&subsys->lock);
return ret;
+out_remove_from_subsys:
+ xa_erase(&subsys->namespaces, ns->nsid);
out_restore_subsys_maxnsid:
subsys->max_nsid = nvmet_max_nsid(subsys);
percpu_ref_exit(&ns->ref);
@@ -663,6 +671,9 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
wait_for_completion(&ns->disable_done);
percpu_ref_exit(&ns->ref);
+ if (ns->pr.enable)
+ nvmet_pr_exit_ns(ns);
+
mutex_lock(&subsys->lock);
subsys->nr_namespaces--;
@@ -754,6 +765,7 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
struct nvmet_ns *ns = req->ns;
+ struct nvmet_pr_per_ctrl_ref *pc_ref = req->pc_ref;
if (!req->sq->sqhd_disabled)
nvmet_update_sq_head(req);
@@ -766,6 +778,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
trace_nvmet_req_complete(req);
req->ops->queue_response(req);
+
+ if (pc_ref)
+ nvmet_pr_put_ns_pc_ref(pc_ref);
if (ns)
nvmet_put_namespace(ns);
}
@@ -929,18 +944,39 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
return ret;
}
+ if (req->ns->pr.enable) {
+ ret = nvmet_parse_pr_cmd(req);
+ if (!ret)
+ return ret;
+ }
+
switch (req->ns->csi) {
case NVME_CSI_NVM:
if (req->ns->file)
- return nvmet_file_parse_io_cmd(req);
- return nvmet_bdev_parse_io_cmd(req);
+ ret = nvmet_file_parse_io_cmd(req);
+ else
+ ret = nvmet_bdev_parse_io_cmd(req);
+ break;
case NVME_CSI_ZNS:
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
- return nvmet_bdev_zns_parse_io_cmd(req);
- return NVME_SC_INVALID_IO_CMD_SET;
+ ret = nvmet_bdev_zns_parse_io_cmd(req);
+ else
+ ret = NVME_SC_INVALID_IO_CMD_SET;
+ break;
default:
- return NVME_SC_INVALID_IO_CMD_SET;
+ ret = NVME_SC_INVALID_IO_CMD_SET;
}
+ if (ret)
+ return ret;
+
+ if (req->ns->pr.enable) {
+ ret = nvmet_pr_check_cmd_access(req);
+ if (ret)
+ return ret;
+
+ ret = nvmet_pr_get_ns_pc_ref(req);
+ }
+ return ret;
}
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
@@ -964,6 +1000,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->ns = NULL;
req->error_loc = NVMET_NO_ERROR_LOC;
req->error_slba = 0;
+ req->pc_ref = NULL;
/* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
@@ -1015,6 +1052,8 @@ EXPORT_SYMBOL_GPL(nvmet_req_init);
void nvmet_req_uninit(struct nvmet_req *req)
{
percpu_ref_put(&req->sq->ref);
+ if (req->pc_ref)
+ nvmet_pr_put_ns_pc_ref(req->pc_ref);
if (req->ns)
nvmet_put_namespace(req->ns);
}
@@ -1383,7 +1422,8 @@ static void nvmet_fatal_error_handler(struct work_struct *work)
}
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
- struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
+ struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
+ uuid_t *hostid)
{
struct nvmet_subsys *subsys;
struct nvmet_ctrl *ctrl;
@@ -1462,6 +1502,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
}
ctrl->cntlid = ret;
+ uuid_copy(&ctrl->hostid, hostid);
+
/*
* Discovery controllers may use some arbitrary high value
* in order to cleanup stale discovery sessions
@@ -1478,6 +1520,9 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
nvmet_start_keep_alive_timer(ctrl);
mutex_lock(&subsys->lock);
+ ret = nvmet_ctrl_init_pr(ctrl);
+ if (ret)
+ goto init_pr_fail;
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
nvmet_setup_p2p_ns_map(ctrl, req);
nvmet_debugfs_ctrl_setup(ctrl);
@@ -1486,6 +1531,10 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
*ctrlp = ctrl;
return 0;
+init_pr_fail:
+ mutex_unlock(&subsys->lock);
+ nvmet_stop_keep_alive_timer(ctrl);
+ ida_free(&cntlid_ida, ctrl->cntlid);
out_free_sqs:
kfree(ctrl->sqs);
out_free_changed_ns_list:
@@ -1504,6 +1553,7 @@ static void nvmet_ctrl_free(struct kref *ref)
struct nvmet_subsys *subsys = ctrl->subsys;
mutex_lock(&subsys->lock);
+ nvmet_ctrl_destroy_pr(ctrl);
nvmet_release_p2p_ns_map(ctrl);
list_del(&ctrl->subsys_entry);
mutex_unlock(&subsys->lock);
@@ -1717,7 +1767,7 @@ static int __init nvmet_init(void)
goto out_free_zbd_work_queue;
nvmet_wq = alloc_workqueue("nvmet-wq",
- WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 0);
if (!nvmet_wq)
goto out_free_buffered_work_queue;
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index c4b2eddd5666..c49904ebb6c2 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -64,6 +64,9 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
case NVME_REG_CSTS:
val = ctrl->csts;
break;
+ case NVME_REG_CRTO:
+ val = NVME_CAP_TIMEOUT(ctrl->csts);
+ break;
default:
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
@@ -245,12 +248,10 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
- le32_to_cpu(c->kato), &ctrl);
+ le32_to_cpu(c->kato), &ctrl, &d->hostid);
if (status)
goto out;
- uuid_copy(&ctrl->hostid, &d->hostid);
-
dhchap_status = nvmet_setup_auth(ctrl);
if (dhchap_status) {
pr_err("Failed to setup authentication, dhchap status %u\n",
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 190f55e6d753..58328b35dc96 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -20,8 +20,9 @@
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>
+#include <linux/kfifo.h>
-#define NVMET_DEFAULT_VS NVME_VS(1, 3, 0)
+#define NVMET_DEFAULT_VS NVME_VS(2, 1, 0)
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
@@ -30,6 +31,7 @@
#define NVMET_MN_MAX_SIZE 40
#define NVMET_SN_MAX_SIZE 20
#define NVMET_FR_MAX_SIZE 8
+#define NVMET_PR_LOG_QUEUE_SIZE 64
/*
* Supported optional AENs:
@@ -56,6 +58,38 @@
#define IPO_IATTR_CONNECT_SQE(x) \
(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
+struct nvmet_pr_registrant {
+ u64 rkey;
+ uuid_t hostid;
+ enum nvme_pr_type rtype;
+ struct list_head entry;
+ struct rcu_head rcu;
+};
+
+struct nvmet_pr {
+ bool enable;
+ unsigned long notify_mask;
+ atomic_t generation;
+ struct nvmet_pr_registrant __rcu *holder;
+ /*
+	 * Reservation commands must execute under mutual exclusion for
+	 * their whole duration. Because 'preempt and abort' waits
+	 * asynchronously for the per-controller percpu_ref to complete
+	 * before finishing, the lock may be released from a different
+	 * context than the one that acquired it, so a semaphore is
+	 * used instead of a mutex.
+ */
+ struct semaphore pr_sem;
+ struct list_head registrant_list;
+};
+
+struct nvmet_pr_per_ctrl_ref {
+ struct percpu_ref ref;
+ struct completion free_done;
+ struct completion confirm_done;
+ uuid_t hostid;
+};
+
struct nvmet_ns {
struct percpu_ref ref;
struct file *bdev_file;
@@ -85,6 +119,8 @@ struct nvmet_ns {
int pi_type;
int metadata_size;
u8 csi;
+ struct nvmet_pr pr;
+ struct xarray pr_per_ctrl_refs;
};
static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
@@ -191,6 +227,13 @@ static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
}
+struct nvmet_pr_log_mgr {
+ struct mutex lock;
+ u64 lost_count;
+ u64 counter;
+ DECLARE_KFIFO(log_queue, struct nvme_pr_log, NVMET_PR_LOG_QUEUE_SIZE);
+};
+
struct nvmet_ctrl {
struct nvmet_subsys *subsys;
struct nvmet_sq **sqs;
@@ -246,6 +289,7 @@ struct nvmet_ctrl {
u8 *dh_key;
size_t dh_keysize;
#endif
+ struct nvmet_pr_log_mgr pr_log_mgr;
};
struct nvmet_subsys {
@@ -396,6 +440,9 @@ struct nvmet_req {
struct work_struct zmgmt_work;
} z;
#endif /* CONFIG_BLK_DEV_ZONED */
+ struct {
+ struct work_struct abort_work;
+ } r;
};
int sg_cnt;
int metadata_sg_cnt;
@@ -412,6 +459,7 @@ struct nvmet_req {
struct device *p2p_client;
u16 error_loc;
u64 error_slba;
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
};
#define NVMET_MAX_MPOOL_BVEC 16
@@ -498,7 +546,8 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
- struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
+ struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
+ uuid_t *hostid);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
const char *hostnqn, u16 cntlid,
struct nvmet_req *req);
@@ -761,4 +810,18 @@ static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
#endif
+int nvmet_pr_init_ns(struct nvmet_ns *ns);
+u16 nvmet_parse_pr_cmd(struct nvmet_req *req);
+u16 nvmet_pr_check_cmd_access(struct nvmet_req *req);
+int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl);
+void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl);
+void nvmet_pr_exit_ns(struct nvmet_ns *ns);
+void nvmet_execute_get_log_page_resv(struct nvmet_req *req);
+u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask);
+u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req);
+u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req);
+static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref)
+{
+ percpu_ref_put(&pc_ref->ref);
+}
#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/pr.c b/drivers/nvme/target/pr.c
new file mode 100644
index 000000000000..25a02b50d9f3
--- /dev/null
+++ b/drivers/nvme/target/pr.c
@@ -0,0 +1,1156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics Persistent Reservation.
+ * Copyright (c) 2024 Guixin Liu, Alibaba Group.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/unaligned.h>
+#include "nvmet.h"
+
+#define NVMET_PR_NOTIFI_MASK_ALL \
+ (1 << NVME_PR_NOTIFY_BIT_REG_PREEMPTED | \
+ 1 << NVME_PR_NOTIFY_BIT_RESV_RELEASED | \
+ 1 << NVME_PR_NOTIFY_BIT_RESV_PREEMPTED)
+
+static inline bool nvmet_pr_parse_ignore_key(u32 cdw10)
+{
+ /* Ignore existing key, bit 03. */
+ return (cdw10 >> 3) & 1;
+}
+
+static inline struct nvmet_ns *nvmet_pr_to_ns(struct nvmet_pr *pr)
+{
+ return container_of(pr, struct nvmet_ns, pr);
+}
+
+static struct nvmet_pr_registrant *
+nvmet_pr_find_registrant(struct nvmet_pr *pr, uuid_t *hostid)
+{
+ struct nvmet_pr_registrant *reg;
+
+ list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
+ if (uuid_equal(&reg->hostid, hostid))
+ return reg;
+ }
+ return NULL;
+}
+
+u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask)
+{
+ u32 nsid = le32_to_cpu(req->cmd->common.nsid);
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_ns *ns;
+ unsigned long idx;
+ u16 status;
+
+ if (mask & ~(NVMET_PR_NOTIFI_MASK_ALL)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw11);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ if (nsid != U32_MAX) {
+ status = nvmet_req_find_ns(req);
+ if (status)
+ return status;
+ if (!req->ns->pr.enable)
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+
+ WRITE_ONCE(req->ns->pr.notify_mask, mask);
+ goto success;
+ }
+
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ if (ns->pr.enable)
+ WRITE_ONCE(ns->pr.notify_mask, mask);
+ }
+
+success:
+ nvmet_set_result(req, mask);
+ return NVME_SC_SUCCESS;
+}
+
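
The Set Features handler above rejects any bit outside the three defined notification bits before applying the mask to one namespace or, for the broadcast NSID, to every PR-enabled namespace. The validation step in isolation; the bit positions follow the NVMe Reservation Notification Mask layout (bits 1 to 3) as used by the constants above, written out numerically here for the sketch:

#include <stdio.h>
#include <stdint.h>

/* Bit positions as composed into NVMET_PR_NOTIFI_MASK_ALL above */
#define BIT_REG_PREEMPTED	1
#define BIT_RESV_RELEASED	2
#define BIT_RESV_PREEMPTED	3

#define MASK_ALL ((1u << BIT_REG_PREEMPTED) | \
		  (1u << BIT_RESV_RELEASED) | \
		  (1u << BIT_RESV_PREEMPTED))

static int set_notif_mask(uint32_t mask)
{
	if (mask & ~MASK_ALL)
		return -1;	/* NVME_SC_INVALID_FIELD in the target */
	/* would be stored with WRITE_ONCE(ns->pr.notify_mask, mask) */
	return 0;
}

int main(void)
{
	printf("0x0e -> %d\n", set_notif_mask(0x0e));	/* all three: ok */
	printf("0x10 -> %d\n", set_notif_mask(0x10));	/* undefined bit */
	return 0;
}
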
+u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req)
+{
+ u16 status;
+
+ status = nvmet_req_find_ns(req);
+ if (status)
+ return status;
+
+ if (!req->ns->pr.enable)
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+
+ nvmet_set_result(req, READ_ONCE(req->ns->pr.notify_mask));
+ return status;
+}
+
+void nvmet_execute_get_log_page_resv(struct nvmet_req *req)
+{
+ struct nvmet_pr_log_mgr *log_mgr = &req->sq->ctrl->pr_log_mgr;
+ struct nvme_pr_log next_log = {0};
+ struct nvme_pr_log log = {0};
+ u16 status = NVME_SC_SUCCESS;
+ u64 lost_count;
+ u64 cur_count;
+ u64 next_count;
+
+ mutex_lock(&log_mgr->lock);
+ if (!kfifo_get(&log_mgr->log_queue, &log))
+ goto out;
+
+ /*
+	 * The kfifo only exposes the next entry, not the last one queued.
+	 * Use the current count and the count from the next log to
+	 * calculate the number of lost logs, handling counter overflow.
+	 * If there is no subsequent log, the number of lost logs equals
+	 * the lost_count tracked in the nvmet_pr_log_mgr.
+ */
+ cur_count = le64_to_cpu(log.count);
+ if (kfifo_peek(&log_mgr->log_queue, &next_log)) {
+ next_count = le64_to_cpu(next_log.count);
+ if (next_count > cur_count)
+ lost_count = next_count - cur_count - 1;
+ else
+ lost_count = U64_MAX - cur_count + next_count - 1;
+ } else {
+ lost_count = log_mgr->lost_count;
+ }
+
+ log.count = cpu_to_le64((cur_count + lost_count) == 0 ?
+ 1 : (cur_count + lost_count));
+ log_mgr->lost_count -= lost_count;
+
+ log.nr_pages = kfifo_len(&log_mgr->log_queue);
+
+out:
+ status = nvmet_copy_to_sgl(req, 0, &log, sizeof(log));
+ mutex_unlock(&log_mgr->lock);
+ nvmet_req_complete(req, status);
+}
+
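
The lost-log arithmetic above must tolerate the 64-bit generation counter wrapping, and the producer skips the value 0, so two adjacent queue entries can straddle U64_MAX. Just that calculation, as a stand-alone sketch:

#include <stdio.h>
#include <stdint.h>

/*
 * Number of log entries dropped between two queued entries, given that
 * the producer's counter wraps around and never uses the value 0.
 */
static uint64_t lost_between(uint64_t cur, uint64_t next)
{
	if (next > cur)
		return next - cur - 1;
	return UINT64_MAX - cur + next - 1;	/* counter wrapped */
}

int main(void)
{
	printf("%llu\n", (unsigned long long)lost_between(5, 6));	/* 0 */
	printf("%llu\n", (unsigned long long)lost_between(5, 9));	/* 3 */
	printf("%llu\n", (unsigned long long)
	       lost_between(UINT64_MAX - 1, 2));  /* 2: lost U64_MAX and 1 */
	return 0;
}
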
+static void nvmet_pr_add_resv_log(struct nvmet_ctrl *ctrl, u8 log_type,
+ u32 nsid)
+{
+ struct nvmet_pr_log_mgr *log_mgr = &ctrl->pr_log_mgr;
+ struct nvme_pr_log log = {0};
+
+ mutex_lock(&log_mgr->lock);
+ log_mgr->counter++;
+ if (log_mgr->counter == 0)
+ log_mgr->counter = 1;
+
+ log.count = cpu_to_le64(log_mgr->counter);
+ log.type = log_type;
+ log.nsid = cpu_to_le32(nsid);
+
+ if (!kfifo_put(&log_mgr->log_queue, log)) {
+ pr_info("a reservation log lost, cntlid:%d, log_type:%d, nsid:%d\n",
+ ctrl->cntlid, log_type, nsid);
+ log_mgr->lost_count++;
+ }
+
+ mutex_unlock(&log_mgr->lock);
+}
+
+static void nvmet_pr_resv_released(struct nvmet_pr *pr, uuid_t *hostid)
+{
+ struct nvmet_ns *ns = nvmet_pr_to_ns(pr);
+ struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
+
+ if (test_bit(NVME_PR_NOTIFY_BIT_RESV_RELEASED, &pr->notify_mask))
+ return;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (!uuid_equal(&ctrl->hostid, hostid) &&
+ nvmet_pr_find_registrant(pr, &ctrl->hostid)) {
+ nvmet_pr_add_resv_log(ctrl,
+ NVME_PR_LOG_RESERVATION_RELEASED, ns->nsid);
+ nvmet_add_async_event(ctrl, NVME_AER_CSS,
+ NVME_AEN_RESV_LOG_PAGE_AVALIABLE,
+ NVME_LOG_RESERVATION);
+ }
+ }
+ mutex_unlock(&subsys->lock);
+}
+
+static void nvmet_pr_send_event_to_host(struct nvmet_pr *pr, uuid_t *hostid,
+ u8 log_type)
+{
+ struct nvmet_ns *ns = nvmet_pr_to_ns(pr);
+ struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (uuid_equal(hostid, &ctrl->hostid)) {
+ nvmet_pr_add_resv_log(ctrl, log_type, ns->nsid);
+ nvmet_add_async_event(ctrl, NVME_AER_CSS,
+ NVME_AEN_RESV_LOG_PAGE_AVALIABLE,
+ NVME_LOG_RESERVATION);
+ }
+ }
+ mutex_unlock(&subsys->lock);
+}
+
+static void nvmet_pr_resv_preempted(struct nvmet_pr *pr, uuid_t *hostid)
+{
+ if (test_bit(NVME_PR_NOTIFY_BIT_RESV_PREEMPTED, &pr->notify_mask))
+ return;
+
+ nvmet_pr_send_event_to_host(pr, hostid,
+ NVME_PR_LOG_RESERVATOIN_PREEMPTED);
+}
+
+static void nvmet_pr_registration_preempted(struct nvmet_pr *pr,
+ uuid_t *hostid)
+{
+ if (test_bit(NVME_PR_NOTIFY_BIT_REG_PREEMPTED, &pr->notify_mask))
+ return;
+
+ nvmet_pr_send_event_to_host(pr, hostid,
+ NVME_PR_LOG_REGISTRATION_PREEMPTED);
+}
+
+static inline void nvmet_pr_set_new_holder(struct nvmet_pr *pr, u8 new_rtype,
+ struct nvmet_pr_registrant *reg)
+{
+ reg->rtype = new_rtype;
+ rcu_assign_pointer(pr->holder, reg);
+}
+
+static u16 nvmet_pr_register(struct nvmet_req *req,
+ struct nvmet_pr_register_data *d)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_pr_registrant *new, *reg;
+ struct nvmet_pr *pr = &req->ns->pr;
+ u16 status = NVME_SC_SUCCESS;
+ u64 nrkey = le64_to_cpu(d->nrkey);
+
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NVME_SC_INTERNAL;
+
+ down(&pr->pr_sem);
+ reg = nvmet_pr_find_registrant(pr, &ctrl->hostid);
+ if (reg) {
+ if (reg->rkey != nrkey)
+ status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ kfree(new);
+ goto out;
+ }
+
+ memset(new, 0, sizeof(*new));
+ INIT_LIST_HEAD(&new->entry);
+ new->rkey = nrkey;
+ uuid_copy(&new->hostid, &ctrl->hostid);
+ list_add_tail_rcu(&new->entry, &pr->registrant_list);
+
+out:
+ up(&pr->pr_sem);
+ return status;
+}
+
+static void nvmet_pr_unregister_one(struct nvmet_pr *pr,
+ struct nvmet_pr_registrant *reg)
+{
+ struct nvmet_pr_registrant *first_reg;
+ struct nvmet_pr_registrant *holder;
+ u8 original_rtype;
+
+ list_del_rcu(&reg->entry);
+
+ holder = rcu_dereference_protected(pr->holder, 1);
+ if (reg != holder)
+ goto out;
+
+ original_rtype = holder->rtype;
+ if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS ||
+ original_rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) {
+ first_reg = list_first_or_null_rcu(&pr->registrant_list,
+ struct nvmet_pr_registrant, entry);
+ if (first_reg)
+ first_reg->rtype = original_rtype;
+ rcu_assign_pointer(pr->holder, first_reg);
+ } else {
+ rcu_assign_pointer(pr->holder, NULL);
+
+ if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_REG_ONLY ||
+ original_rtype == NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY)
+ nvmet_pr_resv_released(pr, &reg->hostid);
+ }
+out:
+ kfree_rcu(reg, rcu);
+}
+
+static u16 nvmet_pr_unregister(struct nvmet_req *req,
+ struct nvmet_pr_register_data *d,
+ bool ignore_key)
+{
+ u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_registrant *reg;
+
+ down(&pr->pr_sem);
+ list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
+ if (uuid_equal(&reg->hostid, &ctrl->hostid)) {
+ if (ignore_key || reg->rkey == le64_to_cpu(d->crkey)) {
+ status = NVME_SC_SUCCESS;
+ nvmet_pr_unregister_one(pr, reg);
+ }
+ break;
+ }
+ }
+ up(&pr->pr_sem);
+
+ return status;
+}
+
+static void nvmet_pr_update_reg_rkey(struct nvmet_pr_registrant *reg,
+ void *attr)
+{
+ reg->rkey = *(u64 *)attr;
+}
+
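+/*
+ * Update a registrant attribute. A non-holder is modified in place; the
+ * holder is replaced with an updated copy via RCU so that readers always
+ * observe a consistent registrant.
+ */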
+static u16 nvmet_pr_update_reg_attr(struct nvmet_pr *pr,
+ struct nvmet_pr_registrant *reg,
+ void (*change_attr)(struct nvmet_pr_registrant *reg,
+ void *attr),
+ void *attr)
+{
+ struct nvmet_pr_registrant *holder;
+ struct nvmet_pr_registrant *new;
+
+ holder = rcu_dereference_protected(pr->holder, 1);
+ if (reg != holder) {
+ change_attr(reg, attr);
+ return NVME_SC_SUCCESS;
+ }
+
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new)
+ return NVME_SC_INTERNAL;
+
+ new->rkey = holder->rkey;
+ new->rtype = holder->rtype;
+ uuid_copy(&new->hostid, &holder->hostid);
+ INIT_LIST_HEAD(&new->entry);
+
+ change_attr(new, attr);
+ list_replace_rcu(&holder->entry, &new->entry);
+ rcu_assign_pointer(pr->holder, new);
+ kfree_rcu(holder, rcu);
+
+ return NVME_SC_SUCCESS;
+}
+
+static u16 nvmet_pr_replace(struct nvmet_req *req,
+ struct nvmet_pr_register_data *d,
+ bool ignore_key)
+{
+ u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_registrant *reg;
+ u64 nrkey = le64_to_cpu(d->nrkey);
+
+ down(&pr->pr_sem);
+ list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
+ if (uuid_equal(&reg->hostid, &ctrl->hostid)) {
+ if (ignore_key || reg->rkey == le64_to_cpu(d->crkey))
+ status = nvmet_pr_update_reg_attr(pr, reg,
+ nvmet_pr_update_reg_rkey,
+ &nrkey);
+ break;
+ }
+ }
+ up(&pr->pr_sem);
+ return status;
+}
+
+static void nvmet_execute_pr_register(struct nvmet_req *req)
+{
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ bool ignore_key = nvmet_pr_parse_ignore_key(cdw10);
+ struct nvmet_pr_register_data *d;
+ u8 reg_act = cdw10 & 0x07; /* Reservation Register Action, bits 02:00 */
+ u16 status;
+
+ d = kmalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
+ if (status)
+ goto free_data;
+
+ switch (reg_act) {
+ case NVME_PR_REGISTER_ACT_REG:
+ status = nvmet_pr_register(req, d);
+ break;
+ case NVME_PR_REGISTER_ACT_UNREG:
+ status = nvmet_pr_unregister(req, d, ignore_key);
+ break;
+ case NVME_PR_REGISTER_ACT_REPLACE:
+ status = nvmet_pr_replace(req, d, ignore_key);
+ break;
+ default:
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
+ break;
+ }
+free_data:
+ kfree(d);
+out:
+ if (!status)
+ atomic_inc(&req->ns->pr.generation);
+ nvmet_req_complete(req, status);
+}
+
+static u16 nvmet_pr_acquire(struct nvmet_req *req,
+ struct nvmet_pr_registrant *reg,
+ u8 rtype)
+{
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_registrant *holder;
+
+ holder = rcu_dereference_protected(pr->holder, 1);
+ if (holder && reg != holder)
+ return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ if (holder && reg == holder) {
+ if (holder->rtype == rtype)
+ return NVME_SC_SUCCESS;
+ return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ }
+
+ nvmet_pr_set_new_holder(pr, rtype, reg);
+ return NVME_SC_SUCCESS;
+}
+
+static void nvmet_pr_confirm_ns_pc_ref(struct percpu_ref *ref)
+{
+ struct nvmet_pr_per_ctrl_ref *pc_ref =
+ container_of(ref, struct nvmet_pr_per_ctrl_ref, ref);
+
+ complete(&pc_ref->confirm_done);
+}
+
+static void nvmet_pr_set_ctrl_to_abort(struct nvmet_req *req, uuid_t *hostid)
+{
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+ struct nvmet_ns *ns = req->ns;
+ unsigned long idx;
+
+ xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
+ if (uuid_equal(&pc_ref->hostid, hostid)) {
+ percpu_ref_kill_and_confirm(&pc_ref->ref,
+ nvmet_pr_confirm_ns_pc_ref);
+ wait_for_completion(&pc_ref->confirm_done);
+ }
+ }
+}
+
+static u16 nvmet_pr_unreg_all_host_by_prkey(struct nvmet_req *req, u64 prkey,
+ uuid_t *send_hostid,
+ bool abort)
+{
+ u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ struct nvmet_pr_registrant *reg, *tmp;
+ struct nvmet_pr *pr = &req->ns->pr;
+ uuid_t hostid;
+
+ list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
+ if (reg->rkey == prkey) {
+ status = NVME_SC_SUCCESS;
+ uuid_copy(&hostid, &reg->hostid);
+ if (abort)
+ nvmet_pr_set_ctrl_to_abort(req, &hostid);
+ nvmet_pr_unregister_one(pr, reg);
+ if (!uuid_equal(&hostid, send_hostid))
+ nvmet_pr_registration_preempted(pr, &hostid);
+ }
+ }
+ return status;
+}
+
+static void nvmet_pr_unreg_all_others_by_prkey(struct nvmet_req *req,
+ u64 prkey,
+ uuid_t *send_hostid,
+ bool abort)
+{
+ struct nvmet_pr_registrant *reg, *tmp;
+ struct nvmet_pr *pr = &req->ns->pr;
+ uuid_t hostid;
+
+ list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
+ if (reg->rkey == prkey &&
+ !uuid_equal(&reg->hostid, send_hostid)) {
+ uuid_copy(&hostid, &reg->hostid);
+ if (abort)
+ nvmet_pr_set_ctrl_to_abort(req, &hostid);
+ nvmet_pr_unregister_one(pr, reg);
+ nvmet_pr_registration_preempted(pr, &hostid);
+ }
+ }
+}
+
+static void nvmet_pr_unreg_all_others(struct nvmet_req *req,
+ uuid_t *send_hostid,
+ bool abort)
+{
+ struct nvmet_pr_registrant *reg, *tmp;
+ struct nvmet_pr *pr = &req->ns->pr;
+ uuid_t hostid;
+
+ list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
+ if (!uuid_equal(&reg->hostid, send_hostid)) {
+ uuid_copy(&hostid, &reg->hostid);
+ if (abort)
+ nvmet_pr_set_ctrl_to_abort(req, &hostid);
+ nvmet_pr_unregister_one(pr, reg);
+ nvmet_pr_registration_preempted(pr, &hostid);
+ }
+ }
+}
+
+static void nvmet_pr_update_holder_rtype(struct nvmet_pr_registrant *reg,
+ void *attr)
+{
+ u8 new_rtype = *(u8 *)attr;
+
+ reg->rtype = new_rtype;
+}
+
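+/*
+ * Preempt: with no current holder, unregister every registrant matching
+ * the provided key. For an all-registrants reservation a zero key makes
+ * the sender the new holder and removes everyone else. A self-preempt
+ * only changes the reservation type. Whenever the holder changes, the
+ * new holder is set before the old registrants are removed.
+ */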
+static u16 nvmet_pr_preempt(struct nvmet_req *req,
+ struct nvmet_pr_registrant *reg,
+ u8 rtype,
+ struct nvmet_pr_acquire_data *d,
+ bool abort)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_registrant *holder;
+ enum nvme_pr_type original_rtype;
+ u64 prkey = le64_to_cpu(d->prkey);
+ u16 status;
+
+ holder = rcu_dereference_protected(pr->holder, 1);
+ if (!holder)
+ return nvmet_pr_unreg_all_host_by_prkey(req, prkey,
+ &ctrl->hostid, abort);
+
+ original_rtype = holder->rtype;
+ if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS ||
+ original_rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) {
+ if (!prkey) {
+ /*
+ * To prevent possible access from other hosts, and to avoid
+ * terminating the holder, set the new holder before
+ * unregistering.
+ */
+ nvmet_pr_set_new_holder(pr, rtype, reg);
+ nvmet_pr_unreg_all_others(req, &ctrl->hostid, abort);
+ return NVME_SC_SUCCESS;
+ }
+ return nvmet_pr_unreg_all_host_by_prkey(req, prkey,
+ &ctrl->hostid, abort);
+ }
+
+ if (holder == reg) {
+ status = nvmet_pr_update_reg_attr(pr, holder,
+ nvmet_pr_update_holder_rtype, &rtype);
+ if (!status && original_rtype != rtype)
+ nvmet_pr_resv_released(pr, &reg->hostid);
+ return status;
+ }
+
+ if (prkey == holder->rkey) {
+ /*
+ * Same as before, set the new holder first.
+ */
+ nvmet_pr_set_new_holder(pr, rtype, reg);
+ nvmet_pr_unreg_all_others_by_prkey(req, prkey, &ctrl->hostid,
+ abort);
+ if (original_rtype != rtype)
+ nvmet_pr_resv_released(pr, &reg->hostid);
+ return NVME_SC_SUCCESS;
+ }
+
+ if (prkey)
+ return nvmet_pr_unreg_all_host_by_prkey(req, prkey,
+ &ctrl->hostid, abort);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+}
+
+static void nvmet_pr_do_abort(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, r.abort_work);
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+ struct nvmet_ns *ns = req->ns;
+ unsigned long idx;
+
+ /*
+ * The target does not support abort; just wait for each
+ * per-controller ref to drop to 0.
+ */
+ xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
+ if (percpu_ref_is_dying(&pc_ref->ref)) {
+ wait_for_completion(&pc_ref->free_done);
+ reinit_completion(&pc_ref->confirm_done);
+ reinit_completion(&pc_ref->free_done);
+ percpu_ref_resurrect(&pc_ref->ref);
+ }
+ }
+
+ up(&ns->pr.pr_sem);
+ nvmet_req_complete(req, NVME_SC_SUCCESS);
+}
+
+static u16 __nvmet_execute_pr_acquire(struct nvmet_req *req,
+ struct nvmet_pr_registrant *reg,
+ u8 acquire_act,
+ u8 rtype,
+ struct nvmet_pr_acquire_data *d)
+{
+ u16 status;
+
+ switch (acquire_act) {
+ case NVME_PR_ACQUIRE_ACT_ACQUIRE:
+ status = nvmet_pr_acquire(req, reg, rtype);
+ goto out;
+ case NVME_PR_ACQUIRE_ACT_PREEMPT:
+ status = nvmet_pr_preempt(req, reg, rtype, d, false);
+ goto inc_gen;
+ case NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT:
+ status = nvmet_pr_preempt(req, reg, rtype, d, true);
+ goto inc_gen;
+ default:
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
+ goto out;
+ }
+inc_gen:
+ if (!status)
+ atomic_inc(&req->ns->pr.generation);
+out:
+ return status;
+}
+
+static void nvmet_execute_pr_acquire(struct nvmet_req *req)
+{
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ bool ignore_key = nvmet_pr_parse_ignore_key(cdw10);
+ /* Reservation type, bits 15:08 */
+ u8 rtype = (u8)((cdw10 >> 8) & 0xff);
+ /* Reservation acquire action, bits 02:00 */
+ u8 acquire_act = cdw10 & 0x07;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_pr_acquire_data *d = NULL;
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_registrant *reg;
+ u16 status = NVME_SC_SUCCESS;
+
+ if (ignore_key ||
+ rtype < NVME_PR_WRITE_EXCLUSIVE ||
+ rtype > NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) {
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto out;
+ }
+
+ d = kmalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
+ if (status)
+ goto free_data;
+
+ status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ down(&pr->pr_sem);
+ list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
+ if (uuid_equal(&reg->hostid, &ctrl->hostid) &&
+ reg->rkey == le64_to_cpu(d->crkey)) {
+ status = __nvmet_execute_pr_acquire(req, reg,
+ acquire_act, rtype, d);
+ break;
+ }
+ }
+
+ if (!status && acquire_act == NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT) {
+ kfree(d);
+ INIT_WORK(&req->r.abort_work, nvmet_pr_do_abort);
+ queue_work(nvmet_wq, &req->r.abort_work);
+ return;
+ }
+
+ up(&pr->pr_sem);
+
+free_data:
+ kfree(d);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static u16 nvmet_pr_release(struct nvmet_req *req,
+ struct nvmet_pr_registrant *reg,
+ u8 rtype)
+{
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_registrant *holder;
+ u8 original_rtype;
+
+ holder = rcu_dereference_protected(pr->holder, 1);
+ if (!holder || reg != holder)
+ return NVME_SC_SUCCESS;
+
+ original_rtype = holder->rtype;
+ if (original_rtype != rtype)
+ return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+
+ rcu_assign_pointer(pr->holder, NULL);
+
+ if (original_rtype != NVME_PR_WRITE_EXCLUSIVE &&
+ original_rtype != NVME_PR_EXCLUSIVE_ACCESS)
+ nvmet_pr_resv_released(pr, &reg->hostid);
+
+ return NVME_SC_SUCCESS;
+}
+
+static void nvmet_pr_clear(struct nvmet_req *req)
+{
+ struct nvmet_pr_registrant *reg, *tmp;
+ struct nvmet_pr *pr = &req->ns->pr;
+
+ rcu_assign_pointer(pr->holder, NULL);
+
+ list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
+ list_del_rcu(&reg->entry);
+ if (!uuid_equal(&req->sq->ctrl->hostid, &reg->hostid))
+ nvmet_pr_resv_preempted(pr, &reg->hostid);
+ kfree_rcu(reg, rcu);
+ }
+
+ atomic_inc(&pr->generation);
+}
+
+static u16 __nvmet_execute_pr_release(struct nvmet_req *req,
+ struct nvmet_pr_registrant *reg,
+ u8 release_act, u8 rtype)
+{
+ switch (release_act) {
+ case NVME_PR_RELEASE_ACT_RELEASE:
+ return nvmet_pr_release(req, reg, rtype);
+ case NVME_PR_RELEASE_ACT_CLEAR:
+ nvmet_pr_clear(req);
+ return NVME_SC_SUCCESS;
+ default:
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
+ }
+}
+
+static void nvmet_execute_pr_release(struct nvmet_req *req)
+{
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ bool ignore_key = nvmet_pr_parse_ignore_key(cdw10);
+ u8 rtype = (u8)((cdw10 >> 8) & 0xff); /* Reservation type, bits 15:08 */
+ u8 release_act = cdw10 & 0x07; /* Reservation release action, bits 02:00 */
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_release_data *d;
+ struct nvmet_pr_registrant *reg;
+ u16 status;
+
+ if (ignore_key) {
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto out;
+ }
+
+ d = kmalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
+ if (status)
+ goto free_data;
+
+ status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ down(&pr->pr_sem);
+ list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
+ if (uuid_equal(&reg->hostid, &ctrl->hostid) &&
+ reg->rkey == le64_to_cpu(d->crkey)) {
+ status = __nvmet_execute_pr_release(req, reg,
+ release_act, rtype);
+ break;
+ }
+ }
+ up(&pr->pr_sem);
+free_data:
+ kfree(d);
+out:
+ nvmet_req_complete(req, status);
+}
+
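+/*
+ * Report: return the generation counter, the reservation type and one
+ * extended controller data structure per registrant. The registrant
+ * count reflects the total even when the host buffer cannot hold all
+ * of the descriptors.
+ */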
+static void nvmet_execute_pr_report(struct nvmet_req *req)
+{
+ u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ u32 num_bytes = 4 * (cdw10 + 1); /* cdw10 is the 0's based dword count */
+ u8 eds = cdw11 & 1; /* Extended data structure, bit 00 */
+ struct nvme_registered_ctrl_ext *ctrl_eds;
+ struct nvme_reservation_status_ext *data;
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_registrant *holder;
+ struct nvmet_pr_registrant *reg;
+ u16 num_ctrls = 0;
+ u16 status;
+ u8 rtype;
+
+ /* The nvmet hostid (uuid_t) is 128 bits, so the extended data structure is required. */
+ if (!eds) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw11);
+ status = NVME_SC_HOST_ID_INCONSIST | NVME_STATUS_DNR;
+ goto out;
+ }
+
+ if (num_bytes < sizeof(struct nvme_reservation_status_ext)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto out;
+ }
+
+ data = kmalloc(num_bytes, GFP_KERNEL);
+ if (!data) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+ memset(data, 0, num_bytes);
+ data->gen = cpu_to_le32(atomic_read(&pr->generation));
+ data->ptpls = 0;
+ ctrl_eds = data->regctl_eds;
+
+ rcu_read_lock();
+ holder = rcu_dereference(pr->holder);
+ rtype = holder ? holder->rtype : 0;
+ data->rtype = rtype;
+
+ list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
+ num_ctrls++;
+ /*
+ * Keep counting so the total number of registrants is returned
+ * even when they do not all fit in the buffer.
+ */
+ if (((void *)ctrl_eds + sizeof(*ctrl_eds)) >
+ ((void *)data + num_bytes))
+ continue;
+ /*
+ * Dynamic controller, set cntlid to 0xffff.
+ */
+ ctrl_eds->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
+ if (rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS ||
+ rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS)
+ ctrl_eds->rcsts = 1;
+ if (reg == holder)
+ ctrl_eds->rcsts = 1;
+ uuid_copy((uuid_t *)&ctrl_eds->hostid, &reg->hostid);
+ ctrl_eds->rkey = cpu_to_le64(reg->rkey);
+ ctrl_eds++;
+ }
+ rcu_read_unlock();
+
+ put_unaligned_le16(num_ctrls, data->regctl);
+ status = nvmet_copy_to_sgl(req, 0, data, num_bytes);
+ kfree(data);
+out:
+ nvmet_req_complete(req, status);
+}
+
+u16 nvmet_parse_pr_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->common.opcode) {
+ case nvme_cmd_resv_register:
+ req->execute = nvmet_execute_pr_register;
+ break;
+ case nvme_cmd_resv_acquire:
+ req->execute = nvmet_execute_pr_acquire;
+ break;
+ case nvme_cmd_resv_release:
+ req->execute = nvmet_execute_pr_release;
+ break;
+ case nvme_cmd_resv_report:
+ req->execute = nvmet_execute_pr_report;
+ break;
+ default:
+ return 1;
+ }
+ return NVME_SC_SUCCESS;
+}
+
+static bool nvmet_is_req_write_cmd_group(struct nvmet_req *req)
+{
+ u8 opcode = req->cmd->common.opcode;
+
+ if (req->sq->qid) {
+ switch (opcode) {
+ case nvme_cmd_flush:
+ case nvme_cmd_write:
+ case nvme_cmd_write_zeroes:
+ case nvme_cmd_dsm:
+ case nvme_cmd_zone_append:
+ case nvme_cmd_zone_mgmt_send:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
+static bool nvmet_is_req_read_cmd_group(struct nvmet_req *req)
+{
+ u8 opcode = req->cmd->common.opcode;
+
+ if (req->sq->qid) {
+ switch (opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_zone_mgmt_recv:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
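+/*
+ * Check a command against the current reservation. The holder always
+ * has access. For other hosts, write-exclusive types conflict with the
+ * write command group, exclusive-access types with both groups, and the
+ * registrants-only/all-registrants variants exempt registered hosts.
+ */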
+u16 nvmet_pr_check_cmd_access(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_pr_registrant *holder;
+ struct nvmet_ns *ns = req->ns;
+ struct nvmet_pr *pr = &ns->pr;
+ u16 status = NVME_SC_SUCCESS;
+
+ rcu_read_lock();
+ holder = rcu_dereference(pr->holder);
+ if (!holder)
+ goto unlock;
+ if (uuid_equal(&ctrl->hostid, &holder->hostid))
+ goto unlock;
+
+ /*
+ * The Reservation command group is checked at execution time,
+ * so allow it here.
+ */
+ switch (holder->rtype) {
+ case NVME_PR_WRITE_EXCLUSIVE:
+ if (nvmet_is_req_write_cmd_group(req))
+ status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ break;
+ case NVME_PR_EXCLUSIVE_ACCESS:
+ if (nvmet_is_req_read_cmd_group(req) ||
+ nvmet_is_req_write_cmd_group(req))
+ status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ break;
+ case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY:
+ case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS:
+ if ((nvmet_is_req_write_cmd_group(req)) &&
+ !nvmet_pr_find_registrant(pr, &ctrl->hostid))
+ status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ break;
+ case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY:
+ case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS:
+ if ((nvmet_is_req_read_cmd_group(req) ||
+ nvmet_is_req_write_cmd_group(req)) &&
+ !nvmet_pr_find_registrant(pr, &ctrl->hostid))
+ status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ break;
+ default:
+ pr_warn("unexpected reservation type: %d\n",
+ holder->rtype);
+ break;
+ }
+
+unlock:
+ rcu_read_unlock();
+ if (status)
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ return status;
+}
+
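+/*
+ * Take the per-controller reference for a command on this namespace so
+ * that a later preempt-and-abort can wait for all of the controller's
+ * commands to complete.
+ */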
+u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req)
+{
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+
+ pc_ref = xa_load(&req->ns->pr_per_ctrl_refs,
+ req->sq->ctrl->cntlid);
+ if (unlikely(!percpu_ref_tryget_live(&pc_ref->ref)))
+ return NVME_SC_INTERNAL;
+ req->pc_ref = pc_ref;
+ return NVME_SC_SUCCESS;
+}
+
+static void nvmet_pr_ctrl_ns_all_cmds_done(struct percpu_ref *ref)
+{
+ struct nvmet_pr_per_ctrl_ref *pc_ref =
+ container_of(ref, struct nvmet_pr_per_ctrl_ref, ref);
+
+ complete(&pc_ref->free_done);
+}
+
+static int nvmet_pr_alloc_and_insert_pc_ref(struct nvmet_ns *ns,
+ unsigned long idx,
+ uuid_t *hostid)
+{
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+ int ret;
+
+ pc_ref = kmalloc(sizeof(*pc_ref), GFP_ATOMIC);
+ if (!pc_ref)
+ return -ENOMEM;
+
+ ret = percpu_ref_init(&pc_ref->ref, nvmet_pr_ctrl_ns_all_cmds_done,
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
+ if (ret)
+ goto free;
+
+ init_completion(&pc_ref->free_done);
+ init_completion(&pc_ref->confirm_done);
+ uuid_copy(&pc_ref->hostid, hostid);
+
+ ret = xa_insert(&ns->pr_per_ctrl_refs, idx, pc_ref, GFP_KERNEL);
+ if (ret)
+ goto exit;
+ return ret;
+exit:
+ percpu_ref_exit(&pc_ref->ref);
+free:
+ kfree(pc_ref);
+ return ret;
+}
+
+int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_subsys *subsys = ctrl->subsys;
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+ struct nvmet_ns *ns = NULL;
+ unsigned long idx;
+ int ret;
+
+ ctrl->pr_log_mgr.counter = 0;
+ ctrl->pr_log_mgr.lost_count = 0;
+ mutex_init(&ctrl->pr_log_mgr.lock);
+ INIT_KFIFO(ctrl->pr_log_mgr.log_queue);
+
+ /*
+ * We hold the subsys lock here: any ns not yet in subsys->namespaces
+ * cannot be enabled and thus has not called nvmet_pr_init_ns()
+ * (see nvmet_ns_enable() for details), so checking ns->pr.enable
+ * is sufficient.
+ */
+ xa_for_each(&subsys->namespaces, idx, ns) {
+ if (ns->pr.enable) {
+ ret = nvmet_pr_alloc_and_insert_pc_ref(ns, ctrl->cntlid,
+ &ctrl->hostid);
+ if (ret)
+ goto free_per_ctrl_refs;
+ }
+ }
+ return 0;
+
+free_per_ctrl_refs:
+ xa_for_each(&subsys->namespaces, idx, ns) {
+ if (ns->pr.enable) {
+ pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
+ if (pc_ref)
+ percpu_ref_exit(&pc_ref->ref);
+ kfree(pc_ref);
+ }
+ }
+ return ret;
+}
+
+void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+ struct nvmet_ns *ns;
+ unsigned long idx;
+
+ kfifo_free(&ctrl->pr_log_mgr.log_queue);
+ mutex_destroy(&ctrl->pr_log_mgr.lock);
+
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ if (ns->pr.enable) {
+ pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
+ if (pc_ref)
+ percpu_ref_exit(&pc_ref->ref);
+ kfree(pc_ref);
+ }
+ }
+}
+
+int nvmet_pr_init_ns(struct nvmet_ns *ns)
+{
+ struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+ struct nvmet_ctrl *ctrl = NULL;
+ unsigned long idx;
+ int ret;
+
+ ns->pr.holder = NULL;
+ atomic_set(&ns->pr.generation, 0);
+ sema_init(&ns->pr.pr_sem, 1);
+ INIT_LIST_HEAD(&ns->pr.registrant_list);
+ ns->pr.notify_mask = 0;
+
+ xa_init(&ns->pr_per_ctrl_refs);
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ ret = nvmet_pr_alloc_and_insert_pc_ref(ns, ctrl->cntlid,
+ &ctrl->hostid);
+ if (ret)
+ goto free_per_ctrl_refs;
+ }
+ return 0;
+
+free_per_ctrl_refs:
+ xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
+ xa_erase(&ns->pr_per_ctrl_refs, idx);
+ percpu_ref_exit(&pc_ref->ref);
+ kfree(pc_ref);
+ }
+ return ret;
+}
+
+void nvmet_pr_exit_ns(struct nvmet_ns *ns)
+{
+ struct nvmet_pr_registrant *reg, *tmp;
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+ struct nvmet_pr *pr = &ns->pr;
+ unsigned long idx;
+
+ list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
+ list_del(&reg->entry);
+ kfree(reg);
+ }
+
+ xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
+ /*
+ * No commands are in flight on this ns, so we can safely
+ * free pc_ref.
+ */
+ pc_ref = xa_erase(&ns->pr_per_ctrl_refs, idx);
+ percpu_ref_exit(&pc_ref->ref);
+ kfree(pc_ref);
+ }
+
+ xa_destroy(&ns->pr_per_ctrl_refs);
+}
diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
index 9a3548179a8e..6dbc7036f2e4 100644
--- a/drivers/nvme/target/trace.c
+++ b/drivers/nvme/target/trace.c
@@ -180,6 +180,106 @@ static const char *nvmet_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
return ret;
}
+static const char *nvmet_trace_resv_reg(struct trace_seq *p, u8 *cdw10)
+{
+ static const char * const rrega_strs[] = {
+ [0x00] = "register",
+ [0x01] = "unregister",
+ [0x02] = "replace",
+ };
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 rrega = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 ptpl = (cdw10[3] >> 6) & 0x3;
+ const char *rrega_str;
+
+ if (rrega < ARRAY_SIZE(rrega_strs) && rrega_strs[rrega])
+ rrega_str = rrega_strs[rrega];
+ else
+ rrega_str = "reserved";
+
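+ /* Renders e.g. as "rrega=0:register, iekey=0, ptpl=0" */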
+ trace_seq_printf(p, "rrega=%u:%s, iekey=%u, ptpl=%u",
+ rrega, rrega_str, iekey, ptpl);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char * const rtype_strs[] = {
+ [0x00] = "reserved",
+ [0x01] = "write exclusive",
+ [0x02] = "exclusive access",
+ [0x03] = "write exclusive registrants only",
+ [0x04] = "exclusive access registrants only",
+ [0x05] = "write exclusive all registrants",
+ [0x06] = "exclusive access all registrants",
+};
+
+static const char *nvmet_trace_resv_acq(struct trace_seq *p, u8 *cdw10)
+{
+ static const char * const racqa_strs[] = {
+ [0x00] = "acquire",
+ [0x01] = "preempt",
+ [0x02] = "preempt and abort",
+ };
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 racqa = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 rtype = cdw10[1];
+ const char *racqa_str = "reserved";
+ const char *rtype_str = "reserved";
+
+ if (racqa < ARRAY_SIZE(racqa_strs) && racqa_strs[racqa])
+ racqa_str = racqa_strs[racqa];
+
+ if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype])
+ rtype_str = rtype_strs[rtype];
+
+ trace_seq_printf(p, "racqa=%u:%s, iekey=%u, rtype=%u:%s",
+ racqa, racqa_str, iekey, rtype, rtype_str);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_resv_rel(struct trace_seq *p, u8 *cdw10)
+{
+ static const char * const rrela_strs[] = {
+ [0x00] = "release",
+ [0x01] = "clear",
+ };
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 rrela = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 rtype = cdw10[1];
+ const char *rrela_str = "reserved";
+ const char *rtype_str = "reserved";
+
+ if (rrela < ARRAY_SIZE(rrela_strs) && rrela_strs[rrela])
+ rrela_str = rrela_strs[rrela];
+
+ if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype])
+ rtype_str = rtype_strs[rtype];
+
+ trace_seq_printf(p, "rrela=%u:%s, iekey=%u, rtype=%u:%s",
+ rrela, rrela_str, iekey, rtype, rtype_str);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_resv_report(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u32 numd = get_unaligned_le32(cdw10);
+ u8 eds = cdw10[4] & 0x1;
+
+ trace_seq_printf(p, "numd=%u, eds=%u", numd, eds);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
u8 opcode, u8 *cdw10)
{
@@ -195,6 +295,14 @@ const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
return nvmet_trace_zone_mgmt_send(p, cdw10);
case nvme_cmd_zone_mgmt_recv:
return nvmet_trace_zone_mgmt_recv(p, cdw10);
+ case nvme_cmd_resv_register:
+ return nvmet_trace_resv_reg(p, cdw10);
+ case nvme_cmd_resv_acquire:
+ return nvmet_trace_resv_acq(p, cdw10);
+ case nvme_cmd_resv_release:
+ return nvmet_trace_resv_rel(p, cdw10);
+ case nvme_cmd_resv_report:
+ return nvmet_trace_resv_report(p, cdw10);
default:
return nvmet_trace_common(p, cdw10);
}
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index af9e13be7678..3aef35b05111 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -537,6 +537,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
u16 status = NVME_SC_SUCCESS;
unsigned int total_len = 0;
struct scatterlist *sg;
+ u32 data_len = nvmet_rw_data_len(req);
struct bio *bio;
int sg_cnt;
@@ -544,6 +545,13 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
return;
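+ /* Reject appends that exceed the device's zone-append limit. */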
+ if (data_len >
+ bdev_max_zone_append_sectors(req->ns->bdev) << SECTOR_SHIFT) {
+ req->error_loc = offsetof(struct nvme_rw_command, length);
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto out;
+ }
+
if (!req->sg_cnt) {
nvmet_req_complete(req, 0);
return;
@@ -576,20 +584,17 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
bio->bi_opf |= REQ_FUA;
for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
- struct page *p = sg_page(sg);
- unsigned int l = sg->length;
- unsigned int o = sg->offset;
- unsigned int ret;
+ unsigned int len = sg->length;
- ret = bio_add_zone_append_page(bio, p, l, o);
- if (ret != sg->length) {
+ if (bio_add_pc_page(bdev_get_queue(bio->bi_bdev), bio,
+ sg_page(sg), len, sg->offset) != len) {
status = NVME_SC_INTERNAL;
goto out_put_bio;
}
- total_len += sg->length;
+ total_len += len;
}
- if (total_len != nvmet_rw_data_len(req)) {
+ if (total_len != data_len) {
status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
goto out_put_bio;
}
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index bab8ba64162f..4e268de351c4 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -284,4 +284,11 @@ config CXL_PMU
If unsure say 'm'.
+config MARVELL_PEM_PMU
+ tristate "MARVELL PEM PMU Support"
+ depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT)
+ help
+ Enable support for PCIe interface performance monitoring
+ on Marvell platforms.
+
endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 8268f38e42c5..de71d2574857 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
obj-$(CONFIG_ARM_DMC620_PMU) += arm_dmc620_pmu.o
obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o
obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o
+obj-$(CONFIG_MARVELL_PEM_PMU) += marvell_pem_pmu.o
obj-$(CONFIG_APPLE_M1_CPU_PMU) += apple_m1_cpu_pmu.o
obj-$(CONFIG_ALIBABA_UNCORE_DRW_PMU) += alibaba_uncore_drw_pmu.o
obj-$(CONFIG_DWC_PCIE_PMU) += dwc_pcie_pmu.o
diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c
index c6ff1bc7d336..99a0ef9817e0 100644
--- a/drivers/perf/alibaba_uncore_drw_pmu.c
+++ b/drivers/perf/alibaba_uncore_drw_pmu.c
@@ -782,7 +782,7 @@ static struct platform_driver ali_drw_pmu_driver = {
.acpi_match_table = ali_drw_acpi_match,
},
.probe = ali_drw_pmu_probe,
- .remove_new = ali_drw_pmu_remove,
+ .remove = ali_drw_pmu_remove,
};
static int __init ali_drw_pmu_init(void)
diff --git a/drivers/perf/amlogic/meson_g12_ddr_pmu.c b/drivers/perf/amlogic/meson_g12_ddr_pmu.c
index 99cc791892bc..f33e9a456e85 100644
--- a/drivers/perf/amlogic/meson_g12_ddr_pmu.c
+++ b/drivers/perf/amlogic/meson_g12_ddr_pmu.c
@@ -379,7 +379,7 @@ MODULE_DEVICE_TABLE(of, meson_ddr_pmu_dt_match);
static struct platform_driver g12_ddr_pmu_driver = {
.probe = g12_ddr_pmu_probe,
- .remove_new = g12_ddr_pmu_remove,
+ .remove = g12_ddr_pmu_remove,
.driver = {
.name = "meson-g12-ddr-pmu",
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index c76bac668dea..1cc3214d6b6d 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1705,7 +1705,7 @@ static struct platform_driver cci_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = cci_pmu_probe,
- .remove_new = cci_pmu_remove,
+ .remove = cci_pmu_remove,
};
module_platform_driver(cci_pmu_driver);
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 5c66b9278862..d5fcea3d4328 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1529,7 +1529,7 @@ static struct platform_driver arm_ccn_driver = {
.suppress_bind_attrs = true,
},
.probe = arm_ccn_probe,
- .remove_new = arm_ccn_remove,
+ .remove = arm_ccn_remove,
};
static int __init arm_ccn_init(void)
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 397a46410f7c..49bd811c6fd6 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -2662,7 +2662,7 @@ static struct platform_driver arm_cmn_driver = {
.acpi_match_table = ACPI_PTR(arm_cmn_acpi_match),
},
.probe = arm_cmn_probe,
- .remove_new = arm_cmn_remove,
+ .remove = arm_cmn_remove,
};
static int __init arm_cmn_init(void)
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
index 2158a5975c90..81e8b97e9353 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.c
+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
@@ -1282,7 +1282,7 @@ static struct platform_driver arm_cspmu_driver = {
.suppress_bind_attrs = true,
},
.probe = arm_cspmu_device_probe,
- .remove_new = arm_cspmu_device_remove,
+ .remove = arm_cspmu_device_remove,
.id_table = arm_cspmu_id,
};
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index 7e5f1d4fca0f..619cf937602f 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -750,7 +750,7 @@ static struct platform_driver dmc620_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = dmc620_pmu_device_probe,
- .remove_new = dmc620_pmu_device_remove,
+ .remove = dmc620_pmu_device_remove,
};
static int __init dmc620_pmu_init(void)
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index f2bd25a3470a..cb4fb59fe04b 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -787,7 +787,7 @@ static struct platform_driver dsu_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = dsu_pmu_device_probe,
- .remove_new = dsu_pmu_device_remove,
+ .remove = dsu_pmu_device_remove,
};
static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index 0afe02f879b4..b5cc11abc962 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -770,18 +770,27 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
int i;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
- /* Clear any unused counters to avoid leaking their contents */
- for_each_andnot_bit(i, cpu_pmu->cntr_mask, cpuc->used_mask,
- ARMPMU_MAX_HWEVENTS) {
- if (i == ARMV8_PMU_CYCLE_IDX)
- write_pmccntr(0);
- else if (i == ARMV8_PMU_INSTR_IDX)
- write_pmicntr(0);
- else
- armv8pmu_write_evcntr(i, 0);
+ if (is_pmuv3p9(cpu_pmu->pmuver)) {
+ u64 mask = 0;
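+
+ /*
+ * PMUv3.9: PMUACR grants EL0 access per counter, so expose only
+ * the counters whose events requested user reads.
+ */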
+ for_each_set_bit(i, cpuc->used_mask, ARMPMU_MAX_HWEVENTS) {
+ if (armv8pmu_event_has_user_read(cpuc->events[i]))
+ mask |= BIT(i);
+ }
+ write_pmuacr(mask);
+ } else {
+ /* Clear any unused counters to avoid leaking their contents */
+ for_each_andnot_bit(i, cpu_pmu->cntr_mask, cpuc->used_mask,
+ ARMPMU_MAX_HWEVENTS) {
+ if (i == ARMV8_PMU_CYCLE_IDX)
+ write_pmccntr(0);
+ else if (i == ARMV8_PMU_INSTR_IDX)
+ write_pmicntr(0);
+ else
+ armv8pmu_write_evcntr(i, 0);
+ }
}
- update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR);
+ update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_UEN);
}
static void armv8pmu_enable_event(struct perf_event *event)
@@ -1364,6 +1373,8 @@ PMUV3_INIT_SIMPLE(armv8_neoverse_v3ae)
PMUV3_INIT_SIMPLE(armv8_nvidia_carmel)
PMUV3_INIT_SIMPLE(armv8_nvidia_denver)
+PMUV3_INIT_SIMPLE(armv8_samsung_mongoose)
+
PMUV3_INIT_MAP_EVENT(armv8_cortex_a35, armv8_a53_map_event)
PMUV3_INIT_MAP_EVENT(armv8_cortex_a53, armv8_a53_map_event)
PMUV3_INIT_MAP_EVENT(armv8_cortex_a57, armv8_a57_map_event)
@@ -1409,6 +1420,7 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "brcm,vulcan-pmu", .data = armv8_brcm_vulcan_pmu_init},
{.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init},
{.compatible = "nvidia,denver-pmu", .data = armv8_nvidia_denver_pmu_init},
+ {.compatible = "samsung,mongoose-pmu", .data = armv8_samsung_mongoose_pmu_init},
{},
};
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index d5fa92ba8373..b1510f660c7a 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -996,7 +996,7 @@ static struct platform_driver smmu_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = smmu_pmu_probe,
- .remove_new = smmu_pmu_remove,
+ .remove = smmu_pmu_remove,
.shutdown = smmu_pmu_shutdown,
};
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 3569050f9cf3..fd5b78732603 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -1280,7 +1280,7 @@ static struct platform_driver arm_spe_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = arm_spe_pmu_device_probe,
- .remove_new = arm_spe_pmu_device_remove,
+ .remove = arm_spe_pmu_device_remove,
};
static int __init arm_spe_pmu_init(void)
diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c
index 43d68b69e630..bee4b5b52ec6 100644
--- a/drivers/perf/cxl_pmu.c
+++ b/drivers/perf/cxl_pmu.c
@@ -354,7 +354,7 @@ static struct attribute *cxl_pmu_event_attrs[] = {
CXL_PMU_EVENT_CXL_ATTR(d2h_req_wowrinvf, CXL_PMU_GID_D2H_REQ, BIT(13)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_wrinv, CXL_PMU_GID_D2H_REQ, BIT(14)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_cacheflushed, CXL_PMU_GID_D2H_REQ, BIT(16)),
- /* CXL rev 3.0 Table 3-20 - D2H Repsonse Encodings */
+ /* CXL rev 3.0 Table 3-20 - D2H Response Encodings */
CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspihiti, CXL_PMU_GID_D2H_RSP, BIT(4)),
CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspvhitv, CXL_PMU_GID_D2H_RSP, BIT(6)),
CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspihitse, CXL_PMU_GID_D2H_RSP, BIT(5)),
@@ -377,12 +377,14 @@ static struct attribute *cxl_pmu_event_attrs[] = {
/* CXL rev 3.0 Table 13-5 directly lists these */
CXL_PMU_EVENT_CXL_ATTR(cachedata_d2h_data, CXL_PMU_GID_CACHE_DATA, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(cachedata_h2d_data, CXL_PMU_GID_CACHE_DATA, BIT(1)),
- /* CXL rev 3.0 Table 3-29 M2S Req Memory Opcodes */
+ /* CXL rev 3.1 Table 3-35 M2S Req Memory Opcodes */
CXL_PMU_EVENT_CXL_ATTR(m2s_req_meminv, CXL_PMU_GID_M2S_REQ, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrd, CXL_PMU_GID_M2S_REQ, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrddata, CXL_PMU_GID_M2S_REQ, BIT(2)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrdfwd, CXL_PMU_GID_M2S_REQ, BIT(3)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memwrfwd, CXL_PMU_GID_M2S_REQ, BIT(4)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrdtee, CXL_PMU_GID_M2S_REQ, BIT(5)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrddatatee, CXL_PMU_GID_M2S_REQ, BIT(6)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memspecrd, CXL_PMU_GID_M2S_REQ, BIT(8)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_meminvnt, CXL_PMU_GID_M2S_REQ, BIT(9)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memcleanevict, CXL_PMU_GID_M2S_REQ, BIT(10)),
@@ -404,10 +406,11 @@ static struct attribute *cxl_pmu_event_attrs[] = {
CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_curblk, CXL_PMU_GID_S2M_BISNP, BIT(4)),
CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_datblk, CXL_PMU_GID_S2M_BISNP, BIT(5)),
CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_invblk, CXL_PMU_GID_S2M_BISNP, BIT(6)),
- /* CXL rev 3.0 Table 3-43 S2M NDR Opcopdes */
+ /* CXL rev 3.1 Table 3-50 S2M NDR Opcodes */
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmp, CXL_PMU_GID_S2M_NDR, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmps, CXL_PMU_GID_S2M_NDR, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmpe, CXL_PMU_GID_S2M_NDR, BIT(2)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmpm, CXL_PMU_GID_S2M_NDR, BIT(3)),
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(4)),
/* CXL rev 3.0 Table 3-46 S2M DRS opcodes */
CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdata, CXL_PMU_GID_S2M_DRS, BIT(0)),
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
index 4ca50f9b6dfe..9cbea9675e21 100644
--- a/drivers/perf/dwc_pcie_pmu.c
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -82,7 +82,6 @@ struct dwc_pcie_pmu {
u16 ras_des_offset;
u32 nr_lanes;
- struct list_head pmu_node;
struct hlist_node cpuhp_node;
struct perf_event *event[DWC_PCIE_EVENT_TYPE_MAX];
int on_cpu;
@@ -107,6 +106,7 @@ struct dwc_pcie_vendor_id {
static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = {
{.vendor_id = PCI_VENDOR_ID_ALIBABA },
+ {.vendor_id = PCI_VENDOR_ID_AMPERE },
{.vendor_id = PCI_VENDOR_ID_QCOM },
{} /* terminator */
};
@@ -203,10 +203,10 @@ static struct attribute *dwc_pcie_pmu_time_event_attrs[] = {
DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x09),
/* Group #1 */
- DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_PCIe_TLP_Data_Payload, 0x20),
- DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_PCIe_TLP_Data_Payload, 0x21),
- DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_CCIX_TLP_Data_Payload, 0x22),
- DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_CCIX_TLP_Data_Payload, 0x23),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_pcie_tlp_data_payload, 0x20),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(rx_pcie_tlp_data_payload, 0x21),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_ccix_tlp_data_payload, 0x22),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(rx_ccix_tlp_data_payload, 0x23),
/*
* Leave it to the user to specify the lane ID to avoid generating
@@ -216,9 +216,9 @@ static struct attribute *dwc_pcie_pmu_time_event_attrs[] = {
DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_update_fc_dllp, 0x601),
DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ack_dllp, 0x602),
DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_update_fc_dllp, 0x603),
- DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nulified_tlp, 0x604),
- DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nulified_tlp, 0x605),
- DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tl, 0x606),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nullified_tlp, 0x604),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nullified_tlp, 0x605),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tlp, 0x606),
DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_write, 0x700),
DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_read, 0x701),
DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_write, 0x702),
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 746b92330ca7..b989ffa95d69 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -846,7 +846,7 @@ static struct platform_driver imx_ddr_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = ddr_perf_probe,
- .remove_new = ddr_perf_remove,
+ .remove = ddr_perf_remove,
};
module_platform_driver(imx_ddr_pmu_driver);
diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
index 69f920b1caf2..3c856d9a4e97 100644
--- a/drivers/perf/fsl_imx9_ddr_perf.c
+++ b/drivers/perf/fsl_imx9_ddr_perf.c
@@ -81,6 +81,10 @@ struct ddr_pmu {
int id;
};
+static const struct imx_ddr_devtype_data imx91_devtype_data = {
+ .identifier = "imx91",
+};
+
static const struct imx_ddr_devtype_data imx93_devtype_data = {
.identifier = "imx93",
};
@@ -100,6 +104,7 @@ static inline bool is_imx95(struct ddr_pmu *pmu)
}
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
+ { .compatible = "fsl,imx91-ddr-pmu", .data = &imx91_devtype_data },
{ .compatible = "fsl,imx93-ddr-pmu", .data = &imx93_devtype_data },
{ .compatible = "fsl,imx95-ddr-pmu", .data = &imx95_devtype_data },
{ /* sentinel */ }
@@ -848,7 +853,7 @@ static struct platform_driver imx_ddr_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = ddr_perf_probe,
- .remove_new = ddr_perf_remove,
+ .remove = ddr_perf_remove,
};
module_platform_driver(imx_ddr_pmu_driver);
diff --git a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
index 0e923f94fa5b..3f3fb1de11f5 100644
--- a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
@@ -358,7 +358,7 @@ static struct platform_driver hisi_cpa_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_cpa_pmu_probe,
- .remove_new = hisi_cpa_pmu_remove,
+ .remove = hisi_cpa_pmu_remove,
};
static int __init hisi_cpa_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index b804e3738113..a6ebf2ec99d3 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -547,7 +547,7 @@ static struct platform_driver hisi_ddrc_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_ddrc_pmu_probe,
- .remove_new = hisi_ddrc_pmu_remove,
+ .remove = hisi_ddrc_pmu_remove,
};
static int __init hisi_ddrc_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 21e69b1cdd4d..32624872596f 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -550,7 +550,7 @@ static struct platform_driver hisi_hha_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_hha_pmu_probe,
- .remove_new = hisi_hha_pmu_remove,
+ .remove = hisi_hha_pmu_remove,
};
static int __init hisi_hha_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 51ba76871097..c235b46ce873 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -584,7 +584,7 @@ static struct platform_driver hisi_l3c_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_l3c_pmu_probe,
- .remove_new = hisi_l3c_pmu_remove,
+ .remove = hisi_l3c_pmu_remove,
};
static int __init hisi_l3c_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
index 3cdb35c741f9..c0f5d7c73e06 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -538,7 +538,7 @@ static struct platform_driver hisi_pa_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_pa_pmu_probe,
- .remove_new = hisi_pa_pmu_remove,
+ .remove = hisi_pa_pmu_remove,
};
static int __init hisi_pa_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
index 765bbd61db26..c5f4764ee888 100644
--- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
@@ -476,7 +476,7 @@ static struct platform_driver hisi_sllc_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_sllc_pmu_probe,
- .remove_new = hisi_sllc_pmu_remove,
+ .remove = hisi_sllc_pmu_remove,
};
static int __init hisi_sllc_pmu_module_init(void)
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
index 94f1ebcd2a27..8860d9f687ae 100644
--- a/drivers/perf/marvell_cn10k_ddr_pmu.c
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -732,7 +732,7 @@ static struct platform_driver cn10k_ddr_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = cn10k_ddr_perf_probe,
- .remove_new = cn10k_ddr_perf_remove,
+ .remove = cn10k_ddr_perf_remove,
};
static int __init cn10k_ddr_pmu_init(void)
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
index 9e635f355470..cda55ee35eee 100644
--- a/drivers/perf/marvell_cn10k_tad_pmu.c
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -383,7 +383,7 @@ static struct platform_driver tad_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = tad_pmu_probe,
- .remove_new = tad_pmu_remove,
+ .remove = tad_pmu_remove,
};
static int tad_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
diff --git a/drivers/perf/marvell_pem_pmu.c b/drivers/perf/marvell_pem_pmu.c
new file mode 100644
index 000000000000..29fbcd1848e4
--- /dev/null
+++ b/drivers/perf/marvell_pem_pmu.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell PEM (PCIe RC) Performance Monitor Driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+
+/*
+ * Each of these events maps to a free-running 64-bit counter
+ * with no event control, but the counters can be reset.
+ */
+enum pem_events {
+ IB_TLP_NPR,
+ IB_TLP_PR,
+ IB_TLP_CPL,
+ IB_TLP_DWORDS_NPR,
+ IB_TLP_DWORDS_PR,
+ IB_TLP_DWORDS_CPL,
+ IB_INFLIGHT,
+ IB_READS,
+ IB_REQ_NO_RO_NCB,
+ IB_REQ_NO_RO_EBUS,
+ OB_TLP_NPR,
+ OB_TLP_PR,
+ OB_TLP_CPL,
+ OB_TLP_DWORDS_NPR,
+ OB_TLP_DWORDS_PR,
+ OB_TLP_DWORDS_CPL,
+ OB_INFLIGHT,
+ OB_READS,
+ OB_MERGES_NPR,
+ OB_MERGES_PR,
+ OB_MERGES_CPL,
+ ATS_TRANS,
+ ATS_TRANS_LATENCY,
+ ATS_PRI,
+ ATS_PRI_LATENCY,
+ ATS_INV,
+ ATS_INV_LATENCY,
+ PEM_EVENTIDS_MAX
+};
+
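+/* Counter register offsets, relative to the PMU base address. */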
+static u64 eventid_to_offset_table[] = {
+ [IB_TLP_NPR] = 0x0,
+ [IB_TLP_PR] = 0x8,
+ [IB_TLP_CPL] = 0x10,
+ [IB_TLP_DWORDS_NPR] = 0x100,
+ [IB_TLP_DWORDS_PR] = 0x108,
+ [IB_TLP_DWORDS_CPL] = 0x110,
+ [IB_INFLIGHT] = 0x200,
+ [IB_READS] = 0x300,
+ [IB_REQ_NO_RO_NCB] = 0x400,
+ [IB_REQ_NO_RO_EBUS] = 0x408,
+ [OB_TLP_NPR] = 0x500,
+ [OB_TLP_PR] = 0x508,
+ [OB_TLP_CPL] = 0x510,
+ [OB_TLP_DWORDS_NPR] = 0x600,
+ [OB_TLP_DWORDS_PR] = 0x608,
+ [OB_TLP_DWORDS_CPL] = 0x610,
+ [OB_INFLIGHT] = 0x700,
+ [OB_READS] = 0x800,
+ [OB_MERGES_NPR] = 0x900,
+ [OB_MERGES_PR] = 0x908,
+ [OB_MERGES_CPL] = 0x910,
+ [ATS_TRANS] = 0x2D18,
+ [ATS_TRANS_LATENCY] = 0x2D20,
+ [ATS_PRI] = 0x2D28,
+ [ATS_PRI_LATENCY] = 0x2D30,
+ [ATS_INV] = 0x2D38,
+ [ATS_INV_LATENCY] = 0x2D40,
+};
+
+struct pem_pmu {
+ struct pmu pmu;
+ void __iomem *base;
+ unsigned int cpu;
+ struct device *dev;
+ struct hlist_node node;
+};
+
+#define to_pem_pmu(p) container_of(p, struct pem_pmu, pmu)
+
+static int eventid_to_offset(int eventid)
+{
+ return eventid_to_offset_table[eventid];
+}
+
+/* Events */
+static ssize_t pem_pmu_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define PEM_EVENT_ATTR(_name, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, pem_pmu_event_show, NULL), \
+ .id = _id, } \
+ })[0].attr.attr)
+
+static struct attribute *pem_perf_events_attrs[] = {
+ PEM_EVENT_ATTR(ib_tlp_npr, IB_TLP_NPR),
+ PEM_EVENT_ATTR(ib_tlp_pr, IB_TLP_PR),
+ PEM_EVENT_ATTR(ib_tlp_cpl_partid, IB_TLP_CPL),
+ PEM_EVENT_ATTR(ib_tlp_dwords_npr, IB_TLP_DWORDS_NPR),
+ PEM_EVENT_ATTR(ib_tlp_dwords_pr, IB_TLP_DWORDS_PR),
+ PEM_EVENT_ATTR(ib_tlp_dwords_cpl_partid, IB_TLP_DWORDS_CPL),
+ PEM_EVENT_ATTR(ib_inflight, IB_INFLIGHT),
+ PEM_EVENT_ATTR(ib_reads, IB_READS),
+ PEM_EVENT_ATTR(ib_req_no_ro_ncb, IB_REQ_NO_RO_NCB),
+ PEM_EVENT_ATTR(ib_req_no_ro_ebus, IB_REQ_NO_RO_EBUS),
+ PEM_EVENT_ATTR(ob_tlp_npr_partid, OB_TLP_NPR),
+ PEM_EVENT_ATTR(ob_tlp_pr_partid, OB_TLP_PR),
+ PEM_EVENT_ATTR(ob_tlp_cpl_partid, OB_TLP_CPL),
+ PEM_EVENT_ATTR(ob_tlp_dwords_npr_partid, OB_TLP_DWORDS_NPR),
+ PEM_EVENT_ATTR(ob_tlp_dwords_pr_partid, OB_TLP_DWORDS_PR),
+ PEM_EVENT_ATTR(ob_tlp_dwords_cpl_partid, OB_TLP_DWORDS_CPL),
+ PEM_EVENT_ATTR(ob_inflight_partid, OB_INFLIGHT),
+ PEM_EVENT_ATTR(ob_reads_partid, OB_READS),
+ PEM_EVENT_ATTR(ob_merges_npr_partid, OB_MERGES_NPR),
+ PEM_EVENT_ATTR(ob_merges_pr_partid, OB_MERGES_PR),
+ PEM_EVENT_ATTR(ob_merges_cpl_partid, OB_MERGES_CPL),
+ PEM_EVENT_ATTR(ats_trans, ATS_TRANS),
+ PEM_EVENT_ATTR(ats_trans_latency, ATS_TRANS_LATENCY),
+ PEM_EVENT_ATTR(ats_pri, ATS_PRI),
+ PEM_EVENT_ATTR(ats_pri_latency, ATS_PRI_LATENCY),
+ PEM_EVENT_ATTR(ats_inv, ATS_INV),
+ PEM_EVENT_ATTR(ats_inv_latency, ATS_INV_LATENCY),
+ NULL
+};
+
+static struct attribute_group pem_perf_events_attr_group = {
+ .name = "events",
+ .attrs = pem_perf_events_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-5");
+
+static struct attribute *pem_perf_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL
+};
+
+static struct attribute_group pem_perf_format_attr_group = {
+ .name = "format",
+ .attrs = pem_perf_format_attrs,
+};
+
+/* cpumask */
+static ssize_t pem_perf_cpumask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pem_pmu *pmu = dev_get_drvdata(dev);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
+}
+
+static struct device_attribute pem_perf_cpumask_attr =
+ __ATTR(cpumask, 0444, pem_perf_cpumask_show, NULL);
+
+static struct attribute *pem_perf_cpumask_attrs[] = {
+ &pem_perf_cpumask_attr.attr,
+ NULL
+};
+
+static struct attribute_group pem_perf_cpumask_attr_group = {
+ .attrs = pem_perf_cpumask_attrs,
+};
+
+static const struct attribute_group *pem_perf_attr_groups[] = {
+ &pem_perf_events_attr_group,
+ &pem_perf_cpumask_attr_group,
+ &pem_perf_format_attr_group,
+ NULL
+};
+
+static int pem_perf_event_init(struct perf_event *event)
+{
+ struct pem_pmu *pmu = to_pem_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct perf_event *sibling;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (event->attr.config >= PEM_EVENTIDS_MAX)
+ return -EINVAL;
+
+ if (is_sampling_event(event) ||
+ event->attach_state & PERF_ATTACH_TASK) {
+ return -EOPNOTSUPP;
+ }
+
+ if (event->cpu < 0)
+ return -EOPNOTSUPP;
+
+ /* We must NOT create groups containing mixed PMUs */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader))
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (sibling->pmu != event->pmu &&
+ !is_software_event(sibling))
+ return -EINVAL;
+ }
+ /*
+ * Set ownership of the event to one CPU; the same event cannot be
+ * observed on multiple CPUs at the same time.
+ */
+ event->cpu = pmu->cpu;
+ hwc->idx = -1;
+ return 0;
+}
+
+static u64 pem_perf_read_counter(struct pem_pmu *pmu,
+ struct perf_event *event, int eventid)
+{
+ return readq_relaxed(pmu->base + eventid_to_offset(eventid));
+}
+
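+/*
+ * Fold the delta since the last read into the event count; the xchg
+ * loop guards against concurrent updates of prev_count.
+ */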
+static void pem_perf_event_update(struct perf_event *event)
+{
+ struct pem_pmu *pmu = to_pem_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev_count, new_count;
+
+ do {
+ prev_count = local64_read(&hwc->prev_count);
+ new_count = pem_perf_read_counter(pmu, event, hwc->idx);
+ } while (local64_xchg(&hwc->prev_count, new_count) != prev_count);
+
+ local64_add((new_count - prev_count), &event->count);
+}
+
+static void pem_perf_event_start(struct perf_event *event, int flags)
+{
+ struct pem_pmu *pmu = to_pem_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int eventid = hwc->idx;
+
+ /*
+ * All counters are free-running, each tracking a fixed
+ * hardware event.
+ */
+ local64_set(&hwc->prev_count,
+ pem_perf_read_counter(pmu, event, eventid));
+
+ hwc->state = 0;
+}
+
+static int pem_perf_event_add(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ hwc->idx = event->attr.config;
+ if (WARN_ON_ONCE(hwc->idx >= PEM_EVENTIDS_MAX))
+ return -EINVAL;
+ hwc->state |= PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ pem_perf_event_start(event, flags);
+
+ return 0;
+}
+
+static void pem_perf_event_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (flags & PERF_EF_UPDATE)
+ pem_perf_event_update(event);
+
+ hwc->state |= PERF_HES_STOPPED;
+}
+
+static void pem_perf_event_del(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ pem_perf_event_stop(event, PERF_EF_UPDATE);
+ hwc->idx = -1;
+}
+
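+/* On CPU hotplug, migrate the PMU context to another online CPU. */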
+static int pem_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct pem_pmu *pmu = hlist_entry_safe(node, struct pem_pmu, node);
+ unsigned int target;
+
+ if (cpu != pmu->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&pmu->pmu, cpu, target);
+ pmu->cpu = target;
+ return 0;
+}
+
+static int pem_perf_probe(struct platform_device *pdev)
+{
+ struct pem_pmu *pem_pmu;
+ struct resource *res;
+ void __iomem *base;
+ char *name;
+ int ret;
+
+ pem_pmu = devm_kzalloc(&pdev->dev, sizeof(*pem_pmu), GFP_KERNEL);
+ if (!pem_pmu)
+ return -ENOMEM;
+
+ pem_pmu->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pem_pmu);
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ pem_pmu->base = base;
+
+ pem_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = pem_perf_attr_groups,
+ .event_init = pem_perf_event_init,
+ .add = pem_perf_event_add,
+ .del = pem_perf_event_del,
+ .start = pem_perf_event_start,
+ .stop = pem_perf_event_stop,
+ .read = pem_perf_event_update,
+ };
+
+ /* Choose this CPU to collect perf data. */
+ pem_pmu->cpu = raw_smp_processor_id();
+
+ name = devm_kasprintf(pem_pmu->dev, GFP_KERNEL, "mrvl_pcie_rc_pmu_%llx",
+ res->start);
+ if (!name)
+ return -ENOMEM;
+
+ cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE,
+ &pem_pmu->node);
+
+ ret = perf_pmu_register(&pem_pmu->pmu, name, -1);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE,
+ &pem_pmu->node);
+ return ret;
+}
+
+static void pem_perf_remove(struct platform_device *pdev)
+{
+ struct pem_pmu *pem_pmu = platform_get_drvdata(pdev);
+
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE,
+ &pem_pmu->node);
+
+ perf_pmu_unregister(&pem_pmu->pmu);
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id pem_pmu_acpi_match[] = {
+ {"MRVL000E", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, pem_pmu_acpi_match);
+#endif
+
+static struct platform_driver pem_pmu_driver = {
+ .driver = {
+ .name = "pem-pmu",
+ .acpi_match_table = ACPI_PTR(pem_pmu_acpi_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = pem_perf_probe,
+ .remove = pem_perf_remove,
+};
+
+static int __init pem_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE,
+ "perf/marvell/pem:online", NULL,
+ pem_pmu_offline_cpu);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&pem_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE);
+ return ret;
+}
+
+static void __exit pem_pmu_exit(void)
+{
+ platform_driver_unregister(&pem_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE);
+}
+
+module_init(pem_pmu_init);
+module_exit(pem_pmu_exit);
+
+MODULE_DESCRIPTION("Marvell PEM Perf driver");
+MODULE_AUTHOR("Gowthami Thiagarajan <gthiagarajan@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 980e3051edd7..ea8c85729937 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -981,7 +981,7 @@ static struct platform_driver l2_cache_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = l2_cache_pmu_probe,
- .remove_new = l2_cache_pmu_remove,
+ .remove = l2_cache_pmu_remove,
};
static int __init register_l2_cache_pmu_driver(void)
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 391ca1422cae..1aa303f76cc7 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -1393,8 +1393,9 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
goto out_unregister;
cpu = get_cpu();
-
ret = pmu_sbi_snapshot_setup(pmu, cpu);
+ put_cpu();
+
if (ret) {
/* Snapshot is an optional feature. Continue if not available */
pmu_sbi_snapshot_free(pmu);
@@ -1408,7 +1409,6 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
*/
static_branch_enable(&sbi_pmu_snapshot_available);
}
- put_cpu();
}
register_sysctl("kernel", sbi_pmu_sysctl_table);
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index faf763d2c95c..cadd60221b8f 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -1010,7 +1010,7 @@ static struct platform_driver tx2_uncore_driver = {
.suppress_bind_attrs = true,
},
.probe = tx2_uncore_probe,
- .remove_new = tx2_uncore_remove,
+ .remove = tx2_uncore_remove,
};
static int __init tx2_uncore_driver_init(void)
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index c01466ae1e3d..33b5497bdc06 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1943,7 +1943,7 @@ static void xgene_pmu_remove(struct platform_device *pdev)
static struct platform_driver xgene_pmu_driver = {
.probe = xgene_pmu_probe,
- .remove_new = xgene_pmu_remove,
+ .remove = xgene_pmu_remove,
.driver = {
.name = "xgene-pmu",
.of_match_table = xgene_pmu_of_match,
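The .remove_new to .remove renames in this series are mechanical: the platform core's remove() callback was converted to return void, .remove_new served as the transitional name for the void-returning variant, and once all drivers had migrated, .remove itself took the void signature so the temporary name could be retired. A minimal sketch of the resulting shape (demo names are hypothetical):

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static void demo_remove(struct platform_device *pdev)
{
	/* Cleanup only; the void return type means failure cannot be
	 * reported here, so everything fallible belongs in probe(). */
}

static struct platform_driver demo_driver = {
	.driver	= { .name = "demo" },
	.probe	= demo_probe,
	.remove	= demo_remove,	/* was .remove_new during the transition */
};
module_platform_driver(demo_driver);

MODULE_DESCRIPTION("remove() callback sketch");
MODULE_LICENSE("GPL");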
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
index 7f034ead7ae4..21a484385fc5 100644
--- a/drivers/platform/chrome/cros_ec_chardev.c
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -415,7 +415,7 @@ static struct platform_driver cros_ec_chardev_driver = {
.name = DRV_NAME,
},
.probe = cros_ec_chardev_probe,
- .remove_new = cros_ec_chardev_remove,
+ .remove = cros_ec_chardev_remove,
.id_table = cros_ec_chardev_id,
};
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 839154c46e46..92ac9a2f9c88 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -582,7 +582,7 @@ static struct platform_driver cros_ec_debugfs_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = cros_ec_debugfs_probe,
- .remove_new = cros_ec_debugfs_remove,
+ .remove = cros_ec_debugfs_remove,
.id_table = cros_ec_debugfs_id,
};
diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c
index e29c51cbfd71..62662ba5bf6e 100644
--- a/drivers/platform/chrome/cros_ec_i2c.c
+++ b/drivers/platform/chrome/cros_ec_i2c.c
@@ -352,7 +352,7 @@ MODULE_DEVICE_TABLE(of, cros_ec_i2c_of_match);
#endif
static const struct i2c_device_id cros_ec_i2c_id[] = {
- { "cros-ec-i2c", 0 },
+ { "cros-ec-i2c" },
{ }
};
MODULE_DEVICE_TABLE(i2c, cros_ec_i2c_id);
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index 1e69f61115a4..87634f6921b7 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -608,7 +608,7 @@ static struct platform_driver cros_ec_lightbar_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = cros_ec_lightbar_probe,
- .remove_new = cros_ec_lightbar_remove,
+ .remove = cros_ec_lightbar_remove,
.id_table = cros_ec_lightbar_id,
};
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index c784119ab5dc..924bf4d3cc77 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -783,7 +783,7 @@ static struct platform_driver cros_ec_lpc_driver = {
.probe_type = PROBE_FORCE_SYNCHRONOUS,
},
.probe = cros_ec_lpc_probe,
- .remove_new = cros_ec_lpc_remove,
+ .remove = cros_ec_lpc_remove,
};
static struct platform_device cros_ec_lpc_device = {
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index 9c944146ee50..bc1a5ba09528 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -359,7 +359,7 @@ static struct platform_driver cros_ec_sysfs_driver = {
.name = DRV_NAME,
},
.probe = cros_ec_sysfs_probe,
- .remove_new = cros_ec_sysfs_remove,
+ .remove = cros_ec_sysfs_remove,
.id_table = cros_ec_sysfs_id,
};
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index c7781aea0b88..ae2f86296954 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -409,6 +409,7 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
return 0;
unregister_ports:
+ fwnode_handle_put(fwnode);
cros_unregister_ports(typec);
return ret;
}
@@ -1325,7 +1326,7 @@ static struct platform_driver cros_typec_driver = {
.pm = &cros_typec_pm_ops,
},
.probe = cros_typec_probe,
- .remove_new = cros_typec_remove,
+ .remove = cros_typec_remove,
};
module_platform_driver(cros_typec_driver);
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index 787a19db4911..7bdb489354c5 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -145,7 +145,7 @@ static struct platform_driver cros_ec_vbc_driver = {
.name = DRV_NAME,
},
.probe = cros_ec_vbc_probe,
- .remove_new = cros_ec_vbc_remove,
+ .remove = cros_ec_vbc_remove,
.id_table = cros_ec_vbc_id,
};
diff --git a/drivers/platform/chrome/cros_hps_i2c.c b/drivers/platform/chrome/cros_hps_i2c.c
index dd14957ec39f..6b479cfe3f73 100644
--- a/drivers/platform/chrome/cros_hps_i2c.c
+++ b/drivers/platform/chrome/cros_hps_i2c.c
@@ -129,7 +129,7 @@ static int hps_resume(struct device *dev)
static DEFINE_RUNTIME_DEV_PM_OPS(hps_pm_ops, hps_suspend, hps_resume, NULL);
static const struct i2c_device_id hps_i2c_id[] = {
- { "cros-hps", 0 },
+ { "cros-hps" },
{ }
};
MODULE_DEVICE_TABLE(i2c, hps_i2c_id);
diff --git a/drivers/platform/chrome/cros_typec_switch.c b/drivers/platform/chrome/cros_typec_switch.c
index 07a19386dc4e..8d7c34abb0a1 100644
--- a/drivers/platform/chrome/cros_typec_switch.c
+++ b/drivers/platform/chrome/cros_typec_switch.c
@@ -318,7 +318,7 @@ static struct platform_driver cros_typec_switch_driver = {
.acpi_match_table = ACPI_PTR(cros_typec_switch_acpi_id),
},
.probe = cros_typec_switch_probe,
- .remove_new = cros_typec_switch_remove,
+ .remove = cros_typec_switch_remove,
};
module_platform_driver(cros_typec_switch_driver);
diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
index 930c2f47269f..cd71f1caea81 100644
--- a/drivers/platform/chrome/cros_usbpd_logger.c
+++ b/drivers/platform/chrome/cros_usbpd_logger.c
@@ -262,7 +262,7 @@ static struct platform_driver cros_usbpd_logger_driver = {
.pm = &cros_usbpd_logger_pm_ops,
},
.probe = cros_usbpd_logger_probe,
- .remove_new = cros_usbpd_logger_remove,
+ .remove = cros_usbpd_logger_remove,
.id_table = cros_usbpd_logger_id,
};
diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c
index c83f81d86483..313d2bcd577b 100644
--- a/drivers/platform/chrome/cros_usbpd_notify.c
+++ b/drivers/platform/chrome/cros_usbpd_notify.c
@@ -156,7 +156,7 @@ static struct platform_driver cros_usbpd_notify_acpi_driver = {
.acpi_match_table = cros_usbpd_notify_acpi_device_ids,
},
.probe = cros_usbpd_notify_probe_acpi,
- .remove_new = cros_usbpd_notify_remove_acpi,
+ .remove = cros_usbpd_notify_remove_acpi,
};
#endif /* CONFIG_ACPI */
@@ -230,7 +230,7 @@ static struct platform_driver cros_usbpd_notify_plat_driver = {
.name = DRV_NAME,
},
.probe = cros_usbpd_notify_probe_plat,
- .remove_new = cros_usbpd_notify_remove_plat,
+ .remove = cros_usbpd_notify_remove_plat,
.id_table = cros_usbpd_notify_id,
};
diff --git a/drivers/platform/chrome/wilco_ec/core.c b/drivers/platform/chrome/wilco_ec/core.c
index 3e6b6cd81a9b..9f978e531e1f 100644
--- a/drivers/platform/chrome/wilco_ec/core.c
+++ b/drivers/platform/chrome/wilco_ec/core.c
@@ -163,7 +163,7 @@ static struct platform_driver wilco_ec_driver = {
.acpi_match_table = wilco_ec_acpi_device_ids,
},
.probe = wilco_ec_probe,
- .remove_new = wilco_ec_remove,
+ .remove = wilco_ec_remove,
.id_table = wilco_ec_id,
};
diff --git a/drivers/platform/chrome/wilco_ec/debugfs.c b/drivers/platform/chrome/wilco_ec/debugfs.c
index 99486086af6a..0617292b5cd7 100644
--- a/drivers/platform/chrome/wilco_ec/debugfs.c
+++ b/drivers/platform/chrome/wilco_ec/debugfs.c
@@ -276,7 +276,7 @@ static struct platform_driver wilco_ec_debugfs_driver = {
.name = DRV_NAME,
},
.probe = wilco_ec_debugfs_probe,
- .remove_new = wilco_ec_debugfs_remove,
+ .remove = wilco_ec_debugfs_remove,
.id_table = wilco_ec_debugfs_id,
};
diff --git a/drivers/platform/chrome/wilco_ec/telemetry.c b/drivers/platform/chrome/wilco_ec/telemetry.c
index a87877e4300a..7d8ae2cbf72f 100644
--- a/drivers/platform/chrome/wilco_ec/telemetry.c
+++ b/drivers/platform/chrome/wilco_ec/telemetry.c
@@ -417,7 +417,7 @@ MODULE_DEVICE_TABLE(platform, telem_id);
static struct platform_driver telem_driver = {
.probe = telem_device_probe,
- .remove_new = telem_device_remove,
+ .remove = telem_device_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/pmdomain/arm/scmi_perf_domain.c b/drivers/pmdomain/arm/scmi_perf_domain.c
index d7ef46ccd9b8..3693423459c9 100644
--- a/drivers/pmdomain/arm/scmi_perf_domain.c
+++ b/drivers/pmdomain/arm/scmi_perf_domain.c
@@ -125,7 +125,8 @@ static int scmi_perf_domain_probe(struct scmi_device *sdev)
scmi_pd->ph = ph;
scmi_pd->genpd.name = scmi_pd->info->name;
scmi_pd->genpd.flags = GENPD_FLAG_ALWAYS_ON |
- GENPD_FLAG_OPP_TABLE_FW;
+ GENPD_FLAG_OPP_TABLE_FW |
+ GENPD_FLAG_DEV_NAME_FW;
scmi_pd->genpd.set_performance_state = scmi_pd_set_perf_state;
scmi_pd->genpd.attach_dev = scmi_pd_attach_dev;
scmi_pd->genpd.detach_dev = scmi_pd_detach_dev;
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 5ede0f7eda09..29ad510e881c 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) "PM: " fmt
#include <linux/delay.h>
+#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
@@ -23,6 +24,9 @@
#include <linux/cpu.h>
#include <linux/debugfs.h>
+/* Provides a unique ID for each genpd device */
+static DEFINE_IDA(genpd_ida);
+
#define GENPD_RETRY_MAX_MS 250 /* Approximate */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
@@ -171,6 +175,7 @@ static const struct genpd_lock_ops genpd_raw_spin_ops = {
#define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
#define genpd_is_opp_table_fw(genpd) (genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
+#define genpd_is_dev_name_fw(genpd) (genpd->flags & GENPD_FLAG_DEV_NAME_FW)
static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
@@ -189,7 +194,7 @@ static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
if (ret)
dev_warn_once(dev, "PM domain %s will not be powered off\n",
- genpd->name);
+ dev_name(&genpd->dev));
return ret;
}
@@ -274,7 +279,7 @@ static void genpd_debug_remove(struct generic_pm_domain *genpd)
if (!genpd_debugfs_dir)
return;
- debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
+ debugfs_lookup_and_remove(dev_name(&genpd->dev), genpd_debugfs_dir);
}
static void genpd_update_accounting(struct generic_pm_domain *genpd)
@@ -731,7 +736,7 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
genpd->gd->max_off_time_changed = true;
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
- genpd->name, "on", elapsed_ns);
+ dev_name(&genpd->dev), "on", elapsed_ns);
out:
raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
@@ -782,7 +787,7 @@ static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
genpd->gd->max_off_time_changed = true;
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
- genpd->name, "off", elapsed_ns);
+ dev_name(&genpd->dev), "off", elapsed_ns);
out:
raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
@@ -1940,7 +1945,7 @@ int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
if (ret) {
dev_warn(dev, "failed to add notifier for PM domain %s\n",
- genpd->name);
+ dev_name(&genpd->dev));
return ret;
}
@@ -1987,7 +1992,7 @@ int dev_pm_genpd_remove_notifier(struct device *dev)
if (ret) {
dev_warn(dev, "failed to remove notifier for PM domain %s\n",
- genpd->name);
+ dev_name(&genpd->dev));
return ret;
}
@@ -2013,7 +2018,7 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
*/
if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
- genpd->name, subdomain->name);
+ dev_name(&genpd->dev), subdomain->name);
return -EINVAL;
}
@@ -2088,7 +2093,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n",
- genpd->name, subdomain->name);
+ dev_name(&genpd->dev), subdomain->name);
ret = -EBUSY;
goto out;
}
@@ -2225,6 +2230,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
genpd->device_count = 0;
genpd->provider = NULL;
+ genpd->device_id = -ENXIO;
genpd->has_provider = false;
genpd->accounting_time = ktime_get_mono_fast_ns();
genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
@@ -2265,7 +2271,18 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
return ret;
device_initialize(&genpd->dev);
- dev_set_name(&genpd->dev, "%s", genpd->name);
+
+ if (!genpd_is_dev_name_fw(genpd)) {
+ dev_set_name(&genpd->dev, "%s", genpd->name);
+ } else {
+ ret = ida_alloc(&genpd_ida, GFP_KERNEL);
+ if (ret < 0) {
+ put_device(&genpd->dev);
+ return ret;
+ }
+ genpd->device_id = ret;
+ dev_set_name(&genpd->dev, "%s_%u", genpd->name, genpd->device_id);
+ }
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
@@ -2287,13 +2304,13 @@ static int genpd_remove(struct generic_pm_domain *genpd)
if (genpd->has_provider) {
genpd_unlock(genpd);
- pr_err("Provider present, unable to remove %s\n", genpd->name);
+ pr_err("Provider present, unable to remove %s\n", dev_name(&genpd->dev));
return -EBUSY;
}
if (!list_empty(&genpd->parent_links) || genpd->device_count) {
genpd_unlock(genpd);
- pr_err("%s: unable to remove %s\n", __func__, genpd->name);
+ pr_err("%s: unable to remove %s\n", __func__, dev_name(&genpd->dev));
return -EBUSY;
}
@@ -2307,9 +2324,11 @@ static int genpd_remove(struct generic_pm_domain *genpd)
genpd_unlock(genpd);
genpd_debug_remove(genpd);
cancel_work_sync(&genpd->power_off_work);
+ if (genpd->device_id != -ENXIO)
+ ida_free(&genpd_ida, genpd->device_id);
genpd_free_data(genpd);
- pr_debug("%s: removed %s\n", __func__, genpd->name);
+ pr_debug("%s: removed %s\n", __func__, dev_name(&genpd->dev));
return 0;
}
@@ -3272,12 +3291,12 @@ static int genpd_summary_one(struct seq_file *s,
else
snprintf(state, sizeof(state), "%s",
status_lookup[genpd->status]);
- seq_printf(s, "%-30s %-30s %u", genpd->name, state, genpd->performance_state);
+ seq_printf(s, "%-30s %-30s %u", dev_name(&genpd->dev), state, genpd->performance_state);
/*
* Modifications on the list require holding locks on both
* parent and child, so we are safe.
- * Also genpd->name is immutable.
+ * Also the device name is immutable.
*/
list_for_each_entry(link, &genpd->parent_links, parent_node) {
if (list_is_first(&link->parent_node, &genpd->parent_links))
@@ -3502,7 +3521,7 @@ static void genpd_debug_add(struct generic_pm_domain *genpd)
if (!genpd_debugfs_dir)
return;
- d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
+ d = debugfs_create_dir(dev_name(&genpd->dev), genpd_debugfs_dir);
debugfs_create_file("current_state", 0444,
d, genpd, &status_fops);
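GENPD_FLAG_DEV_NAME_FW targets firmware-described domains whose names are not guaranteed unique: when the flag is set, pm_genpd_init() appends an IDA-allocated id to the genpd device name, and genpd_remove() frees the id again. A hypothetical provider would opt in like this:

#include <linux/pm_domain.h>

static struct generic_pm_domain demo_pd = {
	.name	= "perf",	/* duplicate-prone name from firmware */
	.flags	= GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_DEV_NAME_FW,
};

static int demo_register_domain(void)
{
	/*
	 * With GENPD_FLAG_DEV_NAME_FW, the genpd device becomes e.g.
	 * "perf_0"; a second "perf" domain would become "perf_1"
	 * instead of colliding in debugfs and the device namespace.
	 */
	return pm_genpd_init(&demo_pd, NULL, false);
}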
diff --git a/drivers/pmdomain/imx/imx93-blk-ctrl.c b/drivers/pmdomain/imx/imx93-blk-ctrl.c
index 904ffa55b8f4..b10348ac10f0 100644
--- a/drivers/pmdomain/imx/imx93-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx93-blk-ctrl.c
@@ -313,7 +313,9 @@ static void imx93_blk_ctrl_remove(struct platform_device *pdev)
of_genpd_del_provider(pdev->dev.of_node);
- for (i = 0; bc->onecell_data.num_domains; i++) {
+ pm_runtime_disable(&pdev->dev);
+
+ for (i = 0; i < bc->onecell_data.num_domains; i++) {
struct imx93_blk_ctrl_domain *domain = &bc->domains[i];
pm_genpd_remove(&domain->genpd);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 42a4a996defb..3ed642f4f00d 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2117,7 +2117,7 @@ int dasd_flush_device_queue(struct dasd_device *device)
case DASD_CQR_IN_IO:
rc = device->discipline->term_IO(cqr);
if (rc) {
- /* unable to terminate requeust */
+ /* unable to terminate request */
dev_err(&device->cdev->dev,
"Flushing the DASD request queue failed\n");
/* stop flush processing */
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 6adaeb985dde..71d8fb86139d 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -855,7 +855,7 @@ dasd_delete_device(struct dasd_device *device)
dev_set_drvdata(&device->cdev->dev, NULL);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
- /* Removve copy relation */
+ /* Remove copy relation */
dasd_devmap_delete_copy_relation_device(device);
/*
* Drop ref_count by 3, one for the devmap reference, one for
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 8245b742e4a2..26812abddef1 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -25,6 +25,7 @@
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/vtoc.h>
+#include <asm/asm.h>
#include "dasd_int.h"
#include "dasd_diag.h"
@@ -67,22 +68,24 @@ static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
static inline int __dia250(void *iob, int cmd)
{
union register_pair rx = { .even = (unsigned long)iob, };
+ int cc, exception;
typedef union {
struct dasd_diag_init_io init_io;
struct dasd_diag_rw_io rw_io;
} addr_type;
- int cc;
- cc = 3;
+ exception = 1;
asm volatile(
" diag %[rx],%[cmd],0x250\n"
- "0: ipm %[cc]\n"
- " srl %[cc],28\n"
+ "0: lhi %[exc],0\n"
"1:\n"
+ CC_IPM(cc)
EX_TABLE(0b,1b)
- : [cc] "+&d" (cc), [rx] "+&d" (rx.pair), "+m" (*(addr_type *)iob)
+ : CC_OUT(cc, cc), [rx] "+d" (rx.pair),
+ "+m" (*(addr_type *)iob), [exc] "+d" (exception)
: [cmd] "d" (cmd)
- : "cc");
+ : CC_CLOBBER);
+ cc = exception ? 3 : CC_TRANSFORM(cc);
return cc | rx.odd;
}
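The inline-assembly conversions in this series all share one shape: the open-coded ipm/srl sequence becomes CC_IPM()/CC_OUT()/CC_TRANSFORM() from <asm/asm.h>, so compilers with flag-output support can read the condition code without extra instructions, and the old trick of preloading cc with a sentinel (3 or -EIO) is replaced by an explicit exception flag that the extable fixup leaves set to 1. A minimal sketch of just the condition-code part, using tmll as a harmless stand-in instruction (an assumption for illustration, not code from this patch):

#include <asm/asm.h>

static inline int demo_cc(unsigned long word)
{
	int cc;

	asm volatile(
		"	tmll	%[word],1\n"	/* any cc-setting insn */
		CC_IPM(cc)	/* emits ipm/srl only without flag outputs */
		: CC_OUT(cc, cc)
		: [word] "d" (word)
		: CC_CLOBBER);
	return CC_TRANSFORM(cc);	/* normalized condition code 0..3 */
}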
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 90b106408992..1ebe589b5185 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2405,7 +2405,7 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
}
if (count_area != NULL && count_area->kl == 0) {
- /* we found notthing violating our disk layout */
+ /* we found nothing violating our disk layout */
if (dasd_check_blocksize(count_area->dl) == 0)
block->bp_block = count_area->dl;
}
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 0faaa437d9be..48e12e81df00 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -350,6 +350,7 @@ dasd_proc_init(void)
remove_proc_entry("devices", dasd_proc_root_entry);
out_nodevices:
remove_proc_entry("dasd", NULL);
+ dasd_proc_root_entry = NULL;
out_nodasd:
return -ENOENT;
}
@@ -357,7 +358,11 @@ dasd_proc_init(void)
void
dasd_proc_exit(void)
{
+ if (!dasd_proc_root_entry)
+ return;
+
remove_proc_entry("devices", dasd_proc_root_entry);
remove_proc_entry("statistics", dasd_proc_root_entry);
remove_proc_entry("dasd", NULL);
+ dasd_proc_root_entry = NULL;
}
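The dasd_proc change makes the cleanup path idempotent: the root entry pointer is cleared on the failure path and checked on exit, so dasd_proc_exit() is safe even when dasd_proc_init() failed part-way. The general shape, as a sketch with hypothetical demo names:

#include <linux/proc_fs.h>

static struct proc_dir_entry *demo_root;	/* set by demo_init() */

static void demo_exit(void)
{
	if (!demo_root)		/* init failed part-way or never ran */
		return;
	remove_proc_entry("demo", NULL);
	demo_root = NULL;	/* a second call is now harmless */
}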
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 02a4a51da1b7..0f14d279d30b 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -339,7 +339,7 @@ dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf
struct dcssblk_dev_info *dev_info;
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
- return sprintf(buf, dev_info->is_shared ? "1\n" : "0\n");
+ return sysfs_emit(buf, dev_info->is_shared ? "1\n" : "0\n");
}
static ssize_t
@@ -444,7 +444,7 @@ dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf)
struct dcssblk_dev_info *dev_info;
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
- return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n");
+ return sysfs_emit(buf, dev_info->save_pending ? "1\n" : "0\n");
}
static ssize_t
@@ -506,21 +506,15 @@ static ssize_t
dcssblk_seglist_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- int i;
-
struct dcssblk_dev_info *dev_info;
struct segment_info *entry;
+ int i;
+ i = 0;
down_read(&dcssblk_devices_sem);
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
- i = 0;
- buf[0] = '\0';
- list_for_each_entry(entry, &dev_info->seg_list, lh) {
- strcpy(&buf[i], entry->segment_name);
- i += strlen(entry->segment_name);
- buf[i] = '\n';
- i++;
- }
+ list_for_each_entry(entry, &dev_info->seg_list, lh)
+ i += sysfs_emit_at(buf, i, "%s\n", entry->segment_name);
up_read(&dcssblk_devices_sem);
return i;
}
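The sprintf()/strcpy() accumulation loop above becomes sysfs_emit_at(), which verifies that buf is the page-aligned sysfs buffer and clamps output at PAGE_SIZE, so show() routines no longer track offsets against an unstated limit. The same pattern in isolation (demo names are hypothetical):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t demo_names_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	static const char * const names[] = { "seg1", "seg2", "seg3" };
	int i, len = 0;

	/* sysfs_emit_at() validates the buffer and clamps at PAGE_SIZE,
	 * replacing hand-rolled strcpy()/offset bookkeeping. */
	for (i = 0; i < ARRAY_SIZE(names); i++)
		len += sysfs_emit_at(buf, len, "%s\n", names[i]);
	return len;
}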
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 053102d0fcd2..ae1b9aa3a2b5 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -528,7 +528,7 @@ static void tty3270_update(struct timer_list *t)
u8 cmd = TC_WRITE;
int rc, len;
- wrq = xchg(&tp->write, 0);
+ wrq = xchg(&tp->write, NULL);
if (!wrq) {
tty3270_set_timer(tp, 1);
return;
@@ -746,7 +746,7 @@ static void tty3270_issue_read(struct tty3270 *tp, int lock)
struct raw3270_request *rrq;
int rc;
- rrq = xchg(&tp->read, 0);
+ rrq = xchg(&tp->read, NULL);
if (!rrq)
/* Read already scheduled. */
return;
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 6a23ec286c70..6c91e422927f 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -14,6 +14,7 @@
#include <asm/asm-extable.h>
#include <asm/sclp.h>
#include <asm/ebcdic.h>
+#include <asm/asm.h>
/* maximum number of pages concerning our own memory management */
#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
@@ -325,19 +326,22 @@ struct read_info_sccb * __init sclp_early_get_info(void);
/* Perform service call. Return 0 on success, non-zero otherwise. */
static inline int sclp_service_call(sclp_cmdw_t command, void *sccb)
{
- int cc = 4; /* Initialize for program check handling */
+ int cc, exception;
+ exception = 1;
asm volatile(
- "0: .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
- "1: ipm %0\n"
- " srl %0,28\n"
+ "0: .insn rre,0xb2200000,%[cmd],%[sccb]\n" /* servc */
+ "1: lhi %[exc],0\n"
"2:\n"
+ CC_IPM(cc)
EX_TABLE(0b, 2b)
EX_TABLE(1b, 2b)
- : "+&d" (cc) : "d" (command), "a" (__pa(sccb))
- : "cc", "memory");
- if (cc == 4)
+ : CC_OUT(cc, cc), [exc] "+d" (exception)
+ : [cmd] "d" (command), [sccb] "a" (__pa(sccb))
+ : CC_CLOBBER_LIST("memory"));
+ if (exception)
return -EINVAL;
+ cc = CC_TRANSFORM(cc);
if (cc == 3)
return -EIO;
if (cc == 2)
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index f60d7ea8268d..d8f91aab11e8 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -223,7 +223,7 @@ static ssize_t system_name_show(struct kobject *kobj,
int rc;
mutex_lock(&sclp_cpi_mutex);
- rc = snprintf(page, PAGE_SIZE, "%s\n", system_name);
+ rc = sysfs_emit(page, "%s\n", system_name);
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
@@ -255,7 +255,7 @@ static ssize_t sysplex_name_show(struct kobject *kobj,
int rc;
mutex_lock(&sclp_cpi_mutex);
- rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
+ rc = sysfs_emit(page, "%s\n", sysplex_name);
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
@@ -287,7 +287,7 @@ static ssize_t system_type_show(struct kobject *kobj,
int rc;
mutex_lock(&sclp_cpi_mutex);
- rc = snprintf(page, PAGE_SIZE, "%s\n", system_type);
+ rc = sysfs_emit(page, "%s\n", system_type);
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
@@ -321,7 +321,7 @@ static ssize_t system_level_show(struct kobject *kobj,
mutex_lock(&sclp_cpi_mutex);
level = system_level;
mutex_unlock(&sclp_cpi_mutex);
- return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
+ return sysfs_emit(page, "%#018llx\n", level);
}
static ssize_t system_level_store(struct kobject *kobj,
diff --git a/drivers/s390/char/sclp_ocf.c b/drivers/s390/char/sclp_ocf.c
index d35f10ea5b52..ca6c5260dc53 100644
--- a/drivers/s390/char/sclp_ocf.c
+++ b/drivers/s390/char/sclp_ocf.c
@@ -101,7 +101,7 @@ static ssize_t cpc_name_show(struct kobject *kobj,
sclp_ocf_cpc_name_copy(name);
name[OCF_LENGTH_CPC_NAME] = 0;
EBCASC(name, OCF_LENGTH_CPC_NAME);
- return snprintf(page, PAGE_SIZE, "%s\n", name);
+ return sysfs_emit(page, "%s\n", name);
}
static struct kobj_attribute cpc_name_attr =
@@ -113,7 +113,7 @@ static ssize_t hmc_network_show(struct kobject *kobj,
int rc;
spin_lock_irq(&sclp_ocf_lock);
- rc = snprintf(page, PAGE_SIZE, "%s\n", hmc_network);
+ rc = sysfs_emit(page, "%s\n", hmc_network);
spin_unlock_irq(&sclp_ocf_lock);
return rc;
}
diff --git a/drivers/s390/char/sclp_pci.c b/drivers/s390/char/sclp_pci.c
index a3e5a5fb0c1e..c3466a8c56bb 100644
--- a/drivers/s390/char/sclp_pci.c
+++ b/drivers/s390/char/sclp_pci.c
@@ -27,6 +27,7 @@
#define SCLP_ERRNOTIFY_AQ_RESET 0
#define SCLP_ERRNOTIFY_AQ_REPAIR 1
#define SCLP_ERRNOTIFY_AQ_INFO_LOG 2
+#define SCLP_ERRNOTIFY_AQ_OPTICS_DATA 3
static DEFINE_MUTEX(sclp_pci_mutex);
static struct sclp_register sclp_pci_event = {
@@ -116,6 +117,7 @@ static int sclp_pci_check_report(struct zpci_report_error_header *report)
case SCLP_ERRNOTIFY_AQ_RESET:
case SCLP_ERRNOTIFY_AQ_REPAIR:
case SCLP_ERRNOTIFY_AQ_INFO_LOG:
+ case SCLP_ERRNOTIFY_AQ_OPTICS_DATA:
break;
default:
return -EINVAL;
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index a6d2a4792185..ce8a440598a8 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -96,7 +96,7 @@ tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *
struct tape_device *tdev;
tdev = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
+ return sysfs_emit(buf, "%i\n", tdev->medium_state);
}
static
@@ -108,7 +108,7 @@ tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *b
struct tape_device *tdev;
tdev = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
+ return sysfs_emit(buf, "%i\n", tdev->first_minor);
}
static
@@ -120,8 +120,8 @@ tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
struct tape_device *tdev;
tdev = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
- "OFFLINE" : tape_state_verbose[tdev->tape_state]);
+ return sysfs_emit(buf, "%s\n", (tdev->first_minor < 0) ?
+ "OFFLINE" : tape_state_verbose[tdev->tape_state]);
}
static
@@ -135,17 +135,17 @@ tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf
tdev = dev_get_drvdata(dev);
if (tdev->first_minor < 0)
- return scnprintf(buf, PAGE_SIZE, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
spin_lock_irq(get_ccwdev_lock(tdev->cdev));
if (list_empty(&tdev->req_queue))
- rc = scnprintf(buf, PAGE_SIZE, "---\n");
+ rc = sysfs_emit(buf, "---\n");
else {
struct tape_request *req;
req = list_entry(tdev->req_queue.next, struct tape_request,
list);
- rc = scnprintf(buf,PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
+ rc = sysfs_emit(buf, "%s\n", tape_op_verbose[req->op]);
}
spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
return rc;
@@ -161,7 +161,7 @@ tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf
tdev = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
+ return sysfs_emit(buf, "%i\n", tdev->char_data.block_size);
}
static
diff --git a/drivers/s390/char/uvdevice.c b/drivers/s390/char/uvdevice.c
index f598edc5f251..2b83fb6dc1d7 100644
--- a/drivers/s390/char/uvdevice.c
+++ b/drivers/s390/char/uvdevice.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright IBM Corp. 2022
+ * Copyright IBM Corp. 2022, 2024
* Author(s): Steffen Eiden <seiden@linux.ibm.com>
*
* This file provides a Linux misc device to give userspace access to some
@@ -40,6 +40,7 @@ static const u32 ioctl_nr_to_uvc_bit[] __initconst = {
[UVIO_IOCTL_ADD_SECRET_NR] = BIT_UVC_CMD_ADD_SECRET,
[UVIO_IOCTL_LIST_SECRETS_NR] = BIT_UVC_CMD_LIST_SECRETS,
[UVIO_IOCTL_LOCK_SECRETS_NR] = BIT_UVC_CMD_LOCK_SECRETS,
+ [UVIO_IOCTL_RETR_SECRET_NR] = BIT_UVC_CMD_RETR_ATTEST,
};
static_assert(ARRAY_SIZE(ioctl_nr_to_uvc_bit) == UVIO_IOCTL_NUM_IOCTLS);
@@ -62,11 +63,13 @@ static void __init set_supp_uv_cmds(unsigned long *supp_uv_cmds)
}
/**
- * uvio_uvdev_info() - get information about the uvdevice
+ * uvio_uvdev_info() - Get information about the uvdevice
*
* @uv_ioctl: ioctl control block
*
* Lists all IOCTLs that are supported by this uvdevice
+ *
+ * Return: 0 on success or a negative error code on error
*/
static int uvio_uvdev_info(struct uvio_ioctl_cb *uv_ioctl)
{
@@ -177,7 +180,7 @@ static int get_uvio_attest(struct uvio_ioctl_cb *uv_ioctl, struct uvio_attest *u
*
* Context: might sleep
*
- * Return: 0 on success or a negative error code on error.
+ * Return: 0 on success or a negative error code on error
*/
static int uvio_attestation(struct uvio_ioctl_cb *uv_ioctl)
{
@@ -237,7 +240,8 @@ out:
return ret;
}
-/** uvio_add_secret() - perform an Add Secret UVC
+/**
+ * uvio_add_secret() - Perform an Add Secret UVC
*
* @uv_ioctl: ioctl control block
*
@@ -260,7 +264,7 @@ out:
*
* Context: might sleep
*
- * Return: 0 on success or a negative error code on error.
+ * Return: 0 on success or a negative error code on error
*/
static int uvio_add_secret(struct uvio_ioctl_cb *uv_ioctl)
{
@@ -296,7 +300,44 @@ out:
return ret;
}
-/** uvio_list_secrets() - perform a List Secret UVC
+/*
+ * Do the actual secret list creation. Calls the list secrets UVC until there
+ * is no more space in the user buffer, or the list ends.
+ */
+static int uvio_get_list(void *zpage, struct uvio_ioctl_cb *uv_ioctl)
+{
+ const size_t data_off = offsetof(struct uv_secret_list, secrets);
+ u8 __user *user_buf = (u8 __user *)uv_ioctl->argument_addr;
+ struct uv_secret_list *list = zpage;
+ u16 num_secrets_stored = 0;
+ size_t user_off = data_off;
+ size_t copy_len;
+
+ do {
+ uv_list_secrets(list, list->next_secret_idx, &uv_ioctl->uv_rc,
+ &uv_ioctl->uv_rrc);
+ if (uv_ioctl->uv_rc != UVC_RC_EXECUTED &&
+ uv_ioctl->uv_rc != UVC_RC_MORE_DATA)
+ break;
+
+ copy_len = sizeof(list->secrets[0]) * list->num_secr_stored;
+ if (copy_to_user(user_buf + user_off, list->secrets, copy_len))
+ return -EFAULT;
+
+ user_off += copy_len;
+ num_secrets_stored += list->num_secr_stored;
+ } while (uv_ioctl->uv_rc == UVC_RC_MORE_DATA &&
+ user_off + sizeof(*list) <= uv_ioctl->argument_len);
+
+ list->num_secr_stored = num_secrets_stored;
+ if (copy_to_user(user_buf, list, data_off))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * uvio_list_secrets() - Perform a List Secret UVC
+ *
* @uv_ioctl: ioctl control block
*
* uvio_list_secrets() performs the List Secret Ultravisor Call. It verifies
@@ -307,45 +348,43 @@ out:
*
* The argument specifies the location for the result of the UV-Call.
*
+ * The argument length must be a non-zero multiple of the page size.
+ * The list secrets IOCTL will call the list UVC multiple times and fill
+ * the provided user buffer with list elements until either the list ends or
+ * the buffer is full. The returned list header is merged from the list
+ * headers of the individual UVCs.
+ *
* If the List Secrets UV facility is not present, UV will return invalid
* command rc. This won't be fenced in the driver and does not result in a
* negative return value.
*
* Context: might sleep
*
- * Return: 0 on success or a negative error code on error.
+ * Return: 0 on success or a negative error code on error
*/
static int uvio_list_secrets(struct uvio_ioctl_cb *uv_ioctl)
{
- void __user *user_buf_arg = (void __user *)uv_ioctl->argument_addr;
- struct uv_cb_guest_addr uvcb = {
- .header.len = sizeof(uvcb),
- .header.cmd = UVC_CMD_LIST_SECRETS,
- };
- void *secrets = NULL;
- int ret = 0;
+ void *zpage;
+ int rc;
- if (uv_ioctl->argument_len != UVIO_LIST_SECRETS_LEN)
+ if (uv_ioctl->argument_len == 0 ||
+ uv_ioctl->argument_len % UVIO_LIST_SECRETS_LEN != 0)
return -EINVAL;
- secrets = kvzalloc(UVIO_LIST_SECRETS_LEN, GFP_KERNEL);
- if (!secrets)
+ zpage = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!zpage)
return -ENOMEM;
- uvcb.addr = (u64)secrets;
- uv_call_sched(0, (u64)&uvcb);
- uv_ioctl->uv_rc = uvcb.header.rc;
- uv_ioctl->uv_rrc = uvcb.header.rrc;
-
- if (copy_to_user(user_buf_arg, secrets, UVIO_LIST_SECRETS_LEN))
- ret = -EFAULT;
+ rc = uvio_get_list(zpage, uv_ioctl);
- kvfree(secrets);
- return ret;
+ free_page((unsigned long)zpage);
+ return rc;
}
-/** uvio_lock_secrets() - perform a Lock Secret Store UVC
- * @uv_ioctl: ioctl control block
+/**
+ * uvio_lock_secrets() - Perform a Lock Secret Store UVC
+ *
+ * @ioctl: ioctl control block
*
* uvio_lock_secrets() performs the Lock Secret Store Ultravisor Call. It
* performs the UV-call and copies the return codes to the ioctl control block.
@@ -360,7 +399,7 @@ static int uvio_list_secrets(struct uvio_ioctl_cb *uv_ioctl)
*
* Context: might sleep
*
- * Return: 0 on success or a negative error code on error.
+ * Return: 0 on success or a negative error code on error
*/
static int uvio_lock_secrets(struct uvio_ioctl_cb *ioctl)
{
@@ -379,6 +418,59 @@ static int uvio_lock_secrets(struct uvio_ioctl_cb *ioctl)
return 0;
}
+/**
+ * uvio_retr_secret() - Perform a retrieve secret UVC
+ *
+ * @uv_ioctl: ioctl control block.
+ *
+ * uvio_retr_secret() performs the Retrieve Secret Ultravisor Call.
+ * The first two bytes of the argument specify the index of the secret to be
+ * retrieved. The retrieved secret is copied into the argument buffer if there
+ * is enough space.
+ * The argument length must be at least two bytes and at most 8192 bytes.
+ *
+ * Context: might sleep
+ *
+ * Return: 0 on success or a negative error code on error
+ */
+static int uvio_retr_secret(struct uvio_ioctl_cb *uv_ioctl)
+{
+ u16 __user *user_index = (u16 __user *)uv_ioctl->argument_addr;
+ struct uv_cb_retr_secr uvcb = {
+ .header.len = sizeof(uvcb),
+ .header.cmd = UVC_CMD_RETR_SECRET,
+ };
+ u32 buf_len = uv_ioctl->argument_len;
+ void *buf = NULL;
+ int ret;
+
+ if (buf_len > UVIO_RETR_SECRET_MAX_LEN || buf_len < sizeof(*user_index))
+ return -EINVAL;
+
+ buf = kvzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (get_user(uvcb.secret_idx, user_index))
+ goto err;
+
+ uvcb.buf_addr = (u64)buf;
+ uvcb.buf_size = buf_len;
+ uv_call_sched(0, (u64)&uvcb);
+
+ if (copy_to_user((void __user *)uv_ioctl->argument_addr, buf, buf_len))
+ goto err;
+
+ ret = 0;
+ uv_ioctl->uv_rc = uvcb.header.rc;
+ uv_ioctl->uv_rrc = uvcb.header.rrc;
+
+err:
+ kvfree_sensitive(buf, buf_len);
+ return ret;
+}
+
static int uvio_copy_and_check_ioctl(struct uvio_ioctl_cb *ioctl, void __user *argp,
unsigned long cmd)
{
@@ -432,6 +524,9 @@ static long uvio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case UVIO_IOCTL_LOCK_SECRETS_NR:
ret = uvio_lock_secrets(&uv_ioctl);
break;
+ case UVIO_IOCTL_RETR_SECRET_NR:
+ ret = uvio_retr_secret(&uv_ioctl);
+ break;
default:
ret = -ENOIOCTLCMD;
break;
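From userspace, the relaxed length check means the list buffer no longer has to be exactly one page. A sketch of a caller, assuming the uvio_ioctl_cb layout and UVIO_IOCTL_LIST_SECRETS definition from <asm/uvdevice.h> and the /dev/uv misc device node:

#include <asm/uvdevice.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4 * 4096;	/* four pages instead of exactly one */
	struct uvio_ioctl_cb cb = { 0 };
	void *buf = aligned_alloc(4096, len);
	int fd = open("/dev/uv", O_RDONLY);

	if (fd < 0 || !buf)
		return 1;
	cb.argument_addr = (uint64_t)(uintptr_t)buf;
	cb.argument_len = len;
	/* The kernel keeps calling the list UVC until buf is full. */
	if (ioctl(fd, UVIO_IOCTL_LIST_SECRETS, &cb) < 0)
		perror("ioctl");
	else
		printf("uv rc=%#x rrc=%#x\n", cb.uv_rc, cb.uv_rrc);
	free(buf);
	close(fd);
	return 0;
}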
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index bd5cecc44123..3dd50ac9c5b0 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -531,7 +531,7 @@ static ssize_t vmlogrdr_autopurge_show(struct device *dev,
char *buf)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
- return sprintf(buf, "%u\n", priv->autopurge);
+ return sysfs_emit(buf, "%u\n", priv->autopurge);
}
@@ -605,7 +605,7 @@ static ssize_t vmlogrdr_autorecording_show(struct device *dev,
char *buf)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
- return sprintf(buf, "%u\n", priv->autorecording);
+ return sysfs_emit(buf, "%u\n", priv->autorecording);
}
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index fe94dec427b6..90ba7a2b9cb4 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -345,7 +345,7 @@ static ssize_t ur_attr_reclen_show(struct device *dev,
urd = urdev_get_from_cdev(to_ccwdev(dev));
if (!urd)
return -ENODEV;
- rc = sprintf(buf, "%zu\n", urd->reclen);
+ rc = sysfs_emit(buf, "%zu\n", urd->reclen);
urdev_put(urd);
return rc;
}
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 66b1bdc63284..7bcf8b98b8dd 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -147,7 +147,7 @@ static ssize_t ccwgroup_online_show(struct device *dev,
online = (gdev->state == CCWGROUP_ONLINE) ? 1 : 0;
- return scnprintf(buf, PAGE_SIZE, "%d\n", online);
+ return sysfs_emit(buf, "%d\n", online);
}
/*
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index a07bbecba61c..cba2d048a96b 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -144,6 +144,18 @@ static ssize_t measurement_chars_read(struct file *filp, struct kobject *kobj,
}
static BIN_ATTR_ADMIN_RO(measurement_chars, sizeof(struct cmg_chars));
+static ssize_t measurement_chars_full_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct channel_path *chp = to_channelpath(kobj_to_dev(kobj));
+
+ return memory_read_from_buffer(buf, count, &off, &chp->cmcb,
+ sizeof(chp->cmcb));
+}
+static BIN_ATTR_ADMIN_RO(measurement_chars_full, sizeof(struct cmg_cmcb));
+
static ssize_t chp_measurement_copy_block(void *buf, loff_t off, size_t count,
struct kobject *kobj, bool extended)
{
@@ -201,6 +213,7 @@ static BIN_ATTR_ADMIN_RO(ext_measurement, sizeof(struct cmg_ext_entry));
static struct bin_attribute *measurement_attrs[] = {
&bin_attr_measurement_chars,
+ &bin_attr_measurement_chars_full,
&bin_attr_measurement,
&bin_attr_ext_measurement,
NULL,
@@ -230,7 +243,7 @@ static ssize_t chp_status_show(struct device *dev,
status = chp->state;
mutex_unlock(&chp->lock);
- return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
+ return status ? sysfs_emit(buf, "online\n") : sysfs_emit(buf, "offline\n");
}
static ssize_t chp_status_write(struct device *dev,
@@ -311,7 +324,7 @@ static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
mutex_lock(&chp->lock);
type = chp->desc.desc;
mutex_unlock(&chp->lock);
- return sprintf(buf, "%x\n", type);
+ return sysfs_emit(buf, "%x\n", type);
}
static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
@@ -324,8 +337,8 @@ static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
if (!chp)
return 0;
if (chp->cmg == -1) /* channel measurements not available */
- return sprintf(buf, "unknown\n");
- return sprintf(buf, "%d\n", chp->cmg);
+ return sysfs_emit(buf, "unknown\n");
+ return sysfs_emit(buf, "%d\n", chp->cmg);
}
static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
@@ -338,8 +351,8 @@ static ssize_t chp_shared_show(struct device *dev,
if (!chp)
return 0;
if (chp->shared == -1) /* channel measurements not available */
- return sprintf(buf, "unknown\n");
- return sprintf(buf, "%x\n", chp->shared);
+ return sysfs_emit(buf, "unknown\n");
+ return sysfs_emit(buf, "%x\n", chp->shared);
}
static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
@@ -352,7 +365,7 @@ static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
mutex_lock(&chp->lock);
if (chp->desc_fmt1.flags & 0x10)
- rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
+ rc = sysfs_emit(buf, "%04x\n", chp->desc_fmt1.chid);
else
rc = 0;
mutex_unlock(&chp->lock);
@@ -369,7 +382,7 @@ static ssize_t chp_chid_external_show(struct device *dev,
mutex_lock(&chp->lock);
if (chp->desc_fmt1.flags & 0x10)
- rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
+ rc = sysfs_emit(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
else
rc = 0;
mutex_unlock(&chp->lock);
@@ -385,7 +398,7 @@ static ssize_t chp_esc_show(struct device *dev,
ssize_t rc;
mutex_lock(&chp->lock);
- rc = sprintf(buf, "%x\n", chp->desc_fmt1.esc);
+ rc = sysfs_emit(buf, "%x\n", chp->desc_fmt1.esc);
mutex_unlock(&chp->lock);
return rc;
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index a15324a43aa3..391b52a7474c 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -54,6 +54,7 @@ struct channel_path {
int extended;
unsigned long speed;
struct cmg_chars cmg_chars;
+ struct cmg_cmcb cmcb;
};
/* Return channel_path struct for given chpid. */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index dcc1e1c34ca2..e6462317abd0 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -376,7 +376,7 @@ struct lir {
#define PARAMS_LEN 10 /* PARAMS=xx,xxxxxx */
#define NODEID_LEN 35 /* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
-/* Copy EBCIDC text, convert to ASCII and optionally add delimiter. */
+/* Copy EBCDIC text, convert to ASCII and optionally add delimiter. */
static char *store_ebcdic(char *dest, const char *src, unsigned long len,
char delim)
{
@@ -1092,19 +1092,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
u32 zeroes1;
struct chsc_header response;
u32 zeroes2;
- u32 not_valid : 1;
- u32 shared : 1;
- u32 extended : 1;
- u32 : 21;
- u32 chpid : 8;
- u32 cmcv : 5;
- u32 : 7;
- u32 cmgp : 4;
- u32 cmgq : 8;
- u32 cmg : 8;
- u32 : 16;
- u32 cmgs : 16;
- u32 data[NR_MEASUREMENT_CHARS];
+ struct cmg_cmcb cmcb;
} *scmc_area;
chp->shared = -1;
@@ -1135,15 +1123,16 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
scmc_area->response.code);
goto out;
}
- if (scmc_area->not_valid)
+ chp->cmcb = scmc_area->cmcb;
+ if (scmc_area->cmcb.not_valid)
goto out;
- chp->cmg = scmc_area->cmg;
- chp->shared = scmc_area->shared;
- chp->extended = scmc_area->extended;
- chp->speed = scmc_get_speed(scmc_area->cmgs, scmc_area->cmgp);
- chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
- (struct cmg_chars *) &scmc_area->data);
+ chp->cmg = scmc_area->cmcb.cmg;
+ chp->shared = scmc_area->cmcb.shared;
+ chp->extended = scmc_area->cmcb.extended;
+ chp->speed = scmc_get_speed(scmc_area->cmcb.cmgs, scmc_area->cmcb.cmgp);
+ chsc_initialize_cmg_chars(chp, scmc_area->cmcb.cmcv,
+ (struct cmg_chars *)&scmc_area->cmcb.data);
out:
spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 24cd65dbc5a7..6fe983ebf4b3 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -17,6 +17,22 @@ struct cmg_chars {
u32 values[NR_MEASUREMENT_CHARS];
};
+struct cmg_cmcb {
+ u32 not_valid : 1;
+ u32 shared : 1;
+ u32 extended : 1;
+ u32 : 21;
+ u32 chpid : 8;
+ u32 cmcv : 5;
+ u32 : 7;
+ u32 cmgp : 4;
+ u32 cmgq : 8;
+ u32 cmg : 8;
+ u32 : 16;
+ u32 cmgs : 16;
+ u32 data[NR_MEASUREMENT_CHARS];
+};
+
#define NR_MEASUREMENT_ENTRIES 8
struct cmg_entry {
u32 values[NR_MEASUREMENT_ENTRIES];
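The new measurement_chars_full attribute exposes the raw struct cmg_cmcb defined in the chsc.h hunk above to userspace. A sketch of a reader; the sysfs path is an assumption based on how existing channel-path attributes are laid out (/sys/devices/css0/chp0.<id>/):

#include <stdio.h>

int main(void)
{
	unsigned char cmcb[64];	/* large enough for struct cmg_cmcb */
	FILE *f = fopen("/sys/devices/css0/chp0.40/measurement_chars_full",
			"rb");
	size_t n;

	if (!f)
		return 1;
	n = fread(cmcb, 1, sizeof(cmcb), f);
	printf("read %zu bytes of cmcb\n", n);
	fclose(f);
	return 0;
}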
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index c32e818f06db..ad17ab0a9314 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -459,10 +459,14 @@ int cio_update_schib(struct subchannel *sch)
{
struct schib schib;
- if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+ if (stsch(sch->schid, &schib))
return -ENODEV;
memcpy(&sch->schib, &schib, sizeof(schib));
+
+ if (!css_sch_is_valid(&schib))
+ return -EACCES;
+
return 0;
}
EXPORT_SYMBOL_GPL(cio_update_schib);
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index a9057a5b670a..08a5e9380e75 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -19,7 +19,7 @@ struct pmcw {
u32 intparm; /* interruption parameter */
u32 qf : 1; /* qdio facility */
u32 w : 1;
- u32 isc : 3; /* interruption sublass */
+ u32 isc : 3; /* interruption subclass */
u32 res5 : 3; /* reserved zeros */
u32 ena : 1; /* enabled */
u32 lm : 2; /* limit mode */
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index f80dc18e2a76..fdab760f1f28 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -46,7 +46,7 @@
/* indices for READCMB */
enum cmb_index {
avg_utilization = -1,
- /* basic and exended format: */
+ /* basic and extended format: */
cmb_ssch_rsch_count = 0,
cmb_sample_count,
cmb_device_connect_time,
@@ -135,7 +135,7 @@ static inline u64 time_to_nsec(u32 value)
* Users are usually interested in average times,
* not accumulated time.
* This also helps us with atomicity problems
- * when reading sinlge values.
+ * when reading single values.
*/
static inline u64 time_to_avg_nsec(u32 value, u32 count)
{
@@ -977,8 +977,7 @@ static struct cmb_operations cmbops_extended = {
static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
{
- return sprintf(buf, "%lld\n",
- (unsigned long long) cmf_read(to_ccwdev(dev), idx));
+ return sysfs_emit(buf, "%lld\n", cmf_read(to_ccwdev(dev), idx));
}
static ssize_t cmb_show_avg_sample_interval(struct device *dev,
@@ -998,7 +997,7 @@ static ssize_t cmb_show_avg_sample_interval(struct device *dev,
} else
interval = -1;
spin_unlock_irq(cdev->ccwlock);
- return sprintf(buf, "%ld\n", interval);
+ return sysfs_emit(buf, "%ld\n", interval);
}
static ssize_t cmb_show_avg_utilization(struct device *dev,
@@ -1007,7 +1006,7 @@ static ssize_t cmb_show_avg_utilization(struct device *dev,
{
unsigned long u = cmf_read(to_ccwdev(dev), avg_utilization);
- return sprintf(buf, "%02lu.%01lu%%\n", u / 10, u % 10);
+ return sysfs_emit(buf, "%02lu.%01lu%%\n", u / 10, u % 10);
}
#define cmf_attr(name) \
@@ -1080,7 +1079,7 @@ static ssize_t cmb_enable_show(struct device *dev,
{
struct ccw_device *cdev = to_ccwdev(dev);
- return sprintf(buf, "%d\n", cmf_enabled(cdev));
+ return sysfs_emit(buf, "%d\n", cmf_enabled(cdev));
}
static ssize_t cmb_enable_store(struct device *dev,
@@ -1227,7 +1226,7 @@ int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
return cmbops->readall(cdev, data);
}
-/* Reenable cmf when a disconnected device becomes available again. */
+/* Re-enable cmf when a disconnected device becomes available again. */
int cmf_reenable(struct ccw_device *cdev)
{
cmbops->reset(cdev);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 7b59d20bf785..be78a57f9bfd 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -380,11 +380,11 @@ static ssize_t chpids_show(struct device *dev,
for (chp = 0; chp < 8; chp++) {
mask = 0x80 >> chp;
if (ssd->path_mask & mask)
- ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
+ ret += sysfs_emit_at(buf, ret, "%02x ", ssd->chpid[chp].id);
else
- ret += sprintf(buf + ret, "00 ");
+ ret += sysfs_emit_at(buf, ret, "00 ");
}
- ret += sprintf(buf + ret, "\n");
+ ret += sysfs_emit_at(buf, ret, "\n");
return ret;
}
static DEVICE_ATTR_RO(chpids);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index b0f23242e171..fb2c07cb4d3d 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -201,10 +201,9 @@ devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
struct ccw_device_id *id = &(cdev->id);
if (id->dev_type != 0)
- return sprintf(buf, "%04x/%02x\n",
- id->dev_type, id->dev_model);
+ return sysfs_emit(buf, "%04x/%02x\n", id->dev_type, id->dev_model);
else
- return sprintf(buf, "n/a\n");
+ return sysfs_emit(buf, "n/a\n");
}
static ssize_t
@@ -213,8 +212,7 @@ cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_device_id *id = &(cdev->id);
- return sprintf(buf, "%04x/%02x\n",
- id->cu_type, id->cu_model);
+ return sysfs_emit(buf, "%04x/%02x\n", id->cu_type, id->cu_model);
}
static ssize_t
@@ -234,7 +232,7 @@ online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
- return sprintf(buf, cdev->online ? "1\n" : "0\n");
+ return sysfs_emit(buf, cdev->online ? "1\n" : "0\n");
}
int ccw_device_is_orphan(struct ccw_device *cdev)
@@ -546,21 +544,21 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf)
struct subchannel *sch;
if (ccw_device_is_orphan(cdev))
- return sprintf(buf, "no device\n");
+ return sysfs_emit(buf, "no device\n");
switch (cdev->private->state) {
case DEV_STATE_BOXED:
- return sprintf(buf, "boxed\n");
+ return sysfs_emit(buf, "boxed\n");
case DEV_STATE_DISCONNECTED:
case DEV_STATE_DISCONNECTED_SENSE_ID:
case DEV_STATE_NOT_OPER:
sch = to_subchannel(dev->parent);
if (!sch->lpm)
- return sprintf(buf, "no path\n");
+ return sysfs_emit(buf, "no path\n");
else
- return sprintf(buf, "no device\n");
+ return sysfs_emit(buf, "no device\n");
default:
/* All other states considered fine. */
- return sprintf(buf, "good\n");
+ return sysfs_emit(buf, "good\n");
}
}
@@ -587,7 +585,7 @@ static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
{
struct subchannel *sch = to_subchannel(dev);
- return sprintf(buf, "%02x\n", sch->vpm);
+ return sysfs_emit(buf, "%02x\n", sch->vpm);
}
static DEVICE_ATTR_RO(devtype);
@@ -1387,14 +1385,18 @@ enum io_sch_action {
IO_SCH_VERIFY,
IO_SCH_DISC,
IO_SCH_NOP,
+ IO_SCH_ORPH_CDEV,
};
static enum io_sch_action sch_get_action(struct subchannel *sch)
{
struct ccw_device *cdev;
+ int rc;
cdev = sch_get_cdev(sch);
- if (cio_update_schib(sch)) {
+ rc = cio_update_schib(sch);
+
+ if (rc == -ENODEV) {
/* Not operational. */
if (!cdev)
return IO_SCH_UNREG;
@@ -1402,6 +1404,16 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
return IO_SCH_UNREG;
return IO_SCH_ORPH_UNREG;
}
+
+ /* Avoid unregistering subchannels without a working device. */
+ if (rc == -EACCES) {
+ if (!cdev)
+ return IO_SCH_NOP;
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+ return IO_SCH_UNREG_CDEV;
+ return IO_SCH_ORPH_CDEV;
+ }
+
/* Operational. */
if (!cdev)
return IO_SCH_ATTACH;
@@ -1471,6 +1483,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
rc = 0;
goto out_unlock;
case IO_SCH_ORPH_UNREG:
+ case IO_SCH_ORPH_CDEV:
case IO_SCH_ORPH_ATTACH:
ccw_device_set_disconnected(cdev);
break;
@@ -1502,6 +1515,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
/* Handle attached ccw device. */
switch (action) {
case IO_SCH_ORPH_UNREG:
+ case IO_SCH_ORPH_CDEV:
case IO_SCH_ORPH_ATTACH:
/* Move ccw device to orphanage. */
rc = ccw_device_move_to_orph(cdev);
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index acf1edd36549..5ff1e51cddf3 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -8,6 +8,7 @@
#include <asm/asm-extable.h>
#include <asm/chpid.h>
#include <asm/schid.h>
+#include <asm/asm.h>
#include <asm/crw.h>
#include "ioasm.h"
@@ -18,19 +19,20 @@
static inline int __stsch(struct subchannel_id schid, struct schib *addr)
{
unsigned long r1 = *(unsigned int *)&schid;
- int ccode = -EIO;
+ int ccode, exception;
+ exception = 1;
asm volatile(
" lgr 1,%[r1]\n"
" stsch %[addr]\n"
- "0: ipm %[cc]\n"
- " srl %[cc],28\n"
+ "0: lhi %[exc],0\n"
"1:\n"
+ CC_IPM(cc)
EX_TABLE(0b, 1b)
- : [cc] "+&d" (ccode), [addr] "=Q" (*addr)
+ : CC_OUT(cc, ccode), [addr] "=Q" (*addr), [exc] "+d" (exception)
: [r1] "d" (r1)
- : "cc", "1");
- return ccode;
+ : CC_CLOBBER_LIST("1"));
+ return exception ? -EIO : CC_TRANSFORM(ccode);
}
int stsch(struct subchannel_id schid, struct schib *addr)
@@ -47,19 +49,20 @@ EXPORT_SYMBOL(stsch);
static inline int __msch(struct subchannel_id schid, struct schib *addr)
{
unsigned long r1 = *(unsigned int *)&schid;
- int ccode = -EIO;
+ int ccode, exception;
+ exception = 1;
asm volatile(
" lgr 1,%[r1]\n"
" msch %[addr]\n"
- "0: ipm %[cc]\n"
- " srl %[cc],28\n"
+ "0: lhi %[exc],0\n"
"1:\n"
+ CC_IPM(cc)
EX_TABLE(0b, 1b)
- : [cc] "+&d" (ccode)
+ : CC_OUT(cc, ccode), [exc] "+d" (exception)
: [r1] "d" (r1), [addr] "Q" (*addr)
- : "cc", "1");
- return ccode;
+ : CC_CLOBBER_LIST("1"));
+ return exception ? -EIO : CC_TRANSFORM(ccode);
}
int msch(struct subchannel_id schid, struct schib *addr)
@@ -80,12 +83,11 @@ static inline int __tsch(struct subchannel_id schid, struct irb *addr)
asm volatile(
" lgr 1,%[r1]\n"
" tsch %[addr]\n"
- " ipm %[cc]\n"
- " srl %[cc],28"
- : [cc] "=&d" (ccode), [addr] "=Q" (*addr)
+ CC_IPM(cc)
+ : CC_OUT(cc, ccode), [addr] "=Q" (*addr)
: [r1] "d" (r1)
- : "cc", "1");
- return ccode;
+ : CC_CLOBBER_LIST("1"));
+ return CC_TRANSFORM(ccode);
}
int tsch(struct subchannel_id schid, struct irb *addr)
@@ -101,19 +103,20 @@ int tsch(struct subchannel_id schid, struct irb *addr)
static inline int __ssch(struct subchannel_id schid, union orb *addr)
{
unsigned long r1 = *(unsigned int *)&schid;
- int ccode = -EIO;
+ int ccode, exception;
+ exception = 1;
asm volatile(
" lgr 1,%[r1]\n"
" ssch %[addr]\n"
- "0: ipm %[cc]\n"
- " srl %[cc],28\n"
+ "0: lhi %[exc],0\n"
"1:\n"
+ CC_IPM(cc)
EX_TABLE(0b, 1b)
- : [cc] "+&d" (ccode)
+ : CC_OUT(cc, ccode), [exc] "+d" (exception)
: [r1] "d" (r1), [addr] "Q" (*addr)
- : "cc", "memory", "1");
- return ccode;
+ : CC_CLOBBER_LIST("memory", "1"));
+ return exception ? -EIO : CC_TRANSFORM(ccode);
}
int ssch(struct subchannel_id schid, union orb *addr)
@@ -135,12 +138,11 @@ static inline int __csch(struct subchannel_id schid)
asm volatile(
" lgr 1,%[r1]\n"
" csch\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=&d" (ccode)
+ CC_IPM(cc)
+ : CC_OUT(cc, ccode)
: [r1] "d" (r1)
- : "cc", "1");
- return ccode;
+ : CC_CLOBBER_LIST("1"));
+ return CC_TRANSFORM(ccode);
}
int csch(struct subchannel_id schid)
@@ -160,11 +162,11 @@ int tpi(struct tpi_info *addr)
asm volatile(
" tpi %[addr]\n"
- " ipm %[cc]\n"
- " srl %[cc],28"
- : [cc] "=&d" (ccode), [addr] "=Q" (*addr)
+ CC_IPM(cc)
+ : CC_OUT(cc, ccode), [addr] "=Q" (*addr)
:
- : "cc");
+ : CC_CLOBBER);
+ ccode = CC_TRANSFORM(ccode);
trace_s390_cio_tpi(addr, ccode);
return ccode;
@@ -173,17 +175,19 @@ int tpi(struct tpi_info *addr)
int chsc(void *chsc_area)
{
typedef struct { char _[4096]; } addr_type;
- int cc = -EIO;
+ int cc, exception;
+ exception = 1;
asm volatile(
" .insn rre,0xb25f0000,%[chsc_area],0\n"
- "0: ipm %[cc]\n"
- " srl %[cc],28\n"
+ "0: lhi %[exc],0\n"
"1:\n"
+ CC_IPM(cc)
EX_TABLE(0b, 1b)
- : [cc] "+&d" (cc), "+m" (*(addr_type *)chsc_area)
+ : CC_OUT(cc, cc), "+m" (*(addr_type *)chsc_area), [exc] "+d" (exception)
: [chsc_area] "d" (chsc_area)
- : "cc");
+ : CC_CLOBBER);
+ cc = exception ? -EIO : CC_TRANSFORM(cc);
trace_s390_cio_chsc(chsc_area, cc);
return cc;
@@ -198,12 +202,11 @@ static inline int __rsch(struct subchannel_id schid)
asm volatile(
" lgr 1,%[r1]\n"
" rsch\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=&d" (ccode)
+ CC_IPM(cc)
+ : CC_OUT(cc, ccode)
: [r1] "d" (r1)
- : "cc", "memory", "1");
- return ccode;
+ : CC_CLOBBER_LIST("memory", "1"));
+ return CC_TRANSFORM(ccode);
}
int rsch(struct subchannel_id schid)
@@ -224,12 +227,11 @@ static inline int __hsch(struct subchannel_id schid)
asm volatile(
" lgr 1,%[r1]\n"
" hsch\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=&d" (ccode)
+ CC_IPM(cc)
+ : CC_OUT(cc, ccode)
: [r1] "d" (r1)
- : "cc", "1");
- return ccode;
+ : CC_CLOBBER_LIST("1"));
+ return CC_TRANSFORM(ccode);
}
int hsch(struct subchannel_id schid)
@@ -256,7 +258,7 @@ static inline int __xsch(struct subchannel_id schid)
: [cc] "=&d" (ccode)
: [r1] "d" (r1)
: "cc", "1");
- return ccode;
+ return CC_TRANSFORM(ccode);
}
int xsch(struct subchannel_id schid)
@@ -275,12 +277,11 @@ static inline int __stcrw(struct crw *crw)
asm volatile(
" stcrw %[crw]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=&d" (ccode), [crw] "=Q" (*crw)
+ CC_IPM(cc)
+ : CC_OUT(cc, ccode), [crw] "=Q" (*crw)
:
- : "cc");
- return ccode;
+ : CC_CLOBBER);
+ return CC_TRANSFORM(ccode);
}
static inline int _stcrw(struct crw *crw)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index b711bb17f9da..07e82816b77a 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -17,6 +17,7 @@
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
+#include <asm/asm.h>
#include <asm/ipl.h>
#include "cio.h"
@@ -42,13 +43,12 @@ static inline int do_siga_sync(unsigned long schid,
" lgr 2,%[out]\n"
" lgr 3,%[in]\n"
" siga 0\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=&d" (cc)
+ CC_IPM(cc)
+ : CC_OUT(cc, cc)
: [fc] "d" (fc), [schid] "d" (schid),
[out] "d" (out_mask), [in] "d" (in_mask)
- : "cc", "0", "1", "2", "3");
- return cc;
+ : CC_CLOBBER_LIST("0", "1", "2", "3"));
+ return CC_TRANSFORM(cc);
}
static inline int do_siga_input(unsigned long schid, unsigned long mask,
@@ -61,12 +61,11 @@ static inline int do_siga_input(unsigned long schid, unsigned long mask,
" lgr 1,%[schid]\n"
" lgr 2,%[mask]\n"
" siga 0\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=&d" (cc)
+ CC_IPM(cc)
+ : CC_OUT(cc, cc)
: [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask)
- : "cc", "0", "1", "2");
- return cc;
+ : CC_CLOBBER_LIST("0", "1", "2"));
+ return CC_TRANSFORM(cc);
}
/**
@@ -93,13 +92,12 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask,
" lgr 3,%[aob]\n"
" siga 0\n"
" lgr %[fc],0\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=&d" (cc), [fc] "+&d" (fc)
+ CC_IPM(cc)
+ : CC_OUT(cc, cc), [fc] "+&d" (fc)
: [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob)
- : "cc", "0", "1", "2", "3");
+ : CC_CLOBBER_LIST("0", "1", "2", "3"));
*bb = fc >> 31;
- return cc;
+ return CC_TRANSFORM(cc);
}
/**
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
index c7894d61306d..a0825e372d42 100644
--- a/drivers/s390/cio/scm.c
+++ b/drivers/s390/cio/scm.c
@@ -91,7 +91,7 @@ static ssize_t show_##name(struct device *dev, \
int ret; \
\
device_lock(dev); \
- ret = sprintf(buf, "%u\n", scmdev->attrs.name); \
+ ret = sysfs_emit(buf, "%u\n", scmdev->attrs.name); \
device_unlock(dev); \
\
return ret; \
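
This is the first of many sprintf()-to-sysfs_emit() conversions in this series. The point is defensive: a sysfs show() callback writes into exactly one page, and sysfs_emit() enforces that contract instead of trusting the format string. A minimal sketch of an attribute using it (device and field names are hypothetical):

	static ssize_t width_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
	{
		struct mydev *md = dev_get_drvdata(dev);	/* hypothetical */

		/* bounded to PAGE_SIZE; WARNs if buf is not page aligned */
		return sysfs_emit(buf, "%u\n", md->width);
	}
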
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index c88b6e071847..e83c6603c858 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -29,6 +29,10 @@ obj-$(CONFIG_PKEY_EP11) += pkey-ep11.o
pkey-pckmo-objs := pkey_pckmo.o
obj-$(CONFIG_PKEY_PCKMO) += pkey-pckmo.o
+# pkey uv handler module
+pkey-uv-objs := pkey_uv.o
+obj-$(CONFIG_PKEY_UV) += pkey-uv.o
+
# adjunct processor matrix
vfio_ap-objs := vfio_ap_drv.o vfio_ap_ops.o
obj-$(CONFIG_VFIO_AP) += vfio_ap.o
diff --git a/drivers/s390/crypto/pkey_base.c b/drivers/s390/crypto/pkey_base.c
index fea243322838..64a376501d26 100644
--- a/drivers/s390/crypto/pkey_base.c
+++ b/drivers/s390/crypto/pkey_base.c
@@ -304,7 +304,19 @@ void pkey_handler_request_modules(void)
{
#ifdef CONFIG_MODULES
static const char * const pkey_handler_modules[] = {
- "pkey_cca", "pkey_ep11", "pkey_pckmo" };
+#if IS_MODULE(CONFIG_PKEY_CCA)
+ "pkey_cca",
+#endif
+#if IS_MODULE(CONFIG_PKEY_EP11)
+ "pkey_ep11",
+#endif
+#if IS_MODULE(CONFIG_PKEY_PCKMO)
+ "pkey_pckmo",
+#endif
+#if IS_MODULE(CONFIG_PKEY_UV)
+ "pkey_uv",
+#endif
+ };
int i;
for (i = 0; i < ARRAY_SIZE(pkey_handler_modules); i++) {
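
The rewritten table deserves a second look: each entry is compiled in only when the corresponding handler is actually built as a module. A minimal sketch of the kconfig helpers this relies on (include/linux/kconfig.h):

	#include <linux/kconfig.h>

	/* IS_MODULE(CONFIG_X) is 1 iff CONFIG_X=m, IS_BUILTIN() iff =y,
	 * IS_ENABLED() iff either; guarding each entry with IS_MODULE keeps
	 * the table limited to handlers request_module() could really load,
	 * avoiding pointless modprobe attempts for built-in or disabled ones. */
	static const char * const handlers[] = {
	#if IS_MODULE(CONFIG_PKEY_CCA)
		"pkey_cca",
	#endif
	};
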
diff --git a/drivers/s390/crypto/pkey_base.h b/drivers/s390/crypto/pkey_base.h
index 7a1a5ce192d8..7347647dfaa7 100644
--- a/drivers/s390/crypto/pkey_base.h
+++ b/drivers/s390/crypto/pkey_base.h
@@ -97,6 +97,42 @@ static inline u32 pkey_aes_bitsize_to_keytype(u32 keybitsize)
}
/*
+ * helper function which translates the PKEY_KEYTYPE_*
+ * to the protected key size minus the WK VP length
+ */
+static inline u32 pkey_keytype_to_size(u32 keytype)
+{
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ return 16;
+ case PKEY_KEYTYPE_AES_192:
+ return 24;
+ case PKEY_KEYTYPE_AES_256:
+ return 32;
+ case PKEY_KEYTYPE_ECC_P256:
+ return 32;
+ case PKEY_KEYTYPE_ECC_P384:
+ return 48;
+ case PKEY_KEYTYPE_ECC_P521:
+ return 80;
+ case PKEY_KEYTYPE_ECC_ED25519:
+ return 32;
+ case PKEY_KEYTYPE_ECC_ED448:
+ return 64;
+ case PKEY_KEYTYPE_AES_XTS_128:
+ return 32;
+ case PKEY_KEYTYPE_AES_XTS_256:
+ return 64;
+ case PKEY_KEYTYPE_HMAC_512:
+ return 64;
+ case PKEY_KEYTYPE_HMAC_1024:
+ return 128;
+ default:
+ return 0;
+ }
+}
+
+/*
* pkey_api.c:
*/
int __init pkey_api_init(void);
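
Later hunks in this patch lean on the helper above to replace several hand-maintained size tables. A hedged usage sketch showing the recurring length check (AES_WK_VP_SIZE is the 32-byte wrapping-key verification pattern appended to every protected key):

	/* expected blob length = effective key material + WK VP */
	static u32 expected_protkey_len(u32 keytype)
	{
		u32 keysize = pkey_keytype_to_size(keytype);

		return keysize ? keysize + AES_WK_VP_SIZE : 0;	/* 0: unknown */
	}
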
diff --git a/drivers/s390/crypto/pkey_cca.c b/drivers/s390/crypto/pkey_cca.c
index 937051381720..cda22db31f6c 100644
--- a/drivers/s390/crypto/pkey_cca.c
+++ b/drivers/s390/crypto/pkey_cca.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
-#include "zcrypt_api.h"
#include "zcrypt_ccamisc.h"
#include "pkey_base.h"
@@ -225,14 +224,14 @@ static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_AES) {
/* CCA AES data key */
- if (keylen != sizeof(struct secaeskeytoken))
+ if (keylen < sizeof(struct secaeskeytoken))
return -EINVAL;
if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0))
return -EINVAL;
} else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_VLSC) {
/* CCA AES cipher key */
- if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
+ if (keylen < hdr->len)
return -EINVAL;
if (cca_check_secaescipherkey(pkey_dbf_info,
3, key, 0, 1))
diff --git a/drivers/s390/crypto/pkey_ep11.c b/drivers/s390/crypto/pkey_ep11.c
index f42d397a9cb6..5b033ca3e828 100644
--- a/drivers/s390/crypto/pkey_ep11.c
+++ b/drivers/s390/crypto/pkey_ep11.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
-#include "zcrypt_api.h"
#include "zcrypt_ccamisc.h"
#include "zcrypt_ep11misc.h"
#include "pkey_base.h"
diff --git a/drivers/s390/crypto/pkey_pckmo.c b/drivers/s390/crypto/pkey_pckmo.c
index beeca8827c46..835d59f4fbc5 100644
--- a/drivers/s390/crypto/pkey_pckmo.c
+++ b/drivers/s390/crypto/pkey_pckmo.c
@@ -15,7 +15,6 @@
#include <crypto/aes.h>
#include <linux/random.h>
-#include "zcrypt_api.h"
#include "zcrypt_ccamisc.h"
#include "pkey_base.h"
@@ -38,23 +37,9 @@ static bool is_pckmo_key(const u8 *key, u32 keylen)
case TOKTYPE_NON_CCA:
switch (hdr->version) {
case TOKVER_CLEAR_KEY:
- switch (t->keytype) {
- case PKEY_KEYTYPE_AES_128:
- case PKEY_KEYTYPE_AES_192:
- case PKEY_KEYTYPE_AES_256:
- case PKEY_KEYTYPE_ECC_P256:
- case PKEY_KEYTYPE_ECC_P384:
- case PKEY_KEYTYPE_ECC_P521:
- case PKEY_KEYTYPE_ECC_ED25519:
- case PKEY_KEYTYPE_ECC_ED448:
- case PKEY_KEYTYPE_AES_XTS_128:
- case PKEY_KEYTYPE_AES_XTS_256:
- case PKEY_KEYTYPE_HMAC_512:
- case PKEY_KEYTYPE_HMAC_1024:
+ if (pkey_keytype_to_size(t->keytype))
return true;
- default:
- return false;
- }
+ return false;
case TOKVER_PROTECTED_KEY:
return true;
default:
@@ -86,80 +71,49 @@ static int pckmo_clr2protkey(u32 keytype, const u8 *clrkey, u32 clrkeylen,
int keysize, rc = -EINVAL;
u8 paramblock[160];
- u32 pkeytype;
- long fc;
+ u32 pkeytype = 0;
+ unsigned int fc;
switch (keytype) {
case PKEY_KEYTYPE_AES_128:
- /* 16 byte key, 32 byte aes wkvp, total 48 bytes */
- keysize = 16;
- pkeytype = keytype;
fc = CPACF_PCKMO_ENC_AES_128_KEY;
break;
case PKEY_KEYTYPE_AES_192:
- /* 24 byte key, 32 byte aes wkvp, total 56 bytes */
- keysize = 24;
- pkeytype = keytype;
fc = CPACF_PCKMO_ENC_AES_192_KEY;
break;
case PKEY_KEYTYPE_AES_256:
- /* 32 byte key, 32 byte aes wkvp, total 64 bytes */
- keysize = 32;
- pkeytype = keytype;
fc = CPACF_PCKMO_ENC_AES_256_KEY;
break;
case PKEY_KEYTYPE_ECC_P256:
- /* 32 byte key, 32 byte aes wkvp, total 64 bytes */
- keysize = 32;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_P256_KEY;
break;
case PKEY_KEYTYPE_ECC_P384:
- /* 48 byte key, 32 byte aes wkvp, total 80 bytes */
- keysize = 48;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_P384_KEY;
break;
case PKEY_KEYTYPE_ECC_P521:
- /* 80 byte key, 32 byte aes wkvp, total 112 bytes */
- keysize = 80;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_P521_KEY;
break;
case PKEY_KEYTYPE_ECC_ED25519:
- /* 32 byte key, 32 byte aes wkvp, total 64 bytes */
- keysize = 32;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY;
break;
case PKEY_KEYTYPE_ECC_ED448:
- /* 64 byte key, 32 byte aes wkvp, total 96 bytes */
- keysize = 64;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_ED448_KEY;
break;
case PKEY_KEYTYPE_AES_XTS_128:
- /* 2x16 byte keys, 32 byte aes wkvp, total 64 bytes */
- keysize = 32;
- pkeytype = PKEY_KEYTYPE_AES_XTS_128;
fc = CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY;
break;
case PKEY_KEYTYPE_AES_XTS_256:
- /* 2x32 byte keys, 32 byte aes wkvp, total 96 bytes */
- keysize = 64;
- pkeytype = PKEY_KEYTYPE_AES_XTS_256;
fc = CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY;
break;
case PKEY_KEYTYPE_HMAC_512:
- /* 64 byte key, 32 byte aes wkvp, total 96 bytes */
- keysize = 64;
- pkeytype = PKEY_KEYTYPE_HMAC_512;
fc = CPACF_PCKMO_ENC_HMAC_512_KEY;
break;
case PKEY_KEYTYPE_HMAC_1024:
- /* 128 byte key, 32 byte aes wkvp, total 160 bytes */
- keysize = 128;
- pkeytype = PKEY_KEYTYPE_HMAC_1024;
fc = CPACF_PCKMO_ENC_HMAC_1024_KEY;
break;
default:
@@ -168,6 +122,9 @@ static int pckmo_clr2protkey(u32 keytype, const u8 *clrkey, u32 clrkeylen,
goto out;
}
+ keysize = pkey_keytype_to_size(keytype);
+ pkeytype = pkeytype ?: keytype;
+
if (clrkeylen && clrkeylen < keysize) {
PKEY_DBF_ERR("%s clear key size too small: %u < %d\n",
__func__, clrkeylen, keysize);
@@ -190,7 +147,8 @@ static int pckmo_clr2protkey(u32 keytype, const u8 *clrkey, u32 clrkeylen,
}
/* check for the pckmo subfunction we need now */
if (!cpacf_test_func(&pckmo_functions, fc)) {
- PKEY_DBF_ERR("%s pckmo functions not available\n", __func__);
+ PKEY_DBF_ERR("%s pckmo fc 0x%02x not available\n",
+ __func__, fc);
rc = -ENODEV;
goto out;
}
@@ -216,60 +174,42 @@ out:
/*
* Verify a raw protected key blob.
- * Currently only AES protected keys are supported.
*/
static int pckmo_verify_protkey(const u8 *protkey, u32 protkeylen,
u32 protkeytype)
{
- struct {
- u8 iv[AES_BLOCK_SIZE];
- u8 key[MAXPROTKEYSIZE];
- } param;
- u8 null_msg[AES_BLOCK_SIZE];
- u8 dest_buf[AES_BLOCK_SIZE];
- unsigned int k, pkeylen;
- unsigned long fc;
- int rc = -EINVAL;
+ u8 clrkey[16] = { 0 }, tmpkeybuf[16 + AES_WK_VP_SIZE];
+ u32 tmpkeybuflen, tmpkeytype;
+ int keysize, rc = -EINVAL;
+ u8 *wkvp;
- switch (protkeytype) {
- case PKEY_KEYTYPE_AES_128:
- pkeylen = 16 + AES_WK_VP_SIZE;
- fc = CPACF_KMC_PAES_128;
- break;
- case PKEY_KEYTYPE_AES_192:
- pkeylen = 24 + AES_WK_VP_SIZE;
- fc = CPACF_KMC_PAES_192;
- break;
- case PKEY_KEYTYPE_AES_256:
- pkeylen = 32 + AES_WK_VP_SIZE;
- fc = CPACF_KMC_PAES_256;
- break;
- default:
+ /* check protkey type and size */
+ keysize = pkey_keytype_to_size(protkeytype);
+ if (!keysize) {
PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__,
protkeytype);
goto out;
}
- if (protkeylen != pkeylen) {
- PKEY_DBF_ERR("%s invalid protected key size %u for keytype %u\n",
- __func__, protkeylen, protkeytype);
+ if (protkeylen < keysize + AES_WK_VP_SIZE)
goto out;
- }
-
- memset(null_msg, 0, sizeof(null_msg));
- memset(param.iv, 0, sizeof(param.iv));
- memcpy(param.key, protkey, protkeylen);
+ /* generate a dummy AES 128 protected key */
+ tmpkeybuflen = sizeof(tmpkeybuf);
+ rc = pckmo_clr2protkey(PKEY_KEYTYPE_AES_128,
+ clrkey, sizeof(clrkey),
+ tmpkeybuf, &tmpkeybuflen, &tmpkeytype);
+ if (rc)
+ goto out;
+ memzero_explicit(tmpkeybuf, 16);
+ wkvp = tmpkeybuf + 16;
- k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf,
- sizeof(null_msg));
- if (k != sizeof(null_msg)) {
- PKEY_DBF_ERR("%s protected key is not valid\n", __func__);
+ /* compare WK VP from the temp key with that of the given prot key */
+ if (memcmp(wkvp, protkey + keysize, AES_WK_VP_SIZE)) {
+ PKEY_DBF_ERR("%s protected key WK VP mismatch\n", __func__);
rc = -EKEYREJECTED;
goto out;
}
- rc = 0;
-
out:
pr_debug("rc=%d\n", rc);
return rc;
@@ -289,37 +229,33 @@ static int pckmo_key2protkey(const u8 *key, u32 keylen,
switch (hdr->version) {
case TOKVER_PROTECTED_KEY: {
struct protkeytoken *t = (struct protkeytoken *)key;
+ u32 keysize;
if (keylen < sizeof(*t))
goto out;
+ keysize = pkey_keytype_to_size(t->keytype);
+ if (!keysize) {
+ PKEY_DBF_ERR("%s protected key token: unknown keytype %u\n",
+ __func__, t->keytype);
+ goto out;
+ }
switch (t->keytype) {
case PKEY_KEYTYPE_AES_128:
case PKEY_KEYTYPE_AES_192:
case PKEY_KEYTYPE_AES_256:
- if (keylen != sizeof(struct protaeskeytoken))
+ if (t->len != keysize + AES_WK_VP_SIZE ||
+ keylen < sizeof(struct protaeskeytoken))
goto out;
rc = pckmo_verify_protkey(t->protkey, t->len,
t->keytype);
if (rc)
goto out;
break;
- case PKEY_KEYTYPE_AES_XTS_128:
- if (t->len != 64 || keylen != sizeof(*t) + t->len)
- goto out;
- break;
- case PKEY_KEYTYPE_AES_XTS_256:
- case PKEY_KEYTYPE_HMAC_512:
- if (t->len != 96 || keylen != sizeof(*t) + t->len)
- goto out;
- break;
- case PKEY_KEYTYPE_HMAC_1024:
- if (t->len != 160 || keylen != sizeof(*t) + t->len)
+ default:
+ if (t->len != keysize + AES_WK_VP_SIZE ||
+ keylen < sizeof(*t) + keysize + AES_WK_VP_SIZE)
goto out;
break;
- default:
- PKEY_DBF_ERR("%s protected key token: unknown keytype %u\n",
- __func__, t->keytype);
- goto out;
}
memcpy(protkey, t->protkey, t->len);
*protkeylen = t->len;
@@ -329,47 +265,12 @@ static int pckmo_key2protkey(const u8 *key, u32 keylen,
}
case TOKVER_CLEAR_KEY: {
struct clearkeytoken *t = (struct clearkeytoken *)key;
- u32 keysize = 0;
+ u32 keysize;
- if (keylen < sizeof(struct clearkeytoken) ||
- keylen != sizeof(*t) + t->len)
+ if (keylen < sizeof(*t) ||
+ keylen < sizeof(*t) + t->len)
goto out;
- switch (t->keytype) {
- case PKEY_KEYTYPE_AES_128:
- case PKEY_KEYTYPE_AES_192:
- case PKEY_KEYTYPE_AES_256:
- keysize = pkey_keytype_aes_to_size(t->keytype);
- break;
- case PKEY_KEYTYPE_ECC_P256:
- keysize = 32;
- break;
- case PKEY_KEYTYPE_ECC_P384:
- keysize = 48;
- break;
- case PKEY_KEYTYPE_ECC_P521:
- keysize = 80;
- break;
- case PKEY_KEYTYPE_ECC_ED25519:
- keysize = 32;
- break;
- case PKEY_KEYTYPE_ECC_ED448:
- keysize = 64;
- break;
- case PKEY_KEYTYPE_AES_XTS_128:
- keysize = 32;
- break;
- case PKEY_KEYTYPE_AES_XTS_256:
- keysize = 64;
- break;
- case PKEY_KEYTYPE_HMAC_512:
- keysize = 64;
- break;
- case PKEY_KEYTYPE_HMAC_1024:
- keysize = 128;
- break;
- default:
- break;
- }
+ keysize = pkey_keytype_to_size(t->keytype);
if (!keysize) {
PKEY_DBF_ERR("%s clear key token: unknown keytype %u\n",
__func__, t->keytype);
@@ -397,8 +298,6 @@ out:
/*
* Generate a random protected key.
- * Currently only the generation of AES protected keys
- * is supported.
*/
static int pckmo_gen_protkey(u32 keytype, u32 subtype,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
@@ -407,32 +306,32 @@ static int pckmo_gen_protkey(u32 keytype, u32 subtype,
int keysize;
int rc;
+ keysize = pkey_keytype_to_size(keytype);
+ if (!keysize) {
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+ if (subtype != PKEY_TYPE_PROTKEY) {
+ PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
+ __func__, subtype);
+ return -EINVAL;
+ }
+
switch (keytype) {
case PKEY_KEYTYPE_AES_128:
case PKEY_KEYTYPE_AES_192:
case PKEY_KEYTYPE_AES_256:
- keysize = pkey_keytype_aes_to_size(keytype);
- break;
case PKEY_KEYTYPE_AES_XTS_128:
- keysize = 32;
- break;
case PKEY_KEYTYPE_AES_XTS_256:
case PKEY_KEYTYPE_HMAC_512:
- keysize = 64;
- break;
case PKEY_KEYTYPE_HMAC_1024:
- keysize = 128;
break;
default:
- PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ PKEY_DBF_ERR("%s unsupported keytype %d\n",
__func__, keytype);
return -EINVAL;
}
- if (subtype != PKEY_TYPE_PROTKEY) {
- PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
- __func__, subtype);
- return -EINVAL;
- }
/* generate a dummy random clear key */
get_random_bytes(clrkey, keysize);
@@ -453,7 +352,6 @@ out:
/*
* Verify a protected key token blob.
- * Currently only AES protected keys are supported.
*/
static int pckmo_verify_key(const u8 *key, u32 keylen)
{
@@ -467,11 +365,26 @@ static int pckmo_verify_key(const u8 *key, u32 keylen)
switch (hdr->version) {
case TOKVER_PROTECTED_KEY: {
- struct protaeskeytoken *t;
+ struct protkeytoken *t = (struct protkeytoken *)key;
+ u32 keysize;
- if (keylen != sizeof(struct protaeskeytoken))
+ if (keylen < sizeof(*t))
+ goto out;
+ keysize = pkey_keytype_to_size(t->keytype);
+ if (!keysize || t->len != keysize + AES_WK_VP_SIZE)
goto out;
- t = (struct protaeskeytoken *)key;
+ switch (t->keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ if (keylen < sizeof(struct protaeskeytoken))
+ goto out;
+ break;
+ default:
+ if (keylen < sizeof(*t) + keysize + AES_WK_VP_SIZE)
+ goto out;
+ break;
+ }
rc = pckmo_verify_protkey(t->protkey, t->len, t->keytype);
break;
}
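
The pckmo_verify_protkey() rewrite above replaces a test encryption with a direct wrapping-key check, which is what lifts the AES-only restriction. A condensed restatement of the idea, using only calls that appear in the hunk:

	/* A protected key ends in a verification pattern (WK VP) of the
	 * wrapping key that sealed it. Wrapping a throwaway zero key yields
	 * the *current* WK VP; a mismatch means the key under test was
	 * sealed by a different (stale) wrapping key. */
	static bool wkvp_is_current(const u8 *protkey, int keysize)
	{
		u8 clrkey[16] = { 0 }, tmp[16 + AES_WK_VP_SIZE];
		u32 tmplen = sizeof(tmp), tmptype;

		if (pckmo_clr2protkey(PKEY_KEYTYPE_AES_128, clrkey,
				      sizeof(clrkey), tmp, &tmplen, &tmptype))
			return false;
		return !memcmp(tmp + 16, protkey + keysize, AES_WK_VP_SIZE);
	}
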
diff --git a/drivers/s390/crypto/pkey_sysfs.c b/drivers/s390/crypto/pkey_sysfs.c
index cc0fc1e264bd..a4eb45803f5e 100644
--- a/drivers/s390/crypto/pkey_sysfs.c
+++ b/drivers/s390/crypto/pkey_sysfs.c
@@ -10,7 +10,6 @@
#include <linux/sysfs.h>
-#include "zcrypt_api.h"
#include "zcrypt_ccamisc.h"
#include "zcrypt_ep11misc.h"
diff --git a/drivers/s390/crypto/pkey_uv.c b/drivers/s390/crypto/pkey_uv.c
new file mode 100644
index 000000000000..805817b14354
--- /dev/null
+++ b/drivers/s390/crypto/pkey_uv.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey uv specific code
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define KMSG_COMPONENT "pkey"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/cpufeature.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <asm/uv.h>
+
+#include "zcrypt_ccamisc.h"
+#include "pkey_base.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key UV handler");
+
+/*
+ * UV secret token struct and defines.
+ */
+
+#define TOKVER_UV_SECRET 0x09
+
+struct uvsecrettoken {
+ u8 type; /* 0x00 = TOKTYPE_NON_CCA */
+ u8 res0[3];
+ u8 version; /* 0x09 = TOKVER_UV_SECRET */
+ u8 res1[3];
+ u16 secret_type; /* one of enum uv_secret_types from uv.h */
+ u16 secret_len; /* length in bytes of the secret */
+ u8 secret_id[UV_SECRET_ID_LEN]; /* the secret id for this secret */
+} __packed;
+
+/*
+ * Check key blob for known and supported UV key.
+ */
+static bool is_uv_key(const u8 *key, u32 keylen)
+{
+ struct uvsecrettoken *t = (struct uvsecrettoken *)key;
+
+ if (keylen < sizeof(*t))
+ return false;
+
+ switch (t->type) {
+ case TOKTYPE_NON_CCA:
+ switch (t->version) {
+ case TOKVER_UV_SECRET:
+ switch (t->secret_type) {
+ case UV_SECRET_AES_128:
+ case UV_SECRET_AES_192:
+ case UV_SECRET_AES_256:
+ case UV_SECRET_AES_XTS_128:
+ case UV_SECRET_AES_XTS_256:
+ case UV_SECRET_HMAC_SHA_256:
+ case UV_SECRET_HMAC_SHA_512:
+ case UV_SECRET_ECDSA_P256:
+ case UV_SECRET_ECDSA_P384:
+ case UV_SECRET_ECDSA_P521:
+ case UV_SECRET_ECDSA_ED25519:
+ case UV_SECRET_ECDSA_ED448:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool is_uv_keytype(enum pkey_key_type keytype)
+{
+ switch (keytype) {
+ case PKEY_TYPE_UVSECRET:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int retrieve_secret(const u8 secret_id[UV_SECRET_ID_LEN],
+ u16 *secret_type, u8 *buf, u32 *buflen)
+{
+ struct uv_secret_list_item_hdr secret_meta_data;
+ int rc;
+
+ rc = uv_get_secret_metadata(secret_id, &secret_meta_data);
+ if (rc)
+ return rc;
+
+ if (*buflen < secret_meta_data.length)
+ return -EINVAL;
+
+ rc = uv_retrieve_secret(secret_meta_data.index,
+ buf, secret_meta_data.length);
+ if (rc)
+ return rc;
+
+ *secret_type = secret_meta_data.type;
+ *buflen = secret_meta_data.length;
+
+ return 0;
+}
+
+static int uv_get_size_and_type(u16 secret_type, u32 *pkeysize, u32 *pkeytype)
+{
+ int rc = 0;
+
+ switch (secret_type) {
+ case UV_SECRET_AES_128:
+ *pkeysize = 16 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case UV_SECRET_AES_192:
+ *pkeysize = 24 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case UV_SECRET_AES_256:
+ *pkeysize = 32 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ case UV_SECRET_AES_XTS_128:
+ *pkeysize = 16 + 16 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_AES_XTS_128;
+ break;
+ case UV_SECRET_AES_XTS_256:
+ *pkeysize = 32 + 32 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_AES_XTS_256;
+ break;
+ case UV_SECRET_HMAC_SHA_256:
+ *pkeysize = 64 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_HMAC_512;
+ break;
+ case UV_SECRET_HMAC_SHA_512:
+ *pkeysize = 128 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_HMAC_1024;
+ break;
+ case UV_SECRET_ECDSA_P256:
+ *pkeysize = 32 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_ECC_P256;
+ break;
+ case UV_SECRET_ECDSA_P384:
+ *pkeysize = 48 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_ECC_P384;
+ break;
+ case UV_SECRET_ECDSA_P521:
+ *pkeysize = 80 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_ECC_P521;
+ break;
+ case UV_SECRET_ECDSA_ED25519:
+ *pkeysize = 32 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_ECC_ED25519;
+ break;
+ case UV_SECRET_ECDSA_ED448:
+ *pkeysize = 64 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_ECC_ED448;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int uv_key2protkey(const struct pkey_apqn *_apqns __always_unused,
+ size_t _nr_apqns __always_unused,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *keyinfo)
+{
+ struct uvsecrettoken *t = (struct uvsecrettoken *)key;
+ u32 pkeysize, pkeytype;
+ u16 secret_type;
+ int rc;
+
+ rc = uv_get_size_and_type(t->secret_type, &pkeysize, &pkeytype);
+ if (rc)
+ goto out;
+
+ if (*protkeylen < pkeysize) {
+ PKEY_DBF_ERR("%s prot key buffer size too small: %u < %u\n",
+ __func__, *protkeylen, pkeysize);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = retrieve_secret(t->secret_id, &secret_type, protkey, protkeylen);
+ if (rc) {
+ PKEY_DBF_ERR("%s retrieve_secret() failed with %d\n",
+ __func__, rc);
+ goto out;
+ }
+ if (secret_type != t->secret_type) {
+ PKEY_DBF_ERR("%s retrieved secret type %u != expected type %u\n",
+ __func__, secret_type, t->secret_type);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (keyinfo)
+ *keyinfo = pkeytype;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int uv_verifykey(const u8 *key, u32 keylen,
+ u16 *_card __always_unused,
+ u16 *_dom __always_unused,
+ u32 *keytype, u32 *keybitsize, u32 *flags)
+{
+ struct uvsecrettoken *t = (struct uvsecrettoken *)key;
+ struct uv_secret_list_item_hdr secret_meta_data;
+ u32 pkeysize, pkeytype, bitsize;
+ int rc;
+
+ rc = uv_get_size_and_type(t->secret_type, &pkeysize, &pkeytype);
+ if (rc)
+ goto out;
+
+ rc = uv_get_secret_metadata(t->secret_id, &secret_meta_data);
+ if (rc)
+ goto out;
+
+ if (secret_meta_data.type != t->secret_type) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* set keytype; keybitsize and flags are not supported */
+ if (keytype)
+ *keytype = PKEY_TYPE_UVSECRET;
+ if (keybitsize) {
+ bitsize = 8 * pkey_keytype_to_size(pkeytype);
+ *keybitsize = bitsize ?: PKEY_SIZE_UNKNOWN;
+ }
+ if (flags)
+ *flags = pkeytype;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static struct pkey_handler uv_handler = {
+ .module = THIS_MODULE,
+ .name = "PKEY UV handler",
+ .is_supported_key = is_uv_key,
+ .is_supported_keytype = is_uv_keytype,
+ .key_to_protkey = uv_key2protkey,
+ .verify_key = uv_verifykey,
+};
+
+/*
+ * Module init
+ */
+static int __init pkey_uv_init(void)
+{
+ if (!is_prot_virt_guest())
+ return -ENODEV;
+
+ if (!test_bit_inv(BIT_UVC_CMD_RETR_SECRET, uv_info.inst_calls_list))
+ return -ENODEV;
+
+ return pkey_handler_register(&uv_handler);
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_uv_exit(void)
+{
+ pkey_handler_unregister(&uv_handler);
+}
+
+module_cpu_feature_match(S390_CPU_FEATURE_UV, pkey_uv_init);
+module_exit(pkey_uv_exit);
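
The retrieval path is the heart of the new handler; a condensed, annotated restatement of retrieve_secret() from the file above (note the clear key never appears — the ultravisor hands back key material already in protected-key form, bound to the current wrapping key):

	static int fetch_secret(const u8 id[UV_SECRET_ID_LEN], u8 *buf, u32 *len)
	{
		struct uv_secret_list_item_hdr md;
		int rc = uv_get_secret_metadata(id, &md);	/* id -> index/type/len */

		if (rc)
			return rc;
		if (*len < md.length)
			return -EINVAL;			/* caller buffer too small */
		rc = uv_retrieve_secret(md.index, buf, md.length);
		if (!rc)
			*len = md.length;
		return rc;
	}
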
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 9f76f2d7b66e..8c0b40d8eb39 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -1521,18 +1521,13 @@ static ssize_t control_domains_show(struct device *dev,
char *buf)
{
unsigned long id;
- int nchars = 0;
- int n;
- char *bufpos = buf;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
unsigned long max_domid = matrix_mdev->matrix.adm_max;
+ int nchars = 0;
mutex_lock(&matrix_dev->mdevs_lock);
- for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
- n = sprintf(bufpos, "%04lx\n", id);
- bufpos += n;
- nchars += n;
- }
+ for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1)
+ nchars += sysfs_emit_at(buf, nchars, "%04lx\n", id);
mutex_unlock(&matrix_dev->mdevs_lock);
return nchars;
@@ -1541,7 +1536,6 @@ static DEVICE_ATTR_RO(control_domains);
static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
{
- char *bufpos = buf;
unsigned long apid;
unsigned long apqi;
unsigned long apid1;
@@ -1549,33 +1543,21 @@ static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
unsigned long napm_bits = matrix->apm_max + 1;
unsigned long naqm_bits = matrix->aqm_max + 1;
int nchars = 0;
- int n;
apid1 = find_first_bit_inv(matrix->apm, napm_bits);
apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);
if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
- for_each_set_bit_inv(apqi, matrix->aqm,
- naqm_bits) {
- n = sprintf(bufpos, "%02lx.%04lx\n", apid,
- apqi);
- bufpos += n;
- nchars += n;
- }
+ for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits)
+ nchars += sysfs_emit_at(buf, nchars, "%02lx.%04lx\n", apid, apqi);
}
} else if (apid1 < napm_bits) {
- for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
- n = sprintf(bufpos, "%02lx.\n", apid);
- bufpos += n;
- nchars += n;
- }
+ for_each_set_bit_inv(apid, matrix->apm, napm_bits)
+ nchars += sysfs_emit_at(buf, nchars, "%02lx.\n", apid);
} else if (apqi1 < naqm_bits) {
- for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) {
- n = sprintf(bufpos, ".%04lx\n", apqi);
- bufpos += n;
- nchars += n;
- }
+ for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits)
+ nchars += sysfs_emit_at(buf, nchars, ".%04lx\n", apqi);
}
return nchars;
@@ -2263,14 +2245,11 @@ static ssize_t status_show(struct device *dev,
if (matrix_mdev->kvm &&
test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_IN_USE);
+ nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_IN_USE);
else
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_ASSIGNED);
+ nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_ASSIGNED);
} else {
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_UNASSIGNED);
+ nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_UNASSIGNED);
}
mutex_unlock(&matrix_dev->mdevs_lock);
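
The vfio_ap conversions above also retire the manual bufpos arithmetic; sysfs_emit_at() carries the running offset itself. A minimal sketch of the accumulation pattern (bitmap and nbits are placeholders):

	static ssize_t example_show_ids(unsigned long *bitmap,
					unsigned long nbits, char *buf)
	{
		unsigned long id;
		int nchars = 0;

		/* each call appends at offset nchars inside the single sysfs
		 * page and returns the bytes added; overflow is clamped safely */
		for_each_set_bit_inv(id, bitmap, nbits)
			nchars += sysfs_emit_at(buf, nchars, "%04lx\n", id);

		return nchars;
	}
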
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
index aed7e8384542..26bdca702523 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.h
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -12,6 +12,7 @@
#include <asm/zcrypt.h>
#include <asm/pkey.h>
+#include "zcrypt_api.h"
/* Key token types */
#define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 039e18d46f76..31c9f95d809d 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1319,7 +1319,7 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr,
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
- return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
+ return sysfs_emit(buf, "%s\n", netiucv_printuser(priv->conn));
}
static int netiucv_check_user(const char *buf, size_t count, char *username,
@@ -1415,7 +1415,7 @@ static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
- return sprintf(buf, "%d\n", priv->conn->max_buffsize);
+ return sysfs_emit(buf, "%d\n", priv->conn->max_buffsize);
}
static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
@@ -1473,7 +1473,7 @@ static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
- return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
+ return sysfs_emit(buf, "%s\n", fsm_getstate_str(priv->fsm));
}
static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
@@ -1484,7 +1484,7 @@ static ssize_t conn_fsm_show (struct device *dev,
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
- return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
+ return sysfs_emit(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
}
static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
@@ -1495,7 +1495,7 @@ static ssize_t maxmulti_show (struct device *dev,
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
- return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
+ return sysfs_emit(buf, "%ld\n", priv->conn->prof.maxmulti);
}
static ssize_t maxmulti_write (struct device *dev,
@@ -1517,7 +1517,7 @@ static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
- return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
+ return sysfs_emit(buf, "%ld\n", priv->conn->prof.maxcqueue);
}
static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
@@ -1538,7 +1538,7 @@ static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
- return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
+ return sysfs_emit(buf, "%ld\n", priv->conn->prof.doios_single);
}
static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
@@ -1559,7 +1559,7 @@ static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
- return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
+ return sysfs_emit(buf, "%ld\n", priv->conn->prof.doios_multi);
}
static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
@@ -1580,7 +1580,7 @@ static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
- return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
+ return sysfs_emit(buf, "%ld\n", priv->conn->prof.txlen);
}
static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
@@ -1601,7 +1601,7 @@ static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
- return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
+ return sysfs_emit(buf, "%ld\n", priv->conn->prof.tx_time);
}
static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
@@ -1622,7 +1622,7 @@ static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
- return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
+ return sysfs_emit(buf, "%ld\n", priv->conn->prof.tx_pending);
}
static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
@@ -1643,7 +1643,7 @@ static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
- return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
+ return sysfs_emit(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}
static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index cb67fa80fb12..304b81bb5f90 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -24,7 +24,7 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
{ \
struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
\
- return sprintf(buf, _format, _value); \
+ return sysfs_emit(buf, _format, _value); \
} \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
zfcp_sysfs_##_feat##_##_name##_show, NULL);
@@ -34,7 +34,7 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
struct device_attribute *at,\
char *buf) \
{ \
- return sprintf(buf, _format, _value); \
+ return sysfs_emit(buf, _format, _value); \
} \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
zfcp_sysfs_##_feat##_##_name##_show, NULL);
@@ -51,7 +51,7 @@ static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
if (!adapter) \
return -ENODEV; \
\
- i = sprintf(buf, _format, _value); \
+ i = sysfs_emit(buf, _format, _value); \
zfcp_ccw_adapter_put(adapter); \
return i; \
} \
@@ -95,9 +95,9 @@ static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
- return sprintf(buf, "1\n");
+ return sysfs_emit(buf, "1\n");
- return sprintf(buf, "0\n");
+ return sysfs_emit(buf, "0\n");
}
static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
@@ -135,7 +135,7 @@ static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev,
scsi_device_put(sdev);
}
- return sprintf(buf, "%d\n", failed);
+ return sysfs_emit(buf, "%d\n", failed);
}
static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
@@ -176,9 +176,9 @@ static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
return -ENODEV;
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
- i = sprintf(buf, "1\n");
+ i = sysfs_emit(buf, "1\n");
else
- i = sprintf(buf, "0\n");
+ i = sysfs_emit(buf, "0\n");
zfcp_ccw_adapter_put(adapter);
return i;
@@ -348,8 +348,7 @@ zfcp_sysfs_adapter_diag_max_age_show(struct device *dev,
if (!adapter)
return -ENODEV;
- /* ceil(log(2^64 - 1) / log(10)) = 20 */
- rc = scnprintf(buf, 20 + 2, "%lu\n", adapter->diagnostics->max_age);
+ rc = sysfs_emit(buf, "%lu\n", adapter->diagnostics->max_age);
zfcp_ccw_adapter_put(adapter);
return rc;
@@ -401,14 +400,14 @@ static ssize_t zfcp_sysfs_adapter_fc_security_show(
*/
status = atomic_read(&adapter->status);
if (0 == (status & ZFCP_STATUS_COMMON_OPEN))
- i = sprintf(buf, "unknown\n");
+ i = sysfs_emit(buf, "unknown\n");
else if (!(adapter->adapter_features & FSF_FEATURE_FC_SECURITY))
- i = sprintf(buf, "unsupported\n");
+ i = sysfs_emit(buf, "unsupported\n");
else {
i = zfcp_fsf_scnprint_fc_security(
buf, PAGE_SIZE - 1, adapter->fc_security_algorithms,
ZFCP_FSF_PRINT_FMT_LIST);
- i += scnprintf(buf + i, PAGE_SIZE - i, "\n");
+ i += sysfs_emit_at(buf, i, "\n");
}
zfcp_ccw_adapter_put(adapter);
@@ -490,14 +489,14 @@ static ssize_t zfcp_sysfs_port_fc_security_show(struct device *dev,
0 != (status & ZFCP_STATUS_PORT_LINK_TEST) ||
0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
0 != (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
- i = sprintf(buf, "unknown\n");
+ i = sysfs_emit(buf, "unknown\n");
else if (!(adapter->adapter_features & FSF_FEATURE_FC_SECURITY))
- i = sprintf(buf, "unsupported\n");
+ i = sysfs_emit(buf, "unsupported\n");
else {
i = zfcp_fsf_scnprint_fc_security(
buf, PAGE_SIZE - 1, port->connection_info,
ZFCP_FSF_PRINT_FMT_SINGLEITEM);
- i += scnprintf(buf + i, PAGE_SIZE - i, "\n");
+ i += sysfs_emit_at(buf, i, "\n");
}
return i;
@@ -569,8 +568,8 @@ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
do_div(cmin, 1000); \
do_div(cmax, 1000); \
\
- return sprintf(buf, "%llu %llu %llu %llu %llu %llu %llu\n", \
- fmin, fmax, fsum, cmin, cmax, csum, cc); \
+ return sysfs_emit(buf, "%llu %llu %llu %llu %llu %llu %llu\n", \
+ fmin, fmax, fsum, cmin, cmax, csum, cc); \
} \
static ssize_t \
zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \
@@ -610,8 +609,8 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
\
- return sprintf(buf, _format, _value); \
-} \
+ return sysfs_emit(buf, _format, _value); \
+} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
@@ -625,7 +624,7 @@ static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
{
struct scsi_device *sdev = to_scsi_device(dev);
- return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
+ return sysfs_emit(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
}
static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
@@ -641,7 +640,7 @@ static ssize_t zfcp_sysfs_scsi_zfcp_failed_show(struct device *dev,
unsigned int status = atomic_read(&sdev_to_zfcp(sdev)->status);
unsigned int failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
- return sprintf(buf, "%d\n", failed);
+ return sysfs_emit(buf, "%d\n", failed);
}
static ssize_t zfcp_sysfs_scsi_zfcp_failed_store(struct device *dev,
@@ -714,8 +713,8 @@ static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
if (retval == 0 || retval == -EAGAIN)
- retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
- qtcb_port->cb_util, qtcb_port->a_util);
+ retval = sysfs_emit(buf, "%u %u %u\n", qtcb_port->cp_util,
+ qtcb_port->cb_util, qtcb_port->a_util);
kfree(qtcb_port);
return retval;
}
@@ -758,7 +757,7 @@ static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
if (retval) \
return retval; \
\
- return sprintf(buf, _format, ## _arg); \
+ return sysfs_emit(buf, _format, ## _arg); \
} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
@@ -787,8 +786,8 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
util = qdio->req_q_util;
spin_unlock_bh(&qdio->stat_lock);
- return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full),
- (unsigned long long)util);
+ return sysfs_emit(buf, "%d %llu\n", atomic_read(&qdio->req_q_full),
+ (unsigned long long)util);
}
static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
@@ -843,8 +842,7 @@ static ssize_t zfcp_sysfs_adapter_diag_b2b_credit_show(
.data.nport_serv_param -
sizeof(u32));
- rc = scnprintf(buf, 5 + 2, "%hu\n",
- be16_to_cpu(nsp->fl_csp.sp_bb_cred));
+ rc = sysfs_emit(buf, "%hu\n", be16_to_cpu(nsp->fl_csp.sp_bb_cred));
spin_unlock_irqrestore(&diag_hdr->access_lock, flags);
out:
@@ -854,7 +852,7 @@ out:
static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400,
zfcp_sysfs_adapter_diag_b2b_credit_show, NULL);
-#define ZFCP_DEFINE_DIAG_SFP_ATTR(_name, _qtcb_member, _prtsize, _prtfmt) \
+#define ZFCP_DEFINE_DIAG_SFP_ATTR(_name, _qtcb_member, _prtfmt) \
static ssize_t zfcp_sysfs_adapter_diag_sfp_##_name##_show( \
struct device *dev, struct device_attribute *attr, char *buf) \
{ \
@@ -887,8 +885,8 @@ static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400,
goto out; \
\
spin_lock_irqsave(&diag_hdr->access_lock, flags); \
- rc = scnprintf( \
- buf, (_prtsize) + 2, _prtfmt "\n", \
+ rc = sysfs_emit( \
+ buf, _prtfmt "\n", \
adapter->diagnostics->port_data.data._qtcb_member); \
spin_unlock_irqrestore(&diag_hdr->access_lock, flags); \
\
@@ -899,16 +897,16 @@ static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400,
static ZFCP_DEV_ATTR(adapter_diag_sfp, _name, 0400, \
zfcp_sysfs_adapter_diag_sfp_##_name##_show, NULL)
-ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 6, "%hd");
-ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, 5, "%hu");
-ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, 5, "%hu");
-ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, 5, "%hu");
-ZFCP_DEFINE_DIAG_SFP_ATTR(rx_power, rx_power, 5, "%hu");
-ZFCP_DEFINE_DIAG_SFP_ATTR(port_tx_type, sfp_flags.port_tx_type, 2, "%hu");
-ZFCP_DEFINE_DIAG_SFP_ATTR(optical_port, sfp_flags.optical_port, 1, "%hu");
-ZFCP_DEFINE_DIAG_SFP_ATTR(sfp_invalid, sfp_flags.sfp_invalid, 1, "%hu");
-ZFCP_DEFINE_DIAG_SFP_ATTR(connector_type, sfp_flags.connector_type, 1, "%hu");
-ZFCP_DEFINE_DIAG_SFP_ATTR(fec_active, sfp_flags.fec_active, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, "%hd");
+ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(rx_power, rx_power, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(port_tx_type, sfp_flags.port_tx_type, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(optical_port, sfp_flags.optical_port, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(sfp_invalid, sfp_flags.sfp_invalid, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(connector_type, sfp_flags.connector_type, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(fec_active, sfp_flags.fec_active, "%hu");
static struct attribute *zfcp_sysfs_diag_attrs[] = {
&dev_attr_adapter_diag_sfp_temperature.attr,
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 62eca9419ad7..21fa7ac849e5 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -58,6 +58,8 @@ struct virtio_ccw_device {
struct virtio_device vdev;
__u8 config[VIRTIO_CCW_CONFIG_SIZE];
struct ccw_device *cdev;
+ /* we make cdev->dev.dma_parms point to this */
+ struct device_dma_parameters dma_parms;
__u32 curr_io;
int err;
unsigned int revision; /* Transport revision */
@@ -1303,6 +1305,7 @@ static int virtio_ccw_offline(struct ccw_device *cdev)
unregister_virtio_device(&vcdev->vdev);
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
dev_set_drvdata(&cdev->dev, NULL);
+ cdev->dev.dma_parms = NULL;
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return 0;
}
@@ -1366,6 +1369,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
}
vcdev->vdev.dev.parent = &cdev->dev;
vcdev->cdev = cdev;
+ cdev->dev.dma_parms = &vcdev->dma_parms;
vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
sizeof(*vcdev->dma_area),
&vcdev->dma_area_addr);
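
The two virtio_ccw hunks pair up: dma_parms storage is embedded in the per-device driver data, wired up on online and torn down on offline. Without it, DMA segment-size limits have nowhere to live; roughly (cf. dma_set_max_seg_size() in linux/dma-mapping.h):

	/* fails with -EIO when dev->dma_parms is NULL, hence the wiring above */
	static inline int dma_set_max_seg_size(struct device *dev,
					       unsigned int size)
	{
		if (dev->dma_parms) {
			dev->dma_parms->max_segment_size = size;
			return 0;
		}
		return -EIO;
	}
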
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index ca4bc0ac76ad..8947dab132d7 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1190,8 +1190,8 @@ static u8 sd_group_number(struct scsi_cmnd *cmd)
if (!sdkp->rscs)
return 0;
- return min3((u32)rq->write_hint, (u32)sdkp->permanent_stream_count,
- 0x3fu);
+ return min3((u32)rq->bio->bi_write_hint,
+ (u32)sdkp->permanent_stream_count, 0x3fu);
}
static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
@@ -1389,7 +1389,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
protect | fua, dld);
} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
- sdp->use_10_for_rw || protect || rq->write_hint) {
+ sdp->use_10_for_rw || protect || rq->bio->bi_write_hint) {
ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
protect | fua);
} else {
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 6ab27f4f4878..7a447ff600d2 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -633,8 +633,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,
lim->max_open_zones = sdkp->zones_max_open;
lim->max_active_zones = 0;
lim->chunk_sectors = logical_to_sectors(sdkp->device, zone_blocks);
- /* Enable block layer zone append emulation */
- lim->max_zone_append_sectors = 0;
return 0;
diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
index c5b17dd8f587..0629f277f7b4 100644
--- a/drivers/tc/tc.c
+++ b/drivers/tc/tc.c
@@ -162,7 +162,7 @@ static int __init tc_init(void)
if (tc_bus.info.slot_size) {
unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000;
- pr_info("tc: TURBOchannel rev. %d at %d.%d MHz "
+ pr_info("tc: TURBOchannel rev. %d at %u.%u MHz "
"(with%s parity)\n", tc_bus.info.revision,
tc_clock / 10, tc_clock % 10,
tc_bus.info.parity ? "" : "out");
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 41c4d56beb40..1e1559bb971e 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -6,6 +6,7 @@ CFLAGS_thermal_core.o := -I$(src)
obj-$(CONFIG_THERMAL) += thermal_sys.o
thermal_sys-y += thermal_core.o thermal_sysfs.o
thermal_sys-y += thermal_trip.o thermal_helpers.o
+thermal_sys-y += thermal_thresholds.o
# netlink interface to manage the thermal framework
thermal_sys-$(CONFIG_THERMAL_NETLINK) += thermal_netlink.o
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index 863e7a4272e6..97f3d819852b 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -30,9 +30,7 @@ static void bang_bang_set_instance_target(struct thermal_instance *instance,
dev_dbg(&instance->cdev->device, "target=%ld\n", instance->target);
- mutex_lock(&instance->cdev->lock);
- __thermal_cdev_update(instance->cdev);
- mutex_unlock(&instance->cdev->lock);
+ thermal_cdev_update_nocheck(instance->cdev);
}
/**
@@ -67,6 +65,7 @@ static void bang_bang_control(struct thermal_zone_device *tz,
const struct thermal_trip *trip,
bool crossed_up)
{
+ const struct thermal_trip_desc *td = trip_to_trip_desc(trip);
struct thermal_instance *instance;
lockdep_assert_held(&tz->lock);
@@ -75,10 +74,8 @@ static void bang_bang_control(struct thermal_zone_device *tz,
thermal_zone_trip_id(tz, trip), trip->temperature,
tz->temperature, trip->hysteresis);
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (instance->trip == trip)
- bang_bang_set_instance_target(instance, crossed_up);
- }
+ list_for_each_entry(instance, &td->thermal_instances, trip_node)
+ bang_bang_set_instance_target(instance, crossed_up);
}
static void bang_bang_manage(struct thermal_zone_device *tz)
@@ -104,8 +101,8 @@ static void bang_bang_manage(struct thermal_zone_device *tz)
* to the thermal zone temperature and the trip point threshold.
*/
turn_on = tz->temperature >= td->threshold;
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (!instance->initialized && instance->trip == trip)
+ list_for_each_entry(instance, &td->thermal_instances, trip_node) {
+ if (!instance->initialized)
bang_bang_set_instance_target(instance, turn_on);
}
}
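
This bang_bang change is the first of several governor updates that all stem from one data-structure move: thermal instances are now assumed to hang off the trip they are bound to rather than off the zone. A sketch of the shape implied by the hunks (the real definition lives in drivers/thermal/thermal_core.h):

	struct thermal_trip_desc {
		struct thermal_trip trip;
		struct list_head thermal_instances;	/* bound to this trip */
		/* ... threshold, notify bits, etc. */
	};

	/* old: walk the zone-wide list and skip instances of foreign trips;
	 * new: walk the trip's own list, no filtering needed */
	list_for_each_entry(instance, &td->thermal_instances, trip_node)
		bang_bang_set_instance_target(instance, crossed_up);
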
diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
index ce0ea571ed67..4643be4f941d 100644
--- a/drivers/thermal/gov_fair_share.c
+++ b/drivers/thermal/gov_fair_share.c
@@ -44,7 +44,7 @@ static int get_trip_level(struct thermal_zone_device *tz)
/**
* fair_share_throttle - throttles devices associated with the given zone
* @tz: thermal_zone_device
- * @trip: trip point
+ * @td: trip point descriptor
* @trip_level: number of trips crossed by the zone temperature
*
* Throttling Logic: This uses three parameters to calculate the new
@@ -61,29 +61,23 @@ static int get_trip_level(struct thermal_zone_device *tz)
* new_state of cooling device = P3 * P2 * P1
*/
static void fair_share_throttle(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
+ const struct thermal_trip_desc *td,
int trip_level)
{
struct thermal_instance *instance;
int total_weight = 0;
int nr_instances = 0;
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (instance->trip != trip)
- continue;
-
+ list_for_each_entry(instance, &td->thermal_instances, trip_node) {
total_weight += instance->weight;
nr_instances++;
}
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ list_for_each_entry(instance, &td->thermal_instances, trip_node) {
struct thermal_cooling_device *cdev = instance->cdev;
u64 dividend;
u32 divisor;
- if (instance->trip != trip)
- continue;
-
dividend = trip_level;
dividend *= cdev->max_state;
divisor = tz->num_trips;
@@ -95,9 +89,7 @@ static void fair_share_throttle(struct thermal_zone_device *tz,
}
instance->target = div_u64(dividend, divisor);
- mutex_lock(&cdev->lock);
- __thermal_cdev_update(cdev);
- mutex_unlock(&cdev->lock);
+ thermal_cdev_update_nocheck(cdev);
}
}
@@ -116,7 +108,7 @@ static void fair_share_manage(struct thermal_zone_device *tz)
trip->type == THERMAL_TRIP_HOT)
continue;
- fair_share_throttle(tz, trip, trip_level);
+ fair_share_throttle(tz, td, trip_level);
}
}
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 1b2345a697c5..ac6fa6b8f99f 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -97,11 +97,9 @@ struct power_allocator_params {
struct power_actor *power;
};
-static bool power_actor_is_valid(struct power_allocator_params *params,
- struct thermal_instance *instance)
+static bool power_actor_is_valid(struct thermal_instance *instance)
{
- return (instance->trip == params->trip_max &&
- cdev_is_power_actor(instance->cdev));
+ return cdev_is_power_actor(instance->cdev);
}
/**
@@ -118,13 +116,14 @@ static bool power_actor_is_valid(struct power_allocator_params *params,
static u32 estimate_sustainable_power(struct thermal_zone_device *tz)
{
struct power_allocator_params *params = tz->governor_data;
+ const struct thermal_trip_desc *td = trip_to_trip_desc(params->trip_max);
struct thermal_cooling_device *cdev;
struct thermal_instance *instance;
u32 sustainable_power = 0;
u32 min_power;
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (!power_actor_is_valid(params, instance))
+ list_for_each_entry(instance, &td->thermal_instances, trip_node) {
+ if (!power_actor_is_valid(instance))
continue;
cdev = instance->cdev;
@@ -323,9 +322,8 @@ power_actor_set_power(struct thermal_cooling_device *cdev,
return ret;
instance->target = clamp_val(state, instance->lower, instance->upper);
- mutex_lock(&cdev->lock);
- __thermal_cdev_update(cdev);
- mutex_unlock(&cdev->lock);
+
+ thermal_cdev_update_nocheck(cdev);
return 0;
}
@@ -356,11 +354,19 @@ static void divvy_up_power(struct power_actor *power, int num_actors,
u32 extra_power = 0;
int i;
- /*
- * Prevent division by 0 if none of the actors request power.
- */
- if (!total_req_power)
- total_req_power = 1;
+ if (!total_req_power) {
+ /*
+ * Nobody requested anything, just give everybody
+ * the maximum power
+ */
+ for (i = 0; i < num_actors; i++) {
+ struct power_actor *pa = &power[i];
+
+ pa->granted_power = pa->max_power;
+ }
+
+ return;
+ }
for (i = 0; i < num_actors; i++) {
struct power_actor *pa = &power[i];
@@ -400,6 +406,7 @@ static void divvy_up_power(struct power_actor *power, int num_actors,
static void allocate_power(struct thermal_zone_device *tz, int control_temp)
{
struct power_allocator_params *params = tz->governor_data;
+ const struct thermal_trip_desc *td = trip_to_trip_desc(params->trip_max);
unsigned int num_actors = params->num_actors;
struct power_actor *power = params->power;
struct thermal_cooling_device *cdev;
@@ -417,10 +424,10 @@ static void allocate_power(struct thermal_zone_device *tz, int control_temp)
/* Clean all buffers for new power estimations */
memset(power, 0, params->buffer_size);
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ list_for_each_entry(instance, &td->thermal_instances, trip_node) {
struct power_actor *pa = &power[i];
- if (!power_actor_is_valid(params, instance))
+ if (!power_actor_is_valid(instance))
continue;
cdev = instance->cdev;
@@ -454,10 +461,10 @@ static void allocate_power(struct thermal_zone_device *tz, int control_temp)
power_range);
i = 0;
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ list_for_each_entry(instance, &td->thermal_instances, trip_node) {
struct power_actor *pa = &power[i];
- if (!power_actor_is_valid(params, instance))
+ if (!power_actor_is_valid(instance))
continue;
power_actor_set_power(instance->cdev, instance,
@@ -538,29 +545,29 @@ static void reset_pid_controller(struct power_allocator_params *params)
static void allow_maximum_power(struct thermal_zone_device *tz)
{
struct power_allocator_params *params = tz->governor_data;
+ const struct thermal_trip_desc *td = trip_to_trip_desc(params->trip_max);
struct thermal_cooling_device *cdev;
struct thermal_instance *instance;
u32 req_power;
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (!power_actor_is_valid(params, instance))
+ list_for_each_entry(instance, &td->thermal_instances, trip_node) {
+ if (!power_actor_is_valid(instance))
continue;
cdev = instance->cdev;
instance->target = 0;
- mutex_lock(&cdev->lock);
- /*
- * Call for updating the cooling devices local stats and avoid
- * periods of dozen of seconds when those have not been
- * maintained.
- */
- cdev->ops->get_requested_power(cdev, &req_power);
-
- if (params->update_cdevs)
- __thermal_cdev_update(cdev);
-
- mutex_unlock(&cdev->lock);
+ scoped_guard(cooling_dev, cdev) {
+ /*
+ * Call for updating the cooling devices local stats and
+ * avoid periods of dozen of seconds when those have not
+ * been maintained.
+ */
+ cdev->ops->get_requested_power(cdev, &req_power);
+
+ if (params->update_cdevs)
+ __thermal_cdev_update(cdev);
+ }
}
}
@@ -581,13 +588,11 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
static int check_power_actors(struct thermal_zone_device *tz,
struct power_allocator_params *params)
{
+ const struct thermal_trip_desc *td = trip_to_trip_desc(params->trip_max);
struct thermal_instance *instance;
int ret = 0;
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (instance->trip != params->trip_max)
- continue;
-
+ list_for_each_entry(instance, &td->thermal_instances, trip_node) {
if (!cdev_is_power_actor(instance->cdev)) {
dev_warn(&tz->device, "power_allocator: %s is not a power actor\n",
instance->cdev->type);
@@ -635,14 +640,15 @@ static void power_allocator_update_tz(struct thermal_zone_device *tz,
enum thermal_notify_event reason)
{
struct power_allocator_params *params = tz->governor_data;
+ const struct thermal_trip_desc *td = trip_to_trip_desc(params->trip_max);
struct thermal_instance *instance;
int num_actors = 0;
switch (reason) {
case THERMAL_TZ_BIND_CDEV:
case THERMAL_TZ_UNBIND_CDEV:
- list_for_each_entry(instance, &tz->thermal_instances, tz_node)
- if (power_actor_is_valid(params, instance))
+ list_for_each_entry(instance, &td->thermal_instances, trip_node)
+ if (power_actor_is_valid(instance))
num_actors++;
if (num_actors == params->num_actors)
@@ -652,8 +658,8 @@ static void power_allocator_update_tz(struct thermal_zone_device *tz,
break;
case THERMAL_INSTANCE_WEIGHT_CHANGED:
params->total_weight = 0;
- list_for_each_entry(instance, &tz->thermal_instances, tz_node)
- if (power_actor_is_valid(params, instance))
+ list_for_each_entry(instance, &td->thermal_instances, trip_node)
+ if (power_actor_is_valid(instance))
params->total_weight += instance->weight;
break;
default:
diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c
index fd5527188cf9..d1bb59f1dfbd 100644
--- a/drivers/thermal/gov_step_wise.c
+++ b/drivers/thermal/gov_step_wise.c
@@ -66,9 +66,10 @@ static unsigned long get_target_state(struct thermal_instance *instance,
}
static void thermal_zone_trip_update(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
+ const struct thermal_trip_desc *td,
int trip_threshold)
{
+ const struct thermal_trip *trip = &td->trip;
enum thermal_trend trend = get_tz_trend(tz, trip);
int trip_id = thermal_zone_trip_id(tz, trip);
struct thermal_instance *instance;
@@ -82,12 +83,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz,
dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n",
trip_id, trip->type, trip_threshold, trend, throttle);
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ list_for_each_entry(instance, &td->thermal_instances, trip_node) {
int old_target;
- if (instance->trip != trip)
- continue;
-
old_target = instance->target;
instance->target = get_target_state(instance, trend, throttle);
@@ -99,9 +97,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz,
instance->initialized = true;
- mutex_lock(&instance->cdev->lock);
- instance->cdev->updated = false; /* cdev needs update */
- mutex_unlock(&instance->cdev->lock);
+ scoped_guard(cooling_dev, instance->cdev) {
+ instance->cdev->updated = false; /* cdev needs update */
+ }
}
}
@@ -127,11 +125,13 @@ static void step_wise_manage(struct thermal_zone_device *tz)
trip->type == THERMAL_TRIP_HOT)
continue;
- thermal_zone_trip_update(tz, trip, td->threshold);
+ thermal_zone_trip_update(tz, td, td->threshold);
}
- list_for_each_entry(instance, &tz->thermal_instances, tz_node)
- thermal_cdev_update(instance->cdev);
+ for_each_trip_desc(tz, td) {
+ list_for_each_entry(instance, &td->thermal_instances, trip_node)
+ thermal_cdev_update(instance->cdev);
+ }
}
static struct thermal_governor thermal_gov_step_wise = {
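
The scoped_guard() conversions here (and the guard(mutex) calls in thermal_core.c below) come from linux/cleanup.h: the lock is released automatically when the scope exits, on every path. A minimal sketch, assuming a cooling_dev guard class is defined over cdev->lock:

	scoped_guard(cooling_dev, instance->cdev) {
		/* cdev->lock held throughout this block */
		instance->cdev->updated = false;
	}	/* released here, including on early returns out of the block */

	guard(mutex)(&some_lock);	/* held until the enclosing scope ends */
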
diff --git a/drivers/thermal/testing/zone.c b/drivers/thermal/testing/zone.c
index c6d8c66f40f9..1f4e450100e2 100644
--- a/drivers/thermal/testing/zone.c
+++ b/drivers/thermal/testing/zone.c
@@ -185,7 +185,7 @@ static void tt_add_tz_work_fn(struct work_struct *work)
int tt_add_tz(void)
{
struct tt_thermal_zone *tt_zone __free(kfree);
- struct tt_work *tt_work __free(kfree);
+ struct tt_work *tt_work __free(kfree) = NULL;
int ret;
tt_zone = kzalloc(sizeof(*tt_zone), GFP_KERNEL);
@@ -237,7 +237,7 @@ static void tt_zone_unregister_tz(struct tt_thermal_zone *tt_zone)
int tt_del_tz(const char *arg)
{
- struct tt_work *tt_work __free(kfree);
+ struct tt_work *tt_work __free(kfree) = NULL;
struct tt_thermal_zone *tt_zone, *aux;
int ret;
int id;
@@ -288,19 +288,14 @@ static struct tt_thermal_zone *tt_get_tt_zone(const char *arg)
guard(mutex)(&tt_thermal_zones_lock);
- ret = -EINVAL;
list_for_each_entry(tt_zone, &tt_thermal_zones, list_node) {
if (tt_zone->id == id) {
tt_zone->refcount++;
- ret = 0;
- break;
+ return tt_zone;
}
}
- if (ret)
- return ERR_PTR(ret);
-
- return tt_zone;
+ return ERR_PTR(-EINVAL);
}
static void tt_put_tt_zone(struct tt_thermal_zone *tt_zone)
@@ -310,6 +305,9 @@ static void tt_put_tt_zone(struct tt_thermal_zone *tt_zone)
tt_zone->refcount--;
}
+DEFINE_FREE(put_tt_zone, struct tt_thermal_zone *,
+ if (!IS_ERR_OR_NULL(_T)) tt_put_tt_zone(_T))
+
static void tt_zone_add_trip_work_fn(struct work_struct *work)
{
struct tt_work *tt_work = tt_work_of_work(work);
@@ -332,9 +330,9 @@ static void tt_zone_add_trip_work_fn(struct work_struct *work)
int tt_zone_add_trip(const char *arg)
{
+ struct tt_thermal_zone *tt_zone __free(put_tt_zone) = NULL;
+ struct tt_trip *tt_trip __free(kfree) = NULL;
struct tt_work *tt_work __free(kfree);
- struct tt_trip *tt_trip __free(kfree);
- struct tt_thermal_zone *tt_zone;
int id;
tt_work = kzalloc(sizeof(*tt_work), GFP_KERNEL);
@@ -350,10 +348,8 @@ int tt_zone_add_trip(const char *arg)
return PTR_ERR(tt_zone);
id = ida_alloc(&tt_zone->ida, GFP_KERNEL);
- if (id < 0) {
- tt_put_tt_zone(tt_zone);
+ if (id < 0)
return id;
- }
tt_trip->trip.type = THERMAL_TRIP_ACTIVE;
tt_trip->trip.temperature = THERMAL_TEMP_INVALID;
@@ -366,7 +362,7 @@ int tt_zone_add_trip(const char *arg)
tt_zone->num_trips++;
INIT_WORK(&tt_work->work, tt_zone_add_trip_work_fn);
- tt_work->tt_zone = tt_zone;
+ tt_work->tt_zone = no_free_ptr(tt_zone);
tt_work->tt_trip = no_free_ptr(tt_trip);
schedule_work(&(no_free_ptr(tt_work)->work));
@@ -391,7 +387,7 @@ static struct thermal_zone_device_ops tt_zone_ops = {
static int tt_zone_register_tz(struct tt_thermal_zone *tt_zone)
{
- struct thermal_trip *trips __free(kfree);
+ struct thermal_trip *trips __free(kfree) = NULL;
struct thermal_zone_device *tz;
struct tt_trip *tt_trip;
int i;
@@ -425,23 +421,18 @@ static int tt_zone_register_tz(struct tt_thermal_zone *tt_zone)
int tt_zone_reg(const char *arg)
{
- struct tt_thermal_zone *tt_zone;
- int ret;
+ struct tt_thermal_zone *tt_zone __free(put_tt_zone);
tt_zone = tt_get_tt_zone(arg);
if (IS_ERR(tt_zone))
return PTR_ERR(tt_zone);
- ret = tt_zone_register_tz(tt_zone);
-
- tt_put_tt_zone(tt_zone);
-
- return ret;
+ return tt_zone_register_tz(tt_zone);
}
int tt_zone_unreg(const char *arg)
{
- struct tt_thermal_zone *tt_zone;
+ struct tt_thermal_zone *tt_zone __free(put_tt_zone);
tt_zone = tt_get_tt_zone(arg);
if (IS_ERR(tt_zone))
@@ -449,8 +440,6 @@ int tt_zone_unreg(const char *arg)
tt_zone_unregister_tz(tt_zone);
- tt_put_tt_zone(tt_zone);
-
return 0;
}
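
The testing/zone.c changes NULL-initialize every __free(kfree) pointer and add a put_tt_zone cleanup via DEFINE_FREE(). The initializer matters: if a function returns before the first assignment, the cleanup handler would kfree() an uninitialized pointer. A minimal sketch, with consume() as a hypothetical sink that takes ownership:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct blob { int x; };			/* stand-in payload */

static int consume(struct blob *b);	/* hypothetical, takes ownership */

static int example(int fail_early)
{
	/* Must be NULL-initialized: an early return before the
	 * allocation would otherwise kfree() garbage. */
	struct blob *b __free(kfree) = NULL;

	if (fail_early)
		return -EINVAL;		/* kfree(NULL) is a no-op */

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	/* no_free_ptr() hands the pointer over and disarms the cleanup,
	 * the same way tt_work->tt_zone = no_free_ptr(tt_zone) does above. */
	return consume(no_free_ptr(b));
}
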
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 8f03985f971c..19a3894ad752 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/kdev_t.h>
#include <linux/idr.h>
-#include <linux/list_sort.h>
#include <linux/thermal.h>
#include <linux/reboot.h>
#include <linux/string.h>
@@ -40,6 +39,8 @@ static DEFINE_MUTEX(thermal_governor_lock);
static struct thermal_governor *def_governor;
+static bool thermal_pm_suspended;
+
/*
* Governor section: set of functions to handle thermal governors
*
@@ -122,7 +123,7 @@ int thermal_register_governor(struct thermal_governor *governor)
if (!governor)
return -EINVAL;
- mutex_lock(&thermal_governor_lock);
+ guard(mutex)(&thermal_governor_lock);
err = -EBUSY;
if (!__find_governor(governor->name)) {
@@ -138,7 +139,7 @@ int thermal_register_governor(struct thermal_governor *governor)
def_governor = governor;
}
- mutex_lock(&thermal_list_lock);
+ guard(mutex)(&thermal_list_lock);
list_for_each_entry(pos, &thermal_tz_list, node) {
/*
@@ -161,9 +162,6 @@ int thermal_register_governor(struct thermal_governor *governor)
}
}
- mutex_unlock(&thermal_list_lock);
- mutex_unlock(&thermal_governor_lock);
-
return err;
}
@@ -174,23 +172,20 @@ void thermal_unregister_governor(struct thermal_governor *governor)
if (!governor)
return;
- mutex_lock(&thermal_governor_lock);
+ guard(mutex)(&thermal_governor_lock);
if (!__find_governor(governor->name))
- goto exit;
+ return;
- mutex_lock(&thermal_list_lock);
+ list_del(&governor->governor_list);
+
+ guard(mutex)(&thermal_list_lock);
list_for_each_entry(pos, &thermal_tz_list, node) {
if (!strncasecmp(pos->governor->name, governor->name,
THERMAL_NAME_LENGTH))
thermal_set_governor(pos, NULL);
}
-
- mutex_unlock(&thermal_list_lock);
- list_del(&governor->governor_list);
-exit:
- mutex_unlock(&thermal_governor_lock);
}
int thermal_zone_device_set_policy(struct thermal_zone_device *tz,
@@ -199,18 +194,12 @@ int thermal_zone_device_set_policy(struct thermal_zone_device *tz,
struct thermal_governor *gov;
int ret = -EINVAL;
- mutex_lock(&thermal_governor_lock);
- mutex_lock(&tz->lock);
+ guard(mutex)(&thermal_governor_lock);
+ guard(thermal_zone)(tz);
gov = __find_governor(strim(policy));
- if (!gov)
- goto exit;
-
- ret = thermal_set_governor(tz, gov);
-
-exit:
- mutex_unlock(&tz->lock);
- mutex_unlock(&thermal_governor_lock);
+ if (gov)
+ ret = thermal_set_governor(tz, gov);
thermal_notify_tz_gov_change(tz, policy);
@@ -222,15 +211,13 @@ int thermal_build_list_of_policies(char *buf)
struct thermal_governor *pos;
ssize_t count = 0;
- mutex_lock(&thermal_governor_lock);
+ guard(mutex)(&thermal_governor_lock);
list_for_each_entry(pos, &thermal_governor_list, governor_list) {
count += sysfs_emit_at(buf, count, "%s ", pos->name);
}
count += sysfs_emit_at(buf, count, "\n");
- mutex_unlock(&thermal_governor_lock);
-
return count;
}
@@ -421,83 +408,46 @@ static void handle_critical_trips(struct thermal_zone_device *tz,
tz->ops.hot(tz);
}
-static void handle_thermal_trip(struct thermal_zone_device *tz,
- struct thermal_trip_desc *td,
- struct list_head *way_up_list,
- struct list_head *way_down_list)
+static void move_trip_to_sorted_list(struct thermal_trip_desc *td,
+ struct list_head *list)
{
- const struct thermal_trip *trip = &td->trip;
- int old_threshold;
-
- if (trip->temperature == THERMAL_TEMP_INVALID)
- return;
+ struct thermal_trip_desc *entry;
/*
- * If the trip temperature or hysteresis has been updated recently,
- * the threshold needs to be computed again using the new values.
- * However, its initial value still reflects the old ones and that
- * is what needs to be compared with the previous zone temperature
- * to decide which action to take.
+ * Delete upfront and then add to make relocation within the same list
+ * work.
*/
- old_threshold = td->threshold;
- td->threshold = trip->temperature;
+ list_del(&td->list_node);
- if (tz->last_temperature >= old_threshold &&
- tz->last_temperature != THERMAL_TEMP_INIT) {
- /*
- * Mitigation is under way, so it needs to stop if the zone
- * temperature falls below the low temperature of the trip.
- * In that case, the trip temperature becomes the new threshold.
- */
- if (tz->temperature < trip->temperature - trip->hysteresis) {
- list_add(&td->notify_list_node, way_down_list);
- td->notify_temp = trip->temperature - trip->hysteresis;
-
- if (trip->type == THERMAL_TRIP_PASSIVE) {
- tz->passive--;
- WARN_ON(tz->passive < 0);
- }
- } else {
- td->threshold -= trip->hysteresis;
+ /* Assume that the new entry is likely to be the last one. */
+ list_for_each_entry_reverse(entry, list, list_node) {
+ if (entry->threshold <= td->threshold) {
+ list_add(&td->list_node, &entry->list_node);
+ return;
}
- } else if (tz->temperature >= trip->temperature) {
- /*
- * There is no mitigation under way, so it needs to be started
- * if the zone temperature exceeds the trip one. The new
- * threshold is then set to the low temperature of the trip.
- */
- list_add_tail(&td->notify_list_node, way_up_list);
- td->notify_temp = trip->temperature;
- td->threshold -= trip->hysteresis;
-
- if (trip->type == THERMAL_TRIP_PASSIVE)
- tz->passive++;
- else if (trip->type == THERMAL_TRIP_CRITICAL ||
- trip->type == THERMAL_TRIP_HOT)
- handle_critical_trips(tz, trip);
}
+ list_add(&td->list_node, list);
+}
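
move_trip_to_sorted_list() keeps each list ordered by ascending threshold, scanning from the tail because a relocated entry usually belongs near the end. A standalone sketch of the same insertion with a simplified node type; it assumes the entry is already linked somewhere (or self-initialized with INIT_LIST_HEAD) so the initial list_del() is safe:

#include <linux/list.h>

struct node {
	struct list_head list_node;
	int threshold;
};

static void sorted_insert(struct node *n, struct list_head *list)
{
	struct node *entry;

	/* Unlink first so relocation within the same list works. */
	list_del(&n->list_node);

	/* Walk backwards: the new entry is likely to end up at the tail. */
	list_for_each_entry_reverse(entry, list, list_node) {
		if (entry->threshold <= n->threshold) {
			list_add(&n->list_node, &entry->list_node);
			return;
		}
	}

	/* Smaller than everything (or the list is empty): new head. */
	list_add(&n->list_node, list);
}
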
-static void thermal_zone_device_check(struct work_struct *work)
+static void move_to_trips_high(struct thermal_zone_device *tz,
+ struct thermal_trip_desc *td)
{
- struct thermal_zone_device *tz = container_of(work, struct
- thermal_zone_device,
- poll_queue.work);
- thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+ td->threshold = td->trip.temperature;
+ move_trip_to_sorted_list(td, &tz->trips_high);
}
-static void thermal_zone_device_init(struct thermal_zone_device *tz)
+static void move_to_trips_reached(struct thermal_zone_device *tz,
+ struct thermal_trip_desc *td)
{
- struct thermal_instance *pos;
-
- INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check);
+ td->threshold = td->trip.temperature - td->trip.hysteresis;
+ move_trip_to_sorted_list(td, &tz->trips_reached);
+}
- tz->temperature = THERMAL_TEMP_INIT;
- tz->passive = 0;
- tz->prev_low_trip = -INT_MAX;
- tz->prev_high_trip = INT_MAX;
- list_for_each_entry(pos, &tz->thermal_instances, tz_node)
- pos->initialized = false;
+static void move_to_trips_invalid(struct thermal_zone_device *tz,
+ struct thermal_trip_desc *td)
+{
+ td->threshold = INT_MAX;
+ list_move(&td->list_node, &tz->trips_invalid);
}
static void thermal_governor_trip_crossed(struct thermal_governor *governor,
@@ -513,41 +463,154 @@ static void thermal_governor_trip_crossed(struct thermal_governor *governor,
}
static void thermal_trip_crossed(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
+ struct thermal_trip_desc *td,
struct thermal_governor *governor,
bool crossed_up)
{
+ const struct thermal_trip *trip = &td->trip;
+
if (crossed_up) {
+ if (trip->type == THERMAL_TRIP_PASSIVE)
+ tz->passive++;
+ else if (trip->type == THERMAL_TRIP_CRITICAL ||
+ trip->type == THERMAL_TRIP_HOT)
+ handle_critical_trips(tz, trip);
+
thermal_notify_tz_trip_up(tz, trip);
thermal_debug_tz_trip_up(tz, trip);
} else {
+ if (trip->type == THERMAL_TRIP_PASSIVE) {
+ tz->passive--;
+ WARN_ON(tz->passive < 0);
+ }
thermal_notify_tz_trip_down(tz, trip);
thermal_debug_tz_trip_down(tz, trip);
}
thermal_governor_trip_crossed(governor, tz, trip, crossed_up);
}
-static int thermal_trip_notify_cmp(void *not_used, const struct list_head *a,
- const struct list_head *b)
+void thermal_zone_set_trip_hyst(struct thermal_zone_device *tz,
+ struct thermal_trip *trip, int hyst)
{
- struct thermal_trip_desc *tda = container_of(a, struct thermal_trip_desc,
- notify_list_node);
- struct thermal_trip_desc *tdb = container_of(b, struct thermal_trip_desc,
- notify_list_node);
- return tda->notify_temp - tdb->notify_temp;
+ struct thermal_trip_desc *td = trip_to_trip_desc(trip);
+
+ WRITE_ONCE(trip->hysteresis, hyst);
+ thermal_notify_tz_trip_change(tz, trip);
+ /*
+	 * If the zone temperature is above or at the trip temperature, the trip
+ * is in the trips_reached list and its threshold is equal to its low
+ * temperature. It needs to stay in that list, but its threshold needs
+ * to be updated and the list ordering may need to be restored.
+ */
+ if (tz->temperature >= td->threshold)
+ move_to_trips_reached(tz, td);
+}
+
+void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
+ struct thermal_trip *trip, int temp)
+{
+ struct thermal_trip_desc *td = trip_to_trip_desc(trip);
+ int old_temp = trip->temperature;
+
+ if (old_temp == temp)
+ return;
+
+ WRITE_ONCE(trip->temperature, temp);
+ thermal_notify_tz_trip_change(tz, trip);
+
+ if (old_temp == THERMAL_TEMP_INVALID) {
+ /*
+ * The trip was invalid before the change, so move it to the
+ * trips_high list regardless of the new temperature value
+ * because there is no mitigation under way for it. If a
+ * mitigation needs to be started, the trip will be moved to the
+ * trips_reached list later.
+ */
+ move_to_trips_high(tz, td);
+ return;
+ }
+
+ if (temp == THERMAL_TEMP_INVALID) {
+ /*
+ * If the trip is in the trips_reached list, mitigation is under
+ * way for it and it needs to be stopped because the trip is
+ * effectively going away.
+ */
+ if (tz->temperature >= td->threshold)
+ thermal_trip_crossed(tz, td, thermal_get_tz_governor(tz), false);
+
+ move_to_trips_invalid(tz, td);
+ return;
+ }
+
+ /*
+ * The trip stays on its current list, but its threshold needs to be
+ * updated due to the temperature change and the list ordering may need
+ * to be restored.
+ */
+ if (tz->temperature >= td->threshold)
+ move_to_trips_reached(tz, td);
+ else
+ move_to_trips_high(tz, td);
+}
+EXPORT_SYMBOL_GPL(thermal_zone_set_trip_temp);
+
+static void thermal_zone_handle_trips(struct thermal_zone_device *tz,
+ struct thermal_governor *governor,
+ int *low, int *high)
+{
+ struct thermal_trip_desc *td, *next;
+ LIST_HEAD(way_down_list);
+
+ /* Check the trips that were below or at the zone temperature. */
+ list_for_each_entry_safe_reverse(td, next, &tz->trips_reached, list_node) {
+ if (td->threshold <= tz->temperature)
+ break;
+
+ thermal_trip_crossed(tz, td, governor, false);
+ /*
+ * The current trips_high list needs to be processed before
+ * adding new entries to it, so put them on a temporary list.
+ */
+ list_move(&td->list_node, &way_down_list);
+ }
+ /* Check the trips that were previously above the zone temperature. */
+ list_for_each_entry_safe(td, next, &tz->trips_high, list_node) {
+ if (td->threshold > tz->temperature)
+ break;
+
+ thermal_trip_crossed(tz, td, governor, true);
+ move_to_trips_reached(tz, td);
+ }
+ /* Move all of the trips from the temporary list to trips_high. */
+ list_for_each_entry_safe(td, next, &way_down_list, list_node)
+ move_to_trips_high(tz, td);
+
+ if (!list_empty(&tz->trips_reached)) {
+ td = list_last_entry(&tz->trips_reached,
+ struct thermal_trip_desc, list_node);
+ /*
+ * Set the "low" value below the current trip threshold in case
+ * the zone temperature is at that threshold and stays there,
+ * which would trigger a new interrupt immediately in vain.
+ */
+ *low = td->threshold - 1;
+ }
+ if (!list_empty(&tz->trips_high)) {
+ td = list_first_entry(&tz->trips_high,
+ struct thermal_trip_desc, list_node);
+ *high = td->threshold;
+ }
}
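
Worked example of the threshold bookkeeping above, with made-up numbers:

/*
 * A trip at 70 °C (70000 mC) with 2 °C (2000 mC) hysteresis:
 *
 * - While the zone temperature is below the trip, the trip sits on
 *   trips_high with threshold = 70000 and is crossed on the way up
 *   once the temperature reaches 70000.
 * - After crossing, move_to_trips_reached() sets
 *   threshold = 70000 - 2000 = 68000; the trip is crossed on the way
 *   down only when the temperature drops below 68000.
 * - The "low" interrupt bound becomes 68000 - 1 = 67999, so a zone
 *   parked exactly at the threshold does not retrigger in vain.
 */
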
void __thermal_zone_device_update(struct thermal_zone_device *tz,
enum thermal_notify_event event)
{
struct thermal_governor *governor = thermal_get_tz_governor(tz);
- struct thermal_trip_desc *td;
- LIST_HEAD(way_down_list);
- LIST_HEAD(way_up_list);
int low = -INT_MAX, high = INT_MAX;
int temp, ret;
- if (tz->suspended || tz->mode != THERMAL_DEVICE_ENABLED)
+ if (tz->state != TZ_STATE_READY || tz->mode != THERMAL_DEVICE_ENABLED)
return;
ret = __thermal_zone_get_temp(tz, &temp);
@@ -575,26 +638,12 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
tz->notify_event = event;
- for_each_trip_desc(tz, td) {
- handle_thermal_trip(tz, td, &way_up_list, &way_down_list);
+ thermal_zone_handle_trips(tz, governor, &low, &high);
- if (td->threshold <= tz->temperature && td->threshold > low)
- low = td->threshold;
-
- if (td->threshold >= tz->temperature && td->threshold < high)
- high = td->threshold;
- }
+ thermal_thresholds_handle(tz, &low, &high);
thermal_zone_set_trips(tz, low, high);
- list_sort(NULL, &way_up_list, thermal_trip_notify_cmp);
- list_for_each_entry(td, &way_up_list, notify_list_node)
- thermal_trip_crossed(tz, &td->trip, governor, true);
-
- list_sort(NULL, &way_down_list, thermal_trip_notify_cmp);
- list_for_each_entry_reverse(td, &way_down_list, notify_list_node)
- thermal_trip_crossed(tz, &td->trip, governor, false);
-
if (governor->manage)
governor->manage(tz);
@@ -609,26 +658,18 @@ static int thermal_zone_device_set_mode(struct thermal_zone_device *tz,
{
int ret;
- mutex_lock(&tz->lock);
+ guard(thermal_zone)(tz);
/* do nothing if mode isn't changing */
- if (mode == tz->mode) {
- mutex_unlock(&tz->lock);
-
+ if (mode == tz->mode)
return 0;
- }
ret = __thermal_zone_device_set_mode(tz, mode);
- if (ret) {
- mutex_unlock(&tz->lock);
-
+ if (ret)
return ret;
- }
__thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
- mutex_unlock(&tz->lock);
-
if (mode == THERMAL_DEVICE_ENABLED)
thermal_notify_tz_enable(tz);
else
@@ -657,85 +698,81 @@ static bool thermal_zone_is_present(struct thermal_zone_device *tz)
void thermal_zone_device_update(struct thermal_zone_device *tz,
enum thermal_notify_event event)
{
- mutex_lock(&tz->lock);
+ guard(thermal_zone)(tz);
+
if (thermal_zone_is_present(tz))
__thermal_zone_device_update(tz, event);
- mutex_unlock(&tz->lock);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_update);
-void thermal_zone_trip_down(struct thermal_zone_device *tz,
- const struct thermal_trip *trip)
-{
- thermal_trip_crossed(tz, trip, thermal_get_tz_governor(tz), false);
-}
-
int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *),
void *data)
{
struct thermal_governor *gov;
- int ret = 0;
- mutex_lock(&thermal_governor_lock);
+ guard(mutex)(&thermal_governor_lock);
+
list_for_each_entry(gov, &thermal_governor_list, governor_list) {
+ int ret;
+
ret = cb(gov, data);
if (ret)
- break;
+ return ret;
}
- mutex_unlock(&thermal_governor_lock);
- return ret;
+ return 0;
}
int for_each_thermal_cooling_device(int (*cb)(struct thermal_cooling_device *,
void *), void *data)
{
struct thermal_cooling_device *cdev;
- int ret = 0;
- mutex_lock(&thermal_list_lock);
+ guard(mutex)(&thermal_list_lock);
+
list_for_each_entry(cdev, &thermal_cdev_list, node) {
+ int ret;
+
ret = cb(cdev, data);
if (ret)
- break;
+ return ret;
}
- mutex_unlock(&thermal_list_lock);
- return ret;
+ return 0;
}
int for_each_thermal_zone(int (*cb)(struct thermal_zone_device *, void *),
void *data)
{
struct thermal_zone_device *tz;
- int ret = 0;
- mutex_lock(&thermal_list_lock);
+ guard(mutex)(&thermal_list_lock);
+
list_for_each_entry(tz, &thermal_tz_list, node) {
+ int ret;
+
ret = cb(tz, data);
if (ret)
- break;
+ return ret;
}
- mutex_unlock(&thermal_list_lock);
- return ret;
+ return 0;
}
struct thermal_zone_device *thermal_zone_get_by_id(int id)
{
- struct thermal_zone_device *tz, *match = NULL;
+ struct thermal_zone_device *tz;
+
+ guard(mutex)(&thermal_list_lock);
- mutex_lock(&thermal_list_lock);
list_for_each_entry(tz, &thermal_tz_list, node) {
if (tz->id == id) {
get_device(&tz->device);
- match = tz;
- break;
+ return tz;
}
}
- mutex_unlock(&thermal_list_lock);
- return match;
+ return NULL;
}
/*
@@ -748,12 +785,32 @@ struct thermal_zone_device *thermal_zone_get_by_id(int id)
* binding, and unbinding.
*/
+static int thermal_instance_add(struct thermal_instance *new_instance,
+ struct thermal_cooling_device *cdev,
+ struct thermal_trip_desc *td)
+{
+ struct thermal_instance *instance;
+
+ list_for_each_entry(instance, &td->thermal_instances, trip_node) {
+ if (instance->cdev == cdev)
+ return -EEXIST;
+ }
+
+ list_add_tail(&new_instance->trip_node, &td->thermal_instances);
+
+ guard(cooling_dev)(cdev);
+
+ list_add_tail(&new_instance->cdev_node, &cdev->thermal_instances);
+
+ return 0;
+}
+
/**
* thermal_bind_cdev_to_trip - bind a cooling device to a thermal zone
* @tz: pointer to struct thermal_zone_device
- * @trip: trip point the cooling devices is associated with in this zone.
+ * @td: descriptor of the trip point to bind @cdev to
* @cdev: pointer to struct thermal_cooling_device
- * @cool_spec: cooling specification for @trip and @cdev
+ * @cool_spec: cooling specification for the trip point and @cdev
*
* This interface function binds a thermal cooling device to a given trip
* point of a thermal zone device.
@@ -762,12 +819,11 @@ struct thermal_zone_device *thermal_zone_get_by_id(int id)
* Return: 0 on success, the proper error value otherwise.
*/
static int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
+ struct thermal_trip_desc *td,
struct thermal_cooling_device *cdev,
struct cooling_spec *cool_spec)
{
struct thermal_instance *dev;
- struct thermal_instance *pos;
bool upper_no_limit;
int result;
@@ -790,7 +846,7 @@ static int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
return -ENOMEM;
dev->cdev = cdev;
- dev->trip = trip;
+ dev->trip = &td->trip;
dev->upper = cool_spec->upper;
dev->upper_no_limit = upper_no_limit;
dev->lower = cool_spec->lower;
@@ -829,24 +885,15 @@ static int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
if (result)
goto remove_trip_file;
- mutex_lock(&cdev->lock);
- list_for_each_entry(pos, &tz->thermal_instances, tz_node)
- if (pos->trip == trip && pos->cdev == cdev) {
- result = -EEXIST;
- break;
- }
- if (!result) {
- list_add_tail(&dev->tz_node, &tz->thermal_instances);
- list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
- atomic_set(&tz->need_update, 1);
+ result = thermal_instance_add(dev, cdev, td);
+ if (result)
+ goto remove_weight_file;
- thermal_governor_update_tz(tz, THERMAL_TZ_BIND_CDEV);
- }
- mutex_unlock(&cdev->lock);
+ thermal_governor_update_tz(tz, THERMAL_TZ_BIND_CDEV);
- if (!result)
- return 0;
+ return 0;
+remove_weight_file:
device_remove_file(&tz->device, &dev->weight_attr);
remove_trip_file:
device_remove_file(&tz->device, &dev->attr);
@@ -859,10 +906,19 @@ free_mem:
return result;
}
+static void thermal_instance_delete(struct thermal_instance *instance)
+{
+ list_del(&instance->trip_node);
+
+ guard(cooling_dev)(instance->cdev);
+
+ list_del(&instance->cdev_node);
+}
+
/**
* thermal_unbind_cdev_from_trip - unbind a cooling device from a thermal zone.
* @tz: pointer to a struct thermal_zone_device.
- * @trip: trip point the cooling devices is associated with in this zone.
+ * @td: descriptor of the trip point to unbind @cdev from
* @cdev: pointer to a struct thermal_cooling_device.
*
* This interface function unbinds a thermal cooling device from a given
@@ -870,28 +926,23 @@ free_mem:
* This function is usually called in the thermal zone device .unbind callback.
*/
static void thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
+ struct thermal_trip_desc *td,
struct thermal_cooling_device *cdev)
{
struct thermal_instance *pos, *next;
- mutex_lock(&cdev->lock);
- list_for_each_entry_safe(pos, next, &tz->thermal_instances, tz_node) {
- if (pos->trip == trip && pos->cdev == cdev) {
- list_del(&pos->tz_node);
- list_del(&pos->cdev_node);
-
- thermal_governor_update_tz(tz, THERMAL_TZ_UNBIND_CDEV);
-
- mutex_unlock(&cdev->lock);
+ list_for_each_entry_safe(pos, next, &td->thermal_instances, trip_node) {
+ if (pos->cdev == cdev) {
+ thermal_instance_delete(pos);
goto unbind;
}
}
- mutex_unlock(&cdev->lock);
return;
unbind:
+ thermal_governor_update_tz(tz, THERMAL_TZ_UNBIND_CDEV);
+
device_remove_file(&tz->device, &pos->weight_attr);
device_remove_file(&tz->device, &pos->attr);
sysfs_remove_link(&tz->device.kobj, pos->name);
@@ -924,25 +975,23 @@ static struct class *thermal_class;
static inline
void print_bind_err_msg(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
+ const struct thermal_trip_desc *td,
struct thermal_cooling_device *cdev, int ret)
{
dev_err(&tz->device, "binding cdev %s to trip %d failed: %d\n",
- cdev->type, thermal_zone_trip_id(tz, trip), ret);
+ cdev->type, thermal_zone_trip_id(tz, &td->trip), ret);
}
-static void thermal_zone_cdev_bind(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev)
+static bool __thermal_zone_cdev_bind(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev)
{
struct thermal_trip_desc *td;
+ bool update_tz = false;
if (!tz->ops.should_bind)
- return;
-
- mutex_lock(&tz->lock);
+ return false;
for_each_trip_desc(tz, td) {
- struct thermal_trip *trip = &td->trip;
struct cooling_spec c = {
.upper = THERMAL_NO_LIMIT,
.lower = THERMAL_NO_LIMIT,
@@ -950,15 +999,40 @@ static void thermal_zone_cdev_bind(struct thermal_zone_device *tz,
};
int ret;
- if (!tz->ops.should_bind(tz, trip, cdev, &c))
+ if (!tz->ops.should_bind(tz, &td->trip, cdev, &c))
continue;
- ret = thermal_bind_cdev_to_trip(tz, trip, cdev, &c);
- if (ret)
- print_bind_err_msg(tz, trip, cdev, ret);
+ ret = thermal_bind_cdev_to_trip(tz, td, cdev, &c);
+ if (ret) {
+ print_bind_err_msg(tz, td, cdev, ret);
+ continue;
+ }
+
+ update_tz = true;
}
- mutex_unlock(&tz->lock);
+ return update_tz;
+}
+
+static void thermal_zone_cdev_bind(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev)
+{
+ guard(thermal_zone)(tz);
+
+ if (__thermal_zone_cdev_bind(tz, cdev))
+ __thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+}
+
+static void thermal_cooling_device_init_complete(struct thermal_cooling_device *cdev)
+{
+ struct thermal_zone_device *tz;
+
+ guard(mutex)(&thermal_list_lock);
+
+ list_add(&cdev->node, &thermal_cdev_list);
+
+ list_for_each_entry(tz, &thermal_tz_list, node)
+ thermal_zone_cdev_bind(tz, cdev);
}
/**
@@ -983,7 +1057,6 @@ __thermal_cooling_device_register(struct device_node *np,
const struct thermal_cooling_device_ops *ops)
{
struct thermal_cooling_device *cdev;
- struct thermal_zone_device *pos = NULL;
unsigned long current_state;
int id, ret;
@@ -1050,21 +1123,7 @@ __thermal_cooling_device_register(struct device_node *np,
if (current_state <= cdev->max_state)
thermal_debug_cdev_add(cdev, current_state);
- /* Add 'this' new cdev to the global cdev list */
- mutex_lock(&thermal_list_lock);
-
- list_add(&cdev->node, &thermal_cdev_list);
-
- /* Update binding information for 'this' new cdev */
- list_for_each_entry(pos, &thermal_tz_list, node)
- thermal_zone_cdev_bind(pos, cdev);
-
- list_for_each_entry(pos, &thermal_tz_list, node)
- if (atomic_cmpxchg(&pos->need_update, 1, 0))
- thermal_zone_device_update(pos,
- THERMAL_EVENT_UNSPECIFIED);
-
- mutex_unlock(&thermal_list_lock);
+ thermal_cooling_device_init_complete(cdev);
return cdev;
@@ -1207,19 +1266,19 @@ void thermal_cooling_device_update(struct thermal_cooling_device *cdev)
* Hold thermal_list_lock throughout the update to prevent the device
* from going away while being updated.
*/
- mutex_lock(&thermal_list_lock);
+ guard(mutex)(&thermal_list_lock);
if (!thermal_cooling_device_present(cdev))
- goto unlock_list;
+ return;
/*
* Update under the cdev lock to prevent the state from being set beyond
* the new limit concurrently.
*/
- mutex_lock(&cdev->lock);
+ guard(cooling_dev)(cdev);
if (cdev->ops->get_max_state(cdev, &cdev->max_state))
- goto unlock;
+ return;
thermal_cooling_device_stats_reinit(cdev);
@@ -1246,63 +1305,59 @@ void thermal_cooling_device_update(struct thermal_cooling_device *cdev)
}
if (cdev->ops->get_cur_state(cdev, &state) || state > cdev->max_state)
- goto unlock;
+ return;
thermal_cooling_device_stats_update(cdev, state);
-
-unlock:
- mutex_unlock(&cdev->lock);
-
-unlock_list:
- mutex_unlock(&thermal_list_lock);
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_update);
-static void thermal_zone_cdev_unbind(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev)
+static void __thermal_zone_cdev_unbind(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev)
{
struct thermal_trip_desc *td;
- mutex_lock(&tz->lock);
-
for_each_trip_desc(tz, td)
- thermal_unbind_cdev_from_trip(tz, &td->trip, cdev);
-
- mutex_unlock(&tz->lock);
+ thermal_unbind_cdev_from_trip(tz, td, cdev);
}
-/**
- * thermal_cooling_device_unregister - removes a thermal cooling device
- * @cdev: the thermal cooling device to remove.
- *
- * thermal_cooling_device_unregister() must be called when a registered
- * thermal cooling device is no longer needed.
- */
-void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
+static void thermal_zone_cdev_unbind(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev)
{
- struct thermal_zone_device *tz;
+ guard(thermal_zone)(tz);
- if (!cdev)
- return;
+ __thermal_zone_cdev_unbind(tz, cdev);
+}
- thermal_debug_cdev_remove(cdev);
+static bool thermal_cooling_device_exit(struct thermal_cooling_device *cdev)
+{
+ struct thermal_zone_device *tz;
- mutex_lock(&thermal_list_lock);
+ guard(mutex)(&thermal_list_lock);
- if (!thermal_cooling_device_present(cdev)) {
- mutex_unlock(&thermal_list_lock);
- return;
- }
+ if (!thermal_cooling_device_present(cdev))
+ return false;
list_del(&cdev->node);
- /* Unbind all thermal zones associated with 'this' cdev */
list_for_each_entry(tz, &thermal_tz_list, node)
thermal_zone_cdev_unbind(tz, cdev);
- mutex_unlock(&thermal_list_lock);
+ return true;
+}
- device_unregister(&cdev->device);
+/**
+ * thermal_cooling_device_unregister() - removes a thermal cooling device
+ * @cdev: Thermal cooling device to remove.
+ */
+void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
+{
+ if (!cdev)
+ return;
+
+ thermal_debug_cdev_remove(cdev);
+
+ if (thermal_cooling_device_exit(cdev))
+ device_unregister(&cdev->device);
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);
@@ -1314,7 +1369,7 @@ int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp)
if (tz->ops.get_crit_temp)
return tz->ops.get_crit_temp(tz, temp);
- mutex_lock(&tz->lock);
+ guard(thermal_zone)(tz);
for_each_trip_desc(tz, td) {
const struct thermal_trip *trip = &td->trip;
@@ -1326,12 +1381,91 @@ int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp)
}
}
- mutex_unlock(&tz->lock);
-
return ret;
}
EXPORT_SYMBOL_GPL(thermal_zone_get_crit_temp);
+static void thermal_zone_device_check(struct work_struct *work)
+{
+ struct thermal_zone_device *tz = container_of(work, struct
+ thermal_zone_device,
+ poll_queue.work);
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+}
+
+static void thermal_zone_device_init(struct thermal_zone_device *tz)
+{
+ struct thermal_trip_desc *td, *next;
+
+ INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check);
+
+ tz->temperature = THERMAL_TEMP_INIT;
+ tz->passive = 0;
+ tz->prev_low_trip = -INT_MAX;
+ tz->prev_high_trip = INT_MAX;
+ for_each_trip_desc(tz, td) {
+ struct thermal_instance *instance;
+
+ list_for_each_entry(instance, &td->thermal_instances, trip_node)
+ instance->initialized = false;
+ }
+ /*
+ * At this point, all valid trips need to be moved to trips_high so that
+ * mitigation can be started if the zone temperature is above them.
+ */
+ list_for_each_entry_safe(td, next, &tz->trips_invalid, list_node) {
+ if (td->trip.temperature != THERMAL_TEMP_INVALID)
+ move_to_trips_high(tz, td);
+ }
+ /* The trips_reached list may not be empty during system resume. */
+ list_for_each_entry_safe(td, next, &tz->trips_reached, list_node) {
+ if (td->trip.temperature == THERMAL_TEMP_INVALID)
+ move_to_trips_invalid(tz, td);
+ else
+ move_to_trips_high(tz, td);
+ }
+}
+
+static int thermal_zone_init_governor(struct thermal_zone_device *tz)
+{
+ struct thermal_governor *governor;
+
+ guard(mutex)(&thermal_governor_lock);
+
+ if (tz->tzp)
+ governor = __find_governor(tz->tzp->governor_name);
+ else
+ governor = def_governor;
+
+ return thermal_set_governor(tz, governor);
+}
+
+static void thermal_zone_init_complete(struct thermal_zone_device *tz)
+{
+ struct thermal_cooling_device *cdev;
+
+ guard(mutex)(&thermal_list_lock);
+
+ list_add_tail(&tz->node, &thermal_tz_list);
+
+ guard(thermal_zone)(tz);
+
+ /* Bind cooling devices for this zone. */
+ list_for_each_entry(cdev, &thermal_cdev_list, node)
+ __thermal_zone_cdev_bind(tz, cdev);
+
+ tz->state &= ~TZ_STATE_FLAG_INIT;
+ /*
+ * If system suspend or resume is in progress at this point, the
+ * new thermal zone needs to be marked as suspended because
+ * thermal_pm_notify() has run already.
+ */
+ if (thermal_pm_suspended)
+ tz->state |= TZ_STATE_FLAG_SUSPENDED;
+
+ __thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+}
+
/**
* thermal_zone_device_register_with_trips() - register a new thermal zone device
* @type: the thermal zone device type
@@ -1366,12 +1500,10 @@ thermal_zone_device_register_with_trips(const char *type,
unsigned int polling_delay)
{
const struct thermal_trip *trip = trips;
- struct thermal_cooling_device *cdev;
struct thermal_zone_device *tz;
struct thermal_trip_desc *td;
int id;
int result;
- struct thermal_governor *governor;
if (!type || strlen(type) == 0) {
pr_err("No thermal zone type defined\n");
@@ -1415,8 +1547,10 @@ thermal_zone_device_register_with_trips(const char *type,
}
}
- INIT_LIST_HEAD(&tz->thermal_instances);
INIT_LIST_HEAD(&tz->node);
+ INIT_LIST_HEAD(&tz->trips_high);
+ INIT_LIST_HEAD(&tz->trips_reached);
+ INIT_LIST_HEAD(&tz->trips_invalid);
ida_init(&tz->ida);
mutex_init(&tz->lock);
init_completion(&tz->removal);
@@ -1439,51 +1573,41 @@ thermal_zone_device_register_with_trips(const char *type,
tz->num_trips = num_trips;
for_each_trip_desc(tz, td) {
td->trip = *trip++;
+ INIT_LIST_HEAD(&td->thermal_instances);
+ INIT_LIST_HEAD(&td->list_node);
/*
* Mark all thresholds as invalid to start with even though
* this only matters for the trips that start as invalid and
* become valid later.
*/
- td->threshold = INT_MAX;
+ move_to_trips_invalid(tz, td);
}
tz->polling_delay_jiffies = msecs_to_jiffies(polling_delay);
tz->passive_delay_jiffies = msecs_to_jiffies(passive_delay);
tz->recheck_delay_jiffies = THERMAL_RECHECK_DELAY;
+ tz->state = TZ_STATE_FLAG_INIT;
+
/* sys I/F */
/* Add nodes that are always present via .groups */
result = thermal_zone_create_device_groups(tz);
if (result)
goto remove_id;
- /* A new thermal zone needs to be updated anyway. */
- atomic_set(&tz->need_update, 1);
-
result = dev_set_name(&tz->device, "thermal_zone%d", tz->id);
if (result) {
thermal_zone_destroy_device_groups(tz);
goto remove_id;
}
+ thermal_zone_device_init(tz);
result = device_register(&tz->device);
if (result)
goto release_device;
- /* Update 'this' zone's governor information */
- mutex_lock(&thermal_governor_lock);
-
- if (tz->tzp)
- governor = __find_governor(tz->tzp->governor_name);
- else
- governor = def_governor;
-
- result = thermal_set_governor(tz, governor);
- if (result) {
- mutex_unlock(&thermal_governor_lock);
+ result = thermal_zone_init_governor(tz);
+ if (result)
goto unregister;
- }
-
- mutex_unlock(&thermal_governor_lock);
if (!tz->tzp || !tz->tzp->no_hwmon) {
result = thermal_add_hwmon_sysfs(tz);
@@ -1491,22 +1615,11 @@ thermal_zone_device_register_with_trips(const char *type,
goto unregister;
}
- mutex_lock(&thermal_list_lock);
-
- mutex_lock(&tz->lock);
- list_add_tail(&tz->node, &thermal_tz_list);
- mutex_unlock(&tz->lock);
-
- /* Bind cooling devices for this zone */
- list_for_each_entry(cdev, &thermal_cdev_list, node)
- thermal_zone_cdev_bind(tz, cdev);
-
- mutex_unlock(&thermal_list_lock);
+ result = thermal_thresholds_init(tz);
+ if (result)
+ goto remove_hwmon;
- thermal_zone_device_init(tz);
- /* Update the new thermal zone and mark it as already updated. */
- if (atomic_cmpxchg(&tz->need_update, 1, 0))
- thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+ thermal_zone_init_complete(tz);
thermal_notify_tz_create(tz);
@@ -1514,6 +1627,8 @@ thermal_zone_device_register_with_trips(const char *type,
return tz;
+remove_hwmon:
+ thermal_remove_hwmon_sysfs(tz);
unregister:
device_del(&tz->device);
release_device:
@@ -1563,44 +1678,46 @@ struct device *thermal_zone_device(struct thermal_zone_device *tzd)
}
EXPORT_SYMBOL_GPL(thermal_zone_device);
+static bool thermal_zone_exit(struct thermal_zone_device *tz)
+{
+ struct thermal_cooling_device *cdev;
+
+ guard(mutex)(&thermal_list_lock);
+
+ if (list_empty(&tz->node))
+ return false;
+
+ guard(thermal_zone)(tz);
+
+ tz->state |= TZ_STATE_FLAG_EXIT;
+ list_del_init(&tz->node);
+
+ /* Unbind all cdevs associated with this thermal zone. */
+ list_for_each_entry(cdev, &thermal_cdev_list, node)
+ __thermal_zone_cdev_unbind(tz, cdev);
+
+ return true;
+}
+
/**
* thermal_zone_device_unregister - removes the registered thermal zone device
* @tz: the thermal zone device to remove
*/
void thermal_zone_device_unregister(struct thermal_zone_device *tz)
{
- struct thermal_cooling_device *cdev;
- struct thermal_zone_device *pos = NULL;
-
if (!tz)
return;
thermal_debug_tz_remove(tz);
- mutex_lock(&thermal_list_lock);
- list_for_each_entry(pos, &thermal_tz_list, node)
- if (pos == tz)
- break;
- if (pos != tz) {
- /* thermal zone device not found */
- mutex_unlock(&thermal_list_lock);
+ if (!thermal_zone_exit(tz))
return;
- }
-
- mutex_lock(&tz->lock);
- list_del(&tz->node);
- mutex_unlock(&tz->lock);
-
- /* Unbind all cdevs associated with 'this' thermal zone */
- list_for_each_entry(cdev, &thermal_cdev_list, node)
- thermal_zone_cdev_unbind(tz, cdev);
-
- mutex_unlock(&thermal_list_lock);
cancel_delayed_work_sync(&tz->poll_queue);
thermal_set_governor(tz, NULL);
+ thermal_thresholds_exit(tz);
thermal_remove_hwmon_sysfs(tz);
ida_free(&thermal_tz_ida, tz->id);
ida_destroy(&tz->ida);
@@ -1632,24 +1749,23 @@ struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name)
unsigned int found = 0;
if (!name)
- goto exit;
+ return ERR_PTR(-EINVAL);
+
+ guard(mutex)(&thermal_list_lock);
- mutex_lock(&thermal_list_lock);
list_for_each_entry(pos, &thermal_tz_list, node)
if (!strncasecmp(name, pos->type, THERMAL_NAME_LENGTH)) {
found++;
ref = pos;
}
- mutex_unlock(&thermal_list_lock);
- /* nothing has been found, thus an error code for it */
- if (found == 0)
- ref = ERR_PTR(-ENODEV);
- else if (found > 1)
- /* Success only when an unique zone is found */
- ref = ERR_PTR(-EEXIST);
+ if (!found)
+ return ERR_PTR(-ENODEV);
+
+ /* Success only when one zone is found. */
+ if (found > 1)
+ return ERR_PTR(-EEXIST);
-exit:
return ref;
}
EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name);
@@ -1660,9 +1776,9 @@ static void thermal_zone_device_resume(struct work_struct *work)
tz = container_of(work, struct thermal_zone_device, poll_queue.work);
- mutex_lock(&tz->lock);
+ guard(thermal_zone)(tz);
- tz->suspended = false;
+ tz->state &= ~(TZ_STATE_FLAG_SUSPENDED | TZ_STATE_FLAG_RESUMING);
thermal_debug_tz_resume(tz);
thermal_zone_device_init(tz);
@@ -1670,74 +1786,81 @@ static void thermal_zone_device_resume(struct work_struct *work)
__thermal_zone_device_update(tz, THERMAL_TZ_RESUME);
complete(&tz->resume);
- tz->resuming = false;
-
- mutex_unlock(&tz->lock);
}
-static int thermal_pm_notify(struct notifier_block *nb,
- unsigned long mode, void *_unused)
+static void thermal_zone_pm_prepare(struct thermal_zone_device *tz)
{
- struct thermal_zone_device *tz;
+ guard(thermal_zone)(tz);
- switch (mode) {
- case PM_HIBERNATION_PREPARE:
- case PM_RESTORE_PREPARE:
- case PM_SUSPEND_PREPARE:
- mutex_lock(&thermal_list_lock);
+ if (tz->state & TZ_STATE_FLAG_RESUMING) {
+ /*
+ * thermal_zone_device_resume() queued up for this zone has not
+ * acquired the lock yet, so release it to let the function run
+	 * and wait until it has done the work.
+ */
+ scoped_guard(thermal_zone_reverse, tz) {
+ wait_for_completion(&tz->resume);
+ }
+ }
- list_for_each_entry(tz, &thermal_tz_list, node) {
- mutex_lock(&tz->lock);
+ tz->state |= TZ_STATE_FLAG_SUSPENDED;
+}
- if (tz->resuming) {
- /*
- * thermal_zone_device_resume() queued up for
- * this zone has not acquired the lock yet, so
- * release it to let the function run and wait
- * util it has done the work.
- */
- mutex_unlock(&tz->lock);
+static void thermal_pm_notify_prepare(void)
+{
+ struct thermal_zone_device *tz;
- wait_for_completion(&tz->resume);
+ guard(mutex)(&thermal_list_lock);
- mutex_lock(&tz->lock);
- }
+ thermal_pm_suspended = true;
- tz->suspended = true;
+ list_for_each_entry(tz, &thermal_tz_list, node)
+ thermal_zone_pm_prepare(tz);
+}
- mutex_unlock(&tz->lock);
- }
+static void thermal_zone_pm_complete(struct thermal_zone_device *tz)
+{
+ guard(thermal_zone)(tz);
- mutex_unlock(&thermal_list_lock);
- break;
- case PM_POST_HIBERNATION:
- case PM_POST_RESTORE:
- case PM_POST_SUSPEND:
- mutex_lock(&thermal_list_lock);
+ cancel_delayed_work(&tz->poll_queue);
- list_for_each_entry(tz, &thermal_tz_list, node) {
- mutex_lock(&tz->lock);
+ reinit_completion(&tz->resume);
+ tz->state |= TZ_STATE_FLAG_RESUMING;
+
+ /*
+ * Replace the work function with the resume one, which will restore the
+ * original work function and schedule the polling work if needed.
+ */
+ INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_resume);
+ /* Queue up the work without a delay. */
+ mod_delayed_work(system_freezable_power_efficient_wq, &tz->poll_queue, 0);
+}
- cancel_delayed_work(&tz->poll_queue);
+static void thermal_pm_notify_complete(void)
+{
+ struct thermal_zone_device *tz;
- reinit_completion(&tz->resume);
- tz->resuming = true;
+ guard(mutex)(&thermal_list_lock);
- /*
- * Replace the work function with the resume one, which
- * will restore the original work function and schedule
- * the polling work if needed.
- */
- INIT_DELAYED_WORK(&tz->poll_queue,
- thermal_zone_device_resume);
- /* Queue up the work without a delay. */
- mod_delayed_work(system_freezable_power_efficient_wq,
- &tz->poll_queue, 0);
+ thermal_pm_suspended = false;
- mutex_unlock(&tz->lock);
- }
+ list_for_each_entry(tz, &thermal_tz_list, node)
+ thermal_zone_pm_complete(tz);
+}
- mutex_unlock(&thermal_list_lock);
+static int thermal_pm_notify(struct notifier_block *nb,
+ unsigned long mode, void *_unused)
+{
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ thermal_pm_notify_prepare();
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ thermal_pm_notify_complete();
break;
default:
break;
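
The for_each_thermal_governor()/cdev()/zone() conversions above show the broader guard() payoff: once the lock is scope-managed, the "set ret, break, unlock, return ret" dance collapses into direct returns. A minimal sketch with a stand-in item type:

#include <linux/cleanup.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct item {				/* simplified stand-in */
	struct list_head node;
};

static int for_each_item(struct list_head *list, struct mutex *lock,
			 int (*cb)(struct item *, void *), void *data)
{
	struct item *it;

	guard(mutex)(lock);		/* released on every return below */

	list_for_each_entry(it, list, node) {
		int ret = cb(it, data);

		if (ret)
			return ret;	/* no break/unlock bookkeeping */
	}

	return 0;
}
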
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index a64d39b1c86b..be271e7c8f41 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -9,10 +9,12 @@
#ifndef __THERMAL_CORE_H__
#define __THERMAL_CORE_H__
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/thermal.h>
#include "thermal_netlink.h"
+#include "thermal_thresholds.h"
#include "thermal_debugfs.h"
struct thermal_attr {
@@ -29,8 +31,8 @@ struct thermal_trip_attrs {
struct thermal_trip_desc {
struct thermal_trip trip;
struct thermal_trip_attrs trip_attrs;
- struct list_head notify_list_node;
- int notify_temp;
+ struct list_head list_node;
+ struct list_head thermal_instances;
int threshold;
};
@@ -61,6 +63,13 @@ struct thermal_governor {
struct list_head governor_list;
};
+#define TZ_STATE_FLAG_SUSPENDED BIT(0)
+#define TZ_STATE_FLAG_RESUMING BIT(1)
+#define TZ_STATE_FLAG_INIT BIT(2)
+#define TZ_STATE_FLAG_EXIT BIT(3)
+
+#define TZ_STATE_READY 0
+
/**
* struct thermal_zone_device - structure for a thermal zone
* @id: unique id number for each thermal zone
@@ -68,6 +77,9 @@ struct thermal_governor {
* @device: &struct device for this thermal zone
* @removal: removal completion
* @resume: resume completion
+ * @trips_high: trips above the current zone temperature
+ * @trips_reached: trips below or at the current zone temperature
+ * @trips_invalid: trips with invalid temperature
* @mode: current mode of this thermal zone
* @devdata: private pointer for device private data
* @num_trips: number of trip points the thermal zone supports
@@ -88,20 +100,17 @@ struct thermal_governor {
*		trip point.
* @prev_high_trip: the above current temperature if you've crossed a
*		passive trip point.
- * @need_update: if equals 1, thermal_zone_device_update needs to be invoked.
* @ops: operations this &thermal_zone_device supports
* @tzp: thermal zone parameters
* @governor: pointer to the governor for this thermal zone
* @governor_data: private pointer for governor data
- * @thermal_instances: list of &struct thermal_instance of this thermal zone
* @ida: &struct ida to generate unique id for this zone's cooling
* devices
* @lock: lock to protect thermal_instances list
* @node: node in thermal_tz_list (in thermal_core.c)
* @poll_queue: delayed work for polling
* @notify_event: Last notification event
- * @suspended: thermal zone suspend indicator
- * @resuming: indicates whether or not thermal zone resume is in progress
+ * @state: current state of the thermal zone
* @trips: array of struct thermal_trip objects
*/
struct thermal_zone_device {
@@ -111,6 +120,9 @@ struct thermal_zone_device {
struct completion removal;
struct completion resume;
struct attribute_group trips_attribute_group;
+ struct list_head trips_high;
+ struct list_head trips_reached;
+ struct list_head trips_invalid;
enum thermal_device_mode mode;
void *devdata;
int num_trips;
@@ -123,25 +135,29 @@ struct thermal_zone_device {
int passive;
int prev_low_trip;
int prev_high_trip;
- atomic_t need_update;
struct thermal_zone_device_ops ops;
struct thermal_zone_params *tzp;
struct thermal_governor *governor;
void *governor_data;
- struct list_head thermal_instances;
struct ida ida;
struct mutex lock;
struct list_head node;
struct delayed_work poll_queue;
enum thermal_notify_event notify_event;
- bool suspended;
- bool resuming;
+ u8 state;
#ifdef CONFIG_THERMAL_DEBUGFS
struct thermal_debugfs *debugfs;
#endif
+ struct list_head user_thresholds;
struct thermal_trip_desc trips[] __counted_by(num_trips);
};
+DEFINE_GUARD(thermal_zone, struct thermal_zone_device *, mutex_lock(&_T->lock),
+ mutex_unlock(&_T->lock))
+
+DEFINE_GUARD(thermal_zone_reverse, struct thermal_zone_device *,
+ mutex_unlock(&_T->lock), mutex_lock(&_T->lock))
+
/* Initial thermal zone temperature. */
#define THERMAL_TEMP_INIT INT_MIN
@@ -204,6 +220,7 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
}
void thermal_cdev_update(struct thermal_cooling_device *);
+void thermal_cdev_update_nocheck(struct thermal_cooling_device *cdev);
void __thermal_cdev_update(struct thermal_cooling_device *cdev);
int get_tz_trend(struct thermal_zone_device *tz, const struct thermal_trip *trip);
@@ -226,7 +243,7 @@ struct thermal_instance {
struct device_attribute attr;
char weight_attr_name[THERMAL_NAME_LENGTH];
struct device_attribute weight_attr;
- struct list_head tz_node; /* node in tz->thermal_instances */
+ struct list_head trip_node; /* node in trip->thermal_instances */
struct list_head cdev_node; /* node in cdev->thermal_instances */
unsigned int weight; /* The weight of the cooling device */
bool upper_no_limit;
@@ -261,8 +278,6 @@ void thermal_zone_set_trips(struct thermal_zone_device *tz, int low, int high);
int thermal_zone_trip_id(const struct thermal_zone_device *tz,
const struct thermal_trip *trip);
int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
-void thermal_zone_trip_down(struct thermal_zone_device *tz,
- const struct thermal_trip *trip);
void thermal_zone_set_trip_hyst(struct thermal_zone_device *tz,
struct thermal_trip *trip, int hyst);
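
The thermal_zone_reverse guard defined above inverts the usual pairing: entering the scope releases the lock and leaving it relocks, which is how thermal_zone_pm_prepare() drops the zone lock across wait_for_completion(). A minimal sketch with a simplified stand-in type:

#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/mutex.h>

struct zone { struct mutex lock; };	/* simplified stand-in */

DEFINE_GUARD(zone, struct zone *,
	     mutex_lock(&_T->lock), mutex_unlock(&_T->lock))
/* "Reverse" guard: entering the scope drops the lock, leaving retakes it. */
DEFINE_GUARD(zone_reverse, struct zone *,
	     mutex_unlock(&_T->lock), mutex_lock(&_T->lock))

static void wait_with_lock_dropped(struct zone *z, struct completion *done)
{
	guard(zone)(z);

	scoped_guard(zone_reverse, z) {
		/* Lock released here so the completer can take it. */
		wait_for_completion(done);
	}

	/* Lock held again from this point on. */
}
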
diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c
index 939d3e5f1817..c800504c3cfe 100644
--- a/drivers/thermal/thermal_debugfs.c
+++ b/drivers/thermal/thermal_debugfs.c
@@ -516,6 +516,19 @@ void thermal_debug_cdev_add(struct thermal_cooling_device *cdev, int state)
cdev->debugfs = thermal_dbg;
}
+static struct thermal_debugfs *thermal_debug_cdev_clear(struct thermal_cooling_device *cdev)
+{
+ struct thermal_debugfs *thermal_dbg;
+
+ guard(cooling_dev)(cdev);
+
+ thermal_dbg = cdev->debugfs;
+ if (thermal_dbg)
+ cdev->debugfs = NULL;
+
+ return thermal_dbg;
+}
+
/**
* thermal_debug_cdev_remove - Remove a cooling device debugfs entry
*
@@ -527,17 +540,9 @@ void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev)
{
struct thermal_debugfs *thermal_dbg;
- mutex_lock(&cdev->lock);
-
- thermal_dbg = cdev->debugfs;
- if (!thermal_dbg) {
- mutex_unlock(&cdev->lock);
+ thermal_dbg = thermal_debug_cdev_clear(cdev);
+ if (!thermal_dbg)
return;
- }
-
- cdev->debugfs = NULL;
-
- mutex_unlock(&cdev->lock);
mutex_lock(&thermal_dbg->lock);
@@ -885,6 +890,19 @@ void thermal_debug_tz_add(struct thermal_zone_device *tz)
tz->debugfs = thermal_dbg;
}
+static struct thermal_debugfs *thermal_debug_tz_clear(struct thermal_zone_device *tz)
+{
+ struct thermal_debugfs *thermal_dbg;
+
+ guard(thermal_zone)(tz);
+
+ thermal_dbg = tz->debugfs;
+ if (thermal_dbg)
+ tz->debugfs = NULL;
+
+ return thermal_dbg;
+}
+
void thermal_debug_tz_remove(struct thermal_zone_device *tz)
{
struct thermal_debugfs *thermal_dbg;
@@ -892,17 +910,9 @@ void thermal_debug_tz_remove(struct thermal_zone_device *tz)
struct tz_debugfs *tz_dbg;
int *trips_crossed;
- mutex_lock(&tz->lock);
-
- thermal_dbg = tz->debugfs;
- if (!thermal_dbg) {
- mutex_unlock(&tz->lock);
+ thermal_dbg = thermal_debug_tz_clear(tz);
+ if (!thermal_dbg)
return;
- }
-
- tz->debugfs = NULL;
-
- mutex_unlock(&tz->lock);
tz_dbg = &thermal_dbg->tz_dbg;
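
thermal_debug_cdev_clear() and thermal_debug_tz_clear() factor out a common "detach under the lock, tear down outside it" shape: the pointer is cleared while the lock is held, then the caller disposes of it lock-free. A minimal sketch with a stand-in type:

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct obj {				/* simplified stand-in */
	struct mutex lock;
	void *priv;
};

static void *obj_clear_priv(struct obj *o)
{
	void *p;

	guard(mutex)(&o->lock);

	p = o->priv;
	o->priv = NULL;		/* concurrent users now see it gone */

	return p;		/* caller frees it without holding o->lock */
}
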
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index dc374a7a1a65..b1152ad7acc9 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -43,10 +43,11 @@ static bool thermal_instance_present(struct thermal_zone_device *tz,
struct thermal_cooling_device *cdev,
const struct thermal_trip *trip)
{
+ const struct thermal_trip_desc *td = trip_to_trip_desc(trip);
struct thermal_instance *ti;
- list_for_each_entry(ti, &tz->thermal_instances, tz_node) {
- if (ti->trip == trip && ti->cdev == cdev)
+ list_for_each_entry(ti, &td->thermal_instances, trip_node) {
+ if (ti->cdev == cdev)
return true;
}
@@ -57,17 +58,10 @@ bool thermal_trip_is_bound_to_cdev(struct thermal_zone_device *tz,
const struct thermal_trip *trip,
struct thermal_cooling_device *cdev)
{
- bool ret;
+ guard(thermal_zone)(tz);
+ guard(cooling_dev)(cdev);
- mutex_lock(&tz->lock);
- mutex_lock(&cdev->lock);
-
- ret = thermal_instance_present(tz, cdev, trip);
-
- mutex_unlock(&cdev->lock);
- mutex_unlock(&tz->lock);
-
- return ret;
+ return thermal_instance_present(tz, cdev, trip);
}
EXPORT_SYMBOL_GPL(thermal_trip_is_bound_to_cdev);
@@ -137,19 +131,14 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
if (IS_ERR_OR_NULL(tz))
return -EINVAL;
- mutex_lock(&tz->lock);
+ guard(thermal_zone)(tz);
- if (!tz->ops.get_temp) {
- ret = -EINVAL;
- goto unlock;
- }
+ if (!tz->ops.get_temp)
+ return -EINVAL;
ret = __thermal_zone_get_temp(tz, temp);
if (!ret && *temp <= THERMAL_TEMP_INVALID)
- ret = -ENODATA;
-
-unlock:
- mutex_unlock(&tz->lock);
+ return -ENODATA;
return ret;
}
@@ -201,12 +190,23 @@ void __thermal_cdev_update(struct thermal_cooling_device *cdev)
*/
void thermal_cdev_update(struct thermal_cooling_device *cdev)
{
- mutex_lock(&cdev->lock);
+ guard(cooling_dev)(cdev);
+
if (!cdev->updated) {
__thermal_cdev_update(cdev);
cdev->updated = true;
}
- mutex_unlock(&cdev->lock);
+}
+
+/**
+ * thermal_cdev_update_nocheck() - Unconditionally update cooling device state
+ * @cdev: Target cooling device.
+ */
+void thermal_cdev_update_nocheck(struct thermal_cooling_device *cdev)
+{
+ guard(cooling_dev)(cdev);
+
+ __thermal_cdev_update(cdev);
}
/**
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index f0e504fd866a..37da7a8ea948 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -78,12 +78,9 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf)
int temperature;
int ret;
- mutex_lock(&tz->lock);
+ guard(thermal_zone)(tz);
ret = tz->ops.get_crit_temp(tz, &temperature);
-
- mutex_unlock(&tz->lock);
-
if (ret)
return ret;
diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
index f3c58c708969..315a76b01f6a 100644
--- a/drivers/thermal/thermal_netlink.c
+++ b/drivers/thermal/thermal_netlink.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
+#include <net/sock.h>
#include <net/genetlink.h>
#include <uapi/linux/thermal.h>
@@ -49,6 +50,11 @@ static const struct nla_policy thermal_genl_policy[THERMAL_GENL_ATTR_MAX + 1] =
[THERMAL_GENL_ATTR_CPU_CAPABILITY_ID] = { .type = NLA_U32 },
[THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE] = { .type = NLA_U32 },
[THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY] = { .type = NLA_U32 },
+
+ /* Thresholds */
+ [THERMAL_GENL_ATTR_THRESHOLD] = { .type = NLA_NESTED },
+ [THERMAL_GENL_ATTR_THRESHOLD_TEMP] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_THRESHOLD_DIRECTION] = { .type = NLA_U32 },
};
struct param {
@@ -62,6 +68,8 @@ struct param {
int trip_type;
int trip_hyst;
int temp;
+ int prev_temp;
+ int direction;
int cdev_state;
int cdev_max_state;
struct thermal_genl_cpu_caps *cpu_capabilities;
@@ -234,6 +242,34 @@ out_cancel_nest:
return -EMSGSIZE;
}
+static int thermal_genl_event_threshold_add(struct param *p)
+{
+ if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) ||
+ nla_put_u32(p->msg, THERMAL_GENL_ATTR_THRESHOLD_TEMP, p->temp) ||
+ nla_put_u32(p->msg, THERMAL_GENL_ATTR_THRESHOLD_DIRECTION, p->direction))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int thermal_genl_event_threshold_flush(struct param *p)
+{
+ if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int thermal_genl_event_threshold_up(struct param *p)
+{
+ if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) ||
+ nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_PREV_TEMP, p->prev_temp) ||
+ nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TEMP, p->temp))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
int thermal_genl_event_tz_delete(struct param *p)
__attribute__((alias("thermal_genl_event_tz")));
@@ -246,6 +282,12 @@ int thermal_genl_event_tz_disable(struct param *p)
int thermal_genl_event_tz_trip_down(struct param *p)
__attribute__((alias("thermal_genl_event_tz_trip_up")));
+int thermal_genl_event_threshold_delete(struct param *p)
+ __attribute__((alias("thermal_genl_event_threshold_add")));
+
+int thermal_genl_event_threshold_down(struct param *p)
+ __attribute__((alias("thermal_genl_event_threshold_up")));
+
static cb_t event_cb[] = {
[THERMAL_GENL_EVENT_TZ_CREATE] = thermal_genl_event_tz_create,
[THERMAL_GENL_EVENT_TZ_DELETE] = thermal_genl_event_tz_delete,
@@ -259,6 +301,11 @@ static cb_t event_cb[] = {
[THERMAL_GENL_EVENT_CDEV_STATE_UPDATE] = thermal_genl_event_cdev_state_update,
[THERMAL_GENL_EVENT_TZ_GOV_CHANGE] = thermal_genl_event_gov_change,
[THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE] = thermal_genl_event_cpu_capability_change,
+ [THERMAL_GENL_EVENT_THRESHOLD_ADD] = thermal_genl_event_threshold_add,
+ [THERMAL_GENL_EVENT_THRESHOLD_DELETE] = thermal_genl_event_threshold_delete,
+ [THERMAL_GENL_EVENT_THRESHOLD_FLUSH] = thermal_genl_event_threshold_flush,
+ [THERMAL_GENL_EVENT_THRESHOLD_DOWN] = thermal_genl_event_threshold_down,
+ [THERMAL_GENL_EVENT_THRESHOLD_UP] = thermal_genl_event_threshold_up,
};
/*
@@ -401,6 +448,43 @@ int thermal_genl_cpu_capability_event(int count,
}
EXPORT_SYMBOL_GPL(thermal_genl_cpu_capability_event);
+int thermal_notify_threshold_add(const struct thermal_zone_device *tz,
+ int temperature, int direction)
+{
+ struct param p = { .tz_id = tz->id, .temp = temperature, .direction = direction };
+
+ return thermal_genl_send_event(THERMAL_GENL_EVENT_THRESHOLD_ADD, &p);
+}
+
+int thermal_notify_threshold_delete(const struct thermal_zone_device *tz,
+ int temperature, int direction)
+{
+ struct param p = { .tz_id = tz->id, .temp = temperature, .direction = direction };
+
+ return thermal_genl_send_event(THERMAL_GENL_EVENT_THRESHOLD_DELETE, &p);
+}
+
+int thermal_notify_threshold_flush(const struct thermal_zone_device *tz)
+{
+ struct param p = { .tz_id = tz->id };
+
+ return thermal_genl_send_event(THERMAL_GENL_EVENT_THRESHOLD_FLUSH, &p);
+}
+
+int thermal_notify_threshold_down(const struct thermal_zone_device *tz)
+{
+ struct param p = { .tz_id = tz->id, .temp = tz->temperature, .prev_temp = tz->last_temperature };
+
+ return thermal_genl_send_event(THERMAL_GENL_EVENT_THRESHOLD_DOWN, &p);
+}
+
+int thermal_notify_threshold_up(const struct thermal_zone_device *tz)
+{
+ struct param p = { .tz_id = tz->id, .temp = tz->temperature, .prev_temp = tz->last_temperature };
+
+ return thermal_genl_send_event(THERMAL_GENL_EVENT_THRESHOLD_UP, &p);
+}
+
/*************************** Command encoding ********************************/
static int __thermal_genl_cmd_tz_get_id(struct thermal_zone_device *tz,
@@ -459,7 +543,7 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p)
if (!start_trip)
return -EMSGSIZE;
- mutex_lock(&tz->lock);
+ guard(thermal_zone)(tz);
for_each_trip_desc(tz, td) {
const struct thermal_trip *trip = &td->trip;
@@ -469,19 +553,12 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p)
nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, trip->type) ||
nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TEMP, trip->temperature) ||
nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_HYST, trip->hysteresis))
- goto out_cancel_nest;
+ return -EMSGSIZE;
}
- mutex_unlock(&tz->lock);
-
nla_nest_end(msg, start_trip);
return 0;
-
-out_cancel_nest:
- mutex_unlock(&tz->lock);
-
- return -EMSGSIZE;
}
static int thermal_genl_cmd_tz_get_temp(struct param *p)
@@ -512,7 +589,7 @@ static int thermal_genl_cmd_tz_get_temp(struct param *p)
static int thermal_genl_cmd_tz_get_gov(struct param *p)
{
struct sk_buff *msg = p->msg;
- int id, ret = 0;
+ int id;
if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID])
return -EINVAL;
@@ -523,16 +600,14 @@ static int thermal_genl_cmd_tz_get_gov(struct param *p)
if (!tz)
return -EINVAL;
- mutex_lock(&tz->lock);
+ guard(thermal_zone)(tz);
if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_ID, id) ||
nla_put_string(msg, THERMAL_GENL_ATTR_TZ_GOV_NAME,
tz->governor->name))
- ret = -EMSGSIZE;
-
- mutex_unlock(&tz->lock);
+ return -EMSGSIZE;
- return ret;
+ return 0;
}
static int __thermal_genl_cmd_cdev_get(struct thermal_cooling_device *cdev,
@@ -572,12 +647,128 @@ out_cancel_nest:
return ret;
}
+static int __thermal_genl_cmd_threshold_get(struct user_threshold *threshold, void *arg)
+{
+ struct sk_buff *msg = arg;
+
+ if (nla_put_u32(msg, THERMAL_GENL_ATTR_THRESHOLD_TEMP, threshold->temperature) ||
+ nla_put_u32(msg, THERMAL_GENL_ATTR_THRESHOLD_DIRECTION, threshold->direction))
+ return -1;
+
+ return 0;
+}
+
+static int thermal_genl_cmd_threshold_get(struct param *p)
+{
+ struct sk_buff *msg = p->msg;
+ struct nlattr *start_trip;
+ int id, ret;
+
+ if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID])
+ return -EINVAL;
+
+ id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]);
+
+ CLASS(thermal_zone_get_by_id, tz)(id);
+ if (!tz)
+ return -EINVAL;
+
+ start_trip = nla_nest_start(msg, THERMAL_GENL_ATTR_THRESHOLD);
+ if (!start_trip)
+ return -EMSGSIZE;
+
+ ret = thermal_thresholds_for_each(tz, __thermal_genl_cmd_threshold_get, msg);
+ if (ret)
+ return -EMSGSIZE;
+
+ nla_nest_end(msg, start_trip);
+
+ return 0;
+}
+
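Similarly, CLASS(thermal_zone_get_by_id, tz)(id) binds the lookup to the enclosing scope; a plausible definition, again via <linux/cleanup.h>:

/* Hedged sketch: a scope-bound zone lookup. The reference taken by
 * thermal_zone_get_by_id() is dropped when 'tz' leaves scope, even on
 * the early 'return -EINVAL' paths above. */
DEFINE_CLASS(thermal_zone_get_by_id, struct thermal_zone_device *,
	     if (_T) put_device(&_T->device),
	     thermal_zone_get_by_id(id), int id)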
+static int thermal_genl_cmd_threshold_add(struct param *p)
+{
+ int id, temp, direction;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID] ||
+ !p->attrs[THERMAL_GENL_ATTR_THRESHOLD_TEMP] ||
+ !p->attrs[THERMAL_GENL_ATTR_THRESHOLD_DIRECTION])
+ return -EINVAL;
+
+ id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]);
+ temp = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_THRESHOLD_TEMP]);
+ direction = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_THRESHOLD_DIRECTION]);
+
+ CLASS(thermal_zone_get_by_id, tz)(id);
+ if (!tz)
+ return -EINVAL;
+
+ guard(thermal_zone)(tz);
+
+ return thermal_thresholds_add(tz, temp, direction);
+}
+
+static int thermal_genl_cmd_threshold_delete(struct param *p)
+{
+ int id, temp, direction;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID] ||
+ !p->attrs[THERMAL_GENL_ATTR_THRESHOLD_TEMP] ||
+ !p->attrs[THERMAL_GENL_ATTR_THRESHOLD_DIRECTION])
+ return -EINVAL;
+
+ id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]);
+ temp = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_THRESHOLD_TEMP]);
+ direction = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_THRESHOLD_DIRECTION]);
+
+ CLASS(thermal_zone_get_by_id, tz)(id);
+ if (!tz)
+ return -EINVAL;
+
+ guard(thermal_zone)(tz);
+
+ return thermal_thresholds_delete(tz, temp, direction);
+}
+
+static int thermal_genl_cmd_threshold_flush(struct param *p)
+{
+ int id;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID])
+ return -EINVAL;
+
+ id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]);
+
+ CLASS(thermal_zone_get_by_id, tz)(id);
+ if (!tz)
+ return -EINVAL;
+
+ guard(thermal_zone)(tz);
+
+ thermal_thresholds_flush(tz);
+
+ return 0;
+}
+
static cb_t cmd_cb[] = {
- [THERMAL_GENL_CMD_TZ_GET_ID] = thermal_genl_cmd_tz_get_id,
- [THERMAL_GENL_CMD_TZ_GET_TRIP] = thermal_genl_cmd_tz_get_trip,
- [THERMAL_GENL_CMD_TZ_GET_TEMP] = thermal_genl_cmd_tz_get_temp,
- [THERMAL_GENL_CMD_TZ_GET_GOV] = thermal_genl_cmd_tz_get_gov,
- [THERMAL_GENL_CMD_CDEV_GET] = thermal_genl_cmd_cdev_get,
+ [THERMAL_GENL_CMD_TZ_GET_ID] = thermal_genl_cmd_tz_get_id,
+ [THERMAL_GENL_CMD_TZ_GET_TRIP] = thermal_genl_cmd_tz_get_trip,
+ [THERMAL_GENL_CMD_TZ_GET_TEMP] = thermal_genl_cmd_tz_get_temp,
+ [THERMAL_GENL_CMD_TZ_GET_GOV] = thermal_genl_cmd_tz_get_gov,
+ [THERMAL_GENL_CMD_CDEV_GET] = thermal_genl_cmd_cdev_get,
+ [THERMAL_GENL_CMD_THRESHOLD_GET] = thermal_genl_cmd_threshold_get,
+ [THERMAL_GENL_CMD_THRESHOLD_ADD] = thermal_genl_cmd_threshold_add,
+ [THERMAL_GENL_CMD_THRESHOLD_DELETE] = thermal_genl_cmd_threshold_delete,
+ [THERMAL_GENL_CMD_THRESHOLD_FLUSH] = thermal_genl_cmd_threshold_flush,
};
static int thermal_genl_cmd_dumpit(struct sk_buff *skb,
@@ -688,6 +879,26 @@ static const struct genl_small_ops thermal_genl_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.dumpit = thermal_genl_cmd_dumpit,
},
+ {
+ .cmd = THERMAL_GENL_CMD_THRESHOLD_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = thermal_genl_cmd_doit,
+ },
+ {
+ .cmd = THERMAL_GENL_CMD_THRESHOLD_ADD,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = thermal_genl_cmd_doit,
+ },
+ {
+ .cmd = THERMAL_GENL_CMD_THRESHOLD_DELETE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = thermal_genl_cmd_doit,
+ },
+ {
+ .cmd = THERMAL_GENL_CMD_THRESHOLD_FLUSH,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = thermal_genl_cmd_doit,
+ },
};
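To exercise the new commands, a privileged client sends e.g. THERMAL_GENL_CMD_THRESHOLD_ADD with the zone id, temperature and direction attributes. A hedged libnl sketch, assuming the series' uapi additions:

/* Hedged libnl-genl sketch: arm a 75 °C (75000 m°C) threshold on the
 * way up for thermal zone 0. Requires CAP_SYS_ADMIN, per the doit
 * handlers above; error handling omitted. */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/thermal.h>

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int fam;

	genl_connect(sk);
	fam = genl_ctrl_resolve(sk, "thermal");

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
		    THERMAL_GENL_CMD_THRESHOLD_ADD, 1);
	nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_ID, 0);
	nla_put_u32(msg, THERMAL_GENL_ATTR_THRESHOLD_TEMP, 75000);
	nla_put_u32(msg, THERMAL_GENL_ATTR_THRESHOLD_DIRECTION,
		    THERMAL_THRESHOLD_WAY_UP);

	nl_send_auto(sk, msg);
	nl_wait_for_ack(sk);
	return 0;
}

Deleting the same threshold is the identical message with THERMAL_GENL_CMD_THRESHOLD_DELETE.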
static struct genl_family thermal_genl_family __ro_after_init = {
@@ -700,7 +911,7 @@ static struct genl_family thermal_genl_family __ro_after_init = {
.unbind = thermal_genl_unbind,
.small_ops = thermal_genl_ops,
.n_small_ops = ARRAY_SIZE(thermal_genl_ops),
- .resv_start_op = THERMAL_GENL_CMD_CDEV_GET + 1,
+ .resv_start_op = __THERMAL_GENL_CMD_MAX,
.mcgrps = thermal_genl_mcgrps,
.n_mcgrps = ARRAY_SIZE(thermal_genl_mcgrps),
};
diff --git a/drivers/thermal/thermal_netlink.h b/drivers/thermal/thermal_netlink.h
index e01221e8816b..075e9ae85f3d 100644
--- a/drivers/thermal/thermal_netlink.h
+++ b/drivers/thermal/thermal_netlink.h
@@ -53,6 +53,13 @@ int thermal_notify_tz_gov_change(const struct thermal_zone_device *tz,
int thermal_genl_sampling_temp(int id, int temp);
int thermal_genl_cpu_capability_event(int count,
struct thermal_genl_cpu_caps *caps);
+int thermal_notify_threshold_add(const struct thermal_zone_device *tz,
+ int temperature, int direction);
+int thermal_notify_threshold_delete(const struct thermal_zone_device *tz,
+ int temperature, int direction);
+int thermal_notify_threshold_flush(const struct thermal_zone_device *tz);
+int thermal_notify_threshold_down(const struct thermal_zone_device *tz);
+int thermal_notify_threshold_up(const struct thermal_zone_device *tz);
#else
static inline int thermal_netlink_init(void)
{
@@ -139,6 +146,33 @@ static inline int thermal_genl_cpu_capability_event(int count, struct thermal_ge
return 0;
}
+static inline int thermal_notify_threshold_add(const struct thermal_zone_device *tz,
+ int temperature, int direction)
+{
+ return 0;
+}
+
+static inline int thermal_notify_threshold_delete(const struct thermal_zone_device *tz,
+ int temperature, int direction)
+{
+ return 0;
+}
+
+static inline int thermal_notify_threshold_flush(const struct thermal_zone_device *tz)
+{
+ return 0;
+}
+
+static inline int thermal_notify_threshold_down(const struct thermal_zone_device *tz)
+{
+ return 0;
+}
+
+static inline int thermal_notify_threshold_up(const struct thermal_zone_device *tz)
+{
+ return 0;
+}
+
static inline void __init thermal_netlink_exit(void) {}
#endif /* CONFIG_THERMAL_NETLINK */
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 1838aa729bb5..24b9055a0b6c 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -50,13 +50,13 @@ static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- int enabled;
- mutex_lock(&tz->lock);
- enabled = tz->mode == THERMAL_DEVICE_ENABLED;
- mutex_unlock(&tz->lock);
+ guard(thermal_zone)(tz);
- return sprintf(buf, "%s\n", enabled ? "enabled" : "disabled");
+ if (tz->mode == THERMAL_DEVICE_ENABLED)
+ return sprintf(buf, "enabled\n");
+
+ return sprintf(buf, "disabled\n");
}
static ssize_t
@@ -103,38 +103,34 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
{
struct thermal_trip *trip = thermal_trip_of_attr(attr, temp);
struct thermal_zone_device *tz = to_thermal_zone(dev);
- int ret, temp;
+ int temp;
- ret = kstrtoint(buf, 10, &temp);
- if (ret)
+ if (kstrtoint(buf, 10, &temp))
return -EINVAL;
- mutex_lock(&tz->lock);
+ guard(thermal_zone)(tz);
if (temp == trip->temperature)
- goto unlock;
+ return count;
/* Arrange the condition to avoid integer overflows. */
if (temp != THERMAL_TEMP_INVALID &&
- temp <= trip->hysteresis + THERMAL_TEMP_INVALID) {
- ret = -EINVAL;
- goto unlock;
- }
+ temp <= trip->hysteresis + THERMAL_TEMP_INVALID)
+ return -EINVAL;
if (tz->ops.set_trip_temp) {
+ int ret;
+
ret = tz->ops.set_trip_temp(tz, trip, temp);
if (ret)
- goto unlock;
+ return ret;
}
thermal_zone_set_trip_temp(tz, trip, temp);
__thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
-unlock:
- mutex_unlock(&tz->lock);
-
- return ret ? ret : count;
+ return count;
}
static ssize_t
@@ -152,16 +148,15 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
{
struct thermal_trip *trip = thermal_trip_of_attr(attr, hyst);
struct thermal_zone_device *tz = to_thermal_zone(dev);
- int ret, hyst;
+ int hyst;
- ret = kstrtoint(buf, 10, &hyst);
- if (ret || hyst < 0)
+ if (kstrtoint(buf, 10, &hyst) || hyst < 0)
return -EINVAL;
- mutex_lock(&tz->lock);
+ guard(thermal_zone)(tz);
if (hyst == trip->hysteresis)
- goto unlock;
+ return count;
/*
* Allow the hysteresis to be updated when the temperature is invalid
@@ -171,22 +166,17 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
*/
if (trip->temperature == THERMAL_TEMP_INVALID) {
WRITE_ONCE(trip->hysteresis, hyst);
- goto unlock;
+ return count;
}
- if (trip->temperature - hyst <= THERMAL_TEMP_INVALID) {
- ret = -EINVAL;
- goto unlock;
- }
+ if (trip->temperature - hyst <= THERMAL_TEMP_INVALID)
+ return -EINVAL;
thermal_zone_set_trip_hyst(tz, trip, hyst);
__thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
-unlock:
- mutex_unlock(&tz->lock);
-
- return ret ? ret : count;
+ return count;
}
static ssize_t
@@ -236,25 +226,26 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- int ret = 0;
int temperature;
if (kstrtoint(buf, 10, &temperature))
return -EINVAL;
- mutex_lock(&tz->lock);
+ guard(thermal_zone)(tz);
- if (!tz->ops.set_emul_temp)
- tz->emul_temperature = temperature;
- else
- ret = tz->ops.set_emul_temp(tz, temperature);
+ if (tz->ops.set_emul_temp) {
+ int ret;
- if (!ret)
- __thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+ ret = tz->ops.set_emul_temp(tz, temperature);
+ if (ret)
+ return ret;
+ } else {
+ tz->emul_temperature = temperature;
+ }
- mutex_unlock(&tz->lock);
+ __thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
- return ret ? ret : count;
+ return count;
}
static DEVICE_ATTR_WO(emul_temp);
#endif
@@ -553,14 +544,15 @@ cur_state_store(struct device *dev, struct device_attribute *attr,
if (state > cdev->max_state)
return -EINVAL;
- mutex_lock(&cdev->lock);
+ guard(cooling_dev)(cdev);
result = cdev->ops->set_cur_state(cdev, state);
- if (!result)
- thermal_cooling_device_stats_update(cdev, state);
+ if (result)
+ return result;
- mutex_unlock(&cdev->lock);
- return result ? result : count;
+ thermal_cooling_device_stats_update(cdev, state);
+
+ return count;
}
static struct device_attribute
@@ -634,21 +626,18 @@ static ssize_t total_trans_show(struct device *dev,
{
struct thermal_cooling_device *cdev = to_cooling_device(dev);
struct cooling_dev_stats *stats;
- int ret = 0;
+ int ret;
- mutex_lock(&cdev->lock);
+ guard(cooling_dev)(cdev);
stats = cdev->stats;
if (!stats)
- goto unlock;
+ return 0;
spin_lock(&stats->lock);
ret = sprintf(buf, "%u\n", stats->total_trans);
spin_unlock(&stats->lock);
-unlock:
- mutex_unlock(&cdev->lock);
-
return ret;
}
@@ -661,11 +650,11 @@ time_in_state_ms_show(struct device *dev, struct device_attribute *attr,
ssize_t len = 0;
int i;
- mutex_lock(&cdev->lock);
+ guard(cooling_dev)(cdev);
stats = cdev->stats;
if (!stats)
- goto unlock;
+ return 0;
spin_lock(&stats->lock);
@@ -677,9 +666,6 @@ time_in_state_ms_show(struct device *dev, struct device_attribute *attr,
}
spin_unlock(&stats->lock);
-unlock:
- mutex_unlock(&cdev->lock);
-
return len;
}
@@ -691,11 +677,11 @@ reset_store(struct device *dev, struct device_attribute *attr, const char *buf,
struct cooling_dev_stats *stats;
int i, states;
- mutex_lock(&cdev->lock);
+ guard(cooling_dev)(cdev);
stats = cdev->stats;
if (!stats)
- goto unlock;
+ return count;
states = cdev->max_state + 1;
@@ -711,9 +697,6 @@ reset_store(struct device *dev, struct device_attribute *attr, const char *buf,
spin_unlock(&stats->lock);
-unlock:
- mutex_unlock(&cdev->lock);
-
return count;
}
@@ -725,13 +708,11 @@ static ssize_t trans_table_show(struct device *dev,
ssize_t len = 0;
int i, j;
- mutex_lock(&cdev->lock);
+ guard(cooling_dev)(cdev);
stats = cdev->stats;
- if (!stats) {
- len = -ENODATA;
- goto unlock;
- }
+ if (!stats)
+ return -ENODATA;
len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
len += snprintf(buf + len, PAGE_SIZE - len, " : ");
@@ -740,10 +721,8 @@ static ssize_t trans_table_show(struct device *dev,
break;
len += snprintf(buf + len, PAGE_SIZE - len, "state%2u ", i);
}
- if (len >= PAGE_SIZE) {
- len = PAGE_SIZE;
- goto unlock;
- }
+ if (len >= PAGE_SIZE)
+ return PAGE_SIZE;
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
@@ -769,9 +748,6 @@ static ssize_t trans_table_show(struct device *dev,
len = -EFBIG;
}
-unlock:
- mutex_unlock(&cdev->lock);
-
return len;
}
@@ -894,13 +870,11 @@ ssize_t weight_store(struct device *dev, struct device_attribute *attr,
instance = container_of(attr, struct thermal_instance, weight_attr);
/* Don't race with governors using the 'weight' value */
- mutex_lock(&tz->lock);
+ guard(thermal_zone)(tz);
instance->weight = weight;
thermal_governor_update_tz(tz, THERMAL_INSTANCE_WEIGHT_CHANGED);
- mutex_unlock(&tz->lock);
-
return count;
}
diff --git a/drivers/thermal/thermal_thresholds.c b/drivers/thermal/thermal_thresholds.c
new file mode 100644
index 000000000000..d9b2a0bb44fc
--- /dev/null
+++ b/drivers/thermal/thermal_thresholds.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2024 Linaro Limited
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * Thermal thresholds
+ */
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/slab.h>
+
+#include "thermal_core.h"
+#include "thermal_thresholds.h"
+
+int thermal_thresholds_init(struct thermal_zone_device *tz)
+{
+ INIT_LIST_HEAD(&tz->user_thresholds);
+
+ return 0;
+}
+
+static void __thermal_thresholds_flush(struct thermal_zone_device *tz)
+{
+ struct list_head *thresholds = &tz->user_thresholds;
+ struct user_threshold *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, thresholds, list_node) {
+ list_del(&entry->list_node);
+ kfree(entry);
+ }
+}
+
+void thermal_thresholds_flush(struct thermal_zone_device *tz)
+{
+ lockdep_assert_held(&tz->lock);
+
+ __thermal_thresholds_flush(tz);
+
+ thermal_notify_threshold_flush(tz);
+
+ __thermal_zone_device_update(tz, THERMAL_TZ_FLUSH_THRESHOLDS);
+}
+
+void thermal_thresholds_exit(struct thermal_zone_device *tz)
+{
+ __thermal_thresholds_flush(tz);
+}
+
+static int __thermal_thresholds_cmp(void *data,
+ const struct list_head *l1,
+ const struct list_head *l2)
+{
+ struct user_threshold *t1 = container_of(l1, struct user_threshold, list_node);
+ struct user_threshold *t2 = container_of(l2, struct user_threshold, list_node);
+
+ return t1->temperature - t2->temperature;
+}
+
+static struct user_threshold *__thermal_thresholds_find(const struct list_head *thresholds,
+ int temperature)
+{
+ struct user_threshold *t;
+
+ list_for_each_entry(t, thresholds, list_node)
+ if (t->temperature == temperature)
+ return t;
+
+ return NULL;
+}
+
+static bool __thermal_threshold_is_crossed(struct user_threshold *threshold, int temperature,
+ int last_temperature, int direction,
+ int *low, int *high)
+{
+ if (temperature >= threshold->temperature) {
+ if (threshold->temperature > *low &&
+ THERMAL_THRESHOLD_WAY_DOWN & threshold->direction)
+ *low = threshold->temperature;
+
+ if (last_temperature < threshold->temperature &&
+ threshold->direction & direction)
+ return true;
+ } else {
+ if (threshold->temperature < *high && THERMAL_THRESHOLD_WAY_UP
+ & threshold->direction)
+ *high = threshold->temperature;
+
+ if (last_temperature >= threshold->temperature &&
+ threshold->direction & direction)
+ return true;
+ }
+
+ return false;
+}
+
+static bool thermal_thresholds_handle_raising(struct list_head *thresholds, int temperature,
+ int last_temperature, int *low, int *high)
+{
+ struct user_threshold *t;
+
+ list_for_each_entry(t, thresholds, list_node) {
+ if (__thermal_threshold_is_crossed(t, temperature, last_temperature,
+ THERMAL_THRESHOLD_WAY_UP, low, high))
+ return true;
+ }
+
+ return false;
+}
+
+static bool thermal_thresholds_handle_dropping(struct list_head *thresholds, int temperature,
+ int last_temperature, int *low, int *high)
+{
+ struct user_threshold *t;
+
+ list_for_each_entry_reverse(t, thresholds, list_node) {
+ if (__thermal_threshold_is_crossed(t, temperature, last_temperature,
+ THERMAL_THRESHOLD_WAY_DOWN, low, high))
+ return true;
+ }
+
+ return false;
+}
+
+void thermal_thresholds_handle(struct thermal_zone_device *tz, int *low, int *high)
+{
+ struct list_head *thresholds = &tz->user_thresholds;
+
+ int temperature = tz->temperature;
+ int last_temperature = tz->last_temperature;
+
+ lockdep_assert_held(&tz->lock);
+
+ /*
+ * We need a second update in order to detect a threshold being crossed.
+ */
+ if (last_temperature == THERMAL_TEMP_INVALID)
+ return;
+
+ /*
+ * The temperature is stable, so obviously we cannot have
+ * crossed a threshold.
+ */
+ if (last_temperature == temperature)
+ return;
+
+ /*
+ * Since the last update, the temperature has:
+ * - increased: thresholds may have been crossed on the way up
+ * - decreased: thresholds may have been crossed on the way down
+ */
+ if (temperature > last_temperature) {
+ if (thermal_thresholds_handle_raising(thresholds, temperature,
+ last_temperature, low, high))
+ thermal_notify_threshold_up(tz);
+ } else {
+ if (thermal_thresholds_handle_dropping(thresholds, temperature,
+ last_temperature, low, high))
+ thermal_notify_threshold_down(tz);
+ }
+}
+
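The crossing rule above is easy to misread; a hedged, self-contained userspace restatement (the WAY_UP/WAY_DOWN values mirror the uapi flags, all names are illustrative):

/* Hedged sketch of the crossing rule: a threshold fires when the
 * temperature moved across it since the previous sample, in a
 * direction the threshold subscribed to. */
#include <stdbool.h>
#include <stdio.h>

#define WAY_UP   0x1	/* mirrors THERMAL_THRESHOLD_WAY_UP */
#define WAY_DOWN 0x2	/* mirrors THERMAL_THRESHOLD_WAY_DOWN */

static bool crossed(int t, int last, int thresh, int dir)
{
	if (t > last)	/* temperature rising */
		return (dir & WAY_UP) && last < thresh && t >= thresh;
	if (t < last)	/* temperature dropping */
		return (dir & WAY_DOWN) && last >= thresh && t < thresh;
	return false;
}

int main(void)
{
	/* 45 °C threshold armed for the way up, samples in m°C */
	printf("%d\n", crossed(46000, 44000, 45000, WAY_UP));	/* 1 */
	printf("%d\n", crossed(44000, 46000, 45000, WAY_UP));	/* 0 */
	return 0;
}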
+int thermal_thresholds_add(struct thermal_zone_device *tz,
+ int temperature, int direction)
+{
+ struct list_head *thresholds = &tz->user_thresholds;
+ struct user_threshold *t;
+
+ lockdep_assert_held(&tz->lock);
+
+ t = __thermal_thresholds_find(thresholds, temperature);
+ if (t) {
+ if (t->direction == direction)
+ return -EEXIST;
+
+ t->direction |= direction;
+ } else {
+ t = kmalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&t->list_node);
+ t->temperature = temperature;
+ t->direction = direction;
+ list_add(&t->list_node, thresholds);
+ list_sort(NULL, thresholds, __thermal_thresholds_cmp);
+ }
+
+ thermal_notify_threshold_add(tz, temperature, direction);
+
+ __thermal_zone_device_update(tz, THERMAL_TZ_ADD_THRESHOLD);
+
+ return 0;
+}
+
+int thermal_thresholds_delete(struct thermal_zone_device *tz,
+ int temperature, int direction)
+{
+ struct list_head *thresholds = &tz->user_thresholds;
+ struct user_threshold *t;
+
+ lockdep_assert_held(&tz->lock);
+
+ t = __thermal_thresholds_find(thresholds, temperature);
+ if (!t)
+ return -ENOENT;
+
+ if (t->direction == direction) {
+ list_del(&t->list_node);
+ kfree(t);
+ } else {
+ t->direction &= ~direction;
+ }
+
+ thermal_notify_threshold_delete(tz, temperature, direction);
+
+ __thermal_zone_device_update(tz, THERMAL_TZ_DEL_THRESHOLD);
+
+ return 0;
+}
+
+int thermal_thresholds_for_each(struct thermal_zone_device *tz,
+ int (*cb)(struct user_threshold *, void *arg), void *arg)
+{
+ struct list_head *thresholds = &tz->user_thresholds;
+ struct user_threshold *entry;
+ int ret;
+
+ guard(thermal_zone)(tz);
+
+ list_for_each_entry(entry, thresholds, list_node) {
+ ret = cb(entry, arg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/thermal/thermal_thresholds.h b/drivers/thermal/thermal_thresholds.h
new file mode 100644
index 000000000000..cb372659a20d
--- /dev/null
+++ b/drivers/thermal/thermal_thresholds.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __THERMAL_THRESHOLDS_H__
+#define __THERMAL_THRESHOLDS_H__
+
+struct user_threshold {
+ struct list_head list_node;
+ int temperature;
+ int direction;
+};
+
+int thermal_thresholds_init(struct thermal_zone_device *tz);
+void thermal_thresholds_exit(struct thermal_zone_device *tz);
+void thermal_thresholds_handle(struct thermal_zone_device *tz, int *low, int *high);
+void thermal_thresholds_flush(struct thermal_zone_device *tz);
+int thermal_thresholds_add(struct thermal_zone_device *tz, int temperature, int direction);
+int thermal_thresholds_delete(struct thermal_zone_device *tz, int temperature, int direction);
+int thermal_thresholds_for_each(struct thermal_zone_device *tz,
+ int (*cb)(struct user_threshold *, void *arg), void *arg);
+#endif
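For completeness, a hedged sketch of an in-kernel caller of this API; the locking expectation comes from the lockdep_assert_held() checks above, and THERMAL_THRESHOLD_WAY_UP is assumed from the series' uapi additions (the normal entry point remains the THERMAL_GENL_CMD_THRESHOLD_ADD netlink command):

/* Hypothetical in-kernel caller: arm a 75 °C user threshold on the
 * way up for a zone we already hold a reference to. Sketch only. */
static int example_arm_threshold(struct thermal_zone_device *tz)
{
	guard(thermal_zone)(tz);	/* thresholds API expects tz->lock held */
	return thermal_thresholds_add(tz, 75000, THERMAL_THRESHOLD_WAY_UP);
}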
diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
index b53fac333ec5..4b8238468b53 100644
--- a/drivers/thermal/thermal_trip.c
+++ b/drivers/thermal/thermal_trip.c
@@ -45,13 +45,9 @@ int thermal_zone_for_each_trip(struct thermal_zone_device *tz,
int (*cb)(struct thermal_trip *, void *),
void *data)
{
- int ret;
-
- mutex_lock(&tz->lock);
- ret = for_each_thermal_trip(tz, cb, data);
- mutex_unlock(&tz->lock);
+ guard(thermal_zone)(tz);
- return ret;
+ return for_each_thermal_trip(tz, cb, data);
}
EXPORT_SYMBOL_GPL(thermal_zone_for_each_trip);
@@ -92,43 +88,3 @@ int thermal_zone_trip_id(const struct thermal_zone_device *tz,
*/
return trip_to_trip_desc(trip) - tz->trips;
}
-
-void thermal_zone_set_trip_hyst(struct thermal_zone_device *tz,
- struct thermal_trip *trip, int hyst)
-{
- WRITE_ONCE(trip->hysteresis, hyst);
- thermal_notify_tz_trip_change(tz, trip);
-}
-
-void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
- struct thermal_trip *trip, int temp)
-{
- if (trip->temperature == temp)
- return;
-
- WRITE_ONCE(trip->temperature, temp);
- thermal_notify_tz_trip_change(tz, trip);
-
- if (temp == THERMAL_TEMP_INVALID) {
- struct thermal_trip_desc *td = trip_to_trip_desc(trip);
-
- if (tz->temperature >= td->threshold) {
- /*
- * The trip has been crossed on the way up, so some
- * adjustments are needed to compensate for the lack
- * of it going forward.
- */
- if (trip->type == THERMAL_TRIP_PASSIVE) {
- tz->passive--;
- WARN_ON_ONCE(tz->passive < 0);
- }
- thermal_zone_trip_down(tz, trip);
- }
- /*
- * Invalidate the threshold to avoid triggering a spurious
- * trip crossing notification when the trip becomes valid.
- */
- td->threshold = INT_MAX;
- }
-}
-EXPORT_SYMBOL_GPL(thermal_zone_set_trip_temp);
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 472daa588a9d..d5507b63b6cd 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -108,7 +108,7 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
u32 i;
ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
- if (ret < 0) {
+ if (ret) {
IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
return -EIO;
}
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 2dd21e0b399e..7d0c83b5b071 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -373,7 +373,7 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
struct page *pg;
unsigned int nsg;
int sglen;
- u64 pa;
+ u64 pa, offset;
u64 paend;
struct scatterlist *sg;
struct device *dma = mvdev->vdev.dma_dev;
@@ -396,8 +396,10 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
sg = mr->sg_head.sgl;
for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
- paend = map->addr + maplen(map, mr);
- for (pa = map->addr; pa < paend; pa += sglen) {
+ offset = mr->start > map->start ? mr->start - map->start : 0;
+ pa = map->addr + offset;
+ paend = map->addr + offset + maplen(map, mr);
+ for (; pa < paend; pa += sglen) {
pg = pfn_to_page(__phys_to_pfn(pa));
if (!sg) {
mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n",
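A worked example of the off-by-offset being fixed here: if an iotlb map starts at map->start = 0x1000 with map->addr = 0xa000, and the direct MR starts at mr->start = 0x3000, the old loop began the scatterlist walk at pa = 0xa000 (the map's base) even though the MR sits 0x2000 bytes into the map. With the fix, offset = mr->start - map->start = 0x2000, so the walk starts at pa = 0xc000, the address that actually backs mr->start.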
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index dee019977716..5f581e71e201 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -3963,28 +3963,28 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
mvdev->vdev.dma_dev = &mdev->pdev->dev;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
- goto err_mpfs;
+ goto err_alloc;
err = mlx5_vdpa_init_mr_resources(mvdev);
if (err)
- goto err_res;
+ goto err_alloc;
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
err = mlx5_vdpa_create_dma_mr(mvdev);
if (err)
- goto err_mr_res;
+ goto err_alloc;
}
err = alloc_fixed_resources(ndev);
if (err)
- goto err_mr;
+ goto err_alloc;
ndev->cvq_ent.mvdev = mvdev;
INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
if (!mvdev->wq) {
err = -ENOMEM;
- goto err_res2;
+ goto err_alloc;
}
mvdev->vdev.mdev = &mgtdev->mgtdev;
@@ -4010,17 +4010,6 @@ err_setup_vq_res:
_vdpa_unregister_device(&mvdev->vdev);
err_reg:
destroy_workqueue(mvdev->wq);
-err_res2:
- free_fixed_resources(ndev);
-err_mr:
- mlx5_vdpa_clean_mrs(mvdev);
-err_mr_res:
- mlx5_vdpa_destroy_mr_resources(mvdev);
-err_res:
- mlx5_vdpa_free_resources(&ndev->mvdev);
-err_mpfs:
- if (!is_zero_ether_addr(config->mac))
- mlx5_mpfs_del_mac(pfmdev, config->mac);
err_alloc:
put_device(&mvdev->vdev.dev);
return err;
diff --git a/drivers/vdpa/solidrun/snet_main.c b/drivers/vdpa/solidrun/snet_main.c
index 99428a04068d..c8b74980dbd1 100644
--- a/drivers/vdpa/solidrun/snet_main.c
+++ b/drivers/vdpa/solidrun/snet_main.c
@@ -555,7 +555,7 @@ static const struct vdpa_config_ops snet_config_ops = {
static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet)
{
- char name[50];
+ char *name;
int ret, i, mask = 0;
/* We don't know which BAR will be used to communicate..
* We will map every bar with len > 0.
@@ -573,7 +573,10 @@ static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet)
return -ENODEV;
}
- snprintf(name, sizeof(name), "psnet[%s]-bars", pci_name(pdev));
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "psnet[%s]-bars", pci_name(pdev));
+ if (!name)
+ return -ENOMEM;
+
ret = pcim_iomap_regions(pdev, mask, name);
if (ret) {
SNET_ERR(pdev, "Failed to request and map PCI BARs\n");
@@ -590,10 +593,13 @@ static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet)
static int snet_open_vf_bar(struct pci_dev *pdev, struct snet *snet)
{
- char name[50];
+ char *name;
int ret;
- snprintf(name, sizeof(name), "snet[%s]-bar", pci_name(pdev));
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "snet[%s]-bar", pci_name(pdev));
+ if (!name)
+ return -ENOMEM;
+
/* Request and map BAR */
ret = pcim_iomap_regions(pdev, BIT(snet->psnet->cfg.vf_bar), name);
if (ret) {
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index ac4ab22f7d8b..16380764275e 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -612,7 +612,11 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto mdev_err;
}
- mdev_id = kzalloc(sizeof(struct virtio_device_id), GFP_KERNEL);
+ /*
+ * id_table should be a null terminated array, so allocate one additional
+ * entry here, see vdpa_mgmtdev_get_classes().
+ */
+ mdev_id = kcalloc(2, sizeof(struct virtio_device_id), GFP_KERNEL);
if (!mdev_id) {
err = -ENOMEM;
goto mdev_id_err;
@@ -632,8 +636,8 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto probe_err;
}
- mdev_id->device = mdev->id.device;
- mdev_id->vendor = mdev->id.vendor;
+ mdev_id[0].device = mdev->id.device;
+ mdev_id[0].vendor = mdev->id.vendor;
mgtdev->id_table = mdev_id;
mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev);
mgtdev->supported_features = vp_modern_get_features(mdev);
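The terminator matters because consumers walk id_table until a zero entry; roughly, and hedged (names taken from the diff's own comment):

/* Hedged sketch of the walk that needs the zeroed terminator, along
 * the lines of vdpa_mgmtdev_get_classes(). With the old single-entry
 * kzalloc(), the id[1] read below was out of bounds. */
static u64 example_get_classes(const struct vdpa_mgmt_dev *mgtdev)
{
	const struct virtio_device_id *id;
	u64 classes = 0;

	for (id = mgtdev->id_table; id->device; id++)
		classes |= 1ULL << id->device;

	return classes;
}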
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index 95b336de8a17..49559605177e 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -104,15 +104,14 @@ static int vfio_group_ioctl_set_container(struct vfio_group *group,
{
struct vfio_container *container;
struct iommufd_ctx *iommufd;
- struct fd f;
int ret;
int fd;
if (get_user(fd, arg))
return -EFAULT;
- f = fdget(fd);
- if (!fd_file(f))
+ CLASS(fd, f)(fd);
+ if (fd_empty(f))
return -EBADF;
mutex_lock(&group->group_lock);
@@ -153,7 +152,6 @@ static int vfio_group_ioctl_set_container(struct vfio_group *group,
out_unlock:
mutex_unlock(&group->group_lock);
- fdput(f);
return ret;
}
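This is the first of several fdget()/fdput() conversions in this series. The scope-based guard (presumably DEFINE_CLASS(fd, ...) in <linux/file.h>) runs fdput() automatically when the variable leaves scope, which both removes the error-label unwinding and preserves the "do not drop the file until fully initialized" ordering noted in the deleted comments below:

/* Hedged sketch: CLASS(fd, f)(fd) behaves roughly like the old pattern. */
struct fd f = fdget(fd);
if (fd_empty(f))	/* no file attached to this descriptor */
	return -EBADF;
/* ... use fd_file(f) ... */
/* fdput(f) now runs implicitly at end of scope, on every return path. */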
diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
index d22881245e89..aa2891f97508 100644
--- a/drivers/vfio/virqfd.c
+++ b/drivers/vfio/virqfd.c
@@ -113,7 +113,6 @@ int vfio_virqfd_enable(void *opaque,
void (*thread)(void *, void *),
void *data, struct virqfd **pvirqfd, int fd)
{
- struct fd irqfd;
struct eventfd_ctx *ctx;
struct virqfd *virqfd;
int ret = 0;
@@ -133,8 +132,8 @@ int vfio_virqfd_enable(void *opaque,
INIT_WORK(&virqfd->inject, virqfd_inject);
INIT_WORK(&virqfd->flush_inject, virqfd_flush_inject);
- irqfd = fdget(fd);
- if (!fd_file(irqfd)) {
+ CLASS(fd, irqfd)(fd);
+ if (fd_empty(irqfd)) {
ret = -EBADF;
goto err_fd;
}
@@ -142,7 +141,7 @@ int vfio_virqfd_enable(void *opaque,
ctx = eventfd_ctx_fileget(fd_file(irqfd));
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
- goto err_ctx;
+ goto err_fd;
}
virqfd->eventfd = ctx;
@@ -181,18 +180,9 @@ int vfio_virqfd_enable(void *opaque,
if ((!handler || handler(opaque, data)) && thread)
schedule_work(&virqfd->inject);
}
-
- /*
- * Do not drop the file until the irqfd is fully initialized,
- * otherwise we might race against the EPOLLHUP.
- */
- fdput(irqfd);
-
return 0;
err_busy:
eventfd_ctx_put(ctx);
-err_ctx:
- fdput(irqfd);
err_fd:
kfree(virqfd);
diff --git a/drivers/virt/acrn/irqfd.c b/drivers/virt/acrn/irqfd.c
index 9994d818bb7e..b7da24ca1475 100644
--- a/drivers/virt/acrn/irqfd.c
+++ b/drivers/virt/acrn/irqfd.c
@@ -112,7 +112,6 @@ static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args)
struct eventfd_ctx *eventfd = NULL;
struct hsm_irqfd *irqfd, *tmp;
__poll_t events;
- struct fd f;
int ret = 0;
irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
@@ -124,8 +123,8 @@ static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args)
INIT_LIST_HEAD(&irqfd->list);
INIT_WORK(&irqfd->shutdown, hsm_irqfd_shutdown_work);
- f = fdget(args->fd);
- if (!fd_file(f)) {
+ CLASS(fd, f)(args->fd);
+ if (fd_empty(f)) {
ret = -EBADF;
goto out;
}
@@ -133,7 +132,7 @@ static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args)
eventfd = eventfd_ctx_fileget(fd_file(f));
if (IS_ERR(eventfd)) {
ret = PTR_ERR(eventfd);
- goto fail;
+ goto out;
}
irqfd->eventfd = eventfd;
@@ -162,13 +161,9 @@ static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args)
if (events & EPOLLIN)
acrn_irqfd_inject(irqfd);
- fdput(f);
return 0;
fail:
- if (eventfd && !IS_ERR(eventfd))
- eventfd_ctx_put(eventfd);
-
- fdput(f);
+ eventfd_ctx_put(eventfd);
out:
kfree(irqfd);
return ret;
diff --git a/drivers/virt/coco/Kconfig b/drivers/virt/coco/Kconfig
index d9ff676bf48d..ff869d883d95 100644
--- a/drivers/virt/coco/Kconfig
+++ b/drivers/virt/coco/Kconfig
@@ -14,3 +14,5 @@ source "drivers/virt/coco/pkvm-guest/Kconfig"
source "drivers/virt/coco/sev-guest/Kconfig"
source "drivers/virt/coco/tdx-guest/Kconfig"
+
+source "drivers/virt/coco/arm-cca-guest/Kconfig"
diff --git a/drivers/virt/coco/Makefile b/drivers/virt/coco/Makefile
index b69c30c1c720..c3d07cfc087e 100644
--- a/drivers/virt/coco/Makefile
+++ b/drivers/virt/coco/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_EFI_SECRET) += efi_secret/
obj-$(CONFIG_ARM_PKVM_GUEST) += pkvm-guest/
obj-$(CONFIG_SEV_GUEST) += sev-guest/
obj-$(CONFIG_INTEL_TDX_GUEST) += tdx-guest/
+obj-$(CONFIG_ARM_CCA_GUEST) += arm-cca-guest/
diff --git a/drivers/virt/coco/arm-cca-guest/Kconfig b/drivers/virt/coco/arm-cca-guest/Kconfig
new file mode 100644
index 000000000000..9dd27c3ee215
--- /dev/null
+++ b/drivers/virt/coco/arm-cca-guest/Kconfig
@@ -0,0 +1,11 @@
+config ARM_CCA_GUEST
+ tristate "Arm CCA Guest driver"
+ depends on ARM64
+ default m
+ select TSM_REPORTS
+ help
+ The driver provides a userspace interface to request an
+ attestation report from the Realm Management Monitor (RMM).
+
+ If you choose 'M' here, this module will be called
+ arm-cca-guest.
diff --git a/drivers/virt/coco/arm-cca-guest/Makefile b/drivers/virt/coco/arm-cca-guest/Makefile
new file mode 100644
index 000000000000..69eeba08e98a
--- /dev/null
+++ b/drivers/virt/coco/arm-cca-guest/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ARM_CCA_GUEST) += arm-cca-guest.o
diff --git a/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c b/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
new file mode 100644
index 000000000000..488153879ec9
--- /dev/null
+++ b/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 ARM Ltd.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/cc_platform.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/tsm.h>
+#include <linux/types.h>
+
+#include <asm/rsi.h>
+
+/**
+ * struct arm_cca_token_info - a descriptor for the token buffer.
+ * @challenge: Pointer to the challenge data
+ * @challenge_size: Size of the challenge data
+ * @granule: PA of the granule to which the token will be written
+ * @offset: Offset within granule to start of buffer in bytes
+ * @result: result of rsi_attestation_token_continue operation
+ */
+struct arm_cca_token_info {
+ void *challenge;
+ unsigned long challenge_size;
+ phys_addr_t granule;
+ unsigned long offset;
+ unsigned long result;
+};
+
+static void arm_cca_attestation_init(void *param)
+{
+ struct arm_cca_token_info *info;
+
+ info = (struct arm_cca_token_info *)param;
+
+ info->result = rsi_attestation_token_init(info->challenge,
+ info->challenge_size);
+}
+
+/**
+ * arm_cca_attestation_continue - Retrieve the attestation token data.
+ *
+ * @param: pointer to the arm_cca_token_info
+ *
+ * Attestation token generation is a long-running operation and therefore
+ * the token data may not be retrieved in a single call. Moreover, the
+ * token retrieval operation must be requested on the same CPU on which the
+ * attestation token generation was initialised.
+ * This helper function is therefore scheduled on the same CPU multiple
+ * times until the entire token data is retrieved.
+ */
+static void arm_cca_attestation_continue(void *param)
+{
+ unsigned long len;
+ unsigned long size;
+ struct arm_cca_token_info *info;
+
+ info = (struct arm_cca_token_info *)param;
+
+ size = RSI_GRANULE_SIZE - info->offset;
+ info->result = rsi_attestation_token_continue(info->granule,
+ info->offset, size, &len);
+ info->offset += len;
+}
+
+/**
+ * arm_cca_report_new - Generate a new attestation token.
+ *
+ * @report: pointer to the TSM report context information.
+ * @data: pointer to the context specific data for this module.
+ *
+ * Initialise the attestation token generation using the challenge data
+ * passed in the TSM descriptor. Allocate memory for the attestation token
+ * and schedule calls to retrieve the attestation token on the same CPU
+ * on which the attestation token generation was initialised.
+ *
+ * The challenge data must be at least 32 bytes and no more than 64 bytes. If
+ * fewer than 64 bytes are provided, it will be zero-padded to 64 bytes.
+ *
+ * Return:
+ * * %0 - Attestation token generated successfully.
+ * * %-EINVAL - A parameter was not valid.
+ * * %-ENOMEM - Out of memory.
+ * * %-EFAULT - Failed to get IPA for memory page(s).
+ * * A negative status code as returned by smp_call_function_single().
+ */
+static int arm_cca_report_new(struct tsm_report *report, void *data)
+{
+ int ret;
+ int cpu;
+ long max_size;
+ unsigned long token_size = 0;
+ struct arm_cca_token_info info;
+ void *buf;
+ u8 *token __free(kvfree) = NULL;
+ struct tsm_desc *desc = &report->desc;
+
+ if (desc->inblob_len < 32 || desc->inblob_len > 64)
+ return -EINVAL;
+
+ /*
+ * The attestation token 'init' and 'continue' calls must be
+ * performed on the same CPU. smp_call_function_single() is used
+ * instead of simply calling get_cpu() because of the need to
+ * allocate outblob based on the returned value from the 'init'
+ * call and that cannot be done in an atomic context.
+ */
+ cpu = smp_processor_id();
+
+ info.challenge = desc->inblob;
+ info.challenge_size = desc->inblob_len;
+
+ ret = smp_call_function_single(cpu, arm_cca_attestation_init,
+ &info, true);
+ if (ret)
+ return ret;
+ max_size = info.result;
+
+ if (max_size <= 0)
+ return -EINVAL;
+
+ /* Allocate outblob */
+ token = kvzalloc(max_size, GFP_KERNEL);
+ if (!token)
+ return -ENOMEM;
+
+ /*
+ * Since the outblob may not be physically contiguous, use a page
+ * to bounce the buffer from RMM.
+ */
+ buf = alloc_pages_exact(RSI_GRANULE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Get the PA of the memory page(s) that were allocated */
+ info.granule = (unsigned long)virt_to_phys(buf);
+
+ /* Loop until the token is ready or there is an error */
+ do {
+ /* Retrieve one RSI_GRANULE_SIZE data per loop iteration */
+ info.offset = 0;
+ do {
+ /*
+ * Schedule a call to retrieve a sub-granule chunk
+ * of data per loop iteration.
+ */
+ ret = smp_call_function_single(cpu,
+ arm_cca_attestation_continue,
+ (void *)&info, true);
+ if (ret != 0) {
+ token_size = 0;
+ goto exit_free_granule_page;
+ }
+ } while (info.result == RSI_INCOMPLETE &&
+ info.offset < RSI_GRANULE_SIZE);
+
+ if (info.result != RSI_SUCCESS) {
+ ret = -ENXIO;
+ token_size = 0;
+ goto exit_free_granule_page;
+ }
+
+ /*
+ * Copy the retrieved token data from the granule
+ * to the token buffer, ensuring that the RMM doesn't
+ * overflow the buffer.
+ */
+ if (WARN_ON(token_size + info.offset > max_size))
+ break;
+ memcpy(&token[token_size], buf, info.offset);
+ token_size += info.offset;
+ } while (info.result == RSI_INCOMPLETE);
+
+ report->outblob = no_free_ptr(token);
+exit_free_granule_page:
+ report->outblob_len = token_size;
+ free_pages_exact(buf, RSI_GRANULE_SIZE);
+ return ret;
+}
+
+static const struct tsm_ops arm_cca_tsm_ops = {
+ .name = KBUILD_MODNAME,
+ .report_new = arm_cca_report_new,
+};
+
+/**
+ * arm_cca_guest_init - Register with the Trusted Security Module (TSM)
+ * interface.
+ *
+ * Return:
+ * * %0 - Registered successfully with the TSM interface.
+ * * %-ENODEV - The execution context is not an Arm Realm.
+ * * %-EBUSY - Already registered.
+ */
+static int __init arm_cca_guest_init(void)
+{
+ int ret;
+
+ if (!is_realm_world())
+ return -ENODEV;
+
+ ret = tsm_register(&arm_cca_tsm_ops, NULL);
+ if (ret < 0)
+ pr_err("Error %d registering with TSM\n", ret);
+
+ return ret;
+}
+module_init(arm_cca_guest_init);
+
+/**
+ * arm_cca_guest_exit - unregister with the Trusted Security Module (TSM)
+ * interface.
+ */
+static void __exit arm_cca_guest_exit(void)
+{
+ tsm_unregister(&arm_cca_tsm_ops);
+}
+module_exit(arm_cca_guest_exit);
+
+MODULE_AUTHOR("Sami Mujawar <sami.mujawar@arm.com>");
+MODULE_DESCRIPTION("Arm CCA Guest TSM Driver");
+MODULE_LICENSE("GPL");
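Once registered via tsm_register(), the report is consumed through the generic configfs-tsm ABI rather than a driver-specific device node. A hedged userspace sketch (paths per Documentation/ABI/testing/configfs-tsm; report name and buffer sizes are illustrative, error handling omitted):

/* Hedged sketch: request a CCA attestation token via configfs-tsm.
 * The driver requires a 32..64 byte challenge; a robust client would
 * also loop on read() and check the 'generation' attribute. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	char challenge[64] = "nonce-from-relying-party";	/* zero-padded */
	char token[8192];
	ssize_t n;
	int fd;

	mkdir("/sys/kernel/config/tsm/report/report0", 0700);

	fd = open("/sys/kernel/config/tsm/report/report0/inblob", O_WRONLY);
	write(fd, challenge, sizeof(challenge));
	close(fd);

	fd = open("/sys/kernel/config/tsm/report/report0/outblob", O_RDONLY);
	n = read(fd, token, sizeof(token));
	close(fd);

	printf("got %zd bytes of CCA token\n", n);
	return 0;
}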
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 42a48ac763ee..2eb747311bfd 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -122,7 +122,7 @@ config VIRTIO_BALLOON
config VIRTIO_MEM
tristate "Virtio mem driver"
- depends on X86_64 || ARM64 || RISCV
+ depends on X86_64 || ARM64 || RISCV || S390
depends on VIRTIO
depends on MEMORY_HOTPLUG
depends on MEMORY_HOTREMOVE
@@ -132,11 +132,11 @@ config VIRTIO_MEM
This driver provides access to virtio-mem paravirtualized memory
devices, allowing to hotplug and hotunplug memory.
- This driver currently only supports x86-64 and arm64. Although it
- should compile on other architectures that implement memory
- hot(un)plug, architecture-specific and/or common
- code changes may be required for virtio-mem, kdump and kexec to work as
- expected.
+ This driver currently supports x86-64, arm64, riscv and s390.
+ Although it should compile on other architectures that implement
+ memory hot(un)plug, architecture-specific and/or common
+ code changes may be required for virtio-mem, kdump and kexec to
+ work as expected.
If unsure, say M.
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index c44d8ba00c02..88074451dd61 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -24,6 +24,16 @@ MODULE_PARM_DESC(force_legacy,
"Force legacy mode for transitional virtio 1 devices");
#endif
+bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return false;
+
+ return index == vp_dev->admin_vq.vq_index;
+}
+
/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
@@ -234,10 +244,9 @@ out_info:
return vq;
}
-static void vp_del_vq(struct virtqueue *vq)
+static void vp_del_vq(struct virtqueue *vq, struct virtio_pci_vq_info *info)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
- struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
unsigned long flags;
/*
@@ -258,13 +267,16 @@ static void vp_del_vq(struct virtqueue *vq)
void vp_del_vqs(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_vq_info *info;
struct virtqueue *vq, *n;
int i;
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
- if (vp_dev->per_vq_vectors) {
- int v = vp_dev->vqs[vq->index]->msix_vector;
+ info = vp_is_avq(vdev, vq->index) ? vp_dev->admin_vq.info :
+ vp_dev->vqs[vq->index];
+ if (vp_dev->per_vq_vectors) {
+ int v = info->msix_vector;
if (v != VIRTIO_MSI_NO_VECTOR &&
!vp_is_slow_path_vector(v)) {
int irq = pci_irq_vector(vp_dev->pci_dev, v);
@@ -273,7 +285,7 @@ void vp_del_vqs(struct virtio_device *vdev)
free_irq(irq, vq);
}
}
- vp_del_vq(vq);
+ vp_del_vq(vq, info);
}
vp_dev->per_vq_vectors = false;
@@ -354,7 +366,7 @@ vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
vring_interrupt, 0,
vp_dev->msix_names[msix_vec], vq);
if (err) {
- vp_del_vq(vq);
+ vp_del_vq(vq, *p_info);
return ERR_PTR(err);
}
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 1d9c49947f52..8beecf23ec85 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -178,6 +178,7 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
#define VIRTIO_ADMIN_CMD_BITMAP 0
#endif
+bool vp_is_avq(struct virtio_device *vdev, unsigned int index);
void vp_modern_avq_done(struct virtqueue *vq);
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
struct virtio_admin_cmd *cmd);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 9193c30d640a..4fbcbc7a9ae1 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -43,16 +43,6 @@ static int vp_avq_index(struct virtio_device *vdev, u16 *index, u16 *num)
return 0;
}
-static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-
- if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
- return false;
-
- return index == vp_dev->admin_vq.vq_index;
-}
-
void vp_modern_avq_done(struct virtqueue *vq)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
@@ -245,7 +235,7 @@ static void vp_modern_avq_cleanup(struct virtio_device *vdev)
if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
return;
- vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq;
+ vq = vp_dev->admin_vq.info->vq;
if (!vq)
return;
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 4f75bc876454..13a10f3294a8 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -965,10 +965,11 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
struct privcmd_kernel_irqfd *kirqfd, *tmp;
unsigned long flags;
__poll_t events;
- struct fd f;
void *dm_op;
int ret, idx;
+ CLASS(fd, f)(irqfd->fd);
+
kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL);
if (!kirqfd)
return -ENOMEM;
@@ -984,8 +985,7 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
kirqfd->dom = irqfd->dom;
INIT_WORK(&kirqfd->shutdown, irqfd_shutdown);
- f = fdget(irqfd->fd);
- if (!fd_file(f)) {
+ if (fd_empty(f)) {
ret = -EBADF;
goto error_kfree;
}
@@ -993,7 +993,7 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
kirqfd->eventfd = eventfd_ctx_fileget(fd_file(f));
if (IS_ERR(kirqfd->eventfd)) {
ret = PTR_ERR(kirqfd->eventfd);
- goto error_fd_put;
+ goto error_kfree;
}
/*
@@ -1026,20 +1026,11 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
irqfd_inject(kirqfd);
srcu_read_unlock(&irqfds_srcu, idx);
-
- /*
- * Do not drop the file until the kirqfd is fully initialized, otherwise
- * we might race against the EPOLLHUP.
- */
- fdput(f);
return 0;
error_eventfd:
eventfd_ctx_put(kirqfd->eventfd);
-error_fd_put:
- fdput(f);
-
error_kfree:
kfree(kirqfd);
return ret;
@@ -1350,7 +1341,6 @@ static int privcmd_ioeventfd_assign(struct privcmd_ioeventfd *ioeventfd)
struct privcmd_kernel_ioeventfd *kioeventfd;
struct privcmd_kernel_ioreq *kioreq;
unsigned long flags;
- struct fd f;
int ret;
/* Check for range overflow */
@@ -1370,15 +1360,7 @@ static int privcmd_ioeventfd_assign(struct privcmd_ioeventfd *ioeventfd)
if (!kioeventfd)
return -ENOMEM;
- f = fdget(ioeventfd->event_fd);
- if (!fd_file(f)) {
- ret = -EBADF;
- goto error_kfree;
- }
-
- kioeventfd->eventfd = eventfd_ctx_fileget(fd_file(f));
- fdput(f);
-
+ kioeventfd->eventfd = eventfd_ctx_fdget(ioeventfd->event_fd);
if (IS_ERR(kioeventfd->eventfd)) {
ret = PTR_ERR(kioeventfd->eventfd);
goto error_kfree;
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 9f097f1f4a4c..6d32ffb01136 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -313,7 +313,7 @@ int xenbus_dev_probe(struct device *_dev)
if (err) {
dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
dev->nodename);
- return err;
+ goto fail_remove;
}
dev->spurious_threshold = 1;
@@ -322,6 +322,12 @@ int xenbus_dev_probe(struct device *_dev)
dev->nodename);
return 0;
+fail_remove:
+ if (drv->remove) {
+ down(&dev->reclaim_sem);
+ drv->remove(dev);
+ up(&dev->reclaim_sem);
+ }
fail_put:
module_put(drv->driver.owner);
fail: