Diffstat (limited to 'drivers')
-rw-r--r--drivers/accel/amdxdna/aie2_pci.c6
-rw-r--r--drivers/acpi/acpi_pcc.c2
-rw-r--r--drivers/acpi/cppc_acpi.c3
-rw-r--r--drivers/acpi/pci_irq.c19
-rw-r--r--drivers/acpi/pci_link.c39
-rw-r--r--drivers/android/binder/page_range.rs3
-rw-r--r--drivers/ata/libata-core.c3
-rw-r--r--drivers/atm/he.c3
-rw-r--r--drivers/base/power/runtime.c22
-rw-r--r--drivers/block/loop.c67
-rw-r--r--drivers/block/rnbd/rnbd-clt.c13
-rw-r--r--drivers/block/rnbd/rnbd-clt.h2
-rw-r--r--drivers/block/ublk_drv.c77
-rw-r--r--drivers/block/zloop.c8
-rw-r--r--drivers/bluetooth/btusb.c12
-rw-r--r--drivers/counter/104-quad-8.c20
-rw-r--r--drivers/counter/interrupt-cnt.c3
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c7
-rw-r--r--drivers/crypto/hisilicon/qm.c9
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_aer.c2
-rw-r--r--drivers/firewire/nosy.c10
-rw-r--r--drivers/firmware/efi/efi.c1
-rw-r--r--drivers/firmware/efi/libstub/gop.c8
-rw-r--r--drivers/gpio/gpio-it87.c11
-rw-r--r--drivers/gpio/gpio-mpsse.c12
-rw-r--r--drivers/gpio/gpio-pca953x.c25
-rw-r--r--drivers/gpio/gpio-rockchip.c1
-rw-r--r--drivers/gpio/gpiolib-shared.c249
-rw-r--r--drivers/gpio/gpiolib-shared.h4
-rw-r--r--drivers/gpio/gpiolib.c136
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c41
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c545
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c33
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c37
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c9
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c122
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c10
-rw-r--r--drivers/gpu/drm/drm_gem.c8
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c5
-rw-r--r--drivers/gpu/drm/drm_pagemap.c17
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c37
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.c11
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_catalog.c13
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c52
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h1
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_preempt.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c38
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c29
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h84
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h23
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h47
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h16
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c137
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_format.h6
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.h2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c1
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.h9
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.h8
-rw-r--r--drivers/gpu/drm/msm/msm_fence.h36
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c5
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h68
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c4
-rw-r--r--drivers/gpu/drm/msm/msm_perf.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/atom.h13
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c3
-rw-r--r--drivers/gpu/drm/panel/Kconfig1
-rw-r--r--drivers/gpu/drm/panel/panel-sony-td4353-jdi.c2
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/pptable.h2
-rw-r--r--drivers/gpu/drm/tests/drm_atomic_state_test.c40
-rw-r--r--drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c143
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c30
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c15
-rw-r--r--drivers/gpu/drm/xe/xe_device.c2
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.c2
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.c2
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c3
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c7
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.c4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c8
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c14
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c35
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c4
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c25
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h6
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c12
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vfio.c2
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c51
-rw-r--r--drivers/gpu/drm/xe/xe_svm.h2
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c8
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c8
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules1
-rw-r--r--drivers/gpu/nova-core/Kconfig2
-rw-r--r--drivers/gpu/nova-core/gsp/cmdq.rs14
-rw-r--r--drivers/gpu/nova-core/gsp/fw.rs78
-rw-r--r--drivers/gpu/nova-core/gsp/fw/r570_144.rs11
-rw-r--r--drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs105
-rw-r--r--drivers/hid/bpf/progs/Makefile6
-rw-r--r--drivers/hid/hid-debug.c1
-rw-r--r--drivers/hid/hid-elecom.c15
-rw-r--r--drivers/hid/hid-ids.h7
-rw-r--r--drivers/hid/hid-logitech-hidpp.c2
-rw-r--r--drivers/hid/hid-multitouch.c13
-rw-r--r--drivers/hid/hid-playstation.c5
-rw-r--r--drivers/hid/hid-quirks.c14
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c1
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c1
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c12
-rw-r--r--drivers/hid/intel-thc-hid/Kconfig1
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c4
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c9
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h2
-rw-r--r--drivers/hid/usbhid/hid-core.c17
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c9
-rw-r--r--drivers/hwmon/ibmpex.c9
-rw-r--r--drivers/hwmon/ltc4282.c9
-rw-r--r--drivers/hwmon/tmp401.c2
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c3
-rw-r--r--drivers/i2c/busses/i2c-i801.c3
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/i2c/busses/i2c-rcar.c2
-rw-r--r--drivers/infiniband/core/addr.c33
-rw-r--r--drivers/infiniband/core/cma.c3
-rw-r--r--drivers/infiniband/core/device.c4
-rw-r--r--drivers/infiniband/core/verbs.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h6
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c7
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c8
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c4
-rw-r--r--drivers/infiniband/hw/irdma/utils.c3
-rw-r--r--drivers/infiniband/hw/mana/cq.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c32
-rw-r--r--drivers/infiniband/sw/rxe/rxe_odp.c4
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c1
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h32
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.h24
-rw-r--r--drivers/input/joystick/xpad.c5
-rw-r--r--drivers/input/keyboard/atkbd.c7
-rw-r--r--drivers/input/keyboard/lkkbd.c5
-rw-r--r--drivers/input/mouse/alps.c1
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h7
-rw-r--r--drivers/input/touchscreen/apple_z2.c4
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c2
-rw-r--r--drivers/iommu/amd/amd_iommu.h5
-rw-r--r--drivers/iommu/amd/init.c24
-rw-r--r--drivers/iommu/amd/iommu.c27
-rw-r--r--drivers/iommu/generic_pt/.kunitconfig2
-rw-r--r--drivers/iommu/generic_pt/iommu_pt.h3
-rw-r--r--drivers/iommu/generic_pt/pt_defs.h4
-rw-r--r--drivers/iommu/intel/irq_remapping.c8
-rw-r--r--drivers/iommu/iommufd/Kconfig3
-rw-r--r--drivers/iommu/iommufd/io_pagetable.c6
-rw-r--r--drivers/iommu/iommufd/selftest.c14
-rw-r--r--drivers/irqchip/irq-gic-v5-its.c2
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.c10
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.h2
-rw-r--r--drivers/md/md.c61
-rw-r--r--drivers/md/raid5.c10
-rw-r--r--drivers/media/mc/mc-request.c6
-rw-r--r--drivers/misc/lkdtm/bugs.c53
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/pci-me.c2
-rw-r--r--drivers/misc/rp1/Kconfig6
-rw-r--r--drivers/misc/rp1/Makefile3
-rw-r--r--drivers/misc/rp1/rp1-pci.dtso25
-rw-r--r--drivers/misc/rp1/rp1_pci.c37
-rw-r--r--drivers/mmc/host/Kconfig4
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c2
-rw-r--r--drivers/mtd/nand/ecc-sw-hamming.c2
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c2
-rw-r--r--drivers/mtd/nand/raw/nand_base.c4
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c2
-rw-r--r--drivers/mtd/nand/raw/nand_ids.c2
-rw-r--r--drivers/mtd/nand/raw/nand_jedec.c2
-rw-r--r--drivers/mtd/nand/raw/nand_legacy.c2
-rw-r--r--drivers/mtd/nand/raw/nand_onfi.c2
-rw-r--r--drivers/mtd/nand/raw/ndfc.c2
-rw-r--r--drivers/net/can/Kconfig7
-rw-r--r--drivers/net/can/Makefile2
-rw-r--r--drivers/net/can/dev/Makefile5
-rw-r--r--drivers/net/can/usb/gs_usb.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c3
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip.c3
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip.h2
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip_common.c19
-rw-r--r--drivers/net/dsa/lantiq/mxl-gsw1xx.c46
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c23
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h4
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c46
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h5
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c39
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c9
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_devlink.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig9
-rw-r--r--drivers/net/ethernet/broadcom/b44.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_core.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c21
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c3
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c3
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c3
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h4
-rw-r--r--drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c8
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c7
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c19
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c4
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h7
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c92
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_idc.c2
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c276
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c46
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h6
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c8
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c27
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c6
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c5
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c17
-rw-r--r--drivers/net/ethernet/ti/Kconfig3
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig4
-rw-r--r--drivers/net/fjes/fjes_hw.c12
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c3
-rw-r--r--drivers/net/mdio/mdio-aspeed.c7
-rw-r--r--drivers/net/mdio/mdio-realtek-rtl9300.c6
-rw-r--r--drivers/net/netdevsim/bus.c8
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c2
-rw-r--r--drivers/net/phy/mediatek/mtk-ge-soc.c2
-rw-r--r--drivers/net/phy/mxl-86110.c3
-rw-r--r--drivers/net/phy/realtek/realtek_main.c4
-rw-r--r--drivers/net/phy/sfp.c4
-rw-r--r--drivers/net/team/team_core.c2
-rw-r--r--drivers/net/usb/asix_common.c5
-rw-r--r--drivers/net/usb/ax88172a.c6
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/usb/sr9700.c9
-rw-r--r--drivers/net/usb/usbnet.c3
-rw-r--r--drivers/net/virtio_net.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ptp.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c5
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c6
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux.c6
-rw-r--r--drivers/nfc/pn533/usb.c2
-rw-r--r--drivers/of/fdt.c2
-rw-r--r--drivers/of/unittest.c8
-rw-r--r--drivers/parisc/sba_iommu.c4
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c39
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c4
-rw-r--r--drivers/pci/quirks.c1
-rw-r--r--drivers/pci/vgaarb.c7
-rw-r--r--drivers/pinctrl/Kconfig1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8189.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c2
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c14
-rw-r--r--drivers/platform/x86/asus-armoury.h176
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c1
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-wmax.c32
-rw-r--r--drivers/platform/x86/dell/dell-lis3lv02d.c1
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c4
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/int-attributes.c2
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c5
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c5
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/string-attributes.c2
-rw-r--r--drivers/platform/x86/ibm_rtl.c2
-rw-r--r--drivers/platform/x86/intel/pmt/discovery.c8
-rw-r--r--drivers/platform/x86/lenovo/ideapad-laptop.c2
-rw-r--r--drivers/platform/x86/lenovo/think-lmi.c6
-rw-r--r--drivers/platform/x86/msi-laptop.c3
-rw-r--r--drivers/platform/x86/samsung-galaxybook.c9
-rw-r--r--drivers/platform/x86/uniwill/uniwill-acpi.c7
-rw-r--r--drivers/pmdomain/imx/gpc.c5
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.c21
-rw-r--r--drivers/powercap/intel_rapl_common.c24
-rw-r--r--drivers/powercap/intel_rapl_msr.c4
-rw-r--r--drivers/powercap/powercap_sys.c22
-rw-r--r--drivers/regulator/fp9931.c3
-rw-r--r--drivers/resctrl/mpam_devices.c7
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c2
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/sg.c20
-rw-r--r--drivers/spi/spi-cadence-quadspi.c31
-rw-r--r--drivers/spi/spi-fsl-spi.c2
-rw-r--r--drivers/spi/spi-mem.c15
-rw-r--r--drivers/spi/spi-mpfs.c1
-rw-r--r--drivers/spi/spi-mt65xx.c2
-rw-r--r--drivers/spi/spi-sun6i.c11
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c3
-rw-r--r--drivers/thermal/thermal_core.c4
-rw-r--r--drivers/tty/serial/8250/8250_loongson.c4
-rw-r--r--drivers/tty/serial/serial_base_bus.c11
-rw-r--r--drivers/tty/serial/sh-sci.c2
-rw-r--r--drivers/tty/serial/xilinx_uartps.c14
-rw-r--r--drivers/ufs/core/ufshcd.c5
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c7
-rw-r--r--drivers/usb/dwc3/gadget.c2
-rw-r--r--drivers/usb/dwc3/host.c2
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c42
-rw-r--r--drivers/usb/host/ohci-nxp.c18
-rw-r--r--drivers/usb/host/xhci-dbgtty.c2
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c1
-rw-r--r--drivers/usb/phy/phy-isp1301.c7
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c2
-rw-r--r--drivers/usb/storage/unusual_uas.h2
-rw-r--r--drivers/usb/typec/altmodes/displayport.c8
-rw-r--r--drivers/usb/typec/ucsi/Kconfig1
-rw-r--r--drivers/usb/typec/ucsi/cros_ec_ucsi.c5
-rw-r--r--drivers/usb/typec/ucsi/debugfs.c36
-rw-r--r--drivers/usb/typec/ucsi/displayport.c11
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c118
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h22
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c25
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c11
-rw-r--r--drivers/usb/typec/ucsi/ucsi_yoga_c630.c15
-rw-r--r--drivers/vfio/pci/nvgrace-gpu/main.c4
-rw-r--r--drivers/vfio/pci/pds/dirty.c7
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c25
-rw-r--r--drivers/vfio/pci/xe/main.c5
-rw-r--r--drivers/vhost/vsock.c15
-rw-r--r--drivers/xen/acpi.c13
408 files changed, 3816 insertions, 2448 deletions
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
index ceef1c502e9e..8141d8e51636 100644
--- a/drivers/accel/amdxdna/aie2_pci.c
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -17,6 +17,7 @@
#include <linux/iopoll.h>
#include <linux/pci.h>
#include <linux/xarray.h>
+#include <asm/hypervisor.h>
#include "aie2_msg_priv.h"
#include "aie2_pci.h"
@@ -508,6 +509,11 @@ static int aie2_init(struct amdxdna_dev *xdna)
unsigned long bars = 0;
int i, nvec, ret;
+ if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
+ XDNA_ERR(xdna, "Running under hypervisor not supported");
+ return -EINVAL;
+ }
+
ndev = drmm_kzalloc(&xdna->ddev, sizeof(*ndev), GFP_KERNEL);
if (!ndev)
return -ENOMEM;
diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c
index 97064e943768..e3f302b9dee5 100644
--- a/drivers/acpi/acpi_pcc.c
+++ b/drivers/acpi/acpi_pcc.c
@@ -52,7 +52,7 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
struct pcc_data *data;
struct acpi_pcc_info *ctx = handler_context;
struct pcc_mbox_chan *pcc_chan;
- static acpi_status ret;
+ acpi_status ret;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 3bdeeee3414e..e66e20d1f31b 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1366,7 +1366,8 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
/* Are any of the regs PCC ?*/
if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
- CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
+ CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg) ||
+ CPC_IN_PCC(guaranteed_reg)) {
if (pcc_ss_id < 0) {
pr_debug("Invalid pcc_ss_id\n");
return -ENODEV;
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index ad81aa03fe2f..c416942ff3e2 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -188,7 +188,7 @@ static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev,
* the IRQ value, which is hardwired to specific interrupt inputs on
* the interrupt controller.
*/
- pr_debug("%04x:%02x:%02x[%c] -> %s[%d]\n",
+ pr_debug("%04x:%02x:%02x[%c] -> %s[%u]\n",
entry->id.segment, entry->id.bus, entry->id.device,
pin_name(entry->pin), prt->source, entry->index);
@@ -384,7 +384,7 @@ static inline bool acpi_pci_irq_valid(struct pci_dev *dev, u8 pin)
int acpi_pci_irq_enable(struct pci_dev *dev)
{
struct acpi_prt_entry *entry;
- int gsi;
+ u32 gsi;
u8 pin;
int triggering = ACPI_LEVEL_SENSITIVE;
/*
@@ -422,18 +422,21 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return 0;
}
+ rc = -ENODEV;
+
if (entry) {
if (entry->link)
- gsi = acpi_pci_link_allocate_irq(entry->link,
+ rc = acpi_pci_link_allocate_irq(entry->link,
entry->index,
&triggering, &polarity,
- &link);
- else
+ &link, &gsi);
+ else {
gsi = entry->index;
- } else
- gsi = -1;
+ rc = 0;
+ }
+ }
- if (gsi < 0) {
+ if (rc < 0) {
/*
* No IRQ known to the ACPI subsystem - maybe the BIOS /
* driver reported one, then use it. Exit in any case.
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index bed7dc85612e..b91b039a3d20 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -448,7 +448,7 @@ static int acpi_isa_irq_penalty[ACPI_MAX_ISA_IRQS] = {
/* >IRQ15 */
};
-static int acpi_irq_pci_sharing_penalty(int irq)
+static int acpi_irq_pci_sharing_penalty(u32 irq)
{
struct acpi_pci_link *link;
int penalty = 0;
@@ -474,7 +474,7 @@ static int acpi_irq_pci_sharing_penalty(int irq)
return penalty;
}
-static int acpi_irq_get_penalty(int irq)
+static int acpi_irq_get_penalty(u32 irq)
{
int penalty = 0;
@@ -528,7 +528,7 @@ static int acpi_irq_balance = -1; /* 0: static, 1: balance */
static int acpi_pci_link_allocate(struct acpi_pci_link *link)
{
acpi_handle handle = link->device->handle;
- int irq;
+ u32 irq;
int i;
if (link->irq.initialized) {
@@ -598,44 +598,53 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
return 0;
}
-/*
- * acpi_pci_link_allocate_irq
- * success: return IRQ >= 0
- * failure: return -1
+/**
+ * acpi_pci_link_allocate_irq(): Retrieve a link device GSI
+ *
+ * @handle: Handle for the link device
+ * @index: GSI index
+ * @triggering: pointer to store the GSI trigger
+ * @polarity: pointer to store GSI polarity
+ * @name: pointer to store link device name
+ * @gsi: pointer to store GSI number
+ *
+ * Returns:
+ * 0 on success with @triggering, @polarity, @name, @gsi initialized.
+ * -ENODEV on failure
*/
int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
- int *polarity, char **name)
+ int *polarity, char **name, u32 *gsi)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
struct acpi_pci_link *link;
if (!device) {
acpi_handle_err(handle, "Invalid link device\n");
- return -1;
+ return -ENODEV;
}
link = acpi_driver_data(device);
if (!link) {
acpi_handle_err(handle, "Invalid link context\n");
- return -1;
+ return -ENODEV;
}
/* TBD: Support multiple index (IRQ) entries per Link Device */
if (index) {
acpi_handle_err(handle, "Invalid index %d\n", index);
- return -1;
+ return -ENODEV;
}
mutex_lock(&acpi_link_lock);
if (acpi_pci_link_allocate(link)) {
mutex_unlock(&acpi_link_lock);
- return -1;
+ return -ENODEV;
}
if (!link->irq.active) {
mutex_unlock(&acpi_link_lock);
acpi_handle_err(handle, "Link active IRQ is 0!\n");
- return -1;
+ return -ENODEV;
}
link->refcnt++;
mutex_unlock(&acpi_link_lock);
@@ -647,7 +656,9 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
if (name)
*name = acpi_device_bid(link->device);
acpi_handle_debug(handle, "Link is referenced\n");
- return link->irq.active;
+ *gsi = link->irq.active;
+
+ return 0;
}
/*
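The pci_link.c change above reworks the calling convention of acpi_pci_link_allocate_irq(): instead of returning the GSI on success and -1 on failure, it now returns 0 or -ENODEV and hands the GSI back through the new *gsi output parameter, which lets the GSI keep an unsigned type. A minimal caller sketch, condensed from the pci_irq.c hunk earlier in this diff (surrounding error handling trimmed; handle_no_gsi() is a hypothetical fallback helper):

    u32 gsi;
    int rc = -ENODEV;

    if (entry) {
        if (entry->link)
            rc = acpi_pci_link_allocate_irq(entry->link, entry->index,
                                            &triggering, &polarity,
                                            &link, &gsi);
        else {
            gsi = entry->index;
            rc = 0;
        }
    }

    if (rc < 0)
        return handle_no_gsi(dev);  /* hypothetical: fall back to a BIOS/driver-reported IRQ */
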
diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
index 9379038f61f5..fdd97112ef5c 100644
--- a/drivers/android/binder/page_range.rs
+++ b/drivers/android/binder/page_range.rs
@@ -727,8 +727,5 @@ unsafe extern "C" fn rust_shrink_free_page(
drop(mm);
drop(page);
- // SAFETY: We just unlocked the lru lock, but it should be locked when we return.
- unsafe { bindings::spin_lock(&raw mut (*lru).lock) };
-
LRU_REMOVED_ENTRY
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 0b24bd169d61..09d8c035fcdf 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4143,6 +4143,9 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_QUIRK_NONCQ |
ATA_QUIRK_FIRMWARE_WARN },
+ /* Seagate disks with LPM issues */
+ { "ST2000DM008-2FR102", NULL, ATA_QUIRK_NOLPM },
+
/* drives which fail FPDMA_AA activation (some may freeze afterwards)
the ST disks also have LPM issues */
{ "ST1000LM024 HN-M101MBB", NULL, ATA_QUIRK_BROKEN_FPDMA_AA |
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index ad91cc6a34fc..92a041d5387b 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1587,7 +1587,8 @@ he_stop(struct he_dev *he_dev)
he_dev->tbrq_base, he_dev->tbrq_phys);
if (he_dev->tpdrq_base)
- dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+ dma_free_coherent(&he_dev->pci_dev->dev,
+ CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
he_dev->tpdrq_base, he_dev->tpdrq_phys);
dma_pool_destroy(he_dev->tpd_pool);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 84676cc24221..0ee8ea971aa4 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1868,16 +1868,18 @@ void pm_runtime_init(struct device *dev)
*/
void pm_runtime_reinit(struct device *dev)
{
- if (!pm_runtime_enabled(dev)) {
- if (dev->power.runtime_status == RPM_ACTIVE)
- pm_runtime_set_suspended(dev);
- if (dev->power.irq_safe) {
- spin_lock_irq(&dev->power.lock);
- dev->power.irq_safe = 0;
- spin_unlock_irq(&dev->power.lock);
- if (dev->parent)
- pm_runtime_put(dev->parent);
- }
+ if (pm_runtime_enabled(dev))
+ return;
+
+ if (dev->power.runtime_status == RPM_ACTIVE)
+ pm_runtime_set_suspended(dev);
+
+ if (dev->power.irq_safe) {
+ spin_lock_irq(&dev->power.lock);
+ dev->power.irq_safe = 0;
+ spin_unlock_irq(&dev->power.lock);
+ if (dev->parent)
+ pm_runtime_put(dev->parent);
}
/*
* Clear power.needs_force_resume in case it has been set by
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 272bc608e528..bd59c0e9508b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1082,7 +1082,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
/* Order wrt reading lo_state in loop_validate_file(). */
wmb();
- lo->lo_state = Lo_bound;
+ WRITE_ONCE(lo->lo_state, Lo_bound);
if (part_shift)
lo->lo_flags |= LO_FLAGS_PARTSCAN;
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
@@ -1179,7 +1179,7 @@ static void __loop_clr_fd(struct loop_device *lo)
if (!part_shift)
set_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
mutex_lock(&lo->lo_mutex);
- lo->lo_state = Lo_unbound;
+ WRITE_ONCE(lo->lo_state, Lo_unbound);
mutex_unlock(&lo->lo_mutex);
/*
@@ -1218,23 +1218,35 @@ static int loop_clr_fd(struct loop_device *lo)
lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
if (disk_openers(lo->lo_disk) == 1)
- lo->lo_state = Lo_rundown;
+ WRITE_ONCE(lo->lo_state, Lo_rundown);
loop_global_unlock(lo, true);
return 0;
}
static int
-loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+loop_set_status(struct loop_device *lo, blk_mode_t mode,
+ struct block_device *bdev, const struct loop_info64 *info)
{
int err;
bool partscan = false;
bool size_changed = false;
unsigned int memflags;
+ /*
+ * If we don't hold exclusive handle for the device, upgrade to it
+ * here to avoid changing device under exclusive owner.
+ */
+ if (!(mode & BLK_OPEN_EXCL)) {
+ err = bd_prepare_to_claim(bdev, loop_set_status, NULL);
+ if (err)
+ goto out_reread_partitions;
+ }
+
err = mutex_lock_killable(&lo->lo_mutex);
if (err)
- return err;
+ goto out_abort_claiming;
+
if (lo->lo_state != Lo_bound) {
err = -ENXIO;
goto out_unlock;
@@ -1273,6 +1285,10 @@ out_unfreeze:
}
out_unlock:
mutex_unlock(&lo->lo_mutex);
+out_abort_claiming:
+ if (!(mode & BLK_OPEN_EXCL))
+ bd_abort_claiming(bdev, loop_set_status);
+out_reread_partitions:
if (partscan)
loop_reread_partitions(lo);
@@ -1352,7 +1368,9 @@ loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
}
static int
-loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
+loop_set_status_old(struct loop_device *lo, blk_mode_t mode,
+ struct block_device *bdev,
+ const struct loop_info __user *arg)
{
struct loop_info info;
struct loop_info64 info64;
@@ -1360,17 +1378,19 @@ loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
if (copy_from_user(&info, arg, sizeof (struct loop_info)))
return -EFAULT;
loop_info64_from_old(&info, &info64);
- return loop_set_status(lo, &info64);
+ return loop_set_status(lo, mode, bdev, &info64);
}
static int
-loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
+loop_set_status64(struct loop_device *lo, blk_mode_t mode,
+ struct block_device *bdev,
+ const struct loop_info64 __user *arg)
{
struct loop_info64 info64;
if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
return -EFAULT;
- return loop_set_status(lo, &info64);
+ return loop_set_status(lo, mode, bdev, &info64);
}
static int
@@ -1549,14 +1569,14 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
case LOOP_SET_STATUS:
err = -EPERM;
if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
- err = loop_set_status_old(lo, argp);
+ err = loop_set_status_old(lo, mode, bdev, argp);
break;
case LOOP_GET_STATUS:
return loop_get_status_old(lo, argp);
case LOOP_SET_STATUS64:
err = -EPERM;
if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
- err = loop_set_status64(lo, argp);
+ err = loop_set_status64(lo, mode, bdev, argp);
break;
case LOOP_GET_STATUS64:
return loop_get_status64(lo, argp);
@@ -1650,8 +1670,9 @@ loop_info64_to_compat(const struct loop_info64 *info64,
}
static int
-loop_set_status_compat(struct loop_device *lo,
- const struct compat_loop_info __user *arg)
+loop_set_status_compat(struct loop_device *lo, blk_mode_t mode,
+ struct block_device *bdev,
+ const struct compat_loop_info __user *arg)
{
struct loop_info64 info64;
int ret;
@@ -1659,7 +1680,7 @@ loop_set_status_compat(struct loop_device *lo,
ret = loop_info64_from_compat(arg, &info64);
if (ret < 0)
return ret;
- return loop_set_status(lo, &info64);
+ return loop_set_status(lo, mode, bdev, &info64);
}
static int
@@ -1685,7 +1706,7 @@ static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
switch(cmd) {
case LOOP_SET_STATUS:
- err = loop_set_status_compat(lo,
+ err = loop_set_status_compat(lo, mode, bdev,
(const struct compat_loop_info __user *)arg);
break;
case LOOP_GET_STATUS:
@@ -1743,7 +1764,7 @@ static void lo_release(struct gendisk *disk)
mutex_lock(&lo->lo_mutex);
if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR))
- lo->lo_state = Lo_rundown;
+ WRITE_ONCE(lo->lo_state, Lo_rundown);
need_clear = (lo->lo_state == Lo_rundown);
mutex_unlock(&lo->lo_mutex);
@@ -1858,7 +1879,7 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
- if (lo->lo_state != Lo_bound)
+ if (data_race(READ_ONCE(lo->lo_state)) != Lo_bound)
return BLK_STS_IOERR;
switch (req_op(rq)) {
@@ -2016,7 +2037,7 @@ static int loop_add(int i)
lo->worker_tree = RB_ROOT;
INIT_LIST_HEAD(&lo->idle_worker_list);
timer_setup(&lo->timer, loop_free_idle_workers_timer, TIMER_DEFERRABLE);
- lo->lo_state = Lo_unbound;
+ WRITE_ONCE(lo->lo_state, Lo_unbound);
err = mutex_lock_killable(&loop_ctl_mutex);
if (err)
@@ -2174,7 +2195,7 @@ static int loop_control_remove(int idx)
goto mark_visible;
}
/* Mark this loop device as no more bound, but not quite unbound yet */
- lo->lo_state = Lo_deleting;
+ WRITE_ONCE(lo->lo_state, Lo_deleting);
mutex_unlock(&lo->lo_mutex);
loop_remove(lo);
@@ -2197,8 +2218,12 @@ static int loop_control_get_free(int idx)
if (ret)
return ret;
idr_for_each_entry(&loop_index_idr, lo, id) {
- /* Hitting a race results in creating a new loop device which is harmless. */
- if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound)
+ /*
+ * Hitting a race results in creating a new loop device
+ * which is harmless.
+ */
+ if (lo->idr_visible &&
+ data_race(READ_ONCE(lo->lo_state)) == Lo_unbound)
goto found;
}
mutex_unlock(&loop_ctl_mutex);
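Two patterns recur in the loop.c changes above. All writers of lo->lo_state now use WRITE_ONCE() and the lockless reader in the I/O path uses data_race(READ_ONCE(...)), making the unsynchronized access explicit. In addition, loop_set_status() temporarily claims the block device exclusively when the caller did not open it with BLK_OPEN_EXCL, so the configuration cannot change underneath an exclusive owner. A minimal sketch of that claim/abort pairing, mirroring the hunk (unrelated steps elided):

    if (!(mode & BLK_OPEN_EXCL)) {
        err = bd_prepare_to_claim(bdev, loop_set_status, NULL);
        if (err)
            return err;
    }

    /* ... update the device state under lo->lo_mutex ... */

    if (!(mode & BLK_OPEN_EXCL))
        bd_abort_claiming(bdev, loop_set_status);
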
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index f1409e54010a..d1c354636315 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1423,9 +1423,11 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
goto out_alloc;
}
- ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
- GFP_KERNEL);
- if (ret < 0) {
+ dev->clt_device_id = ida_alloc_max(&index_ida,
+ (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
+ GFP_KERNEL);
+ if (dev->clt_device_id < 0) {
+ ret = dev->clt_device_id;
pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
pathname, sess->sessname, ret);
goto out_queues;
@@ -1434,10 +1436,9 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
dev->pathname = kstrdup(pathname, GFP_KERNEL);
if (!dev->pathname) {
ret = -ENOMEM;
- goto out_queues;
+ goto out_ida;
}
- dev->clt_device_id = ret;
dev->sess = sess;
dev->access_mode = access_mode;
dev->nr_poll_queues = nr_poll_queues;
@@ -1453,6 +1454,8 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
return dev;
+out_ida:
+ ida_free(&index_ida, dev->clt_device_id);
out_queues:
kfree(dev->hw_queues);
out_alloc:
diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
index a48e040abe63..fbc1ed766025 100644
--- a/drivers/block/rnbd/rnbd-clt.h
+++ b/drivers/block/rnbd/rnbd-clt.h
@@ -112,7 +112,7 @@ struct rnbd_clt_dev {
struct rnbd_queue *hw_queues;
u32 device_id;
/* local Idr index - used to track minor number allocations. */
- u32 clt_device_id;
+ int clt_device_id;
struct mutex lock;
enum rnbd_clt_dev_state dev_state;
refcount_t refcount;
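The rnbd-clt changes store the ida_alloc_max() result directly in clt_device_id, whose type becomes int because the helper returns either a non-negative ID or a negative errno, and they add an out_ida unwind label so a later allocation failure releases the ID again. A condensed sketch of the pairing, using the names from the hunk with the goto-based error paths flattened for brevity:

    dev->clt_device_id = ida_alloc_max(&index_ida,
                                       (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
                                       GFP_KERNEL);
    if (dev->clt_device_id < 0)
        return dev->clt_device_id;              /* negative errno from ida_alloc_max() */

    dev->pathname = kstrdup(pathname, GFP_KERNEL);
    if (!dev->pathname) {
        ida_free(&index_ida, dev->clt_device_id);   /* undo the ID allocation */
        return -ENOMEM;
    }
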
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index df9831783a13..f6e5a0766721 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -237,6 +237,7 @@ struct ublk_device {
bool canceling;
pid_t ublksrv_tgid;
struct delayed_work exit_work;
+ struct work_struct partition_scan_work;
struct ublk_queue *queues[];
};
@@ -1080,12 +1081,20 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu);
}
+static void ublk_end_request(struct request *req, blk_status_t error)
+{
+ local_bh_disable();
+ blk_mq_end_request(req, error);
+ local_bh_enable();
+}
+
/* todo: handle partial completion */
static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
bool need_map)
{
unsigned int unmapped_bytes;
blk_status_t res = BLK_STS_OK;
+ bool requeue;
/* failed read IO if nothing is read */
if (!io->res && req_op(req) == REQ_OP_READ)
@@ -1117,14 +1126,30 @@ static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
if (unlikely(unmapped_bytes < io->res))
io->res = unmapped_bytes;
- if (blk_update_request(req, BLK_STS_OK, io->res))
+ /*
+ * Run bio->bi_end_io() with softirqs disabled. If the final fput
+ * happens off this path, then that will prevent ublk's blkdev_release()
+ * from being called on current's task work, see fput() implementation.
+ *
+ * Otherwise, ublk server may not provide forward progress in case of
+ * reading the partition table from bdev_open() with disk->open_mutex
+ * held, and causes dead lock as we could already be holding
+ * disk->open_mutex here.
+ *
+ * Preferably we would not be doing IO with a mutex held that is also
+ * used for release, but this work-around will suffice for now.
+ */
+ local_bh_disable();
+ requeue = blk_update_request(req, BLK_STS_OK, io->res);
+ local_bh_enable();
+ if (requeue)
blk_mq_requeue_request(req, true);
else if (likely(!blk_should_fake_timeout(req->q)))
__blk_mq_end_request(req, BLK_STS_OK);
return;
exit:
- blk_mq_end_request(req, res);
+ ublk_end_request(req, res);
}
static struct io_uring_cmd *__ublk_prep_compl_io_cmd(struct ublk_io *io,
@@ -1164,7 +1189,7 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
if (ublk_nosrv_dev_should_queue_io(ubq->dev))
blk_mq_requeue_request(rq, false);
else
- blk_mq_end_request(rq, BLK_STS_IOERR);
+ ublk_end_request(rq, BLK_STS_IOERR);
}
static void
@@ -1209,7 +1234,7 @@ __ublk_do_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
ublk_auto_buf_reg_fallback(ubq, req->tag);
return AUTO_BUF_REG_FALLBACK;
}
- blk_mq_end_request(req, BLK_STS_IOERR);
+ ublk_end_request(req, BLK_STS_IOERR);
return AUTO_BUF_REG_FAIL;
}
@@ -1558,6 +1583,27 @@ static void ublk_put_disk(struct gendisk *disk)
put_device(disk_to_dev(disk));
}
+static void ublk_partition_scan_work(struct work_struct *work)
+{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, partition_scan_work);
+ /* Hold disk reference to prevent UAF during concurrent teardown */
+ struct gendisk *disk = ublk_get_disk(ub);
+
+ if (!disk)
+ return;
+
+ if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
+ &disk->state)))
+ goto out;
+
+ mutex_lock(&disk->open_mutex);
+ bdev_disk_changed(disk, false);
+ mutex_unlock(&disk->open_mutex);
+out:
+ ublk_put_disk(disk);
+}
+
/*
* Use this function to ensure that ->canceling is consistently set for
* the device and all queues. Do not set these flags directly.
@@ -1583,8 +1629,7 @@ static bool ublk_check_and_reset_active_ref(struct ublk_device *ub)
{
int i, j;
- if (!(ub->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY |
- UBLK_F_AUTO_BUF_REG)))
+ if (!ublk_dev_need_req_ref(ub))
return false;
for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
@@ -2003,6 +2048,7 @@ static void ublk_stop_dev(struct ublk_device *ub)
mutex_lock(&ub->mutex);
ublk_stop_dev_unlocked(ub);
mutex_unlock(&ub->mutex);
+ cancel_work_sync(&ub->partition_scan_work);
ublk_cancel_dev(ub);
}
@@ -2931,9 +2977,17 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,
ublk_apply_params(ub);
- /* don't probe partitions if any daemon task is un-trusted */
- if (ub->unprivileged_daemons)
- set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
+ /*
+ * Suppress partition scan to avoid potential IO hang.
+ *
+ * If ublk server error occurs during partition scan, the IO may
+ * wait while holding ub->mutex, which can deadlock with other
+ * operations that need the mutex. Defer partition scan to async
+ * work.
+ * For unprivileged daemons, keep GD_SUPPRESS_PART_SCAN set
+ * permanently.
+ */
+ set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
ublk_get_device(ub);
ub->dev_info.state = UBLK_S_DEV_LIVE;
@@ -2950,6 +3004,10 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,
set_bit(UB_STATE_USED, &ub->state);
+ /* Schedule async partition scan for trusted daemons */
+ if (!ub->unprivileged_daemons)
+ schedule_work(&ub->partition_scan_work);
+
out_put_cdev:
if (ret) {
ublk_detach_disk(ub);
@@ -3115,6 +3173,7 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
mutex_init(&ub->mutex);
spin_lock_init(&ub->lock);
mutex_init(&ub->cancel_mutex);
+ INIT_WORK(&ub->partition_scan_work, ublk_partition_scan_work);
ret = ublk_alloc_dev_number(ub, header->dev_id);
if (ret < 0)
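The ublk changes defer the partition scan to a work item so that bdev_disk_changed() and disk->open_mutex are never taken while ub->mutex is held on the start-device path, and request completions run with softirqs disabled so a final fput() cannot recurse into ublk's blkdev_release() from task work. The work item follows the usual workqueue lifecycle; condensed from the hunks above, with the same field names as the patch:

    /* ublk_ctrl_add_dev(): prepare the work item once per device. */
    INIT_WORK(&ub->partition_scan_work, ublk_partition_scan_work);

    /* ublk_ctrl_start_dev(): keep the scan suppressed, then rescan from a worker. */
    set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
    if (!ub->unprivileged_daemons)
        schedule_work(&ub->partition_scan_work);

    /* ublk_stop_dev(): make sure no scan is still running before teardown. */
    cancel_work_sync(&ub->partition_scan_work);
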
diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c
index 77bd6081b244..8e334f5025fc 100644
--- a/drivers/block/zloop.c
+++ b/drivers/block/zloop.c
@@ -697,7 +697,7 @@ static blk_status_t zloop_queue_rq(struct blk_mq_hw_ctx *hctx,
struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq);
struct zloop_device *zlo = rq->q->queuedata;
- if (zlo->state == Zlo_deleting)
+ if (data_race(READ_ONCE(zlo->state)) == Zlo_deleting)
return BLK_STS_IOERR;
/*
@@ -1002,7 +1002,7 @@ static int zloop_ctl_add(struct zloop_options *opts)
ret = -ENOMEM;
goto out;
}
- zlo->state = Zlo_creating;
+ WRITE_ONCE(zlo->state, Zlo_creating);
ret = mutex_lock_killable(&zloop_ctl_mutex);
if (ret)
@@ -1113,7 +1113,7 @@ static int zloop_ctl_add(struct zloop_options *opts)
}
mutex_lock(&zloop_ctl_mutex);
- zlo->state = Zlo_live;
+ WRITE_ONCE(zlo->state, Zlo_live);
mutex_unlock(&zloop_ctl_mutex);
pr_info("zloop: device %d, %u zones of %llu MiB, %u B block size\n",
@@ -1177,7 +1177,7 @@ static int zloop_ctl_remove(struct zloop_options *opts)
ret = -EINVAL;
} else {
idr_remove(&zloop_index_idr, zlo->id);
- zlo->state = Zlo_deleting;
+ WRITE_ONCE(zlo->state, Zlo_deleting);
}
mutex_unlock(&zloop_ctl_mutex);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8ed3883ab8ee..ded09e94d296 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -4052,7 +4052,7 @@ static int btusb_probe(struct usb_interface *intf,
return -ENODEV;
}
- data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -4075,8 +4075,10 @@ static int btusb_probe(struct usb_interface *intf,
}
}
- if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep)
+ if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) {
+ kfree(data);
return -ENODEV;
+ }
if (id->driver_info & BTUSB_AMP) {
data->cmdreq_type = USB_TYPE_CLASS | 0x01;
@@ -4131,8 +4133,10 @@ static int btusb_probe(struct usb_interface *intf,
data->recv_acl = hci_recv_frame;
hdev = hci_alloc_dev_priv(priv_size);
- if (!hdev)
+ if (!hdev) {
+ kfree(data);
return -ENOMEM;
+ }
hdev->bus = HCI_USB;
hci_set_drvdata(hdev, data);
@@ -4406,6 +4410,7 @@ out_free_dev:
if (data->reset_gpio)
gpiod_put(data->reset_gpio);
hci_free_dev(hdev);
+ kfree(data);
return err;
}
@@ -4454,6 +4459,7 @@ static void btusb_disconnect(struct usb_interface *intf)
}
hci_free_dev(hdev);
+ kfree(data);
}
#ifdef CONFIG_PM
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index ce81fc4e1ae7..573b2fe93253 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -1192,6 +1192,7 @@ static irqreturn_t quad8_irq_handler(int irq, void *private)
{
struct counter_device *counter = private;
struct quad8 *const priv = counter_priv(counter);
+ struct device *dev = counter->parent;
unsigned int status;
unsigned long irq_status;
unsigned long channel;
@@ -1200,8 +1201,11 @@ static irqreturn_t quad8_irq_handler(int irq, void *private)
int ret;
ret = regmap_read(priv->map, QUAD8_INTERRUPT_STATUS, &status);
- if (ret)
- return ret;
+ if (ret) {
+ dev_WARN_ONCE(dev, true,
+ "Attempt to read Interrupt Status Register failed: %d\n", ret);
+ return IRQ_NONE;
+ }
if (!status)
return IRQ_NONE;
@@ -1223,8 +1227,9 @@ static irqreturn_t quad8_irq_handler(int irq, void *private)
break;
default:
/* should never reach this path */
- WARN_ONCE(true, "invalid interrupt trigger function %u configured for channel %lu\n",
- flg_pins, channel);
+ dev_WARN_ONCE(dev, true,
+ "invalid interrupt trigger function %u configured for channel %lu\n",
+ flg_pins, channel);
continue;
}
@@ -1232,8 +1237,11 @@ static irqreturn_t quad8_irq_handler(int irq, void *private)
}
ret = regmap_write(priv->map, QUAD8_CHANNEL_OPERATION, CLEAR_PENDING_INTERRUPTS);
- if (ret)
- return ret;
+ if (ret) {
+ dev_WARN_ONCE(dev, true,
+ "Attempt to clear pending interrupts by writing to Channel Operation Register failed: %d\n", ret);
+ return IRQ_HANDLED;
+ }
return IRQ_HANDLED;
}
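The 104-quad-8 change reflects the rule that an interrupt handler must return an irqreturn_t value, never a negative errno: register I/O failures are now reported with dev_WARN_ONCE() and mapped to IRQ_NONE (nothing handled) or IRQ_HANDLED (the work was already done). A minimal sketch of that shape, with hypothetical register and structure names:

    static irqreturn_t example_irq_handler(int irq, void *private)
    {
        struct example_priv *priv = private;    /* hypothetical driver data */
        unsigned int status;
        int ret;

        ret = regmap_read(priv->map, EXAMPLE_IRQ_STATUS, &status); /* hypothetical register */
        if (ret) {
            dev_WARN_ONCE(priv->dev, true, "status read failed: %d\n", ret);
            return IRQ_NONE;    /* never return ret from an IRQ handler */
        }
        if (!status)
            return IRQ_NONE;

        /* ... handle and acknowledge the interrupt ... */
        return IRQ_HANDLED;
    }
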
diff --git a/drivers/counter/interrupt-cnt.c b/drivers/counter/interrupt-cnt.c
index 6c0c1d2d7027..e6100b5fb082 100644
--- a/drivers/counter/interrupt-cnt.c
+++ b/drivers/counter/interrupt-cnt.c
@@ -229,8 +229,7 @@ static int interrupt_cnt_probe(struct platform_device *pdev)
irq_set_status_flags(priv->irq, IRQ_NOAUTOEN);
ret = devm_request_irq(dev, priv->irq, interrupt_cnt_isr,
- IRQF_TRIGGER_RISING | IRQF_NO_THREAD,
- dev_name(dev), counter);
+ IRQF_TRIGGER_RISING, dev_name(dev), counter);
if (ret)
return ret;
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index a1d11ecd1ac8..b06a43143d23 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -219,11 +219,12 @@ static bool __init cpu0_node_has_opp_v2_prop(void)
static int __init cpufreq_dt_platdev_init(void)
{
- const void *data;
+ const void *data = NULL;
- data = of_machine_get_match_data(allowlist);
- if (data)
+ if (of_machine_device_match(allowlist)) {
+ data = of_machine_get_match_data(allowlist);
goto create_pdev;
+ }
if (cpu0_node_has_opp_v2_prop() && !of_machine_device_match(blocklist))
goto create_pdev;
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index f8bfff5dd0bd..d47bf06a90f7 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -991,7 +991,7 @@ static void qm_get_complete_eqe_num(struct hisi_qm *qm)
return;
poll_data = &qm->poll_data[cqn];
- while (QM_EQE_PHASE(dw0) != qm->status.eqc_phase) {
+ do {
poll_data->qp_finish_id[eqe_num] = dw0 & QM_EQE_CQN_MASK;
eqe_num++;
@@ -1004,11 +1004,10 @@ static void qm_get_complete_eqe_num(struct hisi_qm *qm)
qm->status.eq_head++;
}
- if (eqe_num == (eq_depth >> 1) - 1)
- break;
-
dw0 = le32_to_cpu(eqe->dw0);
- }
+ if (QM_EQE_PHASE(dw0) != qm->status.eqc_phase)
+ break;
+ } while (eqe_num < (eq_depth >> 1) - 1);
poll_data->eqe_num = eqe_num;
queue_work(qm->wq, &poll_data->work);
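The qm.c hunk turns a check-then-consume while loop into a consume-then-check do/while: the entry that triggered the call is always processed, the phase bit is tested on the next entry before iterating, and the batch limit moves into the loop condition. The resulting loop shape, restated with placeholder helpers:

    do {
        consume_entry(eqe);                 /* placeholder for the per-EQE work */
        eqe_num++;
        eqe = next_entry(qm, eqe);          /* placeholder for the head advance */

        dw0 = le32_to_cpu(eqe->dw0);
        if (QM_EQE_PHASE(dw0) != qm->status.eqc_phase)
            break;                          /* no further valid entry to process */
    } while (eqe_num < (eq_depth >> 1) - 1);    /* cap the batch size */
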
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index 11728cf32653..a5964fd8204c 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -41,8 +41,6 @@ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
adf_error_notifier(accel_dev);
adf_pf2vf_notify_fatal_error(accel_dev);
adf_dev_restarting_notify(accel_dev);
- adf_pf2vf_notify_restarting(accel_dev);
- adf_pf2vf_wait_for_restarting_complete(accel_dev);
pci_clear_master(pdev);
adf_dev_down(accel_dev);
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index ea31ac7ac1ca..e59053738a43 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -36,6 +36,8 @@
static char driver_name[] = KBUILD_MODNAME;
+#define RCV_BUFFER_SIZE (16 * 1024)
+
/* this is the physical layout of a PCL, its size is 128 bytes */
struct pcl {
__le32 next;
@@ -517,16 +519,14 @@ remove_card(struct pci_dev *dev)
lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
lynx->rcv_pcl, lynx->rcv_pcl_bus);
- dma_free_coherent(&lynx->pci_device->dev, PAGE_SIZE, lynx->rcv_buffer,
- lynx->rcv_buffer_bus);
+ dma_free_coherent(&lynx->pci_device->dev, RCV_BUFFER_SIZE,
+ lynx->rcv_buffer, lynx->rcv_buffer_bus);
iounmap(lynx->registers);
pci_disable_device(dev);
lynx_put(lynx);
}
-#define RCV_BUFFER_SIZE (16 * 1024)
-
static int
add_card(struct pci_dev *dev, const struct pci_device_id *unused)
{
@@ -680,7 +680,7 @@ fail_deallocate_buffers:
dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
lynx->rcv_pcl, lynx->rcv_pcl_bus);
if (lynx->rcv_buffer)
- dma_free_coherent(&lynx->pci_device->dev, PAGE_SIZE,
+ dma_free_coherent(&lynx->pci_device->dev, RCV_BUFFER_SIZE,
lynx->rcv_buffer, lynx->rcv_buffer_bus);
iounmap(lynx->registers);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index a9070d00b833..55452e61af31 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -73,6 +73,7 @@ struct mm_struct efi_mm = {
MMAP_LOCK_INITIALIZER(efi_mm)
.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
+ .user_ns = &init_user_ns,
.cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
#ifdef CONFIG_SCHED_MM_CID
.mm_cid.lock = __RAW_SPIN_LOCK_UNLOCKED(efi_mm.mm_cid.lock),
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 72d74436a7a4..80dc8cfeb33e 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -513,15 +513,15 @@ efi_status_t efi_setup_graphics(struct screen_info *si, struct edid_info *edid)
status = efi_bs_call(handle_protocol, handle, &EFI_EDID_ACTIVE_PROTOCOL_GUID,
(void **)&active_edid);
if (status == EFI_SUCCESS) {
- gop_size_of_edid = active_edid->size_of_edid;
- gop_edid = active_edid->edid;
+ gop_size_of_edid = efi_table_attr(active_edid, size_of_edid);
+ gop_edid = efi_table_attr(active_edid, edid);
} else {
status = efi_bs_call(handle_protocol, handle,
&EFI_EDID_DISCOVERED_PROTOCOL_GUID,
(void **)&discovered_edid);
if (status == EFI_SUCCESS) {
- gop_size_of_edid = discovered_edid->size_of_edid;
- gop_edid = discovered_edid->edid;
+ gop_size_of_edid = efi_table_attr(discovered_edid, size_of_edid);
+ gop_edid = efi_table_attr(discovered_edid, edid);
}
}
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index 5d677bcfccf2..2ad3c239367b 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cleanup.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -241,23 +242,17 @@ static int it87_gpio_direction_out(struct gpio_chip *chip,
mask = 1 << (gpio_num % 8);
group = (gpio_num / 8);
- spin_lock(&it87_gpio->lock);
+ guard(spinlock)(&it87_gpio->lock);
rc = superio_enter();
if (rc)
- goto exit;
+ return rc;
/* set the output enable bit */
superio_set_mask(mask, group + it87_gpio->output_base);
rc = it87_gpio_set(chip, gpio_num, val);
- if (rc)
- goto exit;
-
superio_exit();
-
-exit:
- spin_unlock(&it87_gpio->lock);
return rc;
}
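The gpio-it87 conversion replaces the explicit spin_lock()/spin_unlock() pair and its goto-based unwinding with scope-based locking from <linux/cleanup.h>: guard(spinlock)(&lock) takes the lock and releases it automatically when the enclosing scope is left, so error paths can simply return. A minimal sketch of the idiom, with hypothetical helpers:

    #include <linux/cleanup.h>
    #include <linux/spinlock.h>

    static int example_direction_out(struct example_gpio *priv)    /* hypothetical */
    {
        int rc;

        guard(spinlock)(&priv->lock);       /* unlocked automatically on every return */

        rc = enter_config_mode(priv);       /* hypothetical helper */
        if (rc)
            return rc;                      /* no unlock label needed */

        set_output_enable(priv);            /* hypothetical helper */
        exit_config_mode(priv);
        return 0;
    }
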
diff --git a/drivers/gpio/gpio-mpsse.c b/drivers/gpio/gpio-mpsse.c
index ace652ba4df1..12191aeb6566 100644
--- a/drivers/gpio/gpio-mpsse.c
+++ b/drivers/gpio/gpio-mpsse.c
@@ -548,6 +548,13 @@ static void gpio_mpsse_ida_remove(void *data)
ida_free(&gpio_mpsse_ida, priv->id);
}
+static void gpio_mpsse_usb_put_dev(void *data)
+{
+ struct mpsse_priv *priv = data;
+
+ usb_put_dev(priv->udev);
+}
+
static int mpsse_init_valid_mask(struct gpio_chip *chip,
unsigned long *valid_mask,
unsigned int ngpios)
@@ -592,6 +599,10 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
INIT_LIST_HEAD(&priv->workers);
priv->udev = usb_get_dev(interface_to_usbdev(interface));
+ err = devm_add_action_or_reset(dev, gpio_mpsse_usb_put_dev, priv);
+ if (err)
+ return err;
+
priv->intf = interface;
priv->intf_id = interface->cur_altsetting->desc.bInterfaceNumber;
@@ -713,7 +724,6 @@ static void gpio_mpsse_disconnect(struct usb_interface *intf)
priv->intf = NULL;
usb_set_intfdata(intf, NULL);
- usb_put_dev(priv->udev);
}
static struct usb_driver gpio_mpsse_driver = {
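In gpio-mpsse, the usb_get_dev() reference taken in probe is now paired with usb_put_dev() through devm_add_action_or_reset(): the action is released automatically when a later probe step fails or when the interface is unbound, and it runs immediately if the registration itself fails, which is why the explicit usb_put_dev() in gpio_mpsse_disconnect() is dropped. A condensed sketch of the pairing, using the names from the hunk:

    priv->udev = usb_get_dev(interface_to_usbdev(interface));

    /*
     * gpio_mpsse_usb_put_dev() calls usb_put_dev(priv->udev); on
     * registration failure the action is invoked right away.
     */
    err = devm_add_action_or_reset(dev, gpio_mpsse_usb_put_dev, priv);
    if (err)
        return err;     /* reference already dropped by the reset action */
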
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 0a3916cc2772..8727ae54bc57 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -943,14 +943,35 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
DECLARE_BITMAP(old_stat, MAX_LINE);
DECLARE_BITMAP(cur_stat, MAX_LINE);
DECLARE_BITMAP(new_stat, MAX_LINE);
+ DECLARE_BITMAP(int_stat, MAX_LINE);
DECLARE_BITMAP(trigger, MAX_LINE);
DECLARE_BITMAP(edges, MAX_LINE);
int ret;
+ if (chip->driver_data & PCA_PCAL) {
+ /* Read INT_STAT before it is cleared by the input-port read. */
+ ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, int_stat);
+ if (ret)
+ return false;
+ }
+
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
if (ret)
return false;
+ if (chip->driver_data & PCA_PCAL) {
+ /* Detect short pulses via INT_STAT. */
+ bitmap_and(trigger, int_stat, chip->irq_mask, gc->ngpio);
+
+ /* Apply filter for rising/falling edge selection. */
+ bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise,
+ cur_stat, gc->ngpio);
+
+ bitmap_and(int_stat, new_stat, trigger, gc->ngpio);
+ } else {
+ bitmap_zero(int_stat, gc->ngpio);
+ }
+
/* Remove output pins from the equation */
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
@@ -964,7 +985,8 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
if (bitmap_empty(chip->irq_trig_level_high, gc->ngpio) &&
bitmap_empty(chip->irq_trig_level_low, gc->ngpio)) {
- if (bitmap_empty(trigger, gc->ngpio))
+ if (bitmap_empty(trigger, gc->ngpio) &&
+ bitmap_empty(int_stat, gc->ngpio))
return false;
}
@@ -972,6 +994,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio);
bitmap_or(edges, old_stat, cur_stat, gc->ngpio);
bitmap_and(pending, edges, trigger, gc->ngpio);
+ bitmap_or(pending, pending, int_stat, gc->ngpio);
bitmap_and(cur_stat, new_stat, chip->irq_trig_level_high, gc->ngpio);
bitmap_and(cur_stat, cur_stat, chip->irq_mask, gc->ngpio);
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index 47174eb3ba76..bae2061f15fc 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -593,6 +593,7 @@ static int rockchip_gpiolib_register(struct rockchip_pin_bank *bank)
gc->ngpio = bank->nr_pins;
gc->label = bank->name;
gc->parent = bank->dev;
+ gc->can_sleep = true;
ret = gpiochip_add_data(gc, bank);
if (ret) {
diff --git a/drivers/gpio/gpiolib-shared.c b/drivers/gpio/gpiolib-shared.c
index ba4b718d40a0..17343fdc9758 100644
--- a/drivers/gpio/gpiolib-shared.c
+++ b/drivers/gpio/gpiolib-shared.c
@@ -38,8 +38,10 @@ struct gpio_shared_ref {
int dev_id;
/* Protects the auxiliary device struct and the lookup table. */
struct mutex lock;
+ struct lock_class_key lock_key;
struct auxiliary_device adev;
struct gpiod_lookup_table *lookup;
+ bool is_reset_gpio;
};
/* Represents a single GPIO pin. */
@@ -76,6 +78,60 @@ gpio_shared_find_entry(struct fwnode_handle *controller_node,
return NULL;
}
+static struct gpio_shared_ref *gpio_shared_make_ref(struct fwnode_handle *fwnode,
+ const char *con_id,
+ enum gpiod_flags flags)
+{
+ char *con_id_cpy __free(kfree) = NULL;
+
+ struct gpio_shared_ref *ref __free(kfree) = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref)
+ return NULL;
+
+ if (con_id) {
+ con_id_cpy = kstrdup(con_id, GFP_KERNEL);
+ if (!con_id_cpy)
+ return NULL;
+ }
+
+ ref->dev_id = ida_alloc(&gpio_shared_ida, GFP_KERNEL);
+ if (ref->dev_id < 0)
+ return NULL;
+
+ ref->flags = flags;
+ ref->con_id = no_free_ptr(con_id_cpy);
+ ref->fwnode = fwnode;
+ lockdep_register_key(&ref->lock_key);
+ mutex_init_with_key(&ref->lock, &ref->lock_key);
+
+ return no_free_ptr(ref);
+}
+
+static int gpio_shared_setup_reset_proxy(struct gpio_shared_entry *entry,
+ enum gpiod_flags flags)
+{
+ struct gpio_shared_ref *ref;
+
+ list_for_each_entry(ref, &entry->refs, list) {
+ if (ref->is_reset_gpio)
+ /* Already set-up. */
+ return 0;
+ }
+
+ ref = gpio_shared_make_ref(NULL, "reset", flags);
+ if (!ref)
+ return -ENOMEM;
+
+ ref->is_reset_gpio = true;
+
+ list_add_tail(&ref->list, &entry->refs);
+
+ pr_debug("Created a secondary shared GPIO reference for potential reset-gpio device for GPIO %u at %s\n",
+ entry->offset, fwnode_get_name(entry->fwnode));
+
+ return 0;
+}
+
/* Handle all special nodes that we should ignore. */
static bool gpio_shared_of_node_ignore(struct device_node *node)
{
@@ -106,6 +162,7 @@ static int gpio_shared_of_traverse(struct device_node *curr)
size_t con_id_len, suffix_len;
struct fwnode_handle *fwnode;
struct of_phandle_args args;
+ struct gpio_shared_ref *ref;
struct property *prop;
unsigned int offset;
const char *suffix;
@@ -138,6 +195,7 @@ static int gpio_shared_of_traverse(struct device_node *curr)
for (i = 0; i < count; i++) {
struct device_node *np __free(device_node) = NULL;
+ char *con_id __free(kfree) = NULL;
ret = of_parse_phandle_with_args(curr, prop->name,
"#gpio-cells", i,
@@ -182,15 +240,6 @@ static int gpio_shared_of_traverse(struct device_node *curr)
list_add_tail(&entry->list, &gpio_shared_list);
}
- struct gpio_shared_ref *ref __free(kfree) =
- kzalloc(sizeof(*ref), GFP_KERNEL);
- if (!ref)
- return -ENOMEM;
-
- ref->fwnode = fwnode_handle_get(of_fwnode_handle(curr));
- ref->flags = args.args[1];
- mutex_init(&ref->lock);
-
if (strends(prop->name, "gpios"))
suffix = "-gpios";
else if (strends(prop->name, "gpio"))
@@ -202,27 +251,32 @@ static int gpio_shared_of_traverse(struct device_node *curr)
/* We only set con_id if there's actually one. */
if (strcmp(prop->name, "gpios") && strcmp(prop->name, "gpio")) {
- ref->con_id = kstrdup(prop->name, GFP_KERNEL);
- if (!ref->con_id)
+ con_id = kstrdup(prop->name, GFP_KERNEL);
+ if (!con_id)
return -ENOMEM;
- con_id_len = strlen(ref->con_id);
+ con_id_len = strlen(con_id);
suffix_len = strlen(suffix);
- ref->con_id[con_id_len - suffix_len] = '\0';
+ con_id[con_id_len - suffix_len] = '\0';
}
- ref->dev_id = ida_alloc(&gpio_shared_ida, GFP_KERNEL);
- if (ref->dev_id < 0) {
- kfree(ref->con_id);
+ ref = gpio_shared_make_ref(fwnode_handle_get(of_fwnode_handle(curr)),
+ con_id, args.args[1]);
+ if (!ref)
return -ENOMEM;
- }
if (!list_empty(&entry->refs))
pr_debug("GPIO %u at %s is shared by multiple firmware nodes\n",
entry->offset, fwnode_get_name(entry->fwnode));
- list_add_tail(&no_free_ptr(ref)->list, &entry->refs);
+ list_add_tail(&ref->list, &entry->refs);
+
+ if (strcmp(prop->name, "reset-gpios") == 0) {
+ ret = gpio_shared_setup_reset_proxy(entry, args.args[1]);
+ if (ret)
+ return ret;
+ }
}
}
@@ -306,21 +360,17 @@ static bool gpio_shared_dev_is_reset_gpio(struct device *consumer,
struct fwnode_handle *reset_fwnode = dev_fwnode(consumer);
struct fwnode_reference_args ref_args, aux_args;
struct device *parent = consumer->parent;
+ struct gpio_shared_ref *real_ref;
bool match;
int ret;
+ lockdep_assert_held(&ref->lock);
+
/* The reset-gpio device must have a parent AND a firmware node. */
if (!parent || !reset_fwnode)
return false;
/*
- * FIXME: use device_is_compatible() once the reset-gpio drivers gains
- * a compatible string which it currently does not have.
- */
- if (!strstarts(dev_name(consumer), "reset.gpio."))
- return false;
-
- /*
* Parent of the reset-gpio auxiliary device is the GPIO chip whose
* fwnode we stored in the entry structure.
*/
@@ -328,33 +378,61 @@ static bool gpio_shared_dev_is_reset_gpio(struct device *consumer,
return false;
/*
- * The device associated with the shared reference's firmware node is
- * the consumer of the reset control exposed by the reset-gpio device.
- * It must have a "reset-gpios" property that's referencing the entry's
- * firmware node.
- *
- * The reference args must agree between the real consumer and the
- * auxiliary reset-gpio device.
+ * Now we need to find the actual pin we want to assign to this
+ * reset-gpio device. To that end: iterate over the list of references
+ * of this entry and see if there's one whose reset-gpios property's
+ * arguments match the ones from this consumer's node.
*/
- ret = fwnode_property_get_reference_args(ref->fwnode, "reset-gpios",
- NULL, 2, 0, &ref_args);
- if (ret)
- return false;
+ list_for_each_entry(real_ref, &entry->refs, list) {
+ if (real_ref == ref)
+ continue;
+
+ guard(mutex)(&real_ref->lock);
+
+ if (!real_ref->fwnode)
+ continue;
+
+ /*
+ * The device associated with the shared reference's firmware
+ * node is the consumer of the reset control exposed by the
+ * reset-gpio device. It must have a "reset-gpios" property
+ * that's referencing the entry's firmware node.
+ *
+ * The reference args must agree between the real consumer and
+ * the auxiliary reset-gpio device.
+ */
+ ret = fwnode_property_get_reference_args(real_ref->fwnode,
+ "reset-gpios",
+ NULL, 2, 0, &ref_args);
+ if (ret)
+ continue;
+
+ ret = fwnode_property_get_reference_args(reset_fwnode, "reset-gpios",
+ NULL, 2, 0, &aux_args);
+ if (ret) {
+ fwnode_handle_put(ref_args.fwnode);
+ continue;
+ }
+
+ match = ((ref_args.fwnode == entry->fwnode) &&
+ (aux_args.fwnode == entry->fwnode) &&
+ (ref_args.args[0] == aux_args.args[0]));
- ret = fwnode_property_get_reference_args(reset_fwnode, "reset-gpios",
- NULL, 2, 0, &aux_args);
- if (ret) {
fwnode_handle_put(ref_args.fwnode);
- return false;
- }
+ fwnode_handle_put(aux_args.fwnode);
+
+ if (!match)
+ continue;
- match = ((ref_args.fwnode == entry->fwnode) &&
- (aux_args.fwnode == entry->fwnode) &&
- (ref_args.args[0] == aux_args.args[0]));
+ /*
+ * Reuse the fwnode of the real device; next time we'll use it
+ * in the normal path.
+ */
+ ref->fwnode = fwnode_handle_get(reset_fwnode);
+ return true;
+ }
- fwnode_handle_put(ref_args.fwnode);
- fwnode_handle_put(aux_args.fwnode);
- return match;
+ return false;
}
#else
static bool gpio_shared_dev_is_reset_gpio(struct device *consumer,
@@ -365,24 +443,33 @@ static bool gpio_shared_dev_is_reset_gpio(struct device *consumer,
}
#endif /* CONFIG_RESET_GPIO */
-int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags)
+int gpio_shared_add_proxy_lookup(struct device *consumer, const char *con_id,
+ unsigned long lflags)
{
const char *dev_id = dev_name(consumer);
+ struct gpiod_lookup_table *lookup;
struct gpio_shared_entry *entry;
struct gpio_shared_ref *ref;
- struct gpiod_lookup_table *lookup __free(kfree) =
- kzalloc(struct_size(lookup, table, 2), GFP_KERNEL);
- if (!lookup)
- return -ENOMEM;
-
list_for_each_entry(entry, &gpio_shared_list, list) {
list_for_each_entry(ref, &entry->refs, list) {
- if (!device_match_fwnode(consumer, ref->fwnode) &&
- !gpio_shared_dev_is_reset_gpio(consumer, entry, ref))
+ guard(mutex)(&ref->lock);
+
+ /*
+ * FIXME: use device_is_compatible() once the reset-gpio
+ * driver gains a compatible string, which it currently
+ * does not have.
+ */
+ if (!ref->fwnode && strstarts(dev_name(consumer), "reset.gpio.")) {
+ if (!gpio_shared_dev_is_reset_gpio(consumer, entry, ref))
+ continue;
+ } else if (!device_match_fwnode(consumer, ref->fwnode)) {
continue;
+ }
- guard(mutex)(&ref->lock);
+ if ((!con_id && ref->con_id) || (con_id && !ref->con_id) ||
+ (con_id && ref->con_id && strcmp(con_id, ref->con_id) != 0))
+ continue;
/* We've already done that on a previous request. */
if (ref->lookup)
@@ -395,6 +482,10 @@ int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags)
if (!key)
return -ENOMEM;
+ lookup = kzalloc(struct_size(lookup, table, 2), GFP_KERNEL);
+ if (!lookup)
+ return -ENOMEM;
+
pr_debug("Adding machine lookup entry for a shared GPIO for consumer %s, with key '%s' and con_id '%s'\n",
dev_id, key, ref->con_id ?: "none");
@@ -402,7 +493,7 @@ int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags)
lookup->table[0] = GPIO_LOOKUP(no_free_ptr(key), 0,
ref->con_id, lflags);
- ref->lookup = no_free_ptr(lookup);
+ ref->lookup = lookup;
gpiod_add_lookup_table(ref->lookup);
return 0;
@@ -466,8 +557,9 @@ int gpio_device_setup_shared(struct gpio_device *gdev)
entry->offset, gpio_device_get_label(gdev));
list_for_each_entry(ref, &entry->refs, list) {
- pr_debug("Setting up a shared GPIO entry for %s\n",
- fwnode_get_name(ref->fwnode));
+ pr_debug("Setting up a shared GPIO entry for %s (con_id: '%s')\n",
+ fwnode_get_name(ref->fwnode) ?: "(no fwnode)",
+ ref->con_id ?: "(none)");
ret = gpio_shared_make_adev(gdev, entry, ref);
if (ret)
@@ -487,15 +579,6 @@ void gpio_device_teardown_shared(struct gpio_device *gdev)
if (!device_match_fwnode(&gdev->dev, entry->fwnode))
continue;
- /*
- * For some reason if we call synchronize_srcu() in GPIO core,
- * descent here and take this mutex and then recursively call
- * synchronize_srcu() again from gpiochip_remove() (which is
- * totally fine) called after gpio_shared_remove_adev(),
- * lockdep prints a false positive deadlock splat. Disable
- * lockdep here.
- */
- lockdep_off();
list_for_each_entry(ref, &entry->refs, list) {
guard(mutex)(&ref->lock);
@@ -508,7 +591,6 @@ void gpio_device_teardown_shared(struct gpio_device *gdev)
gpio_shared_remove_adev(&ref->adev);
}
- lockdep_on();
}
}
@@ -604,6 +686,7 @@ static void gpio_shared_drop_ref(struct gpio_shared_ref *ref)
{
list_del(&ref->list);
mutex_destroy(&ref->lock);
+ lockdep_unregister_key(&ref->lock_key);
kfree(ref->con_id);
ida_free(&gpio_shared_ida, ref->dev_id);
fwnode_handle_put(ref->fwnode);
@@ -635,12 +718,38 @@ static void __init gpio_shared_teardown(void)
}
}
+static bool gpio_shared_entry_is_really_shared(struct gpio_shared_entry *entry)
+{
+ size_t num_nodes = list_count_nodes(&entry->refs);
+ struct gpio_shared_ref *ref;
+
+ if (num_nodes <= 1)
+ return false;
+
+ if (num_nodes > 2)
+ return true;
+
+ /* Exactly two references: */
+ list_for_each_entry(ref, &entry->refs, list) {
+ /*
+ * Corner-case: the second reference comes from the potential
+ * reset-gpio instance. However, this pin is not really shared:
+ * if it were, the entry would have three references. Avoid
+ * creating unnecessary proxies.
+ */
+ if (ref->is_reset_gpio)
+ return false;
+ }
+
+ return true;
+}
+
static void gpio_shared_free_exclusive(void)
{
struct gpio_shared_entry *entry, *epos;
list_for_each_entry_safe(entry, epos, &gpio_shared_list, list) {
- if (list_count_nodes(&entry->refs) > 1)
+ if (gpio_shared_entry_is_really_shared(entry))
continue;
gpio_shared_drop_ref(list_first_entry(&entry->refs,
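For context on what gpio_shared_add_proxy_lookup() ends up registering: each per-con_id reference gets its own machine lookup entry pointing the consumer at line 0 of its proxy chip. A hand-written equivalent might look roughly like the sketch below; the key, dev_id and con_id strings are purely hypothetical, only the GPIO_LOOKUP()/gpiod_add_lookup_table() calls are real machine.h API.

#include <linux/gpio/machine.h>

/* Hypothetical static equivalent of one run-time proxy lookup entry. */
static struct gpiod_lookup_table shared_gpio_proxy_lookup = {
	.dev_id = "some-consumer.0",		/* dev_name() of the consumer */
	.table = {
		/* line 0 of the proxy chip, requested under con_id "reset" */
		GPIO_LOOKUP("gpio-proxy.1", 0, "reset", GPIO_ACTIVE_LOW),
		{ }				/* sentinel */
	},
};

static void shared_gpio_proxy_lookup_register(void)
{
	gpiod_add_lookup_table(&shared_gpio_proxy_lookup);
}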
diff --git a/drivers/gpio/gpiolib-shared.h b/drivers/gpio/gpiolib-shared.h
index 667dbdff3585..40568ef7364c 100644
--- a/drivers/gpio/gpiolib-shared.h
+++ b/drivers/gpio/gpiolib-shared.h
@@ -16,7 +16,8 @@ struct device;
int gpio_device_setup_shared(struct gpio_device *gdev);
void gpio_device_teardown_shared(struct gpio_device *gdev);
-int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags);
+int gpio_shared_add_proxy_lookup(struct device *consumer, const char *con_id,
+ unsigned long lflags);
#else
@@ -28,6 +29,7 @@ static inline int gpio_device_setup_shared(struct gpio_device *gdev)
static inline void gpio_device_teardown_shared(struct gpio_device *gdev) { }
static inline int gpio_shared_add_proxy_lookup(struct device *consumer,
+ const char *con_id,
unsigned long lflags)
{
return 0;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 91e0c384f34a..dcf427d3cf43 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1105,6 +1105,18 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gdev->ngpio = gc->ngpio;
gdev->can_sleep = gc->can_sleep;
+ rwlock_init(&gdev->line_state_lock);
+ RAW_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
+ BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier);
+
+ ret = init_srcu_struct(&gdev->srcu);
+ if (ret)
+ goto err_free_label;
+
+ ret = init_srcu_struct(&gdev->desc_srcu);
+ if (ret)
+ goto err_cleanup_gdev_srcu;
+
scoped_guard(mutex, &gpio_devices_lock) {
/*
* TODO: this allocates a Linux GPIO number base in the global
@@ -1119,7 +1131,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
if (base < 0) {
ret = base;
base = 0;
- goto err_free_label;
+ goto err_cleanup_desc_srcu;
}
/*
@@ -1139,22 +1151,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = gpiodev_add_to_list_unlocked(gdev);
if (ret) {
gpiochip_err(gc, "GPIO integer space overlap, cannot add chip\n");
- goto err_free_label;
+ goto err_cleanup_desc_srcu;
}
}
- rwlock_init(&gdev->line_state_lock);
- RAW_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
- BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier);
-
- ret = init_srcu_struct(&gdev->srcu);
- if (ret)
- goto err_remove_from_list;
-
- ret = init_srcu_struct(&gdev->desc_srcu);
- if (ret)
- goto err_cleanup_gdev_srcu;
-
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
@@ -1164,11 +1164,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = gpiochip_set_names(gc);
if (ret)
- goto err_cleanup_desc_srcu;
+ goto err_remove_from_list;
ret = gpiochip_init_valid_mask(gc);
if (ret)
- goto err_cleanup_desc_srcu;
+ goto err_remove_from_list;
for (desc_index = 0; desc_index < gc->ngpio; desc_index++) {
struct gpio_desc *desc = &gdev->descs[desc_index];
@@ -1248,10 +1248,6 @@ err_remove_of_chip:
of_gpiochip_remove(gc);
err_free_valid_mask:
gpiochip_free_valid_mask(gc);
-err_cleanup_desc_srcu:
- cleanup_srcu_struct(&gdev->desc_srcu);
-err_cleanup_gdev_srcu:
- cleanup_srcu_struct(&gdev->srcu);
err_remove_from_list:
scoped_guard(mutex, &gpio_devices_lock)
list_del_rcu(&gdev->list);
@@ -1261,6 +1257,10 @@ err_remove_from_list:
gpio_device_put(gdev);
goto err_print_message;
}
+err_cleanup_desc_srcu:
+ cleanup_srcu_struct(&gdev->desc_srcu);
+err_cleanup_gdev_srcu:
+ cleanup_srcu_struct(&gdev->srcu);
err_free_label:
kfree_const(gdev->label);
err_free_descs:
@@ -4508,45 +4508,41 @@ void gpiod_remove_hogs(struct gpiod_hog *hogs)
}
EXPORT_SYMBOL_GPL(gpiod_remove_hogs);
-static struct gpiod_lookup_table *gpiod_find_lookup_table(struct device *dev)
+static bool gpiod_match_lookup_table(struct device *dev,
+ const struct gpiod_lookup_table *table)
{
const char *dev_id = dev ? dev_name(dev) : NULL;
- struct gpiod_lookup_table *table;
- list_for_each_entry(table, &gpio_lookup_list, list) {
- if (table->dev_id && dev_id) {
- /*
- * Valid strings on both ends, must be identical to have
- * a match
- */
- if (!strcmp(table->dev_id, dev_id))
- return table;
- } else {
- /*
- * One of the pointers is NULL, so both must be to have
- * a match
- */
- if (dev_id == table->dev_id)
- return table;
- }
+ lockdep_assert_held(&gpio_lookup_lock);
+
+ if (table->dev_id && dev_id) {
+ /*
+ * Valid strings on both ends, must be identical to have
+ * a match
+ */
+ if (!strcmp(table->dev_id, dev_id))
+ return true;
+ } else {
+ /*
+ * One of the pointers is NULL, so both must be to have
+ * a match
+ */
+ if (dev_id == table->dev_id)
+ return true;
}
- return NULL;
+ return false;
}
-static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
- unsigned int idx, unsigned long *flags)
+static struct gpio_desc *gpio_desc_table_match(struct device *dev, const char *con_id,
+ unsigned int idx, unsigned long *flags,
+ struct gpiod_lookup_table *table)
{
- struct gpio_desc *desc = ERR_PTR(-ENOENT);
- struct gpiod_lookup_table *table;
+ struct gpio_desc *desc;
struct gpiod_lookup *p;
struct gpio_chip *gc;
- guard(mutex)(&gpio_lookup_lock);
-
- table = gpiod_find_lookup_table(dev);
- if (!table)
- return desc;
+ lockdep_assert_held(&gpio_lookup_lock);
for (p = &table->table[0]; p->key; p++) {
/* idx must always match exactly */
@@ -4600,7 +4596,30 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
return desc;
}
- return desc;
+ return NULL;
+}
+
+static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
+ unsigned int idx, unsigned long *flags)
+{
+ struct gpiod_lookup_table *table;
+ struct gpio_desc *desc;
+
+ guard(mutex)(&gpio_lookup_lock);
+
+ list_for_each_entry(table, &gpio_lookup_list, list) {
+ if (!gpiod_match_lookup_table(dev, table))
+ continue;
+
+ desc = gpio_desc_table_match(dev, con_id, idx, flags, table);
+ if (!desc)
+ continue;
+
+ /* Return on IS_ERR() or on a match. */
+ return desc;
+ }
+
+ return ERR_PTR(-ENOENT);
}
static int platform_gpio_count(struct device *dev, const char *con_id)
@@ -4610,14 +4629,16 @@ static int platform_gpio_count(struct device *dev, const char *con_id)
unsigned int count = 0;
scoped_guard(mutex, &gpio_lookup_lock) {
- table = gpiod_find_lookup_table(dev);
- if (!table)
- return -ENOENT;
+ list_for_each_entry(table, &gpio_lookup_list, list) {
+ if (!gpiod_match_lookup_table(dev, table))
+ continue;
- for (p = &table->table[0]; p->key; p++) {
- if ((con_id && p->con_id && !strcmp(con_id, p->con_id)) ||
- (!con_id && !p->con_id))
- count++;
+ for (p = &table->table[0]; p->key; p++) {
+ if ((con_id && p->con_id &&
+ !strcmp(con_id, p->con_id)) ||
+ (!con_id && !p->con_id))
+ count++;
+ }
}
}
@@ -4696,7 +4717,8 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
* lookup table for the proxy device as previously
* we only knew the consumer's fwnode.
*/
- ret = gpio_shared_add_proxy_lookup(consumer, lookupflags);
+ ret = gpio_shared_add_proxy_lookup(consumer, con_id,
+ lookupflags);
if (ret)
return ERR_PTR(ret);
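The gpiod_find() rework above means every table whose dev_id matches the consumer is now consulted, not just the first one found, so independently registered tables for the same device can each serve a different con_id. A hypothetical sketch (device and chip names made up):

#include <linux/gpio/machine.h>

/* Two tables for the same consumer, registered from different places. */
static struct gpiod_lookup_table foo_enable_lookup = {
	.dev_id = "foo-device.0",
	.table = {
		GPIO_LOOKUP("chip-a", 3, "enable", GPIO_ACTIVE_HIGH),
		{ }
	},
};

static struct gpiod_lookup_table foo_reset_lookup = {
	.dev_id = "foo-device.0",
	.table = {
		GPIO_LOOKUP("chip-b", 7, "reset", GPIO_ACTIVE_LOW),
		{ }
	},
};

/*
 * After both tables are added, gpiod_get(dev, "reset", GPIOD_OUT_HIGH) on
 * "foo-device.0" now finds chip-b line 7 even though foo_enable_lookup
 * matches the dev_id first.
 */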
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 58c3ffe707d1..d5c44bd34d45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3445,11 +3445,10 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
- /* skip CG for VCE/UVD/VPE, it's handled specially */
+ /* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
- adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
@@ -5867,6 +5866,9 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
if (ret)
goto mode1_reset_failed;
+ /* enable mmio access after the mode 1 reset has completed */
+ adev->no_hw_access = false;
+
amdgpu_device_load_pci_state(adev->pdev);
ret = amdgpu_psp_wait_for_bootloader(adev);
if (ret)
@@ -6613,6 +6615,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive = NULL;
int r = 0;
bool need_emergency_restart = false;
+ /* save the pasid here as the job may be freed before the end of the reset */
+ int pasid = job ? job->pasid : -EINVAL;
/*
* If it reaches here because of hang/timeout and a RAS error is
@@ -6713,8 +6717,12 @@ end_reset:
if (!r) {
struct amdgpu_task_info *ti = NULL;
- if (job)
- ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
+ /*
+ * The job may already be freed at this point via the sched tdr workqueue, so
+ * use the cached pasid.
+ */
+ if (pasid >= 0)
+ ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
ti ? &ti->task : NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 2dfbddcef9ab..848e6b7db482 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -33,6 +33,7 @@
#include <drm/drm_vblank.h>
#include <linux/cc_platform.h>
+#include <linux/console.h>
#include <linux/dynamic_debug.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
@@ -2704,7 +2705,9 @@ static int amdgpu_pmops_thaw(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
/* do not resume device if it's normal hibernation */
- if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend())
+ if (console_suspend_enabled &&
+ !pm_hibernate_is_recovering() &&
+ !pm_hibernation_mode_is_suspend())
return 0;
return amdgpu_device_resume(drm_dev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index c7843e336310..06c333b2213b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -89,6 +89,16 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
return seq;
}
+static void amdgpu_fence_save_fence_wptr_start(struct amdgpu_fence *af)
+{
+ af->fence_wptr_start = af->ring->wptr;
+}
+
+static void amdgpu_fence_save_fence_wptr_end(struct amdgpu_fence *af)
+{
+ af->fence_wptr_end = af->ring->wptr;
+}
+
/**
* amdgpu_fence_emit - emit a fence on the requested ring
*
@@ -116,8 +126,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
&ring->fence_drv.lock,
adev->fence_context + ring->idx, seq);
+ amdgpu_fence_save_fence_wptr_start(af);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
+ amdgpu_fence_save_fence_wptr_end(af);
amdgpu_fence_save_wptr(af);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
@@ -709,6 +721,7 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
struct amdgpu_ring *ring = af->ring;
unsigned long flags;
u32 seq, last_seq;
+ bool reemitted = false;
last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
@@ -726,7 +739,9 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) {
fence = container_of(unprocessed, struct amdgpu_fence, base);
- if (fence == af)
+ if (fence->reemitted > 1)
+ reemitted = true;
+ else if (fence == af)
dma_fence_set_error(&fence->base, -ETIME);
else if (fence->context == af->context)
dma_fence_set_error(&fence->base, -ECANCELED);
@@ -734,9 +749,12 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
rcu_read_unlock();
} while (last_seq != seq);
spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
- /* signal the guilty fence */
- amdgpu_fence_write(ring, (u32)af->base.seqno);
- amdgpu_fence_process(ring);
+
+ if (reemitted) {
+ /* if we've already reemitted once then just cancel everything */
+ amdgpu_fence_driver_force_completion(af->ring);
+ af->ring->ring_backup_entries_to_copy = 0;
+ }
}
void amdgpu_fence_save_wptr(struct amdgpu_fence *af)
@@ -784,10 +802,18 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
/* save everything if the ring is not guilty, otherwise
* just save the content from other contexts.
*/
- if (!guilty_fence || (fence->context != guilty_fence->context))
+ if (!fence->reemitted &&
+ (!guilty_fence || (fence->context != guilty_fence->context))) {
amdgpu_ring_backup_unprocessed_command(ring, wptr,
fence->wptr);
+ } else if (!fence->reemitted) {
+ /* always save the fence */
+ amdgpu_ring_backup_unprocessed_command(ring,
+ fence->fence_wptr_start,
+ fence->fence_wptr_end);
+ }
wptr = fence->wptr;
+ fence->reemitted++;
}
rcu_read_unlock();
} while (last_seq != seq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 37270c4dab8d..532f83d783d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -318,12 +318,36 @@ void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr)
}
EXPORT_SYMBOL(isp_kernel_buffer_free);
+static int isp_resume(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_isp *isp = &adev->isp;
+
+ if (isp->funcs->hw_resume)
+ return isp->funcs->hw_resume(isp);
+
+ return -ENODEV;
+}
+
+static int isp_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_isp *isp = &adev->isp;
+
+ if (isp->funcs->hw_suspend)
+ return isp->funcs->hw_suspend(isp);
+
+ return -ENODEV;
+}
+
static const struct amd_ip_funcs isp_ip_funcs = {
.name = "isp_ip",
.early_init = isp_early_init,
.hw_init = isp_hw_init,
.hw_fini = isp_hw_fini,
.is_idle = isp_is_idle,
+ .suspend = isp_suspend,
+ .resume = isp_resume,
.set_clockgating_state = isp_set_clockgating_state,
.set_powergating_state = isp_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
index d6f4ffa4c97c..9a5d2b1dff9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
@@ -38,6 +38,8 @@ struct amdgpu_isp;
struct isp_funcs {
int (*hw_init)(struct amdgpu_isp *isp);
int (*hw_fini)(struct amdgpu_isp *isp);
+ int (*hw_suspend)(struct amdgpu_isp *isp);
+ int (*hw_resume)(struct amdgpu_isp *isp);
};
struct amdgpu_isp {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 6ee77f431d56..f65edd80cabf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -201,6 +201,9 @@ static enum amd_ip_block_type
type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
break;
+ case AMDGPU_HW_IP_VPE:
+ type = AMD_IP_BLOCK_TYPE_VPE;
+ break;
default:
type = AMD_IP_BLOCK_TYPE_NUM;
break;
@@ -721,6 +724,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case AMD_IP_BLOCK_TYPE_UVD:
count = adev->uvd.num_uvd_inst;
break;
+ case AMD_IP_BLOCK_TYPE_VPE:
+ count = adev->vpe.num_instances;
+ break;
/* For all other IP block types not listed in the switch statement
* the ip status is valid here and the instance count is one.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 7a27c6c4bb44..055437d4edf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -144,10 +144,15 @@ struct amdgpu_fence {
struct amdgpu_ring *ring;
ktime_t start_timestamp;
- /* wptr for the fence for resets */
+ /* wptr for the total submission for resets */
u64 wptr;
/* fence context for resets */
u64 context;
+ /* number of times this fence has been reemitted */
+ unsigned int reemitted;
+ /* wptr range covering just the fence packet of the submission */
+ u64 fence_wptr_start;
+ u64 fence_wptr_end;
};
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
index 4258d3e0b706..0002bcc6c4ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
@@ -26,6 +26,7 @@
*/
#include <linux/gpio/machine.h>
+#include <linux/pm_runtime.h>
#include "amdgpu.h"
#include "isp_v4_1_1.h"
@@ -145,6 +146,9 @@ static int isp_genpd_add_device(struct device *dev, void *data)
return -ENODEV;
}
+ /* The devices will be managed by the pm ops from the parent */
+ dev_pm_syscore_device(dev, true);
+
exit:
/* Continue to add */
return 0;
@@ -177,12 +181,47 @@ static int isp_genpd_remove_device(struct device *dev, void *data)
drm_err(&adev->ddev, "Failed to remove dev from genpd %d\n", ret);
return -ENODEV;
}
+ dev_pm_syscore_device(dev, false);
exit:
/* Continue to remove */
return 0;
}
+static int isp_suspend_device(struct device *dev, void *data)
+{
+ return pm_runtime_force_suspend(dev);
+}
+
+static int isp_resume_device(struct device *dev, void *data)
+{
+ return pm_runtime_force_resume(dev);
+}
+
+static int isp_v4_1_1_hw_suspend(struct amdgpu_isp *isp)
+{
+ int r;
+
+ r = device_for_each_child(isp->parent, NULL,
+ isp_suspend_device);
+ if (r)
+ dev_err(isp->parent, "failed to suspend hw devices (%d)\n", r);
+
+ return r;
+}
+
+static int isp_v4_1_1_hw_resume(struct amdgpu_isp *isp)
+{
+ int r;
+
+ r = device_for_each_child(isp->parent, NULL,
+ isp_resume_device);
+ if (r)
+ dev_err(isp->parent, "failed to resume hw devices (%d)\n", r);
+
+ return r;
+}
+
static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
{
const struct software_node *amd_camera_node, *isp4_node;
@@ -369,6 +408,8 @@ static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp)
static const struct isp_funcs isp_v4_1_1_funcs = {
.hw_init = isp_v4_1_1_hw_init,
.hw_fini = isp_v4_1_1_hw_fini,
+ .hw_suspend = isp_v4_1_1_hw_suspend,
+ .hw_resume = isp_v4_1_1_hw_resume,
};
void isp_v4_1_1_set_isp_funcs(struct amdgpu_isp *isp)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index a499449fcb06..d2bc169e84b0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -312,7 +312,7 @@ void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_RESTORE,
KFD_EVENT_FMT_QUEUE_RESTORE(ktime_get_boottime_ns(), pid,
- node->id, 0));
+ node->id, '0'));
}
void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index d1471f34e419..9f11e6ca4051 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -763,14 +763,14 @@ static enum bp_result bios_parser_encoder_control(
return BP_RESULT_FAILURE;
return bp->cmd_tbl.dac1_encoder_control(
- bp, cntl->action == ENCODER_CONTROL_ENABLE,
+ bp, cntl->action,
cntl->pixel_clock, ATOM_DAC1_PS2);
} else if (cntl->engine_id == ENGINE_ID_DACB) {
if (!bp->cmd_tbl.dac2_encoder_control)
return BP_RESULT_FAILURE;
return bp->cmd_tbl.dac2_encoder_control(
- bp, cntl->action == ENCODER_CONTROL_ENABLE,
+ bp, cntl->action,
cntl->pixel_clock, ATOM_DAC1_PS2);
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 22457f417e65..76a3559f0ddc 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -1797,7 +1797,30 @@ static enum bp_result select_crtc_source_v3(
&params.ucEncodeMode))
return BP_RESULT_BADINPUT;
- params.ucDstBpc = bp_params->bit_depth;
+ switch (bp_params->color_depth) {
+ case COLOR_DEPTH_UNDEFINED:
+ params.ucDstBpc = PANEL_BPC_UNDEFINE;
+ break;
+ case COLOR_DEPTH_666:
+ params.ucDstBpc = PANEL_6BIT_PER_COLOR;
+ break;
+ default:
+ case COLOR_DEPTH_888:
+ params.ucDstBpc = PANEL_8BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ params.ucDstBpc = PANEL_10BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ params.ucDstBpc = PANEL_12BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_141414:
+ dm_error("14-bit color not supported by SelectCRTC_Source v3\n");
+ break;
+ case COLOR_DEPTH_161616:
+ params.ucDstBpc = PANEL_16BIT_PER_COLOR;
+ break;
+ }
if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
result = BP_RESULT_OK;
@@ -1815,12 +1838,12 @@ static enum bp_result select_crtc_source_v3(
static enum bp_result dac1_encoder_control_v1(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard);
static enum bp_result dac2_encoder_control_v1(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard);
@@ -1846,12 +1869,15 @@ static void init_dac_encoder_control(struct bios_parser *bp)
static void dac_encoder_control_prepare_params(
DAC_ENCODER_CONTROL_PS_ALLOCATION *params,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard)
{
params->ucDacStandard = dac_standard;
- if (enable)
+ if (action == ENCODER_CONTROL_SETUP ||
+ action == ENCODER_CONTROL_INIT)
+ params->ucAction = ATOM_ENCODER_INIT;
+ else if (action == ENCODER_CONTROL_ENABLE)
params->ucAction = ATOM_ENABLE;
else
params->ucAction = ATOM_DISABLE;
@@ -1864,7 +1890,7 @@ static void dac_encoder_control_prepare_params(
static enum bp_result dac1_encoder_control_v1(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard)
{
@@ -1873,7 +1899,7 @@ static enum bp_result dac1_encoder_control_v1(
dac_encoder_control_prepare_params(
&params,
- enable,
+ action,
pixel_clock,
dac_standard);
@@ -1885,7 +1911,7 @@ static enum bp_result dac1_encoder_control_v1(
static enum bp_result dac2_encoder_control_v1(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard)
{
@@ -1894,7 +1920,7 @@ static enum bp_result dac2_encoder_control_v1(
dac_encoder_control_prepare_params(
&params,
- enable,
+ action,
pixel_clock,
dac_standard);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
index e89b1ba0048b..78bdbcaa61c8 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
@@ -57,12 +57,12 @@ struct cmd_tbl {
struct bp_crtc_source_select *bp_params);
enum bp_result (*dac1_encoder_control)(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard);
enum bp_result (*dac2_encoder_control)(
struct bios_parser *bp,
- bool enable,
+ enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard);
enum bp_result (*dac1_output_control)(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index b357683b4255..268b5fbdb48b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -30,7 +30,11 @@ dml_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
- frame_warn_limit := 3072
+ ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
+ frame_warn_limit := 4096
+ else
+ frame_warn_limit := 3072
+ endif
else
frame_warn_limit := 2048
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 8d24763938ea..1df3412be346 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -77,32 +77,14 @@ static unsigned int dscceComputeDelay(
static unsigned int dscComputeDelay(
enum output_format_class pixelFormat,
enum output_encoder_class Output);
-// Super monster function with some 45 argument
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ unsigned int k,
Pipe *myPipe,
unsigned int DSCDelay,
- double DPPCLKDelaySubtotalPlusCNVCFormater,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
- enum output_format_class OutputFormat,
- unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
- unsigned int GPUVMPageTableLevels,
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- double HostVMMinPageSize,
- bool DynamicMetadataEnable,
- bool DynamicMetadataVMEnabled,
- int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@@ -116,7 +98,6 @@ static bool CalculatePrefetchSchedule(
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
- int BytePerPixelC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
long swath_width_luma_ub,
@@ -124,9 +105,6 @@ static bool CalculatePrefetchSchedule(
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
- bool ProgressiveToInterlaceUnitInOPP,
- double *DSTXAfterScaler,
- double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
@@ -135,14 +113,7 @@ static bool CalculatePrefetchSchedule(
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
- bool *NotEnoughTimeForDynamicMetadata,
- double *Tno_bw,
- double *prefetch_vmrow_bw,
- double *Tdmdl_vm,
- double *Tdmdl,
- unsigned int *VUpdateOffsetPix,
- double *VUpdateWidthPix,
- double *VReadyOffsetPix);
+ bool *NotEnoughTimeForDynamicMetadata);
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
static void CalculateDCCConfiguration(
@@ -294,62 +265,23 @@ static void CalculateDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int DPPOutputBufferPixels,
- unsigned int DETBufferSizeInKByte,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool GPUVMEnable,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
double DCFCLKDeepSleep,
unsigned int DPPPerPlane[],
- bool DCCEnable[],
double DPPCLK[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
- enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
- double *StutterExitWatermark,
- double *StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported);
+ enum clock_change_support *DRAMClockChangeSupport);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
unsigned int NumberOfActivePlanes,
@@ -810,29 +742,12 @@ static unsigned int dscComputeDelay(enum output_format_class pixelFormat, enum o
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ unsigned int k,
Pipe *myPipe,
unsigned int DSCDelay,
- double DPPCLKDelaySubtotalPlusCNVCFormater,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
- enum output_format_class OutputFormat,
- unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
- unsigned int GPUVMPageTableLevels,
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- double HostVMMinPageSize,
- bool DynamicMetadataEnable,
- bool DynamicMetadataVMEnabled,
- int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@@ -846,7 +761,6 @@ static bool CalculatePrefetchSchedule(
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
- int BytePerPixelC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
long swath_width_luma_ub,
@@ -854,9 +768,6 @@ static bool CalculatePrefetchSchedule(
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
- bool ProgressiveToInterlaceUnitInOPP,
- double *DSTXAfterScaler,
- double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
@@ -865,15 +776,10 @@ static bool CalculatePrefetchSchedule(
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
- bool *NotEnoughTimeForDynamicMetadata,
- double *Tno_bw,
- double *prefetch_vmrow_bw,
- double *Tdmdl_vm,
- double *Tdmdl,
- unsigned int *VUpdateOffsetPix,
- double *VUpdateWidthPix,
- double *VReadyOffsetPix)
+ bool *NotEnoughTimeForDynamicMetadata)
{
+ struct vba_vars_st *v = &mode_lib->vba;
+ double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater;
bool MyError = false;
unsigned int DPPCycles = 0, DISPCLKCycles = 0;
double DSTTotalPixelsAfterScaler = 0;
@@ -905,26 +811,26 @@ static bool CalculatePrefetchSchedule(
double Tdmec = 0;
double Tdmsks = 0;
- if (GPUVMEnable == true && HostVMEnable == true) {
- HostVMInefficiencyFactor = PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ if (v->GPUVMEnable == true && v->HostVMEnable == true) {
+ HostVMInefficiencyFactor = v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMInefficiencyFactor = 1;
HostVMDynamicLevelsTrips = 0;
}
CalculateDynamicMetadataParameters(
- MaxInterDCNTileRepeaters,
+ v->MaxInterDCNTileRepeaters,
myPipe->DPPCLK,
myPipe->DISPCLK,
myPipe->DCFCLKDeepSleep,
myPipe->PixelClock,
myPipe->HTotal,
myPipe->VBlank,
- DynamicMetadataTransmittedBytes,
- DynamicMetadataLinesBeforeActiveRequired,
+ v->DynamicMetadataTransmittedBytes[k],
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
myPipe->InterlaceEnable,
- ProgressiveToInterlaceUnitInOPP,
+ v->ProgressiveToInterlaceUnitInOPP,
&Tsetup,
&Tdmbf,
&Tdmec,
@@ -932,16 +838,16 @@ static bool CalculatePrefetchSchedule(
LineTime = myPipe->HTotal / myPipe->PixelClock;
trip_to_mem = UrgentLatency;
- Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
+ Tvm_trips = UrgentExtraLatency + trip_to_mem * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
- if (DynamicMetadataVMEnabled == true && GPUVMEnable == true) {
- *Tdmdl = TWait + Tvm_trips + trip_to_mem;
+ if (v->DynamicMetadataVMEnabled == true && v->GPUVMEnable == true) {
+ v->Tdmdl[k] = TWait + Tvm_trips + trip_to_mem;
} else {
- *Tdmdl = TWait + UrgentExtraLatency;
+ v->Tdmdl[k] = TWait + UrgentExtraLatency;
}
- if (DynamicMetadataEnable == true) {
- if (VStartup * LineTime < Tsetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
+ if (v->DynamicMetadataEnable[k] == true) {
+ if (VStartup * LineTime < Tsetup + v->Tdmdl[k] + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
} else {
*NotEnoughTimeForDynamicMetadata = false;
@@ -949,39 +855,39 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", Tdmbf);
dml_print("DML: Tdmec: %fus - time dio takes to transfer dmd\n", Tdmec);
dml_print("DML: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", Tdmsks);
- dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", *Tdmdl);
+ dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", v->Tdmdl[k]);
}
} else {
*NotEnoughTimeForDynamicMetadata = false;
}
- *Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true && GPUVMEnable == true ? TWait + Tvm_trips : 0);
+ v->Tdmdl_vm[k] = (v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true && v->GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
- DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCL;
else
- DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCLLBOnly;
- DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+ DPPCycles = DPPCycles + myPipe->NumberOfCursors * v->DPPCLKDelayCNVCCursor;
- DISPCLKCycles = DISPCLKDelaySubtotal;
+ DISPCLKCycles = v->DISPCLKDelaySubtotal;
if (myPipe->DPPCLK == 0.0 || myPipe->DISPCLK == 0.0)
return true;
- *DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->DPPCLK + DISPCLKCycles * myPipe->PixelClock / myPipe->DISPCLK
+ v->DSTXAfterScaler[k] = DPPCycles * myPipe->PixelClock / myPipe->DPPCLK + DISPCLKCycles * myPipe->PixelClock / myPipe->DISPCLK
+ DSCDelay;
- *DSTXAfterScaler = *DSTXAfterScaler + ((myPipe->ODMCombineEnabled)?18:0) + (myPipe->DPPPerPlane - 1) * DPP_RECOUT_WIDTH;
+ v->DSTXAfterScaler[k] = v->DSTXAfterScaler[k] + ((myPipe->ODMCombineEnabled)?18:0) + (myPipe->DPPPerPlane - 1) * DPP_RECOUT_WIDTH;
- if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && ProgressiveToInterlaceUnitInOPP))
- *DSTYAfterScaler = 1;
+ if (v->OutputFormat[k] == dm_420 || (myPipe->InterlaceEnable && v->ProgressiveToInterlaceUnitInOPP))
+ v->DSTYAfterScaler[k] = 1;
else
- *DSTYAfterScaler = 0;
+ v->DSTYAfterScaler[k] = 0;
- DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
- *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
- *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
+ DSTTotalPixelsAfterScaler = v->DSTYAfterScaler[k] * myPipe->HTotal + v->DSTXAfterScaler[k];
+ v->DSTYAfterScaler[k] = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
+ v->DSTXAfterScaler[k] = DSTTotalPixelsAfterScaler - ((double) (v->DSTYAfterScaler[k] * myPipe->HTotal));
MyError = false;
@@ -990,33 +896,33 @@ static bool CalculatePrefetchSchedule(
Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1) / 4 * LineTime;
Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1) / 4 * LineTime;
- if (GPUVMEnable) {
- if (GPUVMPageTableLevels >= 3) {
- *Tno_bw = UrgentExtraLatency + trip_to_mem * ((GPUVMPageTableLevels - 2) - 1);
+ if (v->GPUVMEnable) {
+ if (v->GPUVMMaxPageTableLevels >= 3) {
+ v->Tno_bw[k] = UrgentExtraLatency + trip_to_mem * ((v->GPUVMMaxPageTableLevels - 2) - 1);
} else
- *Tno_bw = 0;
+ v->Tno_bw[k] = 0;
} else if (!myPipe->DCCEnable)
- *Tno_bw = LineTime;
+ v->Tno_bw[k] = LineTime;
else
- *Tno_bw = LineTime / 4;
+ v->Tno_bw[k] = LineTime / 4;
- dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime
- - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+ dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, v->Tdmdl[k])) / LineTime
+ - (v->DSTYAfterScaler[k] + v->DSTXAfterScaler[k] / myPipe->HTotal);
dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
Lsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC);
Tsw_oto = Lsw_oto * LineTime;
- prefetch_bw_oto = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC) / Tsw_oto;
+ prefetch_bw_oto = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * v->BytePerPixelC[k]) / Tsw_oto;
- if (GPUVMEnable == true) {
- Tvm_oto = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
+ if (v->GPUVMEnable == true) {
+ Tvm_oto = dml_max3(v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
Tvm_trips,
LineTime / 4.0);
} else
Tvm_oto = LineTime / 4.0;
- if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_oto = dml_max3(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
LineTime - Tvm_oto, LineTime / 4);
@@ -1042,10 +948,10 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", Tdmbf);
dml_print("DML: Tdmec: %fus - time dio takes to transfer dmd\n", Tdmec);
dml_print("DML: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", Tdmsks);
- dml_print("DML: Tdmdl_vm: %fus - time for vm stages of dmd \n", *Tdmdl_vm);
- dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", *Tdmdl);
- dml_print("DML: dst_x_after_scl: %f pixels - number of pixel clocks pipeline and buffer delay after scaler \n", *DSTXAfterScaler);
- dml_print("DML: dst_y_after_scl: %d lines - number of lines of pipeline and buffer delay after scaler \n", (int)*DSTYAfterScaler);
+ dml_print("DML: Tdmdl_vm: %fus - time for vm stages of dmd \n", v->Tdmdl_vm[k]);
+ dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", v->Tdmdl[k]);
+ dml_print("DML: dst_x_after_scl: %f pixels - number of pixel clocks pipeline and buffer delay after scaler \n", v->DSTXAfterScaler[k]);
+ dml_print("DML: dst_y_after_scl: %d lines - number of lines of pipeline and buffer delay after scaler \n", (int)v->DSTYAfterScaler[k]);
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
@@ -1059,26 +965,26 @@ static bool CalculatePrefetchSchedule(
double PrefetchBandwidth3 = 0;
double PrefetchBandwidth4 = 0;
- if (Tpre_rounded - *Tno_bw > 0)
+ if (Tpre_rounded - v->Tno_bw[k] > 0)
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
+ PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY
- + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC)
- / (Tpre_rounded - *Tno_bw);
+ + PrefetchSourceLinesC * swath_width_chroma_ub * v->BytePerPixelC[k])
+ / (Tpre_rounded - v->Tno_bw[k]);
else
PrefetchBandwidth1 = 0;
- if (VStartup == MaxVStartup && (PrefetchBandwidth1 > 4 * prefetch_bw_oto) && (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - *Tno_bw) > 0) {
- PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - *Tno_bw);
+ if (VStartup == MaxVStartup && (PrefetchBandwidth1 > 4 * prefetch_bw_oto) && (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - v->Tno_bw[k]) > 0) {
+ PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - v->Tno_bw[k]);
}
- if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
+ if (Tpre_rounded - v->Tno_bw[k] - 2 * Tr0_trips_rounded > 0)
PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame *
HostVMInefficiencyFactor + PrefetchSourceLinesY *
swath_width_luma_ub * BytePerPixelY +
PrefetchSourceLinesC * swath_width_chroma_ub *
- BytePerPixelC) /
- (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
+ v->BytePerPixelC[k]) /
+ (Tpre_rounded - v->Tno_bw[k] - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth2 = 0;
@@ -1086,7 +992,7 @@ static bool CalculatePrefetchSchedule(
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow *
HostVMInefficiencyFactor + PrefetchSourceLinesY *
swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC *
- swath_width_chroma_ub * BytePerPixelC) / (Tpre_rounded -
+ swath_width_chroma_ub * v->BytePerPixelC[k]) / (Tpre_rounded -
Tvm_trips_rounded);
else
PrefetchBandwidth3 = 0;
@@ -1096,7 +1002,7 @@ static bool CalculatePrefetchSchedule(
}
if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0)
- PrefetchBandwidth4 = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC)
+ PrefetchBandwidth4 = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * v->BytePerPixelC[k])
/ (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth4 = 0;
@@ -1107,7 +1013,7 @@ static bool CalculatePrefetchSchedule(
bool Case3OK;
if (PrefetchBandwidth1 > 0) {
- if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
+ if (v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
>= Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth1 >= Tr0_trips_rounded) {
Case1OK = true;
} else {
@@ -1118,7 +1024,7 @@ static bool CalculatePrefetchSchedule(
}
if (PrefetchBandwidth2 > 0) {
- if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
+ if (v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
>= Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth2 < Tr0_trips_rounded) {
Case2OK = true;
} else {
@@ -1129,7 +1035,7 @@ static bool CalculatePrefetchSchedule(
}
if (PrefetchBandwidth3 > 0) {
- if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3
+ if (v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3
< Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth3 >= Tr0_trips_rounded) {
Case3OK = true;
} else {
@@ -1152,13 +1058,13 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: prefetch_bw_equ: %f\n", prefetch_bw_equ);
if (prefetch_bw_equ > 0) {
- if (GPUVMEnable) {
- Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
+ if (v->GPUVMEnable) {
+ Tvm_equ = dml_max3(v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
} else {
Tvm_equ = LineTime / 4;
}
- if ((GPUVMEnable || myPipe->DCCEnable)) {
+ if ((v->GPUVMEnable || myPipe->DCCEnable)) {
Tr0_equ = dml_max4(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_equ,
Tr0_trips,
@@ -1227,7 +1133,7 @@ static bool CalculatePrefetchSchedule(
}
*RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData * BytePerPixelY * swath_width_luma_ub / LineTime;
- *RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData * BytePerPixelC * swath_width_chroma_ub / LineTime;
+ *RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData * v->BytePerPixelC[k] * swath_width_chroma_ub / LineTime;
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
@@ -1243,9 +1149,9 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: Tr0: %fus - time to fetch first row of data pagetables and first row of meta data (done in parallel)\n", TimeForFetchingRowInVBlank);
dml_print("DML: Tr1: %fus - time to fetch second row of data pagetables and second row of meta data (done in parallel)\n", TimeForFetchingRowInVBlank);
dml_print("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)LinesToRequestPrefetchPixelData * LineTime);
- dml_print("DML: To: %fus - time for propagation from scaler to optc\n", (*DSTYAfterScaler + ((*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime);
+ dml_print("DML: To: %fus - time for propagation from scaler to optc\n", (v->DSTYAfterScaler[k] + ((v->DSTXAfterScaler[k]) / (double) myPipe->HTotal)) * LineTime);
dml_print("DML: Tvstartup - Tsetup - Tcalc - Twait - Tpre - To > 0\n");
- dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime - TimeForFetchingMetaPTE - 2 * TimeForFetchingRowInVBlank - (*DSTYAfterScaler + ((*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - Tsetup);
+ dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime - TimeForFetchingMetaPTE - 2 * TimeForFetchingRowInVBlank - (v->DSTYAfterScaler[k] + ((v->DSTXAfterScaler[k]) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - Tsetup);
dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n", PixelPTEBytesPerRow);
} else {
@@ -1276,7 +1182,7 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
}
- *prefetch_vmrow_bw = dml_max(prefetch_vm_bw, prefetch_row_bw);
+ v->prefetch_vmrow_bw[k] = dml_max(prefetch_vm_bw, prefetch_row_bw);
}
if (MyError) {
@@ -2437,30 +2343,12 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->ErrorResult[k] = CalculatePrefetchSchedule(
mode_lib,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ k,
&myPipe,
v->DSCDelay[k],
- v->DPPCLKDelaySubtotal
- + v->DPPCLKDelayCNVCFormater,
- v->DPPCLKDelaySCL,
- v->DPPCLKDelaySCLLBOnly,
- v->DPPCLKDelayCNVCCursor,
- v->DISPCLKDelaySubtotal,
(unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
- v->OutputFormat[k],
- v->MaxInterDCNTileRepeaters,
dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
v->MaxVStartupLines[k],
- v->GPUVMMaxPageTableLevels,
- v->GPUVMEnable,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->HostVMMinPageSize,
- v->DynamicMetadataEnable[k],
- v->DynamicMetadataVMEnabled,
- v->DynamicMetadataLinesBeforeActiveRequired[k],
- v->DynamicMetadataTransmittedBytes[k],
v->UrgentLatency,
v->UrgentExtraLatency,
v->TCalc,
@@ -2474,7 +2362,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->MaxNumSwathY[k],
v->PrefetchSourceLinesC[k],
v->SwathWidthC[k],
- v->BytePerPixelC[k],
v->VInitPreFillC[k],
v->MaxNumSwathC[k],
v->swath_width_luma_ub[k],
@@ -2482,9 +2369,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SwathHeightY[k],
v->SwathHeightC[k],
TWait,
- v->ProgressiveToInterlaceUnitInOPP,
- &v->DSTXAfterScaler[k],
- &v->DSTYAfterScaler[k],
&v->DestinationLinesForPrefetch[k],
&v->PrefetchBandwidth[k],
&v->DestinationLinesToRequestVMInVBlank[k],
@@ -2493,14 +2377,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
&v->VRatioPrefetchC[k],
&v->RequiredPrefetchPixDataBWLuma[k],
&v->RequiredPrefetchPixDataBWChroma[k],
- &v->NotEnoughTimeForDynamicMetadata[k],
- &v->Tno_bw[k],
- &v->prefetch_vmrow_bw[k],
- &v->Tdmdl_vm[k],
- &v->Tdmdl[k],
- &v->VUpdateOffsetPix[k],
- &v->VUpdateWidthPix[k],
- &v->VReadyOffsetPix[k]);
+ &v->NotEnoughTimeForDynamicMetadata[k]);
if (v->BlendingAndTiming[k] == k) {
double TotalRepeaterDelayTime = v->MaxInterDCNTileRepeaters * (2 / v->DPPCLK[k] + 3 / v->DISPCLK);
v->VUpdateWidthPix[k] = (14 / v->DCFCLKDeepSleep + 12 / v->DPPCLK[k] + TotalRepeaterDelayTime) * v->PixelClock[k];
@@ -2730,62 +2607,23 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->DPPOutputBufferPixels,
- v->DETBufferSizeInKByte[0],
- v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
- v->GPUVMEnable,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLK,
- v->FinalDRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
v->DCFCLKDeepSleep,
v->DPPPerPlane,
- v->DCCEnable,
v->DPPCLK,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
- v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
- &DRAMClockChangeSupport,
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
- &v->StutterExitWatermark,
- &v->StutterEnterPlusExitWatermark,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &DRAMClockChangeSupport);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@@ -4770,29 +4608,12 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule(
mode_lib,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ k,
&myPipe,
v->DSCDelayPerState[i][k],
- v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater,
- v->DPPCLKDelaySCL,
- v->DPPCLKDelaySCLLBOnly,
- v->DPPCLKDelayCNVCCursor,
- v->DISPCLKDelaySubtotal,
v->SwathWidthYThisState[k] / v->HRatio[k],
- v->OutputFormat[k],
- v->MaxInterDCNTileRepeaters,
dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
v->MaximumVStartup[i][j][k],
- v->GPUVMMaxPageTableLevels,
- v->GPUVMEnable,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->HostVMMinPageSize,
- v->DynamicMetadataEnable[k],
- v->DynamicMetadataVMEnabled,
- v->DynamicMetadataLinesBeforeActiveRequired[k],
- v->DynamicMetadataTransmittedBytes[k],
v->UrgLatency[i],
v->ExtraLatency,
v->TimeCalc,
@@ -4806,7 +4627,6 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->MaxNumSwY[k],
v->PrefetchLinesC[i][j][k],
v->SwathWidthCThisState[k],
- v->BytePerPixelC[k],
v->PrefillC[k],
v->MaxNumSwC[k],
v->swath_width_luma_ub_this_state[k],
@@ -4814,9 +4634,6 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k],
v->TWait,
- v->ProgressiveToInterlaceUnitInOPP,
- &v->DSTXAfterScaler[k],
- &v->DSTYAfterScaler[k],
&v->LineTimesForPrefetch[k],
&v->PrefetchBW[k],
&v->LinesForMetaPTE[k],
@@ -4825,14 +4642,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&v->VRatioPreC[i][j][k],
&v->RequiredPrefetchPixelDataBWLuma[i][j][k],
&v->RequiredPrefetchPixelDataBWChroma[i][j][k],
- &v->NoTimeForDynamicMetadata[i][j][k],
- &v->Tno_bw[k],
- &v->prefetch_vmrow_bw[k],
- &v->Tdmdl_vm[k],
- &v->Tdmdl[k],
- &v->VUpdateOffsetPix[k],
- &v->VUpdateWidthPix[k],
- &v->VReadyOffsetPix[k]);
+ &v->NoTimeForDynamicMetadata[i][j][k]);
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
@@ -5007,62 +4817,23 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->DPPOutputBufferPixels,
- v->DETBufferSizeInKByte[0],
- v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
- v->GPUVMEnable,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLKPerState[i],
- v->FinalDRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
v->ProjectedDCFCLKDeepSleep[i][j],
v->NoOfDPPThisState,
- v->DCCEnable,
v->RequiredDPPCLKThisState,
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
- v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
- &v->DRAMClockChangeSupport[i][j],
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
- &v->StutterExitWatermark,
- &v->StutterEnterPlusExitWatermark,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &v->DRAMClockChangeSupport[i][j]);
}
}
@@ -5179,63 +4950,25 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int DPPOutputBufferPixels,
- unsigned int DETBufferSizeInKByte,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool GPUVMEnable,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
double DCFCLKDeepSleep,
unsigned int DPPPerPlane[],
- bool DCCEnable[],
double DPPCLK[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
- enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
- double *StutterExitWatermark,
- double *StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported)
+ enum clock_change_support *DRAMClockChangeSupport)
{
+ struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY = 0;
double EffectiveLBLatencyHidingC = 0;
double LinesInDETY[DC__NUM_DPP__MAX] = { 0 };
@@ -5254,101 +4987,101 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double WritebackDRAMClockChangeLatencyHiding = 0;
unsigned int k, j;
- mode_lib->vba.TotalActiveDPP = 0;
- mode_lib->vba.TotalDCCActiveDPP = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP + DPPPerPlane[k];
- if (DCCEnable[k] == true) {
- mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP + DPPPerPlane[k];
+ v->TotalActiveDPP = 0;
+ v->TotalDCCActiveDPP = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->TotalActiveDPP = v->TotalActiveDPP + DPPPerPlane[k];
+ if (v->DCCEnable[k] == true) {
+ v->TotalDCCActiveDPP = v->TotalDCCActiveDPP + DPPPerPlane[k];
}
}
- *UrgentWatermark = UrgentLatency + ExtraLatency;
+ v->UrgentWatermark = UrgentLatency + ExtraLatency;
- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+ v->DRAMClockChangeWatermark = v->FinalDRAMClockChangeLatency + v->UrgentWatermark;
- mode_lib->vba.TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (WritebackEnable[k] == true) {
- mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + 1;
+ v->TotalActiveWriteback = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k] == true) {
+ v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
- if (mode_lib->vba.TotalActiveWriteback <= 1) {
- *WritebackUrgentWatermark = WritebackLatency;
+ if (v->TotalActiveWriteback <= 1) {
+ v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- if (mode_lib->vba.TotalActiveWriteback <= 1) {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+ if (v->TotalActiveWriteback <= 1) {
+ v->WritebackDRAMClockChangeWatermark = v->FinalDRAMClockChangeLatency + v->WritebackLatency;
} else {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackDRAMClockChangeWatermark = v->FinalDRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
- mode_lib->vba.LBLatencyHidingSourceLinesY = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+ v->LBLatencyHidingSourceLinesY = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
- mode_lib->vba.LBLatencyHidingSourceLinesC = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+ v->LBLatencyHidingSourceLinesC = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
- EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
- EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
LinesInDETY[k] = (double) DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
- FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
- LinesInDETC = mode_lib->vba.DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
+ LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
- ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY[k] - *UrgentWatermark - (HTotal[k] / PixelClock[k]) * (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) - *DRAMClockChangeWatermark;
+ ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY[k] - v->UrgentWatermark - (v->HTotal[k] / v->PixelClock[k]) * (v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
- ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+ if (v->NumberOfActivePlanes > 1) {
+ ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
- ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC - *UrgentWatermark - (HTotal[k] / PixelClock[k]) * (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) - *DRAMClockChangeWatermark;
+ ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC - v->UrgentWatermark - (v->HTotal[k] / v->PixelClock[k]) * (v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
- ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+ if (v->NumberOfActivePlanes > 1) {
+ ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
+ v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
+ v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
- if (WritebackEnable[k] == true) {
+ if (v->WritebackEnable[k] == true) {
- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024 / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64) {
+ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024 / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
- if (mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
+ if (v->WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding * 2;
}
- WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - mode_lib->vba.WritebackDRAMClockChangeWatermark;
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k], WritebackDRAMClockChangeLatencyMargin);
+ WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
+ v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(v->ActiveDRAMClockChangeLatencyMargin[k], WritebackDRAMClockChangeLatencyMargin);
}
}
- mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
+ v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] < mode_lib->vba.MinActiveDRAMClockChangeMargin) {
- mode_lib->vba.MinActiveDRAMClockChangeMargin = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
- if (BlendingAndTiming[k] == k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
+ v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
+ if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
- for (j = 0; j < NumberOfActivePlanes; ++j) {
- if (BlendingAndTiming[k] == j) {
+ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+ if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@@ -5356,40 +5089,40 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- *MinActiveDRAMClockChangeLatencySupported = mode_lib->vba.MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+ v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->FinalDRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) && mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
- SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) && v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
+ SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
}
- mode_lib->vba.TotalNumberOfActiveOTG = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (BlendingAndTiming[k] == k) {
- mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG + 1;
+ v->TotalNumberOfActiveOTG = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k) {
+ v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
- if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ if (v->MinActiveDRAMClockChangeMargin > 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- } else if (((mode_lib->vba.SynchronizedVBlank == true || mode_lib->vba.TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
+ } else if (((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
FullDETBufferingTimeYStutterCriticalPlane = FullDETBufferingTimeY[0];
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (FullDETBufferingTimeY[k] <= FullDETBufferingTimeYStutterCriticalPlane) {
FullDETBufferingTimeYStutterCriticalPlane = FullDETBufferingTimeY[k];
- TimeToFinishSwathTransferStutterCriticalPlane = (SwathHeightY[k] - (LinesInDETY[k] - LinesInDETYRoundedDownToSwath[k])) * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ TimeToFinishSwathTransferStutterCriticalPlane = (SwathHeightY[k] - (LinesInDETY[k] - LinesInDETYRoundedDownToSwath[k])) * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
}
}
- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
- *StutterEnterPlusExitWatermark = dml_max(SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep, TimeToFinishSwathTransferStutterCriticalPlane);
+ v->StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+ v->StutterEnterPlusExitWatermark = dml_max(v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep, TimeToFinishSwathTransferStutterCriticalPlane);
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 4986f12dc9df..ebd74b43e935 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1118,13 +1118,13 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
if (dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
num_audio++;
}
+ if (num_audio >= 1 && clk_mgr->funcs->enable_pme_wa) {
+ /* wake AZ from D3 before accessing the AZ endpoint */
+ clk_mgr->funcs->enable_pme_wa(clk_mgr);
+ }
pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
- if (num_audio >= 1 && clk_mgr->funcs->enable_pme_wa)
- /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
- clk_mgr->funcs->enable_pme_wa(clk_mgr);
-
link_hwss->enable_audio_packet(pipe_ctx);
if (pipe_ctx->stream_res.audio)
@@ -1610,38 +1610,12 @@ dce110_select_crtc_source(struct pipe_ctx *pipe_ctx)
struct dc_bios *bios = link->ctx->dc_bios;
struct bp_crtc_source_select crtc_source_select = {0};
enum engine_id engine_id = link->link_enc->preferred_engine;
- uint8_t bit_depth;
if (dc_is_rgb_signal(pipe_ctx->stream->signal))
engine_id = link->link_enc->analog_engine;
- switch (pipe_ctx->stream->timing.display_color_depth) {
- case COLOR_DEPTH_UNDEFINED:
- bit_depth = 0;
- break;
- case COLOR_DEPTH_666:
- bit_depth = 6;
- break;
- default:
- case COLOR_DEPTH_888:
- bit_depth = 8;
- break;
- case COLOR_DEPTH_101010:
- bit_depth = 10;
- break;
- case COLOR_DEPTH_121212:
- bit_depth = 12;
- break;
- case COLOR_DEPTH_141414:
- bit_depth = 14;
- break;
- case COLOR_DEPTH_161616:
- bit_depth = 16;
- break;
- }
-
crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst;
- crtc_source_select.bit_depth = bit_depth;
+ crtc_source_select.color_depth = pipe_ctx->stream->timing.display_color_depth;
crtc_source_select.engine_id = engine_id;
crtc_source_select.sink_signal = pipe_ctx->stream->signal;
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 6d31f4967f1a..e1940b8e5bc3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -932,7 +932,7 @@ static bool link_detect_dac_load_detect(struct dc_link *link)
struct link_encoder *link_enc = link->link_enc;
enum engine_id engine_id = link_enc->preferred_engine;
enum dal_device_type device_type = DEVICE_TYPE_CRT;
- enum bp_result bp_result;
+ enum bp_result bp_result = BP_RESULT_UNSUPPORTED;
uint32_t enum_id;
switch (engine_id) {
@@ -946,7 +946,9 @@ static bool link_detect_dac_load_detect(struct dc_link *link)
break;
}
- bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id);
+ if (bios->funcs->dac_load_detection)
+ bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id);
+
return bp_result == BP_RESULT_OK;
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index ef69898d2cc5..d056e5fd5458 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -203,12 +203,12 @@ enum dcn35_clk_src_array_id {
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
- REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
- regBIF_BX2_ ## reg_name
+ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX1_ ## reg_name
#define NBIO_SR_ARR(reg_name, id)\
- REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
- regBIF_BX2_ ## reg_name
+ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX1_ ## reg_name
#define bios_regs_init() \
( \
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index f3c614c4490c..9fab3169069c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -183,12 +183,12 @@ enum dcn351_clk_src_array_id {
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
- REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
- regBIF_BX2_ ## reg_name
+ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX1_ ## reg_name
#define NBIO_SR_ARR(reg_name, id)\
- REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
- regBIF_BX2_ ## reg_name
+ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX1_ ## reg_name
#define bios_regs_init() \
( \
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
index 973b6bdbac63..f40dc612ec73 100644
--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -136,7 +136,7 @@ struct bp_crtc_source_select {
enum engine_id engine_id;
enum controller_id controller_id;
enum signal_type sink_signal;
- uint8_t bit_depth;
+ enum dc_color_depth color_depth;
};
struct bp_transmitter_control {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 7c9f77124ab2..c4966dcc6875 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2455,24 +2455,21 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
}
for (i = 0; i < NUM_LINK_LEVELS; i++) {
- if (pptable->PcieGenSpeed[i] > pcie_gen_cap ||
- pptable->PcieLaneCount[i] > pcie_width_cap) {
- dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
- pptable->PcieGenSpeed[i] > pcie_gen_cap ?
- pcie_gen_cap : pptable->PcieGenSpeed[i];
- dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
- pptable->PcieLaneCount[i] > pcie_width_cap ?
- pcie_width_cap : pptable->PcieLaneCount[i];
- smu_pcie_arg = i << 16;
- smu_pcie_arg |= pcie_gen_cap << 8;
- smu_pcie_arg |= pcie_width_cap;
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
- if (ret)
- break;
- }
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
+ pptable->PcieGenSpeed[i] > pcie_gen_cap ?
+ pcie_gen_cap : pptable->PcieGenSpeed[i];
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
+ pptable->PcieLaneCount[i] > pcie_width_cap ?
+ pcie_width_cap : pptable->PcieLaneCount[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= dpm_context->dpm_tables.pcie_table.pcie_gen[i] << 8;
+ smu_pcie_arg |= dpm_context->dpm_tables.pcie_table.pcie_lane[i];
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ return ret;
}
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 677781060246..eaeff6a9bc50 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2923,8 +2923,13 @@ static int smu_v13_0_0_mode1_reset(struct smu_context *smu)
break;
}
- if (!ret)
+ if (!ret) {
+ /* disable mmio access while doing mode 1 reset */
+ smu->adev->no_hw_access = true;
+ /* ensure no_hw_access is globally visible before any MMIO */
+ smp_mb();
msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index f9b0938c57ea..f2a16dfee599 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -1939,6 +1939,11 @@ int smu_v14_0_od_edit_dpm_table(struct smu_context *smu,
dev_err(smu->adev->dev, "Set soft max sclk failed!");
return ret;
}
+ if (smu->gfx_actual_hard_min_freq != smu->gfx_default_hard_min_freq ||
+ smu->gfx_actual_soft_max_freq != smu->gfx_default_soft_max_freq)
+ smu->user_dpm_profile.user_od = true;
+ else
+ smu->user_dpm_profile.user_od = false;
break;
default:
return -ENOSYS;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index b1bd946d8e30..97414bc39764 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -1514,9 +1514,10 @@ static int smu_v14_0_1_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
- smu->gfx_actual_hard_min_freq = 0;
- smu->gfx_actual_soft_max_freq = 0;
-
+ if (smu->gfx_actual_hard_min_freq == 0)
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ if (smu->gfx_actual_soft_max_freq == 0)
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
return 0;
}
@@ -1526,8 +1527,10 @@ static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
- smu->gfx_actual_hard_min_freq = 0;
- smu->gfx_actual_soft_max_freq = 0;
+ if (smu->gfx_actual_hard_min_freq == 0)
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ if (smu->gfx_actual_soft_max_freq == 0)
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
return 0;
}
@@ -1665,6 +1668,29 @@ static int smu_v14_0_common_set_mall_enable(struct smu_context *smu)
return ret;
}
+static int smu_v14_0_0_restore_user_od_settings(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ smu->gfx_actual_hard_min_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to restore hard min sclk!\n");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ smu->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to restore soft max sclk!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.check_fw_version = smu_v14_0_check_fw_version,
@@ -1688,6 +1714,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.mode2_reset = smu_v14_0_0_mode2_reset,
.get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v14_0_0_set_soft_freq_limited_range,
+ .restore_user_od_settings = smu_v14_0_0_restore_user_od_settings,
.od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
.print_clk_levels = smu_v14_0_0_print_clk_levels,
.force_clk_levels = smu_v14_0_0_force_clk_levels,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 2cea688c604f..33c3cd2e1e24 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -2143,10 +2143,15 @@ static int smu_v14_0_2_mode1_reset(struct smu_context *smu)
ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset);
if (!ret) {
- if (amdgpu_emu_mode == 1)
+ if (amdgpu_emu_mode == 1) {
msleep(50000);
- else
+ } else {
+ /* disable mmio access while doing mode 1 reset */
+ smu->adev->no_hw_access = true;
+ /* ensure no_hw_access is globally visible before any MMIO */
+ smp_mb();
msleep(1000);
+ }
}
return ret;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 10adac9397cf..5beea645035f 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1162,8 +1162,18 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
new_state->self_refresh_active;
}
-static void
-encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_encoder_bridge_disable - disable bridges and encoders
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Loops over all connectors in the current state and if the CRTC needs
+ * it, disables the bridge chain all the way, then disables the encoder
+ * afterwards.
+ */
+void
+drm_atomic_helper_commit_encoder_bridge_disable(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
@@ -1229,9 +1239,18 @@ encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
}
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_disable);
-static void
-crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_crtc_disable - disable CRTCs
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Loops over all CRTCs in the current state and if the CRTC needs
+ * it, disables it.
+ */
+void
+drm_atomic_helper_commit_crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@@ -1282,9 +1301,18 @@ crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
drm_crtc_vblank_put(crtc);
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_disable);
-static void
-encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_encoder_bridge_post_disable - post-disable encoder bridges
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Loops over all connectors in the current state and if the CRTC needs
+ * it, post-disables all encoder bridges.
+ */
+void
+drm_atomic_helper_commit_encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
@@ -1335,15 +1363,16 @@ encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *sta
drm_bridge_put(bridge);
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_post_disable);
static void
disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
{
- encoder_bridge_disable(dev, state);
+ drm_atomic_helper_commit_encoder_bridge_disable(dev, state);
- crtc_disable(dev, state);
+ drm_atomic_helper_commit_encoder_bridge_post_disable(dev, state);
- encoder_bridge_post_disable(dev, state);
+ drm_atomic_helper_commit_crtc_disable(dev, state);
}
/**
@@ -1446,8 +1475,17 @@ void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *stat
}
EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
-static void
-crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_crtc_set_mode - set the new mode
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Loops over all connectors in the current state and if the mode has
+ * changed, changes the mode of the CRTC, then calls down the bridge
+ * chain and changes the mode in all bridges as well.
+ */
+void
+drm_atomic_helper_commit_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
@@ -1508,6 +1546,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
drm_bridge_put(bridge);
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_set_mode);
/**
* drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
@@ -1531,12 +1570,21 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_atomic_helper_calc_timestamping_constants(state);
- crtc_set_mode(dev, state);
+ drm_atomic_helper_commit_crtc_set_mode(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
-static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
- struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_writebacks - issue writebacks
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This loops over the connectors, checks if the new state requires
+ * a writeback job to be issued and in that case issues an atomic
+ * commit on each connector.
+ */
+void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
@@ -1555,9 +1603,18 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
}
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_writebacks);
-static void
-encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_encoder_bridge_pre_enable - pre-enable bridges
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This loops over the connectors and if the CRTC needs it, pre-enables
+ * the entire bridge chain.
+ */
+void
+drm_atomic_helper_commit_encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
@@ -1588,9 +1645,18 @@ encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state
drm_bridge_put(bridge);
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_pre_enable);
-static void
-crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_crtc_enable - enables the CRTCs
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This loops over CRTCs in the new state, and if the CRTC needs
+ * it, enables it.
+ */
+void
+drm_atomic_helper_commit_crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -1619,9 +1685,18 @@ crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
}
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_enable);
-static void
-encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
+/**
+ * drm_atomic_helper_commit_encoder_bridge_enable - enables the bridges
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This loops over all connectors in the new state, and if the CRTC needs
+ * it, enables the entire bridge chain.
+ */
+void
+drm_atomic_helper_commit_encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
@@ -1664,6 +1739,7 @@ encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
drm_bridge_put(bridge);
}
}
+EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_enable);
/**
* drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
@@ -1682,11 +1758,11 @@ encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *state)
{
- encoder_bridge_pre_enable(dev, state);
+ drm_atomic_helper_commit_crtc_enable(dev, state);
- crtc_enable(dev, state);
+ drm_atomic_helper_commit_encoder_bridge_pre_enable(dev, state);
- encoder_bridge_enable(dev, state);
+ drm_atomic_helper_commit_encoder_bridge_enable(dev, state);
drm_atomic_helper_commit_writebacks(dev, state);
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4a7f72044ab8..4b47aa0dab35 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -366,6 +366,9 @@ static void drm_fb_helper_damage_work(struct work_struct *work)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, damage_work);
+ if (helper->info->state != FBINFO_STATE_RUNNING)
+ return;
+
drm_fb_helper_fb_dirty(helper);
}
@@ -732,6 +735,13 @@ void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
if (fb_helper->info->state != FBINFO_STATE_RUNNING)
return;
+ /*
+ * Cancel pending damage work. During GPU reset, VBlank
+ * interrupts are disabled and drm_fb_helper_fb_dirty()
+ * would wait for VBlank timeout otherwise.
+ */
+ cancel_work_sync(&fb_helper->damage_work);
+
console_lock();
} else {
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index efc79bbf3c73..e4df43427394 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -969,8 +969,10 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
- if (args->handle == args->new_handle)
- return 0;
+ if (args->handle == args->new_handle) {
+ ret = 0;
+ goto out;
+ }
mutex_lock(&file_priv->prime.lock);
@@ -1002,6 +1004,8 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&file_priv->prime.lock);
+out:
+ drm_gem_object_put(obj);
return ret;
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 93b9cff89080..f13eb5f36e8a 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -96,7 +96,8 @@ err_release:
/**
* drm_gem_shmem_init - Initialize an allocated object.
* @dev: DRM device
- * @obj: The allocated shmem GEM object.
+ * @shmem: The allocated shmem GEM object.
+ * @size: Buffer size in bytes
*
* Returns:
* 0 on success, or a negative error code on failure.
@@ -895,4 +896,4 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 37d7cfbbb3e8..06c1bd8fc4d1 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -3,6 +3,7 @@
* Copyright © 2024-2025 Intel Corporation
*/
+#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>
@@ -408,10 +409,14 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
drm_pagemap_get_devmem_page(page, zdd);
}
- err = ops->copy_to_devmem(pages, pagemap_addr, npages);
+ err = ops->copy_to_devmem(pages, pagemap_addr, npages,
+ devmem_allocation->pre_migrate_fence);
if (err)
goto err_finalize;
+ dma_fence_put(devmem_allocation->pre_migrate_fence);
+ devmem_allocation->pre_migrate_fence = NULL;
+
/* Upon success bind devmem allocation to range and zdd */
devmem_allocation->timeslice_expiration = get_jiffies_64() +
msecs_to_jiffies(timeslice_ms);
@@ -596,7 +601,7 @@ retry:
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(src[i]);
- err = ops->copy_to_ram(pages, pagemap_addr, npages);
+ err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
if (err)
goto err_finalize;
@@ -732,7 +737,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(migrate.src[i]);
- err = ops->copy_to_ram(pages, pagemap_addr, npages);
+ err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
if (err)
goto err_finalize;
@@ -813,11 +818,14 @@ EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get);
* @ops: Pointer to the operations structure for GPU SVM device memory
* @dpagemap: The struct drm_pagemap we're allocating from.
* @size: Size of device memory allocation
+ * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
+ * (May be NULL).
*/
void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
struct device *dev, struct mm_struct *mm,
const struct drm_pagemap_devmem_ops *ops,
- struct drm_pagemap *dpagemap, size_t size)
+ struct drm_pagemap *dpagemap, size_t size,
+ struct dma_fence *pre_migrate_fence)
{
init_completion(&devmem_allocation->detached);
devmem_allocation->dev = dev;
@@ -825,6 +833,7 @@ void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
devmem_allocation->ops = ops;
devmem_allocation->dpagemap = dpagemap;
devmem_allocation->size = size;
+ devmem_allocation->pre_migrate_fence = pre_migrate_fence;
}
EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 01813e11e6c6..8e76ac8ee4e2 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1692,7 +1692,7 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
{
struct hdmi_context *hdata = arg;
- mod_delayed_work(system_wq, &hdata->hotplug_work,
+ mod_delayed_work(system_percpu_wq, &hdata->hotplug_work,
msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
return IRQ_HANDLED;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b057c2fa03a4..d49e96f9be51 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -951,13 +951,13 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
vma = eb_lookup_vma(eb, eb->exec[i].handle);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err;
+ return err;
}
err = eb_validate_vma(eb, &eb->exec[i], vma);
if (unlikely(err)) {
i915_vma_put(vma);
- goto err;
+ return err;
}
err = eb_add_vma(eb, &current_batch, i, vma);
@@ -966,19 +966,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
if (i915_gem_object_is_userptr(vma->obj)) {
err = i915_gem_object_userptr_submit_init(vma->obj);
- if (err) {
- if (i + 1 < eb->buffer_count) {
- /*
- * Execbuffer code expects last vma entry to be NULL,
- * since we already initialized this entry,
- * set the next value to NULL or we mess up
- * cleanup handling.
- */
- eb->vma[i + 1].vma = NULL;
- }
-
+ if (err)
return err;
- }
eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT;
eb->args->flags |= __EXEC_USERPTR_USED;
@@ -986,10 +975,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
}
return 0;
-
-err:
- eb->vma[i].vma = NULL;
- return err;
}
static int eb_lock_vmas(struct i915_execbuffer *eb)
@@ -3375,7 +3360,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.exec = exec;
eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
- eb.vma[0].vma = NULL;
+ memset(eb.vma, 0, (args->buffer_count + 1) * sizeof(struct eb_vma));
+
eb.batch_pool = NULL;
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
@@ -3584,7 +3570,18 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
if (err)
return err;
- /* Allocate extra slots for use by the command parser */
+ /*
+ * Allocate extra slots for use by the command parser.
+ *
+ * Note that this allocation handles two different arrays (the
+ * exec2_list array, and the eventual eb.vma array introduced in
+ * i915_gem_do_execbuffer()), that reside in virtually contiguous
+ * memory. Also note that the allocation intentionally doesn't fill the
+ * area with zeros, because the exec2_list part doesn't need to be, as
+ * it's immediately overwritten by user data a few lines below.
+ * However, the eb.vma part is explicitly zeroed later in
+ * i915_gem_do_execbuffer().
+ */
exec2_list = kvmalloc_array(count + 2, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec2_list == NULL) {
diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c
index a66cf082af24..c07c9a915190 100644
--- a/drivers/gpu/drm/imagination/pvr_gem.c
+++ b/drivers/gpu/drm/imagination/pvr_gem.c
@@ -28,6 +28,16 @@ static void pvr_gem_object_free(struct drm_gem_object *obj)
drm_gem_shmem_object_free(obj);
}
+static struct dma_buf *pvr_gem_export(struct drm_gem_object *obj, int flags)
+{
+ struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(obj);
+
+ if (pvr_obj->flags & DRM_PVR_BO_PM_FW_PROTECT)
+ return ERR_PTR(-EPERM);
+
+ return drm_gem_prime_export(obj, flags);
+}
+
static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma)
{
struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(gem_obj);
@@ -42,6 +52,7 @@ static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *v
static const struct drm_gem_object_funcs pvr_gem_object_funcs = {
.free = pvr_gem_object_free,
.print_info = drm_gem_shmem_object_print_info,
+ .export = pvr_gem_export,
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
.get_sg_table = drm_gem_shmem_object_get_sg_table,
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 0e2bcd5f67b7..d7726091819c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -1002,12 +1002,6 @@ static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
return PTR_ERR(dsi->next_bridge);
}
- /*
- * set flag to request the DSI host bridge be pre-enabled before device bridge
- * in the chain, so the DSI host is ready when the device bridge is pre-enabled
- */
- dsi->next_bridge->pre_enable_prev_first = true;
-
drm_bridge_add(&dsi->bridge);
ret = component_add(host->dev, &mtk_dsi_component_ops);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 29107b362346..ac9a95aab2fb 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -1376,7 +1376,6 @@ static const uint32_t a7xx_pwrup_reglist_regs[] = {
REG_A6XX_UCHE_MODE_CNTL,
REG_A6XX_RB_NC_MODE_CNTL,
REG_A6XX_RB_CMP_DBG_ECO_CNTL,
- REG_A7XX_GRAS_NC_MODE_CNTL,
REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE,
REG_A6XX_UCHE_GBIF_GX_CONFIG,
REG_A6XX_UCHE_CLIENT_PF,
@@ -1392,6 +1391,7 @@ static const u32 a750_ifpc_reglist_regs[] = {
REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(2),
REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(3),
REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(4),
+ REG_A6XX_RBBM_PERFCTR_CNTL,
REG_A6XX_TPL1_NC_MODE_CNTL,
REG_A6XX_SP_NC_MODE_CNTL,
REG_A6XX_CP_DBG_ECO_CNTL,
@@ -1448,6 +1448,12 @@ static const u32 a750_ifpc_reglist_regs[] = {
DECLARE_ADRENO_REGLIST_LIST(a750_ifpc_reglist);
+static const struct adreno_reglist_pipe a7xx_dyn_pwrup_reglist_regs[] = {
+ { REG_A7XX_GRAS_NC_MODE_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
+};
+
+DECLARE_ADRENO_REGLIST_PIPE_LIST(a7xx_dyn_pwrup_reglist);
+
static const struct adreno_info a7xx_gpus[] = {
{
.chip_ids = ADRENO_CHIP_IDS(0x07000200),
@@ -1491,6 +1497,7 @@ static const struct adreno_info a7xx_gpus[] = {
.hwcg = a730_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .dyn_pwrup_reglist = &a7xx_dyn_pwrup_reglist,
.gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020000,
},
@@ -1513,6 +1520,7 @@ static const struct adreno_info a7xx_gpus[] = {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .dyn_pwrup_reglist = &a7xx_dyn_pwrup_reglist,
.gbif_cx = a640_gbif,
.gmu_chipid = 0x7020100,
.gmu_cgc_mode = 0x00020202,
@@ -1547,6 +1555,7 @@ static const struct adreno_info a7xx_gpus[] = {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .dyn_pwrup_reglist = &a7xx_dyn_pwrup_reglist,
.ifpc_reglist = &a750_ifpc_reglist,
.gbif_cx = a640_gbif,
.gmu_chipid = 0x7050001,
@@ -1589,6 +1598,7 @@ static const struct adreno_info a7xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .dyn_pwrup_reglist = &a7xx_dyn_pwrup_reglist,
.ifpc_reglist = &a750_ifpc_reglist,
.gbif_cx = a640_gbif,
.gmu_chipid = 0x7090100,
@@ -1623,6 +1633,7 @@ static const struct adreno_info a7xx_gpus[] = {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .dyn_pwrup_reglist = &a7xx_dyn_pwrup_reglist,
.gbif_cx = a640_gbif,
.gmu_chipid = 0x70f0000,
.gmu_cgc_mode = 0x00020222,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 0200a7e71cdf..2129d230a92b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -849,9 +849,16 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
min_acc_len_64b << 3 |
hbb_lo << 1 | ubwc_mode);
- if (adreno_is_a7xx(adreno_gpu))
- gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
- FIELD_PREP(GENMASK(8, 5), hbb_lo));
+ if (adreno_is_a7xx(adreno_gpu)) {
+ for (u32 pipe_id = PIPE_BR; pipe_id <= PIPE_BV; pipe_id++) {
+ gpu_write(gpu, REG_A7XX_CP_APERTURE_CNTL_HOST,
+ A7XX_CP_APERTURE_CNTL_HOST_PIPE(pipe_id));
+ gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
+ FIELD_PREP(GENMASK(8, 5), hbb_lo));
+ }
+ gpu_write(gpu, REG_A7XX_CP_APERTURE_CNTL_HOST,
+ A7XX_CP_APERTURE_CNTL_HOST_PIPE(PIPE_NONE));
+ }
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL,
min_acc_len_64b << 23 | hbb_lo << 21);
@@ -865,23 +872,27 @@ static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
const struct adreno_reglist_list *reglist;
+ const struct adreno_reglist_pipe_list *dyn_pwrup_reglist;
void *ptr = a6xx_gpu->pwrup_reglist_ptr;
struct cpu_gpu_lock *lock = ptr;
u32 *dest = (u32 *)&lock->regs[0];
+ u32 dyn_pwrup_reglist_count = 0;
int i;
lock->gpu_req = lock->cpu_req = lock->turn = 0;
reglist = adreno_gpu->info->a6xx->ifpc_reglist;
- lock->ifpc_list_len = reglist->count;
+ if (reglist) {
+ lock->ifpc_list_len = reglist->count;
- /*
- * For each entry in each of the lists, write the offset and the current
- * register value into the GPU buffer
- */
- for (i = 0; i < reglist->count; i++) {
- *dest++ = reglist->regs[i];
- *dest++ = gpu_read(gpu, reglist->regs[i]);
+ /*
+ * For each entry in each of the lists, write the offset and the current
+ * register value into the GPU buffer
+ */
+ for (i = 0; i < reglist->count; i++) {
+ *dest++ = reglist->regs[i];
+ *dest++ = gpu_read(gpu, reglist->regs[i]);
+ }
}
reglist = adreno_gpu->info->a6xx->pwrup_reglist;
@@ -907,7 +918,24 @@ static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu)
* (<aperture, shifted 12 bits> <address> <data>), and the length is
* stored as number for triplets in dynamic_list_len.
*/
- lock->dynamic_list_len = 0;
+ dyn_pwrup_reglist = adreno_gpu->info->a6xx->dyn_pwrup_reglist;
+ if (dyn_pwrup_reglist) {
+ for (u32 pipe_id = PIPE_BR; pipe_id <= PIPE_BV; pipe_id++) {
+ gpu_write(gpu, REG_A7XX_CP_APERTURE_CNTL_HOST,
+ A7XX_CP_APERTURE_CNTL_HOST_PIPE(pipe_id));
+ for (i = 0; i < dyn_pwrup_reglist->count; i++) {
+ if ((dyn_pwrup_reglist->regs[i].pipe & BIT(pipe_id)) == 0)
+ continue;
+ *dest++ = A7XX_CP_APERTURE_CNTL_HOST_PIPE(pipe_id);
+ *dest++ = dyn_pwrup_reglist->regs[i].offset;
+ *dest++ = gpu_read(gpu, dyn_pwrup_reglist->regs[i].offset);
+ dyn_pwrup_reglist_count++;
+ }
+ }
+ gpu_write(gpu, REG_A7XX_CP_APERTURE_CNTL_HOST,
+ A7XX_CP_APERTURE_CNTL_HOST_PIPE(PIPE_NONE));
+ }
+ lock->dynamic_list_len = dyn_pwrup_reglist_count;
}
static int a7xx_preempt_start(struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 6820216ec5fc..4eaa04711246 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -45,6 +45,7 @@ struct a6xx_info {
const struct adreno_reglist *hwcg;
const struct adreno_protect *protect;
const struct adreno_reglist_list *pwrup_reglist;
+ const struct adreno_reglist_pipe_list *dyn_pwrup_reglist;
const struct adreno_reglist_list *ifpc_reglist;
const struct adreno_reglist *gbif_cx;
const struct adreno_reglist_pipe *nonctxt_reglist;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
index afc5f4aa3b17..747a22afad9f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
@@ -454,11 +454,11 @@ void a6xx_preempt_init(struct msm_gpu *gpu)
gpu->vm, &a6xx_gpu->preempt_postamble_bo,
&a6xx_gpu->preempt_postamble_iova);
- preempt_prepare_postamble(a6xx_gpu);
-
if (IS_ERR(a6xx_gpu->preempt_postamble_ptr))
goto fail;
+ preempt_prepare_postamble(a6xx_gpu);
+
timer_setup(&a6xx_gpu->preempt_timer, a6xx_preempt_timer, 0);
return;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 0f8d3de97636..1d0145f8b3ec 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -188,6 +188,19 @@ static const struct adreno_reglist_list name = { \
.count = ARRAY_SIZE(name ## _regs), \
};
+struct adreno_reglist_pipe_list {
+ /** @regs: List of registers **/
+ const struct adreno_reglist_pipe *regs;
+ /** @count: Number of registers in the list **/
+ u32 count;
+};
+
+#define DECLARE_ADRENO_REGLIST_PIPE_LIST(name) \
+static const struct adreno_reglist_pipe_list name = { \
+ .regs = name ## _regs, \
+ .count = ARRAY_SIZE(name ## _regs), \
+};
+
struct adreno_gpu {
struct msm_gpu base;
const struct adreno_info *info;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index c39f1908ea65..2d06c950e814 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -200,7 +200,7 @@ static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc,
struct dpu_crtc_state *crtc_state)
{
struct dpu_crtc_mixer *m;
- u32 crcs[CRTC_QUAD_MIXERS];
+ u32 crcs[CRTC_DUAL_MIXERS];
int rc = 0;
int i;
@@ -1328,7 +1328,6 @@ static struct msm_display_topology dpu_crtc_get_topology(
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct msm_display_topology topology = {0};
struct drm_encoder *drm_enc;
- u32 num_rt_intf;
drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask)
dpu_encoder_update_topology(drm_enc, &topology, crtc_state->state,
@@ -1342,14 +1341,11 @@ static struct msm_display_topology dpu_crtc_get_topology(
* Dual display
* 2 LM, 2 INTF ( Split display using 2 interfaces)
*
- * If DSC is enabled, try to use 4:4:2 topology if there is enough
- * resource. Otherwise, use 2:2:2 topology.
- *
* Single display
* 1 LM, 1 INTF
* 2 LM, 1 INTF (stream merge to support high resolution interfaces)
*
- * If DSC is enabled, use 2:2:1 topology
+ * If DSC is enabled, use 2 LMs for 2:2:1 topology
*
* Add dspps to the reservation requirements if ctm is requested
*
@@ -1361,23 +1357,14 @@ static struct msm_display_topology dpu_crtc_get_topology(
* (mode->hdisplay > MAX_HDISPLAY_SPLIT) check.
*/
- num_rt_intf = topology.num_intf;
- if (topology.cwb_enabled)
- num_rt_intf--;
-
- if (topology.num_dsc) {
- if (dpu_kms->catalog->dsc_count >= num_rt_intf * 2)
- topology.num_dsc = num_rt_intf * 2;
- else
- topology.num_dsc = num_rt_intf;
- topology.num_lm = topology.num_dsc;
- } else if (num_rt_intf == 2) {
+ if (topology.num_intf == 2 && !topology.cwb_enabled)
+ topology.num_lm = 2;
+ else if (topology.num_dsc == 2)
topology.num_lm = 2;
- } else if (dpu_kms->catalog->caps->has_3d_merge) {
+ else if (dpu_kms->catalog->caps->has_3d_merge)
topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
- } else {
+ else
topology.num_lm = 1;
- }
if (crtc_state->ctm)
topology.num_dspp = topology.num_lm;
@@ -1620,17 +1607,6 @@ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
return 0;
}
-/**
- * dpu_crtc_get_num_lm - Get mixer number in this CRTC pipeline
- * @state: Pointer to drm crtc state object
- */
-unsigned int dpu_crtc_get_num_lm(const struct drm_crtc_state *state)
-{
- struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
-
- return cstate->num_mixers;
-}
-
#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 455073c7025b..94392b9b9245 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -210,7 +210,7 @@ struct dpu_crtc_state {
bool bw_control;
bool bw_split_vote;
- struct drm_rect lm_bounds[CRTC_QUAD_MIXERS];
+ struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
uint64_t input_fence_timeout_ns;
@@ -218,10 +218,10 @@ struct dpu_crtc_state {
/* HW Resources reserved for the crtc */
u32 num_mixers;
- struct dpu_crtc_mixer mixers[CRTC_QUAD_MIXERS];
+ struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
u32 num_ctls;
- struct dpu_hw_ctl *hw_ctls[CRTC_QUAD_MIXERS];
+ struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS];
enum dpu_crtc_crc_source crc_source;
int crc_frame_skip_count;
@@ -267,6 +267,4 @@ static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event);
-unsigned int dpu_crtc_get_num_lm(const struct drm_crtc_state *state);
-
#endif /* _DPU_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index d1cfe81a3373..9f3957f24c6a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -55,7 +55,7 @@
#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
-#define MAX_CHANNELS_PER_ENC 4
+#define MAX_CHANNELS_PER_ENC 2
#define MAX_CWB_PER_ENC 2
#define IDLE_SHORT_TIMEOUT 1
@@ -661,6 +661,7 @@ void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
struct msm_drm_private *priv = dpu_enc->base.dev->dev_private;
struct msm_display_info *disp_info = &dpu_enc->disp_info;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_connector *connector;
struct drm_connector_state *conn_state;
struct drm_framebuffer *fb;
@@ -674,12 +675,22 @@ void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
dsc = dpu_encoder_get_dsc_config(drm_enc);
- /*
- * Set DSC number as 1 to mark the enabled status, will be adjusted
- * in dpu_crtc_get_topology()
- */
- if (dsc)
- topology->num_dsc = 1;
+ /* We only support the 2-DSC mode (with 2 LM and 1 INTF) */
+ if (dsc) {
+ /*
+ * Use 2 DSC encoders, 2 layer mixers and 1 or 2 interfaces
+ * when Display Stream Compression (DSC) is enabled,
+ * and when enough DSC blocks are available.
+ * This is power-optimal and can drive screens up to and
+ * including 4k.
+ */
+ WARN(topology->num_intf > 2,
+ "DSC topology cannot support more than 2 interfaces\n");
+ if (topology->num_intf >= 2 || dpu_kms->catalog->dsc_count >= 2)
+ topology->num_dsc = 2;
+ else
+ topology->num_dsc = 1;
+ }
connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
if (!connector)
@@ -2169,8 +2180,8 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
{
int i, num_lm;
struct dpu_global_state *global_state;
- struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
- struct dpu_hw_mixer *hw_mixer[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_lm[2];
+ struct dpu_hw_mixer *hw_mixer[2];
struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
/* reset all mixers for this encoder */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 09395d7910ac..61b22d949454 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -302,7 +302,7 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
/* Use merge_3d unless DSC MERGE topology is used */
if (phys_enc->split_role == ENC_ROLE_SOLO &&
- (dpu_cstate->num_mixers != 1) &&
+ dpu_cstate->num_mixers == CRTC_DUAL_MIXERS &&
!dpu_encoder_use_dsc_merge(phys_enc->parent))
return BLEND_3D_H_ROW_INT;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 46f348972a97..6d28f2281c76 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -247,14 +247,12 @@ static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc)
if (hw_cdm)
intf_cfg.cdm = hw_cdm->idx;
- if (phys_enc->hw_pp->merge_3d && phys_enc->hw_pp->merge_3d->ops.setup_3d_mode)
- phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
- mode_3d);
+ if (hw_pp && hw_pp->merge_3d && hw_pp->merge_3d->ops.setup_3d_mode)
+ hw_pp->merge_3d->ops.setup_3d_mode(hw_pp->merge_3d, mode_3d);
/* setup which pp blk will connect to this wb */
- if (hw_pp && phys_enc->hw_wb->ops.bind_pingpong_blk)
- phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb,
- phys_enc->hw_pp->idx);
+ if (hw_pp && hw_wb->ops.bind_pingpong_blk)
+ hw_wb->ops.bind_pingpong_blk(hw_wb, hw_pp->idx);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
} else if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg) {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 336757103b5a..4964e70610d1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -24,7 +24,7 @@
#define DPU_MAX_IMG_WIDTH 0x3fff
#define DPU_MAX_IMG_HEIGHT 0x3fff
-#define CRTC_QUAD_MIXERS 4
+#define CRTC_DUAL_MIXERS 2
#define MAX_XIN_COUNT 16
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
index 6bb3476a05f8..75e6dae0fcd9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
@@ -89,13 +89,13 @@ enum dpu_hw_cdwn_op_mode_method_h_v {
*/
struct dpu_hw_cdm_ops {
/**
- * Enable the CDM module
+ * @enable: Enable the CDM module
* @cdm Pointer to chroma down context
*/
int (*enable)(struct dpu_hw_cdm *cdm, struct dpu_hw_cdm_cfg *cfg);
/**
- * Enable/disable the connection with pingpong
+ * @bind_pingpong_blk: Enable/disable the connection with pingpong
* @cdm Pointer to chroma down context
* @pp pingpong block id.
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 15931b22ec94..e535bf013825 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -12,9 +12,9 @@
#include "dpu_hw_sspp.h"
/**
- * dpu_ctl_mode_sel: Interface mode selection
- * DPU_CTL_MODE_SEL_VID: Video mode interface
- * DPU_CTL_MODE_SEL_CMD: Command mode interface
+ * enum dpu_ctl_mode_sel: Interface mode selection
+ * @DPU_CTL_MODE_SEL_VID: Video mode interface
+ * @DPU_CTL_MODE_SEL_CMD: Command mode interface
*/
enum dpu_ctl_mode_sel {
DPU_CTL_MODE_SEL_VID = 0,
@@ -37,6 +37,7 @@ struct dpu_hw_stage_cfg {
* struct dpu_hw_intf_cfg :Describes how the DPU writes data to output interface
* @intf : Interface id
* @intf_master: Master interface id in the dual pipe topology
+ * @wb: Writeback mode
* @mode_3d: 3d mux configuration
* @merge_3d: 3d merge block used
* @intf_mode_sel: Interface mode, cmd / vid
@@ -64,21 +65,21 @@ struct dpu_hw_intf_cfg {
*/
struct dpu_hw_ctl_ops {
/**
- * kickoff hw operation for Sw controlled interfaces
+ * @trigger_start: kickoff hw operation for Sw controlled interfaces
* DSI cmd mode and WB interface are SW controlled
* @ctx : ctl path ctx pointer
*/
void (*trigger_start)(struct dpu_hw_ctl *ctx);
/**
- * check if the ctl is started
+ * @is_started: check if the ctl is started
* @ctx : ctl path ctx pointer
* @Return: true if started, false if stopped
*/
bool (*is_started)(struct dpu_hw_ctl *ctx);
/**
- * kickoff prepare is in progress hw operation for sw
+ * @trigger_pending: kickoff prepare is in progress hw operation for sw
* controlled interfaces: DSI cmd mode and WB interface
* are SW controlled
* @ctx : ctl path ctx pointer
@@ -86,7 +87,7 @@ struct dpu_hw_ctl_ops {
void (*trigger_pending)(struct dpu_hw_ctl *ctx);
/**
- * Clear the value of the cached pending_flush_mask
+ * @clear_pending_flush: Clear the value of the cached pending_flush_mask
* No effect on hardware.
* Required to be implemented.
* @ctx : ctl path ctx pointer
@@ -94,14 +95,15 @@ struct dpu_hw_ctl_ops {
void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
/**
- * Query the value of the cached pending_flush_mask
+ * @get_pending_flush: Query the value of the cached pending_flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
*/
u32 (*get_pending_flush)(struct dpu_hw_ctl *ctx);
/**
- * OR in the given flushbits to the cached pending_flush_mask
+ * @update_pending_flush: OR in the given flushbits to the cached
+ * pending_flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @flushbits : module flushmask
@@ -110,7 +112,8 @@ struct dpu_hw_ctl_ops {
u32 flushbits);
/**
- * OR in the given flushbits to the cached pending_(wb_)flush_mask
+ * @update_pending_flush_wb: OR in the given flushbits to the
+ * cached pending_(wb_)flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : writeback block index
@@ -119,7 +122,8 @@ struct dpu_hw_ctl_ops {
enum dpu_wb blk);
/**
- * OR in the given flushbits to the cached pending_(cwb_)flush_mask
+ * @update_pending_flush_cwb: OR in the given flushbits to the
+ * cached pending_(cwb_)flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : concurrent writeback block index
@@ -128,7 +132,8 @@ struct dpu_hw_ctl_ops {
enum dpu_cwb blk);
/**
- * OR in the given flushbits to the cached pending_(intf_)flush_mask
+ * @update_pending_flush_intf: OR in the given flushbits to the
+ * cached pending_(intf_)flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : interface block index
@@ -137,7 +142,8 @@ struct dpu_hw_ctl_ops {
enum dpu_intf blk);
/**
- * OR in the given flushbits to the cached pending_(periph_)flush_mask
+ * @update_pending_flush_periph: OR in the given flushbits to the
+ * cached pending_(periph_)flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : interface block index
@@ -146,7 +152,8 @@ struct dpu_hw_ctl_ops {
enum dpu_intf blk);
/**
- * OR in the given flushbits to the cached pending_(merge_3d_)flush_mask
+ * @update_pending_flush_merge_3d: OR in the given flushbits to the
+ * cached pending_(merge_3d_)flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : interface block index
@@ -155,7 +162,8 @@ struct dpu_hw_ctl_ops {
enum dpu_merge_3d blk);
/**
- * OR in the given flushbits to the cached pending_flush_mask
+ * @update_pending_flush_sspp: OR in the given flushbits to the
+ * cached pending_flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : SSPP block index
@@ -164,7 +172,8 @@ struct dpu_hw_ctl_ops {
enum dpu_sspp blk);
/**
- * OR in the given flushbits to the cached pending_flush_mask
+ * @update_pending_flush_mixer: OR in the given flushbits to the
+ * cached pending_flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : LM block index
@@ -173,7 +182,8 @@ struct dpu_hw_ctl_ops {
enum dpu_lm blk);
/**
- * OR in the given flushbits to the cached pending_flush_mask
+ * @update_pending_flush_dspp: OR in the given flushbits to the
+ * cached pending_flush_mask.
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : DSPP block index
@@ -183,7 +193,8 @@ struct dpu_hw_ctl_ops {
enum dpu_dspp blk, u32 dspp_sub_blk);
/**
- * OR in the given flushbits to the cached pending_(dsc_)flush_mask
+ * @update_pending_flush_dsc: OR in the given flushbits to the
+ * cached pending_(dsc_)flush_mask.
* No effect on hardware
* @ctx: ctl path ctx pointer
* @blk: interface block index
@@ -192,7 +203,8 @@ struct dpu_hw_ctl_ops {
enum dpu_dsc blk);
/**
- * OR in the given flushbits to the cached pending_(cdm_)flush_mask
+ * @update_pending_flush_cdm: OR in the given flushbits to the
+ * cached pending_(cdm_)flush_mask.
* No effect on hardware
* @ctx: ctl path ctx pointer
* @cdm_num: idx of cdm to be flushed
@@ -200,20 +212,20 @@ struct dpu_hw_ctl_ops {
void (*update_pending_flush_cdm)(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num);
/**
- * Write the value of the pending_flush_mask to hardware
+ * @trigger_flush: Write the value of the pending_flush_mask to hardware
* @ctx : ctl path ctx pointer
*/
void (*trigger_flush)(struct dpu_hw_ctl *ctx);
/**
- * Read the value of the flush register
+ * @get_flush_register: Read the value of the flush register
* @ctx : ctl path ctx pointer
* @Return: value of the ctl flush register.
*/
u32 (*get_flush_register)(struct dpu_hw_ctl *ctx);
/**
- * Setup ctl_path interface config
+ * @setup_intf_cfg: Setup ctl_path interface config
* @ctx
* @cfg : interface config structure pointer
*/
@@ -221,17 +233,20 @@ struct dpu_hw_ctl_ops {
struct dpu_hw_intf_cfg *cfg);
/**
- * reset ctl_path interface config
+ * @reset_intf_cfg: reset ctl_path interface config
* @ctx : ctl path ctx pointer
* @cfg : interface config structure pointer
*/
void (*reset_intf_cfg)(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg);
+ /**
+ * @reset: reset function for this ctl type
+ */
int (*reset)(struct dpu_hw_ctl *c);
- /*
- * wait_reset_status - checks ctl reset status
+ /**
+ * @wait_reset_status: checks ctl reset status
* @ctx : ctl path ctx pointer
*
* This function checks the ctl reset status bit.
@@ -242,13 +257,13 @@ struct dpu_hw_ctl_ops {
int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
/**
- * Set all blend stages to disabled
+ * @clear_all_blendstages: Set all blend stages to disabled
* @ctx : ctl path ctx pointer
*/
void (*clear_all_blendstages)(struct dpu_hw_ctl *ctx);
/**
- * Configure layer mixer to pipe configuration
+ * @setup_blendstage: Configure layer mixer to pipe configuration
* @ctx : ctl path ctx pointer
* @lm : layer mixer enumeration
* @cfg : blend stage configuration
@@ -256,11 +271,16 @@ struct dpu_hw_ctl_ops {
void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
+ /**
+ * @set_active_fetch_pipes: Set active pipes attached to this CTL
+ * @ctx: ctl path ctx pointer
+ * @fetch_active: bitmap of enum dpu_sspp
+ */
void (*set_active_fetch_pipes)(struct dpu_hw_ctl *ctx,
unsigned long *fetch_active);
/**
- * Set active pipes attached to this CTL
+ * @set_active_pipes: Set active pipes attached to this CTL
* @ctx: ctl path ctx pointer
* @active_pipes: bitmap of enum dpu_sspp
*/
@@ -268,13 +288,12 @@ struct dpu_hw_ctl_ops {
unsigned long *active_pipes);
/**
- * Set active layer mixers attached to this CTL
+ * @set_active_lms: Set active layer mixers attached to this CTL
* @ctx: ctl path ctx pointer
* @active_lms: bitmap of enum dpu_lm
*/
void (*set_active_lms)(struct dpu_hw_ctl *ctx,
unsigned long *active_lms);
-
};
/**
@@ -289,6 +308,9 @@ struct dpu_hw_ctl_ops {
* @pending_intf_flush_mask: pending INTF flush
* @pending_wb_flush_mask: pending WB flush
* @pending_cwb_flush_mask: pending CWB flush
+ * @pending_periph_flush_mask: pending PERIPH flush
+ * @pending_merge_3d_flush_mask: pending MERGE 3D flush
+ * @pending_dspp_flush_mask: pending DSPP flush
* @pending_dsc_flush_mask: pending DSC flush
* @pending_cdm_flush_mask: pending CDM flush
* @mdss_ver: MDSS revision information
@@ -320,7 +342,7 @@ struct dpu_hw_ctl {
};
/**
- * dpu_hw_ctl - convert base object dpu_hw_base to container
+ * to_dpu_hw_ctl - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h
index 96b6edf6b2bb..ed7bfcee7f1c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h
@@ -28,7 +28,6 @@ struct dpu_hw_cwb_setup_cfg {
};
/**
- *
* struct dpu_hw_cwb_ops : Interface to the cwb hw driver functions
* @config_cwb: configure CWB mux
*/
@@ -54,7 +53,7 @@ struct dpu_hw_cwb {
};
/**
- * dpu_hw_cwb - convert base object dpu_hw_base to container
+ * to_dpu_hw_cwb - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
index cc7cc6f6f7cd..39d93b9df051 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -21,13 +21,13 @@ struct dpu_hw_dsc;
*/
struct dpu_hw_dsc_ops {
/**
- * dsc_disable - disable dsc
+ * @dsc_disable: disable dsc
* @hw_dsc: Pointer to dsc context
*/
void (*dsc_disable)(struct dpu_hw_dsc *hw_dsc);
/**
- * dsc_config - configures dsc encoder
+ * @dsc_config: configures dsc encoder
* @hw_dsc: Pointer to dsc context
* @dsc: panel dsc parameters
* @mode: dsc topology mode to be set
@@ -39,13 +39,17 @@ struct dpu_hw_dsc_ops {
u32 initial_lines);
/**
- * dsc_config_thresh - programs panel thresholds
+ * @dsc_config_thresh: programs panel thresholds
* @hw_dsc: Pointer to dsc context
* @dsc: panel dsc parameters
*/
void (*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc,
struct drm_dsc_config *dsc);
+ /**
+ * @dsc_bind_pingpong_blk: binds pixel output from a DSC block
+ * to a pingpong block
+ */
void (*dsc_bind_pingpong_blk)(struct dpu_hw_dsc *hw_dsc,
enum dpu_pingpong pp);
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
index 45c26cd49fa3..722b0f482e9b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
@@ -22,7 +22,7 @@ struct dpu_hw_pcc_coeff {
};
/**
- * struct dpu_hw_pcc - pcc feature structure
+ * struct dpu_hw_pcc_cfg - pcc feature structure
* @r: red coefficients.
* @g: green coefficients.
* @b: blue coefficients.
@@ -40,7 +40,7 @@ struct dpu_hw_pcc_cfg {
*/
struct dpu_hw_dspp_ops {
/**
- * setup_pcc - setup dspp pcc
+ * @setup_pcc: setup dspp pcc
* @ctx: Pointer to dspp context
* @cfg: Pointer to configuration
*/
@@ -69,7 +69,7 @@ struct dpu_hw_dspp {
};
/**
- * dpu_hw_dspp - convert base object dpu_hw_base to container
+ * to_dpu_hw_dspp - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index f31067a9aaf1..5a19cd74fa94 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -57,11 +57,11 @@ struct dpu_hw_intf_cmd_mode_cfg {
/**
* struct dpu_hw_intf_ops : Interface to the interface Hw driver functions
* Assumption is these functions will be called after clocks are enabled
- * @ setup_timing_gen : programs the timing engine
- * @ setup_prog_fetch : enables/disables the programmable fetch logic
- * @ enable_timing: enable/disable timing engine
- * @ get_status: returns if timing engine is enabled or not
- * @ get_line_count: reads current vertical line counter
+ * @setup_timing_gen : programs the timing engine
+ * @setup_prg_fetch : enables/disables the programmable fetch logic
+ * @enable_timing: enable/disable timing engine
+ * @get_status: returns if timing engine is enabled or not
+ * @get_line_count: reads current vertical line counter
* @bind_pingpong_blk: enable/disable the connection with pingpong which will
* feed pixels to this interface
* @setup_misr: enable/disable MISR
@@ -70,12 +70,9 @@ struct dpu_hw_intf_cmd_mode_cfg {
* pointer and programs the tear check configuration
* @disable_tearcheck: Disables tearcheck block
* @connect_external_te: Read, modify, write to either set or clear listening to external TE
- * Return: 1 if TE was originally connected, 0 if not, or -ERROR
- * @get_vsync_info: Provides the programmed and current line_count
- * @setup_autorefresh: Configure and enable the autorefresh config
- * @get_autorefresh: Retrieve autorefresh config from hardware
- * Return: 0 on success, -ETIMEDOUT on timeout
+ * Returns 1 if TE was originally connected, 0 if not, or -ERROR
* @vsync_sel: Select vsync signal for tear-effect configuration
+ * @disable_autorefresh: Disable autorefresh if enabled
* @program_intf_cmd_cfg: Program the DPU to interface datapath for command mode
*/
struct dpu_hw_intf_ops {
@@ -109,9 +106,6 @@ struct dpu_hw_intf_ops {
void (*vsync_sel)(struct dpu_hw_intf *intf, enum dpu_vsync_source vsync_source);
- /**
- * Disable autorefresh if enabled
- */
void (*disable_autorefresh)(struct dpu_hw_intf *intf, uint32_t encoder_id, u16 vdisplay);
void (*program_intf_cmd_cfg)(struct dpu_hw_intf *intf,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 1b9ecd082d7f..ecbb77711d83 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -25,39 +25,38 @@ struct dpu_hw_color3_cfg {
};
/**
- *
* struct dpu_hw_lm_ops : Interface to the mixer Hw driver functions
* Assumption is these functions will be called after clocks are enabled
*/
struct dpu_hw_lm_ops {
- /*
- * Sets up mixer output width and height
+ /**
+ * @setup_mixer_out: Sets up mixer output width and height
* and border color if enabled
*/
void (*setup_mixer_out)(struct dpu_hw_mixer *ctx,
struct dpu_hw_mixer_cfg *cfg);
- /*
- * Alpha blending configuration
+ /**
+ * @setup_blend_config: Alpha blending configuration
* for the specified stage
*/
void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage,
uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
- /*
- * Alpha color component selection from either fg or bg
+ /**
+ * @setup_alpha_out: Alpha color component selection from either fg or bg
*/
void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
/**
- * Clear layer mixer to pipe configuration
+ * @clear_all_blendstages: Clear layer mixer to pipe configuration
* @ctx : mixer ctx pointer
* Returns: 0 on success or -error
*/
int (*clear_all_blendstages)(struct dpu_hw_mixer *ctx);
/**
- * Configure layer mixer to pipe configuration
+ * @setup_blendstage: Configure layer mixer to pipe configuration
* @ctx : mixer ctx pointer
* @lm : layer mixer enumeration
* @stage_cfg : blend stage configuration
@@ -67,19 +66,19 @@ struct dpu_hw_lm_ops {
struct dpu_hw_stage_cfg *stage_cfg);
/**
- * setup_border_color : enable/disable border color
+ * @setup_border_color : enable/disable border color
*/
void (*setup_border_color)(struct dpu_hw_mixer *ctx,
struct dpu_mdss_color *color,
u8 border_en);
/**
- * setup_misr: Enable/disable MISR
+ * @setup_misr: Enable/disable MISR
*/
void (*setup_misr)(struct dpu_hw_mixer *ctx);
/**
- * collect_misr: Read MISR signature
+ * @collect_misr: Read MISR signature
*/
int (*collect_misr)(struct dpu_hw_mixer *ctx, u32 *misr_value);
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 31451241f083..046b683d4c66 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -34,7 +34,7 @@
#define DPU_MAX_PLANES 4
#endif
-#define STAGES_PER_PLANE 2
+#define STAGES_PER_PLANE 1
#define PIPES_PER_STAGE 2
#define PIPES_PER_PLANE (PIPES_PER_STAGE * STAGES_PER_PLANE)
#ifndef DPU_MAX_DE_CURVES
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
index 6833c0207523..b57f88187148 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
@@ -12,7 +12,6 @@
struct dpu_hw_merge_3d;
/**
- *
* struct dpu_hw_merge_3d_ops : Interface to the merge_3d Hw driver functions
* Assumption is these functions will be called after clocks are enabled
* @setup_3d_mode : enable 3D merge
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index dd99e1c21a1e..effd012d864a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -34,7 +34,6 @@ struct dpu_hw_dither_cfg {
};
/**
- *
* struct dpu_hw_pingpong_ops : Interface to the pingpong Hw driver functions
* Assumption is these functions will be called after clocks are enabled
* @enable_tearcheck: program and enable tear check block
@@ -44,51 +43,52 @@ struct dpu_hw_dither_cfg {
*/
struct dpu_hw_pingpong_ops {
/**
- * enables vysnc generation and sets up init value of
+ * @enable_tearcheck: enables vsync generation and sets up init value of
* read pointer and programs the tear check configuration
*/
int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
struct dpu_hw_tear_check *cfg);
/**
- * disables tear check block
+ * @disable_tearcheck: disables tear check block
*/
int (*disable_tearcheck)(struct dpu_hw_pingpong *pp);
/**
- * read, modify, write to either set or clear listening to external TE
+ * @connect_external_te: read, modify, write to either set or clear
+ * listening to external TE
* @Return: 1 if TE was originally connected, 0 if not, or -ERROR
*/
int (*connect_external_te)(struct dpu_hw_pingpong *pp,
bool enable_external_te);
/**
- * Obtain current vertical line counter
+ * @get_line_count: Obtain current vertical line counter
*/
u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
/**
- * Disable autorefresh if enabled
+ * @disable_autorefresh: Disable autorefresh if enabled
*/
void (*disable_autorefresh)(struct dpu_hw_pingpong *pp, uint32_t encoder_id, u16 vdisplay);
/**
- * Setup dither matix for pingpong block
+ * @setup_dither: Setup dither matrix for pingpong block
*/
void (*setup_dither)(struct dpu_hw_pingpong *pp,
struct dpu_hw_dither_cfg *cfg);
/**
- * Enable DSC
+ * @enable_dsc: Enable DSC
*/
int (*enable_dsc)(struct dpu_hw_pingpong *pp);
/**
- * Disable DSC
+ * @disable_dsc: Disable DSC
*/
void (*disable_dsc)(struct dpu_hw_pingpong *pp);
/**
- * Setup DSC
+ * @setup_dsc: Setup DSC
*/
int (*setup_dsc)(struct dpu_hw_pingpong *pp);
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index bdac5c04bf79..3822094f85bc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -14,7 +14,7 @@ struct dpu_hw_sspp;
#define DPU_SSPP_MAX_PITCH_SIZE 0xffff
-/**
+/*
* Flags
*/
#define DPU_SSPP_FLIP_LR BIT(0)
@@ -23,7 +23,7 @@ struct dpu_hw_sspp;
#define DPU_SSPP_ROT_90 BIT(3)
#define DPU_SSPP_SOLID_FILL BIT(4)
-/**
+/*
* Component indices
*/
enum {
@@ -36,9 +36,10 @@ enum {
};
/**
- * DPU_SSPP_RECT_SOLO - multirect disabled
- * DPU_SSPP_RECT_0 - rect0 of a multirect pipe
- * DPU_SSPP_RECT_1 - rect1 of a multirect pipe
+ * enum dpu_sspp_multirect_index - multirect mode
+ * @DPU_SSPP_RECT_SOLO: multirect disabled
+ * @DPU_SSPP_RECT_0: rect0 of a multirect pipe
+ * @DPU_SSPP_RECT_1: rect1 of a multirect pipe
*
* Note: HW supports multirect with either RECT0 or
* RECT1. Considering no benefit of such configs over
@@ -143,7 +144,7 @@ struct dpu_hw_pixel_ext {
* struct dpu_sw_pipe_cfg : software pipe configuration
* @src_rect: src ROI, caller takes into account the different operations
* such as decimation, flip etc to program this field
- * @dest_rect: destination ROI.
+ * @dst_rect: destination ROI.
* @rotation: simplified drm rotation hint
*/
struct dpu_sw_pipe_cfg {
@@ -165,8 +166,8 @@ struct dpu_hw_pipe_ts_cfg {
/**
* struct dpu_sw_pipe - software pipe description
* @sspp: backing SSPP pipe
- * @index: index of the rectangle of SSPP
- * @mode: parallel or time multiplex multirect mode
+ * @multirect_index: index of the rectangle of SSPP
+ * @multirect_mode: parallel or time multiplex multirect mode
*/
struct dpu_sw_pipe {
struct dpu_hw_sspp *sspp;
@@ -181,7 +182,7 @@ struct dpu_sw_pipe {
*/
struct dpu_hw_sspp_ops {
/**
- * setup_format - setup pixel format cropping rectangle, flip
+ * @setup_format: setup pixel format cropping rectangle, flip
* @pipe: Pointer to software pipe context
* @cfg: Pointer to pipe config structure
* @flags: Extra flags for format config
@@ -190,7 +191,7 @@ struct dpu_hw_sspp_ops {
const struct msm_format *fmt, u32 flags);
/**
- * setup_rects - setup pipe ROI rectangles
+ * @setup_rects: setup pipe ROI rectangles
* @pipe: Pointer to software pipe context
* @cfg: Pointer to pipe config structure
*/
@@ -198,7 +199,7 @@ struct dpu_hw_sspp_ops {
struct dpu_sw_pipe_cfg *cfg);
/**
- * setup_pe - setup pipe pixel extension
+ * @setup_pe: setup pipe pixel extension
* @ctx: Pointer to pipe context
* @pe_ext: Pointer to pixel ext settings
*/
@@ -206,7 +207,7 @@ struct dpu_hw_sspp_ops {
struct dpu_hw_pixel_ext *pe_ext);
/**
- * setup_sourceaddress - setup pipe source addresses
+ * @setup_sourceaddress: setup pipe source addresses
* @pipe: Pointer to software pipe context
* @layout: format layout information for programming buffer to hardware
*/
@@ -214,14 +215,14 @@ struct dpu_hw_sspp_ops {
struct dpu_hw_fmt_layout *layout);
/**
- * setup_csc - setup color space coversion
+ * @setup_csc: setup color space conversion
* @ctx: Pointer to pipe context
* @data: Pointer to config structure
*/
void (*setup_csc)(struct dpu_hw_sspp *ctx, const struct dpu_csc_cfg *data);
/**
- * setup_solidfill - enable/disable colorfill
+ * @setup_solidfill: enable/disable colorfill
* @pipe: Pointer to software pipe context
* @const_color: Fill color value
* @flags: Pipe flags
@@ -229,23 +230,22 @@ struct dpu_hw_sspp_ops {
void (*setup_solidfill)(struct dpu_sw_pipe *pipe, u32 color);
/**
- * setup_multirect - setup multirect configuration
+ * @setup_multirect: setup multirect configuration
* @pipe: Pointer to software pipe context
*/
void (*setup_multirect)(struct dpu_sw_pipe *pipe);
/**
- * setup_sharpening - setup sharpening
+ * @setup_sharpening: setup sharpening
* @ctx: Pointer to pipe context
* @cfg: Pointer to config structure
*/
void (*setup_sharpening)(struct dpu_hw_sspp *ctx,
struct dpu_hw_sharp_cfg *cfg);
-
/**
- * setup_qos_lut - setup QoS LUTs
+ * @setup_qos_lut: setup QoS LUTs
* @ctx: Pointer to pipe context
* @cfg: LUT configuration
*/
@@ -253,7 +253,7 @@ struct dpu_hw_sspp_ops {
struct dpu_hw_qos_cfg *cfg);
/**
- * setup_qos_ctrl - setup QoS control
+ * @setup_qos_ctrl: setup QoS control
* @ctx: Pointer to pipe context
* @danger_safe_en: flags controlling enabling of danger/safe QoS/LUT
*/
@@ -261,7 +261,7 @@ struct dpu_hw_sspp_ops {
bool danger_safe_en);
/**
- * setup_clk_force_ctrl - setup clock force control
+ * @setup_clk_force_ctrl: setup clock force control
* @ctx: Pointer to pipe context
* @enable: enable clock force if true
*/
@@ -269,7 +269,7 @@ struct dpu_hw_sspp_ops {
bool enable);
/**
- * setup_histogram - setup histograms
+ * @setup_histogram: setup histograms
* @ctx: Pointer to pipe context
* @cfg: Pointer to histogram configuration
*/
@@ -277,7 +277,7 @@ struct dpu_hw_sspp_ops {
void *cfg);
/**
- * setup_scaler - setup scaler
+ * @setup_scaler: setup scaler
* @scaler3_cfg: Pointer to scaler configuration
* @format: pixel format parameters
*/
@@ -286,7 +286,7 @@ struct dpu_hw_sspp_ops {
const struct msm_format *format);
/**
- * setup_cdp - setup client driven prefetch
+ * @setup_cdp: setup client driven prefetch
* @pipe: Pointer to software pipe context
* @fmt: format used by the sw pipe
* @enable: whether the CDP should be enabled for this pipe
@@ -303,6 +303,7 @@ struct dpu_hw_sspp_ops {
* @ubwc: UBWC configuration data
* @idx: pipe index
* @cap: pointer to layer_cfg
+ * @mdss_ver: MDSS version info to use for feature checks
* @ops: pointer to operations possible for this pipe
*/
struct dpu_hw_sspp {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index 04efdcd21ceb..80279d87c2cd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -77,12 +77,11 @@ enum dpu_dp_phy_sel {
/**
* struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
* Assumption is these functions will be called after clocks are enabled.
- * @setup_split_pipe : Programs the pipe control registers
- * @setup_pp_split : Programs the pp split control registers
- * @setup_traffic_shaper : programs traffic shaper control
*/
struct dpu_hw_mdp_ops {
- /** setup_split_pipe() : Registers are not double buffered, thisk
+ /**
+ * @setup_split_pipe: Programs the pipe control registers.
+ * Registers are not double buffered; this
* function should be called before timing control enable
* @mdp : mdp top context driver
* @cfg : upper and lower part of pipe configuration
@@ -91,7 +90,7 @@ struct dpu_hw_mdp_ops {
struct split_pipe_cfg *p);
/**
- * setup_traffic_shaper() : Setup traffic shaper control
+ * @setup_traffic_shaper: programs traffic shaper control.
* @mdp : mdp top context driver
* @cfg : traffic shaper configuration
*/
@@ -99,7 +98,7 @@ struct dpu_hw_mdp_ops {
struct traffic_shaper_cfg *cfg);
/**
- * setup_clk_force_ctrl - set clock force control
+ * @setup_clk_force_ctrl: set clock force control
* @mdp: mdp top context driver
* @clk_ctrl: clock to be controlled
* @enable: force on enable
@@ -109,7 +108,7 @@ struct dpu_hw_mdp_ops {
enum dpu_clk_ctrl_type clk_ctrl, bool enable);
/**
- * get_danger_status - get danger status
+ * @get_danger_status: get danger status
* @mdp: mdp top context driver
* @status: Pointer to danger safe status
*/
@@ -117,7 +116,7 @@ struct dpu_hw_mdp_ops {
struct dpu_danger_safe_status *status);
/**
- * setup_vsync_source - setup vsync source configuration details
+ * @setup_vsync_source: setup vsync source configuration details
* @mdp: mdp top context driver
* @cfg: vsync source selection configuration
*/
@@ -125,7 +124,7 @@ struct dpu_hw_mdp_ops {
struct dpu_vsync_source_cfg *cfg);
/**
- * get_safe_status - get safe status
+ * @get_safe_status: get safe status
* @mdp: mdp top context driver
* @status: Pointer to danger safe status
*/
@@ -133,14 +132,14 @@ struct dpu_hw_mdp_ops {
struct dpu_danger_safe_status *status);
/**
- * dp_phy_intf_sel - configure intf to phy mapping
+ * @dp_phy_intf_sel: configure intf to phy mapping
* @mdp: mdp top context driver
* @phys: list of phys the DP interfaces should be connected to. 0 disables the INTF.
*/
void (*dp_phy_intf_sel)(struct dpu_hw_mdp *mdp, enum dpu_dp_phy_sel phys[2]);
/**
- * intf_audio_select - select the external interface for audio
+ * @intf_audio_select: select the external interface for audio
* @mdp: mdp top context driver
*/
void (*intf_audio_select)(struct dpu_hw_mdp *mdp);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
index 285121ec804c..9ac49448e432 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -17,7 +17,7 @@ struct dpu_hw_vbif;
*/
struct dpu_hw_vbif_ops {
/**
- * set_limit_conf - set transaction limit config
+ * @set_limit_conf: set transaction limit config
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @rd: true for read limit; false for write limit
@@ -27,7 +27,7 @@ struct dpu_hw_vbif_ops {
u32 xin_id, bool rd, u32 limit);
/**
- * get_limit_conf - get transaction limit config
+ * @get_limit_conf: get transaction limit config
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @rd: true for read limit; false for write limit
@@ -37,7 +37,7 @@ struct dpu_hw_vbif_ops {
u32 xin_id, bool rd);
/**
- * set_halt_ctrl - set halt control
+ * @set_halt_ctrl: set halt control
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @enable: halt control enable
@@ -46,7 +46,7 @@ struct dpu_hw_vbif_ops {
u32 xin_id, bool enable);
/**
- * get_halt_ctrl - get halt control
+ * @get_halt_ctrl: get halt control
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @return: halt control enable
@@ -55,7 +55,7 @@ struct dpu_hw_vbif_ops {
u32 xin_id);
/**
- * set_qos_remap - set QoS priority remap
+ * @set_qos_remap: set QoS priority remap
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @level: priority level
@@ -65,7 +65,7 @@ struct dpu_hw_vbif_ops {
u32 xin_id, u32 level, u32 remap_level);
/**
- * set_mem_type - set memory type
+ * @set_mem_type: set memory type
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @value: memory type value
@@ -74,7 +74,7 @@ struct dpu_hw_vbif_ops {
u32 xin_id, u32 value);
/**
- * clear_errors - clear any vbif errors
+ * @clear_errors: clear any vbif errors
* This function clears any detected pending/source errors
* on the VBIF interface, and optionally returns the detected
* error mask(s).
@@ -86,7 +86,7 @@ struct dpu_hw_vbif_ops {
u32 *pnd_errors, u32 *src_errors);
/**
- * set_write_gather_en - set write_gather enable
+ * @set_write_gather_en: set write_gather enable
* @vbif: vbif context driver
* @xin_id: client interface identifier
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
index ee5e5ab786e1..cfdbb5bb2a0f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
@@ -22,11 +22,11 @@ struct dpu_hw_wb_cfg {
};
/**
- *
* struct dpu_hw_wb_ops : Interface to the wb hw driver functions
* Assumption is these functions will be called after clocks are enabled
* @setup_outaddress: setup output address from the writeback job
* @setup_outformat: setup output format of writeback block from writeback job
+ * @setup_roi: setup ROI (Region of Interest) parameters
* @setup_qos_lut: setup qos LUT for writeback block based on input
* @setup_cdp: setup chroma down prefetch block for writeback block
* @setup_clk_force_ctrl: setup clock force control
@@ -61,7 +61,7 @@ struct dpu_hw_wb_ops {
* struct dpu_hw_wb : WB driver object
* @hw: block hardware details
* @idx: hardware index number within type
- * @wb_hw_caps: hardware capabilities
+ * @caps: hardware capabilities
* @ops: function pointers
*/
struct dpu_hw_wb {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index d07a6ab6e7ee..9b7a8b46bfa9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -826,12 +826,8 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
struct dpu_sw_pipe_cfg *pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg;
- struct dpu_sw_pipe_cfg init_pipe_cfg;
struct drm_rect fb_rect = { 0 };
- const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
uint32_t max_linewidth;
- u32 num_lm;
- int stage_id, num_stages;
min_scale = FRAC_16_16(1, MAX_UPSCALE_RATIO);
max_scale = MAX_DOWNSCALE_RATIO << 16;
@@ -854,10 +850,13 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
return -EINVAL;
}
- num_lm = dpu_crtc_get_num_lm(crtc_state);
-
+ /* move the assignment here, to ease handling of other pairs later */
+ pipe_cfg = &pstate->pipe_cfg[0];
+ r_pipe_cfg = &pstate->pipe_cfg[1];
/* state->src is 16.16, src_rect is not */
- drm_rect_fp_to_int(&init_pipe_cfg.src_rect, &new_plane_state->src);
+ drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src);
+
+ pipe_cfg->dst_rect = new_plane_state->dst;
fb_rect.x2 = new_plane_state->fb->width;
fb_rect.y2 = new_plane_state->fb->height;
@@ -882,94 +881,35 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
max_linewidth = pdpu->catalog->caps->max_linewidth;
- drm_rect_rotate(&init_pipe_cfg.src_rect,
+ drm_rect_rotate(&pipe_cfg->src_rect,
new_plane_state->fb->width, new_plane_state->fb->height,
new_plane_state->rotation);
- /*
- * We have 1 mixer pair cfg for 1:1:1 and 2:2:1 topology, 2 mixer pair
- * configs for left and right half screen in case of 4:4:2 topology.
- * But we may have 2 rect to split wide plane that exceeds limit with 1
- * config for 2:2:1. So need to handle both wide plane splitting, and
- * two halves of screen splitting for quad-pipe case. Check dest
- * rectangle left/right clipping first, then check wide rectangle
- * splitting in every half next.
- */
- num_stages = (num_lm + 1) / 2;
- /* iterate mixer configs for this plane, to separate left/right with the id */
- for (stage_id = 0; stage_id < num_stages; stage_id++) {
- struct drm_rect mixer_rect = {
- .x1 = stage_id * mode->hdisplay / num_stages,
- .y1 = 0,
- .x2 = (stage_id + 1) * mode->hdisplay / num_stages,
- .y2 = mode->vdisplay
- };
- int cfg_idx = stage_id * PIPES_PER_STAGE;
-
- pipe_cfg = &pstate->pipe_cfg[cfg_idx];
- r_pipe_cfg = &pstate->pipe_cfg[cfg_idx + 1];
-
- drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src);
- pipe_cfg->dst_rect = new_plane_state->dst;
-
- DPU_DEBUG_PLANE(pdpu, "checking src " DRM_RECT_FMT
- " vs clip window " DRM_RECT_FMT "\n",
- DRM_RECT_ARG(&pipe_cfg->src_rect),
- DRM_RECT_ARG(&mixer_rect));
-
- /*
- * If this plane does not fall into mixer rect, check next
- * mixer rect.
- */
- if (!drm_rect_clip_scaled(&pipe_cfg->src_rect,
- &pipe_cfg->dst_rect,
- &mixer_rect)) {
- memset(pipe_cfg, 0, 2 * sizeof(struct dpu_sw_pipe_cfg));
-
- continue;
+ if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) ||
+ _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) {
+ if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) {
+ DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+ DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
+ return -E2BIG;
}
- pipe_cfg->dst_rect.x1 -= mixer_rect.x1;
- pipe_cfg->dst_rect.x2 -= mixer_rect.x1;
-
- DPU_DEBUG_PLANE(pdpu, "Got clip src:" DRM_RECT_FMT " dst: " DRM_RECT_FMT "\n",
- DRM_RECT_ARG(&pipe_cfg->src_rect), DRM_RECT_ARG(&pipe_cfg->dst_rect));
-
- /* Split wide rect into 2 rect */
- if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) ||
- _dpu_plane_calc_clk(mode, pipe_cfg) > max_mdp_clk_rate) {
-
- if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) {
- DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
- DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
- return -E2BIG;
- }
-
- memcpy(r_pipe_cfg, pipe_cfg, sizeof(struct dpu_sw_pipe_cfg));
- pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1;
- pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1;
- r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2;
- r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
- DPU_DEBUG_PLANE(pdpu, "Split wide plane into:"
- DRM_RECT_FMT " and " DRM_RECT_FMT "\n",
- DRM_RECT_ARG(&pipe_cfg->src_rect),
- DRM_RECT_ARG(&r_pipe_cfg->src_rect));
- } else {
- memset(r_pipe_cfg, 0, sizeof(struct dpu_sw_pipe_cfg));
- }
+ *r_pipe_cfg = *pipe_cfg;
+ pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1;
+ pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1;
+ r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2;
+ r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
+ } else {
+ memset(r_pipe_cfg, 0, sizeof(*r_pipe_cfg));
+ }
- drm_rect_rotate_inv(&pipe_cfg->src_rect,
- new_plane_state->fb->width,
- new_plane_state->fb->height,
+ drm_rect_rotate_inv(&pipe_cfg->src_rect,
+ new_plane_state->fb->width, new_plane_state->fb->height,
+ new_plane_state->rotation);
+ if (drm_rect_width(&r_pipe_cfg->src_rect) != 0)
+ drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
+ new_plane_state->fb->width, new_plane_state->fb->height,
new_plane_state->rotation);
- if (drm_rect_width(&r_pipe_cfg->src_rect) != 0)
- drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
- new_plane_state->fb->width,
- new_plane_state->fb->height,
- new_plane_state->rotation);
- }
-
pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state);
return 0;
@@ -1045,17 +985,20 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
drm_atomic_get_new_plane_state(state, plane);
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
- struct dpu_sw_pipe *pipe;
- struct dpu_sw_pipe_cfg *pipe_cfg;
- int ret = 0, i;
+ struct dpu_sw_pipe *pipe = &pstate->pipe[0];
+ struct dpu_sw_pipe *r_pipe = &pstate->pipe[1];
+ struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg[0];
+ struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->pipe_cfg[1];
+ int ret = 0;
- for (i = 0; i < PIPES_PER_PLANE; i++) {
- pipe = &pstate->pipe[i];
- pipe_cfg = &pstate->pipe_cfg[i];
- if (!drm_rect_width(&pipe_cfg->src_rect))
- continue;
- DPU_DEBUG_PLANE(pdpu, "pipe %d is in use, validate it\n", i);
- ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg,
+ ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg,
+ &crtc_state->adjusted_mode,
+ new_plane_state);
+ if (ret)
+ return ret;
+
+ if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) {
+ ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg,
&crtc_state->adjusted_mode,
new_plane_state);
if (ret)
diff --git a/drivers/gpu/drm/msm/disp/mdp_format.h b/drivers/gpu/drm/msm/disp/mdp_format.h
index a00d646ff4d4..915954bf5dc7 100644
--- a/drivers/gpu/drm/msm/disp/mdp_format.h
+++ b/drivers/gpu/drm/msm/disp/mdp_format.h
@@ -24,7 +24,7 @@ enum msm_format_flags {
#define MSM_FORMAT_FLAG_UNPACK_TIGHT BIT(MSM_FORMAT_FLAG_UNPACK_TIGHT_BIT)
#define MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB BIT(MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB_BIT)
-/**
+/*
* DPU HW,Component order color map
*/
enum {
@@ -37,6 +37,10 @@ enum {
/**
* struct msm_format: defines the format configuration
* @pixel_format: format fourcc
+ * @bpc_g_y: element bit widths: BPC for G or Y
+ * @bpc_b_cb: element bit widths: BPC for B or Cb
+ * @bpc_r_cr: element bit widths: BPC for R or Cr
+ * @bpc_a: element bit widths: BPC for the alpha channel
* @element: element color ordering
* @fetch_type: how the color components are packed in pixel format
* @chroma_sample: chroma sub-samplng type
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index 6dc0ff4f0f65..a90083fec856 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -12,7 +12,7 @@
#if defined(CONFIG_DEBUG_FS)
/**
- * msm_dp_debug_get() - configure and get the DisplayPlot debug module data
+ * msm_dp_debug_init() - configure and get the DisplayPort debug module data
*
* @dev: device instance of the caller
* @panel: instance of panel module
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 9a461ab2f32f..fd6443d2b6ce 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -18,6 +18,7 @@
/**
* msm_dp_bridge_detect - callback to determine if connector is connected
* @bridge: Pointer to drm bridge structure
+ * @connector: Pointer to drm connector structure
* Returns: Bridge's 'is connected' status
*/
static enum drm_connector_status
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index b1eb2de6d2a7..8460e4ad2d35 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -80,11 +80,11 @@ struct msm_dp_link {
};
/**
- * mdss_dp_test_bit_depth_to_bpp() - convert test bit depth to bpp
+ * msm_dp_link_bit_depth_to_bpp() - convert test bit depth to bpp
* @tbd: test bit depth
*
- * Returns the bits per pixel (bpp) to be used corresponding to the
- * git bit depth value. This function assumes that bit depth has
+ * Returns: the bits per pixel (bpp) to be used corresponding to the
+ * bit depth value. This function assumes that bit depth has
* already been validated.
*/
static inline u32 msm_dp_link_bit_depth_to_bpp(u32 tbd)
@@ -120,7 +120,8 @@ bool msm_dp_link_send_edid_checksum(struct msm_dp_link *msm_dp_link, u8 checksum
/**
* msm_dp_link_get() - get the functionalities of dp test module
- *
+ * @dev: kernel device structure
+ * @aux: DisplayPort AUX channel
*
* return: a pointer to msm_dp_link struct
*/
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 921a296852d4..177c1328fd99 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -63,9 +63,9 @@ void msm_dp_panel_disable_vsc_sdp(struct msm_dp_panel *msm_dp_panel);
/**
* is_link_rate_valid() - validates the link rate
- * @lane_rate: link rate requested by the sink
+ * @bw_code: link rate requested by the sink
*
- * Returns true if the requested link rate is supported.
+ * Returns: true if the requested link rate is supported.
*/
static inline bool is_link_rate_valid(u32 bw_code)
{
@@ -76,10 +76,10 @@ static inline bool is_link_rate_valid(u32 bw_code)
}
/**
- * msm_dp_link_is_lane_count_valid() - validates the lane count
+ * is_lane_count_valid() - validates the lane count
* @lane_count: lane count requested by the sink
*
- * Returns true if the requested lane count is supported.
+ * Returns: true if the requested lane count is supported.
*/
static inline bool is_lane_count_valid(u32 lane_count)
{
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index 148196375a0b..3317a485beef 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -16,34 +16,29 @@
* incrementing fence seqno at the end of each submit
*/
struct msm_fence_context {
+ /** @dev: the drm device */
struct drm_device *dev;
- /** name: human readable name for fence timeline */
+ /** @name: human readable name for fence timeline */
char name[32];
- /** context: see dma_fence_context_alloc() */
+ /** @context: see dma_fence_context_alloc() */
unsigned context;
- /** index: similar to context, but local to msm_fence_context's */
+ /** @index: similar to context, but local to msm_fence_context's */
unsigned index;
-
/**
- * last_fence:
- *
+ * @last_fence:
* Last assigned fence, incremented each time a fence is created
* on this fence context. If last_fence == completed_fence,
* there is no remaining pending work
*/
uint32_t last_fence;
-
/**
- * completed_fence:
- *
+ * @completed_fence:
* The last completed fence, updated from the CPU after interrupt
* from GPU
*/
uint32_t completed_fence;
-
/**
- * fenceptr:
- *
+ * @fenceptr:
* The address that the GPU directly writes with completed fence
* seqno. This can be ahead of completed_fence. We can peek at
* this to see if a fence has already signaled but the CPU hasn't
@@ -51,6 +46,9 @@ struct msm_fence_context {
*/
volatile uint32_t *fenceptr;
+ /**
+ * @spinlock: fence context spinlock
+ */
spinlock_t spinlock;
/*
@@ -59,18 +57,22 @@ struct msm_fence_context {
* don't queue, so maybe that is ok
*/
- /** next_deadline: Time of next deadline */
+ /** @next_deadline: Time of next deadline */
ktime_t next_deadline;
-
/**
- * next_deadline_fence:
- *
+ * @next_deadline_fence:
* Fence value for next pending deadline. The deadline timer is
* canceled when this fence is signaled.
*/
uint32_t next_deadline_fence;
-
+ /**
+ * @deadline_timer: tracks nearest deadline of a fence timeline and
+ * expires just before it.
+ */
struct hrtimer deadline_timer;
+ /**
+ * @deadline_work: work to do after deadline_timer expires
+ */
struct kthread_work deadline_work;
};
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 71d5238437eb..8f7c90167447 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -65,7 +65,7 @@ struct msm_vm_unmap_op {
};
/**
- * struct msm_vma_op - A MAP or UNMAP operation
+ * struct msm_vm_op - A MAP or UNMAP operation
*/
struct msm_vm_op {
/** @op: The operation type */
@@ -798,6 +798,9 @@ static const struct drm_sched_backend_ops msm_vm_bind_ops = {
* synchronous operations are supported. In a user managed VM, userspace
* handles virtual address allocation, and both async and sync operations
* are supported.
+ *
+ * Returns: pointer to the created &struct drm_gpuvm on success
+ * or an ERR_PTR(-errno) on failure.
*/
struct drm_gpuvm *
msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 2894fc118485..666cf499b7ec 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -116,15 +116,12 @@ struct msm_gpu_fault_info {
* struct msm_gpu_devfreq - devfreq related state
*/
struct msm_gpu_devfreq {
- /** devfreq: devfreq instance */
+ /** @devfreq: devfreq instance */
struct devfreq *devfreq;
-
- /** lock: lock for "suspended", "busy_cycles", and "time" */
+ /** @lock: lock for "suspended", "busy_cycles", and "time" */
struct mutex lock;
-
/**
- * idle_freq:
- *
+ * @idle_freq:
* Shadow frequency used while the GPU is idle. From the PoV of
* the devfreq governor, we are continuing to sample busyness and
* adjust frequency while the GPU is idle, but we use this shadow
@@ -132,43 +129,34 @@ struct msm_gpu_devfreq {
* it is inactive.
*/
unsigned long idle_freq;
-
/**
- * boost_constraint:
- *
+ * @boost_freq:
* A PM QoS constraint to boost min freq for a period of time
* until the boost expires.
*/
struct dev_pm_qos_request boost_freq;
-
/**
- * busy_cycles: Last busy counter value, for calculating elapsed busy
+ * @busy_cycles: Last busy counter value, for calculating elapsed busy
* cycles since last sampling period.
*/
u64 busy_cycles;
-
- /** time: Time of last sampling period. */
+ /** @time: Time of last sampling period. */
ktime_t time;
-
- /** idle_time: Time of last transition to idle: */
+ /** @idle_time: Time of last transition to idle. */
ktime_t idle_time;
-
/**
- * idle_work:
- *
+ * @idle_work:
* Used to delay clamping to idle freq on active->idle transition.
*/
struct msm_hrtimer_work idle_work;
-
/**
- * boost_work:
- *
+ * @boost_work:
* Used to reset the boost_constraint after the boost period has
* elapsed
*/
struct msm_hrtimer_work boost_work;
- /** suspended: tracks if we're suspended */
+ /** @suspended: tracks if we're suspended */
bool suspended;
};
@@ -358,57 +346,43 @@ struct msm_gpu_perfcntr {
struct msm_context {
/** @queuelock: synchronizes access to submitqueues list */
rwlock_t queuelock;
-
/** @submitqueues: list of &msm_gpu_submitqueue created by userspace */
struct list_head submitqueues;
-
/**
* @queueid:
- *
* Counter incremented each time a submitqueue is created, used to
* assign &msm_gpu_submitqueue.id
*/
int queueid;
-
/**
* @closed: The device file associated with this context has been closed.
- *
* Once the device is closed, any submits that have not been written
* to the ring buffer are no-op'd.
*/
bool closed;
-
/**
* @userspace_managed_vm:
- *
* Has userspace opted-in to userspace managed VM (ie. VM_BIND) via
* MSM_PARAM_EN_VM_BIND?
*/
bool userspace_managed_vm;
-
/**
* @vm:
- *
* The per-process GPU address-space. Do not access directly, use
* msm_context_vm().
*/
struct drm_gpuvm *vm;
-
- /** @kref: the reference count */
+ /** @ref: the reference count */
struct kref ref;
-
/**
* @seqno:
- *
* A unique per-process sequence number. Used to detect context
* switches, without relying on keeping a, potentially dangling,
* pointer to the previous context.
*/
int seqno;
-
/**
* @sysprof:
- *
* The value of MSM_PARAM_SYSPROF set by userspace. This is
* intended to be used by system profiling tools like Mesa's
* pps-producer (perfetto), and restricted to CAP_SYS_ADMIN.
@@ -423,40 +397,32 @@ struct msm_context {
* file is closed.
*/
int sysprof;
-
/**
* @comm: Overridden task comm, see MSM_PARAM_COMM
*
* Accessed under msm_gpu::lock
*/
char *comm;
-
/**
* @cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
*
* Accessed under msm_gpu::lock
*/
char *cmdline;
-
/**
- * @elapsed:
- *
+ * @elapsed_ns:
* The total (cumulative) elapsed time GPU was busy with rendering
* from this context in ns.
*/
uint64_t elapsed_ns;
-
/**
* @cycles:
- *
* The total (cumulative) GPU cycles elapsed attributed to this
* context.
*/
uint64_t cycles;
-
/**
* @entities:
- *
* Table of per-priority-level sched entities used by submitqueues
* associated with this &drm_file. Because some userspace apps
* make assumptions about rendering from multiple gl contexts
@@ -466,10 +432,8 @@ struct msm_context {
* level.
*/
struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
-
/**
* @ctx_mem:
- *
* Total amount of memory of GEM buffers with handles attached for
* this context.
*/
@@ -479,7 +443,7 @@ struct msm_context {
struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx);
/**
- * msm_context_is_vm_bind() - has userspace opted in to VM_BIND?
+ * msm_context_is_vmbind() - has userspace opted in to VM_BIND?
*
* @ctx: the drm_file context
*
@@ -487,6 +451,8 @@ struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx
* do sparse binding including having multiple, potentially partial,
* mappings in the VM. Therefore certain legacy uabi (ie. GET_IOVA,
* SET_IOVA) are rejected because they don't have a sensible meaning.
+ *
+ * Returns: %true if userspace is managing the VM, %false otherwise.
*/
static inline bool
msm_context_is_vmbind(struct msm_context *ctx)
@@ -518,6 +484,8 @@ msm_context_is_vmbind(struct msm_context *ctx)
* This allows generations without preemption (nr_rings==1) to have some
* amount of prioritization, and provides more priority levels for gens
* that do have preemption.
+ *
+ * Returns: %0 on success, or a negative error code on failure.
*/
static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
unsigned *ring_nr, enum drm_sched_priority *sched_prio)
@@ -541,7 +509,7 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
}
/**
- * struct msm_gpu_submitqueues - Userspace created context.
+ * struct msm_gpu_submitqueue - Userspace created context.
*
* A submitqueue is associated with a gl context or vk queue (or equiv)
* in userspace.
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index a188617653e8..d5dede4ff761 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -364,7 +364,7 @@ msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_preallo
}
/**
- * alloc_pt() - Custom page table allocator
+ * msm_iommu_pagetable_alloc_pt() - Custom page table allocator
* @cookie: Cookie passed at page table allocation time.
* @size: Size of the page table. This size should be fixed,
* and determined at creation time based on the granule size.
@@ -416,7 +416,7 @@ msm_iommu_pagetable_alloc_pt(void *cookie, size_t size, gfp_t gfp)
/**
- * free_pt() - Custom page table free function
+ * msm_iommu_pagetable_free_pt() - Custom page table free function
* @cookie: Cookie passed at page table allocation time.
* @data: Page table to free.
* @size: Size of the page table. This size should be fixed,
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
index d3c7889aaf26..c369d4acc378 100644
--- a/drivers/gpu/drm/msm/msm_perf.c
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -65,13 +65,13 @@ static int refill_buf(struct msm_perf_state *perf)
if ((perf->cnt++ % 32) == 0) {
/* Header line: */
- n = snprintf(ptr, rem, "%%BUSY");
+ n = scnprintf(ptr, rem, "%%BUSY");
ptr += n;
rem -= n;
for (i = 0; i < gpu->num_perfcntrs; i++) {
const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
- n = snprintf(ptr, rem, "\t%s", perfcntr->name);
+ n = scnprintf(ptr, rem, "\t%s", perfcntr->name);
ptr += n;
rem -= n;
}
@@ -93,21 +93,21 @@ static int refill_buf(struct msm_perf_state *perf)
return ret;
val = totaltime ? 1000 * activetime / totaltime : 0;
- n = snprintf(ptr, rem, "%3d.%d%%", val / 10, val % 10);
+ n = scnprintf(ptr, rem, "%3d.%d%%", val / 10, val % 10);
ptr += n;
rem -= n;
for (i = 0; i < ret; i++) {
/* cycle counters (I think).. convert to MHz.. */
val = cntrs[i] / 10000;
- n = snprintf(ptr, rem, "\t%5d.%02d",
+ n = scnprintf(ptr, rem, "\t%5d.%02d",
val / 100, val % 100);
ptr += n;
rem -= n;
}
}
- n = snprintf(ptr, rem, "\n");
+ n = scnprintf(ptr, rem, "\n");
ptr += n;
rem -= n;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index 93f8f4f64578..b43c4f9bbcdf 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -152,8 +152,21 @@ static inline struct nv50_head_atom *
nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
+
if (IS_ERR(statec))
return (void *)statec;
+
+ return nv50_head_atom(statec);
+}
+
+static inline struct nv50_head_atom *
+nv50_head_atom_get_new(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *statec = drm_atomic_get_new_crtc_state(state, crtc);
+
+ if (!statec)
+ return NULL;
+
return nv50_head_atom(statec);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index ef9e410babbf..9a2c20fce0f3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -583,7 +583,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
asyw->image.offset[0] = nvbo->offset;
if (wndw->func->prepare) {
- asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
+ asyh = nv50_head_atom_get_new(asyw->state.state, asyw->state.crtc);
if (IS_ERR(asyh))
return PTR_ERR(asyh);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
index 35d1fcef520b..c456a9626823 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
@@ -30,6 +30,9 @@ ad102_gsp = {
.booter.ctor = ga102_gsp_booter_ctor,
+ .fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
+ .fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
+
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
index 503760246660..851140e80122 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
@@ -337,18 +337,12 @@ nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
}
int
-nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
+nvkm_gsp_fwsec_sb_init(struct nvkm_gsp *gsp)
{
return nvkm_gsp_fwsec_init(gsp, &gsp->fws.falcon.sb, "fwsec-sb",
NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB);
}
-void
-nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
-{
- nvkm_falcon_fw_dtor(&gsp->fws.falcon.sb);
-}
-
int
nvkm_gsp_fwsec_frts(struct nvkm_gsp *gsp)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
index d201e8697226..27a13aeccd3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
@@ -47,6 +47,9 @@ ga100_gsp = {
.booter.ctor = tu102_gsp_booter_ctor,
+ .fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
+ .fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
+
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
index 917f7e2f6c46..b6b3eb6f4c00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
@@ -158,6 +158,9 @@ ga102_gsp_r535 = {
.booter.ctor = ga102_gsp_booter_ctor,
+ .fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
+ .fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
+
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index 86bdd203bc10..9dd66a2e3801 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -7,9 +7,8 @@ enum nvkm_acr_lsf_id;
int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
-int nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *);
int nvkm_gsp_fwsec_sb(struct nvkm_gsp *);
-void nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *);
+int nvkm_gsp_fwsec_sb_init(struct nvkm_gsp *);
struct nvkm_gsp_fwif {
int version;
@@ -52,6 +51,11 @@ struct nvkm_gsp_func {
struct nvkm_falcon *, struct nvkm_falcon_fw *);
} booter;
+ struct {
+ int (*ctor)(struct nvkm_gsp *);
+ void (*dtor)(struct nvkm_gsp *);
+ } fwsec_sb;
+
void (*dtor)(struct nvkm_gsp *);
int (*oneinit)(struct nvkm_gsp *);
int (*init)(struct nvkm_gsp *);
@@ -67,6 +71,8 @@ extern const struct nvkm_falcon_func tu102_gsp_flcn;
extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec;
int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
+int tu102_gsp_fwsec_sb_ctor(struct nvkm_gsp *);
+void tu102_gsp_fwsec_sb_dtor(struct nvkm_gsp *);
int tu102_gsp_oneinit(struct nvkm_gsp *);
int tu102_gsp_init(struct nvkm_gsp *);
int tu102_gsp_fini(struct nvkm_gsp *, bool suspend);
@@ -91,5 +97,18 @@ int r535_gsp_fini(struct nvkm_gsp *, bool suspend);
int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gsp **);
+static inline int nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
+{
+ if (gsp->func->fwsec_sb.ctor)
+ return gsp->func->fwsec_sb.ctor(gsp);
+ return 0;
+}
+
+static inline void nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
+{
+ if (gsp->func->fwsec_sb.dtor)
+ gsp->func->fwsec_sb.dtor(gsp);
+}
+
extern const struct nvkm_gsp_func gv100_gsp;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
index 81e56da0474a..04b642a1f730 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
@@ -30,6 +30,18 @@
#include <nvfw/fw.h>
#include <nvfw/hs.h>
+int
+tu102_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
+{
+ return nvkm_gsp_fwsec_sb_init(gsp);
+}
+
+void
+tu102_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
+{
+ nvkm_falcon_fw_dtor(&gsp->fws.falcon.sb);
+}
+
static int
tu102_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
{
@@ -370,6 +382,9 @@ tu102_gsp = {
.booter.ctor = tu102_gsp_booter_ctor,
+ .fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
+ .fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
+
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
index 97eb046c25d0..58cf25842421 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
@@ -30,6 +30,9 @@ tu116_gsp = {
.booter.ctor = tu102_gsp_booter_ctor,
+ .fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
+ .fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
+
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 76f6af819037..7a83804fedca 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -1165,6 +1165,7 @@ config DRM_PANEL_VISIONOX_RM69299
tristate "Visionox RM69299"
depends on OF
depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
help
Say Y here if you want to enable support for Visionox
RM69299 DSI Video Mode panel.
diff --git a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
index 7c989b70ab51..a14c86c60d19 100644
--- a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
+++ b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
@@ -212,6 +212,8 @@ static int sony_td4353_jdi_probe(struct mipi_dsi_device *dsi)
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
+ ctx->panel.prepare_prev_first = true;
+
drm_panel_add(&ctx->panel);
ret = mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 56ff6a3fb483..d7dc83cf7b00 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -295,7 +295,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
variant->name, priv);
if (ret != 0) {
dev_err(dev, "%s failed irq %d\n", __func__, ret);
- return ret;
+ goto dev_put;
}
ret = pl111_modeset_init(drm);
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
index 969a8fb0ee9e..f4e71046dc91 100644
--- a/drivers/gpu/drm/radeon/pptable.h
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;
- UCHAR clockInfo[] __counted_by(ucNumEntries);
+ UCHAR clockInfo[] /*__counted_by(ucNumEntries)*/;
}ClockInfoArray;
typedef struct _NonClockInfoArray{
diff --git a/drivers/gpu/drm/tests/drm_atomic_state_test.c b/drivers/gpu/drm/tests/drm_atomic_state_test.c
index 2f6ac7a09f44..bc27f65b2823 100644
--- a/drivers/gpu/drm/tests/drm_atomic_state_test.c
+++ b/drivers/gpu/drm/tests/drm_atomic_state_test.c
@@ -156,24 +156,29 @@ static int set_up_atomic_state(struct kunit *test,
if (connector) {
conn_state = drm_atomic_get_connector_state(state, connector);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
- KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
}
crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
ret = drm_atomic_set_mode_for_crtc(crtc_state, &drm_atomic_test_mode);
- KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
crtc_state->enable = true;
crtc_state->active = true;
if (connector) {
ret = drm_atomic_commit(state);
- KUNIT_ASSERT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
} else {
// dummy connector mask
crtc_state->connector_mask = DRM_TEST_CONN_0;
@@ -206,7 +211,13 @@ static void drm_test_check_connector_changed_modeset(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
// first modeset to enable
+retry_set_up:
ret = set_up_atomic_state(test, priv, old_conn, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_set_up;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -277,13 +288,26 @@ static void drm_test_check_valid_clones(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_set_up:
ret = set_up_atomic_state(test, priv, NULL, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_set_up;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+retry:
crtc_state = drm_atomic_get_crtc_state(state, priv->crtc);
+ if (PTR_ERR(crtc_state) == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
crtc_state->encoder_mask = param->encoder_mask;
@@ -292,6 +316,12 @@ static void drm_test_check_valid_clones(struct kunit *test)
crtc_state->mode_changed = true;
ret = drm_atomic_helper_check_modeset(drm, state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_ASSERT_EQ(test, ret, param->expected_result);
drm_modeset_drop_locks(&ctx);
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index 8bd412735000..70f9aa702143 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -257,10 +257,16 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -326,10 +332,16 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -397,10 +409,16 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -457,10 +475,17 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
+
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
mode,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -518,10 +543,16 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -580,10 +611,17 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
+
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
mode,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -643,10 +681,16 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -705,10 +749,17 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
+
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
mode,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -870,10 +921,16 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -946,10 +1003,16 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -1022,10 +1085,16 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1069,10 +1138,16 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1118,10 +1193,16 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1167,10 +1248,16 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1218,10 +1305,16 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
/* You shouldn't be doing that at home. */
@@ -1292,10 +1385,16 @@ static void drm_test_check_max_tmds_rate_bpc_fallback_rgb(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1440,10 +1539,16 @@ static void drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv422(struct kunit
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1669,10 +1774,17 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
crtc = priv->crtc;
+
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
mode,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1736,10 +1848,16 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1805,10 +1923,16 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1865,10 +1989,16 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1927,10 +2057,16 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1970,10 +2106,17 @@ static void drm_test_check_disable_connector(struct kunit *test)
drm = &priv->drm;
crtc = priv->crtc;
+
+retry_conn_enable:
ret = drm_kunit_helper_enable_crtc_connector(test, drm,
crtc, conn,
preferred,
&ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index 86eb5d97410b..8bb93194e5ac 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -26,9 +26,33 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
tidss_runtime_get(tidss);
- drm_atomic_helper_commit_modeset_disables(ddev, old_state);
- drm_atomic_helper_commit_planes(ddev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
- drm_atomic_helper_commit_modeset_enables(ddev, old_state);
+ /*
+ * TI's OLDI and DSI encoders need to be set up before the crtc is
+ * enabled. Thus drm_atomic_helper_commit_modeset_enables() and
+ * drm_atomic_helper_commit_modeset_disables() cannot be used here, as
+ * they enable the crtc before bridges' pre-enable, and disable the crtc
+ * after bridges' post-disable.
+ *
+ * Open-code the sequence here: first call the bridges' pre-enable, then
+ * the crtc enable, then the bridges' enable (and the reverse order for
+ * disable).
+ */
+
+ drm_atomic_helper_commit_encoder_bridge_disable(ddev, old_state);
+ drm_atomic_helper_commit_crtc_disable(ddev, old_state);
+ drm_atomic_helper_commit_encoder_bridge_post_disable(ddev, old_state);
+
+ drm_atomic_helper_update_legacy_modeset_state(ddev, old_state);
+ drm_atomic_helper_calc_timestamping_constants(old_state);
+ drm_atomic_helper_commit_crtc_set_mode(ddev, old_state);
+
+ drm_atomic_helper_commit_planes(ddev, old_state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+ drm_atomic_helper_commit_encoder_bridge_pre_enable(ddev, old_state);
+ drm_atomic_helper_commit_crtc_enable(ddev, old_state);
+ drm_atomic_helper_commit_encoder_bridge_enable(ddev, old_state);
+ drm_atomic_helper_commit_writebacks(ddev, old_state);
drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_flip_done(ddev, old_state);
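A commit tail like the one above is hooked up through &struct drm_mode_config_helper_funcs. As a minimal sketch (enable path only; "foo" names are placeholders, plane commit and the disable phase are omitted), reusing the split helpers this series introduces:

static void foo_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	/* Bridge pre-enable must run before the CRTC is enabled, as in tidss. */
	drm_atomic_helper_commit_encoder_bridge_pre_enable(dev, old_state);
	drm_atomic_helper_commit_crtc_enable(dev, old_state);
	drm_atomic_helper_commit_encoder_bridge_enable(dev, old_state);

	drm_atomic_helper_commit_hw_done(old_state);
	drm_atomic_helper_wait_for_flip_done(dev, old_state);
}

static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
	.atomic_commit_tail = foo_atomic_commit_tail,
};

/* In the driver's modeset init, after mode config initialization: */
	drm->mode_config.helper_private = &foo_mode_config_helpers;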
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index b0bd31d14bb9..bf4ee976b680 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -1527,7 +1527,7 @@ static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
* always succeed here, as long as we hold the lru lock.
*/
spin_lock(&ttm_bo->bdev->lru_lock);
- locked = dma_resv_trylock(ttm_bo->base.resv);
+ locked = dma_resv_trylock(&ttm_bo->base._resv);
spin_unlock(&ttm_bo->bdev->lru_lock);
xe_assert(xe, locked);
@@ -1547,13 +1547,6 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
bo = ttm_to_xe_bo(ttm_bo);
xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));
- /*
- * Corner case where TTM fails to allocate memory and this BOs resv
- * still points the VMs resv
- */
- if (ttm_bo->base.resv != &ttm_bo->base._resv)
- return;
-
if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
return;
@@ -1563,14 +1556,14 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
* TODO: Don't do this for external bos once we scrub them after
* unbind.
*/
- dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
+ dma_resv_for_each_fence(&cursor, &ttm_bo->base._resv,
DMA_RESV_USAGE_BOOKKEEP, fence) {
if (xe_fence_is_xe_preempt(fence) &&
!dma_fence_is_signaled(fence)) {
if (!replacement)
replacement = dma_fence_get_stub();
- dma_resv_replace_fences(ttm_bo->base.resv,
+ dma_resv_replace_fences(&ttm_bo->base._resv,
fence->context,
replacement,
DMA_RESV_USAGE_BOOKKEEP);
@@ -1578,7 +1571,7 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
}
dma_fence_put(replacement);
- dma_resv_unlock(ttm_bo->base.resv);
+ dma_resv_unlock(&ttm_bo->base._resv);
}
static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index c7d373c70f0f..cf29e259861f 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -1056,7 +1056,7 @@ static void tdf_request_sync(struct xe_device *xe)
* transient and need to be flushed..
*/
if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
- 150, NULL, false))
+ 300, NULL, false))
xe_gt_err_once(gt, "TD flush timeout\n");
xe_force_wake_put(gt_to_fw(gt), fw_ref);
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index 54e42960daad..7c74a31d4486 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -124,7 +124,7 @@ static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
case XE_PL_TT:
sgt = drm_prime_pages_to_sg(obj->dev,
bo->ttm.ttm->pages,
- bo->ttm.ttm->num_pages);
+ obj->size >> PAGE_SHIFT);
if (IS_ERR(sgt))
return sgt;
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index 97dfb7945b7a..a5c36a317a70 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -315,7 +315,7 @@ static int xe_eu_stall_user_ext_set_property(struct xe_device *xe, u64 extension
return -EFAULT;
if (XE_IOCTL_DBG(xe, ext.property >= ARRAY_SIZE(xe_set_eu_stall_property_funcs)) ||
- XE_IOCTL_DBG(xe, ext.pad))
+ XE_IOCTL_DBG(xe, !ext.property) || XE_IOCTL_DBG(xe, ext.pad))
return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_set_eu_stall_property_funcs));
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 4d81210e41f5..fd9480031750 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -132,7 +132,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (XE_IOCTL_DBG(xe, args->extensions) ||
XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
- XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+ XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]) ||
+ XE_IOCTL_DBG(xe, args->num_syncs > DRM_XE_MAX_SYNCS))
return -EINVAL;
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index dbb5e7a9bc6a..cdce210e36f2 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -797,9 +797,6 @@ static int do_gt_restart(struct xe_gt *gt)
xe_gt_sriov_pf_init_hw(gt);
xe_mocs_init(gt);
- err = xe_uc_start(&gt->uc);
- if (err)
- return err;
for_each_hw_engine(hwe, gt, id)
xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
@@ -807,6 +804,10 @@ static int do_gt_restart(struct xe_gt *gt)
/* Get CCS mode in sync between sw/hw */
xe_gt_apply_ccs_mode(gt);
+ err = xe_uc_start(&gt->uc);
+ if (err)
+ return err;
+
/* Restore GT freq to expected values */
xe_gt_sanitize_freq(gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index 849ea6c86e8e..ce3c7810469f 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -293,8 +293,10 @@ int xe_gt_freq_init(struct xe_gt *gt)
return -ENOMEM;
err = sysfs_create_files(gt->freq, freq_attrs);
- if (err)
+ if (err) {
+ kobject_put(gt->freq);
return err;
+ }
err = devm_add_action_or_reset(xe->drm.dev, freq_fini, gt->freq);
if (err)
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index bdc9d9877ec4..3e3d1d52f630 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -5,6 +5,7 @@
#include <drm/drm_managed.h>
+#include <generated/xe_wa_oob.h>
#include "xe_force_wake.h"
#include "xe_device.h"
#include "xe_gt.h"
@@ -16,6 +17,7 @@
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"
+#include "xe_wa.h"
/**
* DOC: Xe GT Idle
@@ -145,6 +147,12 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)
xe_mmio_write32(mmio, RENDER_POWERGATE_IDLE_HYSTERESIS, 25);
}
+ if (XE_GT_WA(gt, 14020316580))
+ gtidle->powergate_enable &= ~(VDN_HCP_POWERGATE_ENABLE(0) |
+ VDN_MFXVDENC_POWERGATE_ENABLE(0) |
+ VDN_HCP_POWERGATE_ENABLE(2) |
+ VDN_MFXVDENC_POWERGATE_ENABLE(2));
+
xe_mmio_write32(mmio, POWERGATE_ENABLE, gtidle->powergate_enable);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 4c73a077d314..033eae2d03d3 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -733,7 +733,7 @@ static void vf_start_migration_recovery(struct xe_gt *gt)
spin_lock(&gt->sriov.vf.migration.lock);
- if (!gt->sriov.vf.migration.recovery_queued ||
+ if (!gt->sriov.vf.migration.recovery_queued &&
!gt->sriov.vf.migration.recovery_teardown) {
gt->sriov.vf.migration.recovery_queued = true;
WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, true);
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle.c b/drivers/gpu/drm/xe/xe_gt_throttle.c
index 82c5fbcdfbe3..01477fc7b37b 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle.c
+++ b/drivers/gpu/drm/xe/xe_gt_throttle.c
@@ -140,7 +140,7 @@ static ssize_t reasons_show(struct kobject *kobj,
struct throttle_attribute *other_ta = kobj_attribute_to_throttle(kattr);
if (other_ta->mask != U32_MAX && reasons & other_ta->mask)
- ret += sysfs_emit_at(buff, ret, "%s ", (*pother)->name);
+ ret += sysfs_emit_at(buff, ret, "%s ", (*pother)->name + strlen("reason_"));
}
if (drm_WARN_ONCE(&xe->drm, !ret, "Unknown reason: %#x\n", reasons))
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 4ac434ad216f..a5019d1e741b 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -104,7 +104,9 @@ static void g2h_fence_cancel(struct g2h_fence *g2h_fence)
{
g2h_fence->cancel = true;
g2h_fence->fail = true;
- g2h_fence->done = true;
+
+ /* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
+ WRITE_ONCE(g2h_fence->done, true);
}
static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
@@ -1203,10 +1205,13 @@ retry_same_fence:
return ret;
}
- ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
+ /* READ_ONCEs pair with WRITE_ONCEs in parse_g2h_response
+ * and g2h_fence_cancel.
+ */
+ ret = wait_event_timeout(ct->g2h_fence_wq, READ_ONCE(g2h_fence.done), HZ);
if (!ret) {
LNL_FLUSH_WORK(&ct->g2h_worker);
- if (g2h_fence.done) {
+ if (READ_ONCE(g2h_fence.done)) {
xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
g2h_fence.seqno, action[0]);
ret = 1;
@@ -1454,7 +1459,8 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
- g2h_fence->done = true;
+ /* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
+ WRITE_ONCE(g2h_fence->done, true);
smp_mb();
wake_up_all(&ct->g2h_fence_wq);
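Distilled, the done-flag handshake these comments describe follows the usual wait_event() pattern; a sketch with illustrative names (not code from the driver), assuming the wait queue was set up with init_waitqueue_head():

#include <linux/wait.h>

struct example_fence {
	bool done;
	wait_queue_head_t wq;
};

/* Completer: publish the flag, then wake any sleepers. */
static void example_complete(struct example_fence *f)
{
	WRITE_ONCE(f->done, true);
	smp_mb();	/* order the flag store before the wakeup */
	wake_up_all(&f->wq);
}

/* Waiter: wait_event_timeout() re-reads the condition, so pair the
 * writer's WRITE_ONCE() with READ_ONCE() here. */
static int example_wait(struct example_fence *f)
{
	if (!wait_event_timeout(f->wq, READ_ONCE(f->done), HZ))
		return -ETIMEDOUT;
	return 0;
}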
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index ed7be50b2f72..f6ba2b0f074d 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -717,26 +717,46 @@ static bool vf_recovery(struct xe_guc *guc)
return xe_gt_recovery_pending(guc_to_gt(guc));
}
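+/*
+ * Sleep for roughly @delay_ms milliseconds without caring about precision:
+ * usleep_range() for short delays, where msleep() may oversleep considerably
+ * (see Documentation/timers/timers-howto.rst), plain msleep() otherwise.
+ */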
+static inline void relaxed_ms_sleep(unsigned int delay_ms)
+{
+ unsigned long min_us, max_us;
+
+ if (!delay_ms)
+ return;
+
+ if (delay_ms > 20) {
+ msleep(delay_ms);
+ return;
+ }
+
+ min_us = mul_u32_u32(delay_ms, 1000);
+ max_us = min_us + 500;
+
+ usleep_range(min_us, max_us);
+}
+
static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
{
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
- unsigned int sleep_period_ms = 1;
+ unsigned int sleep_period_ms = 1, sleep_total_ms = 0;
#define AVAILABLE_SPACE \
CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
if (wqi_size > AVAILABLE_SPACE && !vf_recovery(guc)) {
try_again:
q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
- if (wqi_size > AVAILABLE_SPACE) {
- if (sleep_period_ms == 1024) {
+ if (wqi_size > AVAILABLE_SPACE && !vf_recovery(guc)) {
+ if (sleep_total_ms > 2000) {
xe_gt_reset_async(q->gt);
return -ENODEV;
}
msleep(sleep_period_ms);
- sleep_period_ms <<= 1;
+ sleep_total_ms += sleep_period_ms;
+ if (sleep_period_ms < 64)
+ sleep_period_ms <<= 1;
goto try_again;
}
}
@@ -1585,7 +1605,7 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
since_resume_ms;
if (wait_ms > 0 && q->guc->resume_time)
- msleep(wait_ms);
+ relaxed_ms_sleep(wait_ms);
set_exec_queue_suspended(q);
disable_scheduling(q, false);
@@ -2253,10 +2273,11 @@ static void guc_exec_queue_unpause_prepare(struct xe_guc *guc,
struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched = &q->guc->sched;
- struct xe_sched_job *job = NULL;
+ struct xe_sched_job *job = NULL, *__job;
bool restore_replay = false;
- list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+ list_for_each_entry(__job, &sched->base.pending_list, drm.list) {
+ job = __job;
restore_replay |= job->restore_replay;
if (restore_replay) {
xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d",
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index 2b3d49dd394c..495cdd4f948d 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -223,7 +223,7 @@ void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir)
if (xe->heci_gsc.irq < 0)
return;
- ret = generic_handle_irq(xe->heci_gsc.irq);
+ ret = generic_handle_irq_safe(xe->heci_gsc.irq);
if (ret)
drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
}
@@ -243,7 +243,7 @@ void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir)
if (xe->heci_gsc.irq < 0)
return;
- ret = generic_handle_irq(xe->heci_gsc.irq);
+ ret = generic_handle_irq_safe(xe->heci_gsc.irq);
if (ret)
drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
}
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 2184af413b91..5a95b08a4723 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -2062,6 +2062,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
unsigned long sram_offset,
struct drm_pagemap_addr *sram_addr,
u64 vram_addr,
+ struct dma_fence *deps,
const enum xe_migrate_copy_dir dir)
{
struct xe_gt *gt = m->tile->primary_gt;
@@ -2150,6 +2151,14 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
+ if (deps && !dma_fence_is_signaled(deps)) {
+ dma_fence_get(deps);
+ err = drm_sched_job_add_dependency(&job->drm, deps);
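+ /* Couldn't record the dependency; fall back to a synchronous wait. */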
+ if (err)
+ dma_fence_wait(deps, false);
+ err = 0;
+ }
+
mutex_lock(&m->job_mutex);
xe_sched_job_arm(job);
fence = dma_fence_get(&job->drm.s_fence->finished);
@@ -2175,6 +2184,8 @@ err:
* @npages: Number of pages to migrate.
* @src_addr: Array of DMA information (source of migrate)
* @dst_addr: Device physical address of VRAM (destination of migrate)
+ * @deps: struct dma_fence representing the dependencies that need
+ * to be signaled before migration.
*
* Copy from an array dma addresses to a VRAM device physical address
*
@@ -2184,10 +2195,11 @@ err:
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
unsigned long npages,
struct drm_pagemap_addr *src_addr,
- u64 dst_addr)
+ u64 dst_addr,
+ struct dma_fence *deps)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
- XE_MIGRATE_COPY_TO_VRAM);
+ deps, XE_MIGRATE_COPY_TO_VRAM);
}
/**
@@ -2196,6 +2208,8 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
* @npages: Number of pages to migrate.
* @src_addr: Device physical address of VRAM (source of migrate)
* @dst_addr: Array of DMA information (destination of migrate)
+ * @deps: struct dma_fence representing the dependencies that need
+ * to be signaled before migration.
*
* Copy from a VRAM device physical address to an array dma addresses
*
@@ -2205,10 +2219,11 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
unsigned long npages,
u64 src_addr,
- struct drm_pagemap_addr *dst_addr)
+ struct drm_pagemap_addr *dst_addr,
+ struct dma_fence *deps)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
- XE_MIGRATE_COPY_TO_SRAM);
+ deps, XE_MIGRATE_COPY_TO_SRAM);
}
static void xe_migrate_dma_unmap(struct xe_device *xe,
@@ -2384,7 +2399,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
__fence = xe_migrate_vram(m, current_bytes,
(unsigned long)buf & ~PAGE_MASK,
&pagemap_addr[current_page],
- vram_addr, write ?
+ vram_addr, NULL, write ?
XE_MIGRATE_COPY_TO_VRAM :
XE_MIGRATE_COPY_TO_SRAM);
if (IS_ERR(__fence)) {
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 260e298e5dd7..b76441f062b4 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -116,12 +116,14 @@ int xe_migrate_init(struct xe_migrate *m);
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
unsigned long npages,
struct drm_pagemap_addr *src_addr,
- u64 dst_addr);
+ u64 dst_addr,
+ struct dma_fence *deps);
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
unsigned long npages,
u64 src_addr,
- struct drm_pagemap_addr *dst_addr);
+ struct drm_pagemap_addr *dst_addr,
+ struct dma_fence *deps);
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct xe_bo *src_bo,
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 890c363282ae..f8bb28ab8124 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -1105,11 +1105,12 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
oag_buf_size_select(stream) |
oag_configure_mmio_trigger(stream, true));
- xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ?
- (OAG_OAGLBCTXCTRL_COUNTER_RESUME |
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl,
+ OAG_OAGLBCTXCTRL_COUNTER_RESUME |
+ (stream->periodic ?
OAG_OAGLBCTXCTRL_TIMER_ENABLE |
REG_FIELD_PREP(OAG_OAGLBCTXCTRL_TIMER_PERIOD_MASK,
- stream->period_exponent)) : 0);
+ stream->period_exponent) : 0));
/*
* Initialize Super Queue Internal Cnt Register
@@ -1254,6 +1255,9 @@ static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value,
struct xe_oa_open_param *param)
{
+ if (XE_IOCTL_DBG(oa->xe, value > DRM_XE_MAX_SYNCS))
+ return -EINVAL;
+
param->num_syncs = value;
return 0;
}
@@ -1343,7 +1347,7 @@ static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_fr
ARRAY_SIZE(xe_oa_set_property_funcs_config));
if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs_open)) ||
- XE_IOCTL_DBG(oa->xe, ext.pad))
+ XE_IOCTL_DBG(oa->xe, !ext.property) || XE_IOCTL_DBG(oa->xe, ext.pad))
return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs_open));
diff --git a/drivers/gpu/drm/xe/xe_sriov_vfio.c b/drivers/gpu/drm/xe/xe_sriov_vfio.c
index e9a7615bb5c5..3da81af97b8b 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vfio.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vfio.c
@@ -21,7 +21,7 @@ EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_get_pf, "xe-vfio-pci");
bool xe_sriov_vfio_migration_supported(struct xe_device *xe)
{
if (!IS_SRIOV_PF(xe))
- return -EPERM;
+ return false;
return xe_sriov_pf_migration_supported(xe);
}
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 55c5a0eb82e1..f97e0af6a9b0 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -476,7 +476,8 @@ static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
static int xe_svm_copy(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages, const enum xe_svm_copy_dir dir)
+ unsigned long npages, const enum xe_svm_copy_dir dir,
+ struct dma_fence *pre_migrate_fence)
{
struct xe_vram_region *vr = NULL;
struct xe_gt *gt = NULL;
@@ -565,7 +566,8 @@ static int xe_svm_copy(struct page **pages,
__fence = xe_migrate_from_vram(vr->migrate,
i - pos + incr,
vram_addr,
- &pagemap_addr[pos]);
+ &pagemap_addr[pos],
+ pre_migrate_fence);
} else {
vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
@@ -574,13 +576,14 @@ static int xe_svm_copy(struct page **pages,
__fence = xe_migrate_to_vram(vr->migrate,
i - pos + incr,
&pagemap_addr[pos],
- vram_addr);
+ vram_addr,
+ pre_migrate_fence);
}
if (IS_ERR(__fence)) {
err = PTR_ERR(__fence);
goto err_out;
}
-
+ pre_migrate_fence = NULL;
dma_fence_put(fence);
fence = __fence;
}
@@ -603,20 +606,22 @@ static int xe_svm_copy(struct page **pages,
vram_addr, (u64)pagemap_addr[pos].addr, 1);
__fence = xe_migrate_from_vram(vr->migrate, 1,
vram_addr,
- &pagemap_addr[pos]);
+ &pagemap_addr[pos],
+ pre_migrate_fence);
} else {
vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
(u64)pagemap_addr[pos].addr, vram_addr, 1);
__fence = xe_migrate_to_vram(vr->migrate, 1,
&pagemap_addr[pos],
- vram_addr);
+ vram_addr,
+ pre_migrate_fence);
}
if (IS_ERR(__fence)) {
err = PTR_ERR(__fence);
goto err_out;
}
-
+ pre_migrate_fence = NULL;
dma_fence_put(fence);
fence = __fence;
}
@@ -629,6 +634,8 @@ err_out:
dma_fence_wait(fence, false);
dma_fence_put(fence);
}
+ if (pre_migrate_fence)
+ dma_fence_wait(pre_migrate_fence, false);
/*
* XXX: We can't derive the GT here (or anywhere in this functions, but
@@ -645,16 +652,20 @@ err_out:
static int xe_svm_copy_to_devmem(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages)
+ unsigned long npages,
+ struct dma_fence *pre_migrate_fence)
{
- return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
+ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM,
+ pre_migrate_fence);
}
static int xe_svm_copy_to_ram(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages)
+ unsigned long npages,
+ struct dma_fence *pre_migrate_fence)
{
- return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
+ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM,
+ pre_migrate_fence);
}
static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
@@ -667,6 +678,7 @@ static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
struct xe_bo *bo = to_xe_bo(devmem_allocation);
struct xe_device *xe = xe_bo_device(bo);
+ dma_fence_put(devmem_allocation->pre_migrate_fence);
xe_bo_put_async(bo);
xe_pm_runtime_put(xe);
}
@@ -861,6 +873,7 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
unsigned long timeslice_ms)
{
struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
+ struct dma_fence *pre_migrate_fence = NULL;
struct xe_device *xe = vr->xe;
struct device *dev = xe->drm.dev;
struct drm_buddy_block *block;
@@ -887,8 +900,20 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
break;
}
+ /* Ensure that any clearing or async eviction will complete before migration. */
+ if (!dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL)) {
+ err = dma_resv_get_singleton(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+ &pre_migrate_fence);
+ if (err)
+ dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+ else if (pre_migrate_fence)
+ dma_fence_enable_sw_signaling(pre_migrate_fence);
+ }
+
drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
- &dpagemap_devmem_ops, dpagemap, end - start);
+ &dpagemap_devmem_ops, dpagemap, end - start,
+ pre_migrate_fence);
blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
list_for_each_entry(block, blocks, link)
@@ -941,7 +966,7 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
xe_assert(vm->xe, IS_DGFX(vm->xe));
if (xe_svm_range_in_vram(range)) {
- drm_info(&vm->xe->drm, "Range is already in VRAM\n");
+ drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
return false;
}
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 0955d2ac8d74..fa757dd07954 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -214,7 +214,7 @@ int xe_svm_init(struct xe_vm *vm)
{
#if IS_ENABLED(CONFIG_DRM_GPUSVM)
return drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)", &vm->xe->drm,
- NULL, NULL, 0, 0, 0, NULL, NULL, 0);
+ NULL, 0, 0, 0, NULL, NULL, 0);
#else
return 0;
#endif
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 7cac646bdf1c..79ab6c512d3e 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1508,7 +1508,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
INIT_LIST_HEAD(&vm->preempt.exec_queues);
- vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
+ if (flags & XE_VM_FLAG_FAULT_MODE)
+ vm->preempt.min_run_period_ms = 0;
+ else
+ vm->preempt.min_run_period_ms = 5;
for_each_tile(tile, xe, id)
xe_range_fence_tree_init(&vm->rftree[id]);
@@ -3324,6 +3327,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
+ if (XE_IOCTL_DBG(xe, args->num_syncs > DRM_XE_MAX_SYNCS))
+ return -EINVAL;
+
if (args->num_binds > 1) {
u64 __user *bind_user =
u64_to_user_ptr(args->vector_of_binds);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index ccd6cc090309..2168ef052499 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -263,7 +263,7 @@ struct xe_vm {
* @min_run_period_ms: The minimum run period before preempting
* an engine again
*/
- s64 min_run_period_ms;
+ unsigned int min_run_period_ms;
/** @exec_queues: list of exec queues attached to this VM */
struct list_head exec_queues;
/** @num_exec_queues: number exec queues attached to this VM */
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 3764abca3d4f..e32dd2fde6f1 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -270,14 +270,6 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)),
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
- { XE_RTP_NAME("14020316580"),
- XE_RTP_RULES(MEDIA_VERSION(1301)),
- XE_RTP_ACTIONS(CLR(POWERGATE_ENABLE,
- VDN_HCP_POWERGATE_ENABLE(0) |
- VDN_MFXVDENC_POWERGATE_ENABLE(0) |
- VDN_HCP_POWERGATE_ENABLE(2) |
- VDN_MFXVDENC_POWERGATE_ENABLE(2))),
- },
{ XE_RTP_NAME("14019449301"),
XE_RTP_RULES(MEDIA_VERSION(1301), ENGINE_CLASS(VIDEO_DECODE)),
XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F08(0), CG3DDISHRS_CLKGATE_DIS)),
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index fb38eb3d6e9a..7ca7258eb5d8 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -76,3 +76,4 @@
15015404425_disable PLATFORM(PANTHERLAKE), MEDIA_STEP(B0, FOREVER)
16026007364 MEDIA_VERSION(3000)
+14020316580 MEDIA_VERSION(1301)
diff --git a/drivers/gpu/nova-core/Kconfig b/drivers/gpu/nova-core/Kconfig
index 20d3e6d0d796..527920f9c4d3 100644
--- a/drivers/gpu/nova-core/Kconfig
+++ b/drivers/gpu/nova-core/Kconfig
@@ -3,7 +3,7 @@ config NOVA_CORE
depends on 64BIT
depends on PCI
depends on RUST
- depends on RUST_FW_LOADER_ABSTRACTIONS
+ select RUST_FW_LOADER_ABSTRACTIONS
select AUXILIARY_BUS
default n
help
diff --git a/drivers/gpu/nova-core/gsp/cmdq.rs b/drivers/gpu/nova-core/gsp/cmdq.rs
index 6f946d14868a..3991ccc0c10f 100644
--- a/drivers/gpu/nova-core/gsp/cmdq.rs
+++ b/drivers/gpu/nova-core/gsp/cmdq.rs
@@ -588,21 +588,23 @@ impl Cmdq {
header.length(),
);
+ let payload_length = header.payload_length();
+
// Check that the driver read area is large enough for the message.
- if slice_1.len() + slice_2.len() < header.length() {
+ if slice_1.len() + slice_2.len() < payload_length {
return Err(EIO);
}
// Cut the message slices down to the actual length of the message.
- let (slice_1, slice_2) = if slice_1.len() > header.length() {
- // PANIC: we checked above that `slice_1` is at least as long as `msg_header.length()`.
- (slice_1.split_at(header.length()).0, &slice_2[0..0])
+ let (slice_1, slice_2) = if slice_1.len() > payload_length {
+ // PANIC: we checked above that `slice_1` is at least as long as `payload_length`.
+ (slice_1.split_at(payload_length).0, &slice_2[0..0])
} else {
(
slice_1,
// PANIC: we checked above that `slice_1.len() + slice_2.len()` is at least as
- // large as `msg_header.length()`.
- slice_2.split_at(header.length() - slice_1.len()).0,
+ // large as `payload_length`.
+ slice_2.split_at(payload_length - slice_1.len()).0,
)
};
diff --git a/drivers/gpu/nova-core/gsp/fw.rs b/drivers/gpu/nova-core/gsp/fw.rs
index abffd6beec65..caeb0d251fe5 100644
--- a/drivers/gpu/nova-core/gsp/fw.rs
+++ b/drivers/gpu/nova-core/gsp/fw.rs
@@ -141,8 +141,8 @@ unsafe impl AsBytes for GspFwWprMeta {}
// are valid.
unsafe impl FromBytes for GspFwWprMeta {}
-type GspFwWprMetaBootResumeInfo = r570_144::GspFwWprMeta__bindgen_ty_1;
-type GspFwWprMetaBootInfo = r570_144::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1;
+type GspFwWprMetaBootResumeInfo = bindings::GspFwWprMeta__bindgen_ty_1;
+type GspFwWprMetaBootInfo = bindings::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1;
impl GspFwWprMeta {
/// Fill in and return a `GspFwWprMeta` suitable for booting `gsp_firmware` using the
@@ -150,8 +150,8 @@ impl GspFwWprMeta {
pub(crate) fn new(gsp_firmware: &GspFirmware, fb_layout: &FbLayout) -> Self {
Self(bindings::GspFwWprMeta {
// CAST: we want to store the bits of `GSP_FW_WPR_META_MAGIC` unmodified.
- magic: r570_144::GSP_FW_WPR_META_MAGIC as u64,
- revision: u64::from(r570_144::GSP_FW_WPR_META_REVISION),
+ magic: bindings::GSP_FW_WPR_META_MAGIC as u64,
+ revision: u64::from(bindings::GSP_FW_WPR_META_REVISION),
sysmemAddrOfRadix3Elf: gsp_firmware.radix3_dma_handle(),
sizeOfRadix3Elf: u64::from_safe_cast(gsp_firmware.size),
sysmemAddrOfBootloader: gsp_firmware.bootloader.ucode.dma_handle(),
@@ -315,19 +315,19 @@ impl From<MsgFunction> for u32 {
#[repr(u32)]
pub(crate) enum SeqBufOpcode {
// Core operation opcodes
- CoreReset = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET,
- CoreResume = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME,
- CoreStart = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START,
- CoreWaitForHalt = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
+ CoreReset = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET,
+ CoreResume = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME,
+ CoreStart = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START,
+ CoreWaitForHalt = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
// Delay opcode
- DelayUs = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US,
+ DelayUs = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US,
// Register operation opcodes
- RegModify = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY,
- RegPoll = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL,
- RegStore = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE,
- RegWrite = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE,
+ RegModify = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY,
+ RegPoll = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL,
+ RegStore = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE,
+ RegWrite = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE,
}
impl fmt::Display for SeqBufOpcode {
@@ -351,25 +351,25 @@ impl TryFrom<u32> for SeqBufOpcode {
fn try_from(value: u32) -> Result<SeqBufOpcode> {
match value {
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => {
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => {
Ok(SeqBufOpcode::CoreReset)
}
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => {
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => {
Ok(SeqBufOpcode::CoreResume)
}
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => {
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => {
Ok(SeqBufOpcode::CoreStart)
}
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => {
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => {
Ok(SeqBufOpcode::CoreWaitForHalt)
}
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs),
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => {
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs),
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => {
Ok(SeqBufOpcode::RegModify)
}
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll),
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore),
- r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite),
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll),
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore),
+ bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite),
_ => Err(EINVAL),
}
}
@@ -385,7 +385,7 @@ impl From<SeqBufOpcode> for u32 {
/// Wrapper for GSP sequencer register write payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
-pub(crate) struct RegWritePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_WRITE);
+pub(crate) struct RegWritePayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_WRITE);
impl RegWritePayload {
/// Returns the register address.
@@ -408,7 +408,7 @@ unsafe impl AsBytes for RegWritePayload {}
/// Wrapper for GSP sequencer register modify payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
-pub(crate) struct RegModifyPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY);
+pub(crate) struct RegModifyPayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY);
impl RegModifyPayload {
/// Returns the register address.
@@ -436,7 +436,7 @@ unsafe impl AsBytes for RegModifyPayload {}
/// Wrapper for GSP sequencer register poll payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
-pub(crate) struct RegPollPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_POLL);
+pub(crate) struct RegPollPayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_POLL);
impl RegPollPayload {
/// Returns the register address.
@@ -469,7 +469,7 @@ unsafe impl AsBytes for RegPollPayload {}
/// Wrapper for GSP sequencer delay payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
-pub(crate) struct DelayUsPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_DELAY_US);
+pub(crate) struct DelayUsPayload(bindings::GSP_SEQ_BUF_PAYLOAD_DELAY_US);
impl DelayUsPayload {
/// Returns the delay value in microseconds.
@@ -487,7 +487,7 @@ unsafe impl AsBytes for DelayUsPayload {}
/// Wrapper for GSP sequencer register store payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
-pub(crate) struct RegStorePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_STORE);
+pub(crate) struct RegStorePayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_STORE);
impl RegStorePayload {
/// Returns the register address.
@@ -510,7 +510,7 @@ unsafe impl AsBytes for RegStorePayload {}
/// Wrapper for GSP sequencer buffer command.
#[repr(transparent)]
-pub(crate) struct SequencerBufferCmd(r570_144::GSP_SEQUENCER_BUFFER_CMD);
+pub(crate) struct SequencerBufferCmd(bindings::GSP_SEQUENCER_BUFFER_CMD);
impl SequencerBufferCmd {
/// Returns the opcode as a `SeqBufOpcode` enum, or error if invalid.
@@ -612,7 +612,7 @@ unsafe impl AsBytes for SequencerBufferCmd {}
/// Wrapper for GSP run CPU sequencer RPC.
#[repr(transparent)]
-pub(crate) struct RunCpuSequencer(r570_144::rpc_run_cpu_sequencer_v17_00);
+pub(crate) struct RunCpuSequencer(bindings::rpc_run_cpu_sequencer_v17_00);
impl RunCpuSequencer {
/// Returns the command index.
@@ -797,13 +797,6 @@ impl bindings::rpc_message_header_v {
}
}
-// SAFETY: We can't derive the Zeroable trait for this binding because the
-// procedural macro doesn't support the syntax used by bindgen to create the
-// __IncompleteArrayField types. So instead we implement it here, which is safe
-// because these are explicitly padded structures only containing types for
-// which any bit pattern, including all zeros, is valid.
-unsafe impl Zeroable for bindings::rpc_message_header_v {}
-
/// GSP Message Element.
///
/// This is essentially a message header expected to be followed by the message data.
@@ -853,11 +846,16 @@ impl GspMsgElement {
self.inner.checkSum = checksum;
}
- /// Returns the total length of the message.
+ /// Returns the length of the message's payload.
+ pub(crate) fn payload_length(&self) -> usize {
+ // `rpc.length` includes the length of the RPC message header.
+ num::u32_as_usize(self.inner.rpc.length)
+ .saturating_sub(size_of::<bindings::rpc_message_header_v>())
+ }
+
+ /// Returns the total length of the message, message and RPC headers included.
pub(crate) fn length(&self) -> usize {
- // `rpc.length` includes the length of the GspRpcHeader but not the message header.
- size_of::<Self>() - size_of::<bindings::rpc_message_header_v>()
- + num::u32_as_usize(self.inner.rpc.length)
+ size_of::<Self>() + self.payload_length()
}
// Returns the sequence number of the message.
diff --git a/drivers/gpu/nova-core/gsp/fw/r570_144.rs b/drivers/gpu/nova-core/gsp/fw/r570_144.rs
index 048234d1a9d1..e99d315ae74c 100644
--- a/drivers/gpu/nova-core/gsp/fw/r570_144.rs
+++ b/drivers/gpu/nova-core/gsp/fw/r570_144.rs
@@ -24,8 +24,11 @@
unreachable_pub,
unsafe_op_in_unsafe_fn
)]
-use kernel::{
- ffi,
- prelude::Zeroable, //
-};
+use kernel::ffi;
+use pin_init::MaybeZeroable;
+
include!("r570_144/bindings.rs");
+
+// SAFETY: This type has a size of zero, so its inclusion into another type should not affect their
+// ability to implement `Zeroable`.
+unsafe impl<T> kernel::prelude::Zeroable for __IncompleteArrayField<T> {}
diff --git a/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs b/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs
index 5bcfbcd1ad22..6d25fe0bffa9 100644
--- a/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs
+++ b/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs
@@ -320,11 +320,12 @@ pub const NV_VGPU_MSG_EVENT_RECOVERY_ACTION: _bindgen_ty_3 = 4130;
pub const NV_VGPU_MSG_EVENT_NUM_EVENTS: _bindgen_ty_3 = 4131;
pub type _bindgen_ty_3 = ffi::c_uint;
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
pub totalVFs: u32_,
pub firstVfOffset: u32_,
pub vfFeatureMask: u32_,
+ pub __bindgen_padding_0: [u8; 4usize],
pub FirstVFBar0Address: u64_,
pub FirstVFBar1Address: u64_,
pub FirstVFBar2Address: u64_,
@@ -340,23 +341,26 @@ pub struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
pub bClientRmAllocatedCtxBuffer: u8_,
pub bNonPowerOf2ChannelCountSupported: u8_,
pub bVfResizableBAR1Supported: u8_,
+ pub __bindgen_padding_1: [u8; 7usize],
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
pub BoardID: u32_,
pub chipSKU: [ffi::c_char; 9usize],
pub chipSKUMod: [ffi::c_char; 5usize],
+ pub __bindgen_padding_0: [u8; 2usize],
pub skuConfigVersion: u32_,
pub project: [ffi::c_char; 5usize],
pub projectSKU: [ffi::c_char; 5usize],
pub CDP: [ffi::c_char; 6usize],
pub projectSKUMod: [ffi::c_char; 2usize],
+ pub __bindgen_padding_1: [u8; 2usize],
pub businessCycle: u32_,
}
pub type NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG = [u8_; 17usize];
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
pub base: u64_,
pub limit: u64_,
@@ -368,13 +372,14 @@ pub struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
pub blackList: NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
pub numFBRegions: u32_,
+ pub __bindgen_padding_0: [u8; 4usize],
pub fbRegion: [NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO; 16usize],
}
#[repr(C)]
-#[derive(Debug, Copy, Clone)]
+#[derive(Debug, Copy, Clone, MaybeZeroable)]
pub struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
pub index: u32_,
pub flags: u32_,
@@ -391,14 +396,14 @@ impl Default for NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
}
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct DOD_METHOD_DATA {
pub status: u32_,
pub acpiIdListLen: u32_,
pub acpiIdList: [u32_; 16usize],
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct JT_METHOD_DATA {
pub status: u32_,
pub jtCaps: u32_,
@@ -407,14 +412,14 @@ pub struct JT_METHOD_DATA {
pub __bindgen_padding_0: u8,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct MUX_METHOD_DATA_ELEMENT {
pub acpiId: u32_,
pub mode: u32_,
pub status: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct MUX_METHOD_DATA {
pub tableLen: u32_,
pub acpiIdMuxModeTable: [MUX_METHOD_DATA_ELEMENT; 16usize],
@@ -422,13 +427,13 @@ pub struct MUX_METHOD_DATA {
pub acpiIdMuxStateTable: [MUX_METHOD_DATA_ELEMENT; 16usize],
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct CAPS_METHOD_DATA {
pub status: u32_,
pub optimusCaps: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct ACPI_METHOD_DATA {
pub bValid: u8_,
pub __bindgen_padding_0: [u8; 3usize],
@@ -438,20 +443,20 @@ pub struct ACPI_METHOD_DATA {
pub capsMethodData: CAPS_METHOD_DATA,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS {
pub headIndex: u32_,
pub maxHResolution: u32_,
pub maxVResolution: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS {
pub numHeads: u32_,
pub maxNumHeads: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct BUSINFO {
pub deviceID: u16_,
pub vendorID: u16_,
@@ -461,7 +466,7 @@ pub struct BUSINFO {
pub __bindgen_padding_0: u8,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_VF_INFO {
pub totalVFs: u32_,
pub firstVFOffset: u32_,
@@ -474,34 +479,37 @@ pub struct GSP_VF_INFO {
pub __bindgen_padding_0: [u8; 5usize],
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_PCIE_CONFIG_REG {
pub linkCap: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct EcidManufacturingInfo {
pub ecidLow: u32_,
pub ecidHigh: u32_,
pub ecidExtended: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct FW_WPR_LAYOUT_OFFSET {
pub nonWprHeapOffset: u64_,
pub frtsOffset: u64_,
}
#[repr(C)]
-#[derive(Debug, Copy, Clone)]
+#[derive(Debug, Copy, Clone, MaybeZeroable)]
pub struct GspStaticConfigInfo_t {
pub grCapsBits: [u8_; 23usize],
+ pub __bindgen_padding_0: u8,
pub gidInfo: NV2080_CTRL_GPU_GET_GID_INFO_PARAMS,
pub SKUInfo: NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS,
+ pub __bindgen_padding_1: [u8; 4usize],
pub fbRegionInfoParams: NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS,
pub sriovCaps: NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS,
pub sriovMaxGfid: u32_,
pub engineCaps: [u32_; 3usize],
pub poisonFuseEnabled: u8_,
+ pub __bindgen_padding_2: [u8; 7usize],
pub fb_length: u64_,
pub fbio_mask: u64_,
pub fb_bus_width: u32_,
@@ -527,16 +535,20 @@ pub struct GspStaticConfigInfo_t {
pub bIsMigSupported: u8_,
pub RTD3GC6TotalBoardPower: u16_,
pub RTD3GC6PerstDelay: u16_,
+ pub __bindgen_padding_3: [u8; 2usize],
pub bar1PdeBase: u64_,
pub bar2PdeBase: u64_,
pub bVbiosValid: u8_,
+ pub __bindgen_padding_4: [u8; 3usize],
pub vbiosSubVendor: u32_,
pub vbiosSubDevice: u32_,
pub bPageRetirementSupported: u8_,
pub bSplitVasBetweenServerClientRm: u8_,
pub bClRootportNeedsNosnoopWAR: u8_,
+ pub __bindgen_padding_5: u8,
pub displaylessMaxHeads: VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS,
pub displaylessMaxResolution: VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS,
+ pub __bindgen_padding_6: [u8; 4usize],
pub displaylessMaxPixels: u64_,
pub hInternalClient: u32_,
pub hInternalDevice: u32_,
@@ -558,7 +570,7 @@ impl Default for GspStaticConfigInfo_t {
}
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GspSystemInfo {
pub gpuPhysAddr: u64_,
pub gpuPhysFbAddr: u64_,
@@ -615,7 +627,7 @@ pub struct GspSystemInfo {
pub hostPageSize: u64_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct MESSAGE_QUEUE_INIT_ARGUMENTS {
pub sharedMemPhysAddr: u64_,
pub pageTableEntryCount: u32_,
@@ -624,7 +636,7 @@ pub struct MESSAGE_QUEUE_INIT_ARGUMENTS {
pub statQueueOffset: u64_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SR_INIT_ARGUMENTS {
pub oldLevel: u32_,
pub flags: u32_,
@@ -632,7 +644,7 @@ pub struct GSP_SR_INIT_ARGUMENTS {
pub __bindgen_padding_0: [u8; 3usize],
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_ARGUMENTS_CACHED {
pub messageQueueInitArguments: MESSAGE_QUEUE_INIT_ARGUMENTS,
pub srInitArguments: GSP_SR_INIT_ARGUMENTS,
@@ -642,13 +654,13 @@ pub struct GSP_ARGUMENTS_CACHED {
pub profilerArgs: GSP_ARGUMENTS_CACHED__bindgen_ty_1,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_ARGUMENTS_CACHED__bindgen_ty_1 {
pub pa: u64_,
pub size: u64_,
}
#[repr(C)]
-#[derive(Copy, Clone, Zeroable)]
+#[derive(Copy, Clone, MaybeZeroable)]
pub union rpc_message_rpc_union_field_v03_00 {
pub spare: u32_,
pub cpuRmGfid: u32_,
@@ -664,6 +676,7 @@ impl Default for rpc_message_rpc_union_field_v03_00 {
}
pub type rpc_message_rpc_union_field_v = rpc_message_rpc_union_field_v03_00;
#[repr(C)]
+#[derive(MaybeZeroable)]
pub struct rpc_message_header_v03_00 {
pub header_version: u32_,
pub signature: u32_,
@@ -686,7 +699,7 @@ impl Default for rpc_message_header_v03_00 {
}
pub type rpc_message_header_v = rpc_message_header_v03_00;
#[repr(C)]
-#[derive(Copy, Clone, Zeroable)]
+#[derive(Copy, Clone, MaybeZeroable)]
pub struct GspFwWprMeta {
pub magic: u64_,
pub revision: u64_,
@@ -721,19 +734,19 @@ pub struct GspFwWprMeta {
pub verified: u64_,
}
#[repr(C)]
-#[derive(Copy, Clone, Zeroable)]
+#[derive(Copy, Clone, MaybeZeroable)]
pub union GspFwWprMeta__bindgen_ty_1 {
pub __bindgen_anon_1: GspFwWprMeta__bindgen_ty_1__bindgen_ty_1,
pub __bindgen_anon_2: GspFwWprMeta__bindgen_ty_1__bindgen_ty_2,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GspFwWprMeta__bindgen_ty_1__bindgen_ty_1 {
pub sysmemAddrOfSignature: u64_,
pub sizeOfSignature: u64_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GspFwWprMeta__bindgen_ty_1__bindgen_ty_2 {
pub gspFwHeapFreeListWprOffset: u32_,
pub unused0: u32_,
@@ -749,13 +762,13 @@ impl Default for GspFwWprMeta__bindgen_ty_1 {
}
}
#[repr(C)]
-#[derive(Copy, Clone, Zeroable)]
+#[derive(Copy, Clone, MaybeZeroable)]
pub union GspFwWprMeta__bindgen_ty_2 {
pub __bindgen_anon_1: GspFwWprMeta__bindgen_ty_2__bindgen_ty_1,
pub __bindgen_anon_2: GspFwWprMeta__bindgen_ty_2__bindgen_ty_2,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GspFwWprMeta__bindgen_ty_2__bindgen_ty_1 {
pub partitionRpcAddr: u64_,
pub partitionRpcRequestOffset: u16_,
@@ -767,7 +780,7 @@ pub struct GspFwWprMeta__bindgen_ty_2__bindgen_ty_1 {
pub lsUcodeVersion: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GspFwWprMeta__bindgen_ty_2__bindgen_ty_2 {
pub partitionRpcPadding: [u32_; 4usize],
pub sysmemAddrOfCrashReportQueue: u64_,
@@ -802,7 +815,7 @@ pub const LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_SYSMEM: LibosMemoryRegion
pub const LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_FB: LibosMemoryRegionLoc = 2;
pub type LibosMemoryRegionLoc = ffi::c_uint;
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct LibosMemoryRegionInitArgument {
pub id8: LibosAddress,
pub pa: LibosAddress,
@@ -812,7 +825,7 @@ pub struct LibosMemoryRegionInitArgument {
pub __bindgen_padding_0: [u8; 6usize],
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct PACKED_REGISTRY_ENTRY {
pub nameOffset: u32_,
pub type_: u8_,
@@ -821,14 +834,14 @@ pub struct PACKED_REGISTRY_ENTRY {
pub length: u32_,
}
#[repr(C)]
-#[derive(Debug, Default)]
+#[derive(Debug, Default, MaybeZeroable)]
pub struct PACKED_REGISTRY_TABLE {
pub size: u32_,
pub numEntries: u32_,
pub entries: __IncompleteArrayField<PACKED_REGISTRY_ENTRY>,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct msgqTxHeader {
pub version: u32_,
pub size: u32_,
@@ -840,13 +853,13 @@ pub struct msgqTxHeader {
pub entryOff: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone, Zeroable)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct msgqRxHeader {
pub readPtr: u32_,
}
#[repr(C)]
#[repr(align(8))]
-#[derive(Zeroable)]
+#[derive(MaybeZeroable)]
pub struct GSP_MSG_QUEUE_ELEMENT {
pub authTagBuffer: [u8_; 16usize],
pub aadBuffer: [u8_; 16usize],
@@ -866,7 +879,7 @@ impl Default for GSP_MSG_QUEUE_ELEMENT {
}
}
#[repr(C)]
-#[derive(Debug, Default)]
+#[derive(Debug, Default, MaybeZeroable)]
pub struct rpc_run_cpu_sequencer_v17_00 {
pub bufferSizeDWord: u32_,
pub cmdIndex: u32_,
@@ -884,20 +897,20 @@ pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT: GSP_SEQ_BUF_
pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME: GSP_SEQ_BUF_OPCODE = 8;
pub type GSP_SEQ_BUF_OPCODE = ffi::c_uint;
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQ_BUF_PAYLOAD_REG_WRITE {
pub addr: u32_,
pub val: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQ_BUF_PAYLOAD_REG_MODIFY {
pub addr: u32_,
pub mask: u32_,
pub val: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQ_BUF_PAYLOAD_REG_POLL {
pub addr: u32_,
pub mask: u32_,
@@ -906,24 +919,24 @@ pub struct GSP_SEQ_BUF_PAYLOAD_REG_POLL {
pub error: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQ_BUF_PAYLOAD_DELAY_US {
pub val: u32_,
}
#[repr(C)]
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQ_BUF_PAYLOAD_REG_STORE {
pub addr: u32_,
pub index: u32_,
}
#[repr(C)]
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQUENCER_BUFFER_CMD {
pub opCode: GSP_SEQ_BUF_OPCODE,
pub payload: GSP_SEQUENCER_BUFFER_CMD__bindgen_ty_1,
}
#[repr(C)]
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, MaybeZeroable)]
pub union GSP_SEQUENCER_BUFFER_CMD__bindgen_ty_1 {
pub regWrite: GSP_SEQ_BUF_PAYLOAD_REG_WRITE,
pub regModify: GSP_SEQ_BUF_PAYLOAD_REG_MODIFY,
diff --git a/drivers/hid/bpf/progs/Makefile b/drivers/hid/bpf/progs/Makefile
index ec1fc642fd63..66b8f38e591d 100644
--- a/drivers/hid/bpf/progs/Makefile
+++ b/drivers/hid/bpf/progs/Makefile
@@ -56,8 +56,10 @@ clean:
%.bpf.o: %.bpf.c vmlinux.h $(BPFOBJ) | $(OUTPUT)
$(call msg,BPF,$@)
- $(Q)$(CLANG) -g -O2 --target=bpf -Wall -Werror $(INCLUDES) \
- -c $(filter %.c,$^) -o $@ && \
+ $(Q)$(CLANG) -g -O2 --target=bpf -Wall -Werror $(INCLUDES) \
+ -Wno-microsoft-anon-tag \
+ -fms-extensions \
+ -c $(filter %.c,$^) -o $@ && \
$(LLVM_STRIP) -g $@
vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 337d2dc81b4c..c5865b0d2aaa 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -3513,6 +3513,7 @@ static const char *absolutes[ABS_CNT] = {
[ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt",
[ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "ToolWidth",
[ABS_VOLUME] = "Volume", [ABS_PROFILE] = "Profile",
+ [ABS_SND_PROFILE] = "SoundProfile",
[ABS_MISC] = "Misc",
[ABS_MT_SLOT] = "MTSlot",
[ABS_MT_TOUCH_MAJOR] = "MTMajor",
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 981d1b6e9658..2003d2dcda7c 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -77,7 +77,7 @@ static const __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
break;
case USB_DEVICE_ID_ELECOM_M_XT3URBK_00FB:
case USB_DEVICE_ID_ELECOM_M_XT3URBK_018F:
- case USB_DEVICE_ID_ELECOM_M_XT3DRBK:
+ case USB_DEVICE_ID_ELECOM_M_XT3DRBK_00FC:
case USB_DEVICE_ID_ELECOM_M_XT4DRBK:
/*
* Report descriptor format:
@@ -102,6 +102,16 @@ static const __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*/
mouse_button_fixup(hdev, rdesc, *rsize, 12, 30, 14, 20, 8);
break;
+ case USB_DEVICE_ID_ELECOM_M_XT3DRBK_018C:
+ /*
+ * Report descriptor format:
+ * 22: button bit count
+ * 30: padding bit count
+ * 24: button report size
+ * 16: button usage maximum
+ */
+ mouse_button_fixup(hdev, rdesc, *rsize, 22, 30, 24, 16, 6);
+ break;
case USB_DEVICE_ID_ELECOM_M_DT2DRBK:
case USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C:
/*
@@ -122,7 +132,8 @@ static const struct hid_device_id elecom_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XGL20DLBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK_00FB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK_018F) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3DRBK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3DRBK_00FC) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3DRBK_018C) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT4DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index d31711f1aaec..9c2bf584d9f6 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -317,6 +317,7 @@
#define USB_DEVICE_ID_CHICONY_ACER_SWITCH12 0x1421
#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA 0xb824
#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA2 0xb82c
+#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA3 0xb882
#define USB_VENDOR_ID_CHUNGHWAT 0x2247
#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
@@ -438,6 +439,9 @@
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002 0xc002
+#define USB_VENDOR_ID_EDIFIER 0x2d99
+#define USB_DEVICE_ID_EDIFIER_QR30 0xa101 /* EDIFIER Hal0 2.0 SE */
+
#define USB_VENDOR_ID_ELAN 0x04f3
#define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
#define USB_DEVICE_ID_HP_X2 0x074d
@@ -451,7 +455,8 @@
#define USB_DEVICE_ID_ELECOM_M_XGL20DLBK 0x00e6
#define USB_DEVICE_ID_ELECOM_M_XT3URBK_00FB 0x00fb
#define USB_DEVICE_ID_ELECOM_M_XT3URBK_018F 0x018f
-#define USB_DEVICE_ID_ELECOM_M_XT3DRBK 0x00fc
+#define USB_DEVICE_ID_ELECOM_M_XT3DRBK_00FC 0x00fc
+#define USB_DEVICE_ID_ELECOM_M_XT3DRBK_018C 0x018c
#define USB_DEVICE_ID_ELECOM_M_XT4DRBK 0x00fd
#define USB_DEVICE_ID_ELECOM_M_DT1URBK 0x00fe
#define USB_DEVICE_ID_ELECOM_M_DT1DRBK 0x00ff
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index d5011a5d0890..e871f1729d4b 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -4662,6 +4662,8 @@ static const struct hid_device_id hidpp_devices[] = {
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb025) },
{ /* MX Master 3S mouse over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb034) },
+ { /* MX Anywhere 3S mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb037) },
{ /* MX Anywhere 3SB mouse over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb038) },
{}
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 179dc316b4b5..b1c3ef129058 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -81,6 +81,7 @@ MODULE_LICENSE("GPL");
#define MT_INPUTMODE_TOUCHPAD 0x03
#define MT_BUTTONTYPE_CLICKPAD 0
+#define MT_BUTTONTYPE_PRESSUREPAD 1
enum latency_mode {
HID_LATENCY_NORMAL = 0,
@@ -179,6 +180,7 @@ struct mt_device {
__u8 inputmode_value; /* InputMode HID feature value */
__u8 maxcontacts;
bool is_buttonpad; /* is this device a button pad? */
+ bool is_pressurepad; /* is this device a pressurepad? */
bool is_haptic_touchpad; /* is this device a haptic touchpad? */
bool serial_maybe; /* need to check for serial protocol */
@@ -393,6 +395,7 @@ static const struct mt_class mt_classes[] = {
{ .name = MT_CLS_VTL,
.quirks = MT_QUIRK_ALWAYS_VALID |
MT_QUIRK_CONTACT_CNT_ACCURATE |
+ MT_QUIRK_STICKY_FINGERS |
MT_QUIRK_FORCE_GET_FEATURE,
},
{ .name = MT_CLS_GOOGLE,
@@ -530,8 +533,14 @@ static void mt_feature_mapping(struct hid_device *hdev,
}
mt_get_feature(hdev, field->report);
- if (field->value[usage->usage_index] == MT_BUTTONTYPE_CLICKPAD)
+ switch (field->value[usage->usage_index]) {
+ case MT_BUTTONTYPE_CLICKPAD:
td->is_buttonpad = true;
+ break;
+ case MT_BUTTONTYPE_PRESSUREPAD:
+ td->is_pressurepad = true;
+ break;
+ }
break;
case 0xff0000c5:
@@ -1393,6 +1402,8 @@ static int mt_touch_input_configured(struct hid_device *hdev,
if (td->is_buttonpad)
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ if (td->is_pressurepad)
+ __set_bit(INPUT_PROP_PRESSUREPAD, input->propbit);
app->pending_palm_slots = devm_kcalloc(&hi->input->dev,
BITS_TO_LONGS(td->maxcontacts),
diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c
index 128aa6abd10b..e4dfcf26b04e 100644
--- a/drivers/hid/hid-playstation.c
+++ b/drivers/hid/hid-playstation.c
@@ -753,11 +753,16 @@ ps_gamepad_create(struct hid_device *hdev,
if (IS_ERR(gamepad))
return ERR_CAST(gamepad);
+ /* Set initial resting state for joysticks to 128 (center) */
input_set_abs_params(gamepad, ABS_X, 0, 255, 0, 0);
+ gamepad->absinfo[ABS_X].value = 128;
input_set_abs_params(gamepad, ABS_Y, 0, 255, 0, 0);
+ gamepad->absinfo[ABS_Y].value = 128;
input_set_abs_params(gamepad, ABS_Z, 0, 255, 0, 0);
input_set_abs_params(gamepad, ABS_RX, 0, 255, 0, 0);
+ gamepad->absinfo[ABS_RX].value = 128;
input_set_abs_params(gamepad, ABS_RY, 0, 255, 0, 0);
+ gamepad->absinfo[ABS_RY].value = 128;
input_set_abs_params(gamepad, ABS_RZ, 0, 255, 0, 0);
input_set_abs_params(gamepad, ABS_HAT0X, -1, 1, 0, 0);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index c89a015686c0..11438039cdb7 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -81,6 +81,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER), HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_EDIFIER, USB_DEVICE_ID_EDIFIER_QR30), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II), HID_QUIRK_MULTI_INPUT },
@@ -232,6 +233,15 @@ static const struct hid_device_id hid_quirks[] = {
* used as a driver. See hid_scan_report().
*/
static const struct hid_device_id hid_have_special_driver[] = {
+#if IS_ENABLED(CONFIG_APPLEDISPLAY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 0x9218) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 0x9219) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 0x921c) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 0x921d) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 0x9222) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 0x9226) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 0x9236) },
+#endif
#if IS_ENABLED(CONFIG_HID_A4TECH)
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
@@ -412,7 +422,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XGL20DLBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK_00FB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK_018F) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3DRBK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3DRBK_00FC) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3DRBK_018C) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT4DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
@@ -769,6 +780,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 63f46a2e5788..5a183af3d5c6 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -286,6 +286,7 @@ static int i2c_hid_get_report(struct i2c_hid *ihid,
* In addition to report data device will supply data length
* in the first 2 bytes of the response, so adjust .
*/
+ recv_len = min(recv_len, ihid->bufsize - sizeof(__le16));
error = i2c_hid_xfer(ihid, ihid->cmdbuf, length,
ihid->rawbuf, recv_len + sizeof(__le16));
if (error) {
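
The i2c-hid change above caps the requested report length so that the two-byte length prefix plus the payload always fits in the preallocated raw buffer. A minimal userspace sketch of the same guard, with hypothetical names (clamp_recv_len, report_len, bufsize) and an untrusted device-reported length:

#include <stdint.h>
#include <stdio.h>

/* Clamp a device-reported payload length to what the receive buffer can
 * actually hold; the 2-byte prefix mirrors the length field that precedes
 * the report data (sizes are illustrative). */
static size_t clamp_recv_len(size_t report_len, size_t bufsize)
{
    size_t prefix = sizeof(uint16_t);       /* length field in front of the data */
    size_t max_payload;

    if (bufsize < prefix)
        return 0;                           /* no room for any payload at all */

    max_payload = bufsize - prefix;
    return report_len < max_payload ? report_len : max_payload;
}

int main(void)
{
    size_t bufsize = 64;

    printf("%zu\n", clamp_recv_len(30, bufsize));   /* 30: well-behaved device  */
    printf("%zu\n", clamp_recv_len(4096, bufsize)); /* 62: over-reported length */
    return 0;
}
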
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index f37b3bc2bb7d..6d64008f2ce0 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -495,6 +495,7 @@ static int ishtp_enum_enum_devices(struct ishtp_cl *hid_ishtp_cl)
int rv;
/* Send HOSTIF_DM_ENUM_DEVICES */
+ client_data->enum_devices_done = false;
memset(&msg, 0, sizeof(struct hostif_msg));
msg.hdr.command = HOSTIF_DM_ENUM_DEVICES;
rv = ishtp_cl_send(hid_ishtp_cl, (unsigned char *)&msg,
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index c6ce37244e49..c3915f3a060e 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -240,9 +240,17 @@ static int ishtp_cl_bus_match(struct device *dev, const struct device_driver *dr
{
struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
+ struct ishtp_fw_client *client = device->fw_client;
+ const struct ishtp_device_id *id;
- return(device->fw_client ? guid_equal(&driver->id[0].guid,
- &device->fw_client->props.protocol_name) : 0);
+ if (client) {
+ for (id = driver->id; !guid_is_null(&id->guid); id++) {
+ if (guid_equal(&id->guid, &client->props.protocol_name))
+ return 1;
+ }
+ }
+
+ return 0;
}
/**
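
The ishtp_cl_bus_match() rework above walks the driver's whole ID table instead of comparing only the first entry; such tables are terminated by an all-zero entry. A compact userspace sketch of that sentinel-terminated loop, where struct dev_id, guid_is_null() and the GUID values are all made up for illustration:

#include <stdio.h>
#include <string.h>

/* Stand-in for a driver ID table entry; a zeroed GUID marks the end of the
 * table, mirroring the usual sentinel-terminated kernel ID tables. */
struct dev_id { unsigned char guid[16]; };

static int guid_is_null(const struct dev_id *id)
{
    static const struct dev_id zero;

    return memcmp(id, &zero, sizeof(zero)) == 0;
}

/* Match a device GUID against every table entry, not just table[0]. */
static int bus_match(const struct dev_id *table, const unsigned char *dev_guid)
{
    for (; !guid_is_null(table); table++)
        if (memcmp(table->guid, dev_guid, sizeof(table->guid)) == 0)
            return 1;
    return 0;
}

int main(void)
{
    struct dev_id table[] = {
        { { 0x11 } },   /* first supported GUID (remaining bytes zero) */
        { { 0x22 } },   /* second supported GUID                        */
        { { 0 } },      /* sentinel: terminates the table               */
    };
    unsigned char dev[16] = { 0x22 };

    printf("%s\n", bus_match(table, dev) ? "match" : "no match"); /* match */
    return 0;
}
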
diff --git a/drivers/hid/intel-thc-hid/Kconfig b/drivers/hid/intel-thc-hid/Kconfig
index 0351d1137607..9d74e53b8c62 100644
--- a/drivers/hid/intel-thc-hid/Kconfig
+++ b/drivers/hid/intel-thc-hid/Kconfig
@@ -7,6 +7,7 @@ menu "Intel THC HID Support"
config INTEL_THC_HID
tristate "Intel Touch Host Controller"
depends on ACPI
+ select SGL_ALLOC
help
THC (Touch Host Controller) is the name of the IP block in PCH that
interfaces with Touch Devices (ex: touchscreen, touchpad etc.). It
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
index 636a68306501..7e220a4c5ded 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
@@ -1593,7 +1593,7 @@ int thc_i2c_set_rx_max_size(struct thc_device *dev, u32 max_rx_size)
if (!max_rx_size)
return -EOPNOTSUPP;
- ret = regmap_read(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, &val);
+ ret = regmap_read(dev->thc_regmap, THC_M_PRT_SPI_ICRRD_OPCODE_OFFSET, &val);
if (ret)
return ret;
@@ -1662,7 +1662,7 @@ int thc_i2c_set_rx_int_delay(struct thc_device *dev, u32 delay_us)
if (!delay_us)
return -EOPNOTSUPP;
- ret = regmap_read(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, &val);
+ ret = regmap_read(dev->thc_regmap, THC_M_PRT_SPI_ICRRD_OPCODE_OFFSET, &val);
if (ret)
return ret;
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
index 82b8854843e0..6ee675e0a738 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
@@ -232,6 +232,7 @@ static int setup_dma_buffers(struct thc_device *dev,
return 0;
memset(config->sgls, 0, sizeof(config->sgls));
+ memset(config->sgls_nent_pages, 0, sizeof(config->sgls_nent_pages));
memset(config->sgls_nent, 0, sizeof(config->sgls_nent));
cpu_addr = dma_alloc_coherent(dev->dev, prd_tbls_size,
@@ -254,6 +255,7 @@ static int setup_dma_buffers(struct thc_device *dev,
}
count = dma_map_sg(dev->dev, config->sgls[i], nent, dir);
+ config->sgls_nent_pages[i] = nent;
config->sgls_nent[i] = count;
}
@@ -299,7 +301,7 @@ static void release_dma_buffers(struct thc_device *dev,
continue;
dma_unmap_sg(dev->dev, config->sgls[i],
- config->sgls_nent[i],
+ config->sgls_nent_pages[i],
config->dir);
sgl_free(config->sgls[i]);
@@ -573,6 +575,11 @@ static int read_dma_buffer(struct thc_device *dev,
return -EINVAL;
}
+ if (!read_config->prd_tbls || !read_config->sgls[prd_table_index]) {
+ dev_err_once(dev->dev, "PRD tables are not ready yet\n");
+ return -EINVAL;
+ }
+
prd_tbl = &read_config->prd_tbls[prd_table_index];
mes_len = calc_message_len(prd_tbl, &nent);
if (mes_len > read_config->max_packet_size) {
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h
index 78917400492c..541d33995baf 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h
@@ -91,6 +91,7 @@ struct thc_prd_table {
* @dir: Direction of DMA for this config
* @prd_tbls: PRD tables for current DMA
* @sgls: Array of pointers to scatter-gather lists
+ * @sgls_nent_pages: Number of pages per scatter-gather list
* @sgls_nent: Actual number of entries per scatter-gather list
* @prd_tbl_num: Actual number of PRD tables
* @max_packet_size: Size of the buffer needed for 1 DMA message (1 PRD table)
@@ -107,6 +108,7 @@ struct thc_dma_configuration {
struct thc_prd_table *prd_tbls;
struct scatterlist *sgls[PRD_TABLES_NUM];
+ u8 sgls_nent_pages[PRD_TABLES_NUM];
u8 sgls_nent[PRD_TABLES_NUM];
u8 prd_tbl_num;
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index aac0051a2cf6..758eb21430cd 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -985,6 +985,7 @@ static int usbhid_parse(struct hid_device *hid)
struct usb_device *dev = interface_to_usbdev (intf);
struct hid_descriptor *hdesc;
struct hid_class_descriptor *hcdesc;
+ __u8 fixed_opt_descriptors_size;
u32 quirks = 0;
unsigned int rsize = 0;
char *rdesc;
@@ -1015,7 +1016,21 @@ static int usbhid_parse(struct hid_device *hid)
(hdesc->bNumDescriptors - 1) * sizeof(*hcdesc)) {
dbg_hid("hid descriptor invalid, bLen=%hhu bNum=%hhu\n",
hdesc->bLength, hdesc->bNumDescriptors);
- return -EINVAL;
+
+ /*
+ * Some devices may expose a wrong number of descriptors compared
+ * to the provided length.
+ * However, we ignore the optional hid class descriptors entirely
+ * so we can safely recompute the proper field.
+ */
+ if (hdesc->bLength >= sizeof(*hdesc)) {
+ fixed_opt_descriptors_size = hdesc->bLength - sizeof(*hdesc);
+
+ hid_warn(intf, "fixing wrong optional hid class descriptors count\n");
+ hdesc->bNumDescriptors = fixed_opt_descriptors_size / sizeof(*hcdesc) + 1;
+ } else {
+ return -EINVAL;
+ }
}
hid->version = le16_to_cpu(hdesc->bcdHID);
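
The usbhid hunk above recomputes the optional class-descriptor count from bLength instead of rejecting the device outright: everything past the fixed part of the descriptor must be optional descriptors, and the fixed part already embeds one class descriptor, hence the "+ 1". A standalone sketch of that arithmetic, where struct hid_desc, struct class_desc and fixed_num_descriptors() are illustration-only layouts, not the kernel definitions:

#include <stdint.h>
#include <stdio.h>

struct class_desc { uint8_t type; uint16_t len; } __attribute__((packed));
struct hid_desc {
    uint8_t  bLength;
    uint8_t  bDescriptorType;
    uint16_t bcdHID;
    uint8_t  bCountryCode;
    uint8_t  bNumDescriptors;
    struct class_desc desc[1];   /* first (mandatory) class descriptor */
} __attribute__((packed));

/* Recompute the descriptor count from bLength when the reported count does
 * not match the advertised length. */
static int fixed_num_descriptors(const struct hid_desc *h)
{
    uint8_t extra;

    if (h->bLength < sizeof(*h))
        return -1;                       /* too short to be valid at all */

    extra = h->bLength - sizeof(*h);     /* bytes of optional descriptors */
    return extra / sizeof(struct class_desc) + 1;
}

int main(void)
{
    struct hid_desc h = { .bLength = sizeof(h) + 2 * sizeof(struct class_desc),
                          .bNumDescriptors = 7 /* bogus value from the device */ };

    printf("recomputed count: %d\n", fixed_num_descriptors(&h)); /* 3 */
    return 0;
}
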
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index a34753fc2973..6040a8940674 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -76,6 +76,9 @@
#define DELL_SMM_NO_TEMP 10
#define DELL_SMM_NO_FANS 4
+/* limit fan multiplier to avoid overflow */
+#define DELL_SMM_MAX_FAN_MULT (INT_MAX / U16_MAX)
+
struct smm_regs {
unsigned int eax;
unsigned int ebx;
@@ -1253,6 +1256,12 @@ static int dell_smm_init_data(struct device *dev, const struct dell_smm_ops *ops
data->ops = ops;
/* All options must not be 0 */
data->i8k_fan_mult = fan_mult ? : I8K_FAN_MULT;
+ if (data->i8k_fan_mult > DELL_SMM_MAX_FAN_MULT) {
+ dev_err(dev,
+ "fan multiplier %u is too large (max %u)\n",
+ data->i8k_fan_mult, DELL_SMM_MAX_FAN_MULT);
+ return -EINVAL;
+ }
data->i8k_fan_max = fan_max ? : I8K_FAN_HIGH;
data->i8k_pwm_mult = DIV_ROUND_UP(255, data->i8k_fan_max);
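
The dell-smm-hwmon change above caps the module-supplied fan multiplier so that multiplier times a 16-bit raw speed can never overflow a signed int, i.e. the multiplier must stay at or below INT_MAX / U16_MAX. A minimal check of the same bound, with scale_fan_speed() and MAX_FAN_MULT as stand-in names:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Largest multiplier for which mult * UINT16_MAX still fits in an int. */
#define MAX_FAN_MULT (INT_MAX / UINT16_MAX)

static int scale_fan_speed(unsigned int mult, uint16_t raw)
{
    /* Reject multipliers that could overflow before ever multiplying. */
    if (mult == 0 || mult > MAX_FAN_MULT)
        return -1;

    return (int)(mult * raw);   /* guaranteed to fit in int */
}

int main(void)
{
    printf("max mult: %u\n", (unsigned int)MAX_FAN_MULT); /* 32768 with 32-bit int */
    printf("%d\n", scale_fan_speed(30, 4000));            /* 120000          */
    printf("%d\n", scale_fan_speed(70000, 4000));         /* -1, rejected    */
    return 0;
}
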
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index 228c5f6c6f38..129f3a9e8fe9 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -277,6 +277,9 @@ static ssize_t ibmpex_high_low_store(struct device *dev,
{
struct ibmpex_bmc_data *data = dev_get_drvdata(dev);
+ if (!data)
+ return -ENODEV;
+
ibmpex_reset_high_low_data(data);
return count;
@@ -508,6 +511,9 @@ static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data)
{
int i, j;
+ hwmon_device_unregister(data->hwmon_dev);
+ dev_set_drvdata(data->bmc_device, NULL);
+
device_remove_file(data->bmc_device,
&sensor_dev_attr_reset_high_low.dev_attr);
device_remove_file(data->bmc_device, &dev_attr_name.attr);
@@ -521,8 +527,7 @@ static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data)
}
list_del(&data->list);
- dev_set_drvdata(data->bmc_device, NULL);
- hwmon_device_unregister(data->hwmon_dev);
+
ipmi_destroy_user(data->user);
kfree(data->sensors);
kfree(data);
diff --git a/drivers/hwmon/ltc4282.c b/drivers/hwmon/ltc4282.c
index b9cad89f2cd9..db6534e67991 100644
--- a/drivers/hwmon/ltc4282.c
+++ b/drivers/hwmon/ltc4282.c
@@ -1000,8 +1000,9 @@ static umode_t ltc4282_in_is_visible(const struct ltc4282_state *st, u32 attr)
case hwmon_in_max:
case hwmon_in_min:
case hwmon_in_enable:
- case hwmon_in_reset_history:
return 0644;
+ case hwmon_in_reset_history:
+ return 0200;
default:
return 0;
}
@@ -1020,8 +1021,9 @@ static umode_t ltc4282_curr_is_visible(u32 attr)
return 0444;
case hwmon_curr_max:
case hwmon_curr_min:
- case hwmon_curr_reset_history:
return 0644;
+ case hwmon_curr_reset_history:
+ return 0200;
default:
return 0;
}
@@ -1039,8 +1041,9 @@ static umode_t ltc4282_power_is_visible(u32 attr)
return 0444;
case hwmon_power_max:
case hwmon_power_min:
- case hwmon_power_reset_history:
return 0644;
+ case hwmon_power_reset_history:
+ return 0200;
default:
return 0;
}
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index fbaa34973694..07f596581c6e 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -397,7 +397,7 @@ static int tmp401_chip_read(struct device *dev, u32 attr, int channel, long *val
ret = regmap_read(data->regmap, TMP401_CONVERSION_RATE, &regval);
if (ret < 0)
return ret;
- *val = (1 << (7 - regval)) * 125;
+ *val = (1 << (7 - min(regval, 7))) * 125;
break;
case hwmon_chip_temp_reset_history:
*val = 0;
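
The tmp401 fix above clamps the conversion-rate register before using it as a shift amount, so a bogus register value greater than 7 cannot produce an oversized or effectively negative shift. A self-contained version of the same calculation, assuming the 125 ms base interval from the patch; conversion_interval_ms() is an invented helper name:

#include <stdio.h>

/* Conversion interval in ms for a rate register where 7 is the fastest
 * setting (125 ms) and each step below it doubles the interval.  Clamping
 * keeps the shift in range even if the device returns junk. */
static long conversion_interval_ms(unsigned int regval)
{
    unsigned int clamped = regval > 7 ? 7 : regval;

    return (1L << (7 - clamped)) * 125;
}

int main(void)
{
    printf("%ld\n", conversion_interval_ms(0));   /* 16000 ms        */
    printf("%ld\n", conversion_interval_ms(7));   /* 125 ms          */
    printf("%ld\n", conversion_interval_ms(200)); /* 125 ms, clamped */
    return 0;
}
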
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index cea87fcb4a1a..09ba55bae1fa 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -167,6 +167,7 @@ config I2C_I801
Panther Lake (SOC)
Wildcat Lake (SOC)
Diamond Rapids (SOC)
+ Nova Lake (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index e418a4f23f15..b5629cffe99b 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -1098,8 +1098,7 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iproc_i2c);
iproc_i2c->device = &pdev->dev;
- iproc_i2c->type =
- (enum bcm_iproc_i2c_type)of_device_get_match_data(&pdev->dev);
+ iproc_i2c->type = (kernel_ulong_t)of_device_get_match_data(&pdev->dev);
init_completion(&iproc_i2c->done);
iproc_i2c->base = devm_platform_ioremap_resource(pdev, 0);
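
The cast above (and the matching i2c-pxa and i2c-rcar changes further down) routes the match data, a void pointer carrying a small integer, through a pointer-sized integer type before narrowing to the driver's enum, avoiding a direct pointer-to-enum cast. A standalone illustration using uintptr_t as the intermediate type (the kernel patches use kernel_ulong_t); enum controller_type and match_data_to_type() are invented for the example:

#include <stdint.h>
#include <stdio.h>

enum controller_type { TYPE_A = 1, TYPE_B = 2 };

/* Match tables often smuggle a small integer inside a void pointer.  Going
 * through uintptr_t and then narrowing keeps the conversion well defined. */
static enum controller_type match_data_to_type(const void *data)
{
    return (enum controller_type)(uintptr_t)data;
}

int main(void)
{
    const void *match_data = (const void *)(uintptr_t)TYPE_B;

    printf("type = %d\n", match_data_to_type(match_data)); /* 2 */
    return 0;
}
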
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 81e6e2d7ad3d..9e1789725edf 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -85,6 +85,7 @@
* Panther Lake-P (SOC) 0xe422 32 hard yes yes yes
* Wildcat Lake-U (SOC) 0x4d22 32 hard yes yes yes
* Diamond Rapids (SOC) 0x5827 32 hard yes yes yes
+ * Nova Lake-S (PCH) 0x6e23 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -245,6 +246,7 @@
#define PCI_DEVICE_ID_INTEL_BIRCH_STREAM_SMBUS 0x5796
#define PCI_DEVICE_ID_INTEL_DIAMOND_RAPIDS_SMBUS 0x5827
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
+#define PCI_DEVICE_ID_INTEL_NOVA_LAKE_S_SMBUS 0x6e23
#define PCI_DEVICE_ID_INTEL_ARROW_LAKE_H_SMBUS 0x7722
#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_S_SMBUS 0x7a23
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS 0x7aa3
@@ -1061,6 +1063,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, WILDCAT_LAKE_U_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, NOVA_LAKE_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 968a8b8794da..09af3b3625f1 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1266,7 +1266,7 @@ static int i2c_pxa_probe_dt(struct platform_device *pdev, struct pxa_i2c *i2c,
i2c->use_pio = of_property_read_bool(np, "mrvl,i2c-polling");
i2c->fast_mode = of_property_read_bool(np, "mrvl,i2c-fast-mode");
- *i2c_types = (enum pxa_i2c_types)device_get_match_data(&pdev->dev);
+ *i2c_types = (kernel_ulong_t)device_get_match_data(&pdev->dev);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index d51884ab99f4..5ce8f8e4856f 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -1141,7 +1141,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (IS_ERR(priv->io))
return PTR_ERR(priv->io);
- priv->devtype = (enum rcar_i2c_type)of_device_get_match_data(dev);
+ priv->devtype = (kernel_ulong_t)of_device_get_match_data(dev);
init_waitqueue_head(&priv->wait);
adap = &priv->adap;
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 61596cda2b65..35ba852a172a 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -80,37 +80,25 @@ static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
.min = sizeof(struct rdma_nla_ls_gid)},
};
-static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
+static void ib_nl_process_ip_rsep(const struct nlmsghdr *nlh)
{
struct nlattr *tb[LS_NLA_TYPE_MAX] = {};
+ union ib_gid gid;
+ struct addr_req *req;
+ int found = 0;
int ret;
if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
- return false;
+ return;
ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
nlmsg_len(nlh), ib_nl_addr_policy, NULL);
if (ret)
- return false;
-
- return true;
-}
-
-static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
-{
- const struct nlattr *head, *curr;
- union ib_gid gid;
- struct addr_req *req;
- int len, rem;
- int found = 0;
-
- head = (const struct nlattr *)nlmsg_data(nlh);
- len = nlmsg_len(nlh);
+ return;
- nla_for_each_attr(curr, head, len, rem) {
- if (curr->nla_type == LS_NLA_TYPE_DGID)
- memcpy(&gid, nla_data(curr), nla_len(curr));
- }
+ if (!tb[LS_NLA_TYPE_DGID])
+ return;
+ memcpy(&gid, nla_data(tb[LS_NLA_TYPE_DGID]), sizeof(gid));
spin_lock_bh(&lock);
list_for_each_entry(req, &req_list, list) {
@@ -137,8 +125,7 @@ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
!(NETLINK_CB(skb).sk))
return -EPERM;
- if (ib_nl_is_good_ip_resp(nlh))
- ib_nl_process_good_ip_rsep(nlh);
+ ib_nl_process_ip_rsep(nlh);
return 0;
}
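
The addr.c rework above lets the generic attribute parser fill a per-type table and then reads the single attribute it needs, instead of open-coding a loop over raw attributes. A generic userspace TLV version of that "parse into a table indexed by type" pattern; nothing here is the netlink API, and struct tlv, TYPE_DGID and parse_into_table() are invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TYPE_MAX  4
#define TYPE_DGID 2          /* the one attribute we actually care about */

/* Minimal TLV record: type, payload length, pointer to payload bytes. */
struct tlv { uint8_t type; uint8_t len; const uint8_t *data; };

/* Index every record by type (last occurrence wins), then pick out the
 * entry we want by its type rather than re-walking the stream. */
static const struct tlv *parse_into_table(const struct tlv *msgs, int n,
                                          const struct tlv *tb[TYPE_MAX])
{
    memset(tb, 0, TYPE_MAX * sizeof(*tb));
    for (int i = 0; i < n; i++)
        if (msgs[i].type < TYPE_MAX)
            tb[msgs[i].type] = &msgs[i];
    return tb[TYPE_DGID];
}

int main(void)
{
    const uint8_t gid[4] = { 0xfe, 0x80, 0x00, 0x01 };
    const struct tlv msgs[] = {
        { 1, 2, (const uint8_t *)"ok" },
        { TYPE_DGID, sizeof(gid), gid },
    };
    const struct tlv *tb[TYPE_MAX];
    const struct tlv *dgid = parse_into_table(msgs, 2, tb);

    if (dgid)
        printf("dgid attr found, %u bytes\n", dgid->len);
    return 0;
}
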
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 95e89f5c147c..f00f1d3fbd9c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2009,6 +2009,7 @@ static void destroy_mc(struct rdma_id_private *id_priv,
ib_sa_free_multicast(mc->sa_mc);
if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
+ struct rdma_cm_event *event = &mc->iboe_join.event;
struct rdma_dev_addr *dev_addr =
&id_priv->id.route.addr.dev_addr;
struct net_device *ndev = NULL;
@@ -2031,6 +2032,8 @@ static void destroy_mc(struct rdma_id_private *id_priv,
dev_put(ndev);
cancel_work_sync(&mc->iboe_join.work);
+ if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
+ rdma_destroy_ah_attr(&event->param.ud.ah_attr);
}
kfree(mc);
}
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 13e8a1714bbd..1174ab7da629 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2881,8 +2881,10 @@ int ib_del_sub_device_and_put(struct ib_device *sub)
{
struct ib_device *parent = sub->parent;
- if (!parent)
+ if (!parent) {
+ ib_device_put(sub);
return -EOPNOTSUPP;
+ }
mutex_lock(&parent->subdev_lock);
list_del(&sub->subdev_list);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 11b1a194de44..ee390928511a 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -738,7 +738,7 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
(struct in6_addr *)dgid);
return 0;
} else if (net_type == RDMA_NETWORK_IPV6 ||
- net_type == RDMA_NETWORK_IB || RDMA_NETWORK_ROCE_V1) {
+ net_type == RDMA_NETWORK_IB || net_type == RDMA_NETWORK_ROCE_V1) {
*dgid = hdr->ibgrh.dgid;
*sgid = hdr->ibgrh.sgid;
return 0;
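
The verbs.c one-liner above fixes a classic C pitfall: in "net_type == RDMA_NETWORK_IB || RDMA_NETWORK_ROCE_V1" the last operand is a bare non-zero constant, so the whole condition was always true. A tiny standalone illustration with placeholder NET_* constants:

#include <stdio.h>

enum net_type { NET_IPV4 = 1, NET_IPV6 = 2, NET_IB = 3, NET_ROCE_V1 = 4 };

int main(void)
{
    enum net_type t = NET_IPV4;

    /* Buggy form: "|| NET_ROCE_V1" tests the constant itself, which is
     * non-zero, so the branch is taken for every value of t. */
    if (t == NET_IPV6 || t == NET_IB || NET_ROCE_V1)
        printf("buggy condition: taken even for NET_IPV4\n");

    /* Fixed form: every operand compares t against a constant. */
    if (t == NET_IPV6 || t == NET_IB || t == NET_ROCE_V1)
        printf("fixed condition: taken\n");
    else
        printf("fixed condition: not taken for NET_IPV4\n");

    return 0;
}
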
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index 09d371d442aa..cebec033f4a0 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -89,6 +89,9 @@ enum bnxt_re_hw_stats {
BNXT_RE_RES_SRQ_LOAD_ERR,
BNXT_RE_RES_TX_PCI_ERR,
BNXT_RE_RES_RX_PCI_ERR,
+ BNXT_RE_REQ_CQE_ERROR,
+ BNXT_RE_RESP_CQE_ERROR,
+ BNXT_RE_RESP_REMOTE_ACCESS_ERRS,
BNXT_RE_OUT_OF_SEQ_ERR,
BNXT_RE_TX_ATOMIC_REQ,
BNXT_RE_TX_READ_REQ,
@@ -110,9 +113,6 @@ enum bnxt_re_hw_stats {
BNXT_RE_TX_CNP,
BNXT_RE_RX_CNP,
BNXT_RE_RX_ECN,
- BNXT_RE_REQ_CQE_ERROR,
- BNXT_RE_RESP_CQE_ERROR,
- BNXT_RE_RESP_REMOTE_ACCESS_ERRS,
BNXT_RE_NUM_EXT_COUNTERS
};
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index f19b55c13d58..ff91511bd338 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -2919,14 +2919,9 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
wqe.rawqp1.lflags |=
SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
}
- switch (wr->send_flags) {
- case IB_SEND_IP_CSUM:
+ if (wr->send_flags & IB_SEND_IP_CSUM)
wqe.rawqp1.lflags |=
SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
- break;
- default:
- break;
- }
fallthrough;
case IB_WR_SEND_WITH_INV:
rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
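
The bnxt_re hunk above replaces a switch on wr->send_flags with a bitwise test, since send_flags is a bitmask and a switch only matches when IB_SEND_IP_CSUM is the sole bit set. A small demonstration of why the two differ, using made-up FLAG_* values:

#include <stdio.h>

#define FLAG_SIGNALED  0x1
#define FLAG_IP_CSUM   0x4

static int csum_requested_switch(unsigned int flags)
{
    switch (flags) {            /* only matches if IP_CSUM is the only bit */
    case FLAG_IP_CSUM:
        return 1;
    default:
        return 0;
    }
}

static int csum_requested_mask(unsigned int flags)
{
    return (flags & FLAG_IP_CSUM) != 0;   /* matches regardless of other bits */
}

int main(void)
{
    unsigned int flags = FLAG_SIGNALED | FLAG_IP_CSUM;

    printf("switch: %d\n", csum_requested_switch(flags)); /* 0, flag missed */
    printf("mask:   %d\n", csum_requested_mask(flags));   /* 1              */
    return 0;
}
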
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 295a9610f3e6..4dad0cfcfa98 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -1112,7 +1112,7 @@ static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt)
creq_db->dbinfo.flags = 0;
creq_db->reg.bar_id = RCFW_COMM_CONS_PCI_BAR_REGION;
creq_db->reg.bar_base = pci_resource_start(pdev, creq_db->reg.bar_id);
- if (!creq_db->reg.bar_id)
+ if (!creq_db->reg.bar_base)
dev_err(&pdev->dev,
"QPLIB: CREQ BAR region %d resc start is 0!",
creq_db->reg.bar_id);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 875d7b52c06a..4d674a3aee1a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -64,9 +64,7 @@ static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
for (i = 0; i < pbl->pg_count; i++) {
if (pbl->pg_arr[i])
dma_free_coherent(&pdev->dev, pbl->pg_size,
- (void *)((unsigned long)
- pbl->pg_arr[i] &
- PAGE_MASK),
+ pbl->pg_arr[i],
pbl->pg_map_arr[i]);
else
dev_warn(&pdev->dev,
@@ -237,7 +235,7 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
if (npbl % BIT(MAX_PDL_LVL_SHIFT))
npde++;
/* Alloc PDE pages */
- sginfo.pgsize = npde * pg_size;
+ sginfo.pgsize = npde * ROCE_PG_SIZE_4K;
sginfo.npages = 1;
rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
if (rc)
@@ -245,7 +243,7 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
/* Alloc PBL pages */
sginfo.npages = npbl;
- sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgsize = ROCE_PG_SIZE_4K;
rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
if (rc)
goto fail;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 22d3e25c3b9d..755bba8d58bb 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1320,13 +1320,9 @@ static int umem_to_page_list(struct efa_dev *dev,
u32 hp_cnt,
u8 hp_shift)
{
- u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
struct ib_block_iter biter;
unsigned int hp_idx = 0;
- ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
- hp_cnt, pages_in_hp);
-
rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index cc2a12f735d3..13d7499131d4 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -251,7 +251,7 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,
void *ptr)
{
struct neighbour *neigh = ptr;
- struct net_device *real_dev, *netdev = (struct net_device *)neigh->dev;
+ struct net_device *real_dev, *netdev;
struct irdma_device *iwdev;
struct ib_device *ibdev;
__be32 *p;
@@ -260,6 +260,7 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,
switch (event) {
case NETEVENT_NEIGH_UPDATE:
+ netdev = neigh->dev;
real_dev = rdma_vlan_dev_real_dev(netdev);
if (!real_dev)
real_dev = netdev;
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 1becc8779123..7600412b0739 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -56,6 +56,10 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
doorbell = mana_ucontext->doorbell;
} else {
is_rnic_cq = true;
+ if (attr->cqe > U32_MAX / COMP_ENTRY_SIZE / 2 + 1) {
+ ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
+ return -EINVAL;
+ }
buf_size = MANA_PAGE_ALIGN(roundup_pow_of_two(attr->cqe * COMP_ENTRY_SIZE));
cq->cqe = buf_size / COMP_ENTRY_SIZE;
err = mana_ib_create_kernel_queue(mdev, buf_size, GDMA_CQ, &cq->queue);
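
The mana check above rejects CQE counts whose buffer size would overflow a u32 once rounded up to the next power of two: rounding up can at most double the product, so the largest safe count is U32_MAX / COMP_ENTRY_SIZE / 2 + 1. A sketch of that arithmetic, assuming a 64-byte completion-entry size:

#include <stdint.h>
#include <stdio.h>

#define COMP_ENTRY_SIZE 64u   /* assumed per-CQE buffer footprint */

/* The ring buffer is sized as roundup_pow_of_two(cqe * COMP_ENTRY_SIZE) and
 * held in a u32; keeping cqe at or below the limit guarantees the rounded
 * size still fits. */
static int cqe_count_is_valid(uint32_t cqe)
{
    uint32_t limit = UINT32_MAX / COMP_ENTRY_SIZE / 2 + 1;

    return cqe <= limit;
}

int main(void)
{
    printf("limit: %u\n", UINT32_MAX / COMP_ENTRY_SIZE / 2 + 1); /* 33554432 */
    printf("%d\n", cqe_count_is_valid(1u << 20));                /* 1        */
    printf("%d\n", cqe_count_is_valid((1u << 25) + 1));          /* 0        */
    return 0;
}
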
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 0195d361e5e3..0bd0902b11f7 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -64,7 +64,39 @@ static inline void rxe_reclassify_recv_socket(struct socket *sock)
break;
default:
WARN_ON_ONCE(1);
+ return;
}
+ /*
+ * sock_lock_init_class_and_name() calls
+ * sk_owner_set(sk, THIS_MODULE); in order
+ * to make sure the referenced global
+ * variables rxe_recv_slock_key and
+ * rxe_recv_sk_key are not removed
+ * before the socket is closed.
+ *
+ * However this prevents rxe_net_exit()
+ * from being called and 'rmmod rdma_rxe'
+ * is refused because of the references.
+ *
+ * For the global sockets in recv_sockets,
+ * we are sure that rxe_net_exit() will call
+ * rxe_release_udp_tunnel -> udp_tunnel_sock_release.
+ *
+ * So we don't need the additional reference to
+ * our own (THIS_MODULE).
+ */
+ sk_owner_put(sk);
+ /*
+ * We also call sk_owner_clear() otherwise
+ * sk_owner_put(sk) in sk_prot_free will
+ * fail, which is called via
+ * sk_free -> __sk_free -> sk_destruct
+ * and sk_destruct calls __sk_destruct
+ * directly or via call_rcu()
+ * so sk_prot_free() might be called
+ * after rxe_net_exit().
+ */
+ sk_owner_clear(sk);
#endif /* CONFIG_DEBUG_LOCK_ALLOC */
}
diff --git a/drivers/infiniband/sw/rxe/rxe_odp.c b/drivers/infiniband/sw/rxe/rxe_odp.c
index ae71812bea82..c928cbf2e35f 100644
--- a/drivers/infiniband/sw/rxe/rxe_odp.c
+++ b/drivers/infiniband/sw/rxe/rxe_odp.c
@@ -179,8 +179,10 @@ static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u
return err;
need_fault = rxe_check_pagefault(umem_odp, iova, length);
- if (need_fault)
+ if (need_fault) {
+ mutex_unlock(&umem_odp->umem_mutex);
return -EFAULT;
+ }
}
return 0;
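The fix above restores lock balance: umem_odp->umem_mutex is taken earlier in the mapping path, so every early return has to drop it, while the success path deliberately keeps the lock held for the caller. A hedged sketch of that shape with generic names, not the actual rxe helpers:

    #include <linux/mutex.h>
    #include <linux/types.h>

    /* Sketch only: take a lock, bail out unlocked on error, stay locked on success. */
    static int map_and_lock_sketch(struct mutex *lock, bool need_fault)
    {
            int err = 0;

            mutex_lock(lock);
            if (need_fault) {
                    err = -EFAULT;
                    goto out_unlock;        /* never return an error with the mutex held */
            }
            return 0;                       /* caller unlocks after using the mapping */

    out_unlock:
            mutex_unlock(lock);
            return err;
    }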
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 71387811b281..2b397a544cb9 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -1464,6 +1464,7 @@ static void query_fast_reg_mode(struct rtrs_clt_path *clt_path)
mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
max_pages_per_mr = ib_dev->attrs.max_mr_size;
do_div(max_pages_per_mr, (1ull << mr_page_shift));
+ max_pages_per_mr = min_not_zero((u32)max_pages_per_mr, U32_MAX);
clt_path->max_pages_per_mr =
min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr,
ib_dev->attrs.max_fast_reg_page_list_len);
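The added line guards the (u32) narrowing that follows in min3(): max_pages_per_mr starts as a 64-bit max_mr_size divided by the MR page size and can exceed what 32 bits hold. A small sketch of the clamp-then-cast pattern this is aiming at, written with plain C types rather than the rtrs structures:

    #include <stdint.h>

    /* Clamp a 64-bit page count so a later 32-bit cast cannot truncate it. */
    static uint32_t pages_to_u32(uint64_t max_mr_size, unsigned int page_shift)
    {
            uint64_t pages = max_mr_size >> page_shift;

            if (pages > UINT32_MAX)         /* clamp before narrowing */
                    pages = UINT32_MAX;
            return (uint32_t)pages;
    }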
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index ef29bd483b5a..59529d593869 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -150,7 +150,7 @@ enum rtrs_msg_types {
/**
* enum rtrs_msg_flags - RTRS message flags.
- * @RTRS_NEED_INVAL: Send invalidation in response.
+ * @RTRS_MSG_NEED_INVAL_F: Send invalidation in response.
* @RTRS_MSG_NEW_RKEY_F: Send refreshed rkey in response.
*/
enum rtrs_msg_flags {
@@ -179,16 +179,19 @@ struct rtrs_sg_desc {
* @recon_cnt: Reconnections counter
* @sess_uuid: UUID of a session (path)
* @paths_uuid: UUID of a group of sessions (paths)
- *
+ * @first_conn: %1 if the connection request is the first for that session,
+ * otherwise %0
* NOTE: max size 56 bytes, see man rdma_connect().
*/
struct rtrs_msg_conn_req {
- /* Is set to 0 by cma.c in case of AF_IB, do not touch that.
- * see https://www.spinics.net/lists/linux-rdma/msg22397.html
+ /**
+ * @__cma_version: Is set to 0 by cma.c in case of AF_IB, do not touch
+ * that. See https://www.spinics.net/lists/linux-rdma/msg22397.html
*/
u8 __cma_version;
- /* On sender side that should be set to 0, or cma_save_ip_info()
- * extract garbage and will fail.
+ /**
+ * @__ip_version: On the sender side this must be set to 0, or
+ * cma_save_ip_info() will extract garbage and fail.
*/
u8 __ip_version;
__le16 magic;
@@ -199,6 +202,7 @@ struct rtrs_msg_conn_req {
uuid_t sess_uuid;
uuid_t paths_uuid;
u8 first_conn : 1;
+ /* private: */
u8 reserved_bits : 7;
u8 reserved[11];
};
@@ -211,6 +215,7 @@ struct rtrs_msg_conn_req {
* @queue_depth: max inflight messages (queue-depth) in this session
* @max_io_size: max io size server supports
* @max_hdr_size: max msg header size server supports
+ * @flags: RTRS message flags for this message
*
* NOTE: size is 56 bytes, max possible is 136 bytes, see man rdma_accept().
*/
@@ -222,22 +227,24 @@ struct rtrs_msg_conn_rsp {
__le32 max_io_size;
__le32 max_hdr_size;
__le32 flags;
+ /* private: */
u8 reserved[36];
};
/**
- * struct rtrs_msg_info_req
+ * struct rtrs_msg_info_req - client additional info request
* @type: @RTRS_MSG_INFO_REQ
* @pathname: Path name chosen by client
*/
struct rtrs_msg_info_req {
__le16 type;
u8 pathname[NAME_MAX];
+ /* private: */
u8 reserved[15];
};
/**
- * struct rtrs_msg_info_rsp
+ * struct rtrs_msg_info_rsp - server additional info response
* @type: @RTRS_MSG_INFO_RSP
* @sg_cnt: Number of @desc entries
* @desc: RDMA buffers where the client can write to server
@@ -245,12 +252,14 @@ struct rtrs_msg_info_req {
struct rtrs_msg_info_rsp {
__le16 type;
__le16 sg_cnt;
+ /* private: */
u8 reserved[4];
+ /* public: */
struct rtrs_sg_desc desc[];
};
/**
- * struct rtrs_msg_rkey_rsp
+ * struct rtrs_msg_rkey_rsp - server refreshed rkey response
* @type: @RTRS_MSG_RKEY_RSP
* @buf_id: RDMA buf_id of the new rkey
* @rkey: new remote key for RDMA buffers id from server
@@ -264,6 +273,7 @@ struct rtrs_msg_rkey_rsp {
/**
* struct rtrs_msg_rdma_read - RDMA data transfer request from client
* @type: always @RTRS_MSG_READ
+ * @flags: RTRS message flags (enum rtrs_msg_flags)
* @usr_len: length of user payload
* @sg_cnt: number of @desc entries
* @desc: RDMA buffers where the server can write the result to
@@ -277,7 +287,7 @@ struct rtrs_msg_rdma_read {
};
/**
- * struct_msg_rdma_write - Message transferred to server with RDMA-Write
+ * struct rtrs_msg_rdma_write - Message transferred to server with RDMA-Write
* @type: always @RTRS_MSG_WRITE
* @usr_len: length of user payload
*/
@@ -287,7 +297,7 @@ struct rtrs_msg_rdma_write {
};
/**
- * struct_msg_rdma_hdr - header for read or write request
+ * struct rtrs_msg_rdma_hdr - header for read or write request
* @type: @RTRS_MSG_WRITE | @RTRS_MSG_READ
*/
struct rtrs_msg_rdma_hdr {
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h
index b48b53a7c143..b5bd35712de0 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -24,8 +24,8 @@ struct rtrs_srv_op;
/**
* enum rtrs_clt_link_ev - Events about connectivity state of a client
- * @RTRS_CLT_LINK_EV_RECONNECTED Client was reconnected.
- * @RTRS_CLT_LINK_EV_DISCONNECTED Client was disconnected.
+ * @RTRS_CLT_LINK_EV_RECONNECTED: Client was reconnected.
+ * @RTRS_CLT_LINK_EV_DISCONNECTED: Client was disconnected.
*/
enum rtrs_clt_link_ev {
RTRS_CLT_LINK_EV_RECONNECTED,
@@ -33,7 +33,9 @@ enum rtrs_clt_link_ev {
};
/**
- * Source and destination address of a path to be established
+ * struct rtrs_addr - Source and destination address of a path to be established
+ * @src: source address
+ * @dst: destination address
*/
struct rtrs_addr {
struct sockaddr_storage *src;
@@ -41,7 +43,7 @@ struct rtrs_addr {
};
/**
- * rtrs_clt_ops - it holds the link event callback and private pointer.
+ * struct rtrs_clt_ops - it holds the link event callback and private pointer.
* @priv: User supplied private data.
* @link_ev: Event notification callback function for connection state changes
* @priv: User supplied data that was passed to rtrs_clt_open()
@@ -67,10 +69,10 @@ enum wait_type {
};
/**
- * enum rtrs_clt_con_type() type of ib connection to use with a given
+ * enum rtrs_clt_con_type - type of ib connection to use with a given
* rtrs_permit
- * @ADMIN_CON - use connection reserved for "service" messages
- * @IO_CON - use a connection reserved for IO
+ * @RTRS_ADMIN_CON: use connection reserved for "service" messages
+ * @RTRS_IO_CON: use a connection reserved for IO
*/
enum rtrs_clt_con_type {
RTRS_ADMIN_CON,
@@ -85,7 +87,7 @@ void rtrs_clt_put_permit(struct rtrs_clt_sess *sess,
struct rtrs_permit *permit);
/**
- * rtrs_clt_req_ops - it holds the request confirmation callback
+ * struct rtrs_clt_req_ops - it holds the request confirmation callback
* and a private pointer.
* @priv: User supplied private data.
* @conf_fn: callback function to be called as confirmation
@@ -105,7 +107,11 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index);
/**
- * rtrs_attrs - RTRS session attributes
+ * struct rtrs_attrs - RTRS session attributes
+ * @queue_depth: queue_depth saved from rtrs_clt_sess message
+ * @max_io_size: max_io_size from rtrs_clt_sess message, capped to
+ * @max_segments * %SZ_4K
+ * @max_segments: max_segments saved from rtrs_clt_sess message
*/
struct rtrs_attrs {
u32 queue_depth;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d72e89c25e50..363d50949386 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -133,6 +133,8 @@ static const struct xpad_device {
} xpad_device[] = {
/* Please keep this list sorted by vendor and product ID. */
{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
+ { 0x0351, 0x1000, "CRKD LP Blueberry Burst Pro Edition (Xbox)", 0, XTYPE_XBOX360 },
+ { 0x0351, 0x2000, "CRKD LP Black Tribal Edition (Xbox)", 0, XTYPE_XBOX360 },
{ 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 },
{ 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 },
{ 0x03f0, 0x038D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wired */
@@ -420,6 +422,7 @@ static const struct xpad_device {
{ 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE },
{ 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
{ 0x3537, 0x1010, "GameSir G7 SE", 0, XTYPE_XBOXONE },
+ { 0x3651, 0x1000, "CRKD SG", 0, XTYPE_XBOX360 },
{ 0x366c, 0x0005, "ByoWave Proteus Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE, FLAG_DELAY_INIT },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
{ 0x37d7, 0x2501, "Flydigi Apex 5", 0, XTYPE_XBOX360 },
@@ -518,6 +521,7 @@ static const struct usb_device_id xpad_table[] = {
*/
{ USB_INTERFACE_INFO('X', 'B', 0) }, /* Xbox USB-IF not-approved class */
XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 controller */
+ XPAD_XBOX360_VENDOR(0x0351), /* CRKD Controllers */
XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */
XPAD_XBOX360_VENDOR(0x03f0), /* HP HyperX Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One controllers */
@@ -578,6 +582,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x3285), /* Nacon Evol-X */
XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */
XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */
+ XPAD_XBOX360_VENDOR(0x3651), /* CRKD Controllers */
XPAD_XBOXONE_VENDOR(0x366c), /* ByoWave controllers */
XPAD_XBOX360_VENDOR(0x37d7), /* Flydigi Controllers */
XPAD_XBOX360_VENDOR(0x413d), /* Black Shark Green Ghost Controller */
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 6c999d89ee4b..422e28ad1e8e 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -1937,6 +1937,13 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
},
.callback = atkbd_deactivate_fixup,
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FMB-P"),
+ },
+ .callback = atkbd_deactivate_fixup,
+ },
{ }
};
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index c035216dd27c..2f130f819363 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -670,7 +670,8 @@ static int lkkbd_connect(struct serio *serio, struct serio_driver *drv)
return 0;
- fail3: serio_close(serio);
+ fail3: disable_work_sync(&lk->tq);
+ serio_close(serio);
fail2: serio_set_drvdata(serio, NULL);
fail1: input_free_device(input_dev);
kfree(lk);
@@ -684,6 +685,8 @@ static void lkkbd_disconnect(struct serio *serio)
{
struct lkkbd *lk = serio_get_drvdata(serio);
+ disable_work_sync(&lk->tq);
+
input_get_device(lk->dev);
input_unregister_device(lk->dev);
serio_close(serio);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index d0cb9fb94821..df8953a5196e 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -2975,6 +2975,7 @@ static void alps_disconnect(struct psmouse *psmouse)
psmouse_reset(psmouse);
timer_shutdown_sync(&priv->timer);
+ disable_delayed_work_sync(&priv->dev3_register_work);
if (priv->dev2)
input_unregister_device(priv->dev2);
if (!IS_ERR_OR_NULL(priv->dev3))
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index 1caa6c4ca435..654771275ce8 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -1169,6 +1169,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "X5KK45xS_X5SP45xS"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
/*
* A lot of modern Clevo barebones have touchpad and/or keyboard issues
* after suspend fixable with the forcenorestore quirk.
diff --git a/drivers/input/touchscreen/apple_z2.c b/drivers/input/touchscreen/apple_z2.c
index 0de161eae59a..271ababf0ad5 100644
--- a/drivers/input/touchscreen/apple_z2.c
+++ b/drivers/input/touchscreen/apple_z2.c
@@ -21,6 +21,7 @@
#define APPLE_Z2_TOUCH_STARTED 3
#define APPLE_Z2_TOUCH_MOVED 4
#define APPLE_Z2_CMD_READ_INTERRUPT_DATA 0xEB
+#define APPLE_Z2_REPLY_INTERRUPT_DATA 0xE1
#define APPLE_Z2_HBPP_CMD_BLOB 0x3001
#define APPLE_Z2_FW_MAGIC 0x5746325A
#define LOAD_COMMAND_INIT_PAYLOAD 0
@@ -142,6 +143,9 @@ static int apple_z2_read_packet(struct apple_z2 *z2)
if (error)
return error;
+ if (z2->rx_buf[0] != APPLE_Z2_REPLY_INTERRUPT_DATA)
+ return 0;
+
pkt_len = (get_unaligned_le16(z2->rx_buf + 1) + 8) & 0xfffffffc;
error = spi_read(z2->spidev, z2->rx_buf, pkt_len);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index d6edfab16770..0534b2ba650b 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -85,7 +85,7 @@ static int titsc_config_wires(struct titsc *ts_dev)
wire_order[i] = ts_dev->config_inp[i] & 0x0F;
if (WARN_ON(analog_line[i] > 7))
return -EINVAL;
- if (WARN_ON(wire_order[i] > ARRAY_SIZE(config_pins)))
+ if (WARN_ON(wire_order[i] >= ARRAY_SIZE(config_pins)))
return -EINVAL;
}
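The change from '>' to '>=' is the usual off-by-one for array bounds: valid indices into config_pins run from 0 to ARRAY_SIZE(config_pins) - 1, so an index equal to ARRAY_SIZE is already out of range. A minimal illustration, assuming a hypothetical 4-entry table:

    #include <stdbool.h>
    #include <stddef.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const int config_pins_demo[4] = { 1, 2, 3, 4 };  /* hypothetical table */

    static bool wire_index_valid(size_t idx)
    {
            /* idx == ARRAY_SIZE() is one past the end, so reject it too. */
            return idx < ARRAY_SIZE(config_pins_demo);
    }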
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 25044d28f28a..b742ef1adb35 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -173,6 +173,11 @@ static inline struct protection_domain *to_pdomain(struct iommu_domain *dom)
bool translation_pre_enabled(struct amd_iommu *iommu);
int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line);
+int amd_iommu_pdom_id_alloc(void);
+int amd_iommu_pdom_id_reserve(u16 id, gfp_t gfp);
+void amd_iommu_pdom_id_free(int id);
+void amd_iommu_pdom_id_destroy(void);
+
#ifdef CONFIG_DMI
void amd_iommu_apply_ivrs_quirks(void);
#else
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 4b2953418977..384c90b4f90a 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1136,8 +1136,11 @@ static void set_dte_bit(struct dev_table_entry *dte, u8 bit)
static bool __reuse_device_table(struct amd_iommu *iommu)
{
struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
- u32 lo, hi, old_devtb_size;
+ struct dev_table_entry *old_dev_tbl_entry;
+ u32 lo, hi, old_devtb_size, devid;
phys_addr_t old_devtb_phys;
+ u16 dom_id;
+ bool dte_v;
u64 entry;
/* Each IOMMU use separate device table with the same size */
@@ -1173,6 +1176,22 @@ static bool __reuse_device_table(struct amd_iommu *iommu)
return false;
}
+ for (devid = 0; devid <= pci_seg->last_bdf; devid++) {
+ old_dev_tbl_entry = &pci_seg->old_dev_tbl_cpy[devid];
+ dte_v = FIELD_GET(DTE_FLAG_V, old_dev_tbl_entry->data[0]);
+ dom_id = FIELD_GET(DEV_DOMID_MASK, old_dev_tbl_entry->data[1]);
+
+ if (!dte_v || !dom_id)
+ continue;
+ /*
+ * ID reservation can fail with -ENOSPC when there
+ * are multiple devices present in the same domain,
+ * hence check only for -ENOMEM.
+ */
+ if (amd_iommu_pdom_id_reserve(dom_id, GFP_KERNEL) == -ENOMEM)
+ return false;
+ }
+
return true;
}
@@ -3127,8 +3146,7 @@ static bool __init check_ioapic_information(void)
static void __init free_dma_resources(void)
{
- ida_destroy(&pdom_ids);
-
+ amd_iommu_pdom_id_destroy();
free_unity_maps();
}
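The reuse path above depends on being able to reserve one specific domain ID: ida_alloc_range(&ida, id, id, gfp) returns that id on success, -ENOSPC if the exact ID is already taken (expected when several devices share a domain), or -ENOMEM on allocator failure. A hedged kernel-style sketch of that reservation logic, independent of the amd_iommu wrappers and using a demo IDA rather than the driver's pdom_ids:

    #include <linux/idr.h>

    static DEFINE_IDA(demo_ids);    /* sketch only; stands in for pdom_ids */

    /* Reserve one specific ID; -ENOSPC means an earlier device already did. */
    static int demo_reserve_id(u16 id)
    {
            int ret = ida_alloc_range(&demo_ids, id, id, GFP_KERNEL);

            if (ret == -ENOMEM)
                    return ret;     /* the only fatal case in the reuse path */
            return 0;               /* reserved now, or already reserved */
    }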
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 9f1d56a5e145..5d45795c367a 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1811,17 +1811,26 @@ int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
* contain.
*
****************************************************************************/
-
-static int pdom_id_alloc(void)
+int amd_iommu_pdom_id_alloc(void)
{
return ida_alloc_range(&pdom_ids, 1, MAX_DOMAIN_ID - 1, GFP_ATOMIC);
}
-static void pdom_id_free(int id)
+int amd_iommu_pdom_id_reserve(u16 id, gfp_t gfp)
+{
+ return ida_alloc_range(&pdom_ids, id, id, gfp);
+}
+
+void amd_iommu_pdom_id_free(int id)
{
ida_free(&pdom_ids, id);
}
+void amd_iommu_pdom_id_destroy(void)
+{
+ ida_destroy(&pdom_ids);
+}
+
static void free_gcr3_tbl_level1(u64 *tbl)
{
u64 *ptr;
@@ -1864,7 +1873,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
gcr3_info->glx = 0;
/* Free per device domain ID */
- pdom_id_free(gcr3_info->domid);
+ amd_iommu_pdom_id_free(gcr3_info->domid);
iommu_free_pages(gcr3_info->gcr3_tbl);
gcr3_info->gcr3_tbl = NULL;
@@ -1900,14 +1909,14 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
return -EBUSY;
/* Allocate per device domain ID */
- domid = pdom_id_alloc();
+ domid = amd_iommu_pdom_id_alloc();
if (domid <= 0)
return -ENOSPC;
gcr3_info->domid = domid;
gcr3_info->gcr3_tbl = iommu_alloc_pages_node_sz(nid, GFP_ATOMIC, SZ_4K);
if (gcr3_info->gcr3_tbl == NULL) {
- pdom_id_free(domid);
+ amd_iommu_pdom_id_free(domid);
return -ENOMEM;
}
@@ -2503,7 +2512,7 @@ struct protection_domain *protection_domain_alloc(void)
if (!domain)
return NULL;
- domid = pdom_id_alloc();
+ domid = amd_iommu_pdom_id_alloc();
if (domid <= 0) {
kfree(domain);
return NULL;
@@ -2802,7 +2811,7 @@ void amd_iommu_domain_free(struct iommu_domain *dom)
WARN_ON(!list_empty(&domain->dev_list));
pt_iommu_deinit(&domain->iommu);
- pdom_id_free(domain->id);
+ amd_iommu_pdom_id_free(domain->id);
kfree(domain);
}
@@ -2853,7 +2862,7 @@ void amd_iommu_init_identity_domain(void)
domain->ops = &identity_domain_ops;
domain->owner = &amd_iommu_ops;
- identity_domain.id = pdom_id_alloc();
+ identity_domain.id = amd_iommu_pdom_id_alloc();
protection_domain_init(&identity_domain);
}
diff --git a/drivers/iommu/generic_pt/.kunitconfig b/drivers/iommu/generic_pt/.kunitconfig
index 52ac9e661ffd..a78b295f264d 100644
--- a/drivers/iommu/generic_pt/.kunitconfig
+++ b/drivers/iommu/generic_pt/.kunitconfig
@@ -1,4 +1,5 @@
CONFIG_KUNIT=y
+CONFIG_COMPILE_TEST=y
CONFIG_GENERIC_PT=y
CONFIG_DEBUG_GENERIC_PT=y
CONFIG_IOMMU_PT=y
@@ -11,4 +12,3 @@ CONFIG_IOMMUFD=y
CONFIG_DEBUG_KERNEL=y
CONFIG_FAULT_INJECTION=y
CONFIG_RUNTIME_TESTING_MENU=y
-CONFIG_IOMMUFD_TEST=y
diff --git a/drivers/iommu/generic_pt/iommu_pt.h b/drivers/iommu/generic_pt/iommu_pt.h
index 97aeda1ad01c..3327116a441c 100644
--- a/drivers/iommu/generic_pt/iommu_pt.h
+++ b/drivers/iommu/generic_pt/iommu_pt.h
@@ -372,6 +372,9 @@ static inline struct pt_table_p *_table_alloc(struct pt_common *common,
table_mem = iommu_alloc_pages_node_sz(iommu_table->nid, gfp,
log2_to_int(lg2sz));
+ if (!table_mem)
+ return ERR_PTR(-ENOMEM);
+
if (pt_feature(common, PT_FEAT_DMA_INCOHERENT) &&
mode == ALLOC_NORMAL) {
int ret = iommu_pages_start_incoherent(
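The added NULL check matches the rest of _table_alloc(), which reports failures through the ERR_PTR() convention rather than returning NULL. A brief reminder sketch of that idiom with a hypothetical allocation helper; callers then test the result with IS_ERR() and recover the errno with PTR_ERR():

    #include <linux/err.h>
    #include <linux/slab.h>

    /* Hypothetical helper returning an ERR_PTR-encoded error on failure. */
    static void *demo_alloc(size_t sz)
    {
            void *mem = kzalloc(sz, GFP_KERNEL);

            if (!mem)
                    return ERR_PTR(-ENOMEM);        /* encode the errno in the pointer */
            return mem;
    }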
diff --git a/drivers/iommu/generic_pt/pt_defs.h b/drivers/iommu/generic_pt/pt_defs.h
index c25544d72f97..707b3b0282fa 100644
--- a/drivers/iommu/generic_pt/pt_defs.h
+++ b/drivers/iommu/generic_pt/pt_defs.h
@@ -202,7 +202,7 @@ static inline bool pt_table_install32(struct pt_state *pts, u32 table_entry)
#define PT_SUPPORTED_FEATURE(feature_nr) (PT_SUPPORTED_FEATURES & BIT(feature_nr))
-static inline bool pt_feature(const struct pt_common *common,
+static __always_inline bool pt_feature(const struct pt_common *common,
unsigned int feature_nr)
{
if (PT_FORCE_ENABLED_FEATURES & BIT(feature_nr))
@@ -212,7 +212,7 @@ static inline bool pt_feature(const struct pt_common *common,
return common->features & BIT(feature_nr);
}
-static inline bool pts_feature(const struct pt_state *pts,
+static __always_inline bool pts_feature(const struct pt_state *pts,
unsigned int feature_nr)
{
return pt_feature(pts->range->common, feature_nr);
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 4f9b01dc91e8..8bcbfe3d9c72 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -1303,17 +1303,17 @@ static struct irq_chip intel_ir_chip = {
* irq_enter();
* handle_edge_irq()
* irq_chip_ack_parent()
- * irq_move_irq(); // No EOI
+ * intel_ack_posted_msi_irq(); // No EOI
* handle_irq_event()
* driver_handler()
* handle_edge_irq()
* irq_chip_ack_parent()
- * irq_move_irq(); // No EOI
+ * intel_ack_posted_msi_irq(); // No EOI
* handle_irq_event()
* driver_handler()
* handle_edge_irq()
* irq_chip_ack_parent()
- * irq_move_irq(); // No EOI
+ * intel_ack_posted_msi_irq(); // No EOI
* handle_irq_event()
* driver_handler()
* apic_eoi()
@@ -1322,7 +1322,7 @@ static struct irq_chip intel_ir_chip = {
*/
static struct irq_chip intel_ir_chip_post_msi = {
.name = "INTEL-IR-POST",
- .irq_ack = irq_move_irq,
+ .irq_ack = intel_ack_posted_msi_irq,
.irq_set_affinity = intel_ir_set_affinity,
.irq_compose_msi_msg = intel_ir_compose_msi_msg,
.irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
diff --git a/drivers/iommu/iommufd/Kconfig b/drivers/iommu/iommufd/Kconfig
index eae3f03629b0..455bac0351f2 100644
--- a/drivers/iommu/iommufd/Kconfig
+++ b/drivers/iommu/iommufd/Kconfig
@@ -41,7 +41,8 @@ config IOMMUFD_TEST
depends on DEBUG_KERNEL
depends on FAULT_INJECTION
depends on RUNTIME_TESTING_MENU
- depends on IOMMU_PT_AMDV1
+ depends on IOMMU_PT_AMDV1=y || IOMMUFD=IOMMU_PT_AMDV1
+ select DMA_SHARED_BUFFER
select IOMMUFD_DRIVER
default n
help
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index 54cf4d856179..436992331111 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -495,7 +495,11 @@ int iopt_map_file_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
return -EOVERFLOW;
start_byte = start - ALIGN_DOWN(start, PAGE_SIZE);
- dmabuf = dma_buf_get(fd);
+ if (IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
+ dmabuf = dma_buf_get(fd);
+ else
+ dmabuf = ERR_PTR(-ENXIO);
+
if (!IS_ERR(dmabuf)) {
pages = iopt_alloc_dmabuf_pages(ictx, dmabuf, start_byte, start,
length,
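The IS_ENABLED() branch is a compile-time switch: when CONFIG_DMA_SHARED_BUFFER is not set, the constant condition lets the compiler discard the dead dma_buf_get() call so no reference to the dma-buf code is emitted, while the -ENXIO arm keeps the rest of the function unchanged. A hedged sketch of the same pattern around a made-up optional subsystem (CONFIG_DEMO_FEATURE and demo_buf_get() are hypothetical):

    #include <linux/err.h>
    #include <linux/kconfig.h>

    struct demo_buf;
    struct demo_buf *demo_buf_get(int fd);  /* only built when CONFIG_DEMO_FEATURE is set */

    static struct demo_buf *demo_try_get(int fd)
    {
            if (IS_ENABLED(CONFIG_DEMO_FEATURE))
                    return demo_buf_get(fd);        /* dead code when the option is off */
            return ERR_PTR(-ENXIO);
    }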
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index c4322fd26f93..550ff36dec3a 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -1184,14 +1184,20 @@ static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
unsigned int mockpt_id,
unsigned long start, size_t length)
{
+ unsigned long last;
struct iommufd_ioas *ioas;
int rc;
+ if (!length)
+ return -EINVAL;
+ if (check_add_overflow(start, length - 1, &last))
+ return -EOVERFLOW;
+
ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
if (IS_ERR(ioas))
return PTR_ERR(ioas);
down_write(&ioas->iopt.iova_rwsem);
- rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
+ rc = iopt_reserve_iova(&ioas->iopt, start, last, NULL);
up_write(&ioas->iopt.iova_rwsem);
iommufd_put_object(ucmd->ictx, &ioas->obj);
return rc;
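The rework computes the inclusive end of the range as start + length - 1 with check_add_overflow(), after rejecting length == 0, so a range that wraps the address space is reported as -EOVERFLOW instead of silently reserving the wrong IOVAs. The same arithmetic can be exercised in plain C with the compiler builtin the kernel macro wraps:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the kernel-side check: last = start + length - 1, overflow-checked. */
    static bool range_end(uint64_t start, uint64_t length, uint64_t *last)
    {
            if (!length)
                    return false;                   /* empty range is invalid */
            return !__builtin_add_overflow(start, length - 1, last);
    }

    int main(void)
    {
            uint64_t last;

            printf("ok=%d\n", range_end(UINT64_MAX - 3, 8, &last));        /* wraps: ok=0 */
            printf("ok=%d last=%llu\n", range_end(16, 16, &last),
                   (unsigned long long)last);                              /* ok=1 last=31 */
            return 0;
    }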
@@ -1215,8 +1221,10 @@ static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
page_size = 1 << __ffs(mock->domain.pgsize_bitmap);
if (iova % page_size || length % page_size ||
(uintptr_t)uptr % page_size ||
- check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
- return -EINVAL;
+ check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end)) {
+ rc = -EINVAL;
+ goto out_put;
+ }
for (; length; length -= page_size) {
struct page *pages[1];
diff --git a/drivers/irqchip/irq-gic-v5-its.c b/drivers/irqchip/irq-gic-v5-its.c
index 554485f0be1f..8e22134b9f48 100644
--- a/drivers/irqchip/irq-gic-v5-its.c
+++ b/drivers/irqchip/irq-gic-v5-its.c
@@ -849,7 +849,7 @@ static int gicv5_its_map_event(struct gicv5_its_dev *its_dev, u16 event_id, u32
itte = gicv5_its_device_get_itte_ref(its_dev, event_id);
- if (FIELD_GET(GICV5_ITTL2E_VALID, *itte))
+ if (FIELD_GET(GICV5_ITTL2E_VALID, le64_to_cpu(*itte)))
return -EEXIST;
itt_entry = FIELD_PREP(GICV5_ITTL2E_LPI_ID, lpi) |
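The fix converts the little-endian ITT entry to CPU byte order before testing the valid bit; on big-endian hosts, FIELD_GET() on the raw __le64 would inspect the wrong bits. A hedged sketch of the convert-then-extract pattern with a made-up field layout (DEMO_ENTRY_VALID is not the real GICv5 definition):

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    #define DEMO_ENTRY_VALID        BIT_ULL(0)      /* hypothetical layout */

    static bool demo_entry_valid(const __le64 *entry)
    {
            u64 val = le64_to_cpu(*entry);          /* convert before FIELD_GET() */

            return FIELD_GET(DEMO_ENTRY_VALID, val);
    }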
diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c
index 385368052d5c..b6cebfee9461 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.c
+++ b/drivers/irqchip/irq-riscv-imsic-state.c
@@ -477,6 +477,7 @@ static void __init imsic_local_cleanup(void)
lpriv = per_cpu_ptr(imsic->lpriv, cpu);
bitmap_free(lpriv->dirty_bitmap);
+ kfree(lpriv->vectors);
}
free_percpu(imsic->lpriv);
@@ -490,8 +491,7 @@ static int __init imsic_local_init(void)
int cpu, i;
/* Allocate per-CPU private state */
- imsic->lpriv = __alloc_percpu(struct_size(imsic->lpriv, vectors, global->nr_ids + 1),
- __alignof__(*imsic->lpriv));
+ imsic->lpriv = alloc_percpu(typeof(*imsic->lpriv));
if (!imsic->lpriv)
return -ENOMEM;
@@ -511,6 +511,12 @@ static int __init imsic_local_init(void)
timer_setup(&lpriv->timer, imsic_local_timer_callback, TIMER_PINNED);
#endif
+ /* Allocate vector array */
+ lpriv->vectors = kcalloc(global->nr_ids + 1, sizeof(*lpriv->vectors),
+ GFP_KERNEL);
+ if (!lpriv->vectors)
+ goto fail_local_cleanup;
+
/* Setup vector array */
for (i = 0; i <= global->nr_ids; i++) {
vec = &lpriv->vectors[i];
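The restructuring replaces one variable-sized per-CPU allocation (a flexible vectors[] array inside the per-CPU struct) with a fixed-size per-CPU struct plus a separately kcalloc()'d vector table per CPU, which the cleanup path now frees explicitly. A hedged sketch of that two-step allocation, with simplified stand-ins for struct imsic_local_priv and its vectors:

    #include <linux/percpu.h>
    #include <linux/slab.h>

    struct demo_vec { unsigned int hwirq; };        /* simplified stand-in */

    struct demo_local {
            struct demo_vec *vectors;               /* now a pointer, not a flexible array */
    };

    static struct demo_local __percpu *demo_alloc(unsigned int nr_ids)
    {
            struct demo_local __percpu *lpriv = alloc_percpu(struct demo_local);
            unsigned int cpu;

            if (!lpriv)
                    return NULL;

            for_each_possible_cpu(cpu) {
                    struct demo_local *l = per_cpu_ptr(lpriv, cpu);

                    l->vectors = kcalloc(nr_ids + 1, sizeof(*l->vectors), GFP_KERNEL);
                    if (!l->vectors)
                            goto err_free;
            }
            return lpriv;

    err_free:
            /* percpu memory is zeroed, so kfree(NULL) is safe for untouched CPUs */
            for_each_possible_cpu(cpu)
                    kfree(per_cpu_ptr(lpriv, cpu)->vectors);
            free_percpu(lpriv);
            return NULL;
    }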
diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h
index 6332501dcbd8..c42ee180b305 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.h
+++ b/drivers/irqchip/irq-riscv-imsic-state.h
@@ -40,7 +40,7 @@ struct imsic_local_priv {
#endif
/* Local vector table */
- struct imsic_vector vectors[];
+ struct imsic_vector *vectors;
};
struct imsic_priv {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e5922a682953..6d73f6e196a9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1999,7 +1999,6 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc
mddev->layout = le32_to_cpu(sb->layout);
mddev->raid_disks = le32_to_cpu(sb->raid_disks);
mddev->dev_sectors = le64_to_cpu(sb->size);
- mddev->logical_block_size = le32_to_cpu(sb->logical_block_size);
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.space = 0;
@@ -2015,6 +2014,9 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc
mddev->max_disks = (4096-256)/2;
+ if (!mddev->logical_block_size)
+ mddev->logical_block_size = le32_to_cpu(sb->logical_block_size);
+
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
mddev->bitmap_info.file == NULL) {
mddev->bitmap_info.offset =
@@ -3882,7 +3884,6 @@ out_free_rdev:
static int analyze_sbs(struct mddev *mddev)
{
- int i;
struct md_rdev *rdev, *freshest, *tmp;
freshest = NULL;
@@ -3909,11 +3910,9 @@ static int analyze_sbs(struct mddev *mddev)
super_types[mddev->major_version].
validate_super(mddev, NULL/*freshest*/, freshest);
- i = 0;
rdev_for_each_safe(rdev, tmp, mddev) {
if (mddev->max_disks &&
- (rdev->desc_nr >= mddev->max_disks ||
- i > mddev->max_disks)) {
+ rdev->desc_nr >= mddev->max_disks) {
pr_warn("md: %s: %pg: only %d devices permitted\n",
mdname(mddev), rdev->bdev,
mddev->max_disks);
@@ -4407,7 +4406,7 @@ raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
if (err < 0)
return err;
- err = mddev_lock(mddev);
+ err = mddev_suspend_and_lock(mddev);
if (err)
return err;
if (mddev->pers)
@@ -4432,7 +4431,7 @@ raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
} else
mddev->raid_disks = n;
out_unlock:
- mddev_unlock(mddev);
+ mddev_unlock_and_resume(mddev);
return err ? err : len;
}
static struct md_sysfs_entry md_raid_disks =
@@ -5981,13 +5980,33 @@ lbs_store(struct mddev *mddev, const char *buf, size_t len)
if (mddev->major_version == 0)
return -EINVAL;
- if (mddev->pers)
- return -EBUSY;
-
err = kstrtouint(buf, 10, &lbs);
if (err < 0)
return -EINVAL;
+ if (mddev->pers) {
+ unsigned int curr_lbs;
+
+ if (mddev->logical_block_size)
+ return -EBUSY;
+ /*
+ * To avoid forward compatibility issues, LBS is not
+ * configured by default for arrays created on old kernels (<= 6.18).
+ * If the user confirms there will be no rollback to an old kernel,
+ * LBS can be enabled by writing the current LBS, which prevents
+ * data loss from LBS changes.
+ */
+ curr_lbs = queue_logical_block_size(mddev->gendisk->queue);
+ if (lbs != curr_lbs)
+ return -EINVAL;
+
+ mddev->logical_block_size = curr_lbs;
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ pr_info("%s: logical block size configured successfully, array will not be assembled in old kernels (<= 6.18)\n",
+ mdname(mddev));
+ return len;
+ }
+
err = mddev_lock(mddev);
if (err)
goto unlock;
@@ -6163,7 +6182,27 @@ int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim,
mdname(mddev));
return -EINVAL;
}
- mddev->logical_block_size = lim->logical_block_size;
+
+ /* Only 1.x meta needs to set logical block size */
+ if (mddev->major_version == 0)
+ return 0;
+
+ /*
+ * Avoid a forward compatibility issue: only set LBS by default
+ * for new arrays (mddev->events == 0 indicates the array was
+ * just created). When assembling an existing array, read LBS
+ * from the superblock instead, since LBS is 0 in superblocks
+ * created by old kernels.
+ */
+ if (!mddev->events) {
+ pr_info("%s: array will not be assembled in old kernels that lack configurable LBS support (<= 6.18)\n",
+ mdname(mddev));
+ mddev->logical_block_size = lim->logical_block_size;
+ }
+
+ if (!mddev->logical_block_size)
+ pr_warn("%s: echo current LBS to md/logical_block_size to prevent data loss issues from LBS changes.\n"
+ "\tNote: After setting, array will not be assembled in old kernels (<= 6.18)\n",
+ mdname(mddev));
return 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e57ce3295292..8dc98f545969 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7187,12 +7187,14 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
err = mddev_suspend_and_lock(mddev);
if (err)
return err;
+ conf = mddev->private;
+ if (!conf) {
+ mddev_unlock_and_resume(mddev);
+ return -ENODEV;
+ }
raid5_quiesce(mddev, true);
- conf = mddev->private;
- if (!conf)
- err = -ENODEV;
- else if (new != conf->worker_cnt_per_group) {
+ if (new != conf->worker_cnt_per_group) {
old_groups = conf->worker_groups;
if (old_groups)
flush_workqueue(raid5_wq);
diff --git a/drivers/media/mc/mc-request.c b/drivers/media/mc/mc-request.c
index 2ac9ac0a740b..3cca9a0c7c97 100644
--- a/drivers/media/mc/mc-request.c
+++ b/drivers/media/mc/mc-request.c
@@ -315,12 +315,12 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd)
fd_prepare_file(fdf)->private_data = req;
- *alloc_fd = fd_publish(fdf);
-
snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
- atomic_inc_return(&mdev->request_id), *alloc_fd);
+ atomic_inc_return(&mdev->request_id), fd_prepare_fd(fdf));
dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);
+ *alloc_fd = fd_publish(fdf);
+
return 0;
err_free_req:
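The reorder matters because publishing a file descriptor is the point of no return: once the fd is visible, userspace can race with any remaining setup, so the debug string is now filled in from the prepared fd before fd_publish(). The long-standing get_unused_fd_flags()/fd_install() pair follows the same rule; a hedged sketch, with the private_data assignment standing in for whatever initialization must finish first:

    #include <linux/fcntl.h>
    #include <linux/file.h>
    #include <linux/fs.h>

    /* Sketch: finish all initialization before the fd becomes visible. */
    static int demo_publish(struct file *file, void *priv)
    {
            int fd = get_unused_fd_flags(O_CLOEXEC);

            if (fd < 0)
                    return fd;

            file->private_data = priv;      /* last-minute setup while still private */

            fd_install(fd, file);           /* from here on userspace owns the fd */
            return fd;
    }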
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 376047beea3d..502059078b45 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -8,6 +8,7 @@
#include "lkdtm.h"
#include <linux/cpu.h>
#include <linux/list.h>
+#include <linux/hrtimer.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
@@ -100,11 +101,61 @@ static void lkdtm_PANIC_STOP_IRQOFF(void)
stop_machine(panic_stop_irqoff_fn, &v, cpu_online_mask);
}
+static bool wait_for_panic;
+
+static enum hrtimer_restart panic_in_hardirq(struct hrtimer *timer)
+{
+ panic("from hard IRQ context");
+
+ wait_for_panic = false;
+ return HRTIMER_NORESTART;
+}
+
+static void lkdtm_PANIC_IN_HARDIRQ(void)
+{
+ struct hrtimer timer;
+
+ wait_for_panic = true;
+ hrtimer_setup_on_stack(&timer, panic_in_hardirq,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+ hrtimer_start(&timer, us_to_ktime(100), HRTIMER_MODE_REL_HARD);
+
+ while (READ_ONCE(wait_for_panic))
+ cpu_relax();
+
+ hrtimer_cancel(&timer);
+}
+
static void lkdtm_BUG(void)
{
BUG();
}
+static bool wait_for_bug;
+
+static enum hrtimer_restart bug_in_hardirq(struct hrtimer *timer)
+{
+ BUG();
+
+ wait_for_bug = false;
+ return HRTIMER_NORESTART;
+}
+
+static void lkdtm_BUG_IN_HARDIRQ(void)
+{
+ struct hrtimer timer;
+
+ wait_for_bug = true;
+ hrtimer_setup_on_stack(&timer, bug_in_hardirq,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+ hrtimer_start(&timer, us_to_ktime(100), HRTIMER_MODE_REL_HARD);
+
+ while (READ_ONCE(wait_for_bug))
+ cpu_relax();
+
+ hrtimer_cancel(&timer);
+}
+
static int warn_counter;
static void lkdtm_WARNING(void)
@@ -696,7 +747,9 @@ static noinline void lkdtm_CORRUPT_PAC(void)
static struct crashtype crashtypes[] = {
CRASHTYPE(PANIC),
CRASHTYPE(PANIC_STOP_IRQOFF),
+ CRASHTYPE(PANIC_IN_HARDIRQ),
CRASHTYPE(BUG),
+ CRASHTYPE(BUG_IN_HARDIRQ),
CRASHTYPE(WARNING),
CRASHTYPE(WARNING_MESSAGE),
CRASHTYPE(EXCEPTION),
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index a4f75dc36929..fa30899a5fa2 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -122,6 +122,8 @@
#define MEI_DEV_ID_WCL_P 0x4D70 /* Wildcat Lake P */
+#define MEI_DEV_ID_NVL_S 0x6E68 /* Nova Lake Point S */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 73cad914be9f..2a6e569558b9 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -129,6 +129,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_WCL_P, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_NVL_S, MEI_ME_PCH15_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/rp1/Kconfig b/drivers/misc/rp1/Kconfig
index 5232e70d3079..2c13b3968b01 100644
--- a/drivers/misc/rp1/Kconfig
+++ b/drivers/misc/rp1/Kconfig
@@ -5,8 +5,7 @@
config MISC_RP1
tristate "RaspberryPi RP1 misc device"
- depends on OF_IRQ && OF_OVERLAY && PCI_MSI && PCI_QUIRKS
- select PCI_DYNAMIC_OF_NODES
+ depends on OF_IRQ && PCI_MSI
help
Support the RP1 peripheral chip found on Raspberry Pi 5 board.
@@ -15,6 +14,3 @@ config MISC_RP1
The driver is responsible for enabling the DT node once the PCIe
endpoint has been configured, and handling interrupts.
-
- This driver uses an overlay to load other drivers to support for
- RP1 internal sub-devices.
diff --git a/drivers/misc/rp1/Makefile b/drivers/misc/rp1/Makefile
index 508b4cb05627..ab32b433d7ed 100644
--- a/drivers/misc/rp1/Makefile
+++ b/drivers/misc/rp1/Makefile
@@ -1,3 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_MISC_RP1) += rp1-pci.o
-rp1-pci-objs := rp1_pci.o rp1-pci.dtbo.o
+obj-$(CONFIG_MISC_RP1) += rp1_pci.o
diff --git a/drivers/misc/rp1/rp1-pci.dtso b/drivers/misc/rp1/rp1-pci.dtso
deleted file mode 100644
index eea826b36e02..000000000000
--- a/drivers/misc/rp1/rp1-pci.dtso
+++ /dev/null
@@ -1,25 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-
-/*
- * The dts overlay is included from the dts directory so
- * it can be possible to check it with CHECK_DTBS while
- * also compile it from the driver source directory.
- */
-
-/dts-v1/;
-/plugin/;
-
-/ {
- fragment@0 {
- target-path="";
- __overlay__ {
- compatible = "pci1de4,1";
- #address-cells = <3>;
- #size-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
-
- #include "arm64/broadcom/rp1-common.dtsi"
- };
- };
-};
diff --git a/drivers/misc/rp1/rp1_pci.c b/drivers/misc/rp1/rp1_pci.c
index a342bcc6164b..d210da84c30a 100644
--- a/drivers/misc/rp1/rp1_pci.c
+++ b/drivers/misc/rp1/rp1_pci.c
@@ -34,16 +34,11 @@
/* Interrupts */
#define RP1_INT_END 61
-/* Embedded dtbo symbols created by cmd_wrap_S_dtb in scripts/Makefile.lib */
-extern char __dtbo_rp1_pci_begin[];
-extern char __dtbo_rp1_pci_end[];
-
struct rp1_dev {
struct pci_dev *pdev;
struct irq_domain *domain;
struct irq_data *pcie_irqds[64];
void __iomem *bar1;
- int ovcs_id; /* overlay changeset id */
bool level_triggered_irq[RP1_INT_END];
};
@@ -184,24 +179,13 @@ static void rp1_unregister_interrupts(struct pci_dev *pdev)
static int rp1_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- u32 dtbo_size = __dtbo_rp1_pci_end - __dtbo_rp1_pci_begin;
- void *dtbo_start = __dtbo_rp1_pci_begin;
struct device *dev = &pdev->dev;
struct device_node *rp1_node;
- bool skip_ovl = true;
struct rp1_dev *rp1;
int err = 0;
int i;
- /*
- * Either use rp1_nexus node if already present in DT, or
- * set a flag to load it from overlay at runtime
- */
- rp1_node = of_find_node_by_name(NULL, "rp1_nexus");
- if (!rp1_node) {
- rp1_node = dev_of_node(dev);
- skip_ovl = false;
- }
+ rp1_node = dev_of_node(dev);
if (!rp1_node) {
dev_err(dev, "Missing of_node for device\n");
@@ -276,42 +260,29 @@ static int rp1_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rp1_chained_handle_irq, rp1);
}
- if (!skip_ovl) {
- err = of_overlay_fdt_apply(dtbo_start, dtbo_size, &rp1->ovcs_id,
- rp1_node);
- if (err)
- goto err_unregister_interrupts;
- }
-
err = of_platform_default_populate(rp1_node, NULL, dev);
if (err) {
dev_err_probe(&pdev->dev, err, "Error populating devicetree\n");
- goto err_unload_overlay;
+ goto err_unregister_interrupts;
}
- if (skip_ovl)
- of_node_put(rp1_node);
+ of_node_put(rp1_node);
return 0;
-err_unload_overlay:
- of_overlay_remove(&rp1->ovcs_id);
err_unregister_interrupts:
rp1_unregister_interrupts(pdev);
err_put_node:
- if (skip_ovl)
- of_node_put(rp1_node);
+ of_node_put(rp1_node);
return err;
}
static void rp1_remove(struct pci_dev *pdev)
{
- struct rp1_dev *rp1 = pci_get_drvdata(pdev);
struct device *dev = &pdev->dev;
of_platform_depopulate(dev);
- of_overlay_remove(&rp1->ovcs_id);
rp1_unregister_interrupts(pdev);
}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 24f07df32a1a..6d79cc9a79e2 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -315,14 +315,14 @@ config MMC_SDHCI_ESDHC_MCF
config MMC_SDHCI_ESDHC_IMX
tristate "SDHCI support for the Freescale eSDHC/uSDHC i.MX controller"
- depends on ARCH_MXC || COMPILE_TEST
+ depends on ARCH_MXC || ARCH_S32 || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
depends on OF
select MMC_SDHCI_IO_ACCESSORS
select MMC_CQHCI
help
This selects the Freescale eSDHC/uSDHC controller support
- found on i.MX25, i.MX35 i.MX5x and i.MX6x.
+ found on i.MX25, i.MX35, i.MX5x, i.MX6x, and S32G.
If you have a controller with this interface, say Y or M here.
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index b97d042897ad..ab7f0ffe7b4f 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -99,7 +99,7 @@
#define HIWORD_UPDATE(val, mask, shift) \
((val) << (shift) | (mask) << ((shift) + 16))
-#define CD_STABLE_TIMEOUT_US 1000000
+#define CD_STABLE_TIMEOUT_US 2000000
#define CD_STABLE_MAX_SLEEP_US 10
/**
diff --git a/drivers/mtd/nand/ecc-sw-hamming.c b/drivers/mtd/nand/ecc-sw-hamming.c
index f2d0effad9d2..bc62a71f9fdd 100644
--- a/drivers/mtd/nand/ecc-sw-hamming.c
+++ b/drivers/mtd/nand/ecc-sw-hamming.c
@@ -8,7 +8,7 @@
*
* Completely replaces the previous ECC implementation which was written by:
* Steven J. Hill (sjhill@realitydiluted.com)
- * Thomas Gleixner (tglx@linutronix.de)
+ * Thomas Gleixner (tglx@kernel.org)
*
* Information on how this algorithm works and how it was developed
* can be found in Documentation/driver-api/mtd/nand_ecc.rst
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 70d6c2250f32..540b6baf8bb1 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -11,7 +11,7 @@
* Error correction code lifted from the old docecc code
* Author: Fabrice Bellard (fabrice.bellard@netgem.com)
* Copyright (C) 2000 Netgem S.A.
- * converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@linutronix.de>
+ * converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@kernel.org>
*
* Interface to generic NAND code for M-Systems DiskOnChip devices
*/
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index ad6d66309597..f2322de93ab4 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -8,7 +8,7 @@
* http://www.linux-mtd.infradead.org/doc/nand.html
*
* Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
- * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ * 2002-2006 Thomas Gleixner (tglx@kernel.org)
*
* Credits:
* David Woodhouse for adding multichip support
@@ -6594,5 +6594,5 @@ EXPORT_SYMBOL_GPL(nand_cleanup);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
-MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_AUTHOR("Thomas Gleixner <tglx@kernel.org>");
MODULE_DESCRIPTION("Generic NAND flash driver code");
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index a8fba5f39f59..3050ab7e6eb6 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -3,7 +3,7 @@
* Overview:
* Bad block table support for the NAND driver
*
- * Copyright © 2004 Thomas Gleixner (tglx@linutronix.de)
+ * Copyright © 2004 Thomas Gleixner (tglx@kernel.org)
*
* Description:
*
diff --git a/drivers/mtd/nand/raw/nand_ids.c b/drivers/mtd/nand/raw/nand_ids.c
index 650351c62af6..62a8cf86d9e2 100644
--- a/drivers/mtd/nand/raw/nand_ids.c
+++ b/drivers/mtd/nand/raw/nand_ids.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
+ * Copyright (C) 2002 Thomas Gleixner (tglx@kernel.org)
*/
#include <linux/sizes.h>
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
index b3cc8f360529..89e6dd8ed1a8 100644
--- a/drivers/mtd/nand/raw/nand_jedec.c
+++ b/drivers/mtd/nand/raw/nand_jedec.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
- * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ * 2002-2006 Thomas Gleixner (tglx@kernel.org)
*
* Credits:
* David Woodhouse for adding multichip support
diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c
index 743792edf98d..97700f80d5b8 100644
--- a/drivers/mtd/nand/raw/nand_legacy.c
+++ b/drivers/mtd/nand/raw/nand_legacy.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
- * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ * 2002-2006 Thomas Gleixner (tglx@kernel.org)
*
* Credits:
* David Woodhouse for adding multichip support
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
index 861975e44b55..11954440e4de 100644
--- a/drivers/mtd/nand/raw/nand_onfi.c
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
- * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ * 2002-2006 Thomas Gleixner (tglx@kernel.org)
*
* Credits:
* David Woodhouse for adding multichip support
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index 13365128194d..7ad8bc04be1a 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -272,5 +272,5 @@ static struct platform_driver ndfc_driver = {
module_platform_driver(ndfc_driver);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_AUTHOR("Thomas Gleixner <tglx@kernel.org>");
MODULE_DESCRIPTION("OF Platform driver for NDFC");
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index e15e320db476..cfaea6178a71 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig CAN_DEV
- tristate "CAN Device Drivers"
+ bool "CAN Device Drivers"
default y
depends on CAN
help
@@ -17,10 +17,7 @@ menuconfig CAN_DEV
virtual ones. If you own such devices or plan to use the virtual CAN
interfaces to develop applications, say Y here.
- To compile as a module, choose M here: the module will be called
- can-dev.
-
-if CAN_DEV
+if CAN_DEV && CAN
config CAN_VCAN
tristate "Virtual Local CAN Interface (vcan)"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index d7bc10a6b8ea..37e2f1a2faec 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_VXCAN) += vxcan.o
obj-$(CONFIG_CAN_SLCAN) += slcan/
-obj-y += dev/
+obj-$(CONFIG_CAN_DEV) += dev/
obj-y += esd/
obj-y += rcar/
obj-y += rockchip/
diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
index 633687d6b6c0..64226acf0f3d 100644
--- a/drivers/net/can/dev/Makefile
+++ b/drivers/net/can/dev/Makefile
@@ -1,9 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CAN_DEV) += can-dev.o
-
-can-dev-y += skb.o
+obj-$(CONFIG_CAN) += can-dev.o
+can-dev-$(CONFIG_CAN_DEV) += skb.o
can-dev-$(CONFIG_CAN_CALC_BITTIMING) += calc_bittiming.o
can-dev-$(CONFIG_CAN_NETLINK) += bittiming.o
can-dev-$(CONFIG_CAN_NETLINK) += dev.o
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index e29e85b67fd4..a0233e550a5a 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -1074,7 +1074,7 @@ out_usb_free_urb:
usb_free_urb(urb);
out_usb_kill_anchored_urbs:
if (!parent->active_channels) {
- usb_kill_anchored_urbs(&dev->tx_submitted);
+ usb_kill_anchored_urbs(&parent->rx_submitted);
if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
gs_usb_timestamp_stop(parent);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index a1a177713d99..2c4131ed7e30 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -2169,6 +2169,9 @@ static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
if (!ent->is_valid)
return 0;
+ if (is_multicast_ether_addr(ent->mac))
+ return 0;
+
if (port != ent->port)
return 0;
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip.c b/drivers/net/dsa/lantiq/lantiq_gswip.c
index 57dd063c0740..b094001a7c80 100644
--- a/drivers/net/dsa/lantiq/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq/lantiq_gswip.c
@@ -444,9 +444,6 @@ static void gswip_remove(struct platform_device *pdev)
if (!priv)
return;
- /* disable the switch */
- gswip_disable_switch(priv);
-
dsa_unregister_switch(priv->ds);
for (i = 0; i < priv->num_gphy_fw; i++)
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip.h b/drivers/net/dsa/lantiq/lantiq_gswip.h
index 9c38e51a75e8..2e0f2afbadbb 100644
--- a/drivers/net/dsa/lantiq/lantiq_gswip.h
+++ b/drivers/net/dsa/lantiq/lantiq_gswip.h
@@ -294,8 +294,6 @@ struct gswip_priv {
u16 version;
};
-void gswip_disable_switch(struct gswip_priv *priv);
-
int gswip_probe_common(struct gswip_priv *priv, u32 version);
#endif /* __LANTIQ_GSWIP_H */
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip_common.c b/drivers/net/dsa/lantiq/lantiq_gswip_common.c
index 9da39edf8f57..e790f2ef7588 100644
--- a/drivers/net/dsa/lantiq/lantiq_gswip_common.c
+++ b/drivers/net/dsa/lantiq/lantiq_gswip_common.c
@@ -752,6 +752,13 @@ static int gswip_setup(struct dsa_switch *ds)
return 0;
}
+static void gswip_teardown(struct dsa_switch *ds)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ regmap_clear_bits(priv->mdio, GSWIP_MDIO_GLOB, GSWIP_MDIO_GLOB_ENABLE);
+}
+
static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
@@ -1629,6 +1636,7 @@ static const struct phylink_mac_ops gswip_phylink_mac_ops = {
static const struct dsa_switch_ops gswip_switch_ops = {
.get_tag_protocol = gswip_get_tag_protocol,
.setup = gswip_setup,
+ .teardown = gswip_teardown,
.port_setup = gswip_port_setup,
.port_enable = gswip_port_enable,
.port_disable = gswip_port_disable,
@@ -1656,12 +1664,6 @@ static const struct dsa_switch_ops gswip_switch_ops = {
.port_hsr_leave = dsa_port_simple_hsr_leave,
};
-void gswip_disable_switch(struct gswip_priv *priv)
-{
- regmap_clear_bits(priv->mdio, GSWIP_MDIO_GLOB, GSWIP_MDIO_GLOB_ENABLE);
-}
-EXPORT_SYMBOL_GPL(gswip_disable_switch);
-
static int gswip_validate_cpu_port(struct dsa_switch *ds)
{
struct gswip_priv *priv = ds->priv;
@@ -1718,15 +1720,14 @@ int gswip_probe_common(struct gswip_priv *priv, u32 version)
err = gswip_validate_cpu_port(priv->ds);
if (err)
- goto disable_switch;
+ goto unregister_switch;
dev_info(priv->dev, "probed GSWIP version %lx mod %lx\n",
GSWIP_VERSION_REV(version), GSWIP_VERSION_MOD(version));
return 0;
-disable_switch:
- gswip_disable_switch(priv);
+unregister_switch:
dsa_unregister_switch(priv->ds);
return err;
diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx.c b/drivers/net/dsa/lantiq/mxl-gsw1xx.c
index 0816c61a47f1..f8ff8a604bf5 100644
--- a/drivers/net/dsa/lantiq/mxl-gsw1xx.c
+++ b/drivers/net/dsa/lantiq/mxl-gsw1xx.c
@@ -11,10 +11,12 @@
#include <linux/bits.h>
#include <linux/delay.h>
+#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/regmap.h>
+#include <linux/workqueue.h>
#include <net/dsa.h>
#include "lantiq_gswip.h"
@@ -29,6 +31,7 @@ struct gsw1xx_priv {
struct regmap *clk;
struct regmap *shell;
struct phylink_pcs pcs;
+ struct delayed_work clear_raneg;
phy_interface_t tbi_interface;
struct gswip_priv gswip;
};
@@ -145,7 +148,9 @@ static void gsw1xx_pcs_disable(struct phylink_pcs *pcs)
{
struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
- /* Assert SGMII shell reset */
+ cancel_delayed_work_sync(&priv->clear_raneg);
+
+ /* Assert SGMII shell reset (will also clear RANEG bit) */
regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
GSW1XX_RST_REQ_SGMII_SHELL);
@@ -255,10 +260,16 @@ static int gsw1xx_pcs_reset(struct gsw1xx_priv *priv)
FIELD_PREP(GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT,
GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT_DEF);
- /* TODO: Take care of inverted RX pair once generic property is
+ /* RX lane seems to be inverted internally, so bit
+ * GSW1XX_SGMII_PHY_RX0_CFG2_INVERT needs to be set for normal
+ * (i.e. non-inverted) operation.
+ *
+ * TODO: Take care of inverted RX pair once generic property is
* available
*/
+ val |= GSW1XX_SGMII_PHY_RX0_CFG2_INVERT;
+
ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_RX0_CFG2, val);
if (ret < 0)
return ret;
@@ -422,12 +433,29 @@ static int gsw1xx_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
return 0;
}
+static void gsw1xx_pcs_clear_raneg(struct work_struct *work)
+{
+ struct gsw1xx_priv *priv =
+ container_of(work, struct gsw1xx_priv, clear_raneg.work);
+
+ regmap_clear_bits(priv->sgmii, GSW1XX_SGMII_TBI_ANEGCTL,
+ GSW1XX_SGMII_TBI_ANEGCTL_RANEG);
+}
+
static void gsw1xx_pcs_an_restart(struct phylink_pcs *pcs)
{
struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+ cancel_delayed_work_sync(&priv->clear_raneg);
+
regmap_set_bits(priv->sgmii, GSW1XX_SGMII_TBI_ANEGCTL,
GSW1XX_SGMII_TBI_ANEGCTL_RANEG);
+
+ /* Despite being documented as self-clearing, the RANEG bit
+ * sometimes remains set, preventing auto-negotiation from happening.
+ * MaxLinear advises manually clearing the bit after 10 ms.
+ */
+ schedule_delayed_work(&priv->clear_raneg, msecs_to_jiffies(10));
}
static void gsw1xx_pcs_link_up(struct phylink_pcs *pcs,
@@ -630,6 +658,8 @@ static int gsw1xx_probe(struct mdio_device *mdiodev)
if (ret)
return ret;
+ INIT_DELAYED_WORK(&priv->clear_raneg, gsw1xx_pcs_clear_raneg);
+
ret = gswip_probe_common(&priv->gswip, version);
if (ret)
return ret;
@@ -642,25 +672,31 @@ static int gsw1xx_probe(struct mdio_device *mdiodev)
static void gsw1xx_remove(struct mdio_device *mdiodev)
{
struct gswip_priv *priv = dev_get_drvdata(&mdiodev->dev);
+ struct gsw1xx_priv *gsw1xx_priv;
if (!priv)
return;
- gswip_disable_switch(priv);
-
dsa_unregister_switch(priv->ds);
+
+ gsw1xx_priv = container_of(priv, struct gsw1xx_priv, gswip);
+ cancel_delayed_work_sync(&gsw1xx_priv->clear_raneg);
}
static void gsw1xx_shutdown(struct mdio_device *mdiodev)
{
struct gswip_priv *priv = dev_get_drvdata(&mdiodev->dev);
+ struct gsw1xx_priv *gsw1xx_priv;
if (!priv)
return;
+ dsa_switch_shutdown(priv->ds);
+
dev_set_drvdata(&mdiodev->dev, NULL);
- gswip_disable_switch(priv);
+ gsw1xx_priv = container_of(priv, struct gsw1xx_priv, gswip);
+ cancel_delayed_work_sync(&gsw1xx_priv->clear_raneg);
}
static const struct gswip_hw_info gsw12x_data = {
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index b4d48997bf46..09002c853b78 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3364,13 +3364,10 @@ static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
- struct device_node *phy_handle = NULL;
struct fwnode_handle *ports_fwnode;
struct fwnode_handle *port_fwnode;
struct dsa_switch *ds = chip->ds;
struct mv88e6xxx_port *p;
- struct dsa_port *dp;
- int tx_amp;
int err;
u16 reg;
u32 val;
@@ -3582,23 +3579,6 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
}
- if (chip->info->ops->serdes_set_tx_amplitude) {
- dp = dsa_to_port(ds, port);
- if (dp)
- phy_handle = of_parse_phandle(dp->dn, "phy-handle", 0);
-
- if (phy_handle && !of_property_read_u32(phy_handle,
- "tx-p2p-microvolt",
- &tx_amp))
- err = chip->info->ops->serdes_set_tx_amplitude(chip,
- port, tx_amp);
- if (phy_handle) {
- of_node_put(phy_handle);
- if (err)
- return err;
- }
- }
-
/* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
@@ -4768,7 +4748,6 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
- .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_get_caps = mv88e6352_phylink_get_caps,
.pcs_ops = &mv88e6352_pcs_ops,
@@ -5044,7 +5023,6 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
- .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -5481,7 +5459,6 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.serdes_get_stats = mv88e6352_serdes_get_stats,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
- .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.phylink_get_caps = mv88e6352_phylink_get_caps,
.pcs_ops = &mv88e6352_pcs_ops,
};
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 2f211e55cb47..e073446ee7d0 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -642,10 +642,6 @@ struct mv88e6xxx_ops {
void (*serdes_get_regs)(struct mv88e6xxx_chip *chip, int port,
void *_p);
- /* SERDES SGMII/Fiber Output Amplitude */
- int (*serdes_set_tx_amplitude)(struct mv88e6xxx_chip *chip, int port,
- int val);
-
/* Address Translation Unit operations */
int (*atu_get_hash)(struct mv88e6xxx_chip *chip, u8 *hash);
int (*atu_set_hash)(struct mv88e6xxx_chip *chip, u8 hash);
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index b3330211edbc..a936ee80ce00 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -25,14 +25,6 @@ static int mv88e6352_serdes_read(struct mv88e6xxx_chip *chip, int reg,
reg, val);
}
-static int mv88e6352_serdes_write(struct mv88e6xxx_chip *chip, int reg,
- u16 val)
-{
- return mv88e6xxx_phy_page_write(chip, MV88E6352_ADDR_SERDES,
- MV88E6352_SERDES_PAGE_FIBER,
- reg, val);
-}
-
static int mv88e6390_serdes_read(struct mv88e6xxx_chip *chip,
int lane, int device, int reg, u16 *val)
{
@@ -506,41 +498,3 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
p[i] = reg;
}
}
-
-static const int mv88e6352_serdes_p2p_to_reg[] = {
- /* Index of value in microvolts corresponds to the register value */
- 14000, 112000, 210000, 308000, 406000, 504000, 602000, 700000,
-};
-
-int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
- int val)
-{
- bool found = false;
- u16 ctrl, reg;
- int err;
- int i;
-
- err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
- if (err <= 0)
- return err;
-
- for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_p2p_to_reg); ++i) {
- if (mv88e6352_serdes_p2p_to_reg[i] == val) {
- reg = i;
- found = true;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_SPEC_CTRL2, &ctrl);
- if (err)
- return err;
-
- ctrl &= ~MV88E6352_SERDES_OUT_AMP_MASK;
- ctrl |= reg;
-
- return mv88e6352_serdes_write(chip, MV88E6352_SERDES_SPEC_CTRL2, ctrl);
-}
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index ad887d8601bc..17a3e85fabaa 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -29,8 +29,6 @@ struct phylink_link_state;
#define MV88E6352_SERDES_INT_FIBRE_ENERGY BIT(4)
#define MV88E6352_SERDES_INT_STATUS 0x13
-#define MV88E6352_SERDES_SPEC_CTRL2 0x1a
-#define MV88E6352_SERDES_OUT_AMP_MASK 0x0007
#define MV88E6341_PORT5_LANE 0x15
@@ -140,9 +138,6 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
-int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
- int val);
-
/* Return the (first) SERDES lane address a port is using, -errno otherwise. */
static inline int mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
int port)
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 8c9cc97efd4e..4fe4efdb3737 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1473,7 +1473,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
return 0;
free_ring:
- dma_free_coherent(&pdev->dev,
+ dma_free_coherent(gendev,
sizeof(struct boom_rx_desc) * RX_RING_SIZE +
sizeof(struct boom_tx_desc) * TX_RING_SIZE,
vp->rx_ring, vp->rx_ring_dma);
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 75893c90a0a1..315d97036ac1 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -2924,19 +2924,26 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth,
port->id = id;
eth->ports[p] = port;
- err = airoha_metadata_dst_alloc(port);
- if (err)
- return err;
+ return airoha_metadata_dst_alloc(port);
+}
- err = register_netdev(dev);
- if (err)
- goto free_metadata_dst;
+static int airoha_register_gdm_devices(struct airoha_eth *eth)
+{
+ int i;
- return 0;
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ struct airoha_gdm_port *port = eth->ports[i];
+ int err;
-free_metadata_dst:
- airoha_metadata_dst_free(port);
- return err;
+ if (!port)
+ continue;
+
+ err = register_netdev(port->dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static int airoha_probe(struct platform_device *pdev)
@@ -3027,6 +3034,10 @@ static int airoha_probe(struct platform_device *pdev)
}
}
+ err = airoha_register_gdm_devices(eth);
+ if (err)
+ goto error_napi_stop;
+
return 0;
error_napi_stop:
@@ -3040,10 +3051,12 @@ error_hw_cleanup:
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
- if (port && port->dev->reg_state == NETREG_REGISTERED) {
+ if (!port)
+ continue;
+
+ if (port->dev->reg_state == NETREG_REGISTERED)
unregister_netdev(port->dev);
- airoha_metadata_dst_free(port);
- }
+ airoha_metadata_dst_free(port);
}
free_netdev(eth->napi_dev);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index 0caabb0c3aa0..2221bafaf7c9 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -1547,13 +1547,16 @@ void airoha_ppe_deinit(struct airoha_eth *eth)
{
struct airoha_npu *npu;
- rcu_read_lock();
- npu = rcu_dereference(eth->npu);
+ mutex_lock(&flow_offload_mutex);
+
+ npu = rcu_replace_pointer(eth->npu, NULL,
+ lockdep_is_held(&flow_offload_mutex));
if (npu) {
npu->ops.ppe_deinit(npu);
airoha_npu_put(npu);
}
- rcu_read_unlock();
+
+ mutex_unlock(&flow_offload_mutex);
rhashtable_destroy(&eth->ppe->l2_flows);
rhashtable_destroy(&eth->flow_table);
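The airoha_ppe_deinit() hunk above swaps an rcu_read_lock()/rcu_dereference() pair for rcu_replace_pointer() taken under flow_offload_mutex, so teardown fetches the NPU pointer and publishes NULL to readers while holding the same lock other updaters take. A minimal sketch of that pattern follows; the names (my_lock, my_ptr, my_res, my_teardown) are invented for illustration, and the driver's reference drop (airoha_npu_put) is approximated here by synchronize_rcu() plus kfree().

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_res {
	int id;
};

static DEFINE_MUTEX(my_lock);		/* serializes all updaters of my_ptr */
static struct my_res __rcu *my_ptr;	/* read under RCU, written under my_lock */

static void my_teardown(void)
{
	struct my_res *res;

	mutex_lock(&my_lock);
	/* Atomically fetch the old value and publish NULL to RCU readers. */
	res = rcu_replace_pointer(my_ptr, NULL, lockdep_is_held(&my_lock));
	mutex_unlock(&my_lock);

	if (res) {
		synchronize_rcu();	/* wait out readers still using res */
		kfree(res);
	}
}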
diff --git a/drivers/net/ethernet/amazon/ena/ena_devlink.c b/drivers/net/ethernet/amazon/ena/ena_devlink.c
index ac81c24016dd..4772185e669d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_devlink.c
+++ b/drivers/net/ethernet/amazon/ena/ena_devlink.c
@@ -53,10 +53,12 @@ void ena_devlink_disable_phc_param(struct devlink *devlink)
{
union devlink_param_value value;
+ devl_lock(devlink);
value.vbool = false;
devl_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
value);
+ devl_unlock(devlink);
}
static void ena_devlink_port_register(struct devlink *devlink)
@@ -145,10 +147,12 @@ static int ena_devlink_configure_params(struct devlink *devlink)
return rc;
}
+ devl_lock(devlink);
value.vbool = ena_phc_is_enabled(adapter);
devl_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
value);
+ devl_unlock(devlink);
return 0;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index a68757e8fd22..c63ddb12237e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -1928,6 +1928,7 @@ static void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata,
{
if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
pdata->rx_adapt_retries = 0;
+ pdata->mode_set = false;
return;
}
@@ -1974,6 +1975,7 @@ static void xgbe_rx_adaptation(struct xgbe_prv_data *pdata)
*/
netif_dbg(pdata, link, pdata->netdev, "Block_lock done");
pdata->rx_adapt_done = true;
+ pdata->rx_adapt_retries = 0;
pdata->mode_set = false;
return;
}
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 666522d64775..cd7dddeb91dd 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -255,14 +255,15 @@ config BNXT_HWMON
devices, via the hwmon sysfs interface.
config BNGE
- tristate "Broadcom Ethernet device support"
+ tristate "Broadcom ThorUltra Ethernet device support"
depends on PCI
select NET_DEVLINK
select PAGE_POOL
+ select AUXILIARY_BUS
help
- This driver supports Broadcom 50/100/200/400/800 gigabit Ethernet cards.
- The module will be called bng_en. To compile this driver as a module,
- choose M here.
+ This driver supports Broadcom ThorUltra 50/100/200/400/800 gigabit
+ Ethernet cards. The module will be called bng_en. To compile this
+ driver as a module, choose M here.
config BCMASP
tristate "Broadcom ASP 2.0 Ethernet support"
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 888f28f11406..90df02e0039c 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1790,6 +1790,9 @@ static int b44_nway_reset(struct net_device *dev)
u32 bmcr;
int r;
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ return phy_ethtool_nway_reset(dev);
+
spin_lock_irq(&bp->lock);
b44_readphy(bp, MII_BMCR, &bmcr);
b44_readphy(bp, MII_BMCR, &bmcr);
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge.h b/drivers/net/ethernet/broadcom/bnge/bnge.h
index 411744894349..32fc16a37d02 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge.h
@@ -5,7 +5,7 @@
#define _BNGE_H_
#define DRV_NAME "bng_en"
-#define DRV_SUMMARY "Broadcom 800G Ethernet Linux Driver"
+#define DRV_SUMMARY "Broadcom ThorUltra NIC Ethernet Driver"
#include <linux/etherdevice.h>
#include <linux/bnxt/hsi.h>
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_core.c b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
index c94e132bebc8..b4090283df0f 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_core.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
@@ -19,7 +19,7 @@ char bnge_driver_name[] = DRV_NAME;
static const struct {
char *name;
} board_info[] = {
- [BCM57708] = { "Broadcom BCM57708 50Gb/100Gb/200Gb/400Gb/800Gb Ethernet" },
+ [BCM57708] = { "Broadcom BCM57708 ThorUltra 50Gb/100Gb/200Gb/400Gb/800Gb Ethernet" },
};
static const struct pci_device_id bnge_pci_tbl[] = {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d17d0ea89c36..8419d1eb4035 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1482,9 +1482,11 @@ static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
u16 idx = agg_id & MAX_TPA_P5_MASK;
- if (test_bit(idx, map->agg_idx_bmap))
- idx = find_first_zero_bit(map->agg_idx_bmap,
- BNXT_AGG_IDX_BMAP_SIZE);
+ if (test_bit(idx, map->agg_idx_bmap)) {
+ idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA_P5);
+ if (idx >= MAX_TPA_P5)
+ return INVALID_HW_RING_ID;
+ }
__set_bit(idx, map->agg_idx_bmap);
map->agg_id_tbl[agg_id] = idx;
return idx;
@@ -1548,6 +1550,13 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
agg_id = TPA_START_AGG_ID_P5(tpa_start);
agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
+ if (unlikely(agg_id == INVALID_HW_RING_ID)) {
+ netdev_warn(bp->dev, "Unable to allocate agg ID for ring %d, agg 0x%x\n",
+ rxr->bnapi->index,
+ TPA_START_AGG_ID_P5(tpa_start));
+ bnxt_sched_reset_rxr(bp, rxr);
+ return;
+ }
} else {
agg_id = TPA_START_AGG_ID(tpa_start);
}
@@ -16882,12 +16891,12 @@ init_err_dl:
init_err_pci_clean:
bnxt_hwrm_func_drv_unrgtr(bp);
- bnxt_free_hwrm_resources(bp);
- bnxt_hwmon_uninit(bp);
- bnxt_ethtool_free(bp);
bnxt_ptp_clear(bp);
kfree(bp->ptp_cfg);
bp->ptp_cfg = NULL;
+ bnxt_free_hwrm_resources(bp);
+ bnxt_hwmon_uninit(bp);
+ bnxt_ethtool_free(bp);
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index f5f07a7e6b29..f88e7769a838 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1080,11 +1080,9 @@ struct bnxt_tpa_info {
struct rx_agg_cmp *agg_arr;
};
-#define BNXT_AGG_IDX_BMAP_SIZE (MAX_TPA_P5 / BITS_PER_LONG)
-
struct bnxt_tpa_idx_map {
u16 agg_id_tbl[1024];
- unsigned long agg_idx_bmap[BNXT_AGG_IDX_BMAP_SIZE];
+ DECLARE_BITMAP(agg_idx_bmap, MAX_TPA_P5);
};
struct bnxt_rx_ring_info {
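The bnxt.h change above replaces the hand-sized unsigned long array (MAX_TPA_P5 / BITS_PER_LONG longs, an expression that would silently round down if the bit count were ever not a multiple of BITS_PER_LONG) with DECLARE_BITMAP(), which sizes the storage with a round-up and keeps the bit count at the declaration, matching the MAX_TPA_P5-bounded find_first_zero_bit() in the bnxt.c hunk. A small sketch of the idiom with an invented AGG_IDS count:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define AGG_IDS	100	/* example count; deliberately not a multiple of BITS_PER_LONG */

struct agg_map {
	/* Reserves DIV_ROUND_UP(AGG_IDS, BITS_PER_LONG) longs, never too few. */
	DECLARE_BITMAP(ids, AGG_IDS);
};

/* Reserve a free id, or fail with -ENOSPC when the map is exhausted. */
static int agg_map_alloc(struct agg_map *map)
{
	unsigned int id = find_first_zero_bit(map->ids, AGG_IDS);

	if (id >= AGG_IDS)
		return -ENOSPC;

	__set_bit(id, map->ids);
	return id;
}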
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 3e77a96e5a3e..c94a391b1ba5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -268,13 +268,11 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
case XDP_TX:
rx_buf = &rxr->rx_buf_ring[cons];
mapping = rx_buf->mapping - bp->rx_dma_offset;
- *event &= BNXT_TX_CMP_EVENT;
if (unlikely(xdp_buff_has_frags(xdp))) {
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
tx_needed += sinfo->nr_frags;
- *event = BNXT_AGG_EVENT;
}
if (tx_avail < tx_needed) {
@@ -287,6 +285,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
bp->rx_dir);
+ *event &= ~BNXT_RX_EVENT;
*event |= BNXT_TX_EVENT;
__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
NEXT_RX(rxr->rx_prod), xdp);
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index e461f5072884..6511ecd5856b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -708,7 +708,6 @@ static void macb_mac_link_up(struct phylink_config *config,
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
* cleared the pipeline and control registers.
*/
- bp->macbgem_ops.mog_init_rings(bp);
macb_init_buffers(bp);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
@@ -2954,6 +2953,8 @@ static int macb_open(struct net_device *dev)
goto pm_exit;
}
+ bp->macbgem_ops.mog_init_rings(bp);
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
napi_enable(&queue->napi_rx);
napi_enable(&queue->napi_tx);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index d5e5800b84ef..53b26cece16a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1787,7 +1787,8 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
int xdp_tx_bd_cnt, i, k;
int xdp_tx_frm_cnt = 0;
- if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags)))
+ if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags) ||
+ !netif_carrier_ok(ndev)))
return -ENETDOWN;
enetc_lock_mdio();
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index dce27bd67a7d..aecd40aeef9c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -79,9 +79,9 @@ struct enetc_lso_t {
#define ENETC_RXB_TRUESIZE (PAGE_SIZE >> 1)
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
#define ENETC_RXB_DMA_SIZE \
- (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
+ min(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD, 0xffff)
#define ENETC_RXB_DMA_SIZE_XDP \
- (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM)
+ min(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM, 0xffff)
struct enetc_rx_swbd {
dma_addr_t dma;
diff --git a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
index 443983fdecd9..7fd39f895290 100644
--- a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
+++ b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
@@ -577,11 +577,17 @@ static int imx94_enetc_mdio_phyaddr_config(struct netc_blk_ctrl *priv,
}
addr = netc_get_phy_addr(np);
- if (addr <= 0) {
+ if (addr < 0) {
dev_err(dev, "Failed to get PHY address\n");
return addr;
}
+ /* The default value of LaBCR[MDIO_PHYAD_PRTAD] is 0,
+ * so no need to set the register.
+ */
+ if (!addr)
+ return 0;
+
if (phy_mask & BIT(addr)) {
dev_err(dev,
"Find same PHY address in EMDIO and ENETC node\n");
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c685a5c0cc51..a753265961af 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3933,7 +3933,12 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
txq->bd.cur = bdp;
/* Trigger transmission start */
- writel(0, txq->bd.reg_desc_active);
+ if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active))
+ writel(0, txq->bd.reg_desc_active);
return 0;
}
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index a5a2b18d309b..7eb64e1e4d85 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -558,7 +558,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
block->priv = priv;
err = request_irq(priv->msix_vectors[msix_idx].vector,
gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
- 0, block->name, block);
+ IRQF_NO_AUTOEN, block->name, block);
if (err) {
dev_err(&priv->pdev->dev,
"Failed to receive msix vector %d\n", i);
@@ -647,12 +647,9 @@ static int gve_setup_device_resources(struct gve_priv *priv)
err = gve_alloc_counter_array(priv);
if (err)
goto abort_with_rss_config_cache;
- err = gve_init_clock(priv);
- if (err)
- goto abort_with_counter;
err = gve_alloc_notify_blocks(priv);
if (err)
- goto abort_with_clock;
+ goto abort_with_counter;
err = gve_alloc_stats_report(priv);
if (err)
goto abort_with_ntfy_blocks;
@@ -683,10 +680,16 @@ static int gve_setup_device_resources(struct gve_priv *priv)
}
}
+ err = gve_init_clock(priv);
+ if (err) {
+ dev_err(&priv->pdev->dev, "Failed to init clock");
+ goto abort_with_ptype_lut;
+ }
+
err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
if (err) {
dev_err(&priv->pdev->dev, "Failed to init RSS config");
- goto abort_with_ptype_lut;
+ goto abort_with_clock;
}
err = gve_adminq_report_stats(priv, priv->stats_report_len,
@@ -698,6 +701,8 @@ static int gve_setup_device_resources(struct gve_priv *priv)
gve_set_device_resources_ok(priv);
return 0;
+abort_with_clock:
+ gve_teardown_clock(priv);
abort_with_ptype_lut:
kvfree(priv->ptype_lut_dqo);
priv->ptype_lut_dqo = NULL;
@@ -705,8 +710,6 @@ abort_with_stats_report:
gve_free_stats_report(priv);
abort_with_ntfy_blocks:
gve_free_notify_blocks(priv);
-abort_with_clock:
- gve_teardown_clock(priv);
abort_with_counter:
gve_free_counter_array(priv);
abort_with_rss_config_cache:
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index ace9b8698021..b53b7fcdcdaf 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -112,11 +112,13 @@ void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
netif_napi_add_locked(priv->dev, &block->napi, gve_poll);
netif_napi_set_irq_locked(&block->napi, block->irq);
+ enable_irq(block->irq);
}
void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ disable_irq(block->irq);
netif_napi_del_locked(&block->napi);
}
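The two gve hunks above request each notify-block vector with IRQF_NO_AUTOEN, so the line stays masked until gve_add_napi() registers the NAPI context and calls enable_irq(); gve_remove_napi() mirrors that with disable_irq() before deleting the NAPI. A hedged sketch of that ordering, with invented names (my_block, my_poll, my_irq_handler):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_block {
	struct napi_struct napi;
	int irq;
};

static int my_poll(struct napi_struct *napi, int budget)
{
	/* ...process up to @budget packets here... */
	napi_complete_done(napi, 0);
	return 0;
}

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_block *blk = data;

	napi_schedule(&blk->napi);
	return IRQ_HANDLED;
}

/* Request the vector masked: it cannot fire before NAPI exists. */
static int my_block_request_irq(struct my_block *blk, int irq)
{
	blk->irq = irq;
	return request_irq(irq, my_irq_handler, IRQF_NO_AUTOEN, "my-block", blk);
}

static void my_block_add_napi(struct net_device *dev, struct my_block *blk)
{
	netif_napi_add(dev, &blk->napi, my_poll);
	enable_irq(blk->irq);		/* unmask only after NAPI is registered */
}

static void my_block_del_napi(struct my_block *blk)
{
	disable_irq(blk->irq);		/* quiesce the vector first */
	netif_napi_del(&blk->napi);
}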
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index cf8abbe01840..c589baea7c77 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -10555,6 +10555,9 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
bool writen_to_tbl = false;
int ret = 0;
+ if (vlan_id >= VLAN_N_VID)
+ return -EINVAL;
+
/* When device is resetting or reset failed, firmware is unable to
* handle mailbox. Just record the vlan id, and remove it after
* reset finished.
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index c7ff12a6c076..b7d4e06a55d4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -193,10 +193,10 @@ static int hclge_get_ring_chain_from_mbx(
return -EINVAL;
for (i = 0; i < ring_num; i++) {
- if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
+ if (req->msg.param[i].tqp_index >= vport->nic.kinfo.num_tqps) {
dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
req->msg.param[i].tqp_index,
- vport->nic.kinfo.rss_size - 1U);
+ vport->nic.kinfo.num_tqps - 1U);
return -EINVAL;
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 8fcf220a120d..70327a73dee3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -368,12 +368,12 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
new_tqps = kinfo->rss_size * num_tc;
kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
- kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
+ kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
sizeof(struct hnae3_queue *), GFP_KERNEL);
if (!kinfo->tqp)
return -ENOMEM;
- for (i = 0; i < kinfo->num_tqps; i++) {
+ for (i = 0; i < hdev->num_tqps; i++) {
hdev->htqp[i].q.handle = &hdev->nic;
hdev->htqp[i].q.tqp_index = i;
kinfo->tqp[i] = &hdev->htqp[i].q;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 292389aceb2d..7f078ec9c14c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4094,7 +4094,15 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
u32 length, const u8 *data)
{
struct e1000_hw *hw = &adapter->hw;
- u8 last_byte = *(data + length - 1);
+ u8 last_byte;
+
+ /* Guard against OOB on data[length - 1] */
+ if (unlikely(!length))
+ return false;
+ /* Upper bound: length must not exceed rx_buffer_len */
+ if (unlikely(length > adapter->rx_buffer_len))
+ return false;
+ last_byte = *(data + length - 1);
if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
unsigned long irq_flags;
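The e1000 hunk above refuses to evaluate data[length - 1] for the TBI workaround unless length is non-zero and within rx_buffer_len, since a zero length would index one byte before the buffer and an oversized one would read past it. A plain-C illustration of the same guard, with buf_len standing in for the driver's rx_buffer_len:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Fetch the last byte of a received frame into *out, or return false when
 * @length cannot safely index a buffer of @buf_len bytes.
 */
static bool frame_last_byte(const uint8_t *data, size_t length,
			    size_t buf_len, uint8_t *out)
{
	if (length == 0 || length > buf_len)
		return false;	/* would touch data[-1] or run past the end */

	*out = data[length - 1];
	return true;
}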
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d2d03db2acec..dcb50c2e1aa2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1422,4 +1422,15 @@ static inline struct i40e_veb *i40e_pf_get_main_veb(struct i40e_pf *pf)
return (pf->lan_veb != I40E_NO_VEB) ? pf->veb[pf->lan_veb] : NULL;
}
+static inline u32 i40e_get_max_num_descriptors(const struct i40e_pf *pf)
+{
+ const struct i40e_hw *hw = &pf->hw;
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ return I40E_MAX_NUM_DESCRIPTORS_XL710;
+ default:
+ return I40E_MAX_NUM_DESCRIPTORS;
+ }
+}
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index f2c2646ea298..6a47ea0927e9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2013,18 +2013,6 @@ static void i40e_get_drvinfo(struct net_device *netdev,
drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
}
-static u32 i40e_get_max_num_descriptors(struct i40e_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
-
- switch (hw->mac.type) {
- case I40E_MAC_XL710:
- return I40E_MAX_NUM_DESCRIPTORS_XL710;
- default:
- return I40E_MAX_NUM_DESCRIPTORS;
- }
-}
-
static void i40e_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index d8192aa23254..0b1cc0481027 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2234,6 +2234,7 @@ static void i40e_set_rx_mode(struct net_device *netdev)
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}
+ i40e_service_event_schedule(vsi->back);
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 8b30a3accd31..1fa877b52f61 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -656,7 +656,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
/* ring_len has to be multiple of 8 */
if (!IS_ALIGNED(info->ring_len, 8) ||
- info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+ info->ring_len > i40e_get_max_num_descriptors(pf)) {
ret = -EINVAL;
goto error_context;
}
@@ -726,7 +726,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
/* ring_len has to be multiple of 32 */
if (!IS_ALIGNED(info->ring_len, 32) ||
- info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+ info->ring_len > i40e_get_max_num_descriptors(pf)) {
ret = -EINVAL;
goto error_param;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index c2fbe443ef85..4b0fc8f354bc 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1726,11 +1726,11 @@ static int iavf_config_rss_reg(struct iavf_adapter *adapter)
u16 i;
dw = (u32 *)adapter->rss_key;
- for (i = 0; i <= adapter->rss_key_size / 4; i++)
+ for (i = 0; i < adapter->rss_key_size / 4; i++)
wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
dw = (u32 *)adapter->rss_lut;
- for (i = 0; i <= adapter->rss_lut_size / 4; i++)
+ for (i = 0; i < adapter->rss_lut_size / 4; i++)
wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
iavf_flush(hw);
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 8cfc68cbfa06..1bf7934d4e28 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -284,8 +284,7 @@ struct idpf_port_stats {
struct idpf_fsteer_fltr {
struct list_head list;
- u32 loc;
- u32 q_index;
+ struct ethtool_rx_flow_spec fs;
};
/**
@@ -424,14 +423,12 @@ enum idpf_user_flags {
* @rss_key: RSS hash key
* @rss_lut_size: Size of RSS lookup table
* @rss_lut: RSS lookup table
- * @cached_lut: Used to restore previously init RSS lut
*/
struct idpf_rss_data {
u16 rss_key_size;
u8 *rss_key;
u16 rss_lut_size;
u32 *rss_lut;
- u32 *cached_lut;
};
/**
@@ -558,6 +555,7 @@ struct idpf_vector_lifo {
* @max_q: Maximum possible queues
* @req_qs_chunks: Queue chunk data for requested queues
* @mac_filter_list_lock: Lock to protect mac filters
+ * @flow_steer_list_lock: Lock to protect fsteer filters
* @flags: See enum idpf_vport_config_flags
*/
struct idpf_vport_config {
@@ -565,6 +563,7 @@ struct idpf_vport_config {
struct idpf_vport_max_q max_q;
struct virtchnl2_add_queues *req_qs_chunks;
spinlock_t mac_filter_list_lock;
+ spinlock_t flow_steer_list_lock;
DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
};
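The idpf.h change above makes each flow-steering node carry the whole struct ethtool_rx_flow_spec instead of just a location and queue index, and pairs the list with a dedicated flow_steer_list_lock, so the ETHTOOL_GRXCLSRULE path in idpf_ethtool.c can hand the saved spec straight back under the lock. A minimal sketch of that lookup with invented names (my_fltr, my_cfg, my_get_rule):

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_fltr {
	struct list_head list;
	struct ethtool_rx_flow_spec fs;	/* full user-supplied spec, kept verbatim */
};

struct my_cfg {
	spinlock_t lock;		/* protects @filters */
	struct list_head filters;
};

/* ETHTOOL_GRXCLSRULE-style lookup: copy the stored spec back to the caller. */
static int my_get_rule(struct my_cfg *cfg, struct ethtool_rx_flow_spec *out)
{
	struct my_fltr *f;
	int err = -ENOENT;

	spin_lock_bh(&cfg->lock);
	list_for_each_entry(f, &cfg->filters, list) {
		if (f->fs.location == out->location) {
			*out = f->fs;
			err = 0;
			break;
		}
	}
	spin_unlock_bh(&cfg->lock);

	return err;
}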
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 2589e124e41c..2efa3c08aba5 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -37,6 +37,7 @@ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_user_config_data *user_config;
+ struct idpf_vport_config *vport_config;
struct idpf_fsteer_fltr *f;
struct idpf_vport *vport;
unsigned int cnt = 0;
@@ -44,7 +45,8 @@ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+ vport_config = np->adapter->vport_config[np->vport_idx];
+ user_config = &vport_config->user_config;
switch (cmd->cmd) {
case ETHTOOL_GRXCLSRLCNT:
@@ -52,26 +54,34 @@ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
cmd->data = idpf_fsteer_max_rules(vport);
break;
case ETHTOOL_GRXCLSRULE:
- err = -EINVAL;
+ err = -ENOENT;
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
list_for_each_entry(f, &user_config->flow_steer_list, list)
- if (f->loc == cmd->fs.location) {
- cmd->fs.ring_cookie = f->q_index;
+ if (f->fs.location == cmd->fs.location) {
+ /* Avoid infoleak from padding: zero first,
+ * then assign fields
+ */
+ memset(&cmd->fs, 0, sizeof(cmd->fs));
+ cmd->fs = f->fs;
err = 0;
break;
}
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
break;
case ETHTOOL_GRXCLSRLALL:
cmd->data = idpf_fsteer_max_rules(vport);
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
list_for_each_entry(f, &user_config->flow_steer_list, list) {
if (cnt == cmd->rule_cnt) {
err = -EMSGSIZE;
break;
}
- rule_locs[cnt] = f->loc;
+ rule_locs[cnt] = f->fs.location;
cnt++;
}
if (!err)
cmd->rule_cnt = user_config->num_fsteer_fltrs;
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
break;
default:
break;
@@ -168,7 +178,7 @@ static int idpf_add_flow_steer(struct net_device *netdev,
struct idpf_vport *vport;
u32 flow_type, q_index;
u16 num_rxq;
- int err;
+ int err = 0;
vport = idpf_netdev_to_vport(netdev);
vport_config = vport->adapter->vport_config[np->vport_idx];
@@ -194,6 +204,29 @@ static int idpf_add_flow_steer(struct net_device *netdev,
if (!rule)
return -ENOMEM;
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr) {
+ err = -ENOMEM;
+ goto out_free_rule;
+ }
+
+ /* detect duplicate entry and reject before adding rules */
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
+ list_for_each_entry(f, &user_config->flow_steer_list, list) {
+ if (f->fs.location == fsp->location) {
+ err = -EEXIST;
+ break;
+ }
+
+ if (f->fs.location > fsp->location)
+ break;
+ parent = f;
+ }
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
+
+ if (err)
+ goto out;
+
rule->vport_id = cpu_to_le32(vport->vport_id);
rule->count = cpu_to_le32(1);
info = &rule->rule_info[0];
@@ -232,26 +265,20 @@ static int idpf_add_flow_steer(struct net_device *netdev,
goto out;
}
- fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
- if (!fltr) {
- err = -ENOMEM;
- goto out;
- }
-
- fltr->loc = fsp->location;
- fltr->q_index = q_index;
- list_for_each_entry(f, &user_config->flow_steer_list, list) {
- if (f->loc >= fltr->loc)
- break;
- parent = f;
- }
+ /* Save a copy of the user's flow spec so ethtool can later retrieve it */
+ fltr->fs = *fsp;
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
parent ? list_add(&fltr->list, &parent->list) :
list_add(&fltr->list, &user_config->flow_steer_list);
user_config->num_fsteer_fltrs++;
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
+ goto out_free_rule;
out:
+ kfree(fltr);
+out_free_rule:
kfree(rule);
return err;
}
@@ -302,17 +329,20 @@ static int idpf_del_flow_steer(struct net_device *netdev,
goto out;
}
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
list_for_each_entry_safe(f, iter,
&user_config->flow_steer_list, list) {
- if (f->loc == fsp->location) {
+ if (f->fs.location == fsp->location) {
list_del(&f->list);
kfree(f);
user_config->num_fsteer_fltrs--;
- goto out;
+ goto out_unlock;
}
}
- err = -EINVAL;
+ err = -ENOENT;
+out_unlock:
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
out:
kfree(rule);
return err;
@@ -381,7 +411,10 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
* @netdev: network interface device structure
* @rxfh: pointer to param struct (indir, key, hfunc)
*
- * Reads the indirection table directly from the hardware. Always returns 0.
+ * RSS LUT and key information are read from the driver's cached
+ * copy. When rxhash is off, the RSS LUT will be displayed as zeros.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
static int idpf_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
@@ -389,10 +422,13 @@ static int idpf_get_rxfh(struct net_device *netdev,
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_rss_data *rss_data;
struct idpf_adapter *adapter;
+ struct idpf_vport *vport;
+ bool rxhash_ena;
int err = 0;
u16 i;
idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
adapter = np->adapter;
@@ -402,9 +438,8 @@ static int idpf_get_rxfh(struct net_device *netdev,
}
rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
- if (!test_bit(IDPF_VPORT_UP, np->state))
- goto unlock_mutex;
+ rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (rxfh->key)
@@ -412,7 +447,7 @@ static int idpf_get_rxfh(struct net_device *netdev,
if (rxfh->indir) {
for (i = 0; i < rss_data->rss_lut_size; i++)
- rxfh->indir[i] = rss_data->rss_lut[i];
+ rxfh->indir[i] = rxhash_ena ? rss_data->rss_lut[i] : 0;
}
unlock_mutex:
@@ -452,8 +487,6 @@ static int idpf_set_rxfh(struct net_device *netdev,
}
rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
- if (!test_bit(IDPF_VPORT_UP, np->state))
- goto unlock_mutex;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP) {
@@ -469,7 +502,8 @@ static int idpf_set_rxfh(struct net_device *netdev,
rss_data->rss_lut[lut] = rxfh->indir[lut];
}
- err = idpf_config_rss(vport);
+ if (test_bit(IDPF_VPORT_UP, np->state))
+ err = idpf_config_rss(vport);
unlock_mutex:
idpf_vport_ctrl_unlock(netdev);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_idc.c b/drivers/net/ethernet/intel/idpf/idpf_idc.c
index 7e20a07e98e5..6dad0593f7f2 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_idc.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_idc.c
@@ -322,7 +322,7 @@ static void idpf_idc_vport_dev_down(struct idpf_adapter *adapter)
for (i = 0; i < adapter->num_alloc_vports; i++) {
struct idpf_vport *vport = adapter->vports[i];
- if (!vport)
+ if (!vport || !vport->vdev_info)
continue;
idpf_unplug_aux_dev(vport->vdev_info->adev);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 7a7e101afeb6..131a8121839b 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -443,6 +443,29 @@ send_dealloc_vecs:
}
/**
+ * idpf_del_all_flow_steer_filters - Delete all flow steer filters in list
+ * @vport: main vport struct
+ *
+ * Takes the flow_steer_list_lock spinlock and deletes all filters.
+ */
+static void idpf_del_all_flow_steer_filters(struct idpf_vport *vport)
+{
+ struct idpf_vport_config *vport_config;
+ struct idpf_fsteer_fltr *f, *ftmp;
+
+ vport_config = vport->adapter->vport_config[vport->idx];
+
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
+ list_for_each_entry_safe(f, ftmp, &vport_config->user_config.flow_steer_list,
+ list) {
+ list_del(&f->list);
+ kfree(f);
+ }
+ vport_config->user_config.num_fsteer_fltrs = 0;
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
+}
+
+/**
* idpf_find_mac_filter - Search filter list for specific mac filter
* @vconfig: Vport config structure
* @macaddr: The MAC address
@@ -729,6 +752,65 @@ static int idpf_init_mac_addr(struct idpf_vport *vport,
return 0;
}
+static void idpf_detach_and_close(struct idpf_adapter *adapter)
+{
+ int max_vports = adapter->max_vports;
+
+ for (int i = 0; i < max_vports; i++) {
+ struct net_device *netdev = adapter->netdevs[i];
+
+ /* If the interface is in detached state, that means the
+ * previous reset was not handled successfully for this
+ * vport.
+ */
+ if (!netif_device_present(netdev))
+ continue;
+
+ /* Hold RTNL to protect racing with callbacks */
+ rtnl_lock();
+ netif_device_detach(netdev);
+ if (netif_running(netdev)) {
+ set_bit(IDPF_VPORT_UP_REQUESTED,
+ adapter->vport_config[i]->flags);
+ dev_close(netdev);
+ }
+ rtnl_unlock();
+ }
+}
+
+static void idpf_attach_and_open(struct idpf_adapter *adapter)
+{
+ int max_vports = adapter->max_vports;
+
+ for (int i = 0; i < max_vports; i++) {
+ struct idpf_vport *vport = adapter->vports[i];
+ struct idpf_vport_config *vport_config;
+ struct net_device *netdev;
+
+ /* In case of a critical error in the init task, the vport
+ * will be freed. Only continue to restore the netdevs
+ * if the vport is allocated.
+ */
+ if (!vport)
+ continue;
+
+ /* No need for RTNL on attach as this function is called
+ * following detach and dev_close(). We do take RTNL for
+ * dev_open() below as it can race with external callbacks
+ * following the call to netif_device_attach().
+ */
+ netdev = adapter->netdevs[i];
+ netif_device_attach(netdev);
+ vport_config = adapter->vport_config[vport->idx];
+ if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED,
+ vport_config->flags)) {
+ rtnl_lock();
+ dev_open(netdev, NULL);
+ rtnl_unlock();
+ }
+ }
+}
+
/**
* idpf_cfg_netdev - Allocate, configure and register a netdev
* @vport: main vport structure
@@ -991,7 +1073,7 @@ static void idpf_vport_rel(struct idpf_vport *vport)
u16 idx = vport->idx;
vport_config = adapter->vport_config[vport->idx];
- idpf_deinit_rss(vport);
+ idpf_deinit_rss_lut(vport);
rss_data = &vport_config->user_config.rss_data;
kfree(rss_data->rss_key);
rss_data->rss_key = NULL;
@@ -1023,6 +1105,8 @@ static void idpf_vport_rel(struct idpf_vport *vport)
kfree(adapter->vport_config[idx]->req_qs_chunks);
adapter->vport_config[idx]->req_qs_chunks = NULL;
}
+ kfree(vport->rx_ptype_lkup);
+ vport->rx_ptype_lkup = NULL;
kfree(vport);
adapter->num_alloc_vports--;
}
@@ -1041,12 +1125,15 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
idpf_idc_deinit_vport_aux_device(vport->vdev_info);
idpf_deinit_mac_addr(vport);
- idpf_vport_stop(vport, true);
- if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
+ if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) {
+ idpf_vport_stop(vport, true);
idpf_decfg_netdev(vport);
- if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
+ }
+ if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
idpf_del_all_mac_filters(vport);
+ idpf_del_all_flow_steer_filters(vport);
+ }
if (adapter->netdevs[i]) {
struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]);
@@ -1139,6 +1226,7 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
u16 idx = adapter->next_vport;
struct idpf_vport *vport;
u16 num_max_q;
+ int err;
if (idx == IDPF_NO_FREE_SLOT)
return NULL;
@@ -1189,10 +1277,11 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
idpf_vport_init(vport, max_q);
- /* This alloc is done separate from the LUT because it's not strictly
- * dependent on how many queues we have. If we change number of queues
- * and soft reset we'll need a new LUT but the key can remain the same
- * for as long as the vport exists.
+ /* LUT and key are both initialized here. Key is not strictly dependent
+ * on how many queues we have. If we change number of queues and soft
+ * reset is initiated, LUT will be freed and a new LUT will be allocated
+ * as per the updated number of queues during vport bringup. However,
+ * the key remains the same for as long as the vport exists.
*/
rss_data = &adapter->vport_config[idx]->user_config.rss_data;
rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
@@ -1202,6 +1291,11 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
/* Initialize default rss key */
netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
+ /* Initialize default rss LUT */
+ err = idpf_init_rss_lut(vport);
+ if (err)
+ goto free_rss_key;
+
/* fill vport slot in the adapter struct */
adapter->vports[idx] = vport;
adapter->vport_ids[idx] = idpf_get_vport_id(vport);
@@ -1212,6 +1306,8 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
return vport;
+free_rss_key:
+ kfree(rss_data->rss_key);
free_vector_idxs:
kfree(vport->q_vector_idxs);
free_vport:
@@ -1271,7 +1367,7 @@ void idpf_mbx_task(struct work_struct *work)
idpf_mb_irq_enable(adapter);
else
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
- msecs_to_jiffies(300));
+ usecs_to_jiffies(300));
idpf_recv_mb_msg(adapter);
}
@@ -1388,7 +1484,6 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct idpf_adapter *adapter = vport->adapter;
- struct idpf_vport_config *vport_config;
int err;
if (test_bit(IDPF_VPORT_UP, np->state))
@@ -1429,14 +1524,14 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
vport->vport_id, err);
- goto queues_rel;
+ goto intr_deinit;
}
err = idpf_rx_bufs_init_all(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
vport->vport_id, err);
- goto queues_rel;
+ goto intr_deinit;
}
idpf_rx_init_buf_tail(vport);
@@ -1482,13 +1577,9 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
idpf_restore_features(vport);
- vport_config = adapter->vport_config[vport->idx];
- if (vport_config->user_config.rss_data.rss_lut)
- err = idpf_config_rss(vport);
- else
- err = idpf_init_rss(vport);
+ err = idpf_config_rss(vport);
if (err) {
- dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n",
+ dev_err(&adapter->pdev->dev, "Failed to configure RSS for vport %u: %d\n",
vport->vport_id, err);
goto disable_vport;
}
@@ -1497,7 +1588,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
if (err) {
dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n",
vport->vport_id, err);
- goto deinit_rss;
+ goto disable_vport;
}
if (rtnl)
@@ -1505,8 +1596,6 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
return 0;
-deinit_rss:
- idpf_deinit_rss(vport);
disable_vport:
idpf_send_disable_vport_msg(vport);
disable_queues:
@@ -1544,7 +1633,6 @@ void idpf_init_task(struct work_struct *work)
struct idpf_vport_config *vport_config;
struct idpf_vport_max_q max_q;
struct idpf_adapter *adapter;
- struct idpf_netdev_priv *np;
struct idpf_vport *vport;
u16 num_default_vports;
struct pci_dev *pdev;
@@ -1579,10 +1667,15 @@ void idpf_init_task(struct work_struct *work)
goto unwind_vports;
}
+ err = idpf_send_get_rx_ptype_msg(vport);
+ if (err)
+ goto unwind_vports;
+
index = vport->idx;
vport_config = adapter->vport_config[index];
spin_lock_init(&vport_config->mac_filter_list_lock);
+ spin_lock_init(&vport_config->flow_steer_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list);
@@ -1590,21 +1683,11 @@ void idpf_init_task(struct work_struct *work)
err = idpf_check_supported_desc_ids(vport);
if (err) {
dev_err(&pdev->dev, "failed to get required descriptor ids\n");
- goto cfg_netdev_err;
+ goto unwind_vports;
}
if (idpf_cfg_netdev(vport))
- goto cfg_netdev_err;
-
- err = idpf_send_get_rx_ptype_msg(vport);
- if (err)
- goto handle_err;
-
- /* Once state is put into DOWN, driver is ready for dev_open */
- np = netdev_priv(vport->netdev);
- clear_bit(IDPF_VPORT_UP, np->state);
- if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
- idpf_vport_open(vport, true);
+ goto unwind_vports;
/* Spawn and return 'idpf_init_task' work queue until all the
* default vports are created
@@ -1635,21 +1718,15 @@ void idpf_init_task(struct work_struct *work)
set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags);
}
- /* As all the required vports are created, clear the reset flag
- * unconditionally here in case we were in reset and the link was down.
- */
+ /* Clear the reset and load bits as all vports are created */
clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+ clear_bit(IDPF_HR_DRV_LOAD, adapter->flags);
/* Start the statistics task now */
queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
msecs_to_jiffies(10 * (pdev->devfn & 0x07)));
return;
-handle_err:
- idpf_decfg_netdev(vport);
-cfg_netdev_err:
- idpf_vport_rel(vport);
- adapter->vports[index] = NULL;
unwind_vports:
if (default_vport) {
for (index = 0; index < adapter->max_vports; index++) {
@@ -1657,6 +1734,15 @@ unwind_vports:
idpf_vport_dealloc(adapter->vports[index]);
}
}
+ /* Cleanup after vc_core_init, which has no way of knowing the
+ * init task failed on driver load.
+ */
+ if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
+ cancel_delayed_work_sync(&adapter->serv_task);
+ cancel_delayed_work_sync(&adapter->mbx_task);
+ }
+ idpf_ptp_release(adapter);
+
clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
}
@@ -1787,27 +1873,6 @@ static int idpf_check_reset_complete(struct idpf_hw *hw,
}
/**
- * idpf_set_vport_state - Set the vport state to be after the reset
- * @adapter: Driver specific private structure
- */
-static void idpf_set_vport_state(struct idpf_adapter *adapter)
-{
- u16 i;
-
- for (i = 0; i < adapter->max_vports; i++) {
- struct idpf_netdev_priv *np;
-
- if (!adapter->netdevs[i])
- continue;
-
- np = netdev_priv(adapter->netdevs[i]);
- if (test_bit(IDPF_VPORT_UP, np->state))
- set_bit(IDPF_VPORT_UP_REQUESTED,
- adapter->vport_config[i]->flags);
- }
-}
-
-/**
* idpf_init_hard_reset - Initiate a hardware reset
* @adapter: Driver specific private structure
*
@@ -1815,37 +1880,25 @@ static void idpf_set_vport_state(struct idpf_adapter *adapter)
* reallocate. Also reinitialize the mailbox. Return 0 on success,
* negative on failure.
*/
-static int idpf_init_hard_reset(struct idpf_adapter *adapter)
+static void idpf_init_hard_reset(struct idpf_adapter *adapter)
{
struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops;
struct device *dev = &adapter->pdev->dev;
- struct net_device *netdev;
int err;
- u16 i;
+ idpf_detach_and_close(adapter);
mutex_lock(&adapter->vport_ctrl_lock);
dev_info(dev, "Device HW Reset initiated\n");
- /* Avoid TX hangs on reset */
- for (i = 0; i < adapter->max_vports; i++) {
- netdev = adapter->netdevs[i];
- if (!netdev)
- continue;
-
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
- }
-
/* Prepare for reset */
- if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
+ if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD);
} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
bool is_reset = idpf_is_reset_detected(adapter);
idpf_idc_issue_reset_event(adapter->cdev_info);
- idpf_set_vport_state(adapter);
idpf_vc_core_deinit(adapter);
if (!is_reset)
reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET);
@@ -1892,11 +1945,14 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
unlock_mutex:
mutex_unlock(&adapter->vport_ctrl_lock);
- /* Wait until all vports are created to init RDMA CORE AUX */
- if (!err)
- err = idpf_idc_init(adapter);
-
- return err;
+ /* Attempt to restore netdevs and initialize RDMA CORE AUX device,
+ * provided vc_core_init succeeded. It is still possible that
+ * vports are not allocated at this point if the init task failed.
+ */
+ if (!err) {
+ idpf_attach_and_open(adapter);
+ idpf_idc_init(adapter);
+ }
}
/**
@@ -1997,7 +2053,6 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
idpf_vport_stop(vport, false);
}
- idpf_deinit_rss(vport);
/* We're passing in vport here because we need its wait_queue
* to send a message and it should be getting all the vport
* config data out of the adapter but we need to be careful not
@@ -2023,6 +2078,10 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
if (err)
goto err_open;
+ if (reset_cause == IDPF_SR_Q_CHANGE &&
+ !netif_is_rxfh_configured(vport->netdev))
+ idpf_fill_dflt_rss_lut(vport);
+
if (vport_is_up)
err = idpf_vport_open(vport, false);
@@ -2166,40 +2225,6 @@ static void idpf_set_rx_mode(struct net_device *netdev)
}
/**
- * idpf_vport_manage_rss_lut - disable/enable RSS
- * @vport: the vport being changed
- *
- * In the event of disable request for RSS, this function will zero out RSS
- * LUT, while in the event of enable request for RSS, it will reconfigure RSS
- * LUT with the default LUT configuration.
- */
-static int idpf_vport_manage_rss_lut(struct idpf_vport *vport)
-{
- bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
- struct idpf_rss_data *rss_data;
- u16 idx = vport->idx;
- int lut_size;
-
- rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data;
- lut_size = rss_data->rss_lut_size * sizeof(u32);
-
- if (ena) {
- /* This will contain the default or user configured LUT */
- memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size);
- } else {
- /* Save a copy of the current LUT to be restored later if
- * requested.
- */
- memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size);
-
- /* Zero out the current LUT to disable */
- memset(rss_data->rss_lut, 0, lut_size);
- }
-
- return idpf_config_rss(vport);
-}
-
-/**
* idpf_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
@@ -2224,10 +2249,19 @@ static int idpf_set_features(struct net_device *netdev,
}
if (changed & NETIF_F_RXHASH) {
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
netdev->features ^= NETIF_F_RXHASH;
- err = idpf_vport_manage_rss_lut(vport);
- if (err)
- goto unlock_mutex;
+
+ /* If the interface is not up when changing the rxhash, the update
+ * to the HW is skipped. The updated LUT will be committed to
+ * the HW when the interface is brought up.
+ */
+ if (test_bit(IDPF_VPORT_UP, np->state)) {
+ err = idpf_config_rss(vport);
+ if (err)
+ goto unlock_mutex;
+ }
}
if (changed & NETIF_F_GRO_HW) {
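The idpf_lib.c changes above wrap the hard reset in a detach-and-close / attach-and-open pair: each present netdev is detached and closed under RTNL (remembering that it was up via IDPF_VPORT_UP_REQUESTED) and reopened after the reset completes. A compact sketch of the same pattern for a single netdev, with a hypothetical was_up flag standing in for the per-vport REQUESTED bit:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Quiesce @netdev ahead of a device-wide reset; returns true if it was up. */
static bool my_detach_and_close(struct net_device *netdev)
{
	bool was_up = false;

	if (!netif_device_present(netdev))
		return false;	/* a previous reset already left it detached */

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		was_up = true;
		dev_close(netdev);
	}
	rtnl_unlock();

	return was_up;
}

/* Re-attach after the reset and reopen the interface if it had been up. */
static void my_attach_and_open(struct net_device *netdev, bool was_up)
{
	netif_device_attach(netdev);

	if (was_up) {
		rtnl_lock();
		dev_open(netdev, NULL);
		rtnl_unlock();
	}
}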
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 1d91c56f7469..7f3933ca9edc 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -695,9 +695,10 @@ err:
static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
{
struct libeth_fq fq = {
- .count = rxq->desc_count,
- .type = LIBETH_FQE_MTU,
- .nid = idpf_q_vector_to_mem(rxq->q_vector),
+ .count = rxq->desc_count,
+ .type = LIBETH_FQE_MTU,
+ .buf_len = IDPF_RX_MAX_BUF_SZ,
+ .nid = idpf_q_vector_to_mem(rxq->q_vector),
};
int ret;
@@ -754,6 +755,7 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
.truesize = bufq->truesize,
.count = bufq->desc_count,
.type = type,
+ .buf_len = IDPF_RX_MAX_BUF_SZ,
.hsplit = idpf_queue_has(HSPLIT_EN, bufq),
.xdp = idpf_xdp_enabled(bufq->q_vector->vport),
.nid = idpf_q_vector_to_mem(bufq->q_vector),
@@ -4641,7 +4643,7 @@ int idpf_config_rss(struct idpf_vport *vport)
* idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
* @vport: virtual port structure
*/
-static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
+void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
u16 num_active_rxq = vport->num_rxq;
@@ -4650,57 +4652,47 @@ static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
- for (i = 0; i < rss_data->rss_lut_size; i++) {
+ for (i = 0; i < rss_data->rss_lut_size; i++)
rss_data->rss_lut[i] = i % num_active_rxq;
- rss_data->cached_lut[i] = rss_data->rss_lut[i];
- }
}
/**
- * idpf_init_rss - Allocate and initialize RSS resources
+ * idpf_init_rss_lut - Allocate and initialize RSS LUT
* @vport: virtual port
*
- * Return 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-int idpf_init_rss(struct idpf_vport *vport)
+int idpf_init_rss_lut(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_rss_data *rss_data;
- u32 lut_size;
rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ if (!rss_data->rss_lut) {
+ u32 lut_size;
- lut_size = rss_data->rss_lut_size * sizeof(u32);
- rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
- if (!rss_data->rss_lut)
- return -ENOMEM;
-
- rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
- if (!rss_data->cached_lut) {
- kfree(rss_data->rss_lut);
- rss_data->rss_lut = NULL;
-
- return -ENOMEM;
+ lut_size = rss_data->rss_lut_size * sizeof(u32);
+ rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
+ if (!rss_data->rss_lut)
+ return -ENOMEM;
}
/* Fill the default RSS lut values */
idpf_fill_dflt_rss_lut(vport);
- return idpf_config_rss(vport);
+ return 0;
}
/**
- * idpf_deinit_rss - Release RSS resources
+ * idpf_deinit_rss_lut - Release RSS LUT
* @vport: virtual port
*/
-void idpf_deinit_rss(struct idpf_vport *vport)
+void idpf_deinit_rss_lut(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_rss_data *rss_data;
rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
- kfree(rss_data->cached_lut);
- rss_data->cached_lut = NULL;
kfree(rss_data->rss_lut);
rss_data->rss_lut = NULL;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 75b977094741..423cc9486dce 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -101,6 +101,7 @@ do { \
idx = 0; \
} while (0)
+#define IDPF_RX_MAX_BUF_SZ (16384 - 128)
#define IDPF_RX_BUF_STRIDE 32
#define IDPF_RX_BUF_POST_STRIDE 16
#define IDPF_LOW_WATERMARK 64
@@ -1085,9 +1086,10 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport);
void idpf_vport_intr_ena(struct idpf_vport *vport);
+void idpf_fill_dflt_rss_lut(struct idpf_vport *vport);
int idpf_config_rss(struct idpf_vport *vport);
-int idpf_init_rss(struct idpf_vport *vport);
-void idpf_deinit_rss(struct idpf_vport *vport);
+int idpf_init_rss_lut(struct idpf_vport *vport);
+void idpf_deinit_rss_lut(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 44cd4b466c48..cb702eac86c8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -1016,6 +1016,9 @@ static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
struct idpf_vc_xn_params xn_params = {
.vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,
.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
+ .send_buf.iov_len =
+ sizeof(struct virtchnl2_get_lan_memory_regions) +
+ sizeof(struct virtchnl2_mem_region),
.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
};
int num_regions, size;
@@ -1028,6 +1031,8 @@ static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
return -ENOMEM;
xn_params.recv_buf.iov_base = rcvd_regions;
+ rcvd_regions->num_memory_regions = cpu_to_le16(1);
+ xn_params.send_buf.iov_base = rcvd_regions;
reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
@@ -2799,6 +2804,10 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
* @vport: virtual port data structure
* @get: flag to set or get rss look up table
*
+ * When rxhash is disabled, the RSS LUT will be configured with zeros. If
+ * rxhash is enabled, the LUT values stored in the driver's soft copy will be
+ * used to set up the HW.
+ *
* Returns 0 on success, negative on failure.
*/
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
@@ -2809,10 +2818,12 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
struct idpf_rss_data *rss_data;
int buf_size, lut_buf_size;
ssize_t reply_sz;
+ bool rxhash_ena;
int i;
rss_data =
&vport->adapter->vport_config[vport->idx]->user_config.rss_data;
+ rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
rl = kzalloc(buf_size, GFP_KERNEL);
if (!rl)
@@ -2834,7 +2845,8 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
} else {
rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
for (i = 0; i < rss_data->rss_lut_size; i++)
- rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
+ rl->lut[i] = rxhash_ena ?
+ cpu_to_le32(rss_data->rss_lut[i]) : 0;
xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
}
@@ -3565,6 +3577,7 @@ init_failed:
*/
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
+ struct idpf_hw *hw = &adapter->hw;
bool remove_in_prog;
if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
@@ -3588,6 +3601,9 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
idpf_vport_params_buf_rel(adapter);
+ kfree(hw->lan_regs);
+ hw->lan_regs = NULL;
+
kfree(adapter->vports);
adapter->vports = NULL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index b90e23dc49de..b6449f0a9e7d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -418,6 +418,14 @@ static int otx2_set_ringparam(struct net_device *netdev,
*/
if (rx_count < pfvf->hw.rq_skid)
rx_count = pfvf->hw.rq_skid;
+
+ if (ring->rx_pending < 16) {
+ netdev_err(netdev,
+ "rx ring size %u invalid, min is 16\n",
+ ring->rx_pending);
+ return -EINVAL;
+ }
+
rx_count = Q_COUNT(Q_SIZE(rx_count, 3));
/* Due pipelining impact minimum 2000 unused SQ CQE's
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
index 2a4c9df4eb79..e63d95c1842f 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -387,6 +387,8 @@ struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev)
dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch),
dev->dev);
+ if (!dl)
+ return NULL;
return devlink_priv(dl);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 887adf4807d1..ea77fbd98396 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -197,6 +197,11 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct pci_dev *pdev = dev->pdev;
int ret = 0;
+ if (mlx5_fw_reset_in_progress(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't reload during firmware reset");
+ return -EBUSY;
+ }
+
if (mlx5_dev_is_lightweight(dev)) {
if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 7bcf822a89f9..6b4ec457ce22 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -33,6 +33,7 @@
#include "lib/eq.h"
#include "fw_tracer.h"
#include "fw_tracer_tracepoint.h"
+#include <linux/ctype.h>
static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
{
@@ -358,6 +359,47 @@ static const char *VAL_PARM = "%llx";
static const char *REPLACE_64_VAL_PARM = "%x%x";
static const char *PARAM_CHAR = "%";
+static bool mlx5_is_valid_spec(const char *str)
+{
+ /* Parse format specifiers to find the actual type.
+ * Structure: %[flags][width][.precision][length]type
+ * Skip flags, width, precision & length.
+ */
+ while (isdigit(*str) || *str == '#' || *str == '.' || *str == 'l')
+ str++;
+
+ /* Check if it's a valid integer/hex specifier or %%:
+ * Valid formats: %x, %d, %i, %u, etc.
+ */
+ if (*str != 'x' && *str != 'X' && *str != 'd' && *str != 'i' &&
+ *str != 'u' && *str != 'c' && *str != '%')
+ return false;
+
+ return true;
+}
+
+static bool mlx5_tracer_validate_params(const char *str)
+{
+ const char *substr = str;
+
+ if (!str)
+ return false;
+
+ substr = strstr(substr, PARAM_CHAR);
+ while (substr) {
+ if (!mlx5_is_valid_spec(substr + 1))
+ return false;
+
+ if (*(substr + 1) == '%')
+ substr = strstr(substr + 2, PARAM_CHAR);
+ else
+ substr = strstr(substr + 1, PARAM_CHAR);
+
+ }
+
+ return true;
+}
+
static int mlx5_tracer_message_hash(u32 message_id)
{
return jhash_1word(message_id, 0) & (MESSAGE_HASH_SIZE - 1);
@@ -419,6 +461,10 @@ static int mlx5_tracer_get_num_of_params(char *str)
char *substr, *pstr = str;
int num_of_params = 0;
+ /* Validate all format specifiers before processing the string */
+ if (!mlx5_tracer_validate_params(str))
+ return -EINVAL;
+
/* replace %llx with %x%x */
substr = strstr(pstr, VAL_PARM);
while (substr) {
@@ -427,11 +473,15 @@ static int mlx5_tracer_get_num_of_params(char *str)
substr = strstr(pstr, VAL_PARM);
}
- /* count all the % characters */
+ /* count all the % characters, but skip %% (escaped percent) */
substr = strstr(str, PARAM_CHAR);
while (substr) {
- num_of_params += 1;
- str = substr + 1;
+ if (*(substr + 1) != '%') {
+ num_of_params += 1;
+ str = substr + 1;
+ } else {
+ str = substr + 2;
+ }
substr = strstr(str, PARAM_CHAR);
}
@@ -570,14 +620,17 @@ void mlx5_tracer_print_trace(struct tracer_string_format *str_frmt,
{
char tmp[512];
- snprintf(tmp, sizeof(tmp), str_frmt->string,
- str_frmt->params[0],
- str_frmt->params[1],
- str_frmt->params[2],
- str_frmt->params[3],
- str_frmt->params[4],
- str_frmt->params[5],
- str_frmt->params[6]);
+ if (str_frmt->invalid_string)
+ snprintf(tmp, sizeof(tmp), "BAD_FORMAT: %s", str_frmt->string);
+ else
+ snprintf(tmp, sizeof(tmp), str_frmt->string,
+ str_frmt->params[0],
+ str_frmt->params[1],
+ str_frmt->params[2],
+ str_frmt->params[3],
+ str_frmt->params[4],
+ str_frmt->params[5],
+ str_frmt->params[6]);
trace_mlx5_fw(dev->tracer, trace_timestamp, str_frmt->lost,
str_frmt->event_id, tmp);
@@ -609,6 +662,13 @@ static int mlx5_tracer_handle_raw_string(struct mlx5_fw_tracer *tracer,
return 0;
}
+static void mlx5_tracer_handle_bad_format_string(struct mlx5_fw_tracer *tracer,
+ struct tracer_string_format *cur_string)
+{
+ cur_string->invalid_string = true;
+ list_add_tail(&cur_string->list, &tracer->ready_strings_list);
+}
+
static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
struct tracer_event *tracer_event)
{
@@ -619,12 +679,18 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
if (!cur_string)
return mlx5_tracer_handle_raw_string(tracer, tracer_event);
- cur_string->num_of_params = mlx5_tracer_get_num_of_params(cur_string->string);
- cur_string->last_param_num = 0;
cur_string->event_id = tracer_event->event_id;
cur_string->tmsn = tracer_event->string_event.tmsn;
cur_string->timestamp = tracer_event->string_event.timestamp;
cur_string->lost = tracer_event->lost_event;
+ cur_string->last_param_num = 0;
+ cur_string->num_of_params = mlx5_tracer_get_num_of_params(cur_string->string);
+ if (cur_string->num_of_params < 0) {
+ pr_debug("%s Invalid format string parameters\n",
+ __func__);
+ mlx5_tracer_handle_bad_format_string(tracer, cur_string);
+ return 0;
+ }
if (cur_string->num_of_params == 0) /* trace with no params */
list_add_tail(&cur_string->list, &tracer->ready_strings_list);
} else {
@@ -634,6 +700,11 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
__func__, tracer_event->string_event.tmsn);
return mlx5_tracer_handle_raw_string(tracer, tracer_event);
}
+ if (cur_string->num_of_params < 0) {
+ pr_debug("%s string parameter of invalid string, dumping\n",
+ __func__);
+ return 0;
+ }
cur_string->last_param_num += 1;
if (cur_string->last_param_num > TRACER_MAX_PARAMS) {
pr_debug("%s Number of params exceeds the max (%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
index 5c548bb74f07..30d0bcba8847 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
@@ -125,6 +125,7 @@ struct tracer_string_format {
struct list_head list;
u32 timestamp;
bool lost;
+ bool invalid_string;
};
enum mlx5_fw_tracer_ownership_state {
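Aside: the fw_tracer hunks above feed firmware-supplied format strings to snprintf(), so they now accept only integer-style conversions and the escaped "%%" form. A rough standalone illustration of that parsing idea (plain userspace C, editor's own function names, not the kernel code itself):

/* Minimal sketch: reject any '%' that does not resolve to an integer/char
 * conversion or an escaped "%%", mirroring the validation added above.
 */
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool spec_is_valid(const char *s)
{
	/* Skip flags, width, precision and length: %[flags][width][.prec][len]type */
	while (isdigit((unsigned char)*s) || *s == '#' || *s == '.' || *s == 'l')
		s++;
	return *s && strchr("xXdiuc%", *s);
}

static bool format_is_valid(const char *str)
{
	const char *p = strchr(str, '%');

	while (p) {
		if (!spec_is_valid(p + 1))
			return false;
		/* "%%" consumes two characters, a real specifier only one */
		p = strchr(p + (p[1] == '%' ? 2 : 1), '%');
	}
	return true;
}

int main(void)
{
	printf("%d\n", format_is_valid("irisc %d dropped %llx pkts (%%)")); /* 1 */
	printf("%d\n", format_is_valid("bad pointer %p at %s"));            /* 0 */
	return 0;
}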
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 811178d8976c..262dc032e276 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -69,7 +69,7 @@ struct page_pool;
#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8
-#define MLX5E_ETH_HARD_MTU (ETH_HLEN + PSP_ENCAP_HLEN + PSP_TRL_SIZE + VLAN_HLEN + ETH_FCS_LEN)
+#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 35d9530037a6..a8fb4bec369c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -342,9 +342,8 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
rt_dst_entry = &rt->dst;
break;
case AF_INET6:
- rt_dst_entry = ipv6_stub->ipv6_dst_lookup_flow(
- dev_net(netdev), NULL, &fl6, NULL);
- if (IS_ERR(rt_dst_entry))
+ if (!IS_ENABLED(CONFIG_IPV6) ||
+ ip6_dst_lookup(dev_net(netdev), NULL, &rt_dst_entry, &fl6))
goto neigh;
break;
default:
@@ -359,6 +358,9 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
neigh_ha_snapshot(addr, n, netdev);
ether_addr_copy(dst, addr);
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT &&
+ is_zero_ether_addr(addr))
+ neigh_event_send(n, NULL);
dst_release(rt_dst_entry);
neigh_release(n);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
index 38e7c77cc851..9a74438ce10a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
@@ -44,6 +44,7 @@ struct mlx5e_accel_fs_psp_prot {
struct mlx5_flow_table *ft;
struct mlx5_flow_group *miss_group;
struct mlx5_flow_handle *miss_rule;
+ struct mlx5_modify_hdr *rx_modify_hdr;
struct mlx5_flow_destination default_dest;
struct mlx5e_psp_rx_err rx_err;
u32 refcnt;
@@ -286,13 +287,19 @@ out_err:
return err;
}
-static void accel_psp_fs_rx_fs_destroy(struct mlx5e_accel_fs_psp_prot *fs_prot)
+static void accel_psp_fs_rx_fs_destroy(struct mlx5e_psp_fs *fs,
+ struct mlx5e_accel_fs_psp_prot *fs_prot)
{
if (fs_prot->def_rule) {
mlx5_del_flow_rules(fs_prot->def_rule);
fs_prot->def_rule = NULL;
}
+ if (fs_prot->rx_modify_hdr) {
+ mlx5_modify_header_dealloc(fs->mdev, fs_prot->rx_modify_hdr);
+ fs_prot->rx_modify_hdr = NULL;
+ }
+
if (fs_prot->miss_rule) {
mlx5_del_flow_rules(fs_prot->miss_rule);
fs_prot->miss_rule = NULL;
@@ -396,6 +403,7 @@ static int accel_psp_fs_rx_create_ft(struct mlx5e_psp_fs *fs,
modify_hdr = NULL;
goto out_err;
}
+ fs_prot->rx_modify_hdr = modify_hdr;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
@@ -416,7 +424,7 @@ static int accel_psp_fs_rx_create_ft(struct mlx5e_psp_fs *fs,
goto out;
out_err:
- accel_psp_fs_rx_fs_destroy(fs_prot);
+ accel_psp_fs_rx_fs_destroy(fs, fs_prot);
out:
kvfree(flow_group_in);
kvfree(spec);
@@ -433,7 +441,7 @@ static int accel_psp_fs_rx_destroy(struct mlx5e_psp_fs *fs, enum accel_fs_psp_ty
/* The netdev unreg already happened, so all offloaded rule are already removed */
fs_prot = &accel_psp->fs_prot[type];
- accel_psp_fs_rx_fs_destroy(fs_prot);
+ accel_psp_fs_rx_fs_destroy(fs, fs_prot);
accel_psp_fs_rx_err_destroy_ft(fs, &fs_prot->rx_err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6168f0814414..07fc4d2c8fad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -6825,7 +6825,6 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
* is already unregistered before changing to NIC profile.
*/
if (priv->netdev->reg_state == NETREG_REGISTERED) {
- mlx5e_psp_unregister(priv);
unregister_netdev(priv->netdev);
_mlx5e_suspend(adev, false);
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index a2802cfc9b98..a8af84fc9763 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1608,12 +1608,13 @@ void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
{
int mode = fec_active_mode(priv->mdev);
- if (mode == MLX5E_FEC_NOFEC ||
- !MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
+ if (mode == MLX5E_FEC_NOFEC)
return;
- fec_set_corrected_bits_total(priv, fec_stats);
- fec_set_block_stats(priv, mode, fec_stats);
+ if (MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group)) {
+ fec_set_corrected_bits_total(priv, fec_stats);
+ fec_set_block_stats(priv, mode, fec_stats);
+ }
if (MLX5_CAP_PCAM_REG(priv->mdev, pphcr))
fec_set_histograms_stats(priv, mode, hist);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 14884b9ea7f3..a01ee656a1e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -939,7 +939,11 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
sq->dma_fifo_cc = dma_fifo_cc;
sq->cc = sqcc;
- netdev_tx_completed_queue(sq->txq, npkts, nbytes);
+ /* Do not update BQL for TXQs that got replaced by new active ones, as
+ * netdev_tx_reset_queue() is called for them in mlx5e_activate_txqsq().
+ */
+ if (sq == sq->priv->txq2sq[sq->txq_ix])
+ netdev_tx_completed_queue(sq->txq, npkts, nbytes);
}
#ifdef CONFIG_MLX5_CORE_IPOIB
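Aside: the en_tx.c change preserves the byte-queue-limits (BQL) invariant that completions reported for a netdev TX queue stay paired with what was reported sent since the last netdev_tx_reset_queue(). A schematic kernel-style sketch of that pairing (hypothetical foo_* driver, not the mlx5e code):

/* Editor's sketch: bytes reported as completed must balance bytes reported
 * as sent on the same queue since the last netdev_tx_reset_queue().
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *nq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* ...post descriptors to hardware here... */
	netdev_tx_sent_queue(nq, skb->len);	/* credit the queue */
	return NETDEV_TX_OK;
}

static void foo_tx_clean(struct net_device *dev, int qid,
			 unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *nq = netdev_get_tx_queue(dev, qid);

	/* Report completions only against the queue instance that was credited;
	 * doing so after the queue was reset (as for mlx5e's replaced SQs)
	 * breaks the sent/completed balance BQL relies on.
	 */
	netdev_tx_completed_queue(nq, pkts, bytes);
}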
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 8de6c7f6c294..ea94a727633f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -52,6 +52,7 @@
#include "devlink.h"
#include "lag/lag.h"
#include "en/tc/post_meter.h"
+#include "fw_reset.h"
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
@@ -3991,6 +3992,11 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (IS_ERR(esw))
return PTR_ERR(esw);
+ if (mlx5_fw_reset_in_progress(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't change eswitch mode during firmware reset");
+ return -EBUSY;
+ }
+
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 2bceb42c98cc..ae10665c53f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -15,6 +15,7 @@ enum {
MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED,
MLX5_FW_RESET_FLAGS_UNLOAD_EVENT,
+ MLX5_FW_RESET_FLAGS_RESET_IN_PROGRESS,
};
struct mlx5_fw_reset {
@@ -128,6 +129,16 @@ int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_ty
return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL, NULL);
}
+bool mlx5_fw_reset_in_progress(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ if (!fw_reset)
+ return false;
+
+ return test_bit(MLX5_FW_RESET_FLAGS_RESET_IN_PROGRESS, &fw_reset->reset_flags);
+}
+
static int mlx5_fw_reset_get_reset_method(struct mlx5_core_dev *dev,
u8 *reset_method)
{
@@ -243,6 +254,8 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
devl_unlock(devlink);
}
+
+ clear_bit(MLX5_FW_RESET_FLAGS_RESET_IN_PROGRESS, &fw_reset->reset_flags);
}
static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
@@ -462,27 +475,48 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
reset_request_work);
struct mlx5_core_dev *dev = fw_reset->dev;
+ bool nack_request = false;
+ struct devlink *devlink;
int err;
err = mlx5_fw_reset_get_reset_method(dev, &fw_reset->reset_method);
- if (err)
+ if (err) {
+ nack_request = true;
mlx5_core_warn(dev, "Failed reading MFRL, err %d\n", err);
+ } else if (!mlx5_is_reset_now_capable(dev, fw_reset->reset_method) ||
+ test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
+ &fw_reset->reset_flags)) {
+ nack_request = true;
+ }
- if (err || test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) ||
- !mlx5_is_reset_now_capable(dev, fw_reset->reset_method)) {
+ devlink = priv_to_devlink(dev);
+ /* For external resets, try to acquire devl_lock. Skip if devlink reset is
+ * pending (lock already held)
+ */
+ if (nack_request ||
+ (!test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP,
+ &fw_reset->reset_flags) &&
+ !devl_trylock(devlink))) {
err = mlx5_fw_reset_set_reset_sync_nack(dev);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Nack %s",
err ? "Failed" : "Sent");
return;
}
+
if (mlx5_sync_reset_set_reset_requested(dev))
- return;
+ goto unlock;
+
+ set_bit(MLX5_FW_RESET_FLAGS_RESET_IN_PROGRESS, &fw_reset->reset_flags);
err = mlx5_fw_reset_set_reset_sync_ack(dev);
if (err)
mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack Failed. Error code: %d\n", err);
else
mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. Device reset is expected.\n");
+
+unlock:
+ if (!test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags))
+ devl_unlock(devlink);
}
static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev, u16 dev_id)
@@ -722,6 +756,8 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
if (mlx5_sync_reset_clear_reset_requested(dev, true))
return;
+
+ clear_bit(MLX5_FW_RESET_FLAGS_RESET_IN_PROGRESS, &fw_reset->reset_flags);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
}
@@ -758,6 +794,7 @@ static void mlx5_sync_reset_timeout_work(struct work_struct *work)
if (mlx5_sync_reset_clear_reset_requested(dev, true))
return;
+ clear_bit(MLX5_FW_RESET_FLAGS_RESET_IN_PROGRESS, &fw_reset->reset_flags);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Timeout.\n");
}
@@ -844,7 +881,8 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
cancel_work_sync(&fw_reset->reset_reload_work);
cancel_work_sync(&fw_reset->reset_now_work);
cancel_work_sync(&fw_reset->reset_abort_work);
- cancel_delayed_work(&fw_reset->reset_timeout_work);
+ if (test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
+ mlx5_sync_reset_clear_reset_requested(dev, true);
}
static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
index d5b28525c960..2d96b2adc1cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -10,6 +10,7 @@ int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_ty
int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
struct netlink_ext_ack *extack);
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
+bool mlx5_fw_reset_in_progress(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 1ac933cd8f02..a459a30f36ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -1413,6 +1413,7 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
static void mlx5_lag_unregister_hca_devcom_comp(struct mlx5_core_dev *dev)
{
mlx5_devcom_unregister_component(dev->priv.hca_devcom_comp);
+ dev->priv.hca_devcom_comp = NULL;
}
static int mlx5_lag_register_hca_devcom_comp(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index aee17fcf3b36..cdc99fe5c956 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -173,10 +173,15 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
}
/* Handle multipath entry with lower priority value */
- if (mp->fib.mfi && mp->fib.mfi != fi &&
+ if (mp->fib.mfi &&
(mp->fib.dst != fen_info->dst || mp->fib.dst_len != fen_info->dst_len) &&
- fi->fib_priority >= mp->fib.priority)
+ mp->fib.dst_len <= fen_info->dst_len &&
+ !(mp->fib.dst_len == fen_info->dst_len &&
+ fi->fib_priority < mp->fib.priority)) {
+ mlx5_core_dbg(ldev->pf[idx].dev,
+ "Multipath entry with lower priority was rejected\n");
return;
+ }
nh_dev0 = mlx5_lag_get_next_fib_dev(ldev, fi, NULL);
nh_dev1 = mlx5_lag_get_next_fib_dev(ldev, fi, nh_dev0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index aad52d3a90e6..2d86af8f0d9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -67,12 +67,19 @@ err_metadata:
static int enable_mpesw(struct mlx5_lag *ldev)
{
- int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_core_dev *dev0;
int err;
+ int idx;
int i;
- if (idx < 0 || ldev->mode != MLX5_LAG_MODE_NONE)
+ if (ldev->mode == MLX5_LAG_MODE_MPESW)
+ return 0;
+
+ if (ldev->mode != MLX5_LAG_MODE_NONE)
+ return -EINVAL;
+
+ idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ if (idx < 0)
return -EINVAL;
dev0 = ldev->pf[idx].dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 1ab569ce3fcf..4209da722f9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -2231,6 +2231,7 @@ static void shutdown(struct pci_dev *pdev)
mlx5_core_info(dev, "Shutdown was called\n");
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+ mlx5_drain_fw_reset(dev);
mlx5_drain_health_wq(dev);
err = mlx5_try_fast_unload(dev);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 85a9e534f442..7f8bed353e67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -393,9 +393,11 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev,
if (err)
return err;
- *status = MLX5_GET(mcia_reg, out, status);
- if (*status)
+ if (MLX5_GET(mcia_reg, out, status)) {
+ if (status)
+ *status = MLX5_GET(mcia_reg, out, status);
return -EIO;
+ }
ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
memcpy(data, ptr, size);
@@ -429,7 +431,8 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
break;
default:
- mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
+ mlx5_core_dbg(dev, "Module ID not recognized: 0x%x\n",
+ module_id);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 5afe6b155ef0..81935f87bfcd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -440,7 +440,9 @@ int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
rhashtable_remove_fast(&mr_table->route_ht,
&mr_orig_route->ht_node,
mlxsw_sp_mr_route_ht_params);
+ mutex_lock(&mr_table->route_list_lock);
list_del(&mr_orig_route->node);
+ mutex_unlock(&mr_table->route_list_lock);
mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index a2033837182e..2d0e89bd2fb9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2265,6 +2265,7 @@ mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
if (!neigh_entry)
return NULL;
+ neigh_hold(n);
neigh_entry->key.n = n;
neigh_entry->rif = rif;
INIT_LIST_HEAD(&neigh_entry->nexthop_list);
@@ -2274,6 +2275,7 @@ mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
+ neigh_release(neigh_entry->key.n);
kfree(neigh_entry);
}
@@ -2858,6 +2860,11 @@ static int mlxsw_sp_router_schedule_work(struct net *net,
if (!net_work)
return NOTIFY_BAD;
+ /* Take a reference to ensure the neighbour won't be destructed until
+ * we drop the reference in the work item.
+ */
+ neigh_clone(n);
+
INIT_WORK(&net_work->work, cb);
net_work->mlxsw_sp = router->mlxsw_sp;
net_work->n = n;
@@ -2881,11 +2888,6 @@ static int mlxsw_sp_router_schedule_neigh_work(struct mlxsw_sp_router *router,
struct net *net;
net = neigh_parms_net(n->parms);
-
- /* Take a reference to ensure the neighbour won't be destructed until we
- * drop the reference in delayed work.
- */
- neigh_clone(n);
return mlxsw_sp_router_schedule_work(net, router, n,
mlxsw_sp_router_neigh_event_work);
}
@@ -4320,6 +4322,8 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_neigh_entry_insert;
+ neigh_release(old_n);
+
read_lock_bh(&n->lock);
nud_state = n->nud_state;
dead = n->dead;
@@ -4328,14 +4332,10 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry(nh, &neigh_entry->nexthop_list,
neigh_list_node) {
- neigh_release(old_n);
- neigh_clone(n);
__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
- neigh_release(n);
-
return 0;
err_neigh_entry_insert:
@@ -4428,6 +4428,11 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
}
}
+ /* Release the reference taken by neigh_lookup() / neigh_create() since
+ * neigh_entry already holds one.
+ */
+ neigh_release(n);
+
/* If that is the first nexthop connected to that neigh, add to
* nexthop_neighs_list
*/
@@ -4454,11 +4459,9 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
- struct neighbour *n;
if (!neigh_entry)
return;
- n = neigh_entry->key.n;
__mlxsw_sp_nexthop_neigh_update(nh, true);
list_del(&nh->neigh_list_node);
@@ -4472,8 +4475,6 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
-
- neigh_release(n);
}
static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index efb4e412ec7e..0055c231acf6 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -481,7 +481,7 @@ static void mana_serv_reset(struct pci_dev *pdev)
/* Perform PCI rescan on device if we failed on HWC */
dev_err(&pdev->dev, "MANA service: resume failed, rescanning\n");
mana_serv_rescan(pdev);
- goto out;
+ return;
}
if (ret)
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 08bee56aea35..c345d9b17c89 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -2307,14 +2307,16 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
/* Now, set PGIDs for each active LAG */
for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
- struct net_device *bond = ocelot->ports[lag]->bond;
+ struct ocelot_port *ocelot_port = ocelot->ports[lag];
int num_active_ports = 0;
+ struct net_device *bond;
unsigned long bond_mask;
u8 aggr_idx[16];
- if (!bond || (visited & BIT(lag)))
+ if (!ocelot_port || !ocelot_port->bond || (visited & BIT(lag)))
continue;
+ bond = ocelot_port->bond;
bond_mask = ocelot_get_bond_mask(ocelot, bond);
for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 405e91eb3141..755083852eef 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2655,9 +2655,6 @@ static void rtl_wol_enable_rx(struct rtl8169_private *tp)
static void rtl_prepare_power_down(struct rtl8169_private *tp)
{
- if (tp->dash_enabled)
- return;
-
if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
tp->mac_version == RTL_GIGA_MAC_VER_33)
rtl_ephy_write(tp, 0x19, 0xff64);
@@ -4812,7 +4809,7 @@ static void rtl8169_down(struct rtl8169_private *tp)
rtl_disable_exit_l1(tp);
rtl_prepare_power_down(tp);
- if (tp->dash_type != RTL_DASH_NONE)
+ if (tp->dash_type != RTL_DASH_NONE && !tp->saved_wolopts)
rtl8168_driver_stop(tp);
}
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 9d1a83a5fa7e..d16c178d1034 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -516,15 +516,7 @@ static inline void smc_rcv(struct net_device *dev)
* any other concurrent access and C would always interrupt B. But life
* isn't that easy in a SMP world...
*/
-#define smc_special_trylock(lock, flags) \
-({ \
- int __ret; \
- local_irq_save(flags); \
- __ret = spin_trylock(lock); \
- if (!__ret) \
- local_irq_restore(flags); \
- __ret; \
-})
+#define smc_special_trylock(lock, flags) spin_trylock_irqsave(lock, flags)
#define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags)
#define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags)
#else
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index da206b24aaed..b3730312aeed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -89,6 +89,7 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_XDP_CONSUMED BIT(0)
#define STMMAC_XDP_TX BIT(1)
#define STMMAC_XDP_REDIRECT BIT(2)
+#define STMMAC_XSK_CONSUMED BIT(3)
static int flow_ctrl = 0xdead;
module_param(flow_ctrl, int, 0644);
@@ -5126,6 +5127,7 @@ static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
struct xdp_buff *xdp)
{
+ bool zc = !!(xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL);
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
int cpu = smp_processor_id();
struct netdev_queue *nq;
@@ -5142,9 +5144,18 @@ static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
/* Avoids TX time-out as we are sharing with slow path */
txq_trans_cond_update(nq);
- res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
- if (res == STMMAC_XDP_TX)
+ /* For zero copy XDP_TX action, dma_map is true */
+ res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, zc);
+ if (res == STMMAC_XDP_TX) {
stmmac_flush_tx_descriptors(priv, queue);
+ } else if (res == STMMAC_XDP_CONSUMED && zc) {
+ /* xdp has been freed by xdp_convert_buff_to_frame(),
+ * no need to call xsk_buff_free() again, so return
+ * STMMAC_XSK_CONSUMED.
+ */
+ res = STMMAC_XSK_CONSUMED;
+ xdp_return_frame(xdpf);
+ }
__netif_tx_unlock(nq);
@@ -5494,6 +5505,8 @@ read_again:
break;
case STMMAC_XDP_CONSUMED:
xsk_buff_free(buf->xdp);
+ fallthrough;
+ case STMMAC_XSK_CONSUMED:
rx_dropped++;
break;
case STMMAC_XDP_TX:
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index a54d71155263..fe5b2926d8ab 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -209,6 +209,7 @@ config TI_ICSSG_PRUETH_SR1
depends on PRU_REMOTEPROC
depends on NET_SWITCHDEV
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ depends on PTP_1588_CLOCK_OPTIONAL
help
Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem.
This subsystem is available on the AM65 SR1.0 platform.
@@ -234,7 +235,7 @@ config TI_PRUETH
depends on PRU_REMOTEPROC
depends on NET_SWITCHDEV
select TI_ICSS_IEP
- imply PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK_OPTIONAL
help
Some TI SoCs has Programmable Realtime Unit (PRU) cores which can
support Single or Dual Ethernet ports with the help of firmware code
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index d138dea7d208..ec278f99d295 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -21,6 +21,7 @@ config LIBWX
depends on PTP_1588_CLOCK_OPTIONAL
select PAGE_POOL
select DIMLIB
+ select PHYLINK
help
Common library for Wangxun(R) Ethernet drivers.
@@ -29,7 +30,6 @@ config NGBE
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
select LIBWX
- select PHYLINK
help
This driver supports Wangxun(R) GbE PCI Express family of
adapters.
@@ -48,7 +48,6 @@ config TXGBE
depends on PTP_1588_CLOCK_OPTIONAL
select MARVELL_10G_PHY
select REGMAP
- select PHYLINK
select HWMON if TXGBE=y
select SFP
select GPIOLIB
@@ -71,7 +70,6 @@ config TXGBEVF
depends on PCI_MSI
depends on PTP_1588_CLOCK_OPTIONAL
select LIBWX
- select PHYLINK
help
This driver supports virtual functions for SP1000A, WX1820AL,
WX5XXX, WX5XXXAL.
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index b9b5554ea862..5ad2673f213d 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -334,7 +334,7 @@ int fjes_hw_init(struct fjes_hw *hw)
ret = fjes_hw_reset(hw);
if (ret)
- return ret;
+ goto err_iounmap;
fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
@@ -347,8 +347,10 @@ int fjes_hw_init(struct fjes_hw *hw)
hw->max_epid = fjes_hw_get_max_epid(hw);
hw->my_epid = fjes_hw_get_my_epid(hw);
- if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
- return -ENXIO;
+ if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid)) {
+ ret = -ENXIO;
+ goto err_iounmap;
+ }
ret = fjes_hw_setup(hw);
@@ -356,6 +358,10 @@ int fjes_hw_init(struct fjes_hw *hw)
hw->hw_info.trace_size = FJES_DEBUG_BUFFER_SIZE;
return ret;
+
+err_iounmap:
+ fjes_hw_iounmap(hw);
+ return ret;
}
void fjes_hw_exit(struct fjes_hw *hw)
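Aside: the fjes_hw_init() fix routes every failure after the ioremap through one unwind label instead of returning directly. A minimal standalone sketch of that goto-unwind convention (plain C, hypothetical names):

/* Editor's sketch: acquire resources in order and, on failure, jump to a
 * label that releases only what has already succeeded, in reverse order.
 */
#include <stdio.h>
#include <stdlib.h>

struct dev_state {
	void *regs;    /* stands in for the ioremap()ed register window */
	void *buffers; /* stands in for later allocations */
};

static int device_init(struct dev_state *st)
{
	int ret;

	st->regs = malloc(64);
	if (!st->regs)
		return -1;

	st->buffers = malloc(256);
	if (!st->buffers) {
		ret = -1;
		goto err_unmap;	/* undo only what already succeeded */
	}

	/* further setup steps that may fail would also jump to err_* labels */
	return 0;

err_unmap:
	free(st->regs);
	st->regs = NULL;
	return ret;
}

int main(void)
{
	struct dev_state st = { 0 };

	printf("device_init() = %d\n", device_init(&st));
	free(st.buffers);
	free(st.regs);
	return 0;
}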
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index dea411e132db..2efa3ba148aa 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -737,6 +737,9 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
struct ethhdr *eth = eth_hdr(skb);
rx_handler_result_t ret = RX_HANDLER_PASS;
+ if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
+ return RX_HANDLER_PASS;
+
if (is_multicast_ether_addr(eth->h_dest)) {
if (ipvlan_external_frame(skb, port)) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index e55be6dc9ae7..d6b9004c61dc 100644
--- a/drivers/net/mdio/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
@@ -63,6 +63,13 @@ static int aspeed_mdio_op(struct mii_bus *bus, u8 st, u8 op, u8 phyad, u8 regad,
iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
+ /* Workaround for read-after-write issue.
+ * The controller may return stale data if a read follows immediately
+ * after a write. A dummy read forces the hardware to update its
+ * internal state, ensuring that the next real read returns correct data.
+ */
+ ioread32(ctx->base + ASPEED_MDIO_CTRL);
+
return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
!(ctrl & ASPEED_MDIO_CTRL_FIRE),
ASPEED_MDIO_INTERVAL_US,
diff --git a/drivers/net/mdio/mdio-realtek-rtl9300.c b/drivers/net/mdio/mdio-realtek-rtl9300.c
index 33694c3ff9a7..405a07075dd1 100644
--- a/drivers/net/mdio/mdio-realtek-rtl9300.c
+++ b/drivers/net/mdio/mdio-realtek-rtl9300.c
@@ -354,7 +354,6 @@ static int rtl9300_mdiobus_probe_one(struct device *dev, struct rtl9300_mdio_pri
struct fwnode_handle *node)
{
struct rtl9300_mdio_chan *chan;
- struct fwnode_handle *child;
struct mii_bus *bus;
u32 mdio_bus;
int err;
@@ -371,7 +370,7 @@ static int rtl9300_mdiobus_probe_one(struct device *dev, struct rtl9300_mdio_pri
* compatible = "ethernet-phy-ieee802.3-c45". This does mean we can't
* support both c45 and c22 on the same MDIO bus.
*/
- fwnode_for_each_child_node(node, child)
+ fwnode_for_each_child_node_scoped(node, child)
if (fwnode_device_is_compatible(child, "ethernet-phy-ieee802.3-c45"))
priv->smi_bus_is_c45[mdio_bus] = true;
@@ -409,7 +408,6 @@ static int rtl9300_mdiobus_map_ports(struct device *dev)
{
struct rtl9300_mdio_priv *priv = dev_get_drvdata(dev);
struct device *parent = dev->parent;
- struct fwnode_handle *port;
int err;
struct fwnode_handle *ports __free(fwnode_handle) =
@@ -418,7 +416,7 @@ static int rtl9300_mdiobus_map_ports(struct device *dev)
return dev_err_probe(dev, -EINVAL, "%pfwP missing ethernet-ports\n",
dev_fwnode(parent));
- fwnode_for_each_child_node(ports, port) {
+ fwnode_for_each_child_node_scoped(ports, port) {
struct device_node *mdio_dn;
u32 addr;
u32 bus;
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index 70e8c38ddad6..d16b95304aa7 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -332,6 +332,11 @@ static ssize_t link_device_store(const struct bus_type *bus, const char *buf, si
rcu_assign_pointer(nsim_a->peer, nsim_b);
rcu_assign_pointer(nsim_b->peer, nsim_a);
+ if (netif_running(dev_a) && netif_running(dev_b)) {
+ netif_carrier_on(dev_a);
+ netif_carrier_on(dev_b);
+ }
+
out_err:
put_net(ns_b);
put_net(ns_a);
@@ -381,6 +386,9 @@ static ssize_t unlink_device_store(const struct bus_type *bus, const char *buf,
if (!peer)
goto out_put_netns;
+ netif_carrier_off(dev);
+ netif_carrier_off(peer->netdev);
+
err = 0;
RCU_INIT_POINTER(nsim->peer, NULL);
RCU_INIT_POINTER(peer->peer, NULL);
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index f3d83b04c953..201dee1a1698 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -698,7 +698,7 @@ static int mv88q2xxx_hwmon_write(struct device *dev,
switch (attr) {
case hwmon_temp_max:
- clamp_val(val, -75000, 180000);
+ val = clamp(val, -75000, 180000);
val = (val / 1000) + 75;
val = FIELD_PREP(MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK,
val);
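Aside: the marvell-88q2xxx fix matters because clamp-style helpers return the bounded value instead of modifying their argument, so the result has to be assigned. A tiny standalone illustration (plain C, editor's own CLAMP macro):

/* Editor's illustration: calling a clamp helper without assigning the result,
 * as the removed line did, has no effect on the variable.
 */
#include <stdio.h>

#define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

int main(void)
{
	long val = 250000;

	CLAMP(val, -75000, 180000);       /* result discarded: val unchanged */
	printf("discarded: %ld\n", val);  /* 250000 */

	val = CLAMP(val, -75000, 180000); /* assigned: val is now bounded */
	printf("assigned:  %ld\n", val);  /* 180000 */
	return 0;
}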
diff --git a/drivers/net/phy/mediatek/mtk-ge-soc.c b/drivers/net/phy/mediatek/mtk-ge-soc.c
index cd09fbf92ef2..2c4bbc236202 100644
--- a/drivers/net/phy/mediatek/mtk-ge-soc.c
+++ b/drivers/net/phy/mediatek/mtk-ge-soc.c
@@ -1167,9 +1167,9 @@ static int mt798x_phy_calibration(struct phy_device *phydev)
}
buf = (u32 *)nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
if (IS_ERR(buf))
return PTR_ERR(buf);
- nvmem_cell_put(cell);
if (!buf[0] || !buf[1] || !buf[2] || !buf[3] || len < 4 * sizeof(u32)) {
phydev_err(phydev, "invalid efuse data\n");
diff --git a/drivers/net/phy/mxl-86110.c b/drivers/net/phy/mxl-86110.c
index e5d137a37a1d..42a5fe3f115f 100644
--- a/drivers/net/phy/mxl-86110.c
+++ b/drivers/net/phy/mxl-86110.c
@@ -938,6 +938,9 @@ static struct phy_driver mxl_phy_drvs[] = {
PHY_ID_MATCH_EXACT(PHY_ID_MXL86110),
.name = "MXL86110 Gigabit Ethernet",
.config_init = mxl86110_config_init,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .soft_reset = genphy_soft_reset,
.get_wol = mxl86110_get_wol,
.set_wol = mxl86110_set_wol,
.led_brightness_set = mxl86110_led_brightness_set,
diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c
index 67ecf3d4af2b..6ff0385201a5 100644
--- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -691,10 +691,6 @@ static int rtl8211f_config_aldps(struct phy_device *phydev)
static int rtl8211f_config_phy_eee(struct phy_device *phydev)
{
- /* RTL8211FVD has no PHYCR2 register */
- if (phydev->drv->phy_id == RTL_8211FVD_PHYID)
- return 0;
-
/* Disable PHY-mode EEE so LPI is passed to the MAC */
return phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR2,
RTL8211F_PHYCR2_PHY_EEE_ENABLE, 0);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 0401fa6b24d2..84bef5099dda 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -497,6 +497,8 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK("ALCATELLUCENT", "3FE46541AA", sfp_quirk_2500basex,
sfp_fixup_nokia),
+ SFP_QUIRK_F("BIDB", "X-ONU-SFPP", sfp_fixup_potron),
+
// FLYPRO SFP-10GT-CS-30M uses Rollball protocol to talk to the PHY.
SFP_QUIRK_F("FLYPRO", "SFP-10GT-CS-30M", sfp_fixup_rollball),
@@ -763,7 +765,7 @@ static int sfp_smbus_byte_write(struct sfp *sfp, bool a2, u8 dev_addr,
dev_addr++;
}
- return 0;
+ return data - (u8 *)buf;
}
static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index 4d5c9ae8f221..c08a5c1bd6e4 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -878,7 +878,7 @@ static void __team_queue_override_enabled_check(struct team *team)
static void team_queue_override_port_prio_changed(struct team *team,
struct team_port *port)
{
- if (!port->queue_id || team_port_enabled(port))
+ if (!port->queue_id || !team_port_enabled(port))
return;
__team_queue_override_port_del(team, port);
__team_queue_override_port_add(team, port);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 7fd763917ae2..6ab3486072cb 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -335,6 +335,11 @@ int asix_read_phy_addr(struct usbnet *dev, bool internal)
offset = (internal ? 1 : 0);
ret = buf[offset];
+ if (ret >= PHY_MAX_ADDR) {
+ netdev_err(dev->net, "invalid PHY address: %d\n", ret);
+ return -ENODEV;
+ }
+
netdev_dbg(dev->net, "%s PHY address 0x%x\n",
internal ? "internal" : "external", ret);
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index f613e4bc68c8..758a423a459b 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -210,11 +210,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
ret = asix_read_phy_addr(dev, priv->use_embdphy);
if (ret < 0)
goto free;
- if (ret >= PHY_MAX_ADDR) {
- netdev_err(dev->net, "Invalid PHY address %#x\n", ret);
- ret = -ENODEV;
- goto free;
- }
+
priv->phy_addr = ret;
ax88172a_reset_phy(dev, priv->use_embdphy);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 81ca64debc5b..c514483134f0 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -168,6 +168,8 @@ static int update_eth_regs_async(pegasus_t *pegasus)
netif_device_detach(pegasus->net);
netif_err(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ usb_free_urb(async_urb);
+ kfree(req);
}
return ret;
}
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 278e6cb6f4d9..e40b0669d9f4 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -211,6 +211,8 @@ static int async_set_registers(rtl8150_t *dev, u16 indx, u16 size, u16 reg)
if (res == -ENODEV)
netif_device_detach(dev->netdev);
dev_err(&dev->udev->dev, "%s failed with %d\n", __func__, res);
+ kfree(req);
+ usb_free_urb(async_urb);
}
return res;
}
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 091bc2aca7e8..820c4c506979 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -52,7 +52,7 @@ static int sr_read_reg(struct usbnet *dev, u8 reg, u8 *value)
static int sr_write_reg(struct usbnet *dev, u8 reg, u8 value)
{
- return usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG,
+ return usbnet_write_cmd(dev, SR_WR_REG, SR_REQ_WR_REG,
value, reg, NULL, 0);
}
@@ -65,7 +65,7 @@ static void sr_write_async(struct usbnet *dev, u8 reg, u16 length,
static void sr_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
{
- usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG,
+ usbnet_write_cmd_async(dev, SR_WR_REG, SR_REQ_WR_REG,
value, reg, NULL, 0);
}
@@ -539,6 +539,11 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0fe6, 0x9700), /* SR9700 device */
.driver_info = (unsigned long)&sr9700_driver_info,
},
+ {
+ /* SR9700 with virtual driver CD-ROM - interface 0 is the CD-ROM device */
+ USB_DEVICE_INTERFACE_NUMBER(0x0fe6, 0x9702, 1),
+ .driver_info = (unsigned long)&sr9700_driver_info,
+ },
{}, /* END */
};
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 1d9faa70ba3b..36742e64cff7 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -831,7 +831,6 @@ int usbnet_stop(struct net_device *net)
clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue(net);
- netdev_reset_queue(net);
netif_info(dev, ifdown, dev->net,
"stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
@@ -875,6 +874,8 @@ int usbnet_stop(struct net_device *net)
timer_delete_sync(&dev->delay);
cancel_work_sync(&dev->kevent);
+ netdev_reset_queue(net);
+
if (!pm)
usb_autopm_put_interface(dev->intf);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1bb3aeca66c6..22d894101c01 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3791,7 +3791,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
if (vi->has_rss && !netif_is_rxfh_configured(dev)) {
old_rss_hdr = vi->rss_hdr;
old_rss_trailer = vi->rss_trailer;
- vi->rss_hdr = devm_kzalloc(&dev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
+ vi->rss_hdr = devm_kzalloc(&vi->vdev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
if (!vi->rss_hdr) {
vi->rss_hdr = old_rss_hdr;
return -ENOMEM;
@@ -3802,7 +3802,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
if (!virtnet_commit_rss_command(vi)) {
/* restore ctrl_rss if commit_rss_command failed */
- devm_kfree(&dev->dev, vi->rss_hdr);
+ devm_kfree(&vi->vdev->dev, vi->rss_hdr);
vi->rss_hdr = old_rss_hdr;
vi->rss_trailer = old_rss_trailer;
@@ -3810,7 +3810,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
queue_pairs);
return -EINVAL;
}
- devm_kfree(&dev->dev, old_rss_hdr);
+ devm_kfree(&vi->vdev->dev, old_rss_hdr);
goto succ;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 3391f07b01de..f8fc6f30fbe5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1597,7 +1597,7 @@ static void _iwl_op_mode_stop(struct iwl_drv *drv)
*/
static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
{
- unsigned int min_core, max_core, loaded_core;
+ int min_core, max_core, loaded_core;
struct iwl_drv *drv = context;
struct iwl_fw *fw = &drv->fw;
const struct iwl_ucode_header *ucode;
@@ -1676,7 +1676,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (loaded_core < min_core || loaded_core > max_core) {
IWL_ERR(drv,
"Driver unable to support your firmware API. "
- "Driver supports FW core %u..%u, firmware is %u.\n",
+ "Driver supports FW core %d..%d, firmware is %d.\n",
min_core, max_core, loaded_core);
goto try_again;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ptp.c b/drivers/net/wireless/intel/iwlwifi/mld/ptp.c
index ffeb37a7f830..231920425c06 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/ptp.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ptp.c
@@ -121,6 +121,12 @@ static int iwl_mld_ptp_gettime(struct ptp_clock_info *ptp,
return 0;
}
+static int iwl_mld_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
static int iwl_mld_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
@@ -279,6 +285,7 @@ void iwl_mld_ptp_init(struct iwl_mld *mld)
mld->ptp_data.ptp_clock_info.owner = THIS_MODULE;
mld->ptp_data.ptp_clock_info.gettime64 = iwl_mld_ptp_gettime;
+ mld->ptp_data.ptp_clock_info.settime64 = iwl_mld_ptp_settime;
mld->ptp_data.ptp_clock_info.max_adj = 0x7fffffff;
mld->ptp_data.ptp_clock_info.adjtime = iwl_mld_ptp_adjtime;
mld->ptp_data.ptp_clock_info.adjfine = iwl_mld_ptp_adjfine;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ptp.c b/drivers/net/wireless/intel/iwlwifi/mvm/ptp.c
index 06a4c9f74797..ad156b82eaa9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ptp.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ptp.c
@@ -220,6 +220,12 @@ static int iwl_mvm_ptp_gettime(struct ptp_clock_info *ptp,
return 0;
}
+static int iwl_mvm_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
static int iwl_mvm_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct iwl_mvm *mvm = container_of(ptp, struct iwl_mvm,
@@ -281,6 +287,7 @@ void iwl_mvm_ptp_init(struct iwl_mvm *mvm)
mvm->ptp_data.ptp_clock_info.adjfine = iwl_mvm_ptp_adjfine;
mvm->ptp_data.ptp_clock_info.adjtime = iwl_mvm_ptp_adjtime;
mvm->ptp_data.ptp_clock_info.gettime64 = iwl_mvm_ptp_gettime;
+ mvm->ptp_data.ptp_clock_info.settime64 = iwl_mvm_ptp_settime;
mvm->ptp_data.scaled_freq = SCALE_FACTOR;
/* Give a short 'friendly name' to identify the PHC clock */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index ea99167765b0..0457712286d5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -3019,7 +3019,7 @@ int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm,
}
hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
- dev_info(dev->dev, "WM Firmware Version: %.10s, Build Time: %.15s\n",
+ dev_info(dev->dev, "WM Firmware Version: %.10s, Build Time: %.15s",
hdr->fw_ver, hdr->build_date);
ret = mt76_connac_mcu_send_ram_firmware(dev, hdr, fw->data, false);
@@ -3048,7 +3048,7 @@ int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm,
}
hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
- dev_info(dev->dev, "WA Firmware Version: %.10s, Build Time: %.15s\n",
+ dev_info(dev->dev, "WA Firmware Version: %.10s, Build Time: %.15s",
hdr->fw_ver, hdr->build_date);
ret = mt76_connac_mcu_send_ram_firmware(dev, hdr, fw->data, true);
@@ -3101,7 +3101,6 @@ int mt76_connac2_load_patch(struct mt76_dev *dev, const char *fw_name)
int i, ret, sem, max_len = mt76_is_sdio(dev) ? 2048 : 4096;
const struct mt76_connac2_patch_hdr *hdr;
const struct firmware *fw = NULL;
- char build_date[17];
sem = mt76_connac_mcu_patch_sem_ctrl(dev, true);
switch (sem) {
@@ -3125,11 +3124,8 @@ int mt76_connac2_load_patch(struct mt76_dev *dev, const char *fw_name)
}
hdr = (const void *)fw->data;
- strscpy(build_date, hdr->build_date, sizeof(build_date));
- build_date[16] = '\0';
- strim(build_date);
- dev_info(dev->dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
- be32_to_cpu(hdr->hw_sw_ver), build_date);
+ dev_info(dev->dev, "HW/SW Version: 0x%x, Build Time: %.16s",
+ be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
struct mt76_connac2_patch_sec *sec;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
index aa702ba7c9f5..d6c35e8d02a5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
@@ -511,7 +511,8 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
if (sta) {
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
tid = ieee80211_get_tid(hdr);
- agg_state = sta_entry->tids[tid].agg.agg_state;
+ if (tid < MAX_TID_COUNT)
+ agg_state = sta_entry->tids[tid].agg.agg_state;
ampdu_density = sta->deflink.ht_cap.ampdu_density;
}
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index 99d7c629eac6..e35de52d8eb4 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -144,8 +144,10 @@ static u32 rtw_sdio_to_io_address(struct rtw_dev *rtwdev, u32 addr,
static bool rtw_sdio_use_direct_io(struct rtw_dev *rtwdev, u32 addr)
{
+ bool might_indirect_under_power_off = rtwdev->chip->id == RTW_CHIP_TYPE_8822C;
+
if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags) &&
- !rtw_sdio_is_bus_addr(addr))
+ !rtw_sdio_is_bus_addr(addr) && might_indirect_under_power_off)
return false;
return !rtw_sdio_is_sdio30_supported(rtwdev) ||
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index 009202c627d2..3b5126ffc81a 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -965,8 +965,7 @@ static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
struct sk_buff *rx_skb;
int i;
- rtwusb->rxwq = alloc_workqueue("rtw88_usb: rx wq", WQ_BH | WQ_UNBOUND,
- 0);
+ rtwusb->rxwq = alloc_workqueue("rtw88_usb: rx wq", WQ_BH, 0);
if (!rtwusb->rxwq) {
rtw_err(rtwdev, "failed to create RX work queue\n");
return -ENOMEM;
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index f76087be2f75..6241866d39df 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -207,6 +207,11 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);
if (total_blocks <= wl->tx_blocks_available) {
+ if (skb_headroom(skb) < (total_len - skb->len) &&
+ pskb_expand_head(skb, (total_len - skb->len), 0, GFP_ATOMIC)) {
+ wl1271_free_tx_id(wl, id);
+ return -EAGAIN;
+ }
desc = skb_push(skb, total_len - skb->len);
wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index 551f5eb4e747..79cc63272134 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -4040,7 +4040,7 @@ mac80211_hwsim_nan_dw_start(struct hrtimer *timer)
ieee80211_vif_to_wdev(data->nan_device_vif);
if (data->nan_curr_dw_band == NL80211_BAND_5GHZ)
- ch = ieee80211_get_channel(hw->wiphy, 5475);
+ ch = ieee80211_get_channel(hw->wiphy, 5745);
else
ch = ieee80211_get_channel(hw->wiphy, 2437);
@@ -4112,14 +4112,14 @@ static int mac80211_hwsim_stop_nan(struct ieee80211_hw *hw,
hrtimer_cancel(&data->nan_timer);
data->nan_device_vif = NULL;
- spin_lock(&hwsim_radio_lock);
+ spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry(data2, &hwsim_radios, list) {
if (data2->nan_device_vif) {
nan_cluster_running = true;
break;
}
}
- spin_unlock(&hwsim_radio_lock);
+ spin_unlock_bh(&hwsim_radio_lock);
if (!nan_cluster_running)
memset(hwsim_nan_cluster_id, 0, ETH_ALEN);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.c b/drivers/net/wwan/iosm/iosm_ipc_mux.c
index fc928b298a98..b846889fcb09 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.c
@@ -456,6 +456,7 @@ void ipc_mux_deinit(struct iosm_mux *ipc_mux)
struct sk_buff_head *free_list;
union mux_msg mux_msg;
struct sk_buff *skb;
+ int i;
if (!ipc_mux->initialized)
return;
@@ -479,5 +480,10 @@ void ipc_mux_deinit(struct iosm_mux *ipc_mux)
ipc_mux->channel->dl_pipe.is_open = false;
}
+ if (ipc_mux->protocol != MUX_LITE) {
+ for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++)
+ kfree(ipc_mux->ul_adb.pp_qlt[i]);
+ }
+
kfree(ipc_mux);
}
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index ffd7367ce119..018a80674f06 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -406,7 +406,7 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
if (rc || (transferred != sizeof(cmd))) {
nfc_err(&phy->udev->dev,
"Reader power on cmd error %d\n", rc);
- return rc;
+ return rc ?: -EINVAL;
}
rc = usb_submit_urb(phy->in_urb, GFP_KERNEL);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index d378d4b4109f..331646d667b9 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -503,8 +503,8 @@ void __init early_init_fdt_scan_reserved_mem(void)
if (!initial_boot_params)
return;
- fdt_scan_reserved_mem();
fdt_reserve_elfcorehdr();
+ fdt_scan_reserved_mem();
/* Process header /memreserve/ fields */
for (n = 0; ; n++) {
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 388e9ec2cccf..3b773aaf9d05 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1985,7 +1985,6 @@ static void attach_node_and_children(struct device_node *np)
*/
static int __init unittest_data_add(void)
{
- void *unittest_data;
void *unittest_data_align;
struct device_node *unittest_data_node = NULL, *np;
/*
@@ -2004,7 +2003,7 @@ static int __init unittest_data_add(void)
}
/* creating copy */
- unittest_data = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL);
+ void *unittest_data __free(kfree) = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL);
if (!unittest_data)
return -ENOMEM;
@@ -2014,12 +2013,10 @@ static int __init unittest_data_add(void)
ret = of_fdt_unflatten_tree(unittest_data_align, NULL, &unittest_data_node);
if (!ret) {
pr_warn("%s: unflatten testcases tree failed\n", __func__);
- kfree(unittest_data);
return -ENODATA;
}
if (!unittest_data_node) {
pr_warn("%s: testcases tree is empty\n", __func__);
- kfree(unittest_data);
return -ENODATA;
}
@@ -2038,7 +2035,6 @@ static int __init unittest_data_add(void)
/* attach the sub-tree to live tree */
if (!of_root) {
pr_warn("%s: no live tree to attach sub-tree\n", __func__);
- kfree(unittest_data);
rc = -ENODEV;
goto unlock;
}
@@ -2059,6 +2055,8 @@ static int __init unittest_data_add(void)
EXPECT_END(KERN_INFO,
"Duplicate name in testcase-data, renamed to \"duplicate-name#1\"");
+ retain_and_null_ptr(unittest_data);
+
unlock:
of_overlay_mutex_unlock();
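Aside: the of/unittest.c hunks move the copied FDT buffer to scope-based cleanup (__free(kfree)), dropping the per-error-path kfree() calls and keeping the allocation on success by clearing the tracked pointer. A rough userspace analogue built on the same compiler cleanup attribute (GCC/Clang extension, hypothetical names, not the kernel helpers):

/* Editor's sketch of the auto-free idiom: error paths need no explicit free,
 * and success "retains" the buffer by nulling the tracked pointer.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void free_charp(char **p)
{
	free(*p);	/* no-op when ownership was handed off (pointer is NULL) */
}

#define AUTO_FREE_CHAR __attribute__((cleanup(free_charp)))

static int parse_blob(const char *src, char **out)
{
	AUTO_FREE_CHAR char *buf = strdup(src);

	if (!buf)
		return -1;
	if (strlen(buf) < 4)
		return -1;	/* freed automatically on this error path */

	*out = buf;
	buf = NULL;	/* success: retain the buffer, disarm the cleanup */
	return 0;
}

int main(void)
{
	char *blob = NULL;

	printf("short: %d\n", parse_blob("ab", &blob));         /* -1, auto-freed */
	printf("ok:    %d\n", parse_blob("devicetree", &blob)); /*  0, retained  */
	free(blob);
	return 0;
}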
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index a6eb6bffa5ea..eefb2bac8443 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -578,8 +578,8 @@ sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, phys_addr_t pba,
pba &= IOVP_MASK;
pba |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
- pba |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
- *pdir_ptr = cpu_to_le64(pba); /* swap and store into I/O Pdir */
+ /* set "valid" bit, swap and store into I/O Pdir */
+ *pdir_ptr = cpu_to_le64((unsigned long)pba | SBA_PDIR_VALID_BIT);
/*
* If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 54b6a4196f17..0694084f612b 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -37,7 +37,6 @@
#define PCIE_CFG_STATUS17 0x44
#define PM_CURRENT_STATE(x) (((x) >> 7) & 0x1)
-#define WAIT_LINKUP_TIMEOUT 4000
#define PORT_CLK_RATE 100000000UL
#define MAX_PAYLOAD_SIZE 256
#define MAX_READ_REQ_SIZE 256
@@ -350,40 +349,10 @@ static struct pci_ops meson_pci_ops = {
static bool meson_pcie_link_up(struct dw_pcie *pci)
{
struct meson_pcie *mp = to_meson_pcie(pci);
- struct device *dev = pci->dev;
- u32 speed_okay = 0;
- u32 cnt = 0;
- u32 state12, state17, smlh_up, ltssm_up, rdlh_up;
-
- do {
- state12 = meson_cfg_readl(mp, PCIE_CFG_STATUS12);
- state17 = meson_cfg_readl(mp, PCIE_CFG_STATUS17);
- smlh_up = IS_SMLH_LINK_UP(state12);
- rdlh_up = IS_RDLH_LINK_UP(state12);
- ltssm_up = IS_LTSSM_UP(state12);
-
- if (PM_CURRENT_STATE(state17) < PCIE_GEN3)
- speed_okay = 1;
-
- if (smlh_up)
- dev_dbg(dev, "smlh_link_up is on\n");
- if (rdlh_up)
- dev_dbg(dev, "rdlh_link_up is on\n");
- if (ltssm_up)
- dev_dbg(dev, "ltssm_up is on\n");
- if (speed_okay)
- dev_dbg(dev, "speed_okay\n");
-
- if (smlh_up && rdlh_up && ltssm_up && speed_okay)
- return true;
-
- cnt++;
-
- udelay(10);
- } while (cnt < WAIT_LINKUP_TIMEOUT);
-
- dev_err(dev, "error: wait linkup timeout\n");
- return false;
+ u32 state12;
+
+ state12 = meson_cfg_readl(mp, PCIE_CFG_STATUS12);
+ return IS_SMLH_LINK_UP(state12) && IS_RDLH_LINK_UP(state12);
}
static int meson_pcie_host_init(struct dw_pcie_rp *pp)
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 7b92e7a1c0d9..5a318487b2b3 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1047,7 +1047,6 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
pcie->parf + PARF_NO_SNOOP_OVERRIDE);
- qcom_pcie_clear_aspm_l0s(pcie->pci);
qcom_pcie_clear_hpc(pcie->pci);
return 0;
@@ -1316,6 +1315,8 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
goto err_disable_phy;
}
+ qcom_pcie_clear_aspm_l0s(pcie->pci);
+
qcom_ep_reset_deassert(pcie);
if (pcie->cfg->ops->config_sid) {
@@ -1464,6 +1465,7 @@ static const struct qcom_pcie_cfg cfg_2_1_0 = {
static const struct qcom_pcie_cfg cfg_2_3_2 = {
.ops = &ops_2_3_2,
+ .no_l0s = true,
};
static const struct qcom_pcie_cfg cfg_2_3_3 = {
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index b9c252aa6fe0..280cd50d693b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -6308,7 +6308,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5020, of_pci_make_dev_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5021, of_pci_make_dev_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REDHAT, 0x0005, of_pci_make_dev_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_EFAR, 0x9660, of_pci_make_dev_node);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_RPI, PCI_DEVICE_ID_RPI_RP1_C0, of_pci_make_dev_node);
/*
* Devices known to require a longer delay before first config space access
diff --git a/drivers/pci/vgaarb.c b/drivers/pci/vgaarb.c
index 436fa7f4c387..baa242b14099 100644
--- a/drivers/pci/vgaarb.c
+++ b/drivers/pci/vgaarb.c
@@ -652,13 +652,6 @@ static bool vga_is_boot_device(struct vga_device *vgadev)
return true;
}
- /*
- * Vgadev has neither IO nor MEM enabled. If we haven't found any
- * other VGA devices, it is the best candidate so far.
- */
- if (!boot_vga)
- return true;
-
return false;
}
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index bc7f37afc48b..7b9f792acb0e 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -491,6 +491,7 @@ config PINCTRL_PIC64GX
depends on ARCH_MICROCHIP || COMPILE_TEST
depends on OF
select GENERIC_PINCONF
+ select REGMAP_MMIO
default y
help
This selects the pinctrl driver for gpio2 on pic64gx.
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8189.c b/drivers/pinctrl/mediatek/pinctrl-mt8189.c
index f6a3e584588b..cd4cdff309a1 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8189.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8189.c
@@ -1642,7 +1642,7 @@ static const struct mtk_pin_reg_calc mt8189_reg_cals[PINCTRL_PIN_REG_MAX] = {
};
static const char * const mt8189_pinctrl_register_base_names[] = {
- "base", "lm", "rb0", "rb1", "bm0", "bm1", "bm2", "lt0", "lt1", "rt",
+ "base", "bm0", "bm1", "bm2", "lm", "lt0", "lt1", "rb0", "rb1", "rt",
};
static const struct mtk_eint_hw mt8189_eint_hw = {
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index 1c97ec44aa5f..78212f992843 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -498,7 +498,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
pctrl->chip.base = -1;
pctrl->chip.ngpio = data->npins;
pctrl->chip.label = dev_name(dev);
- pctrl->chip.can_sleep = false;
+ pctrl->chip.can_sleep = true;
mutex_init(&pctrl->lock);
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index 16a2fd9fdd9b..5ec1ad471696 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -801,18 +801,18 @@ static const struct mlxbf_pmc_events mlxbf_pmc_llt_miss_events[] = {
{11, "GDC_MISS_MACHINE_CHI_TXDAT"},
{12, "GDC_MISS_MACHINE_CHI_RXDAT"},
{13, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC0_0"},
- {14, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC0_1 "},
+ {14, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC0_1"},
{15, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC0_2"},
- {16, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC0_3 "},
- {17, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC1_0 "},
- {18, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC1_1 "},
- {19, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC1_2 "},
- {20, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC1_3 "},
+ {16, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC0_3"},
+ {17, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC1_0"},
+ {18, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC1_1"},
+ {19, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC1_2"},
+ {20, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC1_3"},
{21, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE0_0"},
{22, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE0_1"},
{23, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE0_2"},
{24, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE0_3"},
- {25, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE1_0 "},
+ {25, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE1_0"},
{26, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE1_1"},
{27, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE1_2"},
{28, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE1_3"},
diff --git a/drivers/platform/x86/asus-armoury.h b/drivers/platform/x86/asus-armoury.h
index a1bb2005c3f3..3ac7aea37838 100644
--- a/drivers/platform/x86/asus-armoury.h
+++ b/drivers/platform/x86/asus-armoury.h
@@ -449,12 +449,27 @@ static const struct dmi_system_id power_limits[] = {
.ac_data = &(struct power_limits) {
.ppt_pl1_spl_min = 15,
.ppt_pl1_spl_max = 80,
- .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_min = 35,
.ppt_pl2_sppt_max = 80,
.ppt_pl3_fppt_min = 35,
- .ppt_pl3_fppt_max = 80
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 45,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 65,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
},
- .dc_data = NULL,
},
},
{
@@ -554,6 +569,42 @@ static const struct dmi_system_id power_limits[] = {
},
{
.matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA608UM"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 45,
+ .ppt_pl1_spl_max = 90,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 90,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_def = 90,
+ .ppt_pl3_fppt_max = 65,
+ .nv_dynamic_boost_min = 10,
+ .nv_dynamic_boost_max = 15,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 100,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 45,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 65,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
DMI_MATCH(DMI_BOARD_NAME, "FA608WI"),
},
.driver_data = &(struct power_data) {
@@ -824,6 +875,38 @@ static const struct dmi_system_id power_limits[] = {
},
{
.matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA403WR"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 0,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 80,
+ .nv_tgp_max = 95,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 35,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
DMI_MATCH(DMI_BOARD_NAME, "GA503QR"),
},
.driver_data = &(struct power_data) {
@@ -952,6 +1035,35 @@ static const struct dmi_system_id power_limits[] = {
},
{
.matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GU605CR"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 30,
+ .ppt_pl1_spl_max = 85,
+ .ppt_pl2_sppt_min = 38,
+ .ppt_pl2_sppt_max = 110,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 80,
+ .nv_tgp_def = 90,
+ .nv_tgp_max = 105,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 30,
+ .ppt_pl1_spl_max = 85,
+ .ppt_pl2_sppt_min = 38,
+ .ppt_pl2_sppt_max = 110,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
DMI_MATCH(DMI_BOARD_NAME, "GU605CW"),
},
.driver_data = &(struct power_data) {
@@ -1262,6 +1374,35 @@ static const struct dmi_system_id power_limits[] = {
},
{
.matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G615LR"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_def = 140,
+ .ppt_pl1_spl_max = 175,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 175,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_tgp_min = 65,
+ .nv_tgp_max = 115,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 55,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 70,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
DMI_MATCH(DMI_BOARD_NAME, "G634J"),
},
.driver_data = &(struct power_data) {
@@ -1428,6 +1569,35 @@ static const struct dmi_system_id power_limits[] = {
},
{
.matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G835LW"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_def = 140,
+ .ppt_pl1_spl_max = 175,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 175,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 80,
+ .nv_tgp_max = 150,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 55,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 70,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
DMI_MATCH(DMI_BOARD_NAME, "H7606W"),
},
.driver_data = &(struct power_data) {
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 6a62bc5b02fd..a38a65f5c550 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -580,6 +580,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x2a, { KEY_SELECTIVE_SCREENSHOT } },
{ KE_IGNORE, 0x2b, }, /* PrintScreen (also send via PS/2) on newer models */
{ KE_IGNORE, 0x2c, }, /* CapsLock (also send via PS/2) on newer models */
+ { KE_KEY, 0x2d, { KEY_DISPLAYTOGGLE } },
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x32, { KEY_MUTE } },
diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c
index 1418bd326edf..e69b50162bb1 100644
--- a/drivers/platform/x86/dell/alienware-wmi-wmax.c
+++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c
@@ -90,6 +90,30 @@ static struct awcc_quirks empty_quirks;
static const struct dmi_system_id awcc_dmi_table[] __initconst = {
{
+ .ident = "Alienware 16 Area-51",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware 16 Area-51"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
+ .ident = "Alienware 16X Aurora",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware 16X Aurora"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
+ .ident = "Alienware 18 Area-51",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware 18 Area-51"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
.ident = "Alienware 16 Aurora",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
@@ -162,6 +186,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
.driver_data = &generic_quirks,
},
{
+ .ident = "Alienware x16",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware x16"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
.ident = "Alienware x17",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
diff --git a/drivers/platform/x86/dell/dell-lis3lv02d.c b/drivers/platform/x86/dell/dell-lis3lv02d.c
index 77905a9ddde9..fe52bcd896f7 100644
--- a/drivers/platform/x86/dell/dell-lis3lv02d.c
+++ b/drivers/platform/x86/dell/dell-lis3lv02d.c
@@ -44,6 +44,7 @@ static const struct dmi_system_id lis3lv02d_devices[] __initconst = {
/*
* Additional individual entries were added after verification.
*/
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude 5400", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Latitude 5480", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Latitude 5500", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Latitude E6330", 0x29),
diff --git a/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c
index c50ad5880503..f346aad8e9d8 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c
@@ -207,7 +207,7 @@ static int hp_populate_enumeration_elements_from_package(union acpi_object *enum
case PREREQUISITES:
size = min_t(u32, enum_data->common.prerequisites_size, MAX_PREREQUISITES_SIZE);
for (reqs = 0; reqs < size; reqs++) {
- if (elem >= enum_obj_count) {
+ if (elem + reqs >= enum_obj_count) {
pr_err("Error enum-objects package is too small\n");
return -EINVAL;
}
@@ -255,7 +255,7 @@ static int hp_populate_enumeration_elements_from_package(union acpi_object *enum
for (pos_values = 0; pos_values < size && pos_values < MAX_VALUES_SIZE;
pos_values++) {
- if (elem >= enum_obj_count) {
+ if (elem + pos_values >= enum_obj_count) {
pr_err("Error enum-objects package is too small\n");
return -EINVAL;
}
diff --git a/drivers/platform/x86/hp/hp-bioscfg/int-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/int-attributes.c
index 6c7f4d5fa9cb..63b1fda2be4e 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/int-attributes.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/int-attributes.c
@@ -227,7 +227,7 @@ static int hp_populate_integer_elements_from_package(union acpi_object *integer_
size = min_t(u32, integer_data->common.prerequisites_size, MAX_PREREQUISITES_SIZE);
for (reqs = 0; reqs < size; reqs++) {
- if (elem >= integer_obj_count) {
+ if (elem + reqs >= integer_obj_count) {
pr_err("Error elem-objects package is too small\n");
return -EINVAL;
}
diff --git a/drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c
index c6e57bb9d8b7..6a31f47ce3f5 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c
@@ -216,6 +216,11 @@ static int hp_populate_ordered_list_elements_from_package(union acpi_object *ord
size = min_t(u32, ordered_list_data->common.prerequisites_size,
MAX_PREREQUISITES_SIZE);
for (reqs = 0; reqs < size; reqs++) {
+ if (elem + reqs >= order_obj_count) {
+ pr_err("Error elem-objects package is too small\n");
+ return -EINVAL;
+ }
+
ret = hp_convert_hexstr_to_str(order_obj[elem + reqs].string.pointer,
order_obj[elem + reqs].string.length,
&str_value, &value_len);
diff --git a/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c
index 187b372123ed..ec79d9d50377 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c
@@ -303,6 +303,11 @@ static int hp_populate_password_elements_from_package(union acpi_object *passwor
MAX_PREREQUISITES_SIZE);
for (reqs = 0; reqs < size; reqs++) {
+ if (elem + reqs >= password_obj_count) {
+ pr_err("Error elem-objects package is too small\n");
+ return -EINVAL;
+ }
+
ret = hp_convert_hexstr_to_str(password_obj[elem + reqs].string.pointer,
password_obj[elem + reqs].string.length,
&str_value, &value_len);
diff --git a/drivers/platform/x86/hp/hp-bioscfg/string-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/string-attributes.c
index 27758b779b2d..7b885d25650c 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/string-attributes.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/string-attributes.c
@@ -217,7 +217,7 @@ static int hp_populate_string_elements_from_package(union acpi_object *string_ob
MAX_PREREQUISITES_SIZE);
for (reqs = 0; reqs < size; reqs++) {
- if (elem >= string_obj_count) {
+ if (elem + reqs >= string_obj_count) {
pr_err("Error elem-objects package is too small\n");
return -EINVAL;
}
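
Each of these hp-bioscfg loops indexes the ACPI package at elem + reqs (or the enum/integer/string equivalent), but the old guard compared only elem against the package size, so the later iterations could read past the end of the package. A small stand-alone illustration of why the loop offset has to be part of the bound check; the array name and sizes here are invented for the example.

#include <stdio.h>

#define OBJ_COUNT 8

int main(void)
{
    int obj[OBJ_COUNT] = { 0 };
    int elem = 6, size = 4;

    for (int reqs = 0; reqs < size; reqs++) {
        /* Checking only "elem" would let elem + reqs reach index 9 here. */
        if (elem + reqs >= OBJ_COUNT) {
            fprintf(stderr, "package too small at index %d\n", elem + reqs);
            return 1;
        }
        printf("obj[%d] = %d\n", elem + reqs, obj[elem + reqs]);
    }
    return 0;
}
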
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index 231b37909801..139956168cf9 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -273,7 +273,7 @@ static int __init ibm_rtl_init(void) {
/* search for the _RTL_ signature at the start of the table */
for (i = 0 ; i < ebda_size/sizeof(unsigned int); i++) {
struct ibm_rtl_table __iomem * tmp;
- tmp = (struct ibm_rtl_table __iomem *) (ebda_map+i);
+ tmp = (struct ibm_rtl_table __iomem *) (ebda_map + i*sizeof(unsigned int));
if ((readq(&tmp->signature) & RTL_MASK) == RTL_SIGNATURE) {
phys_addr_t addr;
unsigned int plen;
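
The ibm_rtl change hinges on pointer arithmetic being scaled by the pointee size: assuming ebda_map is a byte-granular __iomem pointer, as the fix implies, ebda_map + i advanced one byte per iteration while the loop bound is expressed in unsigned-int units, so the scan covered only part of the EBDA. A tiny demonstration of the scaling difference in plain C, with no MMIO involved.

#include <stdio.h>

int main(void)
{
    unsigned int buf[4];
    char *byte_base = (char *)buf;

    for (int i = 0; i < 4; i++) {
        /* byte stepping vs. element-sized stepping from the same base */
        void *by_byte    = byte_base + i;
        void *by_element = byte_base + i * sizeof(unsigned int);
        printf("i=%d  byte step: +%td  element step: +%td\n",
               i, (char *)by_byte - byte_base, (char *)by_element - byte_base);
    }
    return 0;
}
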
diff --git a/drivers/platform/x86/intel/pmt/discovery.c b/drivers/platform/x86/intel/pmt/discovery.c
index 32713a194a55..e500aa327d23 100644
--- a/drivers/platform/x86/intel/pmt/discovery.c
+++ b/drivers/platform/x86/intel/pmt/discovery.c
@@ -503,8 +503,10 @@ static int pmt_features_discovery(struct pmt_features_priv *priv,
ret = kobject_init_and_add(&feature->kobj, ktype, &priv->dev->kobj,
"%s", pmt_feature_names[feature->id]);
- if (ret)
+ if (ret) {
+ kobject_put(&feature->kobj);
return ret;
+ }
kobject_uevent(&feature->kobj, KOBJ_ADD);
pmt_features_add_feat(feature);
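
The discovery.c fix follows the documented kobject rule: once kobject_init_and_add() has run, the object holds a reference even when the call fails, so the error path must drop it with kobject_put(), which invokes the ktype's release callback, rather than freeing the memory directly. A condensed kernel-style sketch of that idiom; it is not a standalone program, and my_obj/my_ktype are placeholders rather than symbols from this driver.

/* kernel-style sketch, not buildable outside the kernel */
static const struct kobj_type my_ktype;   /* its release() frees the my_obj */

struct my_obj {
	struct kobject kobj;
	/* ... */
};

int my_obj_add(struct my_obj *obj, struct kobject *parent, const char *name)
{
	int ret;

	ret = kobject_init_and_add(&obj->kobj, &my_ktype, parent, "%s", name);
	if (ret) {
		/* never kfree(obj) here: the release() hook frees it */
		kobject_put(&obj->kobj);
		return ret;
	}

	kobject_uevent(&obj->kobj, KOBJ_ADD);
	return 0;
}
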
@@ -546,9 +548,9 @@ static int pmt_features_probe(struct auxiliary_device *auxdev, const struct auxi
priv->dev = device_create(&intel_pmt_class, &auxdev->dev, MKDEV(0, 0), priv,
"%s-%s", "features", dev_name(priv->parent));
if (IS_ERR(priv->dev))
- return dev_err_probe(priv->dev, PTR_ERR(priv->dev),
+ return dev_err_probe(&auxdev->dev, PTR_ERR(priv->dev),
"Could not create %s-%s device node\n",
- "features", dev_name(priv->dev));
+ "features", dev_name(priv->parent));
/* Initialize each feature */
for (i = 0; i < ivdev->num_resources; i++) {
diff --git a/drivers/platform/x86/lenovo/ideapad-laptop.c b/drivers/platform/x86/lenovo/ideapad-laptop.c
index 5171a077f62c..7d5f7a2f6564 100644
--- a/drivers/platform/x86/lenovo/ideapad-laptop.c
+++ b/drivers/platform/x86/lenovo/ideapad-laptop.c
@@ -1367,7 +1367,7 @@ static const struct key_entry ideapad_keymap[] = {
/* Performance toggle also Fn+Q, handled inside ideapad_wmi_notify() */
{ KE_KEY, 0x3d | IDEAPAD_WMI_KEY, { KEY_PROG4 } },
/* shift + prtsc */
- { KE_KEY, 0x2d | IDEAPAD_WMI_KEY, { KEY_CUT } },
+ { KE_KEY, 0x2d | IDEAPAD_WMI_KEY, { KEY_SELECTIVE_SCREENSHOT } },
{ KE_KEY, 0x29 | IDEAPAD_WMI_KEY, { KEY_TOUCHPAD_TOGGLE } },
{ KE_KEY, 0x2a | IDEAPAD_WMI_KEY, { KEY_ROOT_MENU } },
diff --git a/drivers/platform/x86/lenovo/think-lmi.c b/drivers/platform/x86/lenovo/think-lmi.c
index 540b472b1bf3..c45f0206b4ab 100644
--- a/drivers/platform/x86/lenovo/think-lmi.c
+++ b/drivers/platform/x86/lenovo/think-lmi.c
@@ -195,7 +195,7 @@ static const struct tlmi_cert_guids thinkpad_cert_guid = {
};
static const struct tlmi_cert_guids thinkcenter_cert_guid = {
- .thumbprint = NULL,
+ .thumbprint = LENOVO_CERT_THUMBPRINT_GUID, /* Same GUID as TP */
.set_bios_setting = LENOVO_TC_SET_BIOS_SETTING_CERT_GUID,
.save_bios_setting = LENOVO_TC_SAVE_BIOS_SETTING_CERT_GUID,
.cert_to_password = LENOVO_TC_CERT_TO_PASSWORD_GUID,
@@ -709,6 +709,10 @@ static ssize_t cert_thumbprint(char *buf, const char *arg, int count)
if (!tlmi_priv.cert_guid->thumbprint)
return -EOPNOTSUPP;
+ /* Older ThinkCenter BIOS may not have support */
+ if (!wmi_has_guid(tlmi_priv.cert_guid->thumbprint))
+ return -EOPNOTSUPP;
+
status = wmi_evaluate_method(tlmi_priv.cert_guid->thumbprint, 0, 0, &input, &output);
if (ACPI_FAILURE(status)) {
kfree(output.pointer);
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index c4b150fa093f..ddef6b78d2fa 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -1130,6 +1130,9 @@ static void __exit msi_cleanup(void)
sysfs_remove_group(&msipf_device->dev.kobj, &msipf_attribute_group);
if (!quirks->old_ec_model && threeg_exists)
device_remove_file(&msipf_device->dev, &dev_attr_threeg);
+ if (quirks->old_ec_model)
+ sysfs_remove_group(&msipf_device->dev.kobj,
+ &msipf_old_attribute_group);
platform_device_unregister(msipf_device);
platform_driver_unregister(&msipf_driver);
backlight_device_unregister(msibl_device);
diff --git a/drivers/platform/x86/samsung-galaxybook.c b/drivers/platform/x86/samsung-galaxybook.c
index 3c13e13d4885..755cb82bdb60 100644
--- a/drivers/platform/x86/samsung-galaxybook.c
+++ b/drivers/platform/x86/samsung-galaxybook.c
@@ -442,12 +442,13 @@ static int galaxybook_battery_ext_property_get(struct power_supply *psy,
union power_supply_propval *val)
{
struct samsung_galaxybook *galaxybook = ext_data;
+ u8 value;
int err;
if (psp != POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD)
return -EINVAL;
- err = charge_control_end_threshold_acpi_get(galaxybook, (u8 *)&val->intval);
+ err = charge_control_end_threshold_acpi_get(galaxybook, &value);
if (err)
return err;
@@ -455,8 +456,10 @@ static int galaxybook_battery_ext_property_get(struct power_supply *psy,
* device stores "no end threshold" as 0 instead of 100;
* if device has 0, report 100
*/
- if (val->intval == 0)
- val->intval = 100;
+ if (value == 0)
+ value = 100;
+
+ val->intval = value;
return 0;
}
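
The galaxybook change replaces a cast of &val->intval to u8 * with a local u8. Reading a single byte through a pointer into an int fills only one byte of the integer, leaves the rest with whatever was there before, and which byte gets written depends on endianness, so the later "intval == 0" comparison was unreliable. A short demonstration of the failure mode with made-up values.

#include <stdio.h>
#include <stdint.h>

/* stand-in for the ACPI getter: writes exactly one byte through the pointer */
static void read_threshold(uint8_t *out)
{
    *out = 0;   /* device reports "no end threshold" as 0 */
}

int main(void)
{
    int intval = 0x12345678;   /* stale contents, as in a reused result field */
    uint8_t value;

    read_threshold((uint8_t *)&intval);   /* old approach: only 1 byte updated */
    printf("via cast:   intval = 0x%08x (== 0? %s)\n",
           intval, intval == 0 ? "yes" : "no");

    read_threshold(&value);               /* fixed approach: read, then assign */
    intval = (value == 0) ? 100 : value;
    printf("via local:  intval = %d\n", intval);
    return 0;
}
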
diff --git a/drivers/platform/x86/uniwill/uniwill-acpi.c b/drivers/platform/x86/uniwill/uniwill-acpi.c
index bd7e63dd5181..0f935532f250 100644
--- a/drivers/platform/x86/uniwill/uniwill-acpi.c
+++ b/drivers/platform/x86/uniwill/uniwill-acpi.c
@@ -1845,6 +1845,13 @@ static const struct dmi_system_id uniwill_dmi_table[] __initconst = {
},
},
{
+ .ident = "TUXEDO Book BA15 Gen10 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PF5PU1G"),
+ },
+ },
+ {
.ident = "TUXEDO Pulse 14 Gen1 AMD",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
diff --git a/drivers/pmdomain/imx/gpc.c b/drivers/pmdomain/imx/gpc.c
index a34b260274f7..de695f1944ab 100644
--- a/drivers/pmdomain/imx/gpc.c
+++ b/drivers/pmdomain/imx/gpc.c
@@ -402,13 +402,12 @@ clk_err:
static int imx_gpc_probe(struct platform_device *pdev)
{
const struct imx_gpc_dt_data *of_id_data = device_get_match_data(&pdev->dev);
- struct device_node *pgc_node;
+ struct device_node *pgc_node __free(device_node)
+ = of_get_child_by_name(pdev->dev.of_node, "pgc");
struct regmap *regmap;
void __iomem *base;
int ret;
- pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc");
-
/* bail out if DT too old and doesn't provide the necessary info */
if (!of_property_present(pdev->dev.of_node, "#power-domain-cells") &&
!pgc_node)
diff --git a/drivers/pmdomain/mediatek/mtk-pm-domains.c b/drivers/pmdomain/mediatek/mtk-pm-domains.c
index 80561d27f2b2..f64f24d520dd 100644
--- a/drivers/pmdomain/mediatek/mtk-pm-domains.c
+++ b/drivers/pmdomain/mediatek/mtk-pm-domains.c
@@ -984,18 +984,6 @@ static void scpsys_domain_cleanup(struct scpsys *scpsys)
}
}
-static struct device_node *scpsys_get_legacy_regmap(struct device_node *np, const char *pn)
-{
- struct device_node *local_node;
-
- for_each_child_of_node(np, local_node) {
- if (of_property_present(local_node, pn))
- return local_node;
- }
-
- return NULL;
-}
-
static int scpsys_get_bus_protection_legacy(struct device *dev, struct scpsys *scpsys)
{
const u8 bp_blocks[3] = {
@@ -1017,7 +1005,8 @@ static int scpsys_get_bus_protection_legacy(struct device *dev, struct scpsys *s
* this makes it then possible to allocate the array of bus_prot
* regmaps and convert all to the new style handling.
*/
- node = scpsys_get_legacy_regmap(np, "mediatek,infracfg");
+ of_node_get(np);
+ node = of_find_node_with_property(np, "mediatek,infracfg");
if (node) {
regmap[0] = syscon_regmap_lookup_by_phandle(node, "mediatek,infracfg");
of_node_put(node);
@@ -1030,7 +1019,8 @@ static int scpsys_get_bus_protection_legacy(struct device *dev, struct scpsys *s
regmap[0] = NULL;
}
- node = scpsys_get_legacy_regmap(np, "mediatek,smi");
+ of_node_get(np);
+ node = of_find_node_with_property(np, "mediatek,smi");
if (node) {
smi_np = of_parse_phandle(node, "mediatek,smi", 0);
of_node_put(node);
@@ -1048,7 +1038,8 @@ static int scpsys_get_bus_protection_legacy(struct device *dev, struct scpsys *s
regmap[1] = NULL;
}
- node = scpsys_get_legacy_regmap(np, "mediatek,infracfg-nao");
+ of_node_get(np);
+ node = of_find_node_with_property(np, "mediatek,infracfg-nao");
if (node) {
regmap[2] = syscon_regmap_lookup_by_phandle(node, "mediatek,infracfg-nao");
num_regmaps++;
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index b9d87e56cbbc..3ff6da3bf4e6 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -2032,7 +2032,7 @@ end:
return ret;
}
-int rapl_package_add_pmu(struct rapl_package *rp)
+int rapl_package_add_pmu_locked(struct rapl_package *rp)
{
struct rapl_package_pmu_data *data = &rp->pmu_data;
int idx;
@@ -2040,8 +2040,6 @@ int rapl_package_add_pmu(struct rapl_package *rp)
if (rp->has_pmu)
return -EEXIST;
- guard(cpus_read_lock)();
-
for (idx = 0; idx < rp->nr_domains; idx++) {
struct rapl_domain *rd = &rp->domains[idx];
int domain = rd->id;
@@ -2091,17 +2089,23 @@ int rapl_package_add_pmu(struct rapl_package *rp)
return rapl_pmu_update(rp);
}
+EXPORT_SYMBOL_GPL(rapl_package_add_pmu_locked);
+
+int rapl_package_add_pmu(struct rapl_package *rp)
+{
+ guard(cpus_read_lock)();
+
+ return rapl_package_add_pmu_locked(rp);
+}
EXPORT_SYMBOL_GPL(rapl_package_add_pmu);
-void rapl_package_remove_pmu(struct rapl_package *rp)
+void rapl_package_remove_pmu_locked(struct rapl_package *rp)
{
struct rapl_package *pos;
if (!rp->has_pmu)
return;
- guard(cpus_read_lock)();
-
list_for_each_entry(pos, &rapl_packages, plist) {
/* PMU is still needed */
if (pos->has_pmu && pos != rp)
@@ -2111,6 +2115,14 @@ void rapl_package_remove_pmu(struct rapl_package *rp)
perf_pmu_unregister(&rapl_pmu.pmu);
memset(&rapl_pmu, 0, sizeof(struct rapl_pmu));
}
+EXPORT_SYMBOL_GPL(rapl_package_remove_pmu_locked);
+
+void rapl_package_remove_pmu(struct rapl_package *rp)
+{
+ guard(cpus_read_lock)();
+
+ rapl_package_remove_pmu_locked(rp);
+}
EXPORT_SYMBOL_GPL(rapl_package_remove_pmu);
#endif
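
intel_rapl_common now exports _locked variants so that callers already in a CPU-hotplug path, where the hotplug lock is held, do not try to take it again, while the original entry points become thin wrappers that acquire it; the intel_rapl_msr.c hunks that follow switch to the _locked calls. The same layering can be shown with an ordinary mutex; the names below are invented for the sketch.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t res_lock = PTHREAD_MUTEX_INITIALIZER;
static int resource_count;

/* caller must already hold res_lock */
static int resource_add_locked(int n)
{
    resource_count += n;
    return resource_count;
}

/* convenience wrapper for callers that do not hold the lock */
static int resource_add(int n)
{
    pthread_mutex_lock(&res_lock);
    int ret = resource_add_locked(n);
    pthread_mutex_unlock(&res_lock);
    return ret;
}

static void hotplug_callback(void)
{
    /* this path already holds the lock for other reasons */
    pthread_mutex_lock(&res_lock);
    resource_add_locked(1);   /* calling resource_add() here would deadlock */
    pthread_mutex_unlock(&res_lock);
}

int main(void)
{
    resource_add(2);
    hotplug_callback();
    printf("count = %d\n", resource_count);
    return 0;
}
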
diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
index 0ce1096b6314..9a7e150b3536 100644
--- a/drivers/powercap/intel_rapl_msr.c
+++ b/drivers/powercap/intel_rapl_msr.c
@@ -82,7 +82,7 @@ static int rapl_cpu_online(unsigned int cpu)
if (IS_ERR(rp))
return PTR_ERR(rp);
if (rapl_msr_pmu)
- rapl_package_add_pmu(rp);
+ rapl_package_add_pmu_locked(rp);
}
cpumask_set_cpu(cpu, &rp->cpumask);
return 0;
@@ -101,7 +101,7 @@ static int rapl_cpu_down_prep(unsigned int cpu)
lead_cpu = cpumask_first(&rp->cpumask);
if (lead_cpu >= nr_cpu_ids) {
if (rapl_msr_pmu)
- rapl_package_remove_pmu(rp);
+ rapl_package_remove_pmu_locked(rp);
rapl_remove_package_cpuslocked(rp);
} else if (rp->lead_cpu == cpu) {
rp->lead_cpu = lead_cpu;
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 4112a0097338..1ff369880beb 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -68,7 +68,7 @@ static ssize_t show_constraint_##_attr(struct device *dev, \
int id; \
struct powercap_zone_constraint *pconst;\
\
- if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
+ if (sscanf(dev_attr->attr.name, "constraint_%d_", &id) != 1) \
return -EINVAL; \
if (id >= power_zone->const_id_cnt) \
return -EINVAL; \
@@ -93,7 +93,7 @@ static ssize_t store_constraint_##_attr(struct device *dev,\
int id; \
struct powercap_zone_constraint *pconst;\
\
- if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
+ if (sscanf(dev_attr->attr.name, "constraint_%d_", &id) != 1) \
return -EINVAL; \
if (id >= power_zone->const_id_cnt) \
return -EINVAL; \
@@ -162,7 +162,7 @@ static ssize_t show_constraint_name(struct device *dev,
ssize_t len = -ENODATA;
struct powercap_zone_constraint *pconst;
- if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id))
+ if (sscanf(dev_attr->attr.name, "constraint_%d_", &id) != 1)
return -EINVAL;
if (id >= power_zone->const_id_cnt)
return -EINVAL;
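
sscanf() returns the number of conversions performed, or EOF on an input failure before the first conversion, so "!sscanf(...)" catches only the zero-conversion case and silently treats a negative return as success, leaving id uninitialized; comparing against the expected count of 1 is the robust test, which is what the hunks above switch to. A few lines showing the difference on good and bad attribute names (the names are examples, not the real sysfs files).

#include <stdio.h>

static int parse_constraint_id(const char *name)
{
    int id;

    if (sscanf(name, "constraint_%d_", &id) != 1)
        return -1;
    return id;
}

int main(void)
{
    printf("%d\n", parse_constraint_id("constraint_3_power_limit_uw")); /* 3 */
    printf("%d\n", parse_constraint_id("constraint_x_power_limit_uw")); /* -1 */
    printf("%d\n", parse_constraint_id(""));  /* -1: sscanf returns EOF here */
    return 0;
}
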
@@ -625,17 +625,23 @@ struct powercap_control_type *powercap_register_control_type(
INIT_LIST_HEAD(&control_type->node);
control_type->dev.class = &powercap_class;
dev_set_name(&control_type->dev, "%s", name);
- result = device_register(&control_type->dev);
- if (result) {
- put_device(&control_type->dev);
- return ERR_PTR(result);
- }
idr_init(&control_type->idr);
mutex_lock(&powercap_cntrl_list_lock);
list_add_tail(&control_type->node, &powercap_cntrl_list);
mutex_unlock(&powercap_cntrl_list_lock);
+ result = device_register(&control_type->dev);
+ if (result) {
+ mutex_lock(&powercap_cntrl_list_lock);
+ list_del(&control_type->node);
+ mutex_unlock(&powercap_cntrl_list_lock);
+
+ idr_destroy(&control_type->idr);
+ put_device(&control_type->dev);
+ return ERR_PTR(result);
+ }
+
return control_type;
}
EXPORT_SYMBOL_GPL(powercap_register_control_type);
diff --git a/drivers/regulator/fp9931.c b/drivers/regulator/fp9931.c
index fef0bb07fd5d..69b3c712e5d5 100644
--- a/drivers/regulator/fp9931.c
+++ b/drivers/regulator/fp9931.c
@@ -391,6 +391,7 @@ static const struct regulator_desc regulators[] = {
{
.name = "v3p3",
.of_match = of_match_ptr("v3p3"),
+ .regulators_node = of_match_ptr("regulators"),
.id = 0,
.ops = &fp9931_v3p3ops,
.type = REGULATOR_VOLTAGE,
@@ -403,6 +404,7 @@ static const struct regulator_desc regulators[] = {
{
.name = "vposneg",
.of_match = of_match_ptr("vposneg"),
+ .regulators_node = of_match_ptr("regulators"),
.id = 1,
.ops = &fp9931_vposneg_ops,
.type = REGULATOR_VOLTAGE,
@@ -415,6 +417,7 @@ static const struct regulator_desc regulators[] = {
{
.name = "vcom",
.of_match = of_match_ptr("vcom"),
+ .regulators_node = of_match_ptr("regulators"),
.id = 2,
.ops = &fp9931_vcom_ops,
.type = REGULATOR_VOLTAGE,
diff --git a/drivers/resctrl/mpam_devices.c b/drivers/resctrl/mpam_devices.c
index 0b5b158e1aaf..b495d5291868 100644
--- a/drivers/resctrl/mpam_devices.c
+++ b/drivers/resctrl/mpam_devices.c
@@ -1072,7 +1072,7 @@ static void __ris_msmon_read(void *arg)
u64 now;
bool nrdy = false;
bool config_mismatch;
- bool overflow;
+ bool overflow = false;
struct mon_read *m = arg;
struct mon_cfg *ctx = m->ctx;
bool reset_on_next_read = false;
@@ -1176,10 +1176,11 @@ static void __ris_msmon_read(void *arg)
}
mpam_mon_sel_unlock(msc);
- if (nrdy) {
+ if (nrdy)
m->err = -EBUSY;
+
+ if (m->err)
return;
- }
*m->val += now;
}
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index b42933fcd423..6561f98c3cb2 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -166,6 +166,7 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_FLAGS_SIGNED_NVDATA_REQUIRED (0x00010000)
#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK (0x0000ff00)
#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT (8)
+#define MPI3_IOCFACTS_FLAGS_MAX_REQ_PER_REPLY_QUEUE_LIMIT (0x00000040)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK (0x00000030)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_SHIFT (4)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_NOT_STARTED (0x00000000)
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 8fe6e0bf342e..8c4bb7169a87 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -3158,6 +3158,8 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.dma_mask = (facts_flags &
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
+ mrioc->facts.max_req_limit = (facts_flags &
+ MPI3_IOCFACTS_FLAGS_MAX_REQ_PER_REPLY_QUEUE_LIMIT);
mrioc->facts.protocol_flags = facts_data->protocol_flags;
mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 1f2a53ba5dd9..c5085e6d2e75 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -7459,7 +7459,7 @@ MODULE_PARM_DESC(lbprz,
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
-MODULE_PARM_DESC(atomic_write, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)");
+MODULE_PARM_DESC(atomic_wr, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index b3af9b78fa12..57fba34832ad 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -731,6 +731,8 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
sg_remove_request(sfp, srp);
return -EFAULT;
}
+ hp->duration = jiffies_to_msecs(jiffies);
+
if (hp->interface_id != 'S') {
sg_remove_request(sfp, srp);
return -ENOSYS;
@@ -815,7 +817,6 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
return -ENODEV;
}
- hp->duration = jiffies_to_msecs(jiffies);
if (hp->interface_id != '\0' && /* v3 (or later) interface */
(SG_FLAG_Q_AT_TAIL & hp->flags))
at_head = 0;
@@ -1338,9 +1339,6 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
"sg_cmd_done: pack_id=%d, res=0x%x\n",
srp->header.pack_id, result));
srp->header.resid = resid;
- ms = jiffies_to_msecs(jiffies);
- srp->header.duration = (ms > srp->header.duration) ?
- (ms - srp->header.duration) : 0;
if (0 != result) {
struct scsi_sense_hdr sshdr;
@@ -1389,6 +1387,9 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
done = 0;
}
srp->done = done;
+ ms = jiffies_to_msecs(jiffies);
+ srp->header.duration = (ms > srp->header.duration) ?
+ (ms - srp->header.duration) : 0;
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
if (likely(done)) {
@@ -2533,6 +2534,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
const sg_io_hdr_t *hp;
const char * cp;
unsigned int ms;
+ unsigned int duration;
k = 0;
list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
@@ -2570,13 +2572,17 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
seq_printf(s, " id=%d blen=%d",
srp->header.pack_id, blen);
if (srp->done)
- seq_printf(s, " dur=%d", hp->duration);
+ seq_printf(s, " dur=%u", hp->duration);
else {
ms = jiffies_to_msecs(jiffies);
- seq_printf(s, " t_o/elap=%d/%d",
+ duration = READ_ONCE(hp->duration);
+ if (duration)
+ duration = (ms > duration ?
+ ms - duration : 0);
+ seq_printf(s, " t_o/elap=%u/%u",
(new_interface ? hp->timeout :
jiffies_to_msecs(fp->timeout)),
- (ms > hp->duration ? ms - hp->duration : 0));
+ duration);
}
seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
(int) srp->data.cmd_opcode);
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index af6d050da1c8..965b4cea3388 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -300,6 +300,9 @@ struct cqspi_driver_platdata {
CQSPI_REG_IRQ_IND_SRAM_FULL | \
CQSPI_REG_IRQ_IND_COMP)
+#define CQSPI_IRQ_MASK_RD_SLOW_SRAM (CQSPI_REG_IRQ_WATERMARK | \
+ CQSPI_REG_IRQ_IND_COMP)
+
#define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \
CQSPI_REG_IRQ_WATERMARK | \
CQSPI_REG_IRQ_UNDERFLOW)
@@ -381,7 +384,7 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
else if (!cqspi->slow_sram)
irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
else
- irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR;
+ irq_status &= CQSPI_IRQ_MASK_RD_SLOW_SRAM | CQSPI_IRQ_MASK_WR;
if (irq_status)
complete(&cqspi->transfer_complete);
@@ -757,7 +760,7 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
*/
if (use_irq && cqspi->slow_sram)
- writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
+ writel(CQSPI_IRQ_MASK_RD_SLOW_SRAM, reg_base + CQSPI_REG_IRQMASK);
else if (use_irq)
writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
else
@@ -769,17 +772,19 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
readl(reg_base + CQSPI_REG_INDIRECTRD); /* Flush posted write. */
while (remaining > 0) {
+ ret = 0;
if (use_irq &&
!wait_for_completion_timeout(&cqspi->transfer_complete,
msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
ret = -ETIMEDOUT;
/*
- * Disable all read interrupts until
- * we are out of "bytes to read"
+ * Prevent lost interrupt and race condition by reinitializing early.
+ * A spurious wakeup and another wait cycle can occur here,
+ * which is preferable to waiting until timeout if interrupt is lost.
*/
- if (cqspi->slow_sram)
- writel(0x0, reg_base + CQSPI_REG_IRQMASK);
+ if (use_irq)
+ reinit_completion(&cqspi->transfer_complete);
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
@@ -811,12 +816,6 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
remaining -= bytes_to_read;
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
}
-
- if (use_irq && remaining > 0) {
- reinit_completion(&cqspi->transfer_complete);
- if (cqspi->slow_sram)
- writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
- }
}
/* Check indirect done status */
@@ -2001,8 +2000,10 @@ static int cqspi_probe(struct platform_device *pdev)
if (cqspi->use_direct_mode) {
ret = cqspi_request_mmap_dma(cqspi);
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ dev_err_probe(&pdev->dev, ret, "Failed to request mmap DMA\n");
goto probe_setup_failed;
+ }
}
ret = spi_register_controller(host);
@@ -2024,7 +2025,9 @@ probe_setup_failed:
probe_reset_failed:
if (cqspi->is_jh7110)
cqspi_jh7110_disable_clk(pdev, cqspi);
- clk_disable_unprepare(cqspi->clk);
+
+ if (pm_runtime_get_sync(&pdev->dev) >= 0)
+ clk_disable_unprepare(cqspi->clk);
probe_clk_failed:
return ret;
}
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 2f2082652a1a..481a7b28aacd 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -335,7 +335,7 @@ static int fsl_spi_prepare_message(struct spi_controller *ctlr,
if (t->bits_per_word == 16 || t->bits_per_word == 32)
t->bits_per_word = 8; /* pretend its 8 bits */
if (t->bits_per_word == 8 && t->len >= 256 &&
- (mpc8xxx_spi->flags & SPI_CPM1))
+ !(t->len & 1) && (mpc8xxx_spi->flags & SPI_CPM1))
t->bits_per_word = 16;
}
}
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index c8b2add2640e..6c7921469b90 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -178,8 +178,19 @@ bool spi_mem_default_supports_op(struct spi_mem *mem,
if (op->data.swap16 && !spi_mem_controller_is_capable(ctlr, swap16))
return false;
- if (op->cmd.nbytes != 2)
- return false;
+ /* Extra 8D-8D-8D limitations */
+ if (op->cmd.dtr && op->cmd.buswidth == 8) {
+ if (op->cmd.nbytes != 2)
+ return false;
+
+ if ((op->addr.nbytes % 2) ||
+ (op->dummy.nbytes % 2) ||
+ (op->data.nbytes % 2)) {
+ dev_err(&ctlr->dev,
+ "Even byte numbers not allowed in octal DTR operations\n");
+ return false;
+ }
+ }
} else {
if (op->cmd.nbytes != 1)
return false;
diff --git a/drivers/spi/spi-mpfs.c b/drivers/spi/spi-mpfs.c
index 9a14d1732a15..7e9e64d8e6c8 100644
--- a/drivers/spi/spi-mpfs.c
+++ b/drivers/spi/spi-mpfs.c
@@ -577,6 +577,7 @@ static int mpfs_spi_probe(struct platform_device *pdev)
ret = devm_spi_register_controller(&pdev->dev, host);
if (ret) {
+ mpfs_spi_disable_ints(spi);
mpfs_spi_disable(spi);
return dev_err_probe(&pdev->dev, ret,
"unable to register host for SPI controller\n");
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 4b40985af1ea..90e5813cfdc3 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -1320,7 +1320,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(dev, irq, mtk_spi_interrupt,
mtk_spi_interrupt_thread,
- IRQF_TRIGGER_NONE, dev_name(dev), host);
+ IRQF_ONESHOT, dev_name(dev), host);
if (ret)
return dev_err_probe(dev, ret, "failed to register irq\n");
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index 871dfd3e77be..d1de6c99e762 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -795,10 +795,13 @@ static const struct sun6i_spi_cfg sun50i_r329_spi_cfg = {
static const struct of_device_id sun6i_spi_match[] = {
{ .compatible = "allwinner,sun6i-a31-spi", .data = &sun6i_a31_spi_cfg },
{ .compatible = "allwinner,sun8i-h3-spi", .data = &sun8i_h3_spi_cfg },
- {
- .compatible = "allwinner,sun50i-r329-spi",
- .data = &sun50i_r329_spi_cfg
- },
+ { .compatible = "allwinner,sun50i-r329-spi", .data = &sun50i_r329_spi_cfg },
+ /*
+ * A523's SPI controller has a combined RX buffer + FIFO counter
+ * at offset 0x400, instead of split buffer count in FIFO status
+ * register. But in practice we only care about the FIFO level.
+ */
+ { .compatible = "allwinner,sun55i-a523-spi", .data = &sun50i_r329_spi_cfg },
{}
};
MODULE_DEVICE_TABLE(of, sun6i_spi_match);
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index 0d4dcc66e097..c693d934103a 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -503,7 +503,8 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, WCL_THERMAL, PROC_THERMAL_FEATURE_MSI_SUPPORT |
PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_DLVR |
PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_HINT |
- PROC_THERMAL_FEATURE_POWER_FLOOR | PROC_THERMAL_FEATURE_PTC) },
+ PROC_THERMAL_FEATURE_POWER_FLOOR | PROC_THERMAL_FEATURE_PTC |
+ PROC_THERMAL_FEATURE_SOC_POWER_SLIDER) },
{ PCI_DEVICE_DATA(INTEL, NVL_H_THERMAL, PROC_THERMAL_FEATURE_RAPL |
PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_DVFS |
PROC_THERMAL_FEATURE_MSI_SUPPORT | PROC_THERMAL_FEATURE_WT_HINT |
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 17ca5c082643..89758c9934ec 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -500,7 +500,7 @@ void thermal_zone_set_trip_hyst(struct thermal_zone_device *tz,
WRITE_ONCE(trip->hysteresis, hyst);
thermal_notify_tz_trip_change(tz, trip);
/*
- * If the zone temperature is above or at the trip tmperature, the trip
+ * If the zone temperature is above or at the trip temperature, the trip
* is in the trips_reached list and its threshold is equal to its low
* temperature. It needs to stay in that list, but its threshold needs
* to be updated and the list ordering may need to be restored.
@@ -1043,7 +1043,7 @@ static void thermal_cooling_device_init_complete(struct thermal_cooling_device *
* @np: a pointer to a device tree node.
* @type: the thermal cooling device type.
* @devdata: device private data.
- * @ops: standard thermal cooling devices callbacks.
+ * @ops: standard thermal cooling devices callbacks.
*
* This interface function adds a new thermal cooling device (fan/processor/...)
* to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
diff --git a/drivers/tty/serial/8250/8250_loongson.c b/drivers/tty/serial/8250/8250_loongson.c
index 53153a116c01..47df3c4c9d21 100644
--- a/drivers/tty/serial/8250/8250_loongson.c
+++ b/drivers/tty/serial/8250/8250_loongson.c
@@ -128,8 +128,8 @@ static int loongson_uart_probe(struct platform_device *pdev)
port->private_data = priv;
port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &priv->res);
- if (!port->membase)
- return -ENOMEM;
+ if (IS_ERR(port->membase))
+ return PTR_ERR(port->membase);
port->mapbase = priv->res->start;
port->mapsize = resource_size(priv->res);
diff --git a/drivers/tty/serial/serial_base_bus.c b/drivers/tty/serial/serial_base_bus.c
index 22749ab0428a..1e1ad28d83fc 100644
--- a/drivers/tty/serial/serial_base_bus.c
+++ b/drivers/tty/serial/serial_base_bus.c
@@ -13,7 +13,7 @@
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/property.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -60,6 +60,7 @@ void serial_base_driver_unregister(struct device_driver *driver)
driver_unregister(driver);
}
+/* On failure the caller must put device @dev with put_device() */
static int serial_base_device_init(struct uart_port *port,
struct device *dev,
struct device *parent_dev,
@@ -73,7 +74,9 @@ static int serial_base_device_init(struct uart_port *port,
dev->parent = parent_dev;
dev->bus = &serial_base_bus_type;
dev->release = release;
- device_set_of_node_from_dev(dev, parent_dev);
+ dev->of_node_reused = true;
+
+ device_set_node(dev, fwnode_handle_get(dev_fwnode(parent_dev)));
if (!serial_base_initialized) {
dev_dbg(port->dev, "uart_add_one_port() called before arch_initcall()?\n");
@@ -94,7 +97,7 @@ static void serial_base_ctrl_release(struct device *dev)
{
struct serial_ctrl_device *ctrl_dev = to_serial_base_ctrl_device(dev);
- of_node_put(dev->of_node);
+ fwnode_handle_put(dev_fwnode(dev));
kfree(ctrl_dev);
}
@@ -142,7 +145,7 @@ static void serial_base_port_release(struct device *dev)
{
struct serial_port_device *port_dev = to_serial_base_port_device(dev);
- of_node_put(dev->of_node);
+ fwnode_handle_put(dev_fwnode(dev));
kfree(port_dev);
}
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 53edbf1d8963..fbfe5575bd3c 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1914,7 +1914,7 @@ static void sci_dma_check_tx_occurred(struct sci_port *s)
struct dma_tx_state state;
enum dma_status status;
- if (!s->chan_tx)
+ if (!s->chan_tx || s->cookie_tx <= 0)
return;
status = dmaengine_tx_status(s->chan_tx, s->cookie_tx, &state);
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index c793fc74c26b..c593d20a1b5b 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -428,10 +428,17 @@ static void cdns_uart_handle_tx(void *dev_id)
struct tty_port *tport = &port->state->port;
unsigned int numbytes;
unsigned char ch;
+ ktime_t rts_delay;
if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) {
/* Disable the TX Empty interrupt */
writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IDR);
+ /* Set RTS line after delay */
+ if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED) {
+ cdns_uart->tx_timer.function = &cdns_rs485_rx_callback;
+ rts_delay = ns_to_ktime(cdns_calc_after_tx_delay(cdns_uart));
+ hrtimer_start(&cdns_uart->tx_timer, rts_delay, HRTIMER_MODE_REL);
+ }
return;
}
@@ -448,13 +455,6 @@ static void cdns_uart_handle_tx(void *dev_id)
/* Enable the TX Empty interrupt */
writel(CDNS_UART_IXR_TXEMPTY, cdns_uart->port->membase + CDNS_UART_IER);
-
- if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED &&
- (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port))) {
- hrtimer_update_function(&cdns_uart->tx_timer, cdns_rs485_rx_callback);
- hrtimer_start(&cdns_uart->tx_timer,
- ns_to_ktime(cdns_calc_after_tx_delay(cdns_uart)), HRTIMER_MODE_REL);
- }
}
/**
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 80c0b49f30b0..0babb7035200 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -10359,7 +10359,7 @@ static int ufshcd_suspend(struct ufs_hba *hba)
ret = ufshcd_setup_clocks(hba, false);
if (ret) {
ufshcd_enable_irq(hba);
- return ret;
+ goto out;
}
if (ufshcd_is_clkgating_allowed(hba)) {
hba->clk_gating.state = CLKS_OFF;
@@ -10371,6 +10371,9 @@ static int ufshcd_suspend(struct ufs_hba *hba)
/* Put the host controller in low power mode if possible */
ufshcd_hba_vreg_set_lpm(hba);
ufshcd_pm_qos_update(hba, false);
+out:
+ if (ret)
+ ufshcd_update_evt_hist(hba, UFS_EVT_SUSPEND_ERR, (u32)ret);
return ret;
}
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index d93ed4e86a17..fa0d4e6aee16 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -3,7 +3,7 @@
* drivers/uio/uio.c
*
* Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
- * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
* Copyright(C) 2006, Hans J. Koch <hjk@hansjkoch.de>
* Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
*
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index a4954a21be93..c116143335d9 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -70,11 +70,11 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
simple->num_clocks = ret;
ret = clk_bulk_prepare_enable(simple->num_clocks, simple->clks);
if (ret)
- goto err_resetc_assert;
+ goto err_clk_put_all;
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret)
- goto err_clk_put;
+ goto err_clk_disable;
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -82,8 +82,9 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
return 0;
-err_clk_put:
+err_clk_disable:
clk_bulk_disable_unprepare(simple->num_clocks, simple->clks);
+err_clk_put_all:
clk_bulk_put_all(simple->num_clocks, simple->clks);
err_resetc_assert:
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index bc3fe31638b9..8a35a6901db7 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -4826,7 +4826,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
if (!dwc->gadget)
return;
- dwc3_enable_susphy(dwc, false);
+ dwc3_enable_susphy(dwc, true);
usb_del_gadget(dwc->gadget);
dwc3_gadget_free_endpoints(dwc);
usb_put_gadget(dwc->gadget);
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index cf6512ed17a6..96b588bd08cd 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -227,7 +227,7 @@ void dwc3_host_exit(struct dwc3 *dwc)
if (dwc->sys_wakeup)
device_init_wakeup(&dwc->xhci->dev, false);
- dwc3_enable_susphy(dwc, false);
+ dwc3_enable_susphy(dwc, true);
platform_device_unregister(dwc->xhci);
dwc->xhci = NULL;
}
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 1a7d3c4f652f..83c7e243dcf9 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -3020,7 +3020,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (retval)
- return retval;
+ goto err_put_client;
udc->board = &lpc32xx_usbddata;
@@ -3038,28 +3038,32 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
/* Get IRQs */
for (i = 0; i < 4; i++) {
udc->udp_irq[i] = platform_get_irq(pdev, i);
- if (udc->udp_irq[i] < 0)
- return udc->udp_irq[i];
+ if (udc->udp_irq[i] < 0) {
+ retval = udc->udp_irq[i];
+ goto err_put_client;
+ }
}
udc->udp_baseaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->udp_baseaddr)) {
dev_err(udc->dev, "IO map failure\n");
- return PTR_ERR(udc->udp_baseaddr);
+ retval = PTR_ERR(udc->udp_baseaddr);
+ goto err_put_client;
}
/* Get USB device clock */
udc->usb_slv_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(udc->usb_slv_clk)) {
dev_err(udc->dev, "failed to acquire USB device clock\n");
- return PTR_ERR(udc->usb_slv_clk);
+ retval = PTR_ERR(udc->usb_slv_clk);
+ goto err_put_client;
}
/* Enable USB device clock */
retval = clk_prepare_enable(udc->usb_slv_clk);
if (retval < 0) {
dev_err(udc->dev, "failed to start USB device clock\n");
- return retval;
+ goto err_put_client;
}
/* Setup deferred workqueue data */
@@ -3080,7 +3084,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
if (!udc->udca_v_base) {
dev_err(udc->dev, "error getting UDCA region\n");
retval = -ENOMEM;
- goto i2c_fail;
+ goto err_disable_clk;
}
udc->udca_p_base = dma_handle;
dev_dbg(udc->dev, "DMA buffer(0x%x bytes), P:0x%08x, V:0x%p\n",
@@ -3093,7 +3097,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
if (!udc->dd_cache) {
dev_err(udc->dev, "error getting DD DMA region\n");
retval = -ENOMEM;
- goto dma_alloc_fail;
+ goto err_free_dma;
}
/* Clear USB peripheral and initialize gadget endpoints */
@@ -3107,14 +3111,14 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
if (retval < 0) {
dev_err(udc->dev, "LP request irq %d failed\n",
udc->udp_irq[IRQ_USB_LP]);
- goto irq_req_fail;
+ goto err_destroy_pool;
}
retval = devm_request_irq(dev, udc->udp_irq[IRQ_USB_HP],
lpc32xx_usb_hp_irq, 0, "udc_hp", udc);
if (retval < 0) {
dev_err(udc->dev, "HP request irq %d failed\n",
udc->udp_irq[IRQ_USB_HP]);
- goto irq_req_fail;
+ goto err_destroy_pool;
}
retval = devm_request_irq(dev, udc->udp_irq[IRQ_USB_DEVDMA],
@@ -3122,7 +3126,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
if (retval < 0) {
dev_err(udc->dev, "DEV request irq %d failed\n",
udc->udp_irq[IRQ_USB_DEVDMA]);
- goto irq_req_fail;
+ goto err_destroy_pool;
}
/* The transceiver interrupt is used for VBUS detection and will
@@ -3133,7 +3137,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
if (retval < 0) {
dev_err(udc->dev, "VBUS request irq %d failed\n",
udc->udp_irq[IRQ_USB_ATX]);
- goto irq_req_fail;
+ goto err_destroy_pool;
}
/* Initialize wait queue */
@@ -3142,7 +3146,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
retval = usb_add_gadget_udc(dev, &udc->gadget);
if (retval < 0)
- goto add_gadget_fail;
+ goto err_destroy_pool;
dev_set_drvdata(dev, udc);
device_init_wakeup(dev, 1);
@@ -3154,14 +3158,16 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
dev_info(udc->dev, "%s version %s\n", driver_name, DRIVER_VERSION);
return 0;
-add_gadget_fail:
-irq_req_fail:
+err_destroy_pool:
dma_pool_destroy(udc->dd_cache);
-dma_alloc_fail:
+err_free_dma:
dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
udc->udca_v_base, udc->udca_p_base);
-i2c_fail:
+err_disable_clk:
clk_disable_unprepare(udc->usb_slv_clk);
+err_put_client:
+ put_device(&udc->isp1301_i2c_client->dev);
+
dev_err(udc->dev, "%s probe failed, %d\n", driver_name, retval);
return retval;
@@ -3190,6 +3196,8 @@ static void lpc32xx_udc_remove(struct platform_device *pdev)
udc->udca_v_base, udc->udca_p_base);
clk_disable_unprepare(udc->usb_slv_clk);
+
+ put_device(&udc->isp1301_i2c_client->dev);
}
#ifdef CONFIG_PM
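
The lpc32xx_udc rework renames the labels so that each one names what is undone and routes every late failure through put_device() on the ISP1301 client. The underlying shape is the usual "acquire in order, unwind in reverse order" goto ladder; a compact stand-alone version with faked acquire/release steps is sketched below.

#include <stdio.h>

static int acquire(const char *what, int fail)
{
    printf("acquire %s%s\n", what, fail ? " (failed)" : "");
    return fail ? -1 : 0;
}

static void release(const char *what)
{
    printf("release %s\n", what);
}

static int probe(int fail_at)
{
    int ret;

    ret = acquire("client", fail_at == 1);
    if (ret)
        goto err_out;
    ret = acquire("clock", fail_at == 2);
    if (ret)
        goto err_put_client;
    ret = acquire("dma pool", fail_at == 3);
    if (ret)
        goto err_disable_clk;
    return 0;

    /* unwind strictly in reverse order of acquisition */
err_disable_clk:
    release("clock");
err_put_client:
    release("client");
err_out:
    return ret;
}

int main(void)
{
    return probe(3) ? 1 : 0;
}
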
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 24d5a1dc5056..7663f2aa35e9 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -169,13 +169,13 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
- goto fail_disable;
+ goto err_put_client;
dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (nxp)\n", hcd_name);
if (usb_disabled()) {
dev_err(&pdev->dev, "USB is disabled\n");
ret = -ENODEV;
- goto fail_disable;
+ goto err_put_client;
}
/* Enable USB host clock */
@@ -183,7 +183,7 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
if (IS_ERR(usb_host_clk)) {
dev_err(&pdev->dev, "failed to acquire and start USB OHCI clock\n");
ret = PTR_ERR(usb_host_clk);
- goto fail_disable;
+ goto err_put_client;
}
isp1301_configure();
@@ -192,13 +192,13 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
if (!hcd) {
dev_err(&pdev->dev, "Failed to allocate HC buffer\n");
ret = -ENOMEM;
- goto fail_disable;
+ goto err_put_client;
}
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
- goto fail_resource;
+ goto err_put_hcd;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
@@ -206,7 +206,7 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENXIO;
- goto fail_resource;
+ goto err_put_hcd;
}
ohci_nxp_start_hc();
@@ -220,9 +220,10 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
}
ohci_nxp_stop_hc();
-fail_resource:
+err_put_hcd:
usb_put_hcd(hcd);
-fail_disable:
+err_put_client:
+ put_device(&isp1301_i2c_client->dev);
isp1301_i2c_client = NULL;
return ret;
}
@@ -234,6 +235,7 @@ static void ohci_hcd_nxp_remove(struct platform_device *pdev)
usb_remove_hcd(hcd);
ohci_nxp_stop_hc();
usb_put_hcd(hcd);
+ put_device(&isp1301_i2c_client->dev);
isp1301_i2c_client = NULL;
}
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index 57cdda4e09c8..90282e51e23e 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -554,7 +554,7 @@ static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
* Hang up the TTY. This wakes up any blocked
* writers and causes subsequent writes to fail.
*/
- tty_vhangup(port->port.tty);
+ tty_port_tty_vhangup(&port->port);
tty_unregister_device(dbc_tty_driver, port->minor);
xhci_dbc_tty_exit_port(port);
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index 40ac68e52cee..e266a47c4d48 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -988,6 +988,7 @@ static void fsl_otg_remove(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ disable_delayed_work_sync(&fsl_otg_dev->otg_event);
usb_remove_phy(&fsl_otg_dev->phy);
free_irq(fsl_otg_dev->irq, fsl_otg_dev);
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index f9b5c411aee4..2940f0c84e1b 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -149,7 +149,12 @@ struct i2c_client *isp1301_get_client(struct device_node *node)
return client;
/* non-DT: only one ISP1301 chip supported */
- return isp1301_i2c_client;
+ if (isp1301_i2c_client) {
+ get_device(&isp1301_i2c_client->dev);
+ return isp1301_i2c_client;
+ }
+
+ return NULL;
}
EXPORT_SYMBOL_GPL(isp1301_get_client);
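Note (illustrative, not part of the diff): with the change above, isp1301_get_client() takes a reference on the client device in the non-DT path too, so every caller is expected to balance it with put_device(), exactly as the lpc32xx_udc and ohci-nxp hunks do. A minimal consumer sketch, assuming the usual <linux/i2c.h> and <linux/of.h> context; the function name is hypothetical:

static int example_isp1301_attach(struct device_node *node)
{
	struct i2c_client *isp = isp1301_get_client(node);

	if (!isp)
		return -EPROBE_DEFER;

	/* ... program the transceiver over I2C ... */

	/* Balance the reference taken by isp1301_get_client(). */
	put_device(&isp->dev);
	return 0;
}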
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index 75fff2e4cbc6..56fc3ff5016f 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -713,11 +713,13 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
/* make sure pipe is not busy */
ret = usbhsp_pipe_barrier(pipe);
if (ret < 0) {
+ usbhsp_put_pipe(pipe);
dev_err(dev, "pipe setup failed %d\n", usbhs_pipe_number(pipe));
return NULL;
}
if (usbhsp_setup_pipecfg(pipe, is_host, dir_in, &pipecfg)) {
+ usbhsp_put_pipe(pipe);
dev_err(dev, "can't setup pipe\n");
return NULL;
}
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index b695f5ba9a40..939a98c2d3f7 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -98,7 +98,7 @@ UNUSUAL_DEV(0x125f, 0xa94a, 0x0160, 0x0160,
US_FL_NO_ATA_1X),
/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
-UNUSUAL_DEV(0x13fd, 0x3940, 0x0309, 0x0309,
+UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x0309,
"Initio Corporation",
"INIC-3069",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 8d111ad3b71b..d96ab106a980 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -766,12 +766,16 @@ int dp_altmode_probe(struct typec_altmode *alt)
if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) &
DP_CAP_PIN_ASSIGN_UFP_D(alt->vdo)) &&
!(DP_CAP_PIN_ASSIGN_UFP_D(port->vdo) &
- DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
+ DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo))) {
+ typec_altmode_put_plug(plug);
return -ENODEV;
+ }
dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
- if (!dp)
+ if (!dp) {
+ typec_altmode_put_plug(plug);
return -ENOMEM;
+ }
INIT_WORK(&dp->work, dp_altmode_work);
mutex_init(&dp->lock);
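Note (illustrative, not part of the diff): the two hunks above release the plug alternate-mode reference on the early returns of dp_altmode_probe(). A minimal sketch of that pattern, assuming the plug was obtained earlier in probe with typec_altmode_get_plug() from the Type-C class API; all other names are hypothetical:

static int example_claim_plug(struct typec_altmode *alt,
			      struct typec_altmode **out_plug)
{
	struct typec_altmode *plug = typec_altmode_get_plug(alt, TYPEC_PLUG_SOP_P);
	void *priv;

	priv = devm_kzalloc(&alt->dev, sizeof(u32), GFP_KERNEL);
	if (!priv) {
		/* Drop the plug reference on every early return. */
		typec_altmode_put_plug(plug);
		return -ENOMEM;
	}

	/* Keep the reference; the caller puts it at unbind time. */
	*out_plug = plug;
	return 0;
}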
diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
index 7fcb1e1de5d6..b812be4d0e67 100644
--- a/drivers/usb/typec/ucsi/Kconfig
+++ b/drivers/usb/typec/ucsi/Kconfig
@@ -96,6 +96,7 @@ config UCSI_LENOVO_YOGA_C630
config UCSI_HUAWEI_GAOKUN
tristate "UCSI Interface Driver for Huawei Matebook E Go"
depends on EC_HUAWEI_GAOKUN
+ depends on DRM || !DRM
select DRM_AUX_HPD_BRIDGE if DRM_BRIDGE && OF
help
This driver enables UCSI support on the Huawei Matebook E Go tablet,
diff --git a/drivers/usb/typec/ucsi/cros_ec_ucsi.c b/drivers/usb/typec/ucsi/cros_ec_ucsi.c
index d753f2188e25..eed2a7d0ebc6 100644
--- a/drivers/usb/typec/ucsi/cros_ec_ucsi.c
+++ b/drivers/usb/typec/ucsi/cros_ec_ucsi.c
@@ -105,12 +105,13 @@ static int cros_ucsi_async_control(struct ucsi *ucsi, u64 cmd)
return 0;
}
-static int cros_ucsi_sync_control(struct ucsi *ucsi, u64 cmd, u32 *cci)
+static int cros_ucsi_sync_control(struct ucsi *ucsi, u64 cmd, u32 *cci,
+ void *data, size_t size)
{
struct cros_ucsi_data *udata = ucsi_get_drvdata(ucsi);
int ret;
- ret = ucsi_sync_control_common(ucsi, cmd, cci);
+ ret = ucsi_sync_control_common(ucsi, cmd, cci, data, size);
switch (ret) {
case -EBUSY:
/* EC may return -EBUSY if CCI.busy is set.
diff --git a/drivers/usb/typec/ucsi/debugfs.c b/drivers/usb/typec/ucsi/debugfs.c
index 174f4d53b777..f3684ab787fe 100644
--- a/drivers/usb/typec/ucsi/debugfs.c
+++ b/drivers/usb/typec/ucsi/debugfs.c
@@ -37,9 +37,7 @@ static int ucsi_cmd(void *data, u64 val)
case UCSI_SET_USB:
case UCSI_SET_POWER_LEVEL:
case UCSI_READ_POWER_LEVEL:
- case UCSI_SET_PDOS:
- ucsi->message_in_size = 0;
- ret = ucsi_send_command(ucsi, val);
+ ret = ucsi_send_command(ucsi, val, NULL, 0);
break;
case UCSI_GET_CAPABILITY:
case UCSI_GET_CONNECTOR_CAPABILITY:
@@ -54,9 +52,9 @@ static int ucsi_cmd(void *data, u64 val)
case UCSI_GET_ATTENTION_VDO:
case UCSI_GET_CAM_CS:
case UCSI_GET_LPM_PPM_INFO:
- ucsi->message_in_size = sizeof(ucsi->debugfs->response);
- ret = ucsi_send_command(ucsi, val);
- memcpy(&ucsi->debugfs->response, ucsi->message_in, sizeof(ucsi->debugfs->response));
+ ret = ucsi_send_command(ucsi, val,
+ &ucsi->debugfs->response,
+ sizeof(ucsi->debugfs->response));
break;
default:
ret = -EOPNOTSUPP;
@@ -111,30 +109,6 @@ static int ucsi_vbus_volt_show(struct seq_file *m, void *v)
}
DEFINE_SHOW_ATTRIBUTE(ucsi_vbus_volt);
-static ssize_t ucsi_message_out_write(struct file *file,
- const char __user *data, size_t count, loff_t *ppos)
-{
- struct ucsi *ucsi = file->private_data;
- int ret;
-
- char *buf __free(kfree) = memdup_user_nul(data, count);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- ucsi->message_out_size = min(count / 2, UCSI_MAX_MESSAGE_OUT_LENGTH);
- ret = hex2bin(ucsi->message_out, buf, ucsi->message_out_size);
- if (ret)
- return ret;
-
- return count;
-}
-
-static const struct file_operations ucsi_message_out_fops = {
- .open = simple_open,
- .write = ucsi_message_out_write,
- .llseek = generic_file_llseek,
-};
-
void ucsi_debugfs_register(struct ucsi *ucsi)
{
ucsi->debugfs = kzalloc(sizeof(*ucsi->debugfs), GFP_KERNEL);
@@ -147,8 +121,6 @@ void ucsi_debugfs_register(struct ucsi *ucsi)
debugfs_create_file("peak_current", 0400, ucsi->debugfs->dentry, ucsi, &ucsi_peak_curr_fops);
debugfs_create_file("avg_current", 0400, ucsi->debugfs->dentry, ucsi, &ucsi_avg_curr_fops);
debugfs_create_file("vbus_voltage", 0400, ucsi->debugfs->dentry, ucsi, &ucsi_vbus_volt_fops);
- debugfs_create_file("message_out", 0200, ucsi->debugfs->dentry, ucsi,
- &ucsi_message_out_fops);
}
void ucsi_debugfs_unregister(struct ucsi *ucsi)
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index a09b4900ec76..8aae80b457d7 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -67,14 +67,11 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo)
}
command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(dp->con->num);
- ucsi->message_in_size = sizeof(cur);
- ret = ucsi_send_command(ucsi, command);
+ ret = ucsi_send_command(ucsi, command, &cur, sizeof(cur));
if (ret < 0) {
if (ucsi->version > 0x0100)
goto err_unlock;
cur = 0xff;
- } else {
- memcpy(&cur, ucsi->message_in, ucsi->message_in_size);
}
if (cur != 0xff) {
@@ -129,8 +126,7 @@ static int ucsi_displayport_exit(struct typec_altmode *alt)
}
command = UCSI_CMD_SET_NEW_CAM(dp->con->num, 0, dp->offset, 0);
- dp->con->ucsi->message_in_size = 0;
- ret = ucsi_send_command(dp->con->ucsi, command);
+ ret = ucsi_send_command(dp->con->ucsi, command, NULL, 0);
if (ret < 0)
goto out_unlock;
@@ -197,8 +193,7 @@ static int ucsi_displayport_configure(struct ucsi_dp *dp)
command = UCSI_CMD_SET_NEW_CAM(dp->con->num, 1, dp->offset, pins);
- dp->con->ucsi->message_in_size = 0;
- return ucsi_send_command(dp->con->ucsi, command);
+ return ucsi_send_command(dp->con->ucsi, command, NULL, 0);
}
static int ucsi_displayport_vdm(struct typec_altmode *alt,
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 9b3df776137a..a7b388dc7fa0 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -55,7 +55,8 @@ void ucsi_notify_common(struct ucsi *ucsi, u32 cci)
}
EXPORT_SYMBOL_GPL(ucsi_notify_common);
-int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci)
+int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size)
{
bool ack = UCSI_COMMAND(command) == UCSI_ACK_CC_CI;
int ret;
@@ -67,20 +68,6 @@ int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci)
reinit_completion(&ucsi->complete);
- if (ucsi->message_out_size > 0) {
- if (!ucsi->ops->write_message_out) {
- ucsi->message_out_size = 0;
- ret = -EOPNOTSUPP;
- goto out_clear_bit;
- }
-
- ret = ucsi->ops->write_message_out(ucsi, ucsi->message_out,
- ucsi->message_out_size);
- ucsi->message_out_size = 0;
- if (ret)
- goto out_clear_bit;
- }
-
ret = ucsi->ops->async_control(ucsi, command);
if (ret)
goto out_clear_bit;
@@ -97,10 +84,9 @@ out_clear_bit:
if (!ret && cci)
ret = ucsi->ops->read_cci(ucsi, cci);
- if (!ret && ucsi->message_in_size > 0 &&
+ if (!ret && data &&
(*cci & UCSI_CCI_COMMAND_COMPLETE))
- ret = ucsi->ops->read_message_in(ucsi, ucsi->message_in,
- ucsi->message_in_size);
+ ret = ucsi->ops->read_message_in(ucsi, data, size);
return ret;
}
@@ -117,25 +103,23 @@ static int ucsi_acknowledge(struct ucsi *ucsi, bool conn_ack)
ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
}
- ucsi->message_in_size = 0;
- return ucsi->ops->sync_control(ucsi, ctrl, NULL);
+ return ucsi->ops->sync_control(ucsi, ctrl, NULL, NULL, 0);
}
-static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci, bool conn_ack)
+static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size, bool conn_ack)
{
int ret, err;
*cci = 0;
- if (ucsi->message_in_size > UCSI_MAX_DATA_LENGTH(ucsi))
+ if (size > UCSI_MAX_DATA_LENGTH(ucsi))
return -EINVAL;
- ret = ucsi->ops->sync_control(ucsi, command, cci);
+ ret = ucsi->ops->sync_control(ucsi, command, cci, data, size);
- if (*cci & UCSI_CCI_BUSY) {
- ucsi->message_in_size = 0;
- return ucsi_run_command(ucsi, UCSI_CANCEL, cci, false) ?: -EBUSY;
- }
+ if (*cci & UCSI_CCI_BUSY)
+ return ucsi_run_command(ucsi, UCSI_CANCEL, cci, NULL, 0, false) ?: -EBUSY;
if (ret)
return ret;
@@ -167,13 +151,10 @@ static int ucsi_read_error(struct ucsi *ucsi, u8 connector_num)
int ret;
command = UCSI_GET_ERROR_STATUS | UCSI_CONNECTOR_NUMBER(connector_num);
- ucsi->message_in_size = sizeof(error);
- ret = ucsi_run_command(ucsi, command, &cci, false);
+ ret = ucsi_run_command(ucsi, command, &cci, &error, sizeof(error), false);
if (ret < 0)
return ret;
- memcpy(&error, ucsi->message_in, sizeof(error));
-
switch (error) {
case UCSI_ERROR_INCOMPATIBLE_PARTNER:
return -EOPNOTSUPP;
@@ -219,7 +200,8 @@ static int ucsi_read_error(struct ucsi *ucsi, u8 connector_num)
return -EIO;
}
-static int ucsi_send_command_common(struct ucsi *ucsi, u64 cmd, bool conn_ack)
+static int ucsi_send_command_common(struct ucsi *ucsi, u64 cmd,
+ void *data, size_t size, bool conn_ack)
{
u8 connector_num;
u32 cci;
@@ -247,7 +229,7 @@ static int ucsi_send_command_common(struct ucsi *ucsi, u64 cmd, bool conn_ack)
mutex_lock(&ucsi->ppm_lock);
- ret = ucsi_run_command(ucsi, cmd, &cci, conn_ack);
+ ret = ucsi_run_command(ucsi, cmd, &cci, data, size, conn_ack);
if (cci & UCSI_CCI_ERROR)
ret = ucsi_read_error(ucsi, connector_num);
@@ -256,9 +238,10 @@ static int ucsi_send_command_common(struct ucsi *ucsi, u64 cmd, bool conn_ack)
return ret;
}
-int ucsi_send_command(struct ucsi *ucsi, u64 command)
+int ucsi_send_command(struct ucsi *ucsi, u64 command,
+ void *data, size_t size)
{
- return ucsi_send_command_common(ucsi, command, false);
+ return ucsi_send_command_common(ucsi, command, data, size, false);
}
EXPORT_SYMBOL_GPL(ucsi_send_command);
@@ -336,8 +319,7 @@ void ucsi_altmode_update_active(struct ucsi_connector *con)
int i;
command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(con->num);
- con->ucsi->message_in_size = sizeof(cur);
- ret = ucsi_send_command(con->ucsi, command);
+ ret = ucsi_send_command(con->ucsi, command, &cur, sizeof(cur));
if (ret < 0) {
if (con->ucsi->version > 0x0100) {
dev_err(con->ucsi->dev,
@@ -345,8 +327,6 @@ void ucsi_altmode_update_active(struct ucsi_connector *con)
return;
}
cur = 0xff;
- } else {
- memcpy(&cur, con->ucsi->message_in, sizeof(cur));
}
if (cur < UCSI_MAX_ALTMODES)
@@ -530,8 +510,7 @@ ucsi_register_altmodes_nvidia(struct ucsi_connector *con, u8 recipient)
command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
command |= UCSI_GET_ALTMODE_OFFSET(i);
- ucsi->message_in_size = sizeof(alt);
- len = ucsi_send_command(con->ucsi, command);
+ len = ucsi_send_command(con->ucsi, command, &alt, sizeof(alt));
/*
* We are collecting all altmodes first and then registering.
* Some type-C device will return zero length data beyond last
@@ -540,8 +519,6 @@ ucsi_register_altmodes_nvidia(struct ucsi_connector *con, u8 recipient)
if (len < 0)
return len;
- memcpy(&alt, ucsi->message_in, sizeof(alt));
-
/* We got all altmodes, now break out and register them */
if (!len || !alt.svid)
break;
@@ -609,15 +586,12 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
command |= UCSI_GET_ALTMODE_OFFSET(i);
- con->ucsi->message_in_size = sizeof(alt);
- len = ucsi_send_command(con->ucsi, command);
+ len = ucsi_send_command(con->ucsi, command, alt, sizeof(alt));
if (len == -EBUSY)
continue;
if (len <= 0)
return len;
- memcpy(&alt, con->ucsi->message_in, sizeof(alt));
-
/*
* This code is requesting one alt mode at a time, but some PPMs
* may still return two. If that happens both alt modes need be
@@ -685,9 +659,7 @@ static int ucsi_get_connector_status(struct ucsi_connector *con, bool conn_ack)
UCSI_MAX_DATA_LENGTH(con->ucsi));
int ret;
- con->ucsi->message_in_size = size;
- ret = ucsi_send_command_common(con->ucsi, command, conn_ack);
- memcpy(&con->status, con->ucsi->message_in, size);
+ ret = ucsi_send_command_common(con->ucsi, command, &con->status, size, conn_ack);
return ret < 0 ? ret : 0;
}
@@ -710,9 +682,8 @@ static int ucsi_read_pdos(struct ucsi_connector *con,
command |= UCSI_GET_PDOS_PDO_OFFSET(offset);
command |= UCSI_GET_PDOS_NUM_PDOS(num_pdos - 1);
command |= is_source(role) ? UCSI_GET_PDOS_SRC_PDOS : 0;
- ucsi->message_in_size = num_pdos * sizeof(u32);
- ret = ucsi_send_command(ucsi, command);
- memcpy(pdos + offset, ucsi->message_in, num_pdos * sizeof(u32));
+ ret = ucsi_send_command(ucsi, command, pdos + offset,
+ num_pdos * sizeof(u32));
if (ret < 0 && ret != -ETIMEDOUT)
dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
@@ -799,9 +770,7 @@ static int ucsi_get_pd_message(struct ucsi_connector *con, u8 recipient,
command |= UCSI_GET_PD_MESSAGE_BYTES(len);
command |= UCSI_GET_PD_MESSAGE_TYPE(type);
- con->ucsi->message_in_size = len;
- ret = ucsi_send_command(con->ucsi, command);
- memcpy(data + offset, con->ucsi->message_in, len);
+ ret = ucsi_send_command(con->ucsi, command, data + offset, len);
if (ret < 0)
return ret;
}
@@ -966,9 +935,7 @@ static int ucsi_register_cable(struct ucsi_connector *con)
int ret;
command = UCSI_GET_CABLE_PROPERTY | UCSI_CONNECTOR_NUMBER(con->num);
- con->ucsi->message_in_size = sizeof(cable_prop);
- ret = ucsi_send_command(con->ucsi, command);
- memcpy(&cable_prop, con->ucsi->message_in, sizeof(cable_prop));
+ ret = ucsi_send_command(con->ucsi, command, &cable_prop, sizeof(cable_prop));
if (ret < 0) {
dev_err(con->ucsi->dev, "GET_CABLE_PROPERTY failed (%d)\n", ret);
return ret;
@@ -1029,9 +996,7 @@ static int ucsi_check_connector_capability(struct ucsi_connector *con)
return 0;
command = UCSI_GET_CONNECTOR_CAPABILITY | UCSI_CONNECTOR_NUMBER(con->num);
- con->ucsi->message_in_size = sizeof(con->cap);
- ret = ucsi_send_command(con->ucsi, command);
- memcpy(&con->cap, con->ucsi->message_in, sizeof(con->cap));
+ ret = ucsi_send_command(con->ucsi, command, &con->cap, sizeof(con->cap));
if (ret < 0) {
dev_err(con->ucsi->dev, "GET_CONNECTOR_CAPABILITY failed (%d)\n", ret);
return ret;
@@ -1415,8 +1380,7 @@ static int ucsi_reset_connector(struct ucsi_connector *con, bool hard)
else if (con->ucsi->version >= UCSI_VERSION_2_0)
command |= hard ? 0 : UCSI_CONNECTOR_RESET_DATA_VER_2_0;
- con->ucsi->message_in_size = 0;
- return ucsi_send_command(con->ucsi, command);
+ return ucsi_send_command(con->ucsi, command, NULL, 0);
}
static int ucsi_reset_ppm(struct ucsi *ucsi)
@@ -1497,8 +1461,7 @@ static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
{
int ret;
- con->ucsi->message_in_size = 0;
- ret = ucsi_send_command(con->ucsi, command);
+ ret = ucsi_send_command(con->ucsi, command, NULL, 0);
if (ret == -ETIMEDOUT) {
u64 c;
@@ -1506,8 +1469,7 @@ static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
ucsi_reset_ppm(con->ucsi);
c = UCSI_SET_NOTIFICATION_ENABLE | con->ucsi->ntfy;
- con->ucsi->message_in_size = 0;
- ucsi_send_command(con->ucsi, c);
+ ucsi_send_command(con->ucsi, c, NULL, 0);
ucsi_reset_connector(con, true);
}
@@ -1660,13 +1622,10 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
/* Get connector capability */
command = UCSI_GET_CONNECTOR_CAPABILITY;
command |= UCSI_CONNECTOR_NUMBER(con->num);
- ucsi->message_in_size = sizeof(con->cap);
- ret = ucsi_send_command(ucsi, command);
+ ret = ucsi_send_command(ucsi, command, &con->cap, sizeof(con->cap));
if (ret < 0)
goto out_unlock;
- memcpy(&con->cap, ucsi->message_in, sizeof(con->cap));
-
if (UCSI_CONCAP(con, OPMODE_DRP))
cap->data = TYPEC_PORT_DRD;
else if (UCSI_CONCAP(con, OPMODE_DFP))
@@ -1863,20 +1822,17 @@ static int ucsi_init(struct ucsi *ucsi)
/* Enable basic notifications */
ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
- ucsi->message_in_size = 0;
- ret = ucsi_send_command(ucsi, command);
+ ret = ucsi_send_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_reset;
/* Get PPM capabilities */
command = UCSI_GET_CAPABILITY;
- ucsi->message_in_size = BITS_TO_BYTES(UCSI_GET_CAPABILITY_SIZE);
- ret = ucsi_send_command(ucsi, command);
+ ret = ucsi_send_command(ucsi, command, &ucsi->cap,
+ BITS_TO_BYTES(UCSI_GET_CAPABILITY_SIZE));
if (ret < 0)
goto err_reset;
- memcpy(&ucsi->cap, ucsi->message_in, BITS_TO_BYTES(UCSI_GET_CAPABILITY_SIZE));
-
if (!ucsi->cap.num_connectors) {
ret = -ENODEV;
goto err_reset;
@@ -1906,8 +1862,7 @@ static int ucsi_init(struct ucsi *ucsi)
/* Enable all supported notifications */
ntfy = ucsi_get_supported_notifications(ucsi);
command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
- ucsi->message_in_size = 0;
- ret = ucsi_send_command(ucsi, command);
+ ret = ucsi_send_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_unregister;
@@ -1958,8 +1913,7 @@ static void ucsi_resume_work(struct work_struct *work)
/* Restore UCSI notification enable mask after system resume */
command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
- ucsi->message_in_size = 0;
- ret = ucsi_send_command(ucsi, command);
+ ret = ucsi_send_command(ucsi, command, NULL, 0);
if (ret < 0) {
dev_err(ucsi->dev, "failed to re-enable notifications (%d)\n", ret);
return;
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index f946b728c373..410389ef173a 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -29,10 +29,6 @@ struct dentry;
#define UCSI_MESSAGE_OUT 32
#define UCSIv2_MESSAGE_OUT 272
-/* Define maximum lengths for message buffers */
-#define UCSI_MAX_MESSAGE_IN_LENGTH 256
-#define UCSI_MAX_MESSAGE_OUT_LENGTH 256
-
/* UCSI versions */
#define UCSI_VERSION_1_0 0x0100
#define UCSI_VERSION_1_1 0x0110
@@ -69,7 +65,6 @@ struct dentry;
* @read_cci: Read CCI register
* @poll_cci: Read CCI register while polling with notifications disabled
* @read_message_in: Read message data from UCSI
- * @write_message_out: Write message data to UCSI
* @sync_control: Blocking control operation
* @async_control: Non-blocking control operation
* @update_altmodes: Squashes duplicate DP altmodes
@@ -85,8 +80,8 @@ struct ucsi_operations {
int (*read_cci)(struct ucsi *ucsi, u32 *cci);
int (*poll_cci)(struct ucsi *ucsi, u32 *cci);
int (*read_message_in)(struct ucsi *ucsi, void *val, size_t val_len);
- int (*write_message_out)(struct ucsi *ucsi, void *data, size_t data_len);
- int (*sync_control)(struct ucsi *ucsi, u64 command, u32 *cci);
+ int (*sync_control)(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size);
int (*async_control)(struct ucsi *ucsi, u64 command);
bool (*update_altmodes)(struct ucsi *ucsi, u8 recipient,
struct ucsi_altmode *orig,
@@ -137,7 +132,6 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_GET_PD_MESSAGE 0x15
#define UCSI_GET_CAM_CS 0x18
#define UCSI_SET_SINK_PATH 0x1c
-#define UCSI_SET_PDOS 0x1d
#define UCSI_READ_POWER_LEVEL 0x1e
#define UCSI_SET_USB 0x21
#define UCSI_GET_LPM_PPM_INFO 0x22
@@ -499,12 +493,6 @@ struct ucsi {
unsigned long quirks;
#define UCSI_NO_PARTNER_PDOS BIT(0) /* Don't read partner's PDOs */
#define UCSI_DELAY_DEVICE_PDOS	BIT(1) /* Reading PDOs fails until the partner is in PD mode */
-
- /* Fixed-size buffers for incoming and outgoing messages */
- u8 message_in[UCSI_MAX_MESSAGE_IN_LENGTH];
- size_t message_in_size;
- u8 message_out[UCSI_MAX_MESSAGE_OUT_LENGTH];
- size_t message_out_size;
};
#define UCSI_MAX_DATA_LENGTH(u) (((u)->version < UCSI_VERSION_2_0) ? 0x10 : 0xff)
@@ -567,13 +555,15 @@ struct ucsi_connector {
struct usb_pd_identity cable_identity;
};
-int ucsi_send_command(struct ucsi *ucsi, u64 command);
+int ucsi_send_command(struct ucsi *ucsi, u64 command,
+ void *retval, size_t size);
void ucsi_altmode_update_active(struct ucsi_connector *con);
int ucsi_resume(struct ucsi *ucsi);
void ucsi_notify_common(struct ucsi *ucsi, u32 cci);
-int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci);
+int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size);
#if IS_ENABLED(CONFIG_POWER_SUPPLY)
int ucsi_register_port_psy(struct ucsi_connector *con);
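Note (illustrative, not part of the diff): with message_in/message_out removed, callers pass the response buffer and its size straight to ucsi_send_command(), and the data is written there on UCSI_CCI_COMMAND_COMPLETE. A minimal sketch of the converted pattern, modelled on the GET_CONNECTOR_CAPABILITY callers above; the wrapper name is hypothetical:

static int example_get_connector_capability(struct ucsi_connector *con)
{
	u64 command;
	int ret;

	command = UCSI_GET_CONNECTOR_CAPABILITY | UCSI_CONNECTOR_NUMBER(con->num);
	/* Response lands directly in con->cap; no memcpy from message_in. */
	ret = ucsi_send_command(con->ucsi, command, &con->cap, sizeof(con->cap));
	if (ret < 0)
		return ret;

	return 0;
}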
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index f9beeb835238..6b92f296e985 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -86,21 +86,6 @@ static int ucsi_acpi_read_message_in(struct ucsi *ucsi, void *val, size_t val_le
return 0;
}
-static int ucsi_acpi_write_message_out(struct ucsi *ucsi, void *data, size_t data_len)
-{
- struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
-
- if (!data || !data_len)
- return -EINVAL;
-
- if (ucsi->version <= UCSI_VERSION_1_2)
- memcpy(ua->base + UCSI_MESSAGE_OUT, data, data_len);
- else
- memcpy(ua->base + UCSIv2_MESSAGE_OUT, data, data_len);
-
- return 0;
-}
-
static int ucsi_acpi_async_control(struct ucsi *ucsi, u64 command)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
@@ -116,19 +101,19 @@ static const struct ucsi_operations ucsi_acpi_ops = {
.read_cci = ucsi_acpi_read_cci,
.poll_cci = ucsi_acpi_poll_cci,
.read_message_in = ucsi_acpi_read_message_in,
- .write_message_out = ucsi_acpi_write_message_out,
.sync_control = ucsi_sync_control_common,
.async_control = ucsi_acpi_async_control
};
-static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command, u32 *cci)
+static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *val, size_t len)
{
u16 bogus_change = UCSI_CONSTAT_POWER_LEVEL_CHANGE |
UCSI_CONSTAT_PDOS_CHANGE;
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
int ret;
- ret = ucsi_sync_control_common(ucsi, command, cci);
+ ret = ucsi_sync_control_common(ucsi, command, cci, val, len);
if (ret < 0)
return ret;
@@ -140,8 +125,8 @@ static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command, u32 *cci)
if (UCSI_COMMAND(ua->cmd) == UCSI_GET_CONNECTOR_STATUS &&
ua->check_bogus_event) {
/* Clear the bogus change */
- if (*(u16 *)ucsi->message_in == bogus_change)
- *(u16 *)ucsi->message_in = 0;
+ if (*(u16 *)val == bogus_change)
+ *(u16 *)val = 0;
ua->check_bogus_event = false;
}
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index ead1b2a25c79..d83a0051c737 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -606,7 +606,8 @@ static int ucsi_ccg_async_control(struct ucsi *ucsi, u64 command)
return ccg_write(uc, reg, (u8 *)&command, sizeof(command));
}
-static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command, u32 *cci)
+static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size)
{
struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
struct ucsi_connector *con;
@@ -628,16 +629,16 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command, u32 *cci)
ucsi_ccg_update_set_new_cam_cmd(uc, con, &command);
}
- ret = ucsi_sync_control_common(ucsi, command, cci);
+ ret = ucsi_sync_control_common(ucsi, command, cci, data, size);
switch (UCSI_COMMAND(command)) {
case UCSI_GET_CURRENT_CAM:
if (uc->has_multiple_dp)
- ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)ucsi->message_in);
+ ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)data);
break;
case UCSI_GET_ALTERNATE_MODES:
if (UCSI_ALTMODE_RECIPIENT(command) == UCSI_RECIPIENT_SOP) {
- struct ucsi_altmode *alt = (struct ucsi_altmode *)ucsi->message_in;
+ struct ucsi_altmode *alt = data;
if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
ucsi_ccg_nvidia_altmode(uc, alt, command);
@@ -645,7 +646,7 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command, u32 *cci)
break;
case UCSI_GET_CAPABILITY:
if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
- struct ucsi_capability *cap = (struct ucsi_capability *)ucsi->message_in;
+ struct ucsi_capability *cap = data;
cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
}
diff --git a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
index 299081444caa..0187c1c4b21a 100644
--- a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
+++ b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
@@ -88,7 +88,8 @@ static int yoga_c630_ucsi_async_control(struct ucsi *ucsi, u64 command)
static int yoga_c630_ucsi_sync_control(struct ucsi *ucsi,
u64 command,
- u32 *cci)
+ u32 *cci,
+ void *data, size_t size)
{
int ret;
@@ -106,8 +107,8 @@ static int yoga_c630_ucsi_sync_control(struct ucsi *ucsi,
};
dev_dbg(ucsi->dev, "faking DP altmode for con1\n");
- memset(ucsi->message_in, 0, ucsi->message_in_size);
- memcpy(ucsi->message_in, &alt, min(sizeof(alt), ucsi->message_in_size));
+ memset(data, 0, size);
+ memcpy(data, &alt, min(sizeof(alt), size));
*cci = UCSI_CCI_COMMAND_COMPLETE | UCSI_SET_CCI_LENGTH(sizeof(alt));
return 0;
}
@@ -120,18 +121,18 @@ static int yoga_c630_ucsi_sync_control(struct ucsi *ucsi,
if (UCSI_COMMAND(command) == UCSI_GET_ALTERNATE_MODES &&
UCSI_GET_ALTMODE_GET_CONNECTOR_NUMBER(command) == 2) {
dev_dbg(ucsi->dev, "ignoring altmodes for con2\n");
- memset(ucsi->message_in, 0, ucsi->message_in_size);
+ memset(data, 0, size);
*cci = UCSI_CCI_COMMAND_COMPLETE;
return 0;
}
- ret = ucsi_sync_control_common(ucsi, command, cci);
+ ret = ucsi_sync_control_common(ucsi, command, cci, data, size);
if (ret < 0)
return ret;
/* UCSI_GET_CURRENT_CAM is off-by-one on all ports */
- if (UCSI_COMMAND(command) == UCSI_GET_CURRENT_CAM && ucsi->message_in_size > 0)
- ucsi->message_in[0]--;
+ if (UCSI_COMMAND(command) == UCSI_GET_CURRENT_CAM && data)
+ ((u8 *)data)[0]--;
return ret;
}
diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c
index 84d142a47ec6..b45a24d00387 100644
--- a/drivers/vfio/pci/nvgrace-gpu/main.c
+++ b/drivers/vfio/pci/nvgrace-gpu/main.c
@@ -561,7 +561,7 @@ nvgrace_gpu_map_and_read(struct nvgrace_gpu_pci_core_device *nvdev,
ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false,
nvdev->resmem.ioaddr,
buf, offset, mem_count,
- 0, 0, false);
+ 0, 0, false, VFIO_PCI_IO_WIDTH_8);
}
return ret;
@@ -693,7 +693,7 @@ nvgrace_gpu_map_and_write(struct nvgrace_gpu_pci_core_device *nvdev,
ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false,
nvdev->resmem.ioaddr,
(char __user *)buf, pos, mem_count,
- 0, 0, true);
+ 0, 0, true, VFIO_PCI_IO_WIDTH_8);
}
return ret;
diff --git a/drivers/vfio/pci/pds/dirty.c b/drivers/vfio/pci/pds/dirty.c
index 481992142f79..4915a7c1c491 100644
--- a/drivers/vfio/pci/pds/dirty.c
+++ b/drivers/vfio/pci/pds/dirty.c
@@ -292,8 +292,11 @@ static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio,
len = num_ranges * sizeof(*region_info);
node = interval_tree_iter_first(ranges, 0, ULONG_MAX);
- if (!node)
- return -EINVAL;
+ if (!node) {
+ err = -EINVAL;
+ goto out_free_region_info;
+ }
+
for (int i = 0; i < num_ranges; i++) {
struct pds_lm_dirty_region_info *ri = &region_info[i];
u64 region_size = node->last - node->start + 1;
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 6192788c8ba3..b38627b35c35 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -135,7 +135,8 @@ VFIO_IORDWR(64)
ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
void __iomem *io, char __user *buf,
loff_t off, size_t count, size_t x_start,
- size_t x_end, bool iswrite)
+ size_t x_end, bool iswrite,
+ enum vfio_pci_io_width max_width)
{
ssize_t done = 0;
int ret;
@@ -150,20 +151,19 @@ ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
else
fillable = 0;
- if (fillable >= 8 && !(off % 8)) {
+ if (fillable >= 8 && !(off % 8) && max_width >= 8) {
ret = vfio_pci_iordwr64(vdev, iswrite, test_mem,
io, buf, off, &filled);
if (ret)
return ret;
- } else
- if (fillable >= 4 && !(off % 4)) {
+ } else if (fillable >= 4 && !(off % 4) && max_width >= 4) {
ret = vfio_pci_iordwr32(vdev, iswrite, test_mem,
io, buf, off, &filled);
if (ret)
return ret;
- } else if (fillable >= 2 && !(off % 2)) {
+ } else if (fillable >= 2 && !(off % 2) && max_width >= 2) {
ret = vfio_pci_iordwr16(vdev, iswrite, test_mem,
io, buf, off, &filled);
if (ret)
@@ -234,6 +234,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
void __iomem *io;
struct resource *res = &vdev->pdev->resource[bar];
ssize_t done;
+ enum vfio_pci_io_width max_width = VFIO_PCI_IO_WIDTH_8;
if (pci_resource_start(pdev, bar))
end = pci_resource_len(pdev, bar);
@@ -262,6 +263,16 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
if (!io)
return -ENOMEM;
x_end = end;
+
+ /*
+	 * Certain devices (e.g. the Intel X710) don't support qword
+	 * access to the ROM BAR; such accesses can trigger PCI AER
+	 * errors.
+	 *
+	 * Disable qword access to the ROM BAR universally, which
+	 * worked reliably for years before qword access was enabled.
+ */
+ max_width = VFIO_PCI_IO_WIDTH_4;
} else {
int ret = vfio_pci_core_setup_barmap(vdev, bar);
if (ret) {
@@ -278,7 +289,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
}
done = vfio_pci_core_do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos,
- count, x_start, x_end, iswrite);
+ count, x_start, x_end, iswrite, max_width);
if (done >= 0)
*ppos += done;
@@ -352,7 +363,7 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
* to the memory enable bit in the command register.
*/
done = vfio_pci_core_do_io_rw(vdev, false, iomem, buf, off, count,
- 0, 0, iswrite);
+ 0, 0, iswrite, VFIO_PCI_IO_WIDTH_4);
vga_put(vdev->pdev, rsrc);
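Note (illustrative, not part of the diff): enum vfio_pci_io_width is not defined in these hunks; the max_width >= 8/4/2 comparisons suggest it encodes the maximum access width in bytes. The sketch below is purely an assumption consistent with those checks (the real header may define additional widths):

enum vfio_pci_io_width {
	VFIO_PCI_IO_WIDTH_2 = 2,	/* limit accesses to word size */
	VFIO_PCI_IO_WIDTH_4 = 4,	/* limit accesses to dword size */
	VFIO_PCI_IO_WIDTH_8 = 8,	/* allow qword accesses */
};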
diff --git a/drivers/vfio/pci/xe/main.c b/drivers/vfio/pci/xe/main.c
index 0156b53c678b..2a5eb9260ec7 100644
--- a/drivers/vfio/pci/xe/main.c
+++ b/drivers/vfio/pci/xe/main.c
@@ -250,6 +250,7 @@ xe_vfio_pci_alloc_file(struct xe_vfio_pci_core_device *xe_vdev,
struct xe_vfio_pci_migration_file *migf;
const struct file_operations *fops;
int flags;
+ int ret;
migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
if (!migf)
@@ -259,8 +260,9 @@ xe_vfio_pci_alloc_file(struct xe_vfio_pci_core_device *xe_vdev,
flags = type == XE_VFIO_FILE_SAVE ? O_RDONLY : O_WRONLY;
migf->filp = anon_inode_getfile("xe_vfio_mig", fops, migf, flags);
if (IS_ERR(migf->filp)) {
+ ret = PTR_ERR(migf->filp);
kfree(migf);
- return ERR_CAST(migf->filp);
+ return ERR_PTR(ret);
}
mutex_init(&migf->lock);
@@ -504,6 +506,7 @@ static const struct vfio_device_ops xe_vfio_pci_ops = {
.open_device = xe_vfio_pci_open_device,
.close_device = xe_vfio_pci_close_device,
.ioctl = vfio_pci_core_ioctl,
+ .get_region_info_caps = vfio_pci_ioctl_get_region_info,
.device_feature = vfio_pci_core_ioctl_feature,
.read = vfio_pci_core_read,
.write = vfio_pci_core_write,
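Note (illustrative, not part of the diff): the xe hunk above stops reading migf->filp after kfree(migf) by saving the errno first. The same pattern in isolation, with hypothetical names:

static struct file *example_check_file(struct file *filp, void *obj)
{
	if (IS_ERR(filp)) {
		int err = PTR_ERR(filp);	/* read the error before freeing obj */

		kfree(obj);
		return ERR_PTR(err);
	}

	return filp;
}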
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 0298ddc34824..552cfb53498a 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -66,14 +66,15 @@ static u32 vhost_transport_get_local_cid(void)
return VHOST_VSOCK_DEFAULT_HOST_CID;
}
-/* Callers that dereference the return value must hold vhost_vsock_mutex or the
- * RCU read lock.
+/* Callers must be in an RCU read section or hold the vhost_vsock_mutex.
+ * The returned pointer may only be dereferenced while inside that section.
*/
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
struct vhost_vsock *vsock;
- hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
+ hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid,
+ lockdep_is_held(&vhost_vsock_mutex)) {
u32 other_cid = vsock->guest_cid;
/* Skip instances that have no CID yet */
@@ -709,9 +710,15 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
* executing.
*/
+ rcu_read_lock();
+
/* If the peer is still valid, no need to reset connection */
- if (vhost_vsock_get(vsk->remote_addr.svm_cid))
+ if (vhost_vsock_get(vsk->remote_addr.svm_cid)) {
+ rcu_read_unlock();
return;
+ }
+
+ rcu_read_unlock();
/* If the close timeout is pending, let it expire. This avoids races
* with the timeout callback.
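Note (illustrative, not part of the diff): the updated comment and the reset_orphans hunk spell out the rule that both the vhost_vsock_get() lookup and any use of its result stay inside one RCU read-side section (or under vhost_vsock_mutex). A minimal sketch of a compliant caller; the function name is hypothetical:

static bool example_peer_exists(u32 guest_cid)
{
	struct vhost_vsock *vsock;
	bool exists;

	rcu_read_lock();
	vsock = vhost_vsock_get(guest_cid);
	exists = vsock != NULL;		/* dereference vsock only inside this section */
	rcu_read_unlock();

	return exists;
}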
diff --git a/drivers/xen/acpi.c b/drivers/xen/acpi.c
index d2ee605c5ca1..eab28cfe9939 100644
--- a/drivers/xen/acpi.c
+++ b/drivers/xen/acpi.c
@@ -89,11 +89,11 @@ int xen_acpi_get_gsi_info(struct pci_dev *dev,
int *trigger_out,
int *polarity_out)
{
- int gsi;
+ u32 gsi;
u8 pin;
struct acpi_prt_entry *entry;
int trigger = ACPI_LEVEL_SENSITIVE;
- int polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ?
+ int ret, polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ?
ACPI_ACTIVE_HIGH : ACPI_ACTIVE_LOW;
if (!dev || !gsi_out || !trigger_out || !polarity_out)
@@ -105,17 +105,18 @@ int xen_acpi_get_gsi_info(struct pci_dev *dev,
entry = acpi_pci_irq_lookup(dev, pin);
if (entry) {
+ ret = 0;
if (entry->link)
- gsi = acpi_pci_link_allocate_irq(entry->link,
+ ret = acpi_pci_link_allocate_irq(entry->link,
entry->index,
&trigger, &polarity,
- NULL);
+ NULL, &gsi);
else
gsi = entry->index;
} else
- gsi = -1;
+ ret = -ENODEV;
- if (gsi < 0)
+ if (ret < 0)
return -EINVAL;
*gsi_out = gsi;