Diffstat (limited to 'drivers')
-rw-r--r--drivers/accel/amdxdna/aie2_ctx.c65
-rw-r--r--drivers/accel/amdxdna/aie2_error.c8
-rw-r--r--drivers/accel/amdxdna/aie2_message.c10
-rw-r--r--drivers/accel/amdxdna/aie2_pci.h10
-rw-r--r--drivers/accel/amdxdna/aie2_smu.c2
-rw-r--r--drivers/accel/amdxdna/amdxdna_ctx.c2
-rw-r--r--drivers/accel/amdxdna/amdxdna_ctx.h3
-rw-r--r--drivers/accel/amdxdna/amdxdna_mailbox.c7
-rw-r--r--drivers/accel/amdxdna/amdxdna_mailbox.h2
-rw-r--r--drivers/accel/amdxdna/amdxdna_mailbox_helper.c6
-rw-r--r--drivers/accel/amdxdna/amdxdna_mailbox_helper.h2
-rw-r--r--drivers/accel/amdxdna/amdxdna_pci_drv.c5
-rw-r--r--drivers/accel/ivpu/ivpu_debugfs.c89
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c98
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h14
-rw-r--r--drivers/accel/ivpu/ivpu_fw.c9
-rw-r--r--drivers/accel/ivpu/ivpu_gem.c43
-rw-r--r--drivers/accel/ivpu/ivpu_gem.h1
-rw-r--r--drivers/accel/ivpu/ivpu_hw.c110
-rw-r--r--drivers/accel/ivpu/ivpu_hw.h14
-rw-r--r--drivers/accel/ivpu/ivpu_hw_btrs.c10
-rw-r--r--drivers/accel/ivpu/ivpu_hw_btrs.h1
-rw-r--r--drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h1
-rw-r--r--drivers/accel/ivpu/ivpu_hw_ip.c4
-rw-r--r--drivers/accel/ivpu/ivpu_hw_reg_io.h64
-rw-r--r--drivers/accel/ivpu/ivpu_ipc.c7
-rw-r--r--drivers/accel/ivpu/ivpu_ipc.h2
-rw-r--r--drivers/accel/ivpu/ivpu_job.c522
-rw-r--r--drivers/accel/ivpu/ivpu_job.h8
-rw-r--r--drivers/accel/ivpu/ivpu_jsm_msg.c29
-rw-r--r--drivers/accel/ivpu/ivpu_mmu.c121
-rw-r--r--drivers/accel/ivpu/ivpu_mmu.h2
-rw-r--r--drivers/accel/ivpu/ivpu_mmu_context.c13
-rw-r--r--drivers/accel/ivpu/ivpu_mmu_context.h2
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c94
-rw-r--r--drivers/accel/ivpu/ivpu_pm.h2
-rw-r--r--drivers/accel/ivpu/ivpu_sysfs.c35
-rw-r--r--drivers/accel/qaic/mhi_controller.c360
-rw-r--r--drivers/accel/qaic/mhi_controller.h2
-rw-r--r--drivers/accel/qaic/qaic.h14
-rw-r--r--drivers/accel/qaic/qaic_drv.c97
-rw-r--r--drivers/accel/qaic/qaic_timesync.c2
-rw-r--r--drivers/accel/qaic/sahara.c43
-rw-r--r--drivers/acpi/arm64/gtdt.c12
-rw-r--r--drivers/acpi/platform_profile.c8
-rw-r--r--drivers/acpi/prmt.c4
-rw-r--r--drivers/acpi/property.c10
-rw-r--r--drivers/acpi/resource.c6
-rw-r--r--drivers/ata/libahci_platform.c2
-rw-r--r--drivers/base/Makefile2
-rw-r--r--drivers/base/base.h1
-rw-r--r--drivers/base/faux.c232
-rw-r--r--drivers/base/init.c1
-rw-r--r--drivers/base/power/main.c21
-rw-r--r--drivers/base/regmap/regmap-irq.c2
-rw-r--r--drivers/block/sunvdc.c4
-rw-r--r--drivers/bluetooth/btintel_pcie.c5
-rw-r--r--drivers/bus/mhi/host/boot.c203
-rw-r--r--drivers/bus/mhi/host/init.c2
-rw-r--r--drivers/bus/mhi/host/internal.h7
-rw-r--r--drivers/bus/moxtet.c2
-rw-r--r--drivers/clocksource/jcore-pit.c15
-rw-r--r--drivers/cpufreq/Kconfig.arm3
-rw-r--r--drivers/cpufreq/amd-pstate.c20
-rw-r--r--drivers/cpufreq/cpufreq.c3
-rw-r--r--drivers/crypto/ccp/sp-dev.c14
-rw-r--r--drivers/dma-buf/dma-fence-unwrap.c11
-rw-r--r--drivers/dma-buf/st-dma-fence-unwrap.c268
-rw-r--r--drivers/dma/tegra210-adma.c17
-rw-r--r--drivers/edac/qcom_edac.c4
-rw-r--r--drivers/firmware/Kconfig2
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c4
-rw-r--r--drivers/firmware/efi/efi.c6
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c3
-rw-r--r--drivers/firmware/efi/libstub/relocate.c3
-rw-r--r--drivers/firmware/imx/Kconfig1
-rw-r--r--drivers/firmware/iscsi_ibft.c5
-rw-r--r--drivers/gpio/Kconfig1
-rw-r--r--drivers/gpio/gpio-bcm-kona.c71
-rw-r--r--drivers/gpio/gpio-pca953x.c19
-rw-r--r--drivers/gpio/gpio-sim.c13
-rw-r--r--drivers/gpio/gpio-stmpe.c15
-rw-r--r--drivers/gpio/gpio-vf610.c4
-rw-r--r--drivers/gpio/gpiolib-acpi.c14
-rw-r--r--drivers/gpio/gpiolib.c106
-rw-r--r--drivers/gpio/gpiolib.h4
-rw-r--r--drivers/gpu/drm/Kconfig13
-rw-r--r--drivers/gpu/drm/Makefile20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c12
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c11
-rw-r--r--drivers/gpu/drm/arm/Kconfig1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c6
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c2
-rw-r--r--drivers/gpu/drm/ast/Makefile2
-rw-r--r--drivers/gpu/drm/ast/ast_cursor.c309
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c267
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c8
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h73
-rw-r--r--drivers/gpu/drm/ast/ast_main.c215
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c588
-rw-r--r--drivers/gpu/drm/ast/ast_post.c51
-rw-r--r--drivers/gpu/drm/ast/ast_reg.h31
-rw-r--r--drivers/gpu/drm/ast/ast_tables.h187
-rw-r--r--drivers/gpu/drm/ast/ast_vbios.c241
-rw-r--r--drivers/gpu/drm/ast/ast_vbios.h108
-rw-r--r--drivers/gpu/drm/bridge/Kconfig8
-rw-r--r--drivers/gpu/drm/bridge/Makefile4
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c5
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c16
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c29
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c8
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c19
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c7
-rw-r--r--drivers/gpu/drm/bridge/fsl-ldb.c5
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c7
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qm-ldb.c10
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c21
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c5
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c10
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c8
-rw-r--r--drivers/gpu/drm/bridge/ite-it6263.c11
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c16
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c5
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c5
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c18
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c7
-rw-r--r--drivers/gpu/drm/bridge/microchip-lvds.c3
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c11
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c1
-rw-r--r--drivers/gpu/drm/bridge/panel.c16
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c1
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c5
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c8
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c4
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c9
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c6
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c6
-rw-r--r--drivers/gpu/drm/bridge/tc358762.c11
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c20
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c1
-rw-r--r--drivers/gpu/drm/bridge/tda998x_drv.c (renamed from drivers/gpu/drm/i2c/tda998x_drv.c)49
-rw-r--r--drivers/gpu/drm/bridge/ti-dlpc3433.c6
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c151
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c13
-rw-r--r--drivers/gpu/drm/bridge/ti-tdp158.c6
-rw-r--r--drivers/gpu/drm/ci/build.sh2
-rw-r--r--drivers/gpu/drm/ci/build.yml104
-rw-r--r--drivers/gpu/drm/ci/container.yml22
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml197
-rwxr-xr-xdrivers/gpu/drm/ci/igt_runner.sh13
-rw-r--r--drivers/gpu/drm/ci/image-tags.yml11
-rwxr-xr-xdrivers/gpu/drm/ci/lava-submit.sh100
-rw-r--r--drivers/gpu/drm/ci/test.yml37
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt8
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-fails.txt20
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-fails.txt8
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-fails.txt22
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-fails.txt31
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-skips.txt298
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt18
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt15
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt112
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt55
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt13
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-fails.txt15
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt12
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt3
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt26
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt13
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt56
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-fails.txt10
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-skips.txt543
-rw-r--r--drivers/gpu/drm/display/drm_bridge_connector.c2
-rw-r--r--drivers/gpu/drm/display/drm_dp_cec.c14
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c60
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c31
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_state_helper.c2
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c329
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c37
-rw-r--r--drivers/gpu/drm/drm_bridge.c79
-rw-r--r--drivers/gpu/drm/drm_buddy.c11
-rw-r--r--drivers/gpu/drm/drm_crtc.c20
-rw-r--r--drivers/gpu/drm/drm_crtc_helper_internal.h2
-rw-r--r--drivers/gpu/drm/drm_debugfs.c2
-rw-r--r--drivers/gpu/drm/drm_drv.c68
-rw-r--r--drivers/gpu/drm/drm_file.c26
-rw-r--r--drivers/gpu/drm/drm_format_helper.c69
-rw-r--r--drivers/gpu/drm/drm_managed.c8
-rw-r--r--drivers/gpu/drm/drm_of.c2
-rw-r--r--drivers/gpu/drm/drm_panel.c5
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c46
-rw-r--r--drivers/gpu/drm/drm_panic.c2
-rw-r--r--drivers/gpu/drm/drm_panic_qr.rs108
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c35
-rw-r--r--drivers/gpu/drm/drm_writeback.c184
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig2
-rw-r--r--drivers/gpu/drm/i2c/Kconfig36
-rw-r--r--drivers/gpu/drm/i2c/Makefile10
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7017.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7xxx.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ivch.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ns2501.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_sil164.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_tfp410.c2
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo_dev.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c2
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c4
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c40
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c4
-rw-r--r--drivers/gpu/drm/imagination/pvr_job.c12
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c18
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-tve.c2
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c4
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c16
-rw-r--r--drivers/gpu/drm/loongson/lsdc_plane.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.c2
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.c5
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_dsi.c5
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c5
-rw-r--r--drivers/gpu/drm/mgag200/Makefile1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh5.c204
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c7
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c39
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c24
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.h6
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c53
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h11
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c17
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml11
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c17
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_drv.c (renamed from drivers/gpu/drm/i2c/ch7006_drv.c)32
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_mode.c (renamed from drivers/gpu/drm/i2c/ch7006_mode.c)8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_priv.h (renamed from drivers/gpu/drm/i2c/ch7006_priv.h)11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/sil164_drv.c (renamed from drivers/gpu/drm/i2c/sil164_drv.c)35
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c (renamed from drivers/gpu/drm/drm_encoder_slave.c)95
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c24
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/include/dispnv04/i2c/ch7006.h87
-rw-r--r--drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h220
-rw-r--r--drivers/gpu/drm/nouveau/include/dispnv04/i2c/sil164.h64
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c539
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c14
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c17
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.h1
-rw-r--r--drivers/gpu/drm/panel/panel-ebbg-ft8719.c67
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c23
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83102.c380
-rw-r--r--drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c8
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c91
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c59
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-r66451.c181
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c178
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c20
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.c22
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c15
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.c1
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.h5
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.c26
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.h2
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c120
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.h3
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c89
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.h3
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c5
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c4
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c5
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c2
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c234
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c4
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c36
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c92
-rw-r--r--drivers/gpu/drm/solomon/ssd130x-spi.c7
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c6
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c12
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c2
-rw-r--r--drivers/gpu/drm/stm/lvds.c5
-rw-r--r--drivers/gpu/drm/tegra/dc.c3
-rw-r--r--drivers/gpu/drm/tegra/dsi.c2
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c2
-rw-r--r--drivers/gpu/drm/tegra/sor.c2
-rw-r--r--drivers/gpu/drm/tests/Makefile1
-rw-r--r--drivers/gpu/drm/tests/drm_atomic_state_test.c375
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c30
-rw-r--r--drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c200
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc_regs.h2
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c183
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c50
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c25
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h1
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c67
-rw-r--r--drivers/gpu/drm/vc4/Kconfig1
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c33
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h27
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c183
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c30
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c11
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c41
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h11
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c14
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c96
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c36
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c154
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vram.c9
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c35
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c24
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h11
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c32
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c61
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c1
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c37
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c2
-rw-r--r--drivers/gpu/drm/xe/regs/xe_oa_regs.h6
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c40
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.h2
-rw-r--r--drivers/gpu/drm/xe/xe_device.c7
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c2
-rw-r--r--drivers/gpu/drm/xe/xe_execlist.c15
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.c17
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c14
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.h6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c7
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c5
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c14
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c21
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c54
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c165
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp_audio.c4
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c2
-rw-r--r--drivers/gpu/host1x/dev.c2
-rw-r--r--drivers/gpu/host1x/intr.c2
-rw-r--r--drivers/hid/Kconfig15
-rw-r--r--drivers/hid/amd-sfh-hid/Kconfig1
-rw-r--r--drivers/hid/hid-apple.c8
-rw-r--r--drivers/hid/hid-corsair-void.c3
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-lenovo.c7
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/hid-steam.c46
-rw-r--r--drivers/hid/hid-thrustmaster.c2
-rw-r--r--drivers/hid/hid-topre.c7
-rw-r--r--drivers/hid/hid-winwing.c2
-rw-r--r--drivers/hid/i2c-hid/Kconfig2
-rw-r--r--drivers/hid/intel-ish-hid/Kconfig1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c15
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c7
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h2
-rw-r--r--drivers/hid/intel-thc-hid/Kconfig1
-rw-r--r--drivers/hid/surface-hid/Kconfig2
-rw-r--r--drivers/hid/usbhid/Kconfig3
-rw-r--r--drivers/i2c/i2c-core-base.c128
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h1
-rw-r--r--drivers/iommu/amd/init.c38
-rw-r--r--drivers/iommu/exynos-iommu.c6
-rw-r--r--drivers/iommu/intel/prq.c4
-rw-r--r--drivers/iommu/io-pgfault.c1
-rw-r--r--drivers/iommu/iommu.c2
-rw-r--r--drivers/irqchip/Kconfig1
-rw-r--r--drivers/irqchip/irq-apple-aic.c3
-rw-r--r--drivers/irqchip/irq-gic-v3.c53
-rw-r--r--drivers/irqchip/irq-jcore-aic.c2
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c3
-rw-r--r--drivers/irqchip/irq-partition-percpu.c2
-rw-r--r--drivers/irqchip/irq-riscv-imsic-early.c2
-rw-r--r--drivers/irqchip/irq-thead-c900-aclint-sswi.c2
-rw-r--r--drivers/irqchip/qcom-pdc.c67
-rw-r--r--drivers/md/md-linear.c4
-rw-r--r--drivers/md/raid0.c4
-rw-r--r--drivers/md/raid1.c4
-rw-r--r--drivers/md/raid10.c4
-rw-r--r--drivers/media/cec/i2c/Kconfig9
-rw-r--r--drivers/media/cec/i2c/Makefile1
-rw-r--r--drivers/media/cec/i2c/tda9950.c (renamed from drivers/gpu/drm/i2c/tda9950.c)0
-rw-r--r--drivers/mfd/syscon.c29
-rw-r--r--drivers/mmc/host/mtk-sd.c31
-rw-r--r--drivers/mmc/host/sdhci_am654.c30
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c44
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c24
-rw-r--r--drivers/mtd/spi-nor/sst.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c5
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c10
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-core.c2
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_devlink.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c16
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c58
-rw-r--r--drivers/net/ethernet/google/gve/gve.h10
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c6
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c2
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c150
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h43
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c5
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c5
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c40
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c50
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c1
-rw-r--r--drivers/net/geneve.c16
-rw-r--r--drivers/net/gtp.c5
-rw-r--r--drivers/net/phy/phylink.c15
-rw-r--r--drivers/net/pse-pd/pd692x0.c2
-rw-r--r--drivers/net/pse-pd/pse_core.c4
-rw-r--r--drivers/net/team/team_core.c4
-rw-r--r--drivers/net/tun.c14
-rw-r--r--drivers/net/vmxnet3/vmxnet3_xdp.c14
-rw-r--r--drivers/net/vxlan/vxlan_core.c7
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c61
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c4
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c2
-rw-r--r--drivers/nvme/host/apple.c55
-rw-r--r--drivers/nvme/host/core.c10
-rw-r--r--drivers/nvme/host/fc.c68
-rw-r--r--drivers/nvme/host/ioctl.c3
-rw-r--r--drivers/nvme/host/pci.c14
-rw-r--r--drivers/nvme/host/sysfs.c2
-rw-r--r--drivers/nvme/host/tcp.c50
-rw-r--r--drivers/nvme/target/admin-cmd.c1
-rw-r--r--drivers/nvme/target/core.c40
-rw-r--r--drivers/nvme/target/fabrics-cmd.c2
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c2
-rw-r--r--drivers/nvme/target/nvmet.h16
-rw-r--r--drivers/nvme/target/pci-epf.c39
-rw-r--r--drivers/nvme/target/rdma.c33
-rw-r--r--drivers/of/address.c5
-rw-r--r--drivers/of/of_private.h4
-rw-r--r--drivers/of/of_test.c119
-rw-r--r--drivers/pci/pcie/aspm.c3
-rw-r--r--drivers/pci/probe.c5
-rw-r--r--drivers/pci/quirks.c3
-rw-r--r--drivers/pci/tph.c2
-rw-r--r--drivers/pinctrl/pinconf-generic.c8
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c42
-rw-r--r--drivers/platform/cznic/Kconfig1
-rw-r--r--drivers/platform/x86/ideapad-laptop.c2
-rw-r--r--drivers/platform/x86/intel/ifs/ifs.h9
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c85
-rw-r--r--drivers/platform/x86/intel/pmc/core.c4
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c61
-rw-r--r--drivers/power/supply/axp20x_battery.c31
-rw-r--r--drivers/power/supply/da9150-fg.c4
-rw-r--r--drivers/power/supply/power_supply_core.c8
-rw-r--r--drivers/powercap/powercap_sys.c3
-rw-r--r--drivers/ptp/ptp_vmclock.c47
-rw-r--r--drivers/regulator/core.c61
-rw-r--r--drivers/s390/cio/chp.c3
-rw-r--r--drivers/s390/net/ism_drv.c14
-rw-r--r--drivers/s390/net/qeth_core_main.c8
-rw-r--r--drivers/scsi/qla1280.c2
-rw-r--r--drivers/scsi/scsi_lib.c9
-rw-r--r--drivers/scsi/scsi_lib_test.c7
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/storvsc_drv.c1
-rw-r--r--drivers/soc/loongson/loongson2_guts.c5
-rw-r--r--drivers/soc/qcom/smp2p.c2
-rw-r--r--drivers/spi/atmel-quadspi.c4
-rw-r--r--drivers/spi/spi-pxa2xx.c2
-rw-r--r--drivers/spi/spi-sn-f-ospi.c3
-rw-r--r--drivers/target/target_core_stat.c4
-rw-r--r--drivers/tee/optee/supp.c35
-rw-r--r--drivers/thermal/cpufreq_cooling.c2
-rw-r--r--drivers/tty/pty.c2
-rw-r--r--drivers/tty/serial/8250/8250.h2
-rw-r--r--drivers/tty/serial/8250/8250_dma.c16
-rw-r--r--drivers/tty/serial/8250/8250_of.c1
-rw-r--r--drivers/tty/serial/8250/8250_platform.c9
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c10
-rw-r--r--drivers/tty/serial/8250/8250_port.c9
-rw-r--r--drivers/tty/serial/sc16is7xx.c2
-rw-r--r--drivers/tty/serial/serial_port.c12
-rw-r--r--drivers/ufs/core/ufshcd.c68
-rw-r--r--drivers/ufs/host/ufshcd-pci.c2
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c28
-rw-r--r--drivers/usb/class/cdc-acm.c28
-rw-r--r--drivers/usb/core/hub.c14
-rw-r--r--drivers/usb/core/quirks.c6
-rw-r--r--drivers/usb/dwc2/gadget.c1
-rw-r--r--drivers/usb/dwc3/core.h1
-rw-r--r--drivers/usb/dwc3/gadget.c34
-rw-r--r--drivers/usb/gadget/function/f_midi.c19
-rw-r--r--drivers/usb/gadget/function/uvc_video.c2
-rw-r--r--drivers/usb/gadget/udc/core.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c2
-rw-r--r--drivers/usb/host/pci-quirks.c9
-rw-r--r--drivers/usb/host/xhci-pci.c7
-rw-r--r--drivers/usb/phy/phy-generic.c2
-rw-r--r--drivers/usb/roles/class.c5
-rw-r--r--drivers/usb/serial/option.c49
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c3
-rw-r--r--drivers/xen/swiotlb-xen.c22
602 files changed, 11946 insertions, 5452 deletions
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index 5f43db02b240..00d215ac866e 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -34,6 +34,8 @@ static void aie2_job_release(struct kref *ref)
job = container_of(ref, struct amdxdna_sched_job, refcnt);
amdxdna_sched_job_cleanup(job);
+ atomic64_inc(&job->hwctx->job_free_cnt);
+ wake_up(&job->hwctx->priv->job_free_wq);
if (job->out_fence)
dma_fence_put(job->out_fence);
kfree(job);
@@ -134,7 +136,8 @@ static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
if (!fence)
return;
- dma_fence_wait(fence, false);
+ /* Wait up to 2 seconds for fw to finish all pending requests */
+ dma_fence_wait_timeout(fence, false, msecs_to_jiffies(2000));
dma_fence_put(fence);
}
@@ -185,7 +188,7 @@ aie2_sched_notify(struct amdxdna_sched_job *job)
}
static int
-aie2_sched_resp_handler(void *handle, const u32 *data, size_t size)
+aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
struct amdxdna_gem_obj *cmd_abo;
@@ -203,7 +206,7 @@ aie2_sched_resp_handler(void *handle, const u32 *data, size_t size)
goto out;
}
- status = *data;
+ status = readl(data);
XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
if (status == AIE2_STATUS_SUCCESS)
amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
@@ -216,7 +219,7 @@ out:
}
static int
-aie2_sched_nocmd_resp_handler(void *handle, const u32 *data, size_t size)
+aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
u32 ret = 0;
@@ -230,7 +233,7 @@ aie2_sched_nocmd_resp_handler(void *handle, const u32 *data, size_t size)
goto out;
}
- status = *data;
+ status = readl(data);
XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
out:
@@ -239,14 +242,14 @@ out:
}
static int
-aie2_sched_cmdlist_resp_handler(void *handle, const u32 *data, size_t size)
+aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
struct amdxdna_gem_obj *cmd_abo;
- struct cmd_chain_resp *resp;
struct amdxdna_dev *xdna;
u32 fail_cmd_status;
u32 fail_cmd_idx;
+ u32 cmd_status;
u32 ret = 0;
cmd_abo = job->cmd_bo;
@@ -256,17 +259,17 @@ aie2_sched_cmdlist_resp_handler(void *handle, const u32 *data, size_t size)
goto out;
}
- resp = (struct cmd_chain_resp *)data;
+ cmd_status = readl(data + offsetof(struct cmd_chain_resp, status));
xdna = job->hwctx->client->xdna;
- XDNA_DBG(xdna, "Status 0x%x", resp->status);
- if (resp->status == AIE2_STATUS_SUCCESS) {
+ XDNA_DBG(xdna, "Status 0x%x", cmd_status);
+ if (cmd_status == AIE2_STATUS_SUCCESS) {
amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
goto out;
}
/* Slow path to handle error, read from ringbuf on BAR */
- fail_cmd_idx = resp->fail_cmd_idx;
- fail_cmd_status = resp->fail_cmd_status;
+ fail_cmd_idx = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_idx));
+ fail_cmd_status = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_status));
XDNA_DBG(xdna, "Failed cmd idx %d, status 0x%x",
fail_cmd_idx, fail_cmd_status);
@@ -361,7 +364,7 @@ aie2_sched_job_timedout(struct drm_sched_job *sched_job)
return DRM_GPU_SCHED_STAT_NOMINAL;
}
-const struct drm_sched_backend_ops sched_ops = {
+static const struct drm_sched_backend_ops sched_ops = {
.run_job = aie2_sched_job_run,
.free_job = aie2_sched_job_free,
.timedout_job = aie2_sched_job_timedout,
@@ -516,6 +519,14 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
{
struct amdxdna_client *client = hwctx->client;
struct amdxdna_dev *xdna = client->xdna;
+ const struct drm_sched_init_args args = {
+ .ops = &sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = HWCTX_MAX_CMDS,
+ .timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
+ .name = hwctx->name,
+ .dev = xdna->ddev.dev,
+ };
struct drm_gpu_scheduler *sched;
struct amdxdna_hwctx_priv *priv;
struct amdxdna_gem_obj *heap;
@@ -573,9 +584,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
might_lock(&priv->io_lock);
fs_reclaim_release(GFP_KERNEL);
- ret = drm_sched_init(sched, &sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT,
- HWCTX_MAX_CMDS, 0, msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
- NULL, NULL, hwctx->name, xdna->ddev.dev);
+ ret = drm_sched_init(sched, &args);
if (ret) {
XDNA_ERR(xdna, "Failed to init DRM scheduler. ret %d", ret);
goto free_cmd_bufs;
@@ -616,6 +625,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
hwctx->status = HWCTX_STAT_INIT;
ndev = xdna->dev_handle;
ndev->hwctx_num++;
+ init_waitqueue_head(&priv->job_free_wq);
XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name);
@@ -652,25 +662,23 @@ void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
xdna = hwctx->client->xdna;
ndev = xdna->dev_handle;
ndev->hwctx_num--;
- drm_sched_wqueue_stop(&hwctx->priv->sched);
- /* Now, scheduler will not send command to device. */
+ XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
+ drm_sched_entity_destroy(&hwctx->priv->entity);
+
+ aie2_hwctx_wait_for_idle(hwctx);
+
+ /* Request fw to destroy hwctx and cancel the rest pending requests */
aie2_release_resource(hwctx);
- /*
- * All submitted commands are aborted.
- * Restart scheduler queues to cleanup jobs. The amdxdna_sched_job_run()
- * will return NODEV if it is called.
- */
- drm_sched_wqueue_start(&hwctx->priv->sched);
+ /* Wait for all submitted jobs to be completed or canceled */
+ wait_event(hwctx->priv->job_free_wq,
+ atomic64_read(&hwctx->job_submit_cnt) ==
+ atomic64_read(&hwctx->job_free_cnt));
- aie2_hwctx_wait_for_idle(hwctx);
- drm_sched_entity_destroy(&hwctx->priv->entity);
drm_sched_fini(&hwctx->priv->sched);
aie2_ctx_syncobj_destroy(hwctx);
- XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
-
for (idx = 0; idx < ARRAY_SIZE(hwctx->priv->cmd_buf); idx++)
drm_gem_object_put(to_gobj(hwctx->priv->cmd_buf[idx]));
amdxdna_gem_unpin(hwctx->priv->heap);
@@ -879,6 +887,7 @@ retry:
drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
aie2_job_put(job);
+ atomic64_inc(&hwctx->job_submit_cnt);
return 0;
diff --git a/drivers/accel/amdxdna/aie2_error.c b/drivers/accel/amdxdna/aie2_error.c
index b1defaa8513b..5ee905632a39 100644
--- a/drivers/accel/amdxdna/aie2_error.c
+++ b/drivers/accel/amdxdna/aie2_error.c
@@ -209,16 +209,14 @@ static u32 aie2_error_backtrack(struct amdxdna_dev_hdl *ndev, void *err_info, u3
return err_col;
}
-static int aie2_error_async_cb(void *handle, const u32 *data, size_t size)
+static int aie2_error_async_cb(void *handle, void __iomem *data, size_t size)
{
- struct async_event_msg_resp *resp;
struct async_event *e = handle;
if (data) {
- resp = (struct async_event_msg_resp *)data;
- e->resp.type = resp->type;
+ e->resp.type = readl(data + offsetof(struct async_event_msg_resp, type));
wmb(); /* Update status in the end, so that no lock for here */
- e->resp.status = resp->status;
+ e->resp.status = readl(data + offsetof(struct async_event_msg_resp, status));
}
queue_work(e->wq, &e->work);
return 0;
diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
index 9e2c9a44f76a..bf4219e32cc1 100644
--- a/drivers/accel/amdxdna/aie2_message.c
+++ b/drivers/accel/amdxdna/aie2_message.c
@@ -356,7 +356,7 @@ fail:
}
int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
- void *handle, int (*cb)(void*, const u32 *, size_t))
+ void *handle, int (*cb)(void*, void __iomem *, size_t))
{
struct async_event_msg_req req = { 0 };
struct xdna_mailbox_msg msg = {
@@ -435,7 +435,7 @@ int aie2_config_cu(struct amdxdna_hwctx *hwctx)
}
int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, const u32 *, size_t))
+ int (*notify_cb)(void *, void __iomem *, size_t))
{
struct mailbox_channel *chann = hwctx->priv->mbox_chann;
struct amdxdna_dev *xdna = hwctx->client->xdna;
@@ -640,7 +640,7 @@ aie2_cmd_op_to_msg_op(u32 op)
int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, const u32 *, size_t))
+ int (*notify_cb)(void *, void __iomem *, size_t))
{
struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
struct mailbox_channel *chann = hwctx->priv->mbox_chann;
@@ -705,7 +705,7 @@ int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, const u32 *, size_t))
+ int (*notify_cb)(void *, void __iomem *, size_t))
{
struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
struct mailbox_channel *chann = hwctx->priv->mbox_chann;
@@ -740,7 +740,7 @@ int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
}
int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, const u32 *, size_t))
+ int (*notify_cb)(void *, void __iomem *, size_t))
{
struct mailbox_channel *chann = hwctx->priv->mbox_chann;
struct amdxdna_gem_obj *abo = to_xdna_obj(job->bos[0]);
diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h
index f2d95531ddc2..385914840eaa 100644
--- a/drivers/accel/amdxdna/aie2_pci.h
+++ b/drivers/accel/amdxdna/aie2_pci.h
@@ -271,18 +271,18 @@ int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwc
int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size);
int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled);
int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
- void *handle, int (*cb)(void*, const u32 *, size_t));
+ void *handle, int (*cb)(void*, void __iomem *, size_t));
int aie2_config_cu(struct amdxdna_hwctx *hwctx);
int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, const u32 *, size_t));
+ int (*notify_cb)(void *, void __iomem *, size_t));
int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, const u32 *, size_t));
+ int (*notify_cb)(void *, void __iomem *, size_t));
int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, const u32 *, size_t));
+ int (*notify_cb)(void *, void __iomem *, size_t));
int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, const u32 *, size_t));
+ int (*notify_cb)(void *, void __iomem *, size_t));
/* aie2_hwctx.c */
int aie2_hwctx_init(struct amdxdna_hwctx *hwctx);
diff --git a/drivers/accel/amdxdna/aie2_smu.c b/drivers/accel/amdxdna/aie2_smu.c
index 73388443c676..d303701b0ded 100644
--- a/drivers/accel/amdxdna/aie2_smu.c
+++ b/drivers/accel/amdxdna/aie2_smu.c
@@ -64,6 +64,7 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
if (ret) {
XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n",
ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret);
+ return ret;
}
ndev->npuclk_freq = freq;
@@ -72,6 +73,7 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
if (ret) {
XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n",
ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret);
+ return ret;
}
ndev->hclk_freq = freq;
ndev->dpm_level = dpm_level;
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
index d11b1c83d9c3..43442b9e273b 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.c
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -220,6 +220,8 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
args->syncobj_handle = hwctx->syncobj_hdl;
mutex_unlock(&xdna->dev_lock);
+ atomic64_set(&hwctx->job_submit_cnt, 0);
+ atomic64_set(&hwctx->job_free_cnt, 0);
XDNA_DBG(xdna, "PID %d create HW context %d, ret %d", client->pid, args->handle, ret);
drm_dev_exit(idx);
return 0;
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h
index 80b0304193ec..f0a4a8586d85 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.h
+++ b/drivers/accel/amdxdna/amdxdna_ctx.h
@@ -87,6 +87,9 @@ struct amdxdna_hwctx {
struct amdxdna_qos_info qos;
struct amdxdna_hwctx_param_config_cu *cus;
u32 syncobj_hdl;
+
+ atomic64_t job_submit_cnt;
+ atomic64_t job_free_cnt ____cacheline_aligned_in_smp;
};
#define drm_job_to_xdna_job(j) \
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c
index 814b16bb1953..aa07e67400ef 100644
--- a/drivers/accel/amdxdna/amdxdna_mailbox.c
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.c
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
+#include <linux/slab.h>
#include <linux/xarray.h>
#define CREATE_TRACE_POINTS
@@ -90,7 +91,7 @@ struct mailbox_pkg {
struct mailbox_msg {
void *handle;
- int (*notify_cb)(void *handle, const u32 *data, size_t size);
+ int (*notify_cb)(void *handle, void __iomem *data, size_t size);
size_t pkg_size; /* package size in bytes */
struct mailbox_pkg pkg;
};
@@ -243,7 +244,7 @@ no_space:
static int
mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *header,
- void *data)
+ void __iomem *data)
{
struct mailbox_msg *mb_msg;
int msg_id;
@@ -331,7 +332,7 @@ static int mailbox_get_msg(struct mailbox_channel *mb_chann)
memcpy_fromio((u32 *)&header + 1, read_addr, rest);
read_addr += rest;
- ret = mailbox_get_resp(mb_chann, &header, (u32 *)read_addr);
+ ret = mailbox_get_resp(mb_chann, &header, read_addr);
mailbox_set_headptr(mb_chann, head + msg_size);
/* After update head, it can equal to ringbuf_size. This is expected. */
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.h b/drivers/accel/amdxdna/amdxdna_mailbox.h
index 57954c303bdd..ea367f2fb738 100644
--- a/drivers/accel/amdxdna/amdxdna_mailbox.h
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.h
@@ -25,7 +25,7 @@ struct mailbox_channel;
struct xdna_mailbox_msg {
u32 opcode;
void *handle;
- int (*notify_cb)(void *handle, const u32 *data, size_t size);
+ int (*notify_cb)(void *handle, void __iomem *data, size_t size);
u8 *send_data;
size_t send_size;
};
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.c b/drivers/accel/amdxdna/amdxdna_mailbox_helper.c
index 5139a9c96a91..6d0c24513476 100644
--- a/drivers/accel/amdxdna/amdxdna_mailbox_helper.c
+++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.c
@@ -16,7 +16,7 @@
#include "amdxdna_mailbox_helper.h"
#include "amdxdna_pci_drv.h"
-int xdna_msg_cb(void *handle, const u32 *data, size_t size)
+int xdna_msg_cb(void *handle, void __iomem *data, size_t size)
{
struct xdna_notify *cb_arg = handle;
int ret;
@@ -29,9 +29,9 @@ int xdna_msg_cb(void *handle, const u32 *data, size_t size)
goto out;
}
+ memcpy_fromio(cb_arg->data, data, cb_arg->size);
print_hex_dump_debug("resp data: ", DUMP_PREFIX_OFFSET,
- 16, 4, data, cb_arg->size, true);
- memcpy(cb_arg->data, data, cb_arg->size);
+ 16, 4, cb_arg->data, cb_arg->size, true);
out:
ret = cb_arg->error;
complete(&cb_arg->comp);
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
index 23e1317b79fe..710ff8873d61 100644
--- a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
+++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
@@ -35,7 +35,7 @@ struct xdna_notify {
.notify_cb = xdna_msg_cb, \
}
-int xdna_msg_cb(void *handle, const u32 *data, size_t size);
+int xdna_msg_cb(void *handle, void __iomem *data, size_t size);
int xdna_send_msg_wait(struct amdxdna_dev *xdna, struct mailbox_channel *chann,
struct xdna_mailbox_msg *msg);
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
index 97d4a032171f..f5b8497cf5ad 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.c
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -21,6 +21,11 @@
#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */
+MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
+
/*
* Bind the driver base on (vendor_id, device_id) pair and later use the
* (device_id, rev_id) pair as a key to select the devices. The devices with
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index 8180b95ed69d..0825851656a2 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -4,6 +4,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/fault-inject.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
@@ -397,6 +398,88 @@ static int dct_active_set(void *data, u64 active_percent)
DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");
+static int priority_bands_show(struct seq_file *s, void *v)
+{
+ struct ivpu_device *vdev = s->private;
+ struct ivpu_hw_info *hw = vdev->hw;
+
+ for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
+ band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
+ switch (band) {
+ case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE:
+ seq_puts(s, "Idle: ");
+ break;
+
+ case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL:
+ seq_puts(s, "Normal: ");
+ break;
+
+ case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS:
+ seq_puts(s, "Focus: ");
+ break;
+
+ case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME:
+ seq_puts(s, "Realtime: ");
+ break;
+ }
+
+ seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n",
+ hw->hws.grace_period[band], hw->hws.process_grace_period[band],
+ hw->hws.process_quantum[band]);
+ }
+
+ return 0;
+}
+
+static int priority_bands_fops_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, priority_bands_show, inode->i_private);
+}
+
+static ssize_t
+priority_bands_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+{
+ struct seq_file *s = file->private_data;
+ struct ivpu_device *vdev = s->private;
+ char buf[64];
+ u32 grace_period;
+ u32 process_grace_period;
+ u32 process_quantum;
+ u32 band;
+ int ret;
+
+ if (size >= sizeof(buf))
+ return -EINVAL;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, pos, user_buf, size);
+ if (ret < 0)
+ return ret;
+
+ buf[size] = '\0';
+ ret = sscanf(buf, "%u %u %u %u", &band, &grace_period, &process_grace_period,
+ &process_quantum);
+ if (ret != 4)
+ return -EINVAL;
+
+ if (band >= VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT)
+ return -EINVAL;
+
+ vdev->hw->hws.grace_period[band] = grace_period;
+ vdev->hw->hws.process_grace_period[band] = process_grace_period;
+ vdev->hw->hws.process_quantum[band] = process_quantum;
+
+ return size;
+}
+
+static const struct file_operations ivpu_hws_priority_bands_fops = {
+ .owner = THIS_MODULE,
+ .open = priority_bands_fops_open,
+ .write = priority_bands_fops_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
void ivpu_debugfs_init(struct ivpu_device *vdev)
{
struct dentry *debugfs_root = vdev->drm.debugfs_root;
@@ -419,6 +502,8 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
&fw_trace_hw_comp_mask_fops);
debugfs_create_file("fw_trace_level", 0200, debugfs_root, vdev,
&fw_trace_level_fops);
+ debugfs_create_file("hws_priority_bands", 0200, debugfs_root, vdev,
+ &ivpu_hws_priority_bands_fops);
debugfs_create_file("reset_engine", 0200, debugfs_root, vdev,
&ivpu_reset_engine_fops);
@@ -430,4 +515,8 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
debugfs_root, vdev, &fw_profiling_freq_fops);
debugfs_create_file("dct", 0644, debugfs_root, vdev, &ivpu_dct_fops);
}
+
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("fail_hw", debugfs_root, &ivpu_hw_failure);
+#endif
}
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 1e8ffbe25eee..4fa73189502e 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
#include <generated/utsrelease.h>
#include <drm/drm_accel.h>
@@ -36,8 +37,6 @@
#define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
#endif
-static struct lock_class_key submitted_jobs_xa_lock_class_key;
-
int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
@@ -128,20 +127,18 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
kref_put(&file_priv->ref, file_priv_release);
}
-static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
+bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability)
{
- switch (args->index) {
+ switch (capability) {
case DRM_IVPU_CAP_METRIC_STREAMER:
- args->value = 1;
- break;
+ return true;
case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
- args->value = 1;
- break;
+ return true;
+ case DRM_IVPU_CAP_MANAGE_CMDQ:
+ return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW;
default:
- return -EINVAL;
+ return false;
}
-
- return 0;
}
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -201,7 +198,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->hw->sku;
break;
case DRM_IVPU_PARAM_CAPABILITIES:
- ret = ivpu_get_capabilities(vdev, args);
+ args->value = ivpu_is_capable(vdev, args->index);
break;
default:
ret = -EINVAL;
@@ -310,6 +307,9 @@ static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_CMDQ_CREATE, ivpu_cmdq_create_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_CMDQ_DESTROY, ivpu_cmdq_destroy_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_CMDQ_SUBMIT, ivpu_cmdq_submit_ioctl, 0),
};
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
@@ -397,15 +397,19 @@ int ivpu_boot(struct ivpu_device *vdev)
if (ivpu_fw_is_cold_boot(vdev)) {
ret = ivpu_pm_dct_init(vdev);
if (ret)
- goto err_diagnose_failure;
+ goto err_disable_ipc;
ret = ivpu_hw_sched_init(vdev);
if (ret)
- goto err_diagnose_failure;
+ goto err_disable_ipc;
}
return 0;
+err_disable_ipc:
+ ivpu_ipc_disable(vdev);
+ ivpu_hw_irq_disable(vdev);
+ disable_irq(vdev->irq);
err_diagnose_failure:
ivpu_hw_diagnose_failure(vdev);
ivpu_mmu_evtq_dump(vdev);
@@ -417,6 +421,9 @@ void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
ivpu_hw_irq_disable(vdev);
disable_irq(vdev->irq);
+ cancel_work_sync(&vdev->irq_ipc_work);
+ cancel_work_sync(&vdev->irq_dct_work);
+ cancel_work_sync(&vdev->context_abort_work);
ivpu_ipc_disable(vdev);
ivpu_mmu_disable(vdev);
}
@@ -449,7 +456,7 @@ static const struct drm_driver driver = {
.postclose = ivpu_postclose,
.gem_create_object = ivpu_gem_create_object,
- .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
+ .gem_prime_import = ivpu_gem_prime_import,
.ioctls = ivpu_drm_ioctls,
.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
@@ -461,54 +468,6 @@ static const struct drm_driver driver = {
.major = 1,
};
-static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
-{
- struct ivpu_file_priv *file_priv;
- unsigned long ctx_id;
-
- mutex_lock(&vdev->context_list_lock);
-
- xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
- if (!file_priv->has_mmu_faults || file_priv->aborted)
- continue;
-
- mutex_lock(&file_priv->lock);
- ivpu_context_abort_locked(file_priv);
- file_priv->aborted = true;
- mutex_unlock(&file_priv->lock);
- }
-
- mutex_unlock(&vdev->context_list_lock);
-}
-
-static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
-{
- struct ivpu_device *vdev = arg;
- u8 irq_src;
-
- if (kfifo_is_empty(&vdev->hw->irq.fifo))
- return IRQ_NONE;
-
- while (kfifo_get(&vdev->hw->irq.fifo, &irq_src)) {
- switch (irq_src) {
- case IVPU_HW_IRQ_SRC_IPC:
- ivpu_ipc_irq_thread_handler(vdev);
- break;
- case IVPU_HW_IRQ_SRC_MMU_EVTQ:
- ivpu_context_abort_invalid(vdev);
- break;
- case IVPU_HW_IRQ_SRC_DCT:
- ivpu_pm_dct_irq_thread_handler(vdev);
- break;
- default:
- ivpu_err_ratelimited(vdev, "Unknown IRQ source: %u\n", irq_src);
- break;
- }
- }
-
- return IRQ_HANDLED;
-}
-
static int ivpu_irq_init(struct ivpu_device *vdev)
{
struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
@@ -520,12 +479,16 @@ static int ivpu_irq_init(struct ivpu_device *vdev)
return ret;
}
+ INIT_WORK(&vdev->irq_ipc_work, ivpu_ipc_irq_work_fn);
+ INIT_WORK(&vdev->irq_dct_work, ivpu_pm_irq_dct_work_fn);
+ INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_work_fn);
+
ivpu_irq_handlers_init(vdev);
vdev->irq = pci_irq_vector(pdev, 0);
- ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
- ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
+ ret = devm_request_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
+ IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
if (ret)
ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
@@ -613,7 +576,6 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
- lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
INIT_LIST_HEAD(&vdev->bo_list);
vdev->db_limit.min = IVPU_MIN_DB;
@@ -623,6 +585,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
if (ret)
goto err_xa_destroy;
+ ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
+ if (ret)
+ goto err_xa_destroy;
+
ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
if (ret)
goto err_xa_destroy;
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 3fdff3f6cffd..92753effb1c9 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -58,6 +58,7 @@
#define IVPU_PLATFORM_SILICON 0
#define IVPU_PLATFORM_SIMICS 2
#define IVPU_PLATFORM_FPGA 3
+#define IVPU_PLATFORM_HSLE 4
#define IVPU_PLATFORM_INVALID 8
#define IVPU_SCHED_MODE_AUTO -1
@@ -110,6 +111,7 @@ struct ivpu_wa_table {
bool disable_clock_relinquish;
bool disable_d0i3_msg;
bool wp0_during_power_up;
+ bool disable_d0i2;
};
struct ivpu_hw_info;
@@ -142,9 +144,14 @@ struct ivpu_device {
struct xa_limit db_limit;
u32 db_next;
+ struct work_struct irq_ipc_work;
+ struct work_struct irq_dct_work;
+ struct work_struct context_abort_work;
+
struct mutex bo_list_lock; /* Protects bo_list */
struct list_head bo_list;
+ struct mutex submitted_jobs_lock; /* Protects submitted_jobs */
struct xarray submitted_jobs_xa;
struct ivpu_ipc_consumer job_done_consumer;
@@ -200,6 +207,9 @@ extern bool ivpu_force_snoop;
#define IVPU_TEST_MODE_MIP_DISABLE BIT(6)
#define IVPU_TEST_MODE_DISABLE_TIMEOUTS BIT(8)
#define IVPU_TEST_MODE_TURBO BIT(9)
+#define IVPU_TEST_MODE_CLK_RELINQ_DISABLE BIT(10)
+#define IVPU_TEST_MODE_CLK_RELINQ_ENABLE BIT(11)
+#define IVPU_TEST_MODE_D0I2_DISABLE BIT(12)
extern int ivpu_test_mode;
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
@@ -208,6 +218,7 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev);
int ivpu_shutdown(struct ivpu_device *vdev);
void ivpu_prepare_for_reset(struct ivpu_device *vdev);
+bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability);
static inline u8 ivpu_revision(struct ivpu_device *vdev)
{
@@ -282,7 +293,8 @@ static inline bool ivpu_is_simics(struct ivpu_device *vdev)
static inline bool ivpu_is_fpga(struct ivpu_device *vdev)
{
- return ivpu_get_platform(vdev) == IVPU_PLATFORM_FPGA;
+ return ivpu_get_platform(vdev) == IVPU_PLATFORM_FPGA ||
+ ivpu_get_platform(vdev) == IVPU_PLATFORM_HSLE;
}
static inline bool ivpu_is_force_snoop_enabled(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 6037ec0b3096..7a1bb92d8c81 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -145,7 +145,10 @@ ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_he
if (ivpu_sched_mode != IVPU_SCHED_MODE_AUTO)
return ivpu_sched_mode;
- return VPU_SCHEDULING_MODE_OS;
+ if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, JSM, 3, 24))
+ return VPU_SCHEDULING_MODE_OS;
+
+ return VPU_SCHEDULING_MODE_HW;
}
static int ivpu_fw_parse(struct ivpu_device *vdev)
@@ -531,6 +534,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->d0i3_entry_vpu_ts);
ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n",
boot_params->system_time_us);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = %u\n",
+ boot_params->power_profile);
}
void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
@@ -631,6 +636,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->d0i3_delayed_entry = 1;
boot_params->d0i3_residency_time_us = 0;
boot_params->d0i3_entry_vpu_ts = 0;
+ if (IVPU_WA(disable_d0i2))
+ boot_params->power_profile = 1;
boot_params->system_time_us = ktime_to_us(ktime_get_real());
wmb(); /* Flush WC buffers after writing bootparams */
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 16178054e629..8741c73b92ce 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -20,6 +20,8 @@
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
+MODULE_IMPORT_NS("DMA_BUF");
+
static const struct drm_gem_object_funcs ivpu_gem_funcs;
static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
@@ -172,6 +174,47 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
return &bo->base.base;
}
+struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct device *attach_dev = dev->dev;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct drm_gem_object *obj;
+ int ret;
+
+ attach = dma_buf_attach(dma_buf, attach_dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ get_dma_buf(dma_buf);
+
+ sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto fail_unmap;
+ }
+
+ obj->import_attach = attach;
+ obj->resv = dma_buf->resv;
+
+ return obj;
+
+fail_unmap:
+ dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+
static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
{
struct drm_gem_shmem_object *shmem;
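
ivpu_gem_prime_import() above follows the usual dma-buf import flow (attach, map the attachment, wrap the sg_table in a shmem GEM object) and keeps the attachment and the exporter's reservation object on the imported object; the MODULE_IMPORT_NS("DMA_BUF") line is needed because dma_buf_attach() and friends live in the DMA_BUF symbol namespace. The callback presumably also has to be plugged into the driver's struct drm_driver; that hookup is outside this section, so the snippet below is only a sketch of the expected wiring, with the surrounding initializer abridged.

static const struct drm_driver driver = {
	/* ... other drm_driver fields elided ... */
	.gem_create_object = ivpu_gem_create_object,
	.gem_prime_import = ivpu_gem_prime_import,	/* assumed hookup, not shown in this diff */
};
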
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index d975000abd78..a222a9ec9d61 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -28,6 +28,7 @@ int ivpu_bo_pin(struct ivpu_bo *bo);
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
+struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
struct ivpu_addr_range *range, u64 size, u32 flags);
struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags);
diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
index 4e1054f3466e..ec9a3629da3a 100644
--- a/drivers/accel/ivpu/ivpu_hw.c
+++ b/drivers/accel/ivpu/ivpu_hw.c
@@ -9,6 +9,16 @@
#include "ivpu_hw_ip.h"
#include <linux/dmi.h>
+#include <linux/fault-inject.h>
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_FAULT_INJECTION
+DECLARE_FAULT_ATTR(ivpu_hw_failure);
+
+static char *ivpu_fail_hw;
+module_param_named_unsafe(fail_hw, ivpu_fail_hw, charp, 0444);
+MODULE_PARM_DESC(fail_hw, "<interval>,<probability>,<space>,<times>");
+#endif
static char *platform_to_str(u32 platform)
{
@@ -19,43 +29,36 @@ static char *platform_to_str(u32 platform)
return "SIMICS";
case IVPU_PLATFORM_FPGA:
return "FPGA";
+ case IVPU_PLATFORM_HSLE:
+ return "HSLE";
default:
return "Invalid platform";
}
}
-static const struct dmi_system_id dmi_platform_simulation[] = {
- {
- .ident = "Intel Simics",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
- DMI_MATCH(DMI_BOARD_SERIAL, "123456789"),
- },
- },
- {
- .ident = "Intel Simics",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "Simics"),
- },
- },
- { }
-};
-
static void platform_init(struct ivpu_device *vdev)
{
- if (dmi_check_system(dmi_platform_simulation))
- vdev->platform = IVPU_PLATFORM_SIMICS;
- else
- vdev->platform = IVPU_PLATFORM_SILICON;
+ int platform = ivpu_hw_btrs_platform_read(vdev);
+
+ ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n", platform_to_str(platform), platform);
+
+ switch (platform) {
+ case IVPU_PLATFORM_SILICON:
+ case IVPU_PLATFORM_SIMICS:
+ case IVPU_PLATFORM_FPGA:
+ case IVPU_PLATFORM_HSLE:
+ vdev->platform = platform;
+ break;
- ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
- platform_to_str(vdev->platform), vdev->platform);
+ default:
+ ivpu_err(vdev, "Invalid platform type: %d\n", platform);
+ break;
+ }
}
static void wa_init(struct ivpu_device *vdev)
{
- vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
+ vdev->wa.punit_disabled = false;
vdev->wa.clear_runtime_mem = false;
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
@@ -65,14 +68,24 @@ static void wa_init(struct ivpu_device *vdev)
ivpu_revision(vdev) < IVPU_HW_IP_REV_LNL_B0)
vdev->wa.disable_clock_relinquish = true;
+ if (ivpu_test_mode & IVPU_TEST_MODE_CLK_RELINQ_ENABLE)
+ vdev->wa.disable_clock_relinquish = false;
+
+ if (ivpu_test_mode & IVPU_TEST_MODE_CLK_RELINQ_DISABLE)
+ vdev->wa.disable_clock_relinquish = true;
+
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
vdev->wa.wp0_during_power_up = true;
+ if (ivpu_test_mode & IVPU_TEST_MODE_D0I2_DISABLE)
+ vdev->wa.disable_d0i2 = true;
+
IVPU_PRINT_WA(punit_disabled);
IVPU_PRINT_WA(clear_runtime_mem);
IVPU_PRINT_WA(interrupt_clear_with_0);
IVPU_PRINT_WA(disable_clock_relinquish);
IVPU_PRINT_WA(wp0_during_power_up);
+ IVPU_PRINT_WA(disable_d0i2);
}
static void timeouts_init(struct ivpu_device *vdev)
@@ -84,12 +97,12 @@ static void timeouts_init(struct ivpu_device *vdev)
vdev->timeout.autosuspend = -1;
vdev->timeout.d0i3_entry_msg = -1;
} else if (ivpu_is_fpga(vdev)) {
- vdev->timeout.boot = 100000;
- vdev->timeout.jsm = 50000;
- vdev->timeout.tdr = 2000000;
+ vdev->timeout.boot = 50;
+ vdev->timeout.jsm = 15000;
+ vdev->timeout.tdr = 30000;
vdev->timeout.autosuspend = -1;
vdev->timeout.d0i3_entry_msg = 500;
- vdev->timeout.state_dump_msg = 10;
+ vdev->timeout.state_dump_msg = 10000;
} else if (ivpu_is_simics(vdev)) {
vdev->timeout.boot = 50;
vdev->timeout.jsm = 500;
@@ -110,6 +123,26 @@ static void timeouts_init(struct ivpu_device *vdev)
}
}
+static void priority_bands_init(struct ivpu_device *vdev)
+{
+ /* Idle */
+ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 0;
+ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 50000;
+ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 160000;
+ /* Normal */
+ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
+ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
+ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 300000;
+ /* Focus */
+ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
+ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
+ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 200000;
+ /* Realtime */
+ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 0;
+ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 50000;
+ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000;
+}
+
static void memory_ranges_init(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
@@ -248,12 +281,18 @@ int ivpu_hw_init(struct ivpu_device *vdev)
{
ivpu_hw_btrs_info_init(vdev);
ivpu_hw_btrs_freq_ratios_init(vdev);
+ priority_bands_init(vdev);
memory_ranges_init(vdev);
platform_init(vdev);
wa_init(vdev);
timeouts_init(vdev);
atomic_set(&vdev->hw->firewall_irq_counter, 0);
+#ifdef CONFIG_FAULT_INJECTION
+ if (ivpu_fail_hw)
+ setup_fault_attr(&ivpu_hw_failure, ivpu_fail_hw);
+#endif
+
return 0;
}
@@ -285,8 +324,6 @@ void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
void ivpu_irq_handlers_init(struct ivpu_device *vdev)
{
- INIT_KFIFO(vdev->hw->irq.fifo);
-
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_37xx;
else
@@ -300,7 +337,6 @@ void ivpu_irq_handlers_init(struct ivpu_device *vdev)
void ivpu_hw_irq_enable(struct ivpu_device *vdev)
{
- kfifo_reset(&vdev->hw->irq.fifo);
ivpu_hw_ip_irq_enable(vdev);
ivpu_hw_btrs_irq_enable(vdev);
}
@@ -327,9 +363,9 @@ irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
ivpu_hw_btrs_global_int_enable(vdev);
- if (!kfifo_is_empty(&vdev->hw->irq.fifo))
- return IRQ_WAKE_THREAD;
- if (ip_handled || btrs_handled)
- return IRQ_HANDLED;
- return IRQ_NONE;
+ if (!ip_handled && !btrs_handled)
+ return IRQ_NONE;
+
+ pm_runtime_mark_last_busy(vdev->drm.dev);
+ return IRQ_HANDLED;
}
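
The fault-injection hooks added above wire ivpu_hw_failure into the kernel's generic fault-injection framework: setup_fault_attr() parses the "<interval>,<probability>,<space>,<times>" string passed via the new fail_hw module parameter, and should_fail() (used further down in the register-poll helper) then decides per call whether to fake a failure. A generic sketch of that API, independent of this driver and with an example parameter string only, looks roughly like this:

#include <linux/fault-inject.h>

static DECLARE_FAULT_ATTR(toy_failure);

/* e.g. a string such as "1,100,0,5": check every call (interval 1), fail with
 * 100% probability, no space limit, and inject at most 5 failures. */
static void toy_fault_setup(char *str)
{
	setup_fault_attr(&toy_failure, str);
}

static int toy_poll_register(void)
{
	if (should_fail(&toy_failure, 1))
		return -ETIMEDOUT;	/* injected failure */

	return 0;			/* real poll result would go here */
}
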
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index fc4dbfc980c8..16435f2756d0 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -6,18 +6,10 @@
#ifndef __IVPU_HW_H__
#define __IVPU_HW_H__
-#include <linux/kfifo.h>
-
#include "ivpu_drv.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"
-#define IVPU_HW_IRQ_FIFO_LENGTH 1024
-
-#define IVPU_HW_IRQ_SRC_IPC 1
-#define IVPU_HW_IRQ_SRC_MMU_EVTQ 2
-#define IVPU_HW_IRQ_SRC_DCT 3
-
struct ivpu_addr_range {
resource_size_t start;
resource_size_t end;
@@ -27,7 +19,6 @@ struct ivpu_hw_info {
struct {
bool (*btrs_irq_handler)(struct ivpu_device *vdev, int irq);
bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq);
- DECLARE_KFIFO(fifo, u8, IVPU_HW_IRQ_FIFO_LENGTH);
} irq;
struct {
struct ivpu_addr_range global;
@@ -45,6 +36,11 @@ struct ivpu_hw_info {
u8 pn_ratio;
u32 profiling_freq;
} pll;
+ struct {
+ u32 grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
+ u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
+ u32 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
+ } hws;
u32 tile_fuse;
u32 sku;
u16 config;
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c
index 3212c99f3682..56c56012b980 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.c
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.c
@@ -630,8 +630,7 @@ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) {
ivpu_dbg(vdev, IRQ, "Survivability IRQ\n");
- if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_DCT))
- ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+ queue_work(system_wq, &vdev->irq_dct_work);
}
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status))
@@ -888,3 +887,10 @@ void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev)
else
return diagnose_failure_lnl(vdev);
}
+
+int ivpu_hw_btrs_platform_read(struct ivpu_device *vdev)
+{
+ u32 reg = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);
+
+ return REG_GET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PLATFORM, reg);
+}
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h
index 04f14f50fed6..1fd71b4d4ab0 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.h
@@ -46,5 +46,6 @@ void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev);
void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev);
void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev);
void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev);
+int ivpu_hw_btrs_platform_read(struct ivpu_device *vdev);
#endif /* __IVPU_HW_BTRS_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
index fc51f3098f97..fff2ef2cada6 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
@@ -86,6 +86,7 @@
#define VPU_HW_BTRS_LNL_VPU_STATUS_POWER_RESOURCE_OWN_ACK_MASK BIT_MASK(7)
#define VPU_HW_BTRS_LNL_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11)
#define VPU_HW_BTRS_LNL_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12)
+#define VPU_HW_BTRS_LNL_VPU_STATUS_PLATFORM_MASK GENMASK(31, 29)
#define VPU_HW_BTRS_LNL_IP_RESET 0x00000160u
#define VPU_HW_BTRS_LNL_IP_RESET_TRIGGER_MASK BIT_MASK(0)
diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c
index 029dd065614b..823f6a57dc54 100644
--- a/drivers/accel/ivpu/ivpu_hw_ip.c
+++ b/drivers/accel/ivpu/ivpu_hw_ip.c
@@ -968,14 +968,14 @@ void ivpu_hw_ip_wdt_disable(struct ivpu_device *vdev)
static u32 ipc_rx_count_get_37xx(struct ivpu_device *vdev)
{
- u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
+ u32 count = readl(vdev->regv + VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
static u32 ipc_rx_count_get_40xx(struct ivpu_device *vdev)
{
- u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
+ u32 count = readl(vdev->regv + VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
diff --git a/drivers/accel/ivpu/ivpu_hw_reg_io.h b/drivers/accel/ivpu/ivpu_hw_reg_io.h
index 79b3f441eac4..66259b0ead02 100644
--- a/drivers/accel/ivpu/ivpu_hw_reg_io.h
+++ b/drivers/accel/ivpu/ivpu_hw_reg_io.h
@@ -7,6 +7,7 @@
#define __IVPU_HW_REG_IO_H__
#include <linux/bitfield.h>
+#include <linux/fault-inject.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -16,13 +17,11 @@
#define REG_IO_ERROR 0xffffffff
#define REGB_RD32(reg) ivpu_hw_reg_rd32(vdev, vdev->regb, (reg), #reg, __func__)
-#define REGB_RD32_SILENT(reg) readl(vdev->regb + (reg))
#define REGB_RD64(reg) ivpu_hw_reg_rd64(vdev, vdev->regb, (reg), #reg, __func__)
#define REGB_WR32(reg, val) ivpu_hw_reg_wr32(vdev, vdev->regb, (reg), (val), #reg, __func__)
#define REGB_WR64(reg, val) ivpu_hw_reg_wr64(vdev, vdev->regb, (reg), (val), #reg, __func__)
#define REGV_RD32(reg) ivpu_hw_reg_rd32(vdev, vdev->regv, (reg), #reg, __func__)
-#define REGV_RD32_SILENT(reg) readl(vdev->regv + (reg))
#define REGV_RD64(reg) ivpu_hw_reg_rd64(vdev, vdev->regv, (reg), #reg, __func__)
#define REGV_WR32(reg, val) ivpu_hw_reg_wr32(vdev, vdev->regv, (reg), (val), #reg, __func__)
#define REGV_WR64(reg, val) ivpu_hw_reg_wr64(vdev, vdev->regv, (reg), (val), #reg, __func__)
@@ -47,31 +46,42 @@
#define REG_TEST_FLD_NUM(REG, FLD, num, val) \
((num) == FIELD_GET(REG##_##FLD##_MASK, val))
-#define REGB_POLL_FLD(reg, fld, val, timeout_us) \
-({ \
- u32 var; \
- int r; \
- ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s started (expected 0x%x)\n", \
- __func__, #reg, reg, #fld, val); \
- r = read_poll_timeout(REGB_RD32_SILENT, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)),\
- REG_POLL_SLEEP_US, timeout_us, false, (reg)); \
- ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s %s (reg val 0x%08x)\n", \
- __func__, #reg, reg, #fld, r ? "ETIMEDOUT" : "OK", var); \
- r; \
-})
-
-#define REGV_POLL_FLD(reg, fld, val, timeout_us) \
-({ \
- u32 var; \
- int r; \
- ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s started (expected 0x%x)\n", \
- __func__, #reg, reg, #fld, val); \
- r = read_poll_timeout(REGV_RD32_SILENT, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)),\
- REG_POLL_SLEEP_US, timeout_us, false, (reg)); \
- ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s %s (reg val 0x%08x)\n", \
- __func__, #reg, reg, #fld, r ? "ETIMEDOUT" : "OK", var); \
- r; \
-})
+#define REGB_POLL_FLD(reg, fld, exp_fld_val, timeout_us) \
+ ivpu_hw_reg_poll_fld(vdev, vdev->regb, reg, reg##_##fld##_MASK, \
+ FIELD_PREP(reg##_##fld##_MASK, exp_fld_val), timeout_us, \
+ __func__, #reg, #fld)
+
+#define REGV_POLL_FLD(reg, fld, exp_fld_val, timeout_us) \
+ ivpu_hw_reg_poll_fld(vdev, vdev->regv, reg, reg##_##fld##_MASK, \
+ FIELD_PREP(reg##_##fld##_MASK, exp_fld_val), timeout_us, \
+ __func__, #reg, #fld)
+
+extern struct fault_attr ivpu_hw_failure;
+
+static inline int __must_check
+ivpu_hw_reg_poll_fld(struct ivpu_device *vdev, void __iomem *base,
+ u32 reg_offset, u32 reg_mask, u32 exp_masked_val, u32 timeout_us,
+ const char *func_name, const char *reg_name, const char *fld_name)
+{
+ u32 reg_val;
+ int ret;
+
+ ivpu_dbg(vdev, REG, "%s : %s (0x%08x) POLL %s started (exp_val 0x%x)\n",
+ func_name, reg_name, reg_offset, fld_name, exp_masked_val);
+
+ ret = read_poll_timeout(readl, reg_val, (reg_val & reg_mask) == exp_masked_val,
+ REG_POLL_SLEEP_US, timeout_us, false, base + reg_offset);
+
+#ifdef CONFIG_FAULT_INJECTION
+ if (should_fail(&ivpu_hw_failure, 1))
+ ret = -ETIMEDOUT;
+#endif
+
+ ivpu_dbg(vdev, REG, "%s : %s (0x%08x) POLL %s %s (reg_val 0x%08x)\n",
+ func_name, reg_name, reg_offset, fld_name, ret ? "ETIMEDOUT" : "OK", reg_val);
+
+ return ret;
+}
static inline u32
ivpu_hw_reg_rd32(struct ivpu_device *vdev, void __iomem *base, u32 reg,
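
With this rework REGB_POLL_FLD()/REGV_POLL_FLD() are no longer self-contained macros around read_poll_timeout(); they expand to a single call of ivpu_hw_reg_poll_fld(), which masks the field, polls the raw register and, when CONFIG_FAULT_INJECTION is enabled, may turn a successful poll into -ETIMEDOUT. A hypothetical caller would look like the sketch below; the register and field names are placeholders picked for illustration, not taken from this patch.

static int toy_wait_for_ready(struct ivpu_device *vdev)
{
	/* Poll VPU_HW_BTRS_LNL_VPU_STATUS.READY until the field reads 1 or the
	 * 100 ms timeout expires; returns 0 on success, -ETIMEDOUT otherwise. */
	return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, 1, 100000);
}
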
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 01ebf88fe6ef..0e096fd9b95d 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -459,13 +459,12 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev)
}
}
- if (!list_empty(&ipc->cb_msg_list))
- if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_IPC))
- ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+ queue_work(system_wq, &vdev->irq_ipc_work);
}
-void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
+void ivpu_ipc_irq_work_fn(struct work_struct *work)
{
+ struct ivpu_device *vdev = container_of(work, struct ivpu_device, irq_ipc_work);
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_rx_msg *rx_msg, *r;
struct list_head cb_msg_list;
diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h
index b4dfb504679b..b524a1985b9d 100644
--- a/drivers/accel/ivpu/ivpu_ipc.h
+++ b/drivers/accel/ivpu/ivpu_ipc.h
@@ -90,7 +90,7 @@ void ivpu_ipc_disable(struct ivpu_device *vdev);
void ivpu_ipc_reset(struct ivpu_device *vdev);
void ivpu_ipc_irq_handler(struct ivpu_device *vdev);
-void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev);
+void ivpu_ipc_irq_work_fn(struct work_struct *work);
void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
u32 channel, ivpu_ipc_rx_callback_t callback);
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 7149312f16e1..004059e4f1e8 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <uapi/drm/ivpu_accel.h>
@@ -17,6 +18,7 @@
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
+#include "ivpu_mmu.h"
#include "ivpu_pm.h"
#include "ivpu_trace.h"
#include "vpu_boot_api.h"
@@ -83,23 +85,9 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
if (!cmdq)
return NULL;
- ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
- GFP_KERNEL);
- if (ret < 0) {
- ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
- goto err_free_cmdq;
- }
-
- ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
- &file_priv->cmdq_id_next, GFP_KERNEL);
- if (ret < 0) {
- ivpu_err(vdev, "Failed to allocate command queue id: %d\n", ret);
- goto err_erase_db_xa;
- }
-
cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!cmdq->mem)
- goto err_erase_cmdq_xa;
+ goto err_free_cmdq;
ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
if (ret)
@@ -107,10 +95,6 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
return cmdq;
-err_erase_cmdq_xa:
- xa_erase(&file_priv->cmdq_xa, cmdq->id);
-err_erase_db_xa:
- xa_erase(&vdev->db_xa, cmdq->db_id);
err_free_cmdq:
kfree(cmdq);
return NULL;
@@ -118,15 +102,44 @@ err_free_cmdq:
static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
- if (!cmdq)
- return;
-
ivpu_preemption_buffers_free(file_priv->vdev, file_priv, cmdq);
ivpu_bo_free(cmdq->mem);
- xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
kfree(cmdq);
}
+static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 priority,
+ bool is_legacy)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_cmdq *cmdq = NULL;
+ int ret;
+
+ lockdep_assert_held(&file_priv->lock);
+
+ cmdq = ivpu_cmdq_alloc(file_priv);
+ if (!cmdq) {
+ ivpu_err(vdev, "Failed to allocate command queue\n");
+ return NULL;
+ }
+
+ cmdq->priority = priority;
+ cmdq->is_legacy = is_legacy;
+
+ ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
+ &file_priv->cmdq_id_next, GFP_KERNEL);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to allocate command queue ID: %d\n", ret);
+ goto err_free_cmdq;
+ }
+
+ ivpu_dbg(vdev, JOB, "Command queue %d created, ctx %d\n", cmdq->id, file_priv->ctx.id);
+ return cmdq;
+
+err_free_cmdq:
+ ivpu_cmdq_free(file_priv, cmdq);
+ return NULL;
+}
+
static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine,
u8 priority)
{
@@ -152,6 +165,13 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
struct ivpu_device *vdev = file_priv->vdev;
int ret;
+ ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
+ GFP_KERNEL);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to allocate doorbell ID: %d\n", ret);
+ return ret;
+ }
+
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
@@ -160,41 +180,52 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
if (!ret)
- ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d\n",
- cmdq->db_id, cmdq->id, file_priv->ctx.id);
+ ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
+ cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
+ else
+ xa_erase(&vdev->db_xa, cmdq->db_id);
return ret;
}
-static int
-ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u8 priority)
+static void ivpu_cmdq_jobq_init(struct ivpu_device *vdev, struct vpu_job_queue *jobq)
+{
+ jobq->header.engine_idx = VPU_ENGINE_COMPUTE;
+ jobq->header.head = 0;
+ jobq->header.tail = 0;
+
+ if (ivpu_test_mode & IVPU_TEST_MODE_TURBO) {
+ ivpu_dbg(vdev, JOB, "Turbo mode enabled");
+ jobq->header.flags = VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+ }
+
+ wmb(); /* Flush WC buffer for jobq->header */
+}
+
+static inline u32 ivpu_cmdq_get_entry_count(struct ivpu_cmdq *cmdq)
+{
+ size_t size = ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header);
+
+ return size / sizeof(struct vpu_job_queue_entry);
+}
+
+static int ivpu_cmdq_register(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
struct ivpu_device *vdev = file_priv->vdev;
- struct vpu_job_queue_header *jobq_header;
int ret;
lockdep_assert_held(&file_priv->lock);
- if (cmdq->db_registered)
+ if (cmdq->db_id)
return 0;
- cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
- sizeof(struct vpu_job_queue_entry));
-
+ cmdq->entry_count = ivpu_cmdq_get_entry_count(cmdq);
cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
- jobq_header = &cmdq->jobq->header;
- jobq_header->engine_idx = VPU_ENGINE_COMPUTE;
- jobq_header->head = 0;
- jobq_header->tail = 0;
- if (ivpu_test_mode & IVPU_TEST_MODE_TURBO) {
- ivpu_dbg(vdev, JOB, "Turbo mode enabled");
- jobq_header->flags = VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
- }
- wmb(); /* Flush WC buffer for jobq->header */
+ ivpu_cmdq_jobq_init(vdev, cmdq->jobq);
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
- ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, priority);
+ ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, cmdq->priority);
if (ret)
return ret;
}
@@ -203,58 +234,83 @@ ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u8 prio
if (ret)
return ret;
- cmdq->db_registered = true;
-
return 0;
}
-static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+static int ivpu_cmdq_unregister(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
struct ivpu_device *vdev = file_priv->vdev;
int ret;
lockdep_assert_held(&file_priv->lock);
- if (!cmdq->db_registered)
+ if (!cmdq->db_id)
return 0;
- cmdq->db_registered = false;
-
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
if (!ret)
- ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id);
+ ivpu_dbg(vdev, JOB, "Command queue %d destroyed, ctx %d\n",
+ cmdq->id, file_priv->ctx.id);
}
ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
if (!ret)
ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);
+ xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
+ cmdq->db_id = 0;
+
return 0;
}
-static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u8 priority)
+static inline u8 ivpu_job_to_jsm_priority(u8 priority)
+{
+ if (priority == DRM_IVPU_JOB_PRIORITY_DEFAULT)
+ return VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL;
+
+ return priority - 1;
+}
+
+static void ivpu_cmdq_destroy(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+{
+ ivpu_cmdq_unregister(file_priv, cmdq);
+ xa_erase(&file_priv->cmdq_xa, cmdq->id);
+ ivpu_cmdq_free(file_priv, cmdq);
+}
+
+static struct ivpu_cmdq *ivpu_cmdq_acquire_legacy(struct ivpu_file_priv *file_priv, u8 priority)
{
struct ivpu_cmdq *cmdq;
- unsigned long cmdq_id;
- int ret;
+ unsigned long id;
lockdep_assert_held(&file_priv->lock);
- xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
- if (cmdq->priority == priority)
+ xa_for_each(&file_priv->cmdq_xa, id, cmdq)
+ if (cmdq->is_legacy && cmdq->priority == priority)
break;
if (!cmdq) {
- cmdq = ivpu_cmdq_alloc(file_priv);
+ cmdq = ivpu_cmdq_create(file_priv, priority, true);
if (!cmdq)
return NULL;
- cmdq->priority = priority;
}
- ret = ivpu_cmdq_init(file_priv, cmdq, priority);
- if (ret)
+ return cmdq;
+}
+
+static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u32 cmdq_id)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_cmdq *cmdq;
+
+ lockdep_assert_held(&file_priv->lock);
+
+ cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id);
+ if (!cmdq) {
+ ivpu_warn_ratelimited(vdev, "Failed to find command queue with ID: %u\n", cmdq_id);
return NULL;
+ }
return cmdq;
}
@@ -266,11 +322,8 @@ void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
lockdep_assert_held(&file_priv->lock);
- xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
- xa_erase(&file_priv->cmdq_xa, cmdq_id);
- ivpu_cmdq_fini(file_priv, cmdq);
- ivpu_cmdq_free(file_priv, cmdq);
- }
+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
+ ivpu_cmdq_destroy(file_priv, cmdq);
}
/*
@@ -286,8 +339,10 @@ static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
mutex_lock(&file_priv->lock);
- xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
- cmdq->db_registered = false;
+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
+ xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
+ cmdq->db_id = 0;
+ }
mutex_unlock(&file_priv->lock);
}
@@ -305,25 +360,24 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
mutex_unlock(&vdev->context_list_lock);
}
-static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv)
-{
- struct ivpu_cmdq *cmdq;
- unsigned long cmdq_id;
-
- xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
- ivpu_cmdq_fini(file_priv, cmdq);
-}
-
void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_cmdq *cmdq;
+ unsigned long cmdq_id;
lockdep_assert_held(&file_priv->lock);
+ ivpu_dbg(vdev, JOB, "Context ID: %u abort\n", file_priv->ctx.id);
- ivpu_cmdq_fini_all(file_priv);
+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
+ ivpu_cmdq_unregister(file_priv, cmdq);
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+
+ ivpu_mmu_disable_ssid_events(vdev, file_priv->ctx.id);
+
+ file_priv->aborted = true;
}
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
@@ -462,16 +516,14 @@ static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *
{
struct ivpu_job *job;
- xa_lock(&vdev->submitted_jobs_xa);
- job = __xa_erase(&vdev->submitted_jobs_xa, job_id);
+ lockdep_assert_held(&vdev->submitted_jobs_lock);
+ job = xa_erase(&vdev->submitted_jobs_xa, job_id);
if (xa_empty(&vdev->submitted_jobs_xa) && job) {
vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
vdev->busy_time);
}
- xa_unlock(&vdev->submitted_jobs_xa);
-
return job;
}
@@ -479,6 +531,28 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
{
struct ivpu_job *job;
+ lockdep_assert_held(&vdev->submitted_jobs_lock);
+
+ job = xa_load(&vdev->submitted_jobs_xa, job_id);
+ if (!job)
+ return -ENOENT;
+
+ if (job_status == VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW) {
+ guard(mutex)(&job->file_priv->lock);
+
+ if (job->file_priv->has_mmu_faults)
+ return 0;
+
+ /*
+ * Mark the context as faulty and defer job destruction to the context abort
+ * work handler so that MMU faults and jobs returning a context-violation
+ * status are synchronized and handled the same way
+ */
+ job->file_priv->has_mmu_faults = true;
+ queue_work(system_wq, &vdev->context_abort_work);
+ return 0;
+ }
+
job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
if (!job)
return -ENOENT;
@@ -497,6 +571,10 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
ivpu_stop_job_timeout_detection(vdev);
ivpu_rpm_put(vdev);
+
+ if (!xa_empty(&vdev->submitted_jobs_xa))
+ ivpu_start_job_timeout_detection(vdev);
+
return 0;
}
@@ -505,11 +583,29 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
struct ivpu_job *job;
unsigned long id;
+ mutex_lock(&vdev->submitted_jobs_lock);
+
xa_for_each(&vdev->submitted_jobs_xa, id, job)
ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
+
+ mutex_unlock(&vdev->submitted_jobs_lock);
}
-static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
+void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
+{
+ struct ivpu_job *job;
+ unsigned long id;
+
+ mutex_lock(&vdev->submitted_jobs_lock);
+
+ xa_for_each(&vdev->submitted_jobs_xa, id, job)
+ if (job->file_priv->ctx.id == ctx_id && job->cmdq_id == cmdq_id)
+ ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
+
+ mutex_unlock(&vdev->submitted_jobs_lock);
+}
+
+static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
{
struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = job->vdev;
@@ -521,25 +617,35 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
if (ret < 0)
return ret;
+ mutex_lock(&vdev->submitted_jobs_lock);
mutex_lock(&file_priv->lock);
- cmdq = ivpu_cmdq_acquire(file_priv, priority);
+ if (cmdq_id == 0)
+ cmdq = ivpu_cmdq_acquire_legacy(file_priv, priority);
+ else
+ cmdq = ivpu_cmdq_acquire(file_priv, cmdq_id);
if (!cmdq) {
- ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
- file_priv->ctx.id, job->engine_idx, priority);
+ ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d\n", file_priv->ctx.id);
ret = -EINVAL;
- goto err_unlock_file_priv;
+ goto err_unlock;
}
- xa_lock(&vdev->submitted_jobs_xa);
+ ret = ivpu_cmdq_register(file_priv, cmdq);
+ if (ret) {
+ ivpu_err(vdev, "Failed to register command queue: %d\n", ret);
+ goto err_unlock;
+ }
+
+ job->cmdq_id = cmdq->id;
+
is_first_job = xa_empty(&vdev->submitted_jobs_xa);
- ret = __xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
- &file_priv->job_id_next, GFP_KERNEL);
+ ret = xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
+ &file_priv->job_id_next, GFP_KERNEL);
if (ret < 0) {
ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
file_priv->ctx.id);
ret = -EBUSY;
- goto err_unlock_submitted_jobs_xa;
+ goto err_unlock;
}
ret = ivpu_cmdq_push_job(cmdq, job);
@@ -559,23 +665,23 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
trace_job("submit", job);
ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d prio %d addr 0x%llx next %d\n",
- job->job_id, file_priv->ctx.id, job->engine_idx, priority,
+ job->job_id, file_priv->ctx.id, job->engine_idx, cmdq->priority,
job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
- xa_unlock(&vdev->submitted_jobs_xa);
-
mutex_unlock(&file_priv->lock);
- if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
+ if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+ }
+
+ mutex_unlock(&vdev->submitted_jobs_lock);
return 0;
err_erase_xa:
- __xa_erase(&vdev->submitted_jobs_xa, job->job_id);
-err_unlock_submitted_jobs_xa:
- xa_unlock(&vdev->submitted_jobs_xa);
-err_unlock_file_priv:
+ xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_unlock:
+ mutex_unlock(&vdev->submitted_jobs_lock);
mutex_unlock(&file_priv->lock);
ivpu_rpm_put(vdev);
return ret;
@@ -585,7 +691,7 @@ static int
ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
u32 buf_count, u32 commands_offset)
{
- struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = file_priv->vdev;
struct ww_acquire_ctx acquire_ctx;
enum dma_resv_usage usage;
@@ -647,49 +753,20 @@ unlock_reservations:
return ret;
}
-static inline u8 ivpu_job_to_hws_priority(struct ivpu_file_priv *file_priv, u8 priority)
-{
- if (priority == DRM_IVPU_JOB_PRIORITY_DEFAULT)
- return DRM_IVPU_JOB_PRIORITY_NORMAL;
-
- return priority - 1;
-}
-
-int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, u32 cmdq_id,
+ u32 buffer_count, u32 engine, void __user *buffers_ptr, u32 cmds_offset,
+ u8 priority)
{
- struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
- struct drm_ivpu_submit *params = data;
struct ivpu_job *job;
u32 *buf_handles;
int idx, ret;
- u8 priority;
-
- if (params->engine != DRM_IVPU_ENGINE_COMPUTE)
- return -EINVAL;
-
- if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
- return -EINVAL;
-
- if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
- return -EINVAL;
-
- if (!IS_ALIGNED(params->commands_offset, 8))
- return -EINVAL;
-
- if (!file_priv->ctx.id)
- return -EINVAL;
- if (file_priv->has_mmu_faults)
- return -EBADFD;
-
- buf_handles = kcalloc(params->buffer_count, sizeof(u32), GFP_KERNEL);
+ buf_handles = kcalloc(buffer_count, sizeof(u32), GFP_KERNEL);
if (!buf_handles)
return -ENOMEM;
- ret = copy_from_user(buf_handles,
- (void __user *)params->buffers_ptr,
- params->buffer_count * sizeof(u32));
+ ret = copy_from_user(buf_handles, buffers_ptr, buffer_count * sizeof(u32));
if (ret) {
ret = -EFAULT;
goto err_free_handles;
@@ -700,27 +777,23 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto err_free_handles;
}
- ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
- file_priv->ctx.id, params->buffer_count);
+ ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n", file_priv->ctx.id, buffer_count);
- job = ivpu_job_create(file_priv, params->engine, params->buffer_count);
+ job = ivpu_job_create(file_priv, engine, buffer_count);
if (!job) {
ivpu_err(vdev, "Failed to create job\n");
ret = -ENOMEM;
goto err_exit_dev;
}
- ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
- params->commands_offset);
+ ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset);
if (ret) {
ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
goto err_destroy_job;
}
- priority = ivpu_job_to_hws_priority(file_priv, params->priority);
-
down_read(&vdev->pm->reset_lock);
- ret = ivpu_job_submit(job, priority);
+ ret = ivpu_job_submit(job, priority, cmdq_id);
up_read(&vdev->pm->reset_lock);
if (ret)
goto err_signal_fence;
@@ -740,12 +813,122 @@ err_free_handles:
return ret;
}
+int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct drm_ivpu_submit *args = data;
+ u8 priority;
+
+ if (args->engine != DRM_IVPU_ENGINE_COMPUTE)
+ return -EINVAL;
+
+ if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+ return -EINVAL;
+
+ if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(args->commands_offset, 8))
+ return -EINVAL;
+
+ if (!file_priv->ctx.id)
+ return -EINVAL;
+
+ if (file_priv->has_mmu_faults)
+ return -EBADFD;
+
+ priority = ivpu_job_to_jsm_priority(args->priority);
+
+ return ivpu_submit(file, file_priv, 0, args->buffer_count, args->engine,
+ (void __user *)args->buffers_ptr, args->commands_offset, priority);
+}
+
+int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct drm_ivpu_cmdq_submit *args = data;
+
+ if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ return -ENODEV;
+
+ if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID)
+ return -EINVAL;
+
+ if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(args->commands_offset, 8))
+ return -EINVAL;
+
+ if (!file_priv->ctx.id)
+ return -EINVAL;
+
+ if (file_priv->has_mmu_faults)
+ return -EBADFD;
+
+ return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE,
+ (void __user *)args->buffers_ptr, args->commands_offset, 0);
+}
+
+int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct drm_ivpu_cmdq_create *args = data;
+ struct ivpu_cmdq *cmdq;
+
+ if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ return -ENODEV;
+
+ if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+ return -EINVAL;
+
+ mutex_lock(&file_priv->lock);
+
+ cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), false);
+ if (cmdq)
+ args->cmdq_id = cmdq->id;
+
+ mutex_unlock(&file_priv->lock);
+
+ return cmdq ? 0 : -ENOMEM;
+}
+
+int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct drm_ivpu_cmdq_destroy *args = data;
+ struct ivpu_cmdq *cmdq;
+ u32 cmdq_id;
+ int ret;
+
+ if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ return -ENODEV;
+
+ mutex_lock(&file_priv->lock);
+
+ cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id);
+ if (!cmdq || cmdq->is_legacy) {
+ ret = -ENOENT;
+ goto err_unlock;
+ }
+
+ cmdq_id = cmdq->id;
+ ivpu_cmdq_destroy(file_priv, cmdq);
+ mutex_unlock(&file_priv->lock);
+ ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
+ return 0;
+
+err_unlock:
+ mutex_unlock(&file_priv->lock);
+ return ret;
+}
+
static void
ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
struct vpu_jsm_msg *jsm_msg)
{
struct vpu_ipc_msg_payload_job_done *payload;
- int ret;
if (!jsm_msg) {
ivpu_err(vdev, "IPC message has no JSM payload\n");
@@ -758,9 +941,10 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
}
payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
- ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
- if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
- ivpu_start_job_timeout_detection(vdev);
+
+ mutex_lock(&vdev->submitted_jobs_lock);
+ ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
+ mutex_unlock(&vdev->submitted_jobs_lock);
}
void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
@@ -773,3 +957,55 @@ void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
{
ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
}
+
+void ivpu_context_abort_work_fn(struct work_struct *work)
+{
+ struct ivpu_device *vdev = container_of(work, struct ivpu_device, context_abort_work);
+ struct ivpu_file_priv *file_priv;
+ struct ivpu_job *job;
+ unsigned long ctx_id;
+ unsigned long id;
+
+ if (drm_WARN_ON(&vdev->drm, pm_runtime_get_if_active(vdev->drm.dev) <= 0))
+ return;
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+ ivpu_jsm_reset_engine(vdev, 0);
+
+ mutex_lock(&vdev->context_list_lock);
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+ if (!file_priv->has_mmu_faults || file_priv->aborted)
+ continue;
+
+ mutex_lock(&file_priv->lock);
+ ivpu_context_abort_locked(file_priv);
+ mutex_unlock(&file_priv->lock);
+ }
+ mutex_unlock(&vdev->context_list_lock);
+
+ /*
+ * We will not receive new MMU event interrupts until the existing events are
+ * discarded; however, we want to discard them only after aborting the faulty
+ * context, to avoid generating new faults from that context
+ */
+ ivpu_mmu_discard_events(vdev);
+
+ if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ goto runtime_put;
+
+ ivpu_jsm_hws_resume_engine(vdev, 0);
+ /*
+ * In hardware scheduling mode NPU already has stopped processing jobs
+ * In hardware scheduling mode the NPU has already stopped processing jobs and
+ * won't send us any further notifications, so we have to free the job-related
+ * resources and notify userspace ourselves
+ mutex_lock(&vdev->submitted_jobs_lock);
+ xa_for_each(&vdev->submitted_jobs_xa, id, job)
+ if (job->file_priv->aborted)
+ ivpu_job_signal_and_destroy(vdev, job->job_id, DRM_IVPU_JOB_STATUS_ABORTED);
+ mutex_unlock(&vdev->submitted_jobs_lock);
+
+runtime_put:
+ pm_runtime_mark_last_busy(vdev->drm.dev);
+ pm_runtime_put_autosuspend(vdev->drm.dev);
+}
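
The three new ioctls implemented above (create, destroy and submit on an explicit command queue) presumably get registered in the driver's DRM ioctl table next to the existing entries; that table is not part of this section, so the lines below are only a sketch of the expected hookup, assuming the usual DRM_IOCTL_DEF_DRV() pattern and UAPI names that match the handler names.

static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
	/* ... existing entries elided ... */
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_CREATE, ivpu_cmdq_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_DESTROY, ivpu_cmdq_destroy_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_SUBMIT, ivpu_cmdq_submit_ioctl, 0),
};
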
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
index 8b19e3f8b4cf..2e301c2eea7b 100644
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -30,8 +30,8 @@ struct ivpu_cmdq {
u32 entry_count;
u32 id;
u32 db_id;
- bool db_registered;
u8 priority;
+ bool is_legacy;
};
/**
@@ -51,6 +51,7 @@ struct ivpu_job {
struct ivpu_file_priv *file_priv;
struct dma_fence *done_fence;
u64 cmd_buf_vpu_addr;
+ u32 cmdq_id;
u32 job_id;
u32 engine_idx;
size_t bo_count;
@@ -58,14 +59,19 @@ struct ivpu_job {
};
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv);
void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv);
void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
+void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id);
void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
void ivpu_job_done_consumer_fini(struct ivpu_device *vdev);
+void ivpu_context_abort_work_fn(struct work_struct *work);
void ivpu_jobs_abort_all(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
index 30a40be76930..219ab8afefab 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
@@ -7,6 +7,7 @@
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
+#include "vpu_jsm_api.h"
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
@@ -407,26 +408,18 @@ int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
struct vpu_jsm_msg resp;
+ struct ivpu_hw_info *hw = vdev->hw;
+ struct vpu_ipc_msg_payload_hws_priority_band_setup *setup =
+ &req.payload.hws_priority_band_setup;
int ret;
- /* Idle */
- req.payload.hws_priority_band_setup.grace_period[0] = 0;
- req.payload.hws_priority_band_setup.process_grace_period[0] = 50000;
- req.payload.hws_priority_band_setup.process_quantum[0] = 160000;
- /* Normal */
- req.payload.hws_priority_band_setup.grace_period[1] = 50000;
- req.payload.hws_priority_band_setup.process_grace_period[1] = 50000;
- req.payload.hws_priority_band_setup.process_quantum[1] = 300000;
- /* Focus */
- req.payload.hws_priority_band_setup.grace_period[2] = 50000;
- req.payload.hws_priority_band_setup.process_grace_period[2] = 50000;
- req.payload.hws_priority_band_setup.process_quantum[2] = 200000;
- /* Realtime */
- req.payload.hws_priority_band_setup.grace_period[3] = 0;
- req.payload.hws_priority_band_setup.process_grace_period[3] = 50000;
- req.payload.hws_priority_band_setup.process_quantum[3] = 200000;
-
- req.payload.hws_priority_band_setup.normal_band_percentage = 10;
+ for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
+ band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
+ setup->grace_period[band] = hw->hws.grace_period[band];
+ setup->process_grace_period[band] = hw->hws.process_grace_period[band];
+ setup->process_quantum[band] = hw->hws.process_quantum[band];
+ }
+ setup->normal_band_percentage = 10;
ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
&resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 26ef52fbb93e..5ea010568faa 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -20,6 +20,12 @@
#define IVPU_MMU_REG_CR0 0x00200020u
#define IVPU_MMU_REG_CR0ACK 0x00200024u
#define IVPU_MMU_REG_CR0ACK_VAL_MASK GENMASK(31, 0)
+#define IVPU_MMU_REG_CR0_ATSCHK_MASK BIT(4)
+#define IVPU_MMU_REG_CR0_CMDQEN_MASK BIT(3)
+#define IVPU_MMU_REG_CR0_EVTQEN_MASK BIT(2)
+#define IVPU_MMU_REG_CR0_PRIQEN_MASK BIT(1)
+#define IVPU_MMU_REG_CR0_SMMUEN_MASK BIT(0)
+
#define IVPU_MMU_REG_CR1 0x00200028u
#define IVPU_MMU_REG_CR2 0x0020002cu
#define IVPU_MMU_REG_IRQ_CTRL 0x00200050u
@@ -141,12 +147,6 @@
#define IVPU_MMU_IRQ_EVTQ_EN BIT(2)
#define IVPU_MMU_IRQ_GERROR_EN BIT(0)
-#define IVPU_MMU_CR0_ATSCHK BIT(4)
-#define IVPU_MMU_CR0_CMDQEN BIT(3)
-#define IVPU_MMU_CR0_EVTQEN BIT(2)
-#define IVPU_MMU_CR0_PRIQEN BIT(1)
-#define IVPU_MMU_CR0_SMMUEN BIT(0)
-
#define IVPU_MMU_CR1_TABLE_SH GENMASK(11, 10)
#define IVPU_MMU_CR1_TABLE_OC GENMASK(9, 8)
#define IVPU_MMU_CR1_TABLE_IC GENMASK(7, 6)
@@ -596,7 +596,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, 0);
REGV_WR32(IVPU_MMU_REG_CMDQ_CONS, 0);
- val = IVPU_MMU_CR0_CMDQEN;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, CMDQEN, 0);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
@@ -617,12 +617,12 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_EVTQ_PROD_SEC, 0);
REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, 0);
- val |= IVPU_MMU_CR0_EVTQEN;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
- val |= IVPU_MMU_CR0_ATSCHK;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, ATSCHK, val);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
@@ -631,7 +631,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
if (ret)
return ret;
- val |= IVPU_MMU_CR0_SMMUEN;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, SMMUEN, val);
return ivpu_mmu_reg_write_cr0(vdev, val);
}
@@ -725,8 +725,8 @@ static int ivpu_mmu_cdtab_entry_set(struct ivpu_device *vdev, u32 ssid, u64 cd_d
cd[2] = 0;
cd[3] = 0x0000000000007444;
- /* For global context generate memory fault on VPU */
- if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
+ /* For global and reserved contexts generate memory fault on VPU */
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID || ssid == IVPU_RESERVED_CONTEXT_MMU_SSID)
cd[0] |= IVPU_MMU_CD_0_A;
if (valid)
@@ -870,28 +870,107 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
return evt;
}
+static int ivpu_mmu_evtq_set(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(IVPU_MMU_REG_CR0);
+
+ if (enable)
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
+ else
+ val = REG_CLR_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
+ REGV_WR32(IVPU_MMU_REG_CR0, val);
+
+ return REGV_POLL_FLD(IVPU_MMU_REG_CR0ACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
+}
+
+static int ivpu_mmu_evtq_enable(struct ivpu_device *vdev)
+{
+ return ivpu_mmu_evtq_set(vdev, true);
+}
+
+static int ivpu_mmu_evtq_disable(struct ivpu_device *vdev)
+{
+ return ivpu_mmu_evtq_set(vdev, false);
+}
+
+void ivpu_mmu_discard_events(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+
+ mutex_lock(&mmu->lock);
+ /*
+ * Disable event queue (stop MMU from updating the producer)
+ * to allow synchronization of consumer and producer indexes
+ */
+ ivpu_mmu_evtq_disable(vdev);
+
+ vdev->mmu->evtq.cons = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
+ REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
+ vdev->mmu->evtq.prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
+
+ ivpu_mmu_evtq_enable(vdev);
+
+ drm_WARN_ON_ONCE(&vdev->drm, vdev->mmu->evtq.cons != vdev->mmu->evtq.prod);
+
+ mutex_unlock(&mmu->lock);
+}
+
+int ivpu_mmu_disable_ssid_events(struct ivpu_device *vdev, u32 ssid)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
+ u64 *entry;
+ u64 val;
+
+ if (ssid > IVPU_MMU_CDTAB_ENT_COUNT)
+ return -EINVAL;
+
+ mutex_lock(&mmu->lock);
+
+ entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
+
+ val = READ_ONCE(entry[0]);
+ val &= ~IVPU_MMU_CD_0_R;
+ WRITE_ONCE(entry[0], val);
+
+ if (!ivpu_is_force_snoop_enabled(vdev))
+ clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
+
+ ivpu_mmu_cmdq_write_cfgi_all(vdev);
+ ivpu_mmu_cmdq_sync(vdev);
+
+ mutex_unlock(&mmu->lock);
+
+ return 0;
+}
+
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
{
+ struct ivpu_file_priv *file_priv;
u32 *event;
u32 ssid;
ivpu_dbg(vdev, IRQ, "MMU event queue\n");
- while ((event = ivpu_mmu_get_event(vdev)) != NULL) {
- ivpu_mmu_dump_event(vdev, event);
-
- ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
- if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
+ while ((event = ivpu_mmu_get_event(vdev))) {
+ ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, *event);
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID ||
+ ssid == IVPU_RESERVED_CONTEXT_MMU_SSID) {
+ ivpu_mmu_dump_event(vdev, event);
ivpu_pm_trigger_recovery(vdev, "MMU event");
return;
}
- ivpu_mmu_user_context_mark_invalid(vdev, ssid);
- REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
+ file_priv = xa_load(&vdev->context_xa, ssid);
+ if (file_priv) {
+ if (!READ_ONCE(file_priv->has_mmu_faults)) {
+ ivpu_mmu_dump_event(vdev, event);
+ WRITE_ONCE(file_priv->has_mmu_faults, true);
+ }
+ }
}
- if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_MMU_EVTQ))
- ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+ queue_work(system_wq, &vdev->context_abort_work);
}
void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_mmu.h b/drivers/accel/ivpu/ivpu_mmu.h
index 7afea9cd8731..1ce7529746ad 100644
--- a/drivers/accel/ivpu/ivpu_mmu.h
+++ b/drivers/accel/ivpu/ivpu_mmu.h
@@ -47,5 +47,7 @@ int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid);
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev);
void ivpu_mmu_evtq_dump(struct ivpu_device *vdev);
+void ivpu_mmu_discard_events(struct ivpu_device *vdev);
+int ivpu_mmu_disable_ssid_events(struct ivpu_device *vdev, u32 ssid);
#endif /* __IVPU_MMU_H__ */
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 0af614dfb6f9..f0267efa55aa 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -635,16 +635,3 @@ void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
ivpu_mmu_cd_clear(vdev, vdev->rctx.id);
ivpu_mmu_context_fini(vdev, &vdev->rctx);
}
-
-void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
-{
- struct ivpu_file_priv *file_priv;
-
- xa_lock(&vdev->context_xa);
-
- file_priv = xa_load(&vdev->context_xa, ssid);
- if (file_priv)
- file_priv->has_mmu_faults = true;
-
- xa_unlock(&vdev->context_xa);
-}
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h
index 8042fc067062..f255310968cf 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.h
+++ b/drivers/accel/ivpu/ivpu_mmu_context.h
@@ -37,8 +37,6 @@ void ivpu_mmu_global_context_fini(struct ivpu_device *vdev);
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev);
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev);
-void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid);
-
int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
u64 size, struct drm_mm_node *node);
void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node);
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 87d7411ae059..b5891e91f7ab 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -115,41 +115,57 @@ err_power_down:
return ret;
}
-static void ivpu_pm_recovery_work(struct work_struct *work)
+static void ivpu_pm_reset_begin(struct ivpu_device *vdev)
{
- struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
- struct ivpu_device *vdev = pm->vdev;
- char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
- int ret;
-
- ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
-
- ret = pm_runtime_resume_and_get(vdev->drm.dev);
- if (ret)
- ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
-
- ivpu_jsm_state_dump(vdev);
- ivpu_dev_coredump(vdev);
+ pm_runtime_disable(vdev->drm.dev);
atomic_inc(&vdev->pm->reset_counter);
atomic_set(&vdev->pm->reset_pending, 1);
down_write(&vdev->pm->reset_lock);
+}
+
+static void ivpu_pm_reset_complete(struct ivpu_device *vdev)
+{
+ int ret;
- ivpu_suspend(vdev);
ivpu_pm_prepare_cold_boot(vdev);
ivpu_jobs_abort_all(vdev);
ivpu_ms_cleanup_all(vdev);
ret = ivpu_resume(vdev);
- if (ret)
+ if (ret) {
ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+ pm_runtime_set_suspended(vdev->drm.dev);
+ } else {
+ pm_runtime_set_active(vdev->drm.dev);
+ }
up_write(&vdev->pm->reset_lock);
atomic_set(&vdev->pm->reset_pending, 0);
- kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
pm_runtime_mark_last_busy(vdev->drm.dev);
- pm_runtime_put_autosuspend(vdev->drm.dev);
+ pm_runtime_enable(vdev->drm.dev);
+}
+
+static void ivpu_pm_recovery_work(struct work_struct *work)
+{
+ struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
+ struct ivpu_device *vdev = pm->vdev;
+ char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
+
+ ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
+
+ ivpu_pm_reset_begin(vdev);
+
+ if (!pm_runtime_status_suspended(vdev->drm.dev)) {
+ ivpu_jsm_state_dump(vdev);
+ ivpu_dev_coredump(vdev);
+ ivpu_suspend(vdev);
+ }
+
+ ivpu_pm_reset_complete(vdev);
+
+ kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
}
void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
@@ -161,16 +177,11 @@ void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
return;
}
- if (ivpu_is_fpga(vdev)) {
- ivpu_err(vdev, "Recovery not available on FPGA\n");
- return;
- }
-
/* Trigger recovery if it's not in progress */
if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
ivpu_hw_diagnose_failure(vdev);
ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */
- queue_work(system_long_wq, &vdev->pm->recovery_work);
+ queue_work(system_unbound_wq, &vdev->pm->recovery_work);
}
}
@@ -309,7 +320,10 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
int ret;
ret = pm_runtime_resume_and_get(vdev->drm.dev);
- drm_WARN_ON(&vdev->drm, ret < 0);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+ pm_runtime_set_suspended(vdev->drm.dev);
+ }
return ret;
}
@@ -325,16 +339,13 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
struct ivpu_device *vdev = pci_get_drvdata(pdev);
ivpu_dbg(vdev, PM, "Pre-reset..\n");
- atomic_inc(&vdev->pm->reset_counter);
- atomic_set(&vdev->pm->reset_pending, 1);
- pm_runtime_get_sync(vdev->drm.dev);
- down_write(&vdev->pm->reset_lock);
- ivpu_prepare_for_reset(vdev);
- ivpu_hw_reset(vdev);
- ivpu_pm_prepare_cold_boot(vdev);
- ivpu_jobs_abort_all(vdev);
- ivpu_ms_cleanup_all(vdev);
+ ivpu_pm_reset_begin(vdev);
+
+ if (!pm_runtime_status_suspended(vdev->drm.dev)) {
+ ivpu_prepare_for_reset(vdev);
+ ivpu_hw_reset(vdev);
+ }
ivpu_dbg(vdev, PM, "Pre-reset done.\n");
}
@@ -342,18 +353,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
{
struct ivpu_device *vdev = pci_get_drvdata(pdev);
- int ret;
ivpu_dbg(vdev, PM, "Post-reset..\n");
- ret = ivpu_resume(vdev);
- if (ret)
- ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
- up_write(&vdev->pm->reset_lock);
- atomic_set(&vdev->pm->reset_pending, 0);
- ivpu_dbg(vdev, PM, "Post-reset done.\n");
- pm_runtime_mark_last_busy(vdev->drm.dev);
- pm_runtime_put_autosuspend(vdev->drm.dev);
+ ivpu_pm_reset_complete(vdev);
+
+ ivpu_dbg(vdev, PM, "Post-reset done.\n");
}
void ivpu_pm_init(struct ivpu_device *vdev)
@@ -452,8 +457,9 @@ int ivpu_pm_dct_disable(struct ivpu_device *vdev)
return 0;
}
-void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev)
+void ivpu_pm_irq_dct_work_fn(struct work_struct *work)
{
+ struct ivpu_device *vdev = container_of(work, struct ivpu_device, irq_dct_work);
bool enable;
int ret;
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
index b70efe6c36e4..89b264cc0e3e 100644
--- a/drivers/accel/ivpu/ivpu_pm.h
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -45,6 +45,6 @@ void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev);
int ivpu_pm_dct_init(struct ivpu_device *vdev);
int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent);
int ivpu_pm_dct_disable(struct ivpu_device *vdev);
-void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev);
+void ivpu_pm_irq_dct_work_fn(struct work_struct *work);
#endif /* __IVPU_PM_H__ */
diff --git a/drivers/accel/ivpu/ivpu_sysfs.c b/drivers/accel/ivpu/ivpu_sysfs.c
index 616477fc17fa..97102feaf8dd 100644
--- a/drivers/accel/ivpu/ivpu_sysfs.c
+++ b/drivers/accel/ivpu/ivpu_sysfs.c
@@ -7,11 +7,14 @@
#include <linux/err.h>
#include "ivpu_drv.h"
+#include "ivpu_gem.h"
#include "ivpu_fw.h"
#include "ivpu_hw.h"
#include "ivpu_sysfs.h"
-/*
+/**
+ * DOC: npu_busy_time_us
+ *
* npu_busy_time_us is the time that the device spent executing jobs.
* The time is counted when and only when there are jobs submitted to firmware.
*
@@ -30,11 +33,12 @@ npu_busy_time_us_show(struct device *dev, struct device_attribute *attr, char *b
struct ivpu_device *vdev = to_ivpu_device(drm);
ktime_t total, now = 0;
- xa_lock(&vdev->submitted_jobs_xa);
+ mutex_lock(&vdev->submitted_jobs_lock);
+
total = vdev->busy_time;
if (!xa_empty(&vdev->submitted_jobs_xa))
now = ktime_sub(ktime_get(), vdev->busy_start_ts);
- xa_unlock(&vdev->submitted_jobs_xa);
+ mutex_unlock(&vdev->submitted_jobs_lock);
return sysfs_emit(buf, "%lld\n", ktime_to_us(ktime_add(total, now)));
}
@@ -42,6 +46,30 @@ npu_busy_time_us_show(struct device *dev, struct device_attribute *attr, char *b
static DEVICE_ATTR_RO(npu_busy_time_us);
/**
+ * DOC: npu_memory_utilization
+ *
+ * npu_memory_utilization reports the current NPU memory utilization in bytes.
+ *
+ */
+static ssize_t
+npu_memory_utilization_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ struct ivpu_bo *bo;
+ u64 total_npu_memory = 0;
+
+ mutex_lock(&vdev->bo_list_lock);
+ list_for_each_entry(bo, &vdev->bo_list, bo_list_node)
+ total_npu_memory += bo->base.base.size;
+ mutex_unlock(&vdev->bo_list_lock);
+
+ return sysfs_emit(buf, "%lld\n", total_npu_memory);
+}
+
+static DEVICE_ATTR_RO(npu_memory_utilization);
+
+/**
* DOC: sched_mode
*
* The sched_mode is used to report current NPU scheduling mode.
@@ -64,6 +92,7 @@ static DEVICE_ATTR_RO(sched_mode);
static struct attribute *ivpu_dev_attrs[] = {
&dev_attr_npu_busy_time_us.attr,
+ &dev_attr_npu_memory_utilization.attr,
&dev_attr_sched_mode.attr,
NULL,
};
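
The new npu_memory_utilization attribute simply sums the sizes of all GEM buffers on the device's bo_list, so userspace can poll it like any other sysfs file. A minimal userspace sketch follows; the accel node path is an assumption and depends on the enumerated device:

/* Hypothetical reader for the new attribute; the sysfs path is an assumption. */
#include <stdio.h>

int main(void)
{
	unsigned long long bytes = 0;
	FILE *f = fopen("/sys/class/accel/accel0/device/npu_memory_utilization", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &bytes) == 1)
		printf("NPU memory in use: %llu bytes\n", bytes);
	fclose(f);
	return 0;
}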
diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
index 8ab82e78dd94..13a14c6c6168 100644
--- a/drivers/accel/qaic/mhi_controller.c
+++ b/drivers/accel/qaic/mhi_controller.c
@@ -20,6 +20,11 @@ static unsigned int mhi_timeout_ms = 2000; /* 2 sec default */
module_param(mhi_timeout_ms, uint, 0600);
MODULE_PARM_DESC(mhi_timeout_ms, "MHI controller timeout value");
+static const char *fw_image_paths[FAMILY_MAX] = {
+ [FAMILY_AIC100] = "qcom/aic100/sbl.bin",
+ [FAMILY_AIC200] = "qcom/aic200/sbl.bin",
+};
+
static const struct mhi_channel_config aic100_channels[] = {
{
.name = "QAIC_LOOPBACK",
@@ -439,6 +444,297 @@ static const struct mhi_channel_config aic100_channels[] = {
},
};
+static const struct mhi_channel_config aic200_channels[] = {
+ {
+ .name = "QAIC_LOOPBACK",
+ .num = 0,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_LOOPBACK",
+ .num = 1,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SAHARA",
+ .num = 2,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SAHARA",
+ .num = 3,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SSR",
+ .num = 6,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SSR",
+ .num = 7,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_CONTROL",
+ .num = 10,
+ .num_elements = 128,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_CONTROL",
+ .num = 11,
+ .num_elements = 128,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_LOGGING",
+ .num = 12,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_LOGGING",
+ .num = 13,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_STATUS",
+ .num = 14,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_STATUS",
+ .num = 15,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TELEMETRY",
+ .num = 16,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TELEMETRY",
+ .num = 17,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TIMESYNC_PERIODIC",
+ .num = 22,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TIMESYNC_PERIODIC",
+ .num = 23,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "IPCR",
+ .num = 24,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "IPCR",
+ .num = 25,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ .wake_capable = false,
+ },
+};
+
static struct mhi_event_config aic100_events[] = {
{
.num_elements = 32,
@@ -454,16 +750,44 @@ static struct mhi_event_config aic100_events[] = {
},
};
-static struct mhi_controller_config aic100_config = {
- .max_channels = 128,
- .timeout_ms = 0, /* controlled by mhi_timeout */
- .buf_len = 0,
- .num_channels = ARRAY_SIZE(aic100_channels),
- .ch_cfg = aic100_channels,
- .num_events = ARRAY_SIZE(aic100_events),
- .event_cfg = aic100_events,
- .use_bounce_buf = false,
- .m2_no_db = false,
+static struct mhi_event_config aic200_events[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 0,
+ .channel = U32_MAX,
+ .priority = 1,
+ .mode = MHI_DB_BRST_DISABLE,
+ .data_type = MHI_ER_CTRL,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+static struct mhi_controller_config mhi_cntrl_configs[] = {
+ [FAMILY_AIC100] = {
+ .max_channels = 128,
+ .timeout_ms = 0, /* controlled by mhi_timeout */
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(aic100_channels),
+ .ch_cfg = aic100_channels,
+ .num_events = ARRAY_SIZE(aic100_events),
+ .event_cfg = aic100_events,
+ .use_bounce_buf = false,
+ .m2_no_db = false,
+ },
+ [FAMILY_AIC200] = {
+ .max_channels = 128,
+ .timeout_ms = 0, /* controlled by mhi_timeout */
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(aic200_channels),
+ .ch_cfg = aic200_channels,
+ .num_events = ARRAY_SIZE(aic200_events),
+ .event_cfg = aic200_events,
+ .use_bounce_buf = false,
+ .m2_no_db = false,
+ },
};
static int mhi_read_reg(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 *out)
@@ -545,8 +869,9 @@ static int mhi_reset_and_async_power_up(struct mhi_controller *mhi_cntrl)
}
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
- int mhi_irq, bool shared_msi)
+ int mhi_irq, bool shared_msi, int family)
{
+ struct mhi_controller_config mhi_config = mhi_cntrl_configs[family];
struct mhi_controller *mhi_cntrl;
int ret;
@@ -581,11 +906,18 @@ struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, voi
if (shared_msi) /* MSI shared with data path, no IRQF_NO_SUSPEND */
mhi_cntrl->irq_flags = IRQF_SHARED;
- mhi_cntrl->fw_image = "qcom/aic100/sbl.bin";
+ mhi_cntrl->fw_image = fw_image_paths[family];
+
+ if (family == FAMILY_AIC200) {
+ mhi_cntrl->name = "AIC200";
+ mhi_cntrl->seg_len = SZ_512K;
+ } else {
+ mhi_cntrl->name = "AIC100";
+ }
/* use latest configured timeout */
- aic100_config.timeout_ms = mhi_timeout_ms;
- ret = mhi_register_controller(mhi_cntrl, &aic100_config);
+ mhi_config.timeout_ms = mhi_timeout_ms;
+ ret = mhi_register_controller(mhi_cntrl, &mhi_config);
if (ret) {
pci_err(pci_dev, "mhi_register_controller failed %d\n", ret);
return ERR_PTR(ret);
diff --git a/drivers/accel/qaic/mhi_controller.h b/drivers/accel/qaic/mhi_controller.h
index 500e7f4af2af..8939f6ae185e 100644
--- a/drivers/accel/qaic/mhi_controller.h
+++ b/drivers/accel/qaic/mhi_controller.h
@@ -8,7 +8,7 @@
#define MHICONTROLLERQAIC_H_
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
- int mhi_irq, bool shared_msi);
+ int mhi_irq, bool shared_msi, int family);
void qaic_mhi_free_controller(struct mhi_controller *mhi_cntrl, bool link_up);
void qaic_mhi_start_reset(struct mhi_controller *mhi_cntrl);
void qaic_mhi_reset_done(struct mhi_controller *mhi_cntrl);
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index 02561b6cecc6..0dbb8e32e4b9 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -32,6 +32,12 @@
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
#define to_qaic_device(dev) (to_qaic_drm_device((dev))->qdev)
+enum aic_families {
+ FAMILY_AIC100,
+ FAMILY_AIC200,
+ FAMILY_MAX,
+};
+
enum __packed dev_states {
/* Device is offline or will be very soon */
QAIC_OFFLINE,
@@ -113,10 +119,10 @@ struct qaic_device {
struct pci_dev *pdev;
/* Req. ID of request that will be queued next in MHI control device */
u32 next_seq_num;
- /* Base address of bar 0 */
- void __iomem *bar_0;
- /* Base address of bar 2 */
- void __iomem *bar_2;
+ /* Base address of the MHI bar */
+ void __iomem *bar_mhi;
+ /* Base address of the DBCs bar */
+ void __iomem *bar_dbc;
/* Controller structure for MHI devices */
struct mhi_controller *mhi_cntrl;
/* MHI control channel device */
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 81819b9ef8d4..3b415e2c9431 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -34,13 +34,46 @@
MODULE_IMPORT_NS("DMA_BUF");
-#define PCI_DEV_AIC080 0xa080
-#define PCI_DEV_AIC100 0xa100
+#define PCI_DEVICE_ID_QCOM_AIC080 0xa080
+#define PCI_DEVICE_ID_QCOM_AIC100 0xa100
+#define PCI_DEVICE_ID_QCOM_AIC200 0xa110
#define QAIC_NAME "qaic"
#define QAIC_DESC "Qualcomm Cloud AI Accelerators"
#define CNTL_MAJOR 5
#define CNTL_MINOR 0
+struct qaic_device_config {
+ /* Indicates the AIC family the device belongs to */
+ int family;
+ /* A bitmask representing the available BARs */
+ int bar_mask;
+ /* An index value used to identify the MHI controller BAR */
+ unsigned int mhi_bar_idx;
+ /* An index value used to identify the DBCs BAR */
+ unsigned int dbc_bar_idx;
+};
+
+static const struct qaic_device_config aic080_config = {
+ .family = FAMILY_AIC100,
+ .bar_mask = BIT(0) | BIT(2) | BIT(4),
+ .mhi_bar_idx = 0,
+ .dbc_bar_idx = 2,
+};
+
+static const struct qaic_device_config aic100_config = {
+ .family = FAMILY_AIC100,
+ .bar_mask = BIT(0) | BIT(2) | BIT(4),
+ .mhi_bar_idx = 0,
+ .dbc_bar_idx = 2,
+};
+
+static const struct qaic_device_config aic200_config = {
+ .family = FAMILY_AIC200,
+ .bar_mask = BIT(0) | BIT(1) | BIT(2) | BIT(4),
+ .mhi_bar_idx = 1,
+ .dbc_bar_idx = 2,
+};
+
bool datapath_polling;
module_param(datapath_polling, bool, 0400);
MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
@@ -352,7 +385,8 @@ void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
release_dbc(qdev, i);
}
-static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
+static struct qaic_device *create_qdev(struct pci_dev *pdev,
+ const struct qaic_device_config *config)
{
struct device *dev = &pdev->dev;
struct qaic_drm_device *qddev;
@@ -365,12 +399,10 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
return NULL;
qdev->dev_state = QAIC_OFFLINE;
- if (id->device == PCI_DEV_AIC080 || id->device == PCI_DEV_AIC100) {
- qdev->num_dbc = 16;
- qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
- if (!qdev->dbc)
- return NULL;
- }
+ qdev->num_dbc = 16;
+ qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
+ if (!qdev->dbc)
+ return NULL;
qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
if (IS_ERR(qddev))
@@ -426,17 +458,18 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
return qdev;
}
-static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
+static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev,
+ const struct qaic_device_config *config)
{
int bars;
int ret;
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM) & 0x3f;
/* make sure the device has the expected BARs */
- if (bars != (BIT(0) | BIT(2) | BIT(4))) {
- pci_dbg(pdev, "%s: expected BARs 0, 2, and 4 not found in device. Found 0x%x\n",
- __func__, bars);
+ if (bars != config->bar_mask) {
+ pci_dbg(pdev, "%s: expected BARs %#x not found in device. Found %#x\n",
+ __func__, config->bar_mask, bars);
return -EINVAL;
}
@@ -449,13 +482,13 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
return ret;
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
- qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
- if (IS_ERR(qdev->bar_0))
- return PTR_ERR(qdev->bar_0);
+ qdev->bar_mhi = devm_ioremap_resource(&pdev->dev, &pdev->resource[config->mhi_bar_idx]);
+ if (IS_ERR(qdev->bar_mhi))
+ return PTR_ERR(qdev->bar_mhi);
- qdev->bar_2 = devm_ioremap_resource(&pdev->dev, &pdev->resource[2]);
- if (IS_ERR(qdev->bar_2))
- return PTR_ERR(qdev->bar_2);
+ qdev->bar_dbc = devm_ioremap_resource(&pdev->dev, &pdev->resource[config->dbc_bar_idx]);
+ if (IS_ERR(qdev->bar_dbc))
+ return PTR_ERR(qdev->bar_dbc);
/* Managed release since we use pcim_enable_device above */
pci_set_master(pdev);
@@ -465,14 +498,15 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
{
+ int irq_count = qdev->num_dbc + 1;
int mhi_irq;
int ret;
int i;
/* Managed release since we use pcim_enable_device */
- ret = pci_alloc_irq_vectors(pdev, 32, 32, PCI_IRQ_MSI);
+ ret = pci_alloc_irq_vectors(pdev, irq_count, irq_count, PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (ret == -ENOSPC) {
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (ret < 0)
return ret;
@@ -485,7 +519,8 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
* interrupted, it shouldn't race with itself.
*/
qdev->single_msi = true;
- pci_info(pdev, "Allocating 32 MSIs failed, operating in 1 MSI mode. Performance may be impacted.\n");
+ pci_info(pdev, "Allocating %d MSIs failed, operating in 1 MSI mode. Performance may be impacted.\n",
+ irq_count);
} else if (ret < 0) {
return ret;
}
@@ -515,21 +550,22 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct qaic_device_config *config = (struct qaic_device_config *)id->driver_data;
struct qaic_device *qdev;
int mhi_irq;
int ret;
int i;
- qdev = create_qdev(pdev, id);
+ qdev = create_qdev(pdev, config);
if (!qdev)
return -ENOMEM;
- ret = init_pci(qdev, pdev);
+ ret = init_pci(qdev, pdev, config);
if (ret)
return ret;
for (i = 0; i < qdev->num_dbc; ++i)
- qdev->dbc[i].dbc_base = qdev->bar_2 + QAIC_DBC_OFF(i);
+ qdev->dbc[i].dbc_base = qdev->bar_dbc + QAIC_DBC_OFF(i);
mhi_irq = init_msi(qdev, pdev);
if (mhi_irq < 0)
@@ -539,8 +575,8 @@ static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq,
- qdev->single_msi);
+ qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_mhi, mhi_irq,
+ qdev->single_msi, config->family);
if (IS_ERR(qdev->mhi_cntrl)) {
ret = PTR_ERR(qdev->mhi_cntrl);
qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
@@ -607,8 +643,9 @@ static struct mhi_driver qaic_mhi_driver = {
};
static const struct pci_device_id qaic_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC080), },
- { PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC100), },
+ { PCI_DEVICE_DATA(QCOM, AIC080, (kernel_ulong_t)&aic080_config), },
+ { PCI_DEVICE_DATA(QCOM, AIC100, (kernel_ulong_t)&aic100_config), },
+ { PCI_DEVICE_DATA(QCOM, AIC200, (kernel_ulong_t)&aic200_config), },
{ }
};
MODULE_DEVICE_TABLE(pci, qaic_ids);
diff --git a/drivers/accel/qaic/qaic_timesync.c b/drivers/accel/qaic/qaic_timesync.c
index 301f4462d51b..2473c66309d4 100644
--- a/drivers/accel/qaic/qaic_timesync.c
+++ b/drivers/accel/qaic/qaic_timesync.c
@@ -201,7 +201,7 @@ static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_devi
goto free_sync_msg;
/* Qtimer register pointer */
- mqtsdev->qtimer_addr = qdev->bar_0 + QTIMER_REG_OFFSET;
+ mqtsdev->qtimer_addr = qdev->bar_mhi + QTIMER_REG_OFFSET;
timer_setup(timer, qaic_timesync_timer, 0);
timer->expires = jiffies + msecs_to_jiffies(timesync_delay_ms);
add_timer(timer);
diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c
index 21d58aed0deb..3ebcc1f7ff58 100644
--- a/drivers/accel/qaic/sahara.c
+++ b/drivers/accel/qaic/sahara.c
@@ -160,7 +160,7 @@ struct sahara_context {
struct work_struct fw_work;
struct work_struct dump_work;
struct mhi_device *mhi_dev;
- const char **image_table;
+ const char * const *image_table;
u32 table_size;
u32 active_image_id;
const struct firmware *firmware;
@@ -177,7 +177,7 @@ struct sahara_context {
bool is_mem_dump_mode;
};
-static const char *aic100_image_table[] = {
+static const char * const aic100_image_table[] = {
[1] = "qcom/aic100/fw1.bin",
[2] = "qcom/aic100/fw2.bin",
[4] = "qcom/aic100/fw4.bin",
@@ -188,6 +188,34 @@ static const char *aic100_image_table[] = {
[10] = "qcom/aic100/fw10.bin",
};
+static const char * const aic200_image_table[] = {
+ [5] = "qcom/aic200/uefi.elf",
+ [12] = "qcom/aic200/aic200-nsp.bin",
+ [23] = "qcom/aic200/aop.mbn",
+ [32] = "qcom/aic200/tz.mbn",
+ [33] = "qcom/aic200/hypvm.mbn",
+ [39] = "qcom/aic200/aic200_abl.elf",
+ [40] = "qcom/aic200/apdp.mbn",
+ [41] = "qcom/aic200/devcfg.mbn",
+ [42] = "qcom/aic200/sec.elf",
+ [43] = "qcom/aic200/aic200-hlos.elf",
+ [49] = "qcom/aic200/shrm.elf",
+ [50] = "qcom/aic200/cpucp.elf",
+ [51] = "qcom/aic200/aop_devcfg.mbn",
+ [57] = "qcom/aic200/cpucp_dtbs.elf",
+ [62] = "qcom/aic200/uefi_dtbs.elf",
+ [63] = "qcom/aic200/xbl_ac_config.mbn",
+ [64] = "qcom/aic200/tz_ac_config.mbn",
+ [65] = "qcom/aic200/hyp_ac_config.mbn",
+ [66] = "qcom/aic200/pdp.elf",
+ [67] = "qcom/aic200/pdp_cdb.elf",
+ [68] = "qcom/aic200/sdi.mbn",
+ [69] = "qcom/aic200/dcd.mbn",
+ [73] = "qcom/aic200/gearvm.mbn",
+ [74] = "qcom/aic200/sti.bin",
+ [75] = "qcom/aic200/pvs.bin",
+};
+
static int sahara_find_image(struct sahara_context *context, u32 image_id)
{
int ret;
@@ -748,8 +776,15 @@ static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_
context->mhi_dev = mhi_dev;
INIT_WORK(&context->fw_work, sahara_processing);
INIT_WORK(&context->dump_work, sahara_dump_processing);
- context->image_table = aic100_image_table;
- context->table_size = ARRAY_SIZE(aic100_image_table);
+
+ if (!strcmp(mhi_dev->mhi_cntrl->name, "AIC200")) {
+ context->image_table = aic200_image_table;
+ context->table_size = ARRAY_SIZE(aic200_image_table);
+ } else {
+ context->image_table = aic100_image_table;
+ context->table_size = ARRAY_SIZE(aic100_image_table);
+ }
+
context->active_image_id = SAHARA_IMAGE_ID_NONE;
dev_set_drvdata(&mhi_dev->dev, context);
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index 3561553eff8b..70f8290b659d 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -163,7 +163,7 @@ int __init acpi_gtdt_init(struct acpi_table_header *table,
{
void *platform_timer;
struct acpi_table_gtdt *gtdt;
- int cnt = 0;
+ u32 cnt = 0;
gtdt = container_of(table, struct acpi_table_gtdt, header);
acpi_gtdt_desc.gtdt = gtdt;
@@ -188,13 +188,17 @@ int __init acpi_gtdt_init(struct acpi_table_header *table,
cnt++;
if (cnt != gtdt->platform_timer_count) {
+ cnt = min(cnt, gtdt->platform_timer_count);
+ pr_err(FW_BUG "limiting Platform Timer count to %d\n", cnt);
+ }
+
+ if (!cnt) {
acpi_gtdt_desc.platform_timer = NULL;
- pr_err(FW_BUG "invalid timer data.\n");
- return -EINVAL;
+ return 0;
}
if (platform_timer_count)
- *platform_timer_count = gtdt->platform_timer_count;
+ *platform_timer_count = cnt;
return 0;
}
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index fc92e43d0fe9..2ad53cc6aae5 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -417,8 +417,14 @@ static int profile_class_registered(struct device *dev, const void *data)
static umode_t profile_class_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
- if (!class_find_device(&platform_profile_class, NULL, NULL, profile_class_registered))
+ struct device *dev;
+
+ dev = class_find_device(&platform_profile_class, NULL, NULL, profile_class_registered);
+ if (!dev)
return 0;
+
+ put_device(dev);
+
return attr->mode;
}
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 747f83f7114d..e549914a636c 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -287,9 +287,7 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
if (!handler || !module)
goto invalid_guid;
- if (!handler->handler_addr ||
- !handler->static_data_buffer_addr ||
- !handler->acpi_param_buffer_addr) {
+ if (!handler->handler_addr) {
buffer->prm_status = PRM_HANDLER_ERROR;
return AE_OK;
}
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 98d93ed58315..436019d96027 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1187,8 +1187,6 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
}
break;
}
- if (nval == 0)
- return -EINVAL;
if (obj->type == ACPI_TYPE_BUFFER) {
if (proptype != DEV_PROP_U8)
@@ -1212,9 +1210,11 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
break;
case DEV_PROP_STRING:
- ret = acpi_copy_property_array_string(
- items, (char **)val,
- min_t(u32, nval, obj->package.count));
+ nval = min_t(u32, nval, obj->package.count);
+ if (nval == 0)
+ return -ENODATA;
+
+ ret = acpi_copy_property_array_string(items, (char **)val, nval);
break;
default:
ret = -EINVAL;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 90aaec923889..b4cd14e7fa76 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -564,6 +564,12 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
},
},
{
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "MECH-17"),
+ },
+ },
+ {
/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 53b2c7719dc5..91d44302eac9 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -651,8 +651,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
* If no sub-node was found, keep this for device tree
* compatibility
*/
- hpriv->mask_port_map |= BIT(0);
-
rc = ahci_platform_get_phy(hpriv, 0, dev, dev->of_node);
if (rc)
goto err_out;
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 7fb21768ca36..8074a10183dc 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,7 +6,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o \
topology.o container.o property.o cacheinfo.o \
- swnode.o
+ swnode.o faux.o
obj-$(CONFIG_AUXILIARY_BUS) += auxiliary.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-y += power/
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 8cf04a557bdb..0042e4774b0c 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -137,6 +137,7 @@ int hypervisor_init(void);
static inline int hypervisor_init(void) { return 0; }
#endif
int platform_bus_init(void);
+int faux_bus_init(void);
void cpu_dev_init(void);
void container_dev_init(void);
#ifdef CONFIG_AUXILIARY_BUS
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
new file mode 100644
index 000000000000..531e9d789ee0
--- /dev/null
+++ b/drivers/base/faux.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2025 The Linux Foundation
+ *
+ * A "simple" faux bus that allows devices to be created and added
+ * automatically to it. This is to be used whenever you need to create a
+ * device that is not associated with any "real" system resources, and do
+ * not want to deal with bus/driver binding logic. It is
+ * intended to be very simple, with only a create and a destroy function
+ * available.
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/container_of.h>
+#include <linux/device/faux.h>
+#include "base.h"
+
+/*
+ * Internal wrapper structure so we can hold a pointer to the
+ * faux_device_ops for this device.
+ */
+struct faux_object {
+ struct faux_device faux_dev;
+ const struct faux_device_ops *faux_ops;
+};
+#define to_faux_object(dev) container_of_const(dev, struct faux_object, faux_dev.dev)
+
+static struct device faux_bus_root = {
+ .init_name = "faux",
+};
+
+static int faux_match(struct device *dev, const struct device_driver *drv)
+{
+ /* Match always succeeds, we only have one driver */
+ return 1;
+}
+
+static int faux_probe(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+ struct faux_device *faux_dev = &faux_obj->faux_dev;
+ const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
+ int ret = 0;
+
+ if (faux_ops && faux_ops->probe)
+ ret = faux_ops->probe(faux_dev);
+
+ return ret;
+}
+
+static void faux_remove(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+ struct faux_device *faux_dev = &faux_obj->faux_dev;
+ const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
+
+ if (faux_ops && faux_ops->remove)
+ faux_ops->remove(faux_dev);
+}
+
+static const struct bus_type faux_bus_type = {
+ .name = "faux",
+ .match = faux_match,
+ .probe = faux_probe,
+ .remove = faux_remove,
+};
+
+static struct device_driver faux_driver = {
+ .name = "faux_driver",
+ .bus = &faux_bus_type,
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+};
+
+static void faux_device_release(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+
+ kfree(faux_obj);
+}
+
+/**
+ * faux_device_create_with_groups - Create and register with the driver
+ * core a faux device and populate the device with an initial
+ * set of sysfs attributes.
+ * @name: The name of the device we are adding, must be unique for
+ * all faux devices.
+ * @parent: Pointer to a potential parent struct device. If set to
+ * NULL, the device will be created in the "root" of the faux
+ * device tree in sysfs.
+ * @faux_ops: struct faux_device_ops that the new device will call back
+ * into, can be NULL.
+ * @groups: The set of sysfs attributes that will be created for this
+ * device when it is registered with the driver core.
+ *
+ * Create a new faux device and register it in the driver core properly.
+ * If present, callbacks in @faux_ops will be called with the device so
+ * that the caller can act on it at the appropriate points in the
+ * device's lifecycle.
+ *
+ * Note that the callbacks in @faux_ops may be invoked before this
+ * function returns, so be sure everything they rely on is fully
+ * initialized before calling this function.
+ *
+ * Return:
+ * * NULL if an error happened with creating the device
+ * * pointer to a valid struct faux_device that is registered with sysfs
+ */
+struct faux_device *faux_device_create_with_groups(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops,
+ const struct attribute_group **groups)
+{
+ struct faux_object *faux_obj;
+ struct faux_device *faux_dev;
+ struct device *dev;
+ int ret;
+
+ faux_obj = kzalloc(sizeof(*faux_obj), GFP_KERNEL);
+ if (!faux_obj)
+ return NULL;
+
+ /* Save off the callbacks so we can use them in the future */
+ faux_obj->faux_ops = faux_ops;
+
+ /* Initialize the device portion and register it with the driver core */
+ faux_dev = &faux_obj->faux_dev;
+ dev = &faux_dev->dev;
+
+ device_initialize(dev);
+ dev->release = faux_device_release;
+ if (parent)
+ dev->parent = parent;
+ else
+ dev->parent = &faux_bus_root;
+ dev->bus = &faux_bus_type;
+ dev->groups = groups;
+ dev_set_name(dev, "%s", name);
+
+ ret = device_add(dev);
+ if (ret) {
+ pr_err("%s: device_add for faux device '%s' failed with %d\n",
+ __func__, name, ret);
+ put_device(dev);
+ return NULL;
+ }
+
+ return faux_dev;
+}
+EXPORT_SYMBOL_GPL(faux_device_create_with_groups);
+
+/**
+ * faux_device_create - create and register with the driver core a faux device
+ * @name: The name of the device we are adding, must be unique for all
+ * faux devices.
+ * @parent: Pointer to a potential parent struct device. If set to
+ * NULL, the device will be created in the "root" of the faux
+ * device tree in sysfs.
+ * @faux_ops: struct faux_device_ops that the new device will call back
+ * into, can be NULL.
+ *
+ * Create a new faux device and register it in the driver core properly.
+ * If present, callbacks in @faux_ops will be called with the device so
+ * that the caller can act on it at the appropriate points in the
+ * device's lifecycle.
+ *
+ * Note that the callbacks in @faux_ops may be invoked before this
+ * function returns, so be sure everything they rely on is fully
+ * initialized before calling this function.
+ *
+ * Return:
+ * * NULL if an error happened with creating the device
+ * * pointer to a valid struct faux_device that is registered with sysfs
+ */
+struct faux_device *faux_device_create(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops)
+{
+ return faux_device_create_with_groups(name, parent, faux_ops, NULL);
+}
+EXPORT_SYMBOL_GPL(faux_device_create);
+
+/**
+ * faux_device_destroy - destroy a faux device
+ * @faux_dev: faux device to destroy
+ *
+ * Unregisters and cleans up a device that was created with a call to
+ * faux_device_create()
+ */
+void faux_device_destroy(struct faux_device *faux_dev)
+{
+ struct device *dev = &faux_dev->dev;
+
+ if (!faux_dev)
+ return;
+
+ device_del(dev);
+
+ /* The final put_device() will clean up the memory we allocated for this device. */
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(faux_device_destroy);
+
+int __init faux_bus_init(void)
+{
+ int ret;
+
+ ret = device_register(&faux_bus_root);
+ if (ret) {
+ put_device(&faux_bus_root);
+ return ret;
+ }
+
+ ret = bus_register(&faux_bus_type);
+ if (ret)
+ goto error_bus;
+
+ ret = driver_register(&faux_driver);
+ if (ret)
+ goto error_driver;
+
+ return ret;
+
+error_driver:
+ bus_unregister(&faux_bus_type);
+
+error_bus:
+ device_unregister(&faux_bus_root);
+ return ret;
+}
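
To illustrate how a client is expected to use the new faux bus, here is a minimal sketch built only on the API added above (faux_device_create(), faux_device_destroy(), struct faux_device_ops); the module name and callbacks are hypothetical:

/* Hypothetical consumer of the faux bus; only the faux_* API comes from this patch. */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/device/faux.h>

static struct faux_device *demo_fdev;

static int demo_probe(struct faux_device *fdev)
{
	dev_info(&fdev->dev, "demo faux device bound\n");
	return 0;
}

static void demo_remove(struct faux_device *fdev)
{
	dev_info(&fdev->dev, "demo faux device going away\n");
}

static const struct faux_device_ops demo_ops = {
	.probe = demo_probe,
	.remove = demo_remove,
};

static int __init demo_init(void)
{
	/* NULL parent places the device under the faux root in sysfs. */
	demo_fdev = faux_device_create("demo", NULL, &demo_ops);
	return demo_fdev ? 0 : -ENODEV;
}

static void __exit demo_exit(void)
{
	faux_device_destroy(demo_fdev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("faux bus usage sketch");
MODULE_LICENSE("GPL");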
diff --git a/drivers/base/init.c b/drivers/base/init.c
index c4954835128c..9d2b06d65dfc 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -32,6 +32,7 @@ void __init driver_init(void)
/* These are also core pieces, but must come after the
* core core pieces.
*/
+ faux_bus_init();
of_core_init();
platform_bus_init();
auxiliary_bus_init();
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index d497d448e4b2..40e1d8d8a589 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1191,24 +1191,18 @@ static pm_message_t resume_event(pm_message_t sleep_state)
return PMSG_ON;
}
-static void dpm_superior_set_must_resume(struct device *dev, bool set_active)
+static void dpm_superior_set_must_resume(struct device *dev)
{
struct device_link *link;
int idx;
- if (dev->parent) {
+ if (dev->parent)
dev->parent->power.must_resume = true;
- if (set_active)
- dev->parent->power.set_active = true;
- }
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
link->supplier->power.must_resume = true;
- if (set_active)
- link->supplier->power.set_active = true;
- }
device_links_read_unlock(idx);
}
@@ -1287,9 +1281,12 @@ Skip:
dev->power.must_resume = true;
if (dev->power.must_resume) {
- dev->power.set_active = dev->power.set_active ||
- dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
- dpm_superior_set_must_resume(dev, dev->power.set_active);
+ if (dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) {
+ dev->power.set_active = true;
+ if (dev->parent && !dev->parent->power.ignore_children)
+ dev->parent->power.set_active = true;
+ }
+ dpm_superior_set_must_resume(dev);
}
Complete:
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 0bcd81389a29..978613407ea3 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -906,6 +906,7 @@ err_alloc:
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
+ kfree(d->main_status_buf);
kfree(d->status_buf);
kfree(d->status_reg_buf);
if (d->config_buf) {
@@ -981,6 +982,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
+ kfree(d->main_status_buf);
kfree(d->status_reg_buf);
kfree(d->status_buf);
if (d->config_buf) {
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 33b3bc99d532..282f81616a78 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -1127,8 +1127,8 @@ static void vdc_queue_drain(struct vdc_port *port)
spin_lock_irq(&port->vio.lock);
port->drain = 0;
- blk_mq_unquiesce_queue(q, memflags);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
}
static void vdc_ldc_reset_timer_work(struct work_struct *work)
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 2b79952f3628..091ffe3e1495 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -1320,6 +1320,10 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
if (opcode == 0xfc01)
btintel_pcie_inject_cmd_complete(hdev, opcode);
}
+ /* Firmware raises alive interrupt on HCI_OP_RESET */
+ if (opcode == HCI_OP_RESET)
+ data->gp0_received = false;
+
hdev->stat.cmd_tx++;
break;
case HCI_ACLDATA_PKT:
@@ -1357,7 +1361,6 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
if (opcode == HCI_OP_RESET) {
- data->gp0_received = false;
ret = wait_event_timeout(data->gp0_wait_q,
data->gp0_received,
msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index 9dcc7184817d..c8e48f621a8c 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -177,6 +177,36 @@ int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic)
}
EXPORT_SYMBOL_GPL(mhi_download_rddm_image);
+static void mhi_fw_load_error_dump(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ void __iomem *base = mhi_cntrl->bhi;
+ int ret, i;
+ u32 val;
+ struct {
+ char *name;
+ u32 offset;
+ } error_reg[] = {
+ { "ERROR_CODE", BHI_ERRCODE },
+ { "ERROR_DBG1", BHI_ERRDBG1 },
+ { "ERROR_DBG2", BHI_ERRDBG2 },
+ { "ERROR_DBG3", BHI_ERRDBG3 },
+ { NULL },
+ };
+
+ read_lock_bh(pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ for (i = 0; error_reg[i].name; i++) {
+ ret = mhi_read_reg(mhi_cntrl, base, error_reg[i].offset, &val);
+ if (ret)
+ break;
+ dev_err(dev, "Reg: %s value: 0x%x\n", error_reg[i].name, val);
+ }
+ }
+ read_unlock_bh(pm_lock);
+}
+
static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
const struct mhi_buf *mhi_buf)
{
@@ -226,24 +256,13 @@ static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
}
static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
- dma_addr_t dma_addr,
- size_t size)
+ const struct mhi_buf *mhi_buf)
{
- u32 tx_status, val, session_id;
- int i, ret;
- void __iomem *base = mhi_cntrl->bhi;
- rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- struct {
- char *name;
- u32 offset;
- } error_reg[] = {
- { "ERROR_CODE", BHI_ERRCODE },
- { "ERROR_DBG1", BHI_ERRDBG1 },
- { "ERROR_DBG2", BHI_ERRDBG2 },
- { "ERROR_DBG3", BHI_ERRDBG3 },
- { NULL },
- };
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ void __iomem *base = mhi_cntrl->bhi;
+ u32 tx_status, session_id;
+ int ret;
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
@@ -255,11 +274,9 @@ static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n",
session_id);
mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
- mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
- upper_32_bits(dma_addr));
- mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
- lower_32_bits(dma_addr));
- mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, upper_32_bits(mhi_buf->dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, lower_32_bits(mhi_buf->dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, mhi_buf->len);
mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
read_unlock_bh(pm_lock);
@@ -274,18 +291,7 @@ static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
if (tx_status == BHI_STATUS_ERROR) {
dev_err(dev, "Image transfer failed\n");
- read_lock_bh(pm_lock);
- if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
- for (i = 0; error_reg[i].name; i++) {
- ret = mhi_read_reg(mhi_cntrl, base,
- error_reg[i].offset, &val);
- if (ret)
- break;
- dev_err(dev, "Reg: %s value: 0x%x\n",
- error_reg[i].name, val);
- }
- }
- read_unlock_bh(pm_lock);
+ mhi_fw_load_error_dump(mhi_cntrl);
goto invalid_pm_state;
}
@@ -296,6 +302,16 @@ invalid_pm_state:
return -EIO;
}
+static void mhi_free_bhi_buffer(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info)
+{
+ struct mhi_buf *mhi_buf = image_info->mhi_buf;
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, mhi_buf->buf, mhi_buf->dma_addr);
+ kfree(image_info->mhi_buf);
+ kfree(image_info);
+}
+
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info *image_info)
{
@@ -310,6 +326,45 @@ void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
kfree(image_info);
}
+static int mhi_alloc_bhi_buffer(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info,
+ size_t alloc_size)
+{
+ struct image_info *img_info;
+ struct mhi_buf *mhi_buf;
+
+ img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
+ if (!img_info)
+ return -ENOMEM;
+
+ /* Allocate memory for entry */
+ img_info->mhi_buf = kzalloc(sizeof(*img_info->mhi_buf), GFP_KERNEL);
+ if (!img_info->mhi_buf)
+ goto error_alloc_mhi_buf;
+
+	/* Allocate the single firmware image buffer */
+ mhi_buf = img_info->mhi_buf;
+
+ mhi_buf->len = alloc_size;
+ mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
+ &mhi_buf->dma_addr, GFP_KERNEL);
+ if (!mhi_buf->buf)
+ goto error_alloc_segment;
+
+ img_info->bhi_vec = NULL;
+ img_info->entries = 1;
+ *image_info = img_info;
+
+ return 0;
+
+error_alloc_segment:
+ kfree(mhi_buf);
+error_alloc_mhi_buf:
+ kfree(img_info);
+
+ return -ENOMEM;
+}
+
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info **image_info,
size_t alloc_size)
@@ -365,9 +420,9 @@ error_alloc_mhi_buf:
return -ENOMEM;
}
-static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
- const u8 *buf, size_t remainder,
- struct image_info *img_info)
+static void mhi_firmware_copy_bhie(struct mhi_controller *mhi_cntrl,
+ const u8 *buf, size_t remainder,
+ struct image_info *img_info)
{
size_t to_cpy;
struct mhi_buf *mhi_buf = img_info->mhi_buf;
@@ -386,15 +441,61 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
}
}
+static enum mhi_fw_load_type mhi_fw_load_type_get(const struct mhi_controller *mhi_cntrl)
+{
+ if (mhi_cntrl->fbc_download) {
+ return MHI_FW_LOAD_FBC;
+ } else {
+ if (mhi_cntrl->seg_len)
+ return MHI_FW_LOAD_BHIE;
+ else
+ return MHI_FW_LOAD_BHI;
+ }
+}
+
+static int mhi_load_image_bhi(struct mhi_controller *mhi_cntrl, const u8 *fw_data, size_t size)
+{
+ struct image_info *image;
+ int ret;
+
+ ret = mhi_alloc_bhi_buffer(mhi_cntrl, &image, size);
+ if (ret)
+ return ret;
+
+	/* Copy the firmware into the BHI image buffer */
+ memcpy(image->mhi_buf->buf, fw_data, size);
+
+ ret = mhi_fw_load_bhi(mhi_cntrl, &image->mhi_buf[image->entries - 1]);
+ mhi_free_bhi_buffer(mhi_cntrl, image);
+
+ return ret;
+}
+
+static int mhi_load_image_bhie(struct mhi_controller *mhi_cntrl, const u8 *fw_data, size_t size)
+{
+ struct image_info *image;
+ int ret;
+
+ ret = mhi_alloc_bhie_table(mhi_cntrl, &image, size);
+ if (ret)
+ return ret;
+
+ mhi_firmware_copy_bhie(mhi_cntrl, fw_data, size, image);
+
+ ret = mhi_fw_load_bhie(mhi_cntrl, &image->mhi_buf[image->entries - 1]);
+ mhi_free_bhie_table(mhi_cntrl, image);
+
+ return ret;
+}
+
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
const struct firmware *firmware = NULL;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_fw_load_type fw_load_type;
enum mhi_pm_state new_state;
const char *fw_name;
const u8 *fw_data;
- void *buf;
- dma_addr_t dma_addr;
size_t size, fw_sz;
int ret;
@@ -453,21 +554,17 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
fw_sz = firmware->size;
skip_req_fw:
- buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr,
- GFP_KERNEL);
- if (!buf) {
- release_firmware(firmware);
- goto error_fw_load;
- }
-
- /* Download image using BHI */
- memcpy(buf, fw_data, size);
- ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size);
- dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr);
+ fw_load_type = mhi_fw_load_type_get(mhi_cntrl);
+ if (fw_load_type == MHI_FW_LOAD_BHIE)
+ ret = mhi_load_image_bhie(mhi_cntrl, fw_data, size);
+ else
+ ret = mhi_load_image_bhi(mhi_cntrl, fw_data, size);
/* Error or in EDL mode, we're done */
if (ret) {
- dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret);
+ dev_err(dev, "MHI did not load image over BHI%s, ret: %d\n",
+ fw_load_type == MHI_FW_LOAD_BHIE ? "e" : "",
+ ret);
release_firmware(firmware);
goto error_fw_load;
}
@@ -486,7 +583,7 @@ skip_req_fw:
* If we're doing fbc, populate vector tables while
* device transitioning into MHI READY state
*/
- if (mhi_cntrl->fbc_download) {
+ if (fw_load_type == MHI_FW_LOAD_FBC) {
ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, fw_sz);
if (ret) {
release_firmware(firmware);
@@ -494,7 +591,7 @@ skip_req_fw:
}
/* Load the firmware into BHIE vec table */
- mhi_firmware_copy(mhi_cntrl, fw_data, fw_sz, mhi_cntrl->fbc_image);
+ mhi_firmware_copy_bhie(mhi_cntrl, fw_data, fw_sz, mhi_cntrl->fbc_image);
}
release_firmware(firmware);
@@ -511,7 +608,7 @@ fw_load_ready_state:
return;
error_ready_state:
- if (mhi_cntrl->fbc_download) {
+ if (fw_load_type == MHI_FW_LOAD_FBC) {
mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
mhi_cntrl->fbc_image = NULL;
}
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index a9b1f8beee7b..13e7a55f54ff 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -1144,7 +1144,7 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
}
mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;
- if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
+ if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size || mhi_cntrl->seg_len) {
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
&bhie_off);
if (ret) {
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index 3134f111be35..ce566f7d2e92 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -29,6 +29,13 @@ struct bhi_vec_entry {
u64 size;
};
+enum mhi_fw_load_type {
+ MHI_FW_LOAD_BHI, /* BHI only in PBL */
+ MHI_FW_LOAD_BHIE, /* BHIe only in PBL */
+ MHI_FW_LOAD_FBC, /* BHI in PBL followed by BHIe in SBL */
+ MHI_FW_LOAD_MAX,
+};
+
enum mhi_ch_state_type {
MHI_CH_STATE_TYPE_RESET,
MHI_CH_STATE_TYPE_STOP,
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 6276551d7968..1e57ebfb7622 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -657,7 +657,7 @@ static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)
id = moxtet->modules[pos->idx];
- seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
+ seq_printf(p, "moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
pos->bit);
}
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
index a3fe98cd3838..82815428f8f9 100644
--- a/drivers/clocksource/jcore-pit.c
+++ b/drivers/clocksource/jcore-pit.c
@@ -114,6 +114,18 @@ static int jcore_pit_local_init(unsigned cpu)
pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd);
clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX);
+ enable_percpu_irq(pit->ced.irq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int jcore_pit_local_teardown(unsigned cpu)
+{
+ struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu);
+
+ pr_info("Local J-Core PIT teardown on cpu %u\n", cpu);
+
+ disable_percpu_irq(pit->ced.irq);
return 0;
}
@@ -168,6 +180,7 @@ static int __init jcore_pit_init(struct device_node *node)
return -ENOMEM;
}
+ irq_set_percpu_devid(pit_irq);
err = request_percpu_irq(pit_irq, jcore_timer_interrupt,
"jcore_pit", jcore_pit_percpu);
if (err) {
@@ -237,7 +250,7 @@ static int __init jcore_pit_init(struct device_node *node)
cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
"clockevents/jcore:starting",
- jcore_pit_local_init, NULL);
+ jcore_pit_local_init, jcore_pit_local_teardown);
return 0;
}
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0ee5c691fb36..9e46960f6a86 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -17,7 +17,8 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
config ARM_AIROHA_SOC_CPUFREQ
tristate "Airoha EN7581 SoC CPUFreq support"
- depends on (ARCH_AIROHA && OF) || COMPILE_TEST
+ depends on ARCH_AIROHA || COMPILE_TEST
+ depends on OF
select PM_OPP
default ARCH_AIROHA
help
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index dd9b8d6993d6..313550fa62d4 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -699,7 +699,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (min_perf < lowest_nonlinear_perf)
min_perf = lowest_nonlinear_perf;
- max_perf = cap_perf;
+ max_perf = cpudata->max_limit_perf;
if (max_perf < min_perf)
max_perf = min_perf;
@@ -747,7 +747,6 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_cpu_boost_update(policy, state);
- policy->boost_enabled = !ret ? state : false;
refresh_frequency_limits(policy);
return ret;
@@ -822,25 +821,28 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
static void amd_pstate_update_limits(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy = NULL;
struct amd_cpudata *cpudata;
u32 prev_high = 0, cur_high = 0;
int ret;
bool highest_perf_changed = false;
+ if (!amd_pstate_prefcore)
+ return;
+
+ policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
cpudata = policy->driver_data;
- if (!amd_pstate_prefcore)
- return;
-
guard(mutex)(&amd_pstate_driver_lock);
ret = amd_get_highest_perf(cpu, &cur_high);
- if (ret)
- goto free_cpufreq_put;
+ if (ret) {
+ cpufreq_cpu_put(policy);
+ return;
+ }
prev_high = READ_ONCE(cpudata->prefcore_ranking);
highest_perf_changed = (prev_high != cur_high);
@@ -850,8 +852,6 @@ static void amd_pstate_update_limits(unsigned int cpu)
if (cur_high < CPPC_MAX_PERF)
sched_set_itmt_core_prio((int)cur_high, cpu);
}
-
-free_cpufreq_put:
cpufreq_cpu_put(policy);
if (!highest_perf_changed)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e0048856ecee..30ffbddc7ece 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1571,7 +1571,8 @@ static int cpufreq_online(unsigned int cpu)
policy->cdev = of_cpufreq_cooling_register(policy);
/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
- if (policy->boost_enabled != cpufreq_boost_enabled()) {
+ if (cpufreq_driver->set_boost &&
+ policy->boost_enabled != cpufreq_boost_enabled()) {
policy->boost_enabled = cpufreq_boost_enabled();
ret = cpufreq_driver->set_boost(policy, policy->boost_enabled);
if (ret) {
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index 7eb3e4668286..3467f6db4f50 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/ccp.h>
+#include "sev-dev.h"
#include "ccp-dev.h"
#include "sp-dev.h"
@@ -253,8 +254,12 @@ unlock:
static int __init sp_mod_init(void)
{
#ifdef CONFIG_X86
+ static bool initialized;
int ret;
+ if (initialized)
+ return 0;
+
ret = sp_pci_init();
if (ret)
return ret;
@@ -263,6 +268,8 @@ static int __init sp_mod_init(void)
psp_pci_init();
#endif
+ initialized = true;
+
return 0;
#endif
@@ -279,6 +286,13 @@ static int __init sp_mod_init(void)
return -ENODEV;
}
+#if IS_BUILTIN(CONFIG_KVM_AMD) && IS_ENABLED(CONFIG_KVM_AMD_SEV)
+int __init sev_module_init(void)
+{
+ return sp_mod_init();
+}
+#endif
+
static void __exit sp_mod_exit(void)
{
#ifdef CONFIG_X86
diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
index 6345062731f1..2a059ac0ed27 100644
--- a/drivers/dma-buf/dma-fence-unwrap.c
+++ b/drivers/dma-buf/dma-fence-unwrap.c
@@ -84,8 +84,8 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence **fences,
struct dma_fence_unwrap *iter)
{
+ struct dma_fence *tmp, *unsignaled = NULL, **array;
struct dma_fence_array *result;
- struct dma_fence *tmp, **array;
ktime_t timestamp;
int i, j, count;
@@ -94,6 +94,8 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
for (i = 0; i < num_fences; ++i) {
dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
if (!dma_fence_is_signaled(tmp)) {
+ dma_fence_put(unsignaled);
+ unsignaled = dma_fence_get(tmp);
++count;
} else {
ktime_t t = dma_fence_timestamp(tmp);
@@ -107,9 +109,16 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
/*
* If we couldn't find a pending fence just return a private signaled
* fence with the timestamp of the last signaled one.
+ *
+ * Or, if only a single unsignaled fence was left, we can return it
+ * directly and early, since that is a major path on many workloads.
*/
if (count == 0)
return dma_fence_allocate_private_stub(timestamp);
+ else if (count == 1)
+ return unsignaled;
+
+ dma_fence_put(unsignaled);
array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
if (!array)
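A hedged usage sketch (hypothetical caller, not from the patch): dma_fence_unwrap_merge() returns a signaled stub, the single remaining unsignaled fence (the new fast path above), or a newly built array; NULL indicates an allocation failure.

#include <linux/dma-fence-unwrap.h>
#include <linux/err.h>

/* Hypothetical caller; f1 and f2 are fences the caller already holds. */
static struct dma_fence *example_merge(struct dma_fence *f1, struct dma_fence *f2)
{
        struct dma_fence *merged;

        merged = dma_fence_unwrap_merge(f1, f2);
        if (!merged)
                return ERR_PTR(-ENOMEM);        /* allocation failure */

        /* merged holds its own references; f1 and f2 still belong to the caller */
        return merged;
}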
diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c
index f0cee984b6c7..a3be888ae2e8 100644
--- a/drivers/dma-buf/st-dma-fence-unwrap.c
+++ b/drivers/dma-buf/st-dma-fence-unwrap.c
@@ -28,7 +28,7 @@ static const struct dma_fence_ops mock_ops = {
.get_timeline_name = mock_name,
};
-static struct dma_fence *mock_fence(void)
+static struct dma_fence *__mock_fence(u64 context, u64 seqno)
{
struct mock_fence *f;
@@ -37,12 +37,16 @@ static struct dma_fence *mock_fence(void)
return NULL;
spin_lock_init(&f->lock);
- dma_fence_init(&f->base, &mock_ops, &f->lock,
- dma_fence_context_alloc(1), 1);
+ dma_fence_init(&f->base, &mock_ops, &f->lock, context, seqno);
return &f->base;
}
+static struct dma_fence *mock_fence(void)
+{
+ return __mock_fence(dma_fence_context_alloc(1), 1);
+}
+
static struct dma_fence *mock_array(unsigned int num_fences, ...)
{
struct dma_fence_array *array;
@@ -304,6 +308,177 @@ error_put_f1:
return err;
}
+static int unwrap_merge_duplicate(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = dma_fence_unwrap_merge(f1, f1);
+ if (!f2) {
+ err = -ENOMEM;
+ goto error_put_f1;
+ }
+
+ dma_fence_unwrap_for_each(fence, &iter, f2) {
+ if (fence == f1) {
+ dma_fence_put(f1);
+ f1 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f2);
+error_put_f1:
+ dma_fence_put(f1);
+ return err;
+}
+
+static int unwrap_merge_seqno(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *f3, *f4;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+ u64 ctx[2];
+
+ ctx[0] = dma_fence_context_alloc(1);
+ ctx[1] = dma_fence_context_alloc(1);
+
+ f1 = __mock_fence(ctx[1], 1);
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = __mock_fence(ctx[1], 2);
+ if (!f2) {
+ err = -ENOMEM;
+ goto error_put_f1;
+ }
+
+ dma_fence_enable_sw_signaling(f2);
+
+ f3 = __mock_fence(ctx[0], 1);
+ if (!f3) {
+ err = -ENOMEM;
+ goto error_put_f2;
+ }
+
+ dma_fence_enable_sw_signaling(f3);
+
+ f4 = dma_fence_unwrap_merge(f1, f2, f3);
+ if (!f4) {
+ err = -ENOMEM;
+ goto error_put_f3;
+ }
+
+ dma_fence_unwrap_for_each(fence, &iter, f4) {
+ if (fence == f3 && f2) {
+ dma_fence_put(f3);
+ f3 = NULL;
+ } else if (fence == f2 && !f3) {
+ dma_fence_put(f2);
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f2 || f3) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f4);
+error_put_f3:
+ dma_fence_put(f3);
+error_put_f2:
+ dma_fence_put(f2);
+error_put_f1:
+ dma_fence_put(f1);
+ return err;
+}
+
+static int unwrap_merge_order(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *a1, *a2, *c1, *c2;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = mock_fence();
+ if (!f2) {
+ dma_fence_put(f1);
+ return -ENOMEM;
+ }
+
+ dma_fence_enable_sw_signaling(f2);
+
+ a1 = mock_array(2, f1, f2);
+ if (!a1)
+ return -ENOMEM;
+
+ c1 = mock_chain(NULL, dma_fence_get(f1));
+ if (!c1)
+ goto error_put_a1;
+
+ c2 = mock_chain(c1, dma_fence_get(f2));
+ if (!c2)
+ goto error_put_a1;
+
+ /*
+ * The fences in the chain are the same as in a1 but in opposite order,
+ * the dma_fence_unwrap_merge() function should be able to handle that.
+ */
+ a2 = dma_fence_unwrap_merge(a1, c2);
+
+ dma_fence_unwrap_for_each(fence, &iter, a2) {
+ if (fence == f1) {
+ f1 = NULL;
+ if (!f2)
+ pr_err("Unexpected order!\n");
+ } else if (fence == f2) {
+ f2 = NULL;
+ if (f1)
+ pr_err("Unexpected order!\n");
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(a2);
+ return err;
+
+error_put_a1:
+ dma_fence_put(a1);
+ return -ENOMEM;
+}
+
static int unwrap_merge_complex(void *arg)
{
struct dma_fence *fence, *f1, *f2, *f3, *f4, *f5;
@@ -327,7 +502,7 @@ static int unwrap_merge_complex(void *arg)
goto error_put_f2;
/* The resulting array has the fences in reverse */
- f4 = dma_fence_unwrap_merge(f2, f1);
+ f4 = mock_array(2, dma_fence_get(f2), dma_fence_get(f1));
if (!f4)
goto error_put_f3;
@@ -367,6 +542,87 @@ error_put_f1:
return err;
}
+static int unwrap_merge_complex_seqno(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *f3, *f4, *f5, *f6, *f7;
+ struct dma_fence_unwrap iter;
+ int err = -ENOMEM;
+ u64 ctx[2];
+
+ ctx[0] = dma_fence_context_alloc(1);
+ ctx[1] = dma_fence_context_alloc(1);
+
+ f1 = __mock_fence(ctx[0], 2);
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = __mock_fence(ctx[1], 1);
+ if (!f2)
+ goto error_put_f1;
+
+ dma_fence_enable_sw_signaling(f2);
+
+ f3 = __mock_fence(ctx[0], 1);
+ if (!f3)
+ goto error_put_f2;
+
+ dma_fence_enable_sw_signaling(f3);
+
+ f4 = __mock_fence(ctx[1], 2);
+ if (!f4)
+ goto error_put_f3;
+
+ dma_fence_enable_sw_signaling(f4);
+
+ f5 = mock_array(2, dma_fence_get(f1), dma_fence_get(f2));
+ if (!f5)
+ goto error_put_f4;
+
+ f6 = mock_array(2, dma_fence_get(f3), dma_fence_get(f4));
+ if (!f6)
+ goto error_put_f5;
+
+ f7 = dma_fence_unwrap_merge(f5, f6);
+ if (!f7)
+ goto error_put_f6;
+
+ err = 0;
+ dma_fence_unwrap_for_each(fence, &iter, f7) {
+ if (fence == f1 && f4) {
+ dma_fence_put(f1);
+ f1 = NULL;
+ } else if (fence == f4 && !f1) {
+ dma_fence_put(f4);
+ f4 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f4) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f7);
+error_put_f6:
+ dma_fence_put(f6);
+error_put_f5:
+ dma_fence_put(f5);
+error_put_f4:
+ dma_fence_put(f4);
+error_put_f3:
+ dma_fence_put(f3);
+error_put_f2:
+ dma_fence_put(f2);
+error_put_f1:
+ dma_fence_put(f1);
+ return err;
+}
+
int dma_fence_unwrap(void)
{
static const struct subtest tests[] = {
@@ -375,7 +631,11 @@ int dma_fence_unwrap(void)
SUBTEST(unwrap_chain),
SUBTEST(unwrap_chain_array),
SUBTEST(unwrap_merge),
+ SUBTEST(unwrap_merge_duplicate),
+ SUBTEST(unwrap_merge_seqno),
+ SUBTEST(unwrap_merge_order),
SUBTEST(unwrap_merge_complex),
+ SUBTEST(unwrap_merge_complex_seqno),
};
return subtests(tests, NULL);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 6896da8ac7ef..5c6a5b358987 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -887,7 +887,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
const struct tegra_adma_chip_data *cdata;
struct tegra_adma *tdma;
struct resource *res_page, *res_base;
- int ret, i, page_no;
+ int ret, i;
cdata = of_device_get_match_data(&pdev->dev);
if (!cdata) {
@@ -914,9 +914,20 @@ static int tegra_adma_probe(struct platform_device *pdev)
res_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "global");
if (res_base) {
- page_no = (res_page->start - res_base->start) / cdata->ch_base_offset;
- if (page_no <= 0)
+ resource_size_t page_offset, page_no;
+ unsigned int ch_base_offset;
+
+ if (res_page->start < res_base->start)
+ return -EINVAL;
+ page_offset = res_page->start - res_base->start;
+ ch_base_offset = cdata->ch_base_offset;
+ if (!ch_base_offset)
return -EINVAL;
+
+ page_no = div_u64(page_offset, ch_base_offset);
+ if (!page_no || page_no > INT_MAX)
+ return -EINVAL;
+
tdma->ch_page_no = page_no - 1;
tdma->base_addr = devm_ioremap_resource(&pdev->dev, res_base);
if (IS_ERR(tdma->base_addr))
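A minimal arithmetic sketch of the validation added above (function and parameter names are illustrative): the channel-page index must be positive, derived without a division-by-zero or an underflowing subtraction, and small enough to fit in an int.

#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/limits.h>
#include <linux/math64.h>

/* Illustrative only: derive a 1-based page number from two MMIO base addresses. */
static int example_page_no(resource_size_t page_start, resource_size_t base_start,
                           unsigned int ch_base_offset)
{
        resource_size_t page_no;

        if (page_start < base_start || !ch_base_offset)
                return -EINVAL;

        page_no = div_u64(page_start - base_start, ch_base_offset);

        /* reject a zero index and values that would not fit in an int */
        if (!page_no || page_no > INT_MAX)
                return -EINVAL;

        return page_no;         /* the driver stores page_no - 1 as the zero-based index */
}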
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index 04c42c83a2ba..f3da9385ca0d 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -95,7 +95,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b
* Configure interrupt enable registers such that Tag, Data RAM related
* interrupts are propagated to interrupt controller for servicing
*/
- ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
+ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable,
TRP0_INTERRUPT_ENABLE,
TRP0_INTERRUPT_ENABLE);
if (ret)
@@ -113,7 +113,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b
if (ret)
return ret;
- ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
+ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable,
DRP0_INTERRUPT_ENABLE,
DRP0_INTERRUPT_ENABLE);
if (ret)
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 71d8b26c4103..9f35f69e0f9e 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -106,7 +106,7 @@ config ISCSI_IBFT
select ISCSI_BOOT_SYSFS
select ISCSI_IBFT_FIND if X86
depends on ACPI && SCSI && SCSI_LOWLEVEL
- default n
+ default n
help
This option enables support for detection and exposing of iSCSI
Boot Firmware Table (iBFT) via sysfs to userspace. If you wish to
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
index 83b69fc4fba5..a8915d3b4df5 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
@@ -254,8 +254,8 @@ static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph,
if (num > max_num)
return -EINVAL;
- ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET, sizeof(*in),
- 0, &t);
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET,
+ sizeof(*in) + num * sizeof(__le32), 0, &t);
if (ret)
return ret;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 8296bf985d1d..7309394b8fc9 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -934,13 +934,15 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
- EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
+ EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE |
+ EFI_MEMORY_RUNTIME))
snprintf(pos, size, "|attr=0x%016llx]",
(unsigned long long)attr);
else
snprintf(pos, size,
- "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+ "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
+ attr & EFI_MEMORY_HOT_PLUGGABLE ? "HP" : "",
attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
attr & EFI_MEMORY_SP ? "SP" : "",
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index e5872e38d9a4..5a732018be36 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -25,6 +25,9 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
if (md->type != EFI_CONVENTIONAL_MEMORY)
return 0;
+ if (md->attribute & EFI_MEMORY_HOT_PLUGGABLE)
+ return 0;
+
if (efi_soft_reserve_enabled() &&
(md->attribute & EFI_MEMORY_SP))
return 0;
diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
index 99b45d1cd624..d4264bfb6dc1 100644
--- a/drivers/firmware/efi/libstub/relocate.c
+++ b/drivers/firmware/efi/libstub/relocate.c
@@ -53,6 +53,9 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
+ if (desc->attribute & EFI_MEMORY_HOT_PLUGGABLE)
+ continue;
+
if (efi_soft_reserve_enabled() &&
(desc->attribute & EFI_MEMORY_SP))
continue;
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index 907cd149c40a..c964f4924359 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -25,6 +25,7 @@ config IMX_SCU
config IMX_SCMI_MISC_DRV
tristate "IMX SCMI MISC Protocol driver"
+ depends on ARCH_MXC || COMPILE_TEST
default y if ARCH_MXC
help
The System Controller Management Interface firmware (SCMI FW) is
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 6e9788324fea..371f24569b3b 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -310,7 +310,10 @@ static ssize_t ibft_attr_show_nic(void *data, int type, char *buf)
str += sprintf_ipaddr(str, nic->ip_addr);
break;
case ISCSI_BOOT_ETH_SUBNET_MASK:
- val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
+ if (nic->subnet_mask_prefix > 32)
+ val = cpu_to_be32(~0);
+ else
+ val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
str += sprintf(str, "%pI4", &val);
break;
case ISCSI_BOOT_ETH_PREFIX_LEN:
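For clarity, a hedged sketch of the prefix-to-netmask conversion the hunk above guards (helper name is hypothetical): shifting a 32-bit value by 32 or more is undefined in C, so out-of-range prefixes are clamped to an all-ones mask.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical helper: convert a prefix length to a big-endian IPv4 netmask. */
static __be32 example_prefix_to_mask(u8 prefix)
{
        if (prefix == 0)
                return cpu_to_be32(0);          /* no network bits */
        if (prefix >= 32)
                return cpu_to_be32(~0u);        /* clamp; avoids shifting by >= 32 */

        return cpu_to_be32(~((1u << (32 - prefix)) - 1));
}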
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index add5ad29a673..98b4d1633b25 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -338,6 +338,7 @@ config GPIO_GRANITERAPIDS
config GPIO_GRGPIO
tristate "Aeroflex Gaisler GRGPIO support"
+ depends on OF || COMPILE_TEST
select GPIO_GENERIC
select IRQ_DOMAIN
help
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 5321ef98f442..64908f1a5e7f 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -69,6 +69,22 @@ struct bcm_kona_gpio {
struct bcm_kona_gpio_bank {
int id;
int irq;
+ /*
+ * Used to keep track of lock/unlock operations for each GPIO in the
+ * bank.
+ *
+ * All GPIOs are locked by default (see bcm_kona_gpio_reset), and the
+ * unlock count for all GPIOs is 0 by default. Each unlock increments
+ * the counter, and each lock decrements the counter.
+ *
+ * The lock function only locks the GPIO once its unlock counter is
+ * down to 0. This is necessary because the GPIO is unlocked in two
+ * places in this driver: once for requested GPIOs, and once for
+ * requested IRQs. Since it is possible for a GPIO to be requested
+ * as both a GPIO and an IRQ, we need to ensure that we don't lock it
+ * too early.
+ */
+ u8 gpio_unlock_count[GPIO_PER_BANK];
/* Used in the interrupt handler */
struct bcm_kona_gpio *kona_gpio;
};
@@ -86,14 +102,24 @@ static void bcm_kona_gpio_lock_gpio(struct bcm_kona_gpio *kona_gpio,
u32 val;
unsigned long flags;
int bank_id = GPIO_BANK(gpio);
+ int bit = GPIO_BIT(gpio);
+ struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ if (bank->gpio_unlock_count[bit] == 0) {
+ dev_err(kona_gpio->gpio_chip.parent,
+ "Unbalanced locks for GPIO %u\n", gpio);
+ return;
+ }
- val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
- val |= BIT(gpio);
- bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+ if (--bank->gpio_unlock_count[bit] == 0) {
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
+ val |= BIT(bit);
+ bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ }
}
static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
@@ -102,14 +128,20 @@ static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
u32 val;
unsigned long flags;
int bank_id = GPIO_BANK(gpio);
+ int bit = GPIO_BIT(gpio);
+ struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ if (bank->gpio_unlock_count[bit] == 0) {
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
- val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
- val &= ~BIT(gpio);
- bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+ val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
+ val &= ~BIT(bit);
+ bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ }
+
+ ++bank->gpio_unlock_count[bit];
}
static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
@@ -360,6 +392,7 @@ static void bcm_kona_gpio_irq_mask(struct irq_data *d)
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
+
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(reg_base + GPIO_INT_MASK(bank_id));
@@ -382,6 +415,7 @@ static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
+
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(reg_base + GPIO_INT_MSKCLR(bank_id));
@@ -477,15 +511,26 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
static int bcm_kona_gpio_irq_reqres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
+ unsigned int gpio = d->hwirq;
- return gpiochip_reqres_irq(&kona_gpio->gpio_chip, d->hwirq);
+ /*
+ * We need to unlock the GPIO before any other operations are performed
+ * on the relevant GPIO configuration registers
+ */
+ bcm_kona_gpio_unlock_gpio(kona_gpio, gpio);
+
+ return gpiochip_reqres_irq(&kona_gpio->gpio_chip, gpio);
}
static void bcm_kona_gpio_irq_relres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
+ unsigned int gpio = d->hwirq;
+
+ /* Once we no longer use it, lock the GPIO again */
+ bcm_kona_gpio_lock_gpio(kona_gpio, gpio);
- gpiochip_relres_irq(&kona_gpio->gpio_chip, d->hwirq);
+ gpiochip_relres_irq(&kona_gpio->gpio_chip, gpio);
}
static struct irq_chip bcm_gpio_irq_chip = {
@@ -614,7 +659,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
bank->irq = platform_get_irq(pdev, i);
bank->kona_gpio = kona_gpio;
if (bank->irq < 0) {
- dev_err(dev, "Couldn't get IRQ for bank %d", i);
+ dev_err(dev, "Couldn't get IRQ for bank %d\n", i);
ret = -ENOENT;
goto err_irq_domain;
}
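A reduced sketch of the unlock-counter discipline described in the bank comment above (state and function names are illustrative, covering a single line): the hardware is only touched on the first unlock and on the balancing last lock.

#include <linux/types.h>

/* Illustrative only: refcount-style unlock/lock balancing for one GPIO line. */
static u8 example_unlock_count;
static bool example_hw_locked = true;   /* hardware starts locked, as after reset */

static void example_unlock(void)
{
        if (example_unlock_count++ == 0)
                example_hw_locked = false;      /* first unlock touches the hardware */
}

static void example_lock(void)
{
        if (example_unlock_count == 0)
                return;                         /* unbalanced lock request, ignored */

        if (--example_unlock_count == 0)
                example_hw_locked = true;       /* last balancing lock touches the hardware */
}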
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index be4c9981ebc4..d63c1030e6ac 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -841,25 +841,6 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
DECLARE_BITMAP(trigger, MAX_LINE);
int ret;
- if (chip->driver_data & PCA_PCAL) {
- /* Read the current interrupt status from the device */
- ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, trigger);
- if (ret)
- return false;
-
- /* Check latched inputs and clear interrupt status */
- ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
- if (ret)
- return false;
-
- /* Apply filter for rising/falling edge selection */
- bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise, cur_stat, gc->ngpio);
-
- bitmap_and(pending, new_stat, trigger, gc->ngpio);
-
- return !bitmap_empty(pending, gc->ngpio);
- }
-
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
if (ret)
return false;
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index a086087ada17..b6c230fab840 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -1028,20 +1028,23 @@ gpio_sim_device_lockup_configfs(struct gpio_sim_device *dev, bool lock)
struct configfs_subsystem *subsys = dev->group.cg_subsys;
struct gpio_sim_bank *bank;
struct gpio_sim_line *line;
+ struct config_item *item;
/*
- * The device only needs to depend on leaf line entries. This is
+ * The device only needs to depend on leaf entries. This is
* sufficient to lock up all the configfs entries that the
* instantiated, alive device depends on.
*/
list_for_each_entry(bank, &dev->bank_list, siblings) {
list_for_each_entry(line, &bank->line_list, siblings) {
+ item = line->hog ? &line->hog->item
+ : &line->group.cg_item;
+
if (lock)
- WARN_ON(configfs_depend_item_unlocked(
- subsys, &line->group.cg_item));
+ WARN_ON(configfs_depend_item_unlocked(subsys,
+ item));
else
- configfs_undepend_item_unlocked(
- &line->group.cg_item);
+ configfs_undepend_item_unlocked(item);
}
}
}
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 75a3633ceddb..222279a9d82b 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -191,7 +191,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
[REG_IE][CSB] = STMPE_IDX_IEGPIOR_CSB,
[REG_IE][MSB] = STMPE_IDX_IEGPIOR_MSB,
};
- int i, j;
+ int ret, i, j;
/*
* STMPE1600: to be able to get IRQ from pins,
@@ -199,8 +199,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
* GPSR or GPCR registers
*/
if (stmpe->partnum == STMPE1600) {
- stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
- stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
+ ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
+ if (ret < 0) {
+ dev_err(stmpe->dev, "Failed to read GPMR_LSB: %d\n", ret);
+ goto err;
+ }
+ ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
+ if (ret < 0) {
+ dev_err(stmpe->dev, "Failed to read GPMR_CSB: %d\n", ret);
+ goto err;
+ }
}
for (i = 0; i < CACHE_NR_REGS; i++) {
@@ -222,6 +230,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
}
}
+err:
mutex_unlock(&stmpe_gpio->irq_lock);
}
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index c4f34a347cb6..c36a9dbccd4d 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -36,6 +36,7 @@ struct vf610_gpio_port {
struct clk *clk_port;
struct clk *clk_gpio;
int irq;
+ spinlock_t lock; /* protect gpio direction registers */
};
#define GPIO_PDOR 0x00
@@ -124,6 +125,7 @@ static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
u32 val;
if (port->sdata->have_paddr) {
+ guard(spinlock_irqsave)(&port->lock);
val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
val &= ~mask;
vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
@@ -142,6 +144,7 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio
vf610_gpio_set(chip, gpio, value);
if (port->sdata->have_paddr) {
+ guard(spinlock_irqsave)(&port->lock);
val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
val |= mask;
vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
@@ -297,6 +300,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
port->sdata = device_get_match_data(dev);
+ spin_lock_init(&port->lock);
dual_base = port->sdata->have_dual_base;
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 1f9fe50bba00..f7746c57ba76 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1689,6 +1689,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_wake = "PNP0C50:00@8",
},
},
+ {
+ /*
+ * Spurious wakeups from GPIO 11
+ * Found in BIOS 1.04
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3954
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@11",
+ },
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 679ed764cb14..fc19df5a64c2 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -904,13 +904,13 @@ int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev)
}
if (gc->ngpio == 0) {
- chip_err(gc, "tried to insert a GPIO chip with zero lines\n");
+ dev_err(dev, "tried to insert a GPIO chip with zero lines\n");
return -EINVAL;
}
if (gc->ngpio > FASTPATH_NGPIO)
- chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n",
- gc->ngpio, FASTPATH_NGPIO);
+ dev_warn(dev, "line cnt %u is greater than fast path cnt %u\n",
+ gc->ngpio, FASTPATH_NGPIO);
return 0;
}
@@ -1057,8 +1057,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
desc->gdev = gdev;
if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index)) {
- assign_bit(FLAG_IS_OUT,
- &desc->flags, !gc->get_direction(gc, desc_index));
+ ret = gc->get_direction(gc, desc_index);
+ if (ret < 0)
+ /*
+ * FIXME: Bail-out here once all GPIO drivers
+ * are updated to not return errors in
+ * situations that can be considered normal
+ * operation.
+ */
+ dev_warn(&gdev->dev,
+ "%s: get_direction failed: %d\n",
+ __func__, ret);
+
+ assign_bit(FLAG_IS_OUT, &desc->flags, !ret);
} else {
assign_bit(FLAG_IS_OUT,
&desc->flags, !gc->direction_input);
@@ -2728,13 +2739,18 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
if (guard.gc->direction_input) {
ret = guard.gc->direction_input(guard.gc,
gpio_chip_hwgpio(desc));
- } else if (guard.gc->get_direction &&
- (guard.gc->get_direction(guard.gc,
- gpio_chip_hwgpio(desc)) != 1)) {
- gpiod_warn(desc,
- "%s: missing direction_input() operation and line is output\n",
- __func__);
- return -EIO;
+ } else if (guard.gc->get_direction) {
+ ret = guard.gc->get_direction(guard.gc,
+ gpio_chip_hwgpio(desc));
+ if (ret < 0)
+ return ret;
+
+ if (ret != GPIO_LINE_DIRECTION_IN) {
+ gpiod_warn(desc,
+ "%s: missing direction_input() operation and line is output\n",
+ __func__);
+ return -EIO;
+ }
}
if (ret == 0) {
clear_bit(FLAG_IS_OUT, &desc->flags);
@@ -2771,12 +2787,18 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
gpio_chip_hwgpio(desc), val);
} else {
/* Check that we are in output mode if we can */
- if (guard.gc->get_direction &&
- guard.gc->get_direction(guard.gc, gpio_chip_hwgpio(desc))) {
- gpiod_warn(desc,
- "%s: missing direction_output() operation\n",
- __func__);
- return -EIO;
+ if (guard.gc->get_direction) {
+ ret = guard.gc->get_direction(guard.gc,
+ gpio_chip_hwgpio(desc));
+ if (ret < 0)
+ return ret;
+
+ if (ret != GPIO_LINE_DIRECTION_OUT) {
+ gpiod_warn(desc,
+ "%s: missing direction_output() operation\n",
+ __func__);
+ return -EIO;
+ }
}
/*
* If we can't actively set the direction, we are some
@@ -3129,6 +3151,8 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc)
static int gpio_chip_get_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
if (gc->get_multiple)
return gc->get_multiple(gc, mask, bits);
if (gc->get) {
@@ -3159,6 +3183,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
+ struct gpio_chip *gc;
int ret, i = 0;
/*
@@ -3170,10 +3195,15 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
array_size <= array_info->size &&
(void *)array_info == desc_array + array_info->size) {
if (!can_sleep)
- WARN_ON(array_info->chip->can_sleep);
+ WARN_ON(array_info->gdev->can_sleep);
+
+ guard(srcu)(&array_info->gdev->srcu);
+ gc = srcu_dereference(array_info->gdev->chip,
+ &array_info->gdev->srcu);
+ if (!gc)
+ return -ENODEV;
- ret = gpio_chip_get_multiple(array_info->chip,
- array_info->get_mask,
+ ret = gpio_chip_get_multiple(gc, array_info->get_mask,
value_bitmap);
if (ret)
return ret;
@@ -3454,6 +3484,8 @@ static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
static void gpio_chip_set_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
if (gc->set_multiple) {
gc->set_multiple(gc, mask, bits);
} else {
@@ -3471,6 +3503,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
+ struct gpio_chip *gc;
int i = 0;
/*
@@ -3482,14 +3515,19 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
array_size <= array_info->size &&
(void *)array_info == desc_array + array_info->size) {
if (!can_sleep)
- WARN_ON(array_info->chip->can_sleep);
+ WARN_ON(array_info->gdev->can_sleep);
+
+ guard(srcu)(&array_info->gdev->srcu);
+ gc = srcu_dereference(array_info->gdev->chip,
+ &array_info->gdev->srcu);
+ if (!gc)
+ return -ENODEV;
if (!raw && !bitmap_empty(array_info->invert_mask, array_size))
bitmap_xor(value_bitmap, value_bitmap,
array_info->invert_mask, array_size);
- gpio_chip_set_multiple(array_info->chip, array_info->set_mask,
- value_bitmap);
+ gpio_chip_set_multiple(gc, array_info->set_mask, value_bitmap);
i = find_first_zero_bit(array_info->set_mask, array_size);
if (i == array_size)
@@ -4751,9 +4789,10 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
{
struct gpio_desc *desc;
struct gpio_descs *descs;
+ struct gpio_device *gdev;
struct gpio_array *array_info = NULL;
- struct gpio_chip *gc;
int count, bitmap_size;
+ unsigned long dflags;
size_t descs_size;
count = gpiod_count(dev, con_id);
@@ -4774,7 +4813,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
descs->desc[descs->ndescs] = desc;
- gc = gpiod_to_chip(desc);
+ gdev = gpiod_to_gpio_device(desc);
/*
* If pin hardware number of array member 0 is also 0, select
* its chip as a candidate for fast bitmap processing path.
@@ -4782,8 +4821,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
if (descs->ndescs == 0 && gpio_chip_hwgpio(desc) == 0) {
struct gpio_descs *array;
- bitmap_size = BITS_TO_LONGS(gc->ngpio > count ?
- gc->ngpio : count);
+ bitmap_size = BITS_TO_LONGS(gdev->ngpio > count ?
+ gdev->ngpio : count);
array = krealloc(descs, descs_size +
struct_size(array_info, invert_mask, 3 * bitmap_size),
@@ -4803,7 +4842,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
array_info->desc = descs->desc;
array_info->size = count;
- array_info->chip = gc;
+ array_info->gdev = gdev;
bitmap_set(array_info->get_mask, descs->ndescs,
count - descs->ndescs);
bitmap_set(array_info->set_mask, descs->ndescs,
@@ -4816,7 +4855,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
continue;
/* Unmark array members which don't belong to the 'fast' chip */
- if (array_info->chip != gc) {
+ if (array_info->gdev != gdev) {
__clear_bit(descs->ndescs, array_info->get_mask);
__clear_bit(descs->ndescs, array_info->set_mask);
}
@@ -4839,9 +4878,10 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
array_info->set_mask);
}
} else {
+ dflags = READ_ONCE(desc->flags);
/* Exclude open drain or open source from fast output */
- if (gpiochip_line_is_open_drain(gc, descs->ndescs) ||
- gpiochip_line_is_open_source(gc, descs->ndescs))
+ if (test_bit(FLAG_OPEN_DRAIN, &dflags) ||
+ test_bit(FLAG_OPEN_SOURCE, &dflags))
__clear_bit(descs->ndescs,
array_info->set_mask);
/* Identify 'fast' pins which require invertion */
@@ -4853,7 +4893,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
if (array_info)
dev_dbg(dev,
"GPIO array info: chip=%s, size=%d, get_mask=%lx, set_mask=%lx, invert_mask=%lx\n",
- array_info->chip->label, array_info->size,
+ array_info->gdev->label, array_info->size,
*array_info->get_mask, *array_info->set_mask,
*array_info->invert_mask);
return descs;
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 83690f72f7e5..147156ec502b 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -114,7 +114,7 @@ extern const char *const gpio_suffixes[];
*
* @desc: Array of pointers to the GPIO descriptors
* @size: Number of elements in desc
- * @chip: Parent GPIO chip
+ * @gdev: Parent GPIO device
* @get_mask: Get mask used in fastpath
* @set_mask: Set mask used in fastpath
* @invert_mask: Invert mask used in fastpath
@@ -126,7 +126,7 @@ extern const char *const gpio_suffixes[];
struct gpio_array {
struct gpio_desc **desc;
unsigned int size;
- struct gpio_chip *chip;
+ struct gpio_device *gdev;
unsigned long *get_mask;
unsigned long *set_mask;
unsigned long invert_mask[];
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index fbef3f471bd0..d9986fd52194 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -326,8 +326,6 @@ config DRM_SCHED
tristate
depends on DRM
-source "drivers/gpu/drm/i2c/Kconfig"
-
source "drivers/gpu/drm/arm/Kconfig"
source "drivers/gpu/drm/radeon/Kconfig"
@@ -494,6 +492,17 @@ config DRM_WERROR
If in doubt, say N.
+config DRM_HEADER_TEST
+ bool "Ensure DRM headers are self-contained and pass kernel-doc"
+ depends on DRM && EXPERT
+ default n
+ help
+ Ensure that the DRM subsystem headers, both under drivers/gpu/drm and
+ include/drm, compile, are self-contained, have header guards, and
+ produce no kernel-doc warnings.
+
+ If in doubt, say N.
+
endif
# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 19fb370fbc56..50604b49d1ac 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -135,7 +135,6 @@ drm_kms_helper-y := \
drm_atomic_state_helper.o \
drm_crtc_helper.o \
drm_damage_helper.o \
- drm_encoder_slave.o \
drm_flip_work.o \
drm_format_helper.o \
drm_gem_atomic_helper.o \
@@ -198,7 +197,6 @@ obj-$(CONFIG_DRM_INGENIC) += ingenic/
obj-$(CONFIG_DRM_LOGICVC) += logicvc/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
obj-$(CONFIG_DRM_MESON) += meson/
-obj-y += i2c/
obj-y += panel/
obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
@@ -223,3 +221,21 @@ obj-y += solomon/
obj-$(CONFIG_DRM_SPRD) += sprd/
obj-$(CONFIG_DRM_LOONGSON) += loongson/
obj-$(CONFIG_DRM_POWERVR) += imagination/
+
+# Ensure drm headers are self-contained and pass kernel-doc
+hdrtest-files := \
+ $(shell cd $(src) && find . -maxdepth 1 -name 'drm_*.h') \
+ $(shell cd $(src) && find display lib -name '*.h')
+
+always-$(CONFIG_DRM_HEADER_TEST) += \
+ $(patsubst %.h,%.hdrtest, $(hdrtest-files))
+
+# Include the header twice to detect missing include guard.
+quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
+ cmd_hdrtest = \
+ $(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
+ $(srctree)/scripts/kernel-doc -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
+ touch $@
+
+$(obj)/%.hdrtest: $(src)/%.h FORCE
+ $(call if_changed_dep,hdrtest)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 344e0a9ee08a..5e375e9c4f5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -674,7 +674,7 @@ static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
@@ -839,7 +839,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -1196,7 +1196,7 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector)
}
static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -1464,7 +1464,7 @@ out:
}
static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c8ceea01a221..17e5967bfa60 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2859,6 +2859,12 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
{
+ struct drm_sched_init_args args = {
+ .ops = &amdgpu_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .timeout_wq = adev->reset_domain->wq,
+ .dev = adev->dev,
+ };
long timeout;
int r, i;
@@ -2884,12 +2890,12 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
break;
}
- r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- ring->num_hw_submission, 0,
- timeout, adev->reset_domain->wq,
- ring->sched_score, ring->name,
- adev->dev);
+ args.timeout = timeout;
+ args.credit_limit = ring->num_hw_submission;
+ args.score = ring->sched_score;
+ args.name = ring->name;
+
+ r = drm_sched_init(&ring->sched, &args);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);
@@ -6156,6 +6162,10 @@ end_reset:
dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
atomic_set(&adev->reset_domain->reset_res, r);
+
+ if (!r)
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE);
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 622634c08c7b..521b9faab180 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -430,7 +430,7 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
}
int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.h b/drivers/gpu/drm/amd/amdgpu/atombios_dp.h
index f59d85eaddf0..3e24acf8133f 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.h
@@ -32,7 +32,7 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
const struct drm_display_mode *mode);
int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connector);
void amdgpu_atombios_dp_set_rx_power_state(struct drm_connector *connector,
u8 power_state);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8672c0c3c5db..16e4eb474eec 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -7532,10 +7532,11 @@ create_validate_stream_for_sink(struct drm_connector *connector,
}
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
int result = MODE_ERROR;
struct dc_sink *dc_sink;
+ struct drm_display_mode *test_mode;
/* TODO: Unhardcode stream count */
struct dc_stream_state *stream;
/* we always have an amdgpu_dm_connector here since we got
@@ -7563,11 +7564,16 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
goto fail;
}
- drm_mode_set_crtcinfo(mode, 0);
+ test_mode = drm_mode_duplicate(connector->dev, mode);
+ if (!test_mode)
+ goto fail;
+
+ drm_mode_set_crtcinfo(test_mode, 0);
- stream = create_validate_stream_for_sink(connector, mode,
+ stream = create_validate_stream_for_sink(connector, test_mode,
to_dm_connector_state(connector->state),
NULL);
+ drm_mode_destroy(connector->dev, test_mode);
if (stream) {
dc_stream_release(stream);
result = MODE_OK;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index f3bc00e587ad..acead14ab45d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -956,7 +956,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
int link_index);
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
void dm_restore_drm_connector_state(struct drm_device *dev,
struct drm_connector *connector);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index dcf2b98566ea..fcb0e900a38a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -1258,21 +1258,24 @@ static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane,
}
static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state, bool flip)
{
struct drm_crtc_state *new_crtc_state;
struct drm_plane_state *new_plane_state;
struct dm_crtc_state *dm_new_crtc_state;
- /* Only support async updates on cursor planes. */
- if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ if (flip) {
+ if (plane->type != DRM_PLANE_TYPE_OVERLAY)
+ return -EINVAL;
+ } else if (plane->type != DRM_PLANE_TYPE_CURSOR) {
return -EINVAL;
+ }
new_plane_state = drm_atomic_get_new_plane_state(state, plane);
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
/* Reject overlay cursors for now */
- if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
+ if (!flip && dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
return -EINVAL;
return 0;
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index c901ac00c0c3..ed3ed617c688 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -9,6 +9,7 @@ config DRM_HDLCD
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
+ select DRM_BRIDGE # for TDA998x
help
Choose this option if you have an ARM High Definition Colour LCD
controller.
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index ebccb74306a7..875cdbff18c9 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -88,7 +88,7 @@ komeda_wb_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
komeda_wb_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
@@ -160,6 +160,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
kwb_conn->wb_layer->layer_type,
&n_formats);
+ if (!formats) {
+ kfree(kwb_conn);
+ return -ENOMEM;
+ }
err = drm_writeback_connector_init(&kms->base, wb_conn,
&komeda_wb_connector_funcs,
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 2577f0cef8fc..600af5ad81b1 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -43,7 +43,7 @@ static int malidp_mw_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
malidp_mw_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index 47da848fa3fc..8d09ba5d5889 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -4,6 +4,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ast-y := \
+ ast_cursor.o \
ast_ddc.o \
ast_dp501.o \
ast_dp.o \
@@ -13,6 +14,7 @@ ast-y := \
ast_mode.o \
ast_post.o \
ast_sil164.o \
+ ast_vbios.o \
ast_vga.o
obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c
new file mode 100644
index 000000000000..139ab00dee8f
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_cursor.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+
+#include <linux/bits.h>
+#include <linux/sizes.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
+
+#include "ast_drv.h"
+
+/*
+ * Hardware cursor
+ */
+
+/* define for signature structure */
+#define AST_HWC_SIGNATURE_CHECKSUM 0x00
+#define AST_HWC_SIGNATURE_SizeX 0x04
+#define AST_HWC_SIGNATURE_SizeY 0x08
+#define AST_HWC_SIGNATURE_X 0x0C
+#define AST_HWC_SIGNATURE_Y 0x10
+#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
+#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+
+static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, unsigned int height)
+{
+ u32 csum = 0;
+ unsigned int one_pixel_copy = width & BIT(0);
+ unsigned int two_pixel_copy = width - one_pixel_copy;
+ unsigned int trailing_bytes = (AST_MAX_HWC_WIDTH - width) * sizeof(u16);
+ unsigned int x, y;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < two_pixel_copy; x += 2) {
+ const u32 *src32 = (const u32 *)src;
+
+ csum += *src32;
+ src += SZ_4;
+ }
+ if (one_pixel_copy) {
+ const u16 *src16 = (const u16 *)src;
+
+ csum += *src16;
+ src += SZ_2;
+ }
+ src += trailing_bytes;
+ }
+
+ return csum;
+}
+
+static void ast_set_cursor_image(struct ast_device *ast, const u8 *src,
+ unsigned int width, unsigned int height)
+{
+ u8 __iomem *dst = ast->cursor_plane.base.vaddr;
+ u32 csum;
+
+ csum = ast_cursor_calculate_checksum(src, width, height);
+
+ /* write pixel data */
+ memcpy_toio(dst, src, AST_HWC_SIZE);
+
+ /* write checksum + signature */
+ dst += AST_HWC_SIZE;
+ writel(csum, dst);
+ writel(width, dst + AST_HWC_SIGNATURE_SizeX);
+ writel(height, dst + AST_HWC_SIGNATURE_SizeY);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
+}
+
+static void ast_set_cursor_base(struct ast_device *ast, u64 address)
+{
+ u8 addr0 = (address >> 3) & 0xff;
+ u8 addr1 = (address >> 11) & 0xff;
+ u8 addr2 = (address >> 19) & 0xff;
+
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc8, addr0);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc9, addr1);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xca, addr2);
+}
+
+static void ast_set_cursor_location(struct ast_device *ast, u16 x, u16 y,
+ u8 x_offset, u8 y_offset)
+{
+ u8 x0 = (x & 0x00ff);
+ u8 x1 = (x & 0x0f00) >> 8;
+ u8 y0 = (y & 0x00ff);
+ u8 y1 = (y & 0x0700) >> 8;
+
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc2, x_offset);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc3, y_offset);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc4, x0);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc5, x1);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc6, y0);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc7, y1);
+}
+
+static void ast_set_cursor_enabled(struct ast_device *ast, bool enabled)
+{
+ static const u8 mask = (u8)~(AST_IO_VGACRCB_HWC_16BPP |
+ AST_IO_VGACRCB_HWC_ENABLED);
+
+ u8 vgacrcb = AST_IO_VGACRCB_HWC_16BPP;
+
+ if (enabled)
+ vgacrcb |= AST_IO_VGACRCB_HWC_ENABLED;
+
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xcb, mask, vgacrcb);
+}
+
+/*
+ * Cursor plane
+ */
+
+static const uint32_t ast_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB8888,
+};
+
+static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ int ret;
+
+ if (new_plane_state->crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, true);
+ if (ret || !new_plane_state->visible)
+ return ret;
+
+ if (new_fb->width > AST_MAX_HWC_WIDTH || new_fb->height > AST_MAX_HWC_HEIGHT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct ast_cursor_plane *ast_cursor_plane = to_ast_cursor_plane(plane);
+ struct ast_plane *ast_plane = to_ast_plane(plane);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct ast_device *ast = to_ast_device(plane->dev);
+ struct drm_rect damage;
+ u64 dst_off = ast_plane->offset;
+ u8 __iomem *dst = ast_plane->vaddr; /* TODO: Use mapping abstraction properly */
+ u8 __iomem *sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
+ unsigned int offset_x, offset_y;
+ u16 x, y;
+ u8 x_offset, y_offset;
+
+ /*
+ * Do data transfer to hardware buffer and point the scanout
+ * engine to the offset.
+ */
+
+ if (drm_atomic_helper_damage_merged(old_plane_state, plane_state, &damage)) {
+ u8 *argb4444;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_ARGB4444:
+ argb4444 = shadow_plane_state->data[0].vaddr;
+ break;
+ default:
+ argb4444 = ast_cursor_plane->argb4444;
+ {
+ struct iosys_map argb4444_dst[DRM_FORMAT_MAX_PLANES] = {
+ IOSYS_MAP_INIT_VADDR(argb4444),
+ };
+ unsigned int argb4444_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
+ AST_HWC_PITCH,
+ };
+
+ drm_fb_argb8888_to_argb4444(argb4444_dst, argb4444_dst_pitch,
+ shadow_plane_state->data, fb, &damage,
+ &shadow_plane_state->fmtcnv_state);
+ }
+ break;
+ }
+ ast_set_cursor_image(ast, argb4444, fb->width, fb->height);
+ ast_set_cursor_base(ast, dst_off);
+ }
+
+ /*
+ * Update location in HWC signature and registers.
+ */
+
+ writel(plane_state->crtc_x, sig + AST_HWC_SIGNATURE_X);
+ writel(plane_state->crtc_y, sig + AST_HWC_SIGNATURE_Y);
+
+ offset_x = AST_MAX_HWC_WIDTH - fb->width;
+ offset_y = AST_MAX_HWC_HEIGHT - fb->height;
+
+ if (plane_state->crtc_x < 0) {
+ x_offset = (-plane_state->crtc_x) + offset_x;
+ x = 0;
+ } else {
+ x_offset = offset_x;
+ x = plane_state->crtc_x;
+ }
+ if (plane_state->crtc_y < 0) {
+ y_offset = (-plane_state->crtc_y) + offset_y;
+ y = 0;
+ } else {
+ y_offset = offset_y;
+ y = plane_state->crtc_y;
+ }
+
+ ast_set_cursor_location(ast, x, y, x_offset, y_offset);
+
+ /* Dummy write to enable HWC and make the HW pick up the changes. */
+ ast_set_cursor_enabled(ast, true);
+}
+
+static void ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct ast_device *ast = to_ast_device(plane->dev);
+
+ ast_set_cursor_enabled(ast, false);
+}
+
+static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = ast_cursor_plane_helper_atomic_check,
+ .atomic_update = ast_cursor_plane_helper_atomic_update,
+ .atomic_disable = ast_cursor_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs ast_cursor_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+};
+
+int ast_cursor_plane_init(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ struct ast_cursor_plane *ast_cursor_plane = &ast->cursor_plane;
+ struct ast_plane *ast_plane = &ast_cursor_plane->base;
+ struct drm_plane *cursor_plane = &ast_plane->base;
+ size_t size;
+ void __iomem *vaddr;
+ u64 offset;
+ int ret;
+
+ /*
+ * Allocate backing storage for cursors. The BOs are permanently
+ * pinned to the top end of the VRAM.
+ */
+
+ size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
+
+ if (ast->vram_fb_available < size)
+ return -ENOMEM;
+
+ vaddr = ast->vram + ast->vram_fb_available - size;
+ offset = ast->vram_fb_available - size;
+
+ ret = ast_plane_init(dev, ast_plane, vaddr, offset, size,
+ 0x01, &ast_cursor_plane_funcs,
+ ast_cursor_plane_formats, ARRAY_SIZE(ast_cursor_plane_formats),
+ NULL, DRM_PLANE_TYPE_CURSOR);
+ if (ret) {
+ drm_err(dev, "ast_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(cursor_plane);
+
+ ast->vram_fb_available -= size;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 0e282b7b167c..19c04687b0fe 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -5,6 +5,7 @@
#include <linux/firmware.h>
#include <linux/delay.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
@@ -12,11 +13,71 @@
#include <drm/drm_probe_helper.h>
#include "ast_drv.h"
+#include "ast_vbios.h"
+
+struct ast_astdp_mode_index_table_entry {
+ unsigned int hdisplay;
+ unsigned int vdisplay;
+ unsigned int mode_index;
+};
+
+/* FIXME: Do refresh rate and flags actually matter? */
+static const struct ast_astdp_mode_index_table_entry ast_astdp_mode_index_table[] = {
+ { 320, 240, ASTDP_320x240_60 },
+ { 400, 300, ASTDP_400x300_60 },
+ { 512, 384, ASTDP_512x384_60 },
+ { 640, 480, ASTDP_640x480_60 },
+ { 800, 600, ASTDP_800x600_56 },
+ { 1024, 768, ASTDP_1024x768_60 },
+ { 1152, 864, ASTDP_1152x864_75 },
+ { 1280, 800, ASTDP_1280x800_60_RB },
+ { 1280, 1024, ASTDP_1280x1024_60 },
+ { 1360, 768, ASTDP_1366x768_60 }, // same as 1366x768
+ { 1366, 768, ASTDP_1366x768_60 },
+ { 1440, 900, ASTDP_1440x900_60_RB },
+ { 1600, 900, ASTDP_1600x900_60_RB },
+ { 1600, 1200, ASTDP_1600x1200_60 },
+ { 1680, 1050, ASTDP_1680x1050_60_RB },
+ { 1920, 1080, ASTDP_1920x1080_60 },
+ { 1920, 1200, ASTDP_1920x1200_60 },
+ { 0 }
+};
+
+struct ast_astdp_connector_state {
+ struct drm_connector_state base;
+
+ int mode_index;
+};
+
+static struct ast_astdp_connector_state *
+to_ast_astdp_connector_state(const struct drm_connector_state *state)
+{
+ return container_of(state, struct ast_astdp_connector_state, base);
+}
+
+static int ast_astdp_get_mode_index(unsigned int hdisplay, unsigned int vdisplay)
+{
+ const struct ast_astdp_mode_index_table_entry *entry = ast_astdp_mode_index_table;
+
+ while (entry->hdisplay && entry->vdisplay) {
+ if (entry->hdisplay == hdisplay && entry->vdisplay == vdisplay)
+ return entry->mode_index;
+ ++entry;
+ }
+
+ return -EINVAL;
+}
static bool ast_astdp_is_connected(struct ast_device *ast)
{
if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, AST_IO_VGACRDF_HPD))
return false;
+ /*
+ * HPD might be set even if no monitor is connected, so also check that
+ * the link training was successful.
+ */
+ if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, AST_IO_VGACRDC_LINK_SUCCESS))
+ return false;
return true;
}
@@ -195,7 +256,7 @@ static bool __ast_dp_wait_enable(struct ast_device *ast, bool enabled)
if (enabled)
vgacrdf_test |= AST_IO_VGACRDF_DP_VIDEO_ENABLE;
- for (i = 0; i < 200; ++i) {
+ for (i = 0; i < 1000; ++i) {
if (i)
mdelay(1);
vgacrdf = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xdf,
@@ -221,80 +282,6 @@ static void ast_dp_set_enable(struct ast_device *ast, bool enabled)
drm_WARN_ON(dev, !__ast_dp_wait_enable(ast, enabled));
}
-static void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode)
-{
- struct ast_device *ast = to_ast_device(crtc->dev);
-
- u32 ulRefreshRateIndex;
- u8 ModeIdx;
-
- ulRefreshRateIndex = vbios_mode->enh_table->refresh_rate_index - 1;
-
- switch (crtc->mode.crtc_hdisplay) {
- case 320:
- ModeIdx = ASTDP_320x240_60;
- break;
- case 400:
- ModeIdx = ASTDP_400x300_60;
- break;
- case 512:
- ModeIdx = ASTDP_512x384_60;
- break;
- case 640:
- ModeIdx = (ASTDP_640x480_60 + (u8) ulRefreshRateIndex);
- break;
- case 800:
- ModeIdx = (ASTDP_800x600_56 + (u8) ulRefreshRateIndex);
- break;
- case 1024:
- ModeIdx = (ASTDP_1024x768_60 + (u8) ulRefreshRateIndex);
- break;
- case 1152:
- ModeIdx = ASTDP_1152x864_75;
- break;
- case 1280:
- if (crtc->mode.crtc_vdisplay == 800)
- ModeIdx = (ASTDP_1280x800_60_RB - (u8) ulRefreshRateIndex);
- else // 1024
- ModeIdx = (ASTDP_1280x1024_60 + (u8) ulRefreshRateIndex);
- break;
- case 1360:
- case 1366:
- ModeIdx = ASTDP_1366x768_60;
- break;
- case 1440:
- ModeIdx = (ASTDP_1440x900_60_RB - (u8) ulRefreshRateIndex);
- break;
- case 1600:
- if (crtc->mode.crtc_vdisplay == 900)
- ModeIdx = (ASTDP_1600x900_60_RB - (u8) ulRefreshRateIndex);
- else //1200
- ModeIdx = ASTDP_1600x1200_60;
- break;
- case 1680:
- ModeIdx = (ASTDP_1680x1050_60_RB - (u8) ulRefreshRateIndex);
- break;
- case 1920:
- if (crtc->mode.crtc_vdisplay == 1080)
- ModeIdx = ASTDP_1920x1080_60;
- else //1200
- ModeIdx = ASTDP_1920x1200_60;
- break;
- default:
- return;
- }
-
- /*
- * CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp)
- * CRE1[7:0]: MISC1 (default: 0x00)
- * CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50)
- */
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE0, ASTDP_AND_CLEAR_MASK,
- ASTDP_MISC0_24bpp);
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE1, ASTDP_AND_CLEAR_MASK, ASTDP_MISC1);
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE2, ASTDP_AND_CLEAR_MASK, ModeIdx);
-}
-
static void ast_wait_for_vretrace(struct ast_device *ast)
{
unsigned long timeout = jiffies + HZ;
@@ -313,15 +300,62 @@ static const struct drm_encoder_funcs ast_astdp_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static enum drm_mode_status
+ast_astdp_encoder_helper_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ int res;
+
+ res = ast_astdp_get_mode_index(mode->hdisplay, mode->vdisplay);
+ if (res < 0)
+ return MODE_NOMODE;
+
+ return MODE_OK;
+}
+
static void ast_astdp_encoder_helper_atomic_mode_set(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_crtc *crtc = crtc_state->crtc;
+ struct drm_device *dev = encoder->dev;
+ struct ast_device *ast = to_ast_device(dev);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
- struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
+ const struct ast_vbios_enhtable *vmode = ast_crtc_state->vmode;
+ struct ast_astdp_connector_state *astdp_conn_state =
+ to_ast_astdp_connector_state(conn_state);
+ int mode_index = astdp_conn_state->mode_index;
+ u8 refresh_rate_index;
+ u8 vgacre0, vgacre1, vgacre2;
+
+ if (drm_WARN_ON(dev, vmode->refresh_rate_index < 1 || vmode->refresh_rate_index > 255))
+ return;
+ refresh_rate_index = vmode->refresh_rate_index - 1;
+
+ /* FIXME: Why are we doing this? */
+ switch (mode_index) {
+ case ASTDP_1280x800_60_RB:
+ case ASTDP_1440x900_60_RB:
+ case ASTDP_1600x900_60_RB:
+ case ASTDP_1680x1050_60_RB:
+ mode_index = (u8)(mode_index - (u8)refresh_rate_index);
+ break;
+ default:
+ mode_index = (u8)(mode_index + (u8)refresh_rate_index);
+ break;
+ }
- ast_dp_set_mode(crtc, vbios_mode_info);
+ /*
+ * CRE0[7:0]: MISC0 (0x00: 18-bpp or 0x20: 24-bpp)
+ * CRE1[7:0]: MISC1 (default: 0x00)
+ * CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50)
+ */
+ vgacre0 = AST_IO_VGACRE0_24BPP;
+ vgacre1 = 0x00;
+ vgacre2 = mode_index & 0xff;
+
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xe0, vgacre0);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xe1, vgacre1);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xe2, vgacre2);
}
static void ast_astdp_encoder_helper_atomic_enable(struct drm_encoder *encoder,
@@ -348,10 +382,31 @@ static void ast_astdp_encoder_helper_atomic_disable(struct drm_encoder *encoder,
ast_dp_set_phy_sleep(ast, true);
}
+static int ast_astdp_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ const struct drm_display_mode *mode = &crtc_state->mode;
+ struct ast_astdp_connector_state *astdp_conn_state =
+ to_ast_astdp_connector_state(conn_state);
+ int res;
+
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ res = ast_astdp_get_mode_index(mode->hdisplay, mode->vdisplay);
+ if (res < 0)
+ return res;
+ astdp_conn_state->mode_index = res;
+ }
+
+ return 0;
+}
+
static const struct drm_encoder_helper_funcs ast_astdp_encoder_helper_funcs = {
+ .mode_valid = ast_astdp_encoder_helper_mode_valid,
.atomic_mode_set = ast_astdp_encoder_helper_atomic_mode_set,
.atomic_enable = ast_astdp_encoder_helper_atomic_enable,
.atomic_disable = ast_astdp_encoder_helper_atomic_disable,
+ .atomic_check = ast_astdp_encoder_helper_atomic_check,
};
/*
@@ -422,18 +477,62 @@ static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs
.detect_ctx = ast_astdp_connector_helper_detect_ctx,
};
-/*
- * Output
- */
+static void ast_astdp_connector_reset(struct drm_connector *connector)
+{
+ struct ast_astdp_connector_state *astdp_state =
+ kzalloc(sizeof(*astdp_state), GFP_KERNEL);
+
+ if (connector->state)
+ connector->funcs->atomic_destroy_state(connector, connector->state);
+
+ if (astdp_state)
+ __drm_atomic_helper_connector_reset(connector, &astdp_state->base);
+ else
+ __drm_atomic_helper_connector_reset(connector, NULL);
+}
+
+static struct drm_connector_state *
+ast_astdp_connector_atomic_duplicate_state(struct drm_connector *connector)
+{
+ struct ast_astdp_connector_state *new_astdp_state, *astdp_state;
+ struct drm_device *dev = connector->dev;
+
+ if (drm_WARN_ON(dev, !connector->state))
+ return NULL;
+
+ new_astdp_state = kmalloc(sizeof(*new_astdp_state), GFP_KERNEL);
+ if (!new_astdp_state)
+ return NULL;
+ __drm_atomic_helper_connector_duplicate_state(connector, &new_astdp_state->base);
+
+ astdp_state = to_ast_astdp_connector_state(connector->state);
+
+ new_astdp_state->mode_index = astdp_state->mode_index;
+
+ return &new_astdp_state->base;
+}
+
+static void ast_astdp_connector_atomic_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct ast_astdp_connector_state *astdp_state = to_ast_astdp_connector_state(state);
+
+ __drm_atomic_helper_connector_destroy_state(&astdp_state->base);
+ kfree(astdp_state);
+}
static const struct drm_connector_funcs ast_astdp_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
+ .reset = ast_astdp_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = ast_astdp_connector_atomic_duplicate_state,
+ .atomic_destroy_state = ast_astdp_connector_atomic_destroy_state,
};
+/*
+ * Output
+ */
+
int ast_astdp_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
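A worked example, purely illustrative, for the refresh-rate offset applied in ast_astdp_encoder_helper_atomic_mode_set() above; the firmware-side meaning of the resulting index is an assumption, as the FIXME there notes:

/*
 * For 800x600 at 60 Hz, the VBIOS table in ast_vbios.c reports
 * refresh_rate_index == 2, and ast_astdp_get_mode_index() returns
 * ASTDP_800x600_56, so atomic_mode_set computes
 *
 *     refresh_rate_index = 2 - 1 = 1;
 *     mode_index = ASTDP_800x600_56 + 1;    (non-RB mode: add)
 *
 * which presumably selects the firmware's 60 Hz variant of 800x600, the
 * same value the removed ast_dp_set_mode() derived from crtc_hdisplay.
 * The reduced-blanking (*_RB) entries subtract the offset instead, again
 * matching the old switch statement.
 */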
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index ff3bcdd1cff2..6fbf62a99c48 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -170,7 +170,7 @@ static int ast_detect_chip(struct pci_dev *pdev,
/* Patch AST2500/AST2510 */
if ((pdev->revision & 0xf0) == 0x40) {
- if (!(vgacrd0 & AST_VRAM_INIT_STATUS_MASK))
+ if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK))
ast_patch_ahb_2500(regs);
}
@@ -393,11 +393,15 @@ static int ast_drm_freeze(struct drm_device *dev)
static int ast_drm_thaw(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
+ int ret;
ast_enable_vga(ast->ioregs);
ast_open_key(ast->ioregs);
ast_enable_mmio(dev->dev, ast->ioregs);
- ast_post_gpu(ast);
+
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ret;
return drm_mode_config_helper_resume(dev);
}
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 6b4305ac07d4..d2c2605d2728 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -39,6 +39,8 @@
#include "ast_reg.h"
+struct ast_vbios_enhtable;
+
#define DRIVER_AUTHOR "Dave Airlie"
#define DRIVER_NAME "ast"
@@ -111,17 +113,10 @@ enum ast_config_mode {
#define AST_MAX_HWC_WIDTH 64
#define AST_MAX_HWC_HEIGHT 64
-#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH * AST_MAX_HWC_HEIGHT * 2)
-#define AST_HWC_SIGNATURE_SIZE 32
+#define AST_HWC_PITCH (AST_MAX_HWC_WIDTH * SZ_2)
+#define AST_HWC_SIZE (AST_MAX_HWC_HEIGHT * AST_HWC_PITCH)
-/* define for signature structure */
-#define AST_HWC_SIGNATURE_CHECKSUM 0x00
-#define AST_HWC_SIGNATURE_SizeX 0x04
-#define AST_HWC_SIGNATURE_SizeY 0x08
-#define AST_HWC_SIGNATURE_X 0x0C
-#define AST_HWC_SIGNATURE_Y 0x10
-#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
-#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+#define AST_HWC_SIGNATURE_SIZE 32
/*
* Planes
@@ -140,6 +135,17 @@ static inline struct ast_plane *to_ast_plane(struct drm_plane *plane)
return container_of(plane, struct ast_plane, base);
}
+struct ast_cursor_plane {
+ struct ast_plane base;
+
+ u8 argb4444[AST_HWC_SIZE];
+};
+
+static inline struct ast_cursor_plane *to_ast_cursor_plane(struct drm_plane *plane)
+{
+ return container_of(to_ast_plane(plane), struct ast_cursor_plane, base);
+}
+
/*
* Connector
*/
@@ -184,7 +190,7 @@ struct ast_device {
enum ast_tx_chip tx_chip;
struct ast_plane primary_plane;
- struct ast_plane cursor_plane;
+ struct ast_cursor_plane cursor_plane;
struct drm_crtc crtc;
union {
struct {
@@ -205,7 +211,9 @@ struct ast_device {
} astdp;
} output;
- bool support_wide_screen;
+ bool support_wsxga_p; /* 1680x1050 */
+ bool support_fullhd; /* 1920x1080 */
+ bool support_wuxga; /* 1920x1200 */
u8 *dp501_fw_addr;
const struct firmware *dp501_fw; /* dp501 fw */
@@ -348,46 +356,24 @@ struct ast_vbios_stdtable {
u8 gr[9];
};
-struct ast_vbios_enhtable {
- u32 ht;
- u32 hde;
- u32 hfp;
- u32 hsync;
- u32 vt;
- u32 vde;
- u32 vfp;
- u32 vsync;
- u32 dclk_index;
- u32 flags;
- u32 refresh_rate;
- u32 refresh_rate_index;
- u32 mode_id;
-};
-
struct ast_vbios_dclk_info {
u8 param1;
u8 param2;
u8 param3;
};
-struct ast_vbios_mode_info {
- const struct ast_vbios_stdtable *std_table;
- const struct ast_vbios_enhtable *enh_table;
-};
-
struct ast_crtc_state {
struct drm_crtc_state base;
/* Last known format of primary plane */
const struct drm_format_info *format;
- struct ast_vbios_mode_info vbios_mode_info;
+ const struct ast_vbios_stdtable *std_table;
+ const struct ast_vbios_enhtable *vmode;
};
#define to_ast_crtc_state(state) container_of(state, struct ast_crtc_state, base)
-int ast_mode_config_init(struct ast_device *ast);
-
#define AST_MM_ALIGN_SHIFT 4
#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
@@ -445,7 +431,7 @@ int ast_mode_config_init(struct ast_device *ast);
int ast_mm_init(struct ast_device *ast);
/* ast post */
-void ast_post_gpu(struct ast_device *ast);
+int ast_post_gpu(struct ast_device *ast);
u32 ast_mindwm(struct ast_device *ast, u32 r);
void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
void ast_patch_ahb_2500(void __iomem *regs);
@@ -453,6 +439,9 @@ void ast_patch_ahb_2500(void __iomem *regs);
int ast_vga_output_init(struct ast_device *ast);
int ast_sil164_output_init(struct ast_device *ast);
+/* ast_cursor.c */
+int ast_cursor_plane_init(struct ast_device *ast);
+
/* ast dp501 */
bool ast_backup_fw(struct ast_device *ast, u8 *addr, u32 size);
void ast_init_3rdtx(struct ast_device *ast);
@@ -462,4 +451,14 @@ int ast_dp501_output_init(struct ast_device *ast);
int ast_dp_launch(struct ast_device *ast);
int ast_astdp_output_init(struct ast_device *ast);
+/* ast_mode.c */
+int ast_mode_config_init(struct ast_device *ast);
+int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
+ void __iomem *vaddr, u64 offset, unsigned long size,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type);
+
#endif
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index bc37c65305d4..44b9b5f659fc 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -36,33 +36,89 @@
#include "ast_drv.h"
+/* Try to detect WSXGA+ on Gen2+ */
+static bool __ast_2100_detect_wsxga_p(struct ast_device *ast)
+{
+ u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0);
+
+ if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC))
+ return true;
+ if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN)
+ return true;
+
+ return false;
+}
+
+/* Try to detect WUXGA on Gen2+ */
+static bool __ast_2100_detect_wuxga(struct ast_device *ast)
+{
+ u8 vgacrd1;
+
+ if (ast->support_fullhd) {
+ vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1);
+ if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA))
+ return true;
+ }
+
+ return false;
+}
+
static void ast_detect_widescreen(struct ast_device *ast)
{
- u8 jreg;
+ ast->support_wsxga_p = false;
+ ast->support_fullhd = false;
+ ast->support_wuxga = false;
- /* Check if we support wide screen */
- switch (AST_GEN(ast)) {
- case 1:
- ast->support_wide_screen = false;
- break;
- default:
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- if (!(jreg & 0x80))
- ast->support_wide_screen = true;
- else if (jreg & 0x01)
- ast->support_wide_screen = true;
- else {
- ast->support_wide_screen = false;
- if (ast->chip == AST1300)
- ast->support_wide_screen = true;
- if (ast->chip == AST1400)
- ast->support_wide_screen = true;
- if (ast->chip == AST2510)
- ast->support_wide_screen = true;
- if (IS_AST_GEN7(ast))
- ast->support_wide_screen = true;
+ if (AST_GEN(ast) >= 7) {
+ ast->support_wsxga_p = true;
+ ast->support_fullhd = true;
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+ } else if (AST_GEN(ast) >= 6) {
+ if (__ast_2100_detect_wsxga_p(ast))
+ ast->support_wsxga_p = true;
+ else if (ast->chip == AST2510)
+ ast->support_wsxga_p = true;
+ if (ast->support_wsxga_p)
+ ast->support_fullhd = true;
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+ } else if (AST_GEN(ast) >= 5) {
+ if (__ast_2100_detect_wsxga_p(ast))
+ ast->support_wsxga_p = true;
+ else if (ast->chip == AST1400)
+ ast->support_wsxga_p = true;
+ if (ast->support_wsxga_p)
+ ast->support_fullhd = true;
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+ } else if (AST_GEN(ast) >= 4) {
+ if (__ast_2100_detect_wsxga_p(ast))
+ ast->support_wsxga_p = true;
+ else if (ast->chip == AST1300)
+ ast->support_wsxga_p = true;
+ if (ast->support_wsxga_p)
+ ast->support_fullhd = true;
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+ } else if (AST_GEN(ast) >= 3) {
+ if (__ast_2100_detect_wsxga_p(ast))
+ ast->support_wsxga_p = true;
+ if (ast->support_wsxga_p) {
+ if (ast->chip == AST2200)
+ ast->support_fullhd = true;
}
- break;
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+ } else if (AST_GEN(ast) >= 2) {
+ if (__ast_2100_detect_wsxga_p(ast))
+ ast->support_wsxga_p = true;
+ if (ast->support_wsxga_p) {
+ if (ast->chip == AST2100)
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
}
}
@@ -76,49 +132,37 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
};
struct drm_device *dev = &ast->base;
- u8 jreg, vgacrd1;
-
- /*
- * Several of the listed TX chips are not explicitly supported
- * by the ast driver. If these exist in real-world devices, they
- * are most likely reported as VGA or SIL164 outputs. We warn here
- * to get bug reports for these devices. If none come in for some
- * time, we can begin to fail device probing on these values.
- */
- vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, AST_IO_VGACRD1_TX_TYPE_MASK);
- drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_ITE66121_VBIOS,
- "ITE IT66121 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
- drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_CH7003_VBIOS,
- "Chrontel CH7003 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
- drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_ANX9807_VBIOS,
- "Analogix ANX9807 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
+ u8 vgacra3, vgacrd1;
/* Check 3rd Tx option (digital output afaik) */
ast->tx_chip = AST_TX_NONE;
- /*
- * VGACRA3 Enhanced Color Mode Register, check if DVO is already
- * enabled, in that case, assume we have a SIL164 TMDS transmitter
- *
- * Don't make that assumption if we the chip wasn't enabled and
- * is at power-on reset, otherwise we'll incorrectly "detect" a
- * SIL164 when there is none.
- */
- if (!need_post) {
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff);
- if (jreg & 0x80)
- ast->tx_chip = AST_TX_SIL164;
- }
-
- if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) {
+ if (AST_GEN(ast) <= 3) {
/*
- * On AST GEN4+, look the configuration set by the SoC in
+ * VGACRA3 Enhanced Color Mode Register, check if DVO is already
+ * enabled, in that case, assume we have a SIL164 TMDS transmitter
+ *
+ * Don't make that assumption if the chip wasn't enabled and
+ * is at power-on reset, otherwise we'll incorrectly "detect" a
+ * SIL164 when there is none.
+ */
+ if (!need_post) {
+ vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff);
+ if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED)
+ ast->tx_chip = AST_TX_SIL164;
+ }
+ } else {
+ /*
+ * On AST GEN4+, look at the configuration set by the SoC in
* the SOC scratch register #1 bits 11:8 (interestingly marked
* as "reserved" in the spec)
*/
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1,
- AST_IO_VGACRD1_TX_TYPE_MASK);
- switch (jreg) {
+ vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1,
+ AST_IO_VGACRD1_TX_TYPE_MASK);
+ switch (vgacrd1) {
+ /*
+ * GEN4 to GEN6
+ */
case AST_IO_VGACRD1_TX_SIL164_VBIOS:
ast->tx_chip = AST_TX_SIL164;
break;
@@ -134,14 +178,32 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
fallthrough;
case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW:
ast->tx_chip = AST_TX_DP501;
- }
- } else if (IS_AST_GEN7(ast)) {
- if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, AST_IO_VGACRD1_TX_TYPE_MASK) ==
- AST_IO_VGACRD1_TX_ASTDP) {
- int ret = ast_dp_launch(ast);
-
- if (!ret)
- ast->tx_chip = AST_TX_ASTDP;
+ break;
+ /*
+ * GEN7+
+ */
+ case AST_IO_VGACRD1_TX_ASTDP:
+ ast->tx_chip = AST_TX_ASTDP;
+ break;
+ /*
+ * Several of the listed TX chips are not explicitly supported
+ * by the ast driver. If these exist in real-world devices, they
+ * are most likely reported as VGA or SIL164 outputs. We warn here
+ * to get bug reports for these devices. If none come in for some
+ * time, we can begin to fail device probing on these values.
+ */
+ case AST_IO_VGACRD1_TX_ITE66121_VBIOS:
+ drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n",
+ vgacrd1, AST_GEN(ast));
+ break;
+ case AST_IO_VGACRD1_TX_CH7003_VBIOS:
+ drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n",
+ vgacrd1, AST_GEN(ast));
+ break;
+ case AST_IO_VGACRD1_TX_ANX9807_VBIOS:
+ drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n",
+ vgacrd1, AST_GEN(ast));
+ break;
}
}
@@ -290,18 +352,25 @@ struct drm_device *ast_device_create(struct pci_dev *pdev,
ast->regs = regs;
ast->ioregs = ioregs;
- ast_detect_widescreen(ast);
- ast_detect_tx_chip(ast, need_post);
-
ret = ast_get_dram_info(ast);
if (ret)
return ERR_PTR(ret);
-
drm_info(dev, "dram MCLK=%u Mhz type=%d bus_width=%d\n",
ast->mclk, ast->dram_type, ast->dram_bus_width);
- if (need_post)
- ast_post_gpu(ast);
+ ast_detect_tx_chip(ast, need_post);
+ switch (ast->tx_chip) {
+ case AST_TX_ASTDP:
+ ret = ast_post_gpu(ast);
+ break;
+ default:
+ ret = 0;
+ if (need_post)
+ ret = ast_post_gpu(ast);
+ break;
+ }
+ if (ret)
+ return ERR_PTR(ret);
ret = ast_mm_init(ast);
if (ret)
@@ -315,6 +384,8 @@ struct drm_device *ast_device_create(struct pci_dev *pdev,
drm_info(dev, "failed to map reserved buffer!\n");
}
+ ast_detect_widescreen(ast);
+
ret = ast_mode_config_init(ast);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 9d5321c81e68..c3b950675485 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -47,6 +47,7 @@
#include "ast_drv.h"
#include "ast_tables.h"
+#include "ast_vbios.h"
#define AST_LUT_SIZE 256
@@ -106,134 +107,9 @@ static void ast_crtc_set_gamma(struct ast_device *ast,
}
}
-static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- struct ast_vbios_mode_info *vbios_mode)
-{
- u32 refresh_rate_index = 0, refresh_rate;
- const struct ast_vbios_enhtable *best = NULL;
- u32 hborder, vborder;
- bool check_sync;
-
- switch (format->cpp[0] * 8) {
- case 8:
- vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
- break;
- case 16:
- vbios_mode->std_table = &vbios_stdtable[HiCModeIndex];
- break;
- case 24:
- case 32:
- vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex];
- break;
- default:
- return false;
- }
-
- switch (mode->crtc_hdisplay) {
- case 640:
- vbios_mode->enh_table = &res_640x480[refresh_rate_index];
- break;
- case 800:
- vbios_mode->enh_table = &res_800x600[refresh_rate_index];
- break;
- case 1024:
- vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
- break;
- case 1152:
- vbios_mode->enh_table = &res_1152x864[refresh_rate_index];
- break;
- case 1280:
- if (mode->crtc_vdisplay == 800)
- vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
- else
- vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
- break;
- case 1360:
- vbios_mode->enh_table = &res_1360x768[refresh_rate_index];
- break;
- case 1440:
- vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
- break;
- case 1600:
- if (mode->crtc_vdisplay == 900)
- vbios_mode->enh_table = &res_1600x900[refresh_rate_index];
- else
- vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
- break;
- case 1680:
- vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
- break;
- case 1920:
- if (mode->crtc_vdisplay == 1080)
- vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
- else
- vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
- break;
- default:
- return false;
- }
-
- refresh_rate = drm_mode_vrefresh(mode);
- check_sync = vbios_mode->enh_table->flags & WideScreenMode;
-
- while (1) {
- const struct ast_vbios_enhtable *loop = vbios_mode->enh_table;
-
- while (loop->refresh_rate != 0xff) {
- if ((check_sync) &&
- (((mode->flags & DRM_MODE_FLAG_NVSYNC) &&
- (loop->flags & PVSync)) ||
- ((mode->flags & DRM_MODE_FLAG_PVSYNC) &&
- (loop->flags & NVSync)) ||
- ((mode->flags & DRM_MODE_FLAG_NHSYNC) &&
- (loop->flags & PHSync)) ||
- ((mode->flags & DRM_MODE_FLAG_PHSYNC) &&
- (loop->flags & NHSync)))) {
- loop++;
- continue;
- }
- if (loop->refresh_rate <= refresh_rate
- && (!best || loop->refresh_rate > best->refresh_rate))
- best = loop;
- loop++;
- }
- if (best || !check_sync)
- break;
- check_sync = 0;
- }
-
- if (best)
- vbios_mode->enh_table = best;
-
- hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
- vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
-
- adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht;
- adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder;
- adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder;
- adjusted_mode->crtc_hsync_start = vbios_mode->enh_table->hde + hborder +
- vbios_mode->enh_table->hfp;
- adjusted_mode->crtc_hsync_end = (vbios_mode->enh_table->hde + hborder +
- vbios_mode->enh_table->hfp +
- vbios_mode->enh_table->hsync);
-
- adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt;
- adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder;
- adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder;
- adjusted_mode->crtc_vsync_start = vbios_mode->enh_table->vde + vborder +
- vbios_mode->enh_table->vfp;
- adjusted_mode->crtc_vsync_end = (vbios_mode->enh_table->vde + vborder +
- vbios_mode->enh_table->vfp +
- vbios_mode->enh_table->vsync);
-
- return true;
-}
-
static void ast_set_vbios_color_reg(struct ast_device *ast,
const struct drm_format_info *format,
- const struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_enhtable *vmode)
{
u32 color_index;
@@ -256,7 +132,7 @@ static void ast_set_vbios_color_reg(struct ast_device *ast,
ast_set_index_reg(ast, AST_IO_VGACRI, 0x91, 0x00);
- if (vbios_mode->enh_table->flags & NewModeInfo) {
+ if (vmode->flags & NewModeInfo) {
ast_set_index_reg(ast, AST_IO_VGACRI, 0x91, 0xa8);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x92, format->cpp[0] * 8);
}
@@ -264,19 +140,19 @@ static void ast_set_vbios_color_reg(struct ast_device *ast,
static void ast_set_vbios_mode_reg(struct ast_device *ast,
const struct drm_display_mode *adjusted_mode,
- const struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_enhtable *vmode)
{
u32 refresh_rate_index, mode_id;
- refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
- mode_id = vbios_mode->enh_table->mode_id;
+ refresh_rate_index = vmode->refresh_rate_index;
+ mode_id = vmode->mode_id;
ast_set_index_reg(ast, AST_IO_VGACRI, 0x8d, refresh_rate_index & 0xff);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x8e, mode_id & 0xff);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x91, 0x00);
- if (vbios_mode->enh_table->flags & NewModeInfo) {
+ if (vmode->flags & NewModeInfo) {
ast_set_index_reg(ast, AST_IO_VGACRI, 0x91, 0xa8);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x93, adjusted_mode->clock / 1000);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x94, adjusted_mode->crtc_hdisplay);
@@ -288,14 +164,11 @@ static void ast_set_vbios_mode_reg(struct ast_device *ast,
static void ast_set_std_reg(struct ast_device *ast,
struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_stdtable *stdtable)
{
- const struct ast_vbios_stdtable *stdtable;
u32 i;
u8 jreg;
- stdtable = vbios_mode->std_table;
-
jreg = stdtable->misc;
ast_io_write8(ast, AST_IO_VGAMR_W, jreg);
@@ -336,13 +209,13 @@ static void ast_set_std_reg(struct ast_device *ast,
static void ast_set_crtc_reg(struct ast_device *ast,
struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_enhtable *vmode)
{
u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
u16 temp, precache = 0;
if ((IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) &&
- (vbios_mode->enh_table->flags & AST2500PreCatchCRT))
+ (vmode->flags & AST2500PreCatchCRT))
precache = 40;
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x11, 0x7f, 0x00);
@@ -461,14 +334,14 @@ static void ast_set_offset_reg(struct ast_device *ast,
static void ast_set_dclk_reg(struct ast_device *ast,
struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_enhtable *vmode)
{
const struct ast_vbios_dclk_info *clk_info;
if (IS_AST_GEN6(ast) || IS_AST_GEN7(ast))
- clk_info = &dclk_table_ast2500[vbios_mode->enh_table->dclk_index];
+ clk_info = &dclk_table_ast2500[vmode->dclk_index];
else
- clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
+ clk_info = &dclk_table[vmode->dclk_index];
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc0, 0x00, clk_info->param1);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc1, 0x00, clk_info->param2);
@@ -526,15 +399,15 @@ static void ast_set_crtthd_reg(struct ast_device *ast)
static void ast_set_sync_reg(struct ast_device *ast,
struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_enhtable *vmode)
{
u8 jreg;
jreg = ast_io_read8(ast, AST_IO_VGAMR_R);
jreg &= ~0xC0;
- if (vbios_mode->enh_table->flags & NVSync)
+ if (vmode->flags & NVSync)
jreg |= 0x80;
- if (vbios_mode->enh_table->flags & NHSync)
+ if (vmode->flags & NHSync)
jreg |= 0x40;
ast_io_write8(ast, AST_IO_VGAMR_W, jreg);
}
@@ -565,13 +438,13 @@ static void ast_wait_for_vretrace(struct ast_device *ast)
* Planes
*/
-static int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
- void __iomem *vaddr, u64 offset, unsigned long size,
- uint32_t possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- const uint64_t *format_modifiers,
- enum drm_plane_type type)
+int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
+ void __iomem *vaddr, u64 offset, unsigned long size,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type)
{
struct drm_plane *plane = &ast_plane->base;
@@ -654,10 +527,9 @@ static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
if (!old_fb || (fb->format != old_fb->format) || crtc_state->mode_changed) {
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
- struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
ast_set_color_reg(ast, fb->format);
- ast_set_vbios_color_reg(ast, fb->format, vbios_mode_info);
+ ast_set_vbios_color_reg(ast, fb->format, ast_crtc_state->vmode);
}
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
@@ -757,263 +629,6 @@ static int ast_primary_plane_init(struct ast_device *ast)
}
/*
- * Cursor plane
- */
-
-static void ast_update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int height)
-{
- union {
- u32 ul;
- u8 b[4];
- } srcdata32[2], data32;
- union {
- u16 us;
- u8 b[2];
- } data16;
- u32 csum = 0;
- s32 alpha_dst_delta, last_alpha_dst_delta;
- u8 __iomem *dstxor;
- const u8 *srcxor;
- int i, j;
- u32 per_pixel_copy, two_pixel_copy;
-
- alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
- last_alpha_dst_delta = alpha_dst_delta - (width << 1);
-
- srcxor = src;
- dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
- per_pixel_copy = width & 1;
- two_pixel_copy = width >> 1;
-
- for (j = 0; j < height; j++) {
- for (i = 0; i < two_pixel_copy; i++) {
- srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
- srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
- data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
- data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
- data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
- data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
-
- writel(data32.ul, dstxor);
- csum += data32.ul;
-
- dstxor += 4;
- srcxor += 8;
-
- }
-
- for (i = 0; i < per_pixel_copy; i++) {
- srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
- data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
- data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
- writew(data16.us, dstxor);
- csum += (u32)data16.us;
-
- dstxor += 2;
- srcxor += 4;
- }
- dstxor += last_alpha_dst_delta;
- }
-
- /* write checksum + signature */
- dst += AST_HWC_SIZE;
- writel(csum, dst);
- writel(width, dst + AST_HWC_SIGNATURE_SizeX);
- writel(height, dst + AST_HWC_SIGNATURE_SizeY);
- writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
- writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
-}
-
-static void ast_set_cursor_base(struct ast_device *ast, u64 address)
-{
- u8 addr0 = (address >> 3) & 0xff;
- u8 addr1 = (address >> 11) & 0xff;
- u8 addr2 = (address >> 19) & 0xff;
-
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc8, addr0);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc9, addr1);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xca, addr2);
-}
-
-static void ast_set_cursor_location(struct ast_device *ast, u16 x, u16 y,
- u8 x_offset, u8 y_offset)
-{
- u8 x0 = (x & 0x00ff);
- u8 x1 = (x & 0x0f00) >> 8;
- u8 y0 = (y & 0x00ff);
- u8 y1 = (y & 0x0700) >> 8;
-
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc2, x_offset);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc3, y_offset);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc4, x0);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc5, x1);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc6, y0);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc7, y1);
-}
-
-static void ast_set_cursor_enabled(struct ast_device *ast, bool enabled)
-{
- static const u8 mask = (u8)~(AST_IO_VGACRCB_HWC_16BPP |
- AST_IO_VGACRCB_HWC_ENABLED);
-
- u8 vgacrcb = AST_IO_VGACRCB_HWC_16BPP;
-
- if (enabled)
- vgacrcb |= AST_IO_VGACRCB_HWC_ENABLED;
-
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xcb, mask, vgacrcb);
-}
-
-static const uint32_t ast_cursor_plane_formats[] = {
- DRM_FORMAT_ARGB8888,
-};
-
-static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_framebuffer *new_fb = new_plane_state->fb;
- struct drm_crtc_state *new_crtc_state = NULL;
- int ret;
-
- if (new_plane_state->crtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true, true);
- if (ret || !new_plane_state->visible)
- return ret;
-
- if (new_fb->width > AST_MAX_HWC_WIDTH || new_fb->height > AST_MAX_HWC_HEIGHT)
- return -EINVAL;
-
- return 0;
-}
-
-static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct ast_plane *ast_plane = to_ast_plane(plane);
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct ast_device *ast = to_ast_device(plane->dev);
- struct iosys_map src_map = shadow_plane_state->data[0];
- struct drm_rect damage;
- const u8 *src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
- u64 dst_off = ast_plane->offset;
- u8 __iomem *dst = ast_plane->vaddr; /* TODO: Use mapping abstraction properly */
- u8 __iomem *sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
- unsigned int offset_x, offset_y;
- u16 x, y;
- u8 x_offset, y_offset;
-
- /*
- * Do data transfer to hardware buffer and point the scanout
- * engine to the offset.
- */
-
- if (drm_atomic_helper_damage_merged(old_plane_state, plane_state, &damage)) {
- ast_update_cursor_image(dst, src, fb->width, fb->height);
- ast_set_cursor_base(ast, dst_off);
- }
-
- /*
- * Update location in HWC signature and registers.
- */
-
- writel(plane_state->crtc_x, sig + AST_HWC_SIGNATURE_X);
- writel(plane_state->crtc_y, sig + AST_HWC_SIGNATURE_Y);
-
- offset_x = AST_MAX_HWC_WIDTH - fb->width;
- offset_y = AST_MAX_HWC_HEIGHT - fb->height;
-
- if (plane_state->crtc_x < 0) {
- x_offset = (-plane_state->crtc_x) + offset_x;
- x = 0;
- } else {
- x_offset = offset_x;
- x = plane_state->crtc_x;
- }
- if (plane_state->crtc_y < 0) {
- y_offset = (-plane_state->crtc_y) + offset_y;
- y = 0;
- } else {
- y_offset = offset_y;
- y = plane_state->crtc_y;
- }
-
- ast_set_cursor_location(ast, x, y, x_offset, y_offset);
-
- /* Dummy write to enable HWC and make the HW pick-up the changes. */
- ast_set_cursor_enabled(ast, true);
-}
-
-static void ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct ast_device *ast = to_ast_device(plane->dev);
-
- ast_set_cursor_enabled(ast, false);
-}
-
-static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = {
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = ast_cursor_plane_helper_atomic_check,
- .atomic_update = ast_cursor_plane_helper_atomic_update,
- .atomic_disable = ast_cursor_plane_helper_atomic_disable,
-};
-
-static const struct drm_plane_funcs ast_cursor_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
- DRM_GEM_SHADOW_PLANE_FUNCS,
-};
-
-static int ast_cursor_plane_init(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct ast_plane *ast_cursor_plane = &ast->cursor_plane;
- struct drm_plane *cursor_plane = &ast_cursor_plane->base;
- size_t size;
- void __iomem *vaddr;
- u64 offset;
- int ret;
-
- /*
- * Allocate backing storage for cursors. The BOs are permanently
- * pinned to the top end of the VRAM.
- */
-
- size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
-
- if (ast->vram_fb_available < size)
- return -ENOMEM;
-
- vaddr = ast->vram + ast->vram_fb_available - size;
- offset = ast->vram_fb_available - size;
-
- ret = ast_plane_init(dev, ast_cursor_plane, vaddr, offset, size,
- 0x01, &ast_cursor_plane_funcs,
- ast_cursor_plane_formats, ARRAY_SIZE(ast_cursor_plane_formats),
- NULL, DRM_PLANE_TYPE_CURSOR);
- if (ret) {
- drm_err(dev, "ast_plane_init() failed: %d\n", ret);
- return ret;
- }
- drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
- drm_plane_enable_fb_damage_clips(cursor_plane);
-
- ast->vram_fb_available -= size;
-
- return 0;
-}
-
-/*
* CRTC
*/
@@ -1021,72 +636,13 @@ static enum drm_mode_status
ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
struct ast_device *ast = to_ast_device(crtc->dev);
- enum drm_mode_status status;
- uint32_t jtemp;
-
- if (ast->support_wide_screen) {
- if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050))
- return MODE_OK;
- if ((mode->hdisplay == 1280) && (mode->vdisplay == 800))
- return MODE_OK;
- if ((mode->hdisplay == 1440) && (mode->vdisplay == 900))
- return MODE_OK;
- if ((mode->hdisplay == 1360) && (mode->vdisplay == 768))
- return MODE_OK;
- if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
- return MODE_OK;
- if ((mode->hdisplay == 1152) && (mode->vdisplay == 864))
- return MODE_OK;
-
- if ((ast->chip == AST2100) || // GEN2, but not AST1100 (?)
- (ast->chip == AST2200) || // GEN3, but not AST2150 (?)
- IS_AST_GEN4(ast) || IS_AST_GEN5(ast) ||
- IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) {
- if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080))
- return MODE_OK;
-
- if ((mode->hdisplay == 1920) && (mode->vdisplay == 1200)) {
- jtemp = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff);
- if (jtemp & 0x01)
- return MODE_NOMODE;
- else
- return MODE_OK;
- }
- }
- }
+ const struct ast_vbios_enhtable *vmode;
- status = MODE_NOMODE;
+ vmode = ast_vbios_find_mode(ast, mode);
+ if (!vmode)
+ return MODE_NOMODE;
- switch (mode->hdisplay) {
- case 640:
- if (mode->vdisplay == 480)
- status = MODE_OK;
- break;
- case 800:
- if (mode->vdisplay == 600)
- status = MODE_OK;
- break;
- case 1024:
- if (mode->vdisplay == 768)
- status = MODE_OK;
- break;
- case 1152:
- if (mode->vdisplay == 864)
- status = MODE_OK;
- break;
- case 1280:
- if (mode->vdisplay == 1024)
- status = MODE_OK;
- break;
- case 1600:
- if (mode->vdisplay == 1200)
- status = MODE_OK;
- break;
- default:
- break;
- }
-
- return status;
+ return MODE_OK;
}
static void ast_crtc_helper_mode_set_nofb(struct drm_crtc *crtc)
@@ -1095,8 +651,8 @@ static void ast_crtc_helper_mode_set_nofb(struct drm_crtc *crtc)
struct ast_device *ast = to_ast_device(dev);
struct drm_crtc_state *crtc_state = crtc->state;
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
- struct ast_vbios_mode_info *vbios_mode_info =
- &ast_crtc_state->vbios_mode_info;
+ const struct ast_vbios_stdtable *std_table = ast_crtc_state->std_table;
+ const struct ast_vbios_enhtable *vmode = ast_crtc_state->vmode;
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
/*
@@ -1107,25 +663,29 @@ static void ast_crtc_helper_mode_set_nofb(struct drm_crtc *crtc)
*/
ast_wait_for_vretrace(ast);
- ast_set_vbios_mode_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_vbios_mode_reg(ast, adjusted_mode, vmode);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x06);
- ast_set_std_reg(ast, adjusted_mode, vbios_mode_info);
- ast_set_crtc_reg(ast, adjusted_mode, vbios_mode_info);
- ast_set_dclk_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_std_reg(ast, adjusted_mode, std_table);
+ ast_set_crtc_reg(ast, adjusted_mode, vmode);
+ ast_set_dclk_reg(ast, adjusted_mode, vmode);
ast_set_crtthd_reg(ast);
- ast_set_sync_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_sync_reg(ast, adjusted_mode, vmode);
}
static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
struct drm_device *dev = crtc->dev;
+ struct ast_device *ast = to_ast_device(dev);
struct ast_crtc_state *ast_state;
const struct drm_format_info *format;
- bool succ;
+ const struct ast_vbios_enhtable *vmode;
+ unsigned int hborder = 0;
+ unsigned int vborder = 0;
int ret;
if (!crtc_state->enable)
@@ -1157,11 +717,56 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
}
}
- succ = ast_get_vbios_mode_info(format, &crtc_state->mode,
- &crtc_state->adjusted_mode,
- &ast_state->vbios_mode_info);
- if (!succ)
+ /*
+ * Set register tables.
+ *
+ * TODO: These tables mix all kinds of fields and should
+ * probably be resolved into various helper functions.
+ */
+ switch (format->format) {
+ case DRM_FORMAT_C8:
+ ast_state->std_table = &vbios_stdtable[VGAModeIndex];
+ break;
+ case DRM_FORMAT_RGB565:
+ ast_state->std_table = &vbios_stdtable[HiCModeIndex];
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ ast_state->std_table = &vbios_stdtable[TrueCModeIndex];
+ break;
+ default:
return -EINVAL;
+ }
+
+ /*
+ * Find the VBIOS mode and adjust the DRM display mode accordingly
+ * if a full modeset is required. Otherwise keep the existing values.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ vmode = ast_vbios_find_mode(ast, &crtc_state->mode);
+ if (!vmode)
+ return -EINVAL;
+ ast_state->vmode = vmode;
+
+ if (vmode->flags & HBorder)
+ hborder = 8;
+ if (vmode->flags & VBorder)
+ vborder = 8;
+
+ adjusted_mode->crtc_hdisplay = vmode->hde;
+ adjusted_mode->crtc_hblank_start = vmode->hde + hborder;
+ adjusted_mode->crtc_hblank_end = vmode->ht - hborder;
+ adjusted_mode->crtc_hsync_start = vmode->hde + hborder + vmode->hfp;
+ adjusted_mode->crtc_hsync_end = vmode->hde + hborder + vmode->hfp + vmode->hsync;
+ adjusted_mode->crtc_htotal = vmode->ht;
+
+ adjusted_mode->crtc_vdisplay = vmode->vde;
+ adjusted_mode->crtc_vblank_start = vmode->vde + vborder;
+ adjusted_mode->crtc_vblank_end = vmode->vt - vborder;
+ adjusted_mode->crtc_vsync_start = vmode->vde + vborder + vmode->vfp;
+ adjusted_mode->crtc_vsync_end = vmode->vde + vborder + vmode->vfp + vmode->vsync;
+ adjusted_mode->crtc_vtotal = vmode->vt;
+ }
return 0;
}
@@ -1263,8 +868,8 @@ ast_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
ast_state = to_ast_crtc_state(crtc->state);
new_ast_state->format = ast_state->format;
- memcpy(&new_ast_state->vbios_mode_info, &ast_state->vbios_mode_info,
- sizeof(new_ast_state->vbios_mode_info));
+ new_ast_state->std_table = ast_state->std_table;
+ new_ast_state->vmode = ast_state->vmode;
return &new_ast_state->base;
}
@@ -1294,7 +899,7 @@ static int ast_crtc_init(struct ast_device *ast)
int ret;
ret = drm_crtc_init_with_planes(dev, crtc, &ast->primary_plane.base,
- &ast->cursor_plane.base, &ast_crtc_funcs,
+ &ast->cursor_plane.base.base, &ast_crtc_funcs,
NULL);
if (ret)
return ret;
@@ -1373,12 +978,7 @@ int ast_mode_config_init(struct ast_device *ast)
dev->mode_config.min_height = 0;
dev->mode_config.preferred_depth = 24;
- if (ast->chip == AST2100 || // GEN2, but not AST1100 (?)
- ast->chip == AST2200 || // GEN3, but not AST2150 (?)
- IS_AST_GEN7(ast) ||
- IS_AST_GEN6(ast) ||
- IS_AST_GEN5(ast) ||
- IS_AST_GEN4(ast)) {
+ if (ast->support_fullhd) {
dev->mode_config.max_width = 1920;
dev->mode_config.max_height = 2048;
} else {
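A worked example, for illustration only, of the adjusted_mode timings that ast_crtc_helper_atomic_check() now derives from a VBIOS entry, using the 60 Hz line of res_1024x768 from ast_vbios.c:

/*
 *     { 1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, ... }
 *       ht    hde  hfp hsync  vt  vde vfp vsync
 *
 * Neither HBorder nor VBorder is set, so hborder = vborder = 0 and
 *
 *     crtc_hsync_start = 1024 + 0 + 24       = 1048
 *     crtc_hsync_end   = 1024 + 0 + 24 + 136 = 1184
 *     crtc_htotal      = 1344
 *     crtc_vsync_start =  768 + 0 + 3        =  771
 *     crtc_vsync_end   =  768 + 0 + 3 + 6    =  777
 *     crtc_vtotal      =  806
 *
 * i.e. the standard XGA 1024x768@60 timings.
 */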
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 364030f97571..91e85e457bdf 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -340,26 +340,49 @@ static void ast_init_dram_reg(struct ast_device *ast)
} while ((j & 0x40) == 0);
}
-void ast_post_gpu(struct ast_device *ast)
+int ast_post_gpu(struct ast_device *ast)
{
+ int ret;
+
ast_set_def_ext_reg(ast);
- if (IS_AST_GEN7(ast)) {
- if (ast->tx_chip == AST_TX_ASTDP)
- ast_dp_launch(ast);
- } else if (ast->config_mode == ast_use_p2a) {
- if (IS_AST_GEN6(ast))
+ if (AST_GEN(ast) >= 7) {
+ if (ast->tx_chip == AST_TX_ASTDP) {
+ ret = ast_dp_launch(ast);
+ if (ret)
+ return ret;
+ }
+ } else if (AST_GEN(ast) >= 6) {
+ if (ast->config_mode == ast_use_p2a) {
ast_post_chip_2500(ast);
- else if (IS_AST_GEN5(ast) || IS_AST_GEN4(ast))
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+ } else if (AST_GEN(ast) >= 4) {
+ if (ast->config_mode == ast_use_p2a) {
ast_post_chip_2300(ast);
- else
+ ast_init_3rdtx(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+ } else {
+ if (ast->config_mode == ast_use_p2a) {
ast_init_dram_reg(ast);
-
- ast_init_3rdtx(ast);
- } else {
- if (ast->tx_chip == AST_TX_SIL164)
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); /* Enable DVO */
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
}
+
+ return 0;
}
/* AST 2300 DRAM settings */
@@ -2039,7 +2062,7 @@ void ast_post_chip_2500(struct ast_device *ast)
u8 reg;
reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */
+ if ((reg & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */
/* Clear bus lock condition */
ast_patch_ahb_2500(ast->regs);
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index 2aadf07d135a..bb2cc1d8b84e 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -32,11 +32,18 @@
#define AST_IO_VGACR80_PASSWORD (0xa8)
#define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1)
#define AST_IO_VGACRA1_MMIO_ENABLED BIT(2)
+#define AST_IO_VGACRA3_DVO_ENABLED BIT(7)
#define AST_IO_VGACRB6_HSYNC_OFF BIT(0)
#define AST_IO_VGACRB6_VSYNC_OFF BIT(1)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
+/* mirrors SCU100[7:0] */
+#define AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK GENMASK(7, 6)
+#define AST_IO_VGACRD0_VRAM_INIT_BY_BMC BIT(7)
+#define AST_IO_VGACRD0_VRAM_INIT_READY BIT(6)
+#define AST_IO_VGACRD0_IKVM_WIDESCREEN BIT(0)
+
#define AST_IO_VGACRD1_MCU_FW_EXECUTING BIT(5)
/* Display Transmitter Type */
#define AST_IO_VGACRD1_TX_TYPE_MASK GENMASK(3, 1)
@@ -48,11 +55,16 @@
#define AST_IO_VGACRD1_TX_ANX9807_VBIOS 0x0a
#define AST_IO_VGACRD1_TX_FW_EMBEDDED_FW 0x0c /* special case of DP501 */
#define AST_IO_VGACRD1_TX_ASTDP 0x0e
+#define AST_IO_VGACRD1_SUPPORTS_WUXGA BIT(0)
+/*
+ * AST DisplayPort
+ */
#define AST_IO_VGACRD7_EDID_VALID_FLAG BIT(0)
#define AST_IO_VGACRDC_LINK_SUCCESS BIT(0)
#define AST_IO_VGACRDF_HPD BIT(0)
#define AST_IO_VGACRDF_DP_VIDEO_ENABLE BIT(4) /* mirrors AST_IO_VGACRE3_DP_VIDEO_ENABLE */
+#define AST_IO_VGACRE0_24BPP BIT(5) /* 18 bpp, if unset */
#define AST_IO_VGACRE3_DP_VIDEO_ENABLE BIT(0)
#define AST_IO_VGACRE3_DP_PHY_SLEEP BIT(4)
#define AST_IO_VGACRE5_EDID_READ_DONE BIT(0)
@@ -60,23 +72,4 @@
#define AST_IO_VGAIR1_R (0x5A)
#define AST_IO_VGAIR1_VREFRESH BIT(3)
-
-#define AST_VRAM_INIT_STATUS_MASK GENMASK(7, 6)
-//#define AST_VRAM_INIT_BY_BMC BIT(7)
-//#define AST_VRAM_INIT_READY BIT(6)
-
-/*
- * AST DisplayPort
- */
-
-/*
- * ASTDP setmode registers:
- * CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp)
- * CRE1[7:0]: MISC1 (default: 0x00)
- * CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50)
- */
-#define ASTDP_MISC0_24bpp BIT(5)
-#define ASTDP_MISC1 0
-#define ASTDP_AND_CLEAR_MASK 0x00
-
#endif
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index 0378c9bc079b..f1c9f7e1f1fc 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -24,6 +24,8 @@
#ifndef AST_TABLES_H
#define AST_TABLES_H
+#include "ast_drv.h"
+
/* Std. Table Index Definition */
#define TextModeIndex 0
#define EGAModeIndex 1
@@ -31,54 +33,6 @@
#define HiCModeIndex 3
#define TrueCModeIndex 4
-#define Charx8Dot 0x00000001
-#define HalfDCLK 0x00000002
-#define DoubleScanMode 0x00000004
-#define LineCompareOff 0x00000008
-#define HBorder 0x00000020
-#define VBorder 0x00000010
-#define WideScreenMode 0x00000100
-#define NewModeInfo 0x00000200
-#define NHSync 0x00000400
-#define PHSync 0x00000800
-#define NVSync 0x00001000
-#define PVSync 0x00002000
-#define SyncPP (PVSync | PHSync)
-#define SyncPN (PVSync | NHSync)
-#define SyncNP (NVSync | PHSync)
-#define SyncNN (NVSync | NHSync)
-#define AST2500PreCatchCRT 0x00004000
-
-/* DCLK Index */
-#define VCLK25_175 0x00
-#define VCLK28_322 0x01
-#define VCLK31_5 0x02
-#define VCLK36 0x03
-#define VCLK40 0x04
-#define VCLK49_5 0x05
-#define VCLK50 0x06
-#define VCLK56_25 0x07
-#define VCLK65 0x08
-#define VCLK75 0x09
-#define VCLK78_75 0x0A
-#define VCLK94_5 0x0B
-#define VCLK108 0x0C
-#define VCLK135 0x0D
-#define VCLK157_5 0x0E
-#define VCLK162 0x0F
-/* #define VCLK193_25 0x10 */
-#define VCLK154 0x10
-#define VCLK83_5 0x11
-#define VCLK106_5 0x12
-#define VCLK146_25 0x13
-#define VCLK148_5 0x14
-#define VCLK71 0x15
-#define VCLK88_75 0x16
-#define VCLK119 0x17
-#define VCLK85_5 0x18
-#define VCLK97_75 0x19
-#define VCLK118_25 0x1A
-
static const struct ast_vbios_dclk_info dclk_table[] = {
{0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
{0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
@@ -212,141 +166,4 @@ static const struct ast_vbios_stdtable vbios_stdtable[] = {
},
};
-static const struct ast_vbios_enhtable res_640x480[] = {
- { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60Hz */
- (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E },
- { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72Hz */
- (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E },
- { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75Hz */
- (SyncNN | Charx8Dot) , 75, 3, 0x2E },
- { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */
- (SyncNN | Charx8Dot) , 85, 4, 0x2E },
- { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */
- (SyncNN | Charx8Dot) , 0xFF, 4, 0x2E },
-};
-
-static const struct ast_vbios_enhtable res_800x600[] = {
- {1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */
- (SyncPP | Charx8Dot), 56, 1, 0x30 },
- {1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60Hz */
- (SyncPP | Charx8Dot), 60, 2, 0x30 },
- {1040, 800, 56, 120, 666, 600, 37, 6, VCLK50, /* 72Hz */
- (SyncPP | Charx8Dot), 72, 3, 0x30 },
- {1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5, /* 75Hz */
- (SyncPP | Charx8Dot), 75, 4, 0x30 },
- {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* 85Hz */
- (SyncPP | Charx8Dot), 84, 5, 0x30 },
- {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* end */
- (SyncPP | Charx8Dot), 0xFF, 5, 0x30 },
-};
-
-
-static const struct ast_vbios_enhtable res_1024x768[] = {
- {1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60Hz */
- (SyncNN | Charx8Dot), 60, 1, 0x31 },
- {1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70Hz */
- (SyncNN | Charx8Dot), 70, 2, 0x31 },
- {1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75, /* 75Hz */
- (SyncPP | Charx8Dot), 75, 3, 0x31 },
- {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* 85Hz */
- (SyncPP | Charx8Dot), 84, 4, 0x31 },
- {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* end */
- (SyncPP | Charx8Dot), 0xFF, 4, 0x31 },
-};
-
-static const struct ast_vbios_enhtable res_1280x1024[] = {
- {1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60Hz */
- (SyncPP | Charx8Dot), 60, 1, 0x32 },
- {1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75Hz */
- (SyncPP | Charx8Dot), 75, 2, 0x32 },
- {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* 85Hz */
- (SyncPP | Charx8Dot), 85, 3, 0x32 },
- {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* end */
- (SyncPP | Charx8Dot), 0xFF, 3, 0x32 },
-};
-
-static const struct ast_vbios_enhtable res_1600x1200[] = {
- {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60Hz */
- (SyncPP | Charx8Dot), 60, 1, 0x33 },
- {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* end */
- (SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
-};
-
-static const struct ast_vbios_enhtable res_1152x864[] = {
- {1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* 75Hz */
- (SyncPP | Charx8Dot | NewModeInfo), 75, 1, 0x3B },
- {1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* end */
- (SyncPP | Charx8Dot | NewModeInfo), 0xFF, 1, 0x3B },
-};
-
-/* 16:9 */
-static const struct ast_vbios_enhtable res_1360x768[] = {
- {1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */
- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x39 },
- {1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* end */
- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 0xFF, 1, 0x39 },
-};
-
-static const struct ast_vbios_enhtable res_1600x900[] = {
- {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x3A },
- {2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x3A },
- {2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x3A },
-};
-
-static const struct ast_vbios_enhtable res_1920x1080[] = {
- {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x38 },
- {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 0xFF, 1, 0x38 },
-};
-
-
-/* 16:10 */
-static const struct ast_vbios_enhtable res_1280x800[] = {
- {1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x35 },
- {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x35 },
- {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x35 },
-
-};
-
-static const struct ast_vbios_enhtable res_1440x900[] = {
- {1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60Hz RB */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x36 },
- {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x36 },
- {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x36 },
-};
-
-static const struct ast_vbios_enhtable res_1680x1050[] = {
- {1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x37 },
- {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x37 },
- {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x37 },
-};
-
-static const struct ast_vbios_enhtable res_1920x1200[] = {
- {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB*/
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x34 },
- {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 0xFF, 1, 0x34 },
-};
-
#endif
diff --git a/drivers/gpu/drm/ast/ast_vbios.c b/drivers/gpu/drm/ast/ast_vbios.c
new file mode 100644
index 000000000000..0953e6dd3976
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_vbios.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of the authors not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission. The authors makes no representations
+ * about the suitability of this software for any purpose. It is provided
+ * "as is" without express or implied warranty.
+ *
+ * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ast_drv.h"
+#include "ast_vbios.h"
+
+/* 4:3 */
+
+static const struct ast_vbios_enhtable res_640x480[] = {
+ { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60 Hz */
+ (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2e },
+ { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72 Hz */
+ (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2e },
+ { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75 Hz */
+ (SyncNN | Charx8Dot), 75, 3, 0x2e },
+ { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85 Hz */
+ (SyncNN | Charx8Dot), 85, 4, 0x2e },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_800x600[] = {
+ { 1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56 Hz */
+ (SyncPP | Charx8Dot), 56, 1, 0x30 },
+ { 1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60 Hz */
+ (SyncPP | Charx8Dot), 60, 2, 0x30 },
+ { 1040, 800, 56, 120, 666, 600, 37, 6, VCLK50, /* 72 Hz */
+ (SyncPP | Charx8Dot), 72, 3, 0x30 },
+ { 1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5, /* 75 Hz */
+ (SyncPP | Charx8Dot), 75, 4, 0x30 },
+ { 1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* 85 Hz */
+ (SyncPP | Charx8Dot), 84, 5, 0x30 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1024x768[] = {
+ { 1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60 Hz */
+ (SyncNN | Charx8Dot), 60, 1, 0x31 },
+ { 1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70 Hz */
+ (SyncNN | Charx8Dot), 70, 2, 0x31 },
+ { 1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75, /* 75 Hz */
+ (SyncPP | Charx8Dot), 75, 3, 0x31 },
+ { 1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* 85 Hz */
+ (SyncPP | Charx8Dot), 84, 4, 0x31 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1152x864[] = {
+ { 1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* 75 Hz */
+ (SyncPP | Charx8Dot | NewModeInfo), 75, 1, 0x3b },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1280x1024[] = {
+ { 1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60 Hz */
+ (SyncPP | Charx8Dot), 60, 1, 0x32 },
+ { 1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75 Hz */
+ (SyncPP | Charx8Dot), 75, 2, 0x32 },
+ { 1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* 85 Hz */
+ (SyncPP | Charx8Dot), 85, 3, 0x32 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1600x1200[] = {
+ { 2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60 Hz */
+ (SyncPP | Charx8Dot), 60, 1, 0x33 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+/* 16:9 */
+
+static const struct ast_vbios_enhtable res_1360x768[] = {
+ { 1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* 60 Hz */
+ (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x39 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1600x900[] = {
+ { 1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60 Hz CVT RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x3a },
+ { 2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60 Hz CVT */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x3a },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1920x1080[] = {
+ { 2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60 Hz */
+ (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x38 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+/* 16:10 */
+
+static const struct ast_vbios_enhtable res_1280x800[] = {
+ { 1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60 Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x35 },
+ { 1680, 1280, 72, 128, 831, 800, 3, 6, VCLK83_5, /* 60 Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x35 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1440x900[] = {
+ { 1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60 Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x36 },
+ { 1904, 1440, 80, 152, 934, 900, 3, 6, VCLK106_5, /* 60 Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x36 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1680x1050[] = {
+ { 1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60 Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x37 },
+ { 2240, 1680, 104, 176, 1089, 1050, 3, 6, VCLK146_25, /* 60 Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x37 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1920x1200[] = {
+ { 2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60 Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x34 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+/*
+ * VBIOS mode tables
+ */
+
+static const struct ast_vbios_enhtable *res_table_wuxga[] = {
+ &res_1920x1200[0],
+ NULL,
+};
+
+static const struct ast_vbios_enhtable *res_table_fullhd[] = {
+ &res_1920x1080[0],
+ NULL,
+};
+
+static const struct ast_vbios_enhtable *res_table_wsxga_p[] = {
+ &res_1280x800[0],
+ &res_1360x768[0],
+ &res_1440x900[0],
+ &res_1600x900[0],
+ &res_1680x1050[0],
+ NULL,
+};
+
+static const struct ast_vbios_enhtable *res_table[] = {
+ &res_640x480[0],
+ &res_800x600[0],
+ &res_1024x768[0],
+ &res_1152x864[0],
+ &res_1280x1024[0],
+ &res_1600x1200[0],
+ NULL,
+};
+
+static const struct ast_vbios_enhtable *
+__ast_vbios_find_mode_table(const struct ast_vbios_enhtable **vmode_tables,
+ unsigned int hdisplay,
+ unsigned int vdisplay)
+{
+ while (*vmode_tables) {
+ if ((*vmode_tables)->hde == hdisplay && (*vmode_tables)->vde == vdisplay)
+ return *vmode_tables;
+ ++vmode_tables;
+ }
+
+ return NULL;
+}
+
+static const struct ast_vbios_enhtable *ast_vbios_find_mode_table(const struct ast_device *ast,
+ unsigned int hdisplay,
+ unsigned int vdisplay)
+{
+ const struct ast_vbios_enhtable *vmode_table = NULL;
+
+ if (ast->support_wuxga)
+ vmode_table = __ast_vbios_find_mode_table(res_table_wuxga, hdisplay, vdisplay);
+ if (!vmode_table && ast->support_fullhd)
+ vmode_table = __ast_vbios_find_mode_table(res_table_fullhd, hdisplay, vdisplay);
+ if (!vmode_table && ast->support_wsxga_p)
+ vmode_table = __ast_vbios_find_mode_table(res_table_wsxga_p, hdisplay, vdisplay);
+ if (!vmode_table)
+ vmode_table = __ast_vbios_find_mode_table(res_table, hdisplay, vdisplay);
+
+ return vmode_table;
+}
+
+const struct ast_vbios_enhtable *ast_vbios_find_mode(const struct ast_device *ast,
+ const struct drm_display_mode *mode)
+{
+ const struct ast_vbios_enhtable *best_vmode = NULL;
+ const struct ast_vbios_enhtable *vmode_table;
+ const struct ast_vbios_enhtable *vmode;
+ u32 refresh_rate;
+
+ vmode_table = ast_vbios_find_mode_table(ast, mode->hdisplay, mode->vdisplay);
+ if (!vmode_table)
+ return NULL;
+
+ refresh_rate = drm_mode_vrefresh(mode);
+
+ for (vmode = vmode_table; ast_vbios_mode_is_valid(vmode); ++vmode) {
+ if (((mode->flags & DRM_MODE_FLAG_NVSYNC) && (vmode->flags & PVSync)) ||
+ ((mode->flags & DRM_MODE_FLAG_PVSYNC) && (vmode->flags & NVSync)) ||
+ ((mode->flags & DRM_MODE_FLAG_NHSYNC) && (vmode->flags & PHSync)) ||
+ ((mode->flags & DRM_MODE_FLAG_PHSYNC) && (vmode->flags & NHSync))) {
+ continue;
+ }
+ if (vmode->refresh_rate <= refresh_rate &&
+ (!best_vmode || vmode->refresh_rate > best_vmode->refresh_rate))
+ best_vmode = vmode;
+ }
+
+ return best_vmode;
+}
diff --git a/drivers/gpu/drm/ast/ast_vbios.h b/drivers/gpu/drm/ast/ast_vbios.h
new file mode 100644
index 000000000000..8cf025010594
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_vbios.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of the authors not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission. The authors makes no representations
+ * about the suitability of this software for any purpose. It is provided
+ * "as is" without express or implied warranty.
+ *
+ * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+/* Ported from xf86-video-ast driver */
+
+#ifndef AST_VBIOS_H
+#define AST_VBIOS_H
+
+#include <linux/types.h>
+
+struct ast_device;
+struct drm_display_mode;
+
+#define Charx8Dot 0x00000001
+#define HalfDCLK 0x00000002
+#define DoubleScanMode 0x00000004
+#define LineCompareOff 0x00000008
+#define HBorder 0x00000020
+#define VBorder 0x00000010
+#define WideScreenMode 0x00000100
+#define NewModeInfo 0x00000200
+#define NHSync 0x00000400
+#define PHSync 0x00000800
+#define NVSync 0x00001000
+#define PVSync 0x00002000
+#define SyncPP (PVSync | PHSync)
+#define SyncPN (PVSync | NHSync)
+#define SyncNP (NVSync | PHSync)
+#define SyncNN (NVSync | NHSync)
+#define AST2500PreCatchCRT 0x00004000
+
+/* DCLK Index */
+#define VCLK25_175 0x00
+#define VCLK28_322 0x01
+#define VCLK31_5 0x02
+#define VCLK36 0x03
+#define VCLK40 0x04
+#define VCLK49_5 0x05
+#define VCLK50 0x06
+#define VCLK56_25 0x07
+#define VCLK65 0x08
+#define VCLK75 0x09
+#define VCLK78_75 0x0a
+#define VCLK94_5 0x0b
+#define VCLK108 0x0c
+#define VCLK135 0x0d
+#define VCLK157_5 0x0e
+#define VCLK162 0x0f
+/* #define VCLK193_25 0x10 */
+#define VCLK154 0x10
+#define VCLK83_5 0x11
+#define VCLK106_5 0x12
+#define VCLK146_25 0x13
+#define VCLK148_5 0x14
+#define VCLK71 0x15
+#define VCLK88_75 0x16
+#define VCLK119 0x17
+#define VCLK85_5 0x18
+#define VCLK97_75 0x19
+#define VCLK118_25 0x1a
+
+struct ast_vbios_enhtable {
+ u32 ht;
+ u32 hde;
+ u32 hfp;
+ u32 hsync;
+ u32 vt;
+ u32 vde;
+ u32 vfp;
+ u32 vsync;
+ u32 dclk_index;
+ u32 flags;
+ u32 refresh_rate;
+ u32 refresh_rate_index;
+ u32 mode_id;
+};
+
+#define AST_VBIOS_INVALID_MODE \
+ {0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u}
+
+static inline bool ast_vbios_mode_is_valid(const struct ast_vbios_enhtable *vmode)
+{
+ return vmode->ht && vmode->vt && vmode->refresh_rate;
+}
+
+const struct ast_vbios_enhtable *ast_vbios_find_mode(const struct ast_device *ast,
+ const struct drm_display_mode *mode);
+
+#endif
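The new ast_vbios.c/ast_vbios.h pair above replaces the removed per-resolution tables with NULL-terminated mode tables and a single lookup entry point, ast_vbios_find_mode(), which picks the best entry whose refresh rate does not exceed the requested mode's. A minimal sketch of how a caller could back a mode_valid-style check with that API follows; the wrapper name ast_vbios_mode_valid_example() and the MODE_OK/MODE_NOMODE mapping are illustrative assumptions by the editor, not part of this patch.

#include <drm/drm_modes.h>

#include "ast_drv.h"
#include "ast_vbios.h"

/* Sketch only: validates a display mode against the VBIOS tables. */
static enum drm_mode_status
ast_vbios_mode_valid_example(struct ast_device *ast,
			     const struct drm_display_mode *mode)
{
	const struct ast_vbios_enhtable *vmode;

	/* Returns the best table entry for hdisplay/vdisplay, or NULL. */
	vmode = ast_vbios_find_mode(ast, mode);
	if (!vmode)
		return MODE_NOMODE;

	return MODE_OK;
}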
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 6b4664d91faa..d20f1646dac2 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -90,6 +90,14 @@ config DRM_FSL_LDB
help
Support for i.MX8MP DPI-to-LVDS on-SoC encoder.
+config DRM_I2C_NXP_TDA998X
+ tristate "NXP Semiconductors TDA998X HDMI encoder"
+ default m if DRM_TILCDC
+ select CEC_CORE if CEC_NOTIFIER
+ select SND_SOC_HDMI_CODEC if SND_SOC
+ help
+ Support for NXP Semiconductors TDA998X HDMI encoders.
+
config DRM_ITE_IT6263
tristate "ITE IT6263 LVDS/HDMI bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 97304b429a53..245e8a27e3fc 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -6,6 +6,10 @@ obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o
obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_FSL_LDB) += fsl-ldb.o
+
+tda998x-y := tda998x_drv.o
+obj-$(CONFIG_DRM_I2C_NXP_TDA998X) += tda998x.o
+
obj-$(CONFIG_DRM_ITE_IT6263) += ite-it6263.o
obj-$(CONFIG_DRM_ITE_IT6505) += ite-it6505.o
obj-$(CONFIG_DRM_LONTIUM_LT8912B) += lontium-lt8912b.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 657bc3dd18df..1ff8c815ec79 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -243,9 +243,14 @@ static const struct hdmi_codec_ops adv7511_codec_ops = {
static const struct hdmi_codec_pdata codec_data = {
.ops = &adv7511_codec_ops,
+ .i2s_formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |
+ SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE),
.max_i2s_channels = 2,
.i2s = 1,
+ .no_i2s_capture = 1,
.spdif = 1,
+ .no_spdif_capture = 1,
};
int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index a13b3d8ab6ac..050dae338ffe 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -847,7 +847,7 @@ static int adv7511_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
adv7511_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct adv7511 *adv = connector_to_adv7511(connector);
@@ -910,14 +910,16 @@ static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
return container_of(bridge, struct adv7511, bridge);
}
-static void adv7511_bridge_enable(struct drm_bridge *bridge)
+static void adv7511_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
adv7511_power_on(adv);
}
-static void adv7511_bridge_disable(struct drm_bridge *bridge)
+static void adv7511_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
@@ -996,14 +998,18 @@ static void adv7511_bridge_hpd_notify(struct drm_bridge *bridge,
}
static const struct drm_bridge_funcs adv7511_bridge_funcs = {
- .enable = adv7511_bridge_enable,
- .disable = adv7511_bridge_disable,
.mode_set = adv7511_bridge_mode_set,
.mode_valid = adv7511_bridge_mode_valid,
.attach = adv7511_bridge_attach,
.detect = adv7511_bridge_detect,
.edid_read = adv7511_bridge_edid_read,
.hpd_notify = adv7511_bridge_hpd_notify,
+
+ .atomic_enable = adv7511_bridge_atomic_enable,
+ .atomic_disable = adv7511_bridge_atomic_disable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
};
/* -----------------------------------------------------------------------------
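The same mechanical conversion repeats across the bridge drivers below: the atomic_enable/atomic_disable family now receives the full struct drm_atomic_state instead of the old struct drm_bridge_state, so the former old_bridge_state->base.state dereference disappears. A hedged sketch of the resulting pattern, using a hypothetical my_bridge driver rather than any driver in this series:

#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>

/* Hypothetical driver showing the converted callback signature. */
static void my_bridge_atomic_enable(struct drm_bridge *bridge,
				    struct drm_atomic_state *state)
{
	struct drm_bridge_state *old_bridge_state;
	struct drm_connector *connector;

	/* The global atomic state is now passed in directly... */
	connector = drm_atomic_get_new_connector_for_encoder(state,
							     bridge->encoder);

	/* ...and a bridge that still needs its own state looks it up. */
	old_bridge_state = drm_atomic_get_old_bridge_state(state, bridge);
	if (!connector || !old_bridge_state)
		return;

	/* hardware programming would go here */
}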
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index bfa88409a7ff..071168aa0c3b 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1197,11 +1197,9 @@ struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
return conn_state->crtc;
}
-static void
-analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
- struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -1257,11 +1255,9 @@ out_dp_init:
return ret;
}
-static void
-analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
- struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -1327,11 +1323,9 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
dp->dpms_mode = DRM_MODE_DPMS_OFF;
}
-static void
-analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
- struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *old_crtc, *new_crtc;
struct drm_crtc_state *old_crtc_state = NULL;
@@ -1367,11 +1361,9 @@ out:
analogix_dp_bridge_disable(bridge);
}
-static void
-analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
- struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
@@ -1553,7 +1545,6 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
{
struct platform_device *pdev = to_platform_device(dev);
struct analogix_dp_device *dp;
- struct resource *res;
unsigned int irq_flags;
int ret;
@@ -1605,9 +1596,7 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
return ERR_CAST(dp->clock);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ dp->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dp->reg_base)) {
ret = PTR_ERR(dp->reg_base);
goto err_disable_clk;
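analogix_dp_probe also picks up the devm_platform_ioremap_resource() shorthand, which folds platform_get_resource() and devm_ioremap_resource() into one call; the same change appears later in microchip-lvds. A hedged sketch of that idiom with a hypothetical my_probe(), not taken from this patch:

#include <linux/err.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	void __iomem *regs;

	/* Looks up MEM resource 0 and ioremaps it; devres unmaps on detach. */
	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	return 0;
}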
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 4be34d5c7a3b..0b97b66de577 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2380,7 +2380,7 @@ static int anx7625_bridge_atomic_check(struct drm_bridge *bridge,
}
static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *state)
+ struct drm_atomic_state *state)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
@@ -2389,7 +2389,7 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
dev_dbg(dev, "drm atomic enable\n");
- connector = drm_atomic_get_new_connector_for_encoder(state->base.state,
+ connector = drm_atomic_get_new_connector_for_encoder(state,
bridge->encoder);
if (!connector)
return;
@@ -2401,7 +2401,7 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
anx7625_dp_start(ctx);
- conn_state = drm_atomic_get_new_connector_state(state->base.state, connector);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
if (WARN_ON(!conn_state))
return;
@@ -2419,7 +2419,7 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old)
+ struct drm_atomic_state *state)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index d081850e3c03..81fad14c2cd5 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -1619,7 +1619,7 @@ bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
static
enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
@@ -1979,10 +1979,9 @@ static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
}
static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
- struct drm_atomic_state *state = bridge_state->base.state;
struct cdns_mhdp_bridge_state *mhdp_state;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
@@ -2070,7 +2069,7 @@ out:
}
static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
u32 resp;
@@ -2463,9 +2462,9 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
if (!mhdp)
return -ENOMEM;
- clk = devm_clk_get(dev, NULL);
+ clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk)) {
- dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
+ dev_err(dev, "couldn't get and enable clk: %ld\n", PTR_ERR(clk));
return PTR_ERR(clk);
}
@@ -2504,14 +2503,12 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
mhdp->info = of_device_get_match_data(dev);
- clk_prepare_enable(clk);
-
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "pm_runtime_resume_and_get failed\n");
pm_runtime_disable(dev);
- goto clk_disable;
+ return ret;
}
if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
@@ -2590,8 +2587,6 @@ plat_fini:
runtime_put:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
-clk_disable:
- clk_disable_unprepare(mhdp->clk);
return ret;
}
@@ -2632,8 +2627,6 @@ static void cdns_mhdp_remove(struct platform_device *pdev)
cancel_work_sync(&mhdp->modeset_retry_work);
flush_work(&mhdp->hpd_work);
/* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */
-
- clk_disable_unprepare(mhdp->clk);
}
static const struct of_device_id mhdp_ids[] = {
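The cdns-mhdp8546 change above is the standard devm_clk_get_enabled() conversion: the clock is acquired, prepared and enabled in one call, and devres disables and releases it when the driver detaches, which is why the clk_disable error path and the explicit clk_disable_unprepare() in remove can be dropped. A minimal probe sketch of the pattern (hypothetical my_probe(), not from this patch):

#include <linux/clk.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;

	/* Gets, prepares and enables the clock; devres undoes all of it on detach. */
	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "couldn't get and enable clk\n");

	/* No clk_disable_unprepare() needed in error paths or remove(). */
	return 0;
}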
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index d47703559b0d..81f7c701961f 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -341,10 +341,9 @@ static void chipone_configure_pll(struct chipone *icn,
}
static void chipone_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct chipone *icn = bridge_to_chipone(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
struct drm_display_mode *mode = &icn->mode;
const struct drm_bridge_state *bridge_state;
u16 hfp, hbp, hsync;
@@ -445,7 +444,7 @@ static void chipone_atomic_enable(struct drm_bridge *bridge,
}
static void chipone_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct chipone *icn = bridge_to_chipone(bridge);
int ret;
@@ -482,7 +481,7 @@ static void chipone_atomic_pre_enable(struct drm_bridge *bridge,
}
static void chipone_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct chipone *icn = bridge_to_chipone(bridge);
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index 0fc8a14fd800..26ae1ab5237f 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -122,10 +122,9 @@ static int fsl_ldb_attach(struct drm_bridge *bridge,
}
static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
const struct drm_bridge_state *bridge_state;
const struct drm_crtc_state *crtc_state;
const struct drm_display_mode *mode;
@@ -224,7 +223,7 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
}
static void fsl_ldb_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
index 0d1ac3edcab4..a17433a7c755 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
@@ -49,16 +49,17 @@ static int imx8mp_hdmi_pvi_bridge_attach(struct drm_bridge *bridge,
}
static void imx8mp_hdmi_pvi_bridge_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = bridge_state->base.state;
struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
struct drm_connector_state *conn_state;
+ struct drm_bridge_state *bridge_state;
const struct drm_display_mode *mode;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
u32 bus_flags = 0, val;
+ bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
conn_state = drm_atomic_get_new_connector_state(state, connector);
crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
@@ -88,7 +89,7 @@ static void imx8mp_hdmi_pvi_bridge_enable(struct drm_bridge *bridge,
}
static void imx8mp_hdmi_pvi_bridge_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
index dd5823f04c70..524aac751359 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
@@ -200,9 +200,8 @@ imx8qm_ldb_bridge_mode_set(struct drm_bridge *bridge,
CH_HSYNC_M(chno), CH_PHSYNC(chno));
}
-static void
-imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
struct ldb *ldb = ldb_ch->ldb;
@@ -247,9 +246,8 @@ imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
ldb_bridge_enable_helper(bridge);
}
-static void
-imx8qm_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qm_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
struct ldb *ldb = ldb_ch->ldb;
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
index 7bce2305d676..3cb484773ddf 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
@@ -203,9 +203,8 @@ imx8qxp_ldb_bridge_mode_set(struct drm_bridge *bridge,
companion->funcs->mode_set(companion, mode, adjusted_mode);
}
-static void
-imx8qxp_ldb_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_ldb_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
struct ldb *ldb = ldb_ch->ldb;
@@ -217,12 +216,11 @@ imx8qxp_ldb_bridge_atomic_pre_enable(struct drm_bridge *bridge,
clk_prepare_enable(imx8qxp_ldb->clk_bypass);
if (is_split && companion)
- companion->funcs->atomic_pre_enable(companion, old_bridge_state);
+ companion->funcs->atomic_pre_enable(companion, state);
}
-static void
-imx8qxp_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
struct ldb *ldb = ldb_ch->ldb;
@@ -252,12 +250,11 @@ imx8qxp_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
DRM_DEV_ERROR(dev, "failed to power on PHY: %d\n", ret);
if (is_split && companion)
- companion->funcs->atomic_enable(companion, old_bridge_state);
+ companion->funcs->atomic_enable(companion, state);
}
-static void
-imx8qxp_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
struct ldb *ldb = ldb_ch->ldb;
@@ -283,7 +280,7 @@ imx8qxp_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
clk_disable_unprepare(imx8qxp_ldb->clk_pixel);
if (is_split && companion)
- companion->funcs->atomic_disable(companion, old_bridge_state);
+ companion->funcs->atomic_disable(companion, state);
ret = pm_runtime_put(dev);
if (ret < 0)
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
index 1812bd106261..1d9529dc7f2a 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
@@ -176,9 +176,8 @@ imx8qxp_pc_bridge_mode_set(struct drm_bridge *bridge,
clk_disable_unprepare(pc->clk_apb);
}
-static void
-imx8qxp_pc_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_pc_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct imx8qxp_pc_channel *ch = bridge->driver_private;
struct imx8qxp_pc *pc = ch->pc;
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
index 4b0715ed6f38..cd6818db0fd3 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
@@ -153,9 +153,8 @@ imx8qxp_pixel_link_bridge_mode_set(struct drm_bridge *bridge,
imx8qxp_pixel_link_set_mst_addr(pl);
}
-static void
-imx8qxp_pixel_link_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_pixel_link_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct imx8qxp_pixel_link *pl = bridge->driver_private;
@@ -164,9 +163,8 @@ imx8qxp_pixel_link_bridge_atomic_enable(struct drm_bridge *bridge,
imx8qxp_pixel_link_enable_sync(pl);
}
-static void
-imx8qxp_pixel_link_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_pixel_link_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct imx8qxp_pixel_link *pl = bridge->driver_private;
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
index 65cf3a6c8ec6..49dd4f96d52c 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
@@ -122,9 +122,8 @@ imx8qxp_pxl2dpi_bridge_mode_set(struct drm_bridge *bridge,
}
}
-static void
-imx8qxp_pxl2dpi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_pxl2dpi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;
int ret;
@@ -134,8 +133,7 @@ imx8qxp_pxl2dpi_bridge_atomic_disable(struct drm_bridge *bridge,
DRM_DEV_ERROR(p2d->dev, "failed to put runtime PM: %d\n", ret);
if (p2d->companion)
- p2d->companion->funcs->atomic_disable(p2d->companion,
- old_bridge_state);
+ p2d->companion->funcs->atomic_disable(p2d->companion, state);
}
static const u32 imx8qxp_pxl2dpi_bus_output_fmts[] = {
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index 306b5e374b9e..21152a1c28f7 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -569,9 +569,8 @@ static int it6263_read_edid(void *data, u8 *buf, unsigned int block, size_t len)
return 0;
}
-static void
-it6263_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void it6263_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct it6263 *it = bridge_to_it6263(bridge);
@@ -581,11 +580,9 @@ it6263_bridge_atomic_disable(struct drm_bridge *bridge,
AFE_DRV_RST | AFE_DRV_PWD);
}
-static void
-it6263_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void it6263_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = old_bridge_state->base.state;
struct it6263 *it = bridge_to_it6263(bridge);
const struct drm_crtc_state *crtc_state;
struct regmap *regmap = it->hdmi_regmap;
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 88ef76a37fe6..8a607558ac89 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -2250,12 +2250,13 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
continue;
}
- for (i = 0; i < 5; i++) {
+ for (i = 0; i < 5; i++)
if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] ||
- av[i][1] != av[i][2] || bv[i][0] != av[i][3])
+ bv[i][1] != av[i][2] || bv[i][0] != av[i][3])
break;
- DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d, %d", retry, i);
+ if (i == 5) {
+ DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d", retry);
return true;
}
}
@@ -3182,11 +3183,10 @@ it6505_bridge_mode_valid(struct drm_bridge *bridge,
}
static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = it6505->dev;
- struct drm_atomic_state *state = old_state->base.state;
struct hdmi_avi_infoframe frame;
struct drm_crtc_state *crtc_state;
struct drm_connector_state *conn_state;
@@ -3238,7 +3238,7 @@ static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = it6505->dev;
@@ -3253,7 +3253,7 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
}
static void it6505_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = it6505->dev;
@@ -3264,7 +3264,7 @@ static void it6505_bridge_atomic_pre_enable(struct drm_bridge *bridge,
}
static void it6505_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = it6505->dev;
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 23edcde6b9a7..b9f90f32145d 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -721,10 +721,9 @@ static u32 *it66121_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
}
static void it66121_bridge_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
- struct drm_atomic_state *state = bridge_state->base.state;
ctx->connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
@@ -732,7 +731,7 @@ static void it66121_bridge_enable(struct drm_bridge *bridge,
}
static void it66121_bridge_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 999ddebb832d..0fc5ea18fe6a 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -455,10 +455,9 @@ static int lt9211_configure_tx(struct lt9211 *ctx, bool jeida,
}
static void lt9211_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct lt9211 *ctx = bridge_to_lt9211(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
const struct drm_bridge_state *bridge_state;
const struct drm_crtc_state *crtc_state;
const struct drm_display_mode *mode;
@@ -553,7 +552,7 @@ static void lt9211_atomic_enable(struct drm_bridge *bridge,
}
static void lt9211_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct lt9211 *ctx = bridge_to_lt9211(bridge);
int ret;
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index e650cd83fc8d..026803034231 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -640,12 +640,10 @@ lt9611_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
}
/* bridge funcs */
-static void
-lt9611_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void lt9611_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
@@ -689,9 +687,8 @@ lt9611_bridge_atomic_enable(struct drm_bridge *bridge,
regmap_write(lt9611->regmap, 0x8130, 0xea);
}
-static void
-lt9611_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void lt9611_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
int ret;
@@ -767,7 +764,7 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
}
static void lt9611_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
static const struct reg_sequence reg_cfg[] = {
@@ -786,9 +783,8 @@ static void lt9611_bridge_atomic_pre_enable(struct drm_bridge *bridge,
lt9611->sleep = false;
}
-static void
-lt9611_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void lt9611_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index a3dcee62e7a5..a47aabf134fd 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -115,16 +115,9 @@ static int ge_b850v3_lvds_get_modes(struct drm_connector *connector)
return num_modes;
}
-static enum drm_mode_status ge_b850v3_lvds_mode_valid(
- struct drm_connector *connector, struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
static const struct
drm_connector_helper_funcs ge_b850v3_lvds_connector_helper_funcs = {
.get_modes = ge_b850v3_lvds_get_modes,
- .mode_valid = ge_b850v3_lvds_mode_valid,
};
static enum drm_connector_status ge_b850v3_lvds_bridge_detect(struct drm_bridge *bridge)
diff --git a/drivers/gpu/drm/bridge/microchip-lvds.c b/drivers/gpu/drm/bridge/microchip-lvds.c
index b8313dad6072..53dd140a1b8d 100644
--- a/drivers/gpu/drm/bridge/microchip-lvds.c
+++ b/drivers/gpu/drm/bridge/microchip-lvds.c
@@ -162,8 +162,7 @@ static int mchp_lvds_probe(struct platform_device *pdev)
lvds->dev = dev;
- lvds->regs = devm_ioremap_resource(lvds->dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ lvds->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lvds->regs))
return PTR_ERR(lvds->regs);
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 1e5b2a37cb8c..d04c62a0cb9f 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -736,9 +736,8 @@ static int nwl_dsi_disable(struct nwl_dsi *dsi)
return 0;
}
-static void
-nwl_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void nwl_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
int ret;
@@ -898,9 +897,8 @@ runtime_put:
pm_runtime_put_sync(dev);
}
-static void
-nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
int ret;
@@ -1184,6 +1182,7 @@ static int nwl_dsi_probe(struct platform_device *pdev)
dsi->bridge.funcs = &nwl_dsi_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.timings = &nwl_dsi_timings;
+ dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
dev_set_drvdata(dev, dsi);
pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 44e36ae66db4..27261b2ac9c8 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -15,7 +15,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 6e88339dec0f..258c85c83a28 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -109,10 +109,9 @@ static void panel_bridge_detach(struct drm_bridge *bridge)
}
static void panel_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_encoder *encoder = bridge->encoder;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -129,10 +128,9 @@ static void panel_bridge_atomic_pre_enable(struct drm_bridge *bridge,
}
static void panel_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_encoder *encoder = bridge->encoder;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -149,10 +147,9 @@ static void panel_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void panel_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_encoder *encoder = bridge->encoder;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
@@ -169,10 +166,9 @@ static void panel_bridge_atomic_disable(struct drm_bridge *bridge,
}
static void panel_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_encoder *encoder = bridge->encoder;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
@@ -322,8 +318,10 @@ void drm_panel_bridge_remove(struct drm_bridge *bridge)
if (!bridge)
return;
- if (bridge->funcs != &panel_bridge_bridge_funcs)
+ if (!drm_bridge_is_panel(bridge)) {
+ drm_warn(bridge->dev, "%s: called on non-panel bridge!\n", __func__);
return;
+ }
panel_bridge = drm_bridge_to_panel_bridge(bridge);
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index ae3ab9262ef1..13ada42a5514 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -19,7 +19,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 14d4dcf239da..a42138b33258 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -20,7 +20,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#define PAGE0_AUXCH_CFG3 0x76
@@ -438,7 +437,7 @@ static const struct dev_pm_ops ps8640_pm_ops = {
};
static void ps8640_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
@@ -473,7 +472,7 @@ static void ps8640_atomic_pre_enable(struct drm_bridge *bridge,
}
static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index f8b4fb835765..54de6ed2fae8 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -1457,7 +1457,7 @@ static int samsung_dsim_init(struct samsung_dsim *dsi)
}
static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
int ret;
@@ -1485,7 +1485,7 @@ static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
}
static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
@@ -1496,7 +1496,7 @@ static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
}
static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
@@ -1508,7 +1508,7 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
}
static void samsung_dsim_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index bf2d1632b020..2100a687096e 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -325,7 +325,7 @@ static const struct drm_connector_helper_funcs sii902x_connector_helper_funcs =
};
static void sii902x_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
@@ -339,7 +339,7 @@ static void sii902x_bridge_atomic_disable(struct drm_bridge *bridge,
}
static void sii902x_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index b281cabfe992..1d39015f1533 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -362,10 +362,9 @@ static int dw_hdmi_qp_config_drm_infoframe(struct dw_hdmi_qp *hdmi,
}
static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
- struct drm_atomic_state *state = old_state->base.state;
struct drm_connector_state *conn_state;
struct drm_connector *connector;
unsigned int op_mode;
@@ -396,7 +395,7 @@ static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void dw_hdmi_qp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 996733ed2c00..0890add5f707 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -2621,6 +2621,7 @@ static int dw_hdmi_connector_create(struct dw_hdmi *hdmi)
* - MEDIA_BUS_FMT_UYYVYY12_0_5X36,
* - MEDIA_BUS_FMT_UYYVYY10_0_5X30,
* - MEDIA_BUS_FMT_UYYVYY8_0_5X24,
+ * - MEDIA_BUS_FMT_RGB888_1X24,
* - MEDIA_BUS_FMT_YUV16_1X48,
* - MEDIA_BUS_FMT_RGB161616_1X48,
* - MEDIA_BUS_FMT_UYVY12_1X24,
@@ -2631,7 +2632,6 @@ static int dw_hdmi_connector_create(struct dw_hdmi *hdmi)
* - MEDIA_BUS_FMT_RGB101010_1X30,
* - MEDIA_BUS_FMT_UYVY8_1X16,
* - MEDIA_BUS_FMT_YUV8_1X24,
- * - MEDIA_BUS_FMT_RGB888_1X24,
*/
/* Can return a maximum of 11 possible output formats for a mode/connector */
@@ -2669,7 +2669,7 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
}
/*
- * If the current mode enforces 4:2:0, force the output but format
+ * If the current mode enforces 4:2:0, force the output bus format
* to 4:2:0 and do not add the YUV422/444/RGB formats
*/
if (conn->ycbcr_420_allowed &&
@@ -2945,7 +2945,7 @@ static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
}
static void dw_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct dw_hdmi *hdmi = bridge->driver_private;
@@ -2959,10 +2959,9 @@ static void dw_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
}
static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct dw_hdmi *hdmi = bridge->driver_private;
- struct drm_atomic_state *state = old_state->base.state;
struct drm_connector *connector;
connector = drm_atomic_get_new_connector_for_encoder(state,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 0fb02e4e7f4e..2b6e70a49f43 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -934,7 +934,7 @@ static void dw_mipi_dsi_clear_err(struct dw_mipi_dsi *dsi)
}
static void dw_mipi_dsi_bridge_post_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
@@ -1022,7 +1022,7 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
}
static void dw_mipi_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
@@ -1043,7 +1043,7 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
}
static void dw_mipi_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
index d7569bf2d9c3..5fd7a459efdd 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
@@ -745,7 +745,7 @@ static int dw_mipi_dsi2_bridge_atomic_check(struct drm_bridge *bridge,
}
static void dw_mipi_dsi2_bridge_post_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
const struct dw_mipi_dsi2_phy_ops *phy_ops = dsi2->plat_data->phy_ops;
@@ -821,7 +821,7 @@ static void dw_mipi_dsi2_mode_set(struct dw_mipi_dsi2 *dsi2,
}
static void dw_mipi_dsi2_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
@@ -840,7 +840,7 @@ static void dw_mipi_dsi2_bridge_mode_set(struct drm_bridge *bridge,
}
static void dw_mipi_dsi2_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index 46198af9eebb..49c76027f831 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -20,10 +20,10 @@
#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -149,7 +149,8 @@ static int tc358762_init(struct tc358762 *ctx)
return tc358762_clear_error(ctx);
}
-static void tc358762_post_disable(struct drm_bridge *bridge, struct drm_bridge_state *state)
+static void tc358762_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
int ret;
@@ -171,7 +172,8 @@ static void tc358762_post_disable(struct drm_bridge *bridge, struct drm_bridge_s
dev_err(ctx->dev, "error disabling regulators (%d)\n", ret);
}
-static void tc358762_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *state)
+static void tc358762_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
int ret;
@@ -188,7 +190,8 @@ static void tc358762_pre_enable(struct drm_bridge *bridge, struct drm_bridge_sta
ctx->pre_enabled = true;
}
-static void tc358762_enable(struct drm_bridge *bridge, struct drm_bridge_state *state)
+static void tc358762_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
int ret;
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 4637bf6ea7a3..39e2d3a7a27d 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1548,9 +1548,8 @@ static int tc_edp_stream_disable(struct tc_data *tc)
return 0;
}
-static void
-tc_dpi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void tc_dpi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
@@ -1564,9 +1563,8 @@ tc_dpi_bridge_atomic_enable(struct drm_bridge *bridge,
}
}
-static void
-tc_dpi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void tc_dpi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
int ret;
@@ -1576,9 +1574,8 @@ tc_dpi_bridge_atomic_disable(struct drm_bridge *bridge,
dev_err(tc->dev, "main link stream stop error: %d\n", ret);
}
-static void
-tc_edp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void tc_edp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
int ret;
@@ -1603,9 +1600,8 @@ tc_edp_bridge_atomic_enable(struct drm_bridge *bridge,
}
}
-static void
-tc_edp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void tc_edp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
int ret;
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index 0b4efaca6d68..c89757bec4e6 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -26,7 +26,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#define FLD_VAL(val, start, end) FIELD_PREP(GENMASK(start, end), val)
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/bridge/tda998x_drv.c
index 82d4a4e206a5..ebc758c72891 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/bridge/tda998x_drv.c
@@ -21,10 +21,11 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/i2c/tda998x.h>
#include <media/cec-notifier.h>
+#include <dt-bindings/display/tda998x.h>
+
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
enum {
@@ -1717,10 +1718,10 @@ static int tda998x_get_audio_ports(struct tda998x_priv *priv,
u8 ena_ap = be32_to_cpup(&port_data[2*i+1]);
switch (afmt) {
- case AFMT_I2S:
+ case TDA998x_I2S:
route = AUDIO_ROUTE_I2S;
break;
- case AFMT_SPDIF:
+ case TDA998x_SPDIF:
route = AUDIO_ROUTE_SPDIF;
break;
default:
@@ -1746,44 +1747,6 @@ static int tda998x_get_audio_ports(struct tda998x_priv *priv,
return 0;
}
-static int tda998x_set_config(struct tda998x_priv *priv,
- const struct tda998x_encoder_params *p)
-{
- priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
- (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
- VIP_CNTRL_0_SWAP_B(p->swap_b) |
- (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
- priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
- (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
- VIP_CNTRL_1_SWAP_D(p->swap_d) |
- (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
- priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
- (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
- VIP_CNTRL_2_SWAP_F(p->swap_f) |
- (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
-
- if (p->audio_params.format != AFMT_UNUSED) {
- unsigned int ratio, route;
- bool spdif = p->audio_params.format == AFMT_SPDIF;
-
- route = AUDIO_ROUTE_I2S + spdif;
-
- priv->audio.route = &tda998x_audio_route[route];
- priv->audio.cea = p->audio_params.cea;
- priv->audio.sample_rate = p->audio_params.sample_rate;
- memcpy(priv->audio.status, p->audio_params.status,
- min(sizeof(priv->audio.status),
- sizeof(p->audio_params.status)));
- priv->audio.ena_ap = p->audio_params.config;
- priv->audio.i2s_format = I2S_FORMAT_PHILIPS;
-
- ratio = spdif ? 64 : p->audio_params.sample_width * 2;
- return tda998x_derive_cts_n(priv, &priv->audio, ratio);
- }
-
- return 0;
-}
-
static void tda998x_destroy(struct device *dev)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
@@ -1982,10 +1945,6 @@ static int tda998x_create(struct device *dev)
if (priv->audio_port_enable[AUDIO_ROUTE_I2S] ||
priv->audio_port_enable[AUDIO_ROUTE_SPDIF])
tda998x_audio_codec_init(priv, &client->dev);
- } else if (dev->platform_data) {
- ret = tda998x_set_config(priv, dev->platform_data);
- if (ret)
- goto fail;
}
priv->bridge.funcs = &tda998x_bridge_funcs;
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index eaec70fa42b6..85f2a0e74a1c 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -105,7 +105,7 @@ static const struct regmap_config dlpc_regmap_config = {
};
static void dlpc_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dlpc *dlpc = bridge_to_dlpc(bridge);
struct device *dev = dlpc->dev;
@@ -170,7 +170,7 @@ static void dlpc_atomic_enable(struct drm_bridge *bridge,
}
static void dlpc_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dlpc *dlpc = bridge_to_dlpc(bridge);
int ret;
@@ -193,7 +193,7 @@ static void dlpc_atomic_pre_enable(struct drm_bridge *bridge,
}
static void dlpc_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dlpc *dlpc = bridge_to_dlpc(bridge);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 336380114eea..54ad462d17ef 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -35,12 +35,14 @@
#include <linux/of_graph.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h> /* DRM_MODESET_LOCK_ALL_BEGIN() needs drm_drv_uses_atomic_modeset() */
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -159,6 +161,9 @@ struct sn65dsi83 {
bool lvds_dual_link_even_odd_swap;
int lvds_vod_swing_conf[2];
int lvds_term_conf[2];
+ int irq;
+ struct delayed_work monitor_work;
+ struct work_struct reset_work;
};
static const struct regmap_range sn65dsi83_readable_ranges[] = {
@@ -363,11 +368,110 @@ static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
return dsi_div - 1;
}
+static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83)
+{
+ struct drm_device *dev = sn65dsi83->bridge.dev;
+ struct drm_modeset_acquire_ctx ctx;
+ int err;
+
+ /*
+ * Reset active outputs of the related CRTC.
+ *
+ * This way, the drm core will reconfigure each component in the CRTC
+ * output path. In our case, this forces the previous component back
+ * into LP11 mode and so allows the SN65DSI83 bridge to be
+ * reconfigured.
+ *
+ * Keep the lock during the whole operation to be atomic.
+ */
+
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
+
+ if (!sn65dsi83->bridge.encoder->crtc) {
+ /*
+ * No CRTC attached -> No CRTC active outputs to reset
+ * This can happen when the SN65DSI83 is reset. Simply do
+ * nothing without returning any errors.
+ */
+ err = 0;
+ goto end;
+ }
+
+ dev_warn(sn65dsi83->dev, "reset the pipe\n");
+
+ err = drm_atomic_helper_reset_crtc(sn65dsi83->bridge.encoder->crtc, &ctx);
+
+end:
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
+
+ return err;
+}
+
+static void sn65dsi83_reset_work(struct work_struct *ws)
+{
+ struct sn65dsi83 *ctx = container_of(ws, struct sn65dsi83, reset_work);
+ int ret;
+
+ /* Reset the pipe */
+ ret = sn65dsi83_reset_pipe(ctx);
+ if (ret) {
+ dev_err(ctx->dev, "reset pipe failed %pe\n", ERR_PTR(ret));
+ return;
+ }
+ if (ctx->irq)
+ enable_irq(ctx->irq);
+}
+
+static void sn65dsi83_handle_errors(struct sn65dsi83 *ctx)
+{
+ unsigned int irq_stat;
+ int ret;
+
+ /*
+ * Schedule a reset in case of:
+ * - the bridge doesn't answer
+ * - the bridge signals an error
+ */
+
+ ret = regmap_read(ctx->regmap, REG_IRQ_STAT, &irq_stat);
+ if (ret || irq_stat) {
+ /*
+ * IRQ acknowledged is not always possible (the bridge can be in
+ * a state where it doesn't answer anymore). To prevent an
+ * interrupt storm, disable interrupt. The interrupt will be
+ * after the reset.
+ */
+ if (ctx->irq)
+ disable_irq_nosync(ctx->irq);
+
+ schedule_work(&ctx->reset_work);
+ }
+}
+
+static void sn65dsi83_monitor_work(struct work_struct *work)
+{
+ struct sn65dsi83 *ctx = container_of(to_delayed_work(work),
+ struct sn65dsi83, monitor_work);
+
+ sn65dsi83_handle_errors(ctx);
+
+ schedule_delayed_work(&ctx->monitor_work, msecs_to_jiffies(1000));
+}
+
+static void sn65dsi83_monitor_start(struct sn65dsi83 *ctx)
+{
+ schedule_delayed_work(&ctx->monitor_work, msecs_to_jiffies(1000));
+}
+
+static void sn65dsi83_monitor_stop(struct sn65dsi83 *ctx)
+{
+ cancel_delayed_work_sync(&ctx->monitor_work);
+}
+
static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
const struct drm_bridge_state *bridge_state;
const struct drm_crtc_state *crtc_state;
const struct drm_display_mode *mode;
@@ -535,7 +639,7 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
}
static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
unsigned int pval;
@@ -549,14 +653,32 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
regmap_read(ctx->regmap, REG_IRQ_STAT, &pval);
if (pval)
dev_err(ctx->dev, "Unexpected link status 0x%02x\n", pval);
+
+ if (ctx->irq) {
+ /* Enable irq to detect errors */
+ regmap_write(ctx->regmap, REG_IRQ_GLOBAL, REG_IRQ_GLOBAL_IRQ_EN);
+ regmap_write(ctx->regmap, REG_IRQ_EN, 0xff);
+ } else {
+ /* Use the polling task */
+ sn65dsi83_monitor_start(ctx);
+ }
}
static void sn65dsi83_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
int ret;
+ if (ctx->irq) {
+ /* Disable irq */
+ regmap_write(ctx->regmap, REG_IRQ_EN, 0x0);
+ regmap_write(ctx->regmap, REG_IRQ_GLOBAL, 0x0);
+ } else {
+ /* Stop the polling task */
+ sn65dsi83_monitor_stop(ctx);
+ }
+
/* Put the chip in reset, pull EN line low, and assure 10ms reset low timing. */
gpiod_set_value_cansleep(ctx->enable_gpio, 0);
usleep_range(10000, 11000);
@@ -806,6 +928,14 @@ static int sn65dsi83_host_attach(struct sn65dsi83 *ctx)
return 0;
}
+static irqreturn_t sn65dsi83_irq(int irq, void *data)
+{
+ struct sn65dsi83 *ctx = data;
+
+ sn65dsi83_handle_errors(ctx);
+ return IRQ_HANDLED;
+}
+
static int sn65dsi83_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
@@ -819,6 +949,8 @@ static int sn65dsi83_probe(struct i2c_client *client)
return -ENOMEM;
ctx->dev = dev;
+ INIT_WORK(&ctx->reset_work, sn65dsi83_reset_work);
+ INIT_DELAYED_WORK(&ctx->monitor_work, sn65dsi83_monitor_work);
if (dev->of_node) {
model = (enum sn65dsi83_model)(uintptr_t)
@@ -843,12 +975,21 @@ static int sn65dsi83_probe(struct i2c_client *client)
if (IS_ERR(ctx->regmap))
return dev_err_probe(dev, PTR_ERR(ctx->regmap), "failed to get regmap\n");
+ if (client->irq) {
+ ctx->irq = client->irq;
+ ret = devm_request_threaded_irq(ctx->dev, ctx->irq, NULL, sn65dsi83_irq,
+ IRQF_ONESHOT, dev_name(ctx->dev), ctx);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq\n");
+ }
+
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
ctx->bridge.funcs = &sn65dsi83_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
+ ctx->bridge.type = DRM_MODE_CONNECTOR_LVDS;
drm_bridge_add(&ctx->bridge);
ret = sn65dsi83_host_attach(ctx);
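The error-recovery code added above relies on a standard storm-avoidance pattern: when the bridge may stop acknowledging its interrupt, the handler masks the line with disable_irq_nosync() and defers recovery to a work item, which re-enables the IRQ only after the reset has run. A stripped-down sketch of that shape, with hypothetical names and no hardware access; it assumes only the generic IRQ and workqueue APIs, and that reset_work was set up with INIT_WORK() at probe time, as the hunk above does:

#include <linux/container_of.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_ctx {
	int irq;
	struct work_struct reset_work;
};

static void my_reset_work(struct work_struct *ws)
{
	struct my_ctx *ctx = container_of(ws, struct my_ctx, reset_work);

	/* ... reset the device here; once it is sane again, unmask ... */
	enable_irq(ctx->irq);
}

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_ctx *ctx = data;

	/*
	 * The device may be wedged and unable to ack, so keep the line
	 * masked until the reset work has run; otherwise the interrupt
	 * would fire in a storm.
	 */
	disable_irq_nosync(ctx->irq);
	schedule_work(&ctx->reset_work);

	return IRQ_HANDLED;
}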
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index e4d9006b59f1..ae34585e05b3 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -32,7 +32,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -480,6 +479,7 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
const char *name)
{
struct device *dev = pdata->dev;
+ const struct i2c_client *client = to_i2c_client(dev);
struct auxiliary_device *aux;
int ret;
@@ -488,6 +488,7 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
return -ENOMEM;
aux->name = name;
+ aux->id = (client->adapter->nr << 10) | client->addr;
aux->dev.parent = dev;
aux->dev.release = ti_sn65dsi86_aux_device_release;
device_set_of_node_from_dev(&aux->dev, dev);
@@ -812,7 +813,7 @@ ti_sn_bridge_mode_valid(struct drm_bridge *bridge,
}
static void ti_sn_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -1072,7 +1073,7 @@ exit:
}
static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
struct drm_connector *connector;
@@ -1084,7 +1085,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
int max_dp_lanes;
unsigned int bpp;
- connector = drm_atomic_get_new_connector_for_encoder(old_bridge_state->base.state,
+ connector = drm_atomic_get_new_connector_for_encoder(state,
bridge->encoder);
if (!connector) {
dev_err_ratelimited(pdata->dev, "Could not get the connector\n");
@@ -1163,7 +1164,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void ti_sn_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -1177,7 +1178,7 @@ static void ti_sn_bridge_atomic_pre_enable(struct drm_bridge *bridge,
}
static void ti_sn_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
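The ti-sn65dsi86 change above makes each auxiliary-device id unique per physical chip by packing the parent I2C adapter number and the client address into one integer; client addresses (7-bit, and even 10-bit) fit in the low 10 bits, so they cannot collide with the adapter number. A standalone illustration of the encoding, with made-up bus/address values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical bridge on I2C bus 3, slave address 0x2c. */
	uint32_t adapter_nr = 3;
	uint32_t client_addr = 0x2c;

	/* Same packing as aux->id = (client->adapter->nr << 10) | client->addr */
	uint32_t id = (adapter_nr << 10) | client_addr;

	printf("aux id = 0x%x (%u)\n", id, id);	/* prints 0xc2c (3116) */
	return 0;
}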
diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c
index 3472ed5924e8..22316382451f 100644
--- a/drivers/gpu/drm/bridge/ti-tdp158.c
+++ b/drivers/gpu/drm/bridge/ti-tdp158.c
@@ -18,7 +18,8 @@ struct tdp158 {
struct device *dev;
};
-static void tdp158_enable(struct drm_bridge *bridge, struct drm_bridge_state *prev)
+static void tdp158_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
int err;
struct tdp158 *tdp158 = bridge->driver_private;
@@ -34,7 +35,8 @@ static void tdp158_enable(struct drm_bridge *bridge, struct drm_bridge_state *pr
gpiod_set_value_cansleep(tdp158->enable, 1);
}
-static void tdp158_disable(struct drm_bridge *bridge, struct drm_bridge_state *prev)
+static void tdp158_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tdp158 *tdp158 = bridge->driver_private;
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
index 139b81db6312..19fe01257ab9 100644
--- a/drivers/gpu/drm/ci/build.sh
+++ b/drivers/gpu/drm/ci/build.sh
@@ -132,7 +132,7 @@ fi
# Pass needed files to the test stage
mkdir -p install
cp -rfv .gitlab-ci/* install/.
-cp -rfv ci/* install/.
+cp -rfv bin/ci/* install/.
cp -rfv install/common install/ci-common
cp -rfv drivers/gpu/drm/ci/* install/.
diff --git a/drivers/gpu/drm/ci/build.yml b/drivers/gpu/drm/ci/build.yml
index 9c198239033d..274f118533a7 100644
--- a/drivers/gpu/drm/ci/build.yml
+++ b/drivers/gpu/drm/ci/build.yml
@@ -1,8 +1,7 @@
.build:
extends:
- - .build-rules
- .container+build-rules
- stage: build
+ stage: build-only
artifacts:
paths:
- artifacts
@@ -110,3 +109,104 @@ build-nodebugfs:arm64:
build:x86_64:
extends: .build:x86_64
+
+# Disable build jobs that we won't use
+alpine-build-testing:
+ rules:
+ - when: never
+
+debian-android:
+ rules:
+ - when: never
+
+debian-arm32:
+ rules:
+ - when: never
+
+debian-arm32-asan:
+ rules:
+ - when: never
+
+debian-arm64:
+ rules:
+ - when: never
+
+debian-arm64-asan:
+ rules:
+ - when: never
+
+debian-arm64-build-test:
+ rules:
+ - when: never
+
+debian-arm64-release:
+ rules:
+ - when: never
+
+debian-build-testing:
+ rules:
+ - when: never
+
+debian-clang:
+ rules:
+ - when: never
+
+debian-clang-release:
+ rules:
+ - when: never
+
+debian-no-libdrm:
+ rules:
+ - when: never
+
+debian-ppc64el:
+ rules:
+ - when: never
+
+debian-release:
+ rules:
+ - when: never
+
+debian-s390x:
+ rules:
+ - when: never
+
+debian-testing:
+ rules:
+ - when: never
+
+debian-testing-asan:
+ rules:
+ - when: never
+
+debian-testing-msan:
+ rules:
+ - when: never
+
+debian-vulkan:
+ rules:
+ - when: never
+
+debian-x86_32:
+ rules:
+ - when: never
+
+fedora-release:
+ rules:
+ - when: never
+
+rustfmt:
+ rules:
+ - when: never
+
+shader-db:
+ rules:
+ - when: never
+
+windows-msvc:
+ rules:
+ - when: never
+
+yaml-toml-shell-py-test:
+ rules:
+ - when: never
diff --git a/drivers/gpu/drm/ci/container.yml b/drivers/gpu/drm/ci/container.yml
index 2a94f54ce4cf..07dc13ff865d 100644
--- a/drivers/gpu/drm/ci/container.yml
+++ b/drivers/gpu/drm/ci/container.yml
@@ -24,7 +24,7 @@ alpine/x86_64_build:
rules:
- when: never
-debian/x86_64_test-vk:
+debian/arm64_test-gl:
rules:
- when: never
@@ -32,7 +32,15 @@ debian/arm64_test-vk:
rules:
- when: never
-debian/arm64_test-gl:
+debian/ppc64el_build:
+ rules:
+ - when: never
+
+debian/s390x_build:
+ rules:
+ - when: never
+
+debian/x86_64_test-vk:
rules:
- when: never
@@ -56,14 +64,6 @@ windows_test_msvc:
rules:
- when: never
-.debian/x86_64_build-mingw:
- rules:
- - when: never
-
-rustfmt:
- rules:
- - when: never
-
windows_msvc:
rules:
-  - when: never
\ No newline at end of file
+ - when: never
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index 90bde9f00cc3..f04aabe8327c 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -1,11 +1,11 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha c6a9a9c3bce90923f7700219354e0b6e5a3c9ba6
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 7d3062470f3ccc6cb40540e772e902c7e2248024
UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git
TARGET_BRANCH: drm-next
- IGT_VERSION: a73311079a5d8ac99eb25336a8369a2c3c6b519b
+ IGT_VERSION: 33adea9ebafd059ac88a5ccfec60536394f36c7c
DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git
DEQP_RUNNER_GIT_TAG: v0.20.0
@@ -20,6 +20,9 @@ variables:
rm download-git-cache.sh
set +o xtrace
S3_JWT_FILE: /s3_jwt
+ S3_JWT_FILE_SCRIPT: |-
+ echo -n '${S3_JWT}' > '${S3_JWT_FILE}' &&
+ unset CI_JOB_JWT S3_JWT # Unsetting vulnerable env variables
S3_HOST: s3.freedesktop.org
# This bucket is used to fetch the kernel image
S3_KERNEL_BUCKET: mesa-rootfs
@@ -31,17 +34,14 @@ variables:
PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/${S3_ARTIFACTS_BUCKET}/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
# per-job artifact storage on MinIO
JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
- # default kernel for rootfs before injecting the current kernel tree
- KERNEL_REPO: "gfx-ci/linux"
- KERNEL_TAG: "v6.6.21-mesa-f8ea"
KERNEL_IMAGE_BASE: https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${KERNEL_TAG}
- PKG_REPO_REV: "3cc12a2a"
LAVA_TAGS: subset-1-gfx
LAVA_JOB_PRIORITY: 30
ARTIFACTS_BASE_URL: https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts
# Python scripts for structured logger
PYTHONPATH: "$PYTHONPATH:$CI_PROJECT_DIR/install"
+
default:
id_tokens:
S3_JWT:
@@ -50,16 +50,13 @@ default:
- export SCRIPTS_DIR=$(mktemp -d)
- curl -L -s --retry 4 -f --retry-all-errors --retry-delay 60 -O --output-dir "${SCRIPTS_DIR}" "${DRM_CI_PROJECT_URL}/-/raw/${DRM_CI_COMMIT_SHA}/.gitlab-ci/setup-test-env.sh"
- source ${SCRIPTS_DIR}/setup-test-env.sh
- - echo -e "\e[0Ksection_start:$(date +%s):unset_env_vars_section[collapsed=true]\r\e[0KUnsetting vulnerable environment variables"
- - echo -n "${S3_JWT}" > "${S3_JWT_FILE}"
- - unset CI_JOB_JWT S3_JWT
- - echo -e "\e[0Ksection_end:$(date +%s):unset_env_vars_section\r\e[0K"
+ - eval "$S3_JWT_FILE_SCRIPT"
- echo -e "\e[0Ksection_start:$(date +%s):drm_ci_download_section[collapsed=true]\r\e[0KDownloading mesa from $DRM_CI_PROJECT_URL/-/archive/$DRM_CI_COMMIT_SHA/mesa-$DRM_CI_COMMIT_SHA.tar.gz"
- cd $CI_PROJECT_DIR
- curl --output - $DRM_CI_PROJECT_URL/-/archive/$DRM_CI_COMMIT_SHA/mesa-$DRM_CI_COMMIT_SHA.tar.gz | tar -xz
- mv mesa-$DRM_CI_COMMIT_SHA/.gitlab-ci* .
- - mv mesa-$DRM_CI_COMMIT_SHA/bin/ci .
+ - mv mesa-$DRM_CI_COMMIT_SHA/bin .
- rm -rf mesa-$DRM_CI_COMMIT_SHA/
- echo -e "\e[0Ksection_end:$(date +%s):drm_ci_download_section\r\e[0K"
@@ -71,6 +68,7 @@ default:
export S3_JWT="$(<${S3_JWT_FILE})" &&
rm "${S3_JWT_FILE}"
+
include:
- project: 'freedesktop/ci-templates'
ref: 16bc29078de5e0a067ff84a1a199a3760d3b3811
@@ -85,6 +83,7 @@ include:
- project: *drm-ci-project-path
ref: *drm-ci-commit-sha
file:
+ - '/.gitlab-ci/build/gitlab-ci.yml'
- '/.gitlab-ci/container/gitlab-ci.yml'
- '/.gitlab-ci/farm-rules.yml'
- '/.gitlab-ci/lava/lava-gitlab-ci.yml'
@@ -115,9 +114,10 @@ include:
stages:
- sanity
- container
- - code-validation
- git-archive
- - build
+ - build-for-tests
+ - build-only
+ - code-validation
- amdgpu
- i915
- mediatek
@@ -128,33 +128,27 @@ stages:
- rockchip
- software-driver
+
# YAML anchors for rule conditions
# --------------------------------
.rules-anchors:
rules:
- # Pipeline for forked project branch
- - if: &is-forked-branch '$CI_COMMIT_BRANCH && $CI_PROJECT_NAMESPACE != "mesa"'
- when: manual
- # Forked project branch / pre-merge pipeline not for Marge bot
- - if: &is-forked-branch-or-pre-merge-not-for-marge '$CI_PROJECT_NAMESPACE != "mesa" || ($GITLAB_USER_LOGIN != "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event")'
- when: manual
- # Pipeline runs for the main branch of the upstream Mesa project
- - if: &is-mesa-main '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $CI_COMMIT_BRANCH'
- when: always
- # Post-merge pipeline
- - if: &is-post-merge '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_BRANCH'
- when: on_success
- # Post-merge pipeline, not for Marge Bot
- - if: &is-post-merge-not-for-marge '$CI_PROJECT_NAMESPACE == "mesa" && $GITLAB_USER_LOGIN != "marge-bot" && $CI_COMMIT_BRANCH'
- when: on_success
+ # do not duplicate pipelines on merge pipelines
+ - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS && $CI_PIPELINE_SOURCE == "push"
+ when: never
+ # merge pipeline
+ - if: &is-merge-attempt $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"
+ # post-merge pipeline
+ - if: &is-post-merge $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "push"
# Pre-merge pipeline
- - if: &is-pre-merge '$CI_PIPELINE_SOURCE == "merge_request_event"'
- when: on_success
- # Pre-merge pipeline for Marge Bot
- - if: &is-pre-merge-for-marge '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"'
- when: on_success
+ - if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event"
# Push to a branch on a fork
- - &is-fork-push '$CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"'
+ - if: &is-fork-push $CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"
+ # nightly pipeline
+ - if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule"
+ # pipeline for direct pushes that bypassed the CI
+ - if: &is-direct-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
+
# Rules applied to every job in the pipeline
.common-rules:
@@ -162,42 +156,51 @@ stages:
- if: *is-fork-push
when: manual
+
.never-post-merge-rules:
rules:
- if: *is-post-merge
when: never
-# Rule to filter for only scheduled pipelines.
-.scheduled_pipeline-rules:
- rules:
- - if: &is-scheduled-pipeline '$CI_PIPELINE_SOURCE == "schedule"'
- when: on_success
-
-# Generic rule to not run the job during scheduled pipelines. Jobs that aren't
-# something like a nightly run should include this rule.
-.no_scheduled_pipelines-rules:
- rules:
- - if: *is-scheduled-pipeline
- when: never
-
-# When to automatically run the CI for build jobs
-.build-rules:
- rules:
- - !reference [.no_scheduled_pipelines-rules, rules]
- - !reference [.never-post-merge-rules, rules]
- # Run automatically once all dependency jobs have passed
- - when: on_success
-# When to automatically run the CI for container jobs
.container+build-rules:
rules:
- - !reference [.no_scheduled_pipelines-rules, rules]
+ - !reference [.common-rules, rules]
+ # Run when re-enabling a disabled farm, but not when disabling it
+ - !reference [.disable-farm-mr-rules, rules]
+ # Never run immediately after merging, as we just ran everything
- !reference [.never-post-merge-rules, rules]
+ # Build everything in merge pipelines, if any files affecting the pipeline
+ # were changed
+ - if: *is-merge-attempt
+ changes: &all_paths
+ - drivers/gpu/drm/ci/**/*
+ when: on_success
+ # Same as above, but for pre-merge pipelines
+ - if: *is-pre-merge
+ changes:
+ *all_paths
+ when: manual
+ # Skip everything for pre-merge and merge pipelines which don't change
+ # anything in the build
+ - if: *is-merge-attempt
+ when: never
+ - if: *is-pre-merge
+ when: never
+ # Build everything after someone bypassed the CI
+ - if: *is-direct-push
+ when: on_success
+ # Build everything in scheduled pipelines
+ - if: *is-scheduled-pipeline
+ when: on_success
+ # Allow building everything in fork pipelines, but build nothing unless
+ # manually triggered
- when: manual
+
.ci-deqp-artifacts:
artifacts:
- name: "mesa_${CI_JOB_NAME}"
+ name: "${CI_PROJECT_NAME}_${CI_JOB_NAME}"
when: always
untracked: false
paths:
@@ -208,31 +211,7 @@ stages:
- _build/meson-logs/strace
-.container-rules:
- rules:
- - !reference [.no_scheduled_pipelines-rules, rules]
- - !reference [.never-post-merge-rules, rules]
- # Run pipeline by default in the main project if any CI pipeline
- # configuration files were changed, to ensure docker images are up to date
- - if: *is-post-merge
- changes:
- - drivers/gpu/drm/ci/**/*
- when: on_success
- # Run pipeline by default if it was triggered by Marge Bot, is for a
- # merge request, and any files affecting the pipeline were changed
- - if: *is-pre-merge-for-marge
- when: on_success
- # Run pipeline by default in the main project if it was not triggered by
- # Marge Bot, and any files affecting the pipeline were changed
- - if: *is-post-merge-not-for-marge
- when: on_success
- # Allow triggering jobs manually in other cases
- - when: manual
-
-
-
# Git archive
-
make git archive:
extends:
- .fdo.ci-fairy
@@ -264,30 +243,64 @@ sanity:
rules:
- if: *is-pre-merge
when: on_success
- # Other cases default to never
+ - when: never
variables:
GIT_STRATEGY: none
script:
# ci-fairy check-commits --junit-xml=check-commits.xml
- ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
+ - |
+ set -eu
+ image_tags=(
+ ALPINE_X86_64_LAVA_SSH_TAG
+ CONTAINER_TAG
+ DEBIAN_BASE_TAG
+ DEBIAN_BUILD_TAG
+ DEBIAN_PYUTILS_TAG
+ DEBIAN_TEST_GL_TAG
+ KERNEL_ROOTFS_TAG
+ KERNEL_TAG
+ PKG_REPO_REV
+ )
+ for var in "${image_tags[@]}"
+ do
+ if [ "$(echo -n "${!var}" | wc -c)" -gt 20 ]
+ then
+ echo "$var is too long; please make sure it is at most 20 chars."
+ exit 1
+ fi
+ done
artifacts:
when: on_failure
reports:
junit: check-*.xml
+ tags:
+ - placeholder-job
-# Rules for tests that should not block merging, but should be available to
-# optionally run with the "play" button in the UI in pre-merge non-marge
-# pipelines. This should appear in "extends:" after any includes of
-# test-source-dep.yml rules, so that these rules replace those.
-.test-manual-mr:
+
+mr-label-maker-test:
+ extends:
+ - .fdo.ci-fairy
+ stage: sanity
rules:
- - !reference [.no_scheduled_pipelines-rules, rules]
- - if: *is-forked-branch-or-pre-merge-not-for-marge
- when: manual
+ - !reference [.mr-label-maker-rules, rules]
variables:
- JOB_TIMEOUT: 80
+ GIT_STRATEGY: fetch
+ timeout: 10m
+ script:
+ - set -eu
+ - python3 -m venv .venv
+ - source .venv/bin/activate
+ - pip install git+https://gitlab.freedesktop.org/freedesktop/mr-label-maker
+ - mr-label-maker --dry-run --mr $CI_MERGE_REQUEST_IID
# Jobs that need to pass before spending hardware resources on further testing
.required-for-hardware-jobs:
- needs: []
+ needs:
+ - job: clang-format
+ optional: true
+ - job: rustfmt
+ optional: true
+ - job: toml-lint
+ optional: true
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
index f38836ec837c..68b042e43b7f 100755
--- a/drivers/gpu/drm/ci/igt_runner.sh
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -47,7 +47,7 @@ else
ARCH="x86_64"
fi
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s ${FDO_HTTP_CACHE_URI:-}$PIPELINE_ARTIFACTS_BASE/$ARCH/igt.tar.gz | tar --zstd -v -x -C /
+curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s $PIPELINE_ARTIFACTS_BASE/$ARCH/igt.tar.gz | tar --zstd -v -x -C /
TESTLIST="/igt/libexec/igt-gpu-tools/ci-testlist.txt"
@@ -69,7 +69,7 @@ igt-runner \
run \
--igt-folder /igt/libexec/igt-gpu-tools \
--caselist $TESTLIST \
- --output /results \
+ --output $RESULTS_DIR \
-vvvv \
$IGT_SKIPS \
$IGT_FLAKES \
@@ -80,13 +80,10 @@ set -e
deqp-runner junit \
--testsuite IGT \
- --results /results/failures.csv \
- --output /results/junit.xml \
+ --results $RESULTS_DIR/failures.csv \
+ --output $RESULTS_DIR/junit.xml \
--limit 50 \
- --template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"
-
-# Store the results also in the simpler format used by the runner in ChromeOS CI
-#sed -r 's/(dmesg-warn|pass)/success/g' /results/results.txt > /results/results_simple.txt
+ --template "See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml"
cd $oldpath
exit $ret
diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml
index 8d8b9e71852e..20049f3626b2 100644
--- a/drivers/gpu/drm/ci/image-tags.yml
+++ b/drivers/gpu/drm/ci/image-tags.yml
@@ -1,5 +1,5 @@
variables:
- CONTAINER_TAG: "2024-09-09-uprevs"
+ CONTAINER_TAG: "20250204-mesa-uprev"
DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
@@ -7,9 +7,16 @@ variables:
DEBIAN_BUILD_TAG: "${CONTAINER_TAG}"
KERNEL_ROOTFS_TAG: "${CONTAINER_TAG}"
+ # default kernel for rootfs before injecting the current kernel tree
+ KERNEL_TAG: "v6.13-rc4-mesa-5e77"
+ KERNEL_REPO: "gfx-ci/linux"
+ PKG_REPO_REV: "bca9635d"
DEBIAN_X86_64_TEST_BASE_IMAGE: "debian/x86_64_test-base"
DEBIAN_X86_64_TEST_IMAGE_GL_PATH: "debian/x86_64_test-gl"
DEBIAN_TEST_GL_TAG: "${CONTAINER_TAG}"
- ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}"
\ No newline at end of file
+ DEBIAN_PYUTILS_IMAGE: "debian/x86_64_pyutils"
+ DEBIAN_PYUTILS_TAG: "${CONTAINER_TAG}"
+
+ ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}"
diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh
index 6add15083c78..6e5ac51e8c0a 100755
--- a/drivers/gpu/drm/ci/lava-submit.sh
+++ b/drivers/gpu/drm/ci/lava-submit.sh
@@ -1,58 +1,102 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: MIT
+# shellcheck disable=SC2086 # we want word splitting
+# shellcheck disable=SC1091 # paths only become valid at runtime
-set -e
-set -x
+# If we run in a fork (not from mesa or Marge-bot), reuse the mainline kernel and rootfs if they exist.
+_check_artifact_path() {
+ _url="https://${1}/${2}"
+ if curl -s -o /dev/null -I -L -f --retry 4 --retry-delay 15 "${_url}"; then
+ echo -n "${_url}"
+ fi
+}
-# Try to use the kernel and rootfs built in mainline first, so we're more
-# likely to hit cache
-if curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s "https://${BASE_SYSTEM_MAINLINE_HOST_PATH}/done"; then
- BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_MAINLINE_HOST_PATH}"
-else
- BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_FORK_HOST_PATH}"
-fi
+get_path_to_artifact() {
+ _mainline_artifact="$(_check_artifact_path ${BASE_SYSTEM_MAINLINE_HOST_PATH} ${1})"
+ if [ -n "${_mainline_artifact}" ]; then
+ echo -n "${_mainline_artifact}"
+ return
+ fi
+ _fork_artifact="$(_check_artifact_path ${BASE_SYSTEM_FORK_HOST_PATH} ${1})"
+ if [ -n "${_fork_artifact}" ]; then
+ echo -n "${_fork_artifact}"
+ return
+ fi
+ set +x
+ error "Sorry, I couldn't find a viable built path for ${1} in either mainline or a fork." >&2
+ echo "" >&2
+ echo "If you're working on CI, this probably means that you're missing a dependency:" >&2
+ echo "this job ran ahead of the job which was supposed to upload that artifact." >&2
+ echo "" >&2
+ echo "If you aren't working on CI, please ping @mesa/ci-helpers to see if we can help." >&2
+ echo "" >&2
+ echo "This job is going to fail, because I can't find the resources I need. Sorry." >&2
+ set -x
+ exit 1
+}
+
+. "${SCRIPTS_DIR}/setup-test-env.sh"
+
+section_start prepare_rootfs "Preparing root filesystem"
+
+set -ex
+
+section_switch rootfs "Assembling root filesystem"
+ROOTFS_URL="$(get_path_to_artifact lava-rootfs.tar.zst)"
+[ $? != 1 ] || exit 1
rm -rf results
mkdir -p results/job-rootfs-overlay/
-cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/
+artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh
cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
-cp artifacts/ci-common/intel-gpu-freq.sh results/job-rootfs-overlay/
cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
-# Prepare env vars for upload.
-section_start variables "Variables passed through:"
-KERNEL_IMAGE_BASE="https://${BASE_SYSTEM_HOST_PATH}" \
- artifacts/ci-common/generate-env.sh | tee results/job-rootfs-overlay/set-job-env-vars.sh
-section_end variables
-
tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
ci-fairy s3cp --token-file "${S3_JWT_FILE}" job-rootfs-overlay.tar.gz "https://${JOB_ROOTFS_OVERLAY_PATH}"
+# Prepare env vars for upload.
+section_switch variables "Environment variables passed through to device:"
+cat results/job-rootfs-overlay/set-job-env-vars.sh
+
+section_switch lava_submit "Submitting job for scheduling"
+
touch results/lava.log
tail -f results/lava.log &
-
PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
- submit \
+ --farm "${FARM}" \
+ --device-type "${DEVICE_TYPE}" \
+ --boot-method "${BOOT_METHOD}" \
+ --job-timeout-min $((CI_JOB_TIMEOUT/60 - 5)) \
--dump-yaml \
--pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
- --rootfs-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
+ --rootfs-url "${ROOTFS_URL}" \
--kernel-url-prefix "https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}" \
- --build-url "${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/kernel-files.tar.zst" \
- --job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \
- --job-timeout-min ${JOB_TIMEOUT:-80} \
+ --kernel-external "${EXTERNAL_KERNEL_TAG}" \
--first-stage-init artifacts/ci-common/init-stage1.sh \
- --ci-project-dir "${CI_PROJECT_DIR}" \
- --device-type "${DEVICE_TYPE}" \
- --farm "${FARM}" \
--dtb-filename "${DTB}" \
--jwt-file "${S3_JWT_FILE}" \
--kernel-image-name "${KERNEL_IMAGE_NAME}" \
--kernel-image-type "${KERNEL_IMAGE_TYPE}" \
- --boot-method "${BOOT_METHOD}" \
--visibility-group "${VISIBILITY_GROUP}" \
--lava-tags "${LAVA_TAGS}" \
--mesa-job-name "$CI_JOB_NAME" \
--structured-log-file "results/lava_job_detail.json" \
--ssh-client-image "${LAVA_SSH_CLIENT_IMAGE}" \
+ --project-name "${CI_PROJECT_NAME}" \
+ --starting-section "${CURRENT_SECTION}" \
+ --job-submitted-at "${CI_JOB_STARTED_AT}" \
+ - append-overlay \
+ --name=kernel-build \
+ --url="${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/kernel-files.tar.zst" \
+ --compression=zstd \
+ --path="${CI_PROJECT_DIR}" \
+ --format=tar \
+ - append-overlay \
+ --name=job-overlay \
+ --url="https://${JOB_ROOTFS_OVERLAY_PATH}" \
+ --compression=gz \
+ --path="/" \
+ --format=tar \
+ - submit \
>> results/lava.log
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index f0ef60c8f56d..6a1e059858e5 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -1,21 +1,16 @@
-.test-rules:
- rules:
- - if: '$FD_FARM == "offline" && $RUNNER_TAG =~ /^google-freedreno-/'
- when: never
- - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
- when: never
- - !reference [.no_scheduled_pipelines-rules, rules]
- - when: on_success
-
.lava-test:
extends:
- - .test-rules
+ - .container+build-rules
timeout: "1h30m"
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ - !reference [.collabora-farm-rules, rules]
+ - when: on_success
script:
# Note: Build dir (and thus install) may be dirty due to GIT_STRATEGY
- rm -rf install
- tar -xf artifacts/install.tar
- - mv install/* artifacts/.
+ - mv -n install/* artifacts/.
# Override it with our lava-submit.sh script
- ./artifacts/lava-submit.sh
@@ -32,6 +27,7 @@
- alpine/x86_64_lava_ssh_client
- kernel+rootfs_arm32
- debian/x86_64_build
+ - python-artifacts
- testing:arm32
- igt:arm32
@@ -48,6 +44,7 @@
- alpine/x86_64_lava_ssh_client
- kernel+rootfs_arm64
- debian/x86_64_build
+ - python-artifacts
- testing:arm64
- igt:arm64
@@ -64,6 +61,7 @@
- alpine/x86_64_lava_ssh_client
- kernel+rootfs_x86_64
- debian/x86_64_build
+ - python-artifacts
- testing:x86_64
- igt:x86_64
@@ -71,8 +69,11 @@
extends:
- .baremetal-test-arm64
- .use-debian/baremetal_arm64_test
- - .test-rules
timeout: "1h30m"
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ - !reference [.google-freedreno-farm-rules, rules]
+ - when: on_success
variables:
FDO_CI_CONCURRENT: 10
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
@@ -441,20 +442,20 @@ panfrost:g12b:
virtio_gpu:none:
stage: software-driver
timeout: "1h30m"
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ - when: on_success
variables:
CROSVM_GALLIUM_DRIVER: llvmpipe
DRIVER_NAME: virtio_gpu
GPU_VERSION: none
extends:
- .test-gl
- - .test-rules
tags:
- kvm
script:
- ln -sf $CI_PROJECT_DIR/install /install
- mv install/bzImage /lava-files/bzImage
- - mkdir -p $CI_PROJECT_DIR/results
- - ln -sf $CI_PROJECT_DIR/results /results
- install/crosvm-runner.sh install/igt_runner.sh
needs:
- debian/x86_64_test-gl
@@ -464,20 +465,20 @@ virtio_gpu:none:
vkms:none:
stage: software-driver
timeout: "1h30m"
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ - when: on_success
variables:
DRIVER_NAME: vkms
GPU_VERSION: none
extends:
- .test-gl
- - .test-rules
tags:
- kvm
script:
- ln -sf $CI_PROJECT_DIR/install /install
- mv install/bzImage /lava-files/bzImage
- mkdir -p /lib/modules
- - mkdir -p $CI_PROJECT_DIR/results
- - ln -sf $CI_PROJECT_DIR/results /results
- ./install/crosvm-runner.sh ./install/igt_runner.sh
needs:
- debian/x86_64_test-gl
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
index f44dbce3151a..75374085f40f 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
@@ -14,10 +14,16 @@ amdgpu/amd_plane@mpo-scale-nv12,Fail
amdgpu/amd_plane@mpo-scale-p010,Fail
amdgpu/amd_plane@mpo-scale-rgb,Crash
amdgpu/amd_plane@mpo-swizzle-toggle,Fail
-amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Fail
+amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Crash
kms_addfb_basic@bad-pitch-65536,Fail
kms_addfb_basic@bo-too-small,Fail
kms_addfb_basic@too-high,Fail
+kms_async_flips@alternate-sync-async-flip,Fail
+kms_async_flips@alternate-sync-async-flip-atomic,Fail
+kms_async_flips@test-cursor,Fail
+kms_async_flips@test-cursor-atomic,Fail
+kms_async_flips@test-time-stamp,Fail
+kms_async_flips@test-time-stamp-atomic,Fail
kms_atomic_transition@plane-all-modeset-transition-internal-panels,Fail
kms_atomic_transition@plane-all-transition,Fail
kms_atomic_transition@plane-all-transition-nonblocking,Fail
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
index e70bd9d447ca..adffb011298a 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
@@ -3,13 +3,6 @@
# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-kms_async_flips@async-flip-with-page-flip-events
-
-# Board Name: hp-11A-G6-EE-grunt
-# Bug Report: https://lore.kernel.org/amd-gfx/3542730f-b8d7-404d-a947-b7a5e95d661c@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
kms_async_flips@crc
# Board Name: hp-11A-G6-EE-grunt
@@ -25,3 +18,17 @@ kms_plane@pixel-format-source-clamping
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc2
kms_async_flips@async-flip-with-page-flip-events
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://gitlab.freedesktop.org/drm/amd/-/issues/3835
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_async_flips@async-flip-with-page-flip-events-atomic
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://gitlab.freedesktop.org/drm/amd/-/issues/3836
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_async_flips@crc-atomic
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
index f41b3e112976..3879c4812a22 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
@@ -32,3 +32,8 @@ amdgpu/amd_pci_unplug@amdgpu_hotunplug_with_exported_bo
amdgpu/amd_pci_unplug@amdgpu_hotunplug_with_exported_fence
amdgpu/amd_vrr_range@freesync-parsing
device_reset.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
index 0907cb0f6d9e..a29cea4f234c 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
@@ -1,16 +1,14 @@
core_setmaster@master-drop-set-shared-fd,Fail
-core_setmaster@master-drop-set-user,Fail
-core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
-kms_fb_coherency@memset-crc,Crash
-kms_flip@busy-flip,Timeout
-kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
+kms_async_flips@test-time-stamp,Timeout
+kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
+kms_flip@dpms-off-confusion-interruptible,Timeout
+kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@@ -33,20 +31,12 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
-kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout
kms_lease@lease-uevent,Fail
+kms_lease@page-flip-implicit-plane,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
-kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
-kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
-kms_prop_blob@invalid-set-prop,Fail
-kms_rotation_crc@primary-rotation-180,Timeout
-kms_vblank@query-forked-hang,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
index 0207c9807bee..fee246c25b9a 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
@@ -53,3 +53,17 @@ kms_pm_rpm@modeset-lpsp-stress
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc2
kms_pm_rpm@drm-resources-equal
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13346
+# Failure Rate: 60
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+i915_hangman@gt-engine-error
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13345
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+gen9_exec_parse@unaligned-access
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
index 5186ba3dbbc6..2ef1dc35a7fa 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
@@ -20,6 +20,7 @@ i915_pm_rc6_residency.*
i915_suspend.*
kms_scaling_modes.*
i915_pm_rpm.*
+i915_module_load@reload-with-fault-injection
# Kernel panic
drm_fdinfo.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
index 64772fedaed5..ee11999e3da1 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
@@ -1,3 +1,4 @@
+core_setmaster@master-drop-set-user,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -8,13 +9,13 @@ kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
-kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
@@ -29,13 +30,10 @@ kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
kms_pm_backlight@basic-brightness,Fail
+kms_pm_backlight@brightness-with-dpms,Crash
kms_pm_backlight@fade,Fail
kms_pm_backlight@fade-with-dpms,Fail
-kms_pm_rpm@legacy-planes,Timeout
-kms_pm_rpm@legacy-planes-dpms,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
kms_sysfs_edid_timing,Fail
perf@i915-ref-count,Fail
perf@non-zero-reason,Timeout
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
index f352b719cf7d..47b3f1d42bb6 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
@@ -1,5 +1,4 @@
core_setmaster@master-drop-set-shared-fd,Fail
-core_setmaster@master-drop-set-user,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -9,9 +8,8 @@ i915_pipe_stress@stress-xrgb8888-ytiled,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-i915_pm_rps@engine-order,Fail
-kms_big_fb@linear-16bpp-rotate-180,Timeout
-kms_fb_coherency@memset-crc,Crash
+kms_async_flips@test-time-stamp,Timeout
+kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
kms_flip@busy-flip,Timeout
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
@@ -36,25 +34,29 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_lease@lease-uevent,Fail
+kms_lease@page-flip-implicit-plane,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
-kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
kms_psr2_sf@cursor-plane-update-sf,Fail
kms_psr2_sf@overlay-plane-update-continuous-sf,Fail
kms_psr2_sf@overlay-plane-update-sf-dmg-area,Fail
kms_psr2_sf@overlay-primary-update-sf-dmg-area,Fail
kms_psr2_sf@plane-move-sf-dmg-area,Fail
+kms_psr2_sf@pr-cursor-plane-update-sf,Timeout
kms_psr2_sf@primary-plane-update-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb,Fail
+kms_psr2_sf@psr2-cursor-plane-update-sf,Fail
+kms_psr2_sf@psr2-overlay-plane-update-continuous-sf,Fail
+kms_psr2_sf@psr2-overlay-plane-update-sf-dmg-area,Fail
+kms_psr2_sf@psr2-overlay-primary-update-sf-dmg-area,Fail
+kms_psr2_sf@psr2-plane-move-sf-dmg-area,Fail
+kms_psr2_sf@psr2-primary-plane-update-sf-dmg-area,Fail
+kms_psr2_sf@psr2-primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_su@page_flip-NV12,Fail
-kms_rotation_crc@primary-rotation-180,Timeout
+kms_psr2_su@page_flip-P010,Fail
kms_setmode@basic,Fail
-kms_vblank@query-forked-hang,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
index d8401251e5f4..5343cc1c8696 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
@@ -25,3 +25,10 @@ kms_plane_alpha_blend@constant-alpha-min
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc2
kms_async_flips@crc
+
+# Board Name: asus-C436FA-Flip-hatch
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13347
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+gen9_exec_parse@unaligned-access
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
index 9d753d97c9ab..c87ff8b40e99 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
@@ -20,6 +20,7 @@ xe_module_load.*
api_intel_allocator.*
kms_cursor_legacy.*
i915_pm_rpm.*
+kms_flip.*
# Kernel panic
drm_fdinfo.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
index 6eb64c672f7d..843c363b42f5 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
@@ -1,61 +1,35 @@
+core_setmaster@master-drop-set-shared-fd,Fail
core_setmaster@master-drop-set-user,Fail
-core_setmaster_vs_auth,Fail
+gen9_exec_parse@unaligned-access,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
kms_dirtyfb@default-dirtyfb-ioctl,Fail
kms_dirtyfb@drrs-dirtyfb-ioctl,Fail
kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
-kms_flip@blocking-wf_vblank,Fail
-kms_flip@busy-flip,Timeout
-kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
-kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
-kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
-kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
-kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
-kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
-kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
-kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout
-kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_frontbuffer_tracking@fbcdrrs-tiling-linear,Fail
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
-kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
-kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
-kms_pm_rpm@legacy-planes,Timeout
-kms_pm_rpm@legacy-planes-dpms,Timeout
-kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
-kms_prop_blob@invalid-set-prop,Fail
-kms_rotation_crc@multiplane-rotation,Fail
-kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
-kms_rotation_crc@primary-rotation-180,Timeout
-kms_vblank@query-forked-hang,Timeout
perf@non-zero-reason,Timeout
sysfs_heartbeat_interval@long,Timeout
-sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
xe_module_load@force-load,Fail
@@ -63,4 +37,3 @@ xe_module_load@load,Fail
xe_module_load@many-reload,Fail
xe_module_load@reload,Fail
xe_module_load@reload-no-display,Fail
-core_setmaster@master-drop-set-shared-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
index 077886b76093..2a909c657343 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
@@ -11,3 +11,10 @@ core_hotunplug@unplug-rescan
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
kms_fb_coherency@memset-crc
+
+# Board Name: hp-x360-12b-ca0010nr-n4020-octopus
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13348
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_flip@dpms-off-confusion-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
index 9c64146aed90..219ae839323a 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
@@ -27,3 +27,301 @@ perf.*
# Kernel panic
drm_fdinfo.*
kms_plane_alpha_blend.*
+
+kms_async_flips.*
+# [IGT] kms_async_flips: executing
+# ------------[ cut here ]------------
+# i915 0000:00:02.0: [drm] drm_WARN_ON(intel_dp->pps.vdd_wakeref)
+# WARNING: CPU: 0 PID: 1253 at drivers/gpu/drm/i915/display/intel_pps.c:760 intel_pps_vdd_on_unlocked+0x351/0x360
+# Modules linked in:
+# CPU: 0 UID: 0 PID: 1253 Comm: kms_async_flips Tainted: G W 6.13.0-rc2-ge95c88d68ac3 #1
+# Tainted: [W]=WARN
+# Hardware name: HP Bloog/Bloog, BIOS 09/19/2019
+# RIP: 0010:intel_pps_vdd_on_unlocked+0x351/0x360
+# Code: 8b 77 50 4d 85 f6 75 03 4c 8b 37 e8 19 98 03 00 48 c7 c1 10 d3 61 84 4c 89 f2 48 c7 c7 67 56 69 84 48 89 c6 e8 a0 22 63 ff 90 <0f> 0b 90 90 e9 5f fd ff ff e8 81 94 69 00 90 90 90 90 90 90 90 90
+# RSP: 0018:ffff9573c0dcfad8 EFLAGS: 00010286
+# RAX: 0000000000000000 RBX: ffff93498148e238 RCX: 00000000ffffdfff
+# RDX: 0000000000000000 RSI: ffff9573c0dcf988 RDI: 0000000000000001
+# RBP: ffff934980d80000 R08: 0000000000009ffb R09: 00000000ffffdfff
+# R10: 00000000ffffdfff R11: ffffffff8488c8a0 R12: ffff934980d80d68
+# R13: 0000000000000000 R14: ffff93498093c9b0 R15: 0000000000000000
+# FS: 00007fbf1f4486c0(0000) GS:ffff9349fbc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000055f260419028 CR3: 0000000106594000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# intel_pps_vdd_on+0x78/0x150
+# intel_dp_detect+0xb9/0x7c0
+# drm_helper_probe_detect+0x47/0xb0
+# drm_helper_probe_single_connector_modes+0x40b/0x660
+# drm_mode_getconnector+0x369/0x490
+# drm_ioctl_kernel+0xad/0x110
+# drm_ioctl+0x235/0x4b0
+# __x64_sys_ioctl+0x8f/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7fbf208e4cdb
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffe061f6c60 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fbf208e4cdb
+# RDX: 00007ffe061f6d00 RSI: 00000000c05064a7 RDI: 0000000000000003
+# RBP: 00007ffe061f6d00 R08: 0000000000000007 R09: 000055f260410cb0
+# R10: 650094cf40322201 R11: 0000000000000246 R12: 00000000c05064a7
+# R13: 0000000000000003 R14: 00000000c05064a7 R15: 00007ffe061f6d00
+# </TASK>
+# irq event stamp: 0
+# hardirqs last enabled at (0): [<0000000000000000>] 0x0
+# hardirqs last disabled at (0): [<ffffffff82d0a283>] copy_process+0xaf3/0x2eb0
+# softirqs last enabled at (0): [<ffffffff82d0a283>] copy_process+0xaf3/0x2eb0
+# softirqs last disabled at (0): [<0000000000000000>] 0x0
+# ---[ end trace 0000000000000000 ]---
+# i915 0000:00:02.0: [drm] *ERROR* PPS state mismatch
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* Failed to read DPCD register 0x92
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# [IGT] kms_async_flips: starting subtest test-time-stamp
+# i915 0000:00:02.0: [drm] *ERROR* [CRTC:70:pipe A] flip_done timed out
+# i915 0000:00:02.0: [drm] *ERROR* flip_done timed out
+# i915 0000:00:02.0: [drm] *ERROR* [CRTC:70:pipe A] commit wait timed out
+# INFO: task kms_async_flips:1253 blocked for more than 122 seconds.
+# Tainted: G W 6.13.0-rc2-ge95c88d68ac3 #1
+# "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+# task:kms_async_flips state:D stack:12720 pid:1253 tgid:1253 ppid:164 flags:0x00004006
+# Call Trace:
+# <TASK>
+# __schedule+0x495/0xd20
+# schedule+0x35/0x120
+# drm_vblank_work_flush+0x91/0xf0
+# intel_wait_for_vblank_workers+0x58/0x90
+# intel_atomic_commit_tail+0xa86/0x15f0
+# intel_atomic_commit+0x257/0x2a0
+# drm_atomic_commit+0xac/0xe0
+# set_property_atomic+0x91/0x160
+# drm_mode_obj_set_property_ioctl+0xc1/0x110
+# drm_ioctl_kernel+0xad/0x110
+# drm_ioctl+0x235/0x4b0
+# __x64_sys_ioctl+0x8f/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7fbf208e4cdb
+# RSP: 002b:00007ffe061f6c50 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 000055f24e272330 RCX: 00007fbf208e4cdb
+# RDX: 00007ffe061f6ce0 RSI: 00000000c01864ba RDI: 0000000000000003
+# RBP: 00007ffe061f6ce0 R08: 0000000000000000 R09: 000055f2604184f0
+# R10: 00007fbf209ca310 R11: 0000000000000246 R12: 00000000c01864ba
+# R13: 0000000000000003 R14: 0000000000000000 R15: 000055f26040ab90
+# </TASK>
+# INFO: lockdep is turned off.
+
+kms_lease.*
+# [IGT] kms_lease: executing
+# ------------[ cut here ]------------
+# i915 0000:00:02.0: [drm] drm_WARN_ON(intel_dp->pps.vdd_wakeref)
+# WARNING: CPU: 1 PID: 1271 at drivers/gpu/drm/i915/display/intel_pps.c:760 intel_pps_vdd_on_unlocked+0x351/0x360
+# Modules linked in:
+# CPU: 1 UID: 0 PID: 1271 Comm: kms_lease Tainted: G W 6.13.0-rc2-gfb6fd142a8eb #1
+# Tainted: [W]=WARN
+# Hardware name: HP Bloog/Bloog, BIOS 09/19/2019
+# RIP: 0010:intel_pps_vdd_on_unlocked+0x351/0x360
+# Code: 8b 77 50 4d 85 f6 75 03 4c 8b 37 e8 19 98 03 00 48 c7 c1 10 d3 61 a1 4c 89 f2 48 c7 c7 67 56 69 a1 48 89 c6 e8 a0 22 63 ff 90 <0f> 0b 90 90 e9 5f fd ff ff e8 81 94 69 00 90 90 90 90 90 90 90 90
+# RSP: 0018:ffffb26500ddfad8 EFLAGS: 00010286
+# RAX: 0000000000000000 RBX: ffff988580dc6238 RCX: 00000000ffffdfff
+# RDX: 0000000000000000 RSI: ffffb26500ddf988 RDI: 0000000000000001
+# RBP: ffff988580d90000 R08: 0000000000009ffb R09: 00000000ffffdfff
+# R10: 00000000ffffdfff R11: ffffffffa188c8a0 R12: ffff988580d90d68
+# R13: 0000000000000000 R14: ffff98858093c9b0 R15: 0000000000000000
+# FS: 00007f6b0c60d6c0(0000) GS:ffff9885fbd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000055eea27460a8 CR3: 0000000105ac6000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# intel_pps_vdd_on+0x78/0x150
+# intel_dp_detect+0xb9/0x7c0
+# drm_helper_probe_detect+0x47/0xb0
+# drm_helper_probe_single_connector_modes+0x40b/0x660
+# drm_mode_getconnector+0x369/0x490
+# drm_ioctl_kernel+0xad/0x110
+# drm_ioctl+0x235/0x4b0
+# __x64_sys_ioctl+0x8f/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f6b0d637c5b
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffc42b13670 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f6b0d637c5b
+# RDX: 00007ffc42b13710 RSI: 00000000c05064a7 RDI: 0000000000000003
+# RBP: 00007ffc42b13710 R08: 0000000000000007 R09: 000055eea2741720
+# R10: 9fb9be4e118bfa43 R11: 0000000000000246 R12: 00000000c05064a7
+# R13: 0000000000000003 R14: 00000000c05064a7 R15: 00007ffc42b13710
+# </TASK>
+# irq event stamp: 0
+# hardirqs last enabled at (0): [<0000000000000000>] 0x0
+# hardirqs last disabled at (0): [<ffffffff9fd0a283>] copy_process+0xaf3/0x2eb0
+# softirqs last enabled at (0): [<ffffffff9fd0a283>] copy_process+0xaf3/0x2eb0
+# softirqs last disabled at (0): [<0000000000000000>] 0x0
+# ---[ end trace 0000000000000000 ]---
+# i915 0000:00:02.0: [drm] *ERROR* PPS state mismatch
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* Failed to read DPCD register 0x92
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# [IGT] kms_lease: starting subtest page-flip-implicit-plane
+# [IGT] kms_lease: starting dynamic subtest pipe-A-eDP-1
+# i915 0000:00:02.0: [drm] *ERROR* [CRTC:70:pipe A] flip_done timed out
+# i915 0000:00:02.0: [drm] *ERROR* flip_done timed out
+# i915 0000:00:02.0: [drm] *ERROR* [CRTC:70:pipe A] commit wait timed out
+# INFO: task kms_lease:1271 blocked for more than 122 seconds.
+# Tainted: G W 6.13.0-rc2-gfb6fd142a8eb #1
+# "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+# task:kms_lease state:D stack:12720 pid:1271 tgid:1271 ppid:184 flags:0x00004006
+# Call Trace:
+# <TASK>
+# __schedule+0x495/0xd20
+# schedule+0x35/0x120
+# drm_vblank_work_flush+0x91/0xf0
+# intel_wait_for_vblank_workers+0x58/0x90
+# intel_atomic_commit_tail+0xa86/0x15f0
+# intel_atomic_commit+0x257/0x2a0
+# drm_atomic_commit+0xac/0xe0
+# set_property_atomic+0x91/0x160
+# drm_mode_obj_set_property_ioctl+0xc1/0x110
+# drm_ioctl_kernel+0xad/0x110
+# drm_ioctl+0x235/0x4b0
+# __x64_sys_ioctl+0x8f/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f6b0d637c5b
+# RSP: 002b:00007ffc42b13650 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 00007ffc42b14270 RCX: 00007f6b0d637c5b
+# RDX: 00007ffc42b136e0 RSI: 00000000c01864ba RDI: 0000000000000003
+# RBP: 00007ffc42b136e0 R08: 0000000000000000 R09: 000055eea2741750
+# R10: 00007f6b0d71d310 R11: 0000000000000246 R12: 00000000c01864ba
+# R13: 0000000000000003 R14: 0000000000000000 R15: 000055eea27378d0
+# </TASK>
+# INFO: lockdep is turned off.
+
+# The test alternates between failing and timing out on reruns, causing the pipeline to fail
+gen9_exec_parse@unaligned-access
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
index ed9f7b576843..0e08fff741aa 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
@@ -1,15 +1,16 @@
-core_setmaster@master-drop-set-user,Fail
+drm_fdinfo@busy-check-all,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-i915_pm_rpm@gem-execbuf-stress,Timeout
-i915_pm_rpm@module-reload,Fail
-kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@dpms-off-confusion,Fail
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset,Fail
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
@@ -17,6 +18,7 @@ kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
@@ -26,19 +28,15 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb565-draw-blt,Timeout
kms_lease@lease-uevent,Fail
-kms_pm_rpm@legacy-planes,Timeout
-kms_pm_rpm@legacy-planes-dpms,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
+kms_rotation_crc@bad-pixel-format,Fail
kms_rotation_crc@multiplane-rotation,Fail
kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
perf@i915-ref-count,Fail
-perf_pmu@busy-accuracy-50,Fail
perf_pmu@module-unload,Fail
-perf_pmu@most-busy-idle-check-all,Fail
perf_pmu@rc6,Crash
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt
index 5c3ef4486b9d..8b2aefd1cc52 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt
@@ -1,13 +1,6 @@
# Board Name: acer-cb317-1h-c3z6-dedede
-# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12475
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13349
# Failure Rate: 100
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.12.0-rc1
-kms_flip@flip-vs-panning-interruptible
-
-# Board Name: acer-cb317-1h-c3z6-dedede
-# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12476
-# Failure Rate: 100
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.12.0-rc1
-kms_universal_plane@cursor-fb-leak
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_flip@flip-vs-fences-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
index 6ec2f83ffe13..dc722d6a774e 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
@@ -37,3 +37,115 @@ tools_test.*
# GPU hang
sysfs_timeslice_.*
sysfs_heartbeat_.*
+
+device_reset@unbind-reset-rebind
+# [IGT] device_reset: starting subtest unbind-reset-rebind
+# i915 0000:00:02.0: [drm] Found kabylake (device ID 5917) display version 9.00 stepping C0
+# i915 0000:00:02.0: vgaarb: deactivate vga console
+# i915 0000:00:02.0: [drm] *ERROR* DPLL0 not locked
+# ------------[ cut here ]------------
+# i915 0000:00:02.0: [drm] drm_WARN_ON((val & (1 << 30)) == 0)
+# WARNING: CPU: 4 PID: 472 at drivers/gpu/drm/i915/display/intel_cdclk.c:944 skl_get_cdclk+0x1ca/0x360
+# Modules linked in:
+# CPU: 4 UID: 0 PID: 472 Comm: device_reset Not tainted 6.13.0-rc2-ge95c88d68ac3 #1
+# Hardware name: HP Nami/Nami, BIOS 09/19/2019
+# RIP: 0010:skl_get_cdclk+0x1ca/0x360
+# Code: 67 50 4d 85 e4 0f 84 89 01 00 00 e8 e0 16 13 00 48 c7 c1 a0 71 5f 9a 4c 89 e2 48 c7 c7 67 56 69 9a 48 89 c6 e8 67 a1 72 ff 90 <0f> 0b 90 90 8b 43 08 8b 53 04 89 43 0c 89 03 85 d2 0f 85 26 ff ff
+# RSP: 0018:ffffacba40f5fbd0 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: ffff953182571058 RCX: 00000000ffffdfff
+# RDX: 0000000000000000 RSI: ffffacba40f5fa80 RDI: 0000000000000001
+# RBP: ffff953182570d68 R08: 0000000000009ffb R09: 00000000ffffdfff
+# R10: 00000000ffffdfff R11: ffffffff9a88c8a0 R12: ffff953180d3f4a0
+# R13: 0000000000000000 R14: ffff953182570d68 R15: 0000000000000000
+# FS: 00007f74989c46c0(0000) GS:ffff9534eed00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f1df530eff8 CR3: 0000000102302002 CR4: 00000000003706f0
+# DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+# DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+# Call Trace:
+# <TASK>
+# intel_update_cdclk+0x1c/0x90
+# intel_cdclk_init_hw+0x46e/0x4c0
+# intel_power_domains_init_hw+0x3eb/0x770
+# intel_display_driver_probe_noirq+0x85/0x230
+# i915_driver_probe+0x66d/0xc60
+# local_pci_probe+0x3a/0x90
+# pci_device_probe+0xb5/0x180
+# really_probe+0xc9/0x2b0
+# __driver_probe_device+0x6e/0x110
+# device_driver_attach+0x42/0xa0
+# bind_store+0x71/0xc0
+# kernfs_fop_write_iter+0x121/0x1c0
+# vfs_write+0x2a8/0x530
+# ksys_write+0x6f/0xf0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f7499e662c0
+# Code: 40 00 48 8b 15 41 9b 0d 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b7 0f 1f 00 80 3d 21 23 0e 00 00 74 17 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 58 c3 0f 1f 80 00 00 00 00 48 83 ec 28 48 89
+# RSP: 002b:00007ffffaf36a68 EFLAGS: 00000202 ORIG_RAX: 0000000000000001
+# RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f7499e662c0
+# RDX: 000000000000000c RSI: 00007ffffaf36b50 RDI: 0000000000000003
+# RBP: 000000000000000c R08: 0000000000000007 R09: 000055b37d0964a0
+# R10: 0000000000000000 R11: 0000000000000202 R12: 00007ffffaf36b50
+# R13: 0000000000000003 R14: 000055b3526a7220 R15: 00007f749a0dc020
+# </TASK>
+# irq event stamp: 28591
+# hardirqs last enabled at (28625): [<ffffffff98da3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (28658): [<ffffffff98da3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (28688): [<ffffffff98d17980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (28701): [<ffffffff98d17bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# ------------[ cut here ]------------
+# i915 0000:00:02.0: [drm] drm_WARN_ON((val & (1 << 30)) == 0)
+# WARNING: CPU: 1 PID: 472 at drivers/gpu/drm/i915/display/intel_cdclk.c:944 skl_get_cdclk+0x1ca/0x360
+# Modules linked in:
+# CPU: 1 UID: 0 PID: 472 Comm: device_reset Tainted: G W 6.13.0-rc2-ge95c88d68ac3 #1
+# Tainted: [W]=WARN
+# Hardware name: HP Nami/Nami, BIOS 09/19/2019
+# RIP: 0010:skl_get_cdclk+0x1ca/0x360
+# Code: 67 50 4d 85 e4 0f 84 89 01 00 00 e8 e0 16 13 00 48 c7 c1 a0 71 5f 9a 4c 89 e2 48 c7 c7 67 56 69 9a 48 89 c6 e8 67 a1 72 ff 90 <0f> 0b 90 90 8b 43 08 8b 53 04 89 43 0c 89 03 85 d2 0f 85 26 ff ff
+# RSP: 0018:ffffacba40f5fb98 EFLAGS: 00010286
+# RAX: 0000000000000000 RBX: ffffacba40f5fbbc RCX: 00000000ffffdfff
+# RDX: 0000000000000000 RSI: ffffacba40f5fa48 RDI: 0000000000000001
+# RBP: ffff953182570d68 R08: 0000000000009ffb R09: 00000000ffffdfff
+# R10: 00000000ffffdfff R11: ffffffff9a88c8a0 R12: ffff953180d3f4a0
+# R13: ffff953182573208 R14: ffff953182570d68 R15: 0000000000000000
+# FS: 00007f74989c46c0(0000) GS:ffff9534eec40000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f2b6598ee00 CR3: 0000000102302001 CR4: 00000000003706f0
+# DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+# DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+# Call Trace:
+# <TASK>
+# gen9_disable_dc_states+0x86/0x300
+# intel_power_well_enable+0x56/0x70
+# __intel_display_power_get_domain.part.0+0x4d/0x70
+# intel_power_domains_init_hw+0x95/0x770
+# intel_display_driver_probe_noirq+0x85/0x230
+# i915_driver_probe+0x66d/0xc60
+# local_pci_probe+0x3a/0x90
+# pci_device_probe+0xb5/0x180
+# really_probe+0xc9/0x2b0
+# __driver_probe_device+0x6e/0x110
+# device_driver_attach+0x42/0xa0
+# bind_store+0x71/0xc0
+# kernfs_fop_write_iter+0x121/0x1c0
+# vfs_write+0x2a8/0x530
+# ksys_write+0x6f/0xf0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f7499e662c0
+# Code: 40 00 48 8b 15 41 9b 0d 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b7 0f 1f 00 80 3d 21 23 0e 00 00 74 17 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 58 c3 0f 1f 80 00 00 00 00 48 83 ec 28 48 89
+# RSP: 002b:00007ffffaf36a68 EFLAGS: 00000202 ORIG_RAX: 0000000000000001
+# RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f7499e662c0
+# RDX: 000000000000000c RSI: 00007ffffaf36b50 RDI: 0000000000000003
+# RBP: 000000000000000c R08: 0000000000000007 R09: 000055b37d0964a0
+# R10: 0000000000000000 R11: 0000000000000202 R12: 00007ffffaf36b50
+# R13: 0000000000000003 R14: 000055b3526a7220 R15: 00007f749a0dc020
+# </TASK>
+# irq event stamp: 29605
+# hardirqs last enabled at (29617): [<ffffffff98da3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (29634): [<ffffffff98da3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (29630): [<ffffffff98d17980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (29625): [<ffffffff98d17bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
index 461ef69ef08a..93d42b146df9 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
@@ -1,64 +1,24 @@
-api_intel_allocator@fork-simple-stress-signal,Timeout
-api_intel_allocator@open-vm,Timeout
-api_intel_allocator@simple-allocator,Timeout
-api_intel_bb@lot-of-buffers,Timeout
-api_intel_bb@object-reloc-keep-cache,Timeout
-api_intel_bb@offset-control,Timeout
-api_intel_bb@render-ccs,Timeout
-api_intel_bb@reset-bb,Timeout
-core_auth@basic-auth,Timeout
-core_hotunplug@hotrebind,Timeout
-core_setmaster@master-drop-set-user,Fail
-debugfs_test@read_all_entries_display_on,Timeout
-drm_read@empty-block,Timeout
-dumb_buffer@create-clear,Timeout
-dumb_buffer@invalid-bpp,Timeout
-gen3_render_tiledx_blits,Timeout
-gen7_exec_parse@basic-allocation,Timeout
-gen9_exec_parse@batch-invalid-length,Timeout
-gen9_exec_parse@bb-secure,Timeout
-gen9_exec_parse@secure-batches,Timeout
-gen9_exec_parse@shadow-peek,Timeout
-gen9_exec_parse@unaligned-jump,Timeout
-i915_getparams_basic@basic-subslice-total,Timeout
-i915_hangman@gt-engine-hang,Timeout
+api_intel_allocator@reopen,Timeout
+api_intel_bb@destroy-bb,Timeout
+core_hotunplug@hotrebind-lateclose,Timeout
+drm_read@short-buffer-block,Timeout
+dumb_buffer@map-valid,Timeout
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-i915_pciid,Timeout
-i915_pipe_stress@stress-xrgb8888-ytiled,Timeout
-i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rps@engine-order,Timeout
-i915_pm_rps@thresholds-idle-park,Timeout
-i915_query@engine-info,Timeout
-i915_query@query-topology-kernel-writes,Timeout
-i915_query@test-query-geometry-subslices,Timeout
kms_lease@lease-uevent,Fail
kms_rotation_crc@multiplane-rotation,Fail
perf@i915-ref-count,Fail
perf_pmu@busy,Timeout
perf_pmu@enable-race,Timeout
-perf_pmu@event-wait,Timeout
-perf_pmu@faulting-read,Timeout
-perf_pmu@gt-awake,Timeout
-perf_pmu@interrupts,Timeout
perf_pmu@module-unload,Fail
-perf_pmu@most-busy-idle-check-all,Timeout
perf_pmu@rc6,Crash
-perf_pmu@render-node-busy-idle,Fail
perf_pmu@semaphore-wait-idle,Timeout
-prime_busy@after,Timeout
-prime_mmap@test_aperture_limit,Timeout
-prime_mmap@test_map_unmap,Timeout
prime_mmap@test_refcounting,Timeout
-prime_self_import@basic-with_one_bo,Timeout
-sriov_basic@enable-vfs-autoprobe-off,Timeout
-syncobj_basic@bad-destroy,Timeout
-syncobj_basic@bad-flags-fd-to-handle,Timeout
-syncobj_basic@create-signaled,Timeout
-syncobj_eventfd@invalid-bad-pad,Timeout
-syncobj_eventfd@timeline-wait-before-signal,Timeout
+sriov_basic@enable-vfs-bind-unbind-each-numvfs-all,Timeout
+syncobj_basic@illegal-fd-to-handle,Timeout
syncobj_wait@invalid-multi-wait-unsubmitted-signaled,Timeout
syncobj_wait@invalid-signal-illegal-handle,Timeout
syncobj_wait@invalid-single-wait-all-unsubmitted,Timeout
@@ -66,7 +26,6 @@ syncobj_wait@multi-wait-all-submitted,Timeout
syncobj_wait@multi-wait-for-submit-submitted-signaled,Timeout
syncobj_wait@wait-any-complex,Timeout
syncobj_wait@wait-delayed-signal,Timeout
-template@A,Timeout
xe_module_load@force-load,Fail
xe_module_load@load,Fail
xe_module_load@many-reload,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
index b47df5855e8d..938377896841 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
@@ -35,3 +35,16 @@ syncobj_wait.*
# Kernel panic and test hangs with multiple kms tests
kms_.*
+
+# These tests alternate between failing and timing out on reruns, causing the pipeline to fail
+api_intel_allocator@reopen
+api_intel_bb@destroy-bb
+core_hotunplug@hotrebind-lateclose
+dumb_buffer@map-valid
+i915_pm_rps@engine-order
+perf_pmu@busy
+perf_pmu@enable-race
+perf_pmu@semaphore-wait-idle
+prime_mmap@test_refcounting
+sriov_basic@enable-vfs-bind-unbind-each-numvfs-all
+syncobj_basic@illegal-fd-to-handle
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
index 0ce240e3aa07..1cb6978c86dc 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
@@ -1,5 +1,4 @@
core_setmaster@master-drop-set-shared-fd,Fail
-core_setmaster@master-drop-set-user,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -7,12 +6,10 @@ i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-i915_pm_rps@engine-order,Fail
-kms_big_fb@linear-16bpp-rotate-180,Timeout
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
+kms_async_flips@test-time-stamp,Timeout
+kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
kms_dirtyfb@default-dirtyfb-ioctl,Fail
kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
-kms_fb_coherency@memset-crc,Crash
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@@ -34,18 +31,12 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
-kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack-mmap-gtt,Timeout
kms_lease@lease-uevent,Fail
+kms_lease@page-flip-implicit-plane,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
-kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
-kms_rotation_crc@primary-rotation-180,Timeout
-kms_vblank@query-forked-hang,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
index 60b8d1c64e70..19a289617080 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
@@ -4,3 +4,10 @@
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
kms_pm_rpm@modeset-lpsp-stress
+
+# Board Name: dell-latitude-5400-8665U-sarien
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13351
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+gen9_exec_parse@unaligned-access
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
index 8e0efc80d510..4f176c04ec4e 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
@@ -1,8 +1,5 @@
-fbdev@eof,Fail
-fbdev@read,Fail
kms_3d,Fail
kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
-kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
kms_bw@connected-linear-tiling-2-displays-1920x1080p,Fail
@@ -22,6 +19,6 @@ kms_cursor_legacy@cursor-vs-flip-atomic,Fail
kms_cursor_legacy@cursor-vs-flip-legacy,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
-kms_lease@lease-uevent,Fail
-kms_rmfb@close-fd,Fail
+kms_flip@flip-vs-suspend,Fail
kms_flip@flip-vs-suspend-interruptible,Fail
+kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
index 2e5bf6ae25f2..2956567c3048 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
@@ -39,3 +39,10 @@ kms_cursor_legacy@cursor-vs-flip-atomic-transitions
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
kms_prop_blob@invalid-set-prop
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/dri-devel/c0b4cded-0971-4abb-9965-2acf4602d7cd@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_bw@connected-linear-tiling-1-displays-2160x1440p
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
index 8198e06344a3..d0db51874aef 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
@@ -15,3 +15,8 @@ tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
core_hotunplug.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
index 845f852bb4a0..5a063361d7f2 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
@@ -1,16 +1,12 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster@master-drop-set-user,Fail
dumb_buffer@create-clear,Crash
-fbdev@eof,Fail
-fbdev@pan,Fail
-fbdev@read,Fail
-fbdev@unaligned-read,Fail
kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
-kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
kms_bw@linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@linear-tiling-1-displays-2160x1440p,Fail
kms_bw@linear-tiling-1-displays-3840x2160p,Fail
-kms_color@invalid-gamma-lut-sizes,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@flip-vs-suspend,Fail
kms_lease@lease-uevent,Fail
-kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
index 8198e06344a3..d0db51874aef 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
@@ -15,3 +15,8 @@ tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
core_hotunplug.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
index 066d24ee3e08..7752adff05c1 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -1,4 +1,7 @@
kms_3d,Fail
+kms_cursor_legacy@forked-bo,Fail
+kms_cursor_legacy@forked-move,Fail
+kms_cursor_legacy@single-bo,Fail
kms_cursor_legacy@torture-bo,Fail
kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
index 6dbc2080347d..e4a8f8352cd6 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
@@ -15,4 +15,3 @@ kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
index d74e04405e65..8910afb6acf2 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
@@ -18,3 +18,17 @@ msm/msm_shrink@copy-gpu-oom-32
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
msm/msm_shrink@copy-gpu-oom-8
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/68
+# Failure Rate: 40
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_lease@page-flip-implicit-plane
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/69
+# Failure Rate: 20
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_plane@plane-position-hole-dpms
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
index c2833eee1c4b..478d7c161616 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
@@ -23,3 +23,8 @@ kms_flip@2x-wf_vblank-ts-check
# Hangs the machine
kms_cursor_crc@cursor-random-max-size
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
index 6dbc2080347d..e4a8f8352cd6 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
@@ -15,4 +15,3 @@ kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
index 7c69c1f1d55b..ef9318afcd89 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
@@ -17,3 +17,8 @@ tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
core_hotunplug.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
index fa8c7e663858..7a2ab58b706f 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
@@ -1,4 +1,3 @@
-drm_read@invalid-buffer,Fail
kms_color@ctm-0-25,Fail
kms_color@ctm-0-50,Fail
kms_color@ctm-0-75,Fail
@@ -28,4 +27,3 @@ kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
kms_plane_cursor@overlay,Fail
kms_plane_cursor@viewport,Fail
-kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
index 94783cafc21a..2ce7f7e23a01 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
@@ -30,3 +30,8 @@ kms_cursor_crc.*
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc2
kms_content_protection@uevent
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt
index 4892c0c70a6d..8d26b23133aa 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt
@@ -7,9 +7,4 @@ kms_cursor_legacy@torture-bo,Fail
kms_cursor_legacy@torture-move,Fail
kms_hdmi_inject@inject-4k,Fail
kms_lease@lease-uevent,Fail
-kms_plane_alpha_blend@alpha-7efc,Fail
-kms_plane_alpha_blend@alpha-basic,Fail
-kms_plane_alpha_blend@alpha-opaque-fb,Fail
-kms_plane_alpha_blend@alpha-transparent-fb,Fail
-kms_plane_alpha_blend@constant-alpha-max,Fail
msm/msm_recovery@gpu-fault-parallel,Fail
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
index 90282dfa19f4..ba9160d4d8eb 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
@@ -1,24 +1,10 @@
+core_setmaster@master-drop-set-root,Crash
+core_setmaster@master-drop-set-shared-fd,Crash
core_setmaster@master-drop-set-user,Crash
+core_setmaster_vs_auth,Crash
dumb_buffer@create-clear,Crash
fbdev@pan,Crash
-kms_bw@linear-tiling-2-displays-1920x1080p,Fail
-kms_cursor_crc@cursor-onscreen-32x10,Crash
-kms_cursor_crc@cursor-onscreen-32x32,Crash
-kms_cursor_crc@cursor-onscreen-64x64,Crash
-kms_cursor_crc@cursor-random-32x10,Crash
-kms_cursor_crc@cursor-sliding-32x10,Crash
-kms_cursor_crc@cursor-sliding-32x32,Crash
-kms_cursor_crc@cursor-sliding-64x21,Crash
-kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
-kms_cursor_legacy@cursor-vs-flip-legacy,Fail
-kms_cursor_legacy@flip-vs-cursor-crc-atomic,Crash
-kms_flip@flip-vs-panning-vs-hang,Crash
-kms_invalid_mode@int-max-clock,Crash
-kms_lease@invalid-create-leases,Fail
-kms_pipe_crc_basic@read-crc-frame-sequence,Crash
-kms_plane@pixel-format,Crash
-kms_plane@pixel-format-source-clamping,Crash
+kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
+kms_flip@flip-vs-modeset-vs-hang,Crash
kms_prop_blob@invalid-set-prop,Crash
-kms_properties@get_properties-sanity-atomic,Crash
-kms_properties@get_properties-sanity-non-atomic,Crash
-kms_rmfb@close-fd,Crash
+kms_prop_blob@invalid-set-prop-any,Crash
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
index 83a38853b4af..2803d0d80192 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
@@ -2,6 +2,7 @@ dumb_buffer@create-clear,Crash
kms_atomic_transition@modeset-transition,Fail
kms_atomic_transition@modeset-transition-fencing,Fail
kms_atomic_transition@plane-toggle-modeset-transition,Fail
+kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_color@gamma,Fail
kms_color@legacy-gamma,Fail
kms_cursor_crc@cursor-alpha-opaque,Fail
@@ -24,6 +25,7 @@ kms_cursor_crc@cursor-rapid-movement-32x32,Fail
kms_cursor_crc@cursor-rapid-movement-64x21,Fail
kms_cursor_crc@cursor-rapid-movement-64x64,Fail
kms_cursor_crc@cursor-size-change,Fail
+kms_cursor_crc@cursor-size-hints,Fail
kms_cursor_crc@cursor-sliding-32x10,Fail
kms_cursor_crc@cursor-sliding-32x32,Fail
kms_cursor_crc@cursor-sliding-64x21,Fail
@@ -32,6 +34,7 @@ kms_cursor_edge_walk@64x64-left-edge,Fail
kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
kms_cursor_legacy@cursor-vs-flip-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic-transitions,Fail
kms_cursor_legacy@cursor-vs-flip-legacy,Fail
kms_cursor_legacy@cursor-vs-flip-toggle,Fail
kms_cursor_legacy@flip-vs-cursor-atomic,Fail
@@ -41,14 +44,11 @@ kms_cursor_legacy@flip-vs-cursor-legacy,Fail
kms_cursor_legacy@long-nonblocking-modeset-vs-cursor-atomic,Fail
kms_flip@basic-flip-vs-wf_vblank,Fail
kms_flip@blocking-wf_vblank,Fail
-kms_flip@flip-vs-absolute-wf_vblank,Fail
-kms_flip@flip-vs-blocking-wf-vblank,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning,Fail
kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@modeset-vs-vblank-race,Fail
-kms_flip@modeset-vs-vblank-race-interruptible,Fail
kms_flip@plain-flip-fb-recreate,Fail
kms_flip@plain-flip-fb-recreate-interruptible,Fail
kms_flip@plain-flip-ts-check,Fail
@@ -64,14 +64,11 @@ kms_pipe_crc_basic@nonblocking-crc,Fail
kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail
kms_pipe_crc_basic@read-crc,Fail
kms_pipe_crc_basic@read-crc-frame-sequence,Fail
-kms_plane@pixel-format,Crash
-kms_plane@pixel-format-source-clamping,Crash
+kms_plane@pixel-format,Fail
+kms_plane@pixel-format-source-clamping,Fail
kms_plane@plane-panning-bottom-right,Fail
kms_plane@plane-panning-top-left,Fail
kms_plane@plane-position-covered,Fail
kms_plane@plane-position-hole,Fail
-kms_plane@plane-position-hole-dpms,Fail
kms_plane_cursor@primary,Fail
-kms_plane_multiple@tiling-none,Fail
-kms_rmfb@close-fd,Fail
kms_universal_plane@universal-plane-functional,Fail
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
index 56f7d4f1ed15..348b4ce7eb4b 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
@@ -74,3 +74,59 @@ kms_bw@linear-tiling-2-displays-2160x1440p
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc5
kms_flip@flip-vs-expired-vblank
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/dri-devel/f944dd08-c88c-49ae-aff0-274374550a93@collabora.com/T/#u
+# Failure Rate: 40
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_bw@linear-tiling-1-displays-2160x1440p
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/dri-devel/afa2d3bf-29f2-488d-8cc9-f30d461444b0@collabora.com/T/#u
+# Failure Rate: 80
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_plane_multiple@tiling-none
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/dri-devel/6fdaa97f-c1a5-4216-831f-dbb7c5f90498@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_bw@linear-tiling-1-displays-1920x1080p
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/dri-devel/616aa015-9574-4527-9d07-d8d698bbcc3c@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_plane@plane-position-hole-dpms
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/dri-devel/7a1b888f-d7db-4ed7-96cd-3975ace837fb@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_flip@flip-vs-absolute-wf_vblank
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/dri-devel/f17fffb6-abc4-464e-8465-395311b01f6a@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_flip@flip-vs-blocking-wf-vblank
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/dri-devel/9b590b26-1bf9-4951-b6a3-ef6c67e6a1c6@collabora.com/T/#u
+# Failure Rate: 60
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_bw@linear-tiling-2-displays-1920x1080p
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/dri-devel/059545fa-65b1-4f5c-a13e-4d2898679f51@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_flip@modeset-vs-vblank-race-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
index eb16b29dee48..e8e994d92557 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
@@ -18,3 +18,8 @@ tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
core_hotunplug.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
index 9c9e048725f8..adbcdd0f28d2 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
@@ -23,3 +23,8 @@ tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
core_hotunplug.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
index 71c02104a683..6ebcc7d89fbd 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
@@ -1,12 +1,5 @@
-kms_cursor_crc@cursor-rapid-movement-128x128,Fail
-kms_cursor_crc@cursor-rapid-movement-128x42,Fail
-kms_cursor_crc@cursor-rapid-movement-256x256,Fail
kms_cursor_crc@cursor-rapid-movement-256x85,Fail
kms_cursor_crc@cursor-rapid-movement-32x10,Fail
-kms_cursor_crc@cursor-rapid-movement-32x32,Fail
-kms_cursor_crc@cursor-rapid-movement-512x170,Fail
-kms_cursor_crc@cursor-rapid-movement-512x512,Fail
-kms_cursor_crc@cursor-rapid-movement-64x21,Fail
kms_cursor_crc@cursor-rapid-movement-64x64,Fail
kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
@@ -20,8 +13,9 @@ kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
kms_cursor_legacy@flip-vs-cursor-legacy,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
+kms_flip@flip-vs-suspend,Fail
+kms_flip@flip-vs-suspend-interruptible,Fail
kms_lease@lease-uevent,Fail
-kms_pipe_crc_basic@nonblocking-crc,Fail
kms_writeback@writeback-check-output,Fail
kms_writeback@writeback-check-output-XRGB2101010,Fail
kms_writeback@writeback-fb-id,Fail
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
index b3d16e82e9a2..319789806271 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
@@ -1,8 +1,9 @@
# keeps printing vkms_vblank_simulate: vblank timer overrun and never ends
kms_invalid_mode@int-max-clock
-# Kernel panic
-kms_cursor_crc@cursor-rapid-movement-32x10
+# Kernel panic seen with kms_cursor_crc tests
+kms_cursor_crc.*
+# kms_cursor_crc@cursor-rapid-movement-32x10
# Oops: 0000 [#1] PREEMPT SMP NOPTI
# CPU: 0 PID: 2635 Comm: kworker/u8:13 Not tainted 6.9.0-rc7-g40935263a1fd #1
# Hardware name: ChromiumOS crosvm, BIOS 0
@@ -52,7 +53,7 @@ kms_cursor_crc@cursor-rapid-movement-32x10
# FS: 0000000000000000(0000) GS:ffffa2ad2bc00000(0000) knlGS:0000000000000000
# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
-kms_cursor_crc@cursor-rapid-movement-256x85
+#kms_cursor_crc@cursor-rapid-movement-256x85
# [drm:drm_crtc_add_crc_entry] *ERROR* Overflow of CRC buffer, userspace reads too slow.
# Oops: 0000 [#1] PREEMPT SMP NOPTI
# CPU: 1 PID: 10 Comm: kworker/u8:0 Not tainted 6.9.0-rc7-g646381cde463 #1
@@ -104,7 +105,7 @@ kms_cursor_crc@cursor-rapid-movement-256x85
# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
# CR2: 0000000000000078 CR3: 0000000109b38000 CR4: 0000000000350ef0
-kms_cursor_crc@cursor-onscreen-256x256
+#kms_cursor_crc@cursor-onscreen-256x256
# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
# CPU: 1 PID: 1913 Comm: kworker/u8:6 Not tainted 6.10.0-rc5-g8a28e73ebead #1
# Hardware name: ChromiumOS crosvm, BIOS 0
@@ -258,6 +259,535 @@ kms_cursor_edge_walk@128x128-left-edge
# CR2: 0000000000000018 CR3: 0000000101710000 CR4: 0000000000350ef0
# vkms_vblank_simulate: vblank timer overrun
+# DEBUG - Begin test kms_cursor_crc@cursor-rapid-movement-64x64
+# ------------[ cut here ]------------
+# WARNING: CPU: 1 PID: 1250 at drivers/gpu/drm/vkms/vkms_crtc.c:139 vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Modules linked in: vkms
+# CPU: 1 UID: 0 PID: 1250 Comm: kms_cursor_crc Not tainted 6.13.0-rc2-ge95c88d68ac3 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# RIP: 0010:vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Code: f7 48 89 f3 e8 d0 bf ee ec 48 8b 83 50 01 00 00 a8 01 75 15 48 8b bb a0 01 00 00 e8 59 05 95 ec 48 89 df 5b e9 50 05 95 ec 90 <0f> 0b 90 eb e5 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90
+# RSP: 0018:ffff9fb640aafb08 EFLAGS: 00010202
+# RAX: ffff8e7240859e05 RBX: ffff8e7241cd6400 RCX: ffffffffae496b65
+# RDX: ffffffffad2d1f80 RSI: 0000000000000000 RDI: 0000000000000000
+# RBP: 0000000000000000 R08: 0000000000000034 R09: 0000000000000002
+# R10: 0000000047dd15a5 R11: 00000000547dd15a R12: ffff8e72590cc000
+# R13: 0000000000000000 R14: 00000000ffffffff R15: 0000000000000000
+# FS: 00007f0942ad56c0(0000) GS:ffff8e726bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f0942ad0008 CR3: 0000000118d1e000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __warn+0x8c/0x190
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? report_bug+0x164/0x190
+# ? handle_bug+0x54/0x90
+# ? exc_invalid_op+0x17/0x70
+# ? asm_exc_invalid_op+0x1a/0x20
+# ? __pfx_drm_property_free_blob+0x10/0x10
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? vkms_atomic_crtc_destroy_state+0x10/0x40 [vkms]
+# drm_atomic_state_default_clear+0x137/0x2f0
+# __drm_atomic_state_free+0x6c/0xb0
+# drm_atomic_helper_update_plane+0x100/0x150
+# drm_mode_cursor_universal+0x10e/0x270
+# drm_mode_cursor_common+0x115/0x240
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# drm_mode_cursor_ioctl+0x4a/0x70
+# drm_ioctl_kernel+0xb0/0x110
+# drm_ioctl+0x235/0x4b0
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# __x64_sys_ioctl+0x92/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f0943a84cdb
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007fff267d68d0 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 000000000000005a RCX: 00007f0943a84cdb
+# RDX: 00007fff267d6960 RSI: 00000000c01c64a3 RDI: 0000000000000005
+# RBP: 00007fff267d6960 R08: 0000000000000007 R09: 00005619a60f3450
+# R10: fe95a83851609dee R11: 0000000000000246 R12: 00000000c01c64a3
+# R13: 0000000000000005 R14: 00005619a3cd2c68 R15: 00005619a608c830
+# </TASK>
+# irq event stamp: 57793
+# hardirqs last enabled at (57799): [<ffffffffacba3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (57804): [<ffffffffacba3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (45586): [<ffffffffacb17980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (45569): [<ffffffffacb17bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 0 UID: 0 PID: 119 Comm: kworker/u8:6 Tainted: G W 6.13.0-rc2-ge95c88d68ac3 #1
+# Tainted: [W]=WARN
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffff9fb640efbd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff8e7241926000 RSI: ffff8e7241927ffc RDI: 000000000b7fb767
+# RBP: ffff9fb640efbde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff8e7241b09a80 R11: 0000000000000000 R12: ffff8e7241cd6200
+# R13: 0000000000000013 R14: 0000000000000000 R15: ffff8e7241b09a80
+# FS: 0000000000000000(0000) GS:ffff8e726bc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000118d1e000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? srso_return_thunk+0x5/0x5f
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x1a3/0x760 [vkms]
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x320
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 000000000000001c
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffff9fb640efbd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff8e7241926000 RSI: ffff8e7241927ffc RDI: 000000000b7fb767
+# RBP: ffff9fb640efbde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff8e7241b09a80 R11: 0000000000000000 R12: ffff8e7241cd6200
+# R13: 0000000000000013 R14: 0000000000000000 R15: ffff8e7241b09a80
+# FS: 0000000000000000(0000) GS:ffff8e726bc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000118d1e000 CR4: 0000000000350ef0
+
+# DEBUG - Begin test kms_cursor_crc@cursor-rapid-movement-128x42
+# ------------[ cut here ]------------
+# WARNING: CPU: 0 PID: 2933 at drivers/gpu/drm/vkms/vkms_crtc.c:139 vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Modules linked in: vkms
+# CPU: 0 UID: 0 PID: 2933 Comm: kms_cursor_crc Not tainted 6.13.0-rc2-g5219242748c8 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# RIP: 0010:vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Code: f7 48 89 f3 e8 d0 bf 6e d0 48 8b 83 50 01 00 00 a8 01 75 15 48 8b bb a0 01 00 00 e8 59 05 15 d0 48 89 df 5b e9 50 05 15 d0 90 <0f> 0b 90 eb e5 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90
+# RSP: 0018:ffffa14cc08b3b08 EFLAGS: 00010202
+# RAX: ffff9b864084b605 RBX: ffff9b8641ba4600 RCX: ffffffff91c96b65
+# RDX: ffffffff90ad1f80 RSI: 0000000000000000 RDI: 0000000000000000
+# RBP: 0000000000000000 R08: 0000000000000034 R09: 0000000000000002
+# R10: 0000000047dd15a5 R11: 00000000547dd15a R12: ffff9b864099c000
+# R13: 0000000000000000 R14: 00000000ffffffff R15: 0000000000000000
+# FS: 00007f4f437ab6c0(0000) GS:ffff9b866bc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f4f44c3cd40 CR3: 0000000108c14000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __warn+0x8c/0x190
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? report_bug+0x164/0x190
+# ? handle_bug+0x54/0x90
+# ? exc_invalid_op+0x17/0x70
+# ? asm_exc_invalid_op+0x1a/0x20
+# ? __pfx_drm_property_free_blob+0x10/0x10
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? vkms_atomic_crtc_destroy_state+0x10/0x40 [vkms]
+# drm_atomic_state_default_clear+0x137/0x2f0
+# __drm_atomic_state_free+0x6c/0xb0
+# drm_atomic_helper_update_plane+0x100/0x150
+# drm_mode_cursor_universal+0x10e/0x270
+# drm_mode_cursor_common+0x115/0x240
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# drm_mode_cursor_ioctl+0x4a/0x70
+# drm_ioctl_kernel+0xb0/0x110
+# drm_ioctl+0x235/0x4b0
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# __x64_sys_ioctl+0x92/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f4f44960c5b
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffcdfb0b560 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 0000000000000060 RCX: 00007f4f44960c5b
+# RDX: 00007ffcdfb0b5f0 RSI: 00000000c01c64a3 RDI: 0000000000000005
+# RBP: 00007ffcdfb0b5f0 R08: 0000000000000007 R09: 000055c3a5801a30
+# R10: 3107764f00e1f281 R11: 0000000000000246 R12: 00000000c01c64a3
+# R13: 0000000000000005 R14: 000055c38b7e42c8 R15: 000055c3a579aab0
+# </TASK>
+# irq event stamp: 58747
+# hardirqs last enabled at (58753): [<ffffffff903a3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (58758): [<ffffffff903a3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (47324): [<ffffffff90317980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (47307): [<ffffffff90317bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 1 UID: 0 PID: 11 Comm: kworker/u8:0 Tainted: G W 6.13.0-rc2-g5219242748c8 #1
+# Tainted: [W]=WARN
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa14cc005fd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff9b8669626000 RSI: ffff9b8669627ffc RDI: 00000000348d6c39
+# RBP: ffffa14cc005fde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff9b8645650eb0 R11: 0000000000000000 R12: ffff9b8641ba5800
+# R13: 0000000000000028 R14: 0000000000000000 R15: ffff9b8645650eb0
+# FS: 0000000000000000(0000) GS:ffff9b866bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000108c14000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? __kvmalloc_node_noprof+0x3e/0xc0
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x1a3/0x760 [vkms]
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x320
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 000000000000001c
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa14cc005fd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff9b8669626000 RSI: ffff9b8669627ffc RDI: 00000000348d6c39
+# RBP: ffffa14cc005fde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff9b8645650eb0 R11: 0000000000000000 R12: ffff9b8641ba5800
+# R13: 0000000000000028 R14: 0000000000000000 R15: ffff9b8645650eb0
+# FS: 0000000000000000(0000) GS:ffff9b866bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000108c14000 CR4: 0000000000350ef0
+
+# DEBUG - Begin test kms_cursor_crc@cursor-rapid-movement-32x32
+# ------------[ cut here ]------------
+# WARNING: CPU: 0 PID: 2933 at drivers/gpu/drm/vkms/vkms_crtc.c:139 vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Modules linked in: vkms
+# CPU: 0 UID: 0 PID: 2933 Comm: kms_cursor_crc Not tainted 6.13.0-rc2-g5219242748c8 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# RIP: 0010:vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Code: f7 48 89 f3 e8 d0 bf 6e d0 48 8b 83 50 01 00 00 a8 01 75 15 48 8b bb a0 01 00 00 e8 59 05 15 d0 48 89 df 5b e9 50 05 15 d0 90 <0f> 0b 90 eb e5 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90
+# RSP: 0018:ffffa14cc08b3b08 EFLAGS: 00010202
+# RAX: ffff9b864084b605 RBX: ffff9b8641ba4600 RCX: ffffffff91c96b65
+# RDX: ffffffff90ad1f80 RSI: 0000000000000000 RDI: 0000000000000000
+# RBP: 0000000000000000 R08: 0000000000000034 R09: 0000000000000002
+# R10: 0000000047dd15a5 R11: 00000000547dd15a R12: ffff9b864099c000
+# R13: 0000000000000000 R14: 00000000ffffffff R15: 0000000000000000
+# FS: 00007f4f437ab6c0(0000) GS:ffff9b866bc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f4f44c3cd40 CR3: 0000000108c14000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __warn+0x8c/0x190
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? report_bug+0x164/0x190
+# ? handle_bug+0x54/0x90
+# ? exc_invalid_op+0x17/0x70
+# ? asm_exc_invalid_op+0x1a/0x20
+# ? __pfx_drm_property_free_blob+0x10/0x10
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? vkms_atomic_crtc_destroy_state+0x10/0x40 [vkms]
+# drm_atomic_state_default_clear+0x137/0x2f0
+# __drm_atomic_state_free+0x6c/0xb0
+# drm_atomic_helper_update_plane+0x100/0x150
+# drm_mode_cursor_universal+0x10e/0x270
+# drm_mode_cursor_common+0x115/0x240
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# drm_mode_cursor_ioctl+0x4a/0x70
+# drm_ioctl_kernel+0xb0/0x110
+# drm_ioctl+0x235/0x4b0
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# __x64_sys_ioctl+0x92/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f4f44960c5b
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffcdfb0b560 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 0000000000000060 RCX: 00007f4f44960c5b
+# RDX: 00007ffcdfb0b5f0 RSI: 00000000c01c64a3 RDI: 0000000000000005
+# RBP: 00007ffcdfb0b5f0 R08: 0000000000000007 R09: 000055c3a5801a30
+# R10: 3107764f00e1f281 R11: 0000000000000246 R12: 00000000c01c64a3
+# R13: 0000000000000005 R14: 000055c38b7e42c8 R15: 000055c3a579aab0
+# </TASK>
+# irq event stamp: 58747
+# hardirqs last enabled at (58753): [<ffffffff903a3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (58758): [<ffffffff903a3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (47324): [<ffffffff90317980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (47307): [<ffffffff90317bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 1 UID: 0 PID: 11 Comm: kworker/u8:0 Tainted: G W 6.13.0-rc2-g5219242748c8 #1
+# Tainted: [W]=WARN
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa14cc005fd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff9b8669626000 RSI: ffff9b8669627ffc RDI: 00000000348d6c39
+# RBP: ffffa14cc005fde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff9b8645650eb0 R11: 0000000000000000 R12: ffff9b8641ba5800
+# R13: 0000000000000028 R14: 0000000000000000 R15: ffff9b8645650eb0
+# FS: 0000000000000000(0000) GS:ffff9b866bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000108c14000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? __kvmalloc_node_noprof+0x3e/0xc0
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x1a3/0x760 [vkms]
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x320
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 000000000000001c
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa14cc005fd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff9b8669626000 RSI: ffff9b8669627ffc RDI: 00000000348d6c39
+# RBP: ffffa14cc005fde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff9b8645650eb0 R11: 0000000000000000 R12: ffff9b8641ba5800
+# R13: 0000000000000028 R14: 0000000000000000 R15: ffff9b8645650eb0
+# FS: 0000000000000000(0000) GS:ffff9b866bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000108c14000 CR4: 0000000000350ef0
+
+# DEBUG - Begin test kms_cursor_crc@cursor-rapid-movement-128x128
+# ------------[ cut here ]------------
+# WARNING: CPU: 0 PID: 2898 at drivers/gpu/drm/vkms/vkms_crtc.c:139 vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Modules linked in: vkms
+# CPU: 0 UID: 0 PID: 2898 Comm: kms_cursor_crc Not tainted 6.13.0-rc2-g1f006005ebf8 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# RIP: 0010:vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Code: f7 48 89 f3 e8 d0 bf 6e e1 48 8b 83 50 01 00 00 a8 01 75 15 48 8b bb a0 01 00 00 e8 59 05 15 e1 48 89 df 5b e9 50 05 15 e1 90 <0f> 0b 90 eb e5 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90
+# RSP: 0018:ffffa35c4082fb08 EFLAGS: 00010202
+# RAX: ffff8b02c1c0f005 RBX: ffff8b02d4509600 RCX: ffffffffa2c96b65
+# RDX: ffffffffa1ad1f80 RSI: 0000000000000000 RDI: 0000000000000000
+# RBP: 0000000000000000 R08: 0000000000000034 R09: 0000000000000002
+# R10: 0000000030f11ddf R11: 00000000f30f11dd R12: ffff8b02d2528000
+# R13: 0000000000000000 R14: 00000000ffffffff R15: 0000000000000000
+# FS: 00007f4b071ab6c0(0000) GS:ffff8b02efc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000055f7f8384b48 CR3: 0000000104ef0000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __warn+0x8c/0x190
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? report_bug+0x164/0x190
+# ? handle_bug+0x54/0x90
+# ? exc_invalid_op+0x17/0x70
+# ? asm_exc_invalid_op+0x1a/0x20
+# ? __pfx_drm_property_free_blob+0x10/0x10
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? vkms_atomic_crtc_destroy_state+0x10/0x40 [vkms]
+# drm_atomic_state_default_clear+0x137/0x2f0
+# __drm_atomic_state_free+0x6c/0xb0
+# drm_atomic_helper_update_plane+0x100/0x150
+# drm_mode_cursor_universal+0x10e/0x270
+# drm_mode_cursor_common+0x115/0x240
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# drm_mode_cursor_ioctl+0x4a/0x70
+# drm_ioctl_kernel+0xb0/0x110
+# drm_ioctl+0x235/0x4b0
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# __x64_sys_ioctl+0x92/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f4b0815ac5b
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffd726f75e0 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 000000000000005d RCX: 00007f4b0815ac5b
+# RDX: 00007ffd726f7670 RSI: 00000000c01c64a3 RDI: 0000000000000005
+# RBP: 00007ffd726f7670 R08: 0000000000000007 R09: 000055f7f83e30c0
+# R10: 2b16893c03bd381a R11: 0000000000000246 R12: 00000000c01c64a3
+# R13: 0000000000000005 R14: 000055f7dbee72c8 R15: 000055f7f837bab0
+# </TASK>
+# irq event stamp: 58921
+# hardirqs last enabled at (58927): [<ffffffffa13a3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (58932): [<ffffffffa13a3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (46140): [<ffffffffa1317980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (46135): [<ffffffffa1317bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 1 UID: 0 PID: 1657 Comm: kworker/u8:14 Tainted: G W 6.13.0-rc2-g1f006005ebf8 #1
+# Tainted: [W]=WARN
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa35c40c43d20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff8b02c52c8000 RSI: ffff8b02c52c9ffc RDI: 00000000fa4761c9
+# RBP: ffffa35c40c43de0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff8b02c1c87840 R11: 0000000000000000 R12: ffff8b02d4508800
+# R13: 0000000000000021 R14: 0000000000000000 R15: ffff8b02c1c87840
+# FS: 0000000000000000(0000) GS:ffff8b02efd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000104ef0000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? srso_return_thunk+0x5/0x5f
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x1a3/0x760 [vkms]
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x320
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 000000000000001c
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa35c40c43d20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff8b02c52c8000 RSI: ffff8b02c52c9ffc RDI: 00000000fa4761c9
+# RBP: ffffa35c40c43de0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff8b02c1c87840 R11: 0000000000000000 R12: ffff8b02d4508800
+# R13: 0000000000000021 R14: 0000000000000000 R15: ffff8b02c1c87840
+# FS: 0000000000000000(0000) GS:ffff8b02efd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+
# Skip driver specific tests
^amdgpu.*
^msm.*
@@ -272,3 +802,8 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 56f977bbe62d..30c736fc0067 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -327,7 +327,7 @@ static int drm_bridge_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
drm_bridge_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index 007ceb281d00..56a4965e518c 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -311,16 +311,6 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
if (!aux->transfer)
return;
-#ifndef CONFIG_MEDIA_CEC_RC
- /*
- * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
- * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
- *
- * Do this here as well to ensure the tests against cec_caps are
- * correct.
- */
- cec_caps &= ~CEC_CAP_RC;
-#endif
cancel_delayed_work_sync(&aux->cec.unregister_work);
mutex_lock(&aux->cec.lock);
@@ -337,7 +327,9 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
num_las = CEC_MAX_LOG_ADDRS;
if (aux->cec.adap) {
- if (aux->cec.adap->capabilities == cec_caps &&
+ /* Check if the adapter properties have changed */
+ if ((aux->cec.adap->capabilities & CEC_CAP_MONITOR_ALL) ==
+ (cec_caps & CEC_CAP_MONITOR_ALL) &&
aux->cec.adap->available_log_addrs == num_las) {
/* Unchanged, so just set the phys addr */
cec_s_phys_addr(aux->cec.adap, source_physical_address, false);
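
A minimal sketch of the relaxed comparison introduced above (editorial illustration, not part of the patch; the helper name is hypothetical): only the CEC_CAP_MONITOR_ALL bit and the number of logical addresses decide whether the existing adapter can be reused, since other default capabilities such as CEC_CAP_RC may be stripped by the CEC core depending on kernel configuration.

#include <linux/types.h>
#include <media/cec.h>

/* Hypothetical helper: returns true when the existing CEC adapter can be reused. */
static bool cec_adap_unchanged(u32 old_caps, u32 new_caps,
			       u8 old_num_las, u8 new_num_las)
{
	return (old_caps & CEC_CAP_MONITOR_ALL) == (new_caps & CEC_CAP_MONITOR_ALL) &&
	       old_num_las == new_num_las;
}
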
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index da3c8521a7fa..f5c596234729 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -459,6 +459,64 @@ void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_lttpr_link_train_channel_eq_delay);
+/**
+ * drm_dp_lttpr_wake_timeout_setup() - Grant extended time for sink to wake up
+ * @aux: The DP AUX channel to use
+ * @transparent_mode: True if the LTTPR is in transparent mode
+ *
+ * This function checks whether the sink requests an extended wake time and,
+ * if so, grants the request. After this setup the source device can keep
+ * retrying AUX transactions until the granted wake timeout expires.
+ * If this function is not called, all AUX transactions are expected to time
+ * out after the default of 1 ms.
+ */
+void drm_dp_lttpr_wake_timeout_setup(struct drm_dp_aux *aux, bool transparent_mode)
+{
+ u8 val = 1;
+ int ret;
+
+ if (transparent_mode) {
+ static const u8 timeout_mapping[] = {
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_1_MS] = 1,
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_20_MS] = 20,
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_40_MS] = 40,
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_60_MS] = 60,
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_80_MS] = 80,
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_100_MS] = 100,
+ };
+
+ ret = drm_dp_dpcd_readb(aux, DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST, &val);
+ if (ret != 1) {
+ drm_dbg_kms(aux->drm_dev,
+ "Failed to read Extended sleep wake timeout request\n");
+ return;
+ }
+
+ val = (val < sizeof(timeout_mapping) && timeout_mapping[val]) ?
+ timeout_mapping[val] : 1;
+
+ if (val > 1)
+ drm_dp_dpcd_writeb(aux,
+ DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_GRANT,
+ DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_GRANTED);
+ } else {
+ ret = drm_dp_dpcd_readb(aux, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &val);
+ if (ret != 1) {
+ drm_dbg_kms(aux->drm_dev,
+ "Failed to read Extended sleep wake timeout request\n");
+ return;
+ }
+
+ val = (val & DP_EXTENDED_WAKE_TIMEOUT_REQUEST_MASK) ?
+ (val & DP_EXTENDED_WAKE_TIMEOUT_REQUEST_MASK) * 10 : 1;
+
+ if (val > 1)
+ drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT,
+ DP_EXTENDED_WAKE_TIMEOUT_GRANT);
+ }
+}
+EXPORT_SYMBOL(drm_dp_lttpr_wake_timeout_setup);
+
u8 drm_dp_link_rate_to_bw_code(int link_rate)
{
switch (link_rate) {
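
A minimal caller sketch for the drm_dp_lttpr_wake_timeout_setup() helper added above (editorial illustration, not part of the patch; the surrounding function name is an assumption): a source driver would typically grant the extended wake timeout right before waking the sink and starting link training.

#include <drm/display/drm_dp_helper.h>

static void example_dp_wake_sink(struct drm_dp_aux *aux, bool lttpr_transparent)
{
	/* Grant extra wake time if the sink/LTTPR requested any; without this
	 * call, AUX transfers fall back to the default 1 ms timeout. */
	drm_dp_lttpr_wake_timeout_setup(aux, lttpr_transparent);

	/* ... wake the sink (DP_SET_POWER) and start link training ... */
}
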
@@ -2544,7 +2602,7 @@ u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
u8 bpp_increment_dpcd = dsc_dpcd[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT];
- switch (bpp_increment_dpcd) {
+ switch (bpp_increment_dpcd & DP_DSC_BITS_PER_PIXEL_MASK) {
case DP_DSC_BITS_PER_PIXEL_1_16:
return 16;
case DP_DSC_BITS_PER_PIXEL_1_8:
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 06c91c5b7f7c..8b68bb3fbffb 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -171,18 +171,30 @@ static const char *drm_dp_mst_sideband_tx_state_str(int state)
return sideband_reason_str[state];
}
+static inline u8
+drm_dp_mst_get_ufp_num_at_lct_from_rad(u8 lct, const u8 *rad)
+{
+ int idx = (lct / 2) - 1;
+ int shift = (lct % 2) ? 0 : 4;
+ u8 ufp_num;
+
+	/* mst_primary, its RAD is unset */
+ if (lct == 1)
+ return 0;
+
+ ufp_num = (rad[idx] >> shift) & 0xf;
+
+ return ufp_num;
+}
+
static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
int i;
- u8 unpacked_rad[16];
+ u8 unpacked_rad[16] = {};
- for (i = 0; i < lct; i++) {
- if (i % 2)
- unpacked_rad[i] = rad[i / 2] >> 4;
- else
- unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
- }
+ for (i = 0; i < lct; i++)
+ unpacked_rad[i] = drm_dp_mst_get_ufp_num_at_lct_from_rad(i + 1, rad);
/* TODO: Eventually add something to printk so we can format the rad
* like this: 1.2.3
@@ -2544,9 +2556,8 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
if (!mstb)
goto out;
- for (i = 0; i < lct - 1; i++) {
- int shift = (i % 2) ? 0 : 4;
- int port_num = (rad[i / 2] >> shift) & 0xf;
+ for (i = 1; i < lct; i++) {
+ int port_num = drm_dp_mst_get_ufp_num_at_lct_from_rad(i + 1, rad);
list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) {
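
A standalone illustration of the RAD nibble layout handled by drm_dp_mst_get_ufp_num_at_lct_from_rad() above (editorial sketch; the example RAD bytes are made up): hop 1 is the MST primary and has no RAD entry, hop 2 lives in the high nibble of rad[0], hop 3 in the low nibble, and so on.

#include <stdio.h>

/* User-space re-statement of the nibble math from the new helper. */
static unsigned char ufp_num_at_lct(unsigned char lct, const unsigned char *rad)
{
	if (lct == 1)
		return 0;	/* mst_primary, RAD unused */
	return (rad[(lct / 2) - 1] >> ((lct % 2) ? 0 : 4)) & 0xf;
}

int main(void)
{
	/* For lct = 4 and rad[] = { 0x21, 0x30 } the path is port 2 (hop 2),
	 * port 1 (hop 3), port 3 (hop 4). */
	const unsigned char rad[2] = { 0x21, 0x30 };

	for (unsigned char lct = 1; lct <= 4; lct++)
		printf("hop %u -> port %u\n", lct, ufp_num_at_lct(lct, rad));
	return 0;
}
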
diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
index 9b2ee2385634..c205f37da1e1 100644
--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
@@ -542,7 +542,7 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_check);
*/
enum drm_mode_status
drm_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
unsigned long long clock;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5186d2114a50..7a25e70694ba 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -574,6 +574,30 @@ mode_valid(struct drm_atomic_state *state)
return 0;
}
+static int drm_atomic_check_valid_clones(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *drm_enc;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) {
+ if (!drm_enc->possible_clones) {
+ DRM_DEBUG("enc%d possible_clones is 0\n", drm_enc->base.id);
+ continue;
+ }
+
+ if ((crtc_state->encoder_mask & drm_enc->possible_clones) !=
+ crtc_state->encoder_mask) {
+ DRM_DEBUG("crtc%d failed valid clone check for mask 0x%x\n",
+ crtc->base.id, crtc_state->encoder_mask);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
/**
* drm_atomic_helper_check_modeset - validate state object for modeset changes
* @dev: DRM device
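
A standalone re-statement of the clone check added above (editorial sketch with made-up masks): every encoder active on the CRTC must list the full set of active encoders in its possible_clones mask, otherwise the atomic check fails.

#include <stdio.h>

static int check_valid_clones(unsigned int encoder_mask,
			      const unsigned int *possible_clones, int n_encoders)
{
	for (int i = 0; i < n_encoders; i++) {
		if (!(encoder_mask & (1u << i)))
			continue;		/* encoder not on this CRTC */
		if (!possible_clones[i])
			continue;		/* mask not filled in, skip */
		if ((encoder_mask & possible_clones[i]) != encoder_mask)
			return -1;		/* -EINVAL in the helper above */
	}
	return 0;
}

int main(void)
{
	/* Encoders 0 and 1 active; encoder 1 does not allow cloning with 0. */
	const unsigned int possible_clones[] = { 0x3, 0x2 };

	printf("%d\n", check_valid_clones(0x3, possible_clones, 2)); /* prints -1 */
	return 0;
}
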
@@ -666,8 +690,9 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
}
if (new_crtc_state->enable != has_connectors) {
- drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch (%d/%d)\n",
+ crtc->base.id, crtc->name,
+ new_crtc_state->enable, has_connectors);
return -EINVAL;
}
@@ -745,6 +770,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret != 0)
return ret;
+
+ ret = drm_atomic_check_valid_clones(state, crtc);
+ if (ret != 0)
+ return ret;
}
/*
@@ -1059,6 +1088,15 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes);
* For example enable/disable of a cursor plane which have fixed zpos value
* would trigger all other enabled planes to be forced to the state change.
*
+ * IMPORTANT:
+ *
+ * As this function calls drm_atomic_helper_check_modeset() internally, its
+ * restrictions also apply:
+ * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
+ * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
+ * without a full modeset) _must_ call drm_atomic_helper_check_modeset()
+ * again after that change.
+ *
* RETURNS:
* Zero for success or -errno
*/
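
A sketch of the documented restriction, using a hypothetical driver's atomic_check: when a plane check hook may upgrade the commit to a full modeset, drm_atomic_helper_check_modeset() has to run once more so the newly set mode_changed flags are acted upon.

#include <drm/drm_atomic_helper.h>

static int example_driver_atomic_check(struct drm_device *dev,
				       struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	/* Plane check hooks may set drm_crtc_state.mode_changed here. */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	/* Required by the rule above whenever mode_changed was set late. */
	return drm_atomic_helper_check_modeset(dev, state);
}
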
@@ -1122,7 +1160,7 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
}
static void
-disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
+disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
@@ -1130,7 +1168,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
- for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
@@ -1142,11 +1180,11 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (!old_conn_state->crtc)
continue;
- old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);
if (new_conn_state->crtc)
new_crtc_state = drm_atomic_get_new_crtc_state(
- old_state,
+ state,
new_conn_state->crtc);
else
new_crtc_state = NULL;
@@ -1173,12 +1211,12 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
* it away), so we won't call disable hooks twice.
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
- drm_atomic_bridge_chain_disable(bridge, old_state);
+ drm_atomic_bridge_chain_disable(bridge, state);
/* Right function depends upon target state. */
if (funcs) {
if (funcs->atomic_disable)
- funcs->atomic_disable(encoder, old_state);
+ funcs->atomic_disable(encoder, state);
else if (new_conn_state->crtc && funcs->prepare)
funcs->prepare(encoder);
else if (funcs->disable)
@@ -1187,10 +1225,10 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
- drm_atomic_bridge_chain_post_disable(bridge, old_state);
+ drm_atomic_bridge_chain_post_disable(bridge, state);
}
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
int ret;
@@ -1211,7 +1249,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (new_crtc_state->enable && funcs->prepare)
funcs->prepare(crtc);
else if (funcs->atomic_disable)
- funcs->atomic_disable(crtc, old_state);
+ funcs->atomic_disable(crtc, state);
else if (funcs->disable)
funcs->disable(crtc);
else if (funcs->dpms)
@@ -1239,7 +1277,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
/**
* drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function updates all the various legacy modeset state pointers in
* connectors, encoders and CRTCs.
@@ -1255,7 +1293,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
*/
void
drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
@@ -1264,7 +1302,7 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
int i;
/* clear out existing links and update dpms */
- for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
if (connector->encoder) {
WARN_ON(!connector->encoder->crtc);
@@ -1285,7 +1323,7 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
}
/* set new links */
- for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
if (!new_conn_state->crtc)
continue;
@@ -1297,7 +1335,7 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
}
/* set legacy state in the crtc structure */
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct drm_plane *primary = crtc->primary;
struct drm_plane_state *new_plane_state;
@@ -1305,7 +1343,7 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
crtc->enabled = new_crtc_state->enable;
new_plane_state =
- drm_atomic_get_new_plane_state(old_state, primary);
+ drm_atomic_get_new_plane_state(state, primary);
if (new_plane_state && new_plane_state->crtc == crtc) {
crtc->x = new_plane_state->src_x >> 16;
@@ -1337,7 +1375,7 @@ void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *stat
EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
static void
-crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
+crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
@@ -1345,7 +1383,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
struct drm_connector_state *new_conn_state;
int i;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
if (!new_crtc_state->mode_changed)
@@ -1361,7 +1399,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
}
}
- for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_display_mode *mode, *adjusted_mode;
@@ -1376,7 +1414,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
mode = &new_crtc_state->mode;
adjusted_mode = &new_crtc_state->adjusted_mode;
- if (!new_crtc_state->mode_changed)
+ if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
continue;
drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n",
@@ -1401,7 +1439,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
/**
* drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function shuts down all the outputs that need to be shut down and
* prepares them (if required) with the new mode.
@@ -1413,25 +1451,25 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
* PM since planes updates then only happen when the CRTC is actually enabled.
*/
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
- disable_outputs(dev, old_state);
+ disable_outputs(dev, state);
- drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
- drm_atomic_helper_calc_timestamping_constants(old_state);
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_atomic_helper_calc_timestamping_constants(state);
- crtc_set_mode(dev, old_state);
+ crtc_set_mode(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
int i;
- for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_connector_helper_funcs *funcs;
funcs = connector->helper_private;
@@ -1440,7 +1478,7 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
- funcs->atomic_commit(connector, old_state);
+ funcs->atomic_commit(connector, state);
}
}
}
@@ -1448,7 +1486,7 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
/**
* drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function enables all the outputs with the new configuration which had to
* be turned off for the update.
@@ -1460,7 +1498,7 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
* PM since planes updates then only happen when the CRTC is actually enabled.
*/
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -1469,7 +1507,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_connector_state *new_conn_state;
int i;
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
/* Need to filter out CRTCs where only planes change. */
@@ -1485,13 +1523,13 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
if (funcs->atomic_enable)
- funcs->atomic_enable(crtc, old_state);
+ funcs->atomic_enable(crtc, state);
else if (funcs->commit)
funcs->commit(crtc);
}
}
- for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
@@ -1514,21 +1552,21 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
* it away), so we won't call enable hooks twice.
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
- drm_atomic_bridge_chain_pre_enable(bridge, old_state);
+ drm_atomic_bridge_chain_pre_enable(bridge, state);
if (funcs) {
if (funcs->atomic_enable)
- funcs->atomic_enable(encoder, old_state);
+ funcs->atomic_enable(encoder, state);
else if (funcs->enable)
funcs->enable(encoder);
else if (funcs->commit)
funcs->commit(encoder);
}
- drm_atomic_bridge_chain_enable(bridge, old_state);
+ drm_atomic_bridge_chain_enable(bridge, state);
}
- drm_atomic_helper_commit_writebacks(dev, old_state);
+ drm_atomic_helper_commit_writebacks(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
@@ -1630,7 +1668,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
/**
* drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* Helper to, after atomic commit, wait for vblanks on all affected
* CRTCs (ie. before cleaning up old framebuffers using
@@ -1644,7 +1682,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
*/
void
drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@@ -1655,10 +1693,10 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
* Legacy cursor ioctls are completely unsynced, and userspace
* relies on that (by doing tons of cursor updates).
*/
- if (old_state->legacy_cursor_update)
+ if (state->legacy_cursor_update)
return;
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (!new_crtc_state->active)
continue;
@@ -1667,17 +1705,17 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
continue;
crtc_mask |= drm_crtc_mask(crtc);
- old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
+ state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
}
- for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
if (!(crtc_mask & drm_crtc_mask(crtc)))
continue;
ret = wait_event_timeout(dev->vblank[i].queue,
- old_state->crtcs[i].last_vblank_count !=
- drm_crtc_vblank_count(crtc),
- msecs_to_jiffies(100));
+ state->crtcs[i].last_vblank_count !=
+ drm_crtc_vblank_count(crtc),
+ msecs_to_jiffies(100));
WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
crtc->base.id, crtc->name);
@@ -1690,7 +1728,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
/**
* drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* Helper to, after atomic commit, wait for page flips on all affected
* crtcs (ie. before cleaning up old framebuffers using
@@ -1703,16 +1741,16 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
* initialized using drm_atomic_helper_setup_commit().
*/
void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
int i;
for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
+ struct drm_crtc_commit *commit = state->crtcs[i].commit;
int ret;
- crtc = old_state->crtcs[i].ptr;
+ crtc = state->crtcs[i].ptr;
if (!crtc || !commit)
continue;
@@ -1723,14 +1761,14 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
crtc->base.id, crtc->name);
}
- if (old_state->fake_commit)
- complete_all(&old_state->fake_commit->flip_done);
+ if (state->fake_commit)
+ complete_all(&state->fake_commit->flip_done);
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
/**
* drm_atomic_helper_commit_tail - commit atomic update to hardware
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This is the default implementation for the
* &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
@@ -1741,29 +1779,29 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
* Note that the default ordering of how the various stages are called is to
* match the legacy modeset helper library closest.
*/
-void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
+void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
{
- struct drm_device *dev = old_state->dev;
+ struct drm_device *dev = state->dev;
- drm_atomic_helper_commit_modeset_disables(dev, old_state);
+ drm_atomic_helper_commit_modeset_disables(dev, state);
- drm_atomic_helper_commit_planes(dev, old_state, 0);
+ drm_atomic_helper_commit_planes(dev, state, 0);
- drm_atomic_helper_commit_modeset_enables(dev, old_state);
+ drm_atomic_helper_commit_modeset_enables(dev, state);
- drm_atomic_helper_fake_vblank(old_state);
+ drm_atomic_helper_fake_vblank(state);
- drm_atomic_helper_commit_hw_done(old_state);
+ drm_atomic_helper_commit_hw_done(state);
- drm_atomic_helper_wait_for_vblanks(dev, old_state);
+ drm_atomic_helper_wait_for_vblanks(dev, state);
- drm_atomic_helper_cleanup_planes(dev, old_state);
+ drm_atomic_helper_cleanup_planes(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
/**
* drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
- * @old_state: new modeset state to be committed
+ * @state: new modeset state to be committed
*
* This is an alternative implementation for the
* &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
@@ -1771,30 +1809,30 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
* commit. Otherwise, one should use the default implementation
* drm_atomic_helper_commit_tail().
*/
-void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
+void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *state)
{
- struct drm_device *dev = old_state->dev;
+ struct drm_device *dev = state->dev;
- drm_atomic_helper_commit_modeset_disables(dev, old_state);
+ drm_atomic_helper_commit_modeset_disables(dev, state);
- drm_atomic_helper_commit_modeset_enables(dev, old_state);
+ drm_atomic_helper_commit_modeset_enables(dev, state);
- drm_atomic_helper_commit_planes(dev, old_state,
+ drm_atomic_helper_commit_planes(dev, state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
- drm_atomic_helper_fake_vblank(old_state);
+ drm_atomic_helper_fake_vblank(state);
- drm_atomic_helper_commit_hw_done(old_state);
+ drm_atomic_helper_commit_hw_done(state);
- drm_atomic_helper_wait_for_vblanks(dev, old_state);
+ drm_atomic_helper_wait_for_vblanks(dev, state);
- drm_atomic_helper_cleanup_planes(dev, old_state);
+ drm_atomic_helper_cleanup_planes(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
-static void commit_tail(struct drm_atomic_state *old_state)
+static void commit_tail(struct drm_atomic_state *state)
{
- struct drm_device *dev = old_state->dev;
+ struct drm_device *dev = state->dev;
const struct drm_mode_config_helper_funcs *funcs;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
@@ -1816,33 +1854,33 @@ static void commit_tail(struct drm_atomic_state *old_state)
*/
start = ktime_get();
- drm_atomic_helper_wait_for_fences(dev, old_state, false);
+ drm_atomic_helper_wait_for_fences(dev, state, false);
- drm_atomic_helper_wait_for_dependencies(old_state);
+ drm_atomic_helper_wait_for_dependencies(state);
/*
* We cannot safely access new_crtc_state after
* drm_atomic_helper_commit_hw_done() so figure out which crtc's have
* self-refresh active beforehand:
*/
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
if (new_crtc_state->self_refresh_active)
new_self_refresh_mask |= BIT(i);
if (funcs && funcs->atomic_commit_tail)
- funcs->atomic_commit_tail(old_state);
+ funcs->atomic_commit_tail(state);
else
- drm_atomic_helper_commit_tail(old_state);
+ drm_atomic_helper_commit_tail(state);
commit_time_ms = ktime_ms_delta(ktime_get(), start);
if (commit_time_ms > 0)
- drm_self_refresh_helper_update_avg_times(old_state,
+ drm_self_refresh_helper_update_avg_times(state,
(unsigned long)commit_time_ms,
new_self_refresh_mask);
- drm_atomic_helper_commit_cleanup_done(old_state);
+ drm_atomic_helper_commit_cleanup_done(state);
- drm_atomic_state_put(old_state);
+ drm_atomic_state_put(state);
}
static void commit_work(struct work_struct *work)
@@ -1928,7 +1966,7 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
return -EBUSY;
}
- ret = funcs->atomic_async_check(plane, state);
+ ret = funcs->atomic_async_check(plane, state, false);
if (ret != 0)
drm_dbg_atomic(dev,
"[PLANE:%d:%s] driver async check failed\n",
@@ -2385,17 +2423,17 @@ EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
/**
* drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function waits for all preceding commits that touch the same CRTC as
- * @old_state to both be committed to the hardware (as signalled by
+ * @state to both be committed to the hardware (as signalled by
* drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
* by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
+void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -2406,7 +2444,7 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
int i;
long ret;
- for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
ret = drm_crtc_commit_wait(old_crtc_state->commit);
if (ret)
drm_err(crtc->dev,
@@ -2414,7 +2452,7 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
crtc->base.id, crtc->name);
}
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+ for_each_old_connector_in_state(state, conn, old_conn_state, i) {
ret = drm_crtc_commit_wait(old_conn_state->commit);
if (ret)
drm_err(conn->dev,
@@ -2422,7 +2460,7 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
conn->base.id, conn->name);
}
- for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
+ for_each_old_plane_in_state(state, plane, old_plane_state, i) {
ret = drm_crtc_commit_wait(old_plane_state->commit);
if (ret)
drm_err(plane->dev,
@@ -2434,7 +2472,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
/**
* drm_atomic_helper_fake_vblank - fake VBLANK events if needed
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function walks all CRTCs and fakes VBLANK events on those with
* &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
@@ -2450,32 +2488,32 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
+void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state)
{
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
int i;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
unsigned long flags;
if (!new_crtc_state->no_vblank)
continue;
- spin_lock_irqsave(&old_state->dev->event_lock, flags);
+ spin_lock_irqsave(&state->dev->event_lock, flags);
if (new_crtc_state->event) {
drm_crtc_send_vblank_event(crtc,
new_crtc_state->event);
new_crtc_state->event = NULL;
}
- spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
+ spin_unlock_irqrestore(&state->dev->event_lock, flags);
}
}
EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
/**
* drm_atomic_helper_commit_hw_done - setup possible nonblocking commit
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function is used to signal completion of the hardware commit step. After
* this step the driver is not allowed to read or change any permanent software
@@ -2488,14 +2526,14 @@ EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
+void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc_commit *commit;
int i;
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
commit = new_crtc_state->commit;
if (!commit)
continue;
@@ -2515,32 +2553,32 @@ void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
complete_all(&commit->hw_done);
}
- if (old_state->fake_commit) {
- complete_all(&old_state->fake_commit->hw_done);
- complete_all(&old_state->fake_commit->flip_done);
+ if (state->fake_commit) {
+ complete_all(&state->fake_commit->hw_done);
+ complete_all(&state->fake_commit->flip_done);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
/**
* drm_atomic_helper_commit_cleanup_done - signal completion of commit
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
- * This signals completion of the atomic update @old_state, including any
+ * This signals completion of the atomic update @state, including any
* cleanup work. If used, it must be called right before calling
* drm_atomic_state_put().
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
+void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_crtc_commit *commit;
int i;
- for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
commit = old_crtc_state->commit;
if (WARN_ON(!commit))
continue;
@@ -2553,9 +2591,9 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
spin_unlock(&crtc->commit_lock);
}
- if (old_state->fake_commit) {
- complete_all(&old_state->fake_commit->cleanup_done);
- WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done));
+ if (state->fake_commit) {
+ complete_all(&state->fake_commit->cleanup_done);
+ WARN_ON(!try_wait_for_completion(&state->fake_commit->hw_done));
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
@@ -2693,7 +2731,7 @@ static bool plane_crtc_active(const struct drm_plane_state *state)
/**
* drm_atomic_helper_commit_planes - commit plane state
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
* @flags: flags for committing plane state
*
* This function commits the new plane state using the plane and atomic helper
@@ -2701,7 +2739,7 @@ static bool plane_crtc_active(const struct drm_plane_state *state)
* been pushed into the relevant object state pointers, since this step can no
* longer fail.
*
- * It still requires the global state object @old_state to know which planes and
+ * It still requires the global state object @state to know which planes and
* crtcs need to be updated though.
*
* Note that this function does all plane updates across all CRTCs in one step.
@@ -2732,7 +2770,7 @@ static bool plane_crtc_active(const struct drm_plane_state *state)
* This should not be copied blindly by drivers.
*/
void drm_atomic_helper_commit_planes(struct drm_device *dev,
- struct drm_atomic_state *old_state,
+ struct drm_atomic_state *state,
uint32_t flags)
{
struct drm_crtc *crtc;
@@ -2743,7 +2781,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
funcs = crtc->helper_private;
@@ -2754,10 +2792,10 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
if (active_only && !new_crtc_state->active)
continue;
- funcs->atomic_begin(crtc, old_state);
+ funcs->atomic_begin(crtc, state);
}
- for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
bool disabling;
@@ -2795,18 +2833,18 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
no_disable)
continue;
- funcs->atomic_disable(plane, old_state);
+ funcs->atomic_disable(plane, state);
} else if (new_plane_state->crtc || disabling) {
- funcs->atomic_update(plane, old_state);
+ funcs->atomic_update(plane, state);
if (!disabling && funcs->atomic_enable) {
if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
- funcs->atomic_enable(plane, old_state);
+ funcs->atomic_enable(plane, state);
}
}
}
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
funcs = crtc->helper_private;
@@ -2817,14 +2855,14 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
if (active_only && !new_crtc_state->active)
continue;
- funcs->atomic_flush(crtc, old_state);
+ funcs->atomic_flush(crtc, state);
}
/*
* Signal end of framebuffer access here before hw_done. After hw_done,
* a later commit might have already released the plane state.
*/
- for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
+ for_each_old_plane_in_state(state, plane, old_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (funcs->end_fb_access)
@@ -2951,10 +2989,10 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
/**
* drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function cleans up plane state, specifically framebuffers, from the old
- * configuration. Hence the old configuration must be perserved in @old_state to
+ * configuration. Hence the old configuration must be preserved in @state to
* be able to call this function.
*
* This function may not be called on the new state when the atomic update
@@ -2962,13 +3000,13 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
* drm_atomic_helper_unprepare_planes() in this case.
*/
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_plane *plane;
struct drm_plane_state *old_plane_state;
int i;
- for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
+ for_each_old_plane_in_state(state, plane, old_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (funcs->cleanup_fb)
@@ -3363,6 +3401,47 @@ free:
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
/**
+ * drm_atomic_helper_reset_crtc - reset the active outputs of a CRTC
+ * @crtc: DRM CRTC
+ * @ctx: lock acquisition context
+ *
+ * Reset the active outputs by indicating that connectors have changed.
+ * This implies a reset of all active components available between the CRTC and
+ * connectors.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_atomic_helper_reset_crtc(struct drm_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out;
+ }
+
+ crtc_state->connectors_changed = true;
+
+ ret = drm_atomic_commit(state);
+out:
+ drm_atomic_state_put(state);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_reset_crtc);
+
+/**
* drm_atomic_helper_shutdown - shutdown all CRTC
* @dev: DRM device
*
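A minimal usage sketch for the new drm_atomic_helper_reset_crtc() helper, e.g. from a driver's link-retraining or hotplug path. The my_driver_retrain_crtc() wrapper and the choice of the DRM_MODESET_LOCK_ALL_BEGIN/END retry macros are illustrative assumptions, not part of this patch:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_modeset_lock.h>

static int my_driver_retrain_crtc(struct drm_crtc *crtc)
{
        struct drm_modeset_acquire_ctx ctx;
        int ret;

        /* Take the modeset locks with deadlock back-off, then force a
         * connectors_changed commit on this CRTC. */
        DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);
        ret = drm_atomic_helper_reset_crtc(crtc, &ctx);
        DRM_MODESET_LOCK_ALL_END(crtc->dev, ctx, ret);

        return ret;
}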
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 370dc676e3aa..2765ba90ad8f 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -27,8 +27,9 @@
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
-#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include <drm/drm_drv.h>
@@ -1063,6 +1064,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
struct drm_plane *plane = obj_to_plane(obj);
struct drm_plane_state *plane_state;
struct drm_mode_config *config = &plane->dev->mode_config;
+ const struct drm_plane_helper_funcs *plane_funcs = plane->helper_private;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
@@ -1070,15 +1072,30 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
- if (async_flip &&
- (plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY ||
- (prop != config->prop_fb_id &&
- prop != config->prop_in_fence_fd &&
- prop != config->prop_fb_damage_clips))) {
- ret = drm_atomic_plane_get_property(plane, plane_state,
- prop, &old_val);
- ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
- break;
+ if (async_flip) {
+ /* check if the prop does a nop change */
+ if ((prop != config->prop_fb_id &&
+ prop != config->prop_in_fence_fd &&
+ prop != config->prop_fb_damage_clips)) {
+ ret = drm_atomic_plane_get_property(plane, plane_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+ }
+
+ /* ask the driver if this non-primary plane is supported */
+ if (plane->type != DRM_PLANE_TYPE_PRIMARY) {
+ ret = -EINVAL;
+
+ if (plane_funcs && plane_funcs->atomic_async_check)
+ ret = plane_funcs->atomic_async_check(plane, state, true);
+
+ if (ret) {
+ drm_dbg_atomic(prop->dev,
+ "[PLANE:%d:%s] does not support async flips\n",
+ obj->id, plane->name);
+ break;
+ }
+ }
}
ret = drm_atomic_plane_set_property(plane,
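With the extra boolean argument, a driver can report per-plane async-flip capability when the property is set. A hedged sketch of such a hook; the my_plane_ prefix and the overlay-only policy are illustrative assumptions:

#include <drm/drm_atomic.h>
#include <drm/drm_plane.h>

static int my_plane_atomic_async_check(struct drm_plane *plane,
                                       struct drm_atomic_state *state, bool flip)
{
        /*
         * flip == true means the core is asking, from the async-flip uAPI
         * path above, whether this non-primary plane may be async-flipped
         * at all; here only overlay planes are allowed.
         */
        if (flip && plane->type != DRM_PLANE_TYPE_OVERLAY)
                return -EINVAL;

        return 0;
}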
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 241a384ebce3..fa2794217a90 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -550,7 +550,7 @@ EXPORT_SYMBOL(drm_bridge_chain_mode_set);
/**
* drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
* @bridge: bridge control structure
- * @old_state: old atomic state
+ * @state: atomic state being committed
*
* Calls &drm_bridge_funcs.atomic_disable (falls back on
* &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
@@ -560,7 +560,7 @@ EXPORT_SYMBOL(drm_bridge_chain_mode_set);
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_encoder *encoder;
struct drm_bridge *iter;
@@ -571,15 +571,7 @@ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
if (iter->funcs->atomic_disable) {
- struct drm_bridge_state *old_bridge_state;
-
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- iter);
- if (WARN_ON(!old_bridge_state))
- return;
-
- iter->funcs->atomic_disable(iter, old_bridge_state);
+ iter->funcs->atomic_disable(iter, state);
} else if (iter->funcs->disable) {
iter->funcs->disable(iter);
}
@@ -591,29 +583,19 @@ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
- if (old_state && bridge->funcs->atomic_post_disable) {
- struct drm_bridge_state *old_bridge_state;
-
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- bridge);
- if (WARN_ON(!old_bridge_state))
- return;
-
- bridge->funcs->atomic_post_disable(bridge,
- old_bridge_state);
- } else if (bridge->funcs->post_disable) {
+ if (state && bridge->funcs->atomic_post_disable)
+ bridge->funcs->atomic_post_disable(bridge, state);
+ else if (bridge->funcs->post_disable)
bridge->funcs->post_disable(bridge);
- }
}
/**
* drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
* in the encoder chain
* @bridge: bridge control structure
- * @old_state: old atomic state
+ * @state: atomic state being committed
*
* Calls &drm_bridge_funcs.atomic_post_disable (falls back on
* &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
@@ -634,7 +616,7 @@ static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_encoder *encoder;
struct drm_bridge *next, *limit;
@@ -681,12 +663,12 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
break;
drm_atomic_bridge_call_post_disable(next,
- old_state);
+ state);
}
}
}
- drm_atomic_bridge_call_post_disable(bridge, old_state);
+ drm_atomic_bridge_call_post_disable(bridge, state);
if (limit)
/* Jump all bridges that we have already post_disabled */
@@ -696,28 +678,19 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
- if (old_state && bridge->funcs->atomic_pre_enable) {
- struct drm_bridge_state *old_bridge_state;
-
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- bridge);
- if (WARN_ON(!old_bridge_state))
- return;
-
- bridge->funcs->atomic_pre_enable(bridge, old_bridge_state);
- } else if (bridge->funcs->pre_enable) {
+ if (state && bridge->funcs->atomic_pre_enable)
+ bridge->funcs->atomic_pre_enable(bridge, state);
+ else if (bridge->funcs->pre_enable)
bridge->funcs->pre_enable(bridge);
- }
}
/**
* drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
* the encoder chain
* @bridge: bridge control structure
- * @old_state: old atomic state
+ * @state: atomic state being committed
*
* Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
* &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
@@ -737,7 +710,7 @@ static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_encoder *encoder;
struct drm_bridge *iter, *next, *limit;
@@ -776,11 +749,11 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
*/
break;
- drm_atomic_bridge_call_pre_enable(next, old_state);
+ drm_atomic_bridge_call_pre_enable(next, state);
}
}
- drm_atomic_bridge_call_pre_enable(iter, old_state);
+ drm_atomic_bridge_call_pre_enable(iter, state);
if (iter->pre_enable_prev_first)
/* Jump all bridges that we have already pre_enabled */
@@ -795,7 +768,7 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
/**
* drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
* @bridge: bridge control structure
- * @old_state: old atomic state
+ * @state: atomic state being committed
*
* Calls &drm_bridge_funcs.atomic_enable (falls back on
* &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
@@ -805,7 +778,7 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_encoder *encoder;
@@ -815,15 +788,7 @@ void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
if (bridge->funcs->atomic_enable) {
- struct drm_bridge_state *old_bridge_state;
-
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- bridge);
- if (WARN_ON(!old_bridge_state))
- return;
-
- bridge->funcs->atomic_enable(bridge, old_bridge_state);
+ bridge->funcs->atomic_enable(bridge, state);
} else if (bridge->funcs->enable) {
bridge->funcs->enable(bridge);
}
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 103c185bb1c8..241c855f891f 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -3,6 +3,8 @@
* Copyright © 2021 Intel Corporation
*/
+#include <kunit/test-bug.h>
+
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>
@@ -324,7 +326,7 @@ EXPORT_SYMBOL(drm_buddy_init);
*/
void drm_buddy_fini(struct drm_buddy *mm)
{
- u64 root_size, size;
+ u64 root_size, size, start;
unsigned int order;
int i;
@@ -332,9 +334,12 @@ void drm_buddy_fini(struct drm_buddy *mm)
for (i = 0; i < mm->n_roots; ++i) {
order = ilog2(size) - ilog2(mm->chunk_size);
- __force_merge(mm, 0, size, order);
+ start = drm_buddy_block_offset(mm->roots[i]);
+ __force_merge(mm, start, start + size, order);
+
+ if (WARN_ON(!drm_buddy_block_is_free(mm->roots[i])))
+ kunit_fail_current_test("buddy_fini() root");
- WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
drm_block_free(mm, mm->roots[i]);
root_size = mm->chunk_size << order;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 3488ff067c69..46655339003d 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -939,3 +939,23 @@ int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
return 0;
}
EXPORT_SYMBOL(drm_crtc_create_scaling_filter_property);
+
+/**
+ * drm_crtc_in_clone_mode - check if the given CRTC state is in clone mode
+ *
+ * @crtc_state: CRTC state to check
+ *
+ * This function determines if the given CRTC state is being cloned by multiple
+ * encoders.
+ *
+ * RETURNS:
+ * True if the CRTC state is in clone mode, false otherwise.
+ */
+bool drm_crtc_in_clone_mode(struct drm_crtc_state *crtc_state)
+{
+ if (!crtc_state)
+ return false;
+
+ return hweight32(crtc_state->encoder_mask) > 1;
+}
+EXPORT_SYMBOL(drm_crtc_in_clone_mode);
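A short sketch of how a driver's atomic check might use the new predicate to reject cloning it cannot support; my_crtc_atomic_check() and the blanket -EINVAL policy are illustrative assumptions:

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>

static int my_crtc_atomic_check(struct drm_crtc *crtc,
                                struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state =
                drm_atomic_get_new_crtc_state(state, crtc);

        /* Hardware without independent encoder timing cannot clone. */
        if (drm_crtc_in_clone_mode(crtc_state))
                return -EINVAL;

        return 0;
}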
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index 8059f65c5d6c..bae73936acf9 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -43,7 +43,7 @@ enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode);
int
drm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_modeset_acquire_ctx *ctx,
enum drm_mode_status *status);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 536409a35df4..6b2178864c7e 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -748,7 +748,7 @@ static int bridges_show(struct seq_file *m, void *data)
unsigned int idx = 0;
drm_for_each_bridge_in_chain(encoder, bridge) {
- drm_printf(&p, "bridge[%d]: %ps\n", idx++, bridge->funcs);
+ drm_printf(&p, "bridge[%u]: %ps\n", idx++, bridge->funcs);
drm_printf(&p, "\ttype: [%d] %s\n",
bridge->type,
drm_get_connector_type_name(bridge->type));
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 3cf440eee8a2..17fc5dc708f4 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -26,6 +26,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/bitops.h>
#include <linux/cgroup_dmem.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
@@ -34,6 +35,7 @@
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/slab.h>
+#include <linux/sprintf.h>
#include <linux/srcu.h>
#include <linux/xarray.h>
@@ -499,6 +501,72 @@ void drm_dev_unplug(struct drm_device *dev)
EXPORT_SYMBOL(drm_dev_unplug);
/*
+ * Available recovery methods for a wedged device, to be sent along with the
+ * device wedged uevent.
+ */
+static const char *drm_get_wedge_recovery(unsigned int opt)
+{
+ switch (BIT(opt)) {
+ case DRM_WEDGE_RECOVERY_NONE:
+ return "none";
+ case DRM_WEDGE_RECOVERY_REBIND:
+ return "rebind";
+ case DRM_WEDGE_RECOVERY_BUS_RESET:
+ return "bus-reset";
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * drm_dev_wedged_event - generate a device wedged uevent
+ * @dev: DRM device
+ * @method: method(s) to be used for recovery
+ *
+ * This generates a device wedged uevent for the DRM device specified by @dev.
+ * Recovery @method\(s) of choice will be sent in the uevent environment as
+ * ``WEDGED=<method1>[,..,<methodN>]`` in order of least to most side-effects.
+ * If the caller is unsure about recovery or @method is unknown (0),
+ * ``WEDGED=unknown`` will be sent instead.
+ *
+ * Refer to "Device Wedging" chapter in Documentation/gpu/drm-uapi.rst for more
+ * details.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int drm_dev_wedged_event(struct drm_device *dev, unsigned long method)
+{
+ const char *recovery = NULL;
+ unsigned int len, opt;
+ /* Event string length up to 28+ characters with available methods */
+ char event_string[32];
+ char *envp[] = { event_string, NULL };
+
+ len = scnprintf(event_string, sizeof(event_string), "%s", "WEDGED=");
+
+ for_each_set_bit(opt, &method, BITS_PER_TYPE(method)) {
+ recovery = drm_get_wedge_recovery(opt);
+ if (drm_WARN_ONCE(dev, !recovery, "invalid recovery method %u\n", opt))
+ break;
+
+ len += scnprintf(event_string + len, sizeof(event_string) - len, "%s,", recovery);
+ }
+
+ if (recovery)
+ /* Get rid of trailing comma */
+ event_string[len - 1] = '\0';
+ else
+ /* Caller is unsure about recovery, do the best we can at this point. */
+ snprintf(event_string, sizeof(event_string), "%s", "WEDGED=unknown");
+
+ drm_info(dev, "device wedged, %s\n", method == DRM_WEDGE_RECOVERY_NONE ?
+ "but recovered through reset" : "needs recovery");
+
+ return kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
+}
+EXPORT_SYMBOL(drm_dev_wedged_event);
+
+/*
* DRM internal mount
* We want to be able to allocate our own "struct address_space" to control
* memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
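A hedged example of how a driver's hang handling might report the wedged state. The handler name and the header choice are assumptions; the DRM_WEDGE_RECOVERY_* flags are the ones handled by drm_get_wedge_recovery() above:

#include <drm/drm_drv.h>

static void my_driver_declare_wedged(struct drm_device *dev)
{
        /*
         * Userspace is told the device needs recovery, either by rebinding
         * the driver or, as a bigger hammer, by resetting the bus; per the
         * documented ordering the uevent carries WEDGED=rebind,bus-reset.
         */
        drm_dev_wedged_event(dev, DRM_WEDGE_RECOVERY_REBIND |
                                  DRM_WEDGE_RECOVERY_BUS_RESET);
}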
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 2289e71e2fa2..c299cd94d3f7 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -830,8 +830,11 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
}
EXPORT_SYMBOL(drm_send_event);
-static void print_size(struct drm_printer *p, const char *stat,
- const char *region, u64 sz)
+void drm_fdinfo_print_size(struct drm_printer *p,
+ const char *prefix,
+ const char *stat,
+ const char *region,
+ u64 sz)
{
const char *units[] = {"", " KiB", " MiB"};
unsigned u;
@@ -842,8 +845,10 @@ static void print_size(struct drm_printer *p, const char *stat,
sz = div_u64(sz, SZ_1K);
}
- drm_printf(p, "drm-%s-%s:\t%llu%s\n", stat, region, sz, units[u]);
+ drm_printf(p, "%s-%s-%s:\t%llu%s\n",
+ prefix, stat, region, sz, units[u]);
}
+EXPORT_SYMBOL(drm_fdinfo_print_size);
int drm_memory_stats_is_zero(const struct drm_memory_stats *stats)
{
@@ -868,17 +873,22 @@ void drm_print_memory_stats(struct drm_printer *p,
enum drm_gem_object_status supported_status,
const char *region)
{
- print_size(p, "total", region, stats->private + stats->shared);
- print_size(p, "shared", region, stats->shared);
+ const char *prefix = "drm";
+
+ drm_fdinfo_print_size(p, prefix, "total", region,
+ stats->private + stats->shared);
+ drm_fdinfo_print_size(p, prefix, "shared", region, stats->shared);
if (supported_status & DRM_GEM_OBJECT_ACTIVE)
- print_size(p, "active", region, stats->active);
+ drm_fdinfo_print_size(p, prefix, "active", region, stats->active);
if (supported_status & DRM_GEM_OBJECT_RESIDENT)
- print_size(p, "resident", region, stats->resident);
+ drm_fdinfo_print_size(p, prefix, "resident", region,
+ stats->resident);
if (supported_status & DRM_GEM_OBJECT_PURGEABLE)
- print_size(p, "purgeable", region, stats->purgeable);
+ drm_fdinfo_print_size(p, prefix, "purgeable", region,
+ stats->purgeable);
}
EXPORT_SYMBOL(drm_print_memory_stats);
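The newly exported drm_fdinfo_print_size() lets drivers emit fdinfo keys with a prefix other than "drm", wired up through the driver's show_fdinfo hook. A sketch; the "amd" prefix, the "evicted"/"vram" key, my_show_fdinfo() and the SZ_1M placeholder value are illustrative assumptions:

#include <linux/sizes.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

static void my_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
        u64 evicted = 123 * SZ_1M;      /* value from driver bookkeeping */

        /* Prints "amd-evicted-vram:\t123 MiB" using the shared size formatting. */
        drm_fdinfo_print_size(p, "amd", "evicted", "vram", evicted);
}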
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index b1be458ed4dd..ecb278b63e8c 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -978,6 +978,75 @@ void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pit
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
+static void drm_fb_argb8888_to_argb4444_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ unsigned int pixels2 = pixels & ~GENMASK_ULL(0, 0);
+ __le32 *dbuf32 = dbuf;
+ __le16 *dbuf16 = dbuf + pixels2 * sizeof(*dbuf16);
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u32 val32;
+ u16 val16;
+ u32 pix[2];
+
+ for (x = 0; x < pixels2; x += 2, ++dbuf32) {
+ pix[0] = le32_to_cpu(sbuf32[x]);
+ pix[1] = le32_to_cpu(sbuf32[x + 1]);
+ val32 = ((pix[0] & 0xf0000000) >> 16) |
+ ((pix[0] & 0x00f00000) >> 12) |
+ ((pix[0] & 0x0000f000) >> 8) |
+ ((pix[0] & 0x000000f0) >> 4) |
+ ((pix[1] & 0xf0000000) >> 0) |
+ ((pix[1] & 0x00f00000) << 4) |
+ ((pix[1] & 0x0000f000) << 8) |
+ ((pix[1] & 0x000000f0) << 12);
+ *dbuf32 = cpu_to_le32(val32);
+ }
+ for (; x < pixels; x++) {
+ pix[0] = le32_to_cpu(sbuf32[x]);
+ val16 = ((pix[0] & 0xf0000000) >> 16) |
+ ((pix[0] & 0x00f00000) >> 12) |
+ ((pix[0] & 0x0000f000) >> 8) |
+ ((pix[0] & 0x000000f0) >> 4);
+ dbuf16[x - pixels2] = cpu_to_le16(val16);
+ }
+}
+
+/**
+ * drm_fb_argb8888_to_argb4444 - Convert ARGB8888 to ARGB4444 clip buffer
+ * @dst: Array of ARGB4444 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of ARGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts
+ * the color format during the process. The parameters @dst, @dst_pitch and
+ * @src refer to arrays. Each array must have at least as many entries as
+ * there are planes in @fb's format. Each entry stores the value for the
+ * format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for ARGB4444 devices that don't support
+ * ARGB8888 natively.
+ */
+void drm_fb_argb8888_to_argb4444(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_argb8888_to_argb4444_line);
+}
+EXPORT_SYMBOL(drm_fb_argb8888_to_argb4444);
+
/**
* drm_fb_blit - Copy parts of a framebuffer to display memory
* @dst: Array of display-memory addresses to copy to
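A hedged fragment showing the new conversion in a single-plane damage handler; my_fb_flush() and the surrounding plumbing are assumptions, only the helper call itself comes from this patch. Scanlines in the destination are assumed packed, so the pitch argument may be NULL as documented:

#include <linux/iosys-map.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_rect.h>

static void my_fb_flush(struct iosys_map *dst, const struct iosys_map *src,
                        const struct drm_framebuffer *fb,
                        const struct drm_rect *clip,
                        struct drm_format_conv_state *fmtcnv_state)
{
        /* Convert only the damaged region; NULL pitch means packed scanlines. */
        drm_fb_argb8888_to_argb4444(dst, NULL, src, fb, clip, fmtcnv_state);
}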
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
index 79ce86a5bd67..cc4c463daae7 100644
--- a/drivers/gpu/drm/drm_managed.c
+++ b/drivers/gpu/drm/drm_managed.c
@@ -310,3 +310,11 @@ void __drmm_mutex_release(struct drm_device *dev, void *res)
mutex_destroy(lock);
}
EXPORT_SYMBOL(__drmm_mutex_release);
+
+void __drmm_workqueue_release(struct drm_device *device, void *res)
+{
+ struct workqueue_struct *wq = res;
+
+ destroy_workqueue(wq);
+}
+EXPORT_SYMBOL(__drmm_workqueue_release);
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 5530919e0ba0..d0183dea7703 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -268,9 +268,9 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
*panel = NULL;
}
- /* No panel found yet, check for a bridge next. */
if (bridge) {
if (ret) {
+ /* No panel found yet, check for a bridge next. */
*bridge = of_drm_find_bridge(remote);
if (*bridge)
ret = 0;
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 9940e96d35e3..c627e42a7ce7 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -50,7 +50,7 @@ static LIST_HEAD(panel_list);
* @dev: parent device of the panel
* @funcs: panel operations
* @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to
- * the panel interface
+ * the panel interface (must NOT be DRM_MODE_CONNECTOR_Unknown)
*
* Initialize the panel structure for subsequent registration with
* drm_panel_add().
@@ -58,6 +58,9 @@ static LIST_HEAD(panel_list);
void drm_panel_init(struct drm_panel *panel, struct device *dev,
const struct drm_panel_funcs *funcs, int connector_type)
{
+ if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+ DRM_WARN("%s: %s: a valid connector type is required!\n", __func__, dev_name(dev));
+
INIT_LIST_HEAD(&panel->list);
INIT_LIST_HEAD(&panel->followers);
mutex_init(&panel->follower_lock);
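Given the new warning, panel drivers must pass a real connector type to drm_panel_init(). A minimal sketch; struct my_panel, my_panel_funcs and the DSI choice are illustrative:

#include <drm/drm_connector.h>
#include <drm/drm_panel.h>

struct my_panel {
        struct drm_panel panel;
};

static const struct drm_panel_funcs my_panel_funcs;

static void my_panel_setup(struct my_panel *ctx, struct device *dev)
{
        /* Pass the real connector type; DRM_MODE_CONNECTOR_Unknown now warns. */
        drm_panel_init(&ctx->panel, dev, &my_panel_funcs, DRM_MODE_CONNECTOR_DSI);
}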
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 4a73821b81f6..c554ad8f246b 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -93,6 +93,12 @@ static const struct drm_dmi_panel_orientation_data onegx1_pro = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd640x960_leftside_up = {
+ .width = 640,
+ .height = 960,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
.width = 720,
.height = 1280,
@@ -123,6 +129,12 @@ static const struct drm_dmi_panel_orientation_data lcd1080x1920_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1200x1920_leftside_up = {
+ .width = 1200,
+ .height = 1920,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.width = 1200,
.height = 1920,
@@ -184,10 +196,10 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
- }, { /* AYA NEO AYANEO 2 */
+ }, { /* AYA NEO AYANEO 2/2S */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* AYA NEO 2021 */
@@ -202,6 +214,18 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "AIR"),
},
.driver_data = (void *)&lcd1080x1920_leftside_up,
+ }, { /* AYA NEO Flip DS Bottom Screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FLIP DS"),
+ },
+ .driver_data = (void *)&lcd640x960_leftside_up,
+ }, { /* AYA NEO Flip KB/DS Top Screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FLIP"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* AYA NEO Founder */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYA NEO"),
@@ -226,6 +250,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_BOARD_NAME, "KUN"),
},
.driver_data = (void *)&lcd1600x2560_rightside_up,
+ }, { /* AYA NEO SLIDE */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SLIDE"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* AYN Loki Max */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"),
@@ -315,6 +345,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
},
.driver_data = (void *)&gpd_win2,
+ }, { /* GPD Win 2 (correct DMI strings) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "WIN2")
+ },
+ .driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* GPD Win 3 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
@@ -443,6 +479,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
},
.driver_data = (void *)&lcd1600x2560_leftside_up,
+ }, { /* OneXPlayer Mini (Intel) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONE-NETBOOK TECHNOLOGY CO., LTD."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
+ },
+ .driver_data = (void *)&lcd1200x1920_leftside_up,
}, { /* OrangePi Neo */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index f128d345b16d..ab42a2b1567d 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -499,7 +499,7 @@ static int drm_panic_get_qr_code_url(u8 **qr_image)
char *kmsg;
int max_qr_data_size, url_len;
- url_len = snprintf(url, sizeof(url), CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL "?a=%s&v=%s&zl=",
+ url_len = snprintf(url, sizeof(url), CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL "?a=%s&v=%s&z=",
utsname()->machine, utsname()->release);
max_qr_data_size = drm_panic_qr_max_data_size(panic_qr_version, url_len);
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index bcf248f69252..62cb8a162483 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -13,12 +13,13 @@
//! The binary data must be a valid URL parameter, so the easiest way is
//! to use base64 encoding. But this wastes 25% of data space, so the
//! whole stack trace won't fit in the QR code. So instead it encodes
-//! every 13bits of input into 4 decimal digits, and then uses the
+//! every 7 bytes of input into 17 decimal digits, and then uses the
//! efficient numeric encoding, that encode 3 decimal digits into
-//! 10bits. This makes 39bits of compressed data into 12 decimal digits,
-//! into 40bits in the QR code, so wasting only 2.5%. And the numbers are
+//! 10bits. This makes 168bits of compressed data into 51 decimal digits,
+//! into 170bits in the QR code, so wasting only 1.17%. And the numbers are
//! valid URL parameter, so the website can do the reverse, to get the
-//! binary data.
+//! binary data. This is the same algorithm used by the FIDO v2.2 QR-initiated
+//! authentication specification.
//!
//! Inspired by these 3 projects, all under MIT license:
//!
@@ -26,7 +27,6 @@
//! * <https://github.com/erwanvivien/fast_qr>
//! * <https://github.com/bjguillot/qr>
-use core::cmp;
use kernel::str::CStr;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
@@ -296,35 +296,11 @@ const MODE_BINARY: u16 = 4;
/// Padding bytes.
const PADDING: [u8; 2] = [236, 17];
-/// Get the next 13 bits of data, starting at specified offset (in bits).
-fn get_next_13b(data: &[u8], offset: usize) -> Option<(u16, usize)> {
- if offset < data.len() * 8 {
- let size = cmp::min(13, data.len() * 8 - offset);
- let byte_off = offset / 8;
- let bit_off = offset % 8;
- // `b` is 20 at max (`bit_off` <= 7 and `size` <= 13).
- let b = (bit_off + size) as u16;
-
- let first_byte = (data[byte_off] << bit_off >> bit_off) as u16;
-
- let number = match b {
- 0..=8 => first_byte >> (8 - b),
- 9..=16 => (first_byte << (b - 8)) + (data[byte_off + 1] >> (16 - b)) as u16,
- _ => {
- (first_byte << (b - 8))
- + ((data[byte_off + 1] as u16) << (b - 16))
- + (data[byte_off + 2] >> (24 - b)) as u16
- }
- };
- Some((number, size))
- } else {
- None
- }
-}
-
/// Number of bits to encode characters in numeric mode.
const NUM_CHARS_BITS: [usize; 4] = [0, 4, 7, 10];
-const POW10: [u16; 4] = [1, 10, 100, 1000];
+/// Number of decimal digits required to encode n bytes of binary data.
+/// e.g. you need 15 decimal digits to fit 6 bytes of binary data.
+const BYTES_TO_DIGITS: [usize; 8] = [0, 3, 5, 8, 10, 13, 15, 17];
enum Segment<'a> {
Numeric(&'a [u8]),
@@ -360,13 +336,9 @@ impl Segment<'_> {
match self {
Segment::Binary(data) => data.len(),
Segment::Numeric(data) => {
- let data_bits = data.len() * 8;
- let last_chars = match data_bits % 13 {
- 1 => 1,
- k => (k + 1) / 3,
- };
- // 4 decimal numbers per 13bits + remainder.
- 4 * (data_bits / 13) + last_chars
+ let last_chars = BYTES_TO_DIGITS[data.len() % 7];
+ // 17 decimal digits per 7 bytes + remainder.
+ 17 * (data.len() / 7) + last_chars
}
}
}
@@ -403,7 +375,7 @@ impl Segment<'_> {
struct SegmentIterator<'a> {
segment: &'a Segment<'a>,
offset: usize,
- carry: u16,
+ carry: u64,
carry_len: usize,
}
@@ -422,40 +394,30 @@ impl Iterator for SegmentIterator<'_> {
}
}
Segment::Numeric(data) => {
- if self.carry_len == 3 {
- let out = (self.carry, NUM_CHARS_BITS[self.carry_len]);
- self.carry_len = 0;
- self.carry = 0;
- Some(out)
- } else if let Some((bits, size)) = get_next_13b(data, self.offset) {
- self.offset += size;
- let new_chars = match size {
- 1 => 1,
- k => (k + 1) / 3,
- };
- if self.carry_len + new_chars > 3 {
- self.carry_len = new_chars + self.carry_len - 3;
- let out = (
- self.carry * POW10[new_chars - self.carry_len]
- + bits / POW10[self.carry_len],
- NUM_CHARS_BITS[3],
- );
- self.carry = bits % POW10[self.carry_len];
- Some(out)
- } else {
- let out = (
- self.carry * POW10[new_chars] + bits,
- NUM_CHARS_BITS[self.carry_len + new_chars],
- );
- self.carry_len = 0;
- Some(out)
+ if self.carry_len < 3 && self.offset < data.len() {
+ // If there are less than 3 decimal digits in the carry,
+ // take the next 7 bytes of input, and add them to the carry.
+ let mut buf = [0u8; 8];
+ let len = 7.min(data.len() - self.offset);
+ buf[..len].copy_from_slice(&data[self.offset..self.offset + len]);
+ let chunk = u64::from_le_bytes(buf);
+ let pow = u64::pow(10, BYTES_TO_DIGITS[len] as u32);
+ self.carry = chunk + self.carry * pow;
+ self.offset += len;
+ self.carry_len += BYTES_TO_DIGITS[len];
+ }
+ match self.carry_len {
+ 0 => None,
+ len => {
+ // take the next 3 decimal digits of the carry
+ // and return 10bits of numeric data.
+ let out_len = 3.min(len);
+ self.carry_len -= out_len;
+ let pow = u64::pow(10, self.carry_len as u32);
+ let out = (self.carry / pow) as u16;
+ self.carry = self.carry % pow;
+ Some((out, NUM_CHARS_BITS[out_len]))
}
- } else if self.carry_len > 0 {
- let out = (self.carry, NUM_CHARS_BITS[self.carry_len]);
- self.carry_len = 0;
- Some(out)
- } else {
- None
}
}
}
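A short worked version of the arithmetic behind the new encoding described in the drm_panic_qr.rs module comment and the BYTES_TO_DIGITS table above; only the rounding of the overhead percentage is approximate:

\[
\begin{aligned}
\mathrm{BYTES\_TO\_DIGITS}[n] &= \lceil \log_{10} 2^{8n} \rceil, \qquad
  \lceil 56 \log_{10} 2 \rceil = \lceil 16.86 \rceil = 17,\\
2^{56} &\approx 7.2\times10^{16} < 10^{17}
  \;\Rightarrow\; \text{7 bytes always fit in 17 decimal digits},\\
3 \times 56\ \text{bits} &= 168\ \text{bits}
  \;\rightarrow\; 3 \times 17 = 51\ \text{digits}
  \;\rightarrow\; \tfrac{51}{3} \times 10 = 170\ \text{bits},\\
\text{overhead} &= \tfrac{2}{170} \approx 1.17\%
  \quad\text{(the old 13-bit scheme wasted } \tfrac{1}{40} = 2.5\%\text{)}.
\end{aligned}
\]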
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 96b266b37ba4..7ba16323e7c2 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -202,7 +202,7 @@ enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
int
drm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_modeset_acquire_ctx *ctx,
enum drm_mode_status *status)
{
@@ -338,10 +338,23 @@ void drm_kms_helper_poll_reschedule(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_reschedule);
+static int detect_connector_status(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ const struct drm_connector_helper_funcs *funcs = connector->helper_private;
+
+ if (funcs->detect_ctx)
+ return funcs->detect_ctx(connector, ctx, force);
+ else if (connector->funcs->detect)
+ return connector->funcs->detect(connector, force);
+
+ return connector_status_connected;
+}
+
static enum drm_connector_status
drm_helper_probe_detect_ctx(struct drm_connector *connector, bool force)
{
- const struct drm_connector_helper_funcs *funcs = connector->helper_private;
struct drm_modeset_acquire_ctx ctx;
int ret;
@@ -349,14 +362,8 @@ drm_helper_probe_detect_ctx(struct drm_connector *connector, bool force)
retry:
ret = drm_modeset_lock(&connector->dev->mode_config.connection_mutex, &ctx);
- if (!ret) {
- if (funcs->detect_ctx)
- ret = funcs->detect_ctx(connector, &ctx, force);
- else if (connector->funcs->detect)
- ret = connector->funcs->detect(connector, force);
- else
- ret = connector_status_connected;
- }
+ if (!ret)
+ ret = detect_connector_status(connector, &ctx, force);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
@@ -390,7 +397,6 @@ drm_helper_probe_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
- const struct drm_connector_helper_funcs *funcs = connector->helper_private;
struct drm_device *dev = connector->dev;
int ret;
@@ -401,12 +407,7 @@ drm_helper_probe_detect(struct drm_connector *connector,
if (ret)
return ret;
- if (funcs->detect_ctx)
- ret = funcs->detect_ctx(connector, ctx, force);
- else if (connector->funcs->detect)
- ret = connector->funcs->detect(connector, force);
- else
- ret = connector_status_connected;
+ ret = detect_connector_status(connector, ctx, force);
if (ret != connector->status)
connector->epoch_counter += 1;
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index 33a3c98a962d..f139b49af4c9 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -15,6 +15,7 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_property.h>
#include <drm/drm_writeback.h>
@@ -195,6 +196,22 @@ int drm_writeback_connector_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_writeback_connector_init);
+static void delete_writeback_properties(struct drm_device *dev)
+{
+ if (dev->mode_config.writeback_pixel_formats_property) {
+ drm_property_destroy(dev, dev->mode_config.writeback_pixel_formats_property);
+ dev->mode_config.writeback_pixel_formats_property = NULL;
+ }
+ if (dev->mode_config.writeback_out_fence_ptr_property) {
+ drm_property_destroy(dev, dev->mode_config.writeback_out_fence_ptr_property);
+ dev->mode_config.writeback_out_fence_ptr_property = NULL;
+ }
+ if (dev->mode_config.writeback_fb_id_property) {
+ drm_property_destroy(dev, dev->mode_config.writeback_fb_id_property);
+ dev->mode_config.writeback_fb_id_property = NULL;
+ }
+}
+
/**
* drm_writeback_connector_init_with_encoder - Initialize a writeback connector with
* a custom encoder
@@ -202,7 +219,6 @@ EXPORT_SYMBOL(drm_writeback_connector_init);
* @dev: DRM device
* @wb_connector: Writeback connector to initialize
* @enc: handle to the already initialized drm encoder
- * @con_funcs: Connector funcs vtable
* @formats: Array of supported pixel formats for the writeback engine
* @n_formats: Length of the formats array
*
@@ -218,41 +234,33 @@ EXPORT_SYMBOL(drm_writeback_connector_init);
* assigning the encoder helper functions, possible_crtcs and any other encoder
* specific operation.
*
- * Drivers should always use this function instead of drm_connector_init() to
- * set up writeback connectors if they want to manage themselves the lifetime of the
- * associated encoder.
- *
* Returns: 0 on success, or a negative error code
*/
-int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
- struct drm_writeback_connector *wb_connector, struct drm_encoder *enc,
- const struct drm_connector_funcs *con_funcs, const u32 *formats,
- int n_formats)
+static int __drm_writeback_connector_init(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ struct drm_encoder *enc, const u32 *formats,
+ int n_formats)
{
- struct drm_property_blob *blob;
struct drm_connector *connector = &wb_connector->base;
struct drm_mode_config *config = &dev->mode_config;
+ struct drm_property_blob *blob;
int ret = create_writeback_properties(dev);
if (ret != 0)
- return ret;
-
- blob = drm_property_create_blob(dev, n_formats * sizeof(*formats),
- formats);
- if (IS_ERR(blob))
- return PTR_ERR(blob);
-
+ goto failed_properties;
connector->interlace_allowed = 0;
- ret = drm_connector_init(dev, connector, con_funcs,
- DRM_MODE_CONNECTOR_WRITEBACK);
- if (ret)
- goto connector_fail;
-
ret = drm_connector_attach_encoder(connector, enc);
if (ret)
- goto attach_fail;
+ goto failed_properties;
+
+ blob = drm_property_create_blob(dev, n_formats * sizeof(*formats),
+ formats);
+ if (IS_ERR(blob)) {
+ ret = PTR_ERR(blob);
+ goto failed_properties;
+ }
INIT_LIST_HEAD(&wb_connector->job_queue);
spin_lock_init(&wb_connector->job_lock);
@@ -275,15 +283,137 @@ int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
wb_connector->pixel_formats_blob_ptr = blob;
return 0;
+failed_properties:
+ delete_writeback_properties(dev);
+ return ret;
+}
+
+/**
+ * drm_writeback_connector_init_with_encoder - Initialize a writeback connector with
+ * a custom encoder
+ *
+ * @dev: DRM device
+ * @wb_connector: Writeback connector to initialize
+ * @enc: handle to the already initialized drm encoder
+ * @con_funcs: Connector funcs vtable
+ * @formats: Array of supported pixel formats for the writeback engine
+ * @n_formats: Length of the formats array
+ *
+ * This function creates the writeback-connector-specific properties if they
+ * have not been already created, initializes the connector as
+ * type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
+ * values.
+ *
+ * This function assumes that the drm_writeback_connector's encoder has already been
+ * created and initialized before invoking this function.
+ *
+ * In addition, this function also assumes that callers of this API will manage
+ * assigning the encoder helper functions, possible_crtcs and any other encoder
+ * specific operation.
+ *
+ * Drivers should always use this function instead of drm_connector_init() to
+ * set up writeback connectors if they want to manage themselves the lifetime of the
+ * associated encoder.
+ *
+ * Returns: 0 on success, or a negative error code
+ */
+int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ struct drm_encoder *enc,
+ const struct drm_connector_funcs *con_funcs,
+ const u32 *formats, int n_formats)
+{
+ struct drm_connector *connector = &wb_connector->base;
+ int ret;
+
+ ret = drm_connector_init(dev, connector, con_funcs,
+ DRM_MODE_CONNECTOR_WRITEBACK);
+ if (ret)
+ return ret;
+
+ ret = __drm_writeback_connector_init(dev, wb_connector, enc, formats,
+ n_formats);
+ if (ret)
+ drm_connector_cleanup(connector);
-attach_fail:
- drm_connector_cleanup(connector);
-connector_fail:
- drm_property_blob_put(blob);
return ret;
}
EXPORT_SYMBOL(drm_writeback_connector_init_with_encoder);
+/**
+ * drm_writeback_connector_cleanup - Cleanup the writeback connector
+ * @dev: DRM device
+ * @wb_connector: Pointer to the writeback connector to clean up
+ *
+ * This will decrement the reference counter of blobs and destroy properties. It
+ * will also clean up the remaining jobs in this writeback connector. Caution: this
+ * helper will not clean up the attached encoder and the drm_connector.
+ */
+static void drm_writeback_connector_cleanup(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector)
+{
+ unsigned long flags;
+ struct drm_writeback_job *pos, *n;
+
+ delete_writeback_properties(dev);
+ drm_property_blob_put(wb_connector->pixel_formats_blob_ptr);
+
+ spin_lock_irqsave(&wb_connector->job_lock, flags);
+ list_for_each_entry_safe(pos, n, &wb_connector->job_queue, list_entry) {
+ list_del(&pos->list_entry);
+ drm_writeback_cleanup_job(pos);
+ }
+ spin_unlock_irqrestore(&wb_connector->job_lock, flags);
+}
+
+/**
+ * drmm_writeback_connector_init - Initialize a writeback connector with
+ * a custom encoder
+ *
+ * @dev: DRM device
+ * @wb_connector: Writeback connector to initialize
+ * @con_funcs: Connector funcs vtable
+ * @enc: Encoder to connect this writeback connector
+ * @formats: Array of supported pixel formats for the writeback engine
+ * @n_formats: Length of the formats array
+ *
+ * This function initializes a writeback connector and registers its cleanup.
+ *
+ * This function creates the writeback-connector-specific properties if they
+ * have not been already created, initializes the connector as
+ * type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
+ * values.
+ *
+ * Returns: 0 on success, or a negative error code
+ */
+int drmm_writeback_connector_init(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ const struct drm_connector_funcs *con_funcs,
+ struct drm_encoder *enc,
+ const u32 *formats, int n_formats)
+{
+ struct drm_connector *connector = &wb_connector->base;
+ int ret;
+
+ ret = drmm_connector_init(dev, connector, con_funcs,
+ DRM_MODE_CONNECTOR_WRITEBACK, NULL);
+ if (ret)
+ return ret;
+
+ ret = __drm_writeback_connector_init(dev, wb_connector, enc, formats,
+ n_formats);
+ if (ret)
+ return ret;
+
+ ret = drmm_add_action_or_reset(dev, (void *)drm_writeback_connector_cleanup,
+ wb_connector);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drmm_writeback_connector_init);
+
int drm_writeback_set_fb(struct drm_connector_state *conn_state,
struct drm_framebuffer *fb)
{
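A hedged sketch of the managed variant: the connector funcs must not set .destroy (drmm_connector_init() handles that underneath), and no explicit teardown of the writeback connector is needed since the cleanup action runs at drm_device release. struct my_output, the funcs table and the single-format list are illustrative assumptions, and encoder lifetime handling is simplified for brevity:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_writeback.h>

struct my_output {
        struct drm_encoder encoder;
        struct drm_writeback_connector wb_connector;
};

static const u32 my_wb_formats[] = { DRM_FORMAT_XRGB8888 };

/* No .destroy hook: the managed connector init rejects funcs that set one. */
static const struct drm_connector_funcs my_wb_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .reset = drm_atomic_helper_connector_reset,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int my_wb_init(struct drm_device *dev, struct my_output *out)
{
        int ret;

        ret = drm_simple_encoder_init(dev, &out->encoder, DRM_MODE_ENCODER_VIRTUAL);
        if (ret)
                return ret;

        return drmm_writeback_connector_init(dev, &out->wb_connector,
                                             &my_wb_connector_funcs,
                                             &out->encoder, my_wb_formats,
                                             ARRAY_SIZE(my_wb_formats));
}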
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 5b67eda122db..76a3a3e517d8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -144,17 +144,17 @@ out_unlock:
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
- int ret;
-
- ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
- msecs_to_jiffies(500), NULL, NULL,
- dev_name(gpu->dev), gpu->dev);
- if (ret)
- return ret;
-
- return 0;
+ const struct drm_sched_init_args args = {
+ .ops = &etnaviv_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = etnaviv_hw_jobs_limit,
+ .hang_limit = etnaviv_job_hang_limit,
+ .timeout = msecs_to_jiffies(500),
+ .name = dev_name(gpu->dev),
+ .dev = gpu->dev,
+ };
+
+ return drm_sched_init(&gpu->sched, &args);
}
void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 176fd8871759..01813e11e6c6 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -931,7 +931,7 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
}
static enum drm_mode_status hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct hdmi_context *hdata = connector_to_hdmi(connector);
int ret;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index c418e8496bdf..84eff7519e32 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -63,7 +63,7 @@ static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->hdisplay & 0xf)
return MODE_ERROR;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 5a0acd914f76..06fe7480e7af 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -69,7 +69,7 @@ static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
}
static enum drm_mode_status cdv_intel_crt_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index cc2ed9b3fd2d..53990d27c39f 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -505,7 +505,7 @@ static void cdv_intel_edp_backlight_off (struct gma_encoder *intel_encoder)
static enum drm_mode_status
cdv_intel_dp_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 2d95e0471291..f2a3e37ef632 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -222,7 +222,7 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status cdv_hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index f3a4517bdf27..9276e3676ba0 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -153,7 +153,7 @@ static void cdv_intel_lvds_restore(struct drm_connector *connector)
}
static enum drm_mode_status cdv_intel_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index ed8626c73541..1cf394369127 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -514,7 +514,7 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
}
static enum drm_mode_status oaktrail_hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 2499fd6a80c9..9dc9dcd1b09f 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -212,7 +212,7 @@ extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
extern int psb_intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value);
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 138f153d38ba..9ad611b5956e 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -331,7 +331,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
}
enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 8dafff963ca8..afda40fc4494 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1159,7 +1159,7 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
}
static enum drm_mode_status psb_intel_sdvo_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 93b8d32e3be1..98d77d74999d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -4,6 +4,8 @@ config DRM_HISI_HIBMC
depends on DRM && PCI
depends on MMU
select DRM_CLIENT_SELECTION
+ select DRM_DISPLAY_HELPER
+ select DRM_DISPLAY_DP_HELPER
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
deleted file mode 100644
index 6f19e1c35e30..000000000000
--- a/drivers/gpu/drm/i2c/Kconfig
+++ /dev/null
@@ -1,36 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menu "I2C encoder or helper chips"
- depends on DRM && DRM_KMS_HELPER && I2C
-
-config DRM_I2C_CH7006
- tristate "Chrontel ch7006 TV encoder"
- default m if DRM_NOUVEAU
- help
- Support for Chrontel ch7006 and similar TV encoders, found
- on some nVidia video cards.
-
- This driver is currently only useful if you're also using
- the nouveau driver.
-
-config DRM_I2C_SIL164
- tristate "Silicon Image sil164 TMDS transmitter"
- default m if DRM_NOUVEAU
- help
- Support for sil164 and similar single-link (or dual-link
- when used in pairs) TMDS transmitters, used in some nVidia
- video cards.
-
-config DRM_I2C_NXP_TDA998X
- tristate "NXP Semiconductors TDA998X HDMI encoder"
- default m if DRM_TILCDC
- select CEC_CORE if CEC_NOTIFIER
- select SND_SOC_HDMI_CODEC if SND_SOC
- help
- Support for NXP Semiconductors TDA998X HDMI encoders.
-
-config DRM_I2C_NXP_TDA9950
- tristate "NXP Semiconductors TDA9950/TDA998X HDMI CEC"
- select CEC_NOTIFIER
- select CEC_CORE
-
-endmenu
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
deleted file mode 100644
index a962f6f08568..000000000000
--- a/drivers/gpu/drm/i2c/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-ch7006-y := ch7006_drv.o ch7006_mode.o
-obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
-
-sil164-y := sil164_drv.o
-obj-$(CONFIG_DRM_I2C_SIL164) += sil164.o
-
-tda998x-y := tda998x_drv.o
-obj-$(CONFIG_DRM_I2C_NXP_TDA998X) += tda998x.o
-obj-$(CONFIG_DRM_I2C_NXP_TDA9950) += tda9950.o
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
index 493e730c685b..206818f9ad49 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
@@ -247,7 +247,7 @@ static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
}
static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->clock > 160000)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index 534b8544e0a4..10ab3cc73e58 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -276,7 +276,7 @@ static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
}
static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
index 0d5cce6051b1..d9c3152d4338 100644
--- a/drivers/gpu/drm/i915/display/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
@@ -314,7 +314,7 @@ static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo)
}
static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->clock > 112000)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index 686393dfbbf5..7146a9ed2213 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -523,7 +523,7 @@ static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
}
static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
DRM_DEBUG_KMS
("is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index a8dd40c00997..b42c717085f3 100644
--- a/drivers/gpu/drm/i915/display/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -189,7 +189,7 @@ static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
}
static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
return MODE_OK;
}
diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
index d9a0cd753a87..280699438526 100644
--- a/drivers/gpu/drm/i915/display/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
@@ -217,7 +217,7 @@ static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
}
static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
return MODE_OK;
}
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index c977b74f82f0..9ed0924cc0fb 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -809,8 +809,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* select data lane width */
tmp = intel_de_read(display,
TRANS_DDI_FUNC_CTL(display, dsi_trans));
- tmp &= ~DDI_PORT_WIDTH_MASK;
- tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
+ tmp &= ~TRANS_DDI_PORT_WIDTH_MASK;
+ tmp |= TRANS_DDI_PORT_WIDTH(intel_dsi->lane_count);
/* select input pipe */
tmp &= ~TRANS_DDI_EDP_INPUT_MASK;
@@ -1460,7 +1460,7 @@ static void gen11_dsi_post_disable(struct intel_atomic_state *state,
}
static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
enum drm_mode_status status;
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index fc1e517e074a..7e6ce905bdaf 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -41,8 +41,9 @@ static u32 scale(u32 source_val,
{
u64 target_val;
- WARN_ON(source_min > source_max);
- WARN_ON(target_min > target_max);
+ if (WARN_ON(source_min >= source_max) ||
+ WARN_ON(target_min > target_max))
+ return target_min;
/* defensive */
source_val = clamp(source_val, source_min, source_max);
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 4634d3fd9f20..968ac705c3c6 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -358,7 +358,7 @@ static void intel_enable_crt(struct intel_atomic_state *state,
static enum drm_mode_status
intel_crt_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index acb986bc1f33..4daed0223fe5 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -658,7 +658,6 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
u32 ctl;
if (DISPLAY_VER(dev_priv) >= 11)
@@ -678,8 +677,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
if (DISPLAY_VER(dev_priv) >= 12) {
- if (!intel_dp_mst_is_master_trans(crtc_state) ||
- (!is_mst && intel_dp_is_uhbr(crtc_state))) {
+ if (!intel_dp_mst_is_master_trans(crtc_state)) {
ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
TRANS_DDI_MODE_SELECT_MASK);
}
@@ -2594,6 +2592,7 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ bool transparent_mode;
int ret;
intel_dp_set_link_params(intel_dp,
@@ -2645,6 +2644,9 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+ transparent_mode = intel_dp_lttpr_transparent_mode_enabled(intel_dp);
+ drm_dp_lttpr_wake_timeout_setup(&intel_dp->aux, transparent_mode);
+
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
if (!is_mst)
intel_dp_sink_enable_decompression(state,
@@ -3134,7 +3136,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
if (DISPLAY_VER(dev_priv) >= 12) {
- if (is_mst) {
+ if (is_mst || intel_dp_is_uhbr(old_crtc_state)) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
intel_de_rmw(dev_priv,
@@ -3487,7 +3489,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf);
- buf_ctl |= DDI_PORT_WIDTH(lane_count);
+ buf_ctl |= DDI_PORT_WIDTH(crtc_state->lane_count);
if (DISPLAY_VER(dev_priv) >= 20)
buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 4271da219b41..41128469f12a 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -6628,12 +6628,30 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_plane_state *plane_state;
struct intel_crtc_state *crtc_state;
+ struct intel_plane *plane;
struct intel_crtc *crtc;
u8 affected_pipes = 0;
u8 modeset_pipes = 0;
int i;
+ /*
+ * Any plane which is in use by the joiner needs its crtc.
+ * Pull those in first as this will not have happened yet
+ * if the plane remains disabled according to uapi.
+ */
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ crtc = to_intel_crtc(plane_state->hw.crtc);
+ if (!crtc)
+ continue;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ /* Now pull in all joined crtcs */
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
affected_pipes |= crtc_state->joiner_pipes;
if (intel_crtc_needs_modeset(crtc_state))
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f1f3b1bb1e89..48e89b66f8e8 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1391,7 +1391,7 @@ bool intel_dp_has_dsc(const struct intel_connector *connector)
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *_connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
@@ -1791,7 +1791,7 @@ int intel_dp_dsc_max_src_input_bpc(struct intel_display *display)
if (DISPLAY_VER(display) == 11)
return 10;
- return 0;
+ return intel_dp_dsc_min_src_input_bpc();
}
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
@@ -2072,11 +2072,10 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
/* Compressed BPP should be less than the Input DSC bpp */
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
- for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
- if (valid_dsc_bpp[i] < dsc_min_bpp)
+ for (i = ARRAY_SIZE(valid_dsc_bpp) - 1; i >= 0; i--) {
+ if (valid_dsc_bpp[i] < dsc_min_bpp ||
+ valid_dsc_bpp[i] > dsc_max_bpp)
continue;
- if (valid_dsc_bpp[i] > dsc_max_bpp)
- break;
ret = dsc_compute_link_config(intel_dp,
pipe_config,
@@ -2829,7 +2828,6 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
- /* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */
as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
as_sdp->length = 0x9;
as_sdp->duration_incr_ms = 0;
@@ -2840,7 +2838,7 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
as_sdp->target_rr = drm_mode_vrefresh(adjusted_mode);
as_sdp->target_rr_divider = true;
} else {
- as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL;
+ as_sdp->mode = DP_AS_SDP_AVT_DYNAMIC_VTOTAL;
as_sdp->vtotal = adjusted_mode->vtotal;
as_sdp->target_rr = 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 8b1977cfec50..05a8b6f6f349 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -128,7 +128,7 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
return true;
}
-static bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp)
+bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp)
{
return intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] ==
@@ -1563,7 +1563,7 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n");
- return false;
+ goto out;
}
if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) &&
@@ -1575,6 +1575,19 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
passed ? "passed" : "failed",
crtc_state->port_clock, crtc_state->lane_count);
+out:
+ /*
+ * Ensure that the training pattern does get set to TPS2 even in case
+ * of a failure, as is the case at the end of a passing link training
+ * and what is expected by the transcoder. Leaving TPS1 set (and
+ * disabling the link train mode in DP_TP_CTL later from TPS1 directly)
+ * would result in a stuck transcoder HW state and flip-done timeouts
+ * later in the modeset sequence.
+ */
+ if (!passed)
+ intel_dp_program_link_training_pattern(intel_dp, crtc_state,
+ DP_PHY_DPRX, DP_TRAINING_PATTERN_2);
+
return passed;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 2066b9146762..46614124569f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -15,6 +15,7 @@ struct intel_dp;
int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp);
+bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp);
void intel_dp_link_training_set_mode(struct intel_dp *intel_dp,
int link_rate, bool is_vrr);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 0c44fc7dd86c..aecaaf1d0fe2 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -341,6 +341,10 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
break;
}
+
+ /* Allow using zero step to indicate one try */
+ if (!step)
+ break;
}
if (slots < 0) {
@@ -1419,7 +1423,7 @@ static int mst_connector_get_modes(struct drm_connector *connector)
static int
mst_connector_mode_valid_ctx(struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_modeset_acquire_ctx *ctx,
enum drm_mode_status *status)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 0be46c6c9611..c93a3cf75c52 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -58,7 +58,7 @@ int intel_dsi_get_modes(struct drm_connector *connector)
}
enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index e8ba4ccd99d3..89c7166a3860 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -165,7 +165,7 @@ enum drm_panel_orientation
intel_dsi_get_panel_orientation(struct intel_connector *connector);
int intel_dsi_get_modes(struct drm_connector *connector);
enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
const struct mipi_dsi_host_ops *funcs,
enum port port);
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index abf19dfd6d9d..c310698a1a86 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -217,7 +217,7 @@ static void intel_enable_dvo(struct intel_atomic_state *state,
static enum drm_mode_status
intel_dvo_mode_valid(struct drm_connector *_connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct intel_connector *connector = to_intel_connector(_connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
index 4bf476656b8c..d20667870e50 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
@@ -71,7 +71,7 @@ struct intel_dvo_dev_ops {
* \return MODE_OK if the mode is valid, or another MODE_* otherwise.
*/
enum drm_mode_status (*mode_valid)(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
/*
* Callback for setting up a video mode after fixups have been made.
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 7464b44c8bb3..1bab7c34a794 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -41,7 +41,7 @@ intel_hdcp_adjust_hdcp_line_rekeying(struct intel_encoder *encoder,
u32 rekey_bit = 0;
/* Here we assume HDMI is in TMDS mode of operation */
- if (encoder->type != INTEL_OUTPUT_HDMI)
+ if (!intel_encoder_is_hdmi(encoder))
return;
if (DISPLAY_VER(display) >= 30) {
@@ -2188,6 +2188,19 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
drm_dbg_kms(display->drm,
"HDCP2.2 Downstream topology change\n");
+
+ ret = hdcp2_authenticate_repeater_topology(connector);
+ if (!ret) {
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED,
+ true);
+ goto out;
+ }
+
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
+ connector->base.base.id, connector->base.name,
+ ret);
} else {
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index ed29dd0ccef0..5ae678f4eaa7 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -2023,7 +2023,7 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 6ffd55c17445..4b0dce169d4e 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -390,7 +390,7 @@ static void intel_lvds_shutdown(struct intel_encoder *encoder)
static enum drm_mode_status
intel_lvds_mode_valid(struct drm_connector *_connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct intel_connector *connector = to_intel_connector(_connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 498b35ec4e0f..1b6040892c40 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1938,7 +1938,7 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
static enum drm_mode_status
intel_sdvo_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 6e311dcc1a61..1c50732a099d 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -957,7 +957,7 @@ static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state
static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *i915 = to_i915(connector->dev);
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index ff9764cac1e7..80e558042d97 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -106,8 +106,6 @@ static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
};
static const u32 icl_sdr_uv_plane_formats[] = {
@@ -134,8 +132,6 @@ static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
};
static const u32 icl_hdr_plane_formats[] = {
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index d49e9b3c7627..c4d731ab28eb 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1541,7 +1541,7 @@ static const struct drm_encoder_funcs intel_dsi_funcs = {
};
static enum drm_mode_status vlv_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index fe69f2c8527d..ae3343c81a64 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -209,8 +209,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
struct address_space *mapping = obj->base.filp->f_mapping;
unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
struct sg_table *st;
- struct sgt_iter sgt_iter;
- struct page *page;
int ret;
/*
@@ -239,9 +237,7 @@ rebuild_st:
* for PAGE_SIZE chunks instead may be helpful.
*/
if (max_segment > PAGE_SIZE) {
- for_each_sgt_page(page, sgt_iter, st)
- put_page(page);
- sg_free_table(st);
+ shmem_sg_free_table(st, mapping, false, false);
kfree(st);
max_segment = PAGE_SIZE;
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index aae5a081cb53..d6dc12fd87c1 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1422,6 +1422,9 @@ static void intel_gt_reset_global(struct intel_gt *gt,
if (!test_bit(I915_WEDGED, &gt->reset.flags))
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
+ else
+ drm_dev_wedged_event(&gt->i915->drm,
+ DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 12f1ba7ca9c1..3fce5c000144 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1469,6 +1469,19 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
+static void __update_guc_busyness_running_state(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+ for_each_engine(engine, gt, id)
+ engine->stats.guc.running = false;
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
static void __update_guc_busyness_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -1619,6 +1632,9 @@ void intel_guc_busyness_park(struct intel_gt *gt)
if (!guc_submission_initialized(guc))
return;
+ /* Assume no engines are running and set running state to false */
+ __update_guc_busyness_running_state(guc);
+
/*
* There is a race with suspend flow where the worker runs after suspend
* and causes an unclaimed register access warning. Cancel the worker
@@ -3433,10 +3449,10 @@ static inline int guc_lrc_desc_unpin(struct intel_context *ce)
*/
ret = deregister_context(ce, ce->guc_id.id);
if (ret) {
- spin_lock(&ce->guc_state.lock);
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
set_context_registered(ce);
clr_context_destroyed(ce);
- spin_unlock(&ce->guc_state.lock);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
/*
* As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements
* the wakeref immediately but per function spec usage call this after unlock.
@@ -5519,12 +5535,20 @@ static inline void guc_log_context(struct drm_printer *p,
{
drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
- drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
- ce->ring->head,
- ce->lrc_reg_state[CTX_RING_HEAD]);
- drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
- ce->ring->tail,
- ce->lrc_reg_state[CTX_RING_TAIL]);
+ if (intel_context_pin_if_active(ce)) {
+ drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
+ ce->ring->head,
+ ce->lrc_reg_state[CTX_RING_HEAD]);
+ drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
+ ce->ring->tail,
+ ce->lrc_reg_state[CTX_RING_TAIL]);
+ intel_context_unpin(ce);
+ } else {
+ drm_printf(p, "\t\tLRC Head: Internal %u, Memory not pinned\n",
+ ce->ring->head);
+ drm_printf(p, "\t\tLRC Tail: Internal %u, Memory not pinned\n",
+ ce->ring->tail);
+ }
drm_printf(p, "\t\tContext Pin Count: %u\n",
atomic_read(&ce->pin_count));
drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 765e6c0528fb..786c727aea45 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3633,7 +3633,7 @@ enum skl_power_gate {
#define DDI_BUF_IS_IDLE (1 << 7)
#define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6)
#define DDI_A_4_LANES (1 << 4)
-#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
+#define DDI_PORT_WIDTH(width) (((width) == 3 ? 4 : ((width) - 1)) << 1)
#define DDI_PORT_WIDTH_MASK (7 << 1)
#define DDI_PORT_WIDTH_SHIFT 1
#define DDI_INIT_DISPLAY_DETECTED (1 << 0)
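
The DDI_PORT_WIDTH() change above special-cases a 3-lane configuration. A small stand-alone check of the encodings the new macro produces, derived purely from the definition in the hunk; this is illustrative userspace code, not part of the patch:

#include <assert.h>

#define DDI_PORT_WIDTH(width)   (((width) == 3 ? 4 : ((width) - 1)) << 1)

int main(void)
{
        assert(DDI_PORT_WIDTH(1) == (0 << 1));  /* x1 -> field value 0 */
        assert(DDI_PORT_WIDTH(2) == (1 << 1));  /* x2 -> field value 1 */
        assert(DDI_PORT_WIDTH(3) == (4 << 1));  /* x3 -> field value 4, the new special case */
        assert(DDI_PORT_WIDTH(4) == (3 << 1));  /* x4 -> field value 3 */
        return 0;
}
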
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 5c397a2df70e..5d27e1c733c5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -168,7 +168,7 @@ static int igt_ppgtt_alloc(void *arg)
return PTR_ERR(ppgtt);
if (!ppgtt->vm.allocate_va_range)
- goto err_ppgtt_cleanup;
+ goto ppgtt_vm_put;
/*
* While we only allocate the page tables here and so we could
@@ -236,7 +236,7 @@ err_ppgtt_cleanup:
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
-
+ppgtt_vm_put:
i915_vm_put(&ppgtt->vm);
return err;
}
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
index 618503a212a7..1cdb3cfd058d 100644
--- a/drivers/gpu/drm/imagination/pvr_job.c
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -597,8 +597,6 @@ update_job_resvs_for_each(struct pvr_job_data *job_data, u32 job_count)
static bool can_combine_jobs(struct pvr_job *a, struct pvr_job *b)
{
struct pvr_job *geom_job = a, *frag_job = b;
- struct dma_fence *fence;
- unsigned long index;
/* Geometry and fragment jobs can be combined if they are queued to the
* same context and targeting the same HWRT.
@@ -609,13 +607,9 @@ static bool can_combine_jobs(struct pvr_job *a, struct pvr_job *b)
a->hwrt != b->hwrt)
return false;
- xa_for_each(&frag_job->base.dependencies, index, fence) {
- /* We combine when we see an explicit geom -> frag dep. */
- if (&geom_job->base.s_fence->scheduled == fence)
- return true;
- }
-
- return false;
+ /* We combine when we see an explicit geom -> frag dep. */
+ return drm_sched_job_has_dependency(&frag_job->base,
+ &geom_job->base.s_fence->scheduled);
}
static struct dma_fence *
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index c4f08432882b..21c185d18bb2 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -1210,6 +1210,17 @@ struct pvr_queue *pvr_queue_create(struct pvr_context *ctx,
},
};
struct pvr_device *pvr_dev = ctx->pvr_dev;
+ const struct drm_sched_init_args sched_args = {
+ .ops = &pvr_queue_sched_ops,
+ .submit_wq = pvr_dev->sched_wq,
+ .num_rqs = 1,
+ .credit_limit = 64 * 1024,
+ .hang_limit = 1,
+ .timeout = msecs_to_jiffies(500),
+ .timeout_wq = pvr_dev->sched_wq,
+ .name = "pvr-queue",
+ .dev = pvr_dev->base.dev,
+ };
struct drm_gpu_scheduler *sched;
struct pvr_queue *queue;
int ctx_state_size, err;
@@ -1282,12 +1293,7 @@ struct pvr_queue *pvr_queue_create(struct pvr_context *ctx,
queue->timeline_ufo.value = cpu_map;
- err = drm_sched_init(&queue->scheduler,
- &pvr_queue_sched_ops,
- pvr_dev->sched_wq, 1, 64 * 1024, 1,
- msecs_to_jiffies(500),
- pvr_dev->sched_wq, NULL, "pvr-queue",
- pvr_dev->base.dev);
+ err = drm_sched_init(&queue->scheduler, &sched_args);
if (err)
goto err_release_ufo;
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
index 3a3c8a195119..c5629e155d25 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-tve.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
@@ -217,7 +217,7 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
imx_tve_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct imx_tve *tve = con_to_tve(connector);
unsigned long rate;
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index c23ee2d214de..20b93fff0239 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -229,7 +229,7 @@ static int ingenic_drm_update_pixclk(struct notifier_block *nb,
}
static void ingenic_drm_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_device_get_priv(bridge->dev);
@@ -260,7 +260,7 @@ static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void ingenic_drm_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_device_get_priv(bridge->dev);
unsigned int var;
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index b40c90e97d7e..825135a26aa4 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -515,18 +515,22 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
unsigned int timeout = lima_sched_timeout_ms > 0 ?
lima_sched_timeout_ms : 10000;
+ const struct drm_sched_init_args args = {
+ .ops = &lima_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = 1,
+ .hang_limit = lima_job_hang_limit,
+ .timeout = msecs_to_jiffies(timeout),
+ .name = name,
+ .dev = pipe->ldev->dev,
+ };
pipe->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&pipe->fence_lock);
INIT_WORK(&pipe->recover_work, lima_sched_recover_work);
- return drm_sched_init(&pipe->base, &lima_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- 1,
- lima_job_hang_limit,
- msecs_to_jiffies(timeout), NULL,
- NULL, name, pipe->ldev->dev);
+ return drm_sched_init(&pipe->base, &args);
}
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
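
The etnaviv, pvr and lima hunks in this series all perform the same conversion: the long positional argument list of drm_sched_init() is replaced by a struct drm_sched_init_args. A minimal sketch of the new calling convention, using only the fields that appear in the hunks above; the function name, scheduler name and limits are placeholders:

#include <linux/device.h>
#include <linux/jiffies.h>

#include <drm/gpu_scheduler.h>

int example_sched_init(struct drm_gpu_scheduler *sched,
                       const struct drm_sched_backend_ops *ops,
                       struct device *dev)
{
        const struct drm_sched_init_args args = {
                .ops = ops,
                .submit_wq = NULL,      /* NULL as in the lima hunk: scheduler allocates its own */
                .num_rqs = DRM_SCHED_PRIORITY_COUNT,
                .credit_limit = 1,      /* placeholder limits */
                .hang_limit = 0,
                .timeout = msecs_to_jiffies(500),
                .timeout_wq = NULL,
                .name = "example-sched",
                .dev = dev,
        };

        /* One struct argument replaces the former positional argument list. */
        return drm_sched_init(sched, &args);
}
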
diff --git a/drivers/gpu/drm/loongson/lsdc_plane.c b/drivers/gpu/drm/loongson/lsdc_plane.c
index d227a2c1dcf1..aa9a97f9c4dc 100644
--- a/drivers/gpu/drm/loongson/lsdc_plane.c
+++ b/drivers/gpu/drm/loongson/lsdc_plane.c
@@ -171,7 +171,8 @@ static const struct drm_plane_helper_funcs lsdc_primary_helper_funcs = {
};
static int lsdc_cursor_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state,
+ bool flip)
{
struct drm_plane_state *new_state;
struct drm_crtc_state *crtc_state;
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index cd385ba4c66a..3d4648d2e15f 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2350,12 +2350,12 @@ static void mtk_dp_bridge_detach(struct drm_bridge *bridge)
}
static void mtk_dp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
int ret;
- mtk_dp->conn = drm_atomic_get_new_connector_for_encoder(old_state->base.state,
+ mtk_dp->conn = drm_atomic_get_new_connector_for_encoder(state,
bridge->encoder);
if (!mtk_dp->conn) {
drm_err(mtk_dp->drm_dev,
@@ -2400,7 +2400,7 @@ power_off_aux:
}
static void mtk_dp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 40752f232054..b50dc9a013ac 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -826,7 +826,7 @@ static void mtk_dsi_bridge_mode_set(struct drm_bridge *bridge,
}
static void mtk_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
@@ -834,7 +834,7 @@ static void mtk_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
}
static void mtk_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
@@ -845,7 +845,7 @@ static void mtk_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void mtk_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
int ret;
@@ -856,7 +856,7 @@ static void mtk_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
}
static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index ca82bc829cb9..ac5e40c27617 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1309,7 +1309,7 @@ static bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
}
static void mtk_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1326,7 +1326,7 @@ static void mtk_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
}
static void mtk_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1362,7 +1362,7 @@ static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
}
static void mtk_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1383,9 +1383,8 @@ static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi,
}
static void mtk_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = old_state->base.state;
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
/* Retrieve the connector through the atomic state. */
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index 8a48b3b0a956..655106bbb76d 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -101,7 +101,7 @@ static void mtk_plane_destroy_state(struct drm_plane *plane,
}
static int mtk_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state, bool flip)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index d1191de855d9..e79f7c3ce32e 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -139,10 +139,9 @@ static int meson_encoder_cvbs_atomic_check(struct drm_bridge *bridge,
}
static void meson_encoder_cvbs_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct meson_encoder_cvbs *encoder_cvbs = bridge_to_meson_encoder_cvbs(bridge);
- struct drm_atomic_state *state = bridge_state->base.state;
struct meson_drm *priv = encoder_cvbs->priv;
const struct meson_cvbs_mode *meson_mode;
struct drm_connector_state *conn_state;
@@ -191,7 +190,7 @@ static void meson_encoder_cvbs_atomic_enable(struct drm_bridge *bridge,
}
static void meson_encoder_cvbs_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct meson_encoder_cvbs *meson_encoder_cvbs =
bridge_to_meson_encoder_cvbs(bridge);
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
index 7816902f5907..fe204437bd65 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
@@ -42,10 +42,9 @@ static int meson_encoder_dsi_attach(struct drm_bridge *bridge,
}
static void meson_encoder_dsi_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct meson_encoder_dsi *encoder_dsi = bridge_to_meson_encoder_dsi(bridge);
- struct drm_atomic_state *state = bridge_state->base.state;
struct meson_drm *priv = encoder_dsi->priv;
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
@@ -80,7 +79,7 @@ static void meson_encoder_dsi_atomic_enable(struct drm_bridge *bridge,
}
static void meson_encoder_dsi_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct meson_encoder_dsi *meson_encoder_dsi =
bridge_to_meson_encoder_dsi(bridge);
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 0593a1cde906..6d1c9262a2cf 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -186,10 +186,9 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
}
static void meson_encoder_hdmi_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
- struct drm_atomic_state *state = bridge_state->base.state;
unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR;
struct meson_drm *priv = encoder_hdmi->priv;
struct drm_connector_state *conn_state;
@@ -250,7 +249,7 @@ static void meson_encoder_hdmi_atomic_enable(struct drm_bridge *bridge,
}
static void meson_encoder_hdmi_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
struct meson_drm *priv = encoder_hdmi->priv;
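
The ingenic, mediatek and meson hunks above switch the drm_bridge atomic hooks from receiving a drm_bridge_state to receiving the full drm_atomic_state, so a connector lookup that previously went through old_state->base.state now uses the state argument directly. A hedged sketch of an enable hook after this change; the function name is a placeholder:

#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>

static void example_bridge_atomic_enable(struct drm_bridge *bridge,
                                         struct drm_atomic_state *state)
{
        struct drm_connector *connector;

        /* The connector is now looked up straight from the atomic state. */
        connector = drm_atomic_get_new_connector_for_encoder(state,
                                                             bridge->encoder);
        if (!connector)
                return;

        /* ... program the hardware for the connector's new state ... */
}
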
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
index 5a02203fad12..94f063c8722a 100644
--- a/drivers/gpu/drm/mgag200/Makefile
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -6,6 +6,7 @@ mgag200-y := \
mgag200_g200.o \
mgag200_g200eh.o \
mgag200_g200eh3.o \
+ mgag200_g200eh5.o \
mgag200_g200er.o \
mgag200_g200ev.o \
mgag200_g200ew3.o \
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 069fdd2dc8f6..32cd8ac018c0 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -214,6 +214,7 @@ static const struct pci_device_id mgag200_pciidlist[] = {
{ PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
{ PCI_VENDOR_ID_MATROX, 0x536, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EW3 },
{ PCI_VENDOR_ID_MATROX, 0x538, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH3 },
+ { PCI_VENDOR_ID_MATROX, 0x53a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH5 },
{0,}
};
@@ -256,6 +257,9 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case G200_EH3:
mdev = mgag200_g200eh3_device_create(pdev, &mgag200_driver);
break;
+ case G200_EH5:
+ mdev = mgag200_g200eh5_device_create(pdev, &mgag200_driver);
+ break;
case G200_ER:
mdev = mgag200_g200er_device_create(pdev, &mgag200_driver);
break;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 0608fc63e588..819a7e9381e3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -196,6 +196,7 @@ enum mga_type {
G200_EV,
G200_EH,
G200_EH3,
+ G200_EH5,
G200_ER,
G200_EW3,
};
@@ -334,6 +335,8 @@ struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev,
const struct drm_driver *drv);
struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
const struct drm_driver *drv);
+struct mga_device *mgag200_g200eh5_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv);
struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev,
const struct drm_driver *drv);
struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh5.c b/drivers/gpu/drm/mgag200/mgag200_g200eh5.c
new file mode 100644
index 000000000000..e2a2942a80a0
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh5.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/limits.h>
+#include <linux/pci.h>
+#include <linux/units.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mgag200_drv.h"
+
+/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200eh5_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ const unsigned long long VCO_MAX = 10 * GIGA; // Hz
+ const unsigned long long VCO_MIN = 2500 * MEGA; // Hz
+ const unsigned long long PLL_FREQ_REF = 25 * MEGA; // Hz
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+
+ unsigned long long fdelta = ULLONG_MAX;
+
+ u16 mult_max = (u16)(VCO_MAX / PLL_FREQ_REF); // 400 (0x190)
+ u16 mult_min = (u16)(VCO_MIN / PLL_FREQ_REF); // 100 (0x64)
+
+ u64 ftmp_delta;
+ u64 computed_fo;
+
+ u16 test_m;
+ u8 test_div_a;
+ u8 test_div_b;
+ u64 fo_hz;
+
+ u8 uc_m = 0;
+ u8 uc_n = 0;
+ u8 uc_p = 0;
+
+ fo_hz = (u64)clock * HZ_PER_KHZ;
+
+ for (test_m = mult_min; test_m <= mult_max; test_m++) { // This gives 100 <= M <= 400
+ for (test_div_a = 8; test_div_a > 0; test_div_a--) { // This gives 1 <= A <= 8
+ for (test_div_b = 1; test_div_b <= test_div_a; test_div_b++) {
+ // This gives 1 <= B <= A
+ computed_fo = (PLL_FREQ_REF * test_m) /
+ (4 * test_div_a * test_div_b);
+
+ if (computed_fo > fo_hz)
+ ftmp_delta = computed_fo - fo_hz;
+ else
+ ftmp_delta = fo_hz - computed_fo;
+
+ if (ftmp_delta < fdelta) {
+ fdelta = ftmp_delta;
+ uc_m = (u8)(0xFF & test_m);
+ uc_n = (u8)((0x7 & (test_div_a - 1))
+ | (0x70 & (0x7 & (test_div_b - 1)) << 4));
+ uc_p = (u8)(1 & (test_m >> 8));
+ }
+ if (fdelta == 0)
+ break;
+ }
+ if (fdelta == 0)
+ break;
+ }
+ if (fdelta == 0)
+ break;
+ }
+
+ pixpllc->m = uc_m + 1;
+ pixpllc->n = uc_n + 1;
+ pixpllc->p = uc_p + 1;
+ pixpllc->s = 0;
+
+ return 0;
+ }
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200eh5_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200eh5_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static const struct drm_crtc_helper_funcs mgag200_g200eh5_crtc_helper_funcs = {
+ MGAG200_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs mgag200_g200eh5_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static int mgag200_g200eh5_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200eh5_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200eh5_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200eh5_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &mgag200_g200eh5_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+ ret = mgag200_vga_bmc_output_init(mdev);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * DRM device
+ */
+
+static const struct mgag200_device_info mgag200_g200eh5_device_info =
+ MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, false, 1, 0, false);
+
+static const struct mgag200_device_funcs mgag200_g200eh5_device_funcs = {
+ .pixpllc_atomic_check = mgag200_g200eh5_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200eh_pixpllc_atomic_update, // same as G200EH
+};
+
+struct mga_device *mgag200_g200eh5_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv)
+{
+ struct mga_device *mdev;
+ struct drm_device *dev;
+ resource_size_t vram_available;
+ int ret;
+
+ mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
+
+ if (IS_ERR(mdev))
+ return mdev;
+ dev = &mdev->base;
+
+ pci_set_drvdata(pdev, dev);
+
+ ret = mgag200_init_pci_options(pdev, 0x00000120, 0x0000b000);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_preinit(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_init(mdev, &mgag200_g200eh5_device_info,
+ &mgag200_g200eh5_device_funcs);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ mgag200_g200eh_init_registers(mdev); // same as G200EH
+ vram_available = mgag200_device_probe_vram(mdev);
+
+ ret = mgag200_mode_config_init(mdev, vram_available);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_g200eh5_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+ drm_kms_helper_poll_init(dev);
+
+ return mdev;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 65d38b25c070..699b0dd34b18 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -813,10 +813,10 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
}
ver = gmu_read(gmu, REG_A6XX_GMU_CORE_FW_VERSION);
- DRM_INFO("Loaded GMU firmware v%u.%u.%u\n",
- FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MAJOR__MASK, ver),
- FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MINOR__MASK, ver),
- FIELD_GET(A6XX_GMU_CORE_FW_VERSION_STEP__MASK, ver));
+ DRM_INFO_ONCE("Loaded GMU firmware v%u.%u.%u\n",
+ FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MAJOR__MASK, ver),
+ FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MINOR__MASK, ver),
+ FIELD_GET(A6XX_GMU_CORE_FW_VERSION_STEP__MASK, ver));
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 421afacb7248..36cc9dbc00b5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -297,7 +297,7 @@ static const struct dpu_wb_cfg sm8150_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SDM845_MASK,
+ .features = WB_SM8250_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index 641023b102bf..e8eacdb47967 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -304,7 +304,7 @@ static const struct dpu_wb_cfg sc8180x_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SDM845_MASK,
+ .features = WB_SM8250_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
index 621a2140f675..d761ed705bac 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
@@ -116,14 +116,12 @@ static const struct dpu_lm_cfg sm6150_lm[] = {
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
- .lm_pair = LM_1,
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
.features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_1,
- .lm_pair = LM_0,
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index d039b96beb97..76f60a2df7a8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -144,7 +144,7 @@ static const struct dpu_wb_cfg sm6125_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SDM845_MASK,
+ .features = WB_SM8250_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 7191b1a6d41b..e5dcd41a361f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1228,8 +1228,6 @@ static int dpu_crtc_reassign_planes(struct drm_crtc *crtc, struct drm_crtc_state
done:
kfree(states);
return ret;
-
- return 0;
}
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 5172ab4dea99..48e6e8d74c85 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2281,6 +2281,9 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
}
}
+ if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither)
+ phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL);
+
/* reset the merge 3D HW block */
if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index 657200401f57..cec6d4e8baec 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -52,6 +52,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
u32 slice_last_group_size;
u32 det_thresh_flatness;
bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
+ bool input_10_bits = dsc->bits_per_component == 10;
DPU_REG_WRITE(c, DSC_COMMON_MODE, mode);
@@ -68,7 +69,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
data |= (dsc->line_buf_depth << 3);
data |= (dsc->simple_422 << 2);
data |= (dsc->convert_rgb << 1);
- data |= dsc->bits_per_component;
+ data |= input_10_bits;
DPU_REG_WRITE(c, DSC_ENC, data);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index ad19330de61a..562a3f4c5238 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -272,7 +272,7 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
if (cap & BIT(DPU_MDP_VSYNC_SEL))
ops->setup_vsync_source = dpu_hw_setup_vsync_sel;
- else
+ else if (!(cap & BIT(DPU_MDP_PERIPH_0_REMOVED)))
ops->setup_vsync_source = dpu_hw_setup_wd_timer;
ops->get_safe_status = dpu_hw_get_safe_status;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 098abc2c0003..af3e541f60c3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1164,7 +1164,6 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
unsigned int num_planes)
{
unsigned int i;
- int ret;
for (i = 0; i < num_planes; i++) {
struct drm_plane_state *plane_state = states[i];
@@ -1173,13 +1172,13 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
!plane_state->visible)
continue;
- ret = dpu_plane_virtual_assign_resources(crtc, global_state,
+ int ret = dpu_plane_virtual_assign_resources(crtc, global_state,
state, plane_state);
if (ret)
- break;
+ return ret;
}
- return ret;
+ return 0;
}
static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe)
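The rewrite above exists because ret was only assigned inside the loop after the continue checks; if every plane was skipped (or num_planes was zero) the function returned an uninitialized value. Returning the error on the spot and falling through to return 0 removes that path. A minimal standalone sketch of the same early-return pattern, with hypothetical names:

        #include <stddef.h>

        /* Walk a list of optional items; a failing item aborts immediately,
         * and skipping every item counts as success (hypothetical helpers). */
        static int assign_all(int (*assign_one)(void *item), void **items, size_t n)
        {
                for (size_t i = 0; i < n; i++) {
                        if (!items[i])          /* analogous to the !visible check */
                                continue;

                        int ret = assign_one(items[i]);
                        if (ret)
                                return ret;     /* the old code only did "break" */
                }

                return 0;       /* the old code returned a possibly
                                 * uninitialized ret here */
        }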
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
index 7444b75c4215..52e728181b52 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
@@ -58,7 +58,7 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 62de248ed1b0..bb1601921938 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -368,7 +368,7 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
}
static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state, bool flip)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 24dd37f1682b..a542d2781a09 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -930,16 +930,17 @@ enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge,
return -EINVAL;
}
- if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
- return MODE_CLOCK_HIGH;
-
msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
link_info = &msm_dp_display->panel->link_info;
- if (drm_mode_is_420_only(&dp->connector->display_info, mode) &&
- msm_dp_display->panel->vsc_sdp_supported)
+ if ((drm_mode_is_420_only(&dp->connector->display_info, mode) &&
+ msm_dp_display->panel->vsc_sdp_supported) ||
+ msm_dp_wide_bus_available(dp))
mode_pclk_khz /= 2;
+ if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ)
+ return MODE_CLOCK_HIGH;
+
mode_bpp = dp->connector->display_info.bpc * num_components;
if (!mode_bpp)
mode_bpp = default_bpp;
@@ -1491,13 +1492,13 @@ int msm_dp_modeset_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
}
void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
int rc = 0;
struct msm_dp_display_private *msm_dp_display;
- u32 state;
+ u32 hpd_state;
bool force_link_train = false;
msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
@@ -1516,8 +1517,8 @@ void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
return;
}
- state = msm_dp_display->hpd_state;
- if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) {
+ hpd_state = msm_dp_display->hpd_state;
+ if (hpd_state != ST_DISPLAY_OFF && hpd_state != ST_MAINLINK_READY) {
mutex_unlock(&msm_dp_display->event_mutex);
return;
}
@@ -1529,9 +1530,9 @@ void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
return;
}
- state = msm_dp_display->hpd_state;
+ hpd_state = msm_dp_display->hpd_state;
- if (state == ST_DISPLAY_OFF) {
+ if (hpd_state == ST_DISPLAY_OFF) {
msm_dp_display_host_phy_init(msm_dp_display);
force_link_train = true;
}
@@ -1552,7 +1553,7 @@ void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
}
void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
@@ -1564,11 +1565,11 @@ void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
}
void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
- u32 state;
+ u32 hpd_state;
struct msm_dp_display_private *msm_dp_display;
msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
@@ -1578,15 +1579,15 @@ void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
mutex_lock(&msm_dp_display->event_mutex);
- state = msm_dp_display->hpd_state;
- if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED)
+ hpd_state = msm_dp_display->hpd_state;
+ if (hpd_state != ST_DISCONNECT_PENDING && hpd_state != ST_CONNECTED)
drm_dbg_dp(dp->drm_dev, "type=%d wrong hpd_state=%d\n",
- dp->connector_type, state);
+ dp->connector_type, hpd_state);
msm_dp_display_disable(msm_dp_display);
- state = msm_dp_display->hpd_state;
- if (state == ST_DISCONNECT_PENDING) {
+ hpd_state = msm_dp_display->hpd_state;
+ if (hpd_state == ST_DISCONNECT_PENDING) {
/* completed disconnection */
msm_dp_display->hpd_state = ST_DISCONNECTED;
} else {
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index d3e241ea6941..022b3e815cf3 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -137,9 +137,8 @@ static int msm_edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
}
static void msm_edp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
@@ -151,25 +150,24 @@ static void msm_edp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
* If the panel is in psr, just exit psr state and skip the full
* bridge enable sequence.
*/
- crtc = drm_atomic_get_new_crtc_for_encoder(atomic_state,
+ crtc = drm_atomic_get_new_crtc_for_encoder(state,
drm_bridge->encoder);
if (!crtc)
return;
- old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
if (old_crtc_state && old_crtc_state->self_refresh_active) {
msm_dp_display_set_psr(dp, false);
return;
}
- msm_dp_bridge_atomic_enable(drm_bridge, old_bridge_state);
+ msm_dp_bridge_atomic_enable(drm_bridge, state);
}
static void msm_edp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state = NULL, *old_crtc_state = NULL;
struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
@@ -208,13 +206,12 @@ static void msm_edp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
}
out:
- msm_dp_bridge_atomic_disable(drm_bridge, old_bridge_state);
+ msm_dp_bridge_atomic_disable(drm_bridge, atomic_state);
}
static void msm_edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state = NULL;
@@ -233,7 +230,7 @@ static void msm_edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
if (new_crtc_state->self_refresh_active)
return;
- msm_dp_bridge_atomic_post_disable(drm_bridge, old_bridge_state);
+ msm_dp_bridge_atomic_post_disable(drm_bridge, atomic_state);
}
/**
@@ -257,7 +254,10 @@ static enum drm_mode_status msm_edp_bridge_mode_valid(struct drm_bridge *bridge,
return -EINVAL;
}
- if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
+ if (msm_dp_wide_bus_available(dp))
+ mode_pclk_khz /= 2;
+
+ if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ)
return MODE_CLOCK_HIGH;
/*
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index 8eae2f74839f..d8c9b905f8bf 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -26,11 +26,11 @@ int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
bool yuv_supported);
void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 031446c87dae..798168180c1a 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -83,6 +83,9 @@ struct dsi_pll_7nm {
/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
spinlock_t postdiv_lock;
+ /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG1 register */
+ spinlock_t pclk_mux_lock;
+
struct pll_7nm_cached_state cached_state;
struct dsi_pll_7nm *slave;
@@ -372,22 +375,41 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
ndelay(250);
}
-static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
+static void dsi_pll_cmn_clk_cfg0_write(struct dsi_pll_7nm *pll, u32 val)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pll->postdiv_lock, flags);
+ writel(val, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
+ spin_unlock_irqrestore(&pll->postdiv_lock, flags);
+}
+
+static void dsi_pll_cmn_clk_cfg1_update(struct dsi_pll_7nm *pll, u32 mask,
+ u32 val)
+{
+ unsigned long flags;
u32 data;
+ spin_lock_irqsave(&pll->pclk_mux_lock, flags);
data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- writel(data & ~BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ data &= ~mask;
+ data |= val & mask;
+
+ writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ spin_unlock_irqrestore(&pll->pclk_mux_lock, flags);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
+{
+ dsi_pll_cmn_clk_cfg1_update(pll, DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN, 0);
}
static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
{
- u32 data;
+ u32 cfg_1 = DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN | DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN_SEL;
writel(0x04, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3);
-
- data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- writel(data | BIT(5) | BIT(4), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_pll_cmn_clk_cfg1_update(pll, cfg_1, cfg_1);
}
static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
@@ -565,7 +587,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
- void __iomem *phy_base = pll_7nm->phy->base;
u32 val;
int ret;
@@ -574,13 +595,10 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
val |= cached->pll_out_div;
writel(val, pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
- writel(cached->bit_clk_div | (cached->pix_clk_div << 4),
- phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
-
- val = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- val &= ~0x3;
- val |= cached->pll_mux;
- writel(val, phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_pll_cmn_clk_cfg0_write(pll_7nm,
+ DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(cached->bit_clk_div) |
+ DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(cached->pix_clk_div));
+ dsi_pll_cmn_clk_cfg1_update(pll_7nm, 0x3, cached->pll_mux);
ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw,
pll_7nm->vco_current_rate,
@@ -599,7 +617,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
- void __iomem *base = phy->base;
u32 data = 0x0; /* internal PLL */
DBG("DSI PLL%d", pll_7nm->phy->id);
@@ -618,7 +635,8 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
}
/* set PLL src */
- writel(data << 2, base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL__MASK,
+ DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL(data));
return 0;
}
@@ -733,7 +751,7 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
pll_by_2_bit,
}), 2, 0, pll_7nm->phy->base +
REG_DSI_7nm_PHY_CMN_CLK_CFG1,
- 0, 1, 0, NULL);
+ 0, 1, 0, &pll_7nm->pclk_mux_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -778,6 +796,7 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
pll_7nm_list[phy->id] = pll_7nm;
spin_lock_init(&pll_7nm->postdiv_lock);
+ spin_lock_init(&pll_7nm->pclk_mux_lock);
pll_7nm->phy = phy;
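The new pclk_mux_lock and the dsi_pll_cmn_clk_cfg0_write()/dsi_pll_cmn_clk_cfg1_update() helpers above serialize every access to the shared CLK_CFG0/CLK_CFG1 registers, and passing &pll_7nm->pclk_mux_lock to the mux registration makes the clk framework take the same lock, so its writes can no longer race the driver's own updates. A generic sketch of the locked read-modify-write idiom, with made-up names and no DSI specifics:

        #include <linux/io.h>
        #include <linux/spinlock.h>
        #include <linux/types.h>

        /* Update only the bits in @mask of an MMIO register protected by
         * @lock; mirrors dsi_pll_cmn_clk_cfg1_update() above. */
        static void reg_update_bits_locked(void __iomem *reg, spinlock_t *lock,
                                           u32 mask, u32 val)
        {
                unsigned long flags;
                u32 data;

                spin_lock_irqsave(lock, flags);
                data = readl(reg);
                data &= ~mask;
                data |= val & mask;
                writel(data, reg);
                spin_unlock_irqrestore(lock, flags);
        }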
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index fee31680a6d5..a65077855201 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -537,15 +537,12 @@ static inline int align_pitch(int width, int bpp)
static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
{
ktime_t now = ktime_get();
- s64 remaining_jiffies;
- if (ktime_compare(*timeout, now) < 0) {
- remaining_jiffies = 0;
- } else {
- ktime_t rem = ktime_sub(*timeout, now);
- remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
- }
+ if (ktime_compare(*timeout, now) <= 0)
+ return 0;
+ ktime_t rem = ktime_sub(*timeout, now);
+ s64 remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
return clamp(remaining_jiffies, 1LL, (s64)INT_MAX);
}
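The simplified timeout_to_jiffies() above folds the expired case into an early return 0 (and now treats a deadline exactly equal to "now" as expired, <= instead of <), while the lower clamp of one jiffy keeps a short-but-future timeout waiting for at least one tick. A userspace sketch of the same conversion with plain nanosecond integers instead of ktime_t:

        #include <stdint.h>

        #define EX_HZ           250             /* assumed example tick rate */
        #define NSEC_PER_SEC    1000000000LL

        /* Absolute deadline and current time in ns -> jiffies, clamped to
         * [1, INT32_MAX]; 0 means "already expired". */
        static int64_t timeout_to_jiffies_ns(int64_t deadline_ns, int64_t now_ns)
        {
                if (deadline_ns <= now_ns)
                        return 0;

                int64_t remaining = (deadline_ns - now_ns) / (NSEC_PER_SEC / EX_HZ);

                if (remaining < 1)
                        return 1;
                if (remaining > INT32_MAX)
                        return INT32_MAX;
                return remaining;
        }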
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index c803556a8f64..c5651c39ac2a 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -59,8 +59,14 @@ static const struct drm_sched_backend_ops msm_sched_ops = {
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
void *memptrs, uint64_t memptrs_iova)
{
+ struct drm_sched_init_args args = {
+ .ops = &msm_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = num_hw_submissions,
+ .timeout = MAX_SCHEDULE_TIMEOUT,
+ .dev = gpu->dev->dev,
+ };
struct msm_ringbuffer *ring;
- long sched_timeout;
char name[32];
int ret;
@@ -87,6 +93,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
}
msm_gem_object_set_name(ring->bo, "ring%d", id);
+ args.name = to_msm_bo(ring->bo)->name,
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
@@ -95,13 +102,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->memptrs = memptrs;
ring->memptrs_iova = memptrs_iova;
- /* currently managing hangcheck ourselves: */
- sched_timeout = MAX_SCHEDULE_TIMEOUT;
-
- ret = drm_sched_init(&ring->sched, &msm_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- num_hw_submissions, 0, sched_timeout,
- NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
+ ret = drm_sched_init(&ring->sched, &args);
if (ret) {
goto fail;
}
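The conversion above moves to the struct-based drm_sched_init() API: every scheduler parameter goes into a struct drm_sched_init_args built with designated initializers, which lets the ring name be filled in later (args.name = ...) once the backing BO exists, instead of threading a long positional argument list through one call. A small standalone sketch of the same pattern against a hypothetical API (none of these names are kernel symbols):

        #include <limits.h>
        #include <stdio.h>

        struct sched_init_args {        /* hypothetical stand-in for the real args struct */
                const char   *name;
                unsigned int  num_rqs;
                unsigned int  credit_limit;
                long          timeout;
        };

        static int sched_init(const struct sched_init_args *args)
        {
                printf("init %s: %u rqs, %u credits, timeout %ld\n",
                       args->name, args->num_rqs, args->credit_limit, args->timeout);
                return 0;
        }

        int main(void)
        {
                struct sched_init_args args = {
                        .num_rqs      = 4,
                        .credit_limit = 32,
                        .timeout      = LONG_MAX,   /* "manage hangcheck ourselves" */
                };

                args.name = "ring0";                /* only known after setup */
                return sched_init(&args);
        }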
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
index d54b72f92449..35f7f40e405b 100644
--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
@@ -9,8 +9,15 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00004" name="REVISION_ID1"/>
<reg32 offset="0x00008" name="REVISION_ID2"/>
<reg32 offset="0x0000c" name="REVISION_ID3"/>
- <reg32 offset="0x00010" name="CLK_CFG0"/>
- <reg32 offset="0x00014" name="CLK_CFG1"/>
+ <reg32 offset="0x00010" name="CLK_CFG0">
+ <bitfield name="DIV_CTRL_3_0" low="0" high="3" type="uint"/>
+ <bitfield name="DIV_CTRL_7_4" low="4" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00014" name="CLK_CFG1">
+ <bitfield name="CLK_EN" pos="5" type="boolean"/>
+ <bitfield name="CLK_EN_SEL" pos="4" type="boolean"/>
+ <bitfield name="BITCLK_SEL" low="2" high="3" type="uint"/>
+ </reg32>
<reg32 offset="0x00018" name="GLBL_CTRL"/>
<reg32 offset="0x0001c" name="RBUF_CTRL"/>
<reg32 offset="0x00020" name="VREG_CTRL_0"/>
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ce840300578d..ac76c0787010 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -109,3 +109,21 @@ config DRM_NOUVEAU_GSP_DEFAULT
help
Say Y here if you want to use the GSP codepaths by default on
Turing and Ampere GPUs.
+
+config DRM_NOUVEAU_CH7006
+ tristate "Chrontel ch7006 TV encoder"
+ depends on DRM_NOUVEAU
+ default m
+ help
+ Support for Chrontel ch7006 and similar TV encoders.
+
+ This driver is currently only useful if you're also using
+ the nouveau driver.
+
+config DRM_NOUVEAU_SIL164
+ tristate "Silicon Image sil164 TMDS transmitter"
+ depends on DRM_NOUVEAU
+ default m
+ help
+ Support for sil164 and similar single-link (or dual-link
+ when used in pairs) TMDS transmitters.
diff --git a/drivers/gpu/drm/nouveau/dispnv04/Kbuild b/drivers/gpu/drm/nouveau/dispnv04/Kbuild
index 975c4e226936..4c7bc6bb81b3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv04/Kbuild
@@ -6,7 +6,10 @@ nouveau-y += dispnv04/dac.o
nouveau-y += dispnv04/dfp.o
nouveau-y += dispnv04/disp.o
nouveau-y += dispnv04/hw.o
+nouveau-y += dispnv04/nouveau_i2c_encoder.o
nouveau-y += dispnv04/overlay.o
nouveau-y += dispnv04/tvmodesnv17.o
nouveau-y += dispnv04/tvnv04.o
nouveau-y += dispnv04/tvnv17.o
+
+include $(src)/dispnv04/i2c/Kbuild
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 504c421aa176..c724bacc67f8 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -35,7 +35,7 @@
#include "hw.h"
#include "nvreg.h"
-#include <drm/i2c/sil164.h>
+#include <dispnv04/i2c/sil164.h>
#include <subdev/i2c.h>
@@ -171,7 +171,7 @@ static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder)
list_for_each_entry(slave, &dev->mode_config.encoder_list, head) {
struct dcb_output *slave_dcb = nouveau_encoder(slave)->dcb;
- if (slave_dcb->type == DCB_OUTPUT_TMDS && get_slave_funcs(slave) &&
+ if (slave_dcb->type == DCB_OUTPUT_TMDS && get_encoder_i2c_funcs(slave) &&
slave_dcb->tmdsconf.slave_addr == dcb->tmdsconf.slave_addr)
return slave;
}
@@ -473,8 +473,9 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
/* Init external transmitters */
slave_encoder = get_tmds_slave(encoder);
if (slave_encoder)
- get_slave_funcs(slave_encoder)->mode_set(
- slave_encoder, &nv_encoder->mode, &nv_encoder->mode);
+ get_encoder_i2c_funcs(slave_encoder)->mode_set(slave_encoder,
+ &nv_encoder->mode,
+ &nv_encoder->mode);
helper->dpms(encoder, DRM_MODE_DPMS_ON);
@@ -614,8 +615,8 @@ static void nv04_dfp_destroy(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- if (get_slave_funcs(encoder))
- get_slave_funcs(encoder)->destroy(encoder);
+ if (get_encoder_i2c_funcs(encoder))
+ get_encoder_i2c_funcs(encoder)->destroy(encoder);
drm_encoder_cleanup(encoder);
kfree(nv_encoder);
@@ -649,8 +650,8 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
if (type < 0)
return;
- drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
- &bus->i2c, &info[type].dev);
+ nouveau_i2c_encoder_init(dev, to_encoder_i2c(encoder),
+ &bus->i2c, &info[type].dev);
}
static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/i2c/Kbuild b/drivers/gpu/drm/nouveau/dispnv04/i2c/Kbuild
new file mode 100644
index 000000000000..3fddfc97bcb3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv04/i2c/Kbuild
@@ -0,0 +1,5 @@
+ch7006-y := dispnv04/i2c/ch7006_drv.o dispnv04/i2c/ch7006_mode.o
+obj-$(CONFIG_DRM_NOUVEAU_CH7006) += ch7006.o
+
+sil164-y := dispnv04/i2c/sil164_drv.o
+obj-$(CONFIG_DRM_NOUVEAU_SIL164) += sil164.o
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_drv.c
index fcb0fcd6c897..fd2150e07e36 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_drv.c
@@ -47,14 +47,14 @@ static void ch7006_encoder_destroy(struct drm_encoder *encoder)
drm_property_destroy(encoder->dev, priv->scale_property);
kfree(priv);
- to_encoder_slave(encoder)->slave_priv = NULL;
+ to_encoder_i2c(encoder)->encoder_i2c_priv = NULL;
- drm_i2c_encoder_destroy(encoder);
+ nouveau_i2c_encoder_destroy(encoder);
}
static void ch7006_encoder_dpms(struct drm_encoder *encoder, int mode)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
struct ch7006_state *state = &priv->state;
@@ -71,7 +71,7 @@ static void ch7006_encoder_dpms(struct drm_encoder *encoder, int mode)
static void ch7006_encoder_save(struct drm_encoder *encoder)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
ch7006_dbg(client, "\n");
@@ -81,7 +81,7 @@ static void ch7006_encoder_save(struct drm_encoder *encoder)
static void ch7006_encoder_restore(struct drm_encoder *encoder)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
ch7006_dbg(client, "\n");
@@ -104,7 +104,7 @@ static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder,
}
static int ch7006_encoder_mode_valid(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (ch7006_lookup_mode(encoder, mode))
return MODE_OK;
@@ -116,7 +116,7 @@ static void ch7006_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *drm_mode,
struct drm_display_mode *adjusted_mode)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
struct ch7006_encoder_params *params = &priv->params;
struct ch7006_state *state = &priv->state;
@@ -179,7 +179,7 @@ static void ch7006_encoder_mode_set(struct drm_encoder *encoder,
static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
struct ch7006_state *state = &priv->state;
int det;
@@ -285,7 +285,7 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
struct drm_property *property,
uint64_t val)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
struct ch7006_state *state = &priv->state;
struct drm_mode_config *conf = &encoder->dev->mode_config;
@@ -370,7 +370,7 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
return 0;
}
-static const struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
+static const struct nouveau_i2c_encoder_funcs ch7006_encoder_funcs = {
.set_config = ch7006_encoder_set_config,
.destroy = ch7006_encoder_destroy,
.dpms = ch7006_encoder_dpms,
@@ -437,7 +437,7 @@ static int ch7006_resume(struct device *dev)
static int ch7006_encoder_init(struct i2c_client *client,
struct drm_device *dev,
- struct drm_encoder_slave *encoder)
+ struct nouveau_i2c_encoder *encoder)
{
struct ch7006_priv *priv;
int i;
@@ -448,8 +448,8 @@ static int ch7006_encoder_init(struct i2c_client *client,
if (!priv)
return -ENOMEM;
- encoder->slave_priv = priv;
- encoder->slave_funcs = &ch7006_encoder_funcs;
+ encoder->encoder_i2c_priv = priv;
+ encoder->encoder_i2c_funcs = &ch7006_encoder_funcs;
priv->norm = TV_NORM_PAL;
priv->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
@@ -495,7 +495,7 @@ static const struct dev_pm_ops ch7006_pm_ops = {
.resume = ch7006_resume,
};
-static struct drm_i2c_encoder_driver ch7006_driver = {
+static struct nouveau_i2c_encoder_driver ch7006_driver = {
.i2c_driver = {
.probe = ch7006_probe,
.remove = ch7006_remove,
@@ -516,12 +516,12 @@ static struct drm_i2c_encoder_driver ch7006_driver = {
static int __init ch7006_init(void)
{
- return drm_i2c_encoder_register(THIS_MODULE, &ch7006_driver);
+ return i2c_add_driver(&ch7006_driver.i2c_driver);
}
static void __exit ch7006_exit(void)
{
- drm_i2c_encoder_unregister(&ch7006_driver);
+ i2c_del_driver(&ch7006_driver.i2c_driver);
}
int ch7006_debug;
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_mode.c
index 6afe6d0ee630..e58d94451959 100644
--- a/drivers/gpu/drm/i2c/ch7006_mode.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_mode.c
@@ -198,7 +198,7 @@ const struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
void ch7006_setup_levels(struct drm_encoder *encoder)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
uint8_t *regs = priv->state.regs;
const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
@@ -229,7 +229,7 @@ void ch7006_setup_levels(struct drm_encoder *encoder)
void ch7006_setup_subcarrier(struct drm_encoder *encoder)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
struct ch7006_state *state = &priv->state;
const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
@@ -253,7 +253,7 @@ void ch7006_setup_subcarrier(struct drm_encoder *encoder)
void ch7006_setup_pll(struct drm_encoder *encoder)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
uint8_t *regs = priv->state.regs;
const struct ch7006_mode *mode = priv->mode;
@@ -324,7 +324,7 @@ void ch7006_setup_power_state(struct drm_encoder *encoder)
void ch7006_setup_properties(struct drm_encoder *encoder)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
struct ch7006_state *state = &priv->state;
const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_priv.h
index 052bdc48a339..5ad5157a2c02 100644
--- a/drivers/gpu/drm/i2c/ch7006_priv.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_priv.h
@@ -24,12 +24,13 @@
*
*/
-#ifndef __DRM_I2C_CH7006_PRIV_H__
-#define __DRM_I2C_CH7006_PRIV_H__
+#ifndef __NOUVEAU_I2C_CH7006_PRIV_H__
+#define __NOUVEAU_I2C_CH7006_PRIV_H__
-#include <drm/drm_encoder_slave.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i2c/ch7006.h>
+
+#include <dispnv04/i2c/encoder_i2c.h>
+#include <dispnv04/i2c/ch7006.h>
typedef int64_t fixed;
#define fixed1 (1LL << 32)
@@ -99,7 +100,7 @@ struct ch7006_priv {
};
#define to_ch7006_priv(x) \
- ((struct ch7006_priv *)to_encoder_slave(x)->slave_priv)
+ ((struct ch7006_priv *)to_encoder_i2c(x)->encoder_i2c_priv)
extern int ch7006_debug;
extern char *ch7006_tv_norm;
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/nouveau/dispnv04/i2c/sil164_drv.c
index c17afa025d9d..54ea8459332d 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/i2c/sil164_drv.c
@@ -27,10 +27,11 @@
#include <linux/module.h>
#include <drm/drm_drv.h>
-#include <drm/drm_encoder_slave.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i2c/sil164.h>
+
+#include <dispnv04/i2c/encoder_i2c.h>
+#include <dispnv04/i2c/sil164.h>
struct sil164_priv {
struct sil164_encoder_params config;
@@ -41,7 +42,7 @@ struct sil164_priv {
};
#define to_sil164_priv(x) \
- ((struct sil164_priv *)to_encoder_slave(x)->slave_priv)
+ ((struct sil164_priv *)to_encoder_i2c(x)->encoder_i2c_priv)
#define sil164_dbg(client, format, ...) do { \
if (drm_debug_enabled(DRM_UT_KMS)) \
@@ -221,7 +222,7 @@ sil164_encoder_dpms(struct drm_encoder *encoder, int mode)
bool on = (mode == DRM_MODE_DPMS_ON);
bool duallink = (on && encoder->crtc->mode.clock > 165000);
- sil164_set_power_state(drm_i2c_encoder_get_client(encoder), on);
+ sil164_set_power_state(nouveau_i2c_encoder_get_client(encoder), on);
if (priv->duallink_slave)
sil164_set_power_state(priv->duallink_slave, duallink);
@@ -232,7 +233,7 @@ sil164_encoder_save(struct drm_encoder *encoder)
{
struct sil164_priv *priv = to_sil164_priv(encoder);
- sil164_save_state(drm_i2c_encoder_get_client(encoder),
+ sil164_save_state(nouveau_i2c_encoder_get_client(encoder),
priv->saved_state);
if (priv->duallink_slave)
@@ -245,7 +246,7 @@ sil164_encoder_restore(struct drm_encoder *encoder)
{
struct sil164_priv *priv = to_sil164_priv(encoder);
- sil164_restore_state(drm_i2c_encoder_get_client(encoder),
+ sil164_restore_state(nouveau_i2c_encoder_get_client(encoder),
priv->saved_state);
if (priv->duallink_slave)
@@ -255,7 +256,7 @@ sil164_encoder_restore(struct drm_encoder *encoder)
static int
sil164_encoder_mode_valid(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct sil164_priv *priv = to_sil164_priv(encoder);
@@ -277,7 +278,7 @@ sil164_encoder_mode_set(struct drm_encoder *encoder,
struct sil164_priv *priv = to_sil164_priv(encoder);
bool duallink = adjusted_mode->clock > 165000;
- sil164_init_state(drm_i2c_encoder_get_client(encoder),
+ sil164_init_state(nouveau_i2c_encoder_get_client(encoder),
&priv->config, duallink);
if (priv->duallink_slave)
@@ -291,7 +292,7 @@ static enum drm_connector_status
sil164_encoder_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
if (sil164_read(client, SIL164_DETECT) & SIL164_DETECT_HOTPLUG_STAT)
return connector_status_connected;
@@ -330,10 +331,10 @@ sil164_encoder_destroy(struct drm_encoder *encoder)
i2c_unregister_device(priv->duallink_slave);
kfree(priv);
- drm_i2c_encoder_destroy(encoder);
+ nouveau_i2c_encoder_destroy(encoder);
}
-static const struct drm_encoder_slave_funcs sil164_encoder_funcs = {
+static const struct nouveau_i2c_encoder_funcs sil164_encoder_funcs = {
.set_config = sil164_encoder_set_config,
.destroy = sil164_encoder_destroy,
.dpms = sil164_encoder_dpms,
@@ -393,7 +394,7 @@ sil164_detect_slave(struct i2c_client *client)
static int
sil164_encoder_init(struct i2c_client *client,
struct drm_device *dev,
- struct drm_encoder_slave *encoder)
+ struct nouveau_i2c_encoder *encoder)
{
struct sil164_priv *priv;
struct i2c_client *slave_client;
@@ -402,8 +403,8 @@ sil164_encoder_init(struct i2c_client *client,
if (!priv)
return -ENOMEM;
- encoder->slave_priv = priv;
- encoder->slave_funcs = &sil164_encoder_funcs;
+ encoder->encoder_i2c_priv = priv;
+ encoder->encoder_i2c_funcs = &sil164_encoder_funcs;
slave_client = sil164_detect_slave(client);
if (!IS_ERR(slave_client))
@@ -418,7 +419,7 @@ static const struct i2c_device_id sil164_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, sil164_ids);
-static struct drm_i2c_encoder_driver sil164_driver = {
+static struct nouveau_i2c_encoder_driver sil164_driver = {
.i2c_driver = {
.probe = sil164_probe,
.driver = {
@@ -434,13 +435,13 @@ static struct drm_i2c_encoder_driver sil164_driver = {
static int __init
sil164_init(void)
{
- return drm_i2c_encoder_register(THIS_MODULE, &sil164_driver);
+ return i2c_add_driver(&sil164_driver.i2c_driver);
}
static void __exit
sil164_exit(void)
{
- drm_i2c_encoder_unregister(&sil164_driver);
+ i2c_del_driver(&sil164_driver.i2c_driver);
}
MODULE_AUTHOR("Francisco Jerez <currojerez@riseup.net>");
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c
index e464429d32df..e2bf99c43336 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c
@@ -26,10 +26,10 @@
#include <linux/module.h>
-#include <drm/drm_encoder_slave.h>
+#include <dispnv04/i2c/encoder_i2c.h>
/**
- * drm_i2c_encoder_init - Initialize an I2C slave encoder
+ * nouveau_i2c_encoder_init - Initialize an I2C slave encoder
* @dev: DRM device.
* @encoder: Encoder to be attached to the I2C device. You aren't
* required to have called drm_encoder_init() before.
@@ -40,7 +40,7 @@
*
* Create an I2C device on the specified bus (the module containing its
* driver is transparently loaded) and attach it to the specified
- * &drm_encoder_slave. The @slave_funcs field will be initialized with
+ * &nouveau_i2c_encoder. The @encoder_i2c_funcs field will be initialized with
* the hooks provided by the slave driver.
*
* If @info.platform_data is non-NULL it will be used as the initial
@@ -49,14 +49,14 @@
* Returns 0 on success or a negative errno on failure, in particular,
* -ENODEV is returned when no matching driver is found.
*/
-int drm_i2c_encoder_init(struct drm_device *dev,
- struct drm_encoder_slave *encoder,
- struct i2c_adapter *adap,
- const struct i2c_board_info *info)
+int nouveau_i2c_encoder_init(struct drm_device *dev,
+ struct nouveau_i2c_encoder *encoder,
+ struct i2c_adapter *adap,
+ const struct i2c_board_info *info)
{
struct module *module = NULL;
struct i2c_client *client;
- struct drm_i2c_encoder_driver *encoder_drv;
+ struct nouveau_i2c_encoder_driver *encoder_drv;
int err = 0;
request_module("%s%s", I2C_MODULE_PREFIX, info->type);
@@ -73,16 +73,16 @@ int drm_i2c_encoder_init(struct drm_device *dev,
goto fail_unregister;
}
- encoder->bus_priv = client;
+ encoder->i2c_client = client;
- encoder_drv = to_drm_i2c_encoder_driver(to_i2c_driver(client->dev.driver));
+ encoder_drv = to_nouveau_i2c_encoder_driver(to_i2c_driver(client->dev.driver));
err = encoder_drv->encoder_init(client, dev, encoder);
if (err)
goto fail_module_put;
if (info->platform_data)
- encoder->slave_funcs->set_config(&encoder->base,
+ encoder->encoder_i2c_funcs->set_config(&encoder->base,
info->platform_data);
return 0;
@@ -93,90 +93,53 @@ fail_unregister:
i2c_unregister_device(client);
return err;
}
-EXPORT_SYMBOL(drm_i2c_encoder_init);
/**
- * drm_i2c_encoder_destroy - Unregister the I2C device backing an encoder
+ * nouveau_i2c_encoder_destroy - Unregister the I2C device backing an encoder
* @drm_encoder: Encoder to be unregistered.
*
* This should be called from the @destroy method of an I2C slave
* encoder driver once I2C access is no longer needed.
*/
-void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
+void nouveau_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
{
- struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
- struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
+ struct nouveau_i2c_encoder *encoder = to_encoder_i2c(drm_encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(drm_encoder);
struct module *module = client->dev.driver->owner;
i2c_unregister_device(client);
- encoder->bus_priv = NULL;
+ encoder->i2c_client = NULL;
module_put(module);
}
-EXPORT_SYMBOL(drm_i2c_encoder_destroy);
+EXPORT_SYMBOL(nouveau_i2c_encoder_destroy);
/*
* Wrapper fxns which can be plugged in to drm_encoder_helper_funcs:
*/
-static inline const struct drm_encoder_slave_funcs *
-get_slave_funcs(struct drm_encoder *enc)
+bool nouveau_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- return to_encoder_slave(enc)->slave_funcs;
-}
-
-void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- get_slave_funcs(encoder)->dpms(encoder, mode);
-}
-EXPORT_SYMBOL(drm_i2c_encoder_dpms);
-
-bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- if (!get_slave_funcs(encoder)->mode_fixup)
+ if (!get_encoder_i2c_funcs(encoder)->mode_fixup)
return true;
- return get_slave_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode);
-}
-EXPORT_SYMBOL(drm_i2c_encoder_mode_fixup);
-
-void drm_i2c_encoder_prepare(struct drm_encoder *encoder)
-{
- drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-EXPORT_SYMBOL(drm_i2c_encoder_prepare);
-
-void drm_i2c_encoder_commit(struct drm_encoder *encoder)
-{
- drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-EXPORT_SYMBOL(drm_i2c_encoder_commit);
-
-void drm_i2c_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
+ return get_encoder_i2c_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode);
}
-EXPORT_SYMBOL(drm_i2c_encoder_mode_set);
-enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder,
- struct drm_connector *connector)
+enum drm_connector_status nouveau_i2c_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
{
- return get_slave_funcs(encoder)->detect(encoder, connector);
+ return get_encoder_i2c_funcs(encoder)->detect(encoder, connector);
}
-EXPORT_SYMBOL(drm_i2c_encoder_detect);
-void drm_i2c_encoder_save(struct drm_encoder *encoder)
+void nouveau_i2c_encoder_save(struct drm_encoder *encoder)
{
- get_slave_funcs(encoder)->save(encoder);
+ get_encoder_i2c_funcs(encoder)->save(encoder);
}
-EXPORT_SYMBOL(drm_i2c_encoder_save);
-void drm_i2c_encoder_restore(struct drm_encoder *encoder)
+void nouveau_i2c_encoder_restore(struct drm_encoder *encoder)
{
- get_slave_funcs(encoder)->restore(encoder);
+ get_encoder_i2c_funcs(encoder)->restore(encoder);
}
-EXPORT_SYMBOL(drm_i2c_encoder_restore);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index d3014027a812..c61ab083f62e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -32,7 +32,7 @@
#include "hw.h"
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/i2c/ch7006.h>
+#include <dispnv04/i2c/ch7006.h>
static struct nvkm_i2c_bus_probe nv04_tv_encoder_info[] = {
{
@@ -99,7 +99,7 @@ static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
- get_slave_funcs(encoder)->dpms(encoder, mode);
+ get_encoder_i2c_funcs(encoder)->dpms(encoder, mode);
}
static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
@@ -158,7 +158,7 @@ static void nv04_tv_mode_set(struct drm_encoder *encoder,
regp->tv_vskew = 1;
regp->tv_vsync_delay = 1;
- get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
+ get_encoder_i2c_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
}
static void nv04_tv_commit(struct drm_encoder *encoder)
@@ -178,7 +178,7 @@ static void nv04_tv_commit(struct drm_encoder *encoder)
static void nv04_tv_destroy(struct drm_encoder *encoder)
{
- get_slave_funcs(encoder)->destroy(encoder);
+ get_encoder_i2c_funcs(encoder)->destroy(encoder);
drm_encoder_cleanup(encoder);
kfree(encoder->helper_private);
@@ -191,11 +191,11 @@ static const struct drm_encoder_funcs nv04_tv_funcs = {
static const struct drm_encoder_helper_funcs nv04_tv_helper_funcs = {
.dpms = nv04_tv_dpms,
- .mode_fixup = drm_i2c_encoder_mode_fixup,
+ .mode_fixup = nouveau_i2c_encoder_mode_fixup,
.prepare = nv04_tv_prepare,
.commit = nv04_tv_commit,
.mode_set = nv04_tv_mode_set,
- .detect = drm_i2c_encoder_detect,
+ .detect = nouveau_i2c_encoder_detect,
};
int
@@ -226,8 +226,8 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
NULL);
drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs);
- nv_encoder->enc_save = drm_i2c_encoder_save;
- nv_encoder->enc_restore = drm_i2c_encoder_restore;
+ nv_encoder->enc_save = nouveau_i2c_encoder_save;
+ nv_encoder->enc_restore = nouveau_i2c_encoder_restore;
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
@@ -235,14 +235,14 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
nv_encoder->or = ffs(entry->or) - 1;
/* Run the slave-specific initialization */
- ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
- &bus->i2c,
- &nv04_tv_encoder_info[type].dev);
+ ret = nouveau_i2c_encoder_init(dev, to_encoder_i2c(encoder),
+ &bus->i2c,
+ &nv04_tv_encoder_info[type].dev);
if (ret < 0)
goto fail_cleanup;
/* Attach it to the specified connector. */
- get_slave_funcs(encoder)->create_resources(encoder, connector);
+ get_encoder_i2c_funcs(encoder)->create_resources(encoder, connector);
drm_connector_attach_encoder(connector, encoder);
return 0;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 3ecb101d23e9..06de05fe5db6 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -308,7 +308,7 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder,
}
static int nv17_tv_mode_valid(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
@@ -779,7 +779,7 @@ static const struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
.detect = nv17_tv_detect,
};
-static const struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
+static const struct nouveau_i2c_encoder_funcs nv17_tv_encoder_i2c_funcs = {
.get_modes = nv17_tv_get_modes,
.mode_valid = nv17_tv_mode_valid,
.create_resources = nv17_tv_create_resources,
@@ -818,7 +818,7 @@ nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry)
drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC,
NULL);
drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs);
- to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs;
+ to_encoder_i2c(encoder)->encoder_i2c_funcs = &nv17_tv_encoder_i2c_funcs;
tv_enc->base.enc_save = nv17_tv_save;
tv_enc->base.enc_restore = nv17_tv_restore;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 62d72b7a8d04..504cb3f2054b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1141,7 +1141,7 @@ nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
static enum drm_mode_status
nv50_mstc_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
struct nouveau_encoder *outp = mstc->mstm->outp;
diff --git a/drivers/gpu/drm/nouveau/include/dispnv04/i2c/ch7006.h b/drivers/gpu/drm/nouveau/include/dispnv04/i2c/ch7006.h
new file mode 100644
index 000000000000..1a6fa405f85b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/dispnv04/i2c/ch7006.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_I2C_CH7006_H__
+#define __NOUVEAU_I2C_CH7006_H__
+
+/**
+ * struct ch7006_encoder_params
+ *
+ * Describes how the ch7006 is wired up with the GPU. It should be
+ * used as the @params parameter of its @set_config method.
+ *
+ * See "http://www.chrontel.com/pdf/7006.pdf" for their precise
+ * meaning.
+ */
+struct ch7006_encoder_params {
+ /* private: FIXME: document the members */
+ enum {
+ CH7006_FORMAT_RGB16 = 0,
+ CH7006_FORMAT_YCrCb24m16,
+ CH7006_FORMAT_RGB24m16,
+ CH7006_FORMAT_RGB15,
+ CH7006_FORMAT_RGB24m12C,
+ CH7006_FORMAT_RGB24m12I,
+ CH7006_FORMAT_RGB24m8,
+ CH7006_FORMAT_RGB16m8,
+ CH7006_FORMAT_RGB15m8,
+ CH7006_FORMAT_YCrCb24m8,
+ } input_format;
+
+ enum {
+ CH7006_CLOCK_SLAVE = 0,
+ CH7006_CLOCK_MASTER,
+ } clock_mode;
+
+ enum {
+ CH7006_CLOCK_EDGE_NEG = 0,
+ CH7006_CLOCK_EDGE_POS,
+ } clock_edge;
+
+ int xcm, pcm;
+
+ enum {
+ CH7006_SYNC_SLAVE = 0,
+ CH7006_SYNC_MASTER,
+ } sync_direction;
+
+ enum {
+ CH7006_SYNC_SEPARATED = 0,
+ CH7006_SYNC_EMBEDDED,
+ } sync_encoding;
+
+ enum {
+ CH7006_POUT_1_8V = 0,
+ CH7006_POUT_3_3V,
+ } pout_level;
+
+ enum {
+ CH7006_ACTIVE_HSYNC = 0,
+ CH7006_ACTIVE_DSTART,
+ } active_detect;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h b/drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h
new file mode 100644
index 000000000000..31334aa90781
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_ENCODER_I2C_H__
+#define __NOUVEAU_ENCODER_I2C_H__
+
+#include <linux/i2c.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
+
+/**
+ * struct nouveau_i2c_encoder_funcs - Entry points exposed by an I2C encoder driver
+ *
+ * Most of its members are analogous to the function pointers in
+ * &drm_encoder_helper_funcs and they can optionally be used to
+ * initialize the latter. Connector-like methods (e.g. @get_modes and
+ * @set_property) will typically be wrapped around and only be called
+ * if the encoder is the currently selected one for the connector.
+ */
+struct nouveau_i2c_encoder_funcs {
+ /**
+ * @set_config: Initialize any encoder-specific modesetting parameters.
+ * The meaning of the @params parameter is implementation dependent. It
+ * will usually be a structure with DVO port data format settings or
+ * timings. It's not required for the new parameters to take effect
+ * until the next mode is set.
+ */
+ void (*set_config)(struct drm_encoder *encoder,
+ void *params);
+
+ /**
+ * @destroy: Analogous to &drm_encoder_funcs @destroy callback.
+ */
+ void (*destroy)(struct drm_encoder *encoder);
+
+ /**
+ * @dpms: Analogous to &drm_encoder_helper_funcs @dpms callback.
+ */
+ void (*dpms)(struct drm_encoder *encoder, int mode);
+
+ /**
+ * @save: Save state. Wrapped by nouveau_i2c_encoder_save().
+ */
+ void (*save)(struct drm_encoder *encoder);
+
+ /**
+ * @restore: Restore state. Wrapped by nouveau_i2c_encoder_restore().
+ */
+ void (*restore)(struct drm_encoder *encoder);
+
+ /**
+ * @mode_fixup: Analogous to &drm_encoder_helper_funcs @mode_fixup
+ * callback. Wrapped by nouveau_i2c_encoder_mode_fixup().
+ */
+ bool (*mode_fixup)(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /**
+ * @mode_valid: Analogous to &drm_encoder_helper_funcs @mode_valid.
+ */
+ int (*mode_valid)(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode);
+ /**
+ * @mode_set: Analogous to &drm_encoder_helper_funcs @mode_set
+ * callback.
+ */
+ void (*mode_set)(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /**
+ * @detect: Analogous to &drm_encoder_helper_funcs @detect
+ * callback. Wrapped by nouveau_i2c_encoder_detect().
+ */
+ enum drm_connector_status (*detect)(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+ /**
+ * @get_modes: Get modes.
+ */
+ int (*get_modes)(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+ /**
+ * @create_resources: Create resources.
+ */
+ int (*create_resources)(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+ /**
+ * @set_property: Set property.
+ */
+ int (*set_property)(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val);
+};
+
+/**
+ * struct nouveau_i2c_encoder - I2C encoder struct
+ *
+ * A &nouveau_i2c_encoder has two sets of callbacks, @encoder_i2c_funcs and the
+ * ones in @base. The former are never actually called by the common
+ * CRTC code, it's just a convenience for splitting the encoder
+ * functions in an upper, GPU-specific layer and a (hopefully)
+ * GPU-agnostic lower layer: It's the GPU driver's responsibility to
+ * call the nouveau_i2c_encoder methods when appropriate.
+ *
+ * nouveau_i2c_encoder_init() provides a way to get an implementation of
+ * this.
+ */
+struct nouveau_i2c_encoder {
+ /**
+ * @base: DRM encoder object.
+ */
+ struct drm_encoder base;
+
+ /**
+ * @encoder_i2c_funcs: I2C encoder callbacks.
+ */
+ const struct nouveau_i2c_encoder_funcs *encoder_i2c_funcs;
+
+ /**
+ * @encoder_i2c_priv: I2C encoder private data.
+ */
+ void *encoder_i2c_priv;
+
+ /**
+ * @i2c_client: corresponding I2C client structure
+ */
+ struct i2c_client *i2c_client;
+};
+
+#define to_encoder_i2c(x) container_of((x), struct nouveau_i2c_encoder, base)
+
+int nouveau_i2c_encoder_init(struct drm_device *dev,
+ struct nouveau_i2c_encoder *encoder,
+ struct i2c_adapter *adap,
+ const struct i2c_board_info *info);
+
+static inline const struct nouveau_i2c_encoder_funcs *
+get_encoder_i2c_funcs(struct drm_encoder *enc)
+{
+ return to_encoder_i2c(enc)->encoder_i2c_funcs;
+}
+
+/**
+ * struct nouveau_i2c_encoder_driver
+ *
+ * Describes a device driver for an encoder connected to the GPU through an I2C
+ * bus.
+ */
+struct nouveau_i2c_encoder_driver {
+ /**
+ * @i2c_driver: I2C device driver description.
+ */
+ struct i2c_driver i2c_driver;
+
+ /**
+ * @encoder_init: Callback to allocate any per-encoder data structures
+ * and to initialize the @encoder_i2c_funcs and (optionally) @encoder_i2c_priv
+ * members of @encoder.
+ */
+ int (*encoder_init)(struct i2c_client *client,
+ struct drm_device *dev,
+ struct nouveau_i2c_encoder *encoder);
+
+};
+
+#define to_nouveau_i2c_encoder_driver(x) container_of((x), \
+ struct nouveau_i2c_encoder_driver, \
+ i2c_driver)
+
+/**
+ * nouveau_i2c_encoder_get_client - Get the I2C client corresponding to an encoder
+ * @encoder: The encoder
+ */
+static inline struct i2c_client *nouveau_i2c_encoder_get_client(struct drm_encoder *encoder)
+{
+ return to_encoder_i2c(encoder)->i2c_client;
+}
+
+void nouveau_i2c_encoder_destroy(struct drm_encoder *encoder);
+
+/*
+ * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs:
+ */
+
+bool nouveau_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+enum drm_connector_status nouveau_i2c_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+void nouveau_i2c_encoder_save(struct drm_encoder *encoder);
+void nouveau_i2c_encoder_restore(struct drm_encoder *encoder);
+
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/dispnv04/i2c/sil164.h b/drivers/gpu/drm/nouveau/include/dispnv04/i2c/sil164.h
new file mode 100644
index 000000000000..b86750d7abe1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/dispnv04/i2c/sil164.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_I2C_SIL164_H__
+#define __NOUVEAU_I2C_SIL164_H__
+
+/**
+ * struct sil164_encoder_params
+ *
+ * Describes how the sil164 is connected to the GPU. It should be used
+ * as the @params parameter of its @set_config method.
+ *
+ * See "http://www.siliconimage.com/docs/SiI-DS-0021-E-164.pdf".
+ */
+struct sil164_encoder_params {
+ /* private: FIXME: document the members */
+ enum {
+ SIL164_INPUT_EDGE_FALLING = 0,
+ SIL164_INPUT_EDGE_RISING
+ } input_edge;
+
+ enum {
+ SIL164_INPUT_WIDTH_12BIT = 0,
+ SIL164_INPUT_WIDTH_24BIT
+ } input_width;
+
+ enum {
+ SIL164_INPUT_SINGLE_EDGE = 0,
+ SIL164_INPUT_DUAL_EDGE
+ } input_dual;
+
+ enum {
+ SIL164_PLL_FILTER_ON = 0,
+ SIL164_PLL_FILTER_OFF,
+ } pll_filter;
+
+ int input_skew; /** < Allowed range [-4, 3], use 0 for no de-skew. */
+ int duallink_skew; /** < Allowed range [-4, 3]. */
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 5c5f4607fcc9..746e126c3ecf 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -196,7 +196,7 @@ struct nvkm_gsp {
void (*rm_ctrl_done)(struct nvkm_gsp_object *, void *repv);
void *(*rm_alloc_get)(struct nvkm_gsp_object *, u32 oclass, u32 argc);
- void *(*rm_alloc_push)(struct nvkm_gsp_object *, void *argv, u32 repc);
+ void *(*rm_alloc_push)(struct nvkm_gsp_object *, void *argv);
void (*rm_alloc_done)(struct nvkm_gsp_object *, void *repv);
int (*rm_free)(struct nvkm_gsp_object *);
@@ -353,9 +353,9 @@ nvkm_gsp_rm_alloc_get(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u3
}
static inline void *
-nvkm_gsp_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+nvkm_gsp_rm_alloc_push(struct nvkm_gsp_object *object, void *argv)
{
- void *repv = object->client->gsp->rm->rm_alloc_push(object, argv, repc);
+ void *repv = object->client->gsp->rm->rm_alloc_push(object, argv);
if (IS_ERR(repv))
object->client = NULL;
@@ -366,7 +366,7 @@ nvkm_gsp_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
static inline int
nvkm_gsp_rm_alloc_wr(struct nvkm_gsp_object *object, void *argv)
{
- void *repv = nvkm_gsp_rm_alloc_push(object, argv, 0);
+ void *repv = nvkm_gsp_rm_alloc_push(object, argv);
if (IS_ERR(repv))
return PTR_ERR(repv);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 8d5c9c74cbb9..6fb9719d721f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -797,8 +797,10 @@ nouveau_connector_set_property(struct drm_connector *connector,
property, value);
if (ret) {
if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV)
- return get_slave_funcs(encoder)->set_property(
- encoder, connector, property, value);
+ return get_encoder_i2c_funcs(encoder)->set_property(encoder,
+ connector,
+ property,
+ value);
return ret;
}
@@ -1015,7 +1017,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
nouveau_connector_detect_depth(connector);
if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
- ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
+ ret = get_encoder_i2c_funcs(encoder)->get_modes(encoder, connector);
if (nv_connector->type == DCB_CONNECTOR_LVDS ||
nv_connector->type == DCB_CONNECTOR_LVDS_SPWG ||
@@ -1074,7 +1076,7 @@ get_tmds_link_bandwidth(struct drm_connector *connector)
static enum drm_mode_status
nouveau_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
@@ -1100,7 +1102,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
max_clock = 350000;
break;
case DCB_OUTPUT_TV:
- return get_slave_funcs(encoder)->mode_valid(encoder, mode);
+ return get_encoder_i2c_funcs(encoder)->mode_valid(encoder, mode);
case DCB_OUTPUT_DP:
return nv50_dp_mode_valid(nv_encoder, mode, NULL);
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 5664c4c71faf..e154d08857c5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -118,7 +118,7 @@ static struct drm_driver driver_platform;
#ifdef CONFIG_DEBUG_FS
struct dentry *nouveau_debugfs_root;
-/**
+/*
* gsp_logs - list of nvif_log GSP-RM logging buffers
*
* Head pointer to a list of nvif_log buffers that is created for each GPU
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 333042fc493f..dce8e5d9d496 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -31,7 +31,8 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
-#include <drm/drm_encoder_slave.h>
+
+#include <dispnv04/i2c/encoder_i2c.h>
#include "dispnv04/disp.h"
@@ -43,7 +44,7 @@ struct nouveau_connector;
struct nvkm_i2c_port;
struct nouveau_encoder {
- struct drm_encoder_slave base;
+ struct nouveau_i2c_encoder base;
struct dcb_output *dcb;
struct nvif_outp outp;
@@ -137,7 +138,7 @@ find_encoder(struct drm_connector *connector, int type);
static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc)
{
- struct drm_encoder_slave *slave = to_encoder_slave(enc);
+ struct nouveau_i2c_encoder *slave = to_encoder_i2c(enc);
return container_of(slave, struct nouveau_encoder, base);
}
@@ -147,12 +148,6 @@ static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc)
return &enc->base.base;
}
-static inline const struct drm_encoder_slave_funcs *
-get_slave_funcs(struct drm_encoder *enc)
-{
- return to_encoder_slave(enc)->slave_funcs;
-}
-
/* nouveau_dp.c */
enum nouveau_dp_status {
NOUVEAU_DP_NONE,
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index 4412f2711fb5..d326e55d2d24 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -404,7 +404,14 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
{
struct drm_gpu_scheduler *drm_sched = &sched->base;
struct drm_sched_entity *entity = &sched->entity;
- const long timeout = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
+ struct drm_sched_init_args args = {
+ .ops = &nouveau_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = credit_limit,
+ .timeout = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS),
+ .name = "nouveau_sched",
+ .dev = drm->dev->dev
+ };
int ret;
if (!wq) {
@@ -416,10 +423,9 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
sched->wq = wq;
}
- ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq,
- NOUVEAU_SCHED_PRIORITY_COUNT,
- credit_limit, 0, timeout,
- NULL, NULL, "nouveau_sched", drm->dev->dev);
+ args.submit_wq = wq;
+
+ ret = drm_sched_init(drm_sched, &args);
if (ret)
goto fail_wq;
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index b4da82ddbb6b..8ea98f06d39a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -590,6 +590,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
struct mm_struct *mm = svmm->notifier.mm;
+ struct folio *folio;
struct page *page;
unsigned long start = args->p.addr;
unsigned long notifier_seq;
@@ -616,12 +617,16 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
ret = -EINVAL;
goto out;
}
+ folio = page_folio(page);
mutex_lock(&svmm->mutex);
if (!mmu_interval_read_retry(&notifier->notifier,
notifier_seq))
break;
mutex_unlock(&svmm->mutex);
+
+ folio_unlock(folio);
+ folio_put(folio);
}
/* Map the page on the GPU. */
@@ -637,8 +642,8 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
mutex_unlock(&svmm->mutex);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
out:
mmu_interval_notifier_remove(&notifier->notifier);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index 58502102926b..db2602e88006 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -61,7 +61,72 @@
extern struct dentry *nouveau_debugfs_root;
#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
-#define GSP_MSG_MAX_SIZE GSP_PAGE_MIN_SIZE * 16
+#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
+
+/**
+ * DOC: GSP message queue element
+ *
+ * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h
+ *
+ * The GSP command queue and status queue are message queues for the
+ * communication between software and GSP. The software submits a GSP
+ * RPC via the GSP command queue, and GSP writes the status of the
+ * submitted RPC to the status queue.
+ *
+ * A GSP message queue element consists of three parts:
+ *
+ * - message element header (struct r535_gsp_msg), which mostly maintains
+ * the metadata for queuing the element.
+ *
+ * - RPC message header (struct nvfw_gsp_rpc), which maintains the info
+ * of the RPC. E.g., the RPC function number.
+ *
+ * - The payload, where the RPC message stays. E.g. the params of a
+ * specific RPC function. Some RPC functions also have their headers
+ * in the payload. E.g. rm_alloc, rm_control.
+ *
+ * The memory layout of a GSP message element can be illustrated below::
+ *
+ * +------------------------+
+ * | Message Element Header |
+ * | (r535_gsp_msg) |
+ * | |
+ * | (r535_gsp_msg.data) |
+ * | | |
+ * |----------V-------------|
+ * | GSP RPC Header |
+ * | (nvfw_gsp_rpc) |
+ * | |
+ * | (nvfw_gsp_rpc.data) |
+ * | | |
+ * |----------V-------------|
+ * | Payload |
+ * | |
+ * | header(optional) |
+ * | params |
+ * +------------------------+
+ *
+ * The max size of a message queue element is 16 pages (including the
+ * headers). When a GSP message to be sent is larger than 16 pages, the
+ * message should be split into multiple elements and sent accordingly.
+ *
+ * Among the split elements, the first element carries the expected
+ * function number, while the rest are sent with the function number
+ * NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD.
+ *
+ * GSP consumes the elements from the cmdq and always writes the result
+ * back to the msgq. The result is also formed as split elements.
+ *
+ * Terminology:
+ *
+ * - gsp_msg(msg): GSP message element (element header + GSP RPC header +
+ * payload)
+ * - gsp_rpc(rpc): GSP RPC (RPC header + payload)
+ * - gsp_rpc_buf: buffer for (GSP RPC header + payload)
+ * - gsp_rpc_len: size of (GSP RPC header + payload)
+ * - params_size: size of params in the payload
+ * - payload_size: size of (header if exists + params) in the payload
+ */
struct r535_gsp_msg {
u8 auth_tag_buffer[16];
@@ -73,8 +138,29 @@ struct r535_gsp_msg {
u8 data[];
};
+struct nvfw_gsp_rpc {
+ u32 header_version;
+ u32 signature;
+ u32 length;
+ u32 function;
+ u32 rpc_result;
+ u32 rpc_result_private;
+ u32 sequence;
+ union {
+ u32 spare;
+ u32 cpuRmGfid;
+ };
+ u8 data[];
+};
+
#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
+#define to_gsp_hdr(p, header) \
+ container_of((void *)p, typeof(*header), data)
+
+#define to_payload_hdr(p, header) \
+ container_of((void *)p, typeof(*header), params)
+
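To put the splitting rule from the DOC comment above in concrete terms, here is a small sketch of the element-count arithmetic; it is not part of the patch and simply mirrors the max_rpc_size/max_payload_size computation used later in r535_gsp_rpc_push():

static inline u32 r535_gsp_rpc_elem_count(u32 payload_size)
{
	/* A queue element is at most 16 pages, including both headers. */
	const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(struct r535_gsp_msg);
	const u32 max_payload_size = max_rpc_size - sizeof(struct nvfw_gsp_rpc);

	if (payload_size <= max_payload_size)
		return 1;

	/* One leading element plus one CONTINUATION_RECORD element per
	 * additional chunk of up to max_payload_size bytes. */
	return 1 + DIV_ROUND_UP(payload_size - max_payload_size,
				max_payload_size);
}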
static int
r535_rpc_status_to_errno(uint32_t rpc_status)
{
@@ -89,18 +175,16 @@ r535_rpc_status_to_errno(uint32_t rpc_status)
}
}
-static void *
-r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
+static int
+r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime)
{
- struct r535_gsp_msg *mqe;
u32 size, rptr = *gsp->msgq.rptr;
int used;
- u8 *msg;
- u32 len;
- size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + repc, GSP_PAGE_SIZE);
+ size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len,
+ GSP_PAGE_SIZE);
if (WARN_ON(!size || size >= gsp->msgq.cnt))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
do {
u32 wptr = *gsp->msgq.wptr;
@@ -115,70 +199,217 @@ r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
} while (--(*ptime));
if (WARN_ON(!*ptime))
- return ERR_PTR(-ETIMEDOUT);
+ return -ETIMEDOUT;
- mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + rptr * 0x1000);
+ return used;
+}
- if (prepc) {
- *prepc = (used * GSP_PAGE_SIZE) - sizeof(*mqe);
- return mqe->data;
- }
+static struct r535_gsp_msg *
+r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp)
+{
+ u32 rptr = *gsp->msgq.rptr;
- size = ALIGN(repc + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
+ /* Skip the first page, which is the message queue info */
+ return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE +
+ rptr * GSP_PAGE_SIZE);
+}
- msg = kvmalloc(repc, GFP_KERNEL);
- if (!msg)
- return ERR_PTR(-ENOMEM);
+/**
+ * DOC: Receive a GSP message queue element
+ *
+ * Receiving a GSP message queue element from the message queue consists of
+ * the following steps:
+ *
+ * - Peek the element from the queue: r535_gsp_msgq_peek().
+ * Peek the first page of the element to determine the total size of the
+ * message before allocating the proper memory.
+ *
+ * - Allocate memory for the message.
+ * Once the total size of the message is determined from the GSP message
+ * queue element, the caller of r535_gsp_msgq_recv() allocates the
+ * required memory.
+ *
+ * - Receive the message: r535_gsp_msgq_recv().
+ * Copy the message into the allocated memory. Advance the read pointer.
+ * If the message is a large GSP message, r535_gsp_msgq_recv() calls
+ * r535_gsp_msgq_recv_one_elem() repeatedly to receive continuation parts
+ * until the complete message is received.
+ * r535_gsp_msgq_recv() assembles the payloads of the continuation parts
+ * into the returned buffer of the large GSP message.
+ *
+ * - Free the allocated memory: r535_gsp_msg_done().
+ * The user is responsible for freeing the memory allocated for the GSP
+ * message pages after they have been processed.
+ */
+static void *
+r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+ struct r535_gsp_msg *mqe;
+ int ret;
+
+ ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+
+ return mqe->data;
+}
+
+struct r535_gsp_msg_info {
+ int *retries;
+ u32 gsp_rpc_len;
+ void *gsp_rpc_buf;
+ bool continuation;
+};
+
+static void
+r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl);
+
+static void *
+r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp,
+ struct r535_gsp_msg_info *info)
+{
+ u8 *buf = info->gsp_rpc_buf;
+ u32 rptr = *gsp->msgq.rptr;
+ struct r535_gsp_msg *mqe;
+ u32 size, expected, len;
+ int ret;
+
+ expected = info->gsp_rpc_len;
+
+ ret = r535_gsp_msgq_wait(gsp, expected, info->retries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+
+ if (info->continuation) {
+ struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data;
+
+ if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) {
+ nvkm_error(&gsp->subdev,
+ "Not a continuation of a large RPC\n");
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ return ERR_PTR(-EIO);
+ }
+ }
+
+ size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
- len = min_t(u32, repc, len);
- memcpy(msg, mqe->data, len);
+ len = min_t(u32, expected, len);
+
+ if (info->continuation)
+ memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc),
+ len - sizeof(struct nvfw_gsp_rpc));
+ else
+ memcpy(buf, mqe->data, len);
- repc -= len;
+ expected -= len;
- if (repc) {
+ if (expected) {
mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
- memcpy(msg + len, mqe, repc);
+ memcpy(buf + len, mqe, expected);
}
rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
mb();
(*gsp->msgq.rptr) = rptr;
- return msg;
+ return buf;
}
static void *
-r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 repc, int *ptime)
+r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
{
- return r535_gsp_msgq_wait(gsp, repc, NULL, ptime);
+ struct r535_gsp_msg *mqe;
+ const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe);
+ struct nvfw_gsp_rpc *rpc;
+ struct r535_gsp_msg_info info = {0};
+ u32 expected = gsp_rpc_len;
+ void *buf;
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+ rpc = (struct nvfw_gsp_rpc *)mqe->data;
+
+ if (WARN_ON(rpc->length > max_rpc_size))
+ return NULL;
+
+ buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ info.gsp_rpc_buf = buf;
+ info.retries = retries;
+ info.gsp_rpc_len = rpc->length;
+
+ buf = r535_gsp_msgq_recv_one_elem(gsp, &info);
+ if (IS_ERR(buf)) {
+ kvfree(info.gsp_rpc_buf);
+ info.gsp_rpc_buf = NULL;
+ return buf;
+ }
+
+ if (expected <= max_rpc_size)
+ return buf;
+
+ info.gsp_rpc_buf += info.gsp_rpc_len;
+ expected -= info.gsp_rpc_len;
+
+ while (expected) {
+ u32 size;
+
+ rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
+ if (IS_ERR_OR_NULL(rpc)) {
+ kvfree(buf);
+ return rpc;
+ }
+
+ info.gsp_rpc_len = rpc->length;
+ info.continuation = true;
+
+ rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
+ if (IS_ERR_OR_NULL(rpc)) {
+ kvfree(buf);
+ return rpc;
+ }
+
+ size = info.gsp_rpc_len - sizeof(*rpc);
+ expected -= size;
+ info.gsp_rpc_buf += size;
+ }
+
+ rpc = buf;
+ rpc->length = gsp_rpc_len;
+ return buf;
}
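Tying the receive DOC above to the helpers just defined, the call sequence condenses to the sketch below; it mirrors what r535_gsp_msg_recv() does further down and is not an addition to the patch:

static struct nvfw_gsp_rpc *
example_recv_one_rpc(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
{
	struct nvfw_gsp_rpc *rpc;

	/* 1) Peek: wait for the next element and map its RPC header. */
	rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), retries);
	if (IS_ERR_OR_NULL(rpc))
		return rpc;

	/*
	 * 2) + 3) r535_gsp_msgq_recv() allocates the buffer, copies the
	 * element(s) out of the queue, reassembles any CONTINUATION_RECORD
	 * parts and advances the read pointer.
	 */
	rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, retries);

	/* 4) The caller later releases the buffer with r535_gsp_msg_done(). */
	return rpc;
}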
static int
-r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
+r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc)
{
- struct r535_gsp_msg *cmd = container_of(argv, typeof(*cmd), data);
+ struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
struct r535_gsp_msg *cqe;
- u32 argc = cmd->checksum;
- u64 *ptr = (void *)cmd;
+ u32 gsp_rpc_len = msg->checksum;
+ u64 *ptr = (void *)msg;
u64 *end;
u64 csum = 0;
int free, time = 1000000;
- u32 wptr, size, step;
+ u32 wptr, size, step, len;
u32 off = 0;
- argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE);
+ len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len, GSP_PAGE_SIZE);
- end = (u64 *)((char *)ptr + argc);
- cmd->pad = 0;
- cmd->checksum = 0;
- cmd->sequence = gsp->cmdq.seq++;
- cmd->elem_count = DIV_ROUND_UP(argc, 0x1000);
+ end = (u64 *)((char *)ptr + len);
+ msg->pad = 0;
+ msg->checksum = 0;
+ msg->sequence = gsp->cmdq.seq++;
+ msg->elem_count = DIV_ROUND_UP(len, 0x1000);
while (ptr < end)
csum ^= *ptr++;
- cmd->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
+ msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
wptr = *gsp->cmdq.wptr;
do {
@@ -193,23 +424,23 @@ r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
} while(--time);
if (WARN_ON(!time)) {
- kvfree(cmd);
+ kvfree(msg);
return -ETIMEDOUT;
}
cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
- size = min_t(u32, argc, step * GSP_PAGE_SIZE);
+ size = min_t(u32, len, step * GSP_PAGE_SIZE);
- memcpy(cqe, (u8 *)cmd + off, size);
+ memcpy(cqe, (u8 *)msg + off, size);
wptr += DIV_ROUND_UP(size, 0x1000);
if (wptr == gsp->cmdq.cnt)
wptr = 0;
off += size;
- argc -= size;
- } while(argc);
+ len -= size;
+ } while (len);
nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
wmb();
@@ -218,40 +449,25 @@ r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);
- kvfree(cmd);
+ kvfree(msg);
return 0;
}
static void *
-r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 argc)
+r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len)
{
- struct r535_gsp_msg *cmd;
- u32 size = GSP_MSG_HDR_SIZE + argc;
+ struct r535_gsp_msg *msg;
+ u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len;
size = ALIGN(size, GSP_MSG_MIN_SIZE);
- cmd = kvzalloc(size, GFP_KERNEL);
- if (!cmd)
+ msg = kvzalloc(size, GFP_KERNEL);
+ if (!msg)
return ERR_PTR(-ENOMEM);
- cmd->checksum = argc;
- return cmd->data;
+ msg->checksum = gsp_rpc_len;
+ return msg->data;
}
-struct nvfw_gsp_rpc {
- u32 header_version;
- u32 signature;
- u32 length;
- u32 function;
- u32 rpc_result;
- u32 rpc_result_private;
- u32 sequence;
- union {
- u32 spare;
- u32 cpuRmGfid;
- };
- u8 data[];
-};
-
static void
r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
{
@@ -272,61 +488,61 @@ r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
}
static struct nvfw_gsp_rpc *
-r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 repc)
+r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len)
{
struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvfw_gsp_rpc *msg;
- int time = 4000000, i;
- u32 size;
+ struct nvfw_gsp_rpc *rpc;
+ int retries = 4000000, i;
retry:
- msg = r535_gsp_msgq_wait(gsp, sizeof(*msg), &size, &time);
- if (IS_ERR_OR_NULL(msg))
- return msg;
+ rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
- msg = r535_gsp_msgq_recv(gsp, msg->length, &time);
- if (IS_ERR_OR_NULL(msg))
- return msg;
+ rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
- if (msg->rpc_result) {
- r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR);
- r535_gsp_msg_done(gsp, msg);
+ if (rpc->rpc_result) {
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ r535_gsp_msg_done(gsp, rpc);
return ERR_PTR(-EINVAL);
}
- r535_gsp_msg_dump(gsp, msg, NV_DBG_TRACE);
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE);
- if (fn && msg->function == fn) {
- if (repc) {
- if (msg->length < sizeof(*msg) + repc) {
- nvkm_error(subdev, "msg len %d < %zd\n",
- msg->length, sizeof(*msg) + repc);
- r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR);
- r535_gsp_msg_done(gsp, msg);
+ if (fn && rpc->function == fn) {
+ if (gsp_rpc_len) {
+ if (rpc->length < gsp_rpc_len) {
+ nvkm_error(subdev, "rpc len %d < %d\n",
+ rpc->length, gsp_rpc_len);
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ r535_gsp_msg_done(gsp, rpc);
return ERR_PTR(-EIO);
}
- return msg;
+ return rpc;
}
- r535_gsp_msg_done(gsp, msg);
+ r535_gsp_msg_done(gsp, rpc);
return NULL;
}
for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
- if (ntfy->fn == msg->function) {
+ if (ntfy->fn == rpc->function) {
if (ntfy->func)
- ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg));
+ ntfy->func(ntfy->priv, ntfy->fn, rpc->data,
+ rpc->length - sizeof(*rpc));
break;
}
}
if (i == gsp->msgq.ntfy_nr)
- r535_gsp_msg_dump(gsp, msg, NV_DBG_WARN);
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN);
- r535_gsp_msg_done(gsp, msg);
+ r535_gsp_msg_done(gsp, rpc);
if (fn)
goto retry;
@@ -369,9 +585,10 @@ r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
}
static void *
-r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, bool wait,
+ u32 gsp_rpc_len)
{
- struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data);
+ struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
struct nvfw_gsp_rpc *msg;
u32 fn = rpc->function;
void *repv = NULL;
@@ -389,7 +606,7 @@ r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
return ERR_PTR(ret);
if (wait) {
- msg = r535_gsp_msg_recv(gsp, fn, repc);
+ msg = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
if (!IS_ERR_OR_NULL(msg))
repv = msg->data;
else
@@ -584,21 +801,21 @@ r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
}
static void
-r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
+r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *params)
{
- rpc_gsp_rm_alloc_v03_00 *rpc = container_of(repv, typeof(*rpc), params);
+ rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
nvkm_gsp_rpc_done(object->client->gsp, rpc);
}
static void *
-r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params)
{
- rpc_gsp_rm_alloc_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
+ rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
struct nvkm_gsp *gsp = object->client->gsp;
- void *ret;
+ void *ret = NULL;
- rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc) + repc);
+ rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc));
if (IS_ERR_OR_NULL(rpc))
return rpc;
@@ -606,8 +823,6 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
- } else {
- ret = repc ? rpc->params : NULL;
}
nvkm_gsp_rpc_done(gsp, rpc);
@@ -616,16 +831,22 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
}
static void *
-r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, u32 argc)
+r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass,
+ u32 params_size)
{
struct nvkm_gsp_client *client = object->client;
struct nvkm_gsp *gsp = client->gsp;
rpc_gsp_rm_alloc_v03_00 *rpc;
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x cls:0x%08x argc:%d\n",
- client->object.handle, object->parent->handle, object->handle, oclass, argc);
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x\n",
+ client->object.handle, object->parent->handle,
+ object->handle);
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, sizeof(*rpc) + argc);
+ nvkm_debug(&gsp->subdev, "cls:0x%08x params_size:%d\n", oclass,
+ params_size);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC,
+ sizeof(*rpc) + params_size);
if (IS_ERR(rpc))
return rpc;
@@ -634,30 +855,30 @@ r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, u32 argc)
rpc->hObject = object->handle;
rpc->hClass = oclass;
rpc->status = 0;
- rpc->paramsSize = argc;
+ rpc->paramsSize = params_size;
return rpc->params;
}
static void
-r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
+r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params)
{
- rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params);
+ rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr(params, rpc);
- if (!repv)
+ if (!params)
return;
nvkm_gsp_rpc_done(object->client->gsp, rpc);
}
static int
-r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc)
+r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 repc)
{
- rpc_gsp_rm_control_v03_00 *rpc = container_of((*argv), typeof(*rpc), params);
+ rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr((*params), rpc);
struct nvkm_gsp *gsp = object->client->gsp;
int ret = 0;
rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
if (IS_ERR_OR_NULL(rpc)) {
- *argv = NULL;
+ *params = NULL;
return PTR_ERR(rpc);
}
@@ -669,7 +890,7 @@ r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc)
}
if (repc)
- *argv = rpc->params;
+ *params = rpc->params;
else
nvkm_gsp_rpc_done(gsp, rpc);
@@ -677,16 +898,17 @@ r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc)
}
static void *
-r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
+r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_size)
{
struct nvkm_gsp_client *client = object->client;
struct nvkm_gsp *gsp = client->gsp;
rpc_gsp_rm_control_v03_00 *rpc;
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x argc:%d\n",
- client->object.handle, object->handle, cmd, argc);
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x params_size:%d\n",
+ client->object.handle, object->handle, cmd, params_size);
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, sizeof(*rpc) + argc);
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
+ sizeof(*rpc) + params_size);
if (IS_ERR(rpc))
return rpc;
@@ -694,7 +916,7 @@ r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
rpc->hObject = object->handle;
rpc->cmd = cmd;
rpc->status = 0;
- rpc->paramsSize = argc;
+ rpc->paramsSize = params_size;
return rpc->params;
}
@@ -707,11 +929,12 @@ r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
}
static void *
-r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
+r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size)
{
struct nvfw_gsp_rpc *rpc;
- rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + argc, sizeof(u64)));
+ rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size,
+ sizeof(u64)));
if (IS_ERR(rpc))
return ERR_CAST(rpc);
@@ -720,38 +943,41 @@ r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
rpc->function = fn;
rpc->rpc_result = 0xffffffff;
rpc->rpc_result_private = 0xffffffff;
- rpc->length = sizeof(*rpc) + argc;
+ rpc->length = sizeof(*rpc) + payload_size;
return rpc->data;
}
static void *
-r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
-{
- struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data);
- struct r535_gsp_msg *cmd = container_of((void *)rpc, typeof(*cmd), data);
- const u32 max_msg_size = (16 * 0x1000) - sizeof(struct r535_gsp_msg);
- const u32 max_rpc_size = max_msg_size - sizeof(*rpc);
- u32 rpc_size = rpc->length - sizeof(*rpc);
+r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait,
+ u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
+ struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
+ const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg);
+ const u32 max_payload_size = max_rpc_size - sizeof(*rpc);
+ u32 payload_size = rpc->length - sizeof(*rpc);
void *repv;
mutex_lock(&gsp->cmdq.mutex);
- if (rpc_size > max_rpc_size) {
+ if (payload_size > max_payload_size) {
const u32 fn = rpc->function;
+ u32 remain_payload_size = payload_size;
/* Adjust length, and send initial RPC. */
- rpc->length = sizeof(*rpc) + max_rpc_size;
- cmd->checksum = rpc->length;
+ rpc->length = sizeof(*rpc) + max_payload_size;
+ msg->checksum = rpc->length;
- repv = r535_gsp_rpc_send(gsp, argv, false, 0);
+ repv = r535_gsp_rpc_send(gsp, payload, false, 0);
if (IS_ERR(repv))
goto done;
- argv += max_rpc_size;
- rpc_size -= max_rpc_size;
+ payload += max_payload_size;
+ remain_payload_size -= max_payload_size;
/* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
- while (rpc_size) {
- u32 size = min(rpc_size, max_rpc_size);
+ while (remain_payload_size) {
+ u32 size = min(remain_payload_size,
+ max_payload_size);
void *next;
next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
@@ -760,28 +986,31 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
goto done;
}
- memcpy(next, argv, size);
+ memcpy(next, payload, size);
repv = r535_gsp_rpc_send(gsp, next, false, 0);
if (IS_ERR(repv))
goto done;
- argv += size;
- rpc_size -= size;
+ payload += size;
+ remain_payload_size -= size;
}
/* Wait for reply. */
- if (wait) {
- rpc = r535_gsp_msg_recv(gsp, fn, repc);
- if (!IS_ERR_OR_NULL(rpc))
+ rpc = r535_gsp_msg_recv(gsp, fn, payload_size +
+ sizeof(*rpc));
+ if (!IS_ERR_OR_NULL(rpc)) {
+ if (wait) {
repv = rpc->data;
- else
- repv = rpc;
+ } else {
+ nvkm_gsp_rpc_done(gsp, rpc);
+ repv = NULL;
+ }
} else {
- repv = NULL;
+ repv = wait ? rpc : NULL;
}
} else {
- repv = r535_gsp_rpc_send(gsp, argv, wait, repc);
+ repv = r535_gsp_rpc_send(gsp, payload, wait, gsp_rpc_len);
}
done:
@@ -1111,7 +1340,7 @@ enum registry_type {
#define REGISTRY_MAX_KEY_LENGTH 64
/**
- * registry_list_entry - linked list member for a registry key/value
+ * struct registry_list_entry - linked list member for a registry key/value
* @head: list_head struct
* @type: dword, binary, or string
* @klen: the length of name of the key
@@ -1327,7 +1556,7 @@ struct nv_gsp_registry_entries {
u32 value;
};
-/**
+/*
* r535_registry_entries - required registry entries for GSP-RM
*
* This array lists registry entries that are required for GSP-RM to
@@ -2101,7 +2330,7 @@ MODULE_PARM_DESC(keep_gsp_logging,
#define NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU 0xf3d722
/**
- * rpc_ucode_libos_print_v1E_08 - RPC payload for libos print buffers
+ * struct rpc_ucode_libos_print_v1e_08 - RPC payload for libos print buffers
* @ucode_eng_desc: the engine descriptor
* @libos_print_buf_size: the size of the libos_print_buf[]
* @libos_print_buf: the actual buffer
@@ -2162,7 +2391,7 @@ r535_gsp_msg_libos_print(void *priv, u32 fn, void *repv, u32 repc)
}
/**
- * create_debufgs - create a blob debugfs entry
+ * create_debugfs - create a blob debugfs entry
* @gsp: gsp pointer
* @name: name of this dentry
* @blob: blob wrapper
@@ -2788,6 +3017,10 @@ static bool is_empty(const struct debugfs_blob_wrapper *b)
/**
* r535_gsp_copy_log - preserve the logging buffers in a blob
+ * @parent: the top-level dentry for this GPU
+ * @name: name of debugfs entry to create
+ * @s: original wrapper object to copy from
+ * @t: new wrapper object to copy to
*
* When GSP shuts down, the nvkm_gsp object and all its memory is deleted.
* To preserve the logging buffers, the buffers need to be copied, but only
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
index a6f410ba60bc..d393bc540f86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
@@ -75,7 +75,7 @@ gp10b_pmu_acr = {
.bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons,
};
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 7b2df3185de4..692df747e2ae 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1236,21 +1236,15 @@ static int dss_video_pll_probe(struct dss_device *dss)
if (!np)
return 0;
- if (of_property_read_bool(np, "syscon-pll-ctrl")) {
- dss->syscon_pll_ctrl = syscon_regmap_lookup_by_phandle(np,
- "syscon-pll-ctrl");
+ if (of_property_present(np, "syscon-pll-ctrl")) {
+ dss->syscon_pll_ctrl =
+ syscon_regmap_lookup_by_phandle_args(np, "syscon-pll-ctrl",
+ 1, &dss->syscon_pll_ctrl_offset);
if (IS_ERR(dss->syscon_pll_ctrl)) {
dev_err(&pdev->dev,
"failed to get syscon-pll-ctrl regmap\n");
return PTR_ERR(dss->syscon_pll_ctrl);
}
-
- if (of_property_read_u32_index(np, "syscon-pll-ctrl", 1,
- &dss->syscon_pll_ctrl_offset)) {
- dev_err(&pdev->dev,
- "failed to get syscon-pll-ctrl offset\n");
- return -EINVAL;
- }
}
pll_regulator = devm_regulator_get(&pdev->dev, "vdda_video");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 4435f0027c78..e1ac447221ee 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -341,10 +341,9 @@ static void hdmi4_bridge_mode_set(struct drm_bridge *bridge,
}
static void hdmi4_bridge_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
- struct drm_atomic_state *state = bridge_state->base.state;
struct drm_connector_state *conn_state;
struct drm_connector *connector;
struct drm_crtc_state *crtc_state;
@@ -410,7 +409,7 @@ done:
}
static void hdmi4_bridge_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
unsigned long flags;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index a8c740df3146..fa9904e4c218 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -339,10 +339,9 @@ static void hdmi5_bridge_mode_set(struct drm_bridge *bridge,
}
static void hdmi5_bridge_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
- struct drm_atomic_state *state = bridge_state->base.state;
struct drm_connector_state *conn_state;
struct drm_connector *connector;
struct drm_crtc_state *crtc_state;
@@ -408,7 +407,7 @@ done:
}
static void hdmi5_bridge_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
unsigned long flags;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 21564c38234f..12ef47cd232b 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -545,23 +545,6 @@ static void hdmi_core_enable_interrupts(struct hdmi_core_data *core)
REG_FLD_MOD(core->base, HDMI_CORE_IH_MUTE, 0x0, 1, 0);
}
-int hdmi5_core_handle_irqs(struct hdmi_core_data *core)
-{
- void __iomem *base = core->base;
-
- REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT1, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT2, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_AS_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_PHY_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_I2CM_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_CEC_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_VP_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_I2CMPHY_STAT0, 0xff, 7, 0);
-
- return 0;
-}
-
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg)
{
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
index 070cbf5fb57d..b8ed21156e8c 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
@@ -286,7 +286,6 @@ int hdmi5_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len);
void hdmi5_core_ddc_uninit(struct hdmi_core_data *core);
void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s);
-int hdmi5_core_handle_irqs(struct hdmi_core_data *core);
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg);
int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
diff --git a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
index e85d63a176d0..0bfed0ec0bbc 100644
--- a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
+++ b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
@@ -57,65 +57,39 @@ static void ebbg_ft8719_reset(struct ebbg_ft8719 *ctx)
static int ebbg_ft8719_on(struct ebbg_ft8719 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x00ff);
- if (ret < 0) {
- dev_err(dev, "Failed to set display brightness: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x00ff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 90);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(90);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
-
- return 0;
+ return dsi_ctx.accum_err;
}
static int ebbg_ft8719_off(struct ebbg_ft8719 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- usleep_range(10000, 11000);
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(90);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 90);
- return 0;
+ return dsi_ctx.accum_err;
}
static int ebbg_ft8719_prepare(struct drm_panel *panel)
{
struct ebbg_ft8719 *ctx = to_ebbg_ft8719(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
@@ -126,7 +100,6 @@ static int ebbg_ft8719_prepare(struct drm_panel *panel)
ret = ebbg_ft8719_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
return ret;
}
@@ -137,18 +110,10 @@ static int ebbg_ft8719_prepare(struct drm_panel *panel)
static int ebbg_ft8719_unprepare(struct drm_panel *panel)
{
struct ebbg_ft8719 *ctx = to_ebbg_ft8719(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
-
- ret = ebbg_ft8719_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ ebbg_ft8719_off(ctx);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
-
- ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- if (ret)
- dev_err(panel->dev, "Failed to disable regulators: %d\n", ret);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index f8511fe5fb0d..52028c8f8988 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1808,6 +1808,20 @@ static const struct panel_delay delay_200_150_e50 = {
.enable = 50,
};
+static const struct panel_delay delay_200_500_e250 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 250,
+};
+
+static const struct panel_delay delay_50_500_e200_d200_po2e335 = {
+ .hpd_absent = 50,
+ .unprepare = 500,
+ .enable = 200,
+ .disable = 200,
+ .powered_on_to_enable = 335,
+};
+
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.ident = { \
@@ -1862,6 +1876,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x73aa, &delay_200_500_e50, "B116XTN02.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xa7b3, &delay_200_500_e50, "B140UAN04.4"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
@@ -1914,6 +1929,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
@@ -1951,9 +1967,14 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'O', 0x1413, &delay_200_500_e50_p2e200, "MNE007JA1-2"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1100, &delay_200_500_e80_d50, "MNB601LS1-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1103, &delay_200_500_e80_d50, "MNB601LS1-3"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50, "MNB601LS1-4"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"),
+
+ EDP_PANEL_ENTRY('E', 'T', 'C', 0x0000, &delay_50_500_e200_d200_po2e335, "LP079QX1-SP0V"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d51, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "MB116AN01"),
@@ -1993,6 +2014,8 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1593, &delay_200_500_p2e100, "LQ134N1"),
+ EDP_PANEL_ENTRY('S', 'T', 'A', 0x0004, &delay_200_500_e200, "116KHD024006"),
+ EDP_PANEL_ENTRY('S', 'T', 'A', 0x0009, &delay_200_500_e250, "116QHD024002"),
EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"),
{ /* sentinel */ }
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83102.c b/drivers/gpu/drm/panel/panel-himax-hx83102.c
index 3644a7544b93..66abfc44e424 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx83102.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx83102.c
@@ -24,6 +24,8 @@
#define HX83102_SETPOWER 0xb1
#define HX83102_SETDISP 0xb2
#define HX83102_SETCYC 0xb4
+#define HX83102_UNKNOWN_B6 0xb6
+#define HX83102_UNKNOWN_B8 0xb8
#define HX83102_SETEXTC 0xb9
#define HX83102_SETMIPI 0xba
#define HX83102_SETVDC 0xbc
@@ -43,6 +45,7 @@
#define HX83102_SETGIP1 0xd5
#define HX83102_SETGIP2 0xd6
#define HX83102_SETGIP3 0xd8
+#define HX83102_UNKNOWN_D9 0xd9
#define HX83102_SETGMA 0xe0
#define HX83102_UNKNOWN_E1 0xe1
#define HX83102_SETTP1 0xe7
@@ -291,6 +294,103 @@ static int boe_nv110wum_init(struct hx83102 *ctx)
return dsi_ctx.accum_err;
};
+static int csot_pna957qt1_1_init(struct hx83102 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ msleep(60);
+
+ hx83102_enable_extended_cmds(&dsi_ctx, true);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_D9, 0xd2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xb3, 0xb3, 0x31, 0xf1, 0x33,
+ 0xe0, 0x54, 0x36, 0x36, 0x3a, 0x3a, 0x32, 0x8b, 0x11, 0xe5,
+ 0x98);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xd9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x8b, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x00, 0x47, 0xb0, 0x80, 0x00, 0x2c,
+ 0x80, 0x3c, 0x9f, 0x22, 0x20, 0x00, 0x00, 0x98, 0x51);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCYC, 0x41, 0x41, 0x41, 0x41, 0x64, 0x64,
+ 0x40, 0x84, 0x64, 0x84, 0x01, 0x9d, 0x01, 0x02, 0x01, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETVDC, 0x1b, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_BE, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPTBA, 0xfc, 0xc4, 0x80, 0x9c, 0x36, 0x00,
+ 0x0d, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSTBA, 0x32, 0x32, 0x22, 0x11, 0x22, 0xa0,
+ 0x31, 0x08, 0xf5, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xcc);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTCON, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETRAMDMY, 0x97);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPWM, 0x00, 0x1e, 0x13, 0x88, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x08, 0x13, 0x07, 0x00, 0x0f,
+ 0x36);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPANEL, 0x02, 0x03, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPCTRL, 0x07, 0x06, 0x00, 0x02, 0x04, 0x2c,
+ 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP0, 0x06, 0x00, 0x00, 0x00, 0x40, 0x04,
+ 0x08, 0x04, 0x08, 0x37, 0x07, 0x44, 0x37, 0x2b, 0x2b, 0x03,
+ 0x03, 0x32, 0x10, 0x22, 0x00, 0x25, 0x32, 0x10, 0x29, 0x00,
+ 0x29, 0x32, 0x10, 0x08, 0x00, 0x08, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP1, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x07, 0x06, 0x07, 0x06, 0x05, 0x04,
+ 0x05, 0x04, 0x03, 0x02, 0x03, 0x02, 0x01, 0x00, 0x01, 0x00,
+ 0x18, 0x18, 0x25, 0x24, 0x25, 0x24, 0x1f, 0x1f, 0x1f, 0x1f,
+ 0x1e, 0x1e, 0x1e, 0x1e, 0x20, 0x20, 0x20, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0,
+ 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGMA, 0x0a, 0x0e, 0x1a, 0x21, 0x28, 0x46,
+ 0x5c, 0x61, 0x63, 0x5e, 0x78, 0x7d, 0x80, 0x8e, 0x89, 0x90,
+ 0x98, 0xaa, 0xa8, 0x52, 0x59, 0x60, 0x6f, 0x06, 0x0a, 0x16,
+ 0x1d, 0x24, 0x46, 0x5c, 0x61, 0x6b, 0x66, 0x7c, 0x7d, 0x80,
+ 0x8e, 0x89, 0x90, 0x98, 0xaa, 0xa8, 0x52, 0x59, 0x60, 0x6f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0xe0, 0x10, 0x10, 0x0d, 0x1e, 0x9d,
+ 0x02, 0x52, 0x9d, 0x14, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x01, 0x7f, 0x11, 0xfd);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETMIPI, 0x4f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x86);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_D2, 0x64);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP0, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0,
+ 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0, 0x05, 0x15, 0x55, 0x45,
+ 0x55, 0x50, 0x05, 0x15, 0x55, 0x45, 0x55, 0x50);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0x02, 0x00, 0x24, 0x01, 0x7e, 0x0f,
+ 0x7c, 0x10, 0xa0, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x03, 0x07, 0x00, 0x10, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x0f, 0x3f, 0xff, 0xcf, 0xff, 0xf0,
+ 0x0f, 0x3f, 0xff, 0xcf, 0xff, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0xfe, 0x01, 0xfe, 0x01, 0xfe, 0x01,
+ 0x00, 0x00, 0x00, 0x23, 0x00, 0x23, 0x81, 0x02, 0x40, 0x00,
+ 0x20, 0x9d, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x66, 0x81);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCYC, 0x03, 0xff, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0,
+ 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0, 0x0f, 0x2a, 0xaa, 0x8a,
+ 0xaa, 0xf0, 0x0f, 0x2a, 0xaa, 0x8a, 0xaa, 0xf0, 0x0a, 0x2a,
+ 0xaa, 0x8a, 0xaa, 0xa0, 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x00);
+ hx83102_enable_extended_cmds(&dsi_ctx, false);
+
+ mipi_dsi_msleep(&dsi_ctx, 60);
+
+ return dsi_ctx.accum_err;
+};
+
static int ivo_t109nw41_init(struct hx83102 *ctx)
{
struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
@@ -396,6 +496,211 @@ static int ivo_t109nw41_init(struct hx83102 *ctx)
return dsi_ctx.accum_err;
};
+static int kingdisplay_kd110n11_51ie_init(struct hx83102 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ msleep(50);
+
+ hx83102_enable_extended_cmds(&dsi_ctx, true);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_D9, 0xd1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xb3, 0xb3, 0x31, 0xf1,
+ 0x33, 0xe0, 0x54, 0x36, 0x36, 0x3a, 0x3a, 0x32, 0x8b,
+ 0x11, 0xe5, 0x98);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xd9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x8b, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x00, 0x47, 0xb0, 0x80, 0x00, 0x2c,
+ 0x80, 0x3c, 0x9f, 0x22, 0x20, 0x00, 0x00, 0x98, 0x51);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCYC, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64,
+ 0x40, 0x84, 0x64, 0x84, 0x01, 0x9d, 0x01, 0x02, 0x01, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETVDC, 0x1b, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_BE, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPTBA, 0xfc, 0xc4, 0x80, 0x9c, 0x36, 0x00,
+ 0x0d, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSTBA, 0x32, 0x32, 0x22, 0x11, 0x22, 0xa0,
+ 0x31, 0x08, 0xf5, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xcc);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTCON, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETRAMDMY, 0x97);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPWM, 0x00, 0x1e, 0x13, 0x88, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x08, 0x13, 0x07, 0x00,
+ 0x0f, 0x36);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPANEL, 0x02, 0x03, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPCTRL, 0x07, 0x06, 0x00, 0x02,
+ 0x04, 0x2c, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP0, 0x06, 0x00, 0x00, 0x00, 0x40, 0x04,
+ 0x08, 0x04, 0x08, 0x37, 0x07, 0x44, 0x37, 0x2b, 0x2b, 0x03,
+ 0x03, 0x32, 0x10, 0x22, 0x00, 0x25, 0x32, 0x10, 0x29, 0x00,
+ 0x29, 0x32, 0x10, 0x08, 0x00, 0x08, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP1, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x07, 0x06, 0x07, 0x06, 0x05, 0x04,
+ 0x05, 0x04, 0x03, 0x02, 0x03, 0x02, 0x01, 0x00, 0x01, 0x00,
+ 0x18, 0x18, 0x25, 0x24, 0x25, 0x24, 0x1f, 0x1f, 0x1f, 0x1f,
+ 0x1e, 0x1e, 0x1e, 0x1e, 0x20, 0x20, 0x20, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0,
+ 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0xe0, 0x10, 0x10, 0x0d, 0x1e, 0x9d,
+ 0x02, 0x52, 0x9d, 0x14, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x01, 0x7f, 0x11, 0xfd);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETMIPI, 0x4f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x86);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_D2, 0x64);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP0, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0,
+ 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0, 0x05, 0x15, 0x55, 0x45,
+ 0x55, 0x50, 0x05, 0x15, 0x55, 0x45, 0x55, 0x50);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0x02, 0x00, 0x24, 0x01, 0x7e, 0x0f,
+ 0x7c, 0x10, 0xa0, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x03, 0x07, 0x00, 0x10, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x0f, 0x3f, 0xff, 0xcf, 0xff, 0xf0,
+ 0x0f, 0x3f, 0xff, 0xcf, 0xff, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0xfe, 0x01, 0xfe, 0x01, 0xfe, 0x01,
+ 0x00, 0x00, 0x00, 0x23, 0x00, 0x23, 0x81, 0x02, 0x40, 0x00,
+ 0x20, 0x9d, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x66, 0x81);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCYC, 0x03, 0xff, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0,
+ 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0, 0x0f, 0x2a, 0xaa, 0x8a,
+ 0xaa, 0xf0, 0x0f, 0x2a, 0xaa, 0x8a, 0xaa, 0xf0, 0x0a, 0x2a,
+ 0xaa, 0x8a, 0xaa, 0xa0, 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x00);
+ hx83102_enable_extended_cmds(&dsi_ctx, false);
+
+ return dsi_ctx.accum_err;
+}
+
+static int starry_2082109qfh040022_50e_init(struct hx83102 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ msleep(50);
+
+ hx83102_enable_extended_cmds(&dsi_ctx, true);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_D9, 0xd1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xb5, 0xb5, 0x31, 0xf1, 0x33,
+ 0xc3, 0x57, 0x36, 0x36, 0x36, 0x36, 0x1a, 0x8b, 0x11, 0x65,
+ 0x00, 0x88, 0xfa, 0xff, 0xff, 0x8f, 0xff, 0x08, 0x3c, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x00, 0x47, 0xb0, 0x80, 0x00, 0x22,
+ 0x70, 0x3c, 0xa1, 0x22, 0x00, 0x00, 0x00, 0x88, 0xf4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCYC, 0x14, 0x16, 0x14, 0x50, 0x14, 0x50,
+ 0x0d, 0x6a, 0x0d, 0x6a, 0x01, 0x9e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_B6, 0x34, 0x34, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_B8, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xcd);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETMIPI, 0x84);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETVDC, 0x1b, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_BE, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPTBA, 0xfc, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSTBA, 0x38, 0x38, 0x22, 0x11, 0x33, 0xa0,
+ 0x61, 0x08, 0xf5, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xcc);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTCON, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETRAMDMY, 0x97);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPWM, 0x00, 0x1e, 0x30, 0xd4, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x08, 0x13, 0x07, 0x00, 0x0f,
+ 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPANEL, 0x02, 0x03, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCASCADE, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPCTRL, 0x37, 0x06, 0x00, 0x02, 0x04,
+ 0x2c, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP0, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x3b, 0x03, 0x73, 0x3b, 0x21, 0x21, 0x03,
+ 0x03, 0x98, 0x10, 0x1d, 0x00, 0x1d, 0x32, 0x17, 0xa1, 0x07,
+ 0xa1, 0x43, 0x17, 0xa6, 0x07, 0xa6, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP1, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x2a, 0x2b, 0x1f, 0x1f,
+ 0x1e, 0x1e, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
+ 0x0a, 0x0b, 0x20, 0x21, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x02, 0xaa, 0xea, 0xaa, 0xaa, 0x00,
+ 0x02, 0xaa, 0xea, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0x07, 0x10, 0x10, 0x2a, 0x32, 0x9f,
+ 0x01, 0x5a, 0x91, 0x14, 0x14, 0x00, 0x00, 0x00, 0x00, 0x12,
+ 0x05, 0x02, 0x02, 0x10, 0x33, 0x02, 0x04, 0x18, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x01, 0x7f, 0x11, 0xfd);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x86);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_D2, 0x3d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP0, 0x00, 0x00, 0x00, 0x80, 0x80, 0x0c,
+ 0xa1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x03, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x03, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0x02, 0x00, 0x2d, 0x01, 0x7f, 0x0f,
+ 0x7c, 0x10, 0xa0, 0x00, 0x00, 0x77, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPTBA, 0xf2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x02, 0x00, 0x00, 0x10, 0x58);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_D2, 0x0a, 0x0a, 0x05, 0x03, 0x0a,
+ 0x0a, 0x01, 0x03, 0x01, 0x01, 0x05, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xcc);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP0, 0x03, 0x1f, 0xe0, 0x11, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0xab, 0xff, 0xff, 0xff, 0xff, 0xa0,
+ 0xab, 0xff, 0xff, 0xff, 0xff, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0xfe, 0x01, 0xfe, 0x01, 0xfe, 0x01,
+ 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x81, 0x02, 0x40, 0x00,
+ 0x20, 0x9e, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCYC, 0x03, 0xff, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0xaa, 0xab, 0xea, 0xaa, 0xaa, 0xa0,
+ 0xaa, 0xab, 0xea, 0xaa, 0xaa, 0xa0, 0xaa, 0xbf, 0xff, 0xff,
+ 0xfe, 0xa0, 0xaa, 0xbf, 0xff, 0xff, 0xfe, 0xa0, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xa0, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_E1, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETMIPI, 0x96);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETMIPI, 0x4f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xcc);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETMIPI, 0x84);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x00);
+ hx83102_enable_extended_cmds(&dsi_ctx, false);
+
+ mipi_dsi_msleep(&dsi_ctx, 110);
+
+ return dsi_ctx.accum_err;
+}
+
static const struct drm_display_mode starry_mode = {
.clock = 162680,
.hdisplay = 1200,
@@ -440,6 +745,28 @@ static const struct hx83102_panel_desc boe_nv110wum_desc = {
.init = boe_nv110wum_init,
};
+static const struct drm_display_mode csot_pna957qt1_1_default_mode = {
+ .clock = 177958,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 124,
+ .hsync_end = 1200 + 124 + 80,
+ .htotal = 1200 + 124 + 80 + 40,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 88,
+ .vsync_end = 1920 + 88 + 8,
+ .vtotal = 1920 + 88 + 8 + 38,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct hx83102_panel_desc csot_pna957qt1_1_desc = {
+ .modes = &csot_pna957qt1_1_default_mode,
+ .size = {
+ .width_mm = 147,
+ .height_mm = 235,
+ },
+ .init = csot_pna957qt1_1_init,
+};
+
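A quick sanity check on the csot_pna957qt1_1 timings above (not part of the diff): the dotclock works out to roughly a 60 Hz refresh,

    pixel clock ≈ htotal * vtotal * refresh
                = (1200 + 124 + 80 + 40) * (1920 + 88 + 8 + 38) * 60
                = 1444 * 2054 * 60 ≈ 177.96 MHz, matching .clock = 177958 (kHz).

The kingdisplay and starry modes added further down land at ~60 Hz by the same arithmetic.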
static const struct drm_display_mode ivo_t109nw41_default_mode = {
.clock = 167700,
.hdisplay = 1200,
@@ -462,6 +789,50 @@ static const struct hx83102_panel_desc ivo_t109nw41_desc = {
.init = ivo_t109nw41_init,
};
+static const struct drm_display_mode kingdisplay_kd110n11_51ie_default_mode = {
+ .clock = 182750,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 124,
+ .hsync_end = 1200 + 124 + 80,
+ .htotal = 1200 + 124 + 80 + 80,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 88,
+ .vsync_end = 1920 + 88 + 8,
+ .vtotal = 1920 + 88 + 8 + 38,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct hx83102_panel_desc kingdisplay_kd110n11_51ie_desc = {
+ .modes = &kingdisplay_kd110n11_51ie_default_mode,
+ .size = {
+ .width_mm = 147,
+ .height_mm = 235,
+ },
+ .init = kingdisplay_kd110n11_51ie_init,
+};
+
+static const struct drm_display_mode starry_2082109qfh040022_50e_default_mode = {
+ .clock = 192050,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 160,
+ .hsync_end = 1200 + 160 + 66,
+ .htotal = 1200 + 160 + 66 + 120,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 115,
+ .vsync_end = 1920 + 115 + 8,
+ .vtotal = 1920 + 115 + 8 + 28,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct hx83102_panel_desc starry_2082109qfh040022_50e_desc = {
+ .modes = &starry_2082109qfh040022_50e_default_mode,
+ .size = {
+ .width_mm = 147,
+ .height_mm = 235,
+ },
+ .init = starry_2082109qfh040022_50e_init,
+};
+
static int hx83102_enable(struct drm_panel *panel)
{
msleep(130);
@@ -683,9 +1054,18 @@ static const struct of_device_id hx83102_of_match[] = {
{ .compatible = "boe,nv110wum-l60",
.data = &boe_nv110wum_desc
},
+ { .compatible = "csot,pna957qt1-1",
+ .data = &csot_pna957qt1_1_desc
+ },
{ .compatible = "ivo,t109nw41",
.data = &ivo_t109nw41_desc
},
+ { .compatible = "kingdisplay,kd110n11-51ie",
+ .data = &kingdisplay_kd110n11_51ie_desc
+ },
+ { .compatible = "starry,2082109qfh040022-50e",
+ .data = &starry_2082109qfh040022_50e_desc
+ },
{ .compatible = "starry,himax83102-j02",
.data = &starry_desc
},
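All of the hx83102 init sequences above rely on the mipi_dsi_multi_context helpers: each *_multi() call first checks dsi_ctx.accum_err and becomes a no-op once it is non-zero, so the long command lists need no per-call error handling and the function reports the first failure through a single return. A minimal sketch of the pattern, with a hypothetical panel and made-up command bytes:

    static int example_panel_init(struct mipi_dsi_device *dsi)
    {
    	struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

    	mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
    	mipi_dsi_msleep(&dsi_ctx, 120);                 /* skipped if an earlier call failed */
    	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x83, 0x10, 0x21); /* illustrative bytes */
    	mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

    	return dsi_ctx.accum_err;                       /* 0, or the first error encountered */
    }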
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index 45d09e6fa667..7d68a8acfe2e 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -109,13 +109,13 @@ static int jadard_prepare(struct drm_panel *panel)
if (jadard->desc->lp11_to_reset_delay_ms)
msleep(jadard->desc->lp11_to_reset_delay_ms);
- gpiod_set_value(jadard->reset, 1);
+ gpiod_set_value(jadard->reset, 0);
msleep(5);
- gpiod_set_value(jadard->reset, 0);
+ gpiod_set_value(jadard->reset, 1);
msleep(10);
- gpiod_set_value(jadard->reset, 1);
+ gpiod_set_value(jadard->reset, 0);
msleep(130);
ret = jadard->desc->init(jadard);
@@ -1130,7 +1130,7 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
dsi->format = desc->format;
dsi->lanes = desc->lanes;
- jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(jadard->reset)) {
DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n");
return PTR_ERR(jadard->reset);
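The reset rework above follows the standard gpiod convention: values passed to gpiod_set_value() are logical, so 1 always means "assert reset" and the physical polarity comes from the GPIO flags in the devicetree. Requesting the line with GPIOD_OUT_HIGH at probe therefore holds the panel in reset until prepare releases it. A hedged sketch of the idiom (function and variable names are illustrative, not from this driver):

    static int example_reset_panel(struct device *dev)
    {
    	/* request the reset line already asserted (panel held in reset) */
    	struct gpio_desc *reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);

    	if (IS_ERR(reset))
    		return PTR_ERR(reset);

    	msleep(5);
    	gpiod_set_value(reset, 1);	/* assert reset (logical level, polarity from DT) */
    	msleep(10);
    	gpiod_set_value(reset, 0);	/* deassert; the panel starts its power-on sequence */
    	return 0;
    }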
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
index d2df227abbea..57b1a899bbdc 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -39,91 +39,66 @@ static void s6e88a0_ams452ef01_reset(struct s6e88a0_ams452ef01 *ctx)
static int s6e88a0_ams452ef01_on(struct s6e88a0_ams452ef01 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); // enable LEVEL2 commands
- mipi_dsi_dcs_write_seq(dsi, 0xcc, 0x4c); // set Pixel Clock Divider polarity
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); // enable LEVEL2 commands
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcc, 0x4c); // set Pixel Clock Divider polarity
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
// set default brightness/gama
- mipi_dsi_dcs_write_seq(dsi, 0xca,
- 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, // V255 RR,GG,BB
- 0x80, 0x80, 0x80, // V203 R,G,B
- 0x80, 0x80, 0x80, // V151 R,G,B
- 0x80, 0x80, 0x80, // V87 R,G,B
- 0x80, 0x80, 0x80, // V51 R,G,B
- 0x80, 0x80, 0x80, // V35 R,G,B
- 0x80, 0x80, 0x80, // V23 R,G,B
- 0x80, 0x80, 0x80, // V11 R,G,B
- 0x6b, 0x68, 0x71, // V3 R,G,B
- 0x00, 0x00, 0x00); // V1 R,G,B
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xca,
+ 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, // V255 RR,GG,BB
+ 0x80, 0x80, 0x80, // V203 R,G,B
+ 0x80, 0x80, 0x80, // V151 R,G,B
+ 0x80, 0x80, 0x80, // V87 R,G,B
+ 0x80, 0x80, 0x80, // V51 R,G,B
+ 0x80, 0x80, 0x80, // V35 R,G,B
+ 0x80, 0x80, 0x80, // V23 R,G,B
+ 0x80, 0x80, 0x80, // V11 R,G,B
+ 0x6b, 0x68, 0x71, // V3 R,G,B
+ 0x00, 0x00, 0x00); // V1 R,G,B
// set default Amoled Off Ratio
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x40, 0x0a, 0x17, 0x00, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x2c, 0x0b); // set default elvss voltage
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xf7, 0x03); // gamma/aor update
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); // disable LEVEL2 commands
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x40, 0x0a, 0x17, 0x00, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x2c, 0x0b); // set default elvss voltage
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf7, 0x03); // gamma/aor update
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); // disable LEVEL2 commands
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
-static int s6e88a0_ams452ef01_off(struct s6e88a0_ams452ef01 *ctx)
+static void s6e88a0_ams452ef01_off(struct s6e88a0_ams452ef01 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(35);
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
-
- return 0;
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 35);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
}
static int s6e88a0_ams452ef01_prepare(struct drm_panel *panel)
{
struct s6e88a0_ams452ef01 *ctx = to_s6e88a0_ams452ef01(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- if (ret < 0) {
- dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ if (ret < 0)
return ret;
- }
s6e88a0_ams452ef01_reset(ctx);
ret = s6e88a0_ams452ef01_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
ctx->supplies);
@@ -136,12 +111,8 @@ static int s6e88a0_ams452ef01_prepare(struct drm_panel *panel)
static int s6e88a0_ams452ef01_unprepare(struct drm_panel *panel)
{
struct s6e88a0_ams452ef01 *ctx = to_s6e88a0_ams452ef01(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = s6e88a0_ams452ef01_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ s6e88a0_ams452ef01_off(ctx);
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
index 74c760ee0c2d..0b4e0983639b 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
@@ -44,60 +44,39 @@ static void sharp_ls060_reset(struct sharp_ls060 *ctx)
static int sharp_ls060_on(struct sharp_ls060 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x13);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_MEMORY_START);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_MEMORY_START);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(50);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
- return 0;
+ return dsi_ctx.accum_err;
}
-static int sharp_ls060_off(struct sharp_ls060 *ctx)
+static void sharp_ls060_off(struct sharp_ls060 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- usleep_range(2000, 3000);
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(121);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 2000, 3000);
- return 0;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 121);
}
static int sharp_ls060_prepare(struct drm_panel *panel)
{
struct sharp_ls060 *ctx = to_sharp_ls060(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
ret = regulator_enable(ctx->vddi_supply);
@@ -125,10 +104,8 @@ static int sharp_ls060_prepare(struct drm_panel *panel)
sharp_ls060_reset(ctx);
ret = sharp_ls060_on(ctx);
- if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ if (ret < 0)
goto err_on;
- }
return 0;
@@ -154,12 +131,8 @@ err_avdd:
static int sharp_ls060_unprepare(struct drm_panel *panel)
{
struct sharp_ls060 *ctx = to_sharp_ls060(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = sharp_ls060_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ sharp_ls060_off(ctx);
regulator_disable(ctx->vddh_supply);
diff --git a/drivers/gpu/drm/panel/panel-visionox-r66451.c b/drivers/gpu/drm/panel/panel-visionox-r66451.c
index 493f2a6076f8..3ea0a86f6e69 100644
--- a/drivers/gpu/drm/panel/panel-visionox-r66451.c
+++ b/drivers/gpu/drm/panel/panel-visionox-r66451.c
@@ -42,85 +42,84 @@ static void visionox_r66451_reset(struct visionox_r66451 *ctx)
static int visionox_r66451_on(struct visionox_r66451 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc2,
- 0x09, 0x24, 0x0c, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00,
- 0x09, 0x3c);
- mipi_dsi_dcs_write_seq(dsi, 0xd7,
- 0x00, 0xb9, 0x3c, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a,
- 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
- 0x3c, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0xde,
- 0x40, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18,
- 0x10, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18, 0x02, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xe8, 0x00, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x00, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc4,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x32);
- mipi_dsi_dcs_write_seq(dsi, 0xcf,
- 0x64, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
- 0x00, 0x0b, 0x77, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x03);
- mipi_dsi_dcs_write_seq(dsi, 0xd3,
- 0x45, 0x00, 0x00, 0x01, 0x13, 0x15, 0x00, 0x15, 0x07,
- 0x0f, 0x77, 0x77, 0x77, 0x37, 0xb2, 0x11, 0x00, 0xa0,
- 0x3c, 0x9c);
- mipi_dsi_dcs_write_seq(dsi, 0xd7,
- 0x00, 0xb9, 0x34, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a,
- 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
- 0x34, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0xd8,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x3a, 0x00, 0x3a, 0x00, 0x3a, 0x00, 0x3a, 0x00, 0x3a,
- 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x0a, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
- 0x00, 0x32, 0x00, 0x0a, 0x00, 0x22);
- mipi_dsi_dcs_write_seq(dsi, 0xdf,
- 0x50, 0x42, 0x58, 0x81, 0x2d, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x01, 0x0f, 0xff, 0xd4, 0x0e, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x0f, 0x53, 0xf1, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xf7, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x34, 0xb4, 0x00, 0x00, 0x00, 0x39, 0x04, 0x09, 0x34);
- mipi_dsi_dcs_write_seq(dsi, 0xe6, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x50, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x50, 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x01, 0x00, 0x00, 0x00, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xf4, 0x00, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x19);
- mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x50, 0x42);
- mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- mipi_dsi_dcs_set_column_address(dsi, 0, 1080 - 1);
- mipi_dsi_dcs_set_page_address(dsi, 0, 2340 - 1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2,
+ 0x09, 0x24, 0x0c, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00,
+ 0x09, 0x3c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd7,
+ 0x00, 0xb9, 0x3c, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
+ 0x3c, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xde,
+ 0x40, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18,
+ 0x10, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18, 0x02, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe8, 0x00, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0x00, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcf,
+ 0x64, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x0b, 0x77, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd3,
+ 0x45, 0x00, 0x00, 0x01, 0x13, 0x15, 0x00, 0x15, 0x07,
+ 0x0f, 0x77, 0x77, 0x77, 0x37, 0xb2, 0x11, 0x00, 0xa0,
+ 0x3c, 0x9c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd7,
+ 0x00, 0xb9, 0x34, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
+ 0x34, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3a, 0x00, 0x3a, 0x00, 0x3a, 0x00, 0x3a, 0x00, 0x3a,
+ 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x0a, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
+ 0x00, 0x32, 0x00, 0x0a, 0x00, 0x22);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf,
+ 0x50, 0x42, 0x58, 0x81, 0x2d, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x0f, 0xff, 0xd4, 0x0e, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0x53, 0xf1, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf7, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0x34, 0xb4, 0x00, 0x00, 0x00, 0x39,
+ 0x04, 0x09, 0x34);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x50, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0x50, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf2, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0x01, 0x00, 0x00, 0x00, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, 0x00, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf2, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x50, 0x42);
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0, 1080 - 1);
+ mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0, 2340 - 1);
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- return 0;
+ return dsi_ctx.accum_err;
}
-static int visionox_r66451_off(struct visionox_r66451 *ctx)
+static void visionox_r66451_off(struct visionox_r66451 *ctx)
{
ctx->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- return 0;
}
static int visionox_r66451_prepare(struct drm_panel *panel)
{
struct visionox_r66451 *ctx = to_visionox_r66451(panel);
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies),
@@ -132,7 +131,6 @@ static int visionox_r66451_prepare(struct drm_panel *panel)
ret = visionox_r66451_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
return ret;
@@ -146,12 +144,8 @@ static int visionox_r66451_prepare(struct drm_panel *panel)
static int visionox_r66451_unprepare(struct drm_panel *panel)
{
struct visionox_r66451 *ctx = to_visionox_r66451(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = visionox_r66451_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ visionox_r66451_off(ctx);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
@@ -179,7 +173,7 @@ static int visionox_r66451_enable(struct drm_panel *panel)
struct visionox_r66451 *ctx = to_visionox_r66451(panel);
struct mipi_dsi_device *dsi = ctx->dsi;
struct drm_dsc_picture_parameter_set pps;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
if (!dsi->dsc) {
dev_err(&dsi->dev, "DSC not attached to DSI\n");
@@ -187,51 +181,30 @@ static int visionox_r66451_enable(struct drm_panel *panel)
}
drm_dsc_pps_payload_pack(&pps, dsi->dsc);
- ret = mipi_dsi_picture_parameter_set(dsi, &pps);
- if (ret) {
- dev_err(&dsi->dev, "Failed to set PPS\n");
- return ret;
- }
+ mipi_dsi_picture_parameter_set_multi(&dsi_ctx, &pps);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(&dsi->dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(&dsi->dev, "Failed on set display on: %d\n", ret);
- return ret;
- }
- msleep(20);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
- return 0;
+ return dsi_ctx.accum_err;
}
static int visionox_r66451_disable(struct drm_panel *panel)
{
struct visionox_r66451 *ctx = to_visionox_r66451(panel);
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(20);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- return 0;
+ return dsi_ctx.accum_err;
}
static int visionox_r66451_get_modes(struct drm_panel *panel,
diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
index 22a14006765e..2b91414c2829 100644
--- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
+++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
@@ -59,91 +59,80 @@ static inline struct xpp055c272 *panel_to_xpp055c272(struct drm_panel *panel)
return container_of(panel, struct xpp055c272, panel);
}
-static int xpp055c272_init_sequence(struct xpp055c272 *ctx)
+static void xpp055c272_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- struct device *dev = ctx->dev;
-
/*
* Init sequence was supplied by the panel vendor without much
* documentation.
*/
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETEXTC, 0xf1, 0x12, 0x83);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETMIPI,
- 0x33, 0x81, 0x05, 0xf9, 0x0e, 0x0e, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x25,
- 0x00, 0x91, 0x0a, 0x00, 0x00, 0x02, 0x4f, 0x01,
- 0x00, 0x00, 0x37);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPOWER_EXT, 0x25);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPCR, 0x02, 0x11, 0x00);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETRGBIF,
- 0x0c, 0x10, 0x0a, 0x50, 0x03, 0xff, 0x00, 0x00,
- 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETSCR,
- 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
- 0x00);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETVDC, 0x46);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPANEL, 0x0b);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETCYC, 0x80);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETDISP, 0xc8, 0x12, 0x30);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETEQ,
- 0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00,
- 0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPOWER,
- 0x53, 0x00, 0x1e, 0x1e, 0x77, 0xe1, 0xcc, 0xdd,
- 0x67, 0x77, 0x33, 0x33);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETECO, 0x00, 0x00, 0xff,
- 0xff, 0x01, 0xff);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETBGP, 0x09, 0x09);
- msleep(20);
-
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETVCOM, 0x87, 0x95);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETGIP1,
- 0xc2, 0x10, 0x05, 0x05, 0x10, 0x05, 0xa0, 0x12,
- 0x31, 0x23, 0x3f, 0x81, 0x0a, 0xa0, 0x37, 0x18,
- 0x00, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x80,
- 0x01, 0x00, 0x00, 0x00, 0x48, 0xf8, 0x86, 0x42,
- 0x08, 0x88, 0x88, 0x80, 0x88, 0x88, 0x88, 0x58,
- 0xf8, 0x87, 0x53, 0x18, 0x88, 0x88, 0x81, 0x88,
- 0x88, 0x88, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETGIP2,
- 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x1f, 0x88, 0x81, 0x35,
- 0x78, 0x88, 0x88, 0x85, 0x88, 0x88, 0x88, 0x0f,
- 0x88, 0x80, 0x24, 0x68, 0x88, 0x88, 0x84, 0x88,
- 0x88, 0x88, 0x23, 0x10, 0x00, 0x00, 0x1c, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x05,
- 0xa0, 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETGAMMA,
- 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38, 0x36,
- 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13, 0x11,
- 0x18, 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38,
- 0x36, 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13,
- 0x11, 0x18);
-
- msleep(60);
-
- dev_dbg(dev, "Panel init sequence done\n");
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETEXTC, 0xf1, 0x12, 0x83);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETMIPI,
+ 0x33, 0x81, 0x05, 0xf9, 0x0e, 0x0e, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x25,
+ 0x00, 0x91, 0x0a, 0x00, 0x00, 0x02, 0x4f, 0x01,
+ 0x00, 0x00, 0x37);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETPOWER_EXT, 0x25);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETPCR, 0x02, 0x11, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETRGBIF,
+ 0x0c, 0x10, 0x0a, 0x50, 0x03, 0xff, 0x00, 0x00,
+ 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETSCR,
+ 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETVDC, 0x46);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETPANEL, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETCYC, 0x80);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETDISP, 0xc8, 0x12, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETEQ,
+ 0x07, 0x07, 0x0b, 0x0b, 0x03, 0x0b, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0x00, 0xc0, 0x10);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETPOWER,
+ 0x53, 0x00, 0x1e, 0x1e, 0x77, 0xe1, 0xcc, 0xdd,
+ 0x67, 0x77, 0x33, 0x33);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETECO, 0x00, 0x00, 0xff,
+ 0xff, 0x01, 0xff);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETBGP, 0x09, 0x09);
+ mipi_dsi_msleep(dsi_ctx, 20);
+
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETVCOM, 0x87, 0x95);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETGIP1,
+ 0xc2, 0x10, 0x05, 0x05, 0x10, 0x05, 0xa0, 0x12,
+ 0x31, 0x23, 0x3f, 0x81, 0x0a, 0xa0, 0x37, 0x18,
+ 0x00, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0x01, 0x00, 0x00, 0x00, 0x48, 0xf8, 0x86, 0x42,
+ 0x08, 0x88, 0x88, 0x80, 0x88, 0x88, 0x88, 0x58,
+ 0xf8, 0x87, 0x53, 0x18, 0x88, 0x88, 0x81, 0x88,
+ 0x88, 0x88, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETGIP2,
+ 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0x88, 0x81, 0x35,
+ 0x78, 0x88, 0x88, 0x85, 0x88, 0x88, 0x88, 0x0f,
+ 0x88, 0x80, 0x24, 0x68, 0x88, 0x88, 0x84, 0x88,
+ 0x88, 0x88, 0x23, 0x10, 0x00, 0x00, 0x1c, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x05,
+ 0xa0, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETGAMMA,
+ 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38, 0x36,
+ 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13, 0x11,
+ 0x18, 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38,
+ 0x36, 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13,
+ 0x11, 0x18);
+
+ mipi_dsi_msleep(dsi_ctx, 60);
}
static int xpp055c272_unprepare(struct drm_panel *panel)
{
struct xpp055c272 *ctx = panel_to_xpp055c272(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0)
- dev_err(ctx->dev, "failed to set display off: %d\n", ret);
-
- mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ if (dsi_ctx.accum_err)
+ return dsi_ctx.accum_err;
regulator_disable(ctx->iovcc);
regulator_disable(ctx->vci);
@@ -155,17 +144,19 @@ static int xpp055c272_prepare(struct drm_panel *panel)
{
struct xpp055c272 *ctx = panel_to_xpp055c272(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dev_dbg(ctx->dev, "Resetting the panel\n");
- ret = regulator_enable(ctx->vci);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to enable vci supply: %d\n", ret);
- return ret;
+ dsi_ctx.accum_err = regulator_enable(ctx->vci);
+ if (dsi_ctx.accum_err) {
+ dev_err(ctx->dev, "Failed to enable vci supply: %d\n",
+ dsi_ctx.accum_err);
+ return dsi_ctx.accum_err;
}
- ret = regulator_enable(ctx->iovcc);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
+ dsi_ctx.accum_err = regulator_enable(ctx->iovcc);
+ if (dsi_ctx.accum_err) {
+ dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n",
+ dsi_ctx.accum_err);
goto disable_vci;
}
@@ -177,26 +168,17 @@ static int xpp055c272_prepare(struct drm_panel *panel)
/* T8: 20ms */
msleep(20);
- ret = xpp055c272_init_sequence(ctx);
- if (ret < 0) {
- dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
- goto disable_iovcc;
- }
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
- goto disable_iovcc;
- }
+ xpp055c272_init_sequence(&dsi_ctx);
+ if (!dsi_ctx.accum_err)
+ dev_dbg(ctx->dev, "Panel init sequence done\n");
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
/* T9: 120ms */
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to set display on: %d\n", ret);
+ if (dsi_ctx.accum_err)
goto disable_iovcc;
- }
msleep(50);
@@ -206,7 +188,7 @@ disable_iovcc:
regulator_disable(ctx->iovcc);
disable_vci:
regulator_disable(ctx->vci);
- return ret;
+ return dsi_ctx.accum_err;
}
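One detail in xpp055c272_prepare() above is worth spelling out: the regulator_enable() result is stored straight into dsi_ctx.accum_err, so the driver keeps a single error variable, and because every *_multi() helper bails out when accum_err is already set, an error recorded there also short-circuits any DSI calls that follow. A hedged sketch of that property (names are illustrative):

    static int example_power_up(struct regulator *supply, struct mipi_dsi_device *dsi)
    {
    	struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

    	dsi_ctx.accum_err = regulator_enable(supply);	/* 0 on success */

    	mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);	/* no-op if the enable failed */
    	mipi_dsi_msleep(&dsi_ctx, 120);
    	mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

    	return dsi_ctx.accum_err;
    }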
static const struct drm_display_mode default_mode = {
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 9b8e82fb8bc4..5657106c2f7d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -836,8 +836,16 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
int panfrost_job_init(struct panfrost_device *pfdev)
{
+ struct drm_sched_init_args args = {
+ .ops = &panfrost_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = 2,
+ .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+ .timeout_wq = pfdev->reset.wq,
+ .name = "pan_js",
+ .dev = pfdev->dev,
+ };
struct panfrost_job_slot *js;
- unsigned int nentries = 2;
int ret, j;
/* All GPUs have two entries per queue, but without jobchain
@@ -845,7 +853,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
* so let's just advertise one entry in that case.
*/
if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
- nentries = 1;
+ args.credit_limit = 1;
pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
if (!js)
@@ -875,13 +883,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
for (j = 0; j < NUM_JOB_SLOTS; j++) {
js->queue[j].fence_context = dma_fence_context_alloc(1);
- ret = drm_sched_init(&js->queue[j].sched,
- &panfrost_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- nentries, 0,
- msecs_to_jiffies(JOB_TIMEOUT_MS),
- pfdev->reset.wq,
- NULL, "pan_js", pfdev->dev);
+ ret = drm_sched_init(&js->queue[j].sched, &args);
if (ret) {
dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
goto err_sched;
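This hunk, like the panthor ones further down, converts to the struct-based drm_sched_init() interface: all parameters move into a struct drm_sched_init_args, and anything the old positional call passed as 0 or NULL can simply be omitted, since members left out of a designated initializer are zero-initialized. A hedged sketch of the general shape (driver names are illustrative):

    static int example_sched_setup(struct drm_gpu_scheduler *sched,
    			       const struct drm_sched_backend_ops *ops,
    			       struct device *dev)
    {
    	const struct drm_sched_init_args args = {
    		.ops = ops,
    		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
    		.credit_limit = 2,
    		.timeout = msecs_to_jiffies(5000),
    		.name = "example-sched",
    		.dev = dev,
    		/* .submit_wq / .timeout_wq left NULL: the scheduler falls back to its defaults */
    	};

    	return drm_sched_init(sched, &args);
    }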
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index 0a37cfeeb181..a9da1d1eeb70 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -128,14 +128,11 @@ static void panthor_device_reset_work(struct work_struct *work)
struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work);
int ret = 0, cookie;
- if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE) {
- /*
- * No need for a reset as the device has been (or will be)
- * powered down
- */
- atomic_set(&ptdev->reset.pending, 0);
+ /* If the device is entering suspend, we don't reset. A slow reset will
+ * be forced at resume time instead.
+ */
+ if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
return;
- }
if (!drm_dev_enter(&ptdev->base, &cookie))
return;
@@ -477,6 +474,14 @@ int panthor_device_resume(struct device *dev)
if (panthor_device_is_initialized(ptdev) &&
drm_dev_enter(&ptdev->base, &cookie)) {
+ /* If there was a reset pending at the time we suspended the
+ * device, we force a slow reset.
+ */
+ if (atomic_read(&ptdev->reset.pending)) {
+ ptdev->reset.fast = false;
+ atomic_set(&ptdev->reset.pending, 0);
+ }
+
ret = panthor_device_resume_hw_components(ptdev);
if (ret && ptdev->reset.fast) {
drm_err(&ptdev->base, "Fast reset failed, trying a slow reset");
@@ -493,9 +498,6 @@ int panthor_device_resume(struct device *dev)
goto err_suspend_devfreq;
}
- if (atomic_read(&ptdev->reset.pending))
- queue_work(ptdev->reset.wq, &ptdev->reset.work);
-
/* Clear all IOMEM mappings pointing to this device after we've
* resumed. This way the fake mappings pointing to the dummy pages
* are removed and the real iomem mapping will be restored on next
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index d5dcd3d1b33a..06fe46e32073 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -802,6 +802,7 @@ static void panthor_query_group_priorities_info(struct drm_file *file,
{
int prio;
+ memset(arg, 0, sizeof(*arg));
for (prio = PANTHOR_GROUP_PRIORITY_REALTIME; prio >= 0; prio--) {
if (!group_priority_permit(file, prio))
arg->allowed_mask |= BIT(prio);
@@ -1457,12 +1458,26 @@ static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency);
}
+static void panthor_show_internal_memory_stats(struct drm_printer *p, struct drm_file *file)
+{
+ char *drv_name = file->minor->dev->driver->name;
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_memory_stats stats = {0};
+
+ panthor_fdinfo_gather_group_mem_info(pfile, &stats);
+ panthor_vm_heaps_sizes(pfile, &stats);
+
+ drm_fdinfo_print_size(p, drv_name, "resident", "memory", stats.resident);
+ drm_fdinfo_print_size(p, drv_name, "active", "memory", stats.active);
+}
+
static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
struct drm_device *dev = file->minor->dev;
struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p);
+ panthor_show_internal_memory_stats(p, file);
drm_show_memory_stats(p, file);
}
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index 68eb4fb4d3a8..4a9c4afa9ad7 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -201,7 +201,6 @@ struct panthor_fw_section {
#define MIN_CS_PER_CSG 8
#define MIN_CSGS 3
-#define MAX_CSG_PRIO 0xf
#define CSF_IFACE_VERSION(major, minor, patch) \
(((major) << 24) | ((minor) << 16) | (patch))
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
index e43021cf6d45..5749ef2ebe03 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.h
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -85,11 +85,6 @@ struct panthor_gem_object *to_panthor_bo(struct drm_gem_object *obj)
struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size);
-struct drm_gem_object *
-panthor_gem_prime_import_sg_table(struct drm_device *ddev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-
int
panthor_gem_create_with_handle(struct drm_file *file,
struct drm_device *ddev,
diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c
index 3796a9eb22af..db0285ce5812 100644
--- a/drivers/gpu/drm/panthor/panthor_heap.c
+++ b/drivers/gpu/drm/panthor/panthor_heap.c
@@ -603,3 +603,29 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
panthor_heap_pool_put(pool);
}
+
+/**
+ * panthor_heap_pool_size() - Calculate size of all chunks across all heaps in a pool
+ * @pool: Pool whose total chunk size to calculate.
+ *
+ * This function adds the size of all heap chunks across all heaps in the
+ * argument pool. It also adds the size of the pool's GPU contexts kernel BO.
+ * It is meant to be used by fdinfo for displaying the size of internal
+ * driver BOs that aren't exposed to userspace through a GEM handle.
+ *
+ * Return: Total size in bytes of all chunks across all heaps in the pool.
+ */
+size_t panthor_heap_pool_size(struct panthor_heap_pool *pool)
+{
+ struct panthor_heap *heap;
+ unsigned long i;
+ size_t size = 0;
+
+ down_read(&pool->lock);
+ xa_for_each(&pool->xa, i, heap)
+ size += heap->chunk_size * heap->chunk_count;
+ up_read(&pool->lock);
+
+ size += pool->gpu_contexts->obj->size;
+
+ return size;
+}
diff --git a/drivers/gpu/drm/panthor/panthor_heap.h b/drivers/gpu/drm/panthor/panthor_heap.h
index 25a5f2bba445..e3358d4e8edb 100644
--- a/drivers/gpu/drm/panthor/panthor_heap.h
+++ b/drivers/gpu/drm/panthor/panthor_heap.h
@@ -27,6 +27,8 @@ struct panthor_heap_pool *
panthor_heap_pool_get(struct panthor_heap_pool *pool);
void panthor_heap_pool_put(struct panthor_heap_pool *pool);
+size_t panthor_heap_pool_size(struct panthor_heap_pool *pool);
+
int panthor_heap_grow(struct panthor_heap_pool *pool,
u64 heap_gpu_va,
u32 renderpasses_in_flight,
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index c39e3eb1c15d..8c6fc587ddc3 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -53,26 +53,27 @@ struct panthor_mmu {
/** @irq: The MMU irq. */
struct panthor_irq irq;
- /** @as: Address space related fields.
+ /**
+ * @as: Address space related fields.
*
 * The GPU has a limited number of address space (AS) slots, forcing
 * us to re-assign them on-demand.
*/
struct {
- /** @slots_lock: Lock protecting access to all other AS fields. */
+ /** @as.slots_lock: Lock protecting access to all other AS fields. */
struct mutex slots_lock;
- /** @alloc_mask: Bitmask encoding the allocated slots. */
+ /** @as.alloc_mask: Bitmask encoding the allocated slots. */
unsigned long alloc_mask;
- /** @faulty_mask: Bitmask encoding the faulty slots. */
+ /** @as.faulty_mask: Bitmask encoding the faulty slots. */
unsigned long faulty_mask;
- /** @slots: VMs currently bound to the AS slots. */
+ /** @as.slots: VMs currently bound to the AS slots. */
struct panthor_as_slot slots[MAX_AS_SLOTS];
/**
- * @lru_list: List of least recently used VMs.
+ * @as.lru_list: List of least recently used VMs.
*
* We use this list to pick a VM to evict when all slots are
* used.
@@ -87,16 +88,16 @@ struct panthor_mmu {
/** @vm: VMs management fields */
struct {
- /** @lock: Lock protecting access to list. */
+ /** @vm.lock: Lock protecting access to list. */
struct mutex lock;
- /** @list: List containing all VMs. */
+ /** @vm.list: List containing all VMs. */
struct list_head list;
- /** @reset_in_progress: True if a reset is in progress. */
+ /** @vm.reset_in_progress: True if a reset is in progress. */
bool reset_in_progress;
- /** @wq: Workqueue used for the VM_BIND queues. */
+ /** @vm.wq: Workqueue used for the VM_BIND queues. */
struct workqueue_struct *wq;
} vm;
};
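The comment churn in this hunk and the following ones applies the kernel-doc rule for nested struct members: each member of an anonymous inner struct is documented with its full path from the containing member, so scripts/kernel-doc can attach each comment to the right field. A small illustration of the convention on a hypothetical struct:

    struct example_dev {
    	/** @stats: Runtime statistics. */
    	struct {
    		/** @stats.hits: Number of successful lookups. */
    		u64 hits;
    		/** @stats.misses: Number of failed lookups. */
    		u64 misses;
    	} stats;
    };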
@@ -143,14 +144,14 @@ struct panthor_vma {
struct panthor_vm_op_ctx {
/** @rsvd_page_tables: Pages reserved for the MMU page table update. */
struct {
- /** @count: Number of pages reserved. */
+ /** @rsvd_page_tables.count: Number of pages reserved. */
u32 count;
- /** @ptr: Point to the first unused page in the @pages table. */
+ /** @rsvd_page_tables.ptr: Points to the first unused page in the @pages table. */
u32 ptr;
/**
- * @page: Array of pages that can be used for an MMU page table update.
+ * @rsvd_page_tables.pages: Array of pages to be used for an MMU page table update.
*
* After an VM operation, there might be free pages left in this array.
* They should be returned to the pt_cache as part of the op_ctx cleanup.
@@ -172,10 +173,10 @@ struct panthor_vm_op_ctx {
/** @va: Virtual range targeted by the VM operation. */
struct {
- /** @addr: Start address. */
+ /** @va.addr: Start address. */
u64 addr;
- /** @range: Range size. */
+ /** @va.range: Range size. */
u64 range;
} va;
@@ -195,14 +196,14 @@ struct panthor_vm_op_ctx {
/** @map: Fields specific to a map operation. */
struct {
- /** @vm_bo: Buffer object to map. */
+ /** @map.vm_bo: Buffer object to map. */
struct drm_gpuvm_bo *vm_bo;
- /** @bo_offset: Offset in the buffer object. */
+ /** @map.bo_offset: Offset in the buffer object. */
u64 bo_offset;
/**
- * @sgt: sg-table pointing to pages backing the GEM object.
+ * @map.sgt: sg-table pointing to pages backing the GEM object.
*
* This is gathered at job creation time, such that we don't have
* to allocate in ::run_job().
@@ -210,7 +211,7 @@ struct panthor_vm_op_ctx {
struct sg_table *sgt;
/**
- * @new_vma: The new VMA object that will be inserted to the VA tree.
+ * @map.new_vma: The new VMA object that will be inserted to the VA tree.
*/
struct panthor_vma *new_vma;
} map;
@@ -304,27 +305,27 @@ struct panthor_vm {
/** @kernel_auto_va: Automatic VA-range for kernel BOs. */
struct {
- /** @start: Start of the automatic VA-range for kernel BOs. */
+ /** @kernel_auto_va.start: Start of the automatic VA-range for kernel BOs. */
u64 start;
- /** @size: Size of the automatic VA-range for kernel BOs. */
+ /** @kernel_auto_va.end: End of the automatic VA-range for kernel BOs. */
u64 end;
} kernel_auto_va;
/** @as: Address space related fields. */
struct {
/**
- * @id: ID of the address space this VM is bound to.
+ * @as.id: ID of the address space this VM is bound to.
*
* A value of -1 means the VM is inactive/not bound.
*/
int id;
- /** @active_cnt: Number of active users of this VM. */
+ /** @as.active_cnt: Number of active users of this VM. */
refcount_t active_cnt;
/**
- * @lru_node: Used to instead the VM in the panthor_mmu::as::lru_list.
+ * @as.lru_node: Used to insert the VM in the panthor_mmu::as::lru_list.
*
* Active VMs should not be inserted in the LRU list.
*/
@@ -336,13 +337,13 @@ struct panthor_vm {
*/
struct {
/**
- * @pool: The heap pool attached to this VM.
+ * @heaps.pool: The heap pool attached to this VM.
*
* Will stay NULL until someone creates a heap context on this VM.
*/
struct panthor_heap_pool *pool;
- /** @lock: Lock used to protect access to @pool. */
+ /** @heaps.lock: Lock used to protect access to @pool. */
struct mutex lock;
} heaps;
@@ -408,7 +409,7 @@ struct panthor_vm_bind_job {
struct panthor_vm_op_ctx ctx;
};
-/**
+/*
* @pt_cache: Cache used to allocate MMU page tables.
*
* The pre-allocation pattern forces us to over-allocate to plan for
@@ -478,7 +479,7 @@ static void *alloc_pt(void *cookie, size_t size, gfp_t gfp)
}
/**
- * @free_pt() - Custom page table free function
+ * free_pt() - Custom page table free function
* @cookie: Cookie passed at page table allocation time.
* @data: Page table to free.
* @size: Size of the page table. This size should be fixed,
@@ -697,7 +698,7 @@ static void panthor_vm_release_as_locked(struct panthor_vm *vm)
/**
* panthor_vm_active() - Flag a VM as active
- * @VM: VM to flag as active.
+ * @vm: VM to flag as active.
*
* Assigns an address space to a VM so it can be used by the GPU/MCU.
*
@@ -801,7 +802,7 @@ out_dev_exit:
/**
* panthor_vm_idle() - Flag a VM idle
- * @VM: VM to flag as idle.
+ * @vm: VM to flag as idle.
*
* When we know the GPU is done with the VM (no more jobs to process),
* we can relinquish the AS slot attached to this VM, if any.
@@ -1017,7 +1018,7 @@ static int flags_to_prot(u32 flags)
/**
* panthor_vm_alloc_va() - Allocate a region in the auto-va space
- * @VM: VM to allocate a region on.
+ * @vm: VM to allocate a region on.
* @va: start of the VA range. Can be PANTHOR_VM_KERNEL_AUTO_VA if the user
* wants the VA to be automatically allocated from the auto-VA range.
* @size: size of the VA range.
@@ -1063,7 +1064,7 @@ panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
/**
* panthor_vm_free_va() - Free a region allocated with panthor_vm_alloc_va()
- * @VM: VM to free the region on.
+ * @vm: VM to free the region on.
* @va_node: Memory node representing the region to free.
*/
void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node)
@@ -1492,9 +1493,9 @@ panthor_vm_create_check_args(const struct panthor_device *ptdev,
/**
* panthor_vm_pool_create_vm() - Create a VM
+ * @ptdev: The panthor device
 * @pool: The VM pool to create this VM in.
- * @kernel_va_start: Start of the region reserved for kernel objects.
- * @kernel_va_range: Size of the region reserved for kernel objects.
+ * @args: VM creation args.
*
* Return: a positive VM ID on success, a negative error code otherwise.
*/
@@ -1558,6 +1559,8 @@ static void panthor_vm_destroy(struct panthor_vm *vm)
*
* The VM resources are freed when the last reference on the VM object is
* dropped.
+ *
+ * Return: %0 for success, negative errno value for failure
*/
int panthor_vm_pool_destroy_vm(struct panthor_vm_pool *pool, u32 handle)
{
@@ -1941,6 +1944,39 @@ struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool c
return pool;
}
+/**
+ * panthor_vm_heaps_sizes() - Calculate size of all heap chunks across all
+ * heaps over all the heap pools in a VM
+ * @pfile: File.
+ * @stats: Memory stats to be updated.
+ *
+ * Calculate all heap chunk sizes in all heap pools bound to the file's VMs.
+ * If a VM is active, record its size as active as well.
+ */
+void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats)
+{
+ struct panthor_vm *vm;
+ unsigned long i;
+
+ if (!pfile->vms)
+ return;
+
+ xa_lock(&pfile->vms->xa);
+ xa_for_each(&pfile->vms->xa, i, vm) {
+ size_t size = 0;
+
+ mutex_lock(&vm->heaps.lock);
+ if (vm->heaps.pool)
+ size = panthor_heap_pool_size(vm->heaps.pool);
+ mutex_unlock(&vm->heaps.lock);
+
+ stats->resident += size;
+ if (vm->as.id >= 0)
+ stats->active += size;
+ }
+ xa_unlock(&pfile->vms->xa);
+}
+
static u64 mair_to_memattr(u64 mair, bool coherent)
{
u64 memattr = 0;
@@ -2275,6 +2311,16 @@ panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
u64 full_va_range = 1ull << va_bits;
struct drm_gem_object *dummy_gem;
struct drm_gpu_scheduler *sched;
+ const struct drm_sched_init_args sched_args = {
+ .ops = &panthor_vm_bind_ops,
+ .submit_wq = ptdev->mmu->vm.wq,
+ .num_rqs = 1,
+ .credit_limit = 1,
+ /* Bind operations are synchronous for now, no timeout needed. */
+ .timeout = MAX_SCHEDULE_TIMEOUT,
+ .name = "panthor-vm-bind",
+ .dev = ptdev->base.dev,
+ };
struct io_pgtable_cfg pgtbl_cfg;
u64 mair, min_va, va_range;
struct panthor_vm *vm;
@@ -2332,11 +2378,7 @@ panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
goto err_mm_takedown;
}
- /* Bind operations are synchronous for now, no timeout needed. */
- ret = drm_sched_init(&vm->sched, &panthor_vm_bind_ops, ptdev->mmu->vm.wq,
- 1, 1, 0,
- MAX_SCHEDULE_TIMEOUT, NULL, NULL,
- "panthor-vm-bind", ptdev->base.dev);
+ ret = drm_sched_init(&vm->sched, &sched_args);
if (ret)
goto err_free_io_pgtable;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
index 8d21e83d8aba..fc274637114e 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.h
+++ b/drivers/gpu/drm/panthor/panthor_mmu.h
@@ -9,6 +9,7 @@
struct drm_exec;
struct drm_sched_job;
+struct drm_memory_stats;
struct panthor_gem_object;
struct panthor_heap_pool;
struct panthor_vm;
@@ -37,6 +38,8 @@ int panthor_vm_flush_all(struct panthor_vm *vm);
struct panthor_heap_pool *
panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);
+void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats);
+
struct panthor_vm *panthor_vm_get(struct panthor_vm *vm);
void panthor_vm_put(struct panthor_vm *vm);
struct panthor_vm *panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 77b184c3fb0c..1a276db095ff 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -88,9 +88,6 @@
#define JOB_TIMEOUT_MS 5000
-#define MIN_CS_PER_CSG 8
-
-#define MIN_CSGS 3
#define MAX_CSG_PRIO 0xf
#define NUM_INSTRS_PER_CACHE_LINE (64 / sizeof(u64))
@@ -628,7 +625,7 @@ struct panthor_group {
*/
struct panthor_kernel_bo *syncobjs;
- /** @fdinfo: Per-file total cycle and timestamp values reference. */
+ /** @fdinfo: Per-file info exposed through /proc/<process>/fdinfo */
struct {
/** @data: Total sampled values for jobs in queues from this group. */
struct panthor_gpu_usage data;
@@ -638,6 +635,9 @@ struct panthor_group {
* and job post-completion processing function
*/
struct mutex lock;
+
+ /** @fdinfo.kbo_sizes: Aggregate size of private kernel BOs held by the group. */
+ size_t kbo_sizes;
} fdinfo;
/** @state: Group state. */
@@ -2878,6 +2878,7 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
if (IS_ERR_OR_NULL(gpool))
return;
+ xa_lock(&gpool->xa);
xa_for_each(&gpool->xa, i, group) {
mutex_lock(&group->fdinfo.lock);
pfile->stats.cycles += group->fdinfo.data.cycles;
@@ -2886,6 +2887,7 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
group->fdinfo.data.time = 0;
mutex_unlock(&group->fdinfo.lock);
}
+ xa_unlock(&gpool->xa);
}
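The xa_lock()/xa_unlock() pair added above pins the group pool for the duration of the walk: xa_for_each() handles its own RCU protection, so the traversal itself is safe, but it does not prevent entries from being removed concurrently; taking the xarray's own spinlock keeps the set of groups stable while their fdinfo counters are read and reset. A hedged sketch of the same locking shape:

    static unsigned long example_count(struct xarray *items)
    {
    	unsigned long idx, count = 0;
    	void *entry;

    	xa_lock(items);			/* block concurrent xa_erase()/xa_store() */
    	xa_for_each(items, idx, entry)
    		count++;
    	xa_unlock(items);

    	return count;
    }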
static void group_sync_upd_work(struct work_struct *work)
@@ -3287,6 +3289,22 @@ static struct panthor_queue *
group_create_queue(struct panthor_group *group,
const struct drm_panthor_queue_create *args)
{
+ const struct drm_sched_init_args sched_args = {
+ .ops = &panthor_queue_sched_ops,
+ .submit_wq = group->ptdev->scheduler->wq,
+ .num_rqs = 1,
+ /*
+ * The credit limit argument tells us the total number of
+ * instructions across all CS slots in the ringbuffer, with
+ * some jobs requiring twice as many as others, depending on
+ * their profiling status.
+ */
+ .credit_limit = args->ringbuf_size / sizeof(u64),
+ .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+ .timeout_wq = group->ptdev->reset.wq,
+ .name = "panthor-queue",
+ .dev = group->ptdev->base.dev,
+ };
struct drm_gpu_scheduler *drm_sched;
struct panthor_queue *queue;
int ret;
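The credit-limit comment in the sched_args initializer above is easier to follow with concrete numbers; a small worked example (the 16 KiB figure is illustrative only, not taken from this patch):

	/*
	 * With a 16 KiB ring buffer:
	 *
	 *     credit_limit = args->ringbuf_size / sizeof(u64) = 16384 / 8 = 2048
	 *
	 * A job emitting N 64-bit instructions charges N credits with
	 * profiling off and 2 * N once profiling doubles its instruction
	 * count, so in-flight jobs can never overrun the ring buffer.
	 */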
@@ -3357,17 +3375,7 @@ group_create_queue(struct panthor_group *group,
if (ret)
goto err_free_queue;
- /*
- * Credit limit argument tells us the total number of instructions
- * across all CS slots in the ringbuffer, with some jobs requiring
- * twice as many as others, depending on their profiling status.
- */
- ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops,
- group->ptdev->scheduler->wq, 1,
- args->ringbuf_size / sizeof(u64),
- 0, msecs_to_jiffies(JOB_TIMEOUT_MS),
- group->ptdev->reset.wq,
- NULL, "panthor-queue", group->ptdev->base.dev);
+ ret = drm_sched_init(&queue->scheduler, &sched_args);
if (ret)
goto err_free_queue;
@@ -3381,6 +3389,29 @@ err_free_queue:
return ERR_PTR(ret);
}
+static void add_group_kbo_sizes(struct panthor_device *ptdev,
+ struct panthor_group *group)
+{
+ struct panthor_queue *queue;
+ int i;
+
+ if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group)))
+ return;
+ if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev))
+ return;
+
+ group->fdinfo.kbo_sizes += group->suspend_buf->obj->size;
+ group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size;
+ group->fdinfo.kbo_sizes += group->syncobjs->obj->size;
+
+ for (i = 0; i < group->queue_count; i++) {
+ queue = group->queues[i];
+ group->fdinfo.kbo_sizes += queue->ringbuf->obj->size;
+ group->fdinfo.kbo_sizes += queue->iface.mem->obj->size;
+ group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size;
+ }
+}
+
#define MAX_GROUPS_PER_POOL 128
int panthor_group_create(struct panthor_file *pfile,
@@ -3505,6 +3536,7 @@ int panthor_group_create(struct panthor_file *pfile,
}
mutex_unlock(&sched->reset.lock);
+ add_group_kbo_sizes(group->ptdev, group);
mutex_init(&group->fdinfo.lock);
return gid;
@@ -3624,6 +3656,33 @@ void panthor_group_pool_destroy(struct panthor_file *pfile)
pfile->groups = NULL;
}
+/**
+ * panthor_fdinfo_gather_group_mem_info() - Retrieve aggregate size of all private kernel BOs
+ * belonging to all the groups owned by an open Panthor file
+ * @pfile: File.
+ * @stats: Memory statistics to be updated.
+ *
+ */
+void
+panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile,
+ struct drm_memory_stats *stats)
+{
+ struct panthor_group_pool *gpool = pfile->groups;
+ struct panthor_group *group;
+ unsigned long i;
+
+ if (IS_ERR_OR_NULL(gpool))
+ return;
+
+ xa_lock(&gpool->xa);
+ xa_for_each(&gpool->xa, i, group) {
+ stats->resident += group->fdinfo.kbo_sizes;
+ if (group->csg_id >= 0)
+ stats->active += group->fdinfo.kbo_sizes;
+ }
+ xa_unlock(&gpool->xa);
+}
+
static void job_release(struct kref *ref)
{
struct panthor_job *job = container_of(ref, struct panthor_job, refcount);
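A minimal sketch of how the new aggregate could be consumed on the fdinfo side, assuming the drm_print_memory_stats() helper from drm_file.h and a hypothetical panthor_show_fdinfo_mem() wrapper (the wrapper name and its call site are not part of this patch):

	static void panthor_show_fdinfo_mem(struct drm_printer *p,
					    struct panthor_file *pfile)
	{
		struct drm_memory_stats stats = {};

		/* Fold the per-group private kernel BO sizes into resident/active. */
		panthor_fdinfo_gather_group_mem_info(pfile, &stats);

		/* Hand the totals to the common fdinfo memory-stats printer. */
		drm_print_memory_stats(p, &stats, DRM_GEM_OBJECT_RESIDENT, "memory");
	}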
diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
index 5ae6b4bde7c5..e650a445cf50 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.h
+++ b/drivers/gpu/drm/panthor/panthor_sched.h
@@ -9,6 +9,7 @@ struct dma_fence;
struct drm_file;
struct drm_gem_object;
struct drm_sched_job;
+struct drm_memory_stats;
struct drm_panthor_group_create;
struct drm_panthor_queue_create;
struct drm_panthor_group_get_state;
@@ -36,6 +37,8 @@ void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *job);
int panthor_group_pool_create(struct panthor_file *pfile);
void panthor_group_pool_destroy(struct panthor_file *pfile);
+void panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile,
+ struct drm_memory_stats *stats);
int panthor_sched_init(struct panthor_device *ptdev);
void panthor_sched_unplug(struct panthor_device *ptdev);
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index bc24af08dfcd..70aff64ced87 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -1044,7 +1044,7 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *ddev = connector->dev;
struct qxl_device *qdev = to_qxl(ddev);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 6328627b7c34..fa78824931cc 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -467,7 +467,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
}
int radeon_dp_mode_valid_helper(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index f9996304d943..9f6a3df951ba 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -806,7 +806,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -968,7 +968,7 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -1116,7 +1116,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
return MODE_CLOCK_RANGE;
@@ -1447,7 +1447,7 @@ static void radeon_dvi_force(struct drm_connector *connector)
}
static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -1723,7 +1723,7 @@ out:
}
static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 4063d3801e81..3102f6c2d055 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -706,7 +706,7 @@ extern int radeon_get_monitor_bpc(struct drm_connector *connector);
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
extern void radeon_dp_set_link_config(struct drm_connector *connector,
const struct drm_display_mode *mode);
extern void radeon_dp_link_train(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
index e8d64583e3bd..380a855b832a 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
@@ -582,9 +582,8 @@ EXPORT_SYMBOL_GPL(rcar_lvds_pclk_disable);
*/
static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = old_bridge_state->base.state;
struct drm_connector *connector;
struct drm_crtc *crtc;
@@ -596,7 +595,7 @@ static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
}
static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index 3c0c18d5249a..d1e626068065 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -808,7 +808,7 @@ static int rcar_mipi_dsi_attach(struct drm_bridge *bridge,
}
static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
@@ -816,7 +816,7 @@ static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
}
static void rcar_mipi_dsi_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
index fa7a1ae22aa3..4550c6d84796 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
@@ -532,9 +532,8 @@ static int rzg2l_mipi_dsi_attach(struct drm_bridge *bridge,
}
static void rzg2l_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = old_bridge_state->base.state;
struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
const struct drm_display_mode *mode;
struct drm_connector *connector;
@@ -568,7 +567,7 @@ err_stop:
}
static void rzg2l_mipi_dsi_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index b17de83b988b..292c31de18f1 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -275,7 +275,7 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
cdn_dp_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct cdn_dp_device *dp = connector_to_dp(connector);
struct drm_display_info *display_info = &dp->connector.display_info;
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
index e498767a0a66..f41151d49fca 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
@@ -25,6 +25,41 @@
#include "rockchip_drm_drv.h"
+#define RK3576_IOC_MISC_CON0 0xa400
+#define RK3576_HDMI_HPD_INT_MSK BIT(2)
+#define RK3576_HDMI_HPD_INT_CLR BIT(1)
+
+#define RK3576_IOC_HDMI_HPD_STATUS 0xa440
+#define RK3576_HDMI_LEVEL_INT BIT(3)
+
+#define RK3576_VO0_GRF_SOC_CON1 0x0004
+#define RK3576_HDMI_FRL_MOD BIT(0)
+#define RK3576_HDMI_HDCP14_MEM_EN BIT(15)
+
+#define RK3576_VO0_GRF_SOC_CON8 0x0020
+#define RK3576_COLOR_FORMAT_MASK (0xf << 4)
+#define RK3576_COLOR_DEPTH_MASK (0xf << 8)
+#define RK3576_RGB (0 << 4)
+#define RK3576_YUV422 (0x1 << 4)
+#define RK3576_YUV444 (0x2 << 4)
+#define RK3576_YUV420 (0x3 << 4)
+#define RK3576_8BPC (0x0 << 8)
+#define RK3576_10BPC (0x6 << 8)
+#define RK3576_CECIN_MASK BIT(3)
+
+#define RK3576_VO0_GRF_SOC_CON12 0x0030
+#define RK3576_GRF_OSDA_DLYN (0xf << 12)
+#define RK3576_GRF_OSDA_DIV (0x7f << 1)
+#define RK3576_GRF_OSDA_DLY_EN BIT(0)
+
+#define RK3576_VO0_GRF_SOC_CON14 0x0038
+#define RK3576_I2S_SEL_MASK BIT(0)
+#define RK3576_SPDIF_SEL_MASK BIT(1)
+#define HDCP0_P1_GPIO_IN BIT(2)
+#define RK3576_SCLIN_MASK BIT(4)
+#define RK3576_SDAIN_MASK BIT(5)
+#define RK3576_HDMI_GRANT_SEL BIT(6)
+
#define RK3588_GRF_SOC_CON2 0x0308
#define RK3588_HDMI0_HPD_INT_MSK BIT(13)
#define RK3588_HDMI0_HPD_INT_CLR BIT(12)
@@ -54,7 +89,6 @@ struct rockchip_hdmi_qp {
struct regmap *regmap;
struct regmap *vo_regmap;
struct rockchip_encoder encoder;
- struct clk *ref_clk;
struct dw_hdmi_qp *hdmi;
struct phy *phy;
struct gpio_desc *enable_gpio;
@@ -62,6 +96,12 @@ struct rockchip_hdmi_qp {
int port_id;
};
+struct rockchip_hdmi_qp_ctrl_ops {
+ void (*io_init)(struct rockchip_hdmi_qp *hdmi);
+ irqreturn_t (*irq_callback)(int irq, void *dev_id);
+ irqreturn_t (*hardirq_callback)(int irq, void *dev_id);
+};
+
static struct rockchip_hdmi_qp *to_rockchip_hdmi_qp(struct drm_encoder *encoder)
{
struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
@@ -81,7 +121,6 @@ static void dw_hdmi_qp_rockchip_encoder_enable(struct drm_encoder *encoder)
if (crtc && crtc->state) {
rate = drm_hdmi_compute_mode_clock(&crtc->state->adjusted_mode,
8, HDMI_COLORSPACE_RGB);
- clk_set_rate(hdmi->ref_clk, rate);
/*
* FIXME: Temporary workaround to pass pixel clock rate
* to the PHY driver until phy_configure_opts_hdmi
@@ -161,6 +200,37 @@ static const struct dw_hdmi_qp_phy_ops rk3588_hdmi_phy_ops = {
.setup_hpd = dw_hdmi_qp_rk3588_setup_hpd,
};
+static enum drm_connector_status
+dw_hdmi_qp_rk3576_read_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
+{
+ struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data;
+ u32 val;
+
+ regmap_read(hdmi->regmap, RK3576_IOC_HDMI_HPD_STATUS, &val);
+
+ return val & RK3576_HDMI_LEVEL_INT ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static void dw_hdmi_qp_rk3576_setup_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
+{
+ struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data;
+ u32 val;
+
+ val = HIWORD_UPDATE(RK3576_HDMI_HPD_INT_CLR,
+ RK3576_HDMI_HPD_INT_CLR | RK3576_HDMI_HPD_INT_MSK);
+
+ regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
+ regmap_write(hdmi->regmap, 0xa404, 0xffff0102);
+}
+
+static const struct dw_hdmi_qp_phy_ops rk3576_hdmi_phy_ops = {
+ .init = dw_hdmi_qp_rk3588_phy_init,
+ .disable = dw_hdmi_qp_rk3588_phy_disable,
+ .read_hpd = dw_hdmi_qp_rk3576_read_hpd,
+ .setup_hpd = dw_hdmi_qp_rk3576_setup_hpd,
+};
+
static void dw_hdmi_qp_rk3588_hpd_work(struct work_struct *work)
{
struct rockchip_hdmi_qp *hdmi = container_of(work,
@@ -176,6 +246,43 @@ static void dw_hdmi_qp_rk3588_hpd_work(struct work_struct *work)
}
}
+static irqreturn_t dw_hdmi_qp_rk3576_hardirq(int irq, void *dev_id)
+{
+ struct rockchip_hdmi_qp *hdmi = dev_id;
+ u32 intr_stat, val;
+
+ regmap_read(hdmi->regmap, RK3576_IOC_HDMI_HPD_STATUS, &intr_stat);
+ if (intr_stat) {
+ val = HIWORD_UPDATE(RK3576_HDMI_HPD_INT_MSK, RK3576_HDMI_HPD_INT_MSK);
+
+ regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t dw_hdmi_qp_rk3576_irq(int irq, void *dev_id)
+{
+ struct rockchip_hdmi_qp *hdmi = dev_id;
+ u32 intr_stat, val;
+
+ regmap_read(hdmi->regmap, RK3576_IOC_HDMI_HPD_STATUS, &intr_stat);
+
+ if (!intr_stat)
+ return IRQ_NONE;
+
+ val = HIWORD_UPDATE(RK3576_HDMI_HPD_INT_CLR, RK3576_HDMI_HPD_INT_CLR);
+ regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
+ mod_delayed_work(system_wq, &hdmi->hpd_work,
+ msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
+
+ val = HIWORD_UPDATE(0, RK3576_HDMI_HPD_INT_MSK);
+ regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t dw_hdmi_qp_rk3588_hardirq(int irq, void *dev_id)
{
struct rockchip_hdmi_qp *hdmi = dev_id;
@@ -226,24 +333,95 @@ static irqreturn_t dw_hdmi_qp_rk3588_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void dw_hdmi_qp_rk3576_io_init(struct rockchip_hdmi_qp *hdmi)
+{
+ u32 val;
+
+ val = HIWORD_UPDATE(RK3576_SCLIN_MASK, RK3576_SCLIN_MASK) |
+ HIWORD_UPDATE(RK3576_SDAIN_MASK, RK3576_SDAIN_MASK) |
+ HIWORD_UPDATE(RK3576_HDMI_GRANT_SEL, RK3576_HDMI_GRANT_SEL) |
+ HIWORD_UPDATE(RK3576_I2S_SEL_MASK, RK3576_I2S_SEL_MASK);
+
+ regmap_write(hdmi->vo_regmap, RK3576_VO0_GRF_SOC_CON14, val);
+
+ val = HIWORD_UPDATE(0, RK3576_HDMI_HPD_INT_MSK);
+ regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
+}
+
+static void dw_hdmi_qp_rk3588_io_init(struct rockchip_hdmi_qp *hdmi)
+{
+ u32 val;
+
+ val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
+ HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
+ HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
+ HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
+ regmap_write(hdmi->vo_regmap,
+ hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
+ val);
+
+ val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK, RK3588_SET_HPD_PATH_MASK);
+ regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
+
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL, RK3588_HDMI1_GRANT_SEL);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL, RK3588_HDMI0_GRANT_SEL);
+ regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
+
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK, RK3588_HDMI1_HPD_INT_MSK);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, RK3588_HDMI0_HPD_INT_MSK);
+ regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
+}
+
+static const struct rockchip_hdmi_qp_ctrl_ops rk3576_hdmi_ctrl_ops = {
+ .io_init = dw_hdmi_qp_rk3576_io_init,
+ .irq_callback = dw_hdmi_qp_rk3576_irq,
+ .hardirq_callback = dw_hdmi_qp_rk3576_hardirq,
+};
+
+static const struct rockchip_hdmi_qp_ctrl_ops rk3588_hdmi_ctrl_ops = {
+ .io_init = dw_hdmi_qp_rk3588_io_init,
+ .irq_callback = dw_hdmi_qp_rk3588_irq,
+ .hardirq_callback = dw_hdmi_qp_rk3588_hardirq,
+};
+
struct rockchip_hdmi_qp_cfg {
unsigned int num_ports;
unsigned int port_ids[MAX_HDMI_PORT_NUM];
+ const struct rockchip_hdmi_qp_ctrl_ops *ctrl_ops;
const struct dw_hdmi_qp_phy_ops *phy_ops;
};
+static const struct rockchip_hdmi_qp_cfg rk3576_hdmi_cfg = {
+ .num_ports = 1,
+ .port_ids = {
+ 0x27da0000,
+ },
+ .ctrl_ops = &rk3576_hdmi_ctrl_ops,
+ .phy_ops = &rk3576_hdmi_phy_ops,
+};
+
static const struct rockchip_hdmi_qp_cfg rk3588_hdmi_cfg = {
.num_ports = 2,
.port_ids = {
0xfde80000,
0xfdea0000,
},
+ .ctrl_ops = &rk3588_hdmi_ctrl_ops,
.phy_ops = &rk3588_hdmi_phy_ops,
};
static const struct of_device_id dw_hdmi_qp_rockchip_dt_ids[] = {
- { .compatible = "rockchip,rk3588-dw-hdmi-qp",
- .data = &rk3588_hdmi_cfg },
+ {
+ .compatible = "rockchip,rk3576-dw-hdmi-qp",
+ .data = &rk3576_hdmi_cfg
+ }, {
+ .compatible = "rockchip,rk3588-dw-hdmi-qp",
+ .data = &rk3588_hdmi_cfg
+ },
{},
};
MODULE_DEVICE_TABLE(of, dw_hdmi_qp_rockchip_dt_ids);
@@ -261,7 +439,6 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
struct resource *res;
struct clk_bulk_data *clks;
int ret, irq, i;
- u32 val;
if (!pdev->dev.of_node)
return -ENODEV;
@@ -278,6 +455,12 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
if (!cfg)
return -ENODEV;
+ if (!cfg->ctrl_ops || !cfg->ctrl_ops->io_init ||
+ !cfg->ctrl_ops->irq_callback || !cfg->ctrl_ops->hardirq_callback) {
+ dev_err(dev, "Missing platform ctrl ops\n");
+ return -ENODEV;
+ }
+
hdmi->dev = &pdev->dev;
hdmi->port_id = -ENODEV;
@@ -330,17 +513,6 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
return ret;
}
- for (i = 0; i < ret; i++) {
- if (!strcmp(clks[i].id, "ref")) {
- hdmi->ref_clk = clks[1].clk;
- break;
- }
- }
- if (!hdmi->ref_clk) {
- drm_err(hdmi, "Missing ref clock\n");
- return -EINVAL;
- }
-
hdmi->enable_gpio = devm_gpiod_get_optional(hdmi->dev, "enable",
GPIOD_OUT_HIGH);
if (IS_ERR(hdmi->enable_gpio)) {
@@ -357,31 +529,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
return ret;
}
- val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
- HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
- HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
- HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
- regmap_write(hdmi->vo_regmap,
- hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
- val);
-
- val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK,
- RK3588_SET_HPD_PATH_MASK);
- regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
-
- if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL,
- RK3588_HDMI1_GRANT_SEL);
- else
- val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
- RK3588_HDMI0_GRANT_SEL);
- regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
-
- if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK, RK3588_HDMI1_HPD_INT_MSK);
- else
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, RK3588_HDMI0_HPD_INT_MSK);
- regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
+ cfg->ctrl_ops->io_init(hdmi);
INIT_DELAYED_WORK(&hdmi->hpd_work, dw_hdmi_qp_rk3588_hpd_work);
@@ -394,8 +542,8 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
return irq;
ret = devm_request_threaded_irq(hdmi->dev, irq,
- dw_hdmi_qp_rk3588_hardirq,
- dw_hdmi_qp_rk3588_irq,
+ cfg->ctrl_ops->hardirq_callback,
+ cfg->ctrl_ops->irq_callback,
IRQF_SHARED, "dw-hdmi-qp-hpd",
hdmi);
if (ret)
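For readers new to the Rockchip GRF write convention used throughout the io_init and IRQ helpers above: assuming the usual glue-driver definition HIWORD_UPDATE(val, mask) = (val) | (mask) << 16, the upper 16 bits of each write act as a per-bit write-enable mask for the lower 16 bits, so unrelated register bits are left untouched. A worked example with the RK3576 HPD bits (BIT(1) = INT_CLR, BIT(2) = INT_MSK):

	/*
	 * HIWORD_UPDATE(RK3576_HDMI_HPD_INT_CLR,
	 *               RK3576_HDMI_HPD_INT_CLR | RK3576_HDMI_HPD_INT_MSK)
	 *     = 0x2 | (0x6 << 16) = 0x00060002
	 *
	 * i.e. one regmap_write() that sets the clear bit and clears the mask
	 * bit, with only the bits flagged in the upper half-word taking effect.
	 */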
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 898d90155057..483ecfeaebb0 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -471,7 +471,7 @@ static int inno_hdmi_setup(struct inno_hdmi *hdmi,
}
static enum drm_mode_status inno_hdmi_display_mode_valid(struct inno_hdmi *hdmi,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
unsigned long mpixelclk, max_tolerance;
long rounded_refclk;
@@ -577,7 +577,7 @@ static int inno_hdmi_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
inno_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct inno_hdmi *hdmi = connector_to_inno_hdmi(connector);
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index 403336397214..f7a460190313 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -482,7 +482,7 @@ static int rk3066_hdmi_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
rk3066_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
u32 vic = drm_match_cea_mode(mode);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 57747f1cff26..e3596e2b557d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1072,7 +1072,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
}
static int vop_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state, bool flip)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 17a98845fd31..afc946ead870 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -159,6 +159,7 @@ struct vop2_video_port {
struct drm_crtc crtc;
struct vop2 *vop2;
struct clk *dclk;
+ struct clk *dclk_src;
unsigned int id;
const struct vop2_video_port_data *data;
@@ -214,6 +215,7 @@ struct vop2 {
struct clk *hclk;
struct clk *aclk;
struct clk *pclk;
+ struct clk *pll_hdmiphy0;
/* optional internal rgb encoder */
struct rockchip_rgb *rgb;
@@ -222,6 +224,8 @@ struct vop2 {
struct vop2_win win[];
};
+#define VOP2_MAX_DCLK_RATE 600000000
+
#define vop2_output_if_is_hdmi(x) ((x) == ROCKCHIP_VOP2_EP_HDMI0 || \
(x) == ROCKCHIP_VOP2_EP_HDMI1)
@@ -1155,6 +1159,9 @@ static void vop2_crtc_atomic_disable(struct drm_crtc *crtc,
vop2_crtc_disable_irq(vp, VP_INT_DSP_HOLD_VALID);
+ if (vp->dclk_src)
+ clk_set_parent(vp->dclk, vp->dclk_src);
+
clk_disable_unprepare(vp->dclk);
vop2->enable_count--;
@@ -1905,8 +1912,8 @@ static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
K = 2;
}
- if_pixclk_rate = (dclk_core_rate << 1) / K;
/*
+ * if_pixclk_rate = (dclk_core_rate << 1) / K;
* if_dclk_rate = dclk_core_rate / K;
* *if_pixclk_div = dclk_rate / if_pixclk_rate;
* *if_dclk_div = dclk_rate / if_dclk_rate;
@@ -2259,6 +2266,27 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
vop2_vp_write(vp, RK3568_VP_MIPI_CTRL, 0);
+ /*
+ * Switch to HDMI PHY PLL as DCLK source for display modes up
+ * to 4K@60Hz, if available, otherwise keep using the system CRU.
+ */
+ if (vop2->pll_hdmiphy0 && clock <= VOP2_MAX_DCLK_RATE) {
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) {
+ struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
+
+ if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) {
+ if (!vp->dclk_src)
+ vp->dclk_src = clk_get_parent(vp->dclk);
+
+ ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy0);
+ if (ret < 0)
+ drm_warn(vop2->drm,
+ "Could not switch to HDMI0 PHY PLL: %d\n", ret);
+ break;
+ }
+ }
+ }
+
clk_set_rate(vp->dclk, clock);
vop2_post_config(crtc);
@@ -3699,6 +3727,12 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(vop2->pclk);
}
+ vop2->pll_hdmiphy0 = devm_clk_get_optional(vop2->dev, "pll_hdmiphy0");
+ if (IS_ERR(vop2->pll_hdmiphy0)) {
+ drm_err(vop2->drm, "failed to get pll_hdmiphy0\n");
+ return PTR_ERR(vop2->pll_hdmiphy0);
+ }
+
vop2->irq = platform_get_irq(pdev, 0);
if (vop2->irq < 0) {
drm_err(vop2->drm, "cannot find irq for vop2\n");
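As a quick sanity check on the VOP2_MAX_DCLK_RATE cap used by the new HDMI PHY PLL path (the mode numbers come from the standard CEA timings, not from this patch):

	/*
	 * 3840x2160@60 needs a 594,000 kHz pixel clock:
	 *     594 MHz <= VOP2_MAX_DCLK_RATE (600 MHz) -> dclk is reparented
	 *     to pll_hdmiphy0, with the original CRU parent remembered in
	 *     vp->dclk_src and restored in vop2_crtc_atomic_disable().
	 * Anything above 600 MHz keeps the CRU as the dclk source.
	 */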
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 57da84908752..8c36a59afb72 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -64,12 +64,6 @@
* credit limit, the job won't be executed. Instead, the scheduler will wait
* until the credit count has decreased enough to not overflow its credit limit.
* This implies waiting for previously executed jobs.
- *
- * Optionally, drivers may register a callback (update_job_credits) provided by
- * struct drm_sched_backend_ops to update the job's credits dynamically. The
- * scheduler executes this callback every time the scheduler considers a job for
- * execution and subsequently checks whether the job fits the scheduler's credit
- * limit.
*/
#include <linux/wait.h>
@@ -133,13 +127,6 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
if (!s_job)
return false;
- if (sched->ops->update_job_credits) {
- s_job->credits = sched->ops->update_job_credits(s_job);
-
- drm_WARN(sched, !s_job->credits,
- "Jobs with zero credits bypass job-flow control.\n");
- }
-
/* If a job exceeds the credit limit, truncate it to the credit limit
* itself to guarantee forward progress.
*/
@@ -998,6 +985,29 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
/**
+ * drm_sched_job_has_dependency - check whether fence is the job's dependency
+ * @job: scheduler job to check
+ * @fence: fence to look for
+ *
+ * Returns:
+ * True if @fence is found within the job's dependencies, or otherwise false.
+ */
+bool drm_sched_job_has_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence)
+{
+ struct dma_fence *f;
+ unsigned long index;
+
+ xa_for_each(&job->dependencies, index, f) {
+ if (f == fence)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_sched_job_has_dependency);
+
+/**
* drm_sched_job_cleanup - clean up scheduler job resources
* @job: scheduler job to clean up
*
@@ -1166,9 +1176,6 @@ static void drm_sched_free_job_work(struct work_struct *w)
container_of(w, struct drm_gpu_scheduler, work_free_job);
struct drm_sched_job *job;
- if (READ_ONCE(sched->pause_submit))
- return;
-
job = drm_sched_get_finished_job(sched);
if (job)
sched->ops->free_job(job);
@@ -1192,9 +1199,6 @@ static void drm_sched_run_job_work(struct work_struct *w)
struct drm_sched_job *sched_job;
int r;
- if (READ_ONCE(sched->pause_submit))
- return;
-
/* Find entity with a ready job */
entity = drm_sched_select_entity(sched);
if (!entity)
@@ -1240,40 +1244,24 @@ static void drm_sched_run_job_work(struct work_struct *w)
* drm_sched_init - Init a gpu scheduler instance
*
* @sched: scheduler instance
- * @ops: backend operations for this scheduler
- * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
- * allocated and used
- * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
- * @credit_limit: the number of credits this scheduler can hold from all jobs
- * @hang_limit: number of times to allow a job to hang before dropping it
- * @timeout: timeout value in jiffies for the scheduler
- * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
- * used
- * @score: optional score atomic shared with other schedulers
- * @name: name used for debugging
- * @dev: target &struct device
+ * @args: scheduler initialization arguments
*
* Return 0 on success, otherwise error code.
*/
-int drm_sched_init(struct drm_gpu_scheduler *sched,
- const struct drm_sched_backend_ops *ops,
- struct workqueue_struct *submit_wq,
- u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
- long timeout, struct workqueue_struct *timeout_wq,
- atomic_t *score, const char *name, struct device *dev)
+int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args)
{
int i;
- sched->ops = ops;
- sched->credit_limit = credit_limit;
- sched->name = name;
- sched->timeout = timeout;
- sched->timeout_wq = timeout_wq ? : system_wq;
- sched->hang_limit = hang_limit;
- sched->score = score ? score : &sched->_score;
- sched->dev = dev;
+ sched->ops = args->ops;
+ sched->credit_limit = args->credit_limit;
+ sched->name = args->name;
+ sched->timeout = args->timeout;
+ sched->hang_limit = args->hang_limit;
+ sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_wq;
+ sched->score = args->score ? args->score : &sched->_score;
+ sched->dev = args->dev;
- if (num_rqs > DRM_SCHED_PRIORITY_COUNT) {
+ if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) {
/* This is a gross violation--tell drivers what the problem is.
*/
drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
@@ -1288,16 +1276,16 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
return 0;
}
- if (submit_wq) {
- sched->submit_wq = submit_wq;
+ if (args->submit_wq) {
+ sched->submit_wq = args->submit_wq;
sched->own_submit_wq = false;
} else {
#ifdef CONFIG_LOCKDEP
- sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name,
+ sched->submit_wq = alloc_ordered_workqueue_lockdep_map(args->name,
WQ_MEM_RECLAIM,
&drm_sched_lockdep_map);
#else
- sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ sched->submit_wq = alloc_ordered_workqueue(args->name, WQ_MEM_RECLAIM);
#endif
if (!sched->submit_wq)
return -ENOMEM;
@@ -1305,11 +1293,11 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
sched->own_submit_wq = true;
}
- sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
+ sched->sched_rq = kmalloc_array(args->num_rqs, sizeof(*sched->sched_rq),
GFP_KERNEL | __GFP_ZERO);
if (!sched->sched_rq)
goto Out_check_own;
- sched->num_rqs = num_rqs;
+ sched->num_rqs = args->num_rqs;
for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
if (!sched->sched_rq[i])
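With the single-argument form, a driver that used to pass the long positional list now fills a struct drm_sched_init_args instead; a minimal sketch of the new calling convention (my_sched_ops, my_dev, sched and the literal numbers are placeholders, not values from this patch):

	const struct drm_sched_init_args args = {
		.ops = &my_sched_ops,
		.submit_wq = NULL,	/* NULL: an ordered workqueue is allocated */
		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
		.credit_limit = 64,
		.hang_limit = 0,
		.timeout = msecs_to_jiffies(500),
		.timeout_wq = NULL,	/* NULL: falls back to system_wq */
		.score = NULL,		/* NULL: the scheduler's own counter is used */
		.name = "my-sched",
		.dev = my_dev,
	};
	int ret = drm_sched_init(&sched, &args);

The NULL submit_wq and timeout_wq defaults match the behaviour shown in the hunk above, so existing callers keep their semantics once converted.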
diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c
index 08334be38694..7c935870f7d2 100644
--- a/drivers/gpu/drm/solomon/ssd130x-spi.c
+++ b/drivers/gpu/drm/solomon/ssd130x-spi.c
@@ -151,7 +151,6 @@ static const struct of_device_id ssd130x_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ssd130x_of_match);
-#if IS_MODULE(CONFIG_DRM_SSD130X_SPI)
/*
* The SPI core always reports a MODALIAS uevent of the form "spi:<dev>", even
* if the device was registered via OF. This means that the module will not be
@@ -160,7 +159,7 @@ MODULE_DEVICE_TABLE(of, ssd130x_of_match);
* To workaround this issue, add a SPI device ID table. Even when this should
* not be needed for this driver to match the registered SPI devices.
*/
-static const struct spi_device_id ssd130x_spi_table[] = {
+static const struct spi_device_id ssd130x_spi_id[] = {
/* ssd130x family */
{ "sh1106", SH1106_ID },
{ "ssd1305", SSD1305_ID },
@@ -175,14 +174,14 @@ static const struct spi_device_id ssd130x_spi_table[] = {
{ "ssd1331", SSD1331_ID },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(spi, ssd130x_spi_table);
-#endif
+MODULE_DEVICE_TABLE(spi, ssd130x_spi_id);
static struct spi_driver ssd130x_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = ssd130x_of_match,
},
+ .id_table = ssd130x_spi_id,
.probe = ssd130x_spi_probe,
.remove = ssd130x_spi_remove,
.shutdown = ssd130x_spi_shutdown,
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index b777690fd660..dd2006d51c7a 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -880,7 +880,7 @@ static int ssd132x_update_rect(struct ssd130x_device *ssd130x,
u8 n1 = buf[i * width + j];
u8 n2 = buf[i * width + j + 1];
- data_array[array_idx++] = (n2 << 4) | n1;
+ data_array[array_idx++] = (n2 & 0xf0) | (n1 >> 4);
}
}
@@ -1037,7 +1037,7 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb,
struct drm_format_conv_state *fmtcnv_state)
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
- unsigned int dst_pitch = drm_rect_width(rect);
+ unsigned int dst_pitch;
struct iosys_map dst;
int ret = 0;
@@ -1046,6 +1046,8 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb,
rect->x2 = min_t(unsigned int, round_up(rect->x2, SSD132X_SEGMENT_WIDTH),
ssd130x->width);
+ dst_pitch = drm_rect_width(rect);
+
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return ret;
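The corrected packing in ssd132x_update_rect() takes two adjacent 8-bit grayscale pixels and keeps only their upper nibbles; a worked example with made-up pixel values:

	/*
	 * n1 = 0xAB, n2 = 0xCD (8-bit source pixels):
	 *
	 *     (n2 & 0xf0) | (n1 >> 4) = 0xC0 | 0x0A = 0xCA
	 *
	 * Pixel j ends up in the low nibble and pixel j + 1 in the high
	 * nibble of the byte sent to the controller; the old
	 * (n2 << 4) | n1 was only correct for inputs that were already
	 * 4-bit values.
	 */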
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index c6c2abaa1891..4dcddd02629b 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -349,7 +349,7 @@ static int sti_dvo_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
sti_dvo_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
int target = mode->clock * 1000;
int target_min = target - CLK_TOLERANCE_HZ;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index b12863bea955..14fdc00d2ba0 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -280,12 +280,12 @@ static void hda_write(struct sti_hda *hda, u32 val, int offset)
*
* Return true if mode is found
*/
-static bool hda_get_mode_idx(struct drm_display_mode mode, int *idx)
+static bool hda_get_mode_idx(const struct drm_display_mode *mode, int *idx)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hda_supported_modes); i++)
- if (drm_mode_equal(&hda_supported_modes[i].mode, &mode)) {
+ if (drm_mode_equal(&hda_supported_modes[i].mode, mode)) {
*idx = i;
return true;
}
@@ -443,7 +443,7 @@ static void sti_hda_pre_enable(struct drm_bridge *bridge)
if (clk_prepare_enable(hda->clk_hddac))
DRM_ERROR("Failed to prepare/enable hda_hddac clk\n");
- if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
+ if (!hda_get_mode_idx(&hda->mode, &mode_idx)) {
DRM_ERROR("Undefined mode\n");
return;
}
@@ -526,7 +526,7 @@ static void sti_hda_set_mode(struct drm_bridge *bridge,
drm_mode_copy(&hda->mode, mode);
- if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
+ if (!hda_get_mode_idx(&hda->mode, &mode_idx)) {
DRM_ERROR("Undefined mode\n");
return;
}
@@ -603,7 +603,7 @@ static int sti_hda_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
sti_hda_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
int target = mode->clock * 1000;
int target_min = target - CLK_TOLERANCE_HZ;
@@ -614,7 +614,7 @@ sti_hda_connector_mode_valid(struct drm_connector *connector,
= to_sti_hda_connector(connector);
struct sti_hda *hda = hda_connector->hda;
- if (!hda_get_mode_idx(*mode, &idx)) {
+ if (!hda_get_mode_idx(mode, &idx)) {
return MODE_BAD;
} else {
result = clk_round_rate(hda->clk_pix, target);
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index ca2fe17de4a5..164a34d793d8 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1011,7 +1011,7 @@ fail:
static enum drm_mode_status
sti_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
int target = mode->clock * 1000;
int target_min = target - CLK_TOLERANCE_HZ;
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index 06f2d7a56cc9..4613e8e3b8fd 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -980,9 +980,8 @@ static int lvds_attach(struct drm_bridge *bridge,
}
static void lvds_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = old_bridge_state->base.state;
struct stm_lvds *lvds = bridge_to_stm_lvds(bridge);
struct drm_connector_state *conn_state;
struct drm_connector *connector;
@@ -1017,7 +1016,7 @@ static void lvds_atomic_enable(struct drm_bridge *bridge,
}
static void lvds_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct stm_lvds *lvds = bridge_to_stm_lvds(bridge);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 430b2eededb2..798507a8ae56 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1025,7 +1025,8 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane,
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
}
-static int tegra_cursor_atomic_async_check(struct drm_plane *plane, struct drm_atomic_state *state)
+static int tegra_cursor_atomic_async_check(struct drm_plane *plane, struct drm_atomic_state *state,
+ bool flip)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_crtc_state *crtc_state;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 4a8cd9ed0a94..9bb077558167 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -812,7 +812,7 @@ static const struct drm_connector_funcs tegra_dsi_connector_funcs = {
static enum drm_mode_status
tegra_dsi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
return MODE_OK;
}
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index e705f8590c13..8cd2969e7d4b 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1137,7 +1137,7 @@ static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
static enum drm_mode_status
tegra_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct tegra_output *output = connector_to_output(connector);
struct tegra_hdmi *hdmi = to_hdmi(output);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 802d2db7007a..f98f70eda906 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -1789,7 +1789,7 @@ static int tegra_sor_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
tegra_sor_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
return MODE_OK;
}
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index 56dab563abd7..0109bcf7faa5 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST_HELPERS) += \
drm_kunit_helpers.o
obj-$(CONFIG_DRM_KUNIT_TEST) += \
+ drm_atomic_state_test.o \
drm_buddy_test.o \
drm_cmdline_parser_test.o \
drm_connector_test.o \
diff --git a/drivers/gpu/drm/tests/drm_atomic_state_test.c b/drivers/gpu/drm/tests/drm_atomic_state_test.c
new file mode 100644
index 000000000000..5945c3298901
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_atomic_state_test.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_atomic_state helpers
+ *
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_kunit_helpers.h>
+#include <drm/drm_probe_helper.h>
+
+#define DRM_TEST_ENC_0 BIT(0)
+#define DRM_TEST_ENC_1 BIT(1)
+#define DRM_TEST_ENC_2 BIT(2)
+
+#define DRM_TEST_CONN_0 BIT(0)
+
+struct drm_clone_mode_test {
+ const char *name;
+ u32 encoder_mask;
+ int expected_result;
+};
+
+static const struct drm_display_mode drm_atomic_test_mode = {
+ DRM_MODE("1024x768", 0, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
+};
+
+struct drm_atomic_test_priv {
+ struct drm_device drm;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder encoders[3];
+ struct drm_connector connectors[2];
+};
+
+static int modeset_counter;
+
+static void drm_test_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ modeset_counter++;
+}
+
+static const struct drm_encoder_helper_funcs drm_atomic_test_encoder_funcs = {
+ .atomic_mode_set = drm_test_encoder_mode_set,
+};
+
+static const struct drm_connector_funcs dummy_connector_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .reset = drm_atomic_helper_connector_reset,
+};
+
+static int drm_atomic_test_dummy_get_modes(struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector,
+ &drm_atomic_test_mode);
+}
+
+static const struct drm_connector_helper_funcs dummy_connector_helper_funcs = {
+ .get_modes = drm_atomic_test_dummy_get_modes,
+};
+
+static struct drm_atomic_test_priv *
+drm_atomic_test_init_drm_components(struct kunit *test, bool has_connectors)
+{
+ struct drm_atomic_test_priv *priv;
+ struct drm_encoder *enc;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ priv = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct drm_atomic_test_priv,
+ drm,
+ DRIVER_MODESET | DRIVER_ATOMIC);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+ test->priv = priv;
+
+ drm = &priv->drm;
+ priv->plane = drm_kunit_helper_create_primary_plane(test, drm,
+ NULL,
+ NULL,
+ NULL, 0,
+ NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->plane);
+
+ priv->crtc = drm_kunit_helper_create_crtc(test, drm,
+ priv->plane, NULL,
+ NULL,
+ NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->crtc);
+
+ for (int i = 0; i < ARRAY_SIZE(priv->encoders); i++) {
+ enc = &priv->encoders[i];
+
+ ret = drmm_encoder_init(drm, enc, NULL,
+ DRM_MODE_ENCODER_DSI, NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ enc->possible_crtcs = drm_crtc_mask(priv->crtc);
+ }
+
+ priv->encoders[0].possible_clones = DRM_TEST_ENC_0 | DRM_TEST_ENC_1;
+ priv->encoders[1].possible_clones = DRM_TEST_ENC_0 | DRM_TEST_ENC_1;
+ priv->encoders[2].possible_clones = DRM_TEST_ENC_2;
+
+ if (!has_connectors)
+ goto done;
+
+ BUILD_BUG_ON(ARRAY_SIZE(priv->connectors) > ARRAY_SIZE(priv->encoders));
+
+ for (int i = 0; i < ARRAY_SIZE(priv->connectors); i++) {
+ conn = &priv->connectors[i];
+
+ ret = drmm_connector_init(drm, conn, &dummy_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI, NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_connector_helper_add(conn, &dummy_connector_helper_funcs);
+ drm_encoder_helper_add(&priv->encoders[i],
+ &drm_atomic_test_encoder_funcs);
+
+ drm_connector_attach_encoder(conn, &priv->encoders[i]);
+ }
+
+done:
+ drm_mode_config_reset(drm);
+
+ return priv;
+}
+
+static int set_up_atomic_state(struct kunit *test,
+ struct drm_atomic_test_priv *priv,
+ struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_device *drm = &priv->drm;
+ struct drm_crtc *crtc = priv->crtc;
+ struct drm_atomic_state *state;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ if (connector) {
+ conn_state = drm_atomic_get_connector_state(state, connector);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ }
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, &drm_atomic_test_mode);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ crtc_state->enable = true;
+ crtc_state->active = true;
+
+ if (connector) {
+ ret = drm_atomic_commit(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ } else {
+ // dummy connector mask
+ crtc_state->connector_mask = DRM_TEST_CONN_0;
+ }
+
+ return 0;
+}
+
+/*
+ * Test that the DRM encoder mode_set() is called when the atomic state
+ * connectors are changed but the CRTC mode is not.
+ */
+static void drm_test_check_connector_changed_modeset(struct kunit *test)
+{
+ struct drm_atomic_test_priv *priv;
+ struct drm_modeset_acquire_ctx *ctx;
+ struct drm_connector *old_conn, *new_conn;
+ struct drm_atomic_state *state;
+ struct drm_device *drm;
+ struct drm_connector_state *new_conn_state, *old_conn_state;
+ int ret, initial_modeset_count;
+
+ priv = drm_atomic_test_init_drm_components(test, true);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ old_conn = &priv->connectors[0];
+ new_conn = &priv->connectors[1];
+
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ // first modeset to enable
+ ret = set_up_atomic_state(test, priv, old_conn, ctx);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ new_conn_state = drm_atomic_get_connector_state(state, new_conn);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_conn_state);
+
+ old_conn_state = drm_atomic_get_connector_state(state, old_conn);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, old_conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(old_conn_state, NULL);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ ret = drm_atomic_set_crtc_for_connector(new_conn_state, priv->crtc);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ initial_modeset_count = modeset_counter;
+
+ // modeset_disables is called as part of the atomic commit tail
+ ret = drm_atomic_commit(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_EQ(test, modeset_counter, initial_modeset_count + 1);
+}
+
+/*
+ * Test that the drm_crtc_in_clone_mode() helper can detect if a given CRTC
+ * state is in clone mode
+ */
+static void drm_test_check_in_clone_mode(struct kunit *test)
+{
+ bool ret;
+ const struct drm_clone_mode_test *param = test->param_value;
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = kunit_kzalloc(test, sizeof(*crtc_state), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, crtc_state);
+
+ crtc_state->encoder_mask = param->encoder_mask;
+
+ ret = drm_crtc_in_clone_mode(crtc_state);
+
+ KUNIT_ASSERT_EQ(test, ret, param->expected_result);
+}
+
+/*
+ * Test that the atomic commit path will succeed for valid clones (or non-cloned
+ * states) and fail for states where the cloned encoders are not possible_clones
+ * of each other.
+ */
+static void drm_test_check_valid_clones(struct kunit *test)
+{
+ int ret;
+ const struct drm_clone_mode_test *param = test->param_value;
+ struct drm_atomic_test_priv *priv;
+ struct drm_modeset_acquire_ctx *ctx;
+ struct drm_device *drm;
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+
+ priv = drm_atomic_test_init_drm_components(test, false);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ ret = set_up_atomic_state(test, priv, NULL, ctx);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ crtc_state = drm_atomic_get_crtc_state(state, priv->crtc);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+
+ crtc_state->encoder_mask = param->encoder_mask;
+
+ // force modeset
+ crtc_state->mode_changed = true;
+
+ ret = drm_atomic_helper_check_modeset(drm, state);
+ KUNIT_ASSERT_EQ(test, ret, param->expected_result);
+}
+
+static void drm_check_in_clone_mode_desc(const struct drm_clone_mode_test *t,
+ char *desc)
+{
+ sprintf(desc, "%s", t->name);
+}
+
+static void drm_check_valid_clones_desc(const struct drm_clone_mode_test *t,
+ char *desc)
+{
+ sprintf(desc, "%s", t->name);
+}
+
+static const struct drm_clone_mode_test drm_clone_mode_tests[] = {
+ {
+ .name = "in_clone_mode",
+ .encoder_mask = DRM_TEST_ENC_0 | DRM_TEST_ENC_1,
+ .expected_result = true,
+ },
+ {
+ .name = "not_in_clone_mode",
+ .encoder_mask = DRM_TEST_ENC_0,
+ .expected_result = false,
+ },
+};
+
+static const struct drm_clone_mode_test drm_valid_clone_mode_tests[] = {
+ {
+ .name = "not_in_clone_mode",
+ .encoder_mask = DRM_TEST_ENC_0,
+ .expected_result = 0,
+ },
+
+ {
+ .name = "valid_clone",
+ .encoder_mask = DRM_TEST_ENC_0 | DRM_TEST_ENC_1,
+ .expected_result = 0,
+ },
+ {
+ .name = "invalid_clone",
+ .encoder_mask = DRM_TEST_ENC_0 | DRM_TEST_ENC_2,
+ .expected_result = -EINVAL,
+ },
+};
+
+KUNIT_ARRAY_PARAM(drm_check_in_clone_mode, drm_clone_mode_tests,
+ drm_check_in_clone_mode_desc);
+
+KUNIT_ARRAY_PARAM(drm_check_valid_clones, drm_valid_clone_mode_tests,
+ drm_check_valid_clones_desc);
+
+static struct kunit_case drm_test_check_modeset_test[] = {
+ KUNIT_CASE(drm_test_check_connector_changed_modeset),
+ {}
+};
+
+static struct kunit_case drm_in_clone_mode_check_test[] = {
+ KUNIT_CASE_PARAM(drm_test_check_in_clone_mode,
+ drm_check_in_clone_mode_gen_params),
+ KUNIT_CASE_PARAM(drm_test_check_valid_clones,
+ drm_check_valid_clones_gen_params),
+ {}
+};
+
+static struct kunit_suite drm_test_check_modeset_test_suite = {
+ .name = "drm_validate_modeset",
+ .test_cases = drm_test_check_modeset_test,
+};
+
+static struct kunit_suite drm_in_clone_mode_check_test_suite = {
+ .name = "drm_validate_clone_mode",
+ .test_cases = drm_in_clone_mode_check_test,
+};
+
+kunit_test_suites(&drm_in_clone_mode_check_test_suite,
+ &drm_test_check_modeset_test_suite);
+
+MODULE_AUTHOR("Jessica Zhang <quic_jesszhan@quicinc.com>");
+MODULE_DESCRIPTION("Test cases for the drm_atomic_helper functions");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index 9662c949d0e3..7a0e523651f0 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -261,7 +261,6 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test)
static void drm_test_buddy_alloc_clear(struct kunit *test)
{
unsigned long n_pages, total, i = 0;
- DRM_RND_STATE(prng, random_seed);
const unsigned long ps = SZ_4K;
struct drm_buddy_block *block;
const int max_order = 12;
@@ -385,17 +384,28 @@ static void drm_test_buddy_alloc_clear(struct kunit *test)
drm_buddy_fini(&mm);
/*
- * Create a new mm with a non power-of-two size. Allocate a random size, free as
- * cleared and then call fini. This will ensure the multi-root force merge during
- * fini.
+ * Create a new mm with a non power-of-two size. Allocate a random size from each
+ * root, free as cleared and then call fini. This will ensure the multi-root
+ * force merge during fini.
*/
- mm_size = 12 * SZ_4K;
- size = max(round_up(prandom_u32_state(&prng) % mm_size, ps), ps);
+ mm_size = (SZ_4K << max_order) + (SZ_4K << (max_order - 2));
+
KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
- size, ps, &allocated,
- DRM_BUDDY_TOPDOWN_ALLOCATION),
- "buddy_alloc hit an error size=%u\n", size);
+ KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order,
+ 4 * ps, ps, &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc hit an error size=%lu\n", 4 * ps);
+ drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order,
+ 2 * ps, ps, &allocated,
+ DRM_BUDDY_CLEAR_ALLOCATION),
+ "buddy_alloc hit an error size=%lu\n", 2 * ps);
+ drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, SZ_4K << max_order, mm_size,
+ ps, ps, &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc hit an error size=%lu\n", ps);
drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
drm_buddy_fini(&mm);
}
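The new non-power-of-two sizing in drm_test_buddy_alloc_clear() is easier to follow with the arithmetic spelled out (all values come from the hunk above):

	/*
	 * max_order = 12, ps = SZ_4K:
	 *
	 *     mm_size = (SZ_4K << 12) + (SZ_4K << 10) = 16 MiB + 4 MiB = 20 MiB
	 *
	 * 20 MiB is not a power of two, so drm_buddy_init() builds an
	 * order-12 root for the first 16 MiB plus smaller root(s) for the
	 * remaining 4 MiB. The three allocations then touch both sides
	 * (two below 16 MiB, one in [16 MiB, 20 MiB)), and freeing them
	 * as cleared leaves cleared blocks under different roots, which
	 * exercises the multi-root force merge in drm_buddy_fini().
	 */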
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index b976a5e9aef5..23ecc00accb2 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -70,10 +70,17 @@ static int light_up_connector(struct kunit *test,
state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+retry:
conn_state = drm_atomic_get_connector_state(state, connector);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
crtc_state = drm_atomic_get_crtc_state(state, crtc);
@@ -282,15 +289,16 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
+
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -345,15 +353,16 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
+
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -408,18 +417,18 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -474,7 +483,6 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
- drm = &priv->drm;
crtc = priv->crtc;
ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -520,18 +528,18 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -588,7 +596,6 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
- drm = &priv->drm;
crtc = priv->crtc;
ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -636,18 +643,18 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -704,7 +711,6 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
- drm = &priv->drm;
crtc = priv->crtc;
ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -754,20 +760,20 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
10);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
KUNIT_ASSERT_GT(test, ret, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -828,20 +834,20 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
10);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
KUNIT_ASSERT_GT(test, ret, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -899,6 +905,8 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_dvi_1080p,
@@ -908,14 +916,12 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
info = &conn->display_info;
KUNIT_ASSERT_FALSE(test, info->is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -946,21 +952,21 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
KUNIT_ASSERT_GT(test, ret, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -993,21 +999,21 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
10);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
KUNIT_ASSERT_GT(test, ret, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -1040,21 +1046,21 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
KUNIT_ASSERT_GT(test, ret, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -1091,15 +1097,16 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
+
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -1147,6 +1154,8 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
@@ -1157,9 +1166,6 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1170,8 +1176,9 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
rate = drm_hdmi_compute_mode_clock(preferred, 10, HDMI_COLORSPACE_RGB);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1216,6 +1223,8 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
@@ -1226,9 +1235,6 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1242,8 +1248,9 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1290,9 +1297,6 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
@@ -1306,7 +1310,9 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
rate = mode->clock * 1500;
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
crtc = priv->crtc;
ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1340,6 +1346,8 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
@@ -1350,9 +1358,6 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -1371,8 +1376,9 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1407,6 +1413,8 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_max_200mhz,
@@ -1417,9 +1425,6 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -1438,8 +1443,9 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1473,6 +1479,8 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
@@ -1483,9 +1491,6 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -1496,8 +1501,9 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1533,6 +1539,8 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_max_340mhz,
@@ -1543,9 +1551,6 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -1556,8 +1561,9 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
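
Every test in this file is reshuffled the same way: the acquire context is now allocated only after the EDID lookup and mode checks, immediately before light_up_connector(), and light_up_connector() itself learns to recover from ww-mutex contention instead of asserting on -EDEADLK. A minimal sketch of that backoff idiom, using only the calls visible in the hunk (the wrapper name set_crtc_with_backoff is illustrative):

static int set_crtc_with_backoff(struct drm_atomic_state *state,
                                 struct drm_connector *connector,
                                 struct drm_crtc *crtc,
                                 struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_connector_state *conn_state;
        int ret;

retry:
        conn_state = drm_atomic_get_connector_state(state, connector);
        if (IS_ERR(conn_state))
                return PTR_ERR(conn_state);

        ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
        if (ret == -EDEADLK) {
                /* Drop the half-built state, wait on the contended lock, retry. */
                drm_atomic_state_clear(state);
                ret = drm_modeset_backoff(ctx);
                if (!ret)
                        goto retry;
        }
        return ret;
}
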
diff --git a/drivers/gpu/drm/tidss/tidss_dispc_regs.h b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
index 13feedfe5d6d..e88148e44937 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc_regs.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
@@ -49,7 +49,7 @@ enum dispc_common_regs {
/*
* dispc_common_regmap should be defined as const u16 * and pointing
* to a valid dss common register map for the platform, before the
- * macros bellow can be used.
+ * macros below can be used.
*/
#define REG(r) (dispc_common_regmap[r ## _OFF])
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 116de124bddb..719412e6c346 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -67,7 +67,7 @@ static int tidss_plane_atomic_check(struct drm_plane *plane,
/*
* The HW is only able to start drawing at subpixel boundary
- * (the two first checks bellow). At the end of a row the HW
+ * (the first two checks below). At the end of a row the HW
* can only jump integer number of subpixels forward to the
* beginning of the next row. So we can only show picture with
* integer subpixel width (the third check). However, after
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ea5e49858857..95b86003c50d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -451,7 +451,8 @@ int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man
int ret = 0;
spin_lock(&bdev->lru_lock);
- res = ttm_resource_manager_first(man, &cursor);
+ ttm_resource_cursor_init(&cursor, man);
+ res = ttm_resource_manager_first(&cursor);
ttm_resource_cursor_fini(&cursor);
if (!res) {
ret = -ENOENT;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d939925efa81..917096bd5f68 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -865,7 +865,8 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
s64 lret;
spin_lock(&bdev->lru_lock);
- ttm_resource_manager_for_each_res(man, &cursor, res) {
+ ttm_resource_cursor_init(&cursor, man);
+ ttm_resource_manager_for_each_res(&cursor, res) {
struct ttm_buffer_object *bo = res->bo;
bool bo_needs_unlock = false;
bool bo_locked = false;
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 8504dbe19c1a..c9eba76d5143 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -58,6 +58,23 @@ struct ttm_pool_dma {
unsigned long vaddr;
};
+/**
+ * struct ttm_pool_alloc_state - Current state of the tt page allocation process
+ * @pages: Pointer to the next tt page pointer to populate.
+ * @caching_divide: Pointer to the first page pointer whose page has a staged but
+ * not committed caching transition from write-back to @tt_caching.
+ * @dma_addr: Pointer to the next tt dma_address entry to populate if any.
+ * @remaining_pages: Remaining pages to populate.
+ * @tt_caching: The requested cpu-caching for the pages allocated.
+ */
+struct ttm_pool_alloc_state {
+ struct page **pages;
+ struct page **caching_divide;
+ dma_addr_t *dma_addr;
+ pgoff_t remaining_pages;
+ enum ttm_caching tt_caching;
+};
+
static unsigned long page_pool_size;
MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
@@ -160,25 +177,25 @@ static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
kfree(dma);
}
-/* Apply a new caching to an array of pages */
-static int ttm_pool_apply_caching(struct page **first, struct page **last,
- enum ttm_caching caching)
+/* Apply any cpu-caching deferred during page allocation */
+static int ttm_pool_apply_caching(struct ttm_pool_alloc_state *alloc)
{
#ifdef CONFIG_X86
- unsigned int num_pages = last - first;
+ unsigned int num_pages = alloc->pages - alloc->caching_divide;
if (!num_pages)
return 0;
- switch (caching) {
+ switch (alloc->tt_caching) {
case ttm_cached:
break;
case ttm_write_combined:
- return set_pages_array_wc(first, num_pages);
+ return set_pages_array_wc(alloc->caching_divide, num_pages);
case ttm_uncached:
- return set_pages_array_uc(first, num_pages);
+ return set_pages_array_uc(alloc->caching_divide, num_pages);
}
#endif
+ alloc->caching_divide = alloc->pages;
return 0;
}
@@ -354,24 +371,41 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
return p->private;
}
-/* Called when we got a page, either from a pool or newly allocated */
+/*
+ * Called when we got a page, either from a pool or newly allocated.
+ * If needed, dma map the page and populate the dma address array.
+ * Populate the page address array.
+ * If the caching is consistent, update any deferred caching. Otherwise
+ * stage this page for an upcoming deferred caching update.
+ */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
- struct page *p, dma_addr_t **dma_addr,
- unsigned long *num_pages,
- struct page ***pages)
+ struct page *p, enum ttm_caching page_caching,
+ struct ttm_pool_alloc_state *alloc)
{
- unsigned int i;
- int r;
+ pgoff_t i, nr = 1UL << order;
+ bool caching_consistent;
+ int r = 0;
+
+ caching_consistent = (page_caching == alloc->tt_caching) || PageHighMem(p);
+
+ if (caching_consistent) {
+ r = ttm_pool_apply_caching(alloc);
+ if (r)
+ return r;
+ }
- if (*dma_addr) {
- r = ttm_pool_map(pool, order, p, dma_addr);
+ if (alloc->dma_addr) {
+ r = ttm_pool_map(pool, order, p, &alloc->dma_addr);
if (r)
return r;
}
- *num_pages -= 1 << order;
- for (i = 1 << order; i; --i, ++(*pages), ++p)
- **pages = p;
+ alloc->remaining_pages -= nr;
+ for (i = 0; i < nr; ++i)
+ *alloc->pages++ = p++;
+
+ if (caching_consistent)
+ alloc->caching_divide = alloc->pages;
return 0;
}
@@ -413,6 +447,26 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
}
}
+static void ttm_pool_alloc_state_init(const struct ttm_tt *tt,
+ struct ttm_pool_alloc_state *alloc)
+{
+ alloc->pages = tt->pages;
+ alloc->caching_divide = tt->pages;
+ alloc->dma_addr = tt->dma_address;
+ alloc->remaining_pages = tt->num_pages;
+ alloc->tt_caching = tt->caching;
+}
+
+/*
+ * Find a suitable allocation order based on highest desired order
+ * and the number of remaining pages.
+ */
+static unsigned int ttm_pool_alloc_find_order(unsigned int highest,
+ const struct ttm_pool_alloc_state *alloc)
+{
+ return min_t(unsigned int, highest, __fls(alloc->remaining_pages));
+}
+
/**
* ttm_pool_alloc - Fill a ttm_tt object
*
@@ -428,19 +482,19 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
struct ttm_operation_ctx *ctx)
{
- pgoff_t num_pages = tt->num_pages;
- dma_addr_t *dma_addr = tt->dma_address;
- struct page **caching = tt->pages;
- struct page **pages = tt->pages;
+ struct ttm_pool_alloc_state alloc;
enum ttm_caching page_caching;
gfp_t gfp_flags = GFP_USER;
pgoff_t caching_divide;
unsigned int order;
+ bool allow_pools;
struct page *p;
int r;
- WARN_ON(!num_pages || ttm_tt_is_populated(tt));
- WARN_ON(dma_addr && !pool->dev);
+ ttm_pool_alloc_state_init(tt, &alloc);
+
+ WARN_ON(!alloc.remaining_pages || ttm_tt_is_populated(tt));
+ WARN_ON(alloc.dma_addr && !pool->dev);
if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
gfp_flags |= __GFP_ZERO;
@@ -453,67 +507,46 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
else
gfp_flags |= GFP_HIGHUSER;
- for (order = min_t(unsigned int, MAX_PAGE_ORDER, __fls(num_pages));
- num_pages;
- order = min_t(unsigned int, order, __fls(num_pages))) {
+ page_caching = tt->caching;
+ allow_pools = true;
+ for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, &alloc);
+ alloc.remaining_pages;
+ order = ttm_pool_alloc_find_order(order, &alloc)) {
struct ttm_pool_type *pt;
- page_caching = tt->caching;
- pt = ttm_pool_select_type(pool, tt->caching, order);
- p = pt ? ttm_pool_type_take(pt) : NULL;
- if (p) {
- r = ttm_pool_apply_caching(caching, pages,
- tt->caching);
- if (r)
- goto error_free_page;
-
- caching = pages;
- do {
- r = ttm_pool_page_allocated(pool, order, p,
- &dma_addr,
- &num_pages,
- &pages);
- if (r)
- goto error_free_page;
-
- caching = pages;
- if (num_pages < (1 << order))
- break;
-
- p = ttm_pool_type_take(pt);
- } while (p);
- }
-
- page_caching = ttm_cached;
- while (num_pages >= (1 << order) &&
- (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
-
- if (PageHighMem(p)) {
- r = ttm_pool_apply_caching(caching, pages,
- tt->caching);
- if (r)
- goto error_free_page;
- caching = pages;
- }
- r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
- &num_pages, &pages);
- if (r)
- goto error_free_page;
- if (PageHighMem(p))
- caching = pages;
+ /* First, try to allocate a page from a pool if one exists. */
+ p = NULL;
+ pt = ttm_pool_select_type(pool, page_caching, order);
+ if (pt && allow_pools)
+ p = ttm_pool_type_take(pt);
+ /*
+ * If that fails or previously failed, allocate from system.
+ * Note that this also disallows additional pool allocations using
+ * write-back cached pools of the same order. Consider removing
+ * that behaviour.
+ */
+ if (!p) {
+ page_caching = ttm_cached;
+ allow_pools = false;
+ p = ttm_pool_alloc_page(pool, gfp_flags, order);
}
-
+ /* If that fails, lower the order if possible and retry. */
if (!p) {
if (order) {
--order;
+ page_caching = tt->caching;
+ allow_pools = true;
continue;
}
r = -ENOMEM;
goto error_free_all;
}
+ r = ttm_pool_page_allocated(pool, order, p, page_caching, &alloc);
+ if (r)
+ goto error_free_page;
}
- r = ttm_pool_apply_caching(caching, pages, tt->caching);
+ r = ttm_pool_apply_caching(&alloc);
if (r)
goto error_free_all;
@@ -523,10 +556,10 @@ error_free_page:
ttm_pool_free_page(pool, page_caching, order, p);
error_free_all:
- num_pages = tt->num_pages - num_pages;
- caching_divide = caching - tt->pages;
+ caching_divide = alloc.caching_divide - tt->pages;
ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
- ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);
+ ttm_pool_free_range(pool, tt, ttm_cached, caching_divide,
+ tt->num_pages - alloc.remaining_pages);
return r;
}
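
The pool rework above gathers the page pointer, DMA address, remaining-page and deferred-caching bookkeeping into struct ttm_pool_alloc_state, so the public allocator keeps its signature and callers are untouched. A sketch of the driver-facing side, assuming the pool was set up with ttm_pool_init() elsewhere (my_tt_populate and my_tt_unpopulate are illustrative names):

static int my_tt_populate(struct ttm_pool *pool, struct ttm_tt *tt,
                          struct ttm_operation_ctx *ctx)
{
        /*
         * Fills tt->pages[]; when the pool was created with a struct device,
         * tt->dma_address[] is populated as well. Order selection, pool
         * fallback and deferred caching transitions all happen internally.
         */
        return ttm_pool_alloc(pool, tt, ctx);
}

static void my_tt_unpopulate(struct ttm_pool *pool, struct ttm_tt *tt)
{
        ttm_pool_free(pool, tt);
}
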
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index cc29bbf3eabb..7e5a60c55813 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -83,6 +83,23 @@ static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
}
/**
+ * ttm_resource_cursor_init() - Initialize a struct ttm_resource_cursor
+ * @cursor: The cursor to initialize.
+ * @man: The resource manager.
+ *
+ * Initialize the cursor before using it for iteration.
+ */
+void ttm_resource_cursor_init(struct ttm_resource_cursor *cursor,
+ struct ttm_resource_manager *man)
+{
+ cursor->priority = 0;
+ cursor->man = man;
+ ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
+ INIT_LIST_HEAD(&cursor->bulk_link);
+ INIT_LIST_HEAD(&cursor->hitch.link);
+}
+
+/**
* ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
* @cursor: The struct ttm_resource_cursor to finalize.
*
@@ -252,11 +269,16 @@ static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_
return ttm_tt_is_swapped(bo->ttm);
}
+static bool ttm_resource_unevictable(struct ttm_resource *res, struct ttm_buffer_object *bo)
+{
+ return bo->pin_count || ttm_resource_is_swapped(res, bo);
+}
+
/* Add the resource to a bulk move if the BO is configured for it */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo)
{
- if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo))
+ if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
ttm_lru_bulk_move_add(bo->bulk_move, res);
}
@@ -264,7 +286,7 @@ void ttm_resource_add_bulk_move(struct ttm_resource *res,
void ttm_resource_del_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo)
{
- if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo))
+ if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
ttm_lru_bulk_move_del(bo->bulk_move, res);
}
@@ -276,10 +298,10 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
lockdep_assert_held(&bo->bdev->lru_lock);
- if (bo->pin_count || ttm_resource_is_swapped(res, bo)) {
+ if (ttm_resource_unevictable(res, bo)) {
list_move_tail(&res->lru.link, &bdev->unevictable);
- } else if (bo->bulk_move) {
+ } else if (bo->bulk_move) {
struct ttm_lru_bulk_move_pos *pos =
ttm_lru_bulk_move_pos(bo->bulk_move, res);
@@ -318,7 +340,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
man = ttm_manager_type(bo->bdev, place->mem_type);
spin_lock(&bo->bdev->lru_lock);
- if (bo->pin_count || ttm_resource_is_swapped(res, bo))
+ if (ttm_resource_unevictable(res, bo))
list_add_tail(&res->lru.link, &bo->bdev->unevictable);
else
list_add_tail(&res->lru.link, &man->lru[bo->priority]);
@@ -612,7 +634,6 @@ ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
/**
* ttm_resource_manager_first() - Start iterating over the resources
* of a resource manager
- * @man: resource manager to iterate over
* @cursor: cursor to record the position
*
* Initializes the cursor and starts iterating. When done iterating,
@@ -621,17 +642,16 @@ ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
* Return: The first resource from the resource manager.
*/
struct ttm_resource *
-ttm_resource_manager_first(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor)
+ttm_resource_manager_first(struct ttm_resource_cursor *cursor)
{
- lockdep_assert_held(&man->bdev->lru_lock);
+ struct ttm_resource_manager *man = cursor->man;
- cursor->priority = 0;
- cursor->man = man;
- ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
- INIT_LIST_HEAD(&cursor->bulk_link);
- list_add(&cursor->hitch.link, &man->lru[cursor->priority]);
+ if (WARN_ON_ONCE(!man))
+ return NULL;
+ lockdep_assert_held(&man->bdev->lru_lock);
+
+ list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
return ttm_resource_manager_next(cursor);
}
@@ -667,8 +687,6 @@ ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
ttm_resource_cursor_clear_bulk(cursor);
}
- ttm_resource_cursor_fini(cursor);
-
return NULL;
}
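
Together with the ttm_bo.c and ttm_bo_util.c hunks earlier, the cursor lifetime is now explicit: ttm_resource_cursor_init() before the walk and ttm_resource_cursor_fini() by the caller afterwards, since ttm_resource_manager_next() no longer finalizes the cursor when the list is exhausted. A sketch of the resulting iteration pattern (walk_man_lru is an illustrative name):

static void walk_man_lru(struct ttm_device *bdev,
                         struct ttm_resource_manager *man)
{
        struct ttm_resource_cursor cursor;
        struct ttm_resource *res;

        spin_lock(&bdev->lru_lock);
        ttm_resource_cursor_init(&cursor, man);
        for (res = ttm_resource_manager_first(&cursor); res;
             res = ttm_resource_manager_next(&cursor)) {
                /* Inspect res / res->bo under the LRU lock here. */
        }
        ttm_resource_cursor_fini(&cursor);
        spin_unlock(&bdev->lru_lock);
}
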
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 930737a9347b..852015214e97 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -295,11 +295,21 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
if (ret)
return ret;
+ v3d->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(v3d->clk))
+ return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n");
+
+ ret = clk_prepare_enable(v3d->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable the V3D clock\n");
+ return ret;
+ }
+
mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
ret = dma_set_mask_and_coherent(dev, mask);
if (ret)
- return ret;
+ goto clk_disable;
v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH);
@@ -319,28 +329,29 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
ret = PTR_ERR(v3d->reset);
if (ret == -EPROBE_DEFER)
- return ret;
+ goto clk_disable;
v3d->reset = NULL;
ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
if (ret) {
dev_err(dev,
"Failed to get reset control or bridge regs\n");
- return ret;
+ goto clk_disable;
}
}
if (v3d->ver < 41) {
ret = map_regs(v3d, &v3d->gca_regs, "gca");
if (ret)
- return ret;
+ goto clk_disable;
}
v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!v3d->mmu_scratch) {
dev_err(dev, "Failed to allocate MMU scratch page\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto clk_disable;
}
ret = v3d_gem_init(drm);
@@ -369,6 +380,8 @@ gem_destroy:
v3d_gem_destroy(drm);
dma_free:
dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
+clk_disable:
+ clk_disable_unprepare(v3d->clk);
return ret;
}
@@ -386,6 +399,8 @@ static void v3d_platform_drm_remove(struct platform_device *pdev)
dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
v3d->mmu_scratch_paddr);
+
+ clk_disable_unprepare(v3d->clk);
}
static struct platform_driver v3d_platform_driver = {
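
With the clock acquired at the top of probe, every later failure has to unwind through clk_disable_unprepare(), which is what the new clk_disable label provides. A condensed sketch of that probe shape (my_probe and the DMA-mask step stand in for the driver-specific work):

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct clk *clk;
        int ret;

        clk = devm_clk_get_optional(dev, NULL);
        if (IS_ERR(clk))
                return dev_err_probe(dev, PTR_ERR(clk), "Failed to get clock\n");

        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret)
                goto clk_disable;

        return 0;

clk_disable:
        clk_disable_unprepare(clk);
        return ret;
}
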
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index dc1cfe2e14be..9deaefa0f95b 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -147,7 +147,6 @@ struct v3d_dev {
struct v3d_render_job *render_job;
struct v3d_tfu_job *tfu_job;
struct v3d_csd_job *csd_job;
- struct v3d_cpu_job *cpu_job;
struct v3d_queue_state queue[V3D_MAX_QUEUES];
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index da08ddb01d21..80466ce8c7df 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -656,8 +656,6 @@ v3d_cpu_job_run(struct drm_sched_job *sched_job)
struct v3d_cpu_job *job = to_cpu_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
- v3d->cpu_job = job;
-
if (job->job_type >= ARRAY_SIZE(cpu_job_function)) {
DRM_DEBUG_DRIVER("Unknown CPU job: %d\n", job->job_type);
return NULL;
@@ -822,67 +820,54 @@ static const struct drm_sched_backend_ops v3d_cpu_sched_ops = {
.free_job = v3d_cpu_job_free
};
+static int
+v3d_queue_sched_init(struct v3d_dev *v3d, const struct drm_sched_backend_ops *ops,
+ enum v3d_queue queue, const char *name)
+{
+ struct drm_sched_init_args args = {
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = 1,
+ .timeout = msecs_to_jiffies(500),
+ .dev = v3d->drm.dev,
+ };
+
+ args.ops = ops;
+ args.name = name;
+
+ return drm_sched_init(&v3d->queue[queue].sched, &args);
+}
+
int
v3d_sched_init(struct v3d_dev *v3d)
{
- int hw_jobs_limit = 1;
- int job_hang_limit = 0;
- int hang_limit_ms = 500;
int ret;
- ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
- &v3d_bin_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- hw_jobs_limit, job_hang_limit,
- msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_bin", v3d->drm.dev);
+ ret = v3d_queue_sched_init(v3d, &v3d_bin_sched_ops, V3D_BIN, "v3d_bin");
if (ret)
return ret;
- ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
- &v3d_render_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- hw_jobs_limit, job_hang_limit,
- msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_render", v3d->drm.dev);
+ ret = v3d_queue_sched_init(v3d, &v3d_render_sched_ops, V3D_RENDER,
+ "v3d_render");
if (ret)
goto fail;
- ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
- &v3d_tfu_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- hw_jobs_limit, job_hang_limit,
- msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_tfu", v3d->drm.dev);
+ ret = v3d_queue_sched_init(v3d, &v3d_tfu_sched_ops, V3D_TFU, "v3d_tfu");
if (ret)
goto fail;
if (v3d_has_csd(v3d)) {
- ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
- &v3d_csd_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- hw_jobs_limit, job_hang_limit,
- msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_csd", v3d->drm.dev);
+ ret = v3d_queue_sched_init(v3d, &v3d_csd_sched_ops, V3D_CSD,
+ "v3d_csd");
if (ret)
goto fail;
- ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
- &v3d_cache_clean_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- hw_jobs_limit, job_hang_limit,
- msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_cache_clean", v3d->drm.dev);
+ ret = v3d_queue_sched_init(v3d, &v3d_cache_clean_sched_ops,
+ V3D_CACHE_CLEAN, "v3d_cache_clean");
if (ret)
goto fail;
}
- ret = drm_sched_init(&v3d->queue[V3D_CPU].sched,
- &v3d_cpu_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- 1, job_hang_limit,
- msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_cpu", v3d->drm.dev);
+ ret = v3d_queue_sched_init(v3d, &v3d_cpu_sched_ops, V3D_CPU, "v3d_cpu");
if (ret)
goto fail;
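
The scheduler setup shrinks because drm_sched_init() now takes a struct drm_sched_init_args rather than a long positional argument list. A sketch of the same idea for a single queue, using only the fields that appear in the hunk (init_one_queue is an illustrative name):

static int init_one_queue(struct drm_gpu_scheduler *sched,
                          const struct drm_sched_backend_ops *ops,
                          struct device *dev, const char *name)
{
        struct drm_sched_init_args args = {
                .ops = ops,
                .num_rqs = DRM_SCHED_PRIORITY_COUNT,
                .credit_limit = 1,
                .timeout = msecs_to_jiffies(500),
                .name = name,
                .dev = dev,
        };

        return drm_sched_init(sched, &args);
}
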
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 6cc7b7e6294a..123ab0ce1781 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -14,6 +14,7 @@ config DRM_VC4
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
select DRM_DISPLAY_HELPER
+ select DRM_EXEC
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index cf40a53ad42e..2a48038abe7a 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -884,11 +884,7 @@ struct vc4_async_flip_state {
struct drm_framebuffer *fb;
struct drm_framebuffer *old_fb;
struct drm_pending_vblank_event *event;
-
- union {
- struct dma_fence_cb fence;
- struct vc4_seqno_cb seqno;
- } cb;
+ struct dma_fence_cb cb;
};
/* Called when the V3D execution for the BO being flipped to is done, so that
@@ -919,10 +915,11 @@ vc4_async_page_flip_complete(struct vc4_async_flip_state *flip_state)
kfree(flip_state);
}
-static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
+static void vc4_async_page_flip_complete_with_cleanup(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
{
struct vc4_async_flip_state *flip_state =
- container_of(cb, struct vc4_async_flip_state, cb.seqno);
+ container_of(cb, struct vc4_async_flip_state, cb);
struct vc4_bo *bo = NULL;
if (flip_state->old_fb) {
@@ -932,6 +929,7 @@ static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
}
vc4_async_page_flip_complete(flip_state);
+ dma_fence_put(fence);
/*
* Decrement the BO usecnt in order to keep the inc/dec
@@ -950,7 +948,7 @@ static void vc4_async_page_flip_fence_complete(struct dma_fence *fence,
struct dma_fence_cb *cb)
{
struct vc4_async_flip_state *flip_state =
- container_of(cb, struct vc4_async_flip_state, cb.fence);
+ container_of(cb, struct vc4_async_flip_state, cb);
vc4_async_page_flip_complete(flip_state);
dma_fence_put(fence);
@@ -961,16 +959,15 @@ static int vc4_async_set_fence_cb(struct drm_device *dev,
{
struct drm_framebuffer *fb = flip_state->fb;
struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
+ dma_fence_func_t async_page_flip_complete_function;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct dma_fence *fence;
int ret;
- if (vc4->gen == VC4_GEN_4) {
- struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
-
- return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
- vc4_async_page_flip_seqno_complete);
- }
+ if (vc4->gen == VC4_GEN_4)
+ async_page_flip_complete_function = vc4_async_page_flip_complete_with_cleanup;
+ else
+ async_page_flip_complete_function = vc4_async_page_flip_fence_complete;
ret = dma_resv_get_singleton(dma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
if (ret)
@@ -978,14 +975,14 @@ static int vc4_async_set_fence_cb(struct drm_device *dev,
/* If there's no fence, complete the page flip immediately */
if (!fence) {
- vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
+ async_page_flip_complete_function(fence, &flip_state->cb);
return 0;
}
/* If the fence has already been completed, complete the page flip */
- if (dma_fence_add_callback(fence, &flip_state->cb.fence,
- vc4_async_page_flip_fence_complete))
- vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
+ if (dma_fence_add_callback(fence, &flip_state->cb,
+ async_page_flip_complete_function))
+ async_page_flip_complete_function(fence, &flip_state->cb);
return 0;
}
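
With the seqno callback gone, both VC4 generations complete an async page flip through a plain dma_fence_cb; the hardware generation only selects which completion function is installed. The arming pattern, mirroring the hunk above (arm_flip_callback is an illustrative wrapper):

static void arm_flip_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
                              dma_fence_func_t complete)
{
        /* No fence to wait on: run the completion immediately. */
        if (!fence) {
                complete(NULL, cb);
                return;
        }

        /* A non-zero return means the fence has already signalled. */
        if (dma_fence_add_callback(fence, cb, complete))
                complete(fence, cb);
}
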
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 4a078ffd9f82..221d8e01d539 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -186,11 +186,6 @@ struct vc4_dev {
*/
struct vc4_perfmon *active_perfmon;
- /* List of struct vc4_seqno_cb for callbacks to be made from a
- * workqueue when the given seqno is passed.
- */
- struct list_head seqno_cb_list;
-
/* The memory used for storing binner tile alloc, tile state,
* and overflow memory allocations. This is freed when V3D
* powers down.
@@ -247,16 +242,6 @@ struct vc4_dev {
struct vc4_bo {
struct drm_gem_dma_object base;
- /* seqno of the last job to render using this BO. */
- uint64_t seqno;
-
- /* seqno of the last job to use the RCL to write to this BO.
- *
- * Note that this doesn't include binner overflow memory
- * writes.
- */
- uint64_t write_seqno;
-
bool t_format;
/* List entry for the BO's position in either
@@ -304,12 +289,6 @@ struct vc4_fence {
#define to_vc4_fence(_fence) \
container_of_const(_fence, struct vc4_fence, base)
-struct vc4_seqno_cb {
- struct work_struct work;
- uint64_t seqno;
- void (*func)(struct vc4_seqno_cb *cb);
-};
-
struct vc4_v3d {
struct vc4_dev *vc4;
struct platform_device *pdev;
@@ -695,9 +674,6 @@ struct vc4_exec_info {
/* Sequence number for this bin/render job. */
uint64_t seqno;
- /* Latest write_seqno of any BO that binning depends on. */
- uint64_t bin_dep_seqno;
-
struct dma_fence *fence;
/* Last current addresses the hardware was processing when the
@@ -1025,9 +1001,6 @@ void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
-int vc4_queue_seqno_cb(struct drm_device *dev,
- struct vc4_seqno_cb *cb, uint64_t seqno,
- void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 5eb293bdb363..779b22efe27b 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -800,7 +800,7 @@ dsi_esc_timing(u32 ns)
}
static void vc4_dsi_bridge_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *state)
+ struct drm_atomic_state *state)
{
struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
u32 disp0_ctrl;
@@ -811,7 +811,7 @@ static void vc4_dsi_bridge_disable(struct drm_bridge *bridge,
}
static void vc4_dsi_bridge_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *state)
+ struct drm_atomic_state *state)
{
struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
struct device *dev = &dsi->pdev->dev;
@@ -873,9 +873,8 @@ static bool vc4_dsi_bridge_mode_fixup(struct drm_bridge *bridge,
}
static void vc4_dsi_bridge_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = old_state->base.state;
struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
const struct drm_crtc_state *crtc_state;
struct device *dev = &dsi->pdev->dev;
@@ -1143,7 +1142,7 @@ static void vc4_dsi_bridge_pre_enable(struct drm_bridge *bridge,
}
static void vc4_dsi_bridge_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
bool debug_dump_regs = false;
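
The bridge hooks now receive the full struct drm_atomic_state directly, so the pre-enable path no longer has to reach it through old_bridge_state->base.state. A sketch of how a callback with the new prototype can still look up its objects (my_bridge_atomic_pre_enable is an illustrative name):

static void my_bridge_atomic_pre_enable(struct drm_bridge *bridge,
                                        struct drm_atomic_state *state)
{
        /* The atomic state is passed in directly under the new prototype. */
        struct drm_connector *connector =
                drm_atomic_get_new_connector_for_encoder(state,
                                                         bridge->encoder);

        if (!connector)
                return;

        /* Driver-specific host/DSI programming would follow here. */
}
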
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 22bccd69eb62..8125f87edc60 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -29,6 +29,7 @@
#include <linux/sched/signal.h>
#include <linux/dma-fence-array.h>
+#include <drm/drm_exec.h>
#include <drm/drm_syncobj.h>
#include "uapi/drm/vc4_drm.h"
@@ -552,45 +553,24 @@ vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
}
static void
-vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
+vc4_attach_fences(struct vc4_exec_info *exec)
{
struct vc4_bo *bo;
unsigned i;
for (i = 0; i < exec->bo_count; i++) {
bo = to_vc4_bo(exec->bo[i]);
- bo->seqno = seqno;
-
dma_resv_add_fence(bo->base.base.resv, exec->fence,
DMA_RESV_USAGE_READ);
}
- list_for_each_entry(bo, &exec->unref_list, unref_head) {
- bo->seqno = seqno;
- }
-
for (i = 0; i < exec->rcl_write_bo_count; i++) {
bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
- bo->write_seqno = seqno;
-
dma_resv_add_fence(bo->base.base.resv, exec->fence,
DMA_RESV_USAGE_WRITE);
}
}
-static void
-vc4_unlock_bo_reservations(struct drm_device *dev,
- struct vc4_exec_info *exec,
- struct ww_acquire_ctx *acquire_ctx)
-{
- int i;
-
- for (i = 0; i < exec->bo_count; i++)
- dma_resv_unlock(exec->bo[i]->resv);
-
- ww_acquire_fini(acquire_ctx);
-}
-
/* Takes the reservation lock on all the BOs being referenced, so that
* at queue submit time we can update the reservations.
*
@@ -599,70 +579,23 @@ vc4_unlock_bo_reservations(struct drm_device *dev,
* to vc4, so we don't attach dma-buf fences to them.
*/
static int
-vc4_lock_bo_reservations(struct drm_device *dev,
- struct vc4_exec_info *exec,
- struct ww_acquire_ctx *acquire_ctx)
+vc4_lock_bo_reservations(struct vc4_exec_info *exec,
+ struct drm_exec *exec_ctx)
{
- int contended_lock = -1;
- int i, ret;
- struct drm_gem_object *bo;
-
- ww_acquire_init(acquire_ctx, &reservation_ww_class);
-
-retry:
- if (contended_lock != -1) {
- bo = exec->bo[contended_lock];
- ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
- if (ret) {
- ww_acquire_done(acquire_ctx);
- return ret;
- }
- }
-
- for (i = 0; i < exec->bo_count; i++) {
- if (i == contended_lock)
- continue;
-
- bo = exec->bo[i];
-
- ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
- if (ret) {
- int j;
-
- for (j = 0; j < i; j++) {
- bo = exec->bo[j];
- dma_resv_unlock(bo->resv);
- }
-
- if (contended_lock != -1 && contended_lock >= i) {
- bo = exec->bo[contended_lock];
-
- dma_resv_unlock(bo->resv);
- }
-
- if (ret == -EDEADLK) {
- contended_lock = i;
- goto retry;
- }
-
- ww_acquire_done(acquire_ctx);
- return ret;
- }
- }
-
- ww_acquire_done(acquire_ctx);
+ int ret;
/* Reserve space for our shared (read-only) fence references,
* before we commit the CL to the hardware.
*/
- for (i = 0; i < exec->bo_count; i++) {
- bo = exec->bo[i];
+ drm_exec_init(exec_ctx, DRM_EXEC_INTERRUPTIBLE_WAIT, exec->bo_count);
+ drm_exec_until_all_locked(exec_ctx) {
+ ret = drm_exec_prepare_array(exec_ctx, exec->bo,
+ exec->bo_count, 1);
+ }
- ret = dma_resv_reserve_fences(bo->resv, 1);
- if (ret) {
- vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
- return ret;
- }
+ if (ret) {
+ drm_exec_fini(exec_ctx);
+ return ret;
}
return 0;
@@ -679,7 +612,7 @@ retry:
*/
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
- struct ww_acquire_ctx *acquire_ctx,
+ struct drm_exec *exec_ctx,
struct drm_syncobj *out_sync)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -706,9 +639,9 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
if (out_sync)
drm_syncobj_replace_fence(out_sync, exec->fence);
- vc4_update_bo_seqnos(exec, seqno);
+ vc4_attach_fences(exec);
- vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
+ drm_exec_fini(exec_ctx);
list_add_tail(&exec->head, &vc4->bin_job_list);
@@ -904,12 +837,6 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
goto fail;
}
- /* Block waiting on any previous rendering into the CS's VBO,
- * IB, or textures, so that pixels are actually written by the
- * time we try to read them.
- */
- ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);
-
fail:
kvfree(temp);
return ret;
@@ -968,7 +895,6 @@ void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
unsigned long irqflags;
- struct vc4_seqno_cb *cb, *cb_temp;
if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
@@ -985,46 +911,7 @@ vc4_job_handle_completed(struct vc4_dev *vc4)
spin_lock_irqsave(&vc4->job_lock, irqflags);
}
- list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
- if (cb->seqno <= vc4->finished_seqno) {
- list_del_init(&cb->work.entry);
- schedule_work(&cb->work);
- }
- }
-
- spin_unlock_irqrestore(&vc4->job_lock, irqflags);
-}
-
-static void vc4_seqno_cb_work(struct work_struct *work)
-{
- struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);
-
- cb->func(cb);
-}
-
-int vc4_queue_seqno_cb(struct drm_device *dev,
- struct vc4_seqno_cb *cb, uint64_t seqno,
- void (*func)(struct vc4_seqno_cb *cb))
-{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- unsigned long irqflags;
-
- if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
- return -ENODEV;
-
- cb->func = func;
- INIT_WORK(&cb->work, vc4_seqno_cb_work);
-
- spin_lock_irqsave(&vc4->job_lock, irqflags);
- if (seqno > vc4->finished_seqno) {
- cb->seqno = seqno;
- list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
- } else {
- schedule_work(&cb->work);
- }
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
-
- return 0;
}
/* Scheduled when any job has been completed, this walks the list of
@@ -1079,8 +966,10 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
struct drm_vc4_wait_bo *args = data;
- struct drm_gem_object *gem_obj;
- struct vc4_bo *bo;
+ unsigned long timeout_jiffies =
+ usecs_to_jiffies(div_u64(args->timeout_ns, 1000));
+ ktime_t start = ktime_get();
+ u64 delta_ns;
if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
@@ -1088,17 +977,18 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
- gem_obj = drm_gem_object_lookup(file_priv, args->handle);
- if (!gem_obj) {
- DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
- return -EINVAL;
- }
- bo = to_vc4_bo(gem_obj);
+ ret = drm_gem_dma_resv_wait(file_priv, args->handle,
+ true, timeout_jiffies);
- ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
- &args->timeout_ns);
+ /* Decrement the user's timeout, in case we got interrupted
+ * such that the ioctl will be restarted.
+ */
+ delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
+ if (delta_ns < args->timeout_ns)
+ args->timeout_ns -= delta_ns;
+ else
+ args->timeout_ns = 0;
- drm_gem_object_put(gem_obj);
return ret;
}
@@ -1123,7 +1013,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_vc4_submit_cl *args = data;
struct drm_syncobj *out_sync = NULL;
struct vc4_exec_info *exec;
- struct ww_acquire_ctx acquire_ctx;
+ struct drm_exec exec_ctx;
struct dma_fence *in_fence;
int ret = 0;
@@ -1216,7 +1106,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
if (ret)
goto fail;
- ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
+ ret = vc4_lock_bo_reservations(exec, &exec_ctx);
if (ret)
goto fail;
@@ -1224,7 +1114,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
out_sync = drm_syncobj_find(file_priv, args->out_sync);
if (!out_sync) {
ret = -EINVAL;
- goto fail;
+ goto fail_unreserve;
}
/* We replace the fence in out_sync in vc4_queue_submit since
@@ -1239,7 +1129,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
*/
exec->args = NULL;
- ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);
+ ret = vc4_queue_submit(dev, exec, &exec_ctx, out_sync);
/* The syncobj isn't part of the exec data and we need to free our
* reference even if job submission failed.
@@ -1248,13 +1138,15 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
drm_syncobj_put(out_sync);
if (ret)
- goto fail;
+ goto fail_unreserve;
/* Return the seqno for our job. */
args->seqno = vc4->emit_seqno;
return 0;
+fail_unreserve:
+ drm_exec_fini(&exec_ctx);
fail:
vc4_complete_exec(&vc4->base, exec);
@@ -1275,7 +1167,6 @@ int vc4_gem_init(struct drm_device *dev)
INIT_LIST_HEAD(&vc4->bin_job_list);
INIT_LIST_HEAD(&vc4->render_job_list);
INIT_LIST_HEAD(&vc4->job_done_list);
- INIT_LIST_HEAD(&vc4->seqno_cb_list);
spin_lock_init(&vc4->job_lock);
INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
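
The submit path swaps roughly sixty lines of open-coded ww_mutex acquisition for drm_exec, which retries on contention inside drm_exec_until_all_locked() and reserves the shared fence slots in the same pass. A sketch of the locking helper in isolation, following the hunk above (lock_job_bos is an illustrative name); the caller keeps the locks across fence attachment and drops them with drm_exec_fini(), as vc4_queue_submit() now does:

static int lock_job_bos(struct drm_exec *exec_ctx,
                        struct drm_gem_object **bos, unsigned int count)
{
        int ret;

        drm_exec_init(exec_ctx, DRM_EXEC_INTERRUPTIBLE_WAIT, count);
        drm_exec_until_all_locked(exec_ctx) {
                /* Locks every BO and reserves one fence slot per object;
                 * contention is retried by the surrounding loop. */
                ret = drm_exec_prepare_array(exec_ctx, bos, count, 1);
        }
        if (ret)
                drm_exec_fini(exec_ctx);

        return ret;
}
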
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 47d9ada98430..e878eddc9c3f 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -270,34 +270,6 @@ static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {}
#endif
-static int reset_pipe(struct drm_crtc *crtc,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_atomic_state *state;
- struct drm_crtc_state *crtc_state;
- int ret;
-
- state = drm_atomic_state_alloc(crtc->dev);
- if (!state)
- return -ENOMEM;
-
- state->acquire_ctx = ctx;
-
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- goto out;
- }
-
- crtc_state->connectors_changed = true;
-
- ret = drm_atomic_commit(state);
-out:
- drm_atomic_state_put(state);
-
- return ret;
-}
-
static int vc4_hdmi_reset_link(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx)
{
@@ -376,7 +348,7 @@ static int vc4_hdmi_reset_link(struct drm_connector *connector,
* would be perfectly happy if were to just reconfigure
* the SCDC settings on the fly.
*/
- return reset_pipe(crtc, ctx);
+ return drm_atomic_helper_reset_crtc(crtc, ctx);
}
static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
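
The local reset_pipe() helper disappears because the atomic helpers now export the same operation; under a held acquire context the link reset reduces to a single call (reset_link_crtc is an illustrative wrapper):

static int reset_link_crtc(struct drm_crtc *crtc,
                           struct drm_modeset_acquire_ctx *ctx)
{
        /* Equivalent of the removed reset_pipe(): mark the connectors as
         * changed and commit, forcing a full modeset on this CRTC. */
        return drm_atomic_helper_reset_crtc(crtc, ctx);
}
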
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index d608860d525f..c5e84d3494d2 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -2338,7 +2338,7 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
}
static int vc4_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state, bool flip)
{
struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 4eab069cda75..42acac05fe47 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -196,7 +196,7 @@ static int vc4_txp_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
vc4_txp_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 5bf134968ade..1e7bdda55698 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -284,9 +284,6 @@ validate_indexed_prim_list(VALIDATE_ARGS)
if (!ib)
return -EINVAL;
- exec->bin_dep_seqno = max(exec->bin_dep_seqno,
- to_vc4_bo(&ib->base)->write_seqno);
-
if (offset > ib->base.size ||
(ib->base.size - offset) / index_size < length) {
DRM_DEBUG("IB access overflow (%d + %d*%d > %zd)\n",
@@ -738,11 +735,6 @@ reloc_tex(struct vc4_exec_info *exec,
*validated_p0 = tex->dma_addr + p0;
- if (is_cs) {
- exec->bin_dep_seqno = max(exec->bin_dep_seqno,
- to_vc4_bo(&tex->base)->write_seqno);
- }
-
return true;
fail:
DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0);
@@ -904,9 +896,6 @@ validate_gl_shader_rec(struct drm_device *dev,
uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
uint32_t max_index;
- exec->bin_dep_seqno = max(exec->bin_dep_seqno,
- to_vc4_bo(&vbo->base)->write_seqno);
-
if (state->addr & 0x8)
stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 64baf2f22d9f..59a45e74a641 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -189,7 +189,7 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status virtio_gpu_conn_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct virtio_gpu_output *output =
drm_connector_to_virtio_gpu_output(connector);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 6a67c6297d58..2d88e390feb4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
+#include <linux/vgaarb.h>
#include <linux/wait.h>
#include <drm/clients/drm_client_setup.h>
@@ -41,6 +42,8 @@
#include "virtgpu_drv.h"
+#define PCI_DEVICE_ID_VIRTIO_GPU 0x1050
+
static const struct drm_driver driver;
static int virtio_gpu_modeset = -1;
@@ -162,7 +165,43 @@ static struct virtio_driver virtio_gpu_driver = {
.config_changed = virtio_gpu_config_changed
};
-module_virtio_driver(virtio_gpu_driver);
+static int __init virtio_gpu_driver_init(void)
+{
+ struct pci_dev *pdev;
+ int ret;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_REDHAT_QUMRANET,
+ PCI_DEVICE_ID_VIRTIO_GPU,
+ NULL);
+ if (pdev && pci_is_vga(pdev)) {
+ ret = vga_get_interruptible(pdev,
+ VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
+ if (ret) {
+ pci_dev_put(pdev);
+ return ret;
+ }
+ }
+
+ ret = register_virtio_driver(&virtio_gpu_driver);
+
+ if (pdev) {
+ if (pci_is_vga(pdev))
+ vga_put(pdev,
+ VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
+
+ pci_dev_put(pdev);
+ }
+
+ return ret;
+}
+
+static void __exit virtio_gpu_driver_exit(void)
+{
+ unregister_virtio_driver(&virtio_gpu_driver);
+}
+
+module_init(virtio_gpu_driver_init);
+module_exit(virtio_gpu_driver_exit);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio GPU driver");
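
Module init now holds the VGA arbiter's legacy I/O and memory ranges across driver registration so the firmware-framebuffer takeover cannot race with another VGA device. A generic sketch of that bracket (with_vga_arbiter and run_registration are illustrative placeholders; the resource flags and PCI helpers come from the hunk):

static int with_vga_arbiter(struct pci_dev *pdev, int (*run_registration)(void))
{
        bool is_vga = pdev && pci_is_vga(pdev);
        int ret;

        if (is_vga) {
                ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO |
                                                  VGA_RSRC_LEGACY_MEM);
                if (ret)
                        return ret;
        }

        ret = run_registration();

        if (is_vga)
                vga_put(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);

        return ret;
}
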
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index f42ca9d8ed10..f17660a71a3e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -310,6 +310,7 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
+struct virtio_gpu_object_array *virtio_gpu_panic_array_alloc(void);
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
@@ -334,12 +335,21 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo);
+int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+ uint64_t offset,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
uint32_t width, uint32_t height,
uint32_t x, uint32_t y,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
+void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ uint32_t x, uint32_t y,
+ uint32_t width, uint32_t height);
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
uint32_t x, uint32_t y,
@@ -408,6 +418,7 @@ void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
+void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev);
void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
int
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 5aab588fc400..dde8fc1a3689 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -148,6 +148,20 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
virtio_gpu_notify(vgdev);
}
+/* For drm panic */
+struct virtio_gpu_object_array *virtio_gpu_panic_array_alloc(void)
+{
+ struct virtio_gpu_object_array *objs;
+
+ objs = kmalloc(sizeof(struct virtio_gpu_object_array), GFP_ATOMIC);
+ if (!objs)
+ return NULL;
+
+ objs->nents = 0;
+ objs->total = 1;
+ return objs;
+}
+
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
struct virtio_gpu_object_array *objs;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 42aa554eca9f..a6f5a78f436a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -28,6 +28,8 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <linux/virtio_dma_buf.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_panic.h>
#include "virtgpu_drv.h"
@@ -127,6 +129,30 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
return ret;
}
+/* For drm panic */
+static int virtio_gpu_panic_update_dumb_bo(struct virtio_gpu_device *vgdev,
+ struct drm_plane_state *state,
+ struct drm_rect *rect)
+{
+ struct virtio_gpu_object *bo =
+ gem_to_virtio_gpu_obj(state->fb->obj[0]);
+ struct virtio_gpu_object_array *objs;
+ uint32_t w = rect->x2 - rect->x1;
+ uint32_t h = rect->y2 - rect->y1;
+ uint32_t x = rect->x1;
+ uint32_t y = rect->y1;
+ uint32_t off = x * state->fb->format->cpp[0] +
+ y * state->fb->pitches[0];
+
+ objs = virtio_gpu_panic_array_alloc();
+ if (!objs)
+ return -ENOMEM;
+ virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+ return virtio_gpu_panic_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
+ objs);
+}
+
static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
struct drm_plane_state *state,
struct drm_rect *rect)
@@ -150,6 +176,24 @@ static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
objs, NULL);
}
+/* For drm_panic */
+static void virtio_gpu_panic_resource_flush(struct drm_plane *plane,
+ uint32_t x, uint32_t y,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = plane->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_object *bo;
+
+ vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
+ virtio_gpu_panic_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
+ width, height);
+ virtio_gpu_panic_notify(vgdev);
+}
+
static void virtio_gpu_resource_flush(struct drm_plane *plane,
uint32_t x, uint32_t y,
uint32_t width, uint32_t height)
@@ -446,11 +490,63 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
virtio_gpu_cursor_ping(vgdev, output);
}
+static int virtio_drm_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct virtio_gpu_object *bo;
+
+ if (!plane->state || !plane->state->fb || !plane->state->visible)
+ return -ENODEV;
+
+ bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+
+ /* Only support mapped shmem bo */
+ if (virtio_gpu_is_vram(bo) || bo->base.base.import_attach || !bo->base.vaddr)
+ return -ENODEV;
+
+ iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+
+ sb->format = plane->state->fb->format;
+ sb->height = plane->state->fb->height;
+ sb->width = plane->state->fb->width;
+ sb->pitch[0] = plane->state->fb->pitches[0];
+ return 0;
+}
+
+static void virtio_panic_flush(struct drm_plane *plane)
+{
+ struct virtio_gpu_object *bo;
+ struct drm_device *dev = plane->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct drm_rect rect;
+
+ rect.x1 = 0;
+ rect.y1 = 0;
+ rect.x2 = plane->state->fb->width;
+ rect.y2 = plane->state->fb->height;
+
+ bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+
+ if (bo->dumb) {
+ if (virtio_gpu_panic_update_dumb_bo(vgdev, plane->state,
+ &rect))
+ return;
+ }
+
+ virtio_gpu_panic_resource_flush(plane,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
+}
+
static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
.prepare_fb = virtio_gpu_plane_prepare_fb,
.cleanup_fb = virtio_gpu_plane_cleanup_fb,
.atomic_check = virtio_gpu_plane_atomic_check,
.atomic_update = virtio_gpu_primary_plane_update,
+ .get_scanout_buffer = virtio_drm_get_scanout_buffer,
+ .panic_flush = virtio_panic_flush,
};
static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index f92133a01195..fe6a0b018571 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -184,6 +184,23 @@ int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
return 0;
}
+static void virtgpu_dma_buf_unmap(struct virtio_gpu_object *bo)
+{
+ struct dma_buf_attachment *attach = bo->base.base.import_attach;
+
+ dma_resv_assert_held(attach->dmabuf->resv);
+
+ if (bo->created) {
+ virtio_gpu_detach_object_fenced(bo);
+
+ if (bo->sgt)
+ dma_buf_unmap_attachment(attach, bo->sgt,
+ DMA_BIDIRECTIONAL);
+
+ bo->sgt = NULL;
+ }
+}
+
static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
@@ -194,13 +211,7 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
struct dma_buf *dmabuf = attach->dmabuf;
dma_resv_lock(dmabuf->resv, NULL);
-
- virtio_gpu_detach_object_fenced(bo);
-
- if (bo->sgt)
- dma_buf_unmap_attachment(attach, bo->sgt,
- DMA_BIDIRECTIONAL);
-
+ virtgpu_dma_buf_unmap(bo);
dma_resv_unlock(dmabuf->resv);
dma_buf_detach(dmabuf, attach);
@@ -250,7 +261,6 @@ static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
virtio_gpu_cmd_resource_create_blob(vgdev, bo, &params,
ents, nents);
bo->guest_blob = true;
- bo->attached = true;
dma_buf_unpin(attach);
dma_resv_unlock(resv);
@@ -274,15 +284,7 @@ static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
struct drm_gem_object *obj = attach->importer_priv;
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
- if (bo->created && kref_read(&obj->refcount)) {
- virtio_gpu_detach_object_fenced(bo);
-
- if (bo->sgt)
- dma_buf_unmap_attachment(attach, bo->sgt,
- DMA_BIDIRECTIONAL);
-
- bo->sgt = NULL;
- }
+ virtgpu_dma_buf_unmap(bo);
}
static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index ad91624df42d..55a15e247dd1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -86,6 +86,22 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
vgdev->vbufs = NULL;
}
+/* For drm_panic */
+static struct virtio_gpu_vbuffer*
+virtio_gpu_panic_get_vbuf(struct virtio_gpu_device *vgdev, int size)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+
+ vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_ATOMIC);
+
+ vbuf->buf = (void *)vbuf + sizeof(*vbuf);
+ vbuf->size = size;
+ vbuf->resp_cb = NULL;
+ vbuf->resp_size = sizeof(struct virtio_gpu_ctrl_hdr);
+ vbuf->resp_buf = (void *)vbuf->buf + size;
+ return vbuf;
+}
+
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
int size, int resp_size, void *resp_buf,
@@ -137,6 +153,18 @@ virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
return (struct virtio_gpu_update_cursor *)vbuf->buf;
}
+/* For drm_panic */
+static void *virtio_gpu_panic_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int cmd_size)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+
+ vbuf = virtio_gpu_panic_get_vbuf(vgdev, cmd_size);
+ *vbuffer_p = vbuf;
+ return (struct virtio_gpu_command *)vbuf->buf;
+}
+
static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
virtio_gpu_resp_cb cb,
struct virtio_gpu_vbuffer **vbuffer_p,
@@ -311,6 +339,34 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
return sgt;
}
+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ int elemcnt,
+ struct scatterlist **sgs,
+ int outcnt,
+ int incnt)
+{
+ struct virtqueue *vq = vgdev->ctrlq.vq;
+ int ret;
+
+ if (vgdev->has_indirect)
+ elemcnt = 1;
+
+ if (vq->num_free < elemcnt)
+ return -ENOMEM;
+
+ ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
+ WARN_ON(ret);
+
+ vbuf->seqno = ++vgdev->ctrlq.seqno;
+ trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
+
+ atomic_inc(&vgdev->pending_commands);
+
+ return 0;
+}
+
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
struct virtio_gpu_fence *fence,
@@ -368,6 +424,32 @@ again:
return 0;
}
+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct scatterlist *sgs[3], vcmd, vresp;
+ int elemcnt = 0, outcnt = 0, incnt = 0;
+
+ /* set up vcmd */
+ sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+ elemcnt++;
+ sgs[outcnt] = &vcmd;
+ outcnt++;
+
+ /* set up vresp */
+ if (vbuf->resp_size) {
+ sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+ elemcnt++;
+ sgs[outcnt + incnt] = &vresp;
+ incnt++;
+ }
+
+ return virtio_gpu_panic_queue_ctrl_sgs(vgdev, vbuf,
+ elemcnt, sgs,
+ outcnt, incnt);
+}
+
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
struct virtio_gpu_fence *fence)
@@ -422,6 +504,21 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
return ret;
}
+/* For drm_panic */
+void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev)
+{
+ bool notify;
+
+ if (!atomic_read(&vgdev->pending_commands))
+ return;
+
+ atomic_set(&vgdev->pending_commands, 0);
+ notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+
+ if (notify)
+ virtqueue_notify(vgdev->ctrlq.vq);
+}
+
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
bool notify;
@@ -567,6 +664,29 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
+/* For drm_panic */
+void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ uint32_t x, uint32_t y,
+ uint32_t width, uint32_t height)
+{
+ struct virtio_gpu_resource_flush *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = NULL;
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
+
+ virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
+}
+
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
uint32_t x, uint32_t y,
@@ -591,6 +711,37 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
+/* For drm_panic */
+int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+ uint64_t offset,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ struct virtio_gpu_transfer_to_host_2d *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+ bo->base.sgt, DMA_TO_DEVICE);
+
+ cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->offset = cpu_to_le64(offset);
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
+
+ return virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
+}
+
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
uint32_t width, uint32_t height,
@@ -1300,6 +1451,9 @@ virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
bo->created = true;
+
+ if (nents)
+ bo->attached = true;
}
void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
index 25df81c02783..5ad3b7c6f73c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vram.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -37,6 +37,7 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
unsigned long vm_size = vma->vm_end - vma->vm_start;
+ unsigned long vm_end;
if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
return -EINVAL;
@@ -56,12 +57,14 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- /* Partial mappings of GEM buffers don't happen much in practice. */
- if (vm_size != vram->vram_node.size)
+ if (check_add_overflow(vma->vm_pgoff << PAGE_SHIFT, vm_size, &vm_end))
+ return -EINVAL;
+
+ if (vm_end > vram->vram_node.size)
return -EINVAL;
ret = io_remap_pfn_range(vma, vma->vm_start,
- vram->vram_node.start >> PAGE_SHIFT,
+ (vram->vram_node.start >> PAGE_SHIFT) + vma->vm_pgoff,
vm_size, vma->vm_page_prot);
return ret;
}
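The mmap change above drops the old requirement that a mapping cover the whole VRAM node and instead validates the requested range with check_add_overflow() from <linux/overflow.h>, which returns true when the addition wraps and otherwise stores the sum in the output argument. A minimal sketch of the same range check, using hypothetical names rather than the virtio-gpu ones:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/overflow.h>

/*
 * Sketch only: validate that a requested mapping of @len bytes starting at
 * page offset @pgoff fits inside a backing region of @node_size bytes.
 * Returns 0 when the range is acceptable, -EINVAL otherwise.
 */
static int range_fits(unsigned long pgoff, unsigned long len,
		      unsigned long node_size)
{
	unsigned long end;

	/* A true return value means the addition overflowed. */
	if (check_add_overflow(pgoff << PAGE_SHIFT, len, &end))
		return -EINVAL;

	return end > node_size ? -EINVAL : 0;
}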
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 28a57ae109fc..cf229aec71c3 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -4,6 +4,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -83,9 +84,7 @@ static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
int *max_error, ktime_t *vblank_time,
bool in_vblank_irq)
{
- struct drm_device *dev = crtc->dev;
- struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
- struct vkms_output *output = &vkmsdev->output;
+ struct vkms_output *output = drm_crtc_to_vkms_output(crtc);
struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
if (!READ_ONCE(vblank->enabled)) {
@@ -270,25 +269,29 @@ static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
.atomic_disable = vkms_crtc_atomic_disable,
};
-int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_plane *primary, struct drm_plane *cursor)
+struct vkms_output *vkms_crtc_init(struct drm_device *dev, struct drm_plane *primary,
+ struct drm_plane *cursor)
{
- struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc);
+ struct vkms_output *vkms_out;
+ struct drm_crtc *crtc;
int ret;
- ret = drmm_crtc_init_with_planes(dev, crtc, primary, cursor,
- &vkms_crtc_funcs, NULL);
- if (ret) {
- DRM_ERROR("Failed to init CRTC\n");
- return ret;
+ vkms_out = drmm_crtc_alloc_with_planes(dev, struct vkms_output, crtc,
+ primary, cursor,
+ &vkms_crtc_funcs, NULL);
+ if (IS_ERR(vkms_out)) {
+ DRM_DEV_ERROR(dev->dev, "Failed to init CRTC\n");
+ return vkms_out;
}
+ crtc = &vkms_out->crtc;
+
drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
ret = drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE);
if (ret) {
DRM_ERROR("Failed to set gamma size\n");
- return ret;
+ return ERR_PTR(ret);
}
drm_crtc_enable_color_mgmt(crtc, 0, false, VKMS_LUT_SIZE);
@@ -296,9 +299,11 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
spin_lock_init(&vkms_out->lock);
spin_lock_init(&vkms_out->composer_lock);
- vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
+ vkms_out->composer_workq = drmm_alloc_ordered_workqueue(dev, "vkms_composer", 0);
+ if (IS_ERR(vkms_out->composer_workq))
+ return ERR_CAST(vkms_out->composer_workq);
if (!vkms_out->composer_workq)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- return ret;
+ return vkms_out;
}
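vkms_crtc_init() now returns the vkms_output allocated by drmm_crtc_alloc_with_planes(), which allocates the driver structure with the CRTC embedded in it and ties its lifetime to the drm_device, so no explicit release callback is required. A minimal sketch of that allocation pattern for a hypothetical driver structure (the demo_* names are illustrative, not part of vkms):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_managed.h>

struct demo_output {
	struct drm_crtc crtc;		/* must be embedded, not a pointer */
	/* driver-private members follow */
};

static const struct drm_crtc_funcs demo_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	/* no .destroy: drmm-managed allocations are released with the device */
};

static struct demo_output *demo_crtc_create(struct drm_device *dev,
					    struct drm_plane *primary,
					    struct drm_plane *cursor)
{
	struct demo_output *out;

	/* Allocates demo_output and initializes out->crtc in one step. */
	out = drmm_crtc_alloc_with_planes(dev, struct demo_output, crtc,
					  primary, cursor,
					  &demo_crtc_funcs, NULL);
	return out;	/* ERR_PTR() on failure */
}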
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index e0409aba9349..b6de91134a22 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -53,14 +53,6 @@ MODULE_PARM_DESC(enable_overlay, "Enable/Disable overlay support");
DEFINE_DRM_GEM_FOPS(vkms_driver_fops);
-static void vkms_release(struct drm_device *dev)
-{
- struct vkms_device *vkms = drm_device_to_vkms_device(dev);
-
- if (vkms->output.composer_workq)
- destroy_workqueue(vkms->output.composer_workq);
-}
-
static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
@@ -108,7 +100,6 @@ static const struct drm_debugfs_info vkms_config_debugfs_list[] = {
static const struct drm_driver vkms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
- .release = vkms_release,
.fops = &vkms_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
DRM_FBDEV_SHMEM_DRIVER_OPS,
@@ -244,17 +235,19 @@ static int __init vkms_init(void)
if (!config)
return -ENOMEM;
- default_config = config;
-
config->cursor = enable_cursor;
config->writeback = enable_writeback;
config->overlay = enable_overlay;
ret = vkms_create(config);
- if (ret)
+ if (ret) {
kfree(config);
+ return ret;
+ }
- return ret;
+ default_config = config;
+
+ return 0;
}
static void vkms_destroy(struct vkms_config *config)
@@ -278,9 +271,10 @@ static void vkms_destroy(struct vkms_config *config)
static void __exit vkms_exit(void)
{
- if (default_config->dev)
- vkms_destroy(default_config);
+ if (!default_config)
+ return;
+ vkms_destroy(default_config);
kfree(default_config);
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 00541eff3d1b..abbb652be2b5 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -176,9 +176,8 @@ struct vkms_crtc_state {
*/
struct vkms_output {
struct drm_crtc crtc;
- struct drm_encoder encoder;
- struct drm_connector connector;
struct drm_writeback_connector wb_connector;
+ struct drm_encoder wb_encoder;
struct hrtimer vblank_hrtimer;
ktime_t period_ns;
struct workqueue_struct *composer_workq;
@@ -216,7 +215,6 @@ struct vkms_config {
struct vkms_device {
struct drm_device drm;
struct platform_device *platform;
- struct vkms_output output;
const struct vkms_config *config;
};
@@ -243,8 +241,9 @@ struct vkms_device {
* @primary: primary plane to attach to the CRTC
* @cursor: plane to attach to the CRTC
*/
-int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_plane *primary, struct drm_plane *cursor);
+struct vkms_output *vkms_crtc_init(struct drm_device *dev,
+ struct drm_plane *primary,
+ struct drm_plane *cursor);
/**
* vkms_output_init() - Initialize all sub-components needed for a VKMS device.
@@ -275,6 +274,6 @@ void vkms_set_composer(struct vkms_output *out, bool enabled);
void vkms_writeback_row(struct vkms_writeback_job *wb, const struct line_buffer *src_buffer, int y);
/* Writeback */
-int vkms_enable_writeback_connector(struct vkms_device *vkmsdev);
+int vkms_enable_writeback_connector(struct vkms_device *vkmsdev, struct vkms_output *vkms_out);
#endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index 39b1d7c97d45..30a64ecca87c 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -253,6 +253,26 @@ static void XRGB8888_read_line(const struct vkms_plane_state *plane, int x_start
}
}
+static void ABGR8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
+ enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ while (out_pixel < end) {
+ u8 *px = (u8 *)src_pixels;
+ /* Switch blue and red pixels. */
+ *out_pixel = argb_u16_from_u8888(px[3], px[0], px[1], px[2]);
+ out_pixel += 1;
+ src_pixels += step;
+ }
+}
+
static void ARGB16161616_read_line(const struct vkms_plane_state *plane, int x_start,
int y_start, enum pixel_read_direction direction, int count,
struct pixel_argb_u16 out_pixel[])
@@ -344,6 +364,14 @@ static void argb_u16_to_XRGB8888(u8 *out_pixel, const struct pixel_argb_u16 *in_
out_pixel[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
}
+static void argb_u16_to_ABGR8888(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
+{
+ out_pixel[3] = DIV_ROUND_CLOSEST(in_pixel->a, 257);
+ out_pixel[2] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
+ out_pixel[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
+ out_pixel[0] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
+}
+
static void argb_u16_to_ARGB16161616(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
__le16 *pixel = (__le16 *)out_pixel;
@@ -420,6 +448,8 @@ pixel_read_line_t get_pixel_read_line_function(u32 format)
return &ARGB8888_read_line;
case DRM_FORMAT_XRGB8888:
return &XRGB8888_read_line;
+ case DRM_FORMAT_ABGR8888:
+ return &ABGR8888_read_line;
case DRM_FORMAT_ARGB16161616:
return &ARGB16161616_read_line;
case DRM_FORMAT_XRGB16161616:
@@ -453,6 +483,8 @@ pixel_write_t get_pixel_write_function(u32 format)
return &argb_u16_to_ARGB8888;
case DRM_FORMAT_XRGB8888:
return &argb_u16_to_XRGB8888;
+ case DRM_FORMAT_ABGR8888:
+ return &argb_u16_to_ABGR8888;
case DRM_FORMAT_ARGB16161616:
return &argb_u16_to_ARGB16161616;
case DRM_FORMAT_XRGB16161616:
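The new ABGR8888 handlers build on VKMS's internal 16-bit-per-channel representation: the read side presumably widens each 8-bit channel by multiplying by 257 (0x101, the byte replicated into both halves of the 16-bit value), and the write side narrows with DIV_ROUND_CLOSEST(x, 257), so an 8-bit round trip is lossless. A small self-contained check of that arithmetic, independent of the vkms helpers:

#include <linux/bug.h>
#include <linux/math.h>	/* DIV_ROUND_CLOSEST() */
#include <linux/types.h>

/* Widen an 8-bit channel to 16 bits: 0x00 -> 0x0000, 0xff -> 0xffff. */
static inline u16 chan_8_to_16(u8 v)
{
	return (u16)v * 257;	/* 257 == 0x101: replicate the byte */
}

/* Narrow back, rounding to nearest, so chan_16_to_8(chan_8_to_16(v)) == v. */
static inline u8 chan_16_to_8(u16 v)
{
	return DIV_ROUND_CLOSEST(v, 257);
}

static void chan_roundtrip_selfcheck(void)
{
	unsigned int v;

	for (v = 0; v <= 0xff; v++)
		WARN_ON(chan_16_to_8(chan_8_to_16(v)) != v);
}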
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 8f4bd5aef087..22f0d678af3a 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -3,20 +3,16 @@
#include "vkms_drv.h"
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
static const struct drm_connector_funcs vkms_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static const struct drm_encoder_funcs vkms_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int vkms_conn_get_modes(struct drm_connector *connector)
{
int count;
@@ -34,11 +30,10 @@ static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
int vkms_output_init(struct vkms_device *vkmsdev)
{
- struct vkms_output *output = &vkmsdev->output;
struct drm_device *dev = &vkmsdev->drm;
- struct drm_connector *connector = &output->connector;
- struct drm_encoder *encoder = &output->encoder;
- struct drm_crtc *crtc = &output->crtc;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct vkms_output *output;
struct vkms_plane *primary, *overlay, *cursor = NULL;
int ret;
int writeback;
@@ -60,9 +55,12 @@ int vkms_output_init(struct vkms_device *vkmsdev)
return PTR_ERR(cursor);
}
- ret = vkms_crtc_init(dev, crtc, &primary->base, &cursor->base);
- if (ret)
- return ret;
+ output = vkms_crtc_init(dev, &primary->base,
+ cursor ? &cursor->base : NULL);
+ if (IS_ERR(output)) {
+ DRM_ERROR("Failed to allocate CRTC\n");
+ return PTR_ERR(output);
+ }
if (vkmsdev->config->overlay) {
for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
@@ -71,12 +69,18 @@ int vkms_output_init(struct vkms_device *vkmsdev)
DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
return PTR_ERR(overlay);
}
- overlay->base.possible_crtcs = drm_crtc_mask(crtc);
+ overlay->base.possible_crtcs = drm_crtc_mask(&output->crtc);
}
}
- ret = drm_connector_init(dev, connector, &vkms_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL);
+ connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector) {
+ DRM_ERROR("Failed to allocate connector\n");
+ return -ENOMEM;
+ }
+
+ ret = drmm_connector_init(dev, connector, &vkms_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL, NULL);
if (ret) {
DRM_ERROR("Failed to init connector\n");
return ret;
@@ -84,35 +88,34 @@ int vkms_output_init(struct vkms_device *vkmsdev)
drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
- ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
+ encoder = drmm_kzalloc(dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder) {
+ DRM_ERROR("Failed to allocate encoder\n");
+ return -ENOMEM;
+ }
+ ret = drmm_encoder_init(dev, encoder, NULL,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret) {
DRM_ERROR("Failed to init encoder\n");
- goto err_encoder;
+ return ret;
}
- encoder->possible_crtcs = drm_crtc_mask(crtc);
+ encoder->possible_crtcs = drm_crtc_mask(&output->crtc);
+ /* Attach the encoder and the connector */
ret = drm_connector_attach_encoder(connector, encoder);
if (ret) {
DRM_ERROR("Failed to attach connector to encoder\n");
- goto err_attach;
+ return ret;
}
+ /* Initialize the writeback component */
if (vkmsdev->config->writeback) {
- writeback = vkms_enable_writeback_connector(vkmsdev);
+ writeback = vkms_enable_writeback_connector(vkmsdev, output);
if (writeback)
DRM_ERROR("Failed to init writeback connector\n");
}
drm_mode_config_reset(dev);
- return 0;
-
-err_attach:
- drm_encoder_cleanup(encoder);
-
-err_encoder:
- drm_connector_cleanup(connector);
-
return ret;
}
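With the connector and encoder switched to drmm_kzalloc() plus drmm_connector_init()/drmm_encoder_init(), their teardown is bound to the drm_device lifetime, which is why the err_attach/err_encoder unwind labels could be dropped. A condensed sketch of that managed-init shape with illustrative names (not the vkms code itself):

#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_managed.h>
#include <linux/slab.h>

static int demo_outputs_init(struct drm_device *dev,
			     const struct drm_connector_funcs *conn_funcs,
			     struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	int ret;

	connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
	encoder = drmm_kzalloc(dev, sizeof(*encoder), GFP_KERNEL);
	if (!connector || !encoder)
		return -ENOMEM;	/* drmm allocations are freed with the device */

	ret = drmm_connector_init(dev, connector, conn_funcs,
				  DRM_MODE_CONNECTOR_VIRTUAL, NULL);
	if (ret)
		return ret;	/* nothing to unwind by hand */

	ret = drmm_encoder_init(dev, encoder, NULL,
				DRM_MODE_ENCODER_VIRTUAL, NULL);
	if (ret)
		return ret;

	encoder->possible_crtcs = drm_crtc_mask(crtc);
	return drm_connector_attach_encoder(connector, encoder);
}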
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index e2fce471870f..e34f8c7f83c3 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -15,6 +15,7 @@
static const u32 vkms_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB16161616,
DRM_FORMAT_ARGB16161616,
DRM_FORMAT_RGB565
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index 79918b44fedd..fe163271d5b5 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -17,6 +17,7 @@
static const u32 vkms_wb_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB16161616,
DRM_FORMAT_ARGB16161616,
DRM_FORMAT_RGB565
@@ -24,7 +25,6 @@ static const u32 vkms_wb_formats[] = {
static const struct drm_connector_funcs vkms_wb_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -106,7 +106,9 @@ static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector,
struct drm_writeback_job *job)
{
struct vkms_writeback_job *vkmsjob = job->priv;
- struct vkms_device *vkmsdev;
+ struct vkms_output *vkms_output = container_of(connector,
+ struct vkms_output,
+ wb_connector);
if (!job->fb)
return;
@@ -115,8 +117,7 @@ static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector,
drm_framebuffer_put(vkmsjob->wb_frame_info.fb);
- vkmsdev = drm_device_to_vkms_device(job->fb->dev);
- vkms_set_composer(&vkmsdev->output, false);
+ vkms_set_composer(vkms_output, false);
kfree(vkmsjob);
}
@@ -125,8 +126,7 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn,
{
struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
conn);
- struct vkms_device *vkmsdev = drm_device_to_vkms_device(conn->dev);
- struct vkms_output *output = &vkmsdev->output;
+ struct vkms_output *output = drm_crtc_to_vkms_output(connector_state->crtc);
struct drm_writeback_connector *wb_conn = &output->wb_connector;
struct drm_connector_state *conn_state = wb_conn->base.state;
struct vkms_crtc_state *crtc_state = output->composer_state;
@@ -140,7 +140,7 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn,
if (!conn_state)
return;
- vkms_set_composer(&vkmsdev->output, true);
+ vkms_set_composer(output, true);
active_wb = conn_state->writeback_job->priv;
wb_frame_info = &active_wb->wb_frame_info;
@@ -163,16 +163,23 @@ static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = {
.atomic_check = vkms_wb_atomic_check,
};
-int vkms_enable_writeback_connector(struct vkms_device *vkmsdev)
+int vkms_enable_writeback_connector(struct vkms_device *vkmsdev,
+ struct vkms_output *vkms_output)
{
- struct drm_writeback_connector *wb = &vkmsdev->output.wb_connector;
+ struct drm_writeback_connector *wb = &vkms_output->wb_connector;
+ int ret;
+
+ ret = drmm_encoder_init(&vkmsdev->drm, &vkms_output->wb_encoder,
+ NULL, DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret)
+ return ret;
+ vkms_output->wb_encoder.possible_crtcs |= drm_crtc_mask(&vkms_output->crtc);
drm_connector_helper_add(&wb->base, &vkms_wb_conn_helper_funcs);
- return drm_writeback_connector_init(&vkmsdev->drm, wb,
- &vkms_wb_connector_funcs,
- NULL,
- vkms_wb_formats,
- ARRAY_SIZE(vkms_wb_formats),
- 1);
+ return drmm_writeback_connector_init(&vkmsdev->drm, wb,
+ &vkms_wb_connector_funcs,
+ &vkms_output->wb_encoder,
+ vkms_wb_formats,
+ ARRAY_SIZE(vkms_wb_formats));
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 800a79e035ed..1912ac1cde6d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2670,7 +2670,7 @@ out_unref:
* Returns MODE_OK on success, or a drm_mode_status error code.
*/
enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
enum drm_mode_status ret;
struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 2a6c6d6581e0..4eab581883e2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -435,7 +435,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
int increment,
struct vmw_kms_dirty *dirty);
enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
int vmw_connector_get_modes(struct drm_connector *connector);
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 114a75069e1c..f5d2ed1b0a72 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -839,7 +839,7 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
static enum drm_mode_status
vmw_stdu_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
enum drm_mode_status ret;
struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
index a49561e9f3c3..a79ad2da070c 100644
--- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
@@ -51,6 +51,10 @@
/* Common to all OA units */
#define OA_OACONTROL_REPORT_BC_MASK REG_GENMASK(9, 9)
#define OA_OACONTROL_COUNTER_SIZE_MASK REG_GENMASK(8, 8)
+#define OAG_OACONTROL_USED_BITS \
+ (OAG_OACONTROL_OA_PES_DISAG_EN | OAG_OACONTROL_OA_CCS_SELECT_MASK | \
+ OAG_OACONTROL_OA_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE | \
+ OA_OACONTROL_REPORT_BC_MASK | OA_OACONTROL_COUNTER_SIZE_MASK)
#define OAG_OA_DEBUG XE_REG(0xdaf8, XE_REG_OPTION_MASKED)
#define OAG_OA_DEBUG_DISABLE_MMIO_TRG REG_BIT(14)
@@ -78,6 +82,8 @@
#define OAM_CONTEXT_CONTROL_OFFSET (0x1bc)
#define OAM_CONTROL_OFFSET (0x194)
#define OAM_CONTROL_COUNTER_SEL_MASK REG_GENMASK(3, 1)
+#define OAM_OACONTROL_USED_BITS \
+ (OAM_CONTROL_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE)
#define OAM_DEBUG_OFFSET (0x198)
#define OAM_STATUS_OFFSET (0x19c)
#define OAM_MMIO_TRG_OFFSET (0x1d0)
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 81dc7795c065..39fe485d2085 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -119,11 +119,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
drm_puts(&p, "\n**** GuC CT ****\n");
xe_guc_ct_snapshot_print(ss->guc.ct, &p);
- /*
- * Don't add a new section header here because the mesa debug decoder
- * tool expects the context information to be in the 'GuC CT' section.
- */
- /* drm_puts(&p, "\n**** Contexts ****\n"); */
+ drm_puts(&p, "\n**** Contexts ****\n");
xe_guc_exec_queue_snapshot_print(ss->ge, &p);
drm_puts(&p, "\n**** Job ****\n");
@@ -395,42 +391,34 @@ int xe_devcoredump_init(struct xe_device *xe)
/**
* xe_print_blob_ascii85 - print a BLOB to some useful location in ASCII85
*
- * The output is split to multiple lines because some print targets, e.g. dmesg
- * cannot handle arbitrarily long lines. Note also that printing to dmesg in
- * piece-meal fashion is not possible, each separate call to drm_puts() has a
- * line-feed automatically added! Therefore, the entire output line must be
- * constructed in a local buffer first, then printed in one atomic output call.
+ * The output is split into multiple calls to drm_puts() because some print
+ * targets, e.g. dmesg, cannot handle arbitrarily long lines. These targets may
+ * add newlines, as is the case with dmesg: each drm_puts() call creates a
+ * separate line.
*
* There is also a scheduler yield call to prevent the 'task has been stuck for
* 120s' kernel hang check feature from firing when printing to a slow target
* such as dmesg over a serial port.
*
- * TODO: Add compression prior to the ASCII85 encoding to shrink huge buffers down.
- *
* @p: the printer object to output to
* @prefix: optional prefix to add to output string
+ * @suffix: optional suffix character to append at the very end. Pass 0 to
+ * omit it, which is useful when dumping data to @p across multiple
+ * calls

* @blob: the Binary Large OBject to dump out
* @offset: offset in bytes to skip from the front of the BLOB, must be a multiple of sizeof(u32)
* @size: the size in bytes of the BLOB, must be a multiple of sizeof(u32)
*/
-void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
const void *blob, size_t offset, size_t size)
{
const u32 *blob32 = (const u32 *)blob;
char buff[ASCII85_BUFSZ], *line_buff;
size_t line_pos = 0;
- /*
- * Splitting blobs across multiple lines is not compatible with the mesa
- * debug decoder tool. Note that even dropping the explicit '\n' below
- * doesn't help because the GuC log is so big some underlying implementation
- * still splits the lines at 512K characters. So just bail completely for
- * the moment.
- */
- return;
-
#define DMESG_MAX_LINE_LEN 800
-#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "\n\0" */
+ /* Always leave space for the suffix char and the \0 */
+#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "<suffix>\0" */
if (size & 3)
drm_printf(p, "Size not word aligned: %zu", size);
@@ -462,7 +450,6 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
line_pos += strlen(line_buff + line_pos);
if ((line_pos + MIN_SPACE) >= DMESG_MAX_LINE_LEN) {
- line_buff[line_pos++] = '\n';
line_buff[line_pos++] = 0;
drm_puts(p, line_buff);
@@ -474,10 +461,11 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
}
}
+ if (suffix)
+ line_buff[line_pos++] = suffix;
+
if (line_pos) {
- line_buff[line_pos++] = '\n';
line_buff[line_pos++] = 0;
-
drm_puts(p, line_buff);
}
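The encoder behind xe_print_blob_ascii85() is the kernel's <linux/ascii85.h> helper, which turns one u32 into an ASCII85 group of at most ASCII85_BUFSZ - 1 characters (a zero word becomes a single 'z'). A rough sketch of such an encoding loop, assuming ascii85_encode() returns the NUL-terminated group it wrote into the scratch buffer; the helper below is hypothetical and not the xe implementation:

#include <linux/ascii85.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Sketch: encode a u32 array as one contiguous ASCII85 string. */
static char *encode_words_ascii85(const u32 *words, size_t count)
{
	char buf[ASCII85_BUFSZ];
	size_t i, pos = 0;
	char *out;

	/* Worst case: every word expands to ASCII85_BUFSZ - 1 characters. */
	out = kmalloc(count * (ASCII85_BUFSZ - 1) + 1, GFP_KERNEL);
	if (!out)
		return NULL;

	for (i = 0; i < count; i++) {
		/* Assumed: ascii85_encode() NUL-terminates the group in buf. */
		strcpy(out + pos, ascii85_encode(words[i], buf));
		pos += strlen(out + pos);
	}
	out[pos] = '\0';
	return out;
}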
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
index 6a17e6d60102..5391a80a4d1b 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump.h
@@ -29,7 +29,7 @@ static inline int xe_devcoredump_init(struct xe_device *xe)
}
#endif
-void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
const void *blob, size_t offset, size_t size);
#endif
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 4e1839b483a0..fc4a49f25c09 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -1003,7 +1003,8 @@ static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
* re-probe (unbind + bind).
* In this state every IOCTL will be blocked so the GT cannot be used.
* In general it will be called upon any critical error such as gt reset
- * failure or guc loading failure.
+ * failure or guc loading failure. Userspace will be notified of this state
+ * through a device wedged uevent.
* If xe.wedged module parameter is set to 2, this function will be called
* on every single execution timeout (a.k.a. GPU hang) right after devcoredump
* snapshot capture. In this mode, GT reset won't be attempted so the state of
@@ -1033,6 +1034,10 @@ void xe_device_declare_wedged(struct xe_device *xe)
"IOCTLs and executions are blocked. Only a rebind may clear the failure\n"
"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
dev_name(xe->drm.dev));
+
+ /* Notify userspace of wedged device */
+ drm_dev_wedged_event(&xe->drm,
+ DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET);
}
for_each_gt(gt, xe, id)
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 63f30b6df70b..2d4874d2b922 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -135,8 +135,8 @@ void xe_drm_client_add_bo(struct xe_drm_client *client,
XE_WARN_ON(bo->client);
XE_WARN_ON(!list_empty(&bo->client_link));
- spin_lock(&client->bos_lock);
bo->client = xe_drm_client_get(client);
+ spin_lock(&client->bos_lock);
list_add_tail(&bo->client_link, &client->bos_list);
spin_unlock(&client->bos_lock);
}
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 5ef96deaa881..66001af5cf55 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -336,6 +336,15 @@ static const struct drm_sched_backend_ops drm_sched_ops = {
static int execlist_exec_queue_init(struct xe_exec_queue *q)
{
struct drm_gpu_scheduler *sched;
+ const struct drm_sched_init_args args = {
+ .ops = &drm_sched_ops,
+ .num_rqs = 1,
+ .credit_limit = q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES,
+ .hang_limit = XE_SCHED_HANG_LIMIT,
+ .timeout = XE_SCHED_JOB_TIMEOUT,
+ .name = q->hwe->name,
+ .dev = gt_to_xe(q->gt)->drm.dev,
+ };
struct xe_execlist_exec_queue *exl;
struct xe_device *xe = gt_to_xe(q->gt);
int err;
@@ -350,11 +359,7 @@ static int execlist_exec_queue_init(struct xe_exec_queue *q)
exl->q = q;
- err = drm_sched_init(&exl->sched, &drm_sched_ops, NULL, 1,
- q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES,
- XE_SCHED_HANG_LIMIT, XE_SCHED_JOB_TIMEOUT,
- NULL, NULL, q->hwe->name,
- gt_to_xe(q->gt)->drm.dev);
+ err = drm_sched_init(&exl->sched, &args);
if (err)
goto err_free;
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.c b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
index 50361b4638f9..869b43a4151d 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.c
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
@@ -63,13 +63,24 @@ int xe_sched_init(struct xe_gpu_scheduler *sched,
atomic_t *score, const char *name,
struct device *dev)
{
+ const struct drm_sched_init_args args = {
+ .ops = ops,
+ .submit_wq = submit_wq,
+ .num_rqs = 1,
+ .credit_limit = hw_submission,
+ .hang_limit = hang_limit,
+ .timeout = timeout,
+ .timeout_wq = timeout_wq,
+ .score = score,
+ .name = name,
+ .dev = dev,
+ };
+
sched->ops = xe_ops;
INIT_LIST_HEAD(&sched->msgs);
INIT_WORK(&sched->work_process_msg, xe_sched_process_msg_work);
- return drm_sched_init(&sched->base, ops, submit_wq, 1, hw_submission,
- hang_limit, timeout, timeout_wq, score, name,
- dev);
+ return drm_sched_init(&sched->base, &args);
}
void xe_sched_fini(struct xe_gpu_scheduler *sched)
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 26e64530ada2..5d6fb79957b6 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -532,8 +532,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
- if (IS_SRIOV_PF(gt_to_xe(gt)))
+ if (IS_SRIOV_PF(gt_to_xe(gt))) {
+ xe_gt_sriov_pf_init(gt);
xe_gt_sriov_pf_init_hw(gt);
+ }
xe_force_wake_put(gt_to_fw(gt), fw_ref);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index e71fc3d2bda2..6f906c8e8108 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -68,6 +68,19 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}
+/**
+ * xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
+ * @gt: the &xe_gt to initialize
+ *
+ * Late one-time initialization of the PF data.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_init(struct xe_gt *gt)
+{
+ return xe_gt_sriov_pf_migration_init(gt);
+}
+
static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
{
return GRAPHICS_VERx100(xe) == 1200;
@@ -90,7 +103,6 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
pf_enable_ggtt_guest_update(gt);
xe_gt_sriov_pf_service_update(gt);
- xe_gt_sriov_pf_migration_init(gt);
}
static u32 pf_get_vf_regs_stride(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
index 96fab779a906..f474509411c0 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
@@ -10,6 +10,7 @@ struct xe_gt;
#ifdef CONFIG_PCI_IOV
int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
+int xe_gt_sriov_pf_init(struct xe_gt *gt);
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
void xe_gt_sriov_pf_restart(struct xe_gt *gt);
@@ -19,6 +20,11 @@ static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}
+static inline int xe_gt_sriov_pf_init(struct xe_gt *gt)
+{
+ return 0;
+}
+
static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
}
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 8b65c5e959cc..72ad576fc18e 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -1723,8 +1723,11 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
drm_printf(p, "\tg2h outstanding: %d\n",
snapshot->g2h_outstanding);
- if (snapshot->ctb)
- xe_print_blob_ascii85(p, "CTB data", snapshot->ctb, 0, snapshot->ctb_size);
+ if (snapshot->ctb) {
+ drm_printf(p, "[CTB].length: 0x%zx\n", snapshot->ctb_size);
+ xe_print_blob_ascii85(p, "[CTB].data", '\n',
+ snapshot->ctb, 0, snapshot->ctb_size);
+ }
} else {
drm_puts(p, "CT disabled\n");
}
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index df4cfb698cdb..0ca3056d8bd3 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -208,11 +208,14 @@ void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_
drm_printf(p, "GuC timestamp: 0x%08llX [%llu]\n", snapshot->stamp, snapshot->stamp);
drm_printf(p, "Log level: %u\n", snapshot->level);
+ drm_printf(p, "[LOG].length: 0x%zx\n", snapshot->size);
remain = snapshot->size;
for (i = 0; i < snapshot->num_chunks; i++) {
size_t size = min(GUC_LOG_CHUNK_SIZE, remain);
+ const char *prefix = i ? NULL : "[LOG].data";
+ char suffix = i == snapshot->num_chunks - 1 ? '\n' : 0;
- xe_print_blob_ascii85(p, i ? NULL : "Log data", snapshot->copy[i], 0, size);
+ xe_print_blob_ascii85(p, prefix, suffix, snapshot->copy[i], 0, size);
remain -= size;
}
}
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 32f5a67a917b..08552ee3fb94 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -757,19 +757,7 @@ int xe_irq_install(struct xe_device *xe)
xe_irq_postinstall(xe);
- err = devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
- if (err)
- goto free_irq_handler;
-
- return 0;
-
-free_irq_handler:
- if (xe_device_has_msix(xe))
- xe_irq_msix_free(xe);
- else
- xe_irq_msi_free(xe);
-
- return err;
+ return devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
}
static void xe_irq_msi_synchronize_irq(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index eeb96b5f49e2..fa873f3d0a9d 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -237,7 +237,6 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
u32 tail, hw_tail, partial_report_size, available;
int report_size = stream->oa_buffer.format->size;
unsigned long flags;
- bool pollin;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -282,11 +281,11 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
stream->oa_buffer.tail = tail;
available = xe_oa_circ_diff(stream, stream->oa_buffer.tail, stream->oa_buffer.head);
- pollin = available >= stream->wait_num_reports * report_size;
+ stream->pollin = available >= stream->wait_num_reports * report_size;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
- return pollin;
+ return stream->pollin;
}
static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)
@@ -294,10 +293,8 @@ static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)
struct xe_oa_stream *stream =
container_of(hrtimer, typeof(*stream), poll_check_timer);
- if (xe_oa_buffer_check_unlocked(stream)) {
- stream->pollin = true;
+ if (xe_oa_buffer_check_unlocked(stream))
wake_up(&stream->poll_wq);
- }
hrtimer_forward_now(hrtimer, ns_to_ktime(stream->poll_period_ns));
@@ -452,6 +449,12 @@ static u32 __oa_ccs_select(struct xe_oa_stream *stream)
return val;
}
+static u32 __oactrl_used_bits(struct xe_oa_stream *stream)
+{
+ return stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
+ OAG_OACONTROL_USED_BITS : OAM_OACONTROL_USED_BITS;
+}
+
static void xe_oa_enable(struct xe_oa_stream *stream)
{
const struct xe_oa_format *format = stream->oa_buffer.format;
@@ -472,14 +475,14 @@ static void xe_oa_enable(struct xe_oa_stream *stream)
stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
val |= OAG_OACONTROL_OA_PES_DISAG_EN;
- xe_mmio_write32(&stream->gt->mmio, regs->oa_ctrl, val);
+ xe_mmio_rmw32(&stream->gt->mmio, regs->oa_ctrl, __oactrl_used_bits(stream), val);
}
static void xe_oa_disable(struct xe_oa_stream *stream)
{
struct xe_mmio *mmio = &stream->gt->mmio;
- xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctrl, 0);
+ xe_mmio_rmw32(mmio, __oa_regs(stream)->oa_ctrl, __oactrl_used_bits(stream), 0);
if (xe_mmio_wait32(mmio, __oa_regs(stream)->oa_ctrl,
OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 50000, NULL, false))
drm_err(&stream->oa->xe->drm,
@@ -2534,6 +2537,8 @@ static void __xe_oa_init_oa_units(struct xe_gt *gt)
u->type = DRM_XE_OA_UNIT_TYPE_OAM;
}
+ xe_mmio_write32(&gt->mmio, u->regs.oa_ctrl, 0);
+
/* Ensure MMIO trigger remains disabled till there is a stream */
xe_mmio_write32(&gt->mmio, u->regs.oa_debug,
oag_configure_mmio_trigger(NULL, false));
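Switching the OA enable/disable paths from plain writes to xe_mmio_rmw32() with the new OAG/OAM *_OACONTROL_USED_BITS masks limits the update to the bits the OA stream actually owns, leaving any other bits of the control register intact. The usual read-modify-write shape behind such a helper looks roughly like the sketch below; reg_read()/reg_write() are hypothetical stand-ins for the real MMIO accessors, and the clear-then-set order is an assumption about the helper's semantics:

#include <linux/types.h>

/*
 * Generic read-modify-write: clear the bits in @clr, then OR in @set.
 * Returns the previous register value.
 */
static u32 rmw32(u32 (*reg_read)(u32 addr),
		 void (*reg_write)(u32 addr, u32 val),
		 u32 addr, u32 clr, u32 set)
{
	u32 old = reg_read(addr);
	u32 val = (old & ~clr) | set;

	reg_write(addr, val);
	return old;
}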
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index 423856cc18d4..d414421f8c13 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -57,12 +57,35 @@ bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe)
return GRAPHICS_VERx100(xe) < 1270 && !IS_DGFX(xe);
}
+static u32 get_wopcm_size(struct xe_device *xe)
+{
+ u32 wopcm_size;
+ u64 val;
+
+ val = xe_mmio_read64_2x32(xe_root_tile_mmio(xe), STOLEN_RESERVED);
+ val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val);
+
+ switch (val) {
+ case 0x5 ... 0x6:
+ val--;
+ fallthrough;
+ case 0x0 ... 0x3:
+ wopcm_size = (1U << val) * SZ_1M;
+ break;
+ default:
+ WARN(1, "Missing case wopcm_size=%llx\n", val);
+ wopcm_size = 0;
+ }
+
+ return wopcm_size;
+}
+
static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
struct xe_tile *tile = xe_device_get_root_tile(xe);
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- u64 stolen_size;
+ u64 stolen_size, wopcm_size;
u64 tile_offset;
u64 tile_size;
@@ -74,7 +97,13 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
if (drm_WARN_ON(&xe->drm, tile_size < mgr->stolen_base))
return 0;
+ /* Carve out the top of DSM as it contains the reserved WOPCM region */
+ wopcm_size = get_wopcm_size(xe);
+ if (drm_WARN_ON(&xe->drm, !wopcm_size))
+ return 0;
+
stolen_size = tile_size - mgr->stolen_base;
+ stolen_size -= wopcm_size;
/* Verify usage fits in the actual resource available */
if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, LMEM_BAR))
@@ -89,29 +118,6 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
return ALIGN_DOWN(stolen_size, SZ_1M);
}
-static u32 get_wopcm_size(struct xe_device *xe)
-{
- u32 wopcm_size;
- u64 val;
-
- val = xe_mmio_read64_2x32(xe_root_tile_mmio(xe), STOLEN_RESERVED);
- val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val);
-
- switch (val) {
- case 0x5 ... 0x6:
- val--;
- fallthrough;
- case 0x0 ... 0x3:
- wopcm_size = (1U << val) * SZ_1M;
- break;
- default:
- WARN(1, "Missing case wopcm_size=%llx\n", val);
- wopcm_size = 0;
- }
-
- return wopcm_size;
-}
-
static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 979f6d3239ba..a6a4a871f197 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1437,7 +1437,7 @@ zynqmp_dp_disp_connected_live_layer(struct zynqmp_dp *dp)
}
static void zynqmp_dp_disp_enable(struct zynqmp_dp *dp,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct zynqmp_disp_layer *layer;
struct drm_bridge_state *bridge_state;
@@ -1447,8 +1447,7 @@ static void zynqmp_dp_disp_enable(struct zynqmp_dp *dp,
if (!layer)
return;
- bridge_state = drm_atomic_get_new_bridge_state(old_bridge_state->base.state,
- old_bridge_state->bridge);
+ bridge_state = drm_atomic_get_new_bridge_state(state, &dp->bridge);
if (WARN_ON(!bridge_state))
return;
@@ -1534,10 +1533,10 @@ zynqmp_dp_bridge_mode_valid(struct drm_bridge *bridge,
}
/* Check with link rate and lane count */
- mutex_lock(&dp->lock);
- rate = zynqmp_dp_max_rate(dp->link_config.max_rate,
- dp->link_config.max_lanes, dp->config.bpp);
- mutex_unlock(&dp->lock);
+ scoped_guard(mutex, &dp->lock)
+ rate = zynqmp_dp_max_rate(dp->link_config.max_rate,
+ dp->link_config.max_lanes,
+ dp->config.bpp);
if (mode->clock > rate) {
dev_dbg(dp->dev, "filtered mode %s for high pixel rate\n",
mode->name);
@@ -1549,10 +1548,9 @@ zynqmp_dp_bridge_mode_valid(struct drm_bridge *bridge,
}
static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
const struct drm_crtc_state *crtc_state;
const struct drm_display_mode *adjusted_mode;
const struct drm_display_mode *mode;
@@ -1565,7 +1563,7 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge,
pm_runtime_get_sync(dp->dev);
guard(mutex)(&dp->lock);
- zynqmp_dp_disp_enable(dp, old_bridge_state);
+ zynqmp_dp_disp_enable(dp, state);
/*
* Retrieve the CRTC mode and adjusted mode. This requires a little
@@ -1627,8 +1625,10 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
+ struct drm_bridge_state *old_bridge_state = drm_atomic_get_old_bridge_state(state,
+ bridge);
struct zynqmp_dp *dp = bridge_to_dp(bridge);
mutex_lock(&dp->lock);
@@ -1722,13 +1722,9 @@ disconnected:
static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
- enum drm_connector_status ret;
-
- mutex_lock(&dp->lock);
- ret = __zynqmp_dp_bridge_detect(dp);
- mutex_unlock(&dp->lock);
- return ret;
+ guard(mutex)(&dp->lock);
+ return __zynqmp_dp_bridge_detect(dp);
}
static const struct drm_edid *zynqmp_dp_bridge_edid_read(struct drm_bridge *bridge,
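The detect conversion above is one of several in this file that replace explicit mutex_lock()/mutex_unlock() pairs with the scope-based guards from <linux/cleanup.h>: guard(mutex)(&lock) keeps the lock held until the enclosing scope ends, while scoped_guard(mutex, &lock) covers only the statement or block that follows, so early returns no longer need an unlock call or a goto-out label. A minimal sketch of both forms with illustrative demo_* names:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct demo_state {
	struct mutex lock;
	int value;
	bool active;
};

static int demo_get(struct demo_state *st)
{
	guard(mutex)(&st->lock);	/* unlocked automatically on return */
	return st->value;
}

static int demo_set(struct demo_state *st, int val)
{
	int old;

	if (val < 0)
		return -EINVAL;		/* no lock taken yet */

	guard(mutex)(&st->lock);	/* released on every return below */
	old = st->value;
	st->value = val;
	if (!st->active)
		return 0;
	return old;
}

static int demo_peek(struct demo_state *st, int *out)
{
	/* Lock held only for the single statement below. */
	scoped_guard(mutex, &st->lock)
		*out = st->value;
	return 0;
}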
@@ -1881,10 +1877,9 @@ static ssize_t zynqmp_dp_pattern_read(struct file *file, char __user *user_buf,
if (unlikely(ret))
return ret;
- mutex_lock(&dp->lock);
- ret = snprintf(buf, sizeof(buf), "%s\n",
- test_pattern_str[dp->test.pattern]);
- mutex_unlock(&dp->lock);
+ scoped_guard(mutex, &dp->lock)
+ ret = snprintf(buf, sizeof(buf), "%s\n",
+ test_pattern_str[dp->test.pattern]);
debugfs_file_put(dentry);
return simple_read_from_buffer(user_buf, count, ppos, buf, ret);
@@ -1939,24 +1934,18 @@ static int zynqmp_dp_enhanced_get(void *data, u64 *val)
{
struct zynqmp_dp *dp = data;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*val = dp->test.enhanced;
- mutex_unlock(&dp->lock);
return 0;
}
static int zynqmp_dp_enhanced_set(void *data, u64 val)
{
struct zynqmp_dp *dp = data;
- int ret = 0;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
dp->test.enhanced = val;
- if (dp->test.active)
- ret = zynqmp_dp_test_setup(dp);
- mutex_unlock(&dp->lock);
-
- return ret;
+ return dp->test.active ? zynqmp_dp_test_setup(dp) : 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_enhanced, zynqmp_dp_enhanced_get,
@@ -1966,24 +1955,19 @@ static int zynqmp_dp_downspread_get(void *data, u64 *val)
{
struct zynqmp_dp *dp = data;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*val = dp->test.downspread;
- mutex_unlock(&dp->lock);
return 0;
}
static int zynqmp_dp_downspread_set(void *data, u64 val)
{
struct zynqmp_dp *dp = data;
- int ret = 0;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
dp->test.downspread = val;
- if (dp->test.active)
- ret = zynqmp_dp_test_setup(dp);
- mutex_unlock(&dp->lock);
- return ret;
+ return dp->test.active ? zynqmp_dp_test_setup(dp) : 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_downspread, zynqmp_dp_downspread_get,
@@ -1993,33 +1977,32 @@ static int zynqmp_dp_active_get(void *data, u64 *val)
{
struct zynqmp_dp *dp = data;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*val = dp->test.active;
- mutex_unlock(&dp->lock);
return 0;
}
static int zynqmp_dp_active_set(void *data, u64 val)
{
struct zynqmp_dp *dp = data;
- int ret = 0;
+ int ret;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
if (val) {
if (val < 2) {
ret = zynqmp_dp_test_setup(dp);
if (ret)
- goto out;
+ return ret;
}
ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern,
dp->test.custom);
if (ret)
- goto out;
+ return ret;
ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set);
if (ret)
- goto out;
+ return ret;
dp->test.active = true;
} else {
@@ -2032,10 +2015,8 @@ static int zynqmp_dp_active_set(void *data, u64 val)
err);
zynqmp_dp_train_loop(dp);
}
-out:
- mutex_unlock(&dp->lock);
- return ret;
+ return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_active, zynqmp_dp_active_get,
@@ -2102,9 +2083,8 @@ static int zynqmp_dp_swing_get(void *data, u64 *val)
struct zynqmp_dp_train_set_priv *priv = data;
struct zynqmp_dp *dp = priv->dp;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*val = dp->test.train_set[priv->lane] & DP_TRAIN_VOLTAGE_SWING_MASK;
- mutex_unlock(&dp->lock);
return 0;
}
@@ -2113,12 +2093,11 @@ static int zynqmp_dp_swing_set(void *data, u64 val)
struct zynqmp_dp_train_set_priv *priv = data;
struct zynqmp_dp *dp = priv->dp;
u8 *train_set = &dp->test.train_set[priv->lane];
- int ret = 0;
if (val > 3)
return -EINVAL;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*train_set &= ~(DP_TRAIN_MAX_SWING_REACHED |
DP_TRAIN_VOLTAGE_SWING_MASK);
*train_set |= val;
@@ -2126,10 +2105,9 @@ static int zynqmp_dp_swing_set(void *data, u64 val)
*train_set |= DP_TRAIN_MAX_SWING_REACHED;
if (dp->test.active)
- ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set);
- mutex_unlock(&dp->lock);
+ return zynqmp_dp_update_vs_emph(dp, dp->test.train_set);
- return ret;
+ return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_swing, zynqmp_dp_swing_get,
@@ -2140,10 +2118,9 @@ static int zynqmp_dp_preemphasis_get(void *data, u64 *val)
struct zynqmp_dp_train_set_priv *priv = data;
struct zynqmp_dp *dp = priv->dp;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*val = FIELD_GET(DP_TRAIN_PRE_EMPHASIS_MASK,
dp->test.train_set[priv->lane]);
- mutex_unlock(&dp->lock);
return 0;
}
@@ -2152,12 +2129,11 @@ static int zynqmp_dp_preemphasis_set(void *data, u64 val)
struct zynqmp_dp_train_set_priv *priv = data;
struct zynqmp_dp *dp = priv->dp;
u8 *train_set = &dp->test.train_set[priv->lane];
- int ret = 0;
if (val > 2)
return -EINVAL;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*train_set &= ~(DP_TRAIN_MAX_PRE_EMPHASIS_REACHED |
DP_TRAIN_PRE_EMPHASIS_MASK);
*train_set |= val;
@@ -2165,10 +2141,9 @@ static int zynqmp_dp_preemphasis_set(void *data, u64 val)
*train_set |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
if (dp->test.active)
- ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set);
- mutex_unlock(&dp->lock);
+ return zynqmp_dp_update_vs_emph(dp, dp->test.train_set);
- return ret;
+ return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_preemphasis, zynqmp_dp_preemphasis_get,
@@ -2178,31 +2153,24 @@ static int zynqmp_dp_lanes_get(void *data, u64 *val)
{
struct zynqmp_dp *dp = data;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*val = dp->test.link_cnt;
- mutex_unlock(&dp->lock);
return 0;
}
static int zynqmp_dp_lanes_set(void *data, u64 val)
{
struct zynqmp_dp *dp = data;
- int ret = 0;
if (val > ZYNQMP_DP_MAX_LANES)
return -EINVAL;
- mutex_lock(&dp->lock);
- if (val > dp->num_lanes) {
- ret = -EINVAL;
- } else {
- dp->test.link_cnt = val;
- if (dp->test.active)
- ret = zynqmp_dp_test_setup(dp);
- }
- mutex_unlock(&dp->lock);
+ guard(mutex)(&dp->lock);
+ if (val > dp->num_lanes)
+ return -EINVAL;
- return ret;
+ dp->test.link_cnt = val;
+ return dp->test.active ? zynqmp_dp_test_setup(dp) : 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_lanes, zynqmp_dp_lanes_get,
@@ -2212,9 +2180,8 @@ static int zynqmp_dp_rate_get(void *data, u64 *val)
{
struct zynqmp_dp *dp = data;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*val = drm_dp_bw_code_to_link_rate(dp->test.bw_code) * 10000ULL;
- mutex_unlock(&dp->lock);
return 0;
}
@@ -2222,7 +2189,6 @@ static int zynqmp_dp_rate_set(void *data, u64 val)
{
struct zynqmp_dp *dp = data;
int link_rate;
- int ret = 0;
u8 bw_code;
if (do_div(val, 10000))
@@ -2237,13 +2203,9 @@ static int zynqmp_dp_rate_set(void *data, u64 val)
bw_code != DP_LINK_BW_5_4)
return -EINVAL;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
dp->test.bw_code = bw_code;
- if (dp->test.active)
- ret = zynqmp_dp_test_setup(dp);
- mutex_unlock(&dp->lock);
-
- return ret;
+ return dp->test.active ? zynqmp_dp_test_setup(dp) : 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_rate, zynqmp_dp_rate_get,
@@ -2253,9 +2215,8 @@ static int zynqmp_dp_ignore_aux_errors_get(void *data, u64 *val)
{
struct zynqmp_dp *dp = data;
- mutex_lock(&dp->aux.hw_mutex);
+ guard(mutex)(&dp->lock);
*val = dp->ignore_aux_errors;
- mutex_unlock(&dp->aux.hw_mutex);
return 0;
}
@@ -2266,9 +2227,8 @@ static int zynqmp_dp_ignore_aux_errors_set(void *data, u64 val)
if (val != !!val)
return -EINVAL;
- mutex_lock(&dp->aux.hw_mutex);
+ guard(mutex)(&dp->lock);
dp->ignore_aux_errors = val;
- mutex_unlock(&dp->aux.hw_mutex);
return 0;
}
@@ -2280,9 +2240,8 @@ static int zynqmp_dp_ignore_hpd_get(void *data, u64 *val)
{
struct zynqmp_dp *dp = data;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*val = dp->ignore_hpd;
- mutex_unlock(&dp->lock);
return 0;
}
@@ -2293,9 +2252,8 @@ static int zynqmp_dp_ignore_hpd_set(void *data, u64 val)
if (val != !!val)
return -EINVAL;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
dp->ignore_hpd = val;
- mutex_lock(&dp->lock);
return 0;
}
@@ -2391,14 +2349,12 @@ static void zynqmp_dp_hpd_work_func(struct work_struct *work)
struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp, hpd_work);
enum drm_connector_status status;
- mutex_lock(&dp->lock);
- if (dp->ignore_hpd) {
- mutex_unlock(&dp->lock);
- return;
- }
+ scoped_guard(mutex, &dp->lock) {
+ if (dp->ignore_hpd)
+ return;
- status = __zynqmp_dp_bridge_detect(dp);
- mutex_unlock(&dp->lock);
+ status = __zynqmp_dp_bridge_detect(dp);
+ }
drm_bridge_hpd_notify(&dp->bridge, status);
}
@@ -2410,11 +2366,9 @@ static void zynqmp_dp_hpd_irq_work_func(struct work_struct *work)
u8 status[DP_LINK_STATUS_SIZE + 2];
int err;
- mutex_lock(&dp->lock);
- if (dp->ignore_hpd) {
- mutex_unlock(&dp->lock);
+ guard(mutex)(&dp->lock);
+ if (dp->ignore_hpd)
return;
- }
err = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status,
DP_LINK_STATUS_SIZE + 2);
@@ -2428,7 +2382,6 @@ static void zynqmp_dp_hpd_irq_work_func(struct work_struct *work)
zynqmp_dp_train_loop(dp);
}
}
- mutex_unlock(&dp->lock);
}
static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
@@ -2483,7 +2436,6 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
struct platform_device *pdev = to_platform_device(dpsub->dev);
struct drm_bridge *bridge;
struct zynqmp_dp *dp;
- struct resource *res;
int ret;
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
@@ -2500,8 +2452,7 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
INIT_WORK(&dp->hpd_irq_work, zynqmp_dp_hpd_irq_work_func);
/* Acquire all resources (IOMEM, IRQ and PHYs). */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp");
- dp->iomem = devm_ioremap_resource(dp->dev, res);
+ dp->iomem = devm_platform_ioremap_resource_byname(pdev, "dp");
if (IS_ERR(dp->iomem)) {
ret = PTR_ERR(dp->iomem);
goto err_free;
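The debugfs conversion above relies on the scope-based locking helpers from <linux/cleanup.h> and <linux/mutex.h>: guard(mutex)(&m) holds the mutex until the enclosing function returns, while scoped_guard(mutex, &m) { ... } limits the hold to the braced block, so every early return drops the lock automatically and the ret/out: bookkeeping disappears. A minimal sketch of the same shape, using a hypothetical struct foo rather than the zynqmp_dp code itself:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct foo {
	struct mutex lock;
	u64 value;
	bool active;
};

/* The guard releases foo->lock on every return path of the function. */
static int foo_get(struct foo *foo, u64 *val)
{
	guard(mutex)(&foo->lock);
	*val = foo->value;
	return 0;
}

/* Only the update needs the lock; a return inside the scope still unlocks. */
static int foo_set(struct foo *foo, u64 val)
{
	scoped_guard(mutex, &foo->lock) {
		if (!foo->active)
			return -ENODEV;
		foo->value = val;
	}
	return 0;
}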
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
index fa5f0ace6084..f07ff4eb3a6d 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
@@ -323,12 +323,16 @@ int zynqmp_audio_init(struct zynqmp_dpsub *dpsub)
audio->dai_name = devm_kasprintf(dev, GFP_KERNEL,
"%s-dai", dev_name(dev));
+ if (!audio->dai_name)
+ return -ENOMEM;
for (unsigned int i = 0; i < ZYNQMP_NUM_PCMS; ++i) {
audio->link_names[i] = devm_kasprintf(dev, GFP_KERNEL,
"%s-dp-%u", dev_name(dev), i);
audio->pcm_names[i] = devm_kasprintf(dev, GFP_KERNEL,
"%s-pcm-%u", dev_name(dev), i);
+ if (!audio->link_names[i] || !audio->pcm_names[i])
+ return -ENOMEM;
}
audio->base = devm_platform_ioremap_resource_byname(pdev, "aud");
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index f953ca48a930..3a9544b97bc5 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -201,6 +201,8 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
if (ret)
return ret;
+ dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+
/* Try the reserved memory. Proceed if there's none. */
of_reserved_mem_device_init(&pdev->dev);
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 7b1d091f3c09..46cae925b095 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -619,6 +619,8 @@ static int host1x_probe(struct platform_device *pdev)
goto free_contexts;
}
+ mutex_init(&host->intr_mutex);
+
pm_runtime_enable(&pdev->dev);
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index b3285dd10180..f77a678949e9 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -104,8 +104,6 @@ int host1x_intr_init(struct host1x *host)
unsigned int id;
int i, err;
- mutex_init(&host->intr_mutex);
-
for (id = 0; id < host1x_syncpt_nb_pts(host); ++id) {
struct host1x_syncpt *syncpt = &host->syncpt[id];
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index b53eb569bd49..dfc245867a46 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -570,6 +570,8 @@ config HID_LED
config HID_LENOVO
tristate "Lenovo / Thinkpad devices"
+ depends on ACPI
+ select ACPI_PLATFORM_PROFILE
select NEW_LEDS
select LEDS_CLASS
help
@@ -1167,7 +1169,8 @@ config HID_TOPRE
tristate "Topre REALFORCE keyboards"
depends on HID
help
- Say Y for N-key rollover support on Topre REALFORCE R2 108/87 key keyboards.
+ Say Y for N-key rollover support on Topre REALFORCE R2 108/87 key and
+ Topre REALFORCE R3S 87 key keyboards.
config HID_THINGM
tristate "ThingM blink(1) USB RGB LED"
@@ -1374,10 +1377,6 @@ endmenu
source "drivers/hid/bpf/Kconfig"
-endif # HID
-
-source "drivers/hid/usbhid/Kconfig"
-
source "drivers/hid/i2c-hid/Kconfig"
source "drivers/hid/intel-ish-hid/Kconfig"
@@ -1388,4 +1387,10 @@ source "drivers/hid/surface-hid/Kconfig"
source "drivers/hid/intel-thc-hid/Kconfig"
+endif # HID
+
+# USB support may be used with HID disabled
+
+source "drivers/hid/usbhid/Kconfig"
+
endif # HID_SUPPORT
diff --git a/drivers/hid/amd-sfh-hid/Kconfig b/drivers/hid/amd-sfh-hid/Kconfig
index 329de5e12c1a..3291786a5ee6 100644
--- a/drivers/hid/amd-sfh-hid/Kconfig
+++ b/drivers/hid/amd-sfh-hid/Kconfig
@@ -5,7 +5,6 @@ menu "AMD SFH HID Support"
config AMD_SFH_HID
tristate "AMD Sensor Fusion Hub"
- depends on HID
depends on X86
help
If you say yes to this option, support will be included for the
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 7e1ae2a2bcc2..49812a76b7ed 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -474,6 +474,7 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2015)
table = magic_keyboard_2015_fn_keys;
else if (hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 ||
+ hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024 ||
hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 ||
hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021)
table = apple2021_fn_keys;
@@ -545,6 +546,9 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
}
}
+ if (usage->hid == 0xc0301) /* Omoton KB066 quirk */
+ code = KEY_F6;
+
if (usage->code != code) {
input_event_with_scancode(input, usage->type, code, usage->hid, value);
@@ -1150,6 +1154,10 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021),
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021),
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021),
diff --git a/drivers/hid/hid-corsair-void.c b/drivers/hid/hid-corsair-void.c
index 6ece56b850fc..56e858066c3c 100644
--- a/drivers/hid/hid-corsair-void.c
+++ b/drivers/hid/hid-corsair-void.c
@@ -553,7 +553,7 @@ static void corsair_void_battery_remove_work_handler(struct work_struct *work)
static void corsair_void_battery_add_work_handler(struct work_struct *work)
{
struct corsair_void_drvdata *drvdata;
- struct power_supply_config psy_cfg;
+ struct power_supply_config psy_cfg = {};
struct power_supply *new_supply;
drvdata = container_of(work, struct corsair_void_drvdata,
@@ -726,6 +726,7 @@ static void corsair_void_remove(struct hid_device *hid_dev)
if (drvdata->battery)
power_supply_unregister(drvdata->battery);
+ cancel_delayed_work_sync(&drvdata->delayed_status_work);
cancel_delayed_work_sync(&drvdata->delayed_firmware_work);
sysfs_remove_group(&hid_dev->dev.kobj, &corsair_void_attr_group);
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c448de53bf91..7e400624908e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -184,6 +184,7 @@
#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242
#define USB_DEVICE_ID_APPLE_IRCONTROL5 0x8243
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 0x029c
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024 0x0320
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 0x029a
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021 0x029f
#define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102
@@ -1095,6 +1096,7 @@
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
+#define USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473 0x5473
#define I2C_VENDOR_ID_RAYDIUM 0x2386
#define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33
@@ -1301,6 +1303,7 @@
#define USB_VENDOR_ID_TOPRE 0x0853
#define USB_DEVICE_ID_TOPRE_REALFORCE_R2_108 0x0148
#define USB_DEVICE_ID_TOPRE_REALFORCE_R2_87 0x0146
+#define USB_DEVICE_ID_TOPRE_REALFORCE_R3S_87 0x0313
#define USB_VENDOR_ID_TOPSEED 0x0766
#define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 4d00bc4d656e..a7d9ca02779e 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -32,9 +32,7 @@
#include <linux/leds.h>
#include <linux/workqueue.h>
-#if IS_ENABLED(CONFIG_ACPI_PLATFORM_PROFILE)
#include <linux/platform_profile.h>
-#endif /* CONFIG_ACPI_PLATFORM_PROFILE */
#include "hid-ids.h"
@@ -730,13 +728,10 @@ static int lenovo_raw_event_TP_X12_tab(struct hid_device *hdev, u32 raw_data)
if (hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB) {
report_key_event(input, KEY_RFKILL);
return 1;
- }
-#if IS_ENABLED(CONFIG_ACPI_PLATFORM_PROFILE)
- else {
+ } else {
platform_profile_cycle();
return 1;
}
-#endif /* CONFIG_ACPI_PLATFORM_PROFILE */
return 0;
case TP_X12_RAW_HOTKEY_FN_F10:
/* TAB1 has PICKUP Phone and TAB2 use Snipping tool*/
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 82900857bfd8..e50887a6d22c 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1679,9 +1679,12 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
break;
}
- if (suffix)
+ if (suffix) {
hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
"%s %s", hdev->name, suffix);
+ if (!hi->input->name)
+ return -ENOMEM;
+ }
return 0;
}
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index e0bbf0c6345d..5d7a418ccdbe 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -891,6 +891,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) },
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) },
{ }
};
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index af38fc8eb34f..c9e65e9088b3 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -313,6 +313,7 @@ struct steam_device {
u16 rumble_left;
u16 rumble_right;
unsigned int sensor_timestamp_us;
+ struct work_struct unregister_work;
};
static int steam_recv_report(struct steam_device *steam,
@@ -1050,10 +1051,10 @@ static void steam_mode_switch_cb(struct work_struct *work)
struct steam_device, mode_switch);
unsigned long flags;
bool client_opened;
- steam->gamepad_mode = !steam->gamepad_mode;
if (!lizard_mode)
return;
+ steam->gamepad_mode = !steam->gamepad_mode;
if (steam->gamepad_mode)
steam_set_lizard_mode(steam, false);
else {
@@ -1072,6 +1073,31 @@ static void steam_mode_switch_cb(struct work_struct *work)
}
}
+static void steam_work_unregister_cb(struct work_struct *work)
+{
+ struct steam_device *steam = container_of(work, struct steam_device,
+ unregister_work);
+ unsigned long flags;
+ bool connected;
+ bool opened;
+
+ spin_lock_irqsave(&steam->lock, flags);
+ opened = steam->client_opened;
+ connected = steam->connected;
+ spin_unlock_irqrestore(&steam->lock, flags);
+
+ if (connected) {
+ if (opened) {
+ steam_sensors_unregister(steam);
+ steam_input_unregister(steam);
+ } else {
+ steam_set_lizard_mode(steam, lizard_mode);
+ steam_input_register(steam);
+ steam_sensors_register(steam);
+ }
+ }
+}
+
static bool steam_is_valve_interface(struct hid_device *hdev)
{
struct hid_report_enum *rep_enum;
@@ -1117,8 +1143,7 @@ static int steam_client_ll_open(struct hid_device *hdev)
steam->client_opened++;
spin_unlock_irqrestore(&steam->lock, flags);
- steam_sensors_unregister(steam);
- steam_input_unregister(steam);
+ schedule_work(&steam->unregister_work);
return 0;
}
@@ -1135,11 +1160,7 @@ static void steam_client_ll_close(struct hid_device *hdev)
connected = steam->connected && !steam->client_opened;
spin_unlock_irqrestore(&steam->lock, flags);
- if (connected) {
- steam_set_lizard_mode(steam, lizard_mode);
- steam_input_register(steam);
- steam_sensors_register(steam);
- }
+ schedule_work(&steam->unregister_work);
}
static int steam_client_ll_raw_request(struct hid_device *hdev,
@@ -1231,6 +1252,7 @@ static int steam_probe(struct hid_device *hdev,
INIT_LIST_HEAD(&steam->list);
INIT_WORK(&steam->rumble_work, steam_haptic_rumble_cb);
steam->sensor_timestamp_us = 0;
+ INIT_WORK(&steam->unregister_work, steam_work_unregister_cb);
/*
* With the real steam controller interface, do not connect hidraw.
@@ -1291,6 +1313,7 @@ err_cancel_work:
cancel_work_sync(&steam->work_connect);
cancel_delayed_work_sync(&steam->mode_switch);
cancel_work_sync(&steam->rumble_work);
+ cancel_work_sync(&steam->unregister_work);
return ret;
}
@@ -1307,6 +1330,7 @@ static void steam_remove(struct hid_device *hdev)
cancel_delayed_work_sync(&steam->mode_switch);
cancel_work_sync(&steam->work_connect);
cancel_work_sync(&steam->rumble_work);
+ cancel_work_sync(&steam->unregister_work);
hid_destroy_device(steam->client_hdev);
steam->client_hdev = NULL;
steam->client_opened = 0;
@@ -1593,13 +1617,13 @@ static void steam_do_deck_input_event(struct steam_device *steam,
if (!(b9 & BIT(6)) && steam->did_mode_switch) {
steam->did_mode_switch = false;
- cancel_delayed_work_sync(&steam->mode_switch);
+ cancel_delayed_work(&steam->mode_switch);
} else if (!steam->client_opened && (b9 & BIT(6)) && !steam->did_mode_switch) {
steam->did_mode_switch = true;
schedule_delayed_work(&steam->mode_switch, 45 * HZ / 100);
}
- if (!steam->gamepad_mode)
+ if (!steam->gamepad_mode && lizard_mode)
return;
lpad_touched = b10 & BIT(3);
@@ -1669,7 +1693,7 @@ static void steam_do_deck_sensors_event(struct steam_device *steam,
*/
steam->sensor_timestamp_us += 4000;
- if (!steam->gamepad_mode)
+ if (!steam->gamepad_mode && lizard_mode)
return;
input_event(sensors, EV_MSC, MSC_TIMESTAMP, steam->sensor_timestamp_us);
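The hid-steam hunks above move input/sensor (un)registration out of the hidraw open/close callbacks into a dedicated work item. A common reason for this shape — assumed here, not stated in the diff — is that the callback path holds locks (or must not sleep), while registration can sleep, so deferring it avoids lock-ordering problems. A sketch of the pattern with hypothetical foo_* names:

#include <linux/container_of.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct foo_dev {
	spinlock_t lock;	/* protects connected/client_opened */
	bool connected;
	bool client_opened;
	struct work_struct toggle_work;	/* INIT_WORK()'d at probe time */
};

/* Runs in process context, so it may sleep while (un)registering. */
static void foo_toggle_worker(struct work_struct *work)
{
	struct foo_dev *foo = container_of(work, struct foo_dev, toggle_work);
	unsigned long flags;
	bool connected, opened;

	spin_lock_irqsave(&foo->lock, flags);
	connected = foo->connected;
	opened = foo->client_opened;
	spin_unlock_irqrestore(&foo->lock, flags);

	if (!connected)
		return;
	/* register the interfaces when !opened, unregister them when opened */
}

/* Called from the restricted context: only flip the flag and queue the work. */
static void foo_client_open(struct foo_dev *foo)
{
	unsigned long flags;

	spin_lock_irqsave(&foo->lock, flags);
	foo->client_opened = true;
	spin_unlock_irqrestore(&foo->lock, flags);

	schedule_work(&foo->toggle_work);
}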
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index 6c3e758bbb09..3b81468a1df2 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -171,7 +171,7 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
b_ep = ep->desc.bEndpointAddress;
/* Are the expected endpoints present? */
- u8 ep_addr[1] = {b_ep};
+ u8 ep_addr[2] = {b_ep, 0};
if (!usb_check_int_endpoints(usbif, ep_addr)) {
hid_err(hdev, "Unexpected non-int endpoint\n");
diff --git a/drivers/hid/hid-topre.c b/drivers/hid/hid-topre.c
index 848361f6225d..ccedf8721722 100644
--- a/drivers/hid/hid-topre.c
+++ b/drivers/hid/hid-topre.c
@@ -29,6 +29,11 @@ static const __u8 *topre_report_fixup(struct hid_device *hdev, __u8 *rdesc,
hid_info(hdev,
"fixing up Topre REALFORCE keyboard report descriptor\n");
rdesc[72] = 0x02;
+ } else if (*rsize >= 106 && rdesc[28] == 0x29 && rdesc[29] == 0xe7 &&
+ rdesc[30] == 0x81 && rdesc[31] == 0x00) {
+ hid_info(hdev,
+ "fixing up Topre REALFORCE keyboard report descriptor\n");
+ rdesc[31] = 0x02;
}
return rdesc;
}
@@ -38,6 +43,8 @@ static const struct hid_device_id topre_id_table[] = {
USB_DEVICE_ID_TOPRE_REALFORCE_R2_108) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
USB_DEVICE_ID_TOPRE_REALFORCE_R2_87) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
+ USB_DEVICE_ID_TOPRE_REALFORCE_R3S_87) },
{ }
};
MODULE_DEVICE_TABLE(hid, topre_id_table);
diff --git a/drivers/hid/hid-winwing.c b/drivers/hid/hid-winwing.c
index 831b760c66ea..d4afbbd27807 100644
--- a/drivers/hid/hid-winwing.c
+++ b/drivers/hid/hid-winwing.c
@@ -106,6 +106,8 @@ static int winwing_init_led(struct hid_device *hdev,
"%s::%s",
dev_name(&input->dev),
info->led_name);
+ if (!led->cdev.name)
+ return -ENOMEM;
ret = devm_led_classdev_register(&hdev->dev, &led->cdev);
if (ret)
diff --git a/drivers/hid/i2c-hid/Kconfig b/drivers/hid/i2c-hid/Kconfig
index ef7c595c9403..e8d51f410cc1 100644
--- a/drivers/hid/i2c-hid/Kconfig
+++ b/drivers/hid/i2c-hid/Kconfig
@@ -2,7 +2,7 @@
menuconfig I2C_HID
tristate "I2C HID support"
default y
- depends on I2C && INPUT && HID
+ depends on I2C
if I2C_HID
diff --git a/drivers/hid/intel-ish-hid/Kconfig b/drivers/hid/intel-ish-hid/Kconfig
index 253dc10d35ef..568c8688784e 100644
--- a/drivers/hid/intel-ish-hid/Kconfig
+++ b/drivers/hid/intel-ish-hid/Kconfig
@@ -6,7 +6,6 @@ config INTEL_ISH_HID
tristate "Intel Integrated Sensor Hub"
default n
depends on X86
- depends on HID
help
The Integrated Sensor Hub (ISH) enables the ability to offload
sensor polling and algorithm processing to a dedicated low power
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index cdd80c653918..07e90d51f073 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -36,6 +36,8 @@
#define PCI_DEVICE_ID_INTEL_ISH_ARL_H 0x7745
#define PCI_DEVICE_ID_INTEL_ISH_ARL_S 0x7F78
#define PCI_DEVICE_ID_INTEL_ISH_LNL_M 0xA845
+#define PCI_DEVICE_ID_INTEL_ISH_PTL_H 0xE345
+#define PCI_DEVICE_ID_INTEL_ISH_PTL_P 0xE445
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 3cd53fc80634..4c861119e97a 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -517,6 +517,10 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
/* ISH FW is dead */
if (!ish_is_input_ready(dev))
return -EPIPE;
+
+ /* Send clock sync at once after reset */
+ ishtp_dev->prev_sync = 0;
+
/*
* Set HOST2ISH.ILUP. Apparently we need this BEFORE sending
* RESET_NOTIFY_ACK - FW will be checking for it
@@ -577,15 +581,14 @@ static void fw_reset_work_fn(struct work_struct *work)
*/
static void _ish_sync_fw_clock(struct ishtp_device *dev)
{
- static unsigned long prev_sync;
- uint64_t usec;
+ struct ipc_time_update_msg time = {};
- if (prev_sync && time_before(jiffies, prev_sync + 20 * HZ))
+ if (dev->prev_sync && time_before(jiffies, dev->prev_sync + 20 * HZ))
return;
- prev_sync = jiffies;
- usec = ktime_to_us(ktime_get_boottime());
- ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &usec, sizeof(uint64_t));
+ dev->prev_sync = jiffies;
+ /* The fields of 'time' are filled in while the message is sent */
+ ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &time, sizeof(time));
}
/**
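In the ipc.c hunk above, the clock-sync throttle moves from a function-static variable to the per-device prev_sync field, so each ISH instance is rate-limited independently and ish_fw_reset_handler() can force an immediate sync by zeroing it. The underlying jiffies idiom, sketched with hypothetical names:

#include <linux/jiffies.h>
#include <linux/types.h>

struct foo_dev {
	unsigned long prev_sync;	/* jiffies of the last sync; 0 forces one */
};

/* Allow at most one sync every 20 seconds per device. */
static bool foo_should_sync(struct foo_dev *dev)
{
	if (dev->prev_sync && time_before(jiffies, dev->prev_sync + 20 * HZ))
		return false;

	dev->prev_sync = jiffies;
	return true;
}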
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 9e2401291a2f..ff0fc8010072 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -26,9 +26,11 @@
enum ishtp_driver_data_index {
ISHTP_DRIVER_DATA_NONE,
ISHTP_DRIVER_DATA_LNL_M,
+ ISHTP_DRIVER_DATA_PTL,
};
#define ISH_FW_GEN_LNL_M "lnlm"
+#define ISH_FW_GEN_PTL "ptl"
#define ISH_FIRMWARE_PATH(gen) "intel/ish/ish_" gen ".bin"
#define ISH_FIRMWARE_PATH_ALL "intel/ish/ish_*.bin"
@@ -37,6 +39,9 @@ static struct ishtp_driver_data ishtp_driver_data[] = {
[ISHTP_DRIVER_DATA_LNL_M] = {
.fw_generation = ISH_FW_GEN_LNL_M,
},
+ [ISHTP_DRIVER_DATA_PTL] = {
+ .fw_generation = ISH_FW_GEN_PTL,
+ },
};
static const struct pci_device_id ish_pci_tbl[] = {
@@ -63,6 +68,8 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ARL_H)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ARL_S)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_LNL_M), .driver_data = ISHTP_DRIVER_DATA_LNL_M},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_PTL_H), .driver_data = ISHTP_DRIVER_DATA_PTL},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_PTL_P), .driver_data = ISHTP_DRIVER_DATA_PTL},
{}
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index 44eddc411e97..ec9f6e87aaf2 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -253,6 +253,8 @@ struct ishtp_device {
unsigned int ipc_tx_cnt;
unsigned long long ipc_tx_bytes_cnt;
+ /* Time of the last clock sync */
+ unsigned long prev_sync;
const struct ishtp_hw_ops *ops;
size_t mtu;
uint32_t ishtp_msg_hdr;
diff --git a/drivers/hid/intel-thc-hid/Kconfig b/drivers/hid/intel-thc-hid/Kconfig
index 91ec84902db8..0351d1137607 100644
--- a/drivers/hid/intel-thc-hid/Kconfig
+++ b/drivers/hid/intel-thc-hid/Kconfig
@@ -7,7 +7,6 @@ menu "Intel THC HID Support"
config INTEL_THC_HID
tristate "Intel Touch Host Controller"
depends on ACPI
- select HID
help
THC (Touch Host Controller) is the name of the IP block in PCH that
interfaces with Touch Devices (ex: touchscreen, touchpad etc.). It
diff --git a/drivers/hid/surface-hid/Kconfig b/drivers/hid/surface-hid/Kconfig
index 7ce9b5d641eb..d0cfd0d29926 100644
--- a/drivers/hid/surface-hid/Kconfig
+++ b/drivers/hid/surface-hid/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0+
menu "Surface System Aggregator Module HID support"
depends on SURFACE_AGGREGATOR
- depends on INPUT
config SURFACE_HID
tristate "HID transport driver for Surface System Aggregator Module"
@@ -39,4 +38,3 @@ endmenu
config SURFACE_HID_CORE
tristate
- select HID
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index 7c2032f7f44d..f3194767a45e 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -5,8 +5,7 @@ menu "USB HID support"
config USB_HID
tristate "USB HID transport layer"
default y
- depends on USB && INPUT
- select HID
+ depends on HID
help
Say Y here if you want to connect USB keyboards,
mice, joysticks, graphic tablets, or any other HID based devices
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 5546184df05f..7ad1ad5c8c3f 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -1300,12 +1300,14 @@ new_device_store(struct device *dev, struct device_attribute *attr,
info.flags |= I2C_CLIENT_SLAVE;
}
- info.flags |= I2C_CLIENT_USER;
-
client = i2c_new_client_device(adap, &info);
if (IS_ERR(client))
return PTR_ERR(client);
+ /* Keep track of the added device */
+ mutex_lock(&adap->userspace_clients_lock);
+ list_add_tail(&client->detected, &adap->userspace_clients);
+ mutex_unlock(&adap->userspace_clients_lock);
dev_info(dev, "%s: Instantiated device %s at 0x%02hx\n", "new_device",
info.type, info.addr);
@@ -1313,15 +1315,6 @@ new_device_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_WO(new_device);
-static int __i2c_find_user_addr(struct device *dev, const void *addrp)
-{
- struct i2c_client *client = i2c_verify_client(dev);
- unsigned short addr = *(unsigned short *)addrp;
-
- return client && client->flags & I2C_CLIENT_USER &&
- i2c_encode_flags_to_addr(client) == addr;
-}
-
/*
* And of course let the users delete the devices they instantiated, if
* they got it wrong. This interface can only be used to delete devices
@@ -1336,7 +1329,7 @@ delete_device_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_adapter *adap = to_i2c_adapter(dev);
- struct device *child_dev;
+ struct i2c_client *client, *next;
unsigned short addr;
char end;
int res;
@@ -1352,19 +1345,28 @@ delete_device_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- mutex_lock(&core_lock);
/* Make sure the device was added through sysfs */
- child_dev = device_find_child(&adap->dev, &addr, __i2c_find_user_addr);
- if (child_dev) {
- i2c_unregister_device(i2c_verify_client(child_dev));
- put_device(child_dev);
- } else {
- dev_err(dev, "Can't find userspace-created device at %#x\n", addr);
- count = -ENOENT;
+ res = -ENOENT;
+ mutex_lock_nested(&adap->userspace_clients_lock,
+ i2c_adapter_depth(adap));
+ list_for_each_entry_safe(client, next, &adap->userspace_clients,
+ detected) {
+ if (i2c_encode_flags_to_addr(client) == addr) {
+ dev_info(dev, "%s: Deleting device %s at 0x%02hx\n",
+ "delete_device", client->name, client->addr);
+
+ list_del(&client->detected);
+ i2c_unregister_device(client);
+ res = count;
+ break;
+ }
}
- mutex_unlock(&core_lock);
+ mutex_unlock(&adap->userspace_clients_lock);
- return count;
+ if (res < 0)
+ dev_err(dev, "%s: Can't find device in list\n",
+ "delete_device");
+ return res;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
delete_device_store);
@@ -1535,6 +1537,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
adap->locked_flags = 0;
rt_mutex_init(&adap->bus_lock);
rt_mutex_init(&adap->mux_lock);
+ mutex_init(&adap->userspace_clients_lock);
+ INIT_LIST_HEAD(&adap->userspace_clients);
/* Set default timeout to 1 second if not already set */
if (adap->timeout == 0)
@@ -1700,6 +1704,23 @@ int i2c_add_numbered_adapter(struct i2c_adapter *adap)
}
EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);
+static void i2c_do_del_adapter(struct i2c_driver *driver,
+ struct i2c_adapter *adapter)
+{
+ struct i2c_client *client, *_n;
+
+ /* Remove the devices we created ourselves as the result of hardware
+ * probing (using a driver's detect method) */
+ list_for_each_entry_safe(client, _n, &driver->clients, detected) {
+ if (client->adapter == adapter) {
+ dev_dbg(&adapter->dev, "Removing %s at 0x%x\n",
+ client->name, client->addr);
+ list_del(&client->detected);
+ i2c_unregister_device(client);
+ }
+ }
+}
+
static int __unregister_client(struct device *dev, void *dummy)
{
struct i2c_client *client = i2c_verify_client(dev);
@@ -1715,6 +1736,12 @@ static int __unregister_dummy(struct device *dev, void *dummy)
return 0;
}
+static int __process_removed_adapter(struct device_driver *d, void *data)
+{
+ i2c_do_del_adapter(to_i2c_driver(d), data);
+ return 0;
+}
+
/**
* i2c_del_adapter - unregister I2C adapter
* @adap: the adapter being unregistered
@@ -1726,6 +1753,7 @@ static int __unregister_dummy(struct device *dev, void *dummy)
void i2c_del_adapter(struct i2c_adapter *adap)
{
struct i2c_adapter *found;
+ struct i2c_client *client, *next;
/* First make sure that this adapter was ever added */
mutex_lock(&core_lock);
@@ -1737,16 +1765,31 @@ void i2c_del_adapter(struct i2c_adapter *adap)
}
i2c_acpi_remove_space_handler(adap);
+ /* Tell drivers about this removal */
+ mutex_lock(&core_lock);
+ bus_for_each_drv(&i2c_bus_type, NULL, adap,
+ __process_removed_adapter);
+ mutex_unlock(&core_lock);
+
+ /* Remove devices instantiated from sysfs */
+ mutex_lock_nested(&adap->userspace_clients_lock,
+ i2c_adapter_depth(adap));
+ list_for_each_entry_safe(client, next, &adap->userspace_clients,
+ detected) {
+ dev_dbg(&adap->dev, "Removing %s at 0x%x\n", client->name,
+ client->addr);
+ list_del(&client->detected);
+ i2c_unregister_device(client);
+ }
+ mutex_unlock(&adap->userspace_clients_lock);
/* Detach any active clients. This can't fail, thus we do not
* check the returned value. This is a two-pass process, because
* we can't remove the dummy devices during the first pass: they
* could have been instantiated by real devices wishing to clean
* them up properly, so we give them a chance to do that first. */
- mutex_lock(&core_lock);
device_for_each_child(&adap->dev, NULL, __unregister_client);
device_for_each_child(&adap->dev, NULL, __unregister_dummy);
- mutex_unlock(&core_lock);
/* device name is gone after device_unregister */
dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
@@ -1966,6 +2009,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
/* add the driver to the list of i2c drivers in the driver core */
driver->driver.owner = owner;
driver->driver.bus = &i2c_bus_type;
+ INIT_LIST_HEAD(&driver->clients);
/* When registration returns, the driver core
* will have called probe() for all matching-but-unbound devices.
@@ -1983,13 +2027,10 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
}
EXPORT_SYMBOL(i2c_register_driver);
-static int __i2c_unregister_detected_client(struct device *dev, void *argp)
+static int __process_removed_driver(struct device *dev, void *data)
{
- struct i2c_client *client = i2c_verify_client(dev);
-
- if (client && client->flags & I2C_CLIENT_AUTO)
- i2c_unregister_device(client);
-
+ if (dev->type == &i2c_adapter_type)
+ i2c_do_del_adapter(data, to_i2c_adapter(dev));
return 0;
}
@@ -2000,12 +2041,7 @@ static int __i2c_unregister_detected_client(struct device *dev, void *argp)
*/
void i2c_del_driver(struct i2c_driver *driver)
{
- mutex_lock(&core_lock);
- /* Satisfy __must_check, function can't fail */
- if (driver_for_each_device(&driver->driver, NULL, NULL,
- __i2c_unregister_detected_client)) {
- }
- mutex_unlock(&core_lock);
+ i2c_for_each_dev(driver, __process_removed_driver);
driver_unregister(&driver->driver);
pr_debug("driver [%s] unregistered\n", driver->driver.name);
@@ -2432,7 +2468,6 @@ static int i2c_detect_address(struct i2c_client *temp_client,
/* Finally call the custom detection function */
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = addr;
- info.flags = I2C_CLIENT_AUTO;
err = driver->detect(temp_client, &info);
if (err) {
/* -ENODEV is returned if the detection fails. We catch it
@@ -2459,7 +2494,9 @@ static int i2c_detect_address(struct i2c_client *temp_client,
dev_dbg(&adapter->dev, "Creating %s at 0x%02x\n",
info.type, info.addr);
client = i2c_new_client_device(adapter, &info);
- if (IS_ERR(client))
+ if (!IS_ERR(client))
+ list_add_tail(&client->detected, &driver->clients);
+ else
dev_err(&adapter->dev, "Failed creating %s at 0x%02x\n",
info.type, info.addr);
}
@@ -2469,7 +2506,7 @@ static int i2c_detect_address(struct i2c_client *temp_client,
static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
{
const unsigned short *address_list;
- struct i2c_client temp_client;
+ struct i2c_client *temp_client;
int i, err = 0;
address_list = driver->address_list;
@@ -2490,19 +2527,24 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
return 0;
/* Set up a temporary client to help detect callback */
- memset(&temp_client, 0, sizeof(temp_client));
- temp_client.adapter = adapter;
+ temp_client = kzalloc(sizeof(*temp_client), GFP_KERNEL);
+ if (!temp_client)
+ return -ENOMEM;
+
+ temp_client->adapter = adapter;
for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) {
dev_dbg(&adapter->dev,
"found normal entry for adapter %d, addr 0x%02x\n",
i2c_adapter_id(adapter), address_list[i]);
- temp_client.addr = address_list[i];
- err = i2c_detect_address(&temp_client, driver);
+ temp_client->addr = address_list[i];
+ err = i2c_detect_address(temp_client, driver);
if (unlikely(err))
break;
}
+ kfree(temp_client);
+
return err;
}
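The i2c-core changes above return to tracking sysfs-instantiated clients on a per-adapter list (and detect()-created clients on a per-driver list), walking them with list_for_each_entry_safe() so entries can be unlinked mid-iteration. A minimal sketch of that delete-while-iterating pattern, with hypothetical foo_* types standing in for the i2c structures:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct foo_client {
	unsigned short addr;
	struct list_head node;
};

struct foo_adapter {
	struct mutex clients_lock;
	struct list_head clients;
};

/* Remove and free every client at @addr; the _safe variant tolerates
 * list_del() on the current entry while walking the list. */
static int foo_delete_clients(struct foo_adapter *adap, unsigned short addr)
{
	struct foo_client *client, *next;
	int ret = -ENOENT;

	mutex_lock(&adap->clients_lock);
	list_for_each_entry_safe(client, next, &adap->clients, node) {
		if (client->addr != addr)
			continue;
		list_del(&client->node);
		kfree(client);
		ret = 0;
	}
	mutex_unlock(&adap->clients_lock);

	return ret;
}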
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 0bbda60d3cdc..23caea22f8dc 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -175,6 +175,7 @@
#define CONTROL_GAM_EN 25
#define CONTROL_GALOG_EN 28
#define CONTROL_GAINT_EN 29
+#define CONTROL_EPH_EN 45
#define CONTROL_XT_EN 50
#define CONTROL_INTCAPXT_EN 51
#define CONTROL_IRTCACHEDIS 59
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index c5cd92edada0..cb536d372b12 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -2653,6 +2653,10 @@ static void iommu_init_flags(struct amd_iommu *iommu)
/* Set IOTLB invalidation timeout to 1s */
iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
+
+ /* Enable Enhanced Peripheral Page Request Handling */
+ if (check_feature(FEATURE_EPHSUP))
+ iommu_feature_enable(iommu, CONTROL_EPH_EN);
}
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
@@ -3194,7 +3198,7 @@ out:
return true;
}
-static void iommu_snp_enable(void)
+static __init void iommu_snp_enable(void)
{
#ifdef CONFIG_KVM_AMD_SEV
if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
@@ -3219,6 +3223,14 @@ static void iommu_snp_enable(void)
goto disable_snp;
}
+ /*
+ * Enable host SNP support once SNP support has been verified on the IOMMU.
+ */
+ if (snp_rmptable_init()) {
+ pr_warn("SNP: RMP initialization failed, SNP cannot be supported.\n");
+ goto disable_snp;
+ }
+
pr_info("IOMMU SNP support enabled.\n");
return;
@@ -3318,6 +3330,19 @@ static int __init iommu_go_to_state(enum iommu_init_state state)
ret = state_next();
}
+ /*
+ * SNP platform initialization requires IOMMUs to be fully configured.
+ * If the SNP support on IOMMUs has NOT been checked, simply mark SNP
+ * as unsupported. If the SNP support on IOMMUs has been checked and
+ * host SNP support enabled but RMP enforcement has not been enabled
+ * in IOMMUs, then the system is in a half-baked state, but can limp
+ * along as all memory should be Hypervisor-Owned in the RMP. WARN,
+ * but leave SNP as "supported" to avoid confusing the kernel.
+ */
+ if (ret && cc_platform_has(CC_ATTR_HOST_SEV_SNP) &&
+ !WARN_ON_ONCE(amd_iommu_snp_en))
+ cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
+
return ret;
}
@@ -3426,18 +3451,23 @@ void __init amd_iommu_detect(void)
int ret;
if (no_iommu || (iommu_detected && !gart_iommu_aperture))
- return;
+ goto disable_snp;
if (!amd_iommu_sme_check())
- return;
+ goto disable_snp;
ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
if (ret)
- return;
+ goto disable_snp;
amd_iommu_detected = true;
iommu_detected = 1;
x86_init.iommu.iommu_init = amd_iommu_init;
+ return;
+
+disable_snp:
+ if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
+ cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
}
/****************************************************************************
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index c666ecab955d..69e23e017d9e 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -249,7 +249,7 @@ struct exynos_iommu_domain {
struct list_head clients; /* list of sysmmu_drvdata.domain_node */
sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */
short *lv2entcnt; /* free lv2 entry counter for each section */
- spinlock_t lock; /* lock for modyfying list of clients */
+ spinlock_t lock; /* lock for modifying list of clients */
spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
struct iommu_domain domain; /* generic domain data structure */
};
@@ -292,7 +292,7 @@ struct sysmmu_drvdata {
struct clk *aclk; /* SYSMMU's aclk clock */
struct clk *pclk; /* SYSMMU's pclk clock */
struct clk *clk_master; /* master's device clock */
- spinlock_t lock; /* lock for modyfying state */
+ spinlock_t lock; /* lock for modifying state */
bool active; /* current status */
struct exynos_iommu_domain *domain; /* domain we belong to */
struct list_head domain_node; /* node for domain clients list */
@@ -746,7 +746,7 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
dev_name(dev), data);
if (ret) {
- dev_err(dev, "Unabled to register handler of irq %d\n", irq);
+ dev_err(dev, "Unable to register handler of irq %d\n", irq);
return ret;
}
diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c
index c2d792db52c3..064194399b38 100644
--- a/drivers/iommu/intel/prq.c
+++ b/drivers/iommu/intel/prq.c
@@ -87,7 +87,9 @@ prq_retry:
struct page_req_dsc *req;
req = &iommu->prq[head / sizeof(*req)];
- if (!req->pasid_present || req->pasid != pasid) {
+ if (req->rid != sid ||
+ (req->pasid_present && pasid != req->pasid) ||
+ (!req->pasid_present && pasid != IOMMU_NO_PASID)) {
head = (head + sizeof(*req)) & PRQ_RING_MASK;
continue;
}
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index 4674e618797c..8b5926c1452e 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -478,6 +478,7 @@ void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
ops->page_response(dev, iopf, &resp);
list_del_init(&group->pending_node);
+ iopf_free_group(group);
}
mutex_unlock(&fault_param->lock);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 870c3cdbd0f6..60aed01e54f2 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1756,7 +1756,7 @@ static int iommu_get_def_domain_type(struct iommu_group *group,
group->id);
/*
- * Try to recover, drivers are allowed to force IDENITY or DMA, IDENTITY
+ * Try to recover, drivers are allowed to force IDENTITY or DMA, IDENTITY
* takes precedence.
*/
if (type == IOMMU_DOMAIN_IDENTITY)
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index be063bfb50c4..c11b9965c4ad 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -169,6 +169,7 @@ config IXP4XX_IRQ
config LAN966X_OIC
tristate "Microchip LAN966x OIC Support"
+ depends on MCHP_LAN966X_PCI || COMPILE_TEST
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
help
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index da5250f0155c..2b1684c60e3c 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -577,7 +577,8 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
}
- if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
+ if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
+ (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
int irq;
if (cpumask_test_cpu(smp_processor_id(),
&aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 76dce0aac246..270d7a4d85a6 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -44,6 +44,7 @@ static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI;
#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 2)
+#define FLAGS_WORKAROUND_INSECURE (1ULL << 3)
#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
@@ -83,6 +84,8 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
+static bool nmi_support_forbidden;
+
/*
* There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs
* are potentially stolen by the secure side. Some code, especially code dealing
@@ -163,21 +166,27 @@ static void __init gic_prio_init(void)
{
bool ds;
- ds = gic_dist_security_disabled();
- if (!ds) {
- u32 val;
-
- val = readl_relaxed(gic_data.dist_base + GICD_CTLR);
- val |= GICD_CTLR_DS;
- writel_relaxed(val, gic_data.dist_base + GICD_CTLR);
+ cpus_have_group0 = gic_has_group0();
- ds = gic_dist_security_disabled();
- if (ds)
- pr_warn("Broken GIC integration, security disabled");
+ ds = gic_dist_security_disabled();
+ if ((gic_data.flags & FLAGS_WORKAROUND_INSECURE) && !ds) {
+ if (cpus_have_group0) {
+ u32 val;
+
+ val = readl_relaxed(gic_data.dist_base + GICD_CTLR);
+ val |= GICD_CTLR_DS;
+ writel_relaxed(val, gic_data.dist_base + GICD_CTLR);
+
+ ds = gic_dist_security_disabled();
+ if (ds)
+ pr_warn("Broken GIC integration, security disabled\n");
+ } else {
+ pr_warn("Broken GIC integration, pNMI forbidden\n");
+ nmi_support_forbidden = true;
+ }
}
cpus_have_security_disabled = ds;
- cpus_have_group0 = gic_has_group0();
/*
* How priority values are used by the GIC depends on two things:
@@ -209,7 +218,7 @@ static void __init gic_prio_init(void)
* be in the non-secure range, we program the non-secure values into
* the distributor to match the PMR values we want.
*/
- if (cpus_have_group0 & !cpus_have_security_disabled) {
+ if (cpus_have_group0 && !cpus_have_security_disabled) {
dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq);
dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi);
}
@@ -1922,6 +1931,18 @@ static bool gic_enable_quirk_arm64_2941627(void *data)
return true;
}
+static bool gic_enable_quirk_rk3399(void *data)
+{
+ struct gic_chip_data *d = data;
+
+ if (of_machine_is_compatible("rockchip,rk3399")) {
+ d->flags |= FLAGS_WORKAROUND_INSECURE;
+ return true;
+ }
+
+ return false;
+}
+
static bool rd_set_non_coherent(void *data)
{
struct gic_chip_data *d = data;
@@ -1997,6 +2018,12 @@ static const struct gic_quirk gic_quirks[] = {
.init = rd_set_non_coherent,
},
{
+ .desc = "GICv3: Insecure RK3399 integration",
+ .iidr = 0x0000043b,
+ .mask = 0xff000fff,
+ .init = gic_enable_quirk_rk3399,
+ },
+ {
}
};
@@ -2004,7 +2031,7 @@ static void gic_enable_nmi_support(void)
{
int i;
- if (!gic_prio_masking_enabled())
+ if (!gic_prio_masking_enabled() || nmi_support_forbidden)
return;
rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR,
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
index b9dcc8e78c75..1f613eb7b7f0 100644
--- a/drivers/irqchip/irq-jcore-aic.c
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -38,7 +38,7 @@ static struct irq_chip jcore_aic;
static void handle_jcore_irq(struct irq_desc *desc)
{
if (irqd_is_per_cpu(irq_desc_get_irq_data(desc)))
- handle_percpu_irq(desc);
+ handle_percpu_devid_irq(desc);
else
handle_simple_irq(desc);
}
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index b337f6c05f18..4eebed39880a 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -68,7 +68,8 @@ static int mvebu_icu_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *hwirq, unsigned int *type)
{
unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;
- struct mvebu_icu_msi_data *msi_data = d->host_data;
+ struct msi_domain_info *info = d->host_data;
+ struct mvebu_icu_msi_data *msi_data = info->chip_data;
struct mvebu_icu *icu = msi_data->icu;
/* Check the count of the parameters in dt */
diff --git a/drivers/irqchip/irq-partition-percpu.c b/drivers/irqchip/irq-partition-percpu.c
index 8e76d2913e6b..4441ffe149ea 100644
--- a/drivers/irqchip/irq-partition-percpu.c
+++ b/drivers/irqchip/irq-partition-percpu.c
@@ -98,7 +98,7 @@ static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
- seq_printf(p, " %5s-%lu", chip->name, data->hwirq);
+ seq_printf(p, "%5s-%lu", chip->name, data->hwirq);
}
static struct irq_chip partition_irq_chip = {
diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
index c5c2e6929a2f..275df5005705 100644
--- a/drivers/irqchip/irq-riscv-imsic-early.c
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -27,7 +27,7 @@ static void imsic_ipi_send(unsigned int cpu)
{
struct imsic_local_config *local = per_cpu_ptr(imsic->global.local, cpu);
- writel_relaxed(IMSIC_IPI_ID, local->msi_va);
+ writel(IMSIC_IPI_ID, local->msi_va);
}
static void imsic_ipi_starting_cpu(void)
diff --git a/drivers/irqchip/irq-thead-c900-aclint-sswi.c b/drivers/irqchip/irq-thead-c900-aclint-sswi.c
index b0e366ade427..8ff6e7a1363b 100644
--- a/drivers/irqchip/irq-thead-c900-aclint-sswi.c
+++ b/drivers/irqchip/irq-thead-c900-aclint-sswi.c
@@ -31,7 +31,7 @@ static DEFINE_PER_CPU(void __iomem *, sswi_cpu_regs);
static void thead_aclint_sswi_ipi_send(unsigned int cpu)
{
- writel_relaxed(0x1, per_cpu(sswi_cpu_regs, cpu));
+ writel(0x1, per_cpu(sswi_cpu_regs, cpu));
}
static void thead_aclint_sswi_ipi_clear(void)
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index 74b2f124116e..52d77546aacb 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -21,9 +21,11 @@
#include <linux/types.h>
#define PDC_MAX_GPIO_IRQS 256
+#define PDC_DRV_OFFSET 0x10000
/* Valid only on HW version < 3.2 */
#define IRQ_ENABLE_BANK 0x10
+#define IRQ_ENABLE_BANK_MAX (IRQ_ENABLE_BANK + BITS_TO_BYTES(PDC_MAX_GPIO_IRQS))
#define IRQ_i_CFG 0x110
/* Valid only on HW version >= 3.2 */
@@ -46,13 +48,20 @@ struct pdc_pin_region {
static DEFINE_RAW_SPINLOCK(pdc_lock);
static void __iomem *pdc_base;
+static void __iomem *pdc_prev_base;
static struct pdc_pin_region *pdc_region;
static int pdc_region_cnt;
static unsigned int pdc_version;
+static bool pdc_x1e_quirk;
+
+static void pdc_base_reg_write(void __iomem *base, int reg, u32 i, u32 val)
+{
+ writel_relaxed(val, base + reg + i * sizeof(u32));
+}
static void pdc_reg_write(int reg, u32 i, u32 val)
{
- writel_relaxed(val, pdc_base + reg + i * sizeof(u32));
+ pdc_base_reg_write(pdc_base, reg, i, val);
}
static u32 pdc_reg_read(int reg, u32 i)
@@ -60,6 +69,34 @@ static u32 pdc_reg_read(int reg, u32 i)
return readl_relaxed(pdc_base + reg + i * sizeof(u32));
}
+static void pdc_x1e_irq_enable_write(u32 bank, u32 enable)
+{
+ void __iomem *base;
+
+ /* Remap the write access to work around a hardware bug on X1E */
+ switch (bank) {
+ case 0 ... 1:
+ /* Use previous DRV (client) region and shift to bank 3-4 */
+ base = pdc_prev_base;
+ bank += 3;
+ break;
+ case 2 ... 4:
+ /* Use our own region and shift to bank 0-2 */
+ base = pdc_base;
+ bank -= 2;
+ break;
+ case 5:
+ /* No fixup required for bank 5 */
+ base = pdc_base;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ pdc_base_reg_write(base, IRQ_ENABLE_BANK, bank, enable);
+}
+
static void __pdc_enable_intr(int pin_out, bool on)
{
unsigned long enable;
@@ -72,7 +109,11 @@ static void __pdc_enable_intr(int pin_out, bool on)
enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
__assign_bit(mask, &enable, on);
- pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
+
+ if (pdc_x1e_quirk)
+ pdc_x1e_irq_enable_write(index, enable);
+ else
+ pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
} else {
enable = pdc_reg_read(IRQ_i_CFG, pin_out);
__assign_bit(IRQ_i_CFG_IRQ_ENABLE, &enable, on);
@@ -324,10 +365,29 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
if (res_size > resource_size(&res))
pr_warn("%pOF: invalid reg size, please fix DT\n", node);
+ /*
+ * PDC has multiple DRV regions, each one provides the same set of
+ * registers for a particular client in the system. Due to a hardware
+ * bug on X1E, some writes to the IRQ_ENABLE_BANK register must be
+ * issued inside the previous region. This region belongs to
+ * a different client and is not described in the device tree. Map the
+ * region with the expected offset to preserve support for old DTs.
+ */
+ if (of_device_is_compatible(node, "qcom,x1e80100-pdc")) {
+ pdc_prev_base = ioremap(res.start - PDC_DRV_OFFSET, IRQ_ENABLE_BANK_MAX);
+ if (!pdc_prev_base) {
+ pr_err("%pOF: unable to map previous PDC DRV region\n", node);
+ return -ENXIO;
+ }
+
+ pdc_x1e_quirk = true;
+ }
+
pdc_base = ioremap(res.start, res_size);
if (!pdc_base) {
pr_err("%pOF: unable to map PDC registers\n", node);
- return -ENXIO;
+ ret = -ENXIO;
+ goto fail;
}
pdc_version = pdc_reg_read(PDC_VERSION_REG, 0);
@@ -363,6 +423,7 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
fail:
kfree(pdc_region);
iounmap(pdc_base);
+ iounmap(pdc_prev_base);
return ret;
}
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index a382929ce7ba..369aed044b40 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -76,10 +76,8 @@ static int linear_set_limits(struct mddev *mddev)
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
- if (err) {
- queue_limits_cancel_update(mddev->gendisk->queue);
+ if (err)
return err;
- }
return queue_limits_set(mddev->gendisk->queue, &lim);
}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 8fc9339b00c7..70bcc3cdf2cd 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -386,10 +386,8 @@ static int raid0_set_limits(struct mddev *mddev)
lim.io_opt = lim.io_min * mddev->raid_disks;
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
- if (err) {
- queue_limits_cancel_update(mddev->gendisk->queue);
+ if (err)
return err;
- }
return queue_limits_set(mddev->gendisk->queue, &lim);
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 9d57a88dbd26..10ea3af40991 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -3219,10 +3219,8 @@ static int raid1_set_limits(struct mddev *mddev)
lim.max_write_zeroes_sectors = 0;
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
- if (err) {
- queue_limits_cancel_update(mddev->gendisk->queue);
+ if (err)
return err;
- }
return queue_limits_set(mddev->gendisk->queue, &lim);
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index efe93b979167..15b9ae5bf84d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4020,10 +4020,8 @@ static int raid10_set_queue_limits(struct mddev *mddev)
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
- if (err) {
- queue_limits_cancel_update(mddev->gendisk->queue);
+ if (err)
return err;
- }
return queue_limits_set(mddev->gendisk->queue, &lim);
}
diff --git a/drivers/media/cec/i2c/Kconfig b/drivers/media/cec/i2c/Kconfig
index d912d143fb31..b9d21643eef1 100644
--- a/drivers/media/cec/i2c/Kconfig
+++ b/drivers/media/cec/i2c/Kconfig
@@ -13,3 +13,12 @@ config CEC_CH7322
generic CEC framework interface.
CEC bus is present in the HDMI connector and enables communication
between compatible devices.
+
+config CEC_NXP_TDA9950
+ tristate "NXP Semiconductors TDA9950/TDA998X HDMI CEC"
+ select CEC_NOTIFIER
+ select CEC_CORE
+ default DRM_I2C_NXP_TDA998X
+ help
+ This is a driver for the NXP TDA9950 CEC controller and for the CEC
+ controller block integrated into several NXP TDA998x HDMI encoders.
diff --git a/drivers/media/cec/i2c/Makefile b/drivers/media/cec/i2c/Makefile
index d7496dfd0fa4..95c9eda52583 100644
--- a/drivers/media/cec/i2c/Makefile
+++ b/drivers/media/cec/i2c/Makefile
@@ -3,3 +3,4 @@
# Makefile for the CEC I2C device drivers.
#
obj-$(CONFIG_CEC_CH7322) += ch7322.o
+obj-$(CONFIG_CEC_NXP_TDA9950) += tda9950.o
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/media/cec/i2c/tda9950.c
index cbff851e0c85..cbff851e0c85 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/media/cec/i2c/tda9950.c
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 226915ca3c93..aa4a9940b569 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -159,6 +159,7 @@ err_regmap:
}
static struct regmap *device_node_get_regmap(struct device_node *np,
+ bool create_regmap,
bool check_res)
{
struct syscon *entry, *syscon = NULL;
@@ -172,7 +173,7 @@ static struct regmap *device_node_get_regmap(struct device_node *np,
}
if (!syscon) {
- if (of_device_is_compatible(np, "syscon"))
+ if (create_regmap)
syscon = of_syscon_register(np, check_res);
else
syscon = ERR_PTR(-EINVAL);
@@ -233,15 +234,37 @@ err_unlock:
}
EXPORT_SYMBOL_GPL(of_syscon_register_regmap);
+/**
+ * device_node_to_regmap() - Get or create a regmap for specified device node
+ * @np: Device tree node
+ *
+ * Get a regmap for the specified device node. If there's not an existing
+ * regmap, then one is instantiated. This function should not be used if the
+ * device node has a custom regmap driver or has resources (clocks, resets) to
+ * be managed. Use syscon_node_to_regmap() instead for those cases.
+ *
+ * Return: regmap ptr on success, negative error code on failure.
+ */
struct regmap *device_node_to_regmap(struct device_node *np)
{
- return device_node_get_regmap(np, false);
+ return device_node_get_regmap(np, true, false);
}
EXPORT_SYMBOL_GPL(device_node_to_regmap);
+/**
+ * syscon_node_to_regmap() - Get or create a regmap for specified syscon device node
+ * @np: Device tree node
+ *
+ * Get a regmap for the specified device node. If there's not an existing
+ * regmap, then one is instantiated if the node is a generic "syscon". This
+ * function is safe to use for a syscon registered with
+ * of_syscon_register_regmap().
+ *
+ * Return: regmap ptr on success, negative error code on failure.
+ */
struct regmap *syscon_node_to_regmap(struct device_node *np)
{
- return device_node_get_regmap(np, true);
+ return device_node_get_regmap(np, of_device_is_compatible(np, "syscon"), true);
}
EXPORT_SYMBOL_GPL(syscon_node_to_regmap);
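The kernel-doc added above draws the line between device_node_to_regmap(), which instantiates a regmap for any node, and syscon_node_to_regmap(), which only creates one for a generic "syscon" node but is safe for regmaps registered via of_syscon_register_regmap(). A consumer-side sketch; the node pointer and the 0x04 offset are assumptions for illustration, not taken from this patch:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/types.h>

/* Read one register from a syscon node handed to us by the caller. */
static int foo_read_status(struct device_node *syscon_np, u32 *status)
{
	struct regmap *map;

	map = syscon_node_to_regmap(syscon_np);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return regmap_read(map, 0x04, status);	/* hypothetical offset */
}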
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 4b6e91372526..345ea91629e0 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -273,6 +273,7 @@
#define MSDC_PAD_TUNE_CMD2_SEL BIT(21) /* RW */
#define PAD_DS_TUNE_DLY_SEL BIT(0) /* RW */
+#define PAD_DS_TUNE_DLY2_SEL BIT(1) /* RW */
#define PAD_DS_TUNE_DLY1 GENMASK(6, 2) /* RW */
#define PAD_DS_TUNE_DLY2 GENMASK(11, 7) /* RW */
#define PAD_DS_TUNE_DLY3 GENMASK(16, 12) /* RW */
@@ -318,6 +319,7 @@
/* EMMC50_PAD_DS_TUNE mask */
#define PAD_DS_DLY_SEL BIT(16) /* RW */
+#define PAD_DS_DLY2_SEL BIT(15) /* RW */
#define PAD_DS_DLY1 GENMASK(14, 10) /* RW */
#define PAD_DS_DLY3 GENMASK(4, 0) /* RW */
@@ -2504,13 +2506,23 @@ tune_done:
static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct msdc_host *host = mmc_priv(mmc);
+
host->hs400_mode = true;
- if (host->top_base)
- writel(host->hs400_ds_delay,
- host->top_base + EMMC50_PAD_DS_TUNE);
- else
- writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+ if (host->top_base) {
+ if (host->hs400_ds_dly3)
+ sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY3, host->hs400_ds_dly3);
+ if (host->hs400_ds_delay)
+ writel(host->hs400_ds_delay,
+ host->top_base + EMMC50_PAD_DS_TUNE);
+ } else {
+ if (host->hs400_ds_dly3)
+ sdr_set_field(host->base + PAD_DS_TUNE,
+ PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
+ if (host->hs400_ds_delay)
+ writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+ }
/* hs400 mode must set it to 0 */
sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
/* to improve read performance, set outstanding to 2 */
@@ -2530,14 +2542,11 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
if (host->top_base) {
sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE,
PAD_DS_DLY_SEL);
- if (host->hs400_ds_dly3)
- sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
- PAD_DS_DLY3, host->hs400_ds_dly3);
+ sdr_clr_bits(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY2_SEL);
} else {
sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL);
- if (host->hs400_ds_dly3)
- sdr_set_field(host->base + PAD_DS_TUNE,
- PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
+ sdr_clr_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY2_SEL);
}
host->hs400_tuning = true;
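
The HS400 hunks above rely on the driver's sdr_set_field()/sdr_clr_bits() read-modify-write helpers. As a generic illustration only (this is not the driver's actual macro), a field update of that kind reduces to:

#include <linux/bitops.h>
#include <linux/io.h>

/* Illustrative read-modify-write of a register field given its mask. */
static void example_set_field(void __iomem *reg, u32 mask, u32 val)
{
	u32 tmp = readl(reg);

	tmp &= ~mask;
	tmp |= (val << (ffs(mask) - 1)) & mask;
	writel(tmp, reg);
}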
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index b73f673db92b..f75c31815ab0 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -155,7 +155,6 @@ struct sdhci_am654_data {
u32 tuning_loop;
#define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
-#define SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA BIT(1)
};
struct window {
@@ -357,29 +356,6 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
sdhci_set_clock(host, clock);
}
-static int sdhci_am654_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
-{
- struct sdhci_host *host = mmc_priv(mmc);
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
- int ret;
-
- if ((sdhci_am654->quirks & SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA) &&
- ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
- if (!IS_ERR(mmc->supply.vqmmc)) {
- ret = mmc_regulator_set_vqmmc(mmc, ios);
- if (ret < 0) {
- pr_err("%s: Switching to 1.8V signalling voltage failed,\n",
- mmc_hostname(mmc));
- return -EIO;
- }
- }
- return 0;
- }
-
- return sdhci_start_signal_voltage_switch(mmc, ios);
-}
-
static u8 sdhci_am654_write_power_on(struct sdhci_host *host, u8 val, int reg)
{
writeb(val, host->ioaddr + reg);
@@ -868,11 +844,6 @@ static int sdhci_am654_get_of_property(struct platform_device *pdev,
if (device_property_read_bool(dev, "ti,fails-without-test-cd"))
sdhci_am654->quirks |= SDHCI_AM654_QUIRK_FORCE_CDTEST;
- /* Suppress v1p8 ena for eMMC and SD with vqmmc supply */
- if (!!of_parse_phandle(dev->of_node, "vmmc-supply", 0) ==
- !!of_parse_phandle(dev->of_node, "vqmmc-supply", 0))
- sdhci_am654->quirks |= SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA;
-
sdhci_get_of_property(pdev);
return 0;
@@ -969,7 +940,6 @@ static int sdhci_am654_probe(struct platform_device *pdev)
goto err_pltfm_free;
}
- host->mmc_host_ops.start_signal_voltage_switch = sdhci_am654_start_signal_voltage_switch;
host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
pm_runtime_get_noresume(dev);
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 8d1d710e439d..6667eea95597 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -471,6 +471,8 @@ struct cdns_nand_ctrl {
struct {
void __iomem *virt;
dma_addr_t dma;
+ dma_addr_t iova_dma;
+ u32 size;
} io;
int irq;
@@ -1835,11 +1837,11 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
}
if (dir == DMA_FROM_DEVICE) {
- src_dma = cdns_ctrl->io.dma;
+ src_dma = cdns_ctrl->io.iova_dma;
dst_dma = buf_dma;
} else {
src_dma = buf_dma;
- dst_dma = cdns_ctrl->io.dma;
+ dst_dma = cdns_ctrl->io.iova_dma;
}
tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
@@ -1861,12 +1863,12 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
dma_async_issue_pending(cdns_ctrl->dmac);
wait_for_completion(&finished);
- dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
+ dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
return 0;
err_unmap:
- dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
+ dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
err:
dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
@@ -2869,6 +2871,7 @@ cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
{
dma_cap_mask_t mask;
+ struct dma_device *dma_dev = cdns_ctrl->dmac->device;
int ret;
cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
@@ -2904,15 +2907,24 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
dma_cap_set(DMA_MEMCPY, mask);
if (cdns_ctrl->caps1->has_dma) {
- cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
- if (!cdns_ctrl->dmac) {
- dev_err(cdns_ctrl->dev,
- "Unable to get a DMA channel\n");
- ret = -EBUSY;
+ cdns_ctrl->dmac = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(cdns_ctrl->dmac)) {
+ ret = dev_err_probe(cdns_ctrl->dev, PTR_ERR(cdns_ctrl->dmac),
+ "Failed to get a DMA channel\n");
goto disable_irq;
}
}
+ cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma,
+ cdns_ctrl->io.size,
+ DMA_BIDIRECTIONAL, 0);
+
+ ret = dma_mapping_error(dma_dev->dev, cdns_ctrl->io.iova_dma);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "Failed to map I/O resource to DMA\n");
+ goto dma_release_chnl;
+ }
+
nand_controller_init(&cdns_ctrl->controller);
INIT_LIST_HEAD(&cdns_ctrl->chips);
@@ -2923,18 +2935,22 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
if (ret) {
dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
ret);
- goto dma_release_chnl;
+ goto unmap_dma_resource;
}
kfree(cdns_ctrl->buf);
cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
if (!cdns_ctrl->buf) {
ret = -ENOMEM;
- goto dma_release_chnl;
+ goto unmap_dma_resource;
}
return 0;
+unmap_dma_resource:
+ dma_unmap_resource(dma_dev->dev, cdns_ctrl->io.iova_dma,
+ cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0);
+
dma_release_chnl:
if (cdns_ctrl->dmac)
dma_release_channel(cdns_ctrl->dmac);
@@ -2956,6 +2972,10 @@ free_buf_desc:
static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
{
cadence_nand_chips_cleanup(cdns_ctrl);
+ if (cdns_ctrl->dmac)
+ dma_unmap_resource(cdns_ctrl->dmac->device->dev,
+ cdns_ctrl->io.iova_dma, cdns_ctrl->io.size,
+ DMA_BIDIRECTIONAL, 0);
cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
kfree(cdns_ctrl->buf);
dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
@@ -3020,7 +3040,9 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev)
cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
if (IS_ERR(cdns_ctrl->io.virt))
return PTR_ERR(cdns_ctrl->io.virt);
+
cdns_ctrl->io.dma = res->start;
+ cdns_ctrl->io.size = resource_size(res);
dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
if (IS_ERR(dt->clk))
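
The error-path rework above hinges on mapping the controller's slave-DMA data port through the DMA engine's own device. A minimal sketch of that pattern, with a hypothetical helper name, assuming the channel has already been requested:

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>

static int example_map_io_for_dma(struct dma_chan *chan, phys_addr_t io_phys,
				  size_t io_size, dma_addr_t *iova)
{
	/* The DMA engine, not the NAND controller, touches this window. */
	struct device *dma_dev = chan->device->dev;

	*iova = dma_map_resource(dma_dev, io_phys, io_size,
				 DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dma_dev, *iova))
		return -ENOMEM;

	return 0;
}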
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index d2d2aeee42a7..6720b547892b 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -1881,18 +1881,18 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
nandc->regs->addr0 = 0;
nandc->regs->addr1 = 0;
- host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
- FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
- FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
- FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
-
- host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
- FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
- FIELD_PREP(CS_ACTIVE_BSY, 0) |
- FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
- FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
- FIELD_PREP(WIDE_FLASH, 0) |
- FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+ nandc->regs->cfg0 = cpu_to_le32(FIELD_PREP(CW_PER_PAGE_MASK, 0) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0));
+
+ nandc->regs->cfg1 = cpu_to_le32(FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, 0) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1));
if (!nandc->props->qpic_version2)
nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
index b5ad7118c49a..175211fe6a5e 100644
--- a/drivers/mtd/spi-nor/sst.c
+++ b/drivers/mtd/spi-nor/sst.c
@@ -174,7 +174,7 @@ static int sst_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
int ret;
nor->program_opcode = op;
- ret = spi_nor_write_data(nor, to, 1, buf);
+ ret = spi_nor_write_data(nor, to, len, buf);
if (ret < 0)
return ret;
WARN(ret != len, "While writing %zu byte written %i bytes\n", len, ret);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 6cba9717a6d8..399844809bbe 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -385,15 +385,16 @@ static int c_can_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
KBUILD_MODNAME, ret);
- goto exit_free_device;
+ goto exit_pm_runtime;
}
dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
KBUILD_MODNAME, priv->base, dev->irq);
return 0;
-exit_free_device:
+exit_pm_runtime:
pm_runtime_disable(priv->device);
+exit_free_device:
free_c_can_dev(dev);
exit:
dev_err(&pdev->dev, "probe failed\n");
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index 64c349fd4600..f65c1a1e05cc 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -867,10 +867,12 @@ static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
}
break;
case CAN_STATE_ERROR_ACTIVE:
- cf->can_id |= CAN_ERR_CNT;
- cf->data[1] = CAN_ERR_CRTL_ACTIVE;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
break;
default:
netdev_warn(ndev, "unhandled error state (%d:%s)!\n",
diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
index df18c85fc078..d9a937ba126c 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-core.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
@@ -622,7 +622,7 @@ rkcanfd_handle_rx_fifo_overflow_int(struct rkcanfd_priv *priv)
netdev_dbg(priv->ndev, "RX-FIFO overflow\n");
skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
- if (skb)
+ if (!skb)
return 0;
rkcanfd_get_berr_counter_corrected(priv, &bec);
diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
index eee20839d96f..0d155eb1b9e9 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
@@ -248,7 +248,11 @@ static int es58x_devlink_info_get(struct devlink *devlink,
return ret;
}
- return devlink_info_serial_number_put(req, es58x_dev->udev->serial);
+ if (es58x_dev->udev->serial)
+ ret = devlink_info_serial_number_put(req,
+ es58x_dev->udev->serial);
+
+ return ret;
}
const struct devlink_ops es58x_dl_ops = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index fe0e3e2a8117..71e50fc65c14 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -1441,7 +1441,9 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
aq_ptp_ring_free(self);
aq_ptp_free(self);
- if (likely(self->aq_fw_ops->deinit) && link_down) {
+ /* May be invoked during hot unplug. */
+ if (pci_device_is_present(self->pdev) &&
+ likely(self->aq_fw_ops->deinit) && link_down) {
mutex_lock(&self->fwreq_mutex);
self->aq_fw_ops->deinit(self->aq_hw);
mutex_unlock(&self->fwreq_mutex);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 0715ea5bf13e..3b082114f2e5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -41,9 +41,12 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
+ u32 phy_wolopts = 0;
- if (dev->phydev)
+ if (dev->phydev) {
phy_ethtool_get_wol(dev->phydev, wol);
+ phy_wolopts = wol->wolopts;
+ }
/* MAC is not wake-up capable, return what the PHY does */
if (!device_can_wakeup(kdev))
@@ -51,9 +54,14 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
/* Overlay MAC capabilities with that of the PHY queried before */
wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
- wol->wolopts = priv->wolopts;
- memset(wol->sopass, 0, sizeof(wol->sopass));
+ wol->wolopts |= priv->wolopts;
+ /* Return the PHY configured magic password */
+ if (phy_wolopts & WAKE_MAGICSECURE)
+ return;
+
+ /* Otherwise the MAC one */
+ memset(wol->sopass, 0, sizeof(wol->sopass));
if (wol->wolopts & WAKE_MAGICSECURE)
memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
@@ -70,7 +78,7 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
/* Try Wake-on-LAN from the PHY first */
if (dev->phydev) {
ret = phy_ethtool_set_wol(dev->phydev, wol);
- if (ret != -EOPNOTSUPP)
+ if (ret != -EOPNOTSUPP && wol->wolopts)
return ret;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 1c94bf1db718..d9d675f1ebfe 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -55,6 +55,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>
+#include <linux/dmi.h>
#include <net/checksum.h>
#include <net/gso.h>
@@ -18212,6 +18213,50 @@ unlock:
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+/* Systems where ACPI _PTS (Prepare To Sleep) S5 will result in a fatal
+ * PCIe AER event on the tg3 device if the tg3 device is not, or cannot
+ * be, powered down.
+ */
+static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
+ },
+ },
+ {}
+};
+
static void tg3_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -18228,6 +18273,19 @@ static void tg3_shutdown(struct pci_dev *pdev)
if (system_state == SYSTEM_POWER_OFF)
tg3_power_down(tp);
+ else if (system_state == SYSTEM_RESTART &&
+ dmi_first_match(tg3_restart_aer_quirk_table) &&
+ pdev->current_state != PCI_D3cold &&
+ pdev->current_state != PCI_UNKNOWN) {
+ /* Disable PCIe AER on the tg3 to avoid a fatal
+ * error during this system restart.
+ */
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE);
+ }
rtnl_unlock();
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 8167cc5fb0df..78d2a19593d1 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -1116,6 +1116,16 @@ static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
return gve_xdp_tx_queue_id(priv, 0);
}
+static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
+{
+ switch (priv->queue_format) {
+ case GVE_GQI_QPL_FORMAT:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* gqi napi handler defined in gve_main.c */
int gve_napi_poll(struct napi_struct *napi, int budget);
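
gve_supports_xdp_xmit() above exists so that gve_turnup()/gve_turndown() (see the gve_main.c hunks that follow) can advertise NETDEV_XDP_ACT_NDO_XMIT only while the backing queues are live. A hedged sketch of that toggle, with a hypothetical helper name:

#include <linux/netdevice.h>
#include <net/xdp.h>

static void example_sync_redirect_target(struct net_device *dev,
					 bool queues_live,
					 bool format_supports_xmit)
{
	if (queues_live && format_supports_xmit)
		/* second argument: no scatter-gather ndo_xdp_xmit support */
		xdp_features_set_redirect_target(dev, false);
	else
		xdp_features_clear_redirect_target(dev);
}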
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 533e659b15b3..92237fb0b60c 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1903,6 +1903,8 @@ static void gve_turndown(struct gve_priv *priv)
/* Stop tx queues */
netif_tx_disable(priv->dev);
+ xdp_features_clear_redirect_target(priv->dev);
+
gve_clear_napi_enabled(priv);
gve_clear_report_stats(priv);
@@ -1972,6 +1974,9 @@ static void gve_turnup(struct gve_priv *priv)
napi_schedule(&block->napi);
}
+ if (priv->num_xdp_queues && gve_supports_xdp_xmit(priv))
+ xdp_features_set_redirect_target(priv->dev, false);
+
gve_set_napi_enabled(priv);
}
@@ -2246,7 +2251,6 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
xdp_features = NETDEV_XDP_ACT_BASIC;
xdp_features |= NETDEV_XDP_ACT_REDIRECT;
- xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
} else {
xdp_features = 0;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index e95ae0d39948..0676fc547b6f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2408,6 +2408,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
dma_addr_t data_dma_addr;
struct netdev_queue *txq;
unsigned long lpar_rc;
+ unsigned int skblen;
union sub_crq tx_crq;
unsigned int offset;
bool use_scrq_send_direct = false;
@@ -2522,6 +2523,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_buff->skb = skb;
tx_buff->index = bufidx;
tx_buff->pool_index = queue_num;
+ skblen = skb->len;
memset(&tx_crq, 0, sizeof(tx_crq));
tx_crq.v1.first = IBMVNIC_CRQ_CMD;
@@ -2614,7 +2616,7 @@ early_exit:
netif_stop_subqueue(netdev, queue_num);
}
- tx_bytes += skb->len;
+ tx_bytes += skblen;
txq_trans_cond_update(txq);
ret = NETDEV_TX_OK;
goto out;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 2d7a18fcc3be..852e5b62f0a5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2903,8 +2903,8 @@ static void iavf_watchdog_task(struct work_struct *work)
}
mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
restart_watchdog:
+ netdev_unlock(netdev);
if (adapter->state >= __IAVF_DOWN)
queue_work(adapter->wq, &adapter->adminq_task);
if (adapter->aq_required)
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index d116e2b10bce..dbdb83567364 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -981,6 +981,9 @@ static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv
/* preallocate memory for ice_sched_node */
node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
*priv = node;
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 5d2d7736fd5f..9c9ea4c1b93b 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -527,15 +527,14 @@ err:
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
- * @rx_buf: Rx buffer to store the XDP action
* @eop_desc: Last descriptor in packet to read metadata from
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
-static void
+static u32
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
- struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc)
+ union ice_32b_rx_flex_desc *eop_desc)
{
unsigned int ret = ICE_XDP_PASS;
u32 act;
@@ -574,7 +573,7 @@ out_failure:
ret = ICE_XDP_CONSUMED;
}
exit:
- ice_set_rx_bufs_act(xdp, rx_ring, ret);
+ return ret;
}
/**
@@ -860,10 +859,8 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
xdp_buff_set_frags_flag(xdp);
}
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS))
return -ENOMEM;
- }
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
rx_buf->page_offset, size);
@@ -924,7 +921,6 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
struct ice_rx_buf *rx_buf;
rx_buf = &rx_ring->rx_buf[ntc];
- rx_buf->pgcnt = page_count(rx_buf->page);
prefetchw(rx_buf->page);
if (!size)
@@ -941,6 +937,31 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
}
/**
+ * ice_get_pgcnts - grab page_count() for gathered fragments
+ * @rx_ring: Rx descriptor ring to store the page counts on
+ *
+ * This function is intended to be called right before the XDP program is
+ * run, so that the page recycling mechanism can make a correct decision
+ * about the underlying pages; the snapshot is taken here because the XDP
+ * program may change the page refcount.
+ */
+static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
+{
+ u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
+ struct ice_rx_buf *rx_buf;
+ u32 cnt = rx_ring->count;
+
+ for (int i = 0; i < nr_frags; i++) {
+ rx_buf = &rx_ring->rx_buf[idx];
+ rx_buf->pgcnt = page_count(rx_buf->page);
+
+ if (++idx == cnt)
+ idx = 0;
+ }
+}
+
+/**
* ice_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
@@ -1051,12 +1072,12 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
rx_buf->page_offset + headlen, size,
xdp->frame_sz);
} else {
- /* buffer is unused, change the act that should be taken later
- * on; data was copied onto skb's linear part so there's no
+ /* buffer is unused, restore biased page count in Rx buffer;
+ * data was copied onto skb's linear part so there's no
* need for adjusting page offset and we can reuse this buffer
* as-is
*/
- rx_buf->act = ICE_SKB_CONSUMED;
+ rx_buf->pagecnt_bias++;
}
if (unlikely(xdp_buff_has_frags(xdp))) {
@@ -1104,6 +1125,65 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
}
/**
+ * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
+ * @rx_ring: Rx ring with all the auxiliary data
+ * @xdp: XDP buffer carrying linear + frags part
+ * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
+ * @ntc: a current next_to_clean value to be stored at rx_ring
+ * @verdict: return code from XDP program execution
+ *
+ * Walk through the gathered fragments and satisfy the internal page
+ * recycling mechanism; the action taken for each buffer depends on the
+ * verdict returned by the XDP program.
+ */
+static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ u32 *xdp_xmit, u32 ntc, u32 verdict)
+{
+ u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
+ u32 cnt = rx_ring->count;
+ u32 post_xdp_frags = 1;
+ struct ice_rx_buf *buf;
+ int i;
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;
+
+ for (i = 0; i < post_xdp_frags; i++) {
+ buf = &rx_ring->rx_buf[idx];
+
+ if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ *xdp_xmit |= verdict;
+ } else if (verdict & ICE_XDP_CONSUMED) {
+ buf->pagecnt_bias++;
+ } else if (verdict == ICE_XDP_PASS) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ }
+
+ ice_put_rx_buf(rx_ring, buf);
+
+ if (++idx == cnt)
+ idx = 0;
+ }
+ /* Handle buffers that represented frags released by the XDP prog;
+ * for these, keep pagecnt_bias as-is: the struct page refcount was
+ * already decremented within the XDP prog, so there is no need to
+ * increase the biased refcount.
+ */
+ for (; i < nr_frags; i++) {
+ buf = &rx_ring->rx_buf[idx];
+ ice_put_rx_buf(rx_ring, buf);
+ if (++idx == cnt)
+ idx = 0;
+ }
+
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
+}
+
+/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1120,15 +1200,13 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
unsigned int offset = rx_ring->rx_offset;
struct xdp_buff *xdp = &rx_ring->xdp;
- u32 cached_ntc = rx_ring->first_desc;
struct ice_tx_ring *xdp_ring = NULL;
struct bpf_prog *xdp_prog = NULL;
u32 ntc = rx_ring->next_to_clean;
+ u32 cached_ntu, xdp_verdict;
u32 cnt = rx_ring->count;
u32 xdp_xmit = 0;
- u32 cached_ntu;
bool failure;
- u32 first;
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (xdp_prog) {
@@ -1190,6 +1268,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
xdp_buff_clear_frags_flag(xdp);
} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
+ ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
break;
}
if (++ntc == cnt)
@@ -1199,15 +1278,15 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
if (ice_is_non_eop(rx_ring, rx_desc))
continue;
- ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
- if (rx_buf->act == ICE_XDP_PASS)
+ ice_get_pgcnts(rx_ring);
+ xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
+ if (xdp_verdict == ICE_XDP_PASS)
goto construct_skb;
total_rx_bytes += xdp_get_buff_len(xdp);
total_rx_pkts++;
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
+ ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+
continue;
construct_skb:
if (likely(ice_ring_uses_build_skb(rx_ring)))
@@ -1217,18 +1296,12 @@ construct_skb:
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->ring_stats->rx_stats.alloc_page_failed++;
- rx_buf->act = ICE_XDP_CONSUMED;
- if (unlikely(xdp_buff_has_frags(xdp)))
- ice_set_rx_bufs_act(xdp, rx_ring,
- ICE_XDP_CONSUMED);
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
- break;
+ xdp_verdict = ICE_XDP_CONSUMED;
}
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
+ ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+
+ if (!skb)
+ break;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
@@ -1257,23 +1330,6 @@ construct_skb:
total_rx_pkts++;
}
- first = rx_ring->first_desc;
- while (cached_ntc != first) {
- struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
-
- if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- xdp_xmit |= buf->act;
- } else if (buf->act & ICE_XDP_CONSUMED) {
- buf->pagecnt_bias++;
- } else if (buf->act == ICE_XDP_PASS) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- }
-
- ice_put_rx_buf(rx_ring, buf);
- if (++cached_ntc >= cnt)
- cached_ntc = 0;
- }
rx_ring->next_to_clean = ntc;
/* return up to cleaned_count buffers to hardware */
failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
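
With rx_buf->act gone, the recycling decision is driven by the page-count snapshot from ice_get_pgcnts() and the per-buffer pagecnt_bias. An illustrative sketch of that accounting (not driver code; the driver's reuse test has additional conditions):

#include "ice_txrx.h"	/* struct ice_rx_buf: pgcnt, pagecnt_bias */

/* Dropping a buffer the driver consumed itself: bump the bias instead of
 * calling put_page(), which is what ice_put_rx_mbuf() does for
 * ICE_XDP_CONSUMED.
 */
static void example_drop_rx_buf(struct ice_rx_buf *buf)
{
	buf->pagecnt_bias++;
}

/* Reuse is only considered while the driver holds the sole reference,
 * judged against the snapshot taken before the XDP program ran.
 */
static bool example_sole_owner(const struct ice_rx_buf *buf)
{
	return buf->pgcnt - buf->pagecnt_bias <= 1;
}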
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index cb347c852ba9..806bce701df3 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -201,7 +201,6 @@ struct ice_rx_buf {
struct page *page;
unsigned int page_offset;
unsigned int pgcnt;
- unsigned int act;
unsigned int pagecnt_bias;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 79f960c6680d..6cf32b404127 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -6,49 +6,6 @@
#include "ice.h"
/**
- * ice_set_rx_bufs_act - propagate Rx buffer action to frags
- * @xdp: XDP buffer representing frame (linear and frags part)
- * @rx_ring: Rx ring struct
- * act: action to store onto Rx buffers related to XDP buffer parts
- *
- * Set action that should be taken before putting Rx buffer from first frag
- * to the last.
- */
-static inline void
-ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
- const unsigned int act)
-{
- u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
- u32 nr_frags = rx_ring->nr_frags + 1;
- u32 idx = rx_ring->first_desc;
- u32 cnt = rx_ring->count;
- struct ice_rx_buf *buf;
-
- for (int i = 0; i < nr_frags; i++) {
- buf = &rx_ring->rx_buf[idx];
- buf->act = act;
-
- if (++idx == cnt)
- idx = 0;
- }
-
- /* adjust pagecnt_bias on frags freed by XDP prog */
- if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
- u32 delta = rx_ring->nr_frags - sinfo_frags;
-
- while (delta) {
- if (idx == 0)
- idx = cnt - 1;
- else
- idx--;
- buf = &rx_ring->rx_buf[idx];
- buf->pagecnt_bias--;
- delta--;
- }
- }
-}
-
-/**
* ice_test_staterr - tests bits in Rx descriptor status and error fields
* @status_err_n: Rx descriptor status_error0 or status_error1 bits
* @stat_err_bits: value to mask
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index b4fbb99bfad2..a3d6b8f198a8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -2159,8 +2159,13 @@ static int idpf_open(struct net_device *netdev)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
+ err = idpf_set_real_num_queues(vport);
+ if (err)
+ goto unlock;
+
err = idpf_vport_open(vport);
+unlock:
idpf_vport_ctrl_unlock(netdev);
return err;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 2fa9c36e33c9..9be6a6b59c4e 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3008,8 +3008,6 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
return -EINVAL;
rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
- if (unlikely(rsc_segments == 1))
- return 0;
NAPI_GRO_CB(skb)->count = rsc_segments;
skb_shinfo(skb)->gso_size = rsc_seg_len;
@@ -3072,6 +3070,7 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
idpf_rx_hash(rxq, skb, rx_desc, decoded);
skb->protocol = eth_type_trans(skb, rxq->netdev);
+ skb_record_rx_queue(skb, rxq->idx);
if (le16_get_bits(rx_desc->hdrlen_flags,
VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
@@ -3080,8 +3079,6 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
idpf_rx_csum(rxq, skb, csum_bits, decoded);
- skb_record_rx_queue(skb, rxq->idx);
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 56a35d58e7a6..84307bb7313e 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1096,6 +1096,7 @@ static int igc_init_empty_frame(struct igc_ring *ring,
return -ENOMEM;
}
+ buffer->type = IGC_TX_BUFFER_TYPE_SKB;
buffer->skb = skb;
buffer->protocol = 0;
buffer->bytecount = skb->len;
@@ -2701,8 +2702,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
}
static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
- struct xdp_buff *xdp)
+ struct igc_xdp_buff *ctx)
{
+ struct xdp_buff *xdp = &ctx->xdp;
unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
@@ -2721,27 +2723,28 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
__skb_pull(skb, metasize);
}
+ if (ctx->rx_ts) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
+ skb_hwtstamps(skb)->netdev_data = ctx->rx_ts;
+ }
+
return skb;
}
static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
union igc_adv_rx_desc *desc,
- struct xdp_buff *xdp,
- ktime_t timestamp)
+ struct igc_xdp_buff *ctx)
{
struct igc_ring *ring = q_vector->rx.ring;
struct sk_buff *skb;
- skb = igc_construct_skb_zc(ring, xdp);
+ skb = igc_construct_skb_zc(ring, ctx);
if (!skb) {
ring->rx_stats.alloc_failed++;
set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags);
return;
}
- if (timestamp)
- skb_hwtstamps(skb)->hwtstamp = timestamp;
-
if (igc_cleanup_headers(ring, desc, skb))
return;
@@ -2777,7 +2780,6 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
union igc_adv_rx_desc *desc;
struct igc_rx_buffer *bi;
struct igc_xdp_buff *ctx;
- ktime_t timestamp = 0;
unsigned int size;
int res;
@@ -2807,6 +2809,8 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
*/
bi->xdp->data_meta += IGC_TS_HDR_LEN;
size -= IGC_TS_HDR_LEN;
+ } else {
+ ctx->rx_ts = NULL;
}
bi->xdp->data_end = bi->xdp->data + size;
@@ -2815,7 +2819,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
switch (res) {
case IGC_XDP_PASS:
- igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
+ igc_dispatch_skb_zc(q_vector, desc, ctx);
fallthrough;
case IGC_XDP_CONSUMED:
xsk_buff_free(bi->xdp);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7236f20c9a30..467f81239e12 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2105,7 +2105,7 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
- if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
+ if (skb && IXGBE_CB(skb)->dma == rx_buffer->dma) {
/* the page has been released from the ring */
IXGBE_CB(skb)->page_released = true;
} else {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 2bed8c86b7cf..3f64cdbabfa3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -768,7 +768,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
if (err)
return;
- mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
+ err = mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
+ if (err)
+ return;
for (i = 0; i < len; i++) {
data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
if (!hw_stats[i].cells_bytes)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 2ec62c8d86e1..59486fe2ad18 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -20,6 +20,8 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
struct sk_buff *skb;
skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
skb_put(skb, size);
return skb;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d04543e5697b..c0ae7db96f46 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2094,6 +2094,11 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
pp_params.offset = stmmac_rx_offset(priv);
pp_params.max_len = dma_conf->dma_buf_sz;
+ if (priv->sph) {
+ pp_params.offset = 0;
+ pp_params.max_len += stmmac_rx_offset(priv);
+ }
+
rx_q->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rx_q->page_pool)) {
ret = PTR_ERR(rx_q->page_pool);
@@ -2424,6 +2429,11 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
u32 chan = 0;
u8 qmode = 0;
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
rxfifosz /= rx_channels_count;
@@ -2892,6 +2902,11 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
int rxfifosz = priv->plat->rx_fifo_size;
int txfifosz = priv->plat->tx_fifo_size;
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
/* Adjust for real per queue fifo size */
rxfifosz /= rx_channels_count;
txfifosz /= tx_channels_count;
@@ -5868,6 +5883,9 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
const int mtu = new_mtu;
int ret;
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
txfifosz /= priv->plat->tx_queues_to_use;
if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
@@ -7219,29 +7237,15 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
}
- if (!priv->plat->rx_fifo_size) {
- if (priv->dma_cap.rx_fifo_size) {
- priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
- } else {
- dev_err(priv->device, "Can't specify Rx FIFO size\n");
- return -ENODEV;
- }
- } else if (priv->dma_cap.rx_fifo_size &&
- priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
+ if (priv->dma_cap.rx_fifo_size &&
+ priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
dev_warn(priv->device,
"Rx FIFO size (%u) exceeds dma capability\n",
priv->plat->rx_fifo_size);
priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
}
- if (!priv->plat->tx_fifo_size) {
- if (priv->dma_cap.tx_fifo_size) {
- priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
- } else {
- dev_err(priv->device, "Can't specify Tx FIFO size\n");
- return -ENODEV;
- }
- } else if (priv->dma_cap.tx_fifo_size &&
- priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
+ if (priv->dma_cap.tx_fifo_size &&
+ priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
dev_warn(priv->device,
"Tx FIFO size (%u) exceeds dma capability\n",
priv->plat->tx_fifo_size);
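
All of the stmmac hunks above apply the same rule for FIFO sizes: a platform value of zero means "unknown, use the DMA capability value", and a non-zero platform value is clamped to that capability. A one-function sketch of the rule (hypothetical helper):

static unsigned int example_effective_fifo_size(unsigned int plat_size,
						unsigned int cap_size)
{
	if (!plat_size)
		return cap_size;		/* fall back to dma_cap */
	if (cap_size && plat_size > cap_size)
		return cap_size;		/* clamp to hardware limit */
	return plat_size;
}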
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index b663271e79f7..2806238629f8 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -828,21 +828,30 @@ static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct am65_cpsw_tx_chn *tx_chn = data;
+ enum am65_cpsw_tx_buf_type buf_type;
struct cppi5_host_desc_t *desc_tx;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
void **swdata;
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+ buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
+ skb = *(swdata);
+ dev_kfree_skb_any(skb);
+ } else {
+ xdpf = *(swdata);
+ xdp_return_frame(xdpf);
+ }
- dev_kfree_skb_any(skb);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
}
static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
struct net_device *ndev,
- unsigned int len)
+ unsigned int len,
+ unsigned int headroom)
{
struct sk_buff *skb;
@@ -852,7 +861,7 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, AM65_CPSW_HEADROOM);
+ skb_reserve(skb, headroom);
skb->dev = ndev;
return skb;
@@ -1169,9 +1178,11 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
struct xdp_frame *xdpf;
struct bpf_prog *prog;
struct page *page;
+ int pkt_len;
u32 act;
int err;
+ pkt_len = *len;
prog = READ_ONCE(port->xdp_prog);
if (!prog)
return AM65_CPSW_XDP_PASS;
@@ -1189,8 +1200,10 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf))
+ if (unlikely(!xdpf)) {
+ ndev->stats.tx_dropped++;
goto drop;
+ }
__netif_tx_lock(netif_txq, cpu);
err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
@@ -1199,14 +1212,14 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
if (err)
goto drop;
- dev_sw_netstats_tx_add(ndev, 1, *len);
+ dev_sw_netstats_rx_add(ndev, pkt_len);
ret = AM65_CPSW_XDP_CONSUMED;
goto out;
case XDP_REDIRECT:
if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
goto drop;
- dev_sw_netstats_rx_add(ndev, *len);
+ dev_sw_netstats_rx_add(ndev, pkt_len);
ret = AM65_CPSW_XDP_REDIRECT;
goto out;
default:
@@ -1315,16 +1328,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
-
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- skb = am65_cpsw_build_skb(page_addr, ndev,
- AM65_CPSW_MAX_PACKET_SIZE);
- if (unlikely(!skb)) {
- new_page = page;
- goto requeue;
- }
-
if (port->xdp_prog) {
xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
@@ -1334,9 +1339,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
if (*xdp_state != AM65_CPSW_XDP_PASS)
goto allocate;
- /* Compute additional headroom to be reserved */
- headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
- skb_reserve(skb, headroom);
+ headroom = xdp.data - xdp.data_hard_start;
+ } else {
+ headroom = AM65_CPSW_HEADROOM;
+ }
+
+ skb = am65_cpsw_build_skb(page_addr, ndev,
+ AM65_CPSW_MAX_PACKET_SIZE, headroom);
+ if (unlikely(!skb)) {
+ new_page = page;
+ goto requeue;
}
ndev_priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 9e7fa012e4fa..f33178f90c42 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2897,6 +2897,7 @@ static int axienet_probe(struct platform_device *pdev)
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
+ lp->phylink_config.mac_managed_pm = true;
lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
MAC_10FD | MAC_100FD | MAC_1000FD;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 642155cb8315..dbb3960126ee 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1902,21 +1902,9 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *geneve, *next;
- struct net_device *dev, *aux;
- /* gather any geneve devices that were moved into this ns */
- for_each_netdev_safe(net, dev, aux)
- if (dev->rtnl_link_ops == &geneve_link_ops)
- unregister_netdevice_queue(dev, head);
-
- /* now gather any other geneve devices that were created in this ns */
- list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
- /* If geneve->dev is in the same netns, it was already added
- * to the list by the previous loop.
- */
- if (!net_eq(dev_net(geneve->dev), net))
- unregister_netdevice_queue(geneve->dev, head);
- }
+ list_for_each_entry_safe(geneve, next, &gn->geneve_list, next)
+ geneve_dellink(geneve->dev, head);
}
static void __net_exit geneve_exit_batch_rtnl(struct list_head *net_list,
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index d64740bf44ed..b7b46c5e6399 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -2481,11 +2481,6 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
list_for_each_entry(net, net_list, exit_list) {
struct gtp_net *gn = net_generic(net, gtp_net_id);
struct gtp_dev *gtp, *gtp_next;
- struct net_device *dev;
-
- for_each_netdev(net, dev)
- if (dev->rtnl_link_ops == &gtp_link_ops)
- gtp_dellink(dev, dev_to_kill);
list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
gtp_dellink(gtp->dev, dev_to_kill);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 214b62fba991..b00a315de060 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -2265,12 +2265,15 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
/* Allow the MAC to stop its clock if the PHY has the capability */
pl->mac_tx_clk_stop = phy_eee_tx_clock_stop_capable(phy) > 0;
- /* Explicitly configure whether the PHY is allowed to stop it's
- * receive clock.
- */
- ret = phy_eee_rx_clock_stop(phy, pl->config->eee_rx_clk_stop_enable);
- if (ret == -EOPNOTSUPP)
- ret = 0;
+ if (pl->mac_supports_eee_ops) {
+ /* Explicitly configure whether the PHY is allowed to stop it's
+ * receive clock.
+ */
+ ret = phy_eee_rx_clock_stop(phy,
+ pl->config->eee_rx_clk_stop_enable);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ }
return ret;
}
diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c
index fc9e23927b3b..7d60a714ca53 100644
--- a/drivers/net/pse-pd/pd692x0.c
+++ b/drivers/net/pse-pd/pd692x0.c
@@ -1047,7 +1047,7 @@ static int pd692x0_pi_get_pw_limit(struct pse_controller_dev *pcdev,
if (ret < 0)
return ret;
- return pd692x0_pi_get_pw_from_table(buf.data[2], buf.data[3]);
+ return pd692x0_pi_get_pw_from_table(buf.data[0], buf.data[1]);
}
static int pd692x0_pi_set_pw_limit(struct pse_controller_dev *pcdev,
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
index 4f2a54afc4d0..4602e26eb8c8 100644
--- a/drivers/net/pse-pd/pse_core.c
+++ b/drivers/net/pse-pd/pse_core.c
@@ -319,7 +319,7 @@ static int pse_pi_get_current_limit(struct regulator_dev *rdev)
goto out;
mW = ret;
- ret = pse_pi_get_voltage(rdev);
+ ret = _pse_pi_get_voltage(rdev);
if (!ret) {
dev_err(pcdev->dev, "Voltage null\n");
ret = -ERANGE;
@@ -356,7 +356,7 @@ static int pse_pi_set_current_limit(struct regulator_dev *rdev, int min_uA,
id = rdev_get_id(rdev);
mutex_lock(&pcdev->lock);
- ret = pse_pi_get_voltage(rdev);
+ ret = _pse_pi_get_voltage(rdev);
if (!ret) {
dev_err(pcdev->dev, "Voltage null\n");
ret = -ERANGE;
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index dc7cbd6a9798..f4019815f473 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -2639,7 +2639,9 @@ int team_nl_options_set_doit(struct sk_buff *skb, struct genl_info *info)
ctx.data.u32_val = nla_get_u32(attr_data);
break;
case TEAM_OPTION_TYPE_STRING:
- if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
+ if (nla_len(attr_data) > TEAM_STRING_MAX_LEN ||
+ !memchr(nla_data(attr_data), '\0',
+ nla_len(attr_data))) {
err = -EINVAL;
goto team_put;
}
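
The team_core.c change rejects string options whose payload is not NUL-terminated within the attribute. The check reduces to the following sketch (helper name hypothetical):

#include <linux/string.h>
#include <net/netlink.h>

static bool example_nla_string_is_safe(const struct nlattr *attr,
				       int max_len)
{
	return nla_len(attr) <= max_len &&
	       memchr(nla_data(attr), '\0', nla_len(attr)) != NULL;
}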
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 28624cca91f8..acf96f262488 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -574,18 +574,14 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
return ret;
}
-static inline bool tun_capable(struct tun_struct *tun)
+static inline bool tun_not_capable(struct tun_struct *tun)
{
const struct cred *cred = current_cred();
struct net *net = dev_net(tun->dev);
- if (ns_capable(net->user_ns, CAP_NET_ADMIN))
- return 1;
- if (uid_valid(tun->owner) && uid_eq(cred->euid, tun->owner))
- return 1;
- if (gid_valid(tun->group) && in_egroup_p(tun->group))
- return 1;
- return 0;
+ return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
+ (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
+ !ns_capable(net->user_ns, CAP_NET_ADMIN);
}
static void tun_set_real_num_queues(struct tun_struct *tun)
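
The restored tun_not_capable() denies access only when a configured owner or group restriction is violated and the caller cannot override it with CAP_NET_ADMIN in the device's network namespace. The same expression, decomposed for readability (illustrative only, not driver code):

#include <linux/capability.h>
#include <linux/cred.h>
#include <net/net_namespace.h>

static bool example_tun_denied(const struct cred *cred, struct net *net,
			       kuid_t owner, kgid_t group)
{
	bool owner_mismatch = uid_valid(owner) && !uid_eq(cred->euid, owner);
	bool group_mismatch = gid_valid(group) && !in_egroup_p(group);

	return (owner_mismatch || group_mismatch) &&
	       !ns_capable(net->user_ns, CAP_NET_ADMIN);
}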
@@ -2782,7 +2778,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
!!(tun->flags & IFF_MULTI_QUEUE))
return -EINVAL;
- if (!tun_capable(tun))
+ if (tun_not_capable(tun))
return -EPERM;
err = security_tun_dev_open(tun->security);
if (err < 0)
diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
index 1341374a4588..616ecc38d172 100644
--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
+++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
@@ -28,7 +28,7 @@ vmxnet3_xdp_get_tq(struct vmxnet3_adapter *adapter)
if (likely(cpu < tq_number))
tq = &adapter->tx_queue[cpu];
else
- tq = &adapter->tx_queue[reciprocal_scale(cpu, tq_number)];
+ tq = &adapter->tx_queue[cpu % tq_number];
return tq;
}
@@ -124,6 +124,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
u32 buf_size;
u32 dw2;
+ spin_lock_irq(&tq->tx_lock);
dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
dw2 |= xdpf->len;
ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
@@ -134,6 +135,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) {
tq->stats.tx_ring_full++;
+ spin_unlock_irq(&tq->tx_lock);
return -ENOSPC;
}
@@ -142,8 +144,10 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
xdpf->data, buf_size,
DMA_TO_DEVICE);
- if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+ if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) {
+ spin_unlock_irq(&tq->tx_lock);
return -EFAULT;
+ }
tbi->map_type |= VMXNET3_MAP_SINGLE;
} else { /* XDP buffer from page pool */
page = virt_to_page(xdpf->data);
@@ -182,6 +186,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
dma_wmb();
gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
VMXNET3_TXD_GEN);
+ spin_unlock_irq(&tq->tx_lock);
/* No need to handle the case when tx_num_deferred doesn't reach
* threshold. Backend driver at hypervisor side will poll and reset
@@ -225,6 +230,7 @@ vmxnet3_xdp_xmit(struct net_device *dev,
{
struct vmxnet3_adapter *adapter = netdev_priv(dev);
struct vmxnet3_tx_queue *tq;
+ struct netdev_queue *nq;
int i;
if (unlikely(test_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)))
@@ -236,6 +242,9 @@ vmxnet3_xdp_xmit(struct net_device *dev,
if (tq->stopped)
return -ENETDOWN;
+ nq = netdev_get_tx_queue(adapter->netdev, tq->qid);
+
+ __netif_tx_lock(nq, smp_processor_id());
for (i = 0; i < n; i++) {
if (vmxnet3_xdp_xmit_frame(adapter, frames[i], tq, true)) {
tq->stats.xdp_xmit_err++;
@@ -243,6 +252,7 @@ vmxnet3_xdp_xmit(struct net_device *dev,
}
}
tq->stats.xdp_xmit += i;
+ __netif_tx_unlock(nq);
return i;
}
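
The vmxnet3 hunks add two levels of serialization: each frame manipulates the TX ring under tq->tx_lock, and a whole ndo_xdp_xmit batch additionally holds the netdev TX-queue lock. A simplified sketch of that shape (the locking granularity here is illustrative, not the driver's exact layout):

#include <linux/netdevice.h>
#include <linux/spinlock.h>

static int example_xdp_xmit_batch(struct netdev_queue *nq,
				  spinlock_t *ring_lock,
				  int (*xmit_one)(void *frame),
				  void **frames, int n)
{
	int i;

	__netif_tx_lock(nq, smp_processor_id());
	for (i = 0; i < n; i++) {
		int err;

		spin_lock_irq(ring_lock);
		err = xmit_one(frames[i]);	/* touches the shared ring */
		spin_unlock_irq(ring_lock);
		if (err)
			break;
	}
	__netif_tx_unlock(nq);

	return i;	/* frames actually queued */
}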
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 05c10acb2a57..92516189e792 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2898,8 +2898,11 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
int err;
- if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
- vxlan_vnigroup_init(vxlan);
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
+ err = vxlan_vnigroup_init(vxlan);
+ if (err)
+ return err;
+ }
err = gro_cells_init(&vxlan->gro_cells, dev);
if (err)
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 4dd6cdf84571..abb510d235a5 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -4851,6 +4851,22 @@ static struct ath12k_reg_rule
return reg_rule_ptr;
}
+static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
+ u32 num_reg_rules)
+{
+ u8 num_invalid_5ghz_rules = 0;
+ u32 count, start_freq;
+
+ for (count = 0; count < num_reg_rules; count++) {
+ start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
+
+ if (start_freq >= ATH12K_MIN_6G_FREQ)
+ num_invalid_5ghz_rules++;
+ }
+
+ return num_invalid_5ghz_rules;
+}
+
static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
struct sk_buff *skb,
struct ath12k_reg_info *reg_info)
@@ -4861,6 +4877,7 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
u32 num_2g_reg_rules, num_5g_reg_rules;
u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u8 num_invalid_5ghz_ext_rules;
u32 total_reg_rules = 0;
int ret, i, j;
@@ -4954,20 +4971,6 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
- /* FIXME: Currently FW includes 6G reg rule also in 5G rule
- * list for country US.
- * Having same 6G reg rule in 5G and 6G rules list causes
- * intersect check to be true, and same rules will be shown
- * multiple times in iw cmd. So added hack below to avoid
- * parsing 6G rule from 5G reg rule list, and this can be
- * removed later, after FW updates to remove 6G reg rule
- * from 5G rules list.
- */
- if (memcmp(reg_info->alpha2, "US", 2) == 0) {
- reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
- num_5g_reg_rules = reg_info->num_5g_reg_rules;
- }
-
reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
reg_info->num_phy = le32_to_cpu(ev->num_phy);
@@ -5070,8 +5073,29 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
}
}
+ ext_wmi_reg_rule += num_2g_reg_rules;
+
+ /* Firmware might include 6 GHz reg rules in the 5 GHz rule list
+ * for a few countries, along with a separate 6 GHz rule list.
+ * Having the same 6 GHz reg rule in both the 5 GHz and 6 GHz lists
+ * causes the intersect check to be true, and the same rules are then
+ * shown multiple times in the iw command output.
+ * Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list.
+ */
+ num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
+ num_5g_reg_rules);
+
+ if (num_invalid_5ghz_ext_rules) {
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
+ reg_info->alpha2, reg_info->num_5g_reg_rules,
+ num_invalid_5ghz_ext_rules);
+
+ num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
+ reg_info->num_5g_reg_rules = num_5g_reg_rules;
+ }
+
if (num_5g_reg_rules) {
- ext_wmi_reg_rule += num_2g_reg_rules;
reg_info->reg_rules_5g_ptr =
create_ext_reg_rules_from_wmi(num_5g_reg_rules,
ext_wmi_reg_rule);
@@ -5083,7 +5107,12 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
}
}
- ext_wmi_reg_rule += num_5g_reg_rules;
+ /* The number of 5 GHz reg rules was adjusted above, but the pointer
+ * into ext_wmi_reg_rule must still be advanced past all of the
+ * original rules, including the invalid ones.
+ *
+ * NOTE: num_invalid_5ghz_ext_rules will be 0 in all other cases.
+ */
+ ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
reg_info->reg_rules_6g_ap_ptr[i] =
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index b6a197389277..45fe699ce8a5 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -4073,7 +4073,6 @@ struct ath12k_wmi_eht_rate_set_params {
#define MAX_REG_RULES 10
#define REG_ALPHA2_LEN 2
#define MAX_6G_REG_RULES 5
-#define REG_US_5G_NUM_REG_RULES 4
enum wmi_start_event_param {
WMI_VDEV_START_RESP_EVENT = 0,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index e4395b1f8c11..d2caa80e9412 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -2712,7 +2712,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID, WCC),
- BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID, WCC),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID, WCC_SEED),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID, WCC),
@@ -2723,7 +2723,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID, WCC),
- BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, WCC),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, WCC_SEED),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID, BCA),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID, BCA),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID, BCA),
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index d5a9360323d2..8755c5e6a65b 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -220,7 +220,7 @@ static int mbim_rx_verify_nth16(struct mhi_mbim_context *mbim, struct sk_buff *s
if (mbim->rx_seq + 1 != le16_to_cpu(nth16->wSequence) &&
(mbim->rx_seq || le16_to_cpu(nth16->wSequence)) &&
!(mbim->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) {
- net_err_ratelimited("sequence number glitch prev=%d curr=%d\n",
+ net_dbg_ratelimited("sequence number glitch prev=%d curr=%d\n",
mbim->rx_seq, le16_to_cpu(nth16->wSequence));
}
mbim->rx_seq = le16_to_cpu(nth16->wSequence);
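The mbim_rx_verify_nth16() change above only lowers the log level; the underlying check treats any jump other than +1 as a sequence glitch, excluding the 16-bit wraparound and the initial zero state. A standalone sketch of that predicate, assuming nothing about the driver beyond the comparison itself:

/*
 * A "glitch" is any jump other than +1, except for the legitimate 16-bit
 * wraparound from 0xffff back to 0 and the initial 0 -> 0 case.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seq_glitch(uint16_t prev, uint16_t curr)
{
	if ((uint16_t)(prev + 1) == curr)
		return false;		/* normal increment, incl. wraparound */
	if (prev == 0 && curr == 0)
		return false;		/* first frame after reset */
	return true;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       seq_glitch(5, 6),	/* 0: in order */
	       seq_glitch(0xffff, 0),	/* 0: wraparound */
	       seq_glitch(0, 0),	/* 0: initial state */
	       seq_glitch(5, 9));	/* 1: glitch, now logged at debug level */
	return 0;
}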
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 1de11b722f04..a060f69558e7 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1011,25 +1011,37 @@ static void apple_nvme_reset_work(struct work_struct *work)
ret = apple_rtkit_shutdown(anv->rtk);
if (ret)
goto out;
+
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
}
- writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ /*
+ * Only do the soft-reset if the CPU is not running, which means either we
+ * or the previous stage shut it down cleanly.
+ */
+ if (!(readl(anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL) &
+ APPLE_ANS_COPROC_CPU_CONTROL_RUN)) {
- ret = reset_control_assert(anv->reset);
- if (ret)
- goto out;
+ ret = reset_control_assert(anv->reset);
+ if (ret)
+ goto out;
- ret = apple_rtkit_reinit(anv->rtk);
- if (ret)
- goto out;
+ ret = apple_rtkit_reinit(anv->rtk);
+ if (ret)
+ goto out;
- ret = reset_control_deassert(anv->reset);
- if (ret)
- goto out;
+ ret = reset_control_deassert(anv->reset);
+ if (ret)
+ goto out;
+
+ writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
+ anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+
+ ret = apple_rtkit_boot(anv->rtk);
+ } else {
+ ret = apple_rtkit_wake(anv->rtk);
+ }
- writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
- anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
- ret = apple_rtkit_boot(anv->rtk);
if (ret) {
dev_err(anv->dev, "ANS did not boot");
goto out;
@@ -1516,6 +1528,7 @@ static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
return anv;
put_dev:
+ apple_nvme_detach_genpd(anv);
put_device(anv->dev);
return ERR_PTR(ret);
}
@@ -1549,6 +1562,7 @@ out_uninit_ctrl:
nvme_uninit_ctrl(&anv->ctrl);
out_put_ctrl:
nvme_put_ctrl(&anv->ctrl);
+ apple_nvme_detach_genpd(anv);
return ret;
}
@@ -1563,9 +1577,12 @@ static void apple_nvme_remove(struct platform_device *pdev)
apple_nvme_disable(anv, true);
nvme_uninit_ctrl(&anv->ctrl);
- if (apple_rtkit_is_running(anv->rtk))
+ if (apple_rtkit_is_running(anv->rtk)) {
apple_rtkit_shutdown(anv->rtk);
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ }
+
apple_nvme_detach_genpd(anv);
}
@@ -1574,8 +1591,11 @@ static void apple_nvme_shutdown(struct platform_device *pdev)
struct apple_nvme *anv = platform_get_drvdata(pdev);
apple_nvme_disable(anv, true);
- if (apple_rtkit_is_running(anv->rtk))
+ if (apple_rtkit_is_running(anv->rtk)) {
apple_rtkit_shutdown(anv->rtk);
+
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ }
}
static int apple_nvme_resume(struct device *dev)
@@ -1592,10 +1612,11 @@ static int apple_nvme_suspend(struct device *dev)
apple_nvme_disable(anv, true);
- if (apple_rtkit_is_running(anv->rtk))
+ if (apple_rtkit_is_running(anv->rtk)) {
ret = apple_rtkit_shutdown(anv->rtk);
- writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ }
return ret;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 40046770f1bf..f028913e2e62 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -564,8 +564,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (new_state) {
case NVME_CTRL_LIVE:
switch (old_state) {
- case NVME_CTRL_NEW:
- case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
changed = true;
fallthrough;
@@ -1700,7 +1698,13 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
&result);
- if (status < 0)
+
+ /*
+ * It's either a kernel error or the host observed a connection
+ * loss. In either case it's not possible to communicate with the
+ * controller, so enter the error code path.
+ */
+ if (status < 0 || status == NVME_SC_HOST_PATH_ERROR)
return status;
/*
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 094be164ffdc..b9929a5a7f4e 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -785,49 +785,8 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
"NVME-FC{%d}: controller connectivity lost. Awaiting "
"Reconnect", ctrl->cnum);
- switch (nvme_ctrl_state(&ctrl->ctrl)) {
- case NVME_CTRL_NEW:
- case NVME_CTRL_LIVE:
- /*
- * Schedule a controller reset. The reset will terminate the
- * association and schedule the reconnect timer. Reconnects
- * will be attempted until either the ctlr_loss_tmo
- * (max_retries * connect_delay) expires or the remoteport's
- * dev_loss_tmo expires.
- */
- if (nvme_reset_ctrl(&ctrl->ctrl)) {
- dev_warn(ctrl->ctrl.device,
- "NVME-FC{%d}: Couldn't schedule reset.\n",
- ctrl->cnum);
- nvme_delete_ctrl(&ctrl->ctrl);
- }
- break;
-
- case NVME_CTRL_CONNECTING:
- /*
- * The association has already been terminated and the
- * controller is attempting reconnects. No need to do anything
- * futher. Reconnects will be attempted until either the
- * ctlr_loss_tmo (max_retries * connect_delay) expires or the
- * remoteport's dev_loss_tmo expires.
- */
- break;
-
- case NVME_CTRL_RESETTING:
- /*
- * Controller is already in the process of terminating the
- * association. No need to do anything further. The reconnect
- * step will kick in naturally after the association is
- * terminated.
- */
- break;
-
- case NVME_CTRL_DELETING:
- case NVME_CTRL_DELETING_NOIO:
- default:
- /* no action to take - let it delete */
- break;
- }
+ set_bit(ASSOC_FAILED, &ctrl->flags);
+ nvme_reset_ctrl(&ctrl->ctrl);
}
/**
@@ -2079,7 +2038,8 @@ done:
nvme_fc_complete_rq(rq);
check_error:
- if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
+ if (terminate_assoc &&
+ nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_RESETTING)
queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
@@ -2533,6 +2493,8 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
/*
* if an error (io timeout, etc) while (re)connecting, the remote
* port requested terminating of the association (disconnect_ls)
@@ -2540,9 +2502,8 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
* the controller. Abort any ios on the association and let the
* create_association error path resolve things.
*/
- if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
+ if (state == NVME_CTRL_CONNECTING) {
__nvme_fc_abort_outstanding_ios(ctrl, true);
- set_bit(ASSOC_FAILED, &ctrl->flags);
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: transport error during (re)connect\n",
ctrl->cnum);
@@ -2550,7 +2511,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
}
/* Otherwise, only proceed if in LIVE state - e.g. on first error */
- if (ctrl->ctrl.state != NVME_CTRL_LIVE)
+ if (state != NVME_CTRL_LIVE)
return;
dev_warn(ctrl->ctrl.device,
@@ -3061,7 +3022,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
struct nvmefc_ls_rcv_op *disls = NULL;
unsigned long flags;
int ret;
- bool changed;
++ctrl->ctrl.nr_reconnects;
@@ -3172,12 +3132,13 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (ret)
goto out_term_aen_ops;
- changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE)) {
+ ret = -EIO;
+ goto out_term_aen_ops;
+ }
ctrl->ctrl.nr_reconnects = 0;
-
- if (changed)
- nvme_start_ctrl(&ctrl->ctrl);
+ nvme_start_ctrl(&ctrl->ctrl);
return 0; /* Success */
@@ -3578,8 +3539,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
spin_unlock_irqrestore(&rport->lock, flags);
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
- !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
goto fail_ctrl;
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index e8930146847a..b1b46c2713e1 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -283,8 +283,7 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
{
if (ns && nsid != ns->head->ns_id) {
dev_err(ctrl->device,
- "%s: nsid (%u) in cmd does not match nsid (%u)"
- "of namespace\n",
+ "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
current->comm, nsid, ns->head->ns_id);
return false;
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 278bed4e35bb..950289405ef2 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2153,14 +2153,6 @@ static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred,
return 0;
out_free_bufs:
- while (--i >= 0) {
- size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE;
-
- dma_free_attrs(dev->dev, size, bufs[i],
- le64_to_cpu(descs[i].addr),
- DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
- }
-
kfree(bufs);
out_free_descs:
dma_free_coherent(dev->dev, descs_size, descs, descs_dma);
@@ -3147,7 +3139,9 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
* because of high power consumption (> 2 Watt) in s2idle
* sleep. Only some boards with Intel CPU are affected.
*/
- if (dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+ if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
+ dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+ dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
@@ -3712,6 +3706,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1dbe, 0x5216), /* Acer/INNOGRIT FA100/5216 NVMe SSD */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index b68a9e5f1ea3..3a41b9ab0f13 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -792,7 +792,7 @@ static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
return a->mode;
}
-const struct attribute_group nvme_tls_attrs_group = {
+static const struct attribute_group nvme_tls_attrs_group = {
.attrs = nvme_tls_attrs,
.is_visible = nvme_tls_attrs_are_visible,
};
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 841238f38fdd..8a9131c95a3d 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -763,6 +763,40 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
return 0;
}
+static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue,
+ struct nvme_tcp_term_pdu *pdu)
+{
+ u16 fes;
+ const char *msg;
+ u32 plen = le32_to_cpu(pdu->hdr.plen);
+
+ static const char * const msg_table[] = {
+ [NVME_TCP_FES_INVALID_PDU_HDR] = "Invalid PDU Header Field",
+ [NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error",
+ [NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error",
+ [NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range",
+ [NVME_TCP_FES_R2T_LIMIT_EXCEEDED] = "R2T Limit Exceeded",
+ [NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter",
+ };
+
+ if (plen < NVME_TCP_MIN_C2HTERM_PLEN ||
+ plen > NVME_TCP_MAX_C2HTERM_PLEN) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Received a malformed C2HTermReq PDU (plen = %u)\n",
+ plen);
+ return;
+ }
+
+ fes = le16_to_cpu(pdu->fes);
+ if (fes && fes < ARRAY_SIZE(msg_table))
+ msg = msg_table[fes];
+ else
+ msg = "Unknown";
+
+ dev_err(queue->ctrl->ctrl.device,
+ "Received C2HTermReq (FES = %s)\n", msg);
+}
+
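nvme_tcp_handle_c2h_term() above maps the Fatal Error Status code to a message through a sparse lookup table guarded by a bounds check. The same pattern in plain C, with placeholder indices rather than the NVMe/TCP FES values, falling back to "Unknown" for zero, out-of-range, or unset entries:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const msg_table[] = {
	[1] = "Invalid PDU Header Field",
	[2] = "PDU Sequence Error",
	[3] = "Header Digest Error",
};

static const char *fes_to_msg(unsigned int fes)
{
	if (fes && fes < ARRAY_SIZE(msg_table) && msg_table[fes])
		return msg_table[fes];
	return "Unknown";
}

int main(void)
{
	printf("%s\n", fes_to_msg(2));	/* PDU Sequence Error */
	printf("%s\n", fes_to_msg(0));	/* Unknown */
	printf("%s\n", fes_to_msg(42));	/* Unknown */
	return 0;
}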
static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
unsigned int *offset, size_t *len)
{
@@ -784,6 +818,15 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
return 0;
hdr = queue->pdu;
+ if (unlikely(hdr->type == nvme_tcp_c2h_term)) {
+ /*
+ * C2HTermReq never includes Header or Data digests.
+ * Skip the checks.
+ */
+ nvme_tcp_handle_c2h_term(queue, (void *)queue->pdu);
+ return -EINVAL;
+ }
+
if (queue->hdr_digest) {
ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
if (unlikely(ret))
@@ -1449,11 +1492,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
msg.msg_control = cbuf;
msg.msg_controllen = sizeof(cbuf);
}
+ msg.msg_flags = MSG_WAITALL;
ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
iov.iov_len, msg.msg_flags);
- if (ret < 0) {
+ if (ret < sizeof(*icresp)) {
pr_warn("queue %d: failed to receive icresp, error %d\n",
nvme_tcp_queue_id(queue), ret);
+ if (ret >= 0)
+ ret = -ECONNRESET;
goto free_icresp;
}
ret = -ENOTCONN;
@@ -1565,7 +1611,7 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
ctrl->io_queues[HCTX_TYPE_POLL];
}
-/**
+/*
* Track the number of queues assigned to each cpu using a global per-cpu
* counter and select the least used cpu from the mq_map. Our goal is to spread
* different controllers I/O threads across different cpu cores.
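The comment above describes selecting the least-used CPU from a per-CPU queue counter. A rough userspace approximation with a fixed array instead of per-cpu counters:

#include <stdio.h>

#define NR_CPUS 8

static unsigned int queue_count[NR_CPUS];

static int pick_least_used(const int *candidates, int n)
{
	int best = candidates[0];
	int i;

	for (i = 1; i < n; i++)
		if (queue_count[candidates[i]] < queue_count[best])
			best = candidates[i];

	queue_count[best]++;		/* account for the new queue */
	return best;
}

int main(void)
{
	int cand[] = { 0, 2, 4, 6 };	/* CPUs allowed by the mq_map */
	int q;

	for (q = 0; q < 6; q++)
		printf("queue %d -> cpu %d\n", q, pick_least_used(cand, 4));
	return 0;
}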
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index e670dc185a96..acc138bbf8f2 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -1068,6 +1068,7 @@ static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
goto out;
}
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+ kfree(id);
out:
nvmet_req_complete(req, status);
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index cdc4a09a6e8a..2e741696f371 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -606,6 +606,9 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
goto out_dev_put;
}
+ if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL))
+ goto out_pr_exit;
+
nvmet_ns_changed(subsys, ns->nsid);
ns->enabled = true;
xa_set_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
@@ -613,6 +616,9 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
out_unlock:
mutex_unlock(&subsys->lock);
return ret;
+out_pr_exit:
+ if (ns->pr.enable)
+ nvmet_pr_exit_ns(ns);
out_dev_put:
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
@@ -638,6 +644,19 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
mutex_unlock(&subsys->lock);
+ /*
+ * Now that we removed the namespaces from the lookup list, we
+ * can kill the per_cpu ref and wait for any remaining references
+ * to be dropped, as well as a RCU grace period for anyone only
+ * using the namespace under rcu_read_lock(). Note that we can't
+ * use call_rcu here as we need to ensure the namespaces have
+ * been fully destroyed before unloading the module.
+ */
+ percpu_ref_kill(&ns->ref);
+ synchronize_rcu();
+ wait_for_completion(&ns->disable_done);
+ percpu_ref_exit(&ns->ref);
+
if (ns->pr.enable)
nvmet_pr_exit_ns(ns);
@@ -660,22 +679,6 @@ void nvmet_ns_free(struct nvmet_ns *ns)
if (ns->nsid == subsys->max_nsid)
subsys->max_nsid = nvmet_max_nsid(subsys);
- mutex_unlock(&subsys->lock);
-
- /*
- * Now that we removed the namespaces from the lookup list, we
- * can kill the per_cpu ref and wait for any remaining references
- * to be dropped, as well as a RCU grace period for anyone only
- * using the namepace under rcu_read_lock(). Note that we can't
- * use call_rcu here as we need to ensure the namespaces have
- * been fully destroyed before unloading the module.
- */
- percpu_ref_kill(&ns->ref);
- synchronize_rcu();
- wait_for_completion(&ns->disable_done);
- percpu_ref_exit(&ns->ref);
-
- mutex_lock(&subsys->lock);
subsys->nr_namespaces--;
mutex_unlock(&subsys->lock);
@@ -705,9 +708,6 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
ns->nsid = nsid;
ns->subsys = subsys;
- if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL))
- goto out_free;
-
if (ns->nsid > subsys->max_nsid)
subsys->max_nsid = nsid;
@@ -730,8 +730,6 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
return ns;
out_exit:
subsys->max_nsid = nvmet_max_nsid(subsys);
- percpu_ref_exit(&ns->ref);
-out_free:
kfree(ns);
out_unlock:
mutex_unlock(&subsys->lock);
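The hunks above move the reference-count teardown into nvmet_ns_disable(): stop new lookups, drop the enable reference, wait for outstanding users, then free. A toy single-threaded illustration using a C11 atomic counter in place of a percpu_ref and polling in place of a completion:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ns {
	atomic_int refs;	/* starts at 1: the "enabled" reference */
	atomic_bool dead;
};

static bool ns_tryget(struct ns *ns)
{
	if (atomic_load(&ns->dead))
		return false;		/* lookups fail once disable started */
	atomic_fetch_add(&ns->refs, 1);
	return true;
}

static void ns_put(struct ns *ns)
{
	atomic_fetch_sub(&ns->refs, 1);
}

static void ns_disable(struct ns *ns)
{
	atomic_store(&ns->dead, true);	/* "kill" the ref: no new users */
	ns_put(ns);			/* drop the enable reference */
	while (atomic_load(&ns->refs))	/* wait_for_completion() analogue */
		;
	printf("all references dropped, safe to free\n");
}

int main(void)
{
	struct ns ns = { .refs = 1, .dead = false };

	if (ns_tryget(&ns))		/* an in-flight user comes and goes */
		ns_put(&ns);
	ns_disable(&ns);
	return 0;
}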
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index a7ff05b3be29..eb406c90c167 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -287,7 +287,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
args.subsysnqn = d->subsysnqn;
args.hostnqn = d->hostnqn;
args.hostid = &d->hostid;
- args.kato = c->kato;
+ args.kato = le32_to_cpu(c->kato);
ctrl = nvmet_alloc_ctrl(&args);
if (!ctrl)
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index c1f574fe3280..83be0657e6df 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -272,7 +272,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
iter_flags = SG_MITER_FROM_SG;
}
- if (req->cmd->rw.control & NVME_RW_LR)
+ if (req->cmd->rw.control & cpu_to_le16(NVME_RW_LR))
opf |= REQ_FAILFAST_DEV;
if (is_pci_p2pdma_page(sg_page(req->sg)))
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index b540216c0c9a..d2c1233981e1 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -589,7 +589,7 @@ struct nvmet_alloc_ctrl_args {
const struct nvmet_fabrics_ops *ops;
struct device *p2p_client;
u32 kato;
- u32 result;
+ __le32 result;
u16 error_loc;
u16 status;
};
@@ -784,37 +784,37 @@ u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
static inline bool nvmet_cc_en(u32 cc)
{
- return (cc >> NVME_CC_EN_SHIFT) & 0x1;
+ return (cc & NVME_CC_ENABLE) >> NVME_CC_EN_SHIFT;
}
static inline u8 nvmet_cc_css(u32 cc)
{
- return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
+ return (cc & NVME_CC_CSS_MASK) >> NVME_CC_CSS_SHIFT;
}
static inline u8 nvmet_cc_mps(u32 cc)
{
- return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
+ return (cc & NVME_CC_MPS_MASK) >> NVME_CC_MPS_SHIFT;
}
static inline u8 nvmet_cc_ams(u32 cc)
{
- return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
+ return (cc & NVME_CC_AMS_MASK) >> NVME_CC_AMS_SHIFT;
}
static inline u8 nvmet_cc_shn(u32 cc)
{
- return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
+ return (cc & NVME_CC_SHN_MASK) >> NVME_CC_SHN_SHIFT;
}
static inline u8 nvmet_cc_iosqes(u32 cc)
{
- return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
+ return (cc & NVME_CC_IOSQES_MASK) >> NVME_CC_IOSQES_SHIFT;
}
static inline u8 nvmet_cc_iocqes(u32 cc)
{
- return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
+ return (cc & NVME_CC_IOCQES_MASK) >> NVME_CC_IOCQES_SHIFT;
}
/* Convert a 32-bit number to a 16-bit 0's based number */
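The nvmet_cc_*() helpers above switch from shift-then-mask to mask-then-shift so each field is described by a single *_MASK constant. A self-contained example of that extraction style, with an illustrative mask rather than the real CC layout:

#include <stdint.h>
#include <stdio.h>

#define FIELD_SHIFT	4
#define FIELD_MASK	(0xfu << FIELD_SHIFT)

static uint8_t get_field(uint32_t reg)
{
	/* equivalent to (reg >> FIELD_SHIFT) & 0xf, but the mask is
	 * defined once, next to the register layout */
	return (reg & FIELD_MASK) >> FIELD_SHIFT;
}

int main(void)
{
	uint32_t reg = 0x000000a5;

	printf("field = %u\n", get_field(reg));	/* prints 10 (0xa) */
	return 0;
}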
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index ac30b42cc622..565d2bd36dcd 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -46,7 +46,7 @@ static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex);
/*
* BAR CC register and SQ polling intervals.
*/
-#define NVMET_PCI_EPF_CC_POLL_INTERVAL msecs_to_jiffies(5)
+#define NVMET_PCI_EPF_CC_POLL_INTERVAL msecs_to_jiffies(10)
#define NVMET_PCI_EPF_SQ_POLL_INTERVAL msecs_to_jiffies(5)
#define NVMET_PCI_EPF_SQ_POLL_IDLE msecs_to_jiffies(5000)
@@ -1694,6 +1694,7 @@ static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work)
struct nvmet_pci_epf_ctrl *ctrl =
container_of(work, struct nvmet_pci_epf_ctrl, poll_sqs.work);
struct nvmet_pci_epf_queue *sq;
+ unsigned long limit = jiffies;
unsigned long last = 0;
int i, nr_sqs;
@@ -1708,6 +1709,16 @@ static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work)
nr_sqs++;
}
+ /*
+ * If we have been running for a while, reschedule to let other
+ * tasks run and to avoid RCU stalls.
+ */
+ if (time_is_before_jiffies(limit + secs_to_jiffies(1))) {
+ cond_resched();
+ limit = jiffies;
+ continue;
+ }
+
if (nr_sqs) {
last = jiffies;
continue;
@@ -1822,14 +1833,14 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
if (ctrl->io_sqes < sizeof(struct nvme_command)) {
dev_err(ctrl->dev, "Unsupported I/O SQES %zu (need %zu)\n",
ctrl->io_sqes, sizeof(struct nvme_command));
- return -EINVAL;
+ goto err;
}
ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc);
if (ctrl->io_cqes < sizeof(struct nvme_completion)) {
dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n",
ctrl->io_sqes, sizeof(struct nvme_completion));
- return -EINVAL;
+ goto err;
}
/* Create the admin queue. */
@@ -1844,7 +1855,7 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
qsize, pci_addr, 0);
if (status != NVME_SC_SUCCESS) {
dev_err(ctrl->dev, "Failed to create admin completion queue\n");
- return -EINVAL;
+ goto err;
}
qsize = aqa & 0x00000fff;
@@ -1854,17 +1865,22 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
if (status != NVME_SC_SUCCESS) {
dev_err(ctrl->dev, "Failed to create admin submission queue\n");
nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
- return -EINVAL;
+ goto err;
}
ctrl->sq_ab = NVMET_PCI_EPF_SQ_AB;
ctrl->irq_vector_threshold = NVMET_PCI_EPF_IV_THRESHOLD;
ctrl->enabled = true;
+ ctrl->csts = NVME_CSTS_RDY;
/* Start polling the controller SQs. */
schedule_delayed_work(&ctrl->poll_sqs, 0);
return 0;
+
+err:
+ ctrl->csts = 0;
+ return -EINVAL;
}
static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
@@ -1889,6 +1905,8 @@ static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
/* Delete the admin queue last. */
nvmet_pci_epf_delete_sq(ctrl->tctrl, 0);
nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
+
+ ctrl->csts &= ~NVME_CSTS_RDY;
}
static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
@@ -1903,19 +1921,19 @@ static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
old_cc = ctrl->cc;
new_cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC);
+ if (new_cc == old_cc)
+ goto reschedule_work;
+
ctrl->cc = new_cc;
if (nvmet_cc_en(new_cc) && !nvmet_cc_en(old_cc)) {
ret = nvmet_pci_epf_enable_ctrl(ctrl);
if (ret)
- return;
- ctrl->csts |= NVME_CSTS_RDY;
+ goto reschedule_work;
}
- if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc)) {
+ if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc))
nvmet_pci_epf_disable_ctrl(ctrl);
- ctrl->csts &= ~NVME_CSTS_RDY;
- }
if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc)) {
nvmet_pci_epf_disable_ctrl(ctrl);
@@ -1928,6 +1946,7 @@ static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
nvmet_update_cc(ctrl->tctrl, ctrl->cc);
nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
+reschedule_work:
schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 1afd93026f9b..2a4536ef6184 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -996,6 +996,27 @@ out_err:
nvmet_req_complete(&cmd->req, status);
}
+static bool nvmet_rdma_recv_not_live(struct nvmet_rdma_queue *queue,
+ struct nvmet_rdma_rsp *rsp)
+{
+ unsigned long flags;
+ bool ret = true;
+
+ spin_lock_irqsave(&queue->state_lock, flags);
+ /*
+ * recheck queue state is not live to prevent a race condition
+ * Recheck that the queue state is not live to prevent a race
+ * with the RDMA_CM_EVENT_ESTABLISHED handler.
+ if (queue->state == NVMET_RDMA_Q_LIVE)
+ ret = false;
+ else if (queue->state == NVMET_RDMA_Q_CONNECTING)
+ list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
+ else
+ nvmet_rdma_put_rsp(rsp);
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+ return ret;
+}
+
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvmet_rdma_cmd *cmd =
@@ -1038,17 +1059,9 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
rsp->n_rdma = 0;
rsp->invalidate_rkey = 0;
- if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
- unsigned long flags;
-
- spin_lock_irqsave(&queue->state_lock, flags);
- if (queue->state == NVMET_RDMA_Q_CONNECTING)
- list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
- else
- nvmet_rdma_put_rsp(rsp);
- spin_unlock_irqrestore(&queue->state_lock, flags);
+ if (unlikely(queue->state != NVMET_RDMA_Q_LIVE) &&
+ nvmet_rdma_recv_not_live(queue, rsp))
return;
- }
nvmet_rdma_handle_command(queue, rsp);
}
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 125833e5ce52..d177a2b9edaf 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -16,6 +16,8 @@
#include <linux/string.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
+#include <kunit/visibility.h>
+
/* Uncomment me to enable of_dump_addr() debugging output */
// #define DEBUG
@@ -183,7 +185,7 @@ static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
#endif /* CONFIG_PCI */
-static int __of_address_resource_bounds(struct resource *r, u64 start, u64 size)
+VISIBLE_IF_KUNIT int __of_address_resource_bounds(struct resource *r, u64 start, u64 size)
{
if (overflows_type(start, r->start))
return -EOVERFLOW;
@@ -197,6 +199,7 @@ static int __of_address_resource_bounds(struct resource *r, u64 start, u64 size)
return 0;
}
+EXPORT_SYMBOL_IF_KUNIT(__of_address_resource_bounds);
/*
* of_pci_range_to_resource - Create a resource from an of_pci_range
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index f3e1193c8ded..1bdc7ceef3c5 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -208,4 +208,8 @@ static void __maybe_unused of_dump_addr(const char *s, const __be32 *addr, int n
static void __maybe_unused of_dump_addr(const char *s, const __be32 *addr, int na) { }
#endif
+#if IS_ENABLED(CONFIG_KUNIT)
+int __of_address_resource_bounds(struct resource *r, u64 start, u64 size);
+#endif
+
#endif /* _LINUX_OF_PRIVATE_H */
diff --git a/drivers/of/of_test.c b/drivers/of/of_test.c
index b0557ded838f..8bba5a72c9c7 100644
--- a/drivers/of/of_test.c
+++ b/drivers/of/of_test.c
@@ -2,6 +2,7 @@
/*
* KUnit tests for OF APIs
*/
+#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -54,8 +55,124 @@ static struct kunit_suite of_dtb_suite = {
.init = of_dtb_test_init,
};
+struct of_address_resource_bounds_case {
+ u64 start;
+ u64 size;
+ int ret;
+
+ u64 res_start;
+ u64 res_end;
+};
+
+static void of_address_resource_bounds_case_desc(const struct of_address_resource_bounds_case *p,
+ char *name)
+{
+ snprintf(name, KUNIT_PARAM_DESC_SIZE, "start=0x%016llx,size=0x%016llx", p->start, p->size);
+}
+
+static const struct of_address_resource_bounds_case of_address_resource_bounds_cases[] = {
+ {
+ .start = 0,
+ .size = 0,
+ .ret = 0,
+ .res_start = 0,
+ .res_end = -1,
+ },
+ {
+ .start = 0,
+ .size = 0x1000,
+ .ret = 0,
+ .res_start = 0,
+ .res_end = 0xfff,
+ },
+ {
+ .start = 0x1000,
+ .size = 0,
+ .ret = 0,
+ .res_start = 0x1000,
+ .res_end = 0xfff,
+ },
+ {
+ .start = 0x1000,
+ .size = 0x1000,
+ .ret = 0,
+ .res_start = 0x1000,
+ .res_end = 0x1fff,
+ },
+ {
+ .start = 1,
+ .size = RESOURCE_SIZE_MAX,
+ .ret = 0,
+ .res_start = 1,
+ .res_end = RESOURCE_SIZE_MAX,
+ },
+ {
+ .start = RESOURCE_SIZE_MAX,
+ .size = 1,
+ .ret = 0,
+ .res_start = RESOURCE_SIZE_MAX,
+ .res_end = RESOURCE_SIZE_MAX,
+ },
+ {
+ .start = 2,
+ .size = RESOURCE_SIZE_MAX,
+ .ret = -EOVERFLOW,
+ },
+ {
+ .start = RESOURCE_SIZE_MAX,
+ .size = 2,
+ .ret = -EOVERFLOW,
+ },
+ {
+ .start = ULL(0x100000000),
+ .size = 1,
+ .ret = sizeof(resource_size_t) > sizeof(u32) ? 0 : -EOVERFLOW,
+ .res_start = ULL(0x100000000),
+ .res_end = ULL(0x100000000),
+ },
+ {
+ .start = 0x1000,
+ .size = 0xffffffff,
+ .ret = sizeof(resource_size_t) > sizeof(u32) ? 0 : -EOVERFLOW,
+ .res_start = 0x1000,
+ .res_end = ULL(0x100000ffe),
+ },
+};
+
+KUNIT_ARRAY_PARAM(of_address_resource_bounds,
+ of_address_resource_bounds_cases, of_address_resource_bounds_case_desc);
+
+static void of_address_resource_bounds(struct kunit *test)
+{
+ const struct of_address_resource_bounds_case *param = test->param_value;
+ struct resource r; /* Intentionally uninitialized */
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_OF_ADDRESS))
+ kunit_skip(test, "CONFIG_OF_ADDRESS not enabled\n");
+
+ ret = __of_address_resource_bounds(&r, param->start, param->size);
+ KUNIT_EXPECT_EQ(test, param->ret, ret);
+ if (ret == 0) {
+ KUNIT_EXPECT_EQ(test, (resource_size_t)param->res_start, r.start);
+ KUNIT_EXPECT_EQ(test, (resource_size_t)param->res_end, r.end);
+ KUNIT_EXPECT_EQ(test, param->size, resource_size(&r));
+ }
+}
+
+static struct kunit_case of_address_test_cases[] = {
+ KUNIT_CASE_PARAM(of_address_resource_bounds, of_address_resource_bounds_gen_params),
+ {}
+};
+
+static struct kunit_suite of_address_suite = {
+ .name = "of_address",
+ .test_cases = of_address_test_cases,
+};
+
kunit_test_suites(
- &of_dtb_suite,
+ &of_dtb_suite, &of_address_suite,
);
MODULE_DESCRIPTION("KUnit tests for OF APIs");
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
MODULE_LICENSE("GPL");
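The new KUnit cases exercise __of_address_resource_bounds() with values that overflow a 32-bit resource type. A standalone approximation of that check, deliberately using uint32_t so the -EOVERFLOW cases from the table are easy to reproduce (the error value and helper name are local to this sketch):

#include <stdint.h>
#include <stdio.h>

#define EOVERFLOW_ERR 75

static int resource_bounds(uint64_t start, uint64_t size,
			   uint32_t *res_start, uint32_t *res_end)
{
	uint64_t end = start + size - 1;	/* unused when size == 0 */

	if (start > UINT32_MAX)
		return -EOVERFLOW_ERR;
	if (size && end > UINT32_MAX)
		return -EOVERFLOW_ERR;

	*res_start = (uint32_t)start;
	*res_end = size ? (uint32_t)end : (uint32_t)start - 1;
	return 0;
}

int main(void)
{
	uint32_t s, e;

	printf("%d\n", resource_bounds(0x1000, 0x1000, &s, &e));	/* 0 */
	printf("%d\n", resource_bounds(0x100000000ULL, 1, &s, &e));	/* -75 */
	printf("%d\n", resource_bounds(0x1000, 0xffffffffULL, &s, &e));	/* -75 */
	return 0;
}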
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index e0bc90597dca..da3e7edcf49d 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -108,9 +108,6 @@ void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cap++);
pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cap++);
- if (parent->state_saved)
- return;
-
/*
* Save parent's L1 substate configuration so we have it for
* pci_restore_aspm_l1ss_state(pdev) to restore.
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index b6536ed599c3..246744d8d268 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -339,13 +339,14 @@ out:
return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
-static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
+static __always_inline void pci_read_bases(struct pci_dev *dev,
+ unsigned int howmany, int rom)
{
u32 rombar, stdbars[PCI_STD_NUM_BARS];
unsigned int pos, reg;
u16 orig_cmd;
- BUILD_BUG_ON(howmany > PCI_STD_NUM_BARS);
+ BUILD_BUG_ON(statically_true(howmany > PCI_STD_NUM_BARS));
if (dev->non_compliant_bars)
return;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index b84ff7bade82..82b21e34c545 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5522,7 +5522,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
* AMD Matisse USB 3.0 Host Controller 0x149c
* Intel 82579LM Gigabit Ethernet Controller 0x1502
* Intel 82579V Gigabit Ethernet Controller 0x1503
- *
+ * Mediatek MT7922 802.11ax PCI Express Wireless Network Adapter
*/
static void quirk_no_flr(struct pci_dev *dev)
{
@@ -5534,6 +5534,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MEDIATEK, 0x0616, quirk_no_flr);
/* FLR may cause the SolidRun SNET DPU (rev 0x1) to hang */
static void quirk_no_flr_snet(struct pci_dev *dev)
diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c
index 1e604fbbda65..07de59ca2ebf 100644
--- a/drivers/pci/tph.c
+++ b/drivers/pci/tph.c
@@ -360,7 +360,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
return err;
}
- set_ctrl_reg_req_en(pdev, pdev->tph_mode);
+ set_ctrl_reg_req_en(pdev, pdev->tph_req_type);
pci_dbg(pdev, "set steering tag: %s table, index=%d, tag=%#04x\n",
(loc == PCI_TPH_LOC_MSIX) ? "MSI-X" : "ST", index, tag);
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 0b13d7f17b32..42547f64453e 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -89,12 +89,12 @@ static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
seq_puts(s, items[i].display);
/* Print unit if available */
if (items[i].has_arg) {
- seq_printf(s, " (0x%x",
- pinconf_to_config_argument(config));
+ u32 val = pinconf_to_config_argument(config);
+
if (items[i].format)
- seq_printf(s, " %s)", items[i].format);
+ seq_printf(s, " (%u %s)", val, items[i].format);
else
- seq_puts(s, ")");
+ seq_printf(s, " (0x%x)", val);
}
}
}
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index 0d6c2027d4c1..d73004b4a45e 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -42,7 +42,7 @@
#define CY8C95X0_PORTSEL 0x18
/* Port settings, write PORTSEL first */
#define CY8C95X0_INTMASK 0x19
-#define CY8C95X0_PWMSEL 0x1A
+#define CY8C95X0_SELPWM 0x1A
#define CY8C95X0_INVERT 0x1B
#define CY8C95X0_DIRECTION 0x1C
/* Drive mode register change state on writing '1' */
@@ -328,14 +328,14 @@ static int cypress_get_pin_mask(struct cy8c95x0_pinctrl *chip, unsigned int pin)
static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
{
/*
- * Only 12 registers are present per port (see Table 6 in the
- * datasheet).
+ * Only 12 registers are present per port (see Table 6 in the datasheet).
*/
- if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) < 12)
- return true;
+ if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) >= 12)
+ return false;
switch (reg) {
case 0x24 ... 0x27:
+ case 0x31 ... 0x3f:
return false;
default:
return true;
@@ -344,8 +344,11 @@ static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
static bool cy8c95x0_writeable_register(struct device *dev, unsigned int reg)
{
- if (reg >= CY8C95X0_VIRTUAL)
- return true;
+ /*
+ * Only 12 registers are present per port (see Table 6 in the datasheet).
+ */
+ if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) >= 12)
+ return false;
switch (reg) {
case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
@@ -353,6 +356,7 @@ static bool cy8c95x0_writeable_register(struct device *dev, unsigned int reg)
case CY8C95X0_DEVID:
return false;
case 0x24 ... 0x27:
+ case 0x31 ... 0x3f:
return false;
default:
return true;
@@ -365,8 +369,8 @@ static bool cy8c95x0_volatile_register(struct device *dev, unsigned int reg)
case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
case CY8C95X0_INTSTATUS_(0) ... CY8C95X0_INTSTATUS_(7):
case CY8C95X0_INTMASK:
+ case CY8C95X0_SELPWM:
case CY8C95X0_INVERT:
- case CY8C95X0_PWMSEL:
case CY8C95X0_DIRECTION:
case CY8C95X0_DRV_PU:
case CY8C95X0_DRV_PD:
@@ -395,7 +399,7 @@ static bool cy8c95x0_muxed_register(unsigned int reg)
{
switch (reg) {
case CY8C95X0_INTMASK:
- case CY8C95X0_PWMSEL:
+ case CY8C95X0_SELPWM:
case CY8C95X0_INVERT:
case CY8C95X0_DIRECTION:
case CY8C95X0_DRV_PU:
@@ -466,7 +470,11 @@ static const struct regmap_config cy8c9520_i2c_regmap = {
.max_register = 0, /* Updated at runtime */
.num_reg_defaults_raw = 0, /* Updated at runtime */
.use_single_read = true, /* Workaround for regcache bug */
+#if IS_ENABLED(CONFIG_DEBUG_PINCTRL)
+ .disable_locking = false,
+#else
.disable_locking = true,
+#endif
};
static inline int cy8c95x0_regmap_update_bits_base(struct cy8c95x0_pinctrl *chip,
@@ -789,7 +797,7 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
reg = CY8C95X0_DIRECTION;
break;
case PIN_CONFIG_MODE_PWM:
- reg = CY8C95X0_PWMSEL;
+ reg = CY8C95X0_SELPWM;
break;
case PIN_CONFIG_OUTPUT:
reg = CY8C95X0_OUTPUT;
@@ -868,7 +876,7 @@ static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip,
reg = CY8C95X0_DRV_PP_FAST;
break;
case PIN_CONFIG_MODE_PWM:
- reg = CY8C95X0_PWMSEL;
+ reg = CY8C95X0_SELPWM;
break;
case PIN_CONFIG_OUTPUT_ENABLE:
return cy8c95x0_pinmux_direction(chip, off, !arg);
@@ -1153,7 +1161,7 @@ static void cy8c95x0_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *
bitmap_zero(mask, MAX_LINE);
__set_bit(pin, mask);
- if (cy8c95x0_read_regs_mask(chip, CY8C95X0_PWMSEL, pwm, mask)) {
+ if (cy8c95x0_read_regs_mask(chip, CY8C95X0_SELPWM, pwm, mask)) {
seq_puts(s, "not available");
return;
}
@@ -1198,7 +1206,7 @@ static int cy8c95x0_set_mode(struct cy8c95x0_pinctrl *chip, unsigned int off, bo
u8 port = cypress_get_port(chip, off);
u8 bit = cypress_get_pin_mask(chip, off);
- return cy8c95x0_regmap_write_bits(chip, CY8C95X0_PWMSEL, port, bit, mode ? bit : 0);
+ return cy8c95x0_regmap_write_bits(chip, CY8C95X0_SELPWM, port, bit, mode ? bit : 0);
}
static int cy8c95x0_pinmux_mode(struct cy8c95x0_pinctrl *chip,
@@ -1347,7 +1355,7 @@ static int cy8c95x0_irq_setup(struct cy8c95x0_pinctrl *chip, int irq)
ret = devm_request_threaded_irq(chip->dev, irq,
NULL, cy8c95x0_irq_handler,
- IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_HIGH,
+ IRQF_ONESHOT | IRQF_SHARED,
dev_name(chip->dev), chip);
if (ret) {
dev_err(chip->dev, "failed to request irq %d\n", irq);
@@ -1438,15 +1446,15 @@ static int cy8c95x0_probe(struct i2c_client *client)
switch (chip->tpin) {
case 20:
strscpy(chip->name, cy8c95x0_id[0].name);
- regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 3 * MUXED_STRIDE;
+ regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 3 * MUXED_STRIDE - 1;
break;
case 40:
strscpy(chip->name, cy8c95x0_id[1].name);
- regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 6 * MUXED_STRIDE;
+ regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 6 * MUXED_STRIDE - 1;
break;
case 60:
strscpy(chip->name, cy8c95x0_id[2].name);
- regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 8 * MUXED_STRIDE;
+ regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 8 * MUXED_STRIDE - 1;
break;
default:
return -ENODEV;
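The range_max fix above is a classic off-by-one: N windows of MUXED_STRIDE registers starting at CY8C95X0_VIRTUAL end at base + N * stride - 1. A tiny arithmetic check with placeholder base/stride values:

#include <stdio.h>

#define VIRTUAL		0x40	/* assumed base of the muxed window */
#define MUXED_STRIDE	0x20	/* assumed registers reserved per port */

int main(void)
{
	int nports = 3;
	int first = VIRTUAL;
	int last = VIRTUAL + nports * MUXED_STRIDE - 1;

	printf("muxed window: 0x%02x..0x%02x (%d registers)\n",
	       first, last, last - first + 1);
	return 0;
}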
diff --git a/drivers/platform/cznic/Kconfig b/drivers/platform/cznic/Kconfig
index 49c383eb6785..13e37b49d9d0 100644
--- a/drivers/platform/cznic/Kconfig
+++ b/drivers/platform/cznic/Kconfig
@@ -6,6 +6,7 @@
menuconfig CZNIC_PLATFORMS
bool "Platform support for CZ.NIC's Turris hardware"
+ depends on ARCH_MVEBU || COMPILE_TEST
help
Say Y here to be able to choose driver support for CZ.NIC's Turris
devices. This option alone does not add any kernel code.
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index dfb5d4b8c046..30bd366d7b58 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1121,7 +1121,7 @@ static int ideapad_dytc_profile_init(struct ideapad_private *priv)
/* Create platform_profile structure and register */
priv->dytc->ppdev = devm_platform_profile_register(&priv->platform_device->dev,
- "ideapad-laptop", &priv->dytc,
+ "ideapad-laptop", priv->dytc,
&dytc_profile_ops);
if (IS_ERR(priv->dytc->ppdev)) {
err = PTR_ERR(priv->dytc->ppdev);
diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h
index 5c3c0dfa1bf8..f369fb0d3d82 100644
--- a/drivers/platform/x86/intel/ifs/ifs.h
+++ b/drivers/platform/x86/intel/ifs/ifs.h
@@ -23,12 +23,14 @@
* IFS Image
* ---------
*
- * Intel provides a firmware file containing the scan tests via
- * github [#f1]_. Similar to microcode there is a separate file for each
+ * Intel provides firmware files containing the scan tests via the webpage [#f1]_.
+ * Look under the "In-Field Scan Test Images Download" section towards the
+ * end of the page. Similar to microcode, there are separate files for each
* family-model-stepping. IFS Images are not applicable for some test types.
* Wherever applicable the sysfs directory would provide a "current_batch" file
* (see below) for loading the image.
*
+ * .. [#f1] https://intel.com/InFieldScan
*
* IFS Image Loading
* -----------------
@@ -125,9 +127,6 @@
* 2) Hardware allows for some number of cores to be tested in parallel.
* The driver does not make use of this, it only tests one core at a time.
*
- * .. [#f1] https://github.com/intel/TBD
- *
- *
* Structural Based Functional Test at Field (SBAF):
* -------------------------------------------------
*
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index 31015ebe20d8..092252eb95a8 100644
--- a/drivers/platform/x86/intel/int3472/discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -2,6 +2,7 @@
/* Author: Dan Scally <djrscally@gmail.com> */
#include <linux/acpi.h>
+#include <linux/array_size.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
@@ -55,7 +56,7 @@ static void skl_int3472_log_sensor_module_name(struct int3472_discrete_device *i
static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry,
struct acpi_resource_gpio *agpio,
- const char *func, u32 polarity)
+ const char *func, unsigned long gpio_flags)
{
char *path = agpio->resource_source.string_ptr;
struct acpi_device *adev;
@@ -70,14 +71,14 @@ static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry,
if (!adev)
return -ENODEV;
- *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], func, polarity);
+ *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], func, gpio_flags);
return 0;
}
static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int3472,
struct acpi_resource_gpio *agpio,
- const char *func, u32 polarity)
+ const char *func, unsigned long gpio_flags)
{
int ret;
@@ -87,7 +88,7 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347
}
ret = skl_int3472_fill_gpiod_lookup(&int3472->gpios.table[int3472->n_sensor_gpios],
- agpio, func, polarity);
+ agpio, func, gpio_flags);
if (ret)
return ret;
@@ -100,7 +101,7 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347
static struct gpio_desc *
skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
struct acpi_resource_gpio *agpio,
- const char *func, u32 polarity)
+ const char *func, unsigned long gpio_flags)
{
struct gpio_desc *desc;
int ret;
@@ -111,7 +112,7 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
return ERR_PTR(-ENOMEM);
lookup->dev_id = dev_name(int3472->dev);
- ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, func, polarity);
+ ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, func, gpio_flags);
if (ret)
return ERR_PTR(ret);
@@ -122,32 +123,76 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
return desc;
}
-static void int3472_get_func_and_polarity(u8 type, const char **func, u32 *polarity)
+/**
+ * struct int3472_gpio_map - Map GPIOs to whatever is expected by the
+ * sensor driver (as in DT bindings)
+ * @hid: The ACPI HID of the device without the instance number e.g. INT347E
+ * @type_from: The GPIO type from ACPI ?SDT
+ * @type_to: The assigned GPIO type, typically same as @type_from
+ * @func: The function, e.g. "enable"
+ * @polarity_low: true if the GPIO is active low (GPIO_ACTIVE_LOW),
+ *		  false if it is active high (GPIO_ACTIVE_HIGH)
+ */
+struct int3472_gpio_map {
+ const char *hid;
+ u8 type_from;
+ u8 type_to;
+ bool polarity_low;
+ const char *func;
+};
+
+static const struct int3472_gpio_map int3472_gpio_map[] = {
+ { "INT347E", INT3472_GPIO_TYPE_RESET, INT3472_GPIO_TYPE_RESET, false, "enable" },
+};
+
+static void int3472_get_func_and_polarity(struct acpi_device *adev, u8 *type,
+ const char **func, unsigned long *gpio_flags)
{
- switch (type) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(int3472_gpio_map); i++) {
+ /*
+ * Map the firmware-provided GPIO to whatever a driver expects
+ * (as in DT bindings). First check if the type matches with the
+ * GPIO map, then further check that the device _HID matches.
+ */
+ if (*type != int3472_gpio_map[i].type_from)
+ continue;
+
+ if (!acpi_dev_hid_uid_match(adev, int3472_gpio_map[i].hid, NULL))
+ continue;
+
+ *type = int3472_gpio_map[i].type_to;
+ *gpio_flags = int3472_gpio_map[i].polarity_low ?
+ GPIO_ACTIVE_LOW : GPIO_ACTIVE_HIGH;
+ *func = int3472_gpio_map[i].func;
+ return;
+ }
+
+ switch (*type) {
case INT3472_GPIO_TYPE_RESET:
*func = "reset";
- *polarity = GPIO_ACTIVE_LOW;
+ *gpio_flags = GPIO_ACTIVE_LOW;
break;
case INT3472_GPIO_TYPE_POWERDOWN:
*func = "powerdown";
- *polarity = GPIO_ACTIVE_LOW;
+ *gpio_flags = GPIO_ACTIVE_LOW;
break;
case INT3472_GPIO_TYPE_CLK_ENABLE:
*func = "clk-enable";
- *polarity = GPIO_ACTIVE_HIGH;
+ *gpio_flags = GPIO_ACTIVE_HIGH;
break;
case INT3472_GPIO_TYPE_PRIVACY_LED:
*func = "privacy-led";
- *polarity = GPIO_ACTIVE_HIGH;
+ *gpio_flags = GPIO_ACTIVE_HIGH;
break;
case INT3472_GPIO_TYPE_POWER_ENABLE:
*func = "power-enable";
- *polarity = GPIO_ACTIVE_HIGH;
+ *gpio_flags = GPIO_ACTIVE_HIGH;
break;
default:
*func = "unknown";
- *polarity = GPIO_ACTIVE_HIGH;
+ *gpio_flags = GPIO_ACTIVE_HIGH;
break;
}
}
@@ -194,7 +239,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
struct gpio_desc *gpio;
const char *err_msg;
const char *func;
- u32 polarity;
+ unsigned long gpio_flags;
int ret;
if (!acpi_gpio_get_io_resource(ares, &agpio))
@@ -217,7 +262,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
type = FIELD_GET(INT3472_GPIO_DSM_TYPE, obj->integer.value);
- int3472_get_func_and_polarity(type, &func, &polarity);
+ int3472_get_func_and_polarity(int3472->sensor, &type, &func, &gpio_flags);
pin = FIELD_GET(INT3472_GPIO_DSM_PIN, obj->integer.value);
/* Pin field is not really used under Windows and wraps around at 8 bits */
@@ -227,16 +272,16 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
active_value = FIELD_GET(INT3472_GPIO_DSM_SENSOR_ON_VAL, obj->integer.value);
if (!active_value)
- polarity ^= GPIO_ACTIVE_LOW;
+ gpio_flags ^= GPIO_ACTIVE_LOW;
dev_dbg(int3472->dev, "%s %s pin %d active-%s\n", func,
agpio->resource_source.string_ptr, agpio->pin_table[0],
- str_high_low(polarity == GPIO_ACTIVE_HIGH));
+ str_high_low(gpio_flags == GPIO_ACTIVE_HIGH));
switch (type) {
case INT3472_GPIO_TYPE_RESET:
case INT3472_GPIO_TYPE_POWERDOWN:
- ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, polarity);
+ ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, gpio_flags);
if (ret)
err_msg = "Failed to map GPIO pin to sensor\n";
@@ -244,7 +289,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
case INT3472_GPIO_TYPE_CLK_ENABLE:
case INT3472_GPIO_TYPE_PRIVACY_LED:
case INT3472_GPIO_TYPE_POWER_ENABLE:
- gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, func, polarity);
+ gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, func, gpio_flags);
if (IS_ERR(gpio)) {
ret = PTR_ERR(gpio);
err_msg = "Failed to get GPIO\n";
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 10f04b944117..1ee0fb5f8250 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -626,8 +626,8 @@ static u32 convert_ltr_scale(u32 val)
static int pmc_core_ltr_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
- u32 ltr_raw_data, scale, val;
+ u64 decoded_snoop_ltr, decoded_non_snoop_ltr, val;
+ u32 ltr_raw_data, scale;
u16 snoop_ltr, nonsnoop_ltr;
unsigned int i, index, ltr_index = 0;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 1fcb0f99695a..72a10ed2017c 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -7885,6 +7885,7 @@ static struct ibm_struct volume_driver_data = {
#define FAN_NS_CTRL_STATUS BIT(2) /* Bit which determines control is enabled or not */
#define FAN_NS_CTRL BIT(4) /* Bit which determines control is by host or EC */
+#define FAN_CLOCK_TPM (22500*60) /* Ticks per minute for a 22.5 kHz clock */
enum { /* Fan control constants */
fan_status_offset = 0x2f, /* EC register 0x2f */
@@ -7940,6 +7941,7 @@ static int fan_watchdog_maxinterval;
static bool fan_with_ns_addr;
static bool ecfw_with_fan_dec_rpm;
+static bool fan_speed_in_tpr;
static struct mutex fan_mutex;
@@ -8142,8 +8144,11 @@ static int fan_get_speed(unsigned int *speed)
!acpi_ec_read(fan_rpm_offset + 1, &hi)))
return -EIO;
- if (likely(speed))
+ if (likely(speed)) {
*speed = (hi << 8) | lo;
+ if (fan_speed_in_tpr && *speed != 0)
+ *speed = FAN_CLOCK_TPM / *speed;
+ }
break;
case TPACPI_FAN_RD_TPEC_NS:
if (!acpi_ec_read(fan_rpm_status_ns, &lo))
@@ -8176,8 +8181,11 @@ static int fan2_get_speed(unsigned int *speed)
if (rc)
return -EIO;
- if (likely(speed))
+ if (likely(speed)) {
*speed = (hi << 8) | lo;
+ if (fan_speed_in_tpr && *speed != 0)
+ *speed = FAN_CLOCK_TPM / *speed;
+ }
break;
case TPACPI_FAN_RD_TPEC_NS:
@@ -8788,6 +8796,7 @@ static const struct attribute_group fan_driver_attr_group = {
#define TPACPI_FAN_NOFAN 0x0008 /* no fan available */
#define TPACPI_FAN_NS 0x0010 /* For EC with non-Standard register addresses */
#define TPACPI_FAN_DECRPM 0x0020 /* For ECFW's with RPM in register as decimal */
+#define TPACPI_FAN_TPR 0x0040 /* Fan speed is in Ticks Per Revolution */
static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1),
@@ -8817,6 +8826,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('R', '0', 'V', TPACPI_FAN_NS), /* 11e Gen5 KL-Y */
TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */
TPACPI_Q_LNV3('R', '0', 'Q', TPACPI_FAN_DECRPM),/* L480 */
+ TPACPI_Q_LNV('8', 'F', TPACPI_FAN_TPR), /* ThinkPad x120e */
};
static int __init fan_init(struct ibm_init_struct *iibm)
@@ -8887,6 +8897,8 @@ static int __init fan_init(struct ibm_init_struct *iibm)
if (quirks & TPACPI_FAN_Q1)
fan_quirk1_setup();
+ if (quirks & TPACPI_FAN_TPR)
+ fan_speed_in_tpr = true;
/* Try and probe the 2nd fan */
tp_features.second_fan = 1; /* needed for get_speed to work */
res = fan2_get_speed(&speed);
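With TPACPI_FAN_TPR set, the EC value is ticks per revolution of a 22.5 kHz clock, so RPM = FAN_CLOCK_TPM / ticks. A short worked example (the 450-tick reading is made up):

#include <stdio.h>

#define FAN_CLOCK_TPM (22500 * 60)	/* clock ticks per minute */

static unsigned int ticks_to_rpm(unsigned int ticks)
{
	return ticks ? FAN_CLOCK_TPM / ticks : 0;	/* avoid dividing by zero */
}

int main(void)
{
	/* e.g. 450 ticks per revolution -> 3000 RPM */
	printf("%u RPM\n", ticks_to_rpm(450));
	printf("%u RPM\n", ticks_to_rpm(0));
	return 0;
}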
@@ -10319,6 +10331,10 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_MODE_PSC_BALANCE 5 /* Default mode aka balanced */
#define DYTC_MODE_PSC_PERFORM 7 /* High power mode aka performance */
+#define DYTC_MODE_PSCV9_LOWPOWER 1 /* Low power mode */
+#define DYTC_MODE_PSCV9_BALANCE 3 /* Default mode aka balanced */
+#define DYTC_MODE_PSCV9_PERFORM 4 /* High power mode aka performance */
+
#define DYTC_ERR_MASK 0xF /* Bits 0-3 in cmd result are the error result */
#define DYTC_ERR_SUCCESS 1 /* CMD completed successful */
@@ -10339,6 +10355,10 @@ static int dytc_capabilities;
static bool dytc_mmc_get_available;
static int profile_force;
+static int platform_psc_profile_lowpower = DYTC_MODE_PSC_LOWPOWER;
+static int platform_psc_profile_balanced = DYTC_MODE_PSC_BALANCE;
+static int platform_psc_profile_performance = DYTC_MODE_PSC_PERFORM;
+
static int convert_dytc_to_profile(int funcmode, int dytcmode,
enum platform_profile_option *profile)
{
@@ -10360,19 +10380,15 @@ static int convert_dytc_to_profile(int funcmode, int dytcmode,
}
return 0;
case DYTC_FUNCTION_PSC:
- switch (dytcmode) {
- case DYTC_MODE_PSC_LOWPOWER:
+ if (dytcmode == platform_psc_profile_lowpower)
*profile = PLATFORM_PROFILE_LOW_POWER;
- break;
- case DYTC_MODE_PSC_BALANCE:
+ else if (dytcmode == platform_psc_profile_balanced)
*profile = PLATFORM_PROFILE_BALANCED;
- break;
- case DYTC_MODE_PSC_PERFORM:
+ else if (dytcmode == platform_psc_profile_performance)
*profile = PLATFORM_PROFILE_PERFORMANCE;
- break;
- default: /* Unknown mode */
+ else
return -EINVAL;
- }
+
return 0;
case DYTC_FUNCTION_AMT:
/* For now return balanced. It's the closest we have to 'auto' */
@@ -10393,19 +10409,19 @@ static int convert_profile_to_dytc(enum platform_profile_option profile, int *pe
if (dytc_capabilities & BIT(DYTC_FC_MMC))
*perfmode = DYTC_MODE_MMC_LOWPOWER;
else if (dytc_capabilities & BIT(DYTC_FC_PSC))
- *perfmode = DYTC_MODE_PSC_LOWPOWER;
+ *perfmode = platform_psc_profile_lowpower;
break;
case PLATFORM_PROFILE_BALANCED:
if (dytc_capabilities & BIT(DYTC_FC_MMC))
*perfmode = DYTC_MODE_MMC_BALANCE;
else if (dytc_capabilities & BIT(DYTC_FC_PSC))
- *perfmode = DYTC_MODE_PSC_BALANCE;
+ *perfmode = platform_psc_profile_balanced;
break;
case PLATFORM_PROFILE_PERFORMANCE:
if (dytc_capabilities & BIT(DYTC_FC_MMC))
*perfmode = DYTC_MODE_MMC_PERFORM;
else if (dytc_capabilities & BIT(DYTC_FC_PSC))
- *perfmode = DYTC_MODE_PSC_PERFORM;
+ *perfmode = platform_psc_profile_performance;
break;
default: /* Unknown profile */
return -EOPNOTSUPP;
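convert_dytc_to_profile()/convert_profile_to_dytc() above now go through variables so the PSC mode numbers can be re-pointed for DYTC version 9. A compact sketch of that indirection; the v9 values match the new defines, while the pre-v9 low-power value of 3 is an assumption since its define is not shown in this hunk:

#include <stdio.h>

enum profile { LOW_POWER, BALANCED, PERFORMANCE, UNKNOWN };

static int psc_lowpower = 3, psc_balance = 5, psc_perform = 7;	/* pre-v9 */

static void select_dytc_v9(void)
{
	psc_lowpower = 1;
	psc_balance = 3;
	psc_perform = 4;
}

static enum profile mode_to_profile(int mode)
{
	if (mode == psc_lowpower)
		return LOW_POWER;
	if (mode == psc_balance)
		return BALANCED;
	if (mode == psc_perform)
		return PERFORMANCE;
	return UNKNOWN;
}

int main(void)
{
	printf("%d\n", mode_to_profile(5));	/* BALANCED (1) on old firmware */
	select_dytc_v9();
	printf("%d\n", mode_to_profile(5));	/* UNKNOWN (3) on DYTC v9 */
	printf("%d\n", mode_to_profile(3));	/* BALANCED (1) on DYTC v9 */
	return 0;
}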
@@ -10599,6 +10615,7 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
if (output & BIT(DYTC_QUERY_ENABLE_BIT))
dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
+ dbg_printk(TPACPI_DBG_INIT, "DYTC version %d\n", dytc_version);
/* Check DYTC is enabled and supports mode setting */
if (dytc_version < 5)
return -ENODEV;
@@ -10637,6 +10654,11 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
}
} else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */
pr_debug("PSC is supported\n");
+ if (dytc_version >= 9) { /* update profiles for DYTC 9 and up */
+ platform_psc_profile_lowpower = DYTC_MODE_PSCV9_LOWPOWER;
+ platform_psc_profile_balanced = DYTC_MODE_PSCV9_BALANCE;
+ platform_psc_profile_performance = DYTC_MODE_PSCV9_PERFORM;
+ }
} else {
dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n");
return -ENODEV;
@@ -10646,8 +10668,8 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
"DYTC version %d: thermal mode available\n", dytc_version);
/* Create platform_profile structure and register */
- tpacpi_pprof = devm_platform_profile_register(&tpacpi_pdev->dev, "thinkpad-acpi",
- NULL, &dytc_profile_ops);
+ tpacpi_pprof = platform_profile_register(&tpacpi_pdev->dev, "thinkpad-acpi-profile",
+ NULL, &dytc_profile_ops);
/*
* If for some reason platform_profiles aren't enabled
* don't quit terminally.
@@ -10665,8 +10687,15 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
return 0;
}
+static void dytc_profile_exit(void)
+{
+ if (!IS_ERR_OR_NULL(tpacpi_pprof))
+ platform_profile_remove(tpacpi_pprof);
+}
+
static struct ibm_struct dytc_profile_driver_data = {
.name = "dytc-profile",
+ .exit = dytc_profile_exit,
};
/*************************************************************************
diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
index fa27195f074e..3c3158f31a48 100644
--- a/drivers/power/supply/axp20x_battery.c
+++ b/drivers/power/supply/axp20x_battery.c
@@ -466,10 +466,9 @@ static int axp717_battery_get_prop(struct power_supply *psy,
/*
* If a fault is detected it must also be cleared; if the
- * condition persists it should reappear (This is an
- * assumption, it's actually not documented). A restart was
- * not sufficient to clear the bit in testing despite the
- * register listed as POR.
+ * condition persists it should reappear. A restart was not
+ * sufficient to clear the bit in testing, despite the register
+ * being listed as POR.
*/
case POWER_SUPPLY_PROP_HEALTH:
ret = regmap_read(axp20x_batt->regmap, AXP717_PMU_FAULT,
@@ -480,26 +479,26 @@ static int axp717_battery_get_prop(struct power_supply *psy,
switch (reg & AXP717_BATT_PMU_FAULT_MASK) {
case AXP717_BATT_UVLO_2_5V:
val->intval = POWER_SUPPLY_HEALTH_DEAD;
- regmap_update_bits(axp20x_batt->regmap,
- AXP717_PMU_FAULT,
- AXP717_BATT_UVLO_2_5V,
- AXP717_BATT_UVLO_2_5V);
+ regmap_write_bits(axp20x_batt->regmap,
+ AXP717_PMU_FAULT,
+ AXP717_BATT_UVLO_2_5V,
+ AXP717_BATT_UVLO_2_5V);
return 0;
case AXP717_BATT_OVER_TEMP:
val->intval = POWER_SUPPLY_HEALTH_HOT;
- regmap_update_bits(axp20x_batt->regmap,
- AXP717_PMU_FAULT,
- AXP717_BATT_OVER_TEMP,
- AXP717_BATT_OVER_TEMP);
+ regmap_write_bits(axp20x_batt->regmap,
+ AXP717_PMU_FAULT,
+ AXP717_BATT_OVER_TEMP,
+ AXP717_BATT_OVER_TEMP);
return 0;
case AXP717_BATT_UNDER_TEMP:
val->intval = POWER_SUPPLY_HEALTH_COLD;
- regmap_update_bits(axp20x_batt->regmap,
- AXP717_PMU_FAULT,
- AXP717_BATT_UNDER_TEMP,
- AXP717_BATT_UNDER_TEMP);
+ regmap_write_bits(axp20x_batt->regmap,
+ AXP717_PMU_FAULT,
+ AXP717_BATT_UNDER_TEMP,
+ AXP717_BATT_UNDER_TEMP);
return 0;
default:
diff --git a/drivers/power/supply/da9150-fg.c b/drivers/power/supply/da9150-fg.c
index 652c1f213af1..4f28ef1bba1a 100644
--- a/drivers/power/supply/da9150-fg.c
+++ b/drivers/power/supply/da9150-fg.c
@@ -247,9 +247,9 @@ static int da9150_fg_current_avg(struct da9150_fg *fg,
DA9150_QIF_SD_GAIN_SIZE);
da9150_fg_read_sync_end(fg);
- div = (u64) (sd_gain * shunt_val * 65536ULL);
+ div = 65536ULL * sd_gain * shunt_val;
do_div(div, 1000000);
- res = (u64) (iavg * 1000000ULL);
+ res = 1000000ULL * iavg;
do_div(res, div);
val->intval = (int) res;
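The da9150-fg change reorders the multiplications so the first operand is already 64-bit before do_div() performs the 64-by-32 division. A standalone demonstration of the overflow being avoided; the operand values below are made up purely to trigger the 32-bit wrap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sd_gain = 100000, shunt_val = 50000;

	/* Left-to-right: the 32-bit product wraps before being widened. */
	uint64_t wraps = (uint64_t)(sd_gain * shunt_val * 65536ULL);
	/* 64-bit constant first: the whole chain is evaluated in 64 bits. */
	uint64_t exact = 65536ULL * sd_gain * shunt_val;

	printf("wraps=%llu exact=%llu\n",
	       (unsigned long long)wraps, (unsigned long long)exact);
	return 0;
}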
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index d0bb52a7a036..76c340b38015 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -1592,11 +1592,11 @@ __power_supply_register(struct device *parent,
if (rc)
goto register_thermal_failed;
- scoped_guard(rwsem_read, &psy->extensions_sem) {
- rc = power_supply_create_triggers(psy);
- if (rc)
- goto create_triggers_failed;
+ rc = power_supply_create_triggers(psy);
+ if (rc)
+ goto create_triggers_failed;
+ scoped_guard(rwsem_read, &psy->extensions_sem) {
rc = power_supply_add_hwmon_sysfs(psy);
if (rc)
goto add_hwmon_sysfs_failed;
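For readers unfamiliar with it, scoped_guard() from linux/cleanup.h takes the lock on entry to the braced block and releases it automatically on every exit path, which is why power_supply_add_hwmon_sysfs() can stay inside the guard while the trigger creation moves out. An illustrative sketch with placeholder names (sem, cb), not taken from the power-supply code:

#include <linux/cleanup.h>
#include <linux/rwsem.h>

static int read_under_guard(struct rw_semaphore *sem, int (*cb)(void *), void *arg)
{
	scoped_guard(rwsem_read, sem) {
		int ret = cb(arg);

		if (ret)
			return ret;	/* up_read() still runs automatically */
	}

	return 0;
}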
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 52c32dcbf7d8..4112a0097338 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -627,8 +627,7 @@ struct powercap_control_type *powercap_register_control_type(
dev_set_name(&control_type->dev, "%s", name);
result = device_register(&control_type->dev);
if (result) {
- if (control_type->allocated)
- kfree(control_type);
+ put_device(&control_type->dev);
return ERR_PTR(result);
}
idr_init(&control_type->idr);
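The powercap fix applies the usual rule for devices embedded in a larger structure: once device_register() has been attempted, the memory must be freed through the release() callback via put_device(), never with a direct kfree(). A generic sketch with made-up names (struct foo, foo_release):

struct foo {
	struct device dev;
};

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo, dev));
}

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;

	f->dev.release = foo_release;
	dev_set_name(&f->dev, "foo0");

	if (device_register(&f->dev)) {
		/* Drops the initial reference; foo_release() frees f. */
		put_device(&f->dev);
		return NULL;
	}

	return f;
}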
diff --git a/drivers/ptp/ptp_vmclock.c b/drivers/ptp/ptp_vmclock.c
index 0a2cfc8ad3c5..b3a83b03d9c1 100644
--- a/drivers/ptp/ptp_vmclock.c
+++ b/drivers/ptp/ptp_vmclock.c
@@ -414,16 +414,16 @@ static ssize_t vmclock_miscdev_read(struct file *fp, char __user *buf,
}
static const struct file_operations vmclock_miscdev_fops = {
+ .owner = THIS_MODULE,
.mmap = vmclock_miscdev_mmap,
.read = vmclock_miscdev_read,
};
/* module operations */
-static void vmclock_remove(struct platform_device *pdev)
+static void vmclock_remove(void *data)
{
- struct device *dev = &pdev->dev;
- struct vmclock_state *st = dev_get_drvdata(dev);
+ struct vmclock_state *st = data;
if (st->ptp_clock)
ptp_clock_unregister(st->ptp_clock);
@@ -506,14 +506,13 @@ static int vmclock_probe(struct platform_device *pdev)
if (ret) {
dev_info(dev, "Failed to obtain physical address: %d\n", ret);
- goto out;
+ return ret;
}
if (resource_size(&st->res) < VMCLOCK_MIN_SIZE) {
dev_info(dev, "Region too small (0x%llx)\n",
resource_size(&st->res));
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
st->clk = devm_memremap(dev, st->res.start, resource_size(&st->res),
MEMREMAP_WB | MEMREMAP_DEC);
@@ -521,31 +520,34 @@ static int vmclock_probe(struct platform_device *pdev)
ret = PTR_ERR(st->clk);
dev_info(dev, "failed to map shared memory\n");
st->clk = NULL;
- goto out;
+ return ret;
}
if (le32_to_cpu(st->clk->magic) != VMCLOCK_MAGIC ||
le32_to_cpu(st->clk->size) > resource_size(&st->res) ||
le16_to_cpu(st->clk->version) != 1) {
dev_info(dev, "vmclock magic fields invalid\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
ret = ida_alloc(&vmclock_ida, GFP_KERNEL);
if (ret < 0)
- goto out;
+ return ret;
st->index = ret;
ret = devm_add_action_or_reset(&pdev->dev, vmclock_put_idx, st);
if (ret)
- goto out;
+ return ret;
st->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "vmclock%d", st->index);
- if (!st->name) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!st->name)
+ return -ENOMEM;
+
+ st->miscdev.minor = MISC_DYNAMIC_MINOR;
+
+ ret = devm_add_action_or_reset(&pdev->dev, vmclock_remove, st);
+ if (ret)
+ return ret;
/*
* If the structure is big enough, it can be mapped to userspace.
@@ -554,13 +556,12 @@ static int vmclock_probe(struct platform_device *pdev)
* cross that bridge if/when we come to it.
*/
if (le32_to_cpu(st->clk->size) >= PAGE_SIZE) {
- st->miscdev.minor = MISC_DYNAMIC_MINOR;
st->miscdev.fops = &vmclock_miscdev_fops;
st->miscdev.name = st->name;
ret = misc_register(&st->miscdev);
if (ret)
- goto out;
+ return ret;
}
/* If there is valid clock information, register a PTP clock */
@@ -570,16 +571,14 @@ static int vmclock_probe(struct platform_device *pdev)
if (IS_ERR(st->ptp_clock)) {
ret = PTR_ERR(st->ptp_clock);
st->ptp_clock = NULL;
- vmclock_remove(pdev);
- goto out;
+ return ret;
}
}
if (!st->miscdev.minor && !st->ptp_clock) {
/* Neither miscdev nor PTP registered */
dev_info(dev, "vmclock: Neither miscdev nor PTP available; not registering\n");
- ret = -ENODEV;
- goto out;
+ return -ENODEV;
}
dev_info(dev, "%s: registered %s%s%s\n", st->name,
@@ -587,10 +586,7 @@ static int vmclock_probe(struct platform_device *pdev)
(st->miscdev.minor && st->ptp_clock) ? ", " : "",
st->ptp_clock ? "PTP" : "");
- dev_set_drvdata(dev, st);
-
- out:
- return ret;
+ return 0;
}
static const struct acpi_device_id vmclock_acpi_ids[] = {
@@ -601,7 +597,6 @@ MODULE_DEVICE_TABLE(acpi, vmclock_acpi_ids);
static struct platform_driver vmclock_platform_driver = {
.probe = vmclock_probe,
- .remove = vmclock_remove,
.driver = {
.name = "vmclock",
.acpi_match_table = vmclock_acpi_ids,
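The vmclock rework drops the driver's .remove callback and the manual "out:" error label in favour of devres. A sketch of the underlying pattern with made-up names (struct my_state, my_teardown); the point is that devm_add_action_or_reset() runs the callback on driver unbind and also immediately if its own registration fails:

struct my_state {
	struct ptp_clock *ptp_clock;
};

static void my_teardown(void *data)
{
	struct my_state *st = data;

	if (st->ptp_clock)
		ptp_clock_unregister(st->ptp_clock);
}

static int my_probe(struct platform_device *pdev)
{
	struct my_state *st;
	int ret;

	st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = devm_add_action_or_reset(&pdev->dev, my_teardown, st);
	if (ret)
		return ret;

	/* Later failures can simply return; teardown runs via devres. */
	return 0;
}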
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 89578b91c468..4ddf0efead68 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -5774,43 +5774,36 @@ regulator_register(struct device *dev,
goto clean;
}
- if (config->init_data) {
- /*
- * Providing of_match means the framework is expected to parse
- * DT to get the init_data. This would conflict with provided
- * init_data, if set. Warn if it happens.
- */
- if (regulator_desc->of_match)
- dev_warn(dev, "Using provided init data - OF match ignored\n");
+ /*
+ * DT may override the config->init_data provided if the platform
+ * needs to do so. If so, config->init_data is completely ignored.
+ */
+ init_data = regulator_of_get_init_data(dev, regulator_desc, config,
+ &rdev->dev.of_node);
+ /*
+ * Sometimes not all resources are probed already so we need to take
+ * that into account. This happens most of the time if the ena_gpiod comes
+ * from a gpio extender or something else.
+ */
+ if (PTR_ERR(init_data) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto clean;
+ }
+
+ /*
+ * We need to keep track of any GPIO descriptor coming from the
+ * device tree until we have handled it over to the core. If the
+ * config that was passed in to this function DOES NOT contain
+ * a descriptor, and the config after this call DOES contain
+ * a descriptor, we definitely got one from parsing the device
+ * tree.
+ */
+ if (!cfg->ena_gpiod && config->ena_gpiod)
+ dangling_of_gpiod = true;
+ if (!init_data) {
init_data = config->init_data;
rdev->dev.of_node = of_node_get(config->of_node);
-
- } else {
- init_data = regulator_of_get_init_data(dev, regulator_desc,
- config,
- &rdev->dev.of_node);
-
- /*
- * Sometimes not all resources are probed already so we need to
- * take that into account. This happens most the time if the
- * ena_gpiod comes from a gpio extender or something else.
- */
- if (PTR_ERR(init_data) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto clean;
- }
-
- /*
- * We need to keep track of any GPIO descriptor coming from the
- * device tree until we have handled it over to the core. If the
- * config that was passed in to this function DOES NOT contain a
- * descriptor, and the config after this call DOES contain a
- * descriptor, we definitely got one from parsing the device
- * tree.
- */
- if (!cfg->ena_gpiod && config->ena_gpiod)
- dangling_of_gpiod = true;
}
ww_mutex_init(&rdev->mutex, &regulator_ww_class);
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 4a0b3f19bd8e..4f01b1929240 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -695,7 +695,8 @@ static int info_update(void)
if (time_after(jiffies, chp_info_expires)) {
/* Data is too old, update. */
rc = sclp_chp_read_info(&chp_info);
- chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL ;
+ if (!rc)
+ chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
}
mutex_unlock(&info_lock);
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index e36e3ea165d3..2f34761e6413 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -588,6 +588,15 @@ out:
return ret;
}
+static void ism_dev_release(struct device *dev)
+{
+ struct ism_dev *ism;
+
+ ism = container_of(dev, struct ism_dev, dev);
+
+ kfree(ism);
+}
+
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ism_dev *ism;
@@ -601,6 +610,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_set_drvdata(&pdev->dev, ism);
ism->pdev = pdev;
ism->dev.parent = &pdev->dev;
+ ism->dev.release = ism_dev_release;
device_initialize(&ism->dev);
dev_set_name(&ism->dev, dev_name(&pdev->dev));
ret = device_add(&ism->dev);
@@ -637,7 +647,7 @@ err:
device_del(&ism->dev);
err_dev:
dev_set_drvdata(&pdev->dev, NULL);
- kfree(ism);
+ put_device(&ism->dev);
return ret;
}
@@ -682,7 +692,7 @@ static void ism_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
device_del(&ism->dev);
dev_set_drvdata(&pdev->dev, NULL);
- kfree(ism);
+ put_device(&ism->dev);
}
static struct pci_driver ism_driver = {
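With a release() callback installed before device_add(), the ism teardown paths stop freeing the structure directly and instead drop the reference. A short sketch with generic names (struct bar); the device_del() + put_device() pair used in ism_remove() is what device_unregister() does internally:

struct bar {
	struct device dev;
};

static void bar_destroy(struct bar *b)
{
	device_del(&b->dev);	/* detach from the device hierarchy */
	put_device(&b->dev);	/* release() frees b at refcount zero */
}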
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index a3adaec5504e..20328d695ef9 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -7050,14 +7050,16 @@ int qeth_open(struct net_device *dev)
card->data.state = CH_STATE_UP;
netif_tx_start_all_queues(dev);
- local_bh_disable();
qeth_for_each_output_queue(card, queue, i) {
netif_napi_add_tx(dev, &queue->napi, qeth_tx_poll);
napi_enable(&queue->napi);
- napi_schedule(&queue->napi);
}
-
napi_enable(&card->napi);
+
+ local_bh_disable();
+ qeth_for_each_output_queue(card, queue, i) {
+ napi_schedule(&queue->napi);
+ }
napi_schedule(&card->napi);
/* kick-start the NAPI softirq: */
local_bh_enable();
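The qeth reordering keeps setup calls that may sleep in recent kernels outside the bottom-half-disabled region and batches only the napi_schedule() kicks inside it. A hedged sketch of that shape with placeholder names (struct my_queue, my_tx_poll):

struct my_queue {
	struct napi_struct napi;
};

static int my_tx_poll(struct napi_struct *napi, int budget);

static void start_tx_napi(struct net_device *dev, struct my_queue *q)
{
	/* add + enable with BHs enabled; these calls may sleep */
	netif_napi_add_tx(dev, &q->napi, my_tx_poll);
	napi_enable(&q->napi);

	local_bh_disable();
	napi_schedule(&q->napi);	/* only raises the softirq */
	local_bh_enable();		/* softirq runs here */
}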
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 1fd2da0264e3..47d74f881948 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2867,7 +2867,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
cpu_to_le32(upper_32_bits(dma_handle)),
cpu_to_le32(lower_32_bits(dma_handle)),
- cpu_to_le32(sg_dma_len(sg_next(s))));
+ cpu_to_le32(sg_dma_len(s)));
remseg--;
}
dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d776f13cd160..be0890e4e706 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -872,13 +872,18 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
case 0x1a: /* start stop unit in progress */
case 0x1b: /* sanitize in progress */
case 0x1d: /* configuration in progress */
- case 0x24: /* depopulation in progress */
- case 0x25: /* depopulation restore in progress */
action = ACTION_DELAYED_RETRY;
break;
case 0x0a: /* ALUA state transition */
action = ACTION_DELAYED_REPREP;
break;
+ /*
+ * Depopulation might take many hours,
+ * so it is not worthwhile to retry.
+ */
+ case 0x24: /* depopulation in progress */
+ case 0x25: /* depopulation restore in progress */
+ fallthrough;
default:
action = ACTION_FAIL;
break;
diff --git a/drivers/scsi/scsi_lib_test.c b/drivers/scsi/scsi_lib_test.c
index 99834426a100..ae8af0e0047a 100644
--- a/drivers/scsi/scsi_lib_test.c
+++ b/drivers/scsi/scsi_lib_test.c
@@ -67,6 +67,13 @@ static void scsi_lib_test_multiple_sense(struct kunit *test)
};
int i;
+ /* Success */
+ sc.result = 0;
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, NULL));
+ /* Command failed but caller did not pass in a failures array */
+ scsi_build_sense(&sc, 0, ILLEGAL_REQUEST, 0x91, 0x36);
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, NULL));
/* Match end of array */
scsi_build_sense(&sc, 0, ILLEGAL_REQUEST, 0x91, 0x36);
KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 087fcbfc9aaa..96d7e1a9a7c7 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -246,7 +246,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
}
ret = sbitmap_init_node(&sdev->budget_map,
scsi_device_max_queue_depth(sdev),
- new_shift, GFP_KERNEL,
+ new_shift, GFP_NOIO,
sdev->request_queue->node, false, true);
if (!ret)
sbitmap_resize(&sdev->budget_map, depth);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 5a101ac06c47..a8614e54544e 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1800,6 +1800,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
length = scsi_bufflen(scmnd);
payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
+ payload->range.len = 0;
payload_sz = 0;
if (scsi_sg_count(scmnd)) {
diff --git a/drivers/soc/loongson/loongson2_guts.c b/drivers/soc/loongson/loongson2_guts.c
index ae42e3a9127f..16913c3ef65c 100644
--- a/drivers/soc/loongson/loongson2_guts.c
+++ b/drivers/soc/loongson/loongson2_guts.c
@@ -114,8 +114,11 @@ static int loongson2_guts_probe(struct platform_device *pdev)
if (of_property_read_string(root, "model", &machine))
of_property_read_string_index(root, "compatible", 0, &machine);
of_node_put(root);
- if (machine)
+ if (machine) {
soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+ if (!soc_dev_attr.machine)
+ return -ENOMEM;
+ }
svr = loongson2_guts_get_svr();
soc_die = loongson2_soc_die_match(svr, loongson2_soc_die);
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 4783ab1adb8d..a3e88ced328a 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -365,7 +365,7 @@ static void smp2p_irq_print_chip(struct irq_data *irqd, struct seq_file *p)
{
struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
- seq_printf(p, " %8s", dev_name(entry->smp2p->dev));
+ seq_printf(p, "%8s", dev_name(entry->smp2p->dev));
}
static struct irq_chip smp2p_irq_chip = {
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index abdc49d9d940..d8c9be64d006 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -235,8 +235,8 @@
/**
* struct atmel_qspi_pcal - Pad Calibration Clock Division
* @pclk_rate: peripheral clock rate.
- * @pclkdiv: calibration clock division. The clock applied to the calibration
- * cell is divided by pclkdiv + 1.
+ * @pclk_div: calibration clock division. The clock applied to the calibration
+ * cell is divided by pclk_div + 1.
*/
struct atmel_qspi_pcal {
u32 pclk_rate;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 5f9cac41baff..06711a62fa3d 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -399,7 +399,7 @@ static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
lpss_ssp_select_cs(spi, config);
mask = LPSS_CS_CONTROL_CS_HIGH;
- __lpss_ssp_update_priv(drv_data, config->reg_cs_ctrl, mask, enable ? mask : 0);
+ __lpss_ssp_update_priv(drv_data, config->reg_cs_ctrl, mask, enable ? 0 : mask);
if (config->cs_clk_stays_gated) {
/*
* Changing CS alone when dynamic clock gating is on won't
diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c
index 6ad4b729897e..c4969f66a0ba 100644
--- a/drivers/spi/spi-sn-f-ospi.c
+++ b/drivers/spi/spi-sn-f-ospi.c
@@ -116,6 +116,9 @@ struct f_ospi {
static u32 f_ospi_get_dummy_cycle(const struct spi_mem_op *op)
{
+ if (!op->dummy.nbytes)
+ return 0;
+
return (op->dummy.nbytes * 8) / op->dummy.buswidth;
}
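The added check matters because a spi_mem_op with no dummy phase typically leaves dummy.buswidth at 0 as well, so the division below it would fault. A standalone restatement of the helper:

static unsigned int dummy_cycles(unsigned int nbytes, unsigned int buswidth)
{
	if (!nbytes)
		return 0;		/* no dummy phase: avoid a divide by zero */

	return (nbytes * 8) / buswidth;	/* cycles = dummy bits / bus width */
}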
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index c42cbde8a31b..210648a0092e 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -117,9 +117,9 @@ static ssize_t target_stat_tgt_status_show(struct config_item *item,
char *page)
{
if (to_stat_tgt_dev(item)->export_count)
- return snprintf(page, PAGE_SIZE, "activated");
+ return snprintf(page, PAGE_SIZE, "activated\n");
else
- return snprintf(page, PAGE_SIZE, "deactivated");
+ return snprintf(page, PAGE_SIZE, "deactivated\n");
}
static ssize_t target_stat_tgt_non_access_lus_show(struct config_item *item,
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
index 322a543b8c27..d0f397c90242 100644
--- a/drivers/tee/optee/supp.c
+++ b/drivers/tee/optee/supp.c
@@ -80,7 +80,6 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_supp *supp = &optee->supp;
struct optee_supp_req *req;
- bool interruptable;
u32 ret;
/*
@@ -111,36 +110,18 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
/*
* Wait for supplicant to process and return result, once we've
* returned from wait_for_completion(&req->c) successfully we have
- * exclusive access again.
+ * exclusive access again. Allow the wait to be killable so that
+ * it does not block indefinitely if the supplicant hangs for some
+ * reason.
*/
- while (wait_for_completion_interruptible(&req->c)) {
+ if (wait_for_completion_killable(&req->c)) {
mutex_lock(&supp->mutex);
- interruptable = !supp->ctx;
- if (interruptable) {
- /*
- * There's no supplicant available and since the
- * supp->mutex currently is held none can
- * become available until the mutex released
- * again.
- *
- * Interrupting an RPC to supplicant is only
- * allowed as a way of slightly improving the user
- * experience in case the supplicant hasn't been
- * started yet. During normal operation the supplicant
- * will serve all requests in a timely manner and
- * interrupting then wouldn't make sense.
- */
- if (req->in_queue) {
- list_del(&req->link);
- req->in_queue = false;
- }
+ if (req->in_queue) {
+ list_del(&req->link);
+ req->in_queue = false;
}
mutex_unlock(&supp->mutex);
-
- if (interruptable) {
- req->ret = TEEC_ERROR_COMMUNICATION;
- break;
- }
+ req->ret = TEEC_ERROR_COMMUNICATION;
}
ret = req->ret;
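The switch from an interruptible to a killable wait means routine signals can no longer abort an RPC to the supplicant, while a task stuck on a hung supplicant can still be terminated with a fatal signal. A minimal sketch, using a generic completion rather than the optee request:

#include <linux/completion.h>

static int wait_for_reply(struct completion *done)
{
	/*
	 * Returns -ERESTARTSYS only if a fatal signal (e.g. SIGKILL) is
	 * pending; ordinary signals do not wake this up, unlike
	 * wait_for_completion_interruptible().
	 */
	return wait_for_completion_killable(done);
}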
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index 280071be30b1..6b7ab1814c12 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -57,8 +57,6 @@ struct time_in_idle {
* @max_level: maximum cooling level. One less than total number of valid
* cpufreq frequencies.
* @em: Reference on the Energy Model of the device
- * @cdev: thermal_cooling_device pointer to keep track of the
- * registered cooling device.
* @policy: cpufreq policy.
* @cooling_ops: cpufreq callbacks to thermal cooling device ops
* @idle_time: idle time stats
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index df08f13052ff..8bb1a01fef2a 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -798,7 +798,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
nonseekable_open(inode, filp);
/* We refuse fsnotify events on ptmx, since it's a shared resource */
- filp->f_mode |= FMODE_NONOTIFY;
+ file_set_fsnotify_mode(filp, FMODE_NONOTIFY);
retval = tty_alloc_file(filp);
if (retval)
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 11e05aa014e5..b861585ca02a 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -374,6 +374,7 @@ static inline int is_omap1510_8250(struct uart_8250_port *pt)
#ifdef CONFIG_SERIAL_8250_DMA
extern int serial8250_tx_dma(struct uart_8250_port *);
+extern void serial8250_tx_dma_flush(struct uart_8250_port *);
extern int serial8250_rx_dma(struct uart_8250_port *);
extern void serial8250_rx_dma_flush(struct uart_8250_port *);
extern int serial8250_request_dma(struct uart_8250_port *);
@@ -406,6 +407,7 @@ static inline int serial8250_tx_dma(struct uart_8250_port *p)
{
return -1;
}
+static inline void serial8250_tx_dma_flush(struct uart_8250_port *p) { }
static inline int serial8250_rx_dma(struct uart_8250_port *p)
{
return -1;
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index d215c494ee24..f245a84f4a50 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -149,6 +149,22 @@ err:
return ret;
}
+void serial8250_tx_dma_flush(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+
+ if (!dma->tx_running)
+ return;
+
+ /*
+ * kfifo_reset() has been called by the serial core, avoid
+ * advancing and underflowing in __dma_tx_complete().
+ */
+ dma->tx_size = 0;
+
+ dmaengine_terminate_async(dma->txchan);
+}
+
int serial8250_rx_dma(struct uart_8250_port *p)
{
struct uart_8250_dma *dma = p->dma;
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 64aed7efc569..11c860ea80f6 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -110,7 +110,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
spin_lock_init(&port->lock);
if (resource_type(&resource) == IORESOURCE_IO) {
- port->iotype = UPIO_PORT;
port->iobase = resource.start;
} else {
port->mapbase = resource.start;
diff --git a/drivers/tty/serial/8250/8250_platform.c b/drivers/tty/serial/8250/8250_platform.c
index 8bdc1879d952..c0343bfb8064 100644
--- a/drivers/tty/serial/8250/8250_platform.c
+++ b/drivers/tty/serial/8250/8250_platform.c
@@ -112,7 +112,6 @@ static int serial8250_probe_acpi(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct uart_8250_port uart = { };
struct resource *regs;
- unsigned char iotype;
int ret, line;
regs = platform_get_mem_or_io(pdev, 0);
@@ -122,13 +121,11 @@ static int serial8250_probe_acpi(struct platform_device *pdev)
switch (resource_type(regs)) {
case IORESOURCE_IO:
uart.port.iobase = regs->start;
- iotype = UPIO_PORT;
break;
case IORESOURCE_MEM:
uart.port.mapbase = regs->start;
uart.port.mapsize = resource_size(regs);
uart.port.flags = UPF_IOREMAP;
- iotype = UPIO_MEM;
break;
default:
return -EINVAL;
@@ -147,12 +144,6 @@ static int serial8250_probe_acpi(struct platform_device *pdev)
if (ret)
return ret;
- /*
- * The previous call may not set iotype correctly when reg-io-width
- * property is absent and it doesn't support IO port resource.
- */
- uart.port.iotype = iotype;
-
line = serial8250_register_8250_port(&uart);
if (line < 0)
return line;
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
index 7c06ae79d8e2..7a837fdf9df1 100644
--- a/drivers/tty/serial/8250/8250_pnp.c
+++ b/drivers/tty/serial/8250/8250_pnp.c
@@ -436,7 +436,6 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
{
struct uart_8250_port uart, *port;
int ret, flags = dev_id->driver_data;
- unsigned char iotype;
long line;
if (flags & UNKNOWN_DEV) {
@@ -448,14 +447,11 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
memset(&uart, 0, sizeof(uart));
if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) {
uart.port.iobase = pnp_port_start(dev, 2);
- iotype = UPIO_PORT;
} else if (pnp_port_valid(dev, 0)) {
uart.port.iobase = pnp_port_start(dev, 0);
- iotype = UPIO_PORT;
} else if (pnp_mem_valid(dev, 0)) {
uart.port.mapbase = pnp_mem_start(dev, 0);
uart.port.mapsize = pnp_mem_len(dev, 0);
- iotype = UPIO_MEM;
uart.port.flags = UPF_IOREMAP;
} else
return -ENODEV;
@@ -471,12 +467,6 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
if (ret)
return ret;
- /*
- * The previous call may not set iotype correctly when reg-io-width
- * property is absent and it doesn't support IO port resource.
- */
- uart.port.iotype = iotype;
-
if (flags & CIR_PORT) {
uart.port.flags |= UPF_FIXED_PORT | UPF_FIXED_TYPE;
uart.port.type = PORT_8250_CIR;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index d7976a21cca9..442967a6cd52 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2555,6 +2555,14 @@ static void serial8250_shutdown(struct uart_port *port)
serial8250_do_shutdown(port);
}
+static void serial8250_flush_buffer(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ if (up->dma)
+ serial8250_tx_dma_flush(up);
+}
+
static unsigned int serial8250_do_get_divisor(struct uart_port *port,
unsigned int baud,
unsigned int *frac)
@@ -3244,6 +3252,7 @@ static const struct uart_ops serial8250_pops = {
.break_ctl = serial8250_break_ctl,
.startup = serial8250_startup,
.shutdown = serial8250_shutdown,
+ .flush_buffer = serial8250_flush_buffer,
.set_termios = serial8250_set_termios,
.set_ldisc = serial8250_set_ldisc,
.pm = serial8250_pm,
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 7b51cdc274fd..560f45ed19ae 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1561,7 +1561,7 @@ int sc16is7xx_probe(struct device *dev, const struct sc16is7xx_devtype *devtype,
/* Always ask for fixed clock rate from a property. */
device_property_read_u32(dev, "clock-frequency", &uartclk);
- s->polling = !!irq;
+ s->polling = (irq <= 0);
if (s->polling)
dev_dbg(dev,
"No interrupt pin definition, falling back to polling mode\n");
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index d35f1d24156c..2fc48cd63f6c 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -173,6 +173,7 @@ EXPORT_SYMBOL(uart_remove_one_port);
* The caller is responsible to initialize the following fields of the @port
* ->dev (must be valid)
* ->flags
+ * ->iobase
* ->mapbase
* ->mapsize
* ->regshift (if @use_defaults is false)
@@ -214,7 +215,7 @@ static int __uart_read_properties(struct uart_port *port, bool use_defaults)
/* Read the registers I/O access type (default: MMIO 8-bit) */
ret = device_property_read_u32(dev, "reg-io-width", &value);
if (ret) {
- port->iotype = UPIO_MEM;
+ port->iotype = port->iobase ? UPIO_PORT : UPIO_MEM;
} else {
switch (value) {
case 1:
@@ -227,15 +228,16 @@ static int __uart_read_properties(struct uart_port *port, bool use_defaults)
port->iotype = device_is_big_endian(dev) ? UPIO_MEM32BE : UPIO_MEM32;
break;
default:
- if (!use_defaults) {
- dev_err(dev, "Unsupported reg-io-width (%u)\n", value);
- return -EINVAL;
- }
port->iotype = UPIO_UNKNOWN;
break;
}
}
+ if (!use_defaults && port->iotype == UPIO_UNKNOWN) {
+ dev_err(dev, "Unsupported reg-io-width (%u)\n", value);
+ return -EINVAL;
+ }
+
/* Read the address mapping base offset (default: no offset) */
ret = device_property_read_u32(dev, "reg-offset", &value);
if (ret)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index cd404ade48dc..1893a7ad9531 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -2120,8 +2120,6 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
- spin_lock_init(&hba->clk_gating.lock);
-
hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
"ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
hba->host->host_no);
@@ -3106,8 +3104,13 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
case UPIU_TRANSACTION_QUERY_RSP: {
u8 response = lrbp->ucd_rsp_ptr->header.response;
- if (response == 0)
+ if (response == 0) {
err = ufshcd_copy_query_response(hba, lrbp);
+ } else {
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: unexpected response in Query RSP: %x\n",
+ __func__, response);
+ }
break;
}
case UPIU_TRANSACTION_REJECT_UPIU:
@@ -5976,24 +5979,6 @@ out:
__func__, err);
}
-static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
-{
- u32 value;
-
- if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
- return;
-
- dev_info(hba->dev, "exception Tcase %d\n", value - 80);
-
- ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
-
- /*
- * A placeholder for the platform vendors to add whatever additional
- * steps required
- */
-}
-
static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
u8 index;
@@ -6214,7 +6199,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
ufshcd_bkops_exception_event_handler(hba);
if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
- ufshcd_temp_exception_event_handler(hba, status);
+ ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
ufs_debugfs_exception_event(hba, status);
}
@@ -9160,7 +9145,7 @@ out:
if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
clk_disable_unprepare(clki->clk);
}
- } else if (!ret && on) {
+ } else if (!ret && on && hba->clk_gating.is_initialized) {
scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
@@ -10247,16 +10232,6 @@ EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
#endif /* CONFIG_PM_SLEEP */
/**
- * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
- * @hba: pointer to Host Bus Adapter (HBA)
- */
-void ufshcd_dealloc_host(struct ufs_hba *hba)
-{
- scsi_host_put(hba->host);
-}
-EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
-
-/**
* ufshcd_set_dma_mask - Set dma mask based on the controller
* addressing capability
* @hba: per adapter instance
@@ -10275,11 +10250,25 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
}
/**
+ * ufshcd_devres_release - devres cleanup handler, invoked during release of
+ * hba->dev
+ * @host: pointer to SCSI host
+ */
+static void ufshcd_devres_release(void *host)
+{
+ scsi_host_put(host);
+}
+
+/**
* ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
* @dev: pointer to device handle
* @hba_handle: driver private handle
*
* Return: 0 on success, non-zero value on failure.
+ *
+ * NOTE: There is no corresponding ufshcd_dealloc_host() because this function
+ * keeps track of its allocations using devres and deallocates everything on
+ * device removal automatically.
*/
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
@@ -10301,6 +10290,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
+
+ err = devm_add_action_or_reset(dev, ufshcd_devres_release,
+ host);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to add ufshcd dealloc action\n");
+
host->nr_maps = HCTX_TYPE_POLL + 1;
hba = shost_priv(host);
hba->host = host;
@@ -10429,6 +10425,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->irq = irq;
hba->vps = &ufs_hba_vps;
+ /*
+ * Initialize clk_gating.lock early since it is being used in
+ * ufshcd_setup_clocks()
+ */
+ spin_lock_init(&hba->clk_gating.lock);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index ea39c5d5b8cf..9cfcaad23cf9 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -562,7 +562,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
- ufshcd_dealloc_host(hba);
}
/**
@@ -605,7 +604,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = ufshcd_init(hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
- ufshcd_dealloc_host(hba);
return err;
}
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index 505572d4fa87..ffe5d1d2b215 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -465,21 +465,17 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
struct device *dev = &pdev->dev;
mmio_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mmio_base)) {
- err = PTR_ERR(mmio_base);
- goto out;
- }
+ if (IS_ERR(mmio_base))
+ return PTR_ERR(mmio_base);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = irq;
- goto out;
- }
+ if (irq < 0)
+ return irq;
err = ufshcd_alloc_host(dev, &hba);
if (err) {
dev_err(dev, "Allocation failed\n");
- goto out;
+ return err;
}
hba->vops = vops;
@@ -488,13 +484,13 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
if (err) {
dev_err(dev, "%s: clock parse failed %d\n",
__func__, err);
- goto dealloc_host;
+ return err;
}
err = ufshcd_parse_regulator_info(hba);
if (err) {
dev_err(dev, "%s: regulator init failed %d\n",
__func__, err);
- goto dealloc_host;
+ return err;
}
ufshcd_init_lanes_per_dir(hba);
@@ -502,25 +498,20 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
err = ufshcd_parse_operating_points(hba);
if (err) {
dev_err(dev, "%s: OPP parse failed %d\n", __func__, err);
- goto dealloc_host;
+ return err;
}
err = ufshcd_init(hba, mmio_base, irq);
if (err) {
dev_err_probe(dev, err, "Initialization failed with error %d\n",
err);
- goto dealloc_host;
+ return err;
}
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
-
-dealloc_host:
- ufshcd_dealloc_host(hba);
-out:
- return err;
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
@@ -534,7 +525,6 @@ void ufshcd_pltfrm_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
ufshcd_remove(hba);
- ufshcd_dealloc_host(hba);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 6b37d1c47fce..c2ecfa3c8349 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -371,7 +371,7 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
static void acm_ctrl_irq(struct urb *urb)
{
struct acm *acm = urb->context;
- struct usb_cdc_notification *dr = urb->transfer_buffer;
+ struct usb_cdc_notification *dr;
unsigned int current_size = urb->actual_length;
unsigned int expected_size, copy_size, alloc_size;
int retval;
@@ -398,14 +398,25 @@ static void acm_ctrl_irq(struct urb *urb)
usb_mark_last_busy(acm->dev);
- if (acm->nb_index)
+ if (acm->nb_index == 0) {
+ /*
+ * The first chunk of a message must contain at least the
+ * notification header with the length field, otherwise we
+ * can't get an expected_size.
+ */
+ if (current_size < sizeof(struct usb_cdc_notification)) {
+ dev_dbg(&acm->control->dev, "urb too short\n");
+ goto exit;
+ }
+ dr = urb->transfer_buffer;
+ } else {
dr = (struct usb_cdc_notification *)acm->notification_buffer;
-
+ }
/* size = notification-header + (optional) data */
expected_size = sizeof(struct usb_cdc_notification) +
le16_to_cpu(dr->wLength);
- if (current_size < expected_size) {
+ if (acm->nb_index != 0 || current_size < expected_size) {
/* notification is transmitted fragmented, reassemble */
if (acm->nb_size < expected_size) {
u8 *new_buffer;
@@ -1727,13 +1738,16 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
- { USB_DEVICE(0x045b, 0x023c), /* Renesas USB Download mode */
+ { USB_DEVICE(0x045b, 0x023c), /* Renesas R-Car H3 USB Download mode */
+ .driver_info = DISABLE_ECHO, /* Don't echo banner */
+ },
+ { USB_DEVICE(0x045b, 0x0247), /* Renesas R-Car D3 USB Download mode */
.driver_info = DISABLE_ECHO, /* Don't echo banner */
},
- { USB_DEVICE(0x045b, 0x0248), /* Renesas USB Download mode */
+ { USB_DEVICE(0x045b, 0x0248), /* Renesas R-Car M3-N USB Download mode */
.driver_info = DISABLE_ECHO, /* Don't echo banner */
},
- { USB_DEVICE(0x045b, 0x024D), /* Renesas USB Download mode */
+ { USB_DEVICE(0x045b, 0x024D), /* Renesas R-Car E3 USB Download mode */
.driver_info = DISABLE_ECHO, /* Don't echo banner */
},
{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index c3f839637cb5..a76bb50b6202 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1849,6 +1849,17 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
hdev = interface_to_usbdev(intf);
/*
+ * The USB 2.0 spec prohibits hubs from having more than one
+ * configuration or interface, and we rely on this prohibition.
+ * Refuse to accept a device that violates it.
+ */
+ if (hdev->descriptor.bNumConfigurations > 1 ||
+ hdev->actconfig->desc.bNumInterfaces > 1) {
+ dev_err(&intf->dev, "Invalid hub with more than one config or interface\n");
+ return -EINVAL;
+ }
+
+ /*
* Set default autosuspend delay as 0 to speedup bus suspend,
* based on the below considerations:
*
@@ -4698,7 +4709,6 @@ void usb_ep0_reinit(struct usb_device *udev)
EXPORT_SYMBOL_GPL(usb_ep0_reinit);
#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
-#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
static int hub_set_address(struct usb_device *udev, int devnum)
{
@@ -4804,7 +4814,7 @@ static int get_bMaxPacketSize0(struct usb_device *udev,
for (i = 0; i < GET_MAXPACKET0_TRIES; ++i) {
/* Start with invalid values in case the transfer fails */
buf->bDescriptorType = buf->bMaxPacketSize0 = 0;
- rc = usb_control_msg(udev, usb_rcvaddr0pipe(),
+ rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
USB_DT_DEVICE << 8, 0,
buf, size,
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 67732c791c93..dfcfc142bd5e 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -435,6 +435,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0c45, 0x7056), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ /* Sony Xperia XZ1 Compact (lilac) smartphone in fastboot mode */
+ { USB_DEVICE(0x0fce, 0x0dde), .driver_info = USB_QUIRK_NO_LPM },
+
/* Action Semiconductor flash disk */
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
@@ -525,6 +528,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Blackmagic Design UltraStudio SDI */
{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
+ /* Teclast disk */
+ { USB_DEVICE(0x1f75, 0x0917), .driver_info = USB_QUIRK_NO_LPM },
+
/* Hauppauge HVR-950q */
{ USB_DEVICE(0x2040, 0x7200), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index e7bf9cc635be..bd4c788f03bc 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -4615,6 +4615,7 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
spin_lock_irqsave(&hsotg->lock, flags);
hsotg->driver = NULL;
+ hsotg->gadget.dev.of_node = NULL;
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
hsotg->enabled = 0;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index ac7c730f81ac..c955039bb4f6 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -717,6 +717,7 @@ struct dwc3_event_buffer {
/**
* struct dwc3_ep - device side endpoint representation
* @endpoint: usb endpoint
+ * @nostream_work: work for handling bulk NoStream
* @cancelled_list: list of cancelled requests for this endpoint
* @pending_list: list of pending requests for this endpoint
* @started_list: list of started requests on this endpoint
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index d27af65eb08a..ddd6b2ce5710 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2629,10 +2629,38 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
{
u32 reg;
u32 timeout = 2000;
+ u32 saved_config = 0;
if (pm_runtime_suspended(dwc->dev))
return 0;
+ /*
+ * When operating in USB 2.0 speeds (HS/FS), ensure that
+ * GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY are cleared before starting
+ * or stopping the controller. This resolves timeout issues that occur
+ * during frequent role switches between host and device modes.
+ *
+ * Save and clear these settings, then restore them after completing the
+ * controller start or stop sequence.
+ *
+ * This solution was discovered through experimentation as it is not
+ * mentioned in the dwc3 programming guide. It has been tested on
+ * Exynos platforms.
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
+ saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+ }
+
+ if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
+ saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
+ reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
+ }
+
+ if (saved_config)
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (is_on) {
if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) {
@@ -2660,6 +2688,12 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
reg &= DWC3_DSTS_DEVCTRLHLT;
} while (--timeout && !(!is_on ^ !reg));
+ if (saved_config) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg |= saved_config;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ }
+
if (!timeout)
return -ETIMEDOUT;
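The run/stop change wraps the existing sequence in a save/clear/restore of the two PHY suspend bits. A condensed sketch of that wrapper shape; struct dwc3 and the register helpers are assumed from the driver, and only bits that were actually set get written back:

static void with_phy_suspend_disabled(struct dwc3 *dwc,
				      void (*body)(struct dwc3 *))
{
	u32 reg, saved;

	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
	saved = reg & (DWC3_GUSB2PHYCFG_SUSPHY | DWC3_GUSB2PHYCFG_ENBLSLPM);
	if (saved)
		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg & ~saved);

	body(dwc);	/* the run/stop polling loop goes here */

	if (saved) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg | saved);
	}
}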
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 837fcdfa3840..da82598fcef8 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -283,7 +283,7 @@ f_midi_complete(struct usb_ep *ep, struct usb_request *req)
/* Our transmit completed. See if there's more to go.
* f_midi_transmit eats req, don't queue it again. */
req->length = 0;
- f_midi_transmit(midi);
+ queue_work(system_highpri_wq, &midi->work);
return;
}
break;
@@ -907,6 +907,15 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
status = -ENODEV;
+ /*
+ * Reset wMaxPacketSize with maximum packet size of FS bulk transfer before
+ * endpoint claim. This ensures that wMaxPacketSize does not exceed the
+ * limit during bind retries, where the dwc3 TX/RX FIFOs may still be
+ * configured with a 512-byte maxpacket size for IN/OUT endpoints that
+ * support HS speed only.
+ */
+ bulk_in_desc.wMaxPacketSize = cpu_to_le16(64);
+ bulk_out_desc.wMaxPacketSize = cpu_to_le16(64);
+
/* allocate instance-specific endpoints */
midi->in_ep = usb_ep_autoconfig(cdev->gadget, &bulk_in_desc);
if (!midi->in_ep)
@@ -1000,11 +1009,11 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
}
/* configure the endpoint descriptors ... */
- ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
- ms_out_desc.bNumEmbMIDIJack = midi->in_ports;
+ ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
+ ms_out_desc.bNumEmbMIDIJack = midi->out_ports;
- ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
- ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
+ ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
+ ms_in_desc.bNumEmbMIDIJack = midi->in_ports;
/* ... and add them to the list */
endpoint_descriptor_index = i;
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 79e223713d8b..fb77b0b21790 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -818,7 +818,7 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
return -EINVAL;
/* Allocate a kthread for asynchronous hw submit handler. */
- video->kworker = kthread_create_worker(0, "UVCG");
+ video->kworker = kthread_run_worker(0, "UVCG");
if (IS_ERR(video->kworker)) {
uvcg_err(&video->uvc->func, "failed to create UVCG kworker\n");
return PTR_ERR(video->kworker);
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index a6f46364be65..4b3d5075621a 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1543,8 +1543,8 @@ void usb_del_gadget(struct usb_gadget *gadget)
kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
sysfs_remove_link(&udc->dev.kobj, "gadget");
- flush_work(&gadget->work);
device_del(&gadget->dev);
+ flush_work(&gadget->work);
ida_free(&gadget_id_numbers, gadget->id_number);
cancel_work_sync(&udc->vbus_work);
device_unregister(&udc->dev);
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index fce5c41d9f29..89b304cf6d03 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -310,7 +310,7 @@ struct renesas_usb3_request {
struct list_head queue;
};
-#define USB3_EP_NAME_SIZE 8
+#define USB3_EP_NAME_SIZE 16
struct renesas_usb3_ep {
struct usb_ep ep;
struct renesas_usb3 *usb3;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 1f9c1b1435d8..0404489c2f6a 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -958,6 +958,15 @@ static void quirk_usb_disable_ehci(struct pci_dev *pdev)
* booting from USB disk or using a usb keyboard
*/
hcc_params = readl(base + EHCI_HCC_PARAMS);
+
+ /* LS7A EHCI controller doesn't have extended capabilities; the
+ * EECP (EHCI Extended Capabilities Pointer) field of HCCPARAMS
+ * register should be 0x0 but it reads as 0xa0. So clear it to
+ * avoid error messages on boot.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_LOONGSON && pdev->device == 0x7a14)
+ hcc_params &= ~(0xffL << 8);
+
offset = (hcc_params >> 8) & 0xff;
while (offset && --count) {
pci_read_config_dword(pdev, offset, &cap);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 2d1e205c14c6..ad0ff356f6fa 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -653,8 +653,8 @@ put_runtime_pm:
}
EXPORT_SYMBOL_NS_GPL(xhci_pci_common_probe, "xhci");
-static const struct pci_device_id pci_ids_reject[] = {
- /* handled by xhci-pci-renesas */
+/* handled by xhci-pci-renesas if enabled */
+static const struct pci_device_id pci_ids_renesas[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) },
{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015) },
{ /* end: all zeroes */ }
@@ -662,7 +662,8 @@ static const struct pci_device_id pci_ids_reject[] = {
static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- if (pci_match_id(pci_ids_reject, dev))
+ if (IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS) &&
+ pci_match_id(pci_ids_renesas, dev))
return -ENODEV;
return xhci_pci_common_probe(dev, id);
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 6c3ececf9137..8423be59ec0f 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -212,7 +212,7 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
if (of_property_read_u32(node, "clock-frequency", &clk_rate))
clk_rate = 0;
- needs_clk = of_property_read_bool(node, "clocks");
+ needs_clk = of_property_present(node, "clocks");
}
nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
GPIOD_ASIS);
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index c58a12c147f4..30482d4cf826 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -387,8 +387,11 @@ usb_role_switch_register(struct device *parent,
dev_set_name(&sw->dev, "%s-role-switch",
desc->name ? desc->name : dev_name(parent));
+ sw->registered = true;
+
ret = device_register(&sw->dev);
if (ret) {
+ sw->registered = false;
put_device(&sw->dev);
return ERR_PTR(ret);
}
@@ -399,8 +402,6 @@ usb_role_switch_register(struct device *parent,
dev_warn(&sw->dev, "failed to add component\n");
}
- sw->registered = true;
-
/* TODO: Symlinks for the host port and the device controller. */
return sw;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1e2ae0c6c41c..58bd54e8c483 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -619,15 +619,6 @@ static void option_instat_callback(struct urb *urb);
/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
#define LUAT_PRODUCT_AIR720U 0x4e00
-/* MeiG Smart Technology products */
-#define MEIGSMART_VENDOR_ID 0x2dee
-/* MeiG Smart SRM815/SRM825L based on Qualcomm 315 */
-#define MEIGSMART_PRODUCT_SRM825L 0x4d22
-/* MeiG Smart SLM320 based on UNISOC UIS8910 */
-#define MEIGSMART_PRODUCT_SLM320 0x4d41
-/* MeiG Smart SLM770A based on ASR1803 */
-#define MEIGSMART_PRODUCT_SLM770A 0x4d57
-
/* Device flags */
/* Highest interface number which can be used with NCTRL() and RSVD() */
@@ -1367,15 +1358,15 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990 (rmnet) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990A (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990 (MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990A (MBIM) */
.driver_info = NCTRL(0) | RSVD(1) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990 (RNDIS) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990A (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990A (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990A (PCIe) */
.driver_info = RSVD(0) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990 (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
@@ -1403,6 +1394,22 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c8, 0xff), /* Telit FE910C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x60) }, /* Telit FN990B (rmnet) */
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x40) },
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x30),
+ .driver_info = NCTRL(5) },
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x60) }, /* Telit FN990B (MBIM) */
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x40) },
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x30),
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x60) }, /* Telit FN990B (RNDIS) */
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x40) },
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x30),
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x60) }, /* Telit FN990B (ECM) */
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x40) },
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x30),
+ .driver_info = NCTRL(6) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -2347,6 +2354,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) }, /* Fibocom FM650-CN (NCM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) }, /* Fibocom FM650-CN (RNDIS mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) }, /* Fibocom FM650-CN (MBIM mode) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d41, 0xff, 0, 0) }, /* MeiG Smart SLM320 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d57, 0xff, 0, 0) }, /* MeiG Smart SLM770A */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0, 0) }, /* MeiG Smart SRM815 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0x10, 0x02) }, /* MeiG Smart SLM828 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0x10, 0x03) }, /* MeiG Smart SLM828 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x30) }, /* MeiG Smart SRM815 and SRM825L */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x40) }, /* MeiG Smart SRM825L */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x60) }, /* MeiG Smart SRM825L */
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
@@ -2403,12 +2418,6 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM770A, 0xff, 0, 0) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0, 0) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) },
{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */
.driver_info = NCTRL(1) },
{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0640, 0xff), /* TCL IK512 ECM */
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 47be450d2be3..6bf1a22c785a 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -5591,8 +5591,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
port->pps_data.active, 0);
tcpm_set_charge(port, false);
- tcpm_set_state(port, hard_reset_state(port),
- port->timings.ps_src_off_time);
+ tcpm_set_state(port, ERROR_RECOVERY, port->timings.ps_src_off_time);
break;
case PR_SWAP_SNK_SRC_SOURCE_ON:
tcpm_enable_auto_vbus_discharge(port, true);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index a337edcf8faf..1f65795cf5d7 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -74,19 +74,21 @@ static inline phys_addr_t xen_dma_to_phys(struct device *dev,
return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}
+static inline bool range_requires_alignment(phys_addr_t p, size_t size)
+{
+ phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
+ phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;
+
+ return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
+}
+
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
- phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
next_bfn = pfn_to_bfn(xen_pfn);
- /* If buffer is physically aligned, ensure DMA alignment. */
- if (IS_ALIGNED(p, algn) &&
- !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
- return 1;
-
for (i = 1; i < nr_pages; i++)
if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
return 1;
@@ -111,7 +113,7 @@ static struct io_tlb_pool *xen_swiotlb_find_pool(struct device *dev,
}
#ifdef CONFIG_X86
-int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
+int __init xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
int rc;
unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
@@ -156,7 +158,8 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
*dma_handle = xen_phys_to_dma(dev, phys);
if (*dma_handle + size - 1 > dma_mask ||
- range_straddles_page_boundary(phys, size)) {
+ range_straddles_page_boundary(phys, size) ||
+ range_requires_alignment(phys, size)) {
if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
dma_handle) != 0)
goto out_free_pages;
@@ -182,7 +185,8 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
size = ALIGN(size, XEN_PAGE_SIZE);
if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
- WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
+ WARN_ON_ONCE(range_straddles_page_boundary(phys, size) ||
+ range_requires_alignment(phys, size)))
return;
if (TestClearPageXenRemapped(virt_to_page(vaddr)))
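The new range_requires_alignment() helper flags buffers whose guest-physical address meets the natural power-of-two alignment of the allocation but whose machine (bus) address does not, so they get routed through a contiguous region instead. A userspace illustration of the same arithmetic; PAGE_SHIFT and the example addresses are arbitrary:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Open-coded equivalent of get_order(): smallest order covering size. */
static unsigned int get_order(size_t size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

static bool range_requires_alignment(uint64_t phys, uint64_t bus, size_t size)
{
	uint64_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);

	/* phys is naturally aligned but the bus address is not */
	return !(phys & (algn - 1)) && (bus & (algn - 1));
}

int main(void)
{
	/* 16 KiB buffer: must be 16 KiB-aligned on the bus as well. */
	printf("%d\n", range_requires_alignment(0x40000000, 0x8000a000, 16384));
	return 0;
}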