diff options
| author | Dave Airlie <airlied@redhat.com> | 2026-04-03 18:56:58 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2026-04-03 18:57:00 +1000 |
| commit | 512f9f150f367176fa9e5f4613b4863409a6f686 (patch) | |
| tree | 32e12e5821628d3fc58c38e6cae8298cb36a50b7 /drivers/gpu/drm/msm | |
| parent | cdd65e8bb9540b67d164cec760614ff74c560c49 (diff) | |
| parent | a972d1b864e8efcfda0b387debac8ea2875a182c (diff) | |
Merge tag 'drm-msm-next-2026-04-02' of https://gitlab.freedesktop.org/drm/msm into drm-next
Changes for v7.1
CI:
- Uprev mesa
- Restore CI jobs for Qualcomm APQ8016 and APQ8096 devices
Core:
- Switched to of_get_available_child_by_name()
DPU:
- Fixes for DSC panels
- Fixed brownout because of the frequency / OPP mismatch
- Quad pipe preparation (not enabled yet)
- Switched to virtual planes by default
- Dropped VBIF_NRT support
- Added support for Eliza platform
- Reworked alpha handling
- Switched to correct CWB definitions on Eliza
- Dropped dummy INTF_0 on MSM8953
- Corrected INTFs related to DP-MST
DP:
- Removed debug prints looking into PHY internals
DSI:
- Fixes for DSC panels
- RGB101010 support
- Support for SC8280XP
- Moved PHY bindings from display/ to phy/
GPU:
- Preemption support for x2-85 and a840
- IFPC support for a840
- SKU detection support for x2-85 and a840
- Expose AQE support (VK ray-pipeline)
- Avoid locking in VM_BIND fence signaling path
- Fix to avoid reclaim in GPU snapshot path
- Disallow foreign mapping of _NO_SHARE BOs
- A couple of a6xx GPU snapshot fixes
- Various other fixes
HDMI:
- Fixed infoframes programming
MDP5:
- Dropped support for MSM8974v1
- Dropped now unused code for MSM8974 v1 and SDM660 / MSM8998
Also miscellaneous small fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <rob.clark@oss.qualcomm.com>
Link: https://patch.msgid.link/CACSVV012vn73BaUfk=Hw4WkQHZNPHiqfifWEunAqMc2EGOWUEQ@mail.gmail.com
Diffstat (limited to 'drivers/gpu/drm/msm')
104 files changed, 2127 insertions, 1068 deletions
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 8b94c5f1cb68..ba45e99be05b 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -25,6 +25,7 @@ adreno-y := \ adreno/a6xx_hfi.o \ adreno/a6xx_preempt.o \ adreno/a8xx_gpu.o \ + adreno/a8xx_preempt.o \ adreno-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \ diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 8464d89e37f3..e6ab731f8e9a 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -604,11 +604,9 @@ static int a4xx_pm_suspend(struct msm_gpu *gpu) { return 0; } -static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) +static u64 a4xx_get_timestamp(struct msm_gpu *gpu) { - *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO); - - return 0; + return gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO); } static u64 a4xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate) diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index ef9fd6171af7..79acae11154a 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -1435,11 +1435,9 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu) return 0; } -static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) +static u64 a5xx_get_timestamp(struct msm_gpu *gpu) { - *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO); - - return 0; + return gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO); } struct a5xx_crashdumper { @@ -1732,6 +1730,7 @@ static struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) struct adreno_gpu *adreno_gpu; struct msm_gpu *gpu; unsigned int nr_rings; + u32 speedbin; int ret; a5xx_gpu = kzalloc_obj(*a5xx_gpu); @@ -1758,6 +1757,11 @@ static struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) return ERR_PTR(ret); } + /* Set the speedbin value that is passed to userspace */ + if 
(adreno_read_speedbin(&pdev->dev, &speedbin) || !speedbin) + speedbin = 0xffff; + adreno_gpu->speedbin = (uint16_t) (0xffff & speedbin); + msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu, a5xx_fault_handler); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c index 38561f26837e..550ff3a9b82e 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c @@ -1761,6 +1761,44 @@ static const u32 x285_protect_regs[] = { DECLARE_ADRENO_PROTECT(x285_protect, 15); +static const struct adreno_reglist_pipe x285_dyn_pwrup_reglist_regs[] = { + { REG_A8XX_GRAS_TSEFE_DBG_ECO_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_GRAS_NC_MODE_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_GRAS_DBG_ECO_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A6XX_PC_AUTO_VERTEX_STRIDE, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CHICKEN_BITS_1, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CHICKEN_BITS_2, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CHICKEN_BITS_3, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CHICKEN_BITS_4, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CONTEXT_SWITCH_STABILIZE_CNTL_1, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_VIS_STREAM_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A7XX_RB_CCU_CNTL, 0, BIT(PIPE_BR) }, + { REG_A7XX_RB_CCU_DBG_ECO_CNTL, 0, BIT(PIPE_BR)}, + { REG_A8XX_RB_CCU_NC_MODE_CNTL, 0, BIT(PIPE_BR) }, + { REG_A8XX_RB_CMP_NC_MODE_CNTL, 0, BIT(PIPE_BR) }, + { REG_A6XX_RB_RBP_CNTL, 0, BIT(PIPE_BR) }, + { REG_A8XX_RB_RESOLVE_PREFETCH_CNTL, 0, BIT(PIPE_BR) }, + { REG_A8XX_RB_CMP_DBG_ECO_CNTL, 0, BIT(PIPE_BR) }, + { REG_A7XX_VFD_DBG_ECO_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VFD_CB_BV_THRESHOLD, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VFD_CB_BR_THRESHOLD, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VFD_CB_BUSY_REQ_CNT, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VFD_CB_LP_REQ_CNT, 0, BIT(PIPE_BV) | 
BIT(PIPE_BR) }, + { REG_A8XX_VPC_FLATSHADE_MODE_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_CP_HW_FAULT_STATUS_MASK_PIPE, 0, BIT(PIPE_BR) | + BIT(PIPE_BV) | BIT(PIPE_LPAC) | BIT(PIPE_AQE0) | + BIT(PIPE_AQE1) | BIT(PIPE_DDE_BR) | BIT(PIPE_DDE_BV) }, + { REG_A8XX_CP_INTERRUPT_STATUS_MASK_PIPE, 0, BIT(PIPE_BR) | + BIT(PIPE_BV) | BIT(PIPE_LPAC) | BIT(PIPE_AQE0) | + BIT(PIPE_AQE1) | BIT(PIPE_DDE_BR) | BIT(PIPE_DDE_BV) }, + { REG_A8XX_CP_PROTECT_CNTL_PIPE, 0, BIT(PIPE_BR) | BIT(PIPE_BV) | BIT(PIPE_LPAC)}, + { REG_A8XX_CP_PROTECT_PIPE(15), 0, BIT(PIPE_BR) | BIT(PIPE_BV) | BIT(PIPE_LPAC) }, + { REG_A8XX_RB_GC_GMEM_PROTECT, 0, BIT(PIPE_BR) }, + { REG_A8XX_RB_LPAC_GMEM_PROTECT, 0, BIT(PIPE_BR) }, + { REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE, 0, BIT(PIPE_BR) }, +}; +DECLARE_ADRENO_REGLIST_PIPE_LIST(x285_dyn_pwrup_reglist); + static const struct adreno_reglist_pipe a840_nonctxt_regs[] = { { REG_A8XX_CP_SMMU_STREAM_ID_LPAC, 0x00000101, BIT(PIPE_NONE) }, { REG_A8XX_GRAS_DBG_ECO_CNTL, 0x00000800, BIT(PIPE_BV) | BIT(PIPE_BR) }, @@ -1891,6 +1929,185 @@ static const struct adreno_reglist a840_gbif[] = { { }, }; +static const uint32_t a840_pwrup_reglist_regs[] = { + REG_A7XX_SP_HLSQ_TIMEOUT_THRESHOLD_DP, + REG_A7XX_SP_READ_SEL, + REG_A6XX_UCHE_MODE_CNTL, + REG_A8XX_UCHE_VARB_IDLE_TIMEOUT, + REG_A8XX_UCHE_GBIF_GX_CONFIG, + REG_A8XX_UCHE_CCHE_MODE_CNTL, + REG_A8XX_UCHE_CCHE_CACHE_WAYS, + REG_A8XX_UCHE_CACHE_WAYS, + REG_A8XX_UCHE_CCHE_GC_GMEM_RANGE_MIN, + REG_A8XX_UCHE_CCHE_GC_GMEM_RANGE_MIN + 1, + REG_A8XX_UCHE_CCHE_LPAC_GMEM_RANGE_MIN, + REG_A8XX_UCHE_CCHE_LPAC_GMEM_RANGE_MIN + 1, + REG_A8XX_UCHE_CCHE_TRAP_BASE, + REG_A8XX_UCHE_CCHE_TRAP_BASE + 1, + REG_A8XX_UCHE_CCHE_WRITE_THRU_BASE, + REG_A8XX_UCHE_CCHE_WRITE_THRU_BASE + 1, + REG_A8XX_UCHE_HW_DBG_CNTL, + REG_A8XX_UCHE_WRITE_THRU_BASE, + REG_A8XX_UCHE_WRITE_THRU_BASE + 1, + REG_A8XX_UCHE_TRAP_BASE, + REG_A8XX_UCHE_TRAP_BASE + 1, + REG_A8XX_UCHE_CLIENT_PF, + REG_A8XX_RB_CMP_NC_MODE_CNTL, + 
REG_A8XX_SP_HLSQ_GC_GMEM_RANGE_MIN, + REG_A8XX_SP_HLSQ_GC_GMEM_RANGE_MIN + 1, + REG_A6XX_TPL1_NC_MODE_CNTL, + REG_A6XX_TPL1_DBG_ECO_CNTL, + REG_A6XX_TPL1_DBG_ECO_CNTL1, + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(0), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(1), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(2), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(3), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(4), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(5), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(6), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(7), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(8), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(9), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(10), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(11), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(12), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(13), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(14), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(15), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(16), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(17), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(18), + REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(19), +}; +DECLARE_ADRENO_REGLIST_LIST(a840_pwrup_reglist); + +static const u32 a840_ifpc_reglist_regs[] = { + REG_A8XX_RBBM_NC_MODE_CNTL, + REG_A8XX_RBBM_SLICE_NC_MODE_CNTL, + REG_A6XX_SP_NC_MODE_CNTL, + REG_A6XX_SP_CHICKEN_BITS, + REG_A8XX_SP_SS_CHICKEN_BITS_0, + REG_A7XX_SP_CHICKEN_BITS_1, + REG_A7XX_SP_CHICKEN_BITS_2, + REG_A7XX_SP_CHICKEN_BITS_3, + REG_A8XX_SP_CHICKEN_BITS_4, + REG_A6XX_SP_PERFCTR_SHADER_MASK, + REG_A8XX_RBBM_SLICE_PERFCTR_CNTL, + REG_A8XX_RBBM_SLICE_INTERFACE_HANG_INT_CNTL, + REG_A7XX_SP_HLSQ_DBG_ECO_CNTL, + REG_A7XX_SP_HLSQ_DBG_ECO_CNTL_1, + REG_A7XX_SP_HLSQ_DBG_ECO_CNTL_2, + REG_A8XX_SP_HLSQ_DBG_ECO_CNTL_3, + REG_A8XX_SP_HLSQ_LPAC_GMEM_RANGE_MIN, + REG_A8XX_SP_HLSQ_LPAC_GMEM_RANGE_MIN + 1, + REG_A8XX_CP_INTERRUPT_STATUS_MASK_GLOBAL, + REG_A8XX_RBBM_PERFCTR_CNTL, + REG_A8XX_CP_PROTECT_GLOBAL(0), + REG_A8XX_CP_PROTECT_GLOBAL(1), + REG_A8XX_CP_PROTECT_GLOBAL(2), + REG_A8XX_CP_PROTECT_GLOBAL(3), + REG_A8XX_CP_PROTECT_GLOBAL(4), + 
REG_A8XX_CP_PROTECT_GLOBAL(5), + REG_A8XX_CP_PROTECT_GLOBAL(6), + REG_A8XX_CP_PROTECT_GLOBAL(7), + REG_A8XX_CP_PROTECT_GLOBAL(8), + REG_A8XX_CP_PROTECT_GLOBAL(9), + REG_A8XX_CP_PROTECT_GLOBAL(10), + REG_A8XX_CP_PROTECT_GLOBAL(11), + REG_A8XX_CP_PROTECT_GLOBAL(12), + REG_A8XX_CP_PROTECT_GLOBAL(13), + REG_A8XX_CP_PROTECT_GLOBAL(14), + REG_A8XX_CP_PROTECT_GLOBAL(15), + REG_A8XX_CP_PROTECT_GLOBAL(16), + REG_A8XX_CP_PROTECT_GLOBAL(17), + REG_A8XX_CP_PROTECT_GLOBAL(18), + REG_A8XX_CP_PROTECT_GLOBAL(19), + REG_A8XX_CP_PROTECT_GLOBAL(20), + REG_A8XX_CP_PROTECT_GLOBAL(21), + REG_A8XX_CP_PROTECT_GLOBAL(22), + REG_A8XX_CP_PROTECT_GLOBAL(23), + REG_A8XX_CP_PROTECT_GLOBAL(24), + REG_A8XX_CP_PROTECT_GLOBAL(25), + REG_A8XX_CP_PROTECT_GLOBAL(26), + REG_A8XX_CP_PROTECT_GLOBAL(27), + REG_A8XX_CP_PROTECT_GLOBAL(28), + REG_A8XX_CP_PROTECT_GLOBAL(29), + REG_A8XX_CP_PROTECT_GLOBAL(30), + REG_A8XX_CP_PROTECT_GLOBAL(31), + REG_A8XX_CP_PROTECT_GLOBAL(32), + REG_A8XX_CP_PROTECT_GLOBAL(33), + REG_A8XX_CP_PROTECT_GLOBAL(34), + REG_A8XX_CP_PROTECT_GLOBAL(35), + REG_A8XX_CP_PROTECT_GLOBAL(36), + REG_A8XX_CP_PROTECT_GLOBAL(37), + REG_A8XX_CP_PROTECT_GLOBAL(38), + REG_A8XX_CP_PROTECT_GLOBAL(39), + REG_A8XX_CP_PROTECT_GLOBAL(40), + REG_A8XX_CP_PROTECT_GLOBAL(41), + REG_A8XX_CP_PROTECT_GLOBAL(42), + REG_A8XX_CP_PROTECT_GLOBAL(43), + REG_A8XX_CP_PROTECT_GLOBAL(44), + REG_A8XX_CP_PROTECT_GLOBAL(45), + REG_A8XX_CP_PROTECT_GLOBAL(46), + REG_A8XX_CP_PROTECT_GLOBAL(47), + REG_A8XX_CP_PROTECT_GLOBAL(48), + REG_A8XX_CP_PROTECT_GLOBAL(49), + REG_A8XX_CP_PROTECT_GLOBAL(50), + REG_A8XX_CP_PROTECT_GLOBAL(51), + REG_A8XX_CP_PROTECT_GLOBAL(52), + REG_A8XX_CP_PROTECT_GLOBAL(53), + REG_A8XX_CP_PROTECT_GLOBAL(54), + REG_A8XX_CP_PROTECT_GLOBAL(55), + REG_A8XX_CP_PROTECT_GLOBAL(56), + REG_A8XX_CP_PROTECT_GLOBAL(57), + REG_A8XX_CP_PROTECT_GLOBAL(58), + REG_A8XX_CP_PROTECT_GLOBAL(59), + REG_A8XX_CP_PROTECT_GLOBAL(60), + REG_A8XX_CP_PROTECT_GLOBAL(61), + REG_A8XX_CP_PROTECT_GLOBAL(62), + REG_A8XX_CP_PROTECT_GLOBAL(63), 
+}; +DECLARE_ADRENO_REGLIST_LIST(a840_ifpc_reglist); + +static const struct adreno_reglist_pipe a840_dyn_pwrup_reglist_regs[] = { + { REG_A8XX_GRAS_TSEFE_DBG_ECO_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_GRAS_NC_MODE_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_GRAS_DBG_ECO_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A6XX_PC_AUTO_VERTEX_STRIDE, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CHICKEN_BITS_1, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CHICKEN_BITS_2, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CHICKEN_BITS_3, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CHICKEN_BITS_4, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_CONTEXT_SWITCH_STABILIZE_CNTL_1, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_PC_VIS_STREAM_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A7XX_RB_CCU_CNTL, 0, BIT(PIPE_BR) }, + { REG_A7XX_RB_CCU_DBG_ECO_CNTL, 0, BIT(PIPE_BR)}, + { REG_A8XX_RB_CCU_NC_MODE_CNTL, 0, BIT(PIPE_BR) }, + { REG_A8XX_RB_CMP_NC_MODE_CNTL, 0, BIT(PIPE_BR) }, + { REG_A6XX_RB_RBP_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_RB_RESOLVE_PREFETCH_CNTL, 0, BIT(PIPE_BR) }, + { REG_A6XX_RB_DBG_ECO_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_RB_CMP_DBG_ECO_CNTL, 0, BIT(PIPE_BR) }, + { REG_A7XX_VFD_DBG_ECO_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VFD_CB_BV_THRESHOLD, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VFD_CB_BR_THRESHOLD, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VFD_CB_BUSY_REQ_CNT, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VFD_CB_LP_REQ_CNT, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_VPC_FLATSHADE_MODE_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) }, + { REG_A8XX_CP_HW_FAULT_STATUS_MASK_PIPE, 0, BIT(PIPE_BR) | + BIT(PIPE_BV) | BIT(PIPE_LPAC) | BIT(PIPE_AQE0) | + BIT(PIPE_AQE1) | BIT(PIPE_DDE_BR) | BIT(PIPE_DDE_BV) }, + { REG_A8XX_CP_INTERRUPT_STATUS_MASK_PIPE, 0, BIT(PIPE_BR) | + BIT(PIPE_BV) | BIT(PIPE_LPAC) | BIT(PIPE_AQE0) | + BIT(PIPE_AQE1) | BIT(PIPE_DDE_BR) | 
BIT(PIPE_DDE_BV) }, + { REG_A8XX_CP_PROTECT_CNTL_PIPE, 0, BIT(PIPE_BR) | BIT(PIPE_BV) | BIT(PIPE_LPAC)}, + { REG_A8XX_CP_PROTECT_PIPE(15), 0, BIT(PIPE_BR) | BIT(PIPE_BV) | BIT(PIPE_LPAC) }, + { REG_A8XX_RB_GC_GMEM_PROTECT, 0, BIT(PIPE_BR) }, + { REG_A8XX_RB_LPAC_GMEM_PROTECT, 0, BIT(PIPE_BR) }, + { REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE, 0, BIT(PIPE_BR) }, +}; +DECLARE_ADRENO_REGLIST_PIPE_LIST(a840_dyn_pwrup_reglist); + static const struct adreno_info a8xx_gpus[] = { { .chip_ids = ADRENO_CHIP_IDS(0x44070001), @@ -1902,11 +2119,16 @@ static const struct adreno_info a8xx_gpus[] = { .gmem = 21 * SZ_1M, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | - ADRENO_QUIRK_HAS_HW_APRIV, + ADRENO_QUIRK_HAS_HW_APRIV | + ADRENO_QUIRK_PREEMPTION | + ADRENO_QUIRK_SOFTFUSE, .funcs = &a8xx_gpu_funcs, .a6xx = &(const struct a6xx_info) { .protect = &x285_protect, .nonctxt_reglist = x285_nonctxt_regs, + .pwrup_reglist = &a840_pwrup_reglist, + .dyn_pwrup_reglist = &x285_dyn_pwrup_reglist, + .ifpc_reglist = &a840_ifpc_reglist, .gbif_cx = a840_gbif, .max_slices = 4, .gmu_chipid = 0x8010100, @@ -1922,6 +2144,12 @@ static const struct adreno_info a8xx_gpus[] = { { /* sentinel */ }, }, }, + .speedbins = ADRENO_SPEEDBINS( + { 0, 0 }, + { 388, 1 }, + { 357, 2 }, + { 284, 3 }, + ), }, { .chip_ids = ADRENO_CHIP_IDS(0x44050a01), .family = ADRENO_8XX_GEN2, @@ -1933,11 +2161,16 @@ static const struct adreno_info a8xx_gpus[] = { .gmem = 18 * SZ_1M, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | - ADRENO_QUIRK_HAS_HW_APRIV, + ADRENO_QUIRK_HAS_HW_APRIV | + ADRENO_QUIRK_PREEMPTION | + ADRENO_QUIRK_IFPC, .funcs = &a8xx_gpu_funcs, .a6xx = &(const struct a6xx_info) { .protect = &a840_protect, .nonctxt_reglist = a840_nonctxt_regs, + .pwrup_reglist = &a840_pwrup_reglist, + .dyn_pwrup_reglist = &a840_dyn_pwrup_reglist, + .ifpc_reglist = &a840_ifpc_reglist, .gbif_cx = a840_gbif, .max_slices = 3, .gmu_chipid = 
0x8020100, @@ -1954,6 +2187,12 @@ static const struct adreno_info a8xx_gpus[] = { }, }, .preempt_record_size = 19708 * SZ_1K, + .speedbins = ADRENO_SPEEDBINS( + { 0, 0 }, + { 273, 1 }, + { 252, 2 }, + { 221, 3 }, + ), } }; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 9662201cd2e9..1b44b9e21ad8 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -3,6 +3,7 @@ #include <linux/bitfield.h> #include <linux/clk.h> +#include <linux/firmware/qcom/qcom_scm.h> #include <linux/interconnect.h> #include <linux/of_platform.h> #include <linux/platform_device.h> @@ -91,10 +92,10 @@ bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu) } /* Check to see if the GX rail is still powered */ -bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu) +bool a6xx_gmu_gx_is_on(struct adreno_gpu *adreno_gpu) { - struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); - struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; u32 val; /* This can be called from gpu state code so make sure GMU is valid */ @@ -117,6 +118,40 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu) A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF)); } +bool a7xx_gmu_gx_is_on(struct adreno_gpu *adreno_gpu) +{ + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + u32 val; + + /* This can be called from gpu state code so make sure GMU is valid */ + if (!gmu->initialized) + return false; + + val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); + + return !(val & + (A7XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF | + A7XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF)); +} + +bool a8xx_gmu_gx_is_on(struct adreno_gpu *adreno_gpu) +{ + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + u32 val; + + /* This can be called from gpu state code so make sure GMU is 
valid */ + if (!gmu->initialized) + return false; + + val = gmu_read(gmu, REG_A8XX_GMU_PWR_CLK_STATUS); + + return !(val & + (A8XX_GMU_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF | + A8XX_GMU_PWR_CLK_STATUS_GX_HM_CLK_OFF)); +} + void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp, bool suspended) { @@ -240,7 +275,7 @@ static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu) if (val == local) { if (gmu->idle_level != GMU_IDLE_STATE_IFPC || - !a6xx_gmu_gx_is_on(gmu)) + !adreno_gpu->funcs->gx_is_on(adreno_gpu)) return true; } @@ -1157,6 +1192,65 @@ static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu) dev_pm_opp_put(gpu_opp); } +static int a6xx_gmu_secure_init(struct a6xx_gpu *a6xx_gpu) +{ + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + u32 fuse_val; + int ret; + + if (test_bit(GMU_STATUS_SECURE_INIT, &gmu->status)) + return 0; + + if (adreno_is_a750(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) { + /* + * Assume that if qcom scm isn't available, that whatever + * replacement allows writing the fuse register ourselves. + * Users of alternative firmware need to make sure this + * register is writeable or indicate that it's not somehow. + * Print a warning because if you mess this up you're about to + * crash horribly. 
+ */ + if (!qcom_scm_is_available()) { + dev_warn_once(gpu->dev->dev, + "SCM is not available, poking fuse register\n"); + a6xx_llc_write(a6xx_gpu, REG_A7XX_CX_MISC_SW_FUSE_VALUE, + A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING | + A7XX_CX_MISC_SW_FUSE_VALUE_FASTBLEND | + A7XX_CX_MISC_SW_FUSE_VALUE_LPAC); + adreno_gpu->has_ray_tracing = true; + goto done; + } + + ret = qcom_scm_gpu_init_regs(QCOM_SCM_GPU_ALWAYS_EN_REQ | + QCOM_SCM_GPU_TSENSE_EN_REQ); + if (ret) { + dev_warn_once(gpu->dev->dev, + "SCM call failed\n"); + return ret; + } + + /* + * On A7XX_GEN3 and newer, raytracing may be disabled by the + * firmware, find out whether that's the case. The scm call + * above sets the fuse register. + */ + fuse_val = a6xx_llc_read(a6xx_gpu, + REG_A7XX_CX_MISC_SW_FUSE_VALUE); + adreno_gpu->has_ray_tracing = + !!(fuse_val & A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING); + } else if (adreno_is_a740(adreno_gpu)) { + /* Raytracing is always enabled on a740 */ + adreno_gpu->has_ray_tracing = true; + } + +done: + set_bit(GMU_STATUS_SECURE_INIT, &gmu->status); + return 0; +} + + int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) { struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; @@ -1185,11 +1279,12 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) clk_set_rate(gmu->hub_clk, adreno_is_a740_family(adreno_gpu) ? 
200000000 : 150000000); ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); - if (ret) { - pm_runtime_put(gmu->gxpd); - pm_runtime_put(gmu->dev); - return ret; - } + if (ret) + goto rpm_put; + + ret = a6xx_gmu_secure_init(a6xx_gpu); + if (ret) + goto disable_clk; /* Read the slice info on A8x GPUs */ a8xx_gpu_get_slice_info(gpu); @@ -1219,11 +1314,11 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) ret = a6xx_gmu_fw_start(gmu, status); if (ret) - goto out; + goto disable_irq; ret = a6xx_hfi_start(gmu, status); if (ret) - goto out; + goto disable_irq; /* * Turn on the GMU firmware fault interrupt after we know the boot @@ -1236,19 +1331,16 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) /* Set the GPU to the current freq */ a6xx_gmu_set_initial_freq(gpu, gmu); - if (refcount_read(&gpu->sysprof_active) > 1) { - ret = a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET); - if (!ret) - set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status); - } -out: - /* On failure, shut down the GMU to leave it in a good state */ - if (ret) { - disable_irq(gmu->gmu_irq); - a6xx_rpmh_stop(gmu); - pm_runtime_put(gmu->gxpd); - pm_runtime_put(gmu->dev); - } + return 0; + +disable_irq: + disable_irq(gmu->gmu_irq); + a6xx_rpmh_stop(gmu); +disable_clk: + clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); +rpm_put: + pm_runtime_put(gmu->gxpd); + pm_runtime_put(gmu->dev); return ret; } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index 2af074c8e8cf..3f96b10b5f61 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -10,6 +10,7 @@ #include <linux/notifier.h> #include <linux/soc/qcom/qcom_aoss.h> #include "msm_drv.h" +#include "adreno_gpu.h" #include "a6xx_hfi.h" struct a6xx_gmu_bo { @@ -110,7 +111,7 @@ struct a6xx_gmu { unsigned long freq; - struct a6xx_hfi_queue queues[2]; + struct a6xx_hfi_queue queues[HFI_MAX_QUEUES]; bool initialized; bool hung; @@ -129,6 +130,8 @@ struct a6xx_gmu { #define 
GMU_STATUS_PDC_SLEEP 1 /* To track Perfcounter OOB set status */ #define GMU_STATUS_OOB_PERF_SET 2 +/* To track whether secure world init was done */ +#define GMU_STATUS_SECURE_INIT 3 unsigned long status; }; @@ -231,7 +234,9 @@ void a6xx_hfi_stop(struct a6xx_gmu *gmu); int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu); int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, u32 perf_index, u32 bw_index); -bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu); +bool a6xx_gmu_gx_is_on(struct adreno_gpu *adreno_gpu); +bool a7xx_gmu_gx_is_on(struct adreno_gpu *adreno_gpu); +bool a8xx_gmu_gx_is_on(struct adreno_gpu *adreno_gpu); bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu); void a6xx_sptprac_disable(struct a6xx_gmu *gmu); int a6xx_sptprac_enable(struct a6xx_gmu *gmu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index d6dfe6337bc3..d5aba072f44c 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -10,14 +10,15 @@ #include <linux/bitfield.h> #include <linux/devfreq.h> -#include <linux/firmware/qcom/qcom_scm.h> #include <linux/pm_domain.h> #include <linux/soc/qcom/llcc-qcom.h> #define GPU_PAS_ID 13 -static u64 read_gmu_ao_counter(struct a6xx_gpu *a6xx_gpu) +static u64 a6xx_gmu_get_timestamp(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); u64 count_hi, count_lo, temp; do { @@ -345,7 +346,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) * GPU registers so we need to add 0x1a800 to the register value on A630 * to get the right value from PM4. 
*/ - get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER, + get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_CONTEXT, rbmemptr_stats(ring, index, alwayson_start)); /* Invalidate CCU depth and color */ @@ -386,7 +387,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0), rbmemptr_stats(ring, index, cpcycles_end)); - get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER, + get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_CONTEXT, rbmemptr_stats(ring, index, alwayson_end)); /* Write the fence to the scratch register */ @@ -404,12 +405,12 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence))); OUT_RING(ring, submit->seqno); - trace_msm_gpu_submit_flush(submit, read_gmu_ao_counter(a6xx_gpu)); + trace_msm_gpu_submit_flush(submit, adreno_gpu->funcs->get_timestamp(gpu)); a6xx_flush(gpu, ring); } -static void a6xx_emit_set_pseudo_reg(struct msm_ringbuffer *ring, +void a6xx_emit_set_pseudo_reg(struct msm_ringbuffer *ring, struct a6xx_gpu *a6xx_gpu, struct msm_gpu_submitqueue *queue) { u64 preempt_postamble; @@ -455,7 +456,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); struct msm_ringbuffer *ring = submit->ring; - u32 rbbm_perfctr_cp0, cp_always_on_counter; + u32 rbbm_perfctr_cp0, cp_always_on_context; unsigned int i, ibs = 0; adreno_check_and_reenable_stall(adreno_gpu); @@ -478,14 +479,14 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) if (adreno_is_a8xx(adreno_gpu)) { rbbm_perfctr_cp0 = REG_A8XX_RBBM_PERFCTR_CP(0); - cp_always_on_counter = REG_A8XX_CP_ALWAYS_ON_COUNTER; + cp_always_on_context = REG_A8XX_CP_ALWAYS_ON_CONTEXT; } else { rbbm_perfctr_cp0 = REG_A7XX_RBBM_PERFCTR_CP(0); - cp_always_on_counter = REG_A6XX_CP_ALWAYS_ON_COUNTER; + cp_always_on_context 
= REG_A6XX_CP_ALWAYS_ON_CONTEXT; } get_stats_counter(ring, rbbm_perfctr_cp0, rbmemptr_stats(ring, index, cpcycles_start)); - get_stats_counter(ring, cp_always_on_counter, rbmemptr_stats(ring, index, alwayson_start)); + get_stats_counter(ring, cp_always_on_context, rbmemptr_stats(ring, index, alwayson_start)); OUT_PKT7(ring, CP_THREAD_CONTROL, 1); OUT_RING(ring, CP_SET_THREAD_BOTH); @@ -533,7 +534,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) } get_stats_counter(ring, rbbm_perfctr_cp0, rbmemptr_stats(ring, index, cpcycles_end)); - get_stats_counter(ring, cp_always_on_counter, rbmemptr_stats(ring, index, alwayson_end)); + get_stats_counter(ring, cp_always_on_context, rbmemptr_stats(ring, index, alwayson_end)); /* Write the fence to the scratch register */ if (adreno_is_a8xx(adreno_gpu)) { @@ -614,12 +615,15 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) } - trace_msm_gpu_submit_flush(submit, read_gmu_ao_counter(a6xx_gpu)); + trace_msm_gpu_submit_flush(submit, adreno_gpu->funcs->get_timestamp(gpu)); a6xx_flush(gpu, ring); /* Check to see if we need to start preemption */ - a6xx_preempt_trigger(gpu); + if (adreno_is_a8xx(adreno_gpu)) + a8xx_preempt_trigger(gpu); + else + a6xx_preempt_trigger(gpu); } static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state) @@ -1603,6 +1607,12 @@ out: a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER); } + if (!ret && (refcount_read(&gpu->sysprof_active) > 1)) { + ret = a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET); + if (!ret) + set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status); + } + return ret; } @@ -1635,7 +1645,7 @@ static void a6xx_recover(struct msm_gpu *gpu) adreno_dump_info(gpu); - if (a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) { + if (adreno_gpu->funcs->gx_is_on(adreno_gpu)) { /* Sometimes crashstate capture is skipped, so SQE should be halted here again */ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3); @@ -2152,56 +2162,6 @@ static void a6xx_llc_slices_init(struct 
platform_device *pdev, a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL); } -static int a7xx_cx_mem_init(struct a6xx_gpu *a6xx_gpu) -{ - struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; - struct msm_gpu *gpu = &adreno_gpu->base; - u32 fuse_val; - int ret; - - if (adreno_is_a750(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) { - /* - * Assume that if qcom scm isn't available, that whatever - * replacement allows writing the fuse register ourselves. - * Users of alternative firmware need to make sure this - * register is writeable or indicate that it's not somehow. - * Print a warning because if you mess this up you're about to - * crash horribly. - */ - if (!qcom_scm_is_available()) { - dev_warn_once(gpu->dev->dev, - "SCM is not available, poking fuse register\n"); - a6xx_llc_write(a6xx_gpu, REG_A7XX_CX_MISC_SW_FUSE_VALUE, - A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING | - A7XX_CX_MISC_SW_FUSE_VALUE_FASTBLEND | - A7XX_CX_MISC_SW_FUSE_VALUE_LPAC); - adreno_gpu->has_ray_tracing = true; - return 0; - } - - ret = qcom_scm_gpu_init_regs(QCOM_SCM_GPU_ALWAYS_EN_REQ | - QCOM_SCM_GPU_TSENSE_EN_REQ); - if (ret) - return ret; - - /* - * On A7XX_GEN3 and newer, raytracing may be disabled by the - * firmware, find out whether that's the case. The scm call - * above sets the fuse register. 
- */ - fuse_val = a6xx_llc_read(a6xx_gpu, - REG_A7XX_CX_MISC_SW_FUSE_VALUE); - adreno_gpu->has_ray_tracing = - !!(fuse_val & A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING); - } else if (adreno_is_a740(adreno_gpu)) { - /* Raytracing is always enabled on a740 */ - adreno_gpu->has_ray_tracing = true; - } - - return 0; -} - - #define GBIF_CLIENT_HALT_MASK BIT(0) #define GBIF_ARB_HALT_MASK BIT(1) #define VBIF_XIN_HALT_CTRL0_MASK GENMASK(3, 0) @@ -2414,20 +2374,9 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu) return 0; } -static int a6xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value) -{ - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); - - *value = read_gmu_ao_counter(a6xx_gpu); - - return 0; -} - -static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) +static u64 a6xx_get_timestamp(struct msm_gpu *gpu) { - *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER); - return 0; + return gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER); } static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu) @@ -2600,13 +2549,33 @@ static u32 fuse_to_supp_hw(const struct adreno_info *info, u32 fuse) return UINT_MAX; } -static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *info) +static int a6xx_read_speedbin(struct device *dev, struct a6xx_gpu *a6xx_gpu, + const struct adreno_info *info, u32 *speedbin) +{ + int ret; + + /* Use speedbin fuse if present. 
Otherwise, fallback to softfuse */ + ret = adreno_read_speedbin(dev, speedbin); + if (ret != -ENOENT) + return ret; + + if (info->quirks & ADRENO_QUIRK_SOFTFUSE) { + *speedbin = a6xx_llc_read(a6xx_gpu, REG_A8XX_CX_MISC_SW_FUSE_FREQ_LIMIT_STATUS); + *speedbin = A8XX_CX_MISC_SW_FUSE_FREQ_LIMIT_STATUS_FINALFREQLIMIT(*speedbin); + return 0; + } + + return -ENOENT; +} + +static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu, + const struct adreno_info *info) { u32 supp_hw; u32 speedbin; int ret; - ret = adreno_read_speedbin(dev, &speedbin); + ret = a6xx_read_speedbin(dev, a6xx_gpu, info, &speedbin); /* * -ENOENT means that the platform doesn't support speedbin which is * fine @@ -2635,16 +2604,29 @@ static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *i return 0; } +static bool a6xx_aqe_is_enabled(struct adreno_gpu *adreno_gpu) +{ + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + /* + * AQE uses preemption context record as scratch pad, so check if + * preemption is enabled + */ + return (adreno_gpu->base.nr_rings > 1) && !!a6xx_gpu->aqe_bo; +} + static struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; struct platform_device *pdev = priv->gpu_pdev; struct adreno_platform_config *config = pdev->dev.platform_data; + const struct adreno_info *info = config->info; struct device_node *node; struct a6xx_gpu *a6xx_gpu; struct adreno_gpu *adreno_gpu; struct msm_gpu *gpu; extern int enable_preemption; + u32 speedbin; bool is_a7xx; int ret, nr_rings = 1; @@ -2656,6 +2638,7 @@ static struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) gpu = &adreno_gpu->base; mutex_init(&a6xx_gpu->gmu.lock); + spin_lock_init(&a6xx_gpu->aperture_lock); adreno_gpu->registers = NULL; @@ -2667,14 +2650,14 @@ static struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper"); adreno_gpu->base.hw_apriv = - 
!!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV); + !!(info->quirks & ADRENO_QUIRK_HAS_HW_APRIV); /* gpu->info only gets assigned in adreno_gpu_init(). A8x is included intentionally */ - is_a7xx = config->info->family >= ADRENO_7XX_GEN1; + is_a7xx = info->family >= ADRENO_7XX_GEN1; a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx); - ret = a6xx_set_supported_hw(&pdev->dev, config->info); + ret = a6xx_set_supported_hw(&pdev->dev, a6xx_gpu, info); if (ret) { a6xx_llc_slices_destroy(a6xx_gpu); kfree(a6xx_gpu); @@ -2682,15 +2665,20 @@ static struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) } if ((enable_preemption == 1) || (enable_preemption == -1 && - (config->info->quirks & ADRENO_QUIRK_PREEMPTION))) + (info->quirks & ADRENO_QUIRK_PREEMPTION))) nr_rings = 4; - ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, nr_rings); + ret = adreno_gpu_init(dev, pdev, adreno_gpu, info->funcs, nr_rings); if (ret) { a6xx_destroy(&(a6xx_gpu->base.base)); return ERR_PTR(ret); } + /* Set the speedbin value that is passed to userspace */ + if (a6xx_read_speedbin(&pdev->dev, a6xx_gpu, info, &speedbin) || !speedbin) + speedbin = 0xffff; + adreno_gpu->speedbin = (uint16_t) (0xffff & speedbin); + /* * For now only clamp to idle freq for devices where this is known not * to cause power supply issues: @@ -2708,14 +2696,6 @@ static struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) return ERR_PTR(ret); } - if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) { - ret = a7xx_cx_mem_init(a6xx_gpu); - if (ret) { - a6xx_destroy(&(a6xx_gpu->base.base)); - return ERR_PTR(ret); - } - } - adreno_gpu->uche_trap_base = 0x1fffffffff000ull; msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu, @@ -2765,6 +2745,7 @@ const struct adreno_gpu_funcs a6xx_gpu_funcs = { .get_timestamp = a6xx_gmu_get_timestamp, .bus_halt = a6xx_bus_clear_pending_transactions, .mmu_fault_handler = a6xx_fault_handler, + .gx_is_on = a6xx_gmu_gx_is_on, }; const struct adreno_gpu_funcs 
a6xx_gmuwrapper_funcs = { @@ -2797,6 +2778,7 @@ const struct adreno_gpu_funcs a6xx_gmuwrapper_funcs = { .get_timestamp = a6xx_get_timestamp, .bus_halt = a6xx_bus_clear_pending_transactions, .mmu_fault_handler = a6xx_fault_handler, + .gx_is_on = a6xx_gmu_gx_is_on, }; const struct adreno_gpu_funcs a7xx_gpu_funcs = { @@ -2831,6 +2813,8 @@ const struct adreno_gpu_funcs a7xx_gpu_funcs = { .get_timestamp = a6xx_gmu_get_timestamp, .bus_halt = a6xx_bus_clear_pending_transactions, .mmu_fault_handler = a6xx_fault_handler, + .gx_is_on = a7xx_gmu_gx_is_on, + .aqe_is_enabled = a6xx_aqe_is_enabled, }; const struct adreno_gpu_funcs a8xx_gpu_funcs = { @@ -2858,4 +2842,6 @@ const struct adreno_gpu_funcs a8xx_gpu_funcs = { .get_timestamp = a8xx_gmu_get_timestamp, .bus_halt = a8xx_bus_clear_pending_transactions, .mmu_fault_handler = a8xx_fault_handler, + .gx_is_on = a8xx_gmu_gx_is_on, + .aqe_is_enabled = a6xx_aqe_is_enabled, }; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index 4eaa04711246..eb431e5e00b1 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -278,6 +278,8 @@ void a6xx_preempt_hw_init(struct msm_gpu *gpu); void a6xx_preempt_trigger(struct msm_gpu *gpu); void a6xx_preempt_irq(struct msm_gpu *gpu); void a6xx_preempt_fini(struct msm_gpu *gpu); +void a6xx_emit_set_pseudo_reg(struct msm_ringbuffer *ring, + struct a6xx_gpu *a6xx_gpu, struct msm_gpu_submitqueue *queue); int a6xx_preempt_submitqueue_setup(struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue); void a6xx_preempt_submitqueue_close(struct msm_gpu *gpu, @@ -320,13 +322,16 @@ int a6xx_zap_shader_init(struct msm_gpu *gpu); void a8xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off); int a8xx_fault_handler(void *arg, unsigned long iova, int flags, void *data); void a8xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring); -int a8xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value); +u64 
a8xx_gmu_get_timestamp(struct msm_gpu *gpu); u64 a8xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate); int a8xx_gpu_feature_probe(struct msm_gpu *gpu); void a8xx_gpu_get_slice_info(struct msm_gpu *gpu); int a8xx_hw_init(struct msm_gpu *gpu); irqreturn_t a8xx_irq(struct msm_gpu *gpu); void a8xx_llc_activate(struct a6xx_gpu *a6xx_gpu); +void a8xx_preempt_hw_init(struct msm_gpu *gpu); +void a8xx_preempt_trigger(struct msm_gpu *gpu); +void a8xx_preempt_irq(struct msm_gpu *gpu); bool a8xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring); void a8xx_recover(struct msm_gpu *gpu); #endif /* __A6XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 2d56fe0a65b7..166365359fa6 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -57,7 +57,7 @@ struct a6xx_gpu_state { struct msm_gpu_state_bo *gmu_hfi; struct msm_gpu_state_bo *gmu_debug; - s32 hfi_queue_history[2][HFI_HISTORY_SZ]; + s32 hfi_queue_history[HFI_MAX_QUEUES][HFI_HISTORY_SZ]; struct list_head objs; @@ -361,7 +361,7 @@ static void a6xx_get_debugbus_blocks(struct msm_gpu *gpu, sizeof(*a6xx_state->debugbus)); if (a6xx_state->debugbus) { - int i; + int i, j; for (i = 0; i < ARRAY_SIZE(a6xx_debugbus_blocks); i++) a6xx_get_debugbus_block(gpu, @@ -369,8 +369,6 @@ static void a6xx_get_debugbus_blocks(struct msm_gpu *gpu, &a6xx_debugbus_blocks[i], &a6xx_state->debugbus[i]); - a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks); - /* * GBIF has same debugbus as of other GPU blocks, fall back to * default path if GPU uses GBIF, also GBIF uses exactly same @@ -381,17 +379,19 @@ static void a6xx_get_debugbus_blocks(struct msm_gpu *gpu, &a6xx_gbif_debugbus_block, &a6xx_state->debugbus[i]); - a6xx_state->nr_debugbus += 1; + i++; } if (adreno_is_a650_family(to_adreno_gpu(gpu))) { - for (i = 0; i < ARRAY_SIZE(a650_debugbus_blocks); i++) + for (j = 0; j < 
ARRAY_SIZE(a650_debugbus_blocks); i++, j++) a6xx_get_debugbus_block(gpu, a6xx_state, - &a650_debugbus_blocks[i], + &a650_debugbus_blocks[j], &a6xx_state->debugbus[i]); } + + a6xx_state->nr_debugbus = i; } } @@ -1013,7 +1013,7 @@ static void a6xx_get_crashdumper_hlsq_registers(struct msm_gpu *gpu, u64 out = dumper->iova + A6XX_CD_DATA_OFFSET; int i, regcount = 0; - in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, regs->val1); + in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, (regs->val1 & 0xff) << 8); for (i = 0; i < regs->count; i += 2) { u32 count = RANGE(regs->registers, i); @@ -1251,7 +1251,7 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu, _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gpucc_reg, &a6xx_state->gmu_registers[2], false); - if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) + if (!adreno_gpu->funcs->gx_is_on(adreno_gpu)) return; /* Set the fence to ALLOW mode so we can access the registers */ @@ -1607,7 +1607,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu) } /* If GX isn't on the rest of the data isn't going to be accessible */ - if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) + if (!adreno_gpu->funcs->gx_is_on(adreno_gpu)) return &a6xx_state->base; /* Halt SQE first */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c index 53cfdf4e6c34..487c2736f2b3 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c @@ -34,7 +34,7 @@ static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu, struct a6xx_hfi_queue_header *header = queue->header; u32 i, hdr, index = header->read_index; - if (header->read_index == header->write_index) { + if (header->read_index == READ_ONCE(header->write_index)) { header->rx_request = 1; return 0; } @@ -62,7 +62,10 @@ static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu, if (!gmu->legacy) index = ALIGN(index, 4) % header->size; - header->read_index = index; + /* Ensure all memory operations are complete before updating the read index 
*/ + dma_mb(); + + WRITE_ONCE(header->read_index, index); return HFI_HEADER_SIZE(hdr); } @@ -74,7 +77,7 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu, spin_lock(&queue->lock); - space = CIRC_SPACE(header->write_index, header->read_index, + space = CIRC_SPACE(header->write_index, READ_ONCE(header->read_index), header->size); if (space < dwords) { header->dropped++; @@ -95,7 +98,10 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu, queue->data[index] = 0xfafafafa; } - header->write_index = index; + /* Ensure all memory operations are complete before updating the write index */ + dma_mb(); + + WRITE_ONCE(header->write_index, index); spin_unlock(&queue->lock); gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01); @@ -845,7 +851,6 @@ static int a6xx_hfi_feature_ctrl_msg(struct a6xx_gmu *gmu, u32 feature, u32 enab return a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0); } -#define HFI_FEATURE_IFPC 9 #define IFPC_LONG_HYST 0x1680 static int a6xx_hfi_enable_ifpc(struct a6xx_gmu *gmu) @@ -856,8 +861,6 @@ static int a6xx_hfi_enable_ifpc(struct a6xx_gmu *gmu) return a6xx_hfi_feature_ctrl_msg(gmu, HFI_FEATURE_IFPC, 1, IFPC_LONG_HYST); } -#define HFI_FEATURE_ACD 12 - static int a6xx_hfi_enable_acd(struct a6xx_gmu *gmu) { struct a6xx_hfi_acd_table *acd_table = &gmu->acd_table; @@ -1056,8 +1059,8 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu) struct a6xx_gmu_bo *hfi = &gmu->hfi; struct a6xx_hfi_queue_table_header *table = hfi->virt; struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table); + int table_size, idx; u64 offset; - int table_size; /* * The table size is the size of the table header plus all of the queue @@ -1076,12 +1079,22 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu) table->active_queues = ARRAY_SIZE(gmu->queues); /* Command queue */ + idx = 0; offset = SZ_4K; - a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset, + a6xx_hfi_queue_init(&gmu->queues[idx], &headers[idx], hfi->virt + offset, hfi->iova + 
offset, 0); /* GMU response queue */ + idx++; offset += SZ_4K; - a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset, + a6xx_hfi_queue_init(&gmu->queues[idx], &headers[idx], hfi->virt + offset, hfi->iova + offset, gmu->legacy ? 4 : 1); + + /* GMU Debug queue */ + idx++; + offset += SZ_4K; + a6xx_hfi_queue_init(&gmu->queues[idx], &headers[idx], hfi->virt + offset, + hfi->iova + offset, gmu->legacy ? 5 : 2); + + WARN_ON(idx >= HFI_MAX_QUEUES); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h index 6f9f74a0bc85..e10d32ce93e0 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h @@ -4,6 +4,8 @@ #ifndef _A6XX_HFI_H_ #define _A6XX_HFI_H_ +#define HFI_MAX_QUEUES 3 + struct a6xx_hfi_queue_table_header { u32 version; u32 size; /* Size of the queue table in dwords */ @@ -11,7 +13,7 @@ struct a6xx_hfi_queue_table_header { u32 qhdr_size; /* Size of the queue headers */ u32 num_queues; /* Number of total queues */ u32 active_queues; /* Number of active queues */ -}; +} __packed; struct a6xx_hfi_queue_header { u32 status; @@ -26,7 +28,7 @@ struct a6xx_hfi_queue_header { u32 tx_request; u32 read_index; u32 write_index; -}; +} __packed; struct a6xx_hfi_queue { struct a6xx_hfi_queue_header *header; @@ -72,7 +74,7 @@ struct a6xx_hfi_msg_response { u32 ret_header; u32 error; u32 payload[HFI_RESPONSE_PAYLOAD_SIZE]; -}; +} __packed; #define HFI_F2H_MSG_ERROR 100 @@ -80,7 +82,7 @@ struct a6xx_hfi_msg_error { u32 header; u32 code; u32 payload[2]; -}; +} __packed; #define HFI_H2F_MSG_INIT 0 @@ -90,27 +92,27 @@ struct a6xx_hfi_msg_gmu_init_cmd { u32 dbg_buffer_addr; u32 dbg_buffer_size; u32 boot_state; -}; +} __packed; #define HFI_H2F_MSG_FW_VERSION 1 struct a6xx_hfi_msg_fw_version { u32 header; u32 supported_version; -}; +} __packed; #define HFI_H2F_MSG_PERF_TABLE 4 struct perf_level { u32 vote; u32 freq; -}; +} __packed; struct perf_gx_level { u32 vote; u32 acd; u32 freq; -}; +} 
__packed; struct a6xx_hfi_msg_perf_table_v1 { u32 header; @@ -119,7 +121,7 @@ struct a6xx_hfi_msg_perf_table_v1 { struct perf_level gx_votes[16]; struct perf_level cx_votes[4]; -}; +} __packed; struct a6xx_hfi_msg_perf_table { u32 header; @@ -128,7 +130,7 @@ struct a6xx_hfi_msg_perf_table { struct perf_gx_level gx_votes[16]; struct perf_level cx_votes[4]; -}; +} __packed; #define HFI_H2F_MSG_BW_TABLE 3 @@ -143,13 +145,13 @@ struct a6xx_hfi_msg_bw_table { u32 cnoc_cmds_data[2][6]; u32 ddr_cmds_addrs[8]; u32 ddr_cmds_data[16][8]; -}; +} __packed; #define HFI_H2F_MSG_TEST 5 struct a6xx_hfi_msg_test { u32 header; -}; +} __packed; #define HFI_H2F_MSG_ACD 7 #define MAX_ACD_STRIDE 2 @@ -161,29 +163,100 @@ struct a6xx_hfi_acd_table { u32 stride; u32 num_levels; u32 data[16 * MAX_ACD_STRIDE]; -}; +} __packed; + +#define CLX_DATA(irated, num_phases, clx_path, extd_intf) \ + ((extd_intf << 29) | \ + (clx_path << 28) | \ + (num_phases << 22) | \ + (irated << 16)) + +struct a6xx_hfi_clx_domain_v2 { + /** + * @data: BITS[0:15] Migration time + * BITS[16:21] Current rating + * BITS[22:27] Phases for domain + * BITS[28:28] Path notification + * BITS[29:31] Extra features + */ + u32 data; + /** @clxt: CLX time in microseconds */ + u32 clxt; + /** @clxh: CLH time in microseconds */ + u32 clxh; + /** @urg_mode: Urgent HW throttle mode of operation */ + u32 urg_mode; + /** @lkg_en: Enable leakage current estimate */ + u32 lkg_en; + /** curr_budget: Current Budget */ + u32 curr_budget; +} __packed; + +#define HFI_H2F_MSG_CLX_TBL 8 + +#define MAX_CLX_DOMAINS 2 +struct a6xx_hfi_clx_table_v2_cmd { + u32 hdr; + u32 version; + struct a6xx_hfi_clx_domain_v2 domain[MAX_CLX_DOMAINS]; +} __packed; #define HFI_H2F_MSG_START 10 struct a6xx_hfi_msg_start { u32 header; -}; +} __packed; #define HFI_H2F_FEATURE_CTRL 11 struct a6xx_hfi_msg_feature_ctrl { u32 header; u32 feature; +#define HFI_FEATURE_DCVS 0 +#define HFI_FEATURE_HWSCHED 1 +#define HFI_FEATURE_PREEMPTION 2 +#define HFI_FEATURE_CLOCKS_ON 
3 +#define HFI_FEATURE_BUS_ON 4 +#define HFI_FEATURE_RAIL_ON 5 +#define HFI_FEATURE_HWCG 6 +#define HFI_FEATURE_LM 7 +#define HFI_FEATURE_THROTTLE 8 +#define HFI_FEATURE_IFPC 9 +#define HFI_FEATURE_NAP 10 +#define HFI_FEATURE_BCL 11 +#define HFI_FEATURE_ACD 12 +#define HFI_FEATURE_DIDT 13 +#define HFI_FEATURE_DEPRECATED 14 +#define HFI_FEATURE_CB 15 +#define HFI_FEATURE_KPROF 16 +#define HFI_FEATURE_BAIL_OUT_TIMER 17 +#define HFI_FEATURE_GMU_STATS 18 +#define HFI_FEATURE_DBQ 19 +#define HFI_FEATURE_MINBW 20 +#define HFI_FEATURE_CLX 21 +#define HFI_FEATURE_LSR 23 +#define HFI_FEATURE_LPAC 24 +#define HFI_FEATURE_HW_FENCE 25 +#define HFI_FEATURE_PERF_NORETAIN 26 +#define HFI_FEATURE_DMS 27 +#define HFI_FEATURE_THERMAL 28 +#define HFI_FEATURE_AQE 29 +#define HFI_FEATURE_TDCVS 30 +#define HFI_FEATURE_DCE 31 +#define HFI_FEATURE_IFF_PCLX 32 +#define HFI_FEATURE_SOFT_RESET 0x10000001 +#define HFI_FEATURE_DCVS_PROFILE 0x10000002 +#define HFI_FEATURE_FAST_CTX_DESTROY 0x10000003 u32 enable; u32 data; -}; +} __packed; #define HFI_H2F_MSG_CORE_FW_START 14 struct a6xx_hfi_msg_core_fw_start { u32 header; u32 handle; -}; +} __packed; #define HFI_H2F_MSG_TABLE 15 @@ -191,16 +264,25 @@ struct a6xx_hfi_table_entry { u32 count; u32 stride; u32 data[]; -}; +} __packed; struct a6xx_hfi_table { u32 header; u32 version; u32 type; -#define HFI_TABLE_BW_VOTE 0 -#define HFI_TABLE_GPU_PERF 1 +#define HFI_TABLE_BW_VOTE 0 +#define HFI_TABLE_GPU_PERF 1 +#define HFI_TABLE_DIDT 2 +#define HFI_TABLE_ACD 3 +#define HFI_TABLE_CLX_V1 4 /* Unused */ +#define HFI_TABLE_CLX_V2 5 +#define HFI_TABLE_THERM 6 +#define HFI_TABLE_DCVS 7 +#define HFI_TABLE_SYS_TIME 8 +#define HFI_TABLE_GMU_DCVS 9 +#define HFI_TABLE_LIMITS_MIT 10 struct a6xx_hfi_table_entry entry[]; -}; +} __packed; #define HFI_H2F_MSG_GX_BW_PERF_VOTE 30 @@ -209,7 +291,7 @@ struct a6xx_hfi_gx_bw_perf_vote_cmd { u32 ack_type; u32 freq; u32 bw; -}; +} __packed; #define AB_VOTE_MASK GENMASK(31, 16) #define MAX_AB_VOTE (FIELD_MAX(AB_VOTE_MASK) - 
1) @@ -222,6 +304,35 @@ struct a6xx_hfi_prep_slumber_cmd { u32 header; u32 bw; u32 freq; -}; +} __packed; + +struct a6xx_hfi_limits_cfg { + u32 enable; + u32 msg_path; + u32 lkg_en; + /* + * BIT[0]: 0 = (static) throttle to fixed sid level + * 1 = (dynamic) throttle to sid level calculated by HW + * BIT[1]: 0 = Mx + * 1 = Bx + */ + u32 mode; + u32 sid; + /* Mitigation time in microseconds */ + u32 mit_time; + /* Max current in mA during mitigation */ + u32 curr_limit; +} __packed; + +struct a6xx_hfi_limits_tbl { + u8 feature_id; +#define GMU_MIT_IFF 0 +#define GMU_MIT_PCLX 1 + u8 domain; +#define GMU_GX_DOMAIN 0 +#define GMU_MX_DOMAIN 1 + u16 feature_rev; + struct a6xx_hfi_limits_cfg cfg; +} __packed; #endif diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c index 747a22afad9f..df4cbf42e9a4 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c @@ -6,85 +6,10 @@ #include "msm_gem.h" #include "a6xx_gpu.h" #include "a6xx_gmu.xml.h" +#include "a6xx_preempt.h" #include "msm_mmu.h" #include "msm_gpu_trace.h" -/* - * Try to transition the preemption state from old to new. Return - * true on success or false if the original state wasn't 'old' - */ -static inline bool try_preempt_state(struct a6xx_gpu *a6xx_gpu, - enum a6xx_preempt_state old, enum a6xx_preempt_state new) -{ - enum a6xx_preempt_state cur = atomic_cmpxchg(&a6xx_gpu->preempt_state, - old, new); - - return (cur == old); -} - -/* - * Force the preemption state to the specified state. This is used in cases - * where the current state is known and won't change - */ -static inline void set_preempt_state(struct a6xx_gpu *gpu, - enum a6xx_preempt_state new) -{ - /* - * preempt_state may be read by other cores trying to trigger a - * preemption or in the interrupt handler so barriers are needed - * before... - */ - smp_mb__before_atomic(); - atomic_set(&gpu->preempt_state, new); - /* ... 
and after*/ - smp_mb__after_atomic(); -} - -/* Write the most recent wptr for the given ring into the hardware */ -static inline void update_wptr(struct a6xx_gpu *a6xx_gpu, struct msm_ringbuffer *ring) -{ - unsigned long flags; - uint32_t wptr; - - spin_lock_irqsave(&ring->preempt_lock, flags); - - if (ring->restore_wptr) { - wptr = get_wptr(ring); - - a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_RB_WPTR, wptr, BIT(0), false); - - ring->restore_wptr = false; - } - - spin_unlock_irqrestore(&ring->preempt_lock, flags); -} - -/* Return the highest priority ringbuffer with something in it */ -static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) -{ - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); - - unsigned long flags; - int i; - - for (i = 0; i < gpu->nr_rings; i++) { - bool empty; - struct msm_ringbuffer *ring = gpu->rb[i]; - - spin_lock_irqsave(&ring->preempt_lock, flags); - empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring)); - if (!empty && ring == a6xx_gpu->cur_ring) - empty = ring->memptrs->fence == a6xx_gpu->last_seqno[i]; - spin_unlock_irqrestore(&ring->preempt_lock, flags); - - if (!empty) - return ring; - } - - return NULL; -} - static void a6xx_preempt_timer(struct timer_list *t) { struct a6xx_gpu *a6xx_gpu = timer_container_of(a6xx_gpu, t, diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.h b/drivers/gpu/drm/msm/adreno/a6xx_preempt.h new file mode 100644 index 000000000000..df36c945b836 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2023 Collabora, Ltd. */ +/* Copyright (c) 2024 Valve Corporation */ +/* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ + +/* + * Try to transition the preemption state from old to new. 
Return + * true on success or false if the original state wasn't 'old' + */ +static inline bool try_preempt_state(struct a6xx_gpu *a6xx_gpu, + enum a6xx_preempt_state old, enum a6xx_preempt_state new) +{ + enum a6xx_preempt_state cur = atomic_cmpxchg(&a6xx_gpu->preempt_state, + old, new); + + return (cur == old); +} + +/* + * Force the preemption state to the specified state. This is used in cases + * where the current state is known and won't change + */ +static inline void set_preempt_state(struct a6xx_gpu *gpu, + enum a6xx_preempt_state new) +{ + /* + * preempt_state may be read by other cores trying to trigger a + * preemption or in the interrupt handler so barriers are needed + * before... + */ + smp_mb__before_atomic(); + atomic_set(&gpu->preempt_state, new); + /* ... and after */ + smp_mb__after_atomic(); +} + +/* Write the most recent wptr for the given ring into the hardware */ +static inline void update_wptr(struct a6xx_gpu *a6xx_gpu, struct msm_ringbuffer *ring) +{ + unsigned long flags; + uint32_t wptr; + + spin_lock_irqsave(&ring->preempt_lock, flags); + + if (ring->restore_wptr) { + wptr = get_wptr(ring); + + a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_RB_WPTR, wptr, BIT(0), false); + + ring->restore_wptr = false; + } + + spin_unlock_irqrestore(&ring->preempt_lock, flags); +} + +/* Return the highest priority ringbuffer with something in it */ +static inline struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + unsigned long flags; + int i; + + for (i = 0; i < gpu->nr_rings; i++) { + bool empty; + struct msm_ringbuffer *ring = gpu->rb[i]; + + spin_lock_irqsave(&ring->preempt_lock, flags); + empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring)); + if (!empty && ring == a6xx_gpu->cur_ring) + empty = ring->memptrs->fence == a6xx_gpu->last_seqno[i]; + spin_unlock_irqrestore(&ring->preempt_lock, flags); + + if (!empty) + return ring; + 
} + + return NULL; +} + diff --git a/drivers/gpu/drm/msm/adreno/a8xx_gpu.c b/drivers/gpu/drm/msm/adreno/a8xx_gpu.c index b1887e0cf698..ccfccc45133f 100644 --- a/drivers/gpu/drm/msm/adreno/a8xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a8xx_gpu.c @@ -87,6 +87,7 @@ void a8xx_gpu_get_slice_info(struct msm_gpu *gpu) struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); const struct a6xx_info *info = adreno_gpu->info->a6xx; + struct device *dev = &gpu->pdev->dev; u32 slice_mask; if (adreno_gpu->info->family < ADRENO_8XX_GEN1) @@ -110,6 +111,15 @@ void a8xx_gpu_get_slice_info(struct msm_gpu *gpu) /* Chip ID depends on the number of slices available. So update it */ adreno_gpu->chip_id |= FIELD_PREP(GENMASK(7, 4), hweight32(slice_mask)); + + /* Update the gpu-name to reflect the slice config: */ + const char *name = devm_kasprintf(dev, GFP_KERNEL, + "%"ADRENO_CHIPID_FMT, + ADRENO_CHIPID_ARGS(adreno_gpu->chip_id)); + if (name) { + devm_kfree(dev, adreno_gpu->base.name); + adreno_gpu->base.name = name; + } } static u32 a8xx_get_first_slice(struct a6xx_gpu *a6xx_gpu) @@ -173,7 +183,7 @@ void a8xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) /* Update HW if this is the current ring and we are not in preempt*/ if (!a6xx_in_preempt(a6xx_gpu)) { if (a6xx_gpu->cur_ring == ring) - gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); + a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_RB_WPTR, wptr, BIT(0), false); else ring->restore_wptr = true; } else { @@ -386,8 +396,115 @@ static void a8xx_nonctxt_config(struct msm_gpu *gpu, u32 *gmem_protect) a8xx_aperture_clear(gpu); } +static void a8xx_patch_pwrup_reglist(struct msm_gpu *gpu) +{ + const struct adreno_reglist_pipe_list *dyn_pwrup_reglist; + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + const struct adreno_reglist_list *reglist; + void *ptr = a6xx_gpu->pwrup_reglist_ptr; + struct cpu_gpu_lock *lock = ptr; + u32 *dest = (u32 
*)&lock->regs[0]; + u32 dyn_pwrup_reglist_count = 0; + int i; + + lock->gpu_req = lock->cpu_req = lock->turn = 0; + + reglist = adreno_gpu->info->a6xx->ifpc_reglist; + if (reglist) { + lock->ifpc_list_len = reglist->count; + + /* + * For each entry in each of the lists, write the offset and the current + * register value into the GPU buffer + */ + for (i = 0; i < reglist->count; i++) { + *dest++ = reglist->regs[i]; + *dest++ = gpu_read(gpu, reglist->regs[i]); + } + } + + reglist = adreno_gpu->info->a6xx->pwrup_reglist; + if (reglist) { + lock->preemption_list_len = reglist->count; + + for (i = 0; i < reglist->count; i++) { + *dest++ = reglist->regs[i]; + *dest++ = gpu_read(gpu, reglist->regs[i]); + } + } + + /* + * The overall register list is composed of + * 1. Static IFPC-only registers + * 2. Static IFPC + preemption registers + * 3. Dynamic IFPC + preemption registers (ex: perfcounter selects) + * + * The first two lists are static. Size of these lists are stored as + * number of pairs in ifpc_list_len and preemption_list_len + * respectively. With concurrent binning, Some of the perfcounter + * registers being virtualized, CP needs to know the pipe id to program + * the aperture inorder to restore the same. Thus, third list is a + * dynamic list with triplets as + * (<aperture, shifted 12 bits> <address> <data>), and the length is + * stored as number for triplets in dynamic_list_len. 
+ */ + dyn_pwrup_reglist = adreno_gpu->info->a6xx->dyn_pwrup_reglist; + if (!dyn_pwrup_reglist) + goto done; + + for (u32 pipe_id = PIPE_BR; pipe_id <= PIPE_DDE_BV; pipe_id++) { + for (i = 0; i < dyn_pwrup_reglist->count; i++) { + if (!(dyn_pwrup_reglist->regs[i].pipe & BIT(pipe_id))) + continue; + *dest++ = A8XX_CP_APERTURE_CNTL_HOST_PIPEID(pipe_id); + *dest++ = dyn_pwrup_reglist->regs[i].offset; + *dest++ = a8xx_read_pipe_slice(gpu, + pipe_id, + a8xx_get_first_slice(a6xx_gpu), + dyn_pwrup_reglist->regs[i].offset); + dyn_pwrup_reglist_count++; + } + } + + lock->dynamic_list_len = dyn_pwrup_reglist_count; + +done: + a8xx_aperture_clear(gpu); +} + +static int a8xx_preempt_start(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct msm_ringbuffer *ring = gpu->rb[0]; + + if (gpu->nr_rings <= 1) + return 0; + + /* Turn CP protection off */ + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); + OUT_RING(ring, 0); + + a6xx_emit_set_pseudo_reg(ring, a6xx_gpu, NULL); + + /* Yield the floor on command completion */ + OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4); + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x00); + /* Generate interrupt on preemption completion */ + OUT_RING(ring, 0x00); + + a6xx_flush(gpu, ring); + + return a8xx_idle(gpu, ring) ? 
0 : -EINVAL; +} + static int a8xx_cp_init(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); struct msm_ringbuffer *ring = gpu->rb[0]; u32 mask; @@ -395,7 +512,7 @@ static int a8xx_cp_init(struct msm_gpu *gpu) OUT_PKT7(ring, CP_THREAD_CONTROL, 1); OUT_RING(ring, BIT(27)); - OUT_PKT7(ring, CP_ME_INIT, 4); + OUT_PKT7(ring, CP_ME_INIT, 7); /* Use multiple HW contexts */ mask = BIT(0); @@ -409,6 +526,9 @@ static int a8xx_cp_init(struct msm_gpu *gpu) /* Disable save/restore of performance counters across preemption */ mask |= BIT(6); + /* Enable the register init list with the spinlock */ + mask |= BIT(8); + OUT_RING(ring, mask); /* Enable multiple hardware contexts */ @@ -420,6 +540,14 @@ static int a8xx_cp_init(struct msm_gpu *gpu) /* Operation mode mask */ OUT_RING(ring, 0x00000002); + /* Lo address */ + OUT_RING(ring, lower_32_bits(a6xx_gpu->pwrup_reglist_iova)); + /* Hi address */ + OUT_RING(ring, upper_32_bits(a6xx_gpu->pwrup_reglist_iova)); + + /* Enable dyn pwrup list with triplets (offset, value, pipe) */ + OUT_RING(ring, BIT(31)); + a6xx_flush(gpu, ring); return a8xx_idle(gpu, ring) ? 
0 : -EINVAL; } @@ -648,6 +776,8 @@ static int hw_init(struct msm_gpu *gpu) gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR, shadowptr(a6xx_gpu, gpu->rb[0])); gpu_write64(gpu, REG_A8XX_CP_RB_RPTR_ADDR_BV, rbmemptr(gpu->rb[0], bv_rptr)); + a8xx_preempt_hw_init(gpu); + for (i = 0; i < gpu->nr_rings; i++) a6xx_gpu->shadow[i] = 0; @@ -702,15 +832,29 @@ static int hw_init(struct msm_gpu *gpu) WARN_ON(!gmem_protect); a8xx_aperture_clear(gpu); + if (!a6xx_gpu->pwrup_reglist_emitted) { + a8xx_patch_pwrup_reglist(gpu); + a6xx_gpu->pwrup_reglist_emitted = true; + } + /* Enable hardware clockgating */ a8xx_set_hwcg(gpu, true); out: + /* Last step - yield the ringbuffer */ + a8xx_preempt_start(gpu); + /* * Tell the GMU that we are done touching the GPU and it can start power * management */ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); + if (!ret && (refcount_read(&gpu->sysprof_active) > 1)) { + ret = a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET); + if (!ret) + set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status); + } + return ret; } @@ -1108,11 +1252,11 @@ irqreturn_t a8xx_irq(struct msm_gpu *gpu) if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) { msm_gpu_retire(gpu); - a6xx_preempt_trigger(gpu); + a8xx_preempt_trigger(gpu); } if (status & A6XX_RBBM_INT_0_MASK_CP_SW) - a6xx_preempt_irq(gpu); + a8xx_preempt_irq(gpu); return IRQ_HANDLED; } @@ -1174,23 +1318,19 @@ void a8xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); } -int a8xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value) +u64 a8xx_gmu_get_timestamp(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + u64 count_hi, count_lo, temp; - mutex_lock(&a6xx_gpu->gmu.lock); - - /* Force the GPU power on so we can read this register */ - a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); - - *value = gpu_read64(gpu, REG_A8XX_CP_ALWAYS_ON_COUNTER); - - 
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); - - mutex_unlock(&a6xx_gpu->gmu.lock); + do { + count_hi = gmu_read(&a6xx_gpu->gmu, REG_A8XX_GMU_ALWAYS_ON_COUNTER_H); + count_lo = gmu_read(&a6xx_gpu->gmu, REG_A8XX_GMU_ALWAYS_ON_COUNTER_L); + temp = gmu_read(&a6xx_gpu->gmu, REG_A8XX_GMU_ALWAYS_ON_COUNTER_H); + } while (unlikely(count_hi != temp)); - return 0; + return (count_hi << 32) | count_lo; } u64 a8xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate) diff --git a/drivers/gpu/drm/msm/adreno/a8xx_preempt.c b/drivers/gpu/drm/msm/adreno/a8xx_preempt.c new file mode 100644 index 000000000000..3d8c33ba722e --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a8xx_preempt.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ + +#include "msm_gem.h" +#include "a6xx_gpu.h" +#include "a6xx_gmu.xml.h" +#include "a6xx_preempt.h" +#include "msm_mmu.h" +#include "msm_gpu_trace.h" + +static void preempt_prepare_postamble(struct a6xx_gpu *a6xx_gpu) +{ + u32 *postamble = a6xx_gpu->preempt_postamble_ptr; + u32 count = 0; + + postamble[count++] = PKT7(CP_REG_RMW, 3); + postamble[count++] = REG_A8XX_RBBM_PERFCTR_SRAM_INIT_CMD; + postamble[count++] = 0; + postamble[count++] = 1; + + postamble[count++] = PKT7(CP_WAIT_REG_MEM, 6); + postamble[count++] = CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ); + postamble[count++] = CP_WAIT_REG_MEM_POLL_ADDR_LO( + REG_A8XX_RBBM_PERFCTR_SRAM_INIT_STATUS); + postamble[count++] = CP_WAIT_REG_MEM_POLL_ADDR_HI(0); + postamble[count++] = CP_WAIT_REG_MEM_3_REF(0x1); + postamble[count++] = CP_WAIT_REG_MEM_4_MASK(0x1); + postamble[count++] = CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0); + + a6xx_gpu->preempt_postamble_len = count; + + a6xx_gpu->postamble_enabled = true; +} + +static void preempt_disable_postamble(struct a6xx_gpu *a6xx_gpu) +{ + u32 *postamble = a6xx_gpu->preempt_postamble_ptr; + + /* + * Disable the postamble by replacing the first packet header with a NOP 
+ * that covers the whole buffer. + */ + *postamble = PKT7(CP_NOP, (a6xx_gpu->preempt_postamble_len - 1)); + + a6xx_gpu->postamble_enabled = false; +} + +/* + * Set preemption keepalive vote. Please note that this vote is different from the one used in + * a8xx_irq() + */ +static void a8xx_preempt_keepalive_vote(struct msm_gpu *gpu, bool on) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + gmu_write(&a6xx_gpu->gmu, REG_A8XX_GMU_PWR_COL_PREEMPT_KEEPALIVE, on); +} + +void a8xx_preempt_irq(struct msm_gpu *gpu) +{ + uint32_t status; + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct drm_device *dev = gpu->dev; + + if (!try_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING)) + return; + + /* Delete the preemption watchdog timer */ + timer_delete(&a6xx_gpu->preempt_timer); + + /* + * The hardware should be setting the stop bit of CP_CONTEXT_SWITCH_CNTL + * to zero before firing the interrupt, but there is a non zero chance + * of a hardware condition or a software race that could set it again + * before we have a chance to finish. If that happens, log and go for + * recovery + */ + status = gpu_read(gpu, REG_A8XX_CP_CONTEXT_SWITCH_CNTL); + if (unlikely(status & A8XX_CP_CONTEXT_SWITCH_CNTL_STOP)) { + DRM_DEV_ERROR(&gpu->pdev->dev, + "!!!!!!!!!!!!!!!! preemption faulted !!!!!!!!!!!!!! 
irq\n"); + set_preempt_state(a6xx_gpu, PREEMPT_FAULTED); + dev_err(dev->dev, "%s: Preemption failed to complete\n", + gpu->name); + kthread_queue_work(gpu->worker, &gpu->recover_work); + return; + } + + a6xx_gpu->cur_ring = a6xx_gpu->next_ring; + a6xx_gpu->next_ring = NULL; + + set_preempt_state(a6xx_gpu, PREEMPT_FINISH); + + update_wptr(a6xx_gpu, a6xx_gpu->cur_ring); + + set_preempt_state(a6xx_gpu, PREEMPT_NONE); + + a8xx_preempt_keepalive_vote(gpu, false); + + trace_msm_gpu_preemption_irq(a6xx_gpu->cur_ring->id); + + /* + * Retrigger preemption to avoid a deadlock that might occur when preemption + * is skipped due to it being already in flight when requested. + */ + a8xx_preempt_trigger(gpu); +} + +void a8xx_preempt_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + int i; + + /* No preemption if we only have one ring */ + if (gpu->nr_rings == 1) + return; + + for (i = 0; i < gpu->nr_rings; i++) { + struct a6xx_preempt_record *record_ptr = a6xx_gpu->preempt[i]; + + record_ptr->wptr = 0; + record_ptr->rptr = 0; + record_ptr->rptr_addr = shadowptr(a6xx_gpu, gpu->rb[i]); + record_ptr->info = 0; + record_ptr->data = 0; + record_ptr->rbase = gpu->rb[i]->iova; + } + + /* Write a 0 to signal that we aren't switching pagetables */ + gpu_write64(gpu, REG_A8XX_CP_CONTEXT_SWITCH_SMMU_INFO, 0); + + /* Enable the GMEM save/restore feature for preemption */ + gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE, 0x1); + + /* Reset the preemption state */ + set_preempt_state(a6xx_gpu, PREEMPT_NONE); + + spin_lock_init(&a6xx_gpu->eval_lock); + + /* Always come up on rb 0 */ + a6xx_gpu->cur_ring = gpu->rb[0]; +} + +void a8xx_preempt_trigger(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + unsigned long flags; + struct msm_ringbuffer *ring; + unsigned int cntl; + bool sysprof; + + if 
(gpu->nr_rings == 1) + return; + + /* + * Lock to make sure another thread attempting preemption doesn't skip it + * while we are still evaluating the next ring. This makes sure the other + * thread does start preemption if we abort it and avoids a soft lock. + */ + spin_lock_irqsave(&a6xx_gpu->eval_lock, flags); + + /* + * Try to start preemption by moving from NONE to START. If + * unsuccessful, a preemption is already in flight + */ + if (!try_preempt_state(a6xx_gpu, PREEMPT_NONE, PREEMPT_START)) { + spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags); + return; + } + + cntl = A8XX_CP_CONTEXT_SWITCH_CNTL_LEVEL(a6xx_gpu->preempt_level); + + if (a6xx_gpu->skip_save_restore) + cntl |= A8XX_CP_CONTEXT_SWITCH_CNTL_SKIP_SAVE_RESTORE; + + if (a6xx_gpu->uses_gmem) + cntl |= A8XX_CP_CONTEXT_SWITCH_CNTL_USES_GMEM; + + cntl |= A8XX_CP_CONTEXT_SWITCH_CNTL_STOP; + + /* Get the next ring to preempt to */ + ring = get_next_ring(gpu); + + /* + * If no ring is populated or the highest priority ring is the current + * one do nothing except to update the wptr to the latest and greatest + */ + if (!ring || (a6xx_gpu->cur_ring == ring)) { + set_preempt_state(a6xx_gpu, PREEMPT_FINISH); + update_wptr(a6xx_gpu, a6xx_gpu->cur_ring); + set_preempt_state(a6xx_gpu, PREEMPT_NONE); + spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags); + return; + } + + spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags); + + spin_lock_irqsave(&ring->preempt_lock, flags); + + struct a7xx_cp_smmu_info *smmu_info_ptr = + a6xx_gpu->preempt_smmu[ring->id]; + struct a6xx_preempt_record *record_ptr = a6xx_gpu->preempt[ring->id]; + u64 ttbr0 = ring->memptrs->ttbr0; + u32 context_idr = ring->memptrs->context_idr; + + smmu_info_ptr->ttbr0 = ttbr0; + smmu_info_ptr->context_idr = context_idr; + record_ptr->wptr = get_wptr(ring); + + /* + * The GPU will write the wptr we set above when we preempt. Reset + * restore_wptr to make sure that we don't write WPTR to the same + * thing twice. 
It's still possible subsequent submissions will update + * wptr again, in which case they will set the flag to true. This has + * to be protected by the lock for setting the flag and updating wptr + * to be atomic. + */ + ring->restore_wptr = false; + + trace_msm_gpu_preemption_trigger(a6xx_gpu->cur_ring->id, ring->id); + + spin_unlock_irqrestore(&ring->preempt_lock, flags); + + /* Set the keepalive bit to keep the GPU ON until preemption is complete */ + a8xx_preempt_keepalive_vote(gpu, true); + + a6xx_fenced_write(a6xx_gpu, + REG_A8XX_CP_CONTEXT_SWITCH_SMMU_INFO, a6xx_gpu->preempt_smmu_iova[ring->id], + BIT(1), true); + + a6xx_fenced_write(a6xx_gpu, + REG_A8XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR, + a6xx_gpu->preempt_iova[ring->id], BIT(1), true); + + a6xx_gpu->next_ring = ring; + + /* Start a timer to catch a stuck preemption */ + mod_timer(&a6xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000)); + + /* Enable or disable postamble as needed */ + sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1; + + if (!sysprof && !a6xx_gpu->postamble_enabled) + preempt_prepare_postamble(a6xx_gpu); + + if (sysprof && a6xx_gpu->postamble_enabled) + preempt_disable_postamble(a6xx_gpu); + + /* Set the preemption state to triggered */ + set_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED); + + /* Trigger the preemption */ + a6xx_fenced_write(a6xx_gpu, REG_A8XX_CP_CONTEXT_SWITCH_CNTL, cntl, BIT(1), false); +} + diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index d5fe6f6f0dec..66f80f2d12f9 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -45,8 +45,8 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname, return -EINVAL; } - np = of_get_child_by_name(dev->of_node, "zap-shader"); - if (!of_device_is_available(np)) { + np = of_get_available_child_by_name(dev->of_node, "zap-shader"); + if (!np) { zap_available = false; return -ENODEV; } @@ 
-391,13 +391,11 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx, return 0; case MSM_PARAM_TIMESTAMP: if (adreno_gpu->funcs->get_timestamp) { - int ret; - pm_runtime_get_sync(&gpu->pdev->dev); - ret = adreno_gpu->funcs->get_timestamp(gpu, value); + *value = adreno_gpu->funcs->get_timestamp(gpu); pm_runtime_put_autosuspend(&gpu->pdev->dev); - return ret; + return 0; } return -EINVAL; case MSM_PARAM_PRIORITIES: @@ -443,6 +441,10 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx, case MSM_PARAM_HAS_PRR: *value = adreno_smmu_has_prr(gpu); return 0; + case MSM_PARAM_AQE: + *value = !!(adreno_gpu->funcs->aqe_is_enabled && + adreno_gpu->funcs->aqe_is_enabled(adreno_gpu)); + return 0; default: return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param); } @@ -1184,7 +1186,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct msm_gpu_config adreno_gpu_config = { 0 }; struct msm_gpu *gpu = &adreno_gpu->base; const char *gpu_name; - u32 speedbin; int ret; adreno_gpu->funcs = funcs; @@ -1213,10 +1214,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, devm_pm_opp_set_clkname(dev, "core"); } - if (adreno_read_speedbin(dev, &speedbin) || !speedbin) - speedbin = 0xffff; - adreno_gpu->speedbin = (uint16_t) (0xffff & speedbin); - gpu_name = devm_kasprintf(dev, GFP_KERNEL, "%"ADRENO_CHIPID_FMT, ADRENO_CHIPID_ARGS(config->chip_id)); if (!gpu_name) diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 1d0145f8b3ec..ec643b84646b 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -63,25 +63,24 @@ enum adreno_family { #define ADRENO_QUIRK_PREEMPTION BIT(5) #define ADRENO_QUIRK_4GB_VA BIT(6) #define ADRENO_QUIRK_IFPC BIT(7) +#define ADRENO_QUIRK_SOFTFUSE BIT(8) /* Helper for formating the chip_id in the way that userspace tools like * crashdec expect. 
*/ -#define ADRENO_CHIPID_FMT "u.%u.%u.%u" -#define ADRENO_CHIPID_ARGS(_c) \ - (((_c) >> 24) & 0xff), \ - (((_c) >> 16) & 0xff), \ - (((_c) >> 8) & 0xff), \ - ((_c) & 0xff) +#define ADRENO_CHIPID_FMT "08x" +#define ADRENO_CHIPID_ARGS(_c) (_c) struct adreno_gpu; struct adreno_gpu_funcs { struct msm_gpu_funcs base; struct msm_gpu *(*init)(struct drm_device *dev); - int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value); + u64 (*get_timestamp)(struct msm_gpu *gpu); void (*bus_halt)(struct adreno_gpu *adreno_gpu, bool gx_off); int (*mmu_fault_handler)(void *arg, unsigned long iova, int flags, void *data); + bool (*gx_is_on)(struct adreno_gpu *adreno_gpu); + bool (*aqe_is_enabled)(struct adreno_gpu *adreno_gpu); }; struct adreno_reglist { diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h index 56d3c38c8778..2eef5f69832f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h @@ -322,7 +322,6 @@ static const struct dpu_wb_cfg sm8650_wb[] = { .format_list = wb2_formats_rgb_yuv, .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 4096, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -378,7 +377,7 @@ static const struct dpu_intf_cfg sm8650_intf[] = { .name = "intf_3", .id = INTF_3, .base = 0x37000, .len = 0x280, .type = INTF_DP, - .controller_id = MSM_DP_CONTROLLER_1, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), @@ -445,8 +444,7 @@ const struct dpu_mdss_cfg dpu_sm8650_cfg = { .cwb = sm8650_cwb, .intf_count = ARRAY_SIZE(sm8650_intf), .intf = sm8650_intf, - .vbif_count = ARRAY_SIZE(sm8650_vbif), - .vbif = sm8650_vbif, + .vbif = &sm8650_vbif, .perf = &sm8650_perf_data, }; diff --git 
a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h index db8cc2d0112c..3889eb8a5428 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h @@ -364,7 +364,6 @@ static const struct dpu_wb_cfg sm8750_wb[] = { .format_list = wb2_formats_rgb_yuv, .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 4096, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -420,7 +419,7 @@ static const struct dpu_intf_cfg sm8750_intf[] = { .name = "intf_3", .id = INTF_3, .base = 0x37000, .len = 0x4bc, .type = INTF_DP, - .controller_id = MSM_DP_CONTROLLER_1, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), @@ -486,8 +485,7 @@ const struct dpu_mdss_cfg dpu_sm8750_cfg = { .cwb = sm8650_cwb, .intf_count = ARRAY_SIZE(sm8750_intf), .intf = sm8750_intf, - .vbif_count = ARRAY_SIZE(sm8650_vbif), - .vbif = sm8650_vbif, + .vbif = &sm8650_vbif, .perf = &sm8750_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_2_glymur.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_2_glymur.h index 13bb43ba67d3..4eb4a0e9803e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_2_glymur.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_2_glymur.h @@ -371,7 +371,6 @@ static const struct dpu_wb_cfg glymur_wb[] = { .format_list = wb2_formats_rgb_yuv, .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 4096, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -426,7 +425,7 @@ static const struct dpu_intf_cfg glymur_intf[] = { }, { .name = "intf_3", .id = INTF_3, .base = 0x37000, .len = 0x400, - .type = INTF_NONE, + .type = INTF_DP, .controller_id = MSM_DP_CONTROLLER_0, /* pair with 
intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), @@ -458,7 +457,7 @@ static const struct dpu_intf_cfg glymur_intf[] = { }, { .name = "intf_7", .id = INTF_7, .base = 0x3b000, .len = 0x400, - .type = INTF_NONE, + .type = INTF_DP, .controller_id = MSM_DP_CONTROLLER_2, /* pair with intf_6 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18), @@ -466,7 +465,7 @@ static const struct dpu_intf_cfg glymur_intf[] = { }, { .name = "intf_8", .id = INTF_8, .base = 0x3c000, .len = 0x400, - .type = INTF_NONE, + .type = INTF_DP, .controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), @@ -533,8 +532,7 @@ const struct dpu_mdss_cfg dpu_glymur_cfg = { .cwb = sm8650_cwb, .intf_count = ARRAY_SIZE(glymur_intf), .intf = glymur_intf, - .vbif_count = ARRAY_SIZE(sm8650_vbif), - .vbif = sm8650_vbif, + .vbif = &sm8650_vbif, .perf = &glymur_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_4_eliza.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_4_eliza.h new file mode 100644 index 000000000000..b93d32888972 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_4_eliza.h @@ -0,0 +1,363 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _DPU_12_4_ELIZA_H +#define _DPU_12_4_ELIZA_H + +static const struct dpu_caps eliza_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0xb, + .has_src_split = true, + .has_dim_layer = true, + .has_idle_pc = true, + .has_3d_merge = true, + .max_linewidth = 8192, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, +}; + +static const struct dpu_mdp_cfg eliza_mdp = { + .name = "top_0", + .base = 0, .len = 0x494, + .clk_ctrls = { + [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 }, + }, +}; + +static const struct dpu_ctl_cfg eliza_ctl[] = { + { + .name = "ctl_0", .id = 
CTL_0, + .base = 0x15000, .len = 0x1000, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x16000, .len = 0x1000, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x17000, .len = 0x1000, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11), + }, { + .name = "ctl_3", .id = CTL_3, + .base = 0x18000, .len = 0x1000, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12), + }, +}; + +static const struct dpu_sspp_cfg eliza_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x344, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_4, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + }, { + .name = "sspp_1", .id = SSPP_VIG1, + .base = 0x6000, .len = 0x344, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_4, + .xin_id = 4, + .type = SSPP_TYPE_VIG, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x344, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 1, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_9", .id = SSPP_DMA1, + .base = 0x26000, .len = 0x344, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 5, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_10", .id = SSPP_DMA2, + .base = 0x28000, .len = 0x344, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 9, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_11", .id = SSPP_DMA3, + .base = 0x2a000, .len = 0x344, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 13, + .type = SSPP_TYPE_DMA, + }, +}; + +static const struct dpu_lm_cfg eliza_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x400, + .features = MIXER_MSM8998_MASK, + .sblk = &sm8750_lm_sblk, + .lm_pair = LM_1, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, { + .name = "lm_1", .id = LM_1, + .base = 0x45000, .len = 0x400, + .features = MIXER_MSM8998_MASK, + .sblk = 
&sm8750_lm_sblk, + .lm_pair = LM_0, + .pingpong = PINGPONG_1, + .dspp = DSPP_1, + }, { + .name = "lm_2", .id = LM_2, + .base = 0x46000, .len = 0x400, + .features = MIXER_MSM8998_MASK, + .sblk = &sm8750_lm_sblk, + .lm_pair = LM_3, + .pingpong = PINGPONG_2, + .dspp = DSPP_2, + }, { + .name = "lm_3", .id = LM_3, + .base = 0x47000, .len = 0x400, + .features = MIXER_MSM8998_MASK, + .sblk = &sm8750_lm_sblk, + .lm_pair = LM_2, + .pingpong = PINGPONG_3, + }, +}; + +static const struct dpu_dspp_cfg eliza_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .sblk = &sm8750_dspp_sblk, + }, { + .name = "dspp_1", .id = DSPP_1, + .base = 0x56000, .len = 0x1800, + .sblk = &sm8750_dspp_sblk, + }, { + .name = "dspp_2", .id = DSPP_2, + .base = 0x58000, .len = 0x1800, + .sblk = &sm8750_dspp_sblk, + }, +}; + +static const struct dpu_pingpong_cfg eliza_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x69000, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_0, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + }, { + .name = "pingpong_1", .id = PINGPONG_1, + .base = 0x6a000, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_0, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + }, { + .name = "pingpong_2", .id = PINGPONG_2, + .base = 0x6b000, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_1, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), + }, { + .name = "pingpong_3", .id = PINGPONG_3, + .base = 0x6c000, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_1, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), + }, { + .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0, + .base = 0x66000, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_2, + }, { + .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1, + .base = 0x66400, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_2, + }, { + .name = "pingpong_cwb_2", .id = PINGPONG_CWB_2, + .base = 0x7e000, .len = 0, + .sblk = 
&sc7280_pp_sblk, + .merge_3d = MERGE_3D_3, + }, { + .name = "pingpong_cwb_3", .id = PINGPONG_CWB_3, + .base = 0x7e400, .len = 0, + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_3, + }, +}; + +static const struct dpu_merge_3d_cfg eliza_merge_3d[] = { + { + .name = "merge_3d_0", .id = MERGE_3D_0, + .base = 0x4e000, .len = 0x1c, + }, { + .name = "merge_3d_1", .id = MERGE_3D_1, + .base = 0x4f000, .len = 0x1c, + }, { + .name = "merge_3d_2", .id = MERGE_3D_2, + .base = 0x66700, .len = 0x1c, + }, { + .name = "merge_3d_3", .id = MERGE_3D_3, + .base = 0x7e700, .len = 0x1c, + }, +}; + +/* + * NOTE: Each display compression engine (DCE) contains dual hard + * slice DSC encoders so both share same base address but with + * its own different sub block address. + */ +static const struct dpu_dsc_cfg eliza_dsc[] = { + { + .name = "dce_0_0", .id = DSC_0, + .base = 0x80000, .len = 0x8, + .features = BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &sm8750_dsc_sblk_0, + }, { + .name = "dce_0_1", .id = DSC_1, + .base = 0x80000, .len = 0x8, + .features = BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &sm8750_dsc_sblk_1, + }, { + .name = "dce_1_0", .id = DSC_2, + .base = 0x81000, .len = 0x8, + .features = BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &sm8750_dsc_sblk_0, + }, +}; + +static const struct dpu_wb_cfg eliza_wb[] = { + { + .name = "wb_2", .id = WB_2, + .base = 0x65000, .len = 0x2c8, + .features = WB_SDM845_MASK, + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), + .xin_id = 6, + .maxlinewidth = 4096, + .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), + }, +}; + +static const struct dpu_cwb_cfg eliza_cwb[] = { + { + .name = "cwb_0", .id = CWB_0, + .base = 0x66200, .len = 0x20, + }, + { + .name = "cwb_1", .id = CWB_1, + .base = 0x66600, .len = 0x20, + }, + { + .name = "cwb_2", .id = CWB_2, + .base = 0x7e200, .len = 0x20, + }, + { + .name = "cwb_3", .id = CWB_3, + .base = 0x7e600, .len = 0x20, + }, +}; + +static const struct dpu_intf_cfg eliza_intf[] = { + { + .name 
= "intf_0", .id = INTF_0, + .base = 0x34000, .len = 0x4bc, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_0, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), + }, { + .name = "intf_1", .id = INTF_1, + .base = 0x35000, .len = 0x4bc, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2), + }, { + .name = "intf_2", .id = INTF_2, + .base = 0x36000, .len = 0x4bc, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_1, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), + .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2), + }, { + .name = "intf_3", .id = INTF_3, + .base = 0x37000, .len = 0x4bc, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), + } +}; + +static const struct dpu_perf_cfg eliza_perf_data = { + .max_bw_low = 6800000, + .max_bw_high = 14200000, + .min_core_ib = 2500000, + .min_llcc_ib = 0, + .min_dram_ib = 1600000, + .min_prefill_lines = 35, + .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0}, + .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(sc7180_qos_linear), + .entries = sc7180_qos_linear + }, + {.nentry = ARRAY_SIZE(sc7180_qos_macrotile), + .entries = sc7180_qos_macrotile + }, + {.nentry = ARRAY_SIZE(sc7180_qos_nrt), + .entries = sc7180_qos_nrt + }, + /* TODO: macrotile-qseed is different from macrotile */ + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + 
.clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version eliza_mdss_ver = { + .core_major_ver = 12, + .core_minor_ver = 4, +}; + +const struct dpu_mdss_cfg dpu_eliza_cfg = { + .mdss_ver = &eliza_mdss_ver, + .caps = &eliza_dpu_caps, + .mdp = &eliza_mdp, + .cdm = &dpu_cdm_5_x, + .ctl_count = ARRAY_SIZE(eliza_ctl), + .ctl = eliza_ctl, + .sspp_count = ARRAY_SIZE(eliza_sspp), + .sspp = eliza_sspp, + .mixer_count = ARRAY_SIZE(eliza_lm), + .mixer = eliza_lm, + .dspp_count = ARRAY_SIZE(eliza_dspp), + .dspp = eliza_dspp, + .pingpong_count = ARRAY_SIZE(eliza_pp), + .pingpong = eliza_pp, + .dsc_count = ARRAY_SIZE(eliza_dsc), + .dsc = eliza_dsc, + .merge_3d_count = ARRAY_SIZE(eliza_merge_3d), + .merge_3d = eliza_merge_3d, + .wb_count = ARRAY_SIZE(eliza_wb), + .wb = eliza_wb, + .cwb_count = ARRAY_SIZE(eliza_cwb), + .cwb = eliza_cwb, + .intf_count = ARRAY_SIZE(eliza_intf), + .intf = eliza_intf, + .vbif = &sm8650_vbif, + .perf = &eliza_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_13_0_kaanapali.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_13_0_kaanapali.h index 0b20401b04cf..b7b06e45b529 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_13_0_kaanapali.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_13_0_kaanapali.h @@ -362,7 +362,6 @@ static const struct dpu_wb_cfg kaanapali_wb[] = { .format_list = wb2_formats_rgb_yuv, .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 4096, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -418,7 +417,7 @@ static const struct dpu_intf_cfg kaanapali_intf[] = { .name = "intf_3", .id = INTF_3, .base = 0x190000, .len = 0x4bc, .type = INTF_DP, - .controller_id = MSM_DP_CONTROLLER_1, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), @@ 
-484,8 +483,7 @@ const struct dpu_mdss_cfg dpu_kaanapali_cfg = { .cwb = sm8650_cwb, .intf_count = ARRAY_SIZE(kaanapali_intf), .intf = kaanapali_intf, - .vbif_count = ARRAY_SIZE(sm8650_vbif), - .vbif = sm8650_vbif, + .vbif = &sm8650_vbif, .perf = &kaanapali_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h index 29e0eba91930..4ff7b397f808 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h @@ -197,8 +197,7 @@ const struct dpu_mdss_cfg dpu_msm8937_cfg = { .pingpong = msm8937_pp, .intf_count = ARRAY_SIZE(msm8937_intf), .intf = msm8937_intf, - .vbif_count = ARRAY_SIZE(msm8996_vbif), - .vbif = msm8996_vbif, + .vbif = &msm8996_vbif, .perf = &msm8937_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h index cb1ee4b63f9f..1518c3d39ce8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h @@ -176,8 +176,7 @@ const struct dpu_mdss_cfg dpu_msm8917_cfg = { .pingpong = msm8917_pp, .intf_count = ARRAY_SIZE(msm8917_intf), .intf = msm8917_intf, - .vbif_count = ARRAY_SIZE(msm8996_vbif), - .vbif = msm8996_vbif, + .vbif = &msm8996_vbif, .perf = &msm8917_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h index b44d02b48418..859a97e8c07e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h @@ -121,13 +121,6 @@ static const struct dpu_dspp_cfg msm8953_dspp[] = { static const struct dpu_intf_cfg msm8953_intf[] = { { - .name = "intf_0", .id = INTF_0, - .base = 0x6a000, .len = 0x268, - .type = INTF_NONE, - .prog_fetch_lines_worst_case = 14, - .intr_underrun = 
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), - .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), - }, { .name = "intf_1", .id = INTF_1, .base = 0x6a800, .len = 0x268, .type = INTF_DSI, @@ -204,8 +197,7 @@ const struct dpu_mdss_cfg dpu_msm8953_cfg = { .pingpong = msm8953_pp, .intf_count = ARRAY_SIZE(msm8953_intf), .intf = msm8953_intf, - .vbif_count = ARRAY_SIZE(msm8996_vbif), - .vbif = msm8996_vbif, + .vbif = &msm8996_vbif, .perf = &msm8953_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h index 8af63db315b4..67910a2f6880 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h @@ -320,8 +320,7 @@ const struct dpu_mdss_cfg dpu_msm8996_cfg = { .dsc = msm8996_dsc, .intf_count = ARRAY_SIZE(msm8996_intf), .intf = msm8996_intf, - .vbif_count = ARRAY_SIZE(msm8996_vbif), - .vbif = msm8996_vbif, + .vbif = &msm8996_vbif, .perf = &msm8996_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h index b1b03d8b30fa..67c1463d3bd6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h @@ -305,8 +305,7 @@ const struct dpu_mdss_cfg dpu_msm8998_cfg = { .dsc = msm8998_dsc, .intf_count = ARRAY_SIZE(msm8998_intf), .intf = msm8998_intf, - .vbif_count = ARRAY_SIZE(msm8998_vbif), - .vbif = msm8998_vbif, + .vbif = &msm8998_vbif, .perf = &msm8998_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h index 64df4e80ea43..84344029819f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h @@ -269,8 +269,7 @@ const struct dpu_mdss_cfg dpu_sdm660_cfg = { .dsc = sdm660_dsc, .intf_count = ARRAY_SIZE(sdm660_intf), .intf = 
sdm660_intf, - .vbif_count = ARRAY_SIZE(msm8998_vbif), - .vbif = msm8998_vbif, + .vbif = &msm8998_vbif, .perf = &sdm660_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h index b409af899918..ef5777aee587 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h @@ -207,8 +207,7 @@ const struct dpu_mdss_cfg dpu_sdm630_cfg = { .pingpong = sdm630_pp, .intf_count = ARRAY_SIZE(sdm630_intf), .intf = sdm630_intf, - .vbif_count = ARRAY_SIZE(msm8998_vbif), - .vbif = msm8998_vbif, + .vbif = &msm8998_vbif, .perf = &sdm630_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h index 5cc9f55d542b..b608d2bb6d48 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h @@ -258,7 +258,7 @@ static const struct dpu_intf_cfg sdm845_intf[] = { .name = "intf_3", .id = INTF_3, .base = 0x6b800, .len = 0x280, .type = INTF_DP, - .controller_id = MSM_DP_CONTROLLER_1, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), @@ -325,8 +325,7 @@ const struct dpu_mdss_cfg dpu_sdm845_cfg = { .dsc = sdm845_dsc, .intf_count = ARRAY_SIZE(sdm845_intf), .intf = sdm845_intf, - .vbif_count = ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .perf = &sdm845_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h index 0f5e9babdeea..54b4a83ee16e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h @@ -144,8 +144,7 @@ const struct dpu_mdss_cfg dpu_sdm670_cfg = { 
.dsc = sdm670_dsc, .intf_count = ARRAY_SIZE(sdm845_intf), .intf = sdm845_intf, - .vbif_count = ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .perf = &sdm845_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h index ae1b2ed96e9f..9729a65d3e3a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h @@ -280,7 +280,6 @@ static const struct dpu_wb_cfg sm8150_wb[] = { .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 4096, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -317,7 +316,7 @@ static const struct dpu_intf_cfg sm8150_intf[] = { .name = "intf_3", .id = INTF_3, .base = 0x6b800, .len = 0x280, .type = INTF_DP, - .controller_id = MSM_DP_CONTROLLER_1, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), @@ -381,8 +380,7 @@ const struct dpu_mdss_cfg dpu_sm8150_cfg = { .wb = sm8150_wb, .intf_count = ARRAY_SIZE(sm8150_intf), .intf = sm8150_intf, - .vbif_count = ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .perf = &sm8150_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h index b572cfa7ed35..fb18de029e80 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h @@ -286,7 +286,6 @@ static const struct dpu_wb_cfg sc8180x_wb[] = { .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 4096, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -405,8 +404,7 @@ const struct 
dpu_mdss_cfg dpu_sc8180x_cfg = { .wb = sc8180x_wb, .intf_count = ARRAY_SIZE(sc8180x_intf), .intf = sc8180x_intf, - .vbif_count = ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .perf = &sc8180x_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h index a56c288ac10c..92a614686cb6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h @@ -230,7 +230,7 @@ static const struct dpu_intf_cfg sm7150_intf[] = { .name = "intf_3", .id = INTF_3, .base = 0x6b800, .len = 0x280, .type = INTF_DP, - .controller_id = MSM_DP_CONTROLLER_1, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), @@ -246,7 +246,6 @@ static const struct dpu_wb_cfg sm7150_wb[] = { .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 4096, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -309,8 +308,7 @@ const struct dpu_mdss_cfg dpu_sm7150_cfg = { .intf = sm7150_intf, .wb_count = ARRAY_SIZE(sm7150_wb), .wb = sm7150_wb, - .vbif_count = ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .perf = &sm7150_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h index 26883f6b66b3..87488d3ed95f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h @@ -158,7 +158,6 @@ static const struct dpu_wb_cfg sm6150_wb[] = { .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 2160, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -186,7 +185,7 
@@ static const struct dpu_intf_cfg sm6150_intf[] = { .name = "intf_3", .id = INTF_3, .base = 0x6b800, .len = 0x280, .type = INTF_DP, - .controller_id = MSM_DP_CONTROLLER_1, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), @@ -246,8 +245,7 @@ const struct dpu_mdss_cfg dpu_sm6150_cfg = { .wb = sm6150_wb, .intf_count = ARRAY_SIZE(sm6150_intf), .intf = sm6150_intf, - .vbif_count = ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .perf = &sm6150_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h index fbf50f279e66..64be51e30159 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h @@ -137,7 +137,6 @@ static const struct dpu_wb_cfg sm6125_wb[] = { .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 2160, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -217,8 +216,7 @@ const struct dpu_mdss_cfg dpu_sm6125_cfg = { .wb = sm6125_wb, .intf_count = ARRAY_SIZE(sm6125_intf), .intf = sm6125_intf, - .vbif_count = ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .perf = &sm6125_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h index 7b8b7a1c2d76..0e311d54ef18 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h @@ -301,7 +301,7 @@ static const struct dpu_intf_cfg sm8250_intf[] = { .name = "intf_3", .id = INTF_3, .base = 0x6b800, .len = 0x280, .type = INTF_DP, - .controller_id = MSM_DP_CONTROLLER_1, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP 
MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), @@ -317,7 +317,6 @@ static const struct dpu_wb_cfg sm8250_wb[] = { .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 4096, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -378,8 +377,7 @@ const struct dpu_mdss_cfg dpu_sm8250_cfg = { .merge_3d = sm8250_merge_3d, .intf_count = ARRAY_SIZE(sm8250_intf), .intf = sm8250_intf, - .vbif_count = ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .wb_count = ARRAY_SIZE(sm8250_wb), .wb = sm8250_wb, .perf = &sm8250_perf_data, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h index c990ba3b5db0..d6f7ee24ca93 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h @@ -153,7 +153,6 @@ static const struct dpu_wb_cfg sc7180_wb[] = { .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 4096, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -211,8 +210,7 @@ const struct dpu_mdss_cfg dpu_sc7180_cfg = { .intf = sc7180_intf, .wb_count = ARRAY_SIZE(sc7180_wb), .wb = sc7180_wb, - .vbif_count = ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .perf = &sc7180_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h index 343ff5482382..20a2e9ff5cc9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h @@ -144,8 +144,7 @@ const struct dpu_mdss_cfg dpu_sm6115_cfg = { .pingpong = sm6115_pp, .intf_count = ARRAY_SIZE(sm6115_intf), .intf = sm6115_intf, - .vbif_count = 
ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .perf = &sm6115_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h index 093d16bdc450..dd891703e35f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h @@ -147,7 +147,6 @@ static const struct dpu_wb_cfg sm6350_wb[] = { .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 1920, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -229,8 +228,7 @@ const struct dpu_mdss_cfg dpu_sm6350_cfg = { .wb = sm6350_wb, .intf_count = ARRAY_SIZE(sm6350_intf), .intf = sm6350_intf, - .vbif_count = ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .perf = &sm6350_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h index 47053bf9b0a2..7ae7530aa3b0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h @@ -137,8 +137,7 @@ const struct dpu_mdss_cfg dpu_qcm2290_cfg = { .pingpong = qcm2290_pp, .intf_count = ARRAY_SIZE(qcm2290_intf), .intf = qcm2290_intf, - .vbif_count = ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .perf = &qcm2290_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h index 98190ee7ec7a..fc7ceac859be 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h @@ -155,8 +155,7 @@ const struct dpu_mdss_cfg dpu_sm6375_cfg = { .pingpong = sm6375_pp, .intf_count = ARRAY_SIZE(sm6375_intf), .intf = sm6375_intf, - .vbif_count = ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .perf = 
&sm6375_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h index 85aae40c210f..d06c14f05faf 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h @@ -290,7 +290,6 @@ static const struct dpu_wb_cfg sm8350_wb[] = { .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 4096, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -327,7 +326,7 @@ static const struct dpu_intf_cfg sm8350_intf[] = { .name = "intf_3", .id = INTF_3, .base = 0x37000, .len = 0x280, .type = INTF_DP, - .controller_id = MSM_DP_CONTROLLER_1, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), @@ -392,8 +391,7 @@ const struct dpu_mdss_cfg dpu_sm8350_cfg = { .wb = sm8350_wb, .intf_count = ARRAY_SIZE(sm8350_intf), .intf = sm8350_intf, - .vbif_count = ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .perf = &sm8350_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h index 2f8688224f34..99b8a890fddc 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h @@ -172,7 +172,6 @@ static const struct dpu_wb_cfg sc7280_wb[] = { .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 4096, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -263,8 +262,7 @@ const struct dpu_mdss_cfg dpu_sc7280_cfg = { .wb = sc7280_wb, .intf_count = ARRAY_SIZE(sc7280_intf), .intf = sc7280_intf, - .vbif_count = ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = 
&sdm845_vbif, .perf = &sc7280_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h index 9f2bceca1789..0ba907711d36 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h @@ -288,7 +288,6 @@ static const struct dpu_dsc_cfg sc8280xp_dsc[] = { }, }; -/* TODO: INTF 3, 8 and 7 are used for MST, marked as INTF_NONE for now */ static const struct dpu_intf_cfg sc8280xp_intf[] = { { .name = "intf_0", .id = INTF_0, @@ -319,8 +318,8 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = { }, { .name = "intf_3", .id = INTF_3, .base = 0x37000, .len = 0x280, - .type = INTF_NONE, - .controller_id = MSM_DP_CONTROLLER_0, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), @@ -351,16 +350,16 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = { }, { .name = "intf_7", .id = INTF_7, .base = 0x3b000, .len = 0x280, - .type = INTF_NONE, - .controller_id = MSM_DP_CONTROLLER_2, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_2, /* pair with intf_6 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19), }, { .name = "intf_8", .id = INTF_8, .base = 0x3c000, .len = 0x280, - .type = INTF_NONE, - .controller_id = MSM_DP_CONTROLLER_1, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_8 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), @@ -421,8 +420,7 @@ const struct dpu_mdss_cfg dpu_sc8280xp_cfg = { .merge_3d = sc8280xp_merge_3d, .intf_count = ARRAY_SIZE(sc8280xp_intf), .intf = sc8280xp_intf, - .vbif_count = 
ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .perf = &sc8280xp_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h index 04b22167f93d..170a709f987c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h @@ -303,7 +303,6 @@ static const struct dpu_wb_cfg sm8450_wb[] = { .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 4096, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -340,7 +339,7 @@ static const struct dpu_intf_cfg sm8450_intf[] = { .name = "intf_3", .id = INTF_3, .base = 0x37000, .len = 0x280, .type = INTF_DP, - .controller_id = MSM_DP_CONTROLLER_1, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), @@ -405,8 +404,7 @@ const struct dpu_mdss_cfg dpu_sm8450_cfg = { .wb = sm8450_wb, .intf_count = ARRAY_SIZE(sm8450_intf), .intf = sm8450_intf, - .vbif_count = ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .perf = &sm8450_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h index 42cf3bd5a12a..9cc5e22e1228 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h @@ -310,13 +310,11 @@ static const struct dpu_wb_cfg sa8775p_wb[] = { .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 4096, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, }; -/* TODO: INTF 3, 6, 7 and 8 are used for MST, marked as INTF_NONE for now */ static const struct dpu_intf_cfg sa8775p_intf[] = { 
{ .name = "intf_0", .id = INTF_0, @@ -347,7 +345,7 @@ static const struct dpu_intf_cfg sa8775p_intf[] = { }, { .name = "intf_3", .id = INTF_3, .base = 0x37000, .len = 0x280, - .type = INTF_NONE, + .type = INTF_DP, .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), @@ -363,7 +361,7 @@ static const struct dpu_intf_cfg sa8775p_intf[] = { }, { .name = "intf_6", .id = INTF_6, .base = 0x3A000, .len = 0x280, - .type = INTF_NONE, + .type = INTF_DP, .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16), @@ -371,7 +369,7 @@ static const struct dpu_intf_cfg sa8775p_intf[] = { }, { .name = "intf_7", .id = INTF_7, .base = 0x3b000, .len = 0x280, - .type = INTF_NONE, + .type = INTF_DP, .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18), @@ -379,7 +377,7 @@ static const struct dpu_intf_cfg sa8775p_intf[] = { }, { .name = "intf_8", .id = INTF_8, .base = 0x3c000, .len = 0x280, - .type = INTF_NONE, + .type = INTF_DP, .controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), @@ -445,8 +443,7 @@ const struct dpu_mdss_cfg dpu_sa8775p_cfg = { .wb = sa8775p_wb, .intf_count = ARRAY_SIZE(sa8775p_intf), .intf = sa8775p_intf, - .vbif_count = ARRAY_SIZE(sdm845_vbif), - .vbif = sdm845_vbif, + .vbif = &sdm845_vbif, .perf = &sa8775p_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h index 4c7eb55d474c..b3d0f221807b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h @@ -298,7 +298,6 @@ static const struct 
dpu_wb_cfg sm8550_wb[] = { .format_list = wb2_formats_rgb_yuv, .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 4096, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -335,7 +334,7 @@ static const struct dpu_intf_cfg sm8550_intf[] = { .name = "intf_3", .id = INTF_3, .base = 0x37000, .len = 0x280, .type = INTF_DP, - .controller_id = MSM_DP_CONTROLLER_1, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), @@ -400,8 +399,7 @@ const struct dpu_mdss_cfg dpu_sm8550_cfg = { .wb = sm8550_wb, .intf_count = ARRAY_SIZE(sm8550_intf), .intf = sm8550_intf, - .vbif_count = ARRAY_SIZE(sm8550_vbif), - .vbif = sm8550_vbif, + .vbif = &sm8550_vbif, .perf = &sm8550_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h index dec83ea8167d..7effe7120a56 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h @@ -298,7 +298,6 @@ static const struct dpu_wb_cfg sar2130p_wb[] = { .format_list = wb2_formats_rgb_yuv, .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 4096, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, @@ -335,7 +334,7 @@ static const struct dpu_intf_cfg sar2130p_intf[] = { .name = "intf_3", .id = INTF_3, .base = 0x37000, .len = 0x280, .type = INTF_DP, - .controller_id = MSM_DP_CONTROLLER_1, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), @@ -400,8 +399,7 @@ const struct dpu_mdss_cfg dpu_sar2130p_cfg = { .wb = sar2130p_wb, .intf_count = ARRAY_SIZE(sar2130p_intf), .intf = 
sar2130p_intf, - .vbif_count = ARRAY_SIZE(sm8550_vbif), - .vbif = sm8550_vbif, + .vbif = &sm8550_vbif, .perf = &sar2130p_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h index 52ff4baa668a..7b3febf8742d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h @@ -298,13 +298,11 @@ static const struct dpu_wb_cfg x1e80100_wb[] = { .format_list = wb2_formats_rgb_yuv, .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .xin_id = 6, - .vbif_idx = VBIF_RT, .maxlinewidth = 4096, .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), }, }; -/* TODO: INTF 3, 8 and 7 are used for MST, marked as INTF_NONE for now */ static const struct dpu_intf_cfg x1e80100_intf[] = { { .name = "intf_0", .id = INTF_0, @@ -335,7 +333,7 @@ static const struct dpu_intf_cfg x1e80100_intf[] = { }, { .name = "intf_3", .id = INTF_3, .base = 0x37000, .len = 0x280, - .type = INTF_NONE, + .type = INTF_DP, .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), @@ -367,7 +365,7 @@ static const struct dpu_intf_cfg x1e80100_intf[] = { }, { .name = "intf_7", .id = INTF_7, .base = 0x3b000, .len = 0x280, - .type = INTF_NONE, + .type = INTF_DP, .controller_id = MSM_DP_CONTROLLER_2, /* pair with intf_6 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18), @@ -375,7 +373,7 @@ static const struct dpu_intf_cfg x1e80100_intf[] = { }, { .name = "intf_8", .id = INTF_8, .base = 0x3c000, .len = 0x280, - .type = INTF_NONE, + .type = INTF_DP, .controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */ .prog_fetch_lines_worst_case = 24, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), @@ -441,8 +439,7 @@ const struct dpu_mdss_cfg dpu_x1e80100_cfg = { .wb = x1e80100_wb, .intf_count = 
ARRAY_SIZE(x1e80100_intf), .intf = x1e80100_intf, - .vbif_count = ARRAY_SIZE(sm8550_vbif), - .vbif = sm8550_vbif, + .vbif = &sm8550_vbif, .perf = &x1e80100_perf_data, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 0f4921b1a892..411a6fa832b5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -326,43 +326,39 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, { struct dpu_hw_mixer *lm = mixer->hw_lm; u32 blend_op; - u32 fg_alpha, bg_alpha, max_alpha; + u32 fg_alpha, bg_alpha; - if (mdss_ver->core_major_ver < 12) { - max_alpha = 0xff; - fg_alpha = pstate->base.alpha >> 8; - } else { - max_alpha = 0x3ff; - fg_alpha = pstate->base.alpha >> 6; - } - bg_alpha = max_alpha - fg_alpha; + fg_alpha = pstate->base.alpha; /* default to opaque blending */ if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE || !format->alpha_enable) { blend_op = DPU_BLEND_FG_ALPHA_FG_CONST | DPU_BLEND_BG_ALPHA_BG_CONST; + bg_alpha = DRM_BLEND_ALPHA_OPAQUE - fg_alpha; } else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) { blend_op = DPU_BLEND_FG_ALPHA_FG_CONST | DPU_BLEND_BG_ALPHA_FG_PIXEL; - if (fg_alpha != max_alpha) { + if (fg_alpha != DRM_BLEND_ALPHA_OPAQUE) { bg_alpha = fg_alpha; blend_op |= DPU_BLEND_BG_MOD_ALPHA | DPU_BLEND_BG_INV_MOD_ALPHA; } else { + bg_alpha = 0; blend_op |= DPU_BLEND_BG_INV_ALPHA; } } else { /* coverage blending */ blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL | DPU_BLEND_BG_ALPHA_FG_PIXEL; - if (fg_alpha != max_alpha) { + if (fg_alpha != DRM_BLEND_ALPHA_OPAQUE) { bg_alpha = fg_alpha; blend_op |= DPU_BLEND_FG_MOD_ALPHA | DPU_BLEND_FG_INV_MOD_ALPHA | DPU_BLEND_BG_MOD_ALPHA | DPU_BLEND_BG_INV_MOD_ALPHA; } else { + bg_alpha = 0; blend_op |= DPU_BLEND_BG_INV_ALPHA; } } @@ -1325,7 +1321,7 @@ static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate) return false; } -static int dpu_crtc_reassign_planes(struct drm_crtc 
*crtc, struct drm_crtc_state *crtc_state) +static int dpu_crtc_assign_planes(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) { int total_planes = crtc->dev->mode_config.num_total_plane; struct drm_atomic_state *state = crtc_state->state; @@ -1338,8 +1334,6 @@ static int dpu_crtc_reassign_planes(struct drm_crtc *crtc, struct drm_crtc_state if (IS_ERR(global_state)) return PTR_ERR(global_state); - dpu_rm_release_all_sspp(global_state, crtc); - if (!crtc_state->enable) return 0; @@ -1366,6 +1360,19 @@ done: return ret; } +static int dpu_crtc_reassign_planes(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) +{ + struct dpu_global_state *global_state; + + global_state = dpu_kms_get_global_state(crtc_state->state); + if (IS_ERR(global_state)) + return PTR_ERR(global_state); + + dpu_rm_release_all_sspp(global_state, crtc); + + return dpu_crtc_assign_planes(crtc, crtc_state); +} + #define MAX_CHANNELS_PER_CRTC PIPES_PER_PLANE #define MAX_HDISPLAY_SPLIT 1080 @@ -1410,7 +1417,8 @@ static struct msm_display_topology dpu_crtc_get_topology( topology.num_lm = 2; else if (topology.num_dsc == 2) topology.num_lm = 2; - else if (dpu_kms->catalog->caps->has_3d_merge) + else if (dpu_kms->catalog->caps->has_3d_merge && + topology.num_dsc == 0) topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 
2 : 1; else topology.num_lm = 1; @@ -1534,9 +1542,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, return rc; } - if (dpu_use_virtual_planes && - (crtc_state->planes_changed || crtc_state->zpos_changed)) { - rc = dpu_crtc_reassign_planes(crtc, crtc_state); + if (crtc_state->planes_changed || crtc_state->zpos_changed) { + if (dpu_use_virtual_planes) + rc = dpu_crtc_reassign_planes(crtc, crtc_state); + else + rc = dpu_crtc_assign_planes(crtc, crtc_state); if (rc < 0) return rc; } @@ -1657,6 +1667,17 @@ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en) return 0; } +/** + * dpu_crtc_get_num_lm - Get mixer number in this CRTC pipeline + * @state: Pointer to drm crtc state object + */ +unsigned int dpu_crtc_get_num_lm(const struct drm_crtc_state *state) +{ + struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); + + return cstate->num_mixers; +} + #ifdef CONFIG_DEBUG_FS static int _dpu_debugfs_status_show(struct seq_file *s, void *data) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h index 94392b9b9245..6eaba5696e8e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h @@ -267,4 +267,6 @@ static inline enum dpu_crtc_client_type dpu_crtc_get_client_type( void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event); +unsigned int dpu_crtc_get_num_lm(const struct drm_crtc_state *state); + #endif /* _DPU_CRTC_H_ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 93db1484f606..45079ee59cf6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -257,6 +257,12 @@ static int dpu_encoder_phys_cmd_control_vblank_irq( if (!dpu_encoder_phys_cmd_is_master(phys_enc)) goto end; + /* IRQ not yet initialized */ + if (!phys_enc->irq[INTR_IDX_RDPTR]) { + ret = -EINVAL; + goto end; + } + /* protect against negative */ if 
(!enable && refcount == 0) { ret = -EINVAL; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index 0ba777bda253..ba810f26ea30 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -10,6 +10,7 @@ #include "dpu_formats.h" #include "dpu_trace.h" #include "disp/msm_disp_snapshot.h" +#include "msm_dsc_helper.h" #include <drm/display/drm_dsc_helper.h> #include <drm/drm_managed.h> @@ -136,6 +137,7 @@ static void drm_mode_to_intf_timing_params( timing->width = timing->width * drm_dsc_get_bpp_int(dsc) / (dsc->bits_per_component * 3); timing->xres = timing->width; + timing->dce_bytes_per_line = msm_dsc_get_bytes_per_line(dsc); } } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c index 6d28f2281c76..22433bfbea1e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c @@ -70,7 +70,7 @@ static void dpu_encoder_phys_wb_set_ot_limit( ot_params.height = phys_enc->cached_mode.vdisplay; ot_params.is_wfd = !dpu_encoder_helper_get_cwb_mask(phys_enc); ot_params.frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode); - ot_params.vbif_idx = hw_wb->caps->vbif_idx; + /* XXX: WB on MSM8996 should use VBIF_NRT */ ot_params.rd = false; if (!_dpu_encoder_phys_wb_clk_force_ctrl(hw_wb, phys_enc->dpu_kms->hw_mdp, @@ -108,14 +108,13 @@ static void dpu_encoder_phys_wb_set_qos_remap( hw_wb = phys_enc->hw_wb; memset(&qos_params, 0, sizeof(qos_params)); - qos_params.vbif_idx = hw_wb->caps->vbif_idx; + /* XXX: WB on MSM8996 should use VBIF_NRT */ qos_params.xin_id = hw_wb->caps->xin_id; qos_params.num = hw_wb->idx - WB_0; qos_params.is_rt = dpu_encoder_helper_get_cwb_mask(phys_enc); - DPU_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d is_rt:%d\n", + DPU_DEBUG("[qos_remap] wb:%d xin:%d is_rt:%d\n", qos_params.num, - 
qos_params.vbif_idx, qos_params.xin_id, qos_params.is_rt); if (!_dpu_encoder_phys_wb_clk_force_ctrl(hw_wb, phys_enc->dpu_kms->hw_mdp, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index c4e1f6b7345d..bb4fd5fa4b22 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -513,10 +513,8 @@ static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = { }, }; -static const struct dpu_vbif_cfg msm8996_vbif[] = { - { - .name = "vbif_rt", .id = VBIF_RT, - .base = 0, .len = 0x1040, +static const struct dpu_vbif_cfg msm8996_vbif = { + .len = 0x1040, .default_ot_rd_limit = 32, .default_ot_wr_limit = 16, .features = BIT(DPU_VBIF_QOS_REMAP) | BIT(DPU_VBIF_QOS_OTLIM), @@ -538,13 +536,10 @@ static const struct dpu_vbif_cfg msm8996_vbif[] = { .npriority_lvl = ARRAY_SIZE(msm8998_nrt_pri_lvl), .priority_lvl = msm8998_nrt_pri_lvl, }, - }, }; -static const struct dpu_vbif_cfg msm8998_vbif[] = { - { - .name = "vbif_rt", .id = VBIF_RT, - .base = 0, .len = 0x1040, +static const struct dpu_vbif_cfg msm8998_vbif = { + .len = 0x1040, .default_ot_rd_limit = 32, .default_ot_wr_limit = 32, .features = BIT(DPU_VBIF_QOS_REMAP) | BIT(DPU_VBIF_QOS_OTLIM), @@ -568,13 +563,10 @@ static const struct dpu_vbif_cfg msm8998_vbif[] = { }, .memtype_count = 14, .memtype = {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}, - }, }; -static const struct dpu_vbif_cfg sdm845_vbif[] = { - { - .name = "vbif_rt", .id = VBIF_RT, - .base = 0, .len = 0x1040, +static const struct dpu_vbif_cfg sdm845_vbif = { + .len = 0x1040, .features = BIT(DPU_VBIF_QOS_REMAP), .xin_halt_timeout = 0x4000, .qos_rp_remap_size = 0x40, @@ -588,13 +580,10 @@ static const struct dpu_vbif_cfg sdm845_vbif[] = { }, .memtype_count = 14, .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}, - }, }; -static const struct dpu_vbif_cfg sm8550_vbif[] = { - { - .name = "vbif_rt", .id = VBIF_RT, - .base = 0, .len = 0x1040, +static 
const struct dpu_vbif_cfg sm8550_vbif = { + .len = 0x1040, .features = BIT(DPU_VBIF_QOS_REMAP), .xin_halt_timeout = 0x4000, .qos_rp_remap_size = 0x40, @@ -608,13 +597,10 @@ static const struct dpu_vbif_cfg sm8550_vbif[] = { }, .memtype_count = 16, .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}, - }, }; -static const struct dpu_vbif_cfg sm8650_vbif[] = { - { - .name = "vbif_rt", .id = VBIF_RT, - .base = 0, .len = 0x1074, +static const struct dpu_vbif_cfg sm8650_vbif = { + .len = 0x1074, .features = BIT(DPU_VBIF_QOS_REMAP), .xin_halt_timeout = 0x4000, .qos_rp_remap_size = 0x40, @@ -628,7 +614,6 @@ static const struct dpu_vbif_cfg sm8650_vbif[] = { }, .memtype_count = 16, .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}, - }, }; /************************************************************* @@ -771,4 +756,5 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = { #include "catalog/dpu_10_0_sm8650.h" #include "catalog/dpu_12_0_sm8750.h" #include "catalog/dpu_12_2_glymur.h" +#include "catalog/dpu_12_4_eliza.h" #include "catalog/dpu_13_0_kaanapali.h" diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index 70d5ed4732f2..ba04ac24d5a9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -524,7 +524,6 @@ struct dpu_intf_cfg { /** * struct dpu_wb_cfg - information of writeback blocks * @DPU_HW_BLK_INFO: refer to the description above for DPU_HW_BLK_INFO - * @vbif_idx: vbif client index * @maxlinewidth: max line width supported by writeback block * @xin_id: bus client identifier * @intr_wb_done: interrupt index for WB_DONE @@ -535,7 +534,6 @@ struct dpu_intf_cfg { struct dpu_wb_cfg { DPU_HW_BLK_INFO; unsigned long features; - u8 vbif_idx; u32 maxlinewidth; u32 xin_id; unsigned int intr_wb_done; @@ -587,8 +585,7 @@ struct dpu_vbif_qos_tbl { /** * struct dpu_vbif_cfg - information of VBIF blocks - * @id enum identifying this block 
- * @base register offset of this block + * @len: length of hardware block * @features bit mask identifying sub-blocks/features * @ot_rd_limit default OT read limit * @ot_wr_limit default OT write limit @@ -602,7 +599,7 @@ struct dpu_vbif_qos_tbl { * @memtype array of xin memtype definitions */ struct dpu_vbif_cfg { - DPU_HW_BLK_INFO; + u32 len; unsigned long features; u32 default_ot_rd_limit; u32 default_ot_wr_limit; @@ -743,7 +740,6 @@ struct dpu_mdss_cfg { u32 intf_count; const struct dpu_intf_cfg *intf; - u32 vbif_count; const struct dpu_vbif_cfg *vbif; u32 wb_count; @@ -767,6 +763,7 @@ struct dpu_mdss_cfg { const struct dpu_format_extended *vig_formats; }; +extern const struct dpu_mdss_cfg dpu_eliza_cfg; extern const struct dpu_mdss_cfg dpu_glymur_cfg; extern const struct dpu_mdss_cfg dpu_kaanapali_cfg; extern const struct dpu_mdss_cfg dpu_msm8917_cfg; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c index 7e620f590984..ac82b69aedf6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c @@ -173,13 +173,29 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *intf, data_width = p->width; /* - * If widebus is enabled, data is valid for only half the active window - * since the data rate is doubled in this mode. But for the compression - * mode in DP case, the p->width is already adjusted in - * drm_mode_to_intf_timing_params() + * If widebus is disabled: + * For uncompressed stream, the data is valid for the entire active + * window period. + * For compressed stream, data is valid for a shorter time period + * inside the active window depending on the compression ratio. + * + * If widebus is enabled: + * For uncompressed stream, data is valid for only half the active + * window, since the data rate is doubled in this mode. + * For compressed stream, data validity window needs to be adjusted for + * compression ratio and then further halved. 
+ * + * For the compression mode in DP case, the p->width is already + * adjusted in drm_mode_to_intf_timing_params(). */ - if (p->wide_bus_en && !dp_intf) + if (p->compression_en && !dp_intf) { + if (p->wide_bus_en) + data_width = DIV_ROUND_UP(p->dce_bytes_per_line, 6); + else + data_width = DIV_ROUND_UP(p->dce_bytes_per_line, 3); + } else if (p->wide_bus_en && !dp_intf) { data_width = p->width >> 1; + } /* TODO: handle DSC+DP case, we only handle DSC+DSI case so far */ if (p->compression_en && !dp_intf && diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h index f6ef2c21b66d..badd26305fc9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h @@ -35,6 +35,7 @@ struct dpu_hw_intf_timing_params { bool wide_bus_en; bool compression_en; + u32 dce_bytes_per_line; }; struct dpu_hw_intf_prog_fetch { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c index e8a76d5192c2..b7779726bf10 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c @@ -126,7 +126,9 @@ static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value) } static void dpu_hw_lm_setup_blend_config_combined_alpha(struct dpu_hw_mixer *ctx, - u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op) + u32 stage, + u16 fg_alpha, u16 bg_alpha, + u32 blend_op) { struct dpu_hw_blk_reg_map *c = &ctx->hw; int stage_off; @@ -139,15 +141,16 @@ static void dpu_hw_lm_setup_blend_config_combined_alpha(struct dpu_hw_mixer *ctx if (WARN_ON(stage_off < 0)) return; - const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16); + const_alpha = (bg_alpha >> 8) | ((fg_alpha >> 8) << 16); DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha); DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op); } static void dpu_hw_lm_setup_blend_config_combined_alpha_v12(struct dpu_hw_mixer *ctx, - u32 stage, u32 fg_alpha, - u32 
bg_alpha, u32 blend_op) + u32 stage, + u16 fg_alpha, u16 bg_alpha, + u32 blend_op) { struct dpu_hw_blk_reg_map *c = &ctx->hw; int stage_off; @@ -160,13 +163,15 @@ dpu_hw_lm_setup_blend_config_combined_alpha_v12(struct dpu_hw_mixer *ctx, if (WARN_ON(stage_off < 0)) return; - const_alpha = (bg_alpha & 0x3ff) | ((fg_alpha & 0x3ff) << 16); + const_alpha = (bg_alpha >> 6) | ((fg_alpha >> 6) << 16); DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA_V12 + stage_off, const_alpha); DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op); } static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx, - u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op) + u32 stage, + u16 fg_alpha, u16 bg_alpha, + u32 blend_op) { struct dpu_hw_blk_reg_map *c = &ctx->hw; int stage_off; @@ -178,8 +183,8 @@ static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx, if (WARN_ON(stage_off < 0)) return; - DPU_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha); - DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha); + DPU_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha >> 8); + DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha >> 8); DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h index ecbb77711d83..380ca673f6de 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h @@ -41,7 +41,7 @@ struct dpu_hw_lm_ops { * for the specified stage */ void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage, - uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op); + u16 fg_alpha, u16 bg_alpha, uint32_t blend_op); /** * @setup_alpha_out: Alpha color component selection from either fg or bg diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h index 046b683d4c66..0e65bf5ddc4a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h +++ 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h @@ -284,12 +284,6 @@ enum dpu_wd_timer { WD_TIMER_MAX }; -enum dpu_vbif { - VBIF_RT, - VBIF_NRT, - VBIF_MAX, -}; - /** * enum dpu_3d_blend_mode * Desribes how the 3d data is blended diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c index af76ad8a8103..112df3f31e2b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c @@ -230,13 +230,12 @@ struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev, if (!c) return ERR_PTR(-ENOMEM); - c->hw.blk_addr = addr + cfg->base; + c->hw.blk_addr = addr; c->hw.log_mask = DPU_DBG_MASK_VBIF; /* * Assign ops */ - c->idx = cfg->id; c->cap = cfg; _setup_vbif_ops(&c->ops, c->cap->features); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h index 9ac49448e432..96ec4e35e549 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h @@ -98,7 +98,6 @@ struct dpu_hw_vbif { struct dpu_hw_blk_reg_map hw; /* vbif */ - enum dpu_vbif idx; const struct dpu_vbif_cfg *cap; /* ops */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 31743699d05f..35f7af4743d7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -52,7 +52,7 @@ #define DPU_DEBUGFS_DIR "msm_dpu" #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask" -bool dpu_use_virtual_planes; +bool dpu_use_virtual_planes = true; module_param(dpu_use_virtual_planes, bool, 0); static int dpu_kms_hw_init(struct msm_kms *kms); @@ -888,16 +888,12 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms) static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms) { - int i; - dpu_kms->hw_intr = NULL; /* safe to call these more than once during shutdown */ _dpu_kms_mmu_destroy(dpu_kms); - for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { - dpu_kms->hw_vbif[i] = 
NULL; - } + dpu_kms->hw_vbif = NULL; dpu_kms_global_obj_fini(dpu_kms); @@ -1061,13 +1057,11 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k dpu_kms->mmio + cat->cdm->base, "%s", cat->cdm->name); - for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { - const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i]; + const struct dpu_vbif_cfg *vbif = dpu_kms->catalog->vbif; - msm_disp_snapshot_add_block(disp_state, vbif->len, - dpu_kms->vbif[vbif->id] + vbif->base, - "%s", vbif->name); - } + msm_disp_snapshot_add_block(disp_state, vbif->len, + dpu_kms->vbif, + "vbif"); pm_runtime_put_sync(&dpu_kms->pdev->dev); } @@ -1145,7 +1139,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms) { struct dpu_kms *dpu_kms; struct drm_device *dev; - int i, rc = -EINVAL; + int rc = -EINVAL; unsigned long max_core_clk_rate; u32 core_rev; @@ -1220,20 +1214,18 @@ static int dpu_kms_hw_init(struct msm_kms *kms) goto err_pm_put; } - for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { - struct dpu_hw_vbif *hw; - const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i]; - - hw = dpu_hw_vbif_init(dev, vbif, dpu_kms->vbif[vbif->id]); - if (IS_ERR(hw)) { - rc = PTR_ERR(hw); - DPU_ERROR("failed to init vbif %d: %d\n", vbif->id, rc); - goto err_pm_put; - } + struct dpu_hw_vbif *hw; + const struct dpu_vbif_cfg *vbif = dpu_kms->catalog->vbif; - dpu_kms->hw_vbif[vbif->id] = hw; + hw = dpu_hw_vbif_init(dev, vbif, dpu_kms->vbif); + if (IS_ERR(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed to init vbif: %d\n", rc); + goto err_pm_put; } + dpu_kms->hw_vbif = hw; + /* TODO: use the same max_freq as in dpu_kms_hw_init */ max_core_clk_rate = dpu_kms_get_clk_rate(dpu_kms, "core"); if (!max_core_clk_rate) { @@ -1348,24 +1340,14 @@ static int dpu_kms_mmap_mdp5(struct dpu_kms *dpu_kms) } DRM_DEBUG("mapped dpu address space @%p\n", dpu_kms->mmio); - dpu_kms->vbif[VBIF_RT] = msm_ioremap_mdss(mdss_dev, - dpu_kms->pdev, - "vbif_phys"); - if (IS_ERR(dpu_kms->vbif[VBIF_RT])) { 
- ret = PTR_ERR(dpu_kms->vbif[VBIF_RT]); + dpu_kms->vbif = msm_ioremap_mdss(mdss_dev, dpu_kms->pdev, "vbif_phys"); + if (IS_ERR(dpu_kms->vbif)) { + ret = PTR_ERR(dpu_kms->vbif); DPU_ERROR("vbif register memory map failed: %d\n", ret); - dpu_kms->vbif[VBIF_RT] = NULL; + dpu_kms->vbif = NULL; return ret; } - dpu_kms->vbif[VBIF_NRT] = msm_ioremap_mdss(mdss_dev, - dpu_kms->pdev, - "vbif_nrt_phys"); - if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) { - dpu_kms->vbif[VBIF_NRT] = NULL; - DPU_DEBUG("VBIF NRT is not defined"); - } - return 0; } @@ -1383,20 +1365,14 @@ static int dpu_kms_mmap_dpu(struct dpu_kms *dpu_kms) } DRM_DEBUG("mapped dpu address space @%p\n", dpu_kms->mmio); - dpu_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif"); - if (IS_ERR(dpu_kms->vbif[VBIF_RT])) { - ret = PTR_ERR(dpu_kms->vbif[VBIF_RT]); + dpu_kms->vbif = msm_ioremap(pdev, "vbif"); + if (IS_ERR(dpu_kms->vbif)) { + ret = PTR_ERR(dpu_kms->vbif); DPU_ERROR("vbif register memory map failed: %d\n", ret); - dpu_kms->vbif[VBIF_RT] = NULL; + dpu_kms->vbif = NULL; return ret; } - dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(pdev, "vbif_nrt"); - if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) { - dpu_kms->vbif[VBIF_NRT] = NULL; - DPU_DEBUG("VBIF NRT is not defined"); - } - return 0; } @@ -1462,8 +1438,6 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev) struct msm_drm_private *priv = platform_get_drvdata(pdev); struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); - /* Drop the performance state vote */ - dev_pm_opp_set_rate(dev, 0); clk_bulk_disable_unprepare(dpu_kms->num_clocks, dpu_kms->clocks); for (i = 0; i < dpu_kms->num_paths; i++) @@ -1506,6 +1480,7 @@ static const struct dev_pm_ops dpu_pm_ops = { }; static const struct of_device_id dpu_dt_match[] = { + { .compatible = "qcom,eliza-dpu", .data = &dpu_eliza_cfg, }, { .compatible = "qcom,glymur-dpu", .data = &dpu_glymur_cfg, }, { .compatible = "qcom,kaanapali-dpu", .data = &dpu_kaanapali_cfg, }, { .compatible = "qcom,msm8917-mdp5", .data = &dpu_msm8917_cfg, }, 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index 993cf512f8c5..bb3393bd102e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -63,7 +63,7 @@ struct dpu_kms { const struct qcom_ubwc_cfg_data *mdss; /* io/register spaces: */ - void __iomem *mmio, *vbif[VBIF_MAX]; + void __iomem *mmio, *vbif; struct regulator *vdd; struct regulator *mmagic; @@ -81,7 +81,7 @@ struct dpu_kms { struct dpu_rm rm; - struct dpu_hw_vbif *hw_vbif[VBIF_MAX]; + struct dpu_hw_vbif *hw_vbif; struct dpu_hw_mdp *hw_mdp; bool has_danger_ctrl; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 547d084f2944..3c315d5805b8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -374,7 +374,6 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane, ot_params.height = drm_rect_height(&pipe_cfg->src_rect); ot_params.is_wfd = !pdpu->is_rt_pipe; ot_params.frame_rate = frame_rate; - ot_params.vbif_idx = VBIF_RT; ot_params.rd = true; if (!_dpu_plane_sspp_clk_force_ctrl(pipe->sspp, dpu_kms->hw_mdp, @@ -402,14 +401,12 @@ static void _dpu_plane_set_qos_remap(struct drm_plane *plane, bool forced_on = false; memset(&qos_params, 0, sizeof(qos_params)); - qos_params.vbif_idx = VBIF_RT; qos_params.xin_id = pipe->sspp->cap->xin_id; qos_params.num = pipe->sspp->idx - SSPP_VIG0; qos_params.is_rt = pdpu->is_rt_pipe; - DPU_DEBUG_PLANE(pdpu, "pipe:%d vbif:%d xin:%d rt:%d\n", + DPU_DEBUG_PLANE(pdpu, "pipe:%d xin:%d rt:%d\n", qos_params.num, - qos_params.vbif_idx, qos_params.xin_id, qos_params.is_rt); if (!_dpu_plane_sspp_clk_force_ctrl(pipe->sspp, dpu_kms->hw_mdp, @@ -821,13 +818,8 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane, { int i, ret = 0, min_scale, max_scale; struct dpu_plane *pdpu = to_dpu_plane(plane); - struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); - u64 max_mdp_clk_rate = 
kms->perf.max_core_clk_rate; struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); - struct dpu_sw_pipe_cfg *pipe_cfg; - struct dpu_sw_pipe_cfg *r_pipe_cfg; struct drm_rect fb_rect = { 0 }; - uint32_t max_linewidth; min_scale = FRAC_16_16(1, MAX_UPSCALE_RATIO); max_scale = MAX_DOWNSCALE_RATIO << 16; @@ -850,14 +842,6 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane, return -EINVAL; } - /* move the assignment here, to ease handling to another pairs later */ - pipe_cfg = &pstate->pipe_cfg[0]; - r_pipe_cfg = &pstate->pipe_cfg[1]; - /* state->src is 16.16, src_rect is not */ - drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src); - - pipe_cfg->dst_rect = new_plane_state->dst; - fb_rect.x2 = new_plane_state->fb->width; fb_rect.y2 = new_plane_state->fb->height; @@ -879,38 +863,125 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane, if (pstate->layout.plane_pitch[i] > DPU_SSPP_MAX_PITCH_SIZE) return -E2BIG; + pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state); + + return 0; +} + +static int dpu_plane_split(struct drm_plane *plane, + struct drm_plane_state *new_plane_state, + const struct drm_crtc_state *crtc_state) +{ + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); + u64 max_mdp_clk_rate = kms->perf.max_core_clk_rate; + struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); + struct dpu_sw_pipe_cfg *pipe_cfg; + struct dpu_sw_pipe_cfg *r_pipe_cfg; + const struct drm_display_mode *mode = &crtc_state->adjusted_mode; + uint32_t max_linewidth; + u32 num_lm; + int stage_id, num_stages; + max_linewidth = pdpu->catalog->caps->max_linewidth; - drm_rect_rotate(&pipe_cfg->src_rect, - new_plane_state->fb->width, new_plane_state->fb->height, - new_plane_state->rotation); + /* In non-virtual plane case, one mixer pair is always needed. 
*/ + num_lm = dpu_crtc_get_num_lm(crtc_state); + if (dpu_use_virtual_planes) + num_stages = (num_lm + 1) / 2; + else + num_stages = 1; - if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) || - _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) { - if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) { - DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", - DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth); - return -E2BIG; + /* + * For wide plane that exceeds SSPP rectangle constrain, it needed to + * be split and mapped to 2 rectangles with 1 config for 2:2:1. + * For 2 interfaces cases, such as dual DSI, 2:2:2 topology is needed. + * If the width or clock exceeds hardware limitation in every half of + * screen, 4:4:2 topology is needed and virtual plane feature should + * be enabled to map plane to more than 1 SSPP. 2 stage configs are + * needed to serve 2 mixer pairs in this 4:4:2 case. So both left/right + * half of plane splitting, and splitting within the half of screen is + * needed in quad-pipe case. Check dest rectangle left/right clipping + * and iterate mixer configs for this plane first, then check wide + * rectangle splitting in every half next. 
+ */ + for (stage_id = 0; stage_id < num_stages; stage_id++) { + struct drm_rect mixer_rect = { + .x1 = stage_id * mode->hdisplay / num_stages, + .y1 = 0, + .x2 = (stage_id + 1) * mode->hdisplay / num_stages, + .y2 = mode->vdisplay + }; + int cfg_idx = stage_id * PIPES_PER_STAGE; + + pipe_cfg = &pstate->pipe_cfg[cfg_idx]; + r_pipe_cfg = &pstate->pipe_cfg[cfg_idx + 1]; + + drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src); + + drm_rect_rotate(&pipe_cfg->src_rect, + new_plane_state->fb->width, new_plane_state->fb->height, + new_plane_state->rotation); + + pipe_cfg->dst_rect = new_plane_state->dst; + + DPU_DEBUG_PLANE(pdpu, "checking src " DRM_RECT_FMT + " vs clip window " DRM_RECT_FMT "\n", + DRM_RECT_ARG(&pipe_cfg->src_rect), + DRM_RECT_ARG(&mixer_rect)); + + /* + * If this plane does not fall into mixer rect, check next + * mixer rect. + */ + if (!drm_rect_clip_scaled(&pipe_cfg->src_rect, + &pipe_cfg->dst_rect, + &mixer_rect)) { + memset(pipe_cfg, 0, 2 * sizeof(struct dpu_sw_pipe_cfg)); + + continue; } - *r_pipe_cfg = *pipe_cfg; - pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1; - pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1; - r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2; - r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2; - } else { - memset(r_pipe_cfg, 0, sizeof(*r_pipe_cfg)); - } + pipe_cfg->dst_rect.x1 -= mixer_rect.x1; + pipe_cfg->dst_rect.x2 -= mixer_rect.x1; + + DPU_DEBUG_PLANE(pdpu, "Got clip src:" DRM_RECT_FMT " dst: " DRM_RECT_FMT "\n", + DRM_RECT_ARG(&pipe_cfg->src_rect), DRM_RECT_ARG(&pipe_cfg->dst_rect)); + + /* Split wide rect into 2 rect */ + if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) || + _dpu_plane_calc_clk(mode, pipe_cfg) > max_mdp_clk_rate) { + + if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) { + DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", + DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth); + return -E2BIG; + } + + 
memcpy(r_pipe_cfg, pipe_cfg, sizeof(struct dpu_sw_pipe_cfg)); + pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1; + pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1; + r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2; + r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2; + DPU_DEBUG_PLANE(pdpu, "Split wide plane into:" + DRM_RECT_FMT " and " DRM_RECT_FMT "\n", + DRM_RECT_ARG(&pipe_cfg->src_rect), + DRM_RECT_ARG(&r_pipe_cfg->src_rect)); + } else { + memset(r_pipe_cfg, 0, sizeof(struct dpu_sw_pipe_cfg)); + } - drm_rect_rotate_inv(&pipe_cfg->src_rect, - new_plane_state->fb->width, new_plane_state->fb->height, - new_plane_state->rotation); - if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) - drm_rect_rotate_inv(&r_pipe_cfg->src_rect, - new_plane_state->fb->width, new_plane_state->fb->height, + drm_rect_rotate_inv(&pipe_cfg->src_rect, + new_plane_state->fb->width, + new_plane_state->fb->height, new_plane_state->rotation); - pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state); + if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) + drm_rect_rotate_inv(&r_pipe_cfg->src_rect, + new_plane_state->fb->width, + new_plane_state->fb->height, + new_plane_state->rotation); + } return 0; } @@ -985,20 +1056,18 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane, drm_atomic_get_new_plane_state(state, plane); struct dpu_plane *pdpu = to_dpu_plane(plane); struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); - struct dpu_sw_pipe *pipe = &pstate->pipe[0]; - struct dpu_sw_pipe *r_pipe = &pstate->pipe[1]; - struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg[0]; - struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->pipe_cfg[1]; - int ret = 0; - ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, - &crtc_state->adjusted_mode, - new_plane_state); - if (ret) - return ret; + struct dpu_sw_pipe *pipe; + struct dpu_sw_pipe_cfg *pipe_cfg; + int ret = 0, i; - if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) 
{ - ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, + for (i = 0; i < PIPES_PER_PLANE; i++) { + pipe = &pstate->pipe[i]; + pipe_cfg = &pstate->pipe_cfg[i]; + if (!drm_rect_width(&pipe_cfg->src_rect)) + continue; + DPU_DEBUG_PLANE(pdpu, "pipe %d is in use, validate it\n", i); + ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, &crtc_state->adjusted_mode, new_plane_state); if (ret) @@ -1103,60 +1172,12 @@ static int dpu_plane_try_multirect_shared(struct dpu_plane_state *pstate, static int dpu_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) { - struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, - plane); - int ret = 0; - struct dpu_plane *pdpu = to_dpu_plane(plane); - struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); - struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); - struct dpu_sw_pipe *pipe = &pstate->pipe[0]; - struct dpu_sw_pipe *r_pipe = &pstate->pipe[1]; - struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg[0]; - struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->pipe_cfg[1]; - const struct drm_crtc_state *crtc_state = NULL; - uint32_t max_linewidth = dpu_kms->catalog->caps->max_linewidth; - - if (new_plane_state->crtc) - crtc_state = drm_atomic_get_new_crtc_state(state, - new_plane_state->crtc); - - pipe->sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe); - - if (!pipe->sspp) - return -EINVAL; - - ret = dpu_plane_atomic_check_nosspp(plane, new_plane_state, crtc_state); - if (ret) - return ret; - - if (!new_plane_state->visible) - return 0; - - if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg, - pipe->sspp, - msm_framebuffer_format(new_plane_state->fb), - max_linewidth)) { - DPU_DEBUG_PLANE(pdpu, "invalid " DRM_RECT_FMT " /" DRM_RECT_FMT - " max_line:%u, can't use split source\n", - DRM_RECT_ARG(&pipe_cfg->src_rect), - DRM_RECT_ARG(&r_pipe_cfg->src_rect), - max_linewidth); - return -E2BIG; - } - - return dpu_plane_atomic_check_sspp(plane, state, 
crtc_state); -} - -static int dpu_plane_virtual_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state) -{ struct drm_plane_state *plane_state = drm_atomic_get_plane_state(state, plane); struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); - struct dpu_plane_state *pstate = to_dpu_plane_state(plane_state); + int ret = 0; struct drm_crtc_state *crtc_state = NULL; - int ret, i; if (IS_ERR(plane_state)) return PTR_ERR(plane_state); @@ -1169,16 +1190,8 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane, if (ret) return ret; - if (!plane_state->visible) { - /* - * resources are freed by dpu_crtc_assign_plane_resources(), - * but clean them here. - */ - for (i = 0; i < PIPES_PER_PLANE; i++) - pstate->pipe[i].sspp = NULL; - + if (!plane_state->visible) return 0; - } /* * Force resource reallocation if the format of FB or src/dst have @@ -1193,7 +1206,6 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane, msm_framebuffer_format(old_plane_state->fb) != msm_framebuffer_format(plane_state->fb)) crtc_state->planes_changed = true; - return 0; } @@ -1240,9 +1252,9 @@ static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc, struct dpu_global_state *global_state, struct drm_atomic_state *state, struct drm_plane_state *plane_state, + const struct drm_crtc_state *crtc_state, struct drm_plane_state **prev_adjacent_plane_state) { - const struct drm_crtc_state *crtc_state = NULL; struct drm_plane *plane = plane_state->plane; struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); struct dpu_rm_sspp_requirements reqs; @@ -1252,10 +1264,6 @@ static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc, const struct msm_format *fmt; int i, ret; - if (plane_state->crtc) - crtc_state = drm_atomic_get_new_crtc_state(state, - plane_state->crtc); - pstate = to_dpu_plane_state(plane_state); for (i = 0; i < STAGES_PER_PLANE; i++) prev_adjacent_pstate[i] = prev_adjacent_plane_state[i] ? 
@@ -1267,6 +1275,10 @@ static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc, if (!plane_state->fb) return -EINVAL; + ret = dpu_plane_split(plane, plane_state, crtc_state); + if (ret) + return ret; + fmt = msm_framebuffer_format(plane_state->fb); reqs.yuv = MSM_FORMAT_IS_YUV(fmt); reqs.scale = (plane_state->src_w >> 16 != plane_state->crtc_w) || @@ -1297,14 +1309,55 @@ static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc, return dpu_plane_atomic_check_sspp(plane, state, crtc_state); } +static int dpu_plane_assign_resources(struct drm_crtc *crtc, + struct dpu_global_state *global_state, + struct drm_atomic_state *state, + struct drm_plane_state *plane_state, + const struct drm_crtc_state *crtc_state) +{ + struct drm_plane *plane = plane_state->plane; + struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); + struct dpu_plane_state *pstate = to_dpu_plane_state(plane_state); + struct dpu_sw_pipe *pipe = &pstate->pipe[0]; + struct dpu_sw_pipe *r_pipe = &pstate->pipe[1]; + struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg[0]; + struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->pipe_cfg[1]; + struct dpu_plane *pdpu = to_dpu_plane(plane); + int ret; + + pipe->sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe); + if (!pipe->sspp) + return -EINVAL; + + ret = dpu_plane_split(plane, plane_state, crtc_state); + if (ret) + return ret; + + if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg, + pipe->sspp, + msm_framebuffer_format(plane_state->fb), + dpu_kms->catalog->caps->max_linewidth)) { + DPU_DEBUG_PLANE(pdpu, "invalid " DRM_RECT_FMT " /" DRM_RECT_FMT + " max_line:%u, can't use split source\n", + DRM_RECT_ARG(&pipe_cfg->src_rect), + DRM_RECT_ARG(&r_pipe_cfg->src_rect), + dpu_kms->catalog->caps->max_linewidth); + return -E2BIG; + } + + return dpu_plane_atomic_check_sspp(plane, state, crtc_state); +} + int dpu_assign_plane_resources(struct dpu_global_state *global_state, struct drm_atomic_state *state, struct drm_crtc *crtc, struct 
drm_plane_state **states, unsigned int num_planes) { - unsigned int i; struct drm_plane_state *prev_adjacent_plane_state[STAGES_PER_PLANE] = { NULL }; + const struct drm_crtc_state *crtc_state = NULL; + unsigned int i; + int ret; for (i = 0; i < num_planes; i++) { struct drm_plane_state *plane_state = states[i]; @@ -1313,8 +1366,18 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state, !plane_state->visible) continue; - int ret = dpu_plane_virtual_assign_resources(crtc, global_state, + if (plane_state->crtc) + crtc_state = drm_atomic_get_new_crtc_state(state, + plane_state->crtc); + + if (!dpu_use_virtual_planes) + ret = dpu_plane_assign_resources(crtc, global_state, + state, plane_state, + crtc_state); + else + ret = dpu_plane_virtual_assign_resources(crtc, global_state, state, plane_state, + crtc_state, prev_adjacent_plane_state); if (ret) return ret; @@ -1751,7 +1814,7 @@ static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = { static const struct drm_plane_helper_funcs dpu_plane_virtual_helper_funcs = { .prepare_fb = dpu_plane_prepare_fb, .cleanup_fb = dpu_plane_cleanup_fb, - .atomic_check = dpu_plane_virtual_atomic_check, + .atomic_check = dpu_plane_atomic_check, .atomic_update = dpu_plane_atomic_update, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h index cb24ad2a6d8d..805d117493da 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h @@ -72,23 +72,20 @@ TRACE_EVENT(dpu_perf_set_danger_luts, ); TRACE_EVENT(dpu_perf_set_ot, - TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx), - TP_ARGS(pnum, xin_id, rd_lim, vbif_idx), + TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim), + TP_ARGS(pnum, xin_id, rd_lim), TP_STRUCT__entry( __field(u32, pnum) __field(u32, xin_id) __field(u32, rd_lim) - __field(u32, vbif_idx) ), TP_fast_assign( __entry->pnum = pnum; __entry->xin_id = xin_id; __entry->rd_lim = rd_lim; - __entry->vbif_idx = vbif_idx; 
), - TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d", - __entry->pnum, __entry->xin_id, __entry->rd_lim, - __entry->vbif_idx) + TP_printk("pnum:%d xin_id:%d ot:%d", + __entry->pnum, __entry->xin_id, __entry->rd_lim) ) TRACE_EVENT(dpu_cmd_release_bw, @@ -861,17 +858,15 @@ TRACE_EVENT(dpu_rm_reserve_lms, ); TRACE_EVENT(dpu_vbif_wait_xin_halt_fail, - TP_PROTO(enum dpu_vbif index, u32 xin_id), - TP_ARGS(index, xin_id), + TP_PROTO(u32 xin_id), + TP_ARGS(xin_id), TP_STRUCT__entry( - __field( enum dpu_vbif, index ) __field( u32, xin_id ) ), TP_fast_assign( - __entry->index = index; __entry->xin_id = xin_id; ), - TP_printk("index:%d xin_id:%u", __entry->index, __entry->xin_id) + TP_printk("xin_id:%u", __entry->xin_id) ); TRACE_EVENT(dpu_pp_connect_ext_te, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c index 2a551e455aa3..0c6fa9bb0cb6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c @@ -11,26 +11,6 @@ #include "dpu_hw_vbif.h" #include "dpu_trace.h" -static struct dpu_hw_vbif *dpu_get_vbif(struct dpu_kms *dpu_kms, enum dpu_vbif vbif_idx) -{ - if (vbif_idx < ARRAY_SIZE(dpu_kms->hw_vbif)) - return dpu_kms->hw_vbif[vbif_idx]; - - return NULL; -} - -static const char *dpu_vbif_name(enum dpu_vbif idx) -{ - switch (idx) { - case VBIF_RT: - return "VBIF_RT"; - case VBIF_NRT: - return "VBIF_NRT"; - default: - return "??"; - } -} - /** * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt * @vbif: Pointer to hardware vbif driver @@ -62,12 +42,10 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id) if (!status) { rc = -ETIMEDOUT; - DPU_ERROR("%s client %d not halting. TIMEDOUT.\n", - dpu_vbif_name(vbif->idx), xin_id); + DPU_ERROR("VBIF client %d not halting. 
TIMEDOUT.\n", xin_id); } else { rc = 0; - DRM_DEBUG_ATOMIC("%s client %d is halted\n", - dpu_vbif_name(vbif->idx), xin_id); + DRM_DEBUG_ATOMIC("VBIF client %d is halted\n", xin_id); } return rc; @@ -107,10 +85,10 @@ static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif, } } - DRM_DEBUG_ATOMIC("%s xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n", - dpu_vbif_name(vbif->idx), params->xin_id, - params->width, params->height, params->frame_rate, - pps, *ot_lim); + DRM_DEBUG_ATOMIC("VBIF xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n", + params->xin_id, + params->width, params->height, params->frame_rate, + pps, *ot_lim); } /** @@ -153,8 +131,7 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif, } exit: - DRM_DEBUG_ATOMIC("%s xin:%d ot_lim:%d\n", - dpu_vbif_name(vbif->idx), params->xin_id, ot_lim); + DRM_DEBUG_ATOMIC("VBIF xin:%d ot_lim:%d\n", params->xin_id, ot_lim); return ot_lim; } @@ -172,7 +149,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, u32 ot_lim; int ret; - vbif = dpu_get_vbif(dpu_kms, params->vbif_idx); + vbif = dpu_kms->hw_vbif; if (!vbif) { DRM_DEBUG_ATOMIC("invalid arguments vbif %d\n", vbif != NULL); return; @@ -190,8 +167,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, if (ot_lim == 0) return; - trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim, - params->vbif_idx); + trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim); vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim); @@ -199,7 +175,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id); if (ret) - trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id); + trace_dpu_vbif_wait_xin_halt_fail(params->xin_id); vbif->ops.set_halt_ctrl(vbif, params->xin_id, false); } @@ -221,10 +197,10 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, return; } - vbif = dpu_get_vbif(dpu_kms, params->vbif_idx); + vbif = dpu_kms->hw_vbif; if (!vbif || !vbif->cap) { - DPU_ERROR("invalid vbif 
%d\n", params->vbif_idx); + DPU_ERROR("invalid vbif\n"); return; } @@ -242,8 +218,8 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, } for (i = 0; i < qos_tbl->npriority_lvl; i++) { - DRM_DEBUG_ATOMIC("%s xin:%d lvl:%d/%d\n", - dpu_vbif_name(params->vbif_idx), params->xin_id, i, + DRM_DEBUG_ATOMIC("VBIF xin:%d lvl:%d/%d\n", + params->xin_id, i, qos_tbl->priority_lvl[i]); vbif->ops.set_qos_remap(vbif, params->xin_id, i, qos_tbl->priority_lvl[i]); @@ -257,16 +233,13 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms) { struct dpu_hw_vbif *vbif; - u32 i, pnd, src; - - for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { - vbif = dpu_kms->hw_vbif[i]; - if (vbif && vbif->ops.clear_errors) { - vbif->ops.clear_errors(vbif, &pnd, &src); - if (pnd || src) { - DRM_DEBUG_KMS("%s: pnd 0x%X, src 0x%X\n", - dpu_vbif_name(vbif->idx), pnd, src); - } + u32 pnd, src; + + vbif = dpu_kms->hw_vbif; + if (vbif && vbif->ops.clear_errors) { + vbif->ops.clear_errors(vbif, &pnd, &src); + if (pnd || src) { + DRM_DEBUG_KMS("VBIF: pnd 0x%X, src 0x%X\n", pnd, src); } } } @@ -278,15 +251,12 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms) void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms) { struct dpu_hw_vbif *vbif; - int i, j; - - for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { - vbif = dpu_kms->hw_vbif[i]; - if (vbif && vbif->cap && vbif->ops.set_mem_type) { - for (j = 0; j < vbif->cap->memtype_count; j++) - vbif->ops.set_mem_type( - vbif, j, vbif->cap->memtype[j]); - } + int j; + + vbif = dpu_kms->hw_vbif; + if (vbif && vbif->cap && vbif->ops.set_mem_type) { + for (j = 0; j < vbif->cap->memtype_count; j++) + vbif->ops.set_mem_type(vbif, j, vbif->cap->memtype[j]); } } @@ -294,58 +264,51 @@ void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms) void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root) { + const struct dpu_vbif_cfg *vbif = dpu_kms->catalog->vbif; char vbif_name[32]; - struct dentry 
*entry, *debugfs_vbif; - int i, j; - - entry = debugfs_create_dir("vbif", debugfs_root); - - for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { - const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i]; + struct dentry *debugfs_vbif; + int j; - snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id); + debugfs_vbif = debugfs_create_dir("vbif", debugfs_root); - debugfs_vbif = debugfs_create_dir(vbif_name, entry); + debugfs_create_u32("features", 0600, debugfs_vbif, + (u32 *)&vbif->features); - debugfs_create_u32("features", 0600, debugfs_vbif, - (u32 *)&vbif->features); + debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif, + (u32 *)&vbif->xin_halt_timeout); - debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif, - (u32 *)&vbif->xin_halt_timeout); + debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif, + (u32 *)&vbif->default_ot_rd_limit); - debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif, - (u32 *)&vbif->default_ot_rd_limit); + debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif, + (u32 *)&vbif->default_ot_wr_limit); - debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif, - (u32 *)&vbif->default_ot_wr_limit); + for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) { + const struct dpu_vbif_dynamic_ot_cfg *cfg = + &vbif->dynamic_ot_rd_tbl.cfg[j]; - for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) { - const struct dpu_vbif_dynamic_ot_cfg *cfg = - &vbif->dynamic_ot_rd_tbl.cfg[j]; - - snprintf(vbif_name, sizeof(vbif_name), - "dynamic_ot_rd_%d_pps", j); - debugfs_create_u64(vbif_name, 0400, debugfs_vbif, - (u64 *)&cfg->pps); - snprintf(vbif_name, sizeof(vbif_name), - "dynamic_ot_rd_%d_ot_limit", j); - debugfs_create_u32(vbif_name, 0400, debugfs_vbif, - (u32 *)&cfg->ot_limit); - } + snprintf(vbif_name, sizeof(vbif_name), + "dynamic_ot_rd_%d_pps", j); + debugfs_create_u64(vbif_name, 0400, debugfs_vbif, + (u64 *)&cfg->pps); + snprintf(vbif_name, sizeof(vbif_name), + "dynamic_ot_rd_%d_ot_limit", j); + 
debugfs_create_u32(vbif_name, 0400, debugfs_vbif, + (u32 *)&cfg->ot_limit); + } - for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) { - const struct dpu_vbif_dynamic_ot_cfg *cfg = - &vbif->dynamic_ot_wr_tbl.cfg[j]; - - snprintf(vbif_name, sizeof(vbif_name), - "dynamic_ot_wr_%d_pps", j); - debugfs_create_u64(vbif_name, 0400, debugfs_vbif, - (u64 *)&cfg->pps); - snprintf(vbif_name, sizeof(vbif_name), - "dynamic_ot_wr_%d_ot_limit", j); - debugfs_create_u32(vbif_name, 0400, debugfs_vbif, - (u32 *)&cfg->ot_limit); - } + for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) { + const struct dpu_vbif_dynamic_ot_cfg *cfg = + &vbif->dynamic_ot_wr_tbl.cfg[j]; + + snprintf(vbif_name, sizeof(vbif_name), + "dynamic_ot_wr_%d_pps", j); + debugfs_create_u64(vbif_name, 0400, debugfs_vbif, + (u64 *)&cfg->pps); + snprintf(vbif_name, sizeof(vbif_name), + "dynamic_ot_wr_%d_ot_limit", j); + debugfs_create_u32(vbif_name, 0400, debugfs_vbif, + (u32 *)&cfg->ot_limit); } } #endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h index 62e47ae1e3ee..f47a89cb34ea 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h @@ -15,24 +15,20 @@ struct dpu_vbif_set_ot_params { u32 frame_rate; bool rd; bool is_wfd; - u32 vbif_idx; }; struct dpu_vbif_set_memtype_params { u32 xin_id; - u32 vbif_idx; bool is_cacheable; }; /** * struct dpu_vbif_set_qos_params - QoS remapper parameter - * @vbif_idx: vbif identifier * @xin_id: client interface identifier * @num: pipe identifier (debug only) * @is_rt: true if pipe is used in real-time use case */ struct dpu_vbif_set_qos_params { - u32 vbif_idx; u32 xin_id; u32 num; bool is_rt; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c index 69fef034d0df..7c91fc1915f3 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c @@ -14,95 +14,6 @@ struct mdp5_cfg_handler { /* mdp5_cfg must be 
exposed (used in mdp5.xml.h) */ const struct mdp5_cfg_hw *mdp5_cfg = NULL; -static const struct mdp5_cfg_hw msm8x74v1_config = { - .name = "msm8x74v1", - .mdp = { - .count = 1, - .caps = MDP_CAP_SMP | - 0, - }, - .smp = { - .mmb_count = 22, - .mmb_size = 4096, - .clients = { - [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7, - [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, - [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18, - }, - }, - .ctl = { - .count = 5, - .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 }, - .flush_hw_mask = 0x0003ffff, - }, - .pipe_vig = { - .count = 3, - .base = { 0x01100, 0x01500, 0x01900 }, - .caps = MDP_PIPE_CAP_HFLIP | - MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SCALE | - MDP_PIPE_CAP_CSC | - 0, - }, - .pipe_rgb = { - .count = 3, - .base = { 0x01d00, 0x02100, 0x02500 }, - .caps = MDP_PIPE_CAP_HFLIP | - MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SCALE | - 0, - }, - .pipe_dma = { - .count = 2, - .base = { 0x02900, 0x02d00 }, - .caps = MDP_PIPE_CAP_HFLIP | - MDP_PIPE_CAP_VFLIP | - 0, - }, - .lm = { - .count = 5, - .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 }, - .instances = { - { .id = 0, .pp = 0, .dspp = 0, - .caps = MDP_LM_CAP_DISPLAY, }, - { .id = 1, .pp = 1, .dspp = 1, - .caps = MDP_LM_CAP_DISPLAY, }, - { .id = 2, .pp = 2, .dspp = 2, - .caps = MDP_LM_CAP_DISPLAY, }, - { .id = 3, .pp = -1, .dspp = -1, - .caps = MDP_LM_CAP_WB }, - { .id = 4, .pp = -1, .dspp = -1, - .caps = MDP_LM_CAP_WB }, - }, - .nb_stages = 5, - .max_width = 2048, - .max_height = 0xFFFF, - }, - .dspp = { - .count = 3, - .base = { 0x04500, 0x04900, 0x04d00 }, - }, - .pp = { - .count = 3, - .base = { 0x21a00, 0x21b00, 0x21c00 }, - }, - .intf = { - .base = { 0x21000, 0x21200, 0x21400, 0x21600 }, - .connect = { - [0] = INTF_eDP, - [1] = INTF_DSI, - [2] = INTF_DSI, - [3] = INTF_HDMI, - }, - }, - .perf = { - .ab_inefficiency = 200, - .ib_inefficiency = 120, - .clk_inefficiency = 125 - }, - .max_clk = 200000000, -}; - static const struct mdp5_cfg_hw msm8x26_config = { .name = 
"msm8x26", .mdp = { @@ -184,7 +95,7 @@ static const struct mdp5_cfg_hw msm8x26_config = { .max_clk = 200000000, }; -static const struct mdp5_cfg_hw msm8x74v2_config = { +static const struct mdp5_cfg_hw msm8x74_config = { .name = "msm8x74", .mdp = { .count = 1, @@ -1098,9 +1009,8 @@ static const struct mdp5_cfg_hw msm8937_config = { }; static const struct mdp5_cfg_handler cfg_handlers_v1[] = { - { .revision = 0, .config = { .hw = &msm8x74v1_config } }, { .revision = 1, .config = { .hw = &msm8x26_config } }, - { .revision = 2, .config = { .hw = &msm8x74v2_config } }, + { .revision = 2, .config = { .hw = &msm8x74_config } }, { .revision = 3, .config = { .hw = &apq8084_config } }, { .revision = 6, .config = { .hw = &msm8x16_config } }, { .revision = 8, .config = { .hw = &msm8x36_config } }, diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c index fc183fe37f56..1eca140616c6 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c @@ -17,9 +17,6 @@ * a specific data path ID - REG_MDP5_CTL_*(<id>, ...) * * Hardware capabilities determine the number of concurrent data paths - * - * In certain use cases (high-resolution dual pipe), one single CTL can be - * shared across multiple CRTCs. */ #define CTL_STAT_BUSY 0x1 @@ -46,11 +43,6 @@ struct mdp5_ctl { u32 pending_ctl_trigger; bool cursor_on; - - /* True if the current CTL has FLUSH bits pending for single FLUSH. 
*/ - bool flush_pending; - - struct mdp5_ctl *pair; /* Paired CTL to be flushed together */ }; struct mdp5_ctl_manager { @@ -63,10 +55,6 @@ struct mdp5_ctl_manager { /* to filter out non-present bits in the current hardware config */ u32 flush_hw_mask; - /* status for single FLUSH */ - bool single_flush_supported; - u32 single_flush_pending_mask; - /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */ spinlock_t pool_lock; struct mdp5_ctl ctls[MAX_CTL]; @@ -485,31 +473,6 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, return sw_mask; } -static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask, - u32 *flush_id) -{ - struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; - - if (ctl->pair) { - DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask); - ctl->flush_pending = true; - ctl_mgr->single_flush_pending_mask |= (*flush_mask); - *flush_mask = 0; - - if (ctl->pair->flush_pending) { - *flush_id = min_t(u32, ctl->id, ctl->pair->id); - *flush_mask = ctl_mgr->single_flush_pending_mask; - - ctl->flush_pending = false; - ctl->pair->flush_pending = false; - ctl_mgr->single_flush_pending_mask = 0; - - DBG("Single FLUSH mask %x,ID %d", *flush_mask, - *flush_id); - } - } -} - /** * mdp5_ctl_commit() - Register Flush * @@ -555,8 +518,6 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, curr_ctl_flush_mask = flush_mask; - fix_for_single_flush(ctl, &flush_mask, &flush_id); - if (!start) { ctl->flush_mask |= flush_mask; return curr_ctl_flush_mask; @@ -589,40 +550,6 @@ int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl) } /* - * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH - */ -int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable) -{ - struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm; - struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); - - /* do nothing silently if hw doesn't support */ - if (!ctl_mgr->single_flush_supported) - return 0; - - if (!enable) { - ctlx->pair = NULL; - ctly->pair = NULL; - 
mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0); - return 0; - } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) { - DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n"); - return -EINVAL; - } else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) { - DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n"); - return -EINVAL; - } - - ctlx->pair = ctly; - ctly->pair = ctlx; - - mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, - MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN); - - return 0; -} - -/* * mdp5_ctl_request() - CTL allocation * * Try to return booked CTL for @intf_num is 1 or 2, unbooked for other INTFs. @@ -687,8 +614,6 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, { struct mdp5_ctl_manager *ctl_mgr; const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd); - int rev = mdp5_cfg_get_hw_rev(cfg_hnd); - unsigned dsi_cnt = 0; const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl; unsigned long flags; int c, ret; @@ -730,21 +655,6 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, spin_lock_init(&ctl->hw_lock); } - /* - * In bonded DSI case, CTL0 and CTL1 are always assigned to two DSI - * interfaces to support single FLUSH feature (Flush CTL0 and CTL1 when - * only write into CTL0's FLUSH register) to keep two DSI pipes in sync. - * Single FLUSH is supported from hw rev v3.0. 
- */ - for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++) - if (hw_cfg->intf.connect[c] == INTF_DSI) - dsi_cnt++; - if ((rev >= 3) && (dsi_cnt > 1)) { - ctl_mgr->single_flush_supported = true; - /* Reserve CTL0/1 for INTF1/2 */ - ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED; - ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED; - } spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); DBG("Pool of %d CTLs created.", ctl_mgr->nctl); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h index 9020e8efc4e4..0c45f7874c24 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h @@ -35,7 +35,6 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, struct mdp5_pipeline *p, int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, int cursor_id, bool enable); -int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable); #define MAX_PIPE_STAGE 2 diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c index 500b7dc895d0..890d2f31510e 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c @@ -118,8 +118,6 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, u32 width, bool hdecim) { const struct drm_format_info *info = drm_format_info(format->pixel_format); - struct mdp5_kms *mdp5_kms = get_kms(smp); - int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg); int i, hsub, nplanes, nlines; uint32_t blkcfg = 0; @@ -133,7 +131,7 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, * U and V components (splits them from Y if necessary) and packs * them together, writes to SMP using a single client. 
*/ - if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) { + if (format->chroma_sample > CHROMA_FULL) { nplanes = 2; /* if decimation is enabled, HW decimates less on the @@ -151,10 +149,6 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size); - /* for hw rev v1.00 */ - if (rev == 0) - n = roundup_pow_of_two(n); - blkcfg |= (n << (8 * i)); } diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index ef298c7d3e5e..cba8a71a2561 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -1928,9 +1928,6 @@ void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl) msm_dp_ctrl_phy_reset(ctrl); phy_init(phy); - - drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", - phy, phy->init_count, phy->power_count); } void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl) @@ -1943,8 +1940,6 @@ void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl) msm_dp_ctrl_phy_reset(ctrl); phy_exit(phy); - drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", - phy, phy->init_count, phy->power_count); } static int msm_dp_ctrl_reinitialize_mainlink(struct msm_dp_ctrl_private *ctrl) @@ -1996,8 +1991,6 @@ static int msm_dp_ctrl_deinitialize_mainlink(struct msm_dp_ctrl_private *ctrl) phy_exit(phy); phy_init(phy); - drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", - phy, phy->init_count, phy->power_count); return 0; } @@ -2588,9 +2581,6 @@ void msm_dp_ctrl_off_link_stream(struct msm_dp_ctrl *msm_dp_ctrl) /* aux channel down, reinit phy */ phy_exit(phy); phy_init(phy); - - drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", - phy, phy->init_count, phy->power_count); } void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl) @@ -2606,13 +2596,7 @@ void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl) dev_pm_opp_set_rate(ctrl->dev, 0); msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); - DRM_DEBUG_DP("Before, phy=%p init_count=%d power_on=%d\n", - 
phy, phy->init_count, phy->power_count); - phy_power_off(phy); - - DRM_DEBUG_DP("After, phy=%p init_count=%d power_on=%d\n", - phy, phy->init_count, phy->power_count); } void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl) @@ -2638,8 +2622,6 @@ void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl) msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); phy_power_off(phy); - drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", - phy, phy->init_count, phy->power_count); } irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl) diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 476848bf8cd1..d2124d625485 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -210,6 +210,7 @@ static const struct of_device_id msm_dp_dt_match[] = { { .compatible = "qcom,x1e80100-dp", .data = &msm_dp_desc_x1e80100 }, {} }; +MODULE_DEVICE_TABLE(of, msm_dp_dt_match); static struct msm_dp_display_private *dev_get_dp_display_private(struct device *dev) { diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index d8bb40ef820e..3c9f01ed6271 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -198,6 +198,7 @@ static const struct of_device_id dt_match[] = { { .compatible = "qcom,dsi-ctrl-6g-qcm2290" }, {} }; +MODULE_DEVICE_TABLE(of, dt_match); static const struct dev_pm_ops dsi_pm_ops = { SET_RUNTIME_PM_OPS(msm_dsi_runtime_suspend, msm_dsi_runtime_resume, NULL) diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index bd3c51c350e7..da3fe6824495 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -317,10 +317,10 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { &msm8996_dsi_cfg, &msm_dsi_6g_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_2, &msm8976_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_0_0, + &msm8998_dsi_cfg, 
&msm_dsi_6g_v2_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_1_0, &sdm660_dsi_cfg, &msm_dsi_6g_v2_host_ops}, - {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0, - &msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_0, diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index 5dc812028bd5..ccf06679608e 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -19,8 +19,8 @@ #define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 #define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001 #define MSM_DSI_6G_VER_MINOR_V1_4_2 0x10040002 +#define MSM_DSI_6G_VER_MINOR_V2_0_0 0x20000000 #define MSM_DSI_6G_VER_MINOR_V2_1_0 0x20010000 -#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000 #define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001 #define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000 #define MSM_DSI_6G_VER_MINOR_V2_3_1 0x20030001 diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index db6da99375a1..565d425f88b8 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -569,6 +569,7 @@ void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host) * dsi_adjust_pclk_for_compression() - Adjust the pclk rate for compression case * @mode: The selected mode for the DSI output * @dsc: DRM DSC configuration for this DSI output + * @is_bonded_dsi: True if two DSI controllers are bonded * * Adjust the pclk rate by calculating a new hdisplay proportional to * the compression ratio such that: @@ -757,6 +758,7 @@ static inline enum dsi_vid_dst_format dsi_get_vid_fmt(const enum mipi_dsi_pixel_format mipi_fmt) { switch (mipi_fmt) { + case MIPI_DSI_FMT_RGB101010: return VID_DST_FORMAT_RGB101010; case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888; case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE; case MIPI_DSI_FMT_RGB666_PACKED: 
return VID_DST_FORMAT_RGB666; @@ -769,6 +771,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(const enum mipi_dsi_pixel_format mipi_fmt) { switch (mipi_fmt) { + case MIPI_DSI_FMT_RGB101010: return CMD_DST_FORMAT_RGB101010; case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888; case MIPI_DSI_FMT_RGB666_PACKED: case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666; @@ -782,13 +785,21 @@ static void dsi_ctrl_disable(struct msm_dsi_host *msm_host) dsi_write(msm_host, REG_DSI_CTRL, 0); } +static bool msm_dsi_host_version_geq(struct msm_dsi_host *msm_host, + u32 major, u32 minor) +{ + return msm_host->cfg_hnd->major > major || + (msm_host->cfg_hnd->major == major && + msm_host->cfg_hnd->minor >= minor); +} + bool msm_dsi_host_is_wide_bus_enabled(struct mipi_dsi_host *host) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); return msm_host->dsc && - (msm_host->cfg_hnd->major == MSM_DSI_VER_MAJOR_6G && - msm_host->cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V2_5_0); + msm_dsi_host_version_geq(msm_host, MSM_DSI_VER_MAJOR_6G, + MSM_DSI_6G_VER_MINOR_V2_5_0); } static void dsi_ctrl_enable(struct msm_dsi_host *msm_host, @@ -1033,8 +1044,9 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi) /* * DPU sends 3 bytes per pclk cycle to DSI. If widebus is * enabled, MDP always sends out 48-bit compressed data per - * pclk and on average, DSI consumes an amount of compressed - * data equivalent to the uncompressed pixel depth per pclk. + * pclk and on average, for video mode, DSI consumes only an + * amount of compressed data equivalent to the uncompressed + * pixel depth per pclk. * * Calculate the number of pclks needed to transmit one line of * the compressed data. @@ -1046,10 +1058,14 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi) * unused anyway. 
*/ h_total -= hdisplay; - if (wide_bus_enabled) - bits_per_pclk = mipi_dsi_pixel_format_to_bpp(msm_host->format); - else + if (wide_bus_enabled) { + if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) + bits_per_pclk = dsc->bits_per_component * 3; + else + bits_per_pclk = 48; + } else { bits_per_pclk = 24; + } hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc) * 8, bits_per_pclk); @@ -1705,6 +1721,26 @@ static int dsi_host_attach(struct mipi_dsi_host *host, if (dsi->dsc) msm_host->dsc = dsi->dsc; + if (msm_host->format == MIPI_DSI_FMT_RGB101010) { + if (!msm_dsi_host_version_geq(msm_host, MSM_DSI_VER_MAJOR_6G, + MSM_DSI_6G_VER_MINOR_V2_1_0)) { + DRM_DEV_ERROR(&msm_host->pdev->dev, + "RGB101010 not supported on this DSI controller\n"); + return -EINVAL; + } + + /* + * Downstream overrides RGB101010 back to RGB888 when DSC is enabled + * but widebus is not. Using RGB101010 in this case may require some + * extra changes. + */ + if (msm_host->dsc && + !msm_dsi_host_is_wide_bus_enabled(&msm_host->base)) { + dev_warn(&msm_host->pdev->dev, + "RGB101010 with DSC but without widebus, may need extra changes\n"); + } + } + ret = dsi_dev_attach(msm_host->pdev); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 7937266de1d2..c59375aaae19 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -582,6 +582,7 @@ static const struct of_device_id dsi_phy_dt_match[] = { #endif {} }; +MODULE_DEVICE_TABLE(of, dsi_phy_dt_match); /* * Currently, we only support one SoC for each PHY type. 
When we have multiple diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index 01182442dfd6..8f4b03713f25 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -41,8 +41,8 @@ #define VCO_REF_CLK_RATE 19200000 #define FRAC_BITS 18 -/* Hardware is pre V4.1 */ -#define DSI_PHY_7NM_QUIRK_PRE_V4_1 BIT(0) +/* Hardware is V4.0 */ +#define DSI_PHY_7NM_QUIRK_V4_0 BIT(0) /* Hardware is V4.1 */ #define DSI_PHY_7NM_QUIRK_V4_1 BIT(1) /* Hardware is V4.2 */ @@ -141,7 +141,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config dec_multiple = div_u64(pll_freq * multiplier, divider); dec = div_u64_rem(dec_multiple, multiplier, &frac); - if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1) { + if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_0) { config->pll_clock_inverters = 0x28; } else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)) { if (pll_freq < 163000000ULL) @@ -264,7 +264,7 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll) void __iomem *base = pll->phy->pll_base; u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00; - if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1)) + if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_0)) if (pll->vco_current_rate >= 3100000000ULL) analog_controls_five_1 = 0x03; @@ -313,10 +313,10 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll) writel(0x29, base + REG_DSI_7nm_PHY_PLL_PFILT); writel(0x2f, base + REG_DSI_7nm_PHY_PLL_PFILT); writel(0x2a, base + REG_DSI_7nm_PHY_PLL_IFILT); - writel(!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1) ? 0x3f : 0x22, + writel(!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_0) ? 
0x3f : 0x22, base + REG_DSI_7nm_PHY_PLL_IFILT); - if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1)) { + if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_0)) { writel(0x22, base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE); if (pll->slave) writel(0x22, pll->slave->phy->pll_base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE); @@ -928,7 +928,7 @@ static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy) const u8 *tx_dctrl = tx_dctrl_0; void __iomem *lane_base = phy->lane_base; - if (!(phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1)) + if (!(phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_0)) tx_dctrl = tx_dctrl_1; /* Strength ctrl settings */ @@ -1319,7 +1319,7 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = { .max_pll_rate = 3500000000UL, .io_start = { 0xae94400, 0xae96400 }, .num_dsi_phy = 2, - .quirks = DSI_PHY_7NM_QUIRK_PRE_V4_1, + .quirks = DSI_PHY_7NM_QUIRK_V4_0, }; const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs = { diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 5afac09c0d33..852abb2466f0 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -20,7 +20,7 @@ void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on) { - uint32_t ctrl = 0; + u32 ctrl = 0; unsigned long flags; spin_lock_irqsave(&hdmi->reg_lock, flags); @@ -91,7 +91,7 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi) struct platform_device *phy_pdev; struct device_node *phy_node; - phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0); + phy_node = of_parse_phandle(dev_of_node(&pdev->dev), "phys", 0); if (!phy_node) { DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n"); return -ENXIO; @@ -278,7 +278,7 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev) if (!config) return -EINVAL; - hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); + hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); if (!hdmi) return -ENOMEM; @@ -287,7 +287,7 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev) 
spin_lock_init(&hdmi->reg_lock); mutex_init(&hdmi->state_mutex); - ret = drm_of_find_panel_or_bridge(pdev->dev.of_node, 1, 0, NULL, &hdmi->next_bridge); + ret = drm_of_find_panel_or_bridge(dev_of_node(dev), 1, 0, NULL, &hdmi->next_bridge); if (ret && ret != -ENODEV) return ret; @@ -304,7 +304,7 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev) hdmi->qfprom_mmio = msm_ioremap(pdev, "qfprom_physical"); if (IS_ERR(hdmi->qfprom_mmio)) { - DRM_DEV_INFO(&pdev->dev, "can't find qfprom resource\n"); + DRM_DEV_INFO(dev, "can't find qfprom resource\n"); hdmi->qfprom_mmio = NULL; } @@ -312,8 +312,7 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev) if (hdmi->irq < 0) return hdmi->irq; - hdmi->pwr_regs = devm_kcalloc(&pdev->dev, - config->pwr_reg_cnt, + hdmi->pwr_regs = devm_kcalloc(dev, config->pwr_reg_cnt, sizeof(hdmi->pwr_regs[0]), GFP_KERNEL); if (!hdmi->pwr_regs) @@ -322,12 +321,11 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev) for (i = 0; i < config->pwr_reg_cnt; i++) hdmi->pwr_regs[i].supply = config->pwr_reg_names[i]; - ret = devm_regulator_bulk_get(&pdev->dev, config->pwr_reg_cnt, hdmi->pwr_regs); + ret = devm_regulator_bulk_get(dev, config->pwr_reg_cnt, hdmi->pwr_regs); if (ret) return dev_err_probe(dev, ret, "failed to get pwr regulators\n"); - hdmi->pwr_clks = devm_kcalloc(&pdev->dev, - config->pwr_clk_cnt, + hdmi->pwr_clks = devm_kcalloc(dev, config->pwr_clk_cnt, sizeof(hdmi->pwr_clks[0]), GFP_KERNEL); if (!hdmi->pwr_clks) @@ -336,16 +334,16 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev) for (i = 0; i < config->pwr_clk_cnt; i++) hdmi->pwr_clks[i].id = config->pwr_clk_names[i]; - ret = devm_clk_bulk_get(&pdev->dev, config->pwr_clk_cnt, hdmi->pwr_clks); + ret = devm_clk_bulk_get(dev, config->pwr_clk_cnt, hdmi->pwr_clks); if (ret) return ret; - hdmi->extp_clk = devm_clk_get_optional(&pdev->dev, "extp"); + hdmi->extp_clk = devm_clk_get_optional(dev, "extp"); if (IS_ERR(hdmi->extp_clk)) return dev_err_probe(dev, 
PTR_ERR(hdmi->extp_clk), "failed to get extp clock\n"); - hdmi->hpd_gpiod = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN); + hdmi->hpd_gpiod = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN); /* This will catch e.g. -EPROBE_DEFER */ if (IS_ERR(hdmi->hpd_gpiod)) return dev_err_probe(dev, PTR_ERR(hdmi->hpd_gpiod), @@ -358,18 +356,16 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev) gpiod_set_consumer_name(hdmi->hpd_gpiod, "HDMI_HPD"); ret = msm_hdmi_get_phy(hdmi); - if (ret) { - DRM_DEV_ERROR(&pdev->dev, "failed to get phy\n"); + if (ret) return ret; - } - ret = devm_pm_runtime_enable(&pdev->dev); + ret = devm_pm_runtime_enable(dev); if (ret) goto err_put_phy; platform_set_drvdata(pdev, hdmi); - ret = component_add(&pdev->dev, &msm_hdmi_ops); + ret = component_add(dev, &msm_hdmi_ops); if (ret) goto err_put_phy; @@ -429,7 +425,7 @@ fail: return ret; } -DEFINE_RUNTIME_DEV_PM_OPS(msm_hdmi_pm_ops, msm_hdmi_runtime_suspend, msm_hdmi_runtime_resume, NULL); +static DEFINE_RUNTIME_DEV_PM_OPS(msm_hdmi_pm_ops, msm_hdmi_runtime_suspend, msm_hdmi_runtime_resume, NULL); static const struct of_device_id msm_hdmi_dt_match[] = { { .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8974_config }, @@ -441,6 +437,7 @@ static const struct of_device_id msm_hdmi_dt_match[] = { { .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8960_config }, {} }; +MODULE_DEVICE_TABLE(of, msm_hdmi_dt_match); static struct platform_driver msm_hdmi_driver = { .probe = msm_hdmi_dev_probe, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index 02cfd46df594..49433f7727c3 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -43,7 +43,7 @@ struct hdmi { bool power_on; bool hpd_enabled; struct mutex state_mutex; /* protects two booleans */ - unsigned long int pixclock; + unsigned long pixclock; void __iomem *mmio; void __iomem *qfprom_mmio; @@ -132,7 +132,7 @@ enum hdmi_phy_type { struct hdmi_phy_cfg { enum hdmi_phy_type type; - 
void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock); + void (*powerup)(struct hdmi_phy *phy, unsigned long pixclock); void (*powerdown)(struct hdmi_phy *phy); const char * const *reg_names; int num_regs; @@ -167,7 +167,7 @@ static inline u32 hdmi_phy_read(struct hdmi_phy *phy, u32 reg) int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy); void msm_hdmi_phy_resource_disable(struct hdmi_phy *phy); -void msm_hdmi_phy_powerup(struct hdmi_phy *phy, unsigned long int pixclock); +void msm_hdmi_phy_powerup(struct hdmi_phy *phy, unsigned long pixclock); void msm_hdmi_phy_powerdown(struct hdmi_phy *phy); void __init msm_hdmi_phy_driver_register(void); void __exit msm_hdmi_phy_driver_unregister(void); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c index d9a8dc9dae8f..249c167ab04d 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c @@ -17,8 +17,7 @@ int msm_hdmi_audio_update(struct hdmi *hdmi) { struct hdmi_audio *audio = &hdmi->audio; bool enabled = audio->enabled; - uint32_t acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl; - uint32_t audio_config; + u32 acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl, audio_config; if (!hdmi->connector->display_info.is_hdmi) return -EINVAL; @@ -43,7 +42,7 @@ int msm_hdmi_audio_update(struct hdmi *hdmi) acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SELECT__MASK; if (enabled) { - uint32_t n, cts, multiplier; + u32 n, cts, multiplier; enum hdmi_acr_cts select; drm_hdmi_acr_get_n_cts(hdmi->pixclock, audio->rate, &n, &cts); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index 98cd490e7ab0..a9eb6489c520 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -153,7 +153,7 @@ static int msm_hdmi_bridge_write_avi_infoframe(struct drm_bridge *bridge, for (i = 0; i < ARRAY_SIZE(buf); i++) hdmi_write(hdmi, REG_HDMI_AVI_INFO(i), buf[i]); - val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1); 
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0); val |= HDMI_INFOFRAME_CTRL0_AVI_SEND | HDMI_INFOFRAME_CTRL0_AVI_CONT; hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val); @@ -193,7 +193,7 @@ static int msm_hdmi_bridge_write_audio_infoframe(struct drm_bridge *bridge, buffer[9] << 16 | buffer[10] << 24); - val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1); + val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0); val |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND | HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT | HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE | @@ -356,7 +356,7 @@ static void msm_hdmi_set_timings(struct hdmi *hdmi, const struct drm_display_mode *mode) { int hstart, hend, vstart, vend; - uint32_t frame_ctrl; + u32 frame_ctrl; hstart = mode->htotal - mode->hsync_start; hend = mode->htotal - mode->hsync_start + mode->hdisplay; @@ -409,7 +409,7 @@ static const struct drm_edid *msm_hdmi_bridge_edid_read(struct drm_bridge *bridg struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = hdmi_bridge->hdmi; const struct drm_edid *drm_edid; - uint32_t hdmi_ctrl; + u32 hdmi_ctrl; hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL); hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c index 114b0d507700..2cccd9062584 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c @@ -65,7 +65,7 @@ void msm_hdmi_hpd_enable(struct drm_bridge *bridge) struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = hdmi_bridge->hdmi; struct device *dev = &hdmi->pdev->dev; - uint32_t hpd_ctrl; + u32 hpd_ctrl; int ret; unsigned long flags; @@ -125,7 +125,7 @@ void msm_hdmi_hpd_irq(struct drm_bridge *bridge) { struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = hdmi_bridge->hdmi; - uint32_t hpd_int_status, hpd_int_ctrl; + u32 hpd_int_status, hpd_int_ctrl; /* Process HPD: */ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS); 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c index 6b9265159195..4a4a2cf90234 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c @@ -40,8 +40,8 @@ static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c) { struct hdmi *hdmi = hdmi_i2c->hdmi; struct drm_device *dev = hdmi->dev; - uint32_t retry = 0xffff; - uint32_t ddc_int_ctrl; + u32 retry = 0xffff; + u32 ddc_int_ctrl; do { --retry; @@ -71,7 +71,7 @@ static bool sw_done(struct hdmi_i2c_adapter *hdmi_i2c) struct hdmi *hdmi = hdmi_i2c->hdmi; if (!hdmi_i2c->sw_done) { - uint32_t ddc_int_ctrl; + u32 ddc_int_ctrl; ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL); @@ -92,13 +92,13 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c, struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c); struct hdmi *hdmi = hdmi_i2c->hdmi; struct drm_device *dev = hdmi->dev; - static const uint32_t nack[] = { + static const u32 nack[] = { HDMI_DDC_SW_STATUS_NACK0, HDMI_DDC_SW_STATUS_NACK1, HDMI_DDC_SW_STATUS_NACK2, HDMI_DDC_SW_STATUS_NACK3, }; int indices[MAX_TRANSACTIONS]; int ret, i, j, index = 0; - uint32_t ddc_status, ddc_data, i2c_trans; + u32 ddc_status, ddc_data, i2c_trans; num = min(num, MAX_TRANSACTIONS); @@ -119,7 +119,7 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c, for (i = 0; i < num; i++) { struct i2c_msg *p = &msgs[i]; - uint32_t raw_addr = p->addr << 1; + u32 raw_addr = p->addr << 1; if (p->flags & I2C_M_RD) raw_addr |= 1; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c index 667573f1db7c..eb1088755cb3 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c @@ -94,7 +94,7 @@ void msm_hdmi_phy_resource_disable(struct hdmi_phy *phy) pm_runtime_put_sync(dev); } -void msm_hdmi_phy_powerup(struct hdmi_phy *phy, unsigned long int pixclock) +void msm_hdmi_phy_powerup(struct hdmi_phy *phy, unsigned long pixclock) { if (!phy || !phy->cfg->powerup) 
return; @@ -204,6 +204,7 @@ static const struct of_device_id msm_hdmi_phy_dt_match[] = { .data = &msm_hdmi_phy_8998_cfg }, {} }; +MODULE_DEVICE_TABLE(of, msm_hdmi_phy_dt_match); static struct platform_driver msm_hdmi_phy_platform_driver = { .probe = msm_hdmi_phy_probe, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c index cf90a0c1f822..cfa8fc494199 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c @@ -7,7 +7,7 @@ #include "hdmi.h" static void hdmi_phy_8960_powerup(struct hdmi_phy *phy, - unsigned long int pixclock) + unsigned long pixclock) { DBG("pixclock: %lu", pixclock); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c index 1d97640d8c24..10ee91818364 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c @@ -9,7 +9,7 @@ #include "hdmi.h" static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy, - unsigned long int pixclock) + unsigned long pixclock) { /* De-serializer delay D/C for non-lbk mode: */ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG0, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c index a2a6940e195a..6f40820d9071 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c @@ -7,7 +7,7 @@ #include "hdmi.h" static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy, - unsigned long int pixclock) + unsigned long pixclock) { hdmi_phy_write(phy, REG_HDMI_8x74_ANA_CFG0, 0x1b); hdmi_phy_write(phy, REG_HDMI_8x74_ANA_CFG1, 0xf2); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index e5ab1e28851d..195f40e331e5 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -536,6 +536,11 @@ static int msm_ioctl_gem_info_get_metadata(struct drm_gem_object *obj, len = msm_obj->metadata_size; buf = kmemdup(msm_obj->metadata, len, GFP_KERNEL); + if 
(!buf) { + msm_gem_unlock(obj); + return -ENOMEM; + } + msm_gem_unlock(obj); if (*metadata_size < len) { @@ -548,7 +553,7 @@ static int msm_ioctl_gem_info_get_metadata(struct drm_gem_object *obj, kfree(buf); - return 0; + return ret; } static int msm_ioctl_gem_info(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index b3fdb83202ab..9b681e144c07 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -219,7 +219,12 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, + mode_cmd->offsets[i]; if (bos[i]->size < min_size) { - ret = -EINVAL; + ret = UERR(EINVAL, dev, "plane %d too small", i); + goto fail; + } + + if (to_msm_bo(bos[i])->flags & MSM_BO_NO_SHARE) { + ret = UERR(EINVAL, dev, "Cannot map _NO_SHARE to kms vm"); goto fail; } diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index b27abaa13926..2cb3ab04f125 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -507,8 +507,11 @@ void msm_gem_unpin_locked(struct drm_gem_object *obj) */ void msm_gem_unpin_active(struct drm_gem_object *obj) { + struct msm_drm_private *priv = obj->dev->dev_private; struct msm_gem_object *msm_obj = to_msm_bo(obj); + GEM_WARN_ON(!mutex_is_locked(&priv->lru.lock)); + msm_obj->pin_count--; GEM_WARN_ON(msm_obj->pin_count < 0); update_lru_active(obj); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index cb32093fda47..762e546d25ef 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -452,6 +452,7 @@ struct msm_gem_submit { bool bos_pinned : 1; bool fault_dumped:1;/* Limit devcoredump dumping to one per submit */ bool in_rb : 1; /* "sudo" mode, copy cmds into RB */ + bool has_exec : 1; /* @exec is initialized. 
*/ struct msm_ringbuffer *ring; unsigned int nr_cmds; unsigned int nr_bos; diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 1039e3c0a47b..31fa51a44f86 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -26,9 +26,8 @@ static bool can_swap(void) static bool can_block(struct shrink_control *sc) { - if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM)) - return false; - return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM); + return (sc->gfp_mask & __GFP_DIRECT_RECLAIM) || + (current_is_kswapd() && (sc->gfp_mask & __GFP_KSWAPD_RECLAIM)); } static unsigned long diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 75d9f3574370..26ea8a28be47 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -278,6 +278,7 @@ static int submit_lock_objects_vmbind(struct msm_gem_submit *submit) int ret = 0; drm_exec_init(&submit->exec, flags, submit->nr_bos); + submit->has_exec = true; drm_exec_until_all_locked (&submit->exec) { ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1); @@ -304,6 +305,7 @@ static int submit_lock_objects(struct msm_gem_submit *submit) return submit_lock_objects_vmbind(submit); drm_exec_init(&submit->exec, flags, submit->nr_bos); + submit->has_exec = true; drm_exec_until_all_locked (&submit->exec) { ret = drm_exec_lock_obj(&submit->exec, @@ -523,7 +525,7 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool error) if (error) submit_unpin_objects(submit); - if (submit->exec.objects) + if (submit->has_exec) drm_exec_fini(&submit->exec); /* if job wasn't enqueued to scheduler, early retirement: */ diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index adf88cf8f41a..1a952b171ed7 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -373,6 +373,12 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object 
*obj, struct msm_gem_vma *vma; int ret; + /* _NO_SHARE objs cannot be mapped outside of their "host" vm: */ + if (obj && (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE) && + GEM_WARN_ON(obj->resv != drm_gpuvm_resv(gpuvm))) { + return ERR_PTR(-EINVAL); + } + drm_gpuvm_resv_assert_held(&vm->base); vma = kzalloc_obj(*vma); @@ -696,6 +702,7 @@ static struct dma_fence * msm_vma_job_run(struct drm_sched_job *_job) { struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job); + struct msm_drm_private *priv = job->vm->drm->dev_private; struct msm_gem_vm *vm = to_msm_vm(job->vm); struct drm_gem_object *obj; int ret = vm->unusable ? -EINVAL : 0; @@ -738,12 +745,14 @@ msm_vma_job_run(struct drm_sched_job *_job) if (ret) msm_gem_vm_unusable(job->vm); + mutex_lock(&priv->lru.lock); + job_foreach_bo (obj, job) { - msm_gem_lock(obj); - msm_gem_unpin_locked(obj); - msm_gem_unlock(obj); + msm_gem_unpin_active(obj); } + mutex_unlock(&priv->lru.lock); + /* VM_BIND ops are synchronous, so no fence to wait on: */ return NULL; } @@ -1242,7 +1251,7 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec) case MSM_VM_BIND_OP_UNMAP: ret = drm_gpuvm_sm_unmap_exec_lock(job->vm, exec, op->iova, - op->obj_offset); + op->range); break; case MSM_VM_BIND_OP_MAP: case MSM_VM_BIND_OP_MAP_NULL: { diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 84d6c7f50c8d..930e54d1b0a7 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -17,6 +17,7 @@ #include <linux/string_helpers.h> #include <linux/devcoredump.h> #include <linux/sched/task.h> +#include <linux/sched/mm.h> /* * Power Management: @@ -468,6 +469,7 @@ static void recover_worker(struct kthread_work *work) struct msm_gem_submit *submit; struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu); char *comm = NULL, *cmd = NULL; + unsigned int noreclaim_flag; struct task_struct *task; int i; @@ -505,6 +507,8 @@ static void recover_worker(struct kthread_work *work) 
msm_gem_vm_unusable(submit->vm); } + noreclaim_flag = memalloc_noreclaim_save(); + get_comm_cmdline(submit, &comm, &cmd); if (comm && cmd) { @@ -523,6 +527,8 @@ static void recover_worker(struct kthread_work *work) pm_runtime_get_sync(&gpu->pdev->dev); msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd); + memalloc_noreclaim_restore(noreclaim_flag); + kfree(cmd); kfree(comm); @@ -546,32 +552,30 @@ static void recover_worker(struct kthread_work *work) msm_update_fence(ring->fctx, fence); } - if (msm_gpu_active(gpu)) { - /* retire completed submits, plus the one that hung: */ - retire_submits(gpu); + /* retire completed submits, plus the one that hung: */ + retire_submits(gpu); - gpu->funcs->recover(gpu); + gpu->funcs->recover(gpu); - /* - * Replay all remaining submits starting with highest priority - * ring - */ - for (i = 0; i < gpu->nr_rings; i++) { - struct msm_ringbuffer *ring = gpu->rb[i]; - unsigned long flags; + /* + * Replay all remaining submits starting with highest priority + * ring + */ + for (i = 0; i < gpu->nr_rings; i++) { + struct msm_ringbuffer *ring = gpu->rb[i]; + unsigned long flags; - spin_lock_irqsave(&ring->submit_lock, flags); - list_for_each_entry(submit, &ring->submits, node) { - /* - * If the submit uses an unusable vm make sure - * we don't actually run it - */ - if (to_msm_vm(submit->vm)->unusable) - submit->nr_cmds = 0; - gpu->funcs->submit(gpu, submit); - } - spin_unlock_irqrestore(&ring->submit_lock, flags); + spin_lock_irqsave(&ring->submit_lock, flags); + list_for_each_entry(submit, &ring->submits, node) { + /* + * If the submit uses an unusable vm make sure + * we don't actually run it + */ + if (to_msm_vm(submit->vm)->unusable) + submit->nr_cmds = 0; + gpu->funcs->submit(gpu, submit); } + spin_unlock_irqrestore(&ring->submit_lock, flags); } pm_runtime_put(&gpu->pdev->dev); @@ -587,6 +591,7 @@ void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_ struct msm_gem_submit *submit; struct msm_ringbuffer 
*cur_ring = gpu->funcs->active_ring(gpu); char *comm = NULL, *cmd = NULL; + unsigned int noreclaim_flag; mutex_lock(&gpu->lock); @@ -594,6 +599,8 @@ void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_ if (submit && submit->fault_dumped) goto resume_smmu; + noreclaim_flag = memalloc_noreclaim_save(); + if (submit) { get_comm_cmdline(submit, &comm, &cmd); @@ -609,6 +616,8 @@ void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_ msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd); pm_runtime_put_sync(&gpu->pdev->dev); + memalloc_noreclaim_restore(noreclaim_flag); + kfree(cmd); kfree(comm); diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c index 9047e8d9ee89..90c3fa0681a0 100644 --- a/drivers/gpu/drm/msm/msm_mdss.c +++ b/drivers/gpu/drm/msm/msm_mdss.c @@ -262,6 +262,14 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss) icc_set_bw(msm_mdss->reg_bus_path, 0, msm_mdss->reg_bus_bw); + /* + * TODO: + * Previous users (e.g. the bootloader) may have left this clock at a high rate, which + * would remain set, as prepare_enable() doesn't reprogram it. This theoretically poses a + * risk of brownout, but realistically this path is almost exclusively excercised after the + * correct OPP has been set in one of the MDPn or DPU drivers, or during initial probe, + * before the RPM(H)PD sync_state is done. 
+ */ ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks); if (ret) { dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret); @@ -560,6 +568,7 @@ static const struct msm_mdss_data data_153k6 = { static const struct of_device_id mdss_dt_match[] = { { .compatible = "qcom,mdss", .data = &data_153k6 }, + { .compatible = "qcom,eliza-mdss", .data = &data_57k }, { .compatible = "qcom,glymur-mdss", .data = &data_57k }, { .compatible = "qcom,kaanapali-mdss", .data = &data_57k }, { .compatible = "qcom,msm8998-mdss", .data = &data_76k8 }, diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml index 3941e7510754..2309870f5031 100644 --- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml +++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml @@ -5016,6 +5016,10 @@ by a particular renderpass/blit. <bitfield pos="1" name="LPAC" type="boolean"/> <bitfield pos="2" name="RAYTRACING" type="boolean"/> </reg32> + <reg32 offset="0x0405" name="CX_MISC_SW_FUSE_FREQ_LIMIT_STATUS" variants="A8XX-"> + <bitfield high="8" low="0" name="FINALFREQLIMIT"/> + <bitfield pos="24" name="SOFTSKUDISABLED" type="boolean"/> + </reg32> </domain> </database> diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml index c4e00b1263cd..33404eb18fd0 100644 --- a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml +++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml @@ -141,8 +141,10 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <reg32 offset="0x1f9f0" name="GMU_BOOT_KMD_LM_HANDSHAKE"/> <reg32 offset="0x1f957" name="GMU_LLM_GLM_SLEEP_CTRL"/> <reg32 offset="0x1f958" name="GMU_LLM_GLM_SLEEP_STATUS"/> - <reg32 offset="0x1f888" name="GMU_ALWAYS_ON_COUNTER_L"/> - <reg32 offset="0x1f889" name="GMU_ALWAYS_ON_COUNTER_H"/> + <reg32 offset="0x1f888" name="GMU_ALWAYS_ON_COUNTER_L" variants="A6XX-A7XX"/> + <reg32 offset="0x1f840" name="GMU_ALWAYS_ON_COUNTER_L" 
variants="A8XX-"/> + <reg32 offset="0x1f889" name="GMU_ALWAYS_ON_COUNTER_H" variants="A6XX-A7XX"/> + <reg32 offset="0x1f841" name="GMU_ALWAYS_ON_COUNTER_H" variants="A8XX-"/> <reg32 offset="0x1f8c3" name="GMU_GMU_PWR_COL_KEEPALIVE" variants="A6XX-A7XX"/> <reg32 offset="0x1f7e4" name="GMU_GMU_PWR_COL_KEEPALIVE" variants="A8XX-"/> <reg32 offset="0x1f8c4" name="GMU_PWR_COL_PREEMPT_KEEPALIVE" variants="A6XX-A7XX"/> diff --git a/drivers/gpu/drm/msm/registers/display/dsi.xml b/drivers/gpu/drm/msm/registers/display/dsi.xml index c7a7b633d747..e40125f75175 100644 --- a/drivers/gpu/drm/msm/registers/display/dsi.xml +++ b/drivers/gpu/drm/msm/registers/display/dsi.xml @@ -15,6 +15,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <value name="VID_DST_FORMAT_RGB666" value="1"/> <value name="VID_DST_FORMAT_RGB666_LOOSE" value="2"/> <value name="VID_DST_FORMAT_RGB888" value="3"/> + <value name="VID_DST_FORMAT_RGB101010" value="4"/> </enum> <enum name="dsi_rgb_swap"> <value name="SWAP_RGB" value="0"/> @@ -39,6 +40,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <value name="CMD_DST_FORMAT_RGB565" value="6"/> <value name="CMD_DST_FORMAT_RGB666" value="7"/> <value name="CMD_DST_FORMAT_RGB888" value="8"/> + <value name="CMD_DST_FORMAT_RGB101010" value="9"/> </enum> <enum name="dsi_lane_swap"> <value name="LANE_SWAP_0123" value="0"/> @@ -142,7 +144,8 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> </reg32> <reg32 offset="0x0000c" name="VID_CFG0"> <bitfield name="VIRT_CHANNEL" low="0" high="1" type="uint"/> <!-- always zero? 
--> - <bitfield name="DST_FORMAT" low="4" high="5" type="dsi_vid_dst_format"/> + <!-- high was 5 before DSI 6G 2.1.0 --> + <bitfield name="DST_FORMAT" low="4" high="6" type="dsi_vid_dst_format"/> <bitfield name="TRAFFIC_MODE" low="8" high="9" type="dsi_traffic_mode"/> <bitfield name="BLLP_POWER_STOP" pos="12" type="boolean"/> <bitfield name="EOF_BLLP_POWER_STOP" pos="15" type="boolean"/> |
