author	Akhil P Oommen <akhilpo@oss.qualcomm.com>	2025-09-08 13:57:03 +0530
committer	Rob Clark <robin.clark@oss.qualcomm.com>	2025-09-08 07:25:00 -0700
commit	04ca842ef2473120a9587dfe49523c8c243159ea (patch)
tree	7ca835299b5e1444e0da09333f4dc5fb62b67281
parent	f195421318bd00151b3a111af6f315a25c3438a8 (diff)
drm/msm/a6xx: Poll AHB fence status in GPU IRQ handler
Even though the GX power domain is kept ON while there is a pending GPU interrupt, there is a small window in which we can race with the GMU, which may move the AHB fence to 'Drop' mode. Once the GMU sees the pending IRQ, it moves the fence state back to 'Allow' mode. Close this race window by polling the AHB fence to ensure that it is in 'Allow' mode.

Signed-off-by: Akhil P Oommen <akhilpo@oss.qualcomm.com>
Patchwork: https://patchwork.freedesktop.org/patch/673377/
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
-rw-r--r--	drivers/gpu/drm/msm/adreno/a6xx_gmu.h	3
-rw-r--r--	drivers/gpu/drm/msm/adreno/a6xx_gpu.c	26
2 files changed, 29 insertions(+), 0 deletions(-)
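
For context, the new gmu_poll_timeout_atomic() macro added below is built on the kernel's readl_poll_timeout_atomic() helper from <linux/iopoll.h>, which busy-waits on a register read until a condition holds or a timeout expires and is therefore usable from hard-IRQ context. The sketch below is a rough open-coded equivalent of what the patch does with the AHB fence register; ahb_fence_wait_allow() and the fence_ctrl pointer are illustrative names, not part of the patch.

/*
 * Illustrative sketch only: roughly what gmu_poll_timeout_atomic() does via
 * readl_poll_timeout_atomic(). The real macro in <linux/iopoll.h> also
 * performs one final read after the timeout before giving up.
 */
static int ahb_fence_wait_allow(void __iomem *fence_ctrl)	/* hypothetical helper */
{
	unsigned int waited_us = 0;
	u32 status;

	for (;;) {
		status = readl(fence_ctrl);
		if (!status)			/* 0 => fence is back in 'Allow' mode */
			return 0;
		if (waited_us >= 100)		/* same 100 us budget as the patch */
			return -ETIMEDOUT;
		udelay(1);			/* 1 us poll interval, as in the patch */
		waited_us++;
	}
}

In the actual patch this polling is wrapped in irq_poll_fence(), called at the top of a6xx_irq() so the handler only reads RBBM interrupt status once the fence is confirmed to be in 'Allow' mode.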
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 9494bbed9a1f..40c4c0445b8e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -167,6 +167,9 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
interval, timeout)
+#define gmu_poll_timeout_atomic(gmu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout_atomic((gmu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
{
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 48aa07b6b2f5..4cdbad05affc 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1845,6 +1845,28 @@ static void a6xx_gpu_keepalive_vote(struct msm_gpu *gpu, bool on)
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, on);
}
+static int irq_poll_fence(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 status;
+
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return 0;
+
+ if (gmu_poll_timeout_atomic(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, status, !status, 1, 100)) {
+ u32 rbbm_unmasked = gmu_read(gmu, REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS);
+
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "irq fence poll timeout, fence_ctrl=0x%x, unmasked_status=0x%x\n",
+ status, rbbm_unmasked);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
{
struct msm_drm_private *priv = gpu->dev->dev_private;
@@ -1852,6 +1874,9 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
/* Set keepalive vote to avoid power collapse after RBBM_INT_0_STATUS is read */
a6xx_gpu_keepalive_vote(gpu, true);
+ if (irq_poll_fence(gpu))
+ goto done;
+
u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
@@ -1888,6 +1913,7 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
if (status & A6XX_RBBM_INT_0_MASK_CP_SW)
a6xx_preempt_irq(gpu);
+done:
a6xx_gpu_keepalive_vote(gpu, false);
return IRQ_HANDLED;