author		Daniel Vetter <daniel.vetter@ffwll.ch>		2013-07-10 14:11:59 +0200
committer	Dave Airlie <airlied@gmail.com>			2013-07-23 20:14:24 +1000
commit		b72a8925fd5cc80107e3988536290d087b1079aa (patch)
tree		5980bab820bb68a4ac990519ff39b31d99b6f1f4 /drivers
parent		0e267944f6ffc908eec4751d887f3a8936f412fb (diff)
drm/radeon: s/drm_order/order_base_2/
radeon is the last driver still using this little helper, and pretty obviously its biggest user.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@gmail.com>
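
Both helpers compute the base-2 order of their argument, i.e. the smallest k with 2^k >= n, so for the non-zero ring and page sizes in this patch the rename does not change any computed value. Below is a minimal userspace sketch of that equivalence; it mirrors how the two helpers are understood to behave rather than quoting the kernel definitions, and the *_sketch names are made up for illustration:

```c
/*
 * Standalone sketch: drm_order() and order_base_2() both return
 * ceil(log2(n)) for n >= 1, which is why s/drm_order/order_base_2/
 * is a pure rename for the ring-size math in this patch.
 * Not the kernel implementations; names are illustrative only.
 */
#include <assert.h>
#include <stdio.h>

/* Old DRM helper, as understood: floor(log2(size)), plus one if size
 * is not already a power of two. */
static int drm_order_sketch(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1)
		order++;

	if (size & (size - 1))
		++order;

	return order;
}

/* order_base_2(), as understood: log2 of n rounded up to a power of two. */
static int order_base_2_sketch(unsigned long n)
{
	int order = 0;

	while ((1UL << order) < n)
		order++;

	return order;
}

int main(void)
{
	/* Values of the kind used below: page size / 8, ring size / 8, ... */
	unsigned long samples[] = { 1, 3, 4096 / 8, (1024 * 1024) / 8 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		assert(drm_order_sketch(samples[i]) ==
		       order_base_2_sketch(samples[i]));
		printf("n=%lu -> order %d\n", samples[i],
		       order_base_2_sketch(samples[i]));
	}

	return 0;
}
```

The only input where the two could diverge is n == 0, which the ring-size computations touched here never pass.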
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/radeon/cik.c		14
-rw-r--r--	drivers/gpu/drm/radeon/evergreen.c	4
-rw-r--r--	drivers/gpu/drm/radeon/ni.c		6
-rw-r--r--	drivers/gpu/drm/radeon/r100.c		2
-rw-r--r--	drivers/gpu/drm/radeon/r600.c		14
-rw-r--r--	drivers/gpu/drm/radeon/r600_cp.c	6
-rw-r--r--	drivers/gpu/drm/radeon/radeon_cp.c	6
-rw-r--r--	drivers/gpu/drm/radeon/si.c		14
8 files changed, 33 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 6dacec4e2090..6adbc998349e 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -2535,8 +2535,8 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
 	/* ring 0 - compute and gfx */
 	/* Set ring buffer size */
 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-	rb_bufsz = drm_order(ring->ring_size / 8);
-	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
 #endif
@@ -2915,7 +2915,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
 		/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
 		tmp = RREG32(CP_HPD_EOP_CONTROL);
 		tmp &= ~EOP_SIZE_MASK;
-		tmp |= drm_order(MEC_HPD_SIZE / 8);
+		tmp |= order_base_2(MEC_HPD_SIZE / 8);
 		WREG32(CP_HPD_EOP_CONTROL, tmp);
 	}
 	cik_srbm_select(rdev, 0, 0, 0, 0);
@@ -3030,9 +3030,9 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
 			~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
 		mqd->queue_state.cp_hqd_pq_control |=
-			drm_order(rdev->ring[idx].ring_size / 8);
+			order_base_2(rdev->ring[idx].ring_size / 8);
 		mqd->queue_state.cp_hqd_pq_control |=
-			(drm_order(RADEON_GPU_PAGE_SIZE/8) << 8);
+			(order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
 #ifdef __BIG_ENDIAN
 		mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
 #endif
@@ -3375,7 +3375,7 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev)
 		WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
 
 		/* Set ring buffer size in dwords */
-		rb_bufsz = drm_order(ring->ring_size / 4);
+		rb_bufsz = order_base_2(ring->ring_size / 4);
 		rb_cntl = rb_bufsz << 1;
 #ifdef __BIG_ENDIAN
 		rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
@@ -5030,7 +5030,7 @@ static int cik_irq_init(struct radeon_device *rdev)
 	WREG32(INTERRUPT_CNTL, interrupt_cntl);
 
 	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
-	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
 
 	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
 		      IH_WPTR_OVERFLOW_CLEAR |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 038dcac7670c..b67c9ec7f690 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2881,8 +2881,8 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
 	RREG32(GRBM_SOFT_RESET);
 
 	/* Set ring buffer size */
-	rb_bufsz = drm_order(ring->ring_size / 8);
-	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
 #endif
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 56bd4f3be4fe..5b6e47765656 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1560,8 +1560,8 @@ static int cayman_cp_resume(struct radeon_device *rdev)
 
 		/* Set ring buffer size */
 		ring = &rdev->ring[ridx[i]];
-		rb_cntl = drm_order(ring->ring_size / 8);
-		rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
+		rb_cntl = order_base_2(ring->ring_size / 8);
+		rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
 #ifdef __BIG_ENDIAN
 		rb_cntl |= BUF_SWAP_32BIT;
 #endif
@@ -1720,7 +1720,7 @@ int cayman_dma_resume(struct radeon_device *rdev)
 		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
 
 		/* Set ring buffer size in dwords */
-		rb_bufsz = drm_order(ring->ring_size / 4);
+		rb_bufsz = order_base_2(ring->ring_size / 4);
 		rb_cntl = rb_bufsz << 1;
 #ifdef __BIG_ENDIAN
 		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 75349cdaa84b..5625cf706f0c 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1097,7 +1097,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	}
 
 	/* Align ring size */
-	rb_bufsz = drm_order(ring_size / 8);
+	rb_bufsz = order_base_2(ring_size / 8);
 	ring_size = (1 << (rb_bufsz + 1)) * 4;
 	r100_cp_load_microcode(rdev);
 	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 393880a09412..319e1ee1844a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2413,8 +2413,8 @@ int r600_cp_resume(struct radeon_device *rdev)
 	WREG32(GRBM_SOFT_RESET, 0);
 
 	/* Set ring buffer size */
-	rb_bufsz = drm_order(ring->ring_size / 8);
-	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
 #endif
@@ -2467,7 +2467,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsign
 	int r;
 
 	/* Align ring size */
-	rb_bufsz = drm_order(ring_size / 8);
+	rb_bufsz = order_base_2(ring_size / 8);
 	ring_size = (1 << (rb_bufsz + 1)) * 4;
 	ring->ring_size = ring_size;
 	ring->align_mask = 16 - 1;
@@ -2547,7 +2547,7 @@ int r600_dma_resume(struct radeon_device *rdev)
 	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
 
 	/* Set ring buffer size in dwords */
-	rb_bufsz = drm_order(ring->ring_size / 4);
+	rb_bufsz = order_base_2(ring->ring_size / 4);
 	rb_cntl = rb_bufsz << 1;
 #ifdef __BIG_ENDIAN
 	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
@@ -2656,7 +2656,7 @@ int r600_uvd_rbc_start(struct radeon_device *rdev)
 	WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
 
 	/* Set ring buffer size */
-	rb_bufsz = drm_order(ring->ring_size);
+	rb_bufsz = order_base_2(ring->ring_size);
 	rb_bufsz = (0x1 << 8) | rb_bufsz;
 	WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
 
@@ -3812,7 +3812,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
 	u32 rb_bufsz;
 
 	/* Align ring size */
-	rb_bufsz = drm_order(ring_size / 4);
+	rb_bufsz = order_base_2(ring_size / 4);
 	ring_size = (1 << rb_bufsz) * 4;
 	rdev->ih.ring_size = ring_size;
 	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
@@ -4049,7 +4049,7 @@ int r600_irq_init(struct radeon_device *rdev)
 	WREG32(INTERRUPT_CNTL, interrupt_cntl);
 
 	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
-	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
 
 	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
 		      IH_WPTR_OVERFLOW_CLEAR |
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 1c51c08b1fde..d8eb48bff0ed 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2200,13 +2200,13 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
 	dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
 			      + init->ring_size / sizeof(u32));
 	dev_priv->ring.size = init->ring_size;
-	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+	dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
 
 	dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
-	dev_priv->ring.rptr_update_l2qw = drm_order(/* init->rptr_update */ 4096 / 8);
+	dev_priv->ring.rptr_update_l2qw = order_base_2(/* init->rptr_update */ 4096 / 8);
 
 	dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
-	dev_priv->ring.fetch_size_l2ow = drm_order(/* init->fetch_size */ 32 / 16);
+	dev_priv->ring.fetch_size_l2ow = order_base_2(/* init->fetch_size */ 32 / 16);
 
 	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
 
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index efc4f6441ef4..3cae2bbc1854 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1444,13 +1444,13 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
 	dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
 			      + init->ring_size / sizeof(u32));
 	dev_priv->ring.size = init->ring_size;
-	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+	dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
 
 	dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
-	dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
+	dev_priv->ring.rptr_update_l2qw = order_base_2( /* init->rptr_update */ 4096 / 8);
 
 	dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
-	dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
+	dev_priv->ring.fetch_size_l2ow = order_base_2( /* init->fetch_size */ 32 / 16);
 
 	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
 	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d325280e2f9f..d71037f4f68f 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3383,8 +3383,8 @@ static int si_cp_resume(struct radeon_device *rdev)
 	/* ring 0 - compute and gfx */
 	/* Set ring buffer size */
 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-	rb_bufsz = drm_order(ring->ring_size / 8);
-	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
 #endif
@@ -3416,8 +3416,8 @@ static int si_cp_resume(struct radeon_device *rdev)
 	/* ring1 - compute only */
 	/* Set ring buffer size */
 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
-	rb_bufsz = drm_order(ring->ring_size / 8);
-	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
 #endif
@@ -3442,8 +3442,8 @@ static int si_cp_resume(struct radeon_device *rdev)
 	/* ring2 - compute only */
 	/* Set ring buffer size */
 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
-	rb_bufsz = drm_order(ring->ring_size / 8);
-	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
 #endif
@@ -5651,7 +5651,7 @@ static int si_irq_init(struct radeon_device *rdev)
 	WREG32(INTERRUPT_CNTL, interrupt_cntl);
 
 	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
-	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
 
 	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
 		      IH_WPTR_OVERFLOW_CLEAR |
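
For context on how rb_bufsz is consumed above: in r100_cp_init() and r600_ring_init() the order is taken over ring_size / 8 (the size in 8-byte quad words) and the aligned byte size is then re-derived as (1 << (rb_bufsz + 1)) * 4. A minimal standalone sketch of that round trip, reusing the illustrative order_base_2_sketch() helper from above (not the kernel function):

```c
/* Sketch of the ring-size alignment in r100_cp_init()/r600_ring_init():
 * a 1 MiB request gives order 17 over its quad-word count, and
 * (1 << (17 + 1)) * 4 == 1 MiB again, so power-of-two requests round-trip
 * unchanged while other sizes round up to the next power of two. */
#include <stdio.h>

static unsigned int order_base_2_sketch(unsigned long n)
{
	unsigned int order = 0;

	while ((1UL << order) < n)
		order++;

	return order;
}

int main(void)
{
	unsigned long ring_size = 1024 * 1024;		/* requested bytes */
	unsigned int rb_bufsz = order_base_2_sketch(ring_size / 8);
	unsigned long aligned = (1UL << (rb_bufsz + 1)) * 4;

	printf("rb_bufsz=%u, aligned ring size=%lu bytes\n", rb_bufsz, aligned);
	return 0;
}
```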