drm/radeon: make ib size variable

This avoids wasting ib pool space and avoids a bunch of waits for
previous ibs to finish.

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
This commit is contained in:
Jerome Glisse 2011-12-21 12:13:46 -05:00 committed by Dave Airlie
parent 1f2e124df9
commit 69e130a6a4
6 changed files with 21 additions and 14 deletions

View file

@@ -3708,7 +3708,7 @@ int r100_ib_test(struct radeon_device *rdev)
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib);
+	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256);
 	if (r) {
 		return r;
 	}

View file

@@ -2711,7 +2711,7 @@ int r600_ib_test(struct radeon_device *rdev, int ring)
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ib_get(rdev, ring, &ib);
+	r = radeon_ib_get(rdev, ring, &ib, 256);
 	if (r) {
 		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
 		return r;

View file

@@ -619,16 +619,17 @@ void r600_blit_fini(struct radeon_device *rdev)
 	radeon_bo_unref(&rdev->r600_blit.shader_obj);
 }
 
-static int r600_vb_ib_get(struct radeon_device *rdev)
+static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
 {
 	int r;
-	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->r600_blit.vb_ib);
+	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX,
+			  &rdev->r600_blit.vb_ib, size);
 	if (r) {
 		DRM_ERROR("failed to get IB for vertex buffer\n");
 		return r;
 	}
-	rdev->r600_blit.vb_total = 64*1024;
+	rdev->r600_blit.vb_total = size;
 	rdev->r600_blit.vb_used = 0;
 	return 0;
 }
@@ -693,10 +694,6 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
 	int num_loops = 0;
 	int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
 
-	r = r600_vb_ib_get(rdev);
-	if (r)
-		return r;
-
 	/* num loops */
 	while (num_gpu_pages) {
 		num_gpu_pages -=
@@ -705,6 +702,11 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
 		num_loops++;
 	}
 
+	/* 48 bytes for vertex per loop */
+	r = r600_vb_ib_get(rdev, (num_loops*48)+256);
+	if (r)
+		return r;
+
 	/* calculate number of loops correctly */
 	ring_size = num_loops * dwords_per_loop;
 	ring_size += rdev->r600_blit.ring_size_common;

View file

@@ -638,7 +638,8 @@ struct r600_blit {
 
 void r600_blit_suspend(struct radeon_device *rdev);
 
-int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib **ib);
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+		  struct radeon_ib **ib, unsigned size);
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_pool_init(struct radeon_device *rdev);

View file

@@ -246,7 +246,9 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		radeon_mutex_unlock(&rdev->cs_mutex);
 		return r;
 	}
-	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &parser.ib);
+	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
+	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &parser.ib,
+			  ib_chunk->length_dw * 4);
 	if (r) {
 		DRM_ERROR("Failed to get ib !\n");
 		radeon_cs_parser_fini(&parser, r);
@@ -264,7 +266,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	/* Copy the packet into the IB, the parser will read from the
 	 * input memory (cached) and write to the IB (which can be
 	 * uncached). */
-	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
 	parser.ib->length_dw = ib_chunk->length_dw;
 	r = radeon_cs_parse(&parser);
 	if (r || parser.parser_error) {

View file

@@ -93,13 +93,16 @@ static bool radeon_ib_try_free(struct radeon_device *rdev,
 	return done;
 }
 
-int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib **ib)
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+		  struct radeon_ib **ib, unsigned size)
 {
 	struct radeon_fence *fence;
 	unsigned cretry = 0;
 	int r = 0, i, idx;
 
 	*ib = NULL;
+	/* align size on 256 bytes */
+	size = ALIGN(size, 256);
+
 	r = radeon_fence_create(rdev, &fence, ring);
 	if (r) {
@@ -122,7 +125,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib **ib)
 		if (rdev->ib_pool.ibs[idx].fence == NULL) {
 			r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
 					     &rdev->ib_pool.ibs[idx].sa_bo,
-					     64*1024, 64);
+					     size, 256);
 			if (!r) {
 				*ib = &rdev->ib_pool.ibs[idx];
 				(*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;