Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/radeon: use pointers instead of indexes for CS chunks

Nobody is interested at which index the chunk is. What's needed is
a pointer to the chunk. Remove unused chunk_id field as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Christian König and committed by Alex Deucher
6d2d13dd 466be338

+53 -55
+3 -3
drivers/gpu/drm/radeon/evergreen_cs.c
··· 2661 2661 p->track = NULL; 2662 2662 return r; 2663 2663 } 2664 - } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2664 + } while (p->idx < p->chunk_ib->length_dw); 2665 2665 #if 0 2666 2666 for (r = 0; r < p->ib.length_dw; r++) { 2667 2667 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); ··· 2684 2684 **/ 2685 2685 int evergreen_dma_cs_parse(struct radeon_cs_parser *p) 2686 2686 { 2687 - struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 2687 + struct radeon_cs_chunk *ib_chunk = p->chunk_ib; 2688 2688 struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc; 2689 2689 u32 header, cmd, count, sub_cmd; 2690 2690 volatile u32 *ib = p->ib.ptr; ··· 3100 3100 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); 3101 3101 return -EINVAL; 3102 3102 } 3103 - } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 3103 + } while (p->idx < p->chunk_ib->length_dw); 3104 3104 #if 0 3105 3105 for (r = 0; r < p->ib->length_dw; r++) { 3106 3106 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
+1 -1
drivers/gpu/drm/radeon/r100.c
··· 2061 2061 } 2062 2062 if (r) 2063 2063 return r; 2064 - } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2064 + } while (p->idx < p->chunk_ib->length_dw); 2065 2065 return 0; 2066 2066 } 2067 2067
+1 -1
drivers/gpu/drm/radeon/r300.c
··· 1283 1283 if (r) { 1284 1284 return r; 1285 1285 } 1286 - } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 1286 + } while (p->idx < p->chunk_ib->length_dw); 1287 1287 return 0; 1288 1288 } 1289 1289
+7 -7
drivers/gpu/drm/radeon/r600_cs.c
··· 2316 2316 p->track = NULL; 2317 2317 return r; 2318 2318 } 2319 - } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2319 + } while (p->idx < p->chunk_ib->length_dw); 2320 2320 #if 0 2321 2321 for (r = 0; r < p->ib.length_dw; r++) { 2322 2322 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); ··· 2351 2351 2352 2352 static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p) 2353 2353 { 2354 - if (p->chunk_relocs_idx == -1) { 2354 + if (p->chunk_relocs == NULL) { 2355 2355 return 0; 2356 2356 } 2357 2357 p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL); ··· 2398 2398 /* Copy the packet into the IB, the parser will read from the 2399 2399 * input memory (cached) and write to the IB (which can be 2400 2400 * uncached). */ 2401 - ib_chunk = &parser.chunks[parser.chunk_ib_idx]; 2401 + ib_chunk = parser.chunk_ib; 2402 2402 parser.ib.length_dw = ib_chunk->length_dw; 2403 2403 *l = parser.ib.length_dw; 2404 2404 if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) { ··· 2441 2441 unsigned idx; 2442 2442 2443 2443 *cs_reloc = NULL; 2444 - if (p->chunk_relocs_idx == -1) { 2444 + if (p->chunk_relocs == NULL) { 2445 2445 DRM_ERROR("No relocation chunk !\n"); 2446 2446 return -EINVAL; 2447 2447 } 2448 - relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 2448 + relocs_chunk = p->chunk_relocs; 2449 2449 idx = p->dma_reloc_idx; 2450 2450 if (idx >= p->nrelocs) { 2451 2451 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", ··· 2472 2472 **/ 2473 2473 int r600_dma_cs_parse(struct radeon_cs_parser *p) 2474 2474 { 2475 - struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 2475 + struct radeon_cs_chunk *ib_chunk = p->chunk_ib; 2476 2476 struct radeon_bo_list *src_reloc, *dst_reloc; 2477 2477 u32 header, cmd, count, tiled; 2478 2478 volatile u32 *ib = p->ib.ptr; ··· 2619 2619 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); 2620 2620 return -EINVAL; 2621 2621 } 2622 - } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2622 + } while (p->idx < p->chunk_ib->length_dw); 2623 2623 #if 0 2624 2624 for (r = 0; r < p->ib->length_dw; r++) { 2625 2625 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
+5 -6
drivers/gpu/drm/radeon/radeon.h
··· 1057 1057 * CS. 1058 1058 */ 1059 1059 struct radeon_cs_chunk { 1060 - uint32_t chunk_id; 1061 1060 uint32_t length_dw; 1062 1061 uint32_t *kdata; 1063 1062 void __user *user_ptr; ··· 1079 1080 struct list_head validated; 1080 1081 unsigned dma_reloc_idx; 1081 1082 /* indices of various chunks */ 1082 - int chunk_ib_idx; 1083 - int chunk_relocs_idx; 1084 - int chunk_flags_idx; 1085 - int chunk_const_ib_idx; 1083 + struct radeon_cs_chunk *chunk_ib; 1084 + struct radeon_cs_chunk *chunk_relocs; 1085 + struct radeon_cs_chunk *chunk_flags; 1086 + struct radeon_cs_chunk *chunk_const_ib; 1086 1087 struct radeon_ib ib; 1087 1088 struct radeon_ib const_ib; 1088 1089 void *track; ··· 1096 1097 1097 1098 static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx) 1098 1099 { 1099 - struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; 1100 + struct radeon_cs_chunk *ibc = p->chunk_ib; 1100 1101 1101 1102 if (ibc->kdata) 1102 1103 return ibc->kdata[idx];
+28 -29
drivers/gpu/drm/radeon/radeon_cs.c
··· 81 81 bool need_mmap_lock = false; 82 82 int r; 83 83 84 - if (p->chunk_relocs_idx == -1) { 84 + if (p->chunk_relocs == NULL) { 85 85 return 0; 86 86 } 87 - chunk = &p->chunks[p->chunk_relocs_idx]; 87 + chunk = p->chunk_relocs; 88 88 p->dma_reloc_idx = 0; 89 89 /* FIXME: we assume that each relocs use 4 dwords */ 90 90 p->nrelocs = chunk->length_dw / 4; ··· 265 265 p->idx = 0; 266 266 p->ib.sa_bo = NULL; 267 267 p->const_ib.sa_bo = NULL; 268 - p->chunk_ib_idx = -1; 269 - p->chunk_relocs_idx = -1; 270 - p->chunk_flags_idx = -1; 271 - p->chunk_const_ib_idx = -1; 268 + p->chunk_ib = NULL; 269 + p->chunk_relocs = NULL; 270 + p->chunk_flags = NULL; 271 + p->chunk_const_ib = NULL; 272 272 p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL); 273 273 if (p->chunks_array == NULL) { 274 274 return -ENOMEM; ··· 295 295 return -EFAULT; 296 296 } 297 297 p->chunks[i].length_dw = user_chunk.length_dw; 298 - p->chunks[i].chunk_id = user_chunk.chunk_id; 299 - if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { 300 - p->chunk_relocs_idx = i; 298 + if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) { 299 + p->chunk_relocs = &p->chunks[i]; 301 300 } 302 - if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { 303 - p->chunk_ib_idx = i; 301 + if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) { 302 + p->chunk_ib = &p->chunks[i]; 304 303 /* zero length IB isn't useful */ 305 304 if (p->chunks[i].length_dw == 0) 306 305 return -EINVAL; 307 306 } 308 - if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) { 309 - p->chunk_const_ib_idx = i; 307 + if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) { 308 + p->chunk_const_ib = &p->chunks[i]; 310 309 /* zero length CONST IB isn't useful */ 311 310 if (p->chunks[i].length_dw == 0) 312 311 return -EINVAL; 313 312 } 314 - if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { 315 - p->chunk_flags_idx = i; 313 + if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) { 314 + p->chunk_flags = &p->chunks[i]; 316 315 /* zero length flags aren't useful */ 317 316 if (p->chunks[i].length_dw == 0) 318 317 return -EINVAL; ··· 320 321 size = p->chunks[i].length_dw; 321 322 cdata = (void __user *)(unsigned long)user_chunk.chunk_data; 322 323 p->chunks[i].user_ptr = cdata; 323 - if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) 324 + if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) 324 325 continue; 325 326 326 - if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { 327 + if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) { 327 328 if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP)) 328 329 continue; 329 330 } ··· 336 337 if (copy_from_user(p->chunks[i].kdata, cdata, size)) { 337 338 return -EFAULT; 338 339 } 339 - if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { 340 + if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) { 340 341 p->cs_flags = p->chunks[i].kdata[0]; 341 342 if (p->chunks[i].length_dw > 1) 342 343 ring = p->chunks[i].kdata[1]; ··· 442 443 { 443 444 int r; 444 445 445 - if (parser->chunk_ib_idx == -1) 446 + if (parser->chunk_ib == NULL) 446 447 return 0; 447 448 448 449 if (parser->cs_flags & RADEON_CS_USE_VM) ··· 526 527 struct radeon_vm *vm = &fpriv->vm; 527 528 int r; 528 529 529 - if (parser->chunk_ib_idx == -1) 530 + if (parser->chunk_ib == NULL) 530 531 return 0; 531 532 if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) 532 533 return 0; ··· 560 561 } 561 562 562 563 if ((rdev->family >= CHIP_TAHITI) && 563 - (parser->chunk_const_ib_idx != -1)) { 564 + (parser->chunk_const_ib != NULL)) { 564 565 r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true); 565 566 } else { 566 567 r = radeon_ib_schedule(rdev, &parser->ib, NULL, true); ··· 587 588 struct radeon_vm *vm = NULL; 588 589 int r; 589 590 590 - if (parser->chunk_ib_idx == -1) 591 + if (parser->chunk_ib == NULL) 591 592 return 0; 592 593 593 594 if (parser->cs_flags & RADEON_CS_USE_VM) { ··· 595 596 vm = &fpriv->vm; 596 597 597 598 if ((rdev->family >= CHIP_TAHITI) && 598 - (parser->chunk_const_ib_idx != -1)) { 599 - ib_chunk = &parser->chunks[parser->chunk_const_ib_idx]; 599 + (parser->chunk_const_ib != NULL)) { 600 + ib_chunk = parser->chunk_const_ib; 600 601 if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) { 601 602 DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw); 602 603 return -EINVAL; ··· 615 616 return -EFAULT; 616 617 } 617 618 618 - ib_chunk = &parser->chunks[parser->chunk_ib_idx]; 619 + ib_chunk = parser->chunk_ib; 619 620 if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) { 620 621 DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw); 621 622 return -EINVAL; 622 623 } 623 624 } 624 - ib_chunk = &parser->chunks[parser->chunk_ib_idx]; 625 + ib_chunk = parser->chunk_ib; 625 626 626 627 r = radeon_ib_get(rdev, parser->ring, &parser->ib, 627 628 vm, ib_chunk->length_dw * 4); ··· 713 714 struct radeon_cs_packet *pkt, 714 715 unsigned idx) 715 716 { 716 - struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 717 + struct radeon_cs_chunk *ib_chunk = p->chunk_ib; 717 718 struct radeon_device *rdev = p->rdev; 718 719 uint32_t header; ··· 815 816 unsigned idx; 816 817 int r; 817 818 818 - if (p->chunk_relocs_idx == -1) { 819 + if (p->chunk_relocs == NULL) { 819 820 DRM_ERROR("No relocation chunk !\n"); 820 821 return -EINVAL; 821 822 } 822 823 *cs_reloc = NULL; 823 - relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 824 + relocs_chunk = p->chunk_relocs; 824 825 r = radeon_cs_packet_parse(p, &p3reloc, p->idx); 825 826 if (r) 826 827 return r;
+1 -1
drivers/gpu/drm/radeon/radeon_trace.h
··· 38 38 39 39 TP_fast_assign( 40 40 __entry->ring = p->ring; 41 - __entry->dw = p->chunks[p->chunk_ib_idx].length_dw; 41 + __entry->dw = p->chunk_ib->length_dw; 42 42 __entry->fences = radeon_fence_count_emitted( 43 43 p->rdev, p->ring); 44 44 ),
+5 -5
drivers/gpu/drm/radeon/radeon_uvd.c
··· 493 493 uint64_t start, end; 494 494 int r; 495 495 496 - relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 496 + relocs_chunk = p->chunk_relocs; 497 497 offset = radeon_get_ib_value(p, data0); 498 498 idx = radeon_get_ib_value(p, data1); 499 499 if (idx >= relocs_chunk->length_dw) { ··· 610 610 [0x00000003] = 2048, 611 611 }; 612 612 613 - if (p->chunks[p->chunk_ib_idx].length_dw % 16) { 613 + if (p->chunk_ib->length_dw % 16) { 614 614 DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n", 615 - p->chunks[p->chunk_ib_idx].length_dw); 615 + p->chunk_ib->length_dw); 616 616 return -EINVAL; 617 617 } 618 618 619 - if (p->chunk_relocs_idx == -1) { 619 + if (p->chunk_relocs == NULL) { 620 620 DRM_ERROR("No relocation chunk !\n"); 621 621 return -EINVAL; 622 622 } ··· 640 640 DRM_ERROR("Unknown packet type %d !\n", pkt.type); 641 641 return -EINVAL; 642 642 } 643 - } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 643 + } while (p->idx < p->chunk_ib->length_dw); 644 644 645 645 if (!has_msg_cmd) { 646 646 DRM_ERROR("UVD-IBs need a msg command!\n");
+2 -2
drivers/gpu/drm/radeon/radeon_vce.c
··· 457 457 uint64_t start, end, offset; 458 458 unsigned idx; 459 459 460 - relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 460 + relocs_chunk = p->chunk_relocs; 461 461 offset = radeon_get_ib_value(p, lo); 462 462 idx = radeon_get_ib_value(p, hi); 463 463 ··· 534 534 uint32_t *size = &tmp; 535 535 int i, r; 536 536 537 - while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { 537 + while (p->idx < p->chunk_ib->length_dw) { 538 538 uint32_t len = radeon_get_ib_value(p, p->idx); 539 539 uint32_t cmd = radeon_get_ib_value(p, p->idx + 1); 540 540