Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/via: Embed via_verifier in via_dri1

Embed the contents of the via_verifier header file in via_drv.h and the code in via_dri1.c.
All functions are made static as there are no more external users.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Cc: Kevin Brace <kevinbrace@bracecomputerlab.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220713170202.1798216-9-sam@ravnborg.org

+1099 -1175
+1 -1
drivers/gpu/drm/via/Makefile
··· 3 3 # Makefile for the drm device driver. This driver provides support for the 4 4 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 5 5 6 - via-y := via_dri1.o via_verifier.o 6 + via-y := via_dri1.o 7 7 8 8 obj-$(CONFIG_DRM_VIA) +=via.o
+1071
drivers/gpu/drm/via/via_dri1.c
··· 201 201 uint32_t next; 202 202 } drm_via_descriptor_t; 203 203 204 + typedef enum { 205 + state_command, 206 + state_header2, 207 + state_header1, 208 + state_vheader5, 209 + state_vheader6, 210 + state_error 211 + } verifier_state_t; 204 212 213 + typedef enum { 214 + no_check = 0, 215 + check_for_header2, 216 + check_for_header1, 217 + check_for_header2_err, 218 + check_for_header1_err, 219 + check_for_fire, 220 + check_z_buffer_addr0, 221 + check_z_buffer_addr1, 222 + check_z_buffer_addr_mode, 223 + check_destination_addr0, 224 + check_destination_addr1, 225 + check_destination_addr_mode, 226 + check_for_dummy, 227 + check_for_dd, 228 + check_texture_addr0, 229 + check_texture_addr1, 230 + check_texture_addr2, 231 + check_texture_addr3, 232 + check_texture_addr4, 233 + check_texture_addr5, 234 + check_texture_addr6, 235 + check_texture_addr7, 236 + check_texture_addr8, 237 + check_texture_addr_mode, 238 + check_for_vertex_count, 239 + check_number_texunits, 240 + forbidden_command 241 + } hazard_t; 242 + 243 + /* 244 + * Associates each hazard above with a possible multi-command 245 + * sequence. For example an address that is split over multiple 246 + * commands and that needs to be checked at the first command 247 + * that does not include any part of the address. 
248 + */ 249 + 250 + static drm_via_sequence_t seqs[] = { 251 + no_sequence, 252 + no_sequence, 253 + no_sequence, 254 + no_sequence, 255 + no_sequence, 256 + no_sequence, 257 + z_address, 258 + z_address, 259 + z_address, 260 + dest_address, 261 + dest_address, 262 + dest_address, 263 + no_sequence, 264 + no_sequence, 265 + tex_address, 266 + tex_address, 267 + tex_address, 268 + tex_address, 269 + tex_address, 270 + tex_address, 271 + tex_address, 272 + tex_address, 273 + tex_address, 274 + tex_address, 275 + no_sequence 276 + }; 277 + 278 + typedef struct { 279 + unsigned int code; 280 + hazard_t hz; 281 + } hz_init_t; 282 + 283 + static hz_init_t init_table1[] = { 284 + {0xf2, check_for_header2_err}, 285 + {0xf0, check_for_header1_err}, 286 + {0xee, check_for_fire}, 287 + {0xcc, check_for_dummy}, 288 + {0xdd, check_for_dd}, 289 + {0x00, no_check}, 290 + {0x10, check_z_buffer_addr0}, 291 + {0x11, check_z_buffer_addr1}, 292 + {0x12, check_z_buffer_addr_mode}, 293 + {0x13, no_check}, 294 + {0x14, no_check}, 295 + {0x15, no_check}, 296 + {0x23, no_check}, 297 + {0x24, no_check}, 298 + {0x33, no_check}, 299 + {0x34, no_check}, 300 + {0x35, no_check}, 301 + {0x36, no_check}, 302 + {0x37, no_check}, 303 + {0x38, no_check}, 304 + {0x39, no_check}, 305 + {0x3A, no_check}, 306 + {0x3B, no_check}, 307 + {0x3C, no_check}, 308 + {0x3D, no_check}, 309 + {0x3E, no_check}, 310 + {0x40, check_destination_addr0}, 311 + {0x41, check_destination_addr1}, 312 + {0x42, check_destination_addr_mode}, 313 + {0x43, no_check}, 314 + {0x44, no_check}, 315 + {0x50, no_check}, 316 + {0x51, no_check}, 317 + {0x52, no_check}, 318 + {0x53, no_check}, 319 + {0x54, no_check}, 320 + {0x55, no_check}, 321 + {0x56, no_check}, 322 + {0x57, no_check}, 323 + {0x58, no_check}, 324 + {0x70, no_check}, 325 + {0x71, no_check}, 326 + {0x78, no_check}, 327 + {0x79, no_check}, 328 + {0x7A, no_check}, 329 + {0x7B, no_check}, 330 + {0x7C, no_check}, 331 + {0x7D, check_for_vertex_count} 332 + }; 333 + 334 + 
static hz_init_t init_table2[] = { 335 + {0xf2, check_for_header2_err}, 336 + {0xf0, check_for_header1_err}, 337 + {0xee, check_for_fire}, 338 + {0xcc, check_for_dummy}, 339 + {0x00, check_texture_addr0}, 340 + {0x01, check_texture_addr0}, 341 + {0x02, check_texture_addr0}, 342 + {0x03, check_texture_addr0}, 343 + {0x04, check_texture_addr0}, 344 + {0x05, check_texture_addr0}, 345 + {0x06, check_texture_addr0}, 346 + {0x07, check_texture_addr0}, 347 + {0x08, check_texture_addr0}, 348 + {0x09, check_texture_addr0}, 349 + {0x20, check_texture_addr1}, 350 + {0x21, check_texture_addr1}, 351 + {0x22, check_texture_addr1}, 352 + {0x23, check_texture_addr4}, 353 + {0x2B, check_texture_addr3}, 354 + {0x2C, check_texture_addr3}, 355 + {0x2D, check_texture_addr3}, 356 + {0x2E, check_texture_addr3}, 357 + {0x2F, check_texture_addr3}, 358 + {0x30, check_texture_addr3}, 359 + {0x31, check_texture_addr3}, 360 + {0x32, check_texture_addr3}, 361 + {0x33, check_texture_addr3}, 362 + {0x34, check_texture_addr3}, 363 + {0x4B, check_texture_addr5}, 364 + {0x4C, check_texture_addr6}, 365 + {0x51, check_texture_addr7}, 366 + {0x52, check_texture_addr8}, 367 + {0x77, check_texture_addr2}, 368 + {0x78, no_check}, 369 + {0x79, no_check}, 370 + {0x7A, no_check}, 371 + {0x7B, check_texture_addr_mode}, 372 + {0x7C, no_check}, 373 + {0x7D, no_check}, 374 + {0x7E, no_check}, 375 + {0x7F, no_check}, 376 + {0x80, no_check}, 377 + {0x81, no_check}, 378 + {0x82, no_check}, 379 + {0x83, no_check}, 380 + {0x85, no_check}, 381 + {0x86, no_check}, 382 + {0x87, no_check}, 383 + {0x88, no_check}, 384 + {0x89, no_check}, 385 + {0x8A, no_check}, 386 + {0x90, no_check}, 387 + {0x91, no_check}, 388 + {0x92, no_check}, 389 + {0x93, no_check} 390 + }; 391 + 392 + static hz_init_t init_table3[] = { 393 + {0xf2, check_for_header2_err}, 394 + {0xf0, check_for_header1_err}, 395 + {0xcc, check_for_dummy}, 396 + {0x00, check_number_texunits} 397 + }; 398 + 399 + static hazard_t table1[256]; 400 + static hazard_t 
table2[256]; 401 + static hazard_t table3[256]; 402 + 403 + static __inline__ int 404 + eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words) 405 + { 406 + if ((buf_end - *buf) >= num_words) { 407 + *buf += num_words; 408 + return 0; 409 + } 410 + DRM_ERROR("Illegal termination of DMA command buffer\n"); 411 + return 1; 412 + } 413 + 414 + /* 415 + * Partially stolen from drm_memory.h 416 + */ 417 + 418 + static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq, 419 + unsigned long offset, 420 + unsigned long size, 421 + struct drm_device *dev) 422 + { 423 + struct drm_map_list *r_list; 424 + drm_local_map_t *map = seq->map_cache; 425 + 426 + if (map && map->offset <= offset 427 + && (offset + size) <= (map->offset + map->size)) { 428 + return map; 429 + } 430 + 431 + list_for_each_entry(r_list, &dev->maplist, head) { 432 + map = r_list->map; 433 + if (!map) 434 + continue; 435 + if (map->offset <= offset 436 + && (offset + size) <= (map->offset + map->size) 437 + && !(map->flags & _DRM_RESTRICTED) 438 + && (map->type == _DRM_AGP)) { 439 + seq->map_cache = map; 440 + return map; 441 + } 442 + } 443 + return NULL; 444 + } 445 + 446 + /* 447 + * Require that all AGP texture levels reside in the same AGP map which should 448 + * be mappable by the client. This is not a big restriction. 449 + * FIXME: To actually enforce this security policy strictly, drm_rmmap 450 + * would have to wait for dma quiescent before removing an AGP map. 451 + * The via_drm_lookup_agp_map call in reality seems to take 452 + * very little CPU time. 
453 + */ 454 + 455 + static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq) 456 + { 457 + switch (cur_seq->unfinished) { 458 + case z_address: 459 + DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr); 460 + break; 461 + case dest_address: 462 + DRM_DEBUG("Destination start address is 0x%x\n", 463 + cur_seq->d_addr); 464 + break; 465 + case tex_address: 466 + if (cur_seq->agp_texture) { 467 + unsigned start = 468 + cur_seq->tex_level_lo[cur_seq->texture]; 469 + unsigned end = cur_seq->tex_level_hi[cur_seq->texture]; 470 + unsigned long lo = ~0, hi = 0, tmp; 471 + uint32_t *addr, *pitch, *height, tex; 472 + unsigned i; 473 + int npot; 474 + 475 + if (end > 9) 476 + end = 9; 477 + if (start > 9) 478 + start = 9; 479 + 480 + addr = 481 + &(cur_seq->t_addr[tex = cur_seq->texture][start]); 482 + pitch = &(cur_seq->pitch[tex][start]); 483 + height = &(cur_seq->height[tex][start]); 484 + npot = cur_seq->tex_npot[tex]; 485 + for (i = start; i <= end; ++i) { 486 + tmp = *addr++; 487 + if (tmp < lo) 488 + lo = tmp; 489 + if (i == 0 && npot) 490 + tmp += (*height++ * *pitch++); 491 + else 492 + tmp += (*height++ << *pitch++); 493 + if (tmp > hi) 494 + hi = tmp; 495 + } 496 + 497 + if (!via_drm_lookup_agp_map 498 + (cur_seq, lo, hi - lo, cur_seq->dev)) { 499 + DRM_ERROR 500 + ("AGP texture is not in allowed map\n"); 501 + return 2; 502 + } 503 + } 504 + break; 505 + default: 506 + break; 507 + } 508 + cur_seq->unfinished = no_sequence; 509 + return 0; 510 + } 511 + 512 + static __inline__ int 513 + investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t *cur_seq) 514 + { 515 + register uint32_t tmp, *tmp_addr; 516 + 517 + if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) { 518 + int ret; 519 + if ((ret = finish_current_sequence(cur_seq))) 520 + return ret; 521 + } 522 + 523 + switch (hz) { 524 + case check_for_header2: 525 + if (cmd == HALCYON_HEADER2) 526 + return 1; 527 + return 0; 528 + case check_for_header1: 529 + if ((cmd & 
HALCYON_HEADER1MASK) == HALCYON_HEADER1) 530 + return 1; 531 + return 0; 532 + case check_for_header2_err: 533 + if (cmd == HALCYON_HEADER2) 534 + return 1; 535 + DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n"); 536 + break; 537 + case check_for_header1_err: 538 + if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) 539 + return 1; 540 + DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n"); 541 + break; 542 + case check_for_fire: 543 + if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD) 544 + return 1; 545 + DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n"); 546 + break; 547 + case check_for_dummy: 548 + if (HC_DUMMY == cmd) 549 + return 0; 550 + DRM_ERROR("Illegal DMA HC_DUMMY command\n"); 551 + break; 552 + case check_for_dd: 553 + if (0xdddddddd == cmd) 554 + return 0; 555 + DRM_ERROR("Illegal DMA 0xdddddddd command\n"); 556 + break; 557 + case check_z_buffer_addr0: 558 + cur_seq->unfinished = z_address; 559 + cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) | 560 + (cmd & 0x00FFFFFF); 561 + return 0; 562 + case check_z_buffer_addr1: 563 + cur_seq->unfinished = z_address; 564 + cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) | 565 + ((cmd & 0xFF) << 24); 566 + return 0; 567 + case check_z_buffer_addr_mode: 568 + cur_seq->unfinished = z_address; 569 + if ((cmd & 0x0000C000) == 0) 570 + return 0; 571 + DRM_ERROR("Attempt to place Z buffer in system memory\n"); 572 + return 2; 573 + case check_destination_addr0: 574 + cur_seq->unfinished = dest_address; 575 + cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) | 576 + (cmd & 0x00FFFFFF); 577 + return 0; 578 + case check_destination_addr1: 579 + cur_seq->unfinished = dest_address; 580 + cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) | 581 + ((cmd & 0xFF) << 24); 582 + return 0; 583 + case check_destination_addr_mode: 584 + cur_seq->unfinished = dest_address; 585 + if ((cmd & 0x0000C000) == 0) 586 + return 0; 587 + DRM_ERROR 588 + ("Attempt to place 3D drawing buffer in system memory\n"); 589 + return 2; 590 + case 
check_texture_addr0: 591 + cur_seq->unfinished = tex_address; 592 + tmp = (cmd >> 24); 593 + tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp]; 594 + *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF); 595 + return 0; 596 + case check_texture_addr1: 597 + cur_seq->unfinished = tex_address; 598 + tmp = ((cmd >> 24) - 0x20); 599 + tmp += tmp << 1; 600 + tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp]; 601 + *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24); 602 + tmp_addr++; 603 + *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16); 604 + tmp_addr++; 605 + *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8); 606 + return 0; 607 + case check_texture_addr2: 608 + cur_seq->unfinished = tex_address; 609 + cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F; 610 + cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6; 611 + return 0; 612 + case check_texture_addr3: 613 + cur_seq->unfinished = tex_address; 614 + tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit); 615 + if (tmp == 0 && 616 + (cmd & HC_HTXnEnPit_MASK)) { 617 + cur_seq->pitch[cur_seq->texture][tmp] = 618 + (cmd & HC_HTXnLnPit_MASK); 619 + cur_seq->tex_npot[cur_seq->texture] = 1; 620 + } else { 621 + cur_seq->pitch[cur_seq->texture][tmp] = 622 + (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT; 623 + cur_seq->tex_npot[cur_seq->texture] = 0; 624 + if (cmd & 0x000FFFFF) { 625 + DRM_ERROR 626 + ("Unimplemented texture level 0 pitch mode.\n"); 627 + return 2; 628 + } 629 + } 630 + return 0; 631 + case check_texture_addr4: 632 + cur_seq->unfinished = tex_address; 633 + tmp_addr = &cur_seq->t_addr[cur_seq->texture][9]; 634 + *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24); 635 + return 0; 636 + case check_texture_addr5: 637 + case check_texture_addr6: 638 + cur_seq->unfinished = tex_address; 639 + /* 640 + * Texture width. We don't care since we have the pitch. 
641 + */ 642 + return 0; 643 + case check_texture_addr7: 644 + cur_seq->unfinished = tex_address; 645 + tmp_addr = &(cur_seq->height[cur_seq->texture][0]); 646 + tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20); 647 + tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16); 648 + tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12); 649 + tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8); 650 + tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4); 651 + tmp_addr[0] = 1 << (cmd & 0x0000000F); 652 + return 0; 653 + case check_texture_addr8: 654 + cur_seq->unfinished = tex_address; 655 + tmp_addr = &(cur_seq->height[cur_seq->texture][0]); 656 + tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12); 657 + tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8); 658 + tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4); 659 + tmp_addr[6] = 1 << (cmd & 0x0000000F); 660 + return 0; 661 + case check_texture_addr_mode: 662 + cur_seq->unfinished = tex_address; 663 + if (2 == (tmp = cmd & 0x00000003)) { 664 + DRM_ERROR 665 + ("Attempt to fetch texture from system memory.\n"); 666 + return 2; 667 + } 668 + cur_seq->agp_texture = (tmp == 3); 669 + cur_seq->tex_palette_size[cur_seq->texture] = 670 + (cmd >> 16) & 0x000000007; 671 + return 0; 672 + case check_for_vertex_count: 673 + cur_seq->vertex_count = cmd & 0x0000FFFF; 674 + return 0; 675 + case check_number_texunits: 676 + cur_seq->multitex = (cmd >> 3) & 1; 677 + return 0; 678 + default: 679 + DRM_ERROR("Illegal DMA data: 0x%x\n", cmd); 680 + return 2; 681 + } 682 + return 2; 683 + } 684 + 685 + static __inline__ int 686 + via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end, 687 + drm_via_state_t *cur_seq) 688 + { 689 + drm_via_private_t *dev_priv = 690 + (drm_via_private_t *) cur_seq->dev->dev_private; 691 + uint32_t a_fire, bcmd, dw_count; 692 + int ret = 0; 693 + int have_fire; 694 + const uint32_t *buf = *buffer; 695 + 696 + while (buf < buf_end) { 697 + have_fire = 0; 698 + if ((buf_end - buf) < 2) { 699 + DRM_ERROR 700 + ("Unexpected termination of 
primitive list.\n"); 701 + ret = 1; 702 + break; 703 + } 704 + if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB) 705 + break; 706 + bcmd = *buf++; 707 + if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) { 708 + DRM_ERROR("Expected Vertex List A command, got 0x%x\n", 709 + *buf); 710 + ret = 1; 711 + break; 712 + } 713 + a_fire = 714 + *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK | 715 + HC_HE3Fire_MASK; 716 + 717 + /* 718 + * How many dwords per vertex ? 719 + */ 720 + 721 + if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) { 722 + DRM_ERROR("Illegal B command vertex data for AGP.\n"); 723 + ret = 1; 724 + break; 725 + } 726 + 727 + dw_count = 0; 728 + if (bcmd & (1 << 7)) 729 + dw_count += (cur_seq->multitex) ? 2 : 1; 730 + if (bcmd & (1 << 8)) 731 + dw_count += (cur_seq->multitex) ? 2 : 1; 732 + if (bcmd & (1 << 9)) 733 + dw_count++; 734 + if (bcmd & (1 << 10)) 735 + dw_count++; 736 + if (bcmd & (1 << 11)) 737 + dw_count++; 738 + if (bcmd & (1 << 12)) 739 + dw_count++; 740 + if (bcmd & (1 << 13)) 741 + dw_count++; 742 + if (bcmd & (1 << 14)) 743 + dw_count++; 744 + 745 + while (buf < buf_end) { 746 + if (*buf == a_fire) { 747 + if (dev_priv->num_fire_offsets >= 748 + VIA_FIRE_BUF_SIZE) { 749 + DRM_ERROR("Fire offset buffer full.\n"); 750 + ret = 1; 751 + break; 752 + } 753 + dev_priv->fire_offsets[dev_priv-> 754 + num_fire_offsets++] = 755 + buf; 756 + have_fire = 1; 757 + buf++; 758 + if (buf < buf_end && *buf == a_fire) 759 + buf++; 760 + break; 761 + } 762 + if ((*buf == HALCYON_HEADER2) || 763 + ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) { 764 + DRM_ERROR("Missing Vertex Fire command, " 765 + "Stray Vertex Fire command or verifier " 766 + "lost sync.\n"); 767 + ret = 1; 768 + break; 769 + } 770 + if ((ret = eat_words(&buf, buf_end, dw_count))) 771 + break; 772 + } 773 + if (buf >= buf_end && !have_fire) { 774 + DRM_ERROR("Missing Vertex Fire command or verifier " 775 + "lost sync.\n"); 776 + ret = 1; 777 + break; 778 + } 779 + if (cur_seq->agp && ((buf - 
cur_seq->buf_start) & 0x01)) { 780 + DRM_ERROR("AGP Primitive list end misaligned.\n"); 781 + ret = 1; 782 + break; 783 + } 784 + } 785 + *buffer = buf; 786 + return ret; 787 + } 788 + 789 + static __inline__ verifier_state_t 790 + via_check_header2(uint32_t const **buffer, const uint32_t *buf_end, 791 + drm_via_state_t *hc_state) 792 + { 793 + uint32_t cmd; 794 + int hz_mode; 795 + hazard_t hz; 796 + const uint32_t *buf = *buffer; 797 + const hazard_t *hz_table; 798 + 799 + if ((buf_end - buf) < 2) { 800 + DRM_ERROR 801 + ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n"); 802 + return state_error; 803 + } 804 + buf++; 805 + cmd = (*buf++ & 0xFFFF0000) >> 16; 806 + 807 + switch (cmd) { 808 + case HC_ParaType_CmdVdata: 809 + if (via_check_prim_list(&buf, buf_end, hc_state)) 810 + return state_error; 811 + *buffer = buf; 812 + return state_command; 813 + case HC_ParaType_NotTex: 814 + hz_table = table1; 815 + break; 816 + case HC_ParaType_Tex: 817 + hc_state->texture = 0; 818 + hz_table = table2; 819 + break; 820 + case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)): 821 + hc_state->texture = 1; 822 + hz_table = table2; 823 + break; 824 + case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)): 825 + hz_table = table3; 826 + break; 827 + case HC_ParaType_Auto: 828 + if (eat_words(&buf, buf_end, 2)) 829 + return state_error; 830 + *buffer = buf; 831 + return state_command; 832 + case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)): 833 + if (eat_words(&buf, buf_end, 32)) 834 + return state_error; 835 + *buffer = buf; 836 + return state_command; 837 + case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)): 838 + case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)): 839 + DRM_ERROR("Texture palettes are rejected because of " 840 + "lack of info how to determine their size.\n"); 841 + return state_error; 842 + case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)): 843 + DRM_ERROR("Fog factor palettes are rejected because of " 844 + "lack of info how to 
determine their size.\n"); 845 + return state_error; 846 + default: 847 + 848 + /* 849 + * There are some unimplemented HC_ParaTypes here, that 850 + * need to be implemented if the Mesa driver is extended. 851 + */ 852 + 853 + DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 " 854 + "DMA subcommand: 0x%x. Previous dword: 0x%x\n", 855 + cmd, *(buf - 2)); 856 + *buffer = buf; 857 + return state_error; 858 + } 859 + 860 + while (buf < buf_end) { 861 + cmd = *buf++; 862 + if ((hz = hz_table[cmd >> 24])) { 863 + if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) { 864 + if (hz_mode == 1) { 865 + buf--; 866 + break; 867 + } 868 + return state_error; 869 + } 870 + } else if (hc_state->unfinished && 871 + finish_current_sequence(hc_state)) { 872 + return state_error; 873 + } 874 + } 875 + if (hc_state->unfinished && finish_current_sequence(hc_state)) 876 + return state_error; 877 + *buffer = buf; 878 + return state_command; 879 + } 880 + 881 + static __inline__ verifier_state_t 882 + via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer, 883 + const uint32_t *buf_end, int *fire_count) 884 + { 885 + uint32_t cmd; 886 + const uint32_t *buf = *buffer; 887 + const uint32_t *next_fire; 888 + int burst = 0; 889 + 890 + next_fire = dev_priv->fire_offsets[*fire_count]; 891 + buf++; 892 + cmd = (*buf & 0xFFFF0000) >> 16; 893 + via_write(dev_priv, HC_REG_TRANS_SET + HC_REG_BASE, *buf++); 894 + switch (cmd) { 895 + case HC_ParaType_CmdVdata: 896 + while ((buf < buf_end) && 897 + (*fire_count < dev_priv->num_fire_offsets) && 898 + (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) { 899 + while (buf <= next_fire) { 900 + via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE + 901 + (burst & 63), *buf++); 902 + burst += 4; 903 + } 904 + if ((buf < buf_end) 905 + && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) 906 + buf++; 907 + 908 + if (++(*fire_count) < dev_priv->num_fire_offsets) 909 + next_fire = dev_priv->fire_offsets[*fire_count]; 910 + } 911 + break; 912 + 
default: 913 + while (buf < buf_end) { 914 + 915 + if (*buf == HC_HEADER2 || 916 + (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 || 917 + (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 || 918 + (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6) 919 + break; 920 + 921 + via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE + 922 + (burst & 63), *buf++); 923 + burst += 4; 924 + } 925 + } 926 + *buffer = buf; 927 + return state_command; 928 + } 929 + 930 + static __inline__ int verify_mmio_address(uint32_t address) 931 + { 932 + if ((address > 0x3FF) && (address < 0xC00)) { 933 + DRM_ERROR("Invalid VIDEO DMA command. " 934 + "Attempt to access 3D- or command burst area.\n"); 935 + return 1; 936 + } else if ((address > 0xCFF) && (address < 0x1300)) { 937 + DRM_ERROR("Invalid VIDEO DMA command. " 938 + "Attempt to access PCI DMA area.\n"); 939 + return 1; 940 + } else if (address > 0x13FF) { 941 + DRM_ERROR("Invalid VIDEO DMA command. " 942 + "Attempt to access VGA registers.\n"); 943 + return 1; 944 + } 945 + return 0; 946 + } 947 + 948 + static __inline__ int 949 + verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end, 950 + uint32_t dwords) 951 + { 952 + const uint32_t *buf = *buffer; 953 + 954 + if (buf_end - buf < dwords) { 955 + DRM_ERROR("Illegal termination of video command.\n"); 956 + return 1; 957 + } 958 + while (dwords--) { 959 + if (*buf++) { 960 + DRM_ERROR("Illegal video command tail.\n"); 961 + return 1; 962 + } 963 + } 964 + *buffer = buf; 965 + return 0; 966 + } 967 + 968 + static __inline__ verifier_state_t 969 + via_check_header1(uint32_t const **buffer, const uint32_t * buf_end) 970 + { 971 + uint32_t cmd; 972 + const uint32_t *buf = *buffer; 973 + verifier_state_t ret = state_command; 974 + 975 + while (buf < buf_end) { 976 + cmd = *buf; 977 + if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) && 978 + (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) { 979 + if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1) 980 + break; 981 + DRM_ERROR("Invalid 
HALCYON_HEADER1 command. " 982 + "Attempt to access 3D- or command burst area.\n"); 983 + ret = state_error; 984 + break; 985 + } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) { 986 + if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1) 987 + break; 988 + DRM_ERROR("Invalid HALCYON_HEADER1 command. " 989 + "Attempt to access VGA registers.\n"); 990 + ret = state_error; 991 + break; 992 + } else { 993 + buf += 2; 994 + } 995 + } 996 + *buffer = buf; 997 + return ret; 998 + } 999 + 1000 + static __inline__ verifier_state_t 1001 + via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer, 1002 + const uint32_t *buf_end) 1003 + { 1004 + register uint32_t cmd; 1005 + const uint32_t *buf = *buffer; 1006 + 1007 + while (buf < buf_end) { 1008 + cmd = *buf; 1009 + if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1) 1010 + break; 1011 + via_write(dev_priv, (cmd & ~HALCYON_HEADER1MASK) << 2, *++buf); 1012 + buf++; 1013 + } 1014 + *buffer = buf; 1015 + return state_command; 1016 + } 1017 + 1018 + static __inline__ verifier_state_t 1019 + via_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end) 1020 + { 1021 + uint32_t data; 1022 + const uint32_t *buf = *buffer; 1023 + 1024 + if (buf_end - buf < 4) { 1025 + DRM_ERROR("Illegal termination of video header5 command\n"); 1026 + return state_error; 1027 + } 1028 + 1029 + data = *buf++ & ~VIA_VIDEOMASK; 1030 + if (verify_mmio_address(data)) 1031 + return state_error; 1032 + 1033 + data = *buf++; 1034 + if (*buf++ != 0x00F50000) { 1035 + DRM_ERROR("Illegal header5 header data\n"); 1036 + return state_error; 1037 + } 1038 + if (*buf++ != 0x00000000) { 1039 + DRM_ERROR("Illegal header5 header data\n"); 1040 + return state_error; 1041 + } 1042 + if (eat_words(&buf, buf_end, data)) 1043 + return state_error; 1044 + if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3))) 1045 + return state_error; 1046 + *buffer = buf; 1047 + return state_command; 1048 + 1049 + } 1050 + 1051 + static __inline__ 
verifier_state_t 1052 + via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer, 1053 + const uint32_t *buf_end) 1054 + { 1055 + uint32_t addr, count, i; 1056 + const uint32_t *buf = *buffer; 1057 + 1058 + addr = *buf++ & ~VIA_VIDEOMASK; 1059 + i = count = *buf; 1060 + buf += 3; 1061 + while (i--) 1062 + via_write(dev_priv, addr, *buf++); 1063 + if (count & 3) 1064 + buf += 4 - (count & 3); 1065 + *buffer = buf; 1066 + return state_command; 1067 + } 1068 + 1069 + static __inline__ verifier_state_t 1070 + via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end) 1071 + { 1072 + uint32_t data; 1073 + const uint32_t *buf = *buffer; 1074 + uint32_t i; 1075 + 1076 + if (buf_end - buf < 4) { 1077 + DRM_ERROR("Illegal termination of video header6 command\n"); 1078 + return state_error; 1079 + } 1080 + buf++; 1081 + data = *buf++; 1082 + if (*buf++ != 0x00F60000) { 1083 + DRM_ERROR("Illegal header6 header data\n"); 1084 + return state_error; 1085 + } 1086 + if (*buf++ != 0x00000000) { 1087 + DRM_ERROR("Illegal header6 header data\n"); 1088 + return state_error; 1089 + } 1090 + if ((buf_end - buf) < (data << 1)) { 1091 + DRM_ERROR("Illegal termination of video header6 command\n"); 1092 + return state_error; 1093 + } 1094 + for (i = 0; i < data; ++i) { 1095 + if (verify_mmio_address(*buf++)) 1096 + return state_error; 1097 + buf++; 1098 + } 1099 + data <<= 1; 1100 + if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3))) 1101 + return state_error; 1102 + *buffer = buf; 1103 + return state_command; 1104 + } 1105 + 1106 + static __inline__ verifier_state_t 1107 + via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer, 1108 + const uint32_t *buf_end) 1109 + { 1110 + 1111 + uint32_t addr, count, i; 1112 + const uint32_t *buf = *buffer; 1113 + 1114 + i = count = *++buf; 1115 + buf += 3; 1116 + while (i--) { 1117 + addr = *buf++; 1118 + via_write(dev_priv, addr, *buf++); 1119 + } 1120 + count <<= 1; 1121 + if (count & 3) 
1122 + buf += 4 - (count & 3); 1123 + *buffer = buf; 1124 + return state_command; 1125 + } 1126 + 1127 + static int 1128 + via_verify_command_stream(const uint32_t * buf, unsigned int size, 1129 + struct drm_device * dev, int agp) 1130 + { 1131 + 1132 + drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 1133 + drm_via_state_t *hc_state = &dev_priv->hc_state; 1134 + drm_via_state_t saved_state = *hc_state; 1135 + uint32_t cmd; 1136 + const uint32_t *buf_end = buf + (size >> 2); 1137 + verifier_state_t state = state_command; 1138 + int cme_video; 1139 + int supported_3d; 1140 + 1141 + cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A || 1142 + dev_priv->chipset == VIA_DX9_0); 1143 + 1144 + supported_3d = dev_priv->chipset != VIA_DX9_0; 1145 + 1146 + hc_state->dev = dev; 1147 + hc_state->unfinished = no_sequence; 1148 + hc_state->map_cache = NULL; 1149 + hc_state->agp = agp; 1150 + hc_state->buf_start = buf; 1151 + dev_priv->num_fire_offsets = 0; 1152 + 1153 + while (buf < buf_end) { 1154 + 1155 + switch (state) { 1156 + case state_header2: 1157 + state = via_check_header2(&buf, buf_end, hc_state); 1158 + break; 1159 + case state_header1: 1160 + state = via_check_header1(&buf, buf_end); 1161 + break; 1162 + case state_vheader5: 1163 + state = via_check_vheader5(&buf, buf_end); 1164 + break; 1165 + case state_vheader6: 1166 + state = via_check_vheader6(&buf, buf_end); 1167 + break; 1168 + case state_command: 1169 + cmd = *buf; 1170 + if ((cmd == HALCYON_HEADER2) && supported_3d) 1171 + state = state_header2; 1172 + else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) 1173 + state = state_header1; 1174 + else if (cme_video 1175 + && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5) 1176 + state = state_vheader5; 1177 + else if (cme_video 1178 + && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6) 1179 + state = state_vheader6; 1180 + else if ((cmd == HALCYON_HEADER2) && !supported_3d) { 1181 + DRM_ERROR("Accelerated 3D is not supported on this chipset 
yet.\n"); 1182 + state = state_error; 1183 + } else { 1184 + DRM_ERROR 1185 + ("Invalid / Unimplemented DMA HEADER command. 0x%x\n", 1186 + cmd); 1187 + state = state_error; 1188 + } 1189 + break; 1190 + case state_error: 1191 + default: 1192 + *hc_state = saved_state; 1193 + return -EINVAL; 1194 + } 1195 + } 1196 + if (state == state_error) { 1197 + *hc_state = saved_state; 1198 + return -EINVAL; 1199 + } 1200 + return 0; 1201 + } 1202 + 1203 + static int 1204 + via_parse_command_stream(struct drm_device *dev, const uint32_t *buf, 1205 + unsigned int size) 1206 + { 1207 + 1208 + drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 1209 + uint32_t cmd; 1210 + const uint32_t *buf_end = buf + (size >> 2); 1211 + verifier_state_t state = state_command; 1212 + int fire_count = 0; 1213 + 1214 + while (buf < buf_end) { 1215 + 1216 + switch (state) { 1217 + case state_header2: 1218 + state = 1219 + via_parse_header2(dev_priv, &buf, buf_end, 1220 + &fire_count); 1221 + break; 1222 + case state_header1: 1223 + state = via_parse_header1(dev_priv, &buf, buf_end); 1224 + break; 1225 + case state_vheader5: 1226 + state = via_parse_vheader5(dev_priv, &buf, buf_end); 1227 + break; 1228 + case state_vheader6: 1229 + state = via_parse_vheader6(dev_priv, &buf, buf_end); 1230 + break; 1231 + case state_command: 1232 + cmd = *buf; 1233 + if (cmd == HALCYON_HEADER2) 1234 + state = state_header2; 1235 + else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) 1236 + state = state_header1; 1237 + else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5) 1238 + state = state_vheader5; 1239 + else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6) 1240 + state = state_vheader6; 1241 + else { 1242 + DRM_ERROR 1243 + ("Invalid / Unimplemented DMA HEADER command. 
0x%x\n", 1244 + cmd); 1245 + state = state_error; 1246 + } 1247 + break; 1248 + case state_error: 1249 + default: 1250 + return -EINVAL; 1251 + } 1252 + } 1253 + if (state == state_error) 1254 + return -EINVAL; 1255 + return 0; 1256 + } 1257 + 1258 + static void 1259 + setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size) 1260 + { 1261 + int i; 1262 + 1263 + for (i = 0; i < 256; ++i) 1264 + table[i] = forbidden_command; 1265 + 1266 + for (i = 0; i < size; ++i) 1267 + table[init_table[i].code] = init_table[i].hz; 1268 + } 1269 + 1270 + static void via_init_command_verifier(void) 1271 + { 1272 + setup_hazard_table(init_table1, table1, ARRAY_SIZE(init_table1)); 1273 + setup_hazard_table(init_table2, table2, ARRAY_SIZE(init_table2)); 1274 + setup_hazard_table(init_table3, table3, ARRAY_SIZE(init_table3)); 1275 + } 205 1276 /* 206 1277 * Unmap a DMA mapping. 207 1278 */
+27 -2
drivers/gpu/drm/via/via_drv.h
··· 46 46 #define DRIVER_MINOR 11 47 47 #define DRIVER_PATCHLEVEL 1 48 48 49 - #include "via_verifier.h" 49 + typedef enum { 50 + no_sequence = 0, 51 + z_address, 52 + dest_address, 53 + tex_address 54 + } drm_via_sequence_t; 55 + 56 + typedef struct { 57 + unsigned texture; 58 + uint32_t z_addr; 59 + uint32_t d_addr; 60 + uint32_t t_addr[2][10]; 61 + uint32_t pitch[2][10]; 62 + uint32_t height[2][10]; 63 + uint32_t tex_level_lo[2]; 64 + uint32_t tex_level_hi[2]; 65 + uint32_t tex_palette_size[2]; 66 + uint32_t tex_npot[2]; 67 + drm_via_sequence_t unfinished; 68 + int agp_texture; 69 + int multitex; 70 + struct drm_device *dev; 71 + drm_local_map_t *map_cache; 72 + uint32_t vertex_count; 73 + int agp; 74 + const uint32_t *buf_start; 75 + } drm_via_state_t; 50 76 51 77 #define VIA_PCI_BUF_SIZE 60000 52 78 #define VIA_FIRE_BUF_SIZE 1024 ··· 260 234 extern int via_do_cleanup_map(struct drm_device *dev); 261 235 262 236 extern int via_dma_cleanup(struct drm_device *dev); 263 - extern void via_init_command_verifier(void); 264 237 extern int via_driver_dma_quiescent(struct drm_device *dev); 265 238 266 239 #endif
-1110
drivers/gpu/drm/via/via_verifier.c
··· 1 - /* 2 - * Copyright 2004 The Unichrome Project. All Rights Reserved. 3 - * Copyright 2005 Thomas Hellstrom. All Rights Reserved. 4 - * 5 - * Permission is hereby granted, free of charge, to any person obtaining a 6 - * copy of this software and associated documentation files (the "Software"), 7 - * to deal in the Software without restriction, including without limitation 8 - * the rights to use, copy, modify, merge, publish, distribute, sub license, 9 - * and/or sell copies of the Software, and to permit persons to whom the 10 - * Software is furnished to do so, subject to the following conditions: 11 - * 12 - * The above copyright notice and this permission notice (including the 13 - * next paragraph) shall be included in all copies or substantial portions 14 - * of the Software. 15 - * 16 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 19 - * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 - * DEALINGS IN THE SOFTWARE. 23 - * 24 - * Author: Thomas Hellstrom 2004, 2005. 25 - * This code was written using docs obtained under NDA from VIA Inc. 26 - * 27 - * Don't run this code directly on an AGP buffer. Due to cache problems it will 28 - * be very slow. 
29 - */ 30 - 31 - #include <drm/drm_device.h> 32 - #include <drm/drm_legacy.h> 33 - #include <drm/via_drm.h> 34 - 35 - #include "via_3d_reg.h" 36 - #include "via_drv.h" 37 - #include "via_verifier.h" 38 - 39 - typedef enum { 40 - state_command, 41 - state_header2, 42 - state_header1, 43 - state_vheader5, 44 - state_vheader6, 45 - state_error 46 - } verifier_state_t; 47 - 48 - typedef enum { 49 - no_check = 0, 50 - check_for_header2, 51 - check_for_header1, 52 - check_for_header2_err, 53 - check_for_header1_err, 54 - check_for_fire, 55 - check_z_buffer_addr0, 56 - check_z_buffer_addr1, 57 - check_z_buffer_addr_mode, 58 - check_destination_addr0, 59 - check_destination_addr1, 60 - check_destination_addr_mode, 61 - check_for_dummy, 62 - check_for_dd, 63 - check_texture_addr0, 64 - check_texture_addr1, 65 - check_texture_addr2, 66 - check_texture_addr3, 67 - check_texture_addr4, 68 - check_texture_addr5, 69 - check_texture_addr6, 70 - check_texture_addr7, 71 - check_texture_addr8, 72 - check_texture_addr_mode, 73 - check_for_vertex_count, 74 - check_number_texunits, 75 - forbidden_command 76 - } hazard_t; 77 - 78 - /* 79 - * Associates each hazard above with a possible multi-command 80 - * sequence. For example an address that is split over multiple 81 - * commands and that needs to be checked at the first command 82 - * that does not include any part of the address. 
83 - */ 84 - 85 - static drm_via_sequence_t seqs[] = { 86 - no_sequence, 87 - no_sequence, 88 - no_sequence, 89 - no_sequence, 90 - no_sequence, 91 - no_sequence, 92 - z_address, 93 - z_address, 94 - z_address, 95 - dest_address, 96 - dest_address, 97 - dest_address, 98 - no_sequence, 99 - no_sequence, 100 - tex_address, 101 - tex_address, 102 - tex_address, 103 - tex_address, 104 - tex_address, 105 - tex_address, 106 - tex_address, 107 - tex_address, 108 - tex_address, 109 - tex_address, 110 - no_sequence 111 - }; 112 - 113 - typedef struct { 114 - unsigned int code; 115 - hazard_t hz; 116 - } hz_init_t; 117 - 118 - static hz_init_t init_table1[] = { 119 - {0xf2, check_for_header2_err}, 120 - {0xf0, check_for_header1_err}, 121 - {0xee, check_for_fire}, 122 - {0xcc, check_for_dummy}, 123 - {0xdd, check_for_dd}, 124 - {0x00, no_check}, 125 - {0x10, check_z_buffer_addr0}, 126 - {0x11, check_z_buffer_addr1}, 127 - {0x12, check_z_buffer_addr_mode}, 128 - {0x13, no_check}, 129 - {0x14, no_check}, 130 - {0x15, no_check}, 131 - {0x23, no_check}, 132 - {0x24, no_check}, 133 - {0x33, no_check}, 134 - {0x34, no_check}, 135 - {0x35, no_check}, 136 - {0x36, no_check}, 137 - {0x37, no_check}, 138 - {0x38, no_check}, 139 - {0x39, no_check}, 140 - {0x3A, no_check}, 141 - {0x3B, no_check}, 142 - {0x3C, no_check}, 143 - {0x3D, no_check}, 144 - {0x3E, no_check}, 145 - {0x40, check_destination_addr0}, 146 - {0x41, check_destination_addr1}, 147 - {0x42, check_destination_addr_mode}, 148 - {0x43, no_check}, 149 - {0x44, no_check}, 150 - {0x50, no_check}, 151 - {0x51, no_check}, 152 - {0x52, no_check}, 153 - {0x53, no_check}, 154 - {0x54, no_check}, 155 - {0x55, no_check}, 156 - {0x56, no_check}, 157 - {0x57, no_check}, 158 - {0x58, no_check}, 159 - {0x70, no_check}, 160 - {0x71, no_check}, 161 - {0x78, no_check}, 162 - {0x79, no_check}, 163 - {0x7A, no_check}, 164 - {0x7B, no_check}, 165 - {0x7C, no_check}, 166 - {0x7D, check_for_vertex_count} 167 - }; 168 - 169 - static hz_init_t 
init_table2[] = { 170 - {0xf2, check_for_header2_err}, 171 - {0xf0, check_for_header1_err}, 172 - {0xee, check_for_fire}, 173 - {0xcc, check_for_dummy}, 174 - {0x00, check_texture_addr0}, 175 - {0x01, check_texture_addr0}, 176 - {0x02, check_texture_addr0}, 177 - {0x03, check_texture_addr0}, 178 - {0x04, check_texture_addr0}, 179 - {0x05, check_texture_addr0}, 180 - {0x06, check_texture_addr0}, 181 - {0x07, check_texture_addr0}, 182 - {0x08, check_texture_addr0}, 183 - {0x09, check_texture_addr0}, 184 - {0x20, check_texture_addr1}, 185 - {0x21, check_texture_addr1}, 186 - {0x22, check_texture_addr1}, 187 - {0x23, check_texture_addr4}, 188 - {0x2B, check_texture_addr3}, 189 - {0x2C, check_texture_addr3}, 190 - {0x2D, check_texture_addr3}, 191 - {0x2E, check_texture_addr3}, 192 - {0x2F, check_texture_addr3}, 193 - {0x30, check_texture_addr3}, 194 - {0x31, check_texture_addr3}, 195 - {0x32, check_texture_addr3}, 196 - {0x33, check_texture_addr3}, 197 - {0x34, check_texture_addr3}, 198 - {0x4B, check_texture_addr5}, 199 - {0x4C, check_texture_addr6}, 200 - {0x51, check_texture_addr7}, 201 - {0x52, check_texture_addr8}, 202 - {0x77, check_texture_addr2}, 203 - {0x78, no_check}, 204 - {0x79, no_check}, 205 - {0x7A, no_check}, 206 - {0x7B, check_texture_addr_mode}, 207 - {0x7C, no_check}, 208 - {0x7D, no_check}, 209 - {0x7E, no_check}, 210 - {0x7F, no_check}, 211 - {0x80, no_check}, 212 - {0x81, no_check}, 213 - {0x82, no_check}, 214 - {0x83, no_check}, 215 - {0x85, no_check}, 216 - {0x86, no_check}, 217 - {0x87, no_check}, 218 - {0x88, no_check}, 219 - {0x89, no_check}, 220 - {0x8A, no_check}, 221 - {0x90, no_check}, 222 - {0x91, no_check}, 223 - {0x92, no_check}, 224 - {0x93, no_check} 225 - }; 226 - 227 - static hz_init_t init_table3[] = { 228 - {0xf2, check_for_header2_err}, 229 - {0xf0, check_for_header1_err}, 230 - {0xcc, check_for_dummy}, 231 - {0x00, check_number_texunits} 232 - }; 233 - 234 - static hazard_t table1[256]; 235 - static hazard_t table2[256]; 236 - 
static hazard_t table3[256]; 237 - 238 - static __inline__ int 239 - eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words) 240 - { 241 - if ((buf_end - *buf) >= num_words) { 242 - *buf += num_words; 243 - return 0; 244 - } 245 - DRM_ERROR("Illegal termination of DMA command buffer\n"); 246 - return 1; 247 - } 248 - 249 - /* 250 - * Partially stolen from drm_memory.h 251 - */ 252 - 253 - static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq, 254 - unsigned long offset, 255 - unsigned long size, 256 - struct drm_device *dev) 257 - { 258 - struct drm_map_list *r_list; 259 - drm_local_map_t *map = seq->map_cache; 260 - 261 - if (map && map->offset <= offset 262 - && (offset + size) <= (map->offset + map->size)) { 263 - return map; 264 - } 265 - 266 - list_for_each_entry(r_list, &dev->maplist, head) { 267 - map = r_list->map; 268 - if (!map) 269 - continue; 270 - if (map->offset <= offset 271 - && (offset + size) <= (map->offset + map->size) 272 - && !(map->flags & _DRM_RESTRICTED) 273 - && (map->type == _DRM_AGP)) { 274 - seq->map_cache = map; 275 - return map; 276 - } 277 - } 278 - return NULL; 279 - } 280 - 281 - /* 282 - * Require that all AGP texture levels reside in the same AGP map which should 283 - * be mappable by the client. This is not a big restriction. 284 - * FIXME: To actually enforce this security policy strictly, drm_rmmap 285 - * would have to wait for dma quiescent before removing an AGP map. 286 - * The via_drm_lookup_agp_map call in reality seems to take 287 - * very little CPU time. 
288 - */ 289 - 290 - static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq) 291 - { 292 - switch (cur_seq->unfinished) { 293 - case z_address: 294 - DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr); 295 - break; 296 - case dest_address: 297 - DRM_DEBUG("Destination start address is 0x%x\n", 298 - cur_seq->d_addr); 299 - break; 300 - case tex_address: 301 - if (cur_seq->agp_texture) { 302 - unsigned start = 303 - cur_seq->tex_level_lo[cur_seq->texture]; 304 - unsigned end = cur_seq->tex_level_hi[cur_seq->texture]; 305 - unsigned long lo = ~0, hi = 0, tmp; 306 - uint32_t *addr, *pitch, *height, tex; 307 - unsigned i; 308 - int npot; 309 - 310 - if (end > 9) 311 - end = 9; 312 - if (start > 9) 313 - start = 9; 314 - 315 - addr = 316 - &(cur_seq->t_addr[tex = cur_seq->texture][start]); 317 - pitch = &(cur_seq->pitch[tex][start]); 318 - height = &(cur_seq->height[tex][start]); 319 - npot = cur_seq->tex_npot[tex]; 320 - for (i = start; i <= end; ++i) { 321 - tmp = *addr++; 322 - if (tmp < lo) 323 - lo = tmp; 324 - if (i == 0 && npot) 325 - tmp += (*height++ * *pitch++); 326 - else 327 - tmp += (*height++ << *pitch++); 328 - if (tmp > hi) 329 - hi = tmp; 330 - } 331 - 332 - if (!via_drm_lookup_agp_map 333 - (cur_seq, lo, hi - lo, cur_seq->dev)) { 334 - DRM_ERROR 335 - ("AGP texture is not in allowed map\n"); 336 - return 2; 337 - } 338 - } 339 - break; 340 - default: 341 - break; 342 - } 343 - cur_seq->unfinished = no_sequence; 344 - return 0; 345 - } 346 - 347 - static __inline__ int 348 - investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t *cur_seq) 349 - { 350 - register uint32_t tmp, *tmp_addr; 351 - 352 - if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) { 353 - int ret; 354 - if ((ret = finish_current_sequence(cur_seq))) 355 - return ret; 356 - } 357 - 358 - switch (hz) { 359 - case check_for_header2: 360 - if (cmd == HALCYON_HEADER2) 361 - return 1; 362 - return 0; 363 - case check_for_header1: 364 - if ((cmd & 
HALCYON_HEADER1MASK) == HALCYON_HEADER1) 365 - return 1; 366 - return 0; 367 - case check_for_header2_err: 368 - if (cmd == HALCYON_HEADER2) 369 - return 1; 370 - DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n"); 371 - break; 372 - case check_for_header1_err: 373 - if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) 374 - return 1; 375 - DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n"); 376 - break; 377 - case check_for_fire: 378 - if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD) 379 - return 1; 380 - DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n"); 381 - break; 382 - case check_for_dummy: 383 - if (HC_DUMMY == cmd) 384 - return 0; 385 - DRM_ERROR("Illegal DMA HC_DUMMY command\n"); 386 - break; 387 - case check_for_dd: 388 - if (0xdddddddd == cmd) 389 - return 0; 390 - DRM_ERROR("Illegal DMA 0xdddddddd command\n"); 391 - break; 392 - case check_z_buffer_addr0: 393 - cur_seq->unfinished = z_address; 394 - cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) | 395 - (cmd & 0x00FFFFFF); 396 - return 0; 397 - case check_z_buffer_addr1: 398 - cur_seq->unfinished = z_address; 399 - cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) | 400 - ((cmd & 0xFF) << 24); 401 - return 0; 402 - case check_z_buffer_addr_mode: 403 - cur_seq->unfinished = z_address; 404 - if ((cmd & 0x0000C000) == 0) 405 - return 0; 406 - DRM_ERROR("Attempt to place Z buffer in system memory\n"); 407 - return 2; 408 - case check_destination_addr0: 409 - cur_seq->unfinished = dest_address; 410 - cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) | 411 - (cmd & 0x00FFFFFF); 412 - return 0; 413 - case check_destination_addr1: 414 - cur_seq->unfinished = dest_address; 415 - cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) | 416 - ((cmd & 0xFF) << 24); 417 - return 0; 418 - case check_destination_addr_mode: 419 - cur_seq->unfinished = dest_address; 420 - if ((cmd & 0x0000C000) == 0) 421 - return 0; 422 - DRM_ERROR 423 - ("Attempt to place 3D drawing buffer in system memory\n"); 424 - return 2; 425 - case 
check_texture_addr0: 426 - cur_seq->unfinished = tex_address; 427 - tmp = (cmd >> 24); 428 - tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp]; 429 - *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF); 430 - return 0; 431 - case check_texture_addr1: 432 - cur_seq->unfinished = tex_address; 433 - tmp = ((cmd >> 24) - 0x20); 434 - tmp += tmp << 1; 435 - tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp]; 436 - *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24); 437 - tmp_addr++; 438 - *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16); 439 - tmp_addr++; 440 - *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8); 441 - return 0; 442 - case check_texture_addr2: 443 - cur_seq->unfinished = tex_address; 444 - cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F; 445 - cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6; 446 - return 0; 447 - case check_texture_addr3: 448 - cur_seq->unfinished = tex_address; 449 - tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit); 450 - if (tmp == 0 && 451 - (cmd & HC_HTXnEnPit_MASK)) { 452 - cur_seq->pitch[cur_seq->texture][tmp] = 453 - (cmd & HC_HTXnLnPit_MASK); 454 - cur_seq->tex_npot[cur_seq->texture] = 1; 455 - } else { 456 - cur_seq->pitch[cur_seq->texture][tmp] = 457 - (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT; 458 - cur_seq->tex_npot[cur_seq->texture] = 0; 459 - if (cmd & 0x000FFFFF) { 460 - DRM_ERROR 461 - ("Unimplemented texture level 0 pitch mode.\n"); 462 - return 2; 463 - } 464 - } 465 - return 0; 466 - case check_texture_addr4: 467 - cur_seq->unfinished = tex_address; 468 - tmp_addr = &cur_seq->t_addr[cur_seq->texture][9]; 469 - *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24); 470 - return 0; 471 - case check_texture_addr5: 472 - case check_texture_addr6: 473 - cur_seq->unfinished = tex_address; 474 - /* 475 - * Texture width. We don't care since we have the pitch. 
476 - */ 477 - return 0; 478 - case check_texture_addr7: 479 - cur_seq->unfinished = tex_address; 480 - tmp_addr = &(cur_seq->height[cur_seq->texture][0]); 481 - tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20); 482 - tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16); 483 - tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12); 484 - tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8); 485 - tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4); 486 - tmp_addr[0] = 1 << (cmd & 0x0000000F); 487 - return 0; 488 - case check_texture_addr8: 489 - cur_seq->unfinished = tex_address; 490 - tmp_addr = &(cur_seq->height[cur_seq->texture][0]); 491 - tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12); 492 - tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8); 493 - tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4); 494 - tmp_addr[6] = 1 << (cmd & 0x0000000F); 495 - return 0; 496 - case check_texture_addr_mode: 497 - cur_seq->unfinished = tex_address; 498 - if (2 == (tmp = cmd & 0x00000003)) { 499 - DRM_ERROR 500 - ("Attempt to fetch texture from system memory.\n"); 501 - return 2; 502 - } 503 - cur_seq->agp_texture = (tmp == 3); 504 - cur_seq->tex_palette_size[cur_seq->texture] = 505 - (cmd >> 16) & 0x000000007; 506 - return 0; 507 - case check_for_vertex_count: 508 - cur_seq->vertex_count = cmd & 0x0000FFFF; 509 - return 0; 510 - case check_number_texunits: 511 - cur_seq->multitex = (cmd >> 3) & 1; 512 - return 0; 513 - default: 514 - DRM_ERROR("Illegal DMA data: 0x%x\n", cmd); 515 - return 2; 516 - } 517 - return 2; 518 - } 519 - 520 - static __inline__ int 521 - via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end, 522 - drm_via_state_t *cur_seq) 523 - { 524 - drm_via_private_t *dev_priv = 525 - (drm_via_private_t *) cur_seq->dev->dev_private; 526 - uint32_t a_fire, bcmd, dw_count; 527 - int ret = 0; 528 - int have_fire; 529 - const uint32_t *buf = *buffer; 530 - 531 - while (buf < buf_end) { 532 - have_fire = 0; 533 - if ((buf_end - buf) < 2) { 534 - DRM_ERROR 535 - ("Unexpected termination of 
primitive list.\n"); 536 - ret = 1; 537 - break; 538 - } 539 - if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB) 540 - break; 541 - bcmd = *buf++; 542 - if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) { 543 - DRM_ERROR("Expected Vertex List A command, got 0x%x\n", 544 - *buf); 545 - ret = 1; 546 - break; 547 - } 548 - a_fire = 549 - *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK | 550 - HC_HE3Fire_MASK; 551 - 552 - /* 553 - * How many dwords per vertex ? 554 - */ 555 - 556 - if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) { 557 - DRM_ERROR("Illegal B command vertex data for AGP.\n"); 558 - ret = 1; 559 - break; 560 - } 561 - 562 - dw_count = 0; 563 - if (bcmd & (1 << 7)) 564 - dw_count += (cur_seq->multitex) ? 2 : 1; 565 - if (bcmd & (1 << 8)) 566 - dw_count += (cur_seq->multitex) ? 2 : 1; 567 - if (bcmd & (1 << 9)) 568 - dw_count++; 569 - if (bcmd & (1 << 10)) 570 - dw_count++; 571 - if (bcmd & (1 << 11)) 572 - dw_count++; 573 - if (bcmd & (1 << 12)) 574 - dw_count++; 575 - if (bcmd & (1 << 13)) 576 - dw_count++; 577 - if (bcmd & (1 << 14)) 578 - dw_count++; 579 - 580 - while (buf < buf_end) { 581 - if (*buf == a_fire) { 582 - if (dev_priv->num_fire_offsets >= 583 - VIA_FIRE_BUF_SIZE) { 584 - DRM_ERROR("Fire offset buffer full.\n"); 585 - ret = 1; 586 - break; 587 - } 588 - dev_priv->fire_offsets[dev_priv-> 589 - num_fire_offsets++] = 590 - buf; 591 - have_fire = 1; 592 - buf++; 593 - if (buf < buf_end && *buf == a_fire) 594 - buf++; 595 - break; 596 - } 597 - if ((*buf == HALCYON_HEADER2) || 598 - ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) { 599 - DRM_ERROR("Missing Vertex Fire command, " 600 - "Stray Vertex Fire command or verifier " 601 - "lost sync.\n"); 602 - ret = 1; 603 - break; 604 - } 605 - if ((ret = eat_words(&buf, buf_end, dw_count))) 606 - break; 607 - } 608 - if (buf >= buf_end && !have_fire) { 609 - DRM_ERROR("Missing Vertex Fire command or verifier " 610 - "lost sync.\n"); 611 - ret = 1; 612 - break; 613 - } 614 - if (cur_seq->agp && ((buf - 
cur_seq->buf_start) & 0x01)) { 615 - DRM_ERROR("AGP Primitive list end misaligned.\n"); 616 - ret = 1; 617 - break; 618 - } 619 - } 620 - *buffer = buf; 621 - return ret; 622 - } 623 - 624 - static __inline__ verifier_state_t 625 - via_check_header2(uint32_t const **buffer, const uint32_t *buf_end, 626 - drm_via_state_t *hc_state) 627 - { 628 - uint32_t cmd; 629 - int hz_mode; 630 - hazard_t hz; 631 - const uint32_t *buf = *buffer; 632 - const hazard_t *hz_table; 633 - 634 - if ((buf_end - buf) < 2) { 635 - DRM_ERROR 636 - ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n"); 637 - return state_error; 638 - } 639 - buf++; 640 - cmd = (*buf++ & 0xFFFF0000) >> 16; 641 - 642 - switch (cmd) { 643 - case HC_ParaType_CmdVdata: 644 - if (via_check_prim_list(&buf, buf_end, hc_state)) 645 - return state_error; 646 - *buffer = buf; 647 - return state_command; 648 - case HC_ParaType_NotTex: 649 - hz_table = table1; 650 - break; 651 - case HC_ParaType_Tex: 652 - hc_state->texture = 0; 653 - hz_table = table2; 654 - break; 655 - case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)): 656 - hc_state->texture = 1; 657 - hz_table = table2; 658 - break; 659 - case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)): 660 - hz_table = table3; 661 - break; 662 - case HC_ParaType_Auto: 663 - if (eat_words(&buf, buf_end, 2)) 664 - return state_error; 665 - *buffer = buf; 666 - return state_command; 667 - case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)): 668 - if (eat_words(&buf, buf_end, 32)) 669 - return state_error; 670 - *buffer = buf; 671 - return state_command; 672 - case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)): 673 - case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)): 674 - DRM_ERROR("Texture palettes are rejected because of " 675 - "lack of info how to determine their size.\n"); 676 - return state_error; 677 - case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)): 678 - DRM_ERROR("Fog factor palettes are rejected because of " 679 - "lack of info how to 
determine their size.\n"); 680 - return state_error; 681 - default: 682 - 683 - /* 684 - * There are some unimplemented HC_ParaTypes here, that 685 - * need to be implemented if the Mesa driver is extended. 686 - */ 687 - 688 - DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 " 689 - "DMA subcommand: 0x%x. Previous dword: 0x%x\n", 690 - cmd, *(buf - 2)); 691 - *buffer = buf; 692 - return state_error; 693 - } 694 - 695 - while (buf < buf_end) { 696 - cmd = *buf++; 697 - if ((hz = hz_table[cmd >> 24])) { 698 - if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) { 699 - if (hz_mode == 1) { 700 - buf--; 701 - break; 702 - } 703 - return state_error; 704 - } 705 - } else if (hc_state->unfinished && 706 - finish_current_sequence(hc_state)) { 707 - return state_error; 708 - } 709 - } 710 - if (hc_state->unfinished && finish_current_sequence(hc_state)) 711 - return state_error; 712 - *buffer = buf; 713 - return state_command; 714 - } 715 - 716 - static __inline__ verifier_state_t 717 - via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer, 718 - const uint32_t *buf_end, int *fire_count) 719 - { 720 - uint32_t cmd; 721 - const uint32_t *buf = *buffer; 722 - const uint32_t *next_fire; 723 - int burst = 0; 724 - 725 - next_fire = dev_priv->fire_offsets[*fire_count]; 726 - buf++; 727 - cmd = (*buf & 0xFFFF0000) >> 16; 728 - via_write(dev_priv, HC_REG_TRANS_SET + HC_REG_BASE, *buf++); 729 - switch (cmd) { 730 - case HC_ParaType_CmdVdata: 731 - while ((buf < buf_end) && 732 - (*fire_count < dev_priv->num_fire_offsets) && 733 - (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) { 734 - while (buf <= next_fire) { 735 - via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE + 736 - (burst & 63), *buf++); 737 - burst += 4; 738 - } 739 - if ((buf < buf_end) 740 - && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) 741 - buf++; 742 - 743 - if (++(*fire_count) < dev_priv->num_fire_offsets) 744 - next_fire = dev_priv->fire_offsets[*fire_count]; 745 - } 746 - break; 747 - 
default: 748 - while (buf < buf_end) { 749 - 750 - if (*buf == HC_HEADER2 || 751 - (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 || 752 - (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 || 753 - (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6) 754 - break; 755 - 756 - via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE + 757 - (burst & 63), *buf++); 758 - burst += 4; 759 - } 760 - } 761 - *buffer = buf; 762 - return state_command; 763 - } 764 - 765 - static __inline__ int verify_mmio_address(uint32_t address) 766 - { 767 - if ((address > 0x3FF) && (address < 0xC00)) { 768 - DRM_ERROR("Invalid VIDEO DMA command. " 769 - "Attempt to access 3D- or command burst area.\n"); 770 - return 1; 771 - } else if ((address > 0xCFF) && (address < 0x1300)) { 772 - DRM_ERROR("Invalid VIDEO DMA command. " 773 - "Attempt to access PCI DMA area.\n"); 774 - return 1; 775 - } else if (address > 0x13FF) { 776 - DRM_ERROR("Invalid VIDEO DMA command. " 777 - "Attempt to access VGA registers.\n"); 778 - return 1; 779 - } 780 - return 0; 781 - } 782 - 783 - static __inline__ int 784 - verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end, 785 - uint32_t dwords) 786 - { 787 - const uint32_t *buf = *buffer; 788 - 789 - if (buf_end - buf < dwords) { 790 - DRM_ERROR("Illegal termination of video command.\n"); 791 - return 1; 792 - } 793 - while (dwords--) { 794 - if (*buf++) { 795 - DRM_ERROR("Illegal video command tail.\n"); 796 - return 1; 797 - } 798 - } 799 - *buffer = buf; 800 - return 0; 801 - } 802 - 803 - static __inline__ verifier_state_t 804 - via_check_header1(uint32_t const **buffer, const uint32_t * buf_end) 805 - { 806 - uint32_t cmd; 807 - const uint32_t *buf = *buffer; 808 - verifier_state_t ret = state_command; 809 - 810 - while (buf < buf_end) { 811 - cmd = *buf; 812 - if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) && 813 - (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) { 814 - if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1) 815 - break; 816 - DRM_ERROR("Invalid 
HALCYON_HEADER1 command. " 817 - "Attempt to access 3D- or command burst area.\n"); 818 - ret = state_error; 819 - break; 820 - } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) { 821 - if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1) 822 - break; 823 - DRM_ERROR("Invalid HALCYON_HEADER1 command. " 824 - "Attempt to access VGA registers.\n"); 825 - ret = state_error; 826 - break; 827 - } else { 828 - buf += 2; 829 - } 830 - } 831 - *buffer = buf; 832 - return ret; 833 - } 834 - 835 - static __inline__ verifier_state_t 836 - via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer, 837 - const uint32_t *buf_end) 838 - { 839 - register uint32_t cmd; 840 - const uint32_t *buf = *buffer; 841 - 842 - while (buf < buf_end) { 843 - cmd = *buf; 844 - if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1) 845 - break; 846 - via_write(dev_priv, (cmd & ~HALCYON_HEADER1MASK) << 2, *++buf); 847 - buf++; 848 - } 849 - *buffer = buf; 850 - return state_command; 851 - } 852 - 853 - static __inline__ verifier_state_t 854 - via_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end) 855 - { 856 - uint32_t data; 857 - const uint32_t *buf = *buffer; 858 - 859 - if (buf_end - buf < 4) { 860 - DRM_ERROR("Illegal termination of video header5 command\n"); 861 - return state_error; 862 - } 863 - 864 - data = *buf++ & ~VIA_VIDEOMASK; 865 - if (verify_mmio_address(data)) 866 - return state_error; 867 - 868 - data = *buf++; 869 - if (*buf++ != 0x00F50000) { 870 - DRM_ERROR("Illegal header5 header data\n"); 871 - return state_error; 872 - } 873 - if (*buf++ != 0x00000000) { 874 - DRM_ERROR("Illegal header5 header data\n"); 875 - return state_error; 876 - } 877 - if (eat_words(&buf, buf_end, data)) 878 - return state_error; 879 - if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3))) 880 - return state_error; 881 - *buffer = buf; 882 - return state_command; 883 - 884 - } 885 - 886 - static __inline__ verifier_state_t 887 - 
via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer, 888 - const uint32_t *buf_end) 889 - { 890 - uint32_t addr, count, i; 891 - const uint32_t *buf = *buffer; 892 - 893 - addr = *buf++ & ~VIA_VIDEOMASK; 894 - i = count = *buf; 895 - buf += 3; 896 - while (i--) 897 - via_write(dev_priv, addr, *buf++); 898 - if (count & 3) 899 - buf += 4 - (count & 3); 900 - *buffer = buf; 901 - return state_command; 902 - } 903 - 904 - static __inline__ verifier_state_t 905 - via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end) 906 - { 907 - uint32_t data; 908 - const uint32_t *buf = *buffer; 909 - uint32_t i; 910 - 911 - if (buf_end - buf < 4) { 912 - DRM_ERROR("Illegal termination of video header6 command\n"); 913 - return state_error; 914 - } 915 - buf++; 916 - data = *buf++; 917 - if (*buf++ != 0x00F60000) { 918 - DRM_ERROR("Illegal header6 header data\n"); 919 - return state_error; 920 - } 921 - if (*buf++ != 0x00000000) { 922 - DRM_ERROR("Illegal header6 header data\n"); 923 - return state_error; 924 - } 925 - if ((buf_end - buf) < (data << 1)) { 926 - DRM_ERROR("Illegal termination of video header6 command\n"); 927 - return state_error; 928 - } 929 - for (i = 0; i < data; ++i) { 930 - if (verify_mmio_address(*buf++)) 931 - return state_error; 932 - buf++; 933 - } 934 - data <<= 1; 935 - if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3))) 936 - return state_error; 937 - *buffer = buf; 938 - return state_command; 939 - } 940 - 941 - static __inline__ verifier_state_t 942 - via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer, 943 - const uint32_t *buf_end) 944 - { 945 - 946 - uint32_t addr, count, i; 947 - const uint32_t *buf = *buffer; 948 - 949 - i = count = *++buf; 950 - buf += 3; 951 - while (i--) { 952 - addr = *buf++; 953 - via_write(dev_priv, addr, *buf++); 954 - } 955 - count <<= 1; 956 - if (count & 3) 957 - buf += 4 - (count & 3); 958 - *buffer = buf; 959 - return state_command; 960 - } 961 - 962 
- int 963 - via_verify_command_stream(const uint32_t * buf, unsigned int size, 964 - struct drm_device * dev, int agp) 965 - { 966 - 967 - drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 968 - drm_via_state_t *hc_state = &dev_priv->hc_state; 969 - drm_via_state_t saved_state = *hc_state; 970 - uint32_t cmd; 971 - const uint32_t *buf_end = buf + (size >> 2); 972 - verifier_state_t state = state_command; 973 - int cme_video; 974 - int supported_3d; 975 - 976 - cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A || 977 - dev_priv->chipset == VIA_DX9_0); 978 - 979 - supported_3d = dev_priv->chipset != VIA_DX9_0; 980 - 981 - hc_state->dev = dev; 982 - hc_state->unfinished = no_sequence; 983 - hc_state->map_cache = NULL; 984 - hc_state->agp = agp; 985 - hc_state->buf_start = buf; 986 - dev_priv->num_fire_offsets = 0; 987 - 988 - while (buf < buf_end) { 989 - 990 - switch (state) { 991 - case state_header2: 992 - state = via_check_header2(&buf, buf_end, hc_state); 993 - break; 994 - case state_header1: 995 - state = via_check_header1(&buf, buf_end); 996 - break; 997 - case state_vheader5: 998 - state = via_check_vheader5(&buf, buf_end); 999 - break; 1000 - case state_vheader6: 1001 - state = via_check_vheader6(&buf, buf_end); 1002 - break; 1003 - case state_command: 1004 - cmd = *buf; 1005 - if ((cmd == HALCYON_HEADER2) && supported_3d) 1006 - state = state_header2; 1007 - else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) 1008 - state = state_header1; 1009 - else if (cme_video 1010 - && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5) 1011 - state = state_vheader5; 1012 - else if (cme_video 1013 - && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6) 1014 - state = state_vheader6; 1015 - else if ((cmd == HALCYON_HEADER2) && !supported_3d) { 1016 - DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n"); 1017 - state = state_error; 1018 - } else { 1019 - DRM_ERROR 1020 - ("Invalid / Unimplemented DMA HEADER command. 
0x%x\n", 1021 - cmd); 1022 - state = state_error; 1023 - } 1024 - break; 1025 - case state_error: 1026 - default: 1027 - *hc_state = saved_state; 1028 - return -EINVAL; 1029 - } 1030 - } 1031 - if (state == state_error) { 1032 - *hc_state = saved_state; 1033 - return -EINVAL; 1034 - } 1035 - return 0; 1036 - } 1037 - 1038 - int 1039 - via_parse_command_stream(struct drm_device *dev, const uint32_t *buf, 1040 - unsigned int size) 1041 - { 1042 - 1043 - drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 1044 - uint32_t cmd; 1045 - const uint32_t *buf_end = buf + (size >> 2); 1046 - verifier_state_t state = state_command; 1047 - int fire_count = 0; 1048 - 1049 - while (buf < buf_end) { 1050 - 1051 - switch (state) { 1052 - case state_header2: 1053 - state = 1054 - via_parse_header2(dev_priv, &buf, buf_end, 1055 - &fire_count); 1056 - break; 1057 - case state_header1: 1058 - state = via_parse_header1(dev_priv, &buf, buf_end); 1059 - break; 1060 - case state_vheader5: 1061 - state = via_parse_vheader5(dev_priv, &buf, buf_end); 1062 - break; 1063 - case state_vheader6: 1064 - state = via_parse_vheader6(dev_priv, &buf, buf_end); 1065 - break; 1066 - case state_command: 1067 - cmd = *buf; 1068 - if (cmd == HALCYON_HEADER2) 1069 - state = state_header2; 1070 - else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) 1071 - state = state_header1; 1072 - else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5) 1073 - state = state_vheader5; 1074 - else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6) 1075 - state = state_vheader6; 1076 - else { 1077 - DRM_ERROR 1078 - ("Invalid / Unimplemented DMA HEADER command. 
0x%x\n", 1079 - cmd); 1080 - state = state_error; 1081 - } 1082 - break; 1083 - case state_error: 1084 - default: 1085 - return -EINVAL; 1086 - } 1087 - } 1088 - if (state == state_error) 1089 - return -EINVAL; 1090 - return 0; 1091 - } 1092 - 1093 - static void 1094 - setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size) 1095 - { 1096 - int i; 1097 - 1098 - for (i = 0; i < 256; ++i) 1099 - table[i] = forbidden_command; 1100 - 1101 - for (i = 0; i < size; ++i) 1102 - table[init_table[i].code] = init_table[i].hz; 1103 - } 1104 - 1105 - void via_init_command_verifier(void) 1106 - { 1107 - setup_hazard_table(init_table1, table1, ARRAY_SIZE(init_table1)); 1108 - setup_hazard_table(init_table2, table2, ARRAY_SIZE(init_table2)); 1109 - setup_hazard_table(init_table3, table3, ARRAY_SIZE(init_table3)); 1110 - }
-62
drivers/gpu/drm/via/via_verifier.h
/*
 * Copyright 2004 The Unichrome Project. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE UNICHROME PROJECT, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Thomas Hellström 2004.
 */

#ifndef _VIA_VERIFIER_H_
#define _VIA_VERIFIER_H_

/*
 * Kind of multi-command address sequence the verifier is currently in the
 * middle of (an address may be split over several commands and can only be
 * checked once the sequence is complete).
 */
typedef enum {
	no_sequence = 0,
	z_address,		/* z-buffer address sequence */
	dest_address,		/* destination (color buffer) address sequence */
	tex_address		/* texture address sequence */
} drm_via_sequence_t;

/*
 * Per-stream verifier state, kept in the device private structure and
 * updated as the command stream is scanned.
 * NOTE(review): the [2][10] dimensions appear to be per-texture-unit,
 * per-mipmap-level arrays — confirm against the verifier code.
 */
typedef struct {
	unsigned texture;		/* texture unit currently being set up */
	uint32_t z_addr;		/* pending z-buffer address */
	uint32_t d_addr;		/* pending destination address */
	uint32_t t_addr[2][10];		/* texture addresses */
	uint32_t pitch[2][10];		/* texture pitches */
	uint32_t height[2][10];		/* texture heights */
	uint32_t tex_level_lo[2];	/* lowest mip level per unit */
	uint32_t tex_level_hi[2];	/* highest mip level per unit */
	uint32_t tex_palette_size[2];
	uint32_t tex_npot[2];		/* non-power-of-two texture flags */
	drm_via_sequence_t unfinished;	/* sequence awaiting completion */
	int agp_texture;		/* nonzero if texture lives in AGP space */
	int multitex;			/* nonzero if multitexturing is enabled */
	struct drm_device *dev;
	drm_local_map_t *map_cache;	/* last map hit, to speed up lookups */
	uint32_t vertex_count;
	int agp;			/* stream destined for the AGP ring */
	const uint32_t *buf_start;	/* start of the buffer being verified */
} drm_via_state_t;

/* Verify an untrusted DMA command stream; returns 0 or -EINVAL. */
extern int via_verify_command_stream(const uint32_t *buf, unsigned int size,
				     struct drm_device *dev, int agp);
/* Replay a verified stream to update software state; returns 0 or -EINVAL. */
extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
				    unsigned int size);

#endif