Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.20-rc2 1590 lines 41 kB view raw
1/* i830_dma.c -- DMA support for the I830 -*- linux-c -*- 2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com 3 * 4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 6 * All Rights Reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the "Software"), 10 * to deal in the Software without restriction, including without limitation 11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * and/or sell copies of the Software, and to permit persons to whom the 13 * Software is furnished to do so, subject to the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the next 16 * paragraph) shall be included in all copies or substantial portions of the 17 * Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 * DEALINGS IN THE SOFTWARE. 26 * 27 * Authors: Rickard E. 
(Rik) Faith <faith@valinux.com> 28 * Jeff Hartmann <jhartmann@valinux.com> 29 * Keith Whitwell <keith@tungstengraphics.com> 30 * Abraham vd Merwe <abraham@2d3d.co.za> 31 * 32 */ 33 34#include "drmP.h" 35#include "drm.h" 36#include "i830_drm.h" 37#include "i830_drv.h" 38#include <linux/interrupt.h> /* For task queue support */ 39#include <linux/pagemap.h> /* For FASTCALL on unlock_page() */ 40#include <linux/delay.h> 41#include <asm/uaccess.h> 42 43#define I830_BUF_FREE 2 44#define I830_BUF_CLIENT 1 45#define I830_BUF_HARDWARE 0 46 47#define I830_BUF_UNMAPPED 0 48#define I830_BUF_MAPPED 1 49 50static drm_buf_t *i830_freelist_get(drm_device_t * dev) 51{ 52 drm_device_dma_t *dma = dev->dma; 53 int i; 54 int used; 55 56 /* Linear search might not be the best solution */ 57 58 for (i = 0; i < dma->buf_count; i++) { 59 drm_buf_t *buf = dma->buflist[i]; 60 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 61 /* In use is already a pointer */ 62 used = cmpxchg(buf_priv->in_use, I830_BUF_FREE, 63 I830_BUF_CLIENT); 64 if (used == I830_BUF_FREE) { 65 return buf; 66 } 67 } 68 return NULL; 69} 70 71/* This should only be called if the buffer is not sent to the hardware 72 * yet, the hardware updates in use for us once its on the ring buffer. 
73 */ 74 75static int i830_freelist_put(drm_device_t * dev, drm_buf_t * buf) 76{ 77 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 78 int used; 79 80 /* In use is already a pointer */ 81 used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE); 82 if (used != I830_BUF_CLIENT) { 83 DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx); 84 return -EINVAL; 85 } 86 87 return 0; 88} 89 90static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma) 91{ 92 drm_file_t *priv = filp->private_data; 93 drm_device_t *dev; 94 drm_i830_private_t *dev_priv; 95 drm_buf_t *buf; 96 drm_i830_buf_priv_t *buf_priv; 97 98 lock_kernel(); 99 dev = priv->head->dev; 100 dev_priv = dev->dev_private; 101 buf = dev_priv->mmap_buffer; 102 buf_priv = buf->dev_private; 103 104 vma->vm_flags |= (VM_IO | VM_DONTCOPY); 105 vma->vm_file = filp; 106 107 buf_priv->currently_mapped = I830_BUF_MAPPED; 108 unlock_kernel(); 109 110 if (io_remap_pfn_range(vma, vma->vm_start, 111 vma->vm_pgoff, 112 vma->vm_end - vma->vm_start, vma->vm_page_prot)) 113 return -EAGAIN; 114 return 0; 115} 116 117static struct file_operations i830_buffer_fops = { 118 .open = drm_open, 119 .release = drm_release, 120 .ioctl = drm_ioctl, 121 .mmap = i830_mmap_buffers, 122 .fasync = drm_fasync, 123}; 124 125static int i830_map_buffer(drm_buf_t * buf, struct file *filp) 126{ 127 drm_file_t *priv = filp->private_data; 128 drm_device_t *dev = priv->head->dev; 129 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 130 drm_i830_private_t *dev_priv = dev->dev_private; 131 const struct file_operations *old_fops; 132 unsigned long virtual; 133 int retcode = 0; 134 135 if (buf_priv->currently_mapped == I830_BUF_MAPPED) 136 return -EINVAL; 137 138 down_write(&current->mm->mmap_sem); 139 old_fops = filp->f_op; 140 filp->f_op = &i830_buffer_fops; 141 dev_priv->mmap_buffer = buf; 142 virtual = do_mmap(filp, 0, buf->total, PROT_READ | PROT_WRITE, 143 MAP_SHARED, buf->bus_address); 144 dev_priv->mmap_buffer = NULL; 
145 filp->f_op = old_fops; 146 if (IS_ERR((void *)virtual)) { /* ugh */ 147 /* Real error */ 148 DRM_ERROR("mmap error\n"); 149 retcode = PTR_ERR((void *)virtual); 150 buf_priv->virtual = NULL; 151 } else { 152 buf_priv->virtual = (void __user *)virtual; 153 } 154 up_write(&current->mm->mmap_sem); 155 156 return retcode; 157} 158 159static int i830_unmap_buffer(drm_buf_t * buf) 160{ 161 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 162 int retcode = 0; 163 164 if (buf_priv->currently_mapped != I830_BUF_MAPPED) 165 return -EINVAL; 166 167 down_write(&current->mm->mmap_sem); 168 retcode = do_munmap(current->mm, 169 (unsigned long)buf_priv->virtual, 170 (size_t) buf->total); 171 up_write(&current->mm->mmap_sem); 172 173 buf_priv->currently_mapped = I830_BUF_UNMAPPED; 174 buf_priv->virtual = NULL; 175 176 return retcode; 177} 178 179static int i830_dma_get_buffer(drm_device_t * dev, drm_i830_dma_t * d, 180 struct file *filp) 181{ 182 drm_buf_t *buf; 183 drm_i830_buf_priv_t *buf_priv; 184 int retcode = 0; 185 186 buf = i830_freelist_get(dev); 187 if (!buf) { 188 retcode = -ENOMEM; 189 DRM_DEBUG("retcode=%d\n", retcode); 190 return retcode; 191 } 192 193 retcode = i830_map_buffer(buf, filp); 194 if (retcode) { 195 i830_freelist_put(dev, buf); 196 DRM_ERROR("mapbuf failed, retcode %d\n", retcode); 197 return retcode; 198 } 199 buf->filp = filp; 200 buf_priv = buf->dev_private; 201 d->granted = 1; 202 d->request_idx = buf->idx; 203 d->request_size = buf->total; 204 d->virtual = buf_priv->virtual; 205 206 return retcode; 207} 208 209static int i830_dma_cleanup(drm_device_t * dev) 210{ 211 drm_device_dma_t *dma = dev->dma; 212 213 /* Make sure interrupts are disabled here because the uninstall ioctl 214 * may not have been called from userspace and after dev_private 215 * is freed, it's too late. 
216 */ 217 if (dev->irq_enabled) 218 drm_irq_uninstall(dev); 219 220 if (dev->dev_private) { 221 int i; 222 drm_i830_private_t *dev_priv = 223 (drm_i830_private_t *) dev->dev_private; 224 225 if (dev_priv->ring.virtual_start) { 226 drm_ioremapfree((void *)dev_priv->ring.virtual_start, 227 dev_priv->ring.Size, dev); 228 } 229 if (dev_priv->hw_status_page) { 230 pci_free_consistent(dev->pdev, PAGE_SIZE, 231 dev_priv->hw_status_page, 232 dev_priv->dma_status_page); 233 /* Need to rewrite hardware status page */ 234 I830_WRITE(0x02080, 0x1ffff000); 235 } 236 237 drm_free(dev->dev_private, sizeof(drm_i830_private_t), 238 DRM_MEM_DRIVER); 239 dev->dev_private = NULL; 240 241 for (i = 0; i < dma->buf_count; i++) { 242 drm_buf_t *buf = dma->buflist[i]; 243 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 244 if (buf_priv->kernel_virtual && buf->total) 245 drm_ioremapfree(buf_priv->kernel_virtual, 246 buf->total, dev); 247 } 248 } 249 return 0; 250} 251 252int i830_wait_ring(drm_device_t * dev, int n, const char *caller) 253{ 254 drm_i830_private_t *dev_priv = dev->dev_private; 255 drm_i830_ring_buffer_t *ring = &(dev_priv->ring); 256 int iters = 0; 257 unsigned long end; 258 unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; 259 260 end = jiffies + (HZ * 3); 261 while (ring->space < n) { 262 ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; 263 ring->space = ring->head - (ring->tail + 8); 264 if (ring->space < 0) 265 ring->space += ring->Size; 266 267 if (ring->head != last_head) { 268 end = jiffies + (HZ * 3); 269 last_head = ring->head; 270 } 271 272 iters++; 273 if (time_before(end, jiffies)) { 274 DRM_ERROR("space: %d wanted %d\n", ring->space, n); 275 DRM_ERROR("lockup\n"); 276 goto out_wait_ring; 277 } 278 udelay(1); 279 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT; 280 } 281 282 out_wait_ring: 283 return iters; 284} 285 286static void i830_kernel_lost_context(drm_device_t * dev) 287{ 288 drm_i830_private_t *dev_priv = dev->dev_private; 
289 drm_i830_ring_buffer_t *ring = &(dev_priv->ring); 290 291 ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; 292 ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR; 293 ring->space = ring->head - (ring->tail + 8); 294 if (ring->space < 0) 295 ring->space += ring->Size; 296 297 if (ring->head == ring->tail) 298 dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY; 299} 300 301static int i830_freelist_init(drm_device_t * dev, drm_i830_private_t * dev_priv) 302{ 303 drm_device_dma_t *dma = dev->dma; 304 int my_idx = 36; 305 u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx); 306 int i; 307 308 if (dma->buf_count > 1019) { 309 /* Not enough space in the status page for the freelist */ 310 return -EINVAL; 311 } 312 313 for (i = 0; i < dma->buf_count; i++) { 314 drm_buf_t *buf = dma->buflist[i]; 315 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 316 317 buf_priv->in_use = hw_status++; 318 buf_priv->my_use_idx = my_idx; 319 my_idx += 4; 320 321 *buf_priv->in_use = I830_BUF_FREE; 322 323 buf_priv->kernel_virtual = drm_ioremap(buf->bus_address, 324 buf->total, dev); 325 } 326 return 0; 327} 328 329static int i830_dma_initialize(drm_device_t * dev, 330 drm_i830_private_t * dev_priv, 331 drm_i830_init_t * init) 332{ 333 struct list_head *list; 334 335 memset(dev_priv, 0, sizeof(drm_i830_private_t)); 336 337 list_for_each(list, &dev->maplist->head) { 338 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head); 339 if (r_list->map && 340 r_list->map->type == _DRM_SHM && 341 r_list->map->flags & _DRM_CONTAINS_LOCK) { 342 dev_priv->sarea_map = r_list->map; 343 break; 344 } 345 } 346 347 if (!dev_priv->sarea_map) { 348 dev->dev_private = (void *)dev_priv; 349 i830_dma_cleanup(dev); 350 DRM_ERROR("can not find sarea!\n"); 351 return -EINVAL; 352 } 353 dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); 354 if (!dev_priv->mmio_map) { 355 dev->dev_private = (void *)dev_priv; 356 i830_dma_cleanup(dev); 357 DRM_ERROR("can not find mmio 
map!\n"); 358 return -EINVAL; 359 } 360 dev->agp_buffer_token = init->buffers_offset; 361 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 362 if (!dev->agp_buffer_map) { 363 dev->dev_private = (void *)dev_priv; 364 i830_dma_cleanup(dev); 365 DRM_ERROR("can not find dma buffer map!\n"); 366 return -EINVAL; 367 } 368 369 dev_priv->sarea_priv = (drm_i830_sarea_t *) 370 ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset); 371 372 dev_priv->ring.Start = init->ring_start; 373 dev_priv->ring.End = init->ring_end; 374 dev_priv->ring.Size = init->ring_size; 375 376 dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base + 377 init->ring_start, 378 init->ring_size, dev); 379 380 if (dev_priv->ring.virtual_start == NULL) { 381 dev->dev_private = (void *)dev_priv; 382 i830_dma_cleanup(dev); 383 DRM_ERROR("can not ioremap virtual address for" 384 " ring buffer\n"); 385 return -ENOMEM; 386 } 387 388 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; 389 390 dev_priv->w = init->w; 391 dev_priv->h = init->h; 392 dev_priv->pitch = init->pitch; 393 dev_priv->back_offset = init->back_offset; 394 dev_priv->depth_offset = init->depth_offset; 395 dev_priv->front_offset = init->front_offset; 396 397 dev_priv->front_di1 = init->front_offset | init->pitch_bits; 398 dev_priv->back_di1 = init->back_offset | init->pitch_bits; 399 dev_priv->zi1 = init->depth_offset | init->pitch_bits; 400 401 DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1); 402 DRM_DEBUG("back_offset %x\n", dev_priv->back_offset); 403 DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1); 404 DRM_DEBUG("pitch_bits %x\n", init->pitch_bits); 405 406 dev_priv->cpp = init->cpp; 407 /* We are using separate values as placeholders for mechanisms for 408 * private backbuffer/depthbuffer usage. 
409 */ 410 411 dev_priv->back_pitch = init->back_pitch; 412 dev_priv->depth_pitch = init->depth_pitch; 413 dev_priv->do_boxes = 0; 414 dev_priv->use_mi_batchbuffer_start = 0; 415 416 /* Program Hardware Status Page */ 417 dev_priv->hw_status_page = 418 pci_alloc_consistent(dev->pdev, PAGE_SIZE, 419 &dev_priv->dma_status_page); 420 if (!dev_priv->hw_status_page) { 421 dev->dev_private = (void *)dev_priv; 422 i830_dma_cleanup(dev); 423 DRM_ERROR("Can not allocate hardware status page\n"); 424 return -ENOMEM; 425 } 426 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 427 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); 428 429 I830_WRITE(0x02080, dev_priv->dma_status_page); 430 DRM_DEBUG("Enabled hardware status page\n"); 431 432 /* Now we need to init our freelist */ 433 if (i830_freelist_init(dev, dev_priv) != 0) { 434 dev->dev_private = (void *)dev_priv; 435 i830_dma_cleanup(dev); 436 DRM_ERROR("Not enough space in the status page for" 437 " the freelist\n"); 438 return -ENOMEM; 439 } 440 dev->dev_private = (void *)dev_priv; 441 442 return 0; 443} 444 445static int i830_dma_init(struct inode *inode, struct file *filp, 446 unsigned int cmd, unsigned long arg) 447{ 448 drm_file_t *priv = filp->private_data; 449 drm_device_t *dev = priv->head->dev; 450 drm_i830_private_t *dev_priv; 451 drm_i830_init_t init; 452 int retcode = 0; 453 454 if (copy_from_user(&init, (void *__user)arg, sizeof(init))) 455 return -EFAULT; 456 457 switch (init.func) { 458 case I830_INIT_DMA: 459 dev_priv = drm_alloc(sizeof(drm_i830_private_t), 460 DRM_MEM_DRIVER); 461 if (dev_priv == NULL) 462 return -ENOMEM; 463 retcode = i830_dma_initialize(dev, dev_priv, &init); 464 break; 465 case I830_CLEANUP_DMA: 466 retcode = i830_dma_cleanup(dev); 467 break; 468 default: 469 retcode = -EINVAL; 470 break; 471 } 472 473 return retcode; 474} 475 476#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) 477#define ST1_ENABLE (1<<16) 478#define ST1_MASK (0xffff) 479 480/* Most efficient way to 
verify state for the i830 is as it is 481 * emitted. Non-conformant state is silently dropped. 482 */ 483static void i830EmitContextVerified(drm_device_t * dev, unsigned int *code) 484{ 485 drm_i830_private_t *dev_priv = dev->dev_private; 486 int i, j = 0; 487 unsigned int tmp; 488 RING_LOCALS; 489 490 BEGIN_LP_RING(I830_CTX_SETUP_SIZE + 4); 491 492 for (i = 0; i < I830_CTXREG_BLENDCOLR0; i++) { 493 tmp = code[i]; 494 if ((tmp & (7 << 29)) == CMD_3D && 495 (tmp & (0x1f << 24)) < (0x1d << 24)) { 496 OUT_RING(tmp); 497 j++; 498 } else { 499 DRM_ERROR("Skipping %d\n", i); 500 } 501 } 502 503 OUT_RING(STATE3D_CONST_BLEND_COLOR_CMD); 504 OUT_RING(code[I830_CTXREG_BLENDCOLR]); 505 j += 2; 506 507 for (i = I830_CTXREG_VF; i < I830_CTXREG_MCSB0; i++) { 508 tmp = code[i]; 509 if ((tmp & (7 << 29)) == CMD_3D && 510 (tmp & (0x1f << 24)) < (0x1d << 24)) { 511 OUT_RING(tmp); 512 j++; 513 } else { 514 DRM_ERROR("Skipping %d\n", i); 515 } 516 } 517 518 OUT_RING(STATE3D_MAP_COORD_SETBIND_CMD); 519 OUT_RING(code[I830_CTXREG_MCSB1]); 520 j += 2; 521 522 if (j & 1) 523 OUT_RING(0); 524 525 ADVANCE_LP_RING(); 526} 527 528static void i830EmitTexVerified(drm_device_t * dev, unsigned int *code) 529{ 530 drm_i830_private_t *dev_priv = dev->dev_private; 531 int i, j = 0; 532 unsigned int tmp; 533 RING_LOCALS; 534 535 if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO || 536 (code[I830_TEXREG_MI0] & ~(0xf * LOAD_TEXTURE_MAP0)) == 537 (STATE3D_LOAD_STATE_IMMEDIATE_2 | 4)) { 538 539 BEGIN_LP_RING(I830_TEX_SETUP_SIZE); 540 541 OUT_RING(code[I830_TEXREG_MI0]); /* TM0LI */ 542 OUT_RING(code[I830_TEXREG_MI1]); /* TM0S0 */ 543 OUT_RING(code[I830_TEXREG_MI2]); /* TM0S1 */ 544 OUT_RING(code[I830_TEXREG_MI3]); /* TM0S2 */ 545 OUT_RING(code[I830_TEXREG_MI4]); /* TM0S3 */ 546 OUT_RING(code[I830_TEXREG_MI5]); /* TM0S4 */ 547 548 for (i = 6; i < I830_TEX_SETUP_SIZE; i++) { 549 tmp = code[i]; 550 OUT_RING(tmp); 551 j++; 552 } 553 554 if (j & 1) 555 OUT_RING(0); 556 557 ADVANCE_LP_RING(); 558 } else 559 
printk("rejected packet %x\n", code[0]); 560} 561 562static void i830EmitTexBlendVerified(drm_device_t * dev, 563 unsigned int *code, unsigned int num) 564{ 565 drm_i830_private_t *dev_priv = dev->dev_private; 566 int i, j = 0; 567 unsigned int tmp; 568 RING_LOCALS; 569 570 if (!num) 571 return; 572 573 BEGIN_LP_RING(num + 1); 574 575 for (i = 0; i < num; i++) { 576 tmp = code[i]; 577 OUT_RING(tmp); 578 j++; 579 } 580 581 if (j & 1) 582 OUT_RING(0); 583 584 ADVANCE_LP_RING(); 585} 586 587static void i830EmitTexPalette(drm_device_t * dev, 588 unsigned int *palette, int number, int is_shared) 589{ 590 drm_i830_private_t *dev_priv = dev->dev_private; 591 int i; 592 RING_LOCALS; 593 594 return; 595 596 BEGIN_LP_RING(258); 597 598 if (is_shared == 1) { 599 OUT_RING(CMD_OP_MAP_PALETTE_LOAD | 600 MAP_PALETTE_NUM(0) | MAP_PALETTE_BOTH); 601 } else { 602 OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number)); 603 } 604 for (i = 0; i < 256; i++) { 605 OUT_RING(palette[i]); 606 } 607 OUT_RING(0); 608 /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop! 609 */ 610} 611 612/* Need to do some additional checking when setting the dest buffer. 
613 */ 614static void i830EmitDestVerified(drm_device_t * dev, unsigned int *code) 615{ 616 drm_i830_private_t *dev_priv = dev->dev_private; 617 unsigned int tmp; 618 RING_LOCALS; 619 620 BEGIN_LP_RING(I830_DEST_SETUP_SIZE + 10); 621 622 tmp = code[I830_DESTREG_CBUFADDR]; 623 if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) { 624 if (((int)outring) & 8) { 625 OUT_RING(0); 626 OUT_RING(0); 627 } 628 629 OUT_RING(CMD_OP_DESTBUFFER_INFO); 630 OUT_RING(BUF_3D_ID_COLOR_BACK | 631 BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) | 632 BUF_3D_USE_FENCE); 633 OUT_RING(tmp); 634 OUT_RING(0); 635 636 OUT_RING(CMD_OP_DESTBUFFER_INFO); 637 OUT_RING(BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE | 638 BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp)); 639 OUT_RING(dev_priv->zi1); 640 OUT_RING(0); 641 } else { 642 DRM_ERROR("bad di1 %x (allow %x or %x)\n", 643 tmp, dev_priv->front_di1, dev_priv->back_di1); 644 } 645 646 /* invarient: 647 */ 648 649 OUT_RING(GFX_OP_DESTBUFFER_VARS); 650 OUT_RING(code[I830_DESTREG_DV1]); 651 652 OUT_RING(GFX_OP_DRAWRECT_INFO); 653 OUT_RING(code[I830_DESTREG_DR1]); 654 OUT_RING(code[I830_DESTREG_DR2]); 655 OUT_RING(code[I830_DESTREG_DR3]); 656 OUT_RING(code[I830_DESTREG_DR4]); 657 658 /* Need to verify this */ 659 tmp = code[I830_DESTREG_SENABLE]; 660 if ((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) { 661 OUT_RING(tmp); 662 } else { 663 DRM_ERROR("bad scissor enable\n"); 664 OUT_RING(0); 665 } 666 667 OUT_RING(GFX_OP_SCISSOR_RECT); 668 OUT_RING(code[I830_DESTREG_SR1]); 669 OUT_RING(code[I830_DESTREG_SR2]); 670 OUT_RING(0); 671 672 ADVANCE_LP_RING(); 673} 674 675static void i830EmitStippleVerified(drm_device_t * dev, unsigned int *code) 676{ 677 drm_i830_private_t *dev_priv = dev->dev_private; 678 RING_LOCALS; 679 680 BEGIN_LP_RING(2); 681 OUT_RING(GFX_OP_STIPPLE); 682 OUT_RING(code[1]); 683 ADVANCE_LP_RING(); 684} 685 686static void i830EmitState(drm_device_t * dev) 687{ 688 drm_i830_private_t *dev_priv = dev->dev_private; 689 drm_i830_sarea_t 
*sarea_priv = dev_priv->sarea_priv; 690 unsigned int dirty = sarea_priv->dirty; 691 692 DRM_DEBUG("%s %x\n", __FUNCTION__, dirty); 693 694 if (dirty & I830_UPLOAD_BUFFERS) { 695 i830EmitDestVerified(dev, sarea_priv->BufferState); 696 sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS; 697 } 698 699 if (dirty & I830_UPLOAD_CTX) { 700 i830EmitContextVerified(dev, sarea_priv->ContextState); 701 sarea_priv->dirty &= ~I830_UPLOAD_CTX; 702 } 703 704 if (dirty & I830_UPLOAD_TEX0) { 705 i830EmitTexVerified(dev, sarea_priv->TexState[0]); 706 sarea_priv->dirty &= ~I830_UPLOAD_TEX0; 707 } 708 709 if (dirty & I830_UPLOAD_TEX1) { 710 i830EmitTexVerified(dev, sarea_priv->TexState[1]); 711 sarea_priv->dirty &= ~I830_UPLOAD_TEX1; 712 } 713 714 if (dirty & I830_UPLOAD_TEXBLEND0) { 715 i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[0], 716 sarea_priv->TexBlendStateWordsUsed[0]); 717 sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0; 718 } 719 720 if (dirty & I830_UPLOAD_TEXBLEND1) { 721 i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[1], 722 sarea_priv->TexBlendStateWordsUsed[1]); 723 sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1; 724 } 725 726 if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) { 727 i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1); 728 } else { 729 if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) { 730 i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0); 731 sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0); 732 } 733 if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) { 734 i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0); 735 sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1); 736 } 737 738 /* 1.3: 739 */ 740#if 0 741 if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) { 742 i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0); 743 sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2); 744 } 745 if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) { 746 i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0); 747 sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2); 748 } 749#endif 750 } 751 752 /* 
1.3: 753 */ 754 if (dirty & I830_UPLOAD_STIPPLE) { 755 i830EmitStippleVerified(dev, sarea_priv->StippleState); 756 sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE; 757 } 758 759 if (dirty & I830_UPLOAD_TEX2) { 760 i830EmitTexVerified(dev, sarea_priv->TexState2); 761 sarea_priv->dirty &= ~I830_UPLOAD_TEX2; 762 } 763 764 if (dirty & I830_UPLOAD_TEX3) { 765 i830EmitTexVerified(dev, sarea_priv->TexState3); 766 sarea_priv->dirty &= ~I830_UPLOAD_TEX3; 767 } 768 769 if (dirty & I830_UPLOAD_TEXBLEND2) { 770 i830EmitTexBlendVerified(dev, 771 sarea_priv->TexBlendState2, 772 sarea_priv->TexBlendStateWordsUsed2); 773 774 sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2; 775 } 776 777 if (dirty & I830_UPLOAD_TEXBLEND3) { 778 i830EmitTexBlendVerified(dev, 779 sarea_priv->TexBlendState3, 780 sarea_priv->TexBlendStateWordsUsed3); 781 sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3; 782 } 783} 784 785/* ================================================================ 786 * Performance monitoring functions 787 */ 788 789static void i830_fill_box(drm_device_t * dev, 790 int x, int y, int w, int h, int r, int g, int b) 791{ 792 drm_i830_private_t *dev_priv = dev->dev_private; 793 u32 color; 794 unsigned int BR13, CMD; 795 RING_LOCALS; 796 797 BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1 << 24); 798 CMD = XY_COLOR_BLT_CMD; 799 x += dev_priv->sarea_priv->boxes[0].x1; 800 y += dev_priv->sarea_priv->boxes[0].y1; 801 802 if (dev_priv->cpp == 4) { 803 BR13 |= (1 << 25); 804 CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB); 805 color = (((0xff) << 24) | (r << 16) | (g << 8) | b); 806 } else { 807 color = (((r & 0xf8) << 8) | 808 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3)); 809 } 810 811 BEGIN_LP_RING(6); 812 OUT_RING(CMD); 813 OUT_RING(BR13); 814 OUT_RING((y << 16) | x); 815 OUT_RING(((y + h) << 16) | (x + w)); 816 817 if (dev_priv->current_page == 1) { 818 OUT_RING(dev_priv->front_offset); 819 } else { 820 OUT_RING(dev_priv->back_offset); 821 } 822 823 OUT_RING(color); 824 
ADVANCE_LP_RING(); 825} 826 827static void i830_cp_performance_boxes(drm_device_t * dev) 828{ 829 drm_i830_private_t *dev_priv = dev->dev_private; 830 831 /* Purple box for page flipping 832 */ 833 if (dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP) 834 i830_fill_box(dev, 4, 4, 8, 8, 255, 0, 255); 835 836 /* Red box if we have to wait for idle at any point 837 */ 838 if (dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT) 839 i830_fill_box(dev, 16, 4, 8, 8, 255, 0, 0); 840 841 /* Blue box: lost context? 842 */ 843 if (dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT) 844 i830_fill_box(dev, 28, 4, 8, 8, 0, 0, 255); 845 846 /* Yellow box for texture swaps 847 */ 848 if (dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD) 849 i830_fill_box(dev, 40, 4, 8, 8, 255, 255, 0); 850 851 /* Green box if hardware never idles (as far as we can tell) 852 */ 853 if (!(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY)) 854 i830_fill_box(dev, 64, 4, 8, 8, 0, 255, 0); 855 856 /* Draw bars indicating number of buffers allocated 857 * (not a great measure, easily confused) 858 */ 859 if (dev_priv->dma_used) { 860 int bar = dev_priv->dma_used / 10240; 861 if (bar > 100) 862 bar = 100; 863 if (bar < 1) 864 bar = 1; 865 i830_fill_box(dev, 4, 16, bar, 4, 196, 128, 128); 866 dev_priv->dma_used = 0; 867 } 868 869 dev_priv->sarea_priv->perf_boxes = 0; 870} 871 872static void i830_dma_dispatch_clear(drm_device_t * dev, int flags, 873 unsigned int clear_color, 874 unsigned int clear_zval, 875 unsigned int clear_depthmask) 876{ 877 drm_i830_private_t *dev_priv = dev->dev_private; 878 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; 879 int nbox = sarea_priv->nbox; 880 drm_clip_rect_t *pbox = sarea_priv->boxes; 881 int pitch = dev_priv->pitch; 882 int cpp = dev_priv->cpp; 883 int i; 884 unsigned int BR13, CMD, D_CMD; 885 RING_LOCALS; 886 887 if (dev_priv->current_page == 1) { 888 unsigned int tmp = flags; 889 890 flags &= ~(I830_FRONT | I830_BACK); 891 if (tmp & 
I830_FRONT) 892 flags |= I830_BACK; 893 if (tmp & I830_BACK) 894 flags |= I830_FRONT; 895 } 896 897 i830_kernel_lost_context(dev); 898 899 switch (cpp) { 900 case 2: 901 BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24); 902 D_CMD = CMD = XY_COLOR_BLT_CMD; 903 break; 904 case 4: 905 BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25); 906 CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA | 907 XY_COLOR_BLT_WRITE_RGB); 908 D_CMD = XY_COLOR_BLT_CMD; 909 if (clear_depthmask & 0x00ffffff) 910 D_CMD |= XY_COLOR_BLT_WRITE_RGB; 911 if (clear_depthmask & 0xff000000) 912 D_CMD |= XY_COLOR_BLT_WRITE_ALPHA; 913 break; 914 default: 915 BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24); 916 D_CMD = CMD = XY_COLOR_BLT_CMD; 917 break; 918 } 919 920 if (nbox > I830_NR_SAREA_CLIPRECTS) 921 nbox = I830_NR_SAREA_CLIPRECTS; 922 923 for (i = 0; i < nbox; i++, pbox++) { 924 if (pbox->x1 > pbox->x2 || 925 pbox->y1 > pbox->y2 || 926 pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h) 927 continue; 928 929 if (flags & I830_FRONT) { 930 DRM_DEBUG("clear front\n"); 931 BEGIN_LP_RING(6); 932 OUT_RING(CMD); 933 OUT_RING(BR13); 934 OUT_RING((pbox->y1 << 16) | pbox->x1); 935 OUT_RING((pbox->y2 << 16) | pbox->x2); 936 OUT_RING(dev_priv->front_offset); 937 OUT_RING(clear_color); 938 ADVANCE_LP_RING(); 939 } 940 941 if (flags & I830_BACK) { 942 DRM_DEBUG("clear back\n"); 943 BEGIN_LP_RING(6); 944 OUT_RING(CMD); 945 OUT_RING(BR13); 946 OUT_RING((pbox->y1 << 16) | pbox->x1); 947 OUT_RING((pbox->y2 << 16) | pbox->x2); 948 OUT_RING(dev_priv->back_offset); 949 OUT_RING(clear_color); 950 ADVANCE_LP_RING(); 951 } 952 953 if (flags & I830_DEPTH) { 954 DRM_DEBUG("clear depth\n"); 955 BEGIN_LP_RING(6); 956 OUT_RING(D_CMD); 957 OUT_RING(BR13); 958 OUT_RING((pbox->y1 << 16) | pbox->x1); 959 OUT_RING((pbox->y2 << 16) | pbox->x2); 960 OUT_RING(dev_priv->depth_offset); 961 OUT_RING(clear_zval); 962 ADVANCE_LP_RING(); 963 } 964 } 965} 966 967static void i830_dma_dispatch_swap(drm_device_t * dev) 968{ 969 
drm_i830_private_t *dev_priv = dev->dev_private; 970 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; 971 int nbox = sarea_priv->nbox; 972 drm_clip_rect_t *pbox = sarea_priv->boxes; 973 int pitch = dev_priv->pitch; 974 int cpp = dev_priv->cpp; 975 int i; 976 unsigned int CMD, BR13; 977 RING_LOCALS; 978 979 DRM_DEBUG("swapbuffers\n"); 980 981 i830_kernel_lost_context(dev); 982 983 if (dev_priv->do_boxes) 984 i830_cp_performance_boxes(dev); 985 986 switch (cpp) { 987 case 2: 988 BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24); 989 CMD = XY_SRC_COPY_BLT_CMD; 990 break; 991 case 4: 992 BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25); 993 CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA | 994 XY_SRC_COPY_BLT_WRITE_RGB); 995 break; 996 default: 997 BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24); 998 CMD = XY_SRC_COPY_BLT_CMD; 999 break; 1000 } 1001 1002 if (nbox > I830_NR_SAREA_CLIPRECTS) 1003 nbox = I830_NR_SAREA_CLIPRECTS; 1004 1005 for (i = 0; i < nbox; i++, pbox++) { 1006 if (pbox->x1 > pbox->x2 || 1007 pbox->y1 > pbox->y2 || 1008 pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h) 1009 continue; 1010 1011 DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n", 1012 pbox->x1, pbox->y1, pbox->x2, pbox->y2); 1013 1014 BEGIN_LP_RING(8); 1015 OUT_RING(CMD); 1016 OUT_RING(BR13); 1017 OUT_RING((pbox->y1 << 16) | pbox->x1); 1018 OUT_RING((pbox->y2 << 16) | pbox->x2); 1019 1020 if (dev_priv->current_page == 0) 1021 OUT_RING(dev_priv->front_offset); 1022 else 1023 OUT_RING(dev_priv->back_offset); 1024 1025 OUT_RING((pbox->y1 << 16) | pbox->x1); 1026 OUT_RING(BR13 & 0xffff); 1027 1028 if (dev_priv->current_page == 0) 1029 OUT_RING(dev_priv->back_offset); 1030 else 1031 OUT_RING(dev_priv->front_offset); 1032 1033 ADVANCE_LP_RING(); 1034 } 1035} 1036 1037static void i830_dma_dispatch_flip(drm_device_t * dev) 1038{ 1039 drm_i830_private_t *dev_priv = dev->dev_private; 1040 RING_LOCALS; 1041 1042 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", 1043 __FUNCTION__, 1044 
dev_priv->current_page, 1045 dev_priv->sarea_priv->pf_current_page); 1046 1047 i830_kernel_lost_context(dev); 1048 1049 if (dev_priv->do_boxes) { 1050 dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP; 1051 i830_cp_performance_boxes(dev); 1052 } 1053 1054 BEGIN_LP_RING(2); 1055 OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE); 1056 OUT_RING(0); 1057 ADVANCE_LP_RING(); 1058 1059 BEGIN_LP_RING(6); 1060 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); 1061 OUT_RING(0); 1062 if (dev_priv->current_page == 0) { 1063 OUT_RING(dev_priv->back_offset); 1064 dev_priv->current_page = 1; 1065 } else { 1066 OUT_RING(dev_priv->front_offset); 1067 dev_priv->current_page = 0; 1068 } 1069 OUT_RING(0); 1070 ADVANCE_LP_RING(); 1071 1072 BEGIN_LP_RING(2); 1073 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); 1074 OUT_RING(0); 1075 ADVANCE_LP_RING(); 1076 1077 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; 1078} 1079 1080static void i830_dma_dispatch_vertex(drm_device_t * dev, 1081 drm_buf_t * buf, int discard, int used) 1082{ 1083 drm_i830_private_t *dev_priv = dev->dev_private; 1084 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 1085 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; 1086 drm_clip_rect_t *box = sarea_priv->boxes; 1087 int nbox = sarea_priv->nbox; 1088 unsigned long address = (unsigned long)buf->bus_address; 1089 unsigned long start = address - dev->agp->base; 1090 int i = 0, u; 1091 RING_LOCALS; 1092 1093 i830_kernel_lost_context(dev); 1094 1095 if (nbox > I830_NR_SAREA_CLIPRECTS) 1096 nbox = I830_NR_SAREA_CLIPRECTS; 1097 1098 if (discard) { 1099 u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, 1100 I830_BUF_HARDWARE); 1101 if (u != I830_BUF_CLIENT) { 1102 DRM_DEBUG("xxxx 2\n"); 1103 } 1104 } 1105 1106 if (used > 4 * 1023) 1107 used = 0; 1108 1109 if (sarea_priv->dirty) 1110 i830EmitState(dev); 1111 1112 DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n", 1113 address, used, nbox); 1114 1115 dev_priv->counter++; 
1116 DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter); 1117 DRM_DEBUG("i830_dma_dispatch\n"); 1118 DRM_DEBUG("start : %lx\n", start); 1119 DRM_DEBUG("used : %d\n", used); 1120 DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4); 1121 1122 if (buf_priv->currently_mapped == I830_BUF_MAPPED) { 1123 u32 *vp = buf_priv->kernel_virtual; 1124 1125 vp[0] = (GFX_OP_PRIMITIVE | 1126 sarea_priv->vertex_prim | ((used / 4) - 2)); 1127 1128 if (dev_priv->use_mi_batchbuffer_start) { 1129 vp[used / 4] = MI_BATCH_BUFFER_END; 1130 used += 4; 1131 } 1132 1133 if (used & 4) { 1134 vp[used / 4] = 0; 1135 used += 4; 1136 } 1137 1138 i830_unmap_buffer(buf); 1139 } 1140 1141 if (used) { 1142 do { 1143 if (i < nbox) { 1144 BEGIN_LP_RING(6); 1145 OUT_RING(GFX_OP_DRAWRECT_INFO); 1146 OUT_RING(sarea_priv-> 1147 BufferState[I830_DESTREG_DR1]); 1148 OUT_RING(box[i].x1 | (box[i].y1 << 16)); 1149 OUT_RING(box[i].x2 | (box[i].y2 << 16)); 1150 OUT_RING(sarea_priv-> 1151 BufferState[I830_DESTREG_DR4]); 1152 OUT_RING(0); 1153 ADVANCE_LP_RING(); 1154 } 1155 1156 if (dev_priv->use_mi_batchbuffer_start) { 1157 BEGIN_LP_RING(2); 1158 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); 1159 OUT_RING(start | MI_BATCH_NON_SECURE); 1160 ADVANCE_LP_RING(); 1161 } else { 1162 BEGIN_LP_RING(4); 1163 OUT_RING(MI_BATCH_BUFFER); 1164 OUT_RING(start | MI_BATCH_NON_SECURE); 1165 OUT_RING(start + used - 4); 1166 OUT_RING(0); 1167 ADVANCE_LP_RING(); 1168 } 1169 1170 } while (++i < nbox); 1171 } 1172 1173 if (discard) { 1174 dev_priv->counter++; 1175 1176 (void)cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, 1177 I830_BUF_HARDWARE); 1178 1179 BEGIN_LP_RING(8); 1180 OUT_RING(CMD_STORE_DWORD_IDX); 1181 OUT_RING(20); 1182 OUT_RING(dev_priv->counter); 1183 OUT_RING(CMD_STORE_DWORD_IDX); 1184 OUT_RING(buf_priv->my_use_idx); 1185 OUT_RING(I830_BUF_FREE); 1186 OUT_RING(CMD_REPORT_HEAD); 1187 OUT_RING(0); 1188 ADVANCE_LP_RING(); 1189 } 1190} 1191 1192static void i830_dma_quiescent(drm_device_t * dev) 1193{ 1194 
drm_i830_private_t *dev_priv = dev->dev_private; 1195 RING_LOCALS; 1196 1197 i830_kernel_lost_context(dev); 1198 1199 BEGIN_LP_RING(4); 1200 OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE); 1201 OUT_RING(CMD_REPORT_HEAD); 1202 OUT_RING(0); 1203 OUT_RING(0); 1204 ADVANCE_LP_RING(); 1205 1206 i830_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); 1207} 1208 1209static int i830_flush_queue(drm_device_t * dev) 1210{ 1211 drm_i830_private_t *dev_priv = dev->dev_private; 1212 drm_device_dma_t *dma = dev->dma; 1213 int i, ret = 0; 1214 RING_LOCALS; 1215 1216 i830_kernel_lost_context(dev); 1217 1218 BEGIN_LP_RING(2); 1219 OUT_RING(CMD_REPORT_HEAD); 1220 OUT_RING(0); 1221 ADVANCE_LP_RING(); 1222 1223 i830_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); 1224 1225 for (i = 0; i < dma->buf_count; i++) { 1226 drm_buf_t *buf = dma->buflist[i]; 1227 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 1228 1229 int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE, 1230 I830_BUF_FREE); 1231 1232 if (used == I830_BUF_HARDWARE) 1233 DRM_DEBUG("reclaimed from HARDWARE\n"); 1234 if (used == I830_BUF_CLIENT) 1235 DRM_DEBUG("still on client\n"); 1236 } 1237 1238 return ret; 1239} 1240 1241/* Must be called with the lock held */ 1242static void i830_reclaim_buffers(drm_device_t * dev, struct file *filp) 1243{ 1244 drm_device_dma_t *dma = dev->dma; 1245 int i; 1246 1247 if (!dma) 1248 return; 1249 if (!dev->dev_private) 1250 return; 1251 if (!dma->buflist) 1252 return; 1253 1254 i830_flush_queue(dev); 1255 1256 for (i = 0; i < dma->buf_count; i++) { 1257 drm_buf_t *buf = dma->buflist[i]; 1258 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 1259 1260 if (buf->filp == filp && buf_priv) { 1261 int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, 1262 I830_BUF_FREE); 1263 1264 if (used == I830_BUF_CLIENT) 1265 DRM_DEBUG("reclaimed from client\n"); 1266 if (buf_priv->currently_mapped == I830_BUF_MAPPED) 1267 buf_priv->currently_mapped = I830_BUF_UNMAPPED; 1268 
} 1269 } 1270} 1271 1272static int i830_flush_ioctl(struct inode *inode, struct file *filp, 1273 unsigned int cmd, unsigned long arg) 1274{ 1275 drm_file_t *priv = filp->private_data; 1276 drm_device_t *dev = priv->head->dev; 1277 1278 LOCK_TEST_WITH_RETURN(dev, filp); 1279 1280 i830_flush_queue(dev); 1281 return 0; 1282} 1283 1284static int i830_dma_vertex(struct inode *inode, struct file *filp, 1285 unsigned int cmd, unsigned long arg) 1286{ 1287 drm_file_t *priv = filp->private_data; 1288 drm_device_t *dev = priv->head->dev; 1289 drm_device_dma_t *dma = dev->dma; 1290 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; 1291 u32 *hw_status = dev_priv->hw_status_page; 1292 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) 1293 dev_priv->sarea_priv; 1294 drm_i830_vertex_t vertex; 1295 1296 if (copy_from_user 1297 (&vertex, (drm_i830_vertex_t __user *) arg, sizeof(vertex))) 1298 return -EFAULT; 1299 1300 LOCK_TEST_WITH_RETURN(dev, filp); 1301 1302 DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n", 1303 vertex.idx, vertex.used, vertex.discard); 1304 1305 if (vertex.idx < 0 || vertex.idx > dma->buf_count) 1306 return -EINVAL; 1307 1308 i830_dma_dispatch_vertex(dev, 1309 dma->buflist[vertex.idx], 1310 vertex.discard, vertex.used); 1311 1312 sarea_priv->last_enqueue = dev_priv->counter - 1; 1313 sarea_priv->last_dispatch = (int)hw_status[5]; 1314 1315 return 0; 1316} 1317 1318static int i830_clear_bufs(struct inode *inode, struct file *filp, 1319 unsigned int cmd, unsigned long arg) 1320{ 1321 drm_file_t *priv = filp->private_data; 1322 drm_device_t *dev = priv->head->dev; 1323 drm_i830_clear_t clear; 1324 1325 if (copy_from_user 1326 (&clear, (drm_i830_clear_t __user *) arg, sizeof(clear))) 1327 return -EFAULT; 1328 1329 LOCK_TEST_WITH_RETURN(dev, filp); 1330 1331 /* GH: Someone's doing nasty things... 
*/ 1332 if (!dev->dev_private) { 1333 return -EINVAL; 1334 } 1335 1336 i830_dma_dispatch_clear(dev, clear.flags, 1337 clear.clear_color, 1338 clear.clear_depth, clear.clear_depthmask); 1339 return 0; 1340} 1341 1342static int i830_swap_bufs(struct inode *inode, struct file *filp, 1343 unsigned int cmd, unsigned long arg) 1344{ 1345 drm_file_t *priv = filp->private_data; 1346 drm_device_t *dev = priv->head->dev; 1347 1348 DRM_DEBUG("i830_swap_bufs\n"); 1349 1350 LOCK_TEST_WITH_RETURN(dev, filp); 1351 1352 i830_dma_dispatch_swap(dev); 1353 return 0; 1354} 1355 1356/* Not sure why this isn't set all the time: 1357 */ 1358static void i830_do_init_pageflip(drm_device_t * dev) 1359{ 1360 drm_i830_private_t *dev_priv = dev->dev_private; 1361 1362 DRM_DEBUG("%s\n", __FUNCTION__); 1363 dev_priv->page_flipping = 1; 1364 dev_priv->current_page = 0; 1365 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; 1366} 1367 1368static int i830_do_cleanup_pageflip(drm_device_t * dev) 1369{ 1370 drm_i830_private_t *dev_priv = dev->dev_private; 1371 1372 DRM_DEBUG("%s\n", __FUNCTION__); 1373 if (dev_priv->current_page != 0) 1374 i830_dma_dispatch_flip(dev); 1375 1376 dev_priv->page_flipping = 0; 1377 return 0; 1378} 1379 1380static int i830_flip_bufs(struct inode *inode, struct file *filp, 1381 unsigned int cmd, unsigned long arg) 1382{ 1383 drm_file_t *priv = filp->private_data; 1384 drm_device_t *dev = priv->head->dev; 1385 drm_i830_private_t *dev_priv = dev->dev_private; 1386 1387 DRM_DEBUG("%s\n", __FUNCTION__); 1388 1389 LOCK_TEST_WITH_RETURN(dev, filp); 1390 1391 if (!dev_priv->page_flipping) 1392 i830_do_init_pageflip(dev); 1393 1394 i830_dma_dispatch_flip(dev); 1395 return 0; 1396} 1397 1398static int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd, 1399 unsigned long arg) 1400{ 1401 drm_file_t *priv = filp->private_data; 1402 drm_device_t *dev = priv->head->dev; 1403 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; 1404 u32 
*hw_status = dev_priv->hw_status_page; 1405 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) 1406 dev_priv->sarea_priv; 1407 1408 sarea_priv->last_dispatch = (int)hw_status[5]; 1409 return 0; 1410} 1411 1412static int i830_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, 1413 unsigned long arg) 1414{ 1415 drm_file_t *priv = filp->private_data; 1416 drm_device_t *dev = priv->head->dev; 1417 int retcode = 0; 1418 drm_i830_dma_t d; 1419 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; 1420 u32 *hw_status = dev_priv->hw_status_page; 1421 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) 1422 dev_priv->sarea_priv; 1423 1424 DRM_DEBUG("getbuf\n"); 1425 if (copy_from_user(&d, (drm_i830_dma_t __user *) arg, sizeof(d))) 1426 return -EFAULT; 1427 1428 LOCK_TEST_WITH_RETURN(dev, filp); 1429 1430 d.granted = 0; 1431 1432 retcode = i830_dma_get_buffer(dev, &d, filp); 1433 1434 DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n", 1435 current->pid, retcode, d.granted); 1436 1437 if (copy_to_user((drm_dma_t __user *) arg, &d, sizeof(d))) 1438 return -EFAULT; 1439 sarea_priv->last_dispatch = (int)hw_status[5]; 1440 1441 return retcode; 1442} 1443 1444static int i830_copybuf(struct inode *inode, 1445 struct file *filp, unsigned int cmd, unsigned long arg) 1446{ 1447 /* Never copy - 2.4.x doesn't need it */ 1448 return 0; 1449} 1450 1451static int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd, 1452 unsigned long arg) 1453{ 1454 return 0; 1455} 1456 1457static int i830_getparam(struct inode *inode, struct file *filp, 1458 unsigned int cmd, unsigned long arg) 1459{ 1460 drm_file_t *priv = filp->private_data; 1461 drm_device_t *dev = priv->head->dev; 1462 drm_i830_private_t *dev_priv = dev->dev_private; 1463 drm_i830_getparam_t param; 1464 int value; 1465 1466 if (!dev_priv) { 1467 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1468 return -EINVAL; 1469 } 1470 1471 if (copy_from_user 1472 (&param, 
	    (drm_i830_getparam_t __user *) arg, sizeof(param)))
		return -EFAULT;

	switch (param.param) {
	case I830_PARAM_IRQ_ACTIVE:
		value = dev->irq_enabled;
		break;
	default:
		return -EINVAL;
	}

	/* param.value is a user-space pointer supplied by the caller. */
	if (copy_to_user(param.value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* ioctl: set a driver tunable; currently only selects whether batches
 * are dispatched with MI_BATCH_BUFFER_START or MI_BATCH_BUFFER.
 */
static int i830_setparam(struct inode *inode, struct file *filp,
			 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_setparam_t param;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return -EINVAL;
	}

	if (copy_from_user
	    (&param, (drm_i830_setparam_t __user *) arg, sizeof(param)))
		return -EFAULT;

	switch (param.param) {
	case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
		dev_priv->use_mi_batchbuffer_start = param.value;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Driver load hook: register the i830-specific statistics counters. */
int i830_driver_load(drm_device_t *dev, unsigned long flags)
{
	/* i830 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	return 0;
}

/* Last-close hook: tear down the DMA state. */
void i830_driver_lastclose(drm_device_t * dev)
{
	i830_dma_cleanup(dev);
}

/* Pre-close hook: restore the front buffer if page flipping was active. */
void i830_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
	if (dev->dev_private) {
		drm_i830_private_t *dev_priv = dev->dev_private;
		if (dev_priv->page_flipping) {
			i830_do_cleanup_pageflip(dev);
		}
	}
}

/* Reclaim-buffers hook: core DRM calls this with the lock held. */
void i830_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
{
	i830_reclaim_buffers(dev, filp);
}

/* DMA-quiescent hook: wait for the engine to go idle. */
int i830_driver_dma_quiescent(drm_device_t * dev)
{
	i830_dma_quiescent(dev);
	return 0;
}

/* ioctl dispatch table, indexed by DRM_IOCTL_NR of each command. */
drm_ioctl_desc_t i830_ioctls[] = {
	[DRM_IOCTL_NR(DRM_I830_INIT)] = {i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I830_VERTEX)] = {i830_dma_vertex, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I830_CLEAR)] = {i830_clear_bufs, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I830_FLUSH)] = {i830_flush_ioctl, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I830_GETAGE)] = {i830_getage, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I830_GETBUF)] = {i830_getbuf, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I830_SWAP)] = {i830_swap_bufs, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I830_COPY)] = {i830_copybuf, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I830_DOCOPY)] = {i830_docopy, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I830_FLIP)] = {i830_flip_bufs, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = {i830_irq_emit, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = {i830_irq_wait, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I830_GETPARAM)] = {i830_getparam, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I830_SETPARAM)] = {i830_setparam, DRM_AUTH}
};

int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i8xx is AGP.
 */
int i830_driver_device_is_agp(drm_device_t * dev)
{
	return 1;
}