Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.14 1397 lines 38 kB view raw
/* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
 *	    Jeff Hartmann <jhartmann@valinux.com>
 *	    Keith Whitwell <keith@tungstengraphics.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i810_drm.h"
#include "i810_drv.h"
#include <linux/interrupt.h>	/* For task queue support */
#include <linux/delay.h>
#include <linux/pagemap.h>

/* Ownership states kept in each buffer's in_use word, which lives in the
 * hardware status page so the GPU itself can update it (see
 * i810_freelist_init() below, which points in_use into that page).
 */
#define I810_BUF_FREE		2
#define I810_BUF_CLIENT		1
#define I810_BUF_HARDWARE	0

/* Whether the buffer is currently mmap()ed into a client's address space. */
#define I810_BUF_UNMAPPED 0
#define I810_BUF_MAPPED 1

/* Atomically claim a free DMA buffer for a client.
 *
 * Returns the claimed buffer with its in_use word switched from
 * I810_BUF_FREE to I810_BUF_CLIENT, or NULL when no buffer is free.
 */
static drm_buf_t *i810_freelist_get(drm_device_t *dev)
{
	drm_device_dma_t *dma = dev->dma;
	int i;
	int used;

	/* Linear search might not be the best solution */

	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[ i ];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
		/* In use is already a pointer */
		used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
			       I810_BUF_CLIENT);
		if (used == I810_BUF_FREE) {
			return buf;
		}
	}
	return NULL;
}

/* This should only be called if the buffer is not sent to the hardware
 * yet, the hardware updates in use for us once its on the ring buffer.
 */

static int i810_freelist_put(drm_device_t *dev, drm_buf_t *buf)
{
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	int used;

	/* In use is already a pointer */
	used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
	if (used != I810_BUF_CLIENT) {
		DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
		return -EINVAL;
	}

	return 0;
}

/* mmap handler installed temporarily on the drm file by i810_map_buffer().
 * It maps whichever buffer is currently stashed in dev_priv->mmap_buffer,
 * so it must only run under the mmap_sem held by i810_map_buffer().
 */
static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	drm_i810_private_t *dev_priv;
	drm_buf_t *buf;
	drm_i810_buf_priv_t *buf_priv;

	lock_kernel();
	dev = priv->head->dev;
	dev_priv = dev->dev_private;
	buf = dev_priv->mmap_buffer;
	buf_priv = buf->dev_private;

	vma->vm_flags |= (VM_IO | VM_DONTCOPY);
	vma->vm_file = filp;

	buf_priv->currently_mapped = I810_BUF_MAPPED;
	unlock_kernel();

	if (io_remap_pfn_range(vma, vma->vm_start,
			       VM_OFFSET(vma) >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot)) return -EAGAIN;
	return 0;
}

static struct file_operations i810_buffer_fops = {
	.open	 = drm_open,
	.flush	 = drm_flush,
	.release = drm_release,
	.ioctl	 = drm_ioctl,
	.mmap	 = i810_mmap_buffers,
	.fasync  = drm_fasync,
};

/* Map a DMA buffer into the calling client's address space.
 *
 * Temporarily swaps filp->f_op for i810_buffer_fops so that the do_mmap()
 * below lands in i810_mmap_buffers(); dev_priv->mmap_buffer carries the
 * target buffer across that call.  Serialized by mmap_sem.
 */
static int i810_map_buffer(drm_buf_t *buf, struct file *filp)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_private_t *dev_priv = dev->dev_private;
	struct file_operations *old_fops;
	int retcode = 0;

	if (buf_priv->currently_mapped == I810_BUF_MAPPED)
		return -EINVAL;

	down_write( &current->mm->mmap_sem );
	old_fops = filp->f_op;
	filp->f_op = &i810_buffer_fops;
	dev_priv->mmap_buffer = buf;
	buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total,
					    PROT_READ|PROT_WRITE,
					    MAP_SHARED,
					    buf->bus_address);
	dev_priv->mmap_buffer = NULL;
	filp->f_op = old_fops;
	/* do_mmap() returns a negative errno encoded in the pointer on
	 * failure; anything above -1024UL is such an error value.
	 */
	if ((unsigned long)buf_priv->virtual > -1024UL) {
		/* Real error */
		DRM_ERROR("mmap error\n");
		retcode = (signed int)buf_priv->virtual;
		buf_priv->virtual = NULL;
	}
	up_write( &current->mm->mmap_sem );

	return retcode;
}

/* Undo i810_map_buffer(): unmap the buffer from the current process. */
static int i810_unmap_buffer(drm_buf_t *buf)
{
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	int retcode = 0;

	if (buf_priv->currently_mapped != I810_BUF_MAPPED)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	retcode = do_munmap(current->mm,
			    (unsigned long)buf_priv->virtual,
			    (size_t) buf->total);
	up_write(&current->mm->mmap_sem);

	buf_priv->currently_mapped = I810_BUF_UNMAPPED;
	buf_priv->virtual = NULL;

	return retcode;
}

/* Claim a free buffer, map it for the client, and fill in the request
 * descriptor d.  On mapping failure the buffer is returned to the
 * freelist.  Returns 0 on success or a negative errno.
 */
static int i810_dma_get_buffer(drm_device_t *dev, drm_i810_dma_t *d,
			       struct file *filp)
{
	drm_buf_t *buf;
	drm_i810_buf_priv_t *buf_priv;
	int retcode = 0;

	buf = i810_freelist_get(dev);
	if (!buf) {
		retcode = -ENOMEM;
		DRM_DEBUG("retcode=%d\n", retcode);
		return retcode;
	}

	retcode = i810_map_buffer(buf, filp);
	if (retcode) {
		i810_freelist_put(dev, buf);
		DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
		return retcode;
	}
	buf->filp = filp;
	buf_priv = buf->dev_private;
	d->granted = 1;
	d->request_idx = buf->idx;
	d->request_size = buf->total;
	d->virtual = buf_priv->virtual;

	return retcode;
}

/* Tear down all driver-private DMA state: IRQs, ring mapping, status
 * page, dev_private itself, and the kernel mappings of every buffer.
 */
static int i810_dma_cleanup(drm_device_t *dev)
{
	drm_device_dma_t *dma = dev->dma;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
 */
	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		int i;
		drm_i810_private_t *dev_priv =
			(drm_i810_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_ioremapfree((void *) dev_priv->ring.virtual_start,
					dev_priv->ring.Size, dev);
		}
		if (dev_priv->hw_status_page) {
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    dev_priv->hw_status_page,
					    dev_priv->dma_status_page);
			/* Need to rewrite hardware status page */
			I810_WRITE(0x02080, 0x1ffff000);
		}
		drm_free(dev->dev_private, sizeof(drm_i810_private_t),
			 DRM_MEM_DRIVER);
		dev->dev_private = NULL;

		for (i = 0; i < dma->buf_count; i++) {
			drm_buf_t *buf = dma->buflist[ i ];
			drm_i810_buf_priv_t *buf_priv = buf->dev_private;
			if ( buf_priv->kernel_virtual && buf->total )
				drm_ioremapfree(buf_priv->kernel_virtual, buf->total, dev);
		}
	}
	return 0;
}

/* Busy-wait until the ring has at least n bytes of space.
 *
 * The 3 second timeout is restarted whenever the hardware head moves,
 * so only a truly wedged GPU trips the "lockup" path.  Returns the
 * number of poll iterations (diagnostic value only).
 */
static int i810_wait_ring(drm_device_t *dev, int n)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
	int iters = 0;
	unsigned long end;
	unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;

	end = jiffies + (HZ*3);
	while (ring->space < n) {
		ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail+8);
		if (ring->space < 0) ring->space += ring->Size;

		if (ring->head != last_head) {
			/* hardware is making progress; restart the timeout */
			end = jiffies + (HZ*3);
			last_head = ring->head;
		}

		iters++;
		if (time_before(end, jiffies)) {
			DRM_ERROR("space: %d wanted %d\n", ring->space, n);
			DRM_ERROR("lockup\n");
			goto out_wait_ring;
		}
		udelay(1);
	}

out_wait_ring:
	return iters;
}

/* Resynchronize the software ring bookkeeping with the hardware head
 * and tail registers (e.g. after another context touched the ring).
 */
static void i810_kernel_lost_context(drm_device_t *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I810_READ(LP_RING + RING_TAIL);
	ring->space = ring->head - (ring->tail+8);
	if (ring->space < 0) ring->space += ring->Size;
}

/* Set up the buffer freelist: each buffer gets a word in the hardware
 * status page (starting at byte offset 24) as its in_use flag, plus a
 * kernel ioremap of its contents.
 */
static int i810_freelist_init(drm_device_t *dev, drm_i810_private_t *dev_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int my_idx = 24;
	u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx);
	int i;

	if (dma->buf_count > 1019) {
		/* Not enough space in the status page for the freelist */
		return -EINVAL;
	}

	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[ i ];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		buf_priv->in_use = hw_status++;
		buf_priv->my_use_idx = my_idx;
		my_idx += 4;

		*buf_priv->in_use = I810_BUF_FREE;

		buf_priv->kernel_virtual = drm_ioremap(buf->bus_address,
						       buf->total, dev);
	}
	return 0;
}

/* Populate dev_priv from the userspace init request: locate the SAREA,
 * mmio and buffer maps, ioremap the ring, allocate the hardware status
 * page, and build the freelist.  Any failure unwinds via
 * i810_dma_cleanup() (which requires dev->dev_private to be set first).
 */
static int i810_dma_initialize(drm_device_t *dev,
			       drm_i810_private_t *dev_priv,
			       drm_i810_init_t *init)
{
	struct list_head *list;

	memset(dev_priv, 0, sizeof(drm_i810_private_t));

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
		if (r_list->map &&
		    r_list->map->type == _DRM_SHM &&
		    r_list->map->flags & _DRM_CONTAINS_LOCK ) {
			dev_priv->sarea_map = r_list->map;
			break;
		}
	}
	if (!dev_priv->sarea_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}
	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}
	dev->agp_buffer_token = init->buffers_offset;
	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
	if (!dev->agp_buffer_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find dma buffer map!\n");
		return -EINVAL;
	}

	dev_priv->sarea_priv = (drm_i810_sarea_t *)
		((u8 *)dev_priv->sarea_map->handle +
		 init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;

	dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base +
						   init->ring_start,
						   init->ring_size, dev);

	if (dev_priv->ring.virtual_start == NULL) {
		dev->dev_private = (void *) dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* ring size is a power of two, so Size-1 masks a tail offset */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->w = init->w;
	dev_priv->h = init->h;
	dev_priv->pitch = init->pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->front_offset = init->front_offset;

	dev_priv->overlay_offset = init->overlay_offset;
	dev_priv->overlay_physical = init->overlay_physical;

	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
	dev_priv->zi1 = init->depth_offset | init->pitch_bits;

	/* Program Hardware Status Page */
	dev_priv->hw_status_page =
		pci_alloc_consistent(dev->pdev, PAGE_SIZE,
				     &dev_priv->dma_status_page);
	if (!dev_priv->hw_status_page) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	I810_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	/* Now we need to init our freelist */
	if (i810_freelist_init(dev, dev_priv) != 0) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("Not enough space in the status page for"
			  " the freelist\n");
		return -ENOMEM;
	}
	dev->dev_private = (void *)dev_priv;

	return 0;
}

/* i810 DRM version 1.1 used a smaller init structure with different
 * ordering of values than is currently used (drm >= 1.2). There is
 * no defined way to detect the XFree version to correct this problem,
 * however by checking using this procedure we can detect the correct
 * thing to do.
 *
 * #1 Read the Smaller init structure from user-space
 * #2 Verify the overlay_physical is a valid physical address, or NULL
 *    If it isn't then we have a v1.1 client. Fix up params.
 *    If it is, then we have a 1.2 client... get the rest of the data.
 */
static int i810_dma_init_compat(drm_i810_init_t *init, unsigned long arg)
{

	/* Get v1.1 init data */
	if (copy_from_user(init, (drm_i810_pre12_init_t __user *)arg,
			   sizeof(drm_i810_pre12_init_t))) {
		return -EFAULT;
	}

	if ((!init->overlay_physical) || (init->overlay_physical > 4096)) {

		/* This is a v1.2 client, just get the v1.2 init data */
		DRM_INFO("Using POST v1.2 init.\n");
		if (copy_from_user(init, (drm_i810_init_t __user *)arg,
				   sizeof(drm_i810_init_t))) {
			return -EFAULT;
		}
	} else {

		/* This is a v1.1 client, fix the params */
		DRM_INFO("Using PRE v1.2 init.\n");
		/* The v1.1 layout packed these fields in a different
		 * order; shuffle them into the v1.2 positions.
		 */
		init->pitch_bits = init->h;
		init->pitch = init->w;
		init->h = init->overlay_physical;
		init->w = init->overlay_offset;
		init->overlay_physical = 0;
		init->overlay_offset = 0;
	}

	return 0;
}

/* DRM_IOCTL_I810_INIT entry point: dispatch on init.func to either
 * set up DMA (legacy or v1.4 layout) or tear it down.
 */
static int i810_dma_init(struct inode *inode, struct file *filp,
			 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_i810_private_t *dev_priv;
	drm_i810_init_t init;
	int retcode = 0;

	/* Get only the init func */
	if (copy_from_user(&init, (void __user *)arg, sizeof(drm_i810_init_func_t)))
		return -EFAULT;

	switch(init.func) {
	case I810_INIT_DMA:
		/* This case is for backward compatibility. It
		 * handles XFree 4.1.0 and 4.2.0, and has to
		 * do some parameter checking as described below.
		 * It will someday go away.
		 */
		retcode = i810_dma_init_compat(&init, arg);
		if (retcode)
			return retcode;

		dev_priv = drm_alloc(sizeof(drm_i810_private_t),
				     DRM_MEM_DRIVER);
		if (dev_priv == NULL)
			return -ENOMEM;
		retcode = i810_dma_initialize(dev, dev_priv, &init);
		break;

	default:
	case I810_INIT_DMA_1_4:
		DRM_INFO("Using v1.4 init.\n");
		if (copy_from_user(&init, (drm_i810_init_t __user *)arg,
				   sizeof(drm_i810_init_t))) {
			return -EFAULT;
		}
		dev_priv = drm_alloc(sizeof(drm_i810_private_t),
				     DRM_MEM_DRIVER);
		if (dev_priv == NULL)
			return -ENOMEM;
		retcode = i810_dma_initialize(dev, dev_priv, &init);
		break;

	case I810_CLEANUP_DMA:
		DRM_INFO("DMA Cleanup\n");
		retcode = i810_dma_cleanup(dev);
		break;
	}

	return retcode;
}



/* Most efficient way to verify state for the i810 is as it is
 * emitted. Non-conformant state is silently dropped.
 *
 * Use 'volatile' & local var tmp to force the emitted values to be
 * identical to the verified ones.
527 */ 528static void i810EmitContextVerified( drm_device_t *dev, 529 volatile unsigned int *code ) 530{ 531 drm_i810_private_t *dev_priv = dev->dev_private; 532 int i, j = 0; 533 unsigned int tmp; 534 RING_LOCALS; 535 536 BEGIN_LP_RING( I810_CTX_SETUP_SIZE ); 537 538 OUT_RING( GFX_OP_COLOR_FACTOR ); 539 OUT_RING( code[I810_CTXREG_CF1] ); 540 541 OUT_RING( GFX_OP_STIPPLE ); 542 OUT_RING( code[I810_CTXREG_ST1] ); 543 544 for ( i = 4 ; i < I810_CTX_SETUP_SIZE ; i++ ) { 545 tmp = code[i]; 546 547 if ((tmp & (7<<29)) == (3<<29) && 548 (tmp & (0x1f<<24)) < (0x1d<<24)) 549 { 550 OUT_RING( tmp ); 551 j++; 552 } 553 else printk("constext state dropped!!!\n"); 554 } 555 556 if (j & 1) 557 OUT_RING( 0 ); 558 559 ADVANCE_LP_RING(); 560} 561 562static void i810EmitTexVerified( drm_device_t *dev, 563 volatile unsigned int *code ) 564{ 565 drm_i810_private_t *dev_priv = dev->dev_private; 566 int i, j = 0; 567 unsigned int tmp; 568 RING_LOCALS; 569 570 BEGIN_LP_RING( I810_TEX_SETUP_SIZE ); 571 572 OUT_RING( GFX_OP_MAP_INFO ); 573 OUT_RING( code[I810_TEXREG_MI1] ); 574 OUT_RING( code[I810_TEXREG_MI2] ); 575 OUT_RING( code[I810_TEXREG_MI3] ); 576 577 for ( i = 4 ; i < I810_TEX_SETUP_SIZE ; i++ ) { 578 tmp = code[i]; 579 580 if ((tmp & (7<<29)) == (3<<29) && 581 (tmp & (0x1f<<24)) < (0x1d<<24)) 582 { 583 OUT_RING( tmp ); 584 j++; 585 } 586 else printk("texture state dropped!!!\n"); 587 } 588 589 if (j & 1) 590 OUT_RING( 0 ); 591 592 ADVANCE_LP_RING(); 593} 594 595 596/* Need to do some additional checking when setting the dest buffer. 
597 */ 598static void i810EmitDestVerified( drm_device_t *dev, 599 volatile unsigned int *code ) 600{ 601 drm_i810_private_t *dev_priv = dev->dev_private; 602 unsigned int tmp; 603 RING_LOCALS; 604 605 BEGIN_LP_RING( I810_DEST_SETUP_SIZE + 2 ); 606 607 tmp = code[I810_DESTREG_DI1]; 608 if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) { 609 OUT_RING( CMD_OP_DESTBUFFER_INFO ); 610 OUT_RING( tmp ); 611 } else 612 DRM_DEBUG("bad di1 %x (allow %x or %x)\n", 613 tmp, dev_priv->front_di1, dev_priv->back_di1); 614 615 /* invarient: 616 */ 617 OUT_RING( CMD_OP_Z_BUFFER_INFO ); 618 OUT_RING( dev_priv->zi1 ); 619 620 OUT_RING( GFX_OP_DESTBUFFER_VARS ); 621 OUT_RING( code[I810_DESTREG_DV1] ); 622 623 OUT_RING( GFX_OP_DRAWRECT_INFO ); 624 OUT_RING( code[I810_DESTREG_DR1] ); 625 OUT_RING( code[I810_DESTREG_DR2] ); 626 OUT_RING( code[I810_DESTREG_DR3] ); 627 OUT_RING( code[I810_DESTREG_DR4] ); 628 OUT_RING( 0 ); 629 630 ADVANCE_LP_RING(); 631} 632 633 634 635static void i810EmitState( drm_device_t *dev ) 636{ 637 drm_i810_private_t *dev_priv = dev->dev_private; 638 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; 639 unsigned int dirty = sarea_priv->dirty; 640 641 DRM_DEBUG("%s %x\n", __FUNCTION__, dirty); 642 643 if (dirty & I810_UPLOAD_BUFFERS) { 644 i810EmitDestVerified( dev, sarea_priv->BufferState ); 645 sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS; 646 } 647 648 if (dirty & I810_UPLOAD_CTX) { 649 i810EmitContextVerified( dev, sarea_priv->ContextState ); 650 sarea_priv->dirty &= ~I810_UPLOAD_CTX; 651 } 652 653 if (dirty & I810_UPLOAD_TEX0) { 654 i810EmitTexVerified( dev, sarea_priv->TexState[0] ); 655 sarea_priv->dirty &= ~I810_UPLOAD_TEX0; 656 } 657 658 if (dirty & I810_UPLOAD_TEX1) { 659 i810EmitTexVerified( dev, sarea_priv->TexState[1] ); 660 sarea_priv->dirty &= ~I810_UPLOAD_TEX1; 661 } 662} 663 664 665 666/* need to verify 667 */ 668static void i810_dma_dispatch_clear( drm_device_t *dev, int flags, 669 unsigned int clear_color, 670 unsigned int clear_zval ) 
671{ 672 drm_i810_private_t *dev_priv = dev->dev_private; 673 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; 674 int nbox = sarea_priv->nbox; 675 drm_clip_rect_t *pbox = sarea_priv->boxes; 676 int pitch = dev_priv->pitch; 677 int cpp = 2; 678 int i; 679 RING_LOCALS; 680 681 if ( dev_priv->current_page == 1 ) { 682 unsigned int tmp = flags; 683 684 flags &= ~(I810_FRONT | I810_BACK); 685 if (tmp & I810_FRONT) flags |= I810_BACK; 686 if (tmp & I810_BACK) flags |= I810_FRONT; 687 } 688 689 i810_kernel_lost_context(dev); 690 691 if (nbox > I810_NR_SAREA_CLIPRECTS) 692 nbox = I810_NR_SAREA_CLIPRECTS; 693 694 for (i = 0 ; i < nbox ; i++, pbox++) { 695 unsigned int x = pbox->x1; 696 unsigned int y = pbox->y1; 697 unsigned int width = (pbox->x2 - x) * cpp; 698 unsigned int height = pbox->y2 - y; 699 unsigned int start = y * pitch + x * cpp; 700 701 if (pbox->x1 > pbox->x2 || 702 pbox->y1 > pbox->y2 || 703 pbox->x2 > dev_priv->w || 704 pbox->y2 > dev_priv->h) 705 continue; 706 707 if ( flags & I810_FRONT ) { 708 BEGIN_LP_RING( 6 ); 709 OUT_RING( BR00_BITBLT_CLIENT | 710 BR00_OP_COLOR_BLT | 0x3 ); 711 OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch ); 712 OUT_RING( (height << 16) | width ); 713 OUT_RING( start ); 714 OUT_RING( clear_color ); 715 OUT_RING( 0 ); 716 ADVANCE_LP_RING(); 717 } 718 719 if ( flags & I810_BACK ) { 720 BEGIN_LP_RING( 6 ); 721 OUT_RING( BR00_BITBLT_CLIENT | 722 BR00_OP_COLOR_BLT | 0x3 ); 723 OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch ); 724 OUT_RING( (height << 16) | width ); 725 OUT_RING( dev_priv->back_offset + start ); 726 OUT_RING( clear_color ); 727 OUT_RING( 0 ); 728 ADVANCE_LP_RING(); 729 } 730 731 if ( flags & I810_DEPTH ) { 732 BEGIN_LP_RING( 6 ); 733 OUT_RING( BR00_BITBLT_CLIENT | 734 BR00_OP_COLOR_BLT | 0x3 ); 735 OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch ); 736 OUT_RING( (height << 16) | width ); 737 OUT_RING( dev_priv->depth_offset + start ); 738 OUT_RING( clear_zval ); 739 OUT_RING( 0 ); 740 
ADVANCE_LP_RING(); 741 } 742 } 743} 744 745static void i810_dma_dispatch_swap( drm_device_t *dev ) 746{ 747 drm_i810_private_t *dev_priv = dev->dev_private; 748 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; 749 int nbox = sarea_priv->nbox; 750 drm_clip_rect_t *pbox = sarea_priv->boxes; 751 int pitch = dev_priv->pitch; 752 int cpp = 2; 753 int i; 754 RING_LOCALS; 755 756 DRM_DEBUG("swapbuffers\n"); 757 758 i810_kernel_lost_context(dev); 759 760 if (nbox > I810_NR_SAREA_CLIPRECTS) 761 nbox = I810_NR_SAREA_CLIPRECTS; 762 763 for (i = 0 ; i < nbox; i++, pbox++) 764 { 765 unsigned int w = pbox->x2 - pbox->x1; 766 unsigned int h = pbox->y2 - pbox->y1; 767 unsigned int dst = pbox->x1*cpp + pbox->y1*pitch; 768 unsigned int start = dst; 769 770 if (pbox->x1 > pbox->x2 || 771 pbox->y1 > pbox->y2 || 772 pbox->x2 > dev_priv->w || 773 pbox->y2 > dev_priv->h) 774 continue; 775 776 BEGIN_LP_RING( 6 ); 777 OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4 ); 778 OUT_RING( pitch | (0xCC << 16)); 779 OUT_RING( (h << 16) | (w * cpp)); 780 if (dev_priv->current_page == 0) 781 OUT_RING(dev_priv->front_offset + start); 782 else 783 OUT_RING(dev_priv->back_offset + start); 784 OUT_RING( pitch ); 785 if (dev_priv->current_page == 0) 786 OUT_RING(dev_priv->back_offset + start); 787 else 788 OUT_RING(dev_priv->front_offset + start); 789 ADVANCE_LP_RING(); 790 } 791} 792 793 794static void i810_dma_dispatch_vertex(drm_device_t *dev, 795 drm_buf_t *buf, 796 int discard, 797 int used) 798{ 799 drm_i810_private_t *dev_priv = dev->dev_private; 800 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 801 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; 802 drm_clip_rect_t *box = sarea_priv->boxes; 803 int nbox = sarea_priv->nbox; 804 unsigned long address = (unsigned long)buf->bus_address; 805 unsigned long start = address - dev->agp->base; 806 int i = 0; 807 RING_LOCALS; 808 809 i810_kernel_lost_context(dev); 810 811 if (nbox > I810_NR_SAREA_CLIPRECTS) 812 nbox = 
I810_NR_SAREA_CLIPRECTS; 813 814 if (used > 4*1024) 815 used = 0; 816 817 if (sarea_priv->dirty) 818 i810EmitState( dev ); 819 820 if (buf_priv->currently_mapped == I810_BUF_MAPPED) { 821 unsigned int prim = (sarea_priv->vertex_prim & PR_MASK); 822 823 *(u32 *)buf_priv->kernel_virtual = ((GFX_OP_PRIMITIVE | prim | ((used/4)-2))); 824 825 if (used & 4) { 826 *(u32 *)((u32)buf_priv->kernel_virtual + used) = 0; 827 used += 4; 828 } 829 830 i810_unmap_buffer(buf); 831 } 832 833 if (used) { 834 do { 835 if (i < nbox) { 836 BEGIN_LP_RING(4); 837 OUT_RING( GFX_OP_SCISSOR | SC_UPDATE_SCISSOR | 838 SC_ENABLE ); 839 OUT_RING( GFX_OP_SCISSOR_INFO ); 840 OUT_RING( box[i].x1 | (box[i].y1<<16) ); 841 OUT_RING( (box[i].x2-1) | ((box[i].y2-1)<<16) ); 842 ADVANCE_LP_RING(); 843 } 844 845 BEGIN_LP_RING(4); 846 OUT_RING( CMD_OP_BATCH_BUFFER ); 847 OUT_RING( start | BB1_PROTECTED ); 848 OUT_RING( start + used - 4 ); 849 OUT_RING( 0 ); 850 ADVANCE_LP_RING(); 851 852 } while (++i < nbox); 853 } 854 855 if (discard) { 856 dev_priv->counter++; 857 858 (void) cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, 859 I810_BUF_HARDWARE); 860 861 BEGIN_LP_RING(8); 862 OUT_RING( CMD_STORE_DWORD_IDX ); 863 OUT_RING( 20 ); 864 OUT_RING( dev_priv->counter ); 865 OUT_RING( CMD_STORE_DWORD_IDX ); 866 OUT_RING( buf_priv->my_use_idx ); 867 OUT_RING( I810_BUF_FREE ); 868 OUT_RING( CMD_REPORT_HEAD ); 869 OUT_RING( 0 ); 870 ADVANCE_LP_RING(); 871 } 872} 873 874static void i810_dma_dispatch_flip( drm_device_t *dev ) 875{ 876 drm_i810_private_t *dev_priv = dev->dev_private; 877 int pitch = dev_priv->pitch; 878 RING_LOCALS; 879 880 DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n", 881 __FUNCTION__, 882 dev_priv->current_page, 883 dev_priv->sarea_priv->pf_current_page); 884 885 i810_kernel_lost_context(dev); 886 887 BEGIN_LP_RING( 2 ); 888 OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE ); 889 OUT_RING( 0 ); 890 ADVANCE_LP_RING(); 891 892 BEGIN_LP_RING( I810_DEST_SETUP_SIZE + 2 ); 893 /* On i815 at 
least ASYNC is buggy */ 894 /* pitch<<5 is from 11.2.8 p158, 895 its the pitch / 8 then left shifted 8, 896 so (pitch >> 3) << 8 */ 897 OUT_RING( CMD_OP_FRONTBUFFER_INFO | (pitch<<5) /*| ASYNC_FLIP */ ); 898 if ( dev_priv->current_page == 0 ) { 899 OUT_RING( dev_priv->back_offset ); 900 dev_priv->current_page = 1; 901 } else { 902 OUT_RING( dev_priv->front_offset ); 903 dev_priv->current_page = 0; 904 } 905 OUT_RING(0); 906 ADVANCE_LP_RING(); 907 908 BEGIN_LP_RING(2); 909 OUT_RING( CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP ); 910 OUT_RING( 0 ); 911 ADVANCE_LP_RING(); 912 913 /* Increment the frame counter. The client-side 3D driver must 914 * throttle the framerate by waiting for this value before 915 * performing the swapbuffer ioctl. 916 */ 917 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; 918 919} 920 921static void i810_dma_quiescent(drm_device_t *dev) 922{ 923 drm_i810_private_t *dev_priv = dev->dev_private; 924 RING_LOCALS; 925 926/* printk("%s\n", __FUNCTION__); */ 927 928 i810_kernel_lost_context(dev); 929 930 BEGIN_LP_RING(4); 931 OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE ); 932 OUT_RING( CMD_REPORT_HEAD ); 933 OUT_RING( 0 ); 934 OUT_RING( 0 ); 935 ADVANCE_LP_RING(); 936 937 i810_wait_ring( dev, dev_priv->ring.Size - 8 ); 938} 939 940static int i810_flush_queue(drm_device_t *dev) 941{ 942 drm_i810_private_t *dev_priv = dev->dev_private; 943 drm_device_dma_t *dma = dev->dma; 944 int i, ret = 0; 945 RING_LOCALS; 946 947/* printk("%s\n", __FUNCTION__); */ 948 949 i810_kernel_lost_context(dev); 950 951 BEGIN_LP_RING(2); 952 OUT_RING( CMD_REPORT_HEAD ); 953 OUT_RING( 0 ); 954 ADVANCE_LP_RING(); 955 956 i810_wait_ring( dev, dev_priv->ring.Size - 8 ); 957 958 for (i = 0; i < dma->buf_count; i++) { 959 drm_buf_t *buf = dma->buflist[ i ]; 960 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 961 962 int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE, 963 I810_BUF_FREE); 964 965 if (used == I810_BUF_HARDWARE) 966 
DRM_DEBUG("reclaimed from HARDWARE\n"); 967 if (used == I810_BUF_CLIENT) 968 DRM_DEBUG("still on client\n"); 969 } 970 971 return ret; 972} 973 974/* Must be called with the lock held */ 975void i810_reclaim_buffers(drm_device_t *dev, struct file *filp) 976{ 977 drm_device_dma_t *dma = dev->dma; 978 int i; 979 980 if (!dma) return; 981 if (!dev->dev_private) return; 982 if (!dma->buflist) return; 983 984 i810_flush_queue(dev); 985 986 for (i = 0; i < dma->buf_count; i++) { 987 drm_buf_t *buf = dma->buflist[ i ]; 988 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 989 990 if (buf->filp == filp && buf_priv) { 991 int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, 992 I810_BUF_FREE); 993 994 if (used == I810_BUF_CLIENT) 995 DRM_DEBUG("reclaimed from client\n"); 996 if (buf_priv->currently_mapped == I810_BUF_MAPPED) 997 buf_priv->currently_mapped = I810_BUF_UNMAPPED; 998 } 999 } 1000} 1001 1002static int i810_flush_ioctl(struct inode *inode, struct file *filp, 1003 unsigned int cmd, unsigned long arg) 1004{ 1005 drm_file_t *priv = filp->private_data; 1006 drm_device_t *dev = priv->head->dev; 1007 1008 LOCK_TEST_WITH_RETURN(dev, filp); 1009 1010 i810_flush_queue(dev); 1011 return 0; 1012} 1013 1014 1015static int i810_dma_vertex(struct inode *inode, struct file *filp, 1016 unsigned int cmd, unsigned long arg) 1017{ 1018 drm_file_t *priv = filp->private_data; 1019 drm_device_t *dev = priv->head->dev; 1020 drm_device_dma_t *dma = dev->dma; 1021 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; 1022 u32 *hw_status = dev_priv->hw_status_page; 1023 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) 1024 dev_priv->sarea_priv; 1025 drm_i810_vertex_t vertex; 1026 1027 if (copy_from_user(&vertex, (drm_i810_vertex_t __user *)arg, sizeof(vertex))) 1028 return -EFAULT; 1029 1030 LOCK_TEST_WITH_RETURN(dev, filp); 1031 1032 DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n", 1033 vertex.idx, vertex.used, vertex.discard); 1034 1035 if (vertex.idx < 0 
|| vertex.idx > dma->buf_count) 1036 return -EINVAL; 1037 1038 i810_dma_dispatch_vertex( dev, 1039 dma->buflist[ vertex.idx ], 1040 vertex.discard, vertex.used ); 1041 1042 atomic_add(vertex.used, &dev->counts[_DRM_STAT_SECONDARY]); 1043 atomic_inc(&dev->counts[_DRM_STAT_DMA]); 1044 sarea_priv->last_enqueue = dev_priv->counter-1; 1045 sarea_priv->last_dispatch = (int) hw_status[5]; 1046 1047 return 0; 1048} 1049 1050 1051 1052static int i810_clear_bufs(struct inode *inode, struct file *filp, 1053 unsigned int cmd, unsigned long arg) 1054{ 1055 drm_file_t *priv = filp->private_data; 1056 drm_device_t *dev = priv->head->dev; 1057 drm_i810_clear_t clear; 1058 1059 if (copy_from_user(&clear, (drm_i810_clear_t __user *)arg, sizeof(clear))) 1060 return -EFAULT; 1061 1062 LOCK_TEST_WITH_RETURN(dev, filp); 1063 1064 /* GH: Someone's doing nasty things... */ 1065 if (!dev->dev_private) { 1066 return -EINVAL; 1067 } 1068 1069 i810_dma_dispatch_clear( dev, clear.flags, 1070 clear.clear_color, 1071 clear.clear_depth ); 1072 return 0; 1073} 1074 1075static int i810_swap_bufs(struct inode *inode, struct file *filp, 1076 unsigned int cmd, unsigned long arg) 1077{ 1078 drm_file_t *priv = filp->private_data; 1079 drm_device_t *dev = priv->head->dev; 1080 1081 DRM_DEBUG("i810_swap_bufs\n"); 1082 1083 LOCK_TEST_WITH_RETURN(dev, filp); 1084 1085 i810_dma_dispatch_swap( dev ); 1086 return 0; 1087} 1088 1089static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, 1090 unsigned long arg) 1091{ 1092 drm_file_t *priv = filp->private_data; 1093 drm_device_t *dev = priv->head->dev; 1094 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; 1095 u32 *hw_status = dev_priv->hw_status_page; 1096 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) 1097 dev_priv->sarea_priv; 1098 1099 sarea_priv->last_dispatch = (int) hw_status[5]; 1100 return 0; 1101} 1102 1103static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, 1104 unsigned 
long arg) 1105{ 1106 drm_file_t *priv = filp->private_data; 1107 drm_device_t *dev = priv->head->dev; 1108 int retcode = 0; 1109 drm_i810_dma_t d; 1110 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; 1111 u32 *hw_status = dev_priv->hw_status_page; 1112 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) 1113 dev_priv->sarea_priv; 1114 1115 if (copy_from_user(&d, (drm_i810_dma_t __user *)arg, sizeof(d))) 1116 return -EFAULT; 1117 1118 LOCK_TEST_WITH_RETURN(dev, filp); 1119 1120 d.granted = 0; 1121 1122 retcode = i810_dma_get_buffer(dev, &d, filp); 1123 1124 DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", 1125 current->pid, retcode, d.granted); 1126 1127 if (copy_to_user((drm_dma_t __user *)arg, &d, sizeof(d))) 1128 return -EFAULT; 1129 sarea_priv->last_dispatch = (int) hw_status[5]; 1130 1131 return retcode; 1132} 1133 1134static int i810_copybuf(struct inode *inode, 1135 struct file *filp, unsigned int cmd, unsigned long arg) 1136{ 1137 /* Never copy - 2.4.x doesn't need it */ 1138 return 0; 1139} 1140 1141static int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, 1142 unsigned long arg) 1143{ 1144 /* Never copy - 2.4.x doesn't need it */ 1145 return 0; 1146} 1147 1148static void i810_dma_dispatch_mc(drm_device_t *dev, drm_buf_t *buf, int used, 1149 unsigned int last_render) 1150{ 1151 drm_i810_private_t *dev_priv = dev->dev_private; 1152 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 1153 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; 1154 unsigned long address = (unsigned long)buf->bus_address; 1155 unsigned long start = address - dev->agp->base; 1156 int u; 1157 RING_LOCALS; 1158 1159 i810_kernel_lost_context(dev); 1160 1161 u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, 1162 I810_BUF_HARDWARE); 1163 if (u != I810_BUF_CLIENT) { 1164 DRM_DEBUG("MC found buffer that isn't mine!\n"); 1165 } 1166 1167 if (used > 4*1024) 1168 used = 0; 1169 1170 sarea_priv->dirty = 0x7f; 1171 1172 DRM_DEBUG("dispatch mc 
addr 0x%lx, used 0x%x\n", 1173 address, used); 1174 1175 dev_priv->counter++; 1176 DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter); 1177 DRM_DEBUG("i810_dma_dispatch_mc\n"); 1178 DRM_DEBUG("start : %lx\n", start); 1179 DRM_DEBUG("used : %d\n", used); 1180 DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4); 1181 1182 if (buf_priv->currently_mapped == I810_BUF_MAPPED) { 1183 if (used & 4) { 1184 *(u32 *)((u32)buf_priv->virtual + used) = 0; 1185 used += 4; 1186 } 1187 1188 i810_unmap_buffer(buf); 1189 } 1190 BEGIN_LP_RING(4); 1191 OUT_RING( CMD_OP_BATCH_BUFFER ); 1192 OUT_RING( start | BB1_PROTECTED ); 1193 OUT_RING( start + used - 4 ); 1194 OUT_RING( 0 ); 1195 ADVANCE_LP_RING(); 1196 1197 1198 BEGIN_LP_RING(8); 1199 OUT_RING( CMD_STORE_DWORD_IDX ); 1200 OUT_RING( buf_priv->my_use_idx ); 1201 OUT_RING( I810_BUF_FREE ); 1202 OUT_RING( 0 ); 1203 1204 OUT_RING( CMD_STORE_DWORD_IDX ); 1205 OUT_RING( 16 ); 1206 OUT_RING( last_render ); 1207 OUT_RING( 0 ); 1208 ADVANCE_LP_RING(); 1209} 1210 1211static int i810_dma_mc(struct inode *inode, struct file *filp, 1212 unsigned int cmd, unsigned long arg) 1213{ 1214 drm_file_t *priv = filp->private_data; 1215 drm_device_t *dev = priv->head->dev; 1216 drm_device_dma_t *dma = dev->dma; 1217 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; 1218 u32 *hw_status = dev_priv->hw_status_page; 1219 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) 1220 dev_priv->sarea_priv; 1221 drm_i810_mc_t mc; 1222 1223 if (copy_from_user(&mc, (drm_i810_mc_t __user *)arg, sizeof(mc))) 1224 return -EFAULT; 1225 1226 LOCK_TEST_WITH_RETURN(dev, filp); 1227 1228 if (mc.idx >= dma->buf_count || mc.idx < 0) 1229 return -EINVAL; 1230 1231 i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used, 1232 mc.last_render ); 1233 1234 atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]); 1235 atomic_inc(&dev->counts[_DRM_STAT_DMA]); 1236 sarea_priv->last_enqueue = dev_priv->counter-1; 1237 sarea_priv->last_dispatch = (int) 
hw_status[5]; 1238 1239 return 0; 1240} 1241 1242static int i810_rstatus(struct inode *inode, struct file *filp, 1243 unsigned int cmd, unsigned long arg) 1244{ 1245 drm_file_t *priv = filp->private_data; 1246 drm_device_t *dev = priv->head->dev; 1247 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; 1248 1249 return (int)(((u32 *)(dev_priv->hw_status_page))[4]); 1250} 1251 1252static int i810_ov0_info(struct inode *inode, struct file *filp, 1253 unsigned int cmd, unsigned long arg) 1254{ 1255 drm_file_t *priv = filp->private_data; 1256 drm_device_t *dev = priv->head->dev; 1257 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; 1258 drm_i810_overlay_t data; 1259 1260 data.offset = dev_priv->overlay_offset; 1261 data.physical = dev_priv->overlay_physical; 1262 if (copy_to_user((drm_i810_overlay_t __user *)arg,&data,sizeof(data))) 1263 return -EFAULT; 1264 return 0; 1265} 1266 1267static int i810_fstatus(struct inode *inode, struct file *filp, 1268 unsigned int cmd, unsigned long arg) 1269{ 1270 drm_file_t *priv = filp->private_data; 1271 drm_device_t *dev = priv->head->dev; 1272 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; 1273 1274 LOCK_TEST_WITH_RETURN(dev, filp); 1275 1276 return I810_READ(0x30008); 1277} 1278 1279static int i810_ov0_flip(struct inode *inode, struct file *filp, 1280 unsigned int cmd, unsigned long arg) 1281{ 1282 drm_file_t *priv = filp->private_data; 1283 drm_device_t *dev = priv->head->dev; 1284 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; 1285 1286 LOCK_TEST_WITH_RETURN(dev, filp); 1287 1288 //Tell the overlay to update 1289 I810_WRITE(0x30000,dev_priv->overlay_physical | 0x80000000); 1290 1291 return 0; 1292} 1293 1294 1295/* Not sure why this isn't set all the time: 1296 */ 1297static void i810_do_init_pageflip( drm_device_t *dev ) 1298{ 1299 drm_i810_private_t *dev_priv = dev->dev_private; 1300 1301 DRM_DEBUG("%s\n", __FUNCTION__); 1302 
dev_priv->page_flipping = 1; 1303 dev_priv->current_page = 0; 1304 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; 1305} 1306 1307static int i810_do_cleanup_pageflip( drm_device_t *dev ) 1308{ 1309 drm_i810_private_t *dev_priv = dev->dev_private; 1310 1311 DRM_DEBUG("%s\n", __FUNCTION__); 1312 if (dev_priv->current_page != 0) 1313 i810_dma_dispatch_flip( dev ); 1314 1315 dev_priv->page_flipping = 0; 1316 return 0; 1317} 1318 1319static int i810_flip_bufs(struct inode *inode, struct file *filp, 1320 unsigned int cmd, unsigned long arg) 1321{ 1322 drm_file_t *priv = filp->private_data; 1323 drm_device_t *dev = priv->head->dev; 1324 drm_i810_private_t *dev_priv = dev->dev_private; 1325 1326 DRM_DEBUG("%s\n", __FUNCTION__); 1327 1328 LOCK_TEST_WITH_RETURN(dev, filp); 1329 1330 if (!dev_priv->page_flipping) 1331 i810_do_init_pageflip( dev ); 1332 1333 i810_dma_dispatch_flip( dev ); 1334 return 0; 1335} 1336 1337void i810_driver_pretakedown(drm_device_t *dev) 1338{ 1339 i810_dma_cleanup( dev ); 1340} 1341 1342void i810_driver_prerelease(drm_device_t *dev, DRMFILE filp) 1343{ 1344 if (dev->dev_private) { 1345 drm_i810_private_t *dev_priv = dev->dev_private; 1346 if (dev_priv->page_flipping) { 1347 i810_do_cleanup_pageflip(dev); 1348 } 1349 } 1350} 1351 1352void i810_driver_release(drm_device_t *dev, struct file *filp) 1353{ 1354 i810_reclaim_buffers(dev, filp); 1355} 1356 1357int i810_driver_dma_quiescent(drm_device_t *dev) 1358{ 1359 i810_dma_quiescent( dev ); 1360 return 0; 1361} 1362 1363drm_ioctl_desc_t i810_ioctls[] = { 1364 [DRM_IOCTL_NR(DRM_I810_INIT)] = { i810_dma_init, 1, 1 }, 1365 [DRM_IOCTL_NR(DRM_I810_VERTEX)] = { i810_dma_vertex, 1, 0 }, 1366 [DRM_IOCTL_NR(DRM_I810_CLEAR)] = { i810_clear_bufs, 1, 0 }, 1367 [DRM_IOCTL_NR(DRM_I810_FLUSH)] = { i810_flush_ioctl, 1, 0 }, 1368 [DRM_IOCTL_NR(DRM_I810_GETAGE)] = { i810_getage, 1, 0 }, 1369 [DRM_IOCTL_NR(DRM_I810_GETBUF)] = { i810_getbuf, 1, 0 }, 1370 [DRM_IOCTL_NR(DRM_I810_SWAP)] = { i810_swap_bufs, 
1, 0 }, 1371 [DRM_IOCTL_NR(DRM_I810_COPY)] = { i810_copybuf, 1, 0 }, 1372 [DRM_IOCTL_NR(DRM_I810_DOCOPY)] = { i810_docopy, 1, 0 }, 1373 [DRM_IOCTL_NR(DRM_I810_OV0INFO)] = { i810_ov0_info, 1, 0 }, 1374 [DRM_IOCTL_NR(DRM_I810_FSTATUS)] = { i810_fstatus, 1, 0 }, 1375 [DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = { i810_ov0_flip, 1, 0 }, 1376 [DRM_IOCTL_NR(DRM_I810_MC)] = { i810_dma_mc, 1, 1 }, 1377 [DRM_IOCTL_NR(DRM_I810_RSTATUS)] = { i810_rstatus, 1, 0 }, 1378 [DRM_IOCTL_NR(DRM_I810_FLIP)] = { i810_flip_bufs, 1, 0 } 1379}; 1380 1381int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); 1382 1383/** 1384 * Determine if the device really is AGP or not. 1385 * 1386 * All Intel graphics chipsets are treated as AGP, even if they are really 1387 * PCI-e. 1388 * 1389 * \param dev The device to be tested. 1390 * 1391 * \returns 1392 * A value of 1 is always retured to indictate every i810 is AGP. 1393 */ 1394int i810_driver_device_is_agp(drm_device_t * dev) 1395{ 1396 return 1; 1397}