/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */


/*
 * Unmaps the DMA mappings.
 * FIXME: Is this a NoOp on x86? Also
 * FIXME: What happens if this one is called and a pending blit has previously done
 * the same DMA mappings?
 */

#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
#include "via_dmablit.h"

#include <linux/pagemap.h>

#define VIA_PGDN(x)	(((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x)	(((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x)	((unsigned long)(x) >> PAGE_SHIFT)

typedef struct _drm_via_descriptor {
	uint32_t mem_addr;
	uint32_t dev_addr;
	uint32_t size;
	uint32_t next;
} drm_via_descriptor_t;


/*
 * Unmap a DMA mapping.
 */

static void
via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
		descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

	while (num_desc--) {
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
				descriptor_this_page;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
		desc_ptr--;
	}
}

/*
 * If mode = 0, count how many descriptors are needed.
 * If mode = 1, map the DMA pages for the device, put the descriptor chain together
 * and map the descriptors as well.
 * Descriptors are run in reverse order by the hardware because we are not allowed to update the
 * 'next' field without syncing calls when the descriptor is already mapped.
 */
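/*
 * Added commentary (not from the original driver): with 4 kB pages, a single
 * 4 kB line starting at page offset 2048 straddles two pages and therefore
 * needs two descriptors of 2048 bytes each; mode 0 below only counts such
 * splits, mode 1 actually maps them. In mode 1 each new descriptor's 'next'
 * field is set to the bus address of the previously mapped descriptor, and
 * vsg->chain_start ends up holding the last descriptor written. The engine
 * fetches from chain_start and follows 'next' backwards, which is the
 * reverse execution order the comment above refers to.
 */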
static void
via_map_blit_for_device(struct pci_dev *pdev,
			const drm_via_dmablit_t *xfer,
			drm_via_sg_info_t *vsg,
			int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			remaining_len = min(PAGE_SIZE - VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
				desc_ptr->mem_addr =
					dma_map_page(&pdev->dev,
						     vsg->pages[VIA_PFN(cur_mem) -
								VIA_PFN(first_addr)],
						     VIA_PGOFF(cur_mem), remaining_len,
						     vsg->direction);
				desc_ptr->dev_addr = cur_fb;

				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
						      DMA_TO_DEVICE);
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}

/*
 * Function that frees up all resources for a blit. It is usable even if the
 * blit info has only been partially built, as long as the status enum is
 * consistent with the actual status of the used resources.
 */

static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	struct page *page;
	int i;

	switch (vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(pdev, vsg);
		/* fall through */
	case dr_via_desc_pages_alloc:
		for (i = 0; i < vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
				free_page((unsigned long)vsg->desc_pages[i]);
		}
		kfree(vsg->desc_pages);
		/* fall through */
	case dr_via_pages_locked:
		for (i = 0; i < vsg->num_pages; ++i) {
			if (NULL != (page = vsg->pages[i])) {
				if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
					SetPageDirty(page);
				page_cache_release(page);
			}
		}
		/* fall through */
	case dr_via_pages_alloc:
		vfree(vsg->pages);
		/* fall through */
	default:
		vsg->state = dr_via_sg_init;
	}
	if (vsg->bounce_buffer) {
		vfree(vsg->bounce_buffer);
		vsg->bounce_buffer = NULL;
	}
	vsg->free_on_sequence = 0;
}
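/*
 * Added commentary (not from the original driver): the teardown above relies
 * on the vsg->state enum advancing strictly in build order,
 *
 *	dr_via_sg_init -> dr_via_pages_alloc -> dr_via_pages_locked
 *		       -> dr_via_desc_pages_alloc -> dr_via_device_mapped,
 *
 * so the switch deliberately falls through: entering at the current state
 * releases that stage's resources and then every earlier stage's as well.
 */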
/*
 * Fire a blit engine.
 */

static void
via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
		  VIA_DMA_CSR_DE);
	VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	DRM_WRITEMEMORYBARRIER();
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
	VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
}

/*
 * Obtain a page pointer array and lock all pages into system memory. A segmentation violation
 * will occur here if the calling user does not have access to the submitted address.
 */

static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int ret;
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
		first_pfn + 1;

	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
		return -ENOMEM;
	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm,
			     (unsigned long)xfer->mem_addr,
			     vsg->num_pages,
			     (vsg->direction == DMA_FROM_DEVICE),
			     0, vsg->pages, NULL);

	up_read(&current->mm->mmap_sem);
	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		vsg->state = dr_via_pages_locked;
		return -EINVAL;
	}
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}

/*
 * Allocate DMA-capable memory for the blit descriptor chain, and an array that keeps track of
 * the pages we allocate. We don't want to use kmalloc for the descriptor chain because it may
 * be quite large for some blits, and pages don't need to be contiguous.
 */

static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
	int i;

	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
		vsg->descriptors_per_page;

	if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
		return -ENOMEM;

	vsg->state = dr_via_desc_pages_alloc;
	for (i = 0; i < vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return -ENOMEM;
	}
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
		  vsg->num_desc);
	return 0;
}

static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}

static void
via_dmablit_engine_off(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}
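/*
 * Added commentary (not from the original driver): the per-engine register
 * arithmetic above reflects the layout the driver assumes, namely that the
 * address/count registers (MAR/DAR/BCR/DPR) are spaced 0x10 apart per engine
 * while CSR and MR are spaced 0x04 apart. For engine 1, for example, the
 * status register sits at VIA_PCI_DMA_CSR0 + 0x04 and the descriptor pointer
 * register at VIA_PCI_DMA_DPR0 + 0x10.
 */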
/*
 * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
 * The rest, like unmapping and freeing memory for done blits, is done in a separate workqueue
 * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
 * the workqueue task takes care of processing associated with the old blit.
 */

void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave = 0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
		  engine, from_irq, (unsigned long) blitq);

	if (from_irq) {
		spin_lock(&blitq->blit_lock);
	} else {
		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	done_transfer = blitq->is_active &&
		((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		DRM_WAKEUP(blitq->blit_queue + cur);

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + DRM_HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + DRM_HZ;
			if (!timer_pending(&blitq->poll_timer))
				mod_timer(&blitq->poll_timer, jiffies + 1);
		} else {
			if (timer_pending(&blitq->poll_timer))
				del_timer(&blitq->poll_timer);
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq) {
		spin_unlock(&blitq->blit_lock);
	} else {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	}
}

/*
 * Check whether this blit is still active, performing necessary locking.
 */

static int
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
{
	unsigned long irqsave;
	uint32_t slot;
	int active;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/*
	 * Allow for handle wraparounds.
	 */

	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
		((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		slot = handle - blitq->done_blit_handle + blitq->cur - 1;
		if (slot >= VIA_NUM_BLIT_SLOTS)
			slot -= VIA_NUM_BLIT_SLOTS;
		*queue = blitq->blit_queue + slot;
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return active;
}
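/*
 * Added commentary (not from the original driver): the wraparound test above
 * is unsigned modulo-2^32 arithmetic. A handle is "active" when it has been
 * issued (cur_blit_handle - handle <= 2^23) but not yet completed
 * (done_blit_handle - handle wraps to a value above 2^23). For example, with
 * done_blit_handle == 9 and cur_blit_handle == 12, handle 10 gives
 * 9 - 10 == 0xffffffff (> 2^23) and 12 - 10 == 2 (<= 2^23), so it is still
 * active; handle 9 gives 9 - 9 == 0, which fails the first test: done.
 */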
/*
 * Sync. Wait for at least three seconds for the blit to be performed.
 */

static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	wait_queue_head_t *queue;
	int ret = 0;

	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);

	return ret;
}


/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQs can and do work in parallel. If the hardware has
 * IRQs, it will shorten the latency somewhat.
 */

static void
via_dmablit_timer(unsigned long data)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
	struct drm_device *dev = blitq->dev;
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		mod_timer(&blitq->poll_timer, jiffies + 1);

		/*
		 * Rerun handler to delete timer if engines are off, and
		 * to shorten abort latency. This is a little nasty.
		 */

		via_dmablit_handler(dev, engine, 0);

	}
}


/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and may not be called on each interrupt.
 */

static void
via_dmablit_workqueue(struct work_struct *work)
{
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
	struct drm_device *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;


	DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while (blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAKEUP(&blitq->busy_queue);

		via_free_sg_info(dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
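/*
 * Added commentary (not from the original driver): each engine's queue is a
 * small ring of VIA_NUM_BLIT_SLOTS entries tracked by three indices:
 * 'head' is where via_dmablit() enqueues new work, 'cur' is the slot the
 * handler fires and completes, and 'serviced' trails 'cur' as the workqueue
 * task above frees finished blits. The lock is dropped around
 * via_free_sg_info() because unmapping and releasing the pages may sleep.
 */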
/*
 * Init all blit engines. Currently we use two, but some hardware has four.
 */

void
via_init_dmablit(struct drm_device *dev)
{
	int i, j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);

	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j) {
			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
		}
		DRM_INIT_WAITQUEUE(&blitq->busy_queue);
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		setup_timer(&blitq->poll_timer, via_dmablit_timer,
			    (unsigned long)blitq);
	}
}

/*
 * Build all info and do all mappings required for a blit.
 */

static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return -EINVAL;
	}

	/*
	 * Below check is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */

	if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return -EINVAL;
	}

	if ((xfer->mem_stride == xfer->line_length) &&
	    (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

	/*
	 * Don't lock an arbitrarily large number of pages, since that causes a
	 * DoS security hole.
	 */

	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

	/*
	 * We allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
	    abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return -EINVAL;
	}
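	/*
	 * Added commentary (not from the original driver): only the
	 * frame-buffer stride may be negative here. The system-memory side is
	 * locked as one linear pfn range starting at mem_addr (see
	 * via_lock_all_dma_pages()), so a negative mem_stride would walk
	 * outside the locked pages.
	 */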
	/*
	 * A hardware bug seems to be worked around if system memory addresses start on
	 * 16 byte boundaries. This seems a bit restrictive however. VIA has been contacted
	 * about this. Meanwhile, impose the following restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	     ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) &&
	     ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}

	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}


/*
 * Reserve one free slot in the blit queue. Will wait for one second for one
 * to become available. Otherwise -EBUSY is returned.
 */

static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret = 0;
	unsigned long irqsave;

	DRM_DEBUG("Num free is %d\n", blitq->num_free);
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while (blitq->num_free == 0) {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
		if (ret) {
			return (-EINTR == ret) ? -EAGAIN : ret;
		}

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return 0;
}

/*
 * Hand back a free slot if we changed our mind.
 */

static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	blitq->num_free++;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	DRM_WAKEUP(&blitq->busy_queue);
}

/*
 * Grab a free slot. Build blit info and queue a blit.
 */

static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return -EINVAL;
	}

	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
		return ret;
	}
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return -ENOMEM;
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}
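/*
 * Added commentary (not from the original driver): note how via_dmablit()
 * statically assigns transfers to engines, engine 0 for system-memory to
 * frame-buffer blits and engine 1 for the opposite direction, so uploads
 * and downloads can be in flight concurrently on the two engines.
 */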
/*
 * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
 * there is a very high probability that this IOCTL will be interrupted by a signal. In that
 * case it returns with -EAGAIN for the signal to be delivered.
 * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
 */

int
via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_blitsync_t *sync = data;
	int err;

	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
		return -EINVAL;

	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);

	if (-EINTR == err)
		err = -EAGAIN;

	return err;
}


/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a
 * signal while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN
 * and should be reissued. See the above IOCTL code.
 */

int
via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_dmablit_t *xfer = data;
	int err;

	err = via_dmablit(dev, xfer);

	return err;
}
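/*
 * Added usage sketch (not part of the original file): a userspace client
 * would submit a blit and then sync on the returned handle, retrying both
 * calls on EAGAIN as the comments above prescribe. The request macro names
 * DRM_IOCTL_VIA_DMA_BLIT and DRM_IOCTL_VIA_BLIT_SYNC are assumed here to
 * come from via_drm.h; treat this as an illustration, not a reference:
 *
 *	drm_via_dmablit_t xfer = { ... };	// filled-in blit parameters
 *	while (ioctl(fd, DRM_IOCTL_VIA_DMA_BLIT, &xfer) == -1 && errno == EAGAIN)
 *		;				// interrupted by a signal, reissue
 *	drm_via_blitsync_t sync = xfer.sync;
 *	while (ioctl(fd, DRM_IOCTL_VIA_BLIT_SYNC, &sync) == -1 && errno == EAGAIN)
 *		;				// likewise for the sync ioctl
 */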