Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drm_bufs.c at v2.6.17 (1597 lines, 42 kB)
/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
        return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);

unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
        return pci_resource_len(dev->pdev, resource);
}

EXPORT_SYMBOL(drm_get_resource_len);

static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
                                             drm_local_map_t *map)
{
        struct list_head *list;

        list_for_each(list, &dev->maplist->head) {
                drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
                if (entry->map && map->type == entry->map->type &&
                    entry->map->offset == map->offset) {
                        return entry;
                }
        }

        return NULL;
}

/*
 * Used to allocate 32-bit handles for mappings.
 */
#define START_RANGE 0x10000000
#define END_RANGE 0x40000000

#ifdef _LP64
static __inline__ unsigned int HandleID(unsigned long lhandle,
                                        drm_device_t *dev)
{
        static unsigned int map32_handle = START_RANGE;
        unsigned int hash;

        if (lhandle & 0xffffffff00000000) {
                hash = map32_handle;
                map32_handle += PAGE_SIZE;
                if (map32_handle > END_RANGE)
                        map32_handle = START_RANGE;
        } else
                hash = lhandle;

        while (1) {
                drm_map_list_t *_entry;
                list_for_each_entry(_entry, &dev->maplist->head, head) {
                        if (_entry->user_token == hash)
                                break;
                }
                if (&_entry->head == &dev->maplist->head)
                        return hash;

                hash += PAGE_SIZE;
                map32_handle += PAGE_SIZE;
        }
}
#else
# define HandleID(x,dev) (unsigned int)(x)
#endif
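/*
 * Illustrative sketch (hypothetical, not from the kernel sources): the
 * probing scheme HandleID() uses above, restated in isolation. A candidate
 * token that collides with an existing map's user_token is advanced by
 * PAGE_SIZE until it is unique among the current maps.
 */
#if 0
static unsigned int example_pick_token(unsigned int candidate,
                                       drm_device_t *dev)
{
        drm_map_list_t *entry;

restart:
        list_for_each_entry(entry, &dev->maplist->head, head) {
                if (entry->user_token == candidate) {
                        candidate += PAGE_SIZE; /* collision: probe onward */
                        goto restart;
                }
        }
        return candidate;       /* unique among the current maps */
}
#endif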
/**
 * Ioctl to specify a range of memory that is available for mapping by a non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
                           unsigned int size, drm_map_type_t type,
                           drm_map_flags_t flags, drm_map_list_t ** maplist)
{
        drm_map_t *map;
        drm_map_list_t *list;
        drm_dma_handle_t *dmah;

        map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
        if (!map)
                return -ENOMEM;

        map->offset = offset;
        map->size = size;
        map->flags = flags;
        map->type = type;

        /* Only allow shared memory to be removable since we only keep enough
         * bookkeeping information about shared memory to allow for removal
         * when processes fork.
         */
        if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                  map->offset, map->size, map->type);
        if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        map->mtrr = -1;
        map->handle = NULL;

        switch (map->type) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
                if (map->offset + (map->size - 1) < map->offset ||
                    map->offset < virt_to_phys(high_memory)) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
#endif
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                /* Some drivers preinitialize some maps, without the X Server
                 * needing to be aware of it. Therefore, we just return success
                 * when the server tries to create a duplicate map.
                 */
                list = drm_find_matching_map(dev, map);
                if (list != NULL) {
                        if (list->map->size != map->size) {
                                DRM_DEBUG("Matching maps of type %d with "
                                          "mismatched sizes, (%ld vs %ld)\n",
                                          map->type, map->size,
                                          list->map->size);
                                list->map->size = map->size;
                        }

                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        *maplist = list;
                        return 0;
                }

                if (drm_core_has_MTRR(dev)) {
                        if (map->type == _DRM_FRAME_BUFFER ||
                            (map->flags & _DRM_WRITE_COMBINING)) {
                                map->mtrr = mtrr_add(map->offset, map->size,
                                                     MTRR_TYPE_WRCOMB, 1);
                        }
                }
                if (map->type == _DRM_REGISTERS)
                        map->handle = drm_ioremap(map->offset, map->size, dev);
                break;

        case _DRM_SHM:
                map->handle = vmalloc_32(map->size);
                DRM_DEBUG("%lu %d %p\n",
                          map->size, drm_order(map->size), map->handle);
                if (!map->handle) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if (map->flags & _DRM_CONTAINS_LOCK) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->lock.hw_lock != NULL) {
                                vfree(map->handle);
                                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                                return -EBUSY;
                        }
                        dev->sigdata.lock = dev->lock.hw_lock = map->handle;    /* Pointer to lock */
                }
                break;
        case _DRM_AGP:
                if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
                        map->offset += dev->hose->mem_space->start;
#endif
                        map->offset += dev->agp->base;
                        map->mtrr = dev->agp->agp_mtrr; /* for getmap */
                }
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
                map->offset += (unsigned long)dev->sg->virtual;
                break;
        case _DRM_CONSISTENT:
                /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
                 * As we're limiting the address to 2^32-1 (or less),
                 * casting it down to 32 bits is no problem, but we
                 * need to point to a 64bit variable first. */
                dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
                if (!dmah) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -ENOMEM;
                }
                map->handle = dmah->vaddr;
                map->offset = (unsigned long)dmah->busaddr;
                kfree(dmah);
                break;
        default:
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }

        list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
        if (!list) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        mutex_lock(&dev->struct_mutex);
        list_add(&list->head, &dev->maplist->head);
        /* Assign a 32-bit handle */
        /* We do it here so that dev->struct_mutex protects the increment */
        list->user_token = HandleID(map->type == _DRM_SHM
                                    ? (unsigned long)map->handle
                                    : map->offset, dev);
        mutex_unlock(&dev->struct_mutex);

        *maplist = list;
        return 0;
}

int drm_addmap(drm_device_t * dev, unsigned int offset,
               unsigned int size, drm_map_type_t type,
               drm_map_flags_t flags, drm_local_map_t ** map_ptr)
{
        drm_map_list_t *list;
        int rc;

        rc = drm_addmap_core(dev, offset, size, type, flags, &list);
        if (!rc)
                *map_ptr = list->map;
        return rc;
}

EXPORT_SYMBOL(drm_addmap);
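/*
 * Illustrative sketch (hypothetical, not from the kernel sources): how a
 * driver would typically pair drm_addmap() with the resource helpers at the
 * top of this file to map its register BAR. The BAR index 0 and the flags
 * are assumptions for the example; drm_rmmap(), defined further below,
 * undoes the ioremap and MTRR setup.
 */
#if 0
static int example_map_mmio(drm_device_t *dev, drm_local_map_t **mmio)
{
        /* BAR 0 is assumed to hold the device registers */
        return drm_addmap(dev, drm_get_resource_start(dev, 0),
                          drm_get_resource_len(dev, 0),
                          _DRM_REGISTERS, _DRM_READ_ONLY, mmio);
}
#endif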
int drm_addmap_ioctl(struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t map;
        drm_map_list_t *maplist;
        drm_map_t __user *argp = (void __user *)arg;
        int err;

        if (!(filp->f_mode & 3))
                return -EACCES; /* Require read/write */

        if (copy_from_user(&map, argp, sizeof(map))) {
                return -EFAULT;
        }

        if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
                return -EPERM;

        err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
                              &maplist);

        if (err)
                return err;

        if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
                return -EFAULT;

        /* Avoid a warning on 64-bit; this casting isn't very nice, but the
         * API is already set, so it's too late to change. */
        if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
                return -EFAULT;
        return 0;
}

/**
 * Remove a map from the list and deallocate its resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * sees if it is being used, and frees any associated resources (such as
 * MTRRs) if it is not in use.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
        drm_dma_handle_t dmah;

        /* Find the list entry for the map and remove it */
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);

                if (r_list->map == map) {
                        list_del(list);
                        drm_free(list, sizeof(*list), DRM_MEM_MAPS);
                        break;
                }
        }

        /* List has wrapped around to the head pointer, or it's empty and we
         * didn't find anything.
         */
        if (list == (&dev->maplist->head)) {
                return -EINVAL;
        }

        switch (map->type) {
        case _DRM_REGISTERS:
                drm_ioremapfree(map->handle, map->size, dev);
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                        int retcode;
                        retcode = mtrr_del(map->mtrr, map->offset, map->size);
                        DRM_DEBUG("mtrr_del=%d\n", retcode);
                }
                break;
        case _DRM_SHM:
                vfree(map->handle);
                break;
        case _DRM_AGP:
        case _DRM_SCATTER_GATHER:
                break;
        case _DRM_CONSISTENT:
                dmah.vaddr = map->handle;
                dmah.busaddr = map->offset;
                dmah.size = map->size;
                __drm_pci_free(dev, &dmah);
                break;
        }
        drm_free(map, sizeof(*map), DRM_MEM_MAPS);

        return 0;
}

int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_rmmap_locked(dev, map);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly. Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about. This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t request;
        drm_local_map_t *map = NULL;
        struct list_head *list;
        int ret;

        if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
                return -EFAULT;
        }

        mutex_lock(&dev->struct_mutex);
        list_for_each(list, &dev->maplist->head) {
                drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);

                if (r_list->map &&
                    r_list->user_token == (unsigned long)request.handle &&
                    r_list->map->flags & _DRM_REMOVABLE) {
                        map = r_list->map;
                        break;
                }
        }

        /* List has wrapped around to the head pointer, or it's empty and we
         * didn't find anything.
         */
        if (list == (&dev->maplist->head)) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        if (!map) {
                mutex_unlock(&dev->struct_mutex);       /* don't return with struct_mutex held */
                return -EINVAL;
        }

        /* Register and framebuffer maps are permanent */
        if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }

        ret = drm_rmmap_locked(dev, map);

        mutex_unlock(&dev->struct_mutex);

        return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                drm_pci_free(dev, entry->seglist[i]);
                        }
                }
                drm_free(entry->seglist,
                         entry->seg_count *
                         sizeof(*entry->seglist), DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                drm_free(entry->buflist[i].dev_private,
                                         entry->buflist[i].dev_priv_size,
                                         DRM_MEM_BUFS);
                        }
                }
                drm_free(entry->buflist,
                         entry->buf_count *
                         sizeof(*entry->buflist), DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!dma)
                return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG("count: %d\n", count);
        DRM_DEBUG("order: %d\n", order);
        DRM_DEBUG("size: %d\n", size);
        DRM_DEBUG("agp_offset: %lx\n", agp_offset);
        DRM_DEBUG("alignment: %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total: %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif                          /* __OS_HAS_AGP */
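/*
 * Worked example (not from the kernel sources) of the size/order arithmetic
 * shared by the addbufs functions above and below, assuming 4 KiB pages
 * (PAGE_SHIFT = 12): for request->size = 20480 (20 KiB), drm_order() at the
 * end of this file gives order = 15, so size = 1 << 15 = 32768;
 * page_order = 15 - 12 = 3, so total = PAGE_SIZE << 3 = 32768; and with
 * _DRM_PAGE_ALIGN requested, alignment = PAGE_ALIGN(32768) = 32768.
 */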
int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        drm_dma_handle_t *dmah;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                  request->count, request->size, size, order, dev->queue_count);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
                                   DRM_MEM_SEGS);
        if (!entry->seglist) {
                drm_free(entry->buflist,
                         count * sizeof(*entry->buflist), DRM_MEM_BUFS);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->seglist, 0, count * sizeof(*entry->seglist));

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
                                  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
        if (!temp_pagelist) {
                drm_free(entry->buflist,
                         count * sizeof(*entry->buflist), DRM_MEM_BUFS);
                drm_free(entry->seglist,
                         count * sizeof(*entry->seglist), DRM_MEM_SEGS);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG("pagelist: %d entries\n",
                  dma->page_count + (count << page_order));

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;
        while (entry->buf_count < count) {
                dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);

                if (!dmah) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        drm_free(temp_pagelist,
                                 (dma->page_count + (count << page_order))
                                 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = dmah;
                for (i = 0; i < (1 << page_order); i++) {
                        DRM_DEBUG("page %d @ 0x%08lx\n",
                                  dma->page_count + page_count,
                                  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
                        temp_pagelist[dma->page_count + page_count++]
                            = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
                }
                for (offset = 0;
                     offset + size <= total && entry->buf_count < count;
                     offset += alignment, ++entry->buf_count) {
                        buf = &entry->buflist[entry->buf_count];
                        buf->idx = dma->buf_count + entry->buf_count;
                        buf->total = alignment;
                        buf->order = order;
                        buf->used = 0;
                        buf->offset = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(dmah->vaddr + offset);
                        buf->bus_address = dmah->busaddr + offset;
                        buf->next = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head(&buf->dma_wait);
                        buf->filp = NULL;

                        buf->dev_priv_size = dev->driver->dev_priv_size;
                        buf->dev_private = drm_alloc(buf->dev_priv_size,
                                                     DRM_MEM_BUFS);
                        if (!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                drm_free(temp_pagelist,
                                         (dma->page_count +
                                          (count << page_order))
                                         * sizeof(*dma->pagelist),
                                         DRM_MEM_PAGES);
                                mutex_unlock(&dev->struct_mutex);
                                atomic_dec(&dev->buf_alloc);
                                return -ENOMEM;
                        }
                        memset(buf->dev_private, 0, buf->dev_priv_size);

                        DRM_DEBUG("buffer %d @ %p\n",
                                  entry->buf_count, buf->address);
                }
                byte_count += PAGE_SIZE << page_order;
        }
        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                drm_free(temp_pagelist,
                         (dma->page_count + (count << page_order))
                         * sizeof(*dma->pagelist), DRM_MEM_PAGES);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
                drm_free(dma->pagelist,
                         dma->page_count * sizeof(*dma->pagelist),
                         DRM_MEM_PAGES);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);
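/*
 * The commit-on-success idiom used by drm_addbufs_pci() above, restated in
 * a minimal hypothetical sketch (not from the kernel sources): the enlarged
 * pagelist is built in a private copy and only swapped into dma->pagelist
 * once every allocation has succeeded, so a mid-loop failure frees the copy
 * and leaves the device state untouched.
 */
#if 0
static int example_grow_pagelist(unsigned long **pagelist, int old_n, int new_n)
{
        unsigned long *copy;

        copy = drm_alloc(new_n * sizeof(*copy), DRM_MEM_PAGES);
        if (!copy)
                return -ENOMEM;         /* original pagelist untouched */
        memcpy(copy, *pagelist, old_n * sizeof(*copy));

        /* ... fill copy[old_n..new_n-1]; on any failure, free copy and bail ... */

        drm_free(*pagelist, old_n * sizeof(*copy), DRM_MEM_PAGES);
        *pagelist = copy;               /* commit only after total success */
        return 0;
}
#endif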
static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_SG))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count: %d\n", count);
        DRM_DEBUG("order: %d\n", order);
        DRM_DEBUG("size: %d\n", size);
        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
        DRM_DEBUG("alignment: %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total: %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset
                                        + (unsigned long)dev->sg->virtual);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }

                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count: %d\n", count);
        DRM_DEBUG("order: %d\n", order);
        DRM_DEBUG("size: %d\n", size);
        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
        DRM_DEBUG("alignment: %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total: %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_FB;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call to drm_addbufs_agp(),
 * drm_addbufs_sg(), drm_addbufs_fb() or drm_addbufs_pci() for AGP,
 * scatter-gather, framebuffer or consistent PCI memory respectively.
 */
int drm_addbufs(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
        drm_buf_desc_t request;
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
                           sizeof(request)))
                return -EFAULT;

#if __OS_HAS_AGP
        if (request.flags & _DRM_AGP_BUFFER)
                ret = drm_addbufs_agp(dev, &request);
        else
#endif
        if (request.flags & _DRM_SG_BUFFER)
                ret = drm_addbufs_sg(dev, &request);
        else if (request.flags & _DRM_FB_BUFFER)
                ret = drm_addbufs_fb(dev, &request);
        else
                ret = drm_addbufs_pci(dev, &request);

        if (ret == 0) {
                if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
                        ret = -EFAULT;
                }
        }
        return ret;
}
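/*
 * Illustrative userspace sketch (hypothetical, not from the kernel
 * sources): reaching drm_addbufs() above through DRM_IOCTL_ADD_BUFS.
 * Error handling is trimmed, the header path may differ per install, and
 * plain PCI buffers additionally require CAP_SYS_ADMIN, as
 * drm_addbufs_pci() enforces.
 */
#if 0
#include <sys/ioctl.h>
#include <drm.h>

static int example_add_bufs(int fd)     /* fd: an open DRM device node */
{
        struct drm_buf_desc desc = {
                .count = 32,            /* ask for 32 buffers... */
                .size  = 4096,          /* ...of 4 KiB each */
                .flags = _DRM_PAGE_ALIGN,
        };

        if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) < 0)
                return -1;
        /* desc.count and desc.size now hold what the kernel granted */
        return desc.count;
}
#endif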
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        drm_buf_info_t __user *argp = (void __user *)arg;
        int i;
        int count;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        spin_lock(&dev->count_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock(&dev->count_lock);

        if (copy_from_user(&request, argp, sizeof(request)))
                return -EFAULT;

        for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                if (dma->bufs[i].buf_count)
                        ++count;
        }

        DRM_DEBUG("count = %d\n", count);

        if (request.count >= count) {
                for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                        if (dma->bufs[i].buf_count) {
                                drm_buf_desc_t __user *to =
                                    &request.list[count];
                                drm_buf_entry_t *from = &dma->bufs[i];
                                drm_freelist_t *list = &dma->bufs[i].freelist;
                                if (copy_to_user(&to->count,
                                                 &from->buf_count,
                                                 sizeof(from->buf_count)) ||
                                    copy_to_user(&to->size,
                                                 &from->buf_size,
                                                 sizeof(from->buf_size)) ||
                                    copy_to_user(&to->low_mark,
                                                 &list->low_mark,
                                                 sizeof(list->low_mark)) ||
                                    copy_to_user(&to->high_mark,
                                                 &list->high_mark,
                                                 sizeof(list->high_mark)))
                                        return -EFAULT;

                                DRM_DEBUG("%d %d %d %d %d\n",
                                          i,
                                          dma->bufs[i].buf_count,
                                          dma->bufs[i].buf_size,
                                          dma->bufs[i].freelist.low_mark,
                                          dma->bufs[i].freelist.high_mark);
                                ++count;
                        }
                }
        }
        request.count = count;

        if (copy_to_user(argp, &request, sizeof(request)))
                return -EFAULT;

        return 0;
}
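/*
 * Illustrative userspace sketch (hypothetical, not from the kernel
 * sources): querying drm_infobufs() above through DRM_IOCTL_INFO_BUFS.
 * One entry per size order is sufficient; the literal 23 stands in for
 * DRM_MAX_ORDER + 1, which is kernel-private (drmP.h).
 */
#if 0
#include <sys/ioctl.h>
#include <drm.h>

static int example_info_bufs(int fd)
{
        struct drm_buf_desc desc[23];   /* one slot per size order */
        struct drm_buf_info info = { .count = 23, .list = desc };

        if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info) < 0)
                return -1;
        /* desc[0..info.count-1] now describe each populated pool */
        return info.count;
}
#endif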
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry's low and high water
 * marks.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (copy_from_user(&request,
                           (drm_buf_desc_t __user *) arg, sizeof(request)))
                return -EFAULT;

        DRM_DEBUG("%d, %d, %d\n",
                  request.size, request.low_mark, request.high_mark);
        order = drm_order(request.size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        entry = &dma->bufs[order];

        if (request.low_mark < 0 || request.low_mark > entry->buf_count)
                return -EINVAL;
        if (request.high_mark < 0 || request.high_mark > entry->buf_count)
                return -EINVAL;

        entry->freelist.low_mark = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (copy_from_user(&request,
                           (drm_buf_free_t __user *) arg, sizeof(request)))
                return -EFAULT;

        DRM_DEBUG("%d\n", request.count);
        for (i = 0; i < request.count; i++) {
                if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
                        return -EFAULT;
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                                  idx, dma->buf_count - 1);
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if (buf->filp != filp) {
                        DRM_ERROR("Process %d freeing buffer not owned\n",
                                  current->pid);
                        return -EINVAL;
                }
                drm_free_buffer(dev, buf);
        }

        return 0;
}
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space. The PCI buffers are already mapped on
 * the addbufs_pci() call.
 */
int drm_mapbufs(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_map_t __user *argp = (void __user *)arg;
        int retcode = 0;
        const int zero = 0;
        unsigned long virtual;
        unsigned long address;
        drm_buf_map_t request;
        int i;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        spin_lock(&dev->count_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock(&dev->count_lock);

        if (copy_from_user(&request, argp, sizeof(request)))
                return -EFAULT;

        if (request.count >= dma->buf_count) {
                if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
                    || (drm_core_check_feature(dev, DRIVER_SG)
                        && (dma->flags & _DRM_DMA_USE_SG))
                    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
                        && (dma->flags & _DRM_DMA_USE_FB))) {
                        drm_map_t *map = dev->agp_buffer_map;
                        unsigned long token = dev->agp_buffer_token;

                        if (!map) {
                                retcode = -EINVAL;
                                goto done;
                        }

                        down_write(&current->mm->mmap_sem);
                        virtual = do_mmap(filp, 0, map->size,
                                          PROT_READ | PROT_WRITE,
                                          MAP_SHARED, token);
                        up_write(&current->mm->mmap_sem);
                } else {
                        down_write(&current->mm->mmap_sem);
                        virtual = do_mmap(filp, 0, dma->byte_count,
                                          PROT_READ | PROT_WRITE,
                                          MAP_SHARED, 0);
                        up_write(&current->mm->mmap_sem);
                }
                if (virtual > -1024UL) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void __user *)virtual;

                for (i = 0; i < dma->buf_count; i++) {
                        if (copy_to_user(&request.list[i].idx,
                                         &dma->buflist[i]->idx,
                                         sizeof(request.list[0].idx))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if (copy_to_user(&request.list[i].total,
                                         &dma->buflist[i]->total,
                                         sizeof(request.list[0].total))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if (copy_to_user(&request.list[i].used,
                                         &zero, sizeof(zero))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset;    /* *** */
                        if (copy_to_user(&request.list[i].address,
                                         &address, sizeof(address))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
      done:
        request.count = dma->buf_count;
        DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

        if (copy_to_user(argp, &request, sizeof(request)))
                return -EFAULT;

        return retcode;
}
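/*
 * Aside (not from the kernel sources): do_mmap() returns either a mapped
 * userspace address or a negative errno cast to unsigned long, so the
 * "virtual > -1024UL" test above treats the top 1024 values of the address
 * space as error codes. The kernel's generic spelling of the same idea is
 * IS_ERR_VALUE() from <linux/err.h>, which reserves MAX_ERRNO (4095)
 * values instead:
 */
#if 0
        if (IS_ERR_VALUE(virtual)) {    /* virtual >= (unsigned long)-MAX_ERRNO */
                retcode = (signed long)virtual;
                goto done;
        }
#endif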
/**
 * Compute size order. Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

        if (size & (size - 1))
                ++order;

        return order;
}
EXPORT_SYMBOL(drm_order);
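/*
 * Worked examples (not from the kernel sources): the loop above counts the
 * position of the highest set bit, and the final "size & (size - 1)" test
 * rounds up whenever size is not already a power of two.
 *
 *   drm_order(1)    == 0    (2^0 = 1)
 *   drm_order(4096) == 12   (already a power of two)
 *   drm_order(4097) == 13   (rounded up to 2^13 = 8192)
 */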