Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.36
/**************************************************************************
 *
 * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
#include "drm_mm.h"
#include "drm_global.h"
#include "linux/workqueue.h"
#include "linux/fs.h"
#include "linux/spinlock.h"

struct ttm_backend;

struct ttm_backend_func {
        /**
         * struct ttm_backend_func member populate
         *
         * @backend: Pointer to a struct ttm_backend.
         * @num_pages: Number of pages to populate.
         * @pages: Array of pointers to ttm pages.
         * @dummy_read_page: Page to be used instead of NULL pages in the
         * array @pages.
         *
         * Populate the backend with ttm pages. Depending on the backend,
         * it may or may not copy the @pages array.
         */
        int (*populate) (struct ttm_backend *backend,
                         unsigned long num_pages, struct page **pages,
                         struct page *dummy_read_page);
        /**
         * struct ttm_backend_func member clear
         *
         * @backend: Pointer to a struct ttm_backend.
         *
         * This is an "unpopulate" function. Release all resources
         * allocated with populate.
         */
        void (*clear) (struct ttm_backend *backend);

        /**
         * struct ttm_backend_func member bind
         *
         * @backend: Pointer to a struct ttm_backend.
         * @bo_mem: Pointer to a struct ttm_mem_reg describing the
         * memory type and location for binding.
         *
         * Bind the backend pages into the aperture in the location
         * indicated by @bo_mem. This function should be able to handle
         * differences between aperture- and system page sizes.
         */
        int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);

        /**
         * struct ttm_backend_func member unbind
         *
         * @backend: Pointer to a struct ttm_backend.
         *
         * Unbind previously bound backend pages. This function should be
         * able to handle differences between aperture- and system page sizes.
         */
        int (*unbind) (struct ttm_backend *backend);

        /**
         * struct ttm_backend_func member destroy
         *
         * @backend: Pointer to a struct ttm_backend.
         *
         * Destroy the backend.
         */
        void (*destroy) (struct ttm_backend *backend);
};

/**
 * struct ttm_backend
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @flags: For driver use.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 *
 */

struct ttm_backend {
        struct ttm_bo_device *bdev;
        uint32_t flags;
        struct ttm_backend_func *func;
};

#define TTM_PAGE_FLAG_USER            (1 << 1)
#define TTM_PAGE_FLAG_USER_DIRTY      (1 << 2)
#define TTM_PAGE_FLAG_WRITE           (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
#define TTM_PAGE_FLAG_DMA32           (1 << 7)

enum ttm_caching_state {
        tt_uncached,
        tt_wc,
        tt_cached
};

/**
 * struct ttm_tt
 *
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @first_himem_page: Himem pages are put last in the page array, which
 * enables us to run caching attribute changes on only the first part
 * of the page array containing lomem pages. This is the index of the
 * first himem page.
 * @last_lomem_page: Index of the last lomem page in the page array.
 * @page_flags: TTM_PAGE_FLAG_XX flags.
 * @num_pages: Number of pages in the page array.
 * @glob: Pointer to the current struct ttm_bo_global.
 * @be: Pointer to the ttm backend.
 * @tsk: The task for user ttm.
 * @start: virtual address for user ttm.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
        struct page *dummy_read_page;
        struct page **pages;
        long first_himem_page;
        long last_lomem_page;
        uint32_t page_flags;
        unsigned long num_pages;
        struct ttm_bo_global *glob;
        struct ttm_backend *be;
        struct task_struct *tsk;
        unsigned long start;
        struct file *swap_storage;
        enum ttm_caching_state caching_state;
        enum {
                tt_bound,
                tt_unbound,
                tt_unpopulated,
        } state;
};
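
/*
 * Illustrative sketch (not part of the original header): a minimal,
 * hypothetical driver-side backend built on struct ttm_backend_func.
 * All my_* names are invented. A real driver would program its
 * GART/aperture hardware in its bind()/unbind() methods; here the
 * populate()/clear() pair only caches the page array, which the
 * populate contract above explicitly permits.
 */
struct my_backend {
        struct ttm_backend backend;
        struct page **pages;            /* array owned by the ttm_tt */
        unsigned long num_pages;
};

static inline int my_populate(struct ttm_backend *be,
                              unsigned long num_pages, struct page **pages,
                              struct page *dummy_read_page)
{
        struct my_backend *mbe = container_of(be, struct my_backend, backend);

        mbe->pages = pages;     /* a backend may keep a reference to @pages */
        mbe->num_pages = num_pages;
        return 0;
}

static inline void my_clear(struct ttm_backend *be)
{
        struct my_backend *mbe = container_of(be, struct my_backend, backend);

        mbe->pages = NULL;
        mbe->num_pages = 0;
}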

#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA           (1 << 3) /* Can't map aperture */

/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @manager: The range manager used for this memory type. FIXME: If the aperture
 * has a page size different from the underlying system, the granularity
 * of this manager should take care of this. But the range allocating code
 * in ttm_bo.c needs to be modified for this.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {

        /*
         * No protection. Constant from start.
         */

        bool has_type;
        bool use_type;
        uint32_t flags;
        unsigned long gpu_offset;
        uint64_t size;
        uint32_t available_caching;
        uint32_t default_caching;

        /*
         * Protected by the bdev->lru_lock.
         * TODO: Consider one lru_lock per ttm_mem_type_manager.
         * Plays ill with list removal, though.
         */

        struct drm_mm manager;
        struct list_head lru;
};
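
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * init_mem_type() callback filling in a manager for the system and TT
 * memory types, loosely following the pattern of in-tree drivers. It
 * assumes "ttm/ttm_placement.h" is included for the TTM_PL_* constants;
 * my_init_mem_type is an invented name. The size and has_type/use_type
 * members are managed by the TTM core, not by this callback.
 */
static inline int my_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                   struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory: always mappable, any caching policy. */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_TT:
                /* Aperture-bound memory; gpu_offset locates the aperture. */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->gpu_offset = 0;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}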

/**
 * struct ttm_bo_driver
 *
 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 * @sync_obj_signaled: See ttm_fence_api.h
 * @sync_obj_wait: See ttm_fence_api.h
 * @sync_obj_flush: See ttm_fence_api.h
 * @sync_obj_unref: See ttm_fence_api.h
 * @sync_obj_ref: See ttm_fence_api.h
 */

struct ttm_bo_driver {
        /**
         * struct ttm_bo_driver member create_ttm_backend_entry
         *
         * @bdev: The buffer object device.
         *
         * Create a driver specific struct ttm_backend.
         */

        struct ttm_backend *(*create_ttm_backend_entry)
         (struct ttm_bo_device *bdev);

        /**
         * struct ttm_bo_driver member invalidate_caches
         *
         * @bdev: the buffer object device.
         * @flags: new placement of the rebound buffer object.
         *
         * A previously evicted buffer has been rebound in a
         * potentially new location. Tell the driver that it might
         * consider invalidating read (texture) caches on the next command
         * submission as a consequence.
         */

        int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
        int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
                              struct ttm_mem_type_manager *man);
        /**
         * struct ttm_bo_driver member evict_flags:
         *
         * @bo: the buffer object to be evicted
         *
         * Return the bo flags for a buffer which is not mapped to the hardware.
         * These will be placed in proposed_flags so that when the move is
         * finished, they'll end up in bo->mem.flags
         */

        void (*evict_flags) (struct ttm_buffer_object *bo,
                             struct ttm_placement *placement);
        /**
         * struct ttm_bo_driver member move:
         *
         * @bo: the buffer to move
         * @evict: whether this motion is evicting the buffer from
         * the graphics address space
         * @interruptible: Use interruptible sleeps if possible when sleeping.
         * @no_wait_reserve: Return immediately if other buffers are busy.
         * @no_wait_gpu: Return immediately if the GPU is busy.
         * @new_mem: the new memory region receiving the buffer
         *
         * Move a buffer between two memory regions.
         */
        int (*move) (struct ttm_buffer_object *bo,
                     bool evict, bool interruptible,
                     bool no_wait_reserve, bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem);

        /**
         * struct ttm_bo_driver member verify_access
         *
         * @bo: Pointer to a buffer object.
         * @filp: Pointer to a struct file trying to access the object.
         *
         * Called from the map / write / read methods to verify that the
         * caller is permitted to access the buffer object.
         * This member may be set to NULL, which will refuse this kind of
         * access for all buffer objects.
         * This function should return 0 if access is granted, -EPERM otherwise.
         */
        int (*verify_access) (struct ttm_buffer_object *bo,
                              struct file *filp);

        /**
         * In case a driver writer dislikes the TTM fence objects,
         * the driver writer can replace those with sync objects of
         * his / her own. If it turns out that no driver writer is
         * using these, I suggest we remove these hooks and plug in
         * fences directly. The bo driver needs the following functionality:
         * See the corresponding functions in the fence object API
         * documentation.
         */

        bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
        int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
                              bool lazy, bool interruptible);
        int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
        void (*sync_obj_unref) (void **sync_obj);
        void *(*sync_obj_ref) (void *sync_obj);

        /* hook to notify driver about a driver move so it
         * can do tiling things */
        void (*move_notify)(struct ttm_buffer_object *bo,
                            struct ttm_mem_reg *new_mem);
        /* notify the driver we are taking a fault on this BO
         * and have reserved it */
        int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

        /**
         * notify the driver that we're about to swap out this bo
         */
        void (*swap_notify) (struct ttm_buffer_object *bo);

        /**
         * Driver callback on when mapping io memory (for bo_move_memcpy
         * for instance). TTM will take care to call io_mem_free whenever
         * the mapping is not used anymore. io_mem_reserve & io_mem_free
         * are balanced.
         */
        int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
        void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
};
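
/*
 * Illustrative sketch (not part of the original header): a hypothetical,
 * partial struct ttm_bo_driver showing the verify_access contract
 * documented above (return 0 to grant access, -EPERM to deny) and a
 * NULL .move, which makes TTM fall back to a memcpy move. A real driver
 * must also fill in create_ttm_backend_entry, init_mem_type, evict_flags
 * and the sync_obj_* hooks; all my_* names are invented.
 */
static inline int my_verify_access(struct ttm_buffer_object *bo,
                                   struct file *filp)
{
        /* A real driver would match @filp against the object's owner. */
        return 0;
}

static struct ttm_bo_driver my_bo_driver = {
        .verify_access = my_verify_access,
        .move = NULL,           /* fall back to a memcpy move */
};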

/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
        struct drm_global_reference ref;
        struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
 * used by a buffer object. This is excluding page arrays and backing pages.
 * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {

        /**
         * Constant after init.
         */

        struct kobject kobj;
        struct ttm_mem_global *mem_glob;
        struct page *dummy_read_page;
        struct ttm_mem_shrink shrink;
        size_t ttm_bo_extra_size;
        size_t ttm_bo_size;
        struct mutex device_list_mutex;
        spinlock_t lru_lock;

        /**
         * Protected by device_list_mutex.
         */
        struct list_head device_list;

        /**
         * Protected by the lru_lock.
         */
        struct list_head swap_lru;

        /**
         * Internal protection.
         */
        atomic_t bo_count;
};
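
/*
 * Illustrative sketch (not part of the original header): how a driver
 * typically instantiates the bo global state through the drm_global
 * machinery, following the pattern used by in-tree drivers. The
 * my_ttm_global_init name is invented; in a real driver this would live
 * in a .c file.
 */

/* Declared further below in this header; repeated so the sketch is
 * self-contained at this point in the file. */
extern int ttm_bo_global_init(struct drm_global_reference *ref);
extern void ttm_bo_global_release(struct drm_global_reference *ref);

static inline int my_ttm_global_init(struct ttm_bo_global_ref *bo_ref)
{
        struct drm_global_reference *ref = &bo_ref->ref;

        ref->global_type = DRM_GLOBAL_TTM_BO;
        ref->size = sizeof(struct ttm_bo_global);
        ref->init = &ttm_bo_global_init;
        ref->release = &ttm_bo_global_release;
        /* drm_global_item_ref() allocates ref->object and calls ref->init
         * on first use, and only takes a reference afterwards. */
        return drm_global_item_ref(ref);
}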

#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING  0      /* Buffer object is moving and needs
                                           idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX 1
/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @addr_space_mm: Range manager for the device address space.
 * @lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
 * If a GPU lockup has been detected, this is forced to 0.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 *
 */

struct ttm_bo_device {

        /*
         * Constant after bo device init / atomic.
         */
        struct list_head device_list;
        struct ttm_bo_global *glob;
        struct ttm_bo_driver *driver;
        rwlock_t vm_lock;
        struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
        /*
         * Protected by the vm lock.
         */
        struct rb_root addr_space_rb;
        struct drm_mm addr_space_mm;

        /*
         * Protected by the global lru lock.
         */
        struct list_head ddestroy;

        /*
         * Protected by load / firstopen / lastclose / unload sync.
         */

        bool nice_mode;
        struct address_space *dev_mapping;

        /*
         * Internal protection.
         */

        struct delayed_work wq;

        bool need_dma32;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
        *old ^= (*old ^ new) & mask;
        return *old;
}
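
/*
 * Illustrative sketch (not part of the original header): a worked example
 * of ttm_flag_masked(). The XOR form replaces only the bits selected by
 * @mask and leaves all other bits of *@old untouched.
 */
static inline void ttm_flag_masked_example(void)
{
        uint32_t flags = 0x000000f0;

        /* Set bits 0-3 to 1010b (0xa). Bits 4-7 are outside the mask and
         * keep their old value, so the result is 0x000000fa. */
        ttm_flag_masked(&flags, 0x0000000a, 0x0000000f);
}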

/**
 * ttm_tt_create
 *
 * @bdev: pointer to a struct ttm_bo_device.
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * NULL: Out of memory.
 */
extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
                                    unsigned long size,
                                    uint32_t page_flags,
                                    struct page *dummy_read_page);

/**
 * ttm_tt_set_user:
 *
 * @ttm: The struct ttm_tt to populate.
 * @tsk: A struct task_struct for which @start is a valid user-space address.
 * @start: A valid user-space address.
 * @num_pages: Size in pages of the user memory area.
 *
 * Populate a struct ttm_tt with a user-space memory area after first pinning
 * the pages backing it.
 * Returns:
 * !0: Error.
 */

extern int ttm_tt_set_user(struct ttm_tt *ttm,
                           struct task_struct *tsk,
                           unsigned long start, unsigned long num_pages);

/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_populate:
 *
 * @ttm: The struct ttm_tt to contain the backing pages.
 *
 * Add backing pages to all of @ttm
 */
extern int ttm_tt_populate(struct ttm_tt *ttm);

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy a struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);
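
/*
 * Illustrative sketch (not part of the original header): the typical
 * life cycle of a struct ttm_tt using the functions declared above.
 * my_bind_tt and its argument list are invented for illustration.
 */
static inline int my_bind_tt(struct ttm_bo_device *bdev,
                             struct page *dummy_read_page,
                             struct ttm_mem_reg *bo_mem,
                             unsigned long size)
{
        struct ttm_tt *ttm;
        int ret;

        /* No pages are allocated here; they appear when the tt is
         * populated or bound. */
        ttm = ttm_tt_create(bdev, size, 0, dummy_read_page);
        if (unlikely(ttm == NULL))
                return -ENOMEM;

        /* Bind to the aperture location described by @bo_mem. */
        ret = ttm_tt_bind(ttm, bo_mem);
        if (unlikely(ret != 0))
                ttm_tt_destroy(ttm);    /* also unbinds and unpopulates */

        return ret;
}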

/**
 * ttm_tt_get_page:
 *
 * @ttm: The struct ttm_tt.
 * @index: Index of the desired page.
 *
 * Return a pointer to the struct page backing @ttm at page
 * index @index. If the page is unpopulated, one will be allocated to
 * populate that index.
 *
 * Returns:
 * NULL on OOM.
 */
extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);

/**
 * ttm_tt_cache_flush:
 *
 * @pages: An array of pointers to struct page to flush.
 * @num_pages: Number of pages to flush.
 *
 * Flush the data of the indicated pages from the cpu caches.
 * This is used when changing caching attributes of the pages from
 * cache-coherent.
 */
extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change the caching policy of any default kernel
 * mappings of the pages backing @ttm. If changing from cached to uncached
 * or write-combined, all CPU caches will first be flushed to make sure the
 * data of the pages hit RAM. This function may be very costly as it involves
 * global TLB and cache flushes and potential page splitting / combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
                          struct file *persistant_swap_storage);

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
                               struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible when sleeping.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle
 * buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == 1).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement,
                            struct ttm_mem_reg *mem,
                            bool interruptible,
                            bool no_wait_reserve, bool no_wait_gpu);
/**
 * ttm_bo_wait_cpu
 *
 * @bo: Pointer to a struct ttm_buffer_object.
 * @no_wait: Don't sleep while waiting.
 *
 * Wait until a buffer object is no longer sync'ed for CPU access.
 * Returns:
 * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */

extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);

/**
 * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: The struct ttm_mem_reg to query.
 * @bus_base: On return the base of the PCI region.
 * @bus_offset: On return the byte offset into the PCI region.
 * @bus_size: On return the byte size of the buffer object or zero if
 * the buffer object memory is not accessible through a PCI region.
 *
 * Returns:
 * -EINVAL if the buffer object is currently not mappable.
 * 0 otherwise.
 */

extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
                             struct ttm_mem_reg *mem,
                             unsigned long *bus_base,
                             unsigned long *bus_offset,
                             unsigned long *bus_size);

extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                              struct ttm_mem_reg *mem);
extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
                            struct ttm_mem_reg *mem);

extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device.
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
                              struct ttm_bo_global *glob,
                              struct ttm_bo_driver *driver,
                              uint64_t file_page_offset, bool need_dma32);

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_sequence: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted). The following algorithm prevents such deadlocks from
 * occurring:
 * 1) Buffers are reserved with the lru spinlock held. Upon successful
 * reservation they are removed from the lru list. This stops a reserved buffer
 * from being evicted. However the lru spinlock is released between the time
 * a buffer is selected for eviction and the time it is reserved.
 * Therefore a check is made when a buffer is reserved for eviction, that it
 * is still the first buffer in the lru list, before it is removed from the
 * list. @check_lru == 1 forces this check. If it fails, the function returns
 * -EINVAL, and the caller should then choose a new buffer to evict and repeat
 * the procedure.
 * 2) Processes attempting to reserve multiple buffers other than for eviction
 * (typically execbuf) should first obtain a unique 32-bit
 * validation sequence number,
 * and call this function with @use_sequence == 1 and @sequence == the unique
 * sequence number. If upon call of this function, the buffer object is already
 * reserved, the validation sequence is checked against the validation
 * sequence of the process currently reserving the buffer,
 * and if the current validation sequence is greater than that of the process
 * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
 * waiting for the buffer to become unreserved, after which it retries
 * reserving.
 * The caller should, when receiving an -EAGAIN error,
 * release all its buffer reservations, wait for @bo to become unreserved, and
 * then rerun the validation with the same validation sequence. This procedure
 * will always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EAGAIN: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if use_sequence == 1).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence);

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);

/**
 * ttm_bo_wait_unreserved
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Wait for a struct ttm_buffer_object to become unreserved.
 * This is typically used in the execbuf code to relax cpu-usage when
 * a potential deadlock condition backs off.
 */
extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
                                  bool interruptible);
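
/*
 * Illustrative sketch (not part of the original header): the -EAGAIN
 * backoff protocol described above, as seen by a caller (e.g. execbuf)
 * reserving one buffer of a list under a single validation sequence.
 * my_unreserve_all() is invented and stands in for releasing every
 * reservation the caller holds; a real caller would then rerun the
 * validation of the whole list with the same sequence number.
 */
extern void my_unreserve_all(void);     /* invented */

static inline int my_reserve_one(struct ttm_buffer_object *bo,
                                 uint32_t val_seq)
{
        int ret;

        ret = ttm_bo_reserve(bo, true, false, true, val_seq);
        while (unlikely(ret == -EAGAIN)) {
                /* A process with a lower sequence holds a conflicting
                 * reservation: back off completely, wait, then retry
                 * with the same sequence number. */
                my_unreserve_all();
                ret = ttm_bo_wait_unreserved(bo, true);
                if (unlikely(ret != 0))
                        return ret;     /* -ERESTARTSYS: hit by a signal */
                ret = ttm_bo_reserve(bo, true, false, true, val_seq);
        }
        return ret;
}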

/*
 * ttm_bo_util.c
 */

/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                           bool evict, bool no_wait_reserve,
                           bool no_wait_gpu, struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                              bool evict, bool no_wait_reserve,
                              bool no_wait_gpu, struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when moving is complete.
 * @sync_obj_arg: An argument to pass to the sync object idle / wait
 * functions.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */

extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                                     void *sync_obj,
                                     void *sync_obj_arg,
                                     bool evict, bool no_wait_reserve,
                                     bool no_wait_gpu,
                                     struct ttm_mem_reg *new_mem);
/**
 * ttm_io_prot
 *
 * @caching_flags: Caching flags, TTM_PL_FLAG_XX.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */
extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
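
/*
 * Illustrative sketch (not part of the original header): the shape of a
 * driver .move callback built from the utility functions above, loosely
 * following the pattern of in-tree drivers. my_move_blit() is invented
 * and stands in for a hardware copy that would finish by calling
 * ttm_bo_move_accel_cleanup(); on any failure the sketch falls back to
 * the slow but safe ttm_bo_move_memcpy() path.
 */
extern int my_move_blit(struct ttm_buffer_object *bo, bool evict,
                        bool no_wait_reserve, bool no_wait_gpu,
                        struct ttm_mem_reg *new_mem);   /* invented */

static inline int my_bo_move(struct ttm_buffer_object *bo,
                             bool evict, bool interruptible,
                             bool no_wait_reserve, bool no_wait_gpu,
                             struct ttm_mem_reg *new_mem)
{
        int ret;

        ret = my_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
        if (unlikely(ret != 0))
                /* Hardware path failed; use the memcpy fallback. */
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
                                         no_wait_gpu, new_mem);
        return ret;
}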

#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define TTM_HAS_AGP
#include <linux/agp_backend.h>

/**
 * ttm_agp_backend_init
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
                                                struct agp_bridge_data *bridge);
#endif

#endif
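
/*
 * Illustrative sketch (not part of the original header): how a driver's
 * create_ttm_backend_entry callback might hand TT memory to the AGP
 * backend when TTM_HAS_AGP is available. my_agp_bridge is an invented
 * per-device handle; in a real driver this would live in a .c file that
 * includes this header.
 */
#ifdef TTM_HAS_AGP
extern struct agp_bridge_data *my_agp_bridge;   /* invented */

static inline struct ttm_backend *
my_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
        /* The returned backend binds and unbinds through agpgart. */
        return ttm_agp_backend_init(bdev, my_agp_bridge);
}
#endif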