Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.35-rc5 (911 lines, 30 kB)
/**************************************************************************
 *
 * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
#include "drm_mm.h"
#include "linux/workqueue.h"
#include "linux/fs.h"
#include "linux/spinlock.h"

struct ttm_backend;

struct ttm_backend_func {
        /**
         * struct ttm_backend_func member populate
         *
         * @backend: Pointer to a struct ttm_backend.
         * @num_pages: Number of pages to populate.
         * @pages: Array of pointers to ttm pages.
         * @dummy_read_page: Page to be used instead of NULL pages in the
         * array @pages.
         *
         * Populate the backend with ttm pages. Depending on the backend,
         * it may or may not copy the @pages array.
         */
        int (*populate) (struct ttm_backend *backend,
                         unsigned long num_pages, struct page **pages,
                         struct page *dummy_read_page);
        /**
         * struct ttm_backend_func member clear
         *
         * @backend: Pointer to a struct ttm_backend.
         *
         * This is an "unpopulate" function. Release all resources
         * allocated with populate.
         */
        void (*clear) (struct ttm_backend *backend);

        /**
         * struct ttm_backend_func member bind
         *
         * @backend: Pointer to a struct ttm_backend.
         * @bo_mem: Pointer to a struct ttm_mem_reg describing the
         * memory type and location for binding.
         *
         * Bind the backend pages into the aperture in the location
         * indicated by @bo_mem. This function should be able to handle
         * differences between aperture- and system page sizes.
         */
        int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);

        /**
         * struct ttm_backend_func member unbind
         *
         * @backend: Pointer to a struct ttm_backend.
         *
         * Unbind previously bound backend pages. This function should be
         * able to handle differences between aperture- and system page sizes.
         */
        int (*unbind) (struct ttm_backend *backend);

        /**
         * struct ttm_backend_func member destroy
         *
         * @backend: Pointer to a struct ttm_backend.
         *
         * Destroy the backend.
         */
        void (*destroy) (struct ttm_backend *backend);
};
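/*
 * A minimal sketch of how a driver might wire up a struct ttm_backend_func.
 * This is illustrative only: the foo_backend type and foo_populate() are
 * hypothetical names, not part of this header or of any real driver.
 */
struct foo_backend {
        struct ttm_backend backend;     /* generic backend, embedded first */
        struct page **pages;            /* cached pointer to the ttm pages */
        unsigned long num_pages;
};

static int foo_populate(struct ttm_backend *backend,
                        unsigned long num_pages, struct page **pages,
                        struct page *dummy_read_page)
{
        struct foo_backend *fbe =
                container_of(backend, struct foo_backend, backend);

        /* This sketch keeps a reference to @pages rather than copying it. */
        fbe->pages = pages;
        fbe->num_pages = num_pages;
        return 0;
}

static struct ttm_backend_func foo_backend_func = {
        .populate = foo_populate,
        /* .clear, .bind, .unbind and .destroy would be filled in likewise. */
};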
/**
 * struct ttm_backend
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @flags: For driver use.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 *
 */

struct ttm_backend {
        struct ttm_bo_device *bdev;
        uint32_t flags;
        struct ttm_backend_func *func;
};

#define TTM_PAGE_FLAG_USER            (1 << 1)
#define TTM_PAGE_FLAG_USER_DIRTY      (1 << 2)
#define TTM_PAGE_FLAG_WRITE           (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
#define TTM_PAGE_FLAG_DMA32           (1 << 7)

enum ttm_caching_state {
        tt_uncached,
        tt_wc,
        tt_cached
};

/**
 * struct ttm_tt
 *
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @first_himem_page: Himem pages are put last in the page array, which
 * enables us to run caching attribute changes on only the first part
 * of the page array containing lomem pages. This is the index of the
 * first himem page.
 * @last_lomem_page: Index of the last lomem page in the page array.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @num_pages: Number of pages in the page array.
 * @glob: Pointer to the current struct ttm_bo_global.
 * @be: Pointer to the ttm backend.
 * @tsk: The task for user ttm.
 * @start: virtual address for user ttm.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
        struct page *dummy_read_page;
        struct page **pages;
        long first_himem_page;
        long last_lomem_page;
        uint32_t page_flags;
        unsigned long num_pages;
        struct ttm_bo_global *glob;
        struct ttm_backend *be;
        struct task_struct *tsk;
        unsigned long start;
        struct file *swap_storage;
        enum ttm_caching_state caching_state;
        enum {
                tt_bound,
                tt_unbound,
                tt_unpopulated,
        } state;
};
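/*
 * Illustrative sketch only: the ttm_tt binding state progresses from
 * tt_unpopulated (no pages) to tt_unbound (pages allocated) to tt_bound
 * (pages visible through the aperture). foo_tt_is_bound() is a
 * hypothetical helper, not part of this header.
 */
static inline bool foo_tt_is_bound(const struct ttm_tt *ttm)
{
        return ttm->state == tt_bound;
}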
#define TTM_MEMTYPE_FLAG_FIXED    (1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA      (1 << 3) /* Can't map aperture */

/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_FLAG_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @manager: The range manager used for this memory type. FIXME: If the aperture
 * has a page size different from the underlying system, the granularity
 * of this manager should take care of this. But the range allocating code
 * in ttm_bo.c needs to be modified for this.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {

        /*
         * No protection. Constant from start.
         */

        bool has_type;
        bool use_type;
        uint32_t flags;
        unsigned long gpu_offset;
        uint64_t size;
        uint32_t available_caching;
        uint32_t default_caching;

        /*
         * Protected by the bdev->lru_lock.
         * TODO: Consider one lru_lock per ttm_mem_type_manager.
         * Plays ill with list removal, though.
         */

        struct drm_mm manager;
        struct list_head lru;
};
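/*
 * A sketch of how a driver's init_mem_type hook might fill in a
 * struct ttm_mem_type_manager per memory type. Illustrative only:
 * foo_init_mem_type() is hypothetical, and the TTM_PL_* names are
 * assumed to come from the TTM placement flag definitions.
 */
static int foo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                             struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                /* On-card memory: fixed, mappable, best used write-combined. */
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                man->gpu_offset = 0;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}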
/**
 * struct ttm_bo_driver
 *
 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 * @sync_obj_signaled: See ttm_fence_api.h
 * @sync_obj_wait: See ttm_fence_api.h
 * @sync_obj_flush: See ttm_fence_api.h
 * @sync_obj_unref: See ttm_fence_api.h
 * @sync_obj_ref: See ttm_fence_api.h
 */

struct ttm_bo_driver {
        /**
         * struct ttm_bo_driver member create_ttm_backend_entry
         *
         * @bdev: The buffer object device.
         *
         * Create a driver specific struct ttm_backend.
         */

        struct ttm_backend *(*create_ttm_backend_entry)
         (struct ttm_bo_device *bdev);

        /**
         * struct ttm_bo_driver member invalidate_caches
         *
         * @bdev: the buffer object device.
         * @flags: new placement of the rebound buffer object.
         *
         * A previously evicted buffer has been rebound in a
         * potentially new location. Tell the driver that it might
         * consider invalidating read (texture) caches on the next command
         * submission as a consequence.
         */

        int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
        int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
                              struct ttm_mem_type_manager *man);
        /**
         * struct ttm_bo_driver member evict_flags:
         *
         * @bo: the buffer object to be evicted
         *
         * Return the bo flags for a buffer which is not mapped to the hardware.
         * These will be placed in proposed_flags so that when the move is
         * finished, they'll end up in bo->mem.flags
         */

        void (*evict_flags) (struct ttm_buffer_object *bo,
                             struct ttm_placement *placement);
        /**
         * struct ttm_bo_driver member move:
         *
         * @bo: the buffer to move
         * @evict: whether this motion is evicting the buffer from
         * the graphics address space
         * @interruptible: Use interruptible sleeps if possible when sleeping.
         * @no_wait_reserve: Return immediately if other buffers are busy.
         * @no_wait_gpu: Return immediately if the GPU is busy.
         * @new_mem: the new memory region receiving the buffer
         *
         * Move a buffer between two memory regions.
         */
        int (*move) (struct ttm_buffer_object *bo,
                     bool evict, bool interruptible,
                     bool no_wait_reserve, bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem);

        /**
         * struct ttm_bo_driver member verify_access
         *
         * @bo: Pointer to a buffer object.
         * @filp: Pointer to a struct file trying to access the object.
         *
         * Called from the map / write / read methods to verify that the
         * caller is permitted to access the buffer object.
         * This member may be set to NULL, which will refuse this kind of
         * access for all buffer objects.
         * This function should return 0 if access is granted, -EPERM otherwise.
         */
        int (*verify_access) (struct ttm_buffer_object *bo,
                              struct file *filp);

        /**
         * In case a driver writer dislikes the TTM fence objects,
         * the driver writer can replace those with sync objects of
         * his / her own. If it turns out that no driver writer is
         * using these, I suggest we remove these hooks and plug in
         * fences directly. The bo driver needs the following functionality:
         * See the corresponding functions in the fence object API
         * documentation.
         */

        bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
        int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
                              bool lazy, bool interruptible);
        int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
        void (*sync_obj_unref) (void **sync_obj);
        void *(*sync_obj_ref) (void *sync_obj);

        /* hook to notify driver about a driver move so it
         * can do tiling things */
        void (*move_notify)(struct ttm_buffer_object *bo,
                            struct ttm_mem_reg *new_mem);
        /* notify the driver we are taking a fault on this BO
         * and have reserved it */
        int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

        /**
         * notify the driver that we're about to swap out this bo
         */
        void (*swap_notify) (struct ttm_buffer_object *bo);

        /**
         * Driver callback on when mapping io memory (for bo_move_memcpy
         * for instance). TTM will take care to call io_mem_free whenever
         * the mapping is not used anymore. io_mem_reserve and io_mem_free
         * are balanced.
         */
        int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
        void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
};
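/*
 * A sketch of the usual pattern: a driver exposes one static
 * struct ttm_bo_driver with its hooks filled in. Illustrative only;
 * every foo_* name below is hypothetical, and foo_init_mem_type refers
 * to the sketch above. Leaving .move NULL selects the generic memcpy move.
 */
extern struct ttm_backend *foo_create_ttm_backend_entry(struct ttm_bo_device *bdev);
extern int foo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags);
extern void foo_evict_flags(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement);
extern int foo_verify_access(struct ttm_buffer_object *bo, struct file *filp);

static struct ttm_bo_driver foo_bo_driver = {
        .create_ttm_backend_entry = foo_create_ttm_backend_entry,
        .invalidate_caches = foo_invalidate_caches,
        .init_mem_type = foo_init_mem_type,
        .evict_flags = foo_evict_flags,
        .move = NULL,                   /* fall back to the memcpy move */
        .verify_access = foo_verify_access,
        /* sync_obj_*, move_notify etc. left NULL in this sketch */
};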
/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
        struct ttm_global_reference ref;
        struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
 * used by a buffer object. This is excluding page arrays and backing pages.
 * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {

        /**
         * Constant after init.
         */

        struct kobject kobj;
        struct ttm_mem_global *mem_glob;
        struct page *dummy_read_page;
        struct ttm_mem_shrink shrink;
        size_t ttm_bo_extra_size;
        size_t ttm_bo_size;
        struct mutex device_list_mutex;
        spinlock_t lru_lock;

        /**
         * Protected by device_list_mutex.
         */
        struct list_head device_list;

        /**
         * Protected by the lru_lock.
         */
        struct list_head swap_lru;

        /**
         * Internal protection.
         */
        atomic_t bo_count;
};


#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING 0       /* Buffer object is moving and needs
                                           idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX 1
/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @addr_space_mm: Range manager for the device address space.
 * @lru_lock: Spinlock that protects the buffer and device lru lists and
 * ddestroy lists.
 * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
 * If a GPU lockup has been detected, this is forced to 0.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 *
 */

struct ttm_bo_device {

        /*
         * Constant after bo device init / atomic.
         */
        struct list_head device_list;
        struct ttm_bo_global *glob;
        struct ttm_bo_driver *driver;
        rwlock_t vm_lock;
        struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
        /*
         * Protected by the vm lock.
         */
        struct rb_root addr_space_rb;
        struct drm_mm addr_space_mm;

        /*
         * Protected by the global:lru lock.
         */
        struct list_head ddestroy;

        /*
         * Protected by load / firstopen / lastclose / unload sync.
         */

        bool nice_mode;
        struct address_space *dev_mapping;

        /*
         * Internal protection.
         */

        struct delayed_work wq;

        bool need_dma32;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
        *old ^= (*old ^ new) & mask;
        return *old;
}
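/*
 * Illustrative usage of ttm_flag_masked(): rewrite only the caching bits
 * of a placement word, leaving all other bits untouched. foo_set_wc() is
 * hypothetical, and TTM_PL_FLAG_WC / TTM_PL_MASK_CACHING are assumed to
 * come from the TTM placement flag definitions.
 */
static inline void foo_set_wc(uint32_t *placement)
{
        ttm_flag_masked(placement, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
}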
/**
 * ttm_tt_create
 *
 * @bdev: pointer to a struct ttm_bo_device.
 * @size: Size of the data that needs backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * NULL: Out of memory.
 */
extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
                                    unsigned long size,
                                    uint32_t page_flags,
                                    struct page *dummy_read_page);

/**
 * ttm_tt_set_user:
 *
 * @ttm: The struct ttm_tt to populate.
 * @tsk: A struct task_struct for which @start is a valid user-space address.
 * @start: A valid user-space address.
 * @num_pages: Size in pages of the user memory area.
 *
 * Populate a struct ttm_tt with a user-space memory area after first pinning
 * the pages backing it.
 * Returns:
 * !0: Error.
 */

extern int ttm_tt_set_user(struct ttm_tt *ttm,
                           struct task_struct *tsk,
                           unsigned long start, unsigned long num_pages);

/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_populate:
 *
 * @ttm: The struct ttm_tt to contain the backing pages.
 *
 * Add backing pages to all of @ttm
 */
extern int ttm_tt_populate(struct ttm_tt *ttm);

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy a struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);

/**
 * ttm_tt_get_page:
 *
 * @ttm: The struct ttm_tt.
 * @index: Index of the desired page.
 *
 * Return a pointer to the struct page backing @ttm at page
 * index @index. If the page is unpopulated, one will be allocated to
 * populate that index.
 *
 * Returns:
 * NULL on OOM.
 */
extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);

/**
 * ttm_tt_cache_flush:
 *
 * @pages: An array of pointers to struct pages to flush.
 * @num_pages: Number of pages to flush.
 *
 * Flush the data of the indicated pages from the cpu caches.
 * This is used when changing caching attributes of the pages from
 * cache-coherent.
 */
extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change the caching policy of any default kernel mappings
 * of the pages backing @ttm. If changing from cached to uncached or
 * write-combined, all CPU caches will first be flushed to make sure the
 * data of the pages hit RAM. This function may be very costly as it involves
 * global TLB and cache flushes and potential page splitting / combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
                          struct file *persistant_swap_storage);
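/*
 * A sketch of the typical ttm_tt life cycle as a driver might drive it:
 * create, populate, bind, and destroy on failure. Illustrative only;
 * foo_back_and_bind() is hypothetical and error handling is minimal.
 */
static int foo_back_and_bind(struct ttm_bo_device *bdev,
                             unsigned long size, struct ttm_mem_reg *mem,
                             struct page *dummy_read_page)
{
        struct ttm_tt *ttm;
        int ret;

        ttm = ttm_tt_create(bdev, size, 0, dummy_read_page);
        if (!ttm)
                return -ENOMEM;

        ret = ttm_tt_populate(ttm);             /* allocate backing pages */
        if (ret == 0)
                ret = ttm_tt_bind(ttm, mem);    /* bind into the aperture */
        if (ret)
                ttm_tt_destroy(ttm);            /* unbind + unpopulate + free */
        return ret;
}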
/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
                               struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible while waiting.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @mem, potentially evicting other idle buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == 1).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement,
                            struct ttm_mem_reg *mem,
                            bool interruptible,
                            bool no_wait_reserve, bool no_wait_gpu);
/**
 * ttm_bo_wait_cpu
 *
 * @bo: Pointer to a struct ttm_buffer_object.
 * @no_wait: Don't sleep while waiting.
 *
 * Wait until a buffer object is no longer sync'ed for CPU access.
 * Returns:
 * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */

extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);

/**
 * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 * @bus_base: On return the base of the PCI region.
 * @bus_offset: On return the byte offset into the PCI region.
 * @bus_size: On return the byte size of the buffer object or zero if
 * the buffer object memory is not accessible through a PCI region.
 *
 * Returns:
 * -EINVAL if the buffer object is currently not mappable.
 * 0 otherwise.
 */

extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
                             struct ttm_mem_reg *mem,
                             unsigned long *bus_base,
                             unsigned long *bus_offset,
                             unsigned long *bus_size);

extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                              struct ttm_mem_reg *mem);
extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
                            struct ttm_mem_reg *mem);

extern void ttm_bo_global_release(struct ttm_global_reference *ref);
extern int ttm_bo_global_init(struct ttm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device.
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
                              struct ttm_bo_global *glob,
                              struct ttm_bo_driver *driver,
                              uint64_t file_page_offset, bool need_dma32);

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
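/*
 * A sketch of bringing up a struct ttm_bo_device at driver load time.
 * Illustrative only: struct foo_device, foo_ttm_init() and
 * FOO_FILE_PAGE_OFFSET are hypothetical, and the offset value is just an
 * example of keeping buffer mmap offsets clear of other address space users.
 */
#define FOO_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

struct foo_device {
        struct ttm_bo_device bdev;
        struct ttm_bo_global *glob;
        bool need_dma32;
};

static int foo_ttm_init(struct foo_device *fdev)
{
        /* foo_bo_driver is the static hook table sketched earlier. */
        return ttm_bo_device_init(&fdev->bdev, fdev->glob, &foo_bo_driver,
                                  FOO_FILE_PAGE_OFFSET, fdev->need_dma32);
}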
/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_sequence: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either deliberately or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted). The following algorithm prevents such deadlocks from
 * occurring:
 * 1) Buffers are reserved with the lru spinlock held. Upon successful
 * reservation they are removed from the lru list. This stops a reserved buffer
 * from being evicted. However the lru spinlock is released between the time
 * a buffer is selected for eviction and the time it is reserved.
 * Therefore a check is made when a buffer is reserved for eviction, that it
 * is still the first buffer in the lru list, before it is removed from the
 * list. @check_lru == 1 forces this check. If it fails, the function returns
 * -EINVAL, and the caller should then choose a new buffer to evict and repeat
 * the procedure.
 * 2) Processes attempting to reserve multiple buffers other than for eviction
 * (typically execbuf) should first obtain a unique 32-bit
 * validation sequence number,
 * and call this function with @use_sequence == 1 and @sequence == the unique
 * sequence number. If upon call of this function, the buffer object is already
 * reserved, the validation sequence is checked against the validation
 * sequence of the process currently reserving the buffer,
 * and if the current validation sequence is greater than that of the process
 * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
 * waiting for the buffer to become unreserved, after which it retries
 * reserving.
 * The caller should, when receiving an -EAGAIN error,
 * release all its buffer reservations, wait for @bo to become unreserved, and
 * then rerun the validation with the same validation sequence. This procedure
 * will always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EAGAIN: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if use_sequence == 1).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence);

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);

/**
 * ttm_bo_wait_unreserved
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Wait for a struct ttm_buffer_object to become unreserved.
 * This is typically used in the execbuf code to relax cpu-usage when
 * backing off from a potential deadlock condition.
 */
extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
                                  bool interruptible);
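/*
 * A sketch of the -EAGAIN backoff described above, for a process
 * reserving several buffers under one validation sequence. Illustrative
 * only: foo_reserve_one() and foo_release_all() are hypothetical; a real
 * caller would re-run its whole validation with the same sequence.
 */
extern void foo_release_all(void);

static int foo_reserve_one(struct ttm_buffer_object *bo, uint32_t val_seq)
{
        int ret;

        do {
                ret = ttm_bo_reserve(bo, true, false, true, val_seq);
                if (ret != -EAGAIN)
                        break;
                /*
                 * The process holding @bo has an older sequence and wins:
                 * back off, drop all our reservations, then wait and retry.
                 */
                foo_release_all();
                ret = ttm_bo_wait_unreserved(bo, true);
        } while (ret == 0);

        return ret;
}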
The function will, if successful, 812 * free any old aperture space, and set (@new_mem)->mm_node to NULL, 813 * and update the (@bo)->mem placement flags. If unsuccessful, the old 814 * data remains untouched, and it's up to the caller to free the 815 * memory space indicated by @new_mem. 816 * Returns: 817 * !0: Failure. 818 */ 819 820extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, 821 bool evict, bool no_wait_reserve, 822 bool no_wait_gpu, struct ttm_mem_reg *new_mem); 823 824/** 825 * ttm_bo_move_memcpy 826 * 827 * @bo: A pointer to a struct ttm_buffer_object. 828 * @evict: 1: This is an eviction. Don't try to pipeline. 829 * @no_wait_reserve: Return immediately if other buffers are busy. 830 * @no_wait_gpu: Return immediately if the GPU is busy. 831 * @new_mem: struct ttm_mem_reg indicating where to move. 832 * 833 * Fallback move function for a mappable buffer object in mappable memory. 834 * The function will, if successful, 835 * free any old aperture space, and set (@new_mem)->mm_node to NULL, 836 * and update the (@bo)->mem placement flags. If unsuccessful, the old 837 * data remains untouched, and it's up to the caller to free the 838 * memory space indicated by @new_mem. 839 * Returns: 840 * !0: Failure. 841 */ 842 843extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, 844 bool evict, bool no_wait_reserve, 845 bool no_wait_gpu, struct ttm_mem_reg *new_mem); 846 847/** 848 * ttm_bo_free_old_node 849 * 850 * @bo: A pointer to a struct ttm_buffer_object. 851 * 852 * Utility function to free an old placement after a successful move. 853 */ 854extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); 855 856/** 857 * ttm_bo_move_accel_cleanup. 858 * 859 * @bo: A pointer to a struct ttm_buffer_object. 860 * @sync_obj: A sync object that signals when moving is complete. 861 * @sync_obj_arg: An argument to pass to the sync object idle / wait 862 * functions. 863 * @evict: This is an evict move. Don't return until the buffer is idle. 864 * @no_wait_reserve: Return immediately if other buffers are busy. 865 * @no_wait_gpu: Return immediately if the GPU is busy. 866 * @new_mem: struct ttm_mem_reg indicating where to move. 867 * 868 * Accelerated move function to be called when an accelerated move 869 * has been scheduled. The function will create a new temporary buffer object 870 * representing the old placement, and put the sync object on both buffer 871 * objects. After that the newly created buffer object is unref'd to be 872 * destroyed when the move is complete. This will help pipeline 873 * buffer moves. 874 */ 875 876extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 877 void *sync_obj, 878 void *sync_obj_arg, 879 bool evict, bool no_wait_reserve, 880 bool no_wait_gpu, 881 struct ttm_mem_reg *new_mem); 882/** 883 * ttm_io_prot 884 * 885 * @c_state: Caching state. 886 * @tmp: Page protection flag for a normal, cached mapping. 887 * 888 * Utility function that returns the pgprot_t that should be used for 889 * setting up a PTE with the caching model indicated by @c_state. 890 */ 891extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); 892 893#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) 894#define TTM_HAS_AGP 895#include <linux/agp_backend.h> 896 897/** 898 * ttm_agp_backend_init 899 * 900 * @bdev: Pointer to a struct ttm_bo_device. 901 * @bridge: The agp bridge this device is sitting on. 902 * 903 * Create a TTM backend that uses the indicated AGP bridge as an aperture 904 * for TT memory. 
This function uses the linux agpgart interface to 905 * bind and unbind memory backing a ttm_tt. 906 */ 907extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev, 908 struct agp_bridge_data *bridge); 909#endif 910 911#endif