ttm_bo_driver.h from the Linux kernel mirror (for testing), git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v2.6.37-rc8 (1004 lines, 33 kB).
/**************************************************************************
 *
 * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
#include "drm_mm.h"
#include "drm_global.h"
#include "linux/workqueue.h"
#include "linux/fs.h"
#include "linux/spinlock.h"

struct ttm_backend;

struct ttm_backend_func {
	/**
	 * struct ttm_backend_func member populate
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 * @num_pages: Number of pages to populate.
	 * @pages: Array of pointers to ttm pages.
	 * @dummy_read_page: Page to be used instead of NULL pages in the
	 * array @pages.
	 *
	 * Populate the backend with ttm pages. Depending on the backend,
	 * it may or may not copy the @pages array.
	 */
	int (*populate) (struct ttm_backend *backend,
			 unsigned long num_pages, struct page **pages,
			 struct page *dummy_read_page);
	/**
	 * struct ttm_backend_func member clear
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 *
	 * This is an "unpopulate" function. Release all resources
	 * allocated with populate.
	 */
	void (*clear) (struct ttm_backend *backend);

	/**
	 * struct ttm_backend_func member bind
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture- and system page sizes.
	 */
	int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);

	/**
	 * struct ttm_backend_func member unbind
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture- and system page sizes.
	 */
	int (*unbind) (struct ttm_backend *backend);

	/**
	 * struct ttm_backend_func member destroy
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 *
	 * Destroy the backend.
	 */
	void (*destroy) (struct ttm_backend *backend);
};

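/*
 * Illustrative sketch, not part of the original header: how a driver might
 * fill in a struct ttm_backend_func table. All foo_* names are hypothetical;
 * a real driver supplies its own implementations with these signatures.
 */
#if 0
static int foo_populate(struct ttm_backend *backend,
			unsigned long num_pages, struct page **pages,
			struct page *dummy_read_page)
{
	/* Remember (or copy) the page array for a later bind(). */
	return 0;
}

static void foo_clear(struct ttm_backend *backend)
{
	/* Release whatever populate() set up. */
}

static int foo_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
	/* Program the GPU aperture / GART with the populated pages. */
	return 0;
}

static int foo_unbind(struct ttm_backend *backend)
{
	/* Remove the pages from the aperture again. */
	return 0;
}

static void foo_destroy(struct ttm_backend *backend)
{
	/* Free the driver's backend object itself. */
}

static struct ttm_backend_func foo_backend_func = {
	.populate = foo_populate,
	.clear = foo_clear,
	.bind = foo_bind,
	.unbind = foo_unbind,
	.destroy = foo_destroy,
};
#endif
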
/**
 * struct ttm_backend
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @flags: For driver use.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 *
 */

struct ttm_backend {
	struct ttm_bo_device *bdev;
	uint32_t flags;
	struct ttm_backend_func *func;
};

#define TTM_PAGE_FLAG_USER            (1 << 1)
#define TTM_PAGE_FLAG_USER_DIRTY      (1 << 2)
#define TTM_PAGE_FLAG_WRITE           (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
#define TTM_PAGE_FLAG_DMA32           (1 << 7)

enum ttm_caching_state {
	tt_uncached,
	tt_wc,
	tt_cached
};

/**
 * struct ttm_tt
 *
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @first_himem_page: Himem pages are put last in the page array, which
 * enables us to run caching attribute changes on only the first part
 * of the page array containing lomem pages. This is the index of the
 * first himem page.
 * @last_lomem_page: Index of the last lomem page in the page array.
 * @num_pages: Number of pages in the page array.
 * @glob: Pointer to the current struct ttm_bo_global.
 * @be: Pointer to the ttm backend.
 * @tsk: The task for user ttm.
 * @start: virtual address for user ttm.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
	struct page *dummy_read_page;
	struct page **pages;
	long first_himem_page;
	long last_lomem_page;
	uint32_t page_flags;
	unsigned long num_pages;
	struct ttm_bo_global *glob;
	struct ttm_backend *be;
	struct task_struct *tsk;
	unsigned long start;
	struct file *swap_storage;
	enum ttm_caching_state caching_state;
	enum {
		tt_bound,
		tt_unbound,
		tt_unpopulated,
	} state;
};

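/*
 * Illustrative sketch, not part of the original header: creating a ttm_tt
 * with a combination of the TTM_PAGE_FLAG_* bits above. ttm_tt_create() is
 * declared further down in this file; foo_alloc_tt() is hypothetical.
 */
#if 0
static struct ttm_tt *foo_alloc_tt(struct ttm_bo_device *bdev,
				   unsigned long size, bool need_dma32)
{
	uint32_t page_flags = TTM_PAGE_FLAG_ZERO_ALLOC;

	if (need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	/* No pages are allocated yet; the ttm_tt starts out tt_unpopulated. */
	return ttm_tt_create(bdev, size, page_flags,
			     bdev->glob->dummy_read_page);
}
#endif
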
#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)	/* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */

/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @manager: The range manager used for this memory type. FIXME: If the aperture
 * has a page size different from the underlying system, the granularity
 * of this manager should take care of this. But the range allocating code
 * in ttm_bo.c needs to be modified for this.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
	/**
	 * struct ttm_mem_type_manager member init
	 *
	 * @man: Pointer to a memory type manager.
	 * @p_size: Implementation dependent, but typically the size of the
	 * range to be managed in pages.
	 *
	 * Called to initialize a private range manager. The function is
	 * expected to initialize the man::priv member.
	 * Returns 0 on success, negative error code on failure.
	 */
	int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

	/**
	 * struct ttm_mem_type_manager member takedown
	 *
	 * @man: Pointer to a memory type manager.
	 *
	 * Called to undo the setup done in init. All allocated resources
	 * should be freed.
	 */
	int  (*takedown)(struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_mem_type_manager member get_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
	 * @placement: Placement details.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function should allocate space in the memory type managed
	 * by @man. Placement details, if applicable, are given by @placement.
	 * If successful, @mem::mm_node should be set to a non-null value,
	 * @mem::start should be set to a value identifying the beginning
	 * of the range allocated, and the function should return zero.
	 * If the memory region could not accommodate the buffer object,
	 * @mem::mm_node should be set to NULL, and the function should
	 * return 0.
	 * If a system error occurred, preventing the request from being
	 * fulfilled, the function should return a negative error code.
	 *
	 * Note that @mem::mm_node will only be dereferenced by
	 * struct ttm_mem_type_manager functions and optionally by the driver,
	 * which has knowledge of the underlying type.
	 *
	 * This function may not be called from within atomic context, so
	 * an implementation can and must use either a mutex or a spinlock to
	 * protect any data structures managing the space.
	 */
	int  (*get_node)(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member put_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function frees memory type resources previously allocated
	 * and that are identified by @mem::mm_node and @mem::start. May not
	 * be called from within atomic context.
	 */
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member debug
	 *
	 * @man: Pointer to a memory type manager.
	 * @prefix: Prefix to be used in printout to identify the caller.
	 *
	 * This function is called to print out the state of the memory
	 * type manager to aid debugging of out-of-memory conditions.
	 * It may not be called from within atomic context.
	 */
	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};

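/*
 * Illustrative sketch, not part of the original header: a trivial
 * ttm_mem_type_manager_func implementation that hands the whole managed
 * range to one buffer at a time. The mm_node / start fields follow the
 * documentation above; foo_* names are hypothetical, and real drivers
 * normally just use the generic range manager exported at the end of this
 * file as ttm_bo_manager_func.
 */
#if 0
struct foo_manager {
	spinlock_t lock;
	bool in_use;
};

static int foo_man_get_node(struct ttm_mem_type_manager *man,
			    struct ttm_buffer_object *bo,
			    struct ttm_placement *placement,
			    struct ttm_mem_reg *mem)
{
	struct foo_manager *fman = man->priv;	/* set up by init() */

	spin_lock(&fman->lock);
	if (fman->in_use) {
		/* No space: leave mem->mm_node NULL and report success. */
		spin_unlock(&fman->lock);
		return 0;
	}
	fman->in_use = true;
	spin_unlock(&fman->lock);

	mem->mm_node = fman;	/* any non-NULL cookie the manager understands */
	mem->start = 0;		/* first managed page of the range */
	return 0;
}

static void foo_man_put_node(struct ttm_mem_type_manager *man,
			     struct ttm_mem_reg *mem)
{
	struct foo_manager *fman = man->priv;

	spin_lock(&fman->lock);
	fman->in_use = false;
	spin_unlock(&fman->lock);
	mem->mm_node = NULL;
}
#endif
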
struct ttm_mem_type_manager {
	struct ttm_bo_device *bdev;

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	unsigned long gpu_offset;
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;
	const struct ttm_mem_type_manager_func *func;
	void *priv;

	/*
	 * Protected by the global->lru_lock.
	 */

	struct list_head lru;
};

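/*
 * Illustrative sketch, not part of the original header: a driver's
 * init_mem_type callback (see struct ttm_bo_driver below) filling in one of
 * these managers. The TTM_PL_* and TTM_PL_FLAG_* values come from the
 * placement header rather than this file, and the foo_* names are
 * hypothetical.
 */
#if 0
static int foo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;	/* generic range manager */
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		man->gpu_offset = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
#endif
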
/**
 * struct ttm_bo_driver
 *
 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 * @sync_obj_signaled: See ttm_fence_api.h
 * @sync_obj_wait: See ttm_fence_api.h
 * @sync_obj_flush: See ttm_fence_api.h
 * @sync_obj_unref: See ttm_fence_api.h
 * @sync_obj_ref: See ttm_fence_api.h
 */

struct ttm_bo_driver {
	/**
	 * struct ttm_bo_driver member create_ttm_backend_entry
	 *
	 * @bdev: The buffer object device.
	 *
	 * Create a driver specific struct ttm_backend.
	 */

	struct ttm_backend *(*create_ttm_backend_entry)
	 (struct ttm_bo_device *bdev);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */

	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
			      struct ttm_mem_type_manager *man);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */

	void (*evict_flags) (struct ttm_buffer_object *bo,
			     struct ttm_placement *placement);
	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @interruptible: Use interruptible sleeps if possible when sleeping.
	 * @no_wait: whether this should give up and return -EBUSY
	 * if this move would require sleeping
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move) (struct ttm_buffer_object *bo,
		     bool evict, bool interruptible,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access) (struct ttm_buffer_object *bo,
			      struct file *filp);

	/**
	 * In case a driver writer dislikes the TTM fence objects,
	 * the driver writer can replace those with sync objects of
	 * his / her own. If it turns out that no driver writer is
	 * using these, I suggest we remove these hooks and plug in
	 * fences directly. The bo driver needs the following functionality:
	 * See the corresponding functions in the fence object API
	 * documentation.
	 */

	bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
	int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
			      bool lazy, bool interruptible);
	int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
	void (*sync_obj_unref) (void **sync_obj);
	void *(*sync_obj_ref) (void *sync_obj);

	/* hook to notify driver about a driver move so it
	 * can do tiling things */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *new_mem);
	/* notify the driver we are taking a fault on this BO
	 * and have reserved it */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * notify the driver that we're about to swap out this bo
	 */
	void (*swap_notify) (struct ttm_buffer_object *bo);

	/**
	 * Driver callback on when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is not used anymore. io_mem_reserve & io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
};

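/*
 * Illustrative sketch, not part of the original header: a driver usually
 * exposes one static table of these callbacks. Only a subset of the hooks is
 * shown and every foo_* symbol is hypothetical; per the comments above, a
 * NULL move falls back to a memcpy move and a NULL verify_access refuses
 * map / read / write access.
 */
#if 0
static struct ttm_bo_driver foo_bo_driver = {
	.create_ttm_backend_entry = foo_create_ttm_backend_entry,
	.invalidate_caches = foo_invalidate_caches,
	.init_mem_type = foo_init_mem_type,
	.evict_flags = foo_evict_flags,
	.move = foo_bo_move,
	.verify_access = foo_verify_access,
	.io_mem_reserve = foo_io_mem_reserve,
	.io_mem_free = foo_io_mem_free,
};
#endif
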
/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
	struct drm_global_reference ref;
	struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
 * used by a buffer object. This is excluding page arrays and backing pages.
 * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {

	/**
	 * Constant after init.
	 */

	struct kobject kobj;
	struct ttm_mem_global *mem_glob;
	struct page *dummy_read_page;
	struct ttm_mem_shrink shrink;
	size_t ttm_bo_extra_size;
	size_t ttm_bo_size;
	struct mutex device_list_mutex;
	spinlock_t lru_lock;

	/**
	 * Protected by device_list_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru;

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
};


#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING  0	/* Buffer object is moving and needs
					   idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX 1
/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct set up by the driver.
 * @man: An array of mem_type_managers.
 * @addr_space_mm: Range manager for the device address space.
 * @lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
 * If a GPU lockup has been detected, this is forced to 0.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 *
 */

struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_global *glob;
	struct ttm_bo_driver *driver;
	rwlock_t vm_lock;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
	/*
	 * Protected by the vm lock.
	 */
	struct rb_root addr_space_rb;
	struct drm_mm addr_space_mm;

	/*
	 * Protected by the global:lru lock.
	 */
	struct list_head ddestroy;

	/*
	 * Protected by load / firstopen / lastclose / unload sync.
	 */

	bool nice_mode;
	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */

	struct delayed_work wq;

	bool need_dma32;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	*old ^= (*old ^ new) & mask;
	return *old;
}

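/*
 * Worked example (not part of the original header): ttm_flag_masked()
 * replaces exactly the bits selected by @mask with the corresponding bits of
 * @new and leaves all other bits of *@old untouched, e.g.:
 *
 *	*old = 0xa (1010b), new = 0x5 (0101b), mask = 0x3 (0011b)
 *	*old ^ new          = 1111b
 *	(*old ^ new) & mask = 0011b
 *	*old ^= 0011b       = 1001b = 0x9
 *
 * i.e. the low two bits now come from @new while the high bits are kept.
 */
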
/**
 * ttm_tt_create
 *
 * @bdev: pointer to a struct ttm_bo_device.
 * @size: Size of the data needing backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * NULL: Out of memory.
 */
extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
				    unsigned long size,
				    uint32_t page_flags,
				    struct page *dummy_read_page);

/**
 * ttm_tt_set_user:
 *
 * @ttm: The struct ttm_tt to populate.
 * @tsk: A struct task_struct for which @start is a valid user-space address.
 * @start: A valid user-space address.
 * @num_pages: Size in pages of the user memory area.
 *
 * Populate a struct ttm_tt with a user-space memory area after first pinning
 * the pages backing it.
 * Returns:
 * !0: Error.
 */

extern int ttm_tt_set_user(struct ttm_tt *ttm,
			   struct task_struct *tsk,
			   unsigned long start, unsigned long num_pages);

/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem.
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_populate:
 *
 * @ttm: The struct ttm_tt to contain the backing pages.
 *
 * Add backing pages to all of @ttm.
 */
extern int ttm_tt_populate(struct ttm_tt *ttm);

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy a struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);

/**
 * ttm_tt_get_page:
 *
 * @ttm: The struct ttm_tt.
 * @index: Index of the desired page.
 *
 * Return a pointer to the struct page backing @ttm at page
 * index @index. If the page is unpopulated, one will be allocated to
 * populate that index.
 *
 * Returns:
 * NULL on OOM.
 */
extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);

/**
 * ttm_tt_cache_flush:
 *
 * @pages: An array of pointers to struct page:s to flush.
 * @num_pages: Number of pages to flush.
 *
 * Flush the data of the indicated pages from the cpu caches.
 * This is used when changing caching attributes of the pages from
 * cache-coherent.
 */
extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change caching policy of any default kernel mappings of
 * the pages backing @ttm. If changing from cached to uncached or
 * write-combined, all CPU caches will first be flushed to make sure the
 * data of the pages hit RAM. This function may be very costly as it involves
 * global TLB and cache flushes and potential page splitting / combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
			  struct file *persistant_swap_storage);

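/*
 * Illustrative sketch, not part of the original header: the typical life
 * cycle of a struct ttm_tt using the functions declared above. Error
 * handling is omitted and the foo_* names are hypothetical.
 */
#if 0
static int foo_tt_lifecycle(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *bo_mem, unsigned long size)
{
	struct ttm_tt *ttm;

	ttm = ttm_tt_create(bdev, size, TTM_PAGE_FLAG_ZERO_ALLOC,
			    bdev->glob->dummy_read_page);
	if (!ttm)
		return -ENOMEM;

	ttm_tt_populate(ttm);		/* allocate the backing pages */
	ttm_tt_bind(ttm, bo_mem);	/* bind them at the location in @bo_mem */

	/* ... the buffer is used ... */

	ttm_tt_unbind(ttm);
	ttm_tt_destroy(ttm);		/* unbinds, unpopulates and frees */
	return 0;
}
#endif
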
/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
			       struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @proposed_placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible when waiting.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @mem, potentially evicting other idle buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == 1).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement,
			    struct ttm_mem_reg *mem,
			    bool interruptible,
			    bool no_wait_reserve, bool no_wait_gpu);

extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem);
extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);

/**
 * ttm_bo_wait_cpu
 *
 * @bo: Pointer to a struct ttm_buffer_object.
 * @no_wait: Don't sleep while waiting.
 *
 * Wait until a buffer object is no longer sync'ed for CPU access.
 * Returns:
 * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */

extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);

/**
 * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 * @bus_base: On return the base of the PCI region.
 * @bus_offset: On return the byte offset into the PCI region.
 * @bus_size: On return the byte size of the buffer object or zero if
 * the buffer object memory is not accessible through a PCI region.
 *
 * Returns:
 * -EINVAL if the buffer object is currently not mappable.
 * 0 otherwise.
 */

extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
			     struct ttm_mem_reg *mem,
			     unsigned long *bus_base,
			     unsigned long *bus_offset,
			     unsigned long *bus_size);

extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem);
extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem);

extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device.
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
			      struct ttm_bo_global *glob,
			      struct ttm_bo_driver *driver,
			      uint64_t file_page_offset, bool need_dma32);

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

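/*
 * Illustrative sketch, not part of the original header: a typical driver-load
 * call of ttm_bo_device_init(). The ttm_bo_global instance is assumed to have
 * been obtained by registering a struct ttm_bo_global_ref (see above) through
 * the drm_global mechanism from drm_global.h; foo_*, FOO_FILE_PAGE_OFFSET and
 * the surrounding device structure are hypothetical.
 */
#if 0
static int foo_ttm_init(struct foo_device *foo)
{
	struct ttm_bo_global *glob = foo->bo_global_ref.ref.object;

	/* FOO_FILE_PAGE_OFFSET carves out the mmap offset range reserved
	 * for buffer data, as described for @file_page_offset above. */
	return ttm_bo_device_init(&foo->bdev, glob, &foo_bo_driver,
				  FOO_FILE_PAGE_OFFSET, foo->need_dma32);
}
#endif
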
/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_sequence: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 *
 * Locks a buffer object for validation. (Or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted). The following algorithm prevents such deadlocks from
 * occurring:
 * 1) Buffers are reserved with the lru spinlock held. Upon successful
 * reservation they are removed from the lru list. This stops a reserved buffer
 * from being evicted. However the lru spinlock is released between the time
 * a buffer is selected for eviction and the time it is reserved.
 * Therefore a check is made when a buffer is reserved for eviction, that it
 * is still the first buffer in the lru list, before it is removed from the
 * list. @check_lru == 1 forces this check. If it fails, the function returns
 * -EINVAL, and the caller should then choose a new buffer to evict and repeat
 * the procedure.
 * 2) Processes attempting to reserve multiple buffers other than for eviction
 * (typically execbuf) should first obtain a unique 32-bit
 * validation sequence number,
 * and call this function with @use_sequence == 1 and @sequence == the unique
 * sequence number. If upon call of this function, the buffer object is already
 * reserved, the validation sequence is checked against the validation
 * sequence of the process currently reserving the buffer,
 * and if the current validation sequence is greater than that of the process
 * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
 * waiting for the buffer to become unreserved, after which it retries
 * reserving.
 * The caller should, when receiving an -EAGAIN error,
 * release all its buffer reservations, wait for @bo to become unreserved, and
 * then rerun the validation with the same validation sequence. This procedure
 * will always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EAGAIN: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if use_sequence == 1).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence);

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);

/**
 * ttm_bo_wait_unreserved
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Wait for a struct ttm_buffer_object to become unreserved.
 * This is typically used in the execbuf code to relax cpu-usage when
 * backing off from a potential deadlock condition.
 */
extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
				  bool interruptible);

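/*
 * Illustrative sketch, not part of the original header: the -EAGAIN back-off
 * protocol described above, reserving a list of buffers under one validation
 * sequence. struct foo_entry and the array-based list are hypothetical.
 */
#if 0
struct foo_entry {
	struct ttm_buffer_object *bo;
};

static int foo_reserve_list(struct foo_entry *entries, int count,
			    uint32_t val_seq)
{
	int i, ret;

retry:
	for (i = 0; i < count; ++i) {
		struct ttm_buffer_object *bo = entries[i].bo;

		ret = ttm_bo_reserve(bo, true, false, true, val_seq);
		if (ret == -EAGAIN) {
			/* Possible deadlock: drop everything reserved so far, */
			while (i--)
				ttm_bo_unreserve(entries[i].bo);
			/* wait for the contended buffer, then retry with the
			 * same validation sequence. */
			ret = ttm_bo_wait_unreserved(bo, true);
			if (ret)
				return ret;
			goto retry;
		}
		if (ret) {		/* e.g. -ERESTARTSYS */
			while (i--)
				ttm_bo_unreserve(entries[i].bo);
			return ret;
		}
	}
	return 0;
}
#endif
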
/*
 * ttm_bo_util.c
 */

/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
			   bool evict, bool no_wait_reserve,
			   bool no_wait_gpu, struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
			      bool evict, bool no_wait_reserve,
			      bool no_wait_gpu, struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when moving is complete.
 * @sync_obj_arg: An argument to pass to the sync object idle / wait
 * functions.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */

extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
				     void *sync_obj,
				     void *sync_obj_arg,
				     bool evict, bool no_wait_reserve,
				     bool no_wait_gpu,
				     struct ttm_mem_reg *new_mem);
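
/*
 * Illustrative sketch, not part of the original header: how a driver's move
 * callback (struct ttm_bo_driver::move) commonly combines the helpers above,
 * handing the fence of a scheduled hardware copy to ttm_bo_move_accel_cleanup()
 * and falling back to ttm_bo_move_memcpy() otherwise. foo_copy_schedule() and
 * the fence object are hypothetical.
 */
#if 0
static int foo_bo_move(struct ttm_buffer_object *bo,
		       bool evict, bool interruptible,
		       bool no_wait_reserve, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	void *fence;

	if (foo_copy_schedule(bo, new_mem, &fence) == 0)
		/* Pipeline the move behind the copy we just queued. */
		return ttm_bo_move_accel_cleanup(bo, fence, NULL, evict,
						 no_wait_reserve, no_wait_gpu,
						 new_mem);

	/* No accelerated path available: slow but safe CPU copy. */
	return ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
				  no_wait_gpu, new_mem);
}
#endif
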
/**
 * ttm_io_prot
 *
 * @c_state: Caching state.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @c_state.
 */
extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);

extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;

#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define TTM_HAS_AGP
#include <linux/agp_backend.h>

/**
 * ttm_agp_backend_init
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
						struct agp_bridge_data *bridge);
#endif

#endif