Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
ttm_bo_driver.h at tag v4.13-rc7 (1101 lines, 34 kB)
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <drm/drm_mm.h>
#include <drm/drm_global.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/reservation.h>

#include "ttm_bo_api.h"
#include "ttm_memory.h"
#include "ttm_module.h"
#include "ttm_placement.h"

#define TTM_MAX_BO_PRIORITY     4U

struct ttm_backend_func {
        /**
         * struct ttm_backend_func member bind
         *
         * @ttm: Pointer to a struct ttm_tt.
         * @bo_mem: Pointer to a struct ttm_mem_reg describing the
         * memory type and location for binding.
         *
         * Bind the backend pages into the aperture in the location
         * indicated by @bo_mem. This function should be able to handle
         * differences between aperture and system page sizes.
         */
        int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

        /**
         * struct ttm_backend_func member unbind
         *
         * @ttm: Pointer to a struct ttm_tt.
         *
         * Unbind previously bound backend pages. This function should be
         * able to handle differences between aperture and system page sizes.
         */
        int (*unbind) (struct ttm_tt *ttm);

        /**
         * struct ttm_backend_func member destroy
         *
         * @ttm: Pointer to a struct ttm_tt.
         *
         * Destroy the backend. This is called back from ttm_tt_destroy,
         * so don't call ttm_tt_destroy from the callback or you will get
         * an infinite loop.
         */
        void (*destroy) (struct ttm_tt *ttm);
};

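/*
 * Example (editor's sketch, not part of the upstream header): a driver
 * typically embeds struct ttm_tt in its own TT object and points
 * struct ttm_backend_func at its aperture-programming code. All names with
 * a my_ prefix below are hypothetical driver code (my_gart_map() and
 * my_gart_unmap() stand in for whatever programs the device aperture);
 * only the TTM types and helpers come from this header.
 *
 *	struct my_ttm_tt {
 *		struct ttm_tt ttm;
 *		u64 gart_offset;
 *	};
 *
 *	static int my_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 *	{
 *		struct my_ttm_tt *tt = container_of(ttm, struct my_ttm_tt, ttm);
 *
 *		tt->gart_offset = (u64)bo_mem->start << PAGE_SHIFT;
 *		return my_gart_map(tt);
 *	}
 *
 *	static int my_backend_unbind(struct ttm_tt *ttm)
 *	{
 *		return my_gart_unmap(container_of(ttm, struct my_ttm_tt, ttm));
 *	}
 *
 *	static void my_backend_destroy(struct ttm_tt *ttm)
 *	{
 *		struct my_ttm_tt *tt = container_of(ttm, struct my_ttm_tt, ttm);
 *
 *		ttm_tt_fini(ttm);
 *		kfree(tt);
 *	}
 *
 *	static struct ttm_backend_func my_backend_func = {
 *		.bind    = my_backend_bind,
 *		.unbind  = my_backend_unbind,
 *		.destroy = my_backend_destroy,
 *	};
 */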
#define TTM_PAGE_FLAG_WRITE           (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
#define TTM_PAGE_FLAG_DMA32           (1 << 7)
#define TTM_PAGE_FLAG_SG              (1 << 8)

enum ttm_caching_state {
        tt_uncached,
        tt_wc,
        tt_cached
};

/**
 * struct ttm_tt
 *
 * @bdev: Pointer to the current struct ttm_bo_device.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @num_pages: Number of pages in the page array.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
        struct ttm_bo_device *bdev;
        struct ttm_backend_func *func;
        struct page *dummy_read_page;
        struct page **pages;
        uint32_t page_flags;
        unsigned long num_pages;
        struct sg_table *sg;     /* for SG objects via dma-buf */
        struct ttm_bo_global *glob;
        struct file *swap_storage;
        enum ttm_caching_state caching_state;
        enum {
                tt_bound,
                tt_unbound,
                tt_unpopulated,
        } state;
};

/**
 * struct ttm_dma_tt
 *
 * @ttm: Base ttm_tt struct.
 * @dma_address: The DMA (bus) addresses of the pages
 * @pages_list: used by some page allocation backend
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */
struct ttm_dma_tt {
        struct ttm_tt ttm;
        dma_addr_t *dma_address;
        struct list_head pages_list;
};

#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)  /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)  /* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA           (1 << 3)  /* Can't map aperture */

struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
        /**
         * struct ttm_mem_type_manager member init
         *
         * @man: Pointer to a memory type manager.
         * @p_size: Implementation dependent, but typically the size of the
         * range to be managed in pages.
         *
         * Called to initialize a private range manager. The function is
         * expected to initialize the man::priv member.
         * Returns 0 on success, negative error code on failure.
         */
        int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

        /**
         * struct ttm_mem_type_manager member takedown
         *
         * @man: Pointer to a memory type manager.
         *
         * Called to undo the setup done in init. All allocated resources
         * should be freed.
         */
        int (*takedown)(struct ttm_mem_type_manager *man);

        /**
         * struct ttm_mem_type_manager member get_node
         *
         * @man: Pointer to a memory type manager.
         * @bo: Pointer to the buffer object we're allocating space for.
         * @place: Placement details, including placement flags.
         * @mem: Pointer to a struct ttm_mem_reg to be filled in.
         *
         * This function should allocate space in the memory type managed
         * by @man. Placement details if
         * applicable are given by @place. If successful,
         * @mem::mm_node should be set to a non-null value, and
         * @mem::start should be set to a value identifying the beginning
         * of the range allocated, and the function should return zero.
         * If the memory region can't accommodate the buffer object,
         * @mem::mm_node should be set to NULL, and the function should
         * return 0.
         * If a system error occurred, preventing the request from being
         * fulfilled, the function should return a negative error code.
         *
         * Note that @mem::mm_node will only be dereferenced by
         * struct ttm_mem_type_manager functions and optionally by the driver,
         * which has knowledge of the underlying type.
         *
         * This function may not be called from within atomic context, so
         * an implementation can and must use either a mutex or a spinlock to
         * protect any data structures managing the space.
         */
        int (*get_node)(struct ttm_mem_type_manager *man,
                        struct ttm_buffer_object *bo,
                        const struct ttm_place *place,
                        struct ttm_mem_reg *mem);

        /**
         * struct ttm_mem_type_manager member put_node
         *
         * @man: Pointer to a memory type manager.
         * @mem: Pointer to a struct ttm_mem_reg to be filled in.
         *
         * This function frees memory type resources previously allocated
         * and that are identified by @mem::mm_node and @mem::start. May not
         * be called from within atomic context.
         */
        void (*put_node)(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem);

        /**
         * struct ttm_mem_type_manager member debug
         *
         * @man: Pointer to a memory type manager.
         * @prefix: Prefix to be used in printout to identify the caller.
         *
         * This function is called to print out the state of the memory
         * type manager to aid debugging of out-of-memory conditions.
         * It may not be called from within atomic context.
         */
        void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};

/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
 * @move_lock: lock for move fence
 * @lru: The lru list for this memory type.
 * @move: The fence of the last pipelined move operation.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {
        struct ttm_bo_device *bdev;

        /*
         * No protection. Constant from start.
         */

        bool has_type;
        bool use_type;
        uint32_t flags;
        uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
        uint64_t size;
        uint32_t available_caching;
        uint32_t default_caching;
        const struct ttm_mem_type_manager_func *func;
        void *priv;
        struct mutex io_reserve_mutex;
        bool use_io_reserve_lru;
        bool io_reserve_fastpath;
        spinlock_t move_lock;

        /*
         * Protected by @io_reserve_mutex:
         */

        struct list_head io_reserve_lru;

        /*
         * Protected by the global->lru_lock.
         */

        struct list_head lru[TTM_MAX_BO_PRIORITY];

        /*
         * Protected by @move_lock.
         */
        struct dma_fence *move;
};

/**
 * struct ttm_bo_driver
 *
 * @ttm_tt_create: Callback to create a struct ttm_tt backend.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 */

struct ttm_bo_driver {
        /**
         * ttm_tt_create
         *
         * @bdev: pointer to a struct ttm_bo_device.
         * @size: Size of the data that needs backing.
         * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
         * @dummy_read_page: See struct ttm_bo_device.
         *
         * Create a struct ttm_tt to back data with system memory pages.
         * No pages are actually allocated.
         * Returns:
         * NULL: Out of memory.
         */
        struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
                                        unsigned long size,
                                        uint32_t page_flags,
                                        struct page *dummy_read_page);

        /**
         * ttm_tt_populate
         *
         * @ttm: The struct ttm_tt to contain the backing pages.
         *
         * Allocate all backing pages.
         * Returns:
         * -ENOMEM: Out of memory.
         */
        int (*ttm_tt_populate)(struct ttm_tt *ttm);

        /**
         * ttm_tt_unpopulate
         *
         * @ttm: The struct ttm_tt to contain the backing pages.
         *
         * Free all backing pages.
         */
        void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

        /**
         * struct ttm_bo_driver member invalidate_caches
         *
         * @bdev: the buffer object device.
         * @flags: new placement of the rebound buffer object.
         *
         * A previously evicted buffer has been rebound in a
         * potentially new location. Tell the driver that it might
         * consider invalidating read (texture) caches on the next command
         * submission as a consequence.
         */

        int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
        int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
                             struct ttm_mem_type_manager *man);

        /**
         * struct ttm_bo_driver member eviction_valuable
         *
         * @bo: the buffer object to be evicted
         * @place: placement we need room for
         *
         * Check with the driver if it is valuable to evict a BO to make room
         * for a certain placement.
         */
        bool (*eviction_valuable)(struct ttm_buffer_object *bo,
                                  const struct ttm_place *place);
        /**
         * struct ttm_bo_driver member evict_flags:
         *
         * @bo: the buffer object to be evicted
         *
         * Return the bo flags for a buffer which is not mapped to the hardware.
         * These will be placed in proposed_flags so that when the move is
         * finished, they'll end up in bo->mem.flags
         */

        void (*evict_flags)(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement);

        /**
         * struct ttm_bo_driver member move:
         *
         * @bo: the buffer to move
         * @evict: whether this motion is evicting the buffer from
         * the graphics address space
         * @interruptible: Use interruptible sleeps if possible when sleeping.
         * @no_wait_gpu: whether this should give up and return -EBUSY
         * if this move would require sleeping
         * @new_mem: the new memory region receiving the buffer
         *
         * Move a buffer between two memory regions.
         */
        int (*move)(struct ttm_buffer_object *bo, bool evict,
                    bool interruptible, bool no_wait_gpu,
                    struct ttm_mem_reg *new_mem);

        /**
         * struct ttm_bo_driver_member verify_access
         *
         * @bo: Pointer to a buffer object.
         * @filp: Pointer to a struct file trying to access the object.
         *
         * Called from the map / write / read methods to verify that the
         * caller is permitted to access the buffer object.
         * This member may be set to NULL, which will refuse this kind of
         * access for all buffer objects.
         * This function should return 0 if access is granted, -EPERM otherwise.
         */
        int (*verify_access)(struct ttm_buffer_object *bo,
                             struct file *filp);

        /**
         * Hook to notify driver about a driver move so it
         * can do tiling things and book-keeping.
         *
         * @evict: whether this move is evicting the buffer from the graphics
         * address space
         */
        void (*move_notify)(struct ttm_buffer_object *bo,
                            bool evict,
                            struct ttm_mem_reg *new_mem);
        /* notify the driver we are taking a fault on this BO
         * and have reserved it */
        int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

        /**
         * notify the driver that we're about to swap out this bo
         */
        void (*swap_notify)(struct ttm_buffer_object *bo);

        /**
         * Driver callback on when mapping io memory (for bo_move_memcpy
         * for instance). TTM will take care to call io_mem_free whenever
         * the mapping is no longer used. io_mem_reserve & io_mem_free
         * are balanced.
         */
        int (*io_mem_reserve)(struct ttm_bo_device *bdev,
                              struct ttm_mem_reg *mem);
        void (*io_mem_free)(struct ttm_bo_device *bdev,
                            struct ttm_mem_reg *mem);

        /**
         * Return the pfn for a given page_offset inside the BO.
         *
         * @bo: the BO to look up the pfn for
         * @page_offset: the offset to look up
         */
        unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
                                    unsigned long page_offset);
};

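/*
 * Example (editor's sketch, not part of the upstream header): a minimal
 * driver vtable wiring up the callbacks above. The my_ prefixed handlers
 * are hypothetical; optional hooks such as move, move_notify or io_mem_pfn
 * can simply be left NULL, in which case TTM falls back to its generic
 * paths (e.g. a memcpy move). eviction_valuable is often set to the stock
 * TTM helper ttm_bo_eviction_valuable().
 *
 *	static struct ttm_bo_driver my_bo_driver = {
 *		.ttm_tt_create     = my_ttm_tt_create,
 *		.ttm_tt_populate   = my_ttm_tt_populate,
 *		.ttm_tt_unpopulate = my_ttm_tt_unpopulate,
 *		.invalidate_caches = my_invalidate_caches,
 *		.init_mem_type     = my_init_mem_type,
 *		.eviction_valuable = ttm_bo_eviction_valuable,
 *		.evict_flags       = my_evict_flags,
 *		.verify_access     = my_verify_access,
 *		.io_mem_reserve    = my_io_mem_reserve,
 *		.io_mem_free       = my_io_mem_free,
 *	};
 */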
/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
        struct drm_global_reference ref;
        struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {

        /**
         * Constant after init.
         */

        struct kobject kobj;
        struct ttm_mem_global *mem_glob;
        struct page *dummy_read_page;
        struct ttm_mem_shrink shrink;
        struct mutex device_list_mutex;
        spinlock_t lru_lock;

        /**
         * Protected by device_list_mutex.
         */
        struct list_head device_list;

        /**
         * Protected by the lru_lock.
         */
        struct list_head swap_lru[TTM_MAX_BO_PRIORITY];

        /**
         * Internal protection.
         */
        atomic_t bo_count;
};


#define TTM_NUM_MEM_TYPES 8

/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @vma_manager: Address space manager
 * lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 *
 */

struct ttm_bo_device {

        /*
         * Constant after bo device init / atomic.
         */
        struct list_head device_list;
        struct ttm_bo_global *glob;
        struct ttm_bo_driver *driver;
        struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];

        /*
         * Protected by internal locks.
         */
        struct drm_vma_offset_manager vma_manager;

        /*
         * Protected by the global::lru_lock.
         */
        struct list_head ddestroy;

        /*
         * Protected by load / firstopen / lastclose / unload sync.
         */

        struct address_space *dev_mapping;

        /*
         * Internal protection.
         */

        struct delayed_work wq;

        bool need_dma32;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
        *old ^= (*old ^ new) & mask;
        return *old;
}

/**
 * ttm_tt_init
 *
 * @ttm: The struct ttm_tt.
 * @bdev: pointer to a struct ttm_bo_device.
 * @size: Size of the data that needs backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Initialize a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * 0 on success, negative error code on failure.
 */
extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
                       unsigned long size, uint32_t page_flags,
                       struct page *dummy_read_page);
extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
                           unsigned long size, uint32_t page_flags,
                           struct page *dummy_read_page);

/**
 * ttm_tt_fini
 *
 * @ttm: the ttm_tt structure.
 *
 * Free memory of ttm_tt structure
 */
extern void ttm_tt_fini(struct ttm_tt *ttm);
extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);

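/*
 * Example (editor's sketch, not part of the upstream header): how a driver's
 * ttm_tt_create callback commonly pairs ttm_tt_init() with ttm_tt_fini().
 * my_ttm_backend_func is the hypothetical struct ttm_backend_func instance
 * from the driver; everything else comes from this header.
 *
 *	static struct ttm_tt *my_ttm_tt_create(struct ttm_bo_device *bdev,
 *					       unsigned long size,
 *					       uint32_t page_flags,
 *					       struct page *dummy_read_page)
 *	{
 *		struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *		if (!tt)
 *			return NULL;
 *		tt->func = &my_ttm_backend_func;
 *		if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return tt;
 *	}
 *
 * Drivers that need DMA addresses embed a struct ttm_dma_tt instead and use
 * ttm_dma_tt_init() / ttm_dma_tt_fini() in the same way.
 */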
/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy common struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);

/**
 * ttm_tt_swapin:
 *
 * @ttm: The struct ttm_tt.
 *
 * Swap in a previously swapped-out ttm_tt.
 */
extern int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change caching policy of any default kernel mappings of
 * the pages backing @ttm. If changing from cached to uncached or
 * write-combined, all CPU caches will first be flushed to make sure the
 * data of the pages hit RAM. This function may be very costly as it involves
 * global TLB and cache flushes and potential page splitting / combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
                          struct file *persistent_swap_storage);

/**
 * ttm_tt_unpopulate - free pages from a ttm
 *
 * @ttm: Pointer to the ttm_tt structure
 *
 * Calls the driver method to free all pages from a ttm
 */
extern void ttm_tt_unpopulate(struct ttm_tt *ttm);

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
                               struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible when sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @mem, potentially evicting other idle buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if @no_wait_gpu is true).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement,
                            struct ttm_mem_reg *mem,
                            bool interruptible,
                            bool no_wait_gpu);

extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem);
extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem);

extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @mapping: The address space to use for this bo.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device:
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
                              struct ttm_bo_global *glob,
                              struct ttm_bo_driver *driver,
                              struct address_space *mapping,
                              uint64_t file_page_offset, bool need_dma32);

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);

extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
                           bool interruptible);
extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);

extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);

/**
 * __ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if a @ticket is used).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if a @ticket is used.
 */
static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
                                   bool interruptible, bool no_wait,
                                   struct ww_acquire_ctx *ticket)
{
        int ret = 0;

        if (no_wait) {
                bool success;
                if (WARN_ON(ticket))
                        return -EBUSY;

                success = ww_mutex_trylock(&bo->resv->lock);
                return success ? 0 : -EBUSY;
        }

        if (interruptible)
                ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
        else
                ret = ww_mutex_lock(&bo->resv->lock, ticket);
        if (ret == -EINTR)
                return -ERESTARTSYS;
        return ret;
}

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted). The following algorithm prevents such deadlocks from
 * occurring:
 * Processes attempting to reserve multiple buffers other than for eviction,
 * (typically execbuf), should first obtain a unique 32-bit
 * validation sequence number,
 * and call this function with a @ticket whose stamp is set to that unique
 * sequence number. If upon call of this function, the buffer object is already
 * reserved, the validation sequence is checked against the validation
 * sequence of the process currently reserving the buffer,
 * and if the current validation sequence is greater than that of the process
 * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps
 * waiting for the buffer to become unreserved, after which it retries
 * reserving.
 * The caller should, when receiving an -EDEADLK error,
 * release all its buffer reservations, wait for @bo to become unreserved, and
 * then rerun the validation with the same validation sequence. This procedure
 * will always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if a @ticket is used).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if a @ticket is used.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
                                 bool interruptible, bool no_wait,
                                 struct ww_acquire_ctx *ticket)
{
        int ret;

        WARN_ON(!kref_read(&bo->kref));

        ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
        if (likely(ret == 0))
                ttm_bo_del_sub_from_lru(bo);

        return ret;
}

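/*
 * Example (editor's sketch, not part of the upstream header): the common
 * single-buffer pattern built on ttm_bo_reserve() and ttm_bo_unreserve()
 * (defined further below). my_validate() is hypothetical and error handling
 * is abbreviated.
 *
 *	int ret;
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = my_validate(bo);
 *	ttm_bo_unreserve(bo);
 *	return ret;
 *
 * When reserving several buffers for one submission, a struct ww_acquire_ctx
 * initialized with ww_acquire_init(&ctx, &reservation_ww_class) is passed as
 * @ticket, and an -EDEADLK return is handled by backing off and retrying via
 * ttm_bo_reserve_slowpath() below, as described in the documentation above.
 */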
/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
                                          bool interruptible,
                                          struct ww_acquire_ctx *ticket)
{
        int ret = 0;

        WARN_ON(!kref_read(&bo->kref));

        if (interruptible)
                ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
                                                       ticket);
        else
                ww_mutex_lock_slow(&bo->resv->lock, ticket);

        if (likely(ret == 0))
                ttm_bo_del_sub_from_lru(bo);
        else if (ret == -EINTR)
                ret = -ERESTARTSYS;

        return ret;
}

/**
 * __ttm_bo_unreserve
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo where the buffer object is
 * already on lru lists.
 */
static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        ww_mutex_unlock(&bo->resv->lock);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
                spin_lock(&bo->glob->lru_lock);
                ttm_bo_add_to_lru(bo);
                spin_unlock(&bo->glob->lru_lock);
        }
        __ttm_bo_unreserve(bo);
}

/**
 * ttm_bo_unreserve_ticket
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ticket: ww_acquire_ctx used for reserving
 *
 * Unreserve a previous reservation of @bo made with @ticket.
 */
static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
                                           struct ww_acquire_ctx *t)
{
        ttm_bo_unreserve(bo);
}

/*
 * ttm_bo_util.c
 */

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem);
/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                           bool interruptible, bool no_wait_gpu,
                           struct ttm_mem_reg *new_mem);

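/*
 * Example (editor's sketch, not part of the upstream header): a driver's
 * move() callback commonly tries an accelerated copy first and falls back
 * to the generic helpers declared around here. my_copy_with_engine(), which
 * is assumed to return 0 and hand back a fence on success, is hypothetical.
 *
 *	static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
 *			      bool interruptible, bool no_wait_gpu,
 *			      struct ttm_mem_reg *new_mem)
 *	{
 *		struct dma_fence *fence;
 *
 *		if (my_copy_with_engine(bo, new_mem, &fence) == 0)
 *			return ttm_bo_move_accel_cleanup(bo, fence, evict,
 *							 new_mem);
 *		return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu,
 *					  new_mem);
 *	}
 */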
/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                              bool interruptible, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */

extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                                     struct dma_fence *fence, bool evict,
                                     struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_pipeline_move.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Function for pipelining accelerated moves. Either free the memory
 * immediately or hang it on a temporary buffer object.
 */
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
                         struct dma_fence *fence, bool evict,
                         struct ttm_mem_reg *new_mem);

/**
 * ttm_io_prot
 *
 * @caching_flags: Caching flags of the map.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */
extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);

extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;

#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>

/**
 * ttm_agp_tt_create
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
                                        struct agp_bridge_data *bridge,
                                        unsigned long size, uint32_t page_flags,
                                        struct page *dummy_read_page);
int ttm_agp_tt_populate(struct ttm_tt *ttm);
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
#endif

#endif