Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.10
/*
 * Copyright 2011 (c) Oracle Corp.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */

/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * over the DMA pools:
 * - Pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - Tracks currently in use pages
 * - Tracks whether the page is UC, WB or cached (and reverts to WB
 *   when freed).
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 4
#define FREE_ALL_PAGES (~0U)
#define VADDR_FLAG_HUGE_POOL 1UL
#define VADDR_FLAG_UPDATED_COUNT 2UL

enum pool_type {
	IS_UNDEFINED = 0,
	IS_WC = 1 << 1,
	IS_UC = 1 << 2,
	IS_CACHED = 1 << 3,
	IS_DMA32 = 1 << 4,
	IS_HUGE = 1 << 5
};

/*
 * The pool structure. There are up to nine pools:
 * - generic (not restricted to DMA32):
 *   - write combined, uncached, cached.
 * - dma32 (up to 2^32 - so up to 4GB):
 *   - write combined, uncached, cached.
 * - huge (not restricted to DMA32):
 *   - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' is for pages that are actively used.
 * The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool
 * @lock: Protects the free_list from concurrent access. Must be
 * used with irqsave/irqrestore variants because the pool allocator may be
 * called from delayed work.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools' link */
	enum pool_type type;
	spinlock_t lock;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};

/*
 * The accounting page keeping track of the allocated page along with
 * the DMA address.
 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
 * @vaddr: The virtual address of the page and a flag if the page belongs to a
 * huge pool
 * @dma: The bus address of the page. If the page is not allocated
 *   via the DMA API, it will be -1.
 */
struct dma_page {
	struct list_head page_list;
	unsigned long vaddr;
	struct page *p;
	dma_addr_t dma;
};

/*
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have immediate effect
 * anyway so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned alloc_size;
	unsigned max_size;
	unsigned small;
};

/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'
 * @dev: The 'struct device' associated with the 'pool'
 * @pool: The 'struct dma_pool' associated with the 'dev'
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};

/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total amount of pools in existence.
 * @shrinker: The structure used by [un|]register_shrinker
 */
struct ttm_pool_manager {
	struct mutex lock;
	struct list_head pools;
	struct ttm_pool_opts options;
	unsigned npools;
	struct shrinker mm_shrink;
	struct kobject kobj;
};

static struct ttm_pool_manager *_manager;

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}
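
/*
 * The sysfs store/show helpers below work in KiB: a value written to
 * pool_max_size, pool_small_allocation or pool_allocation_size is divided
 * by (PAGE_SIZE >> 10) to get a page count, and multiplied back for reads.
 * For example (assuming 4 KiB pages, the common case): writing 512 stores
 * 512 / 4 = 128 pages, which reads back as 128 * 4 = 512.
 */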
static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max) {
		m->options.max_size = val;
	} else if (attr == &ttm_page_pool_small) {
		m->options.small = val;
	} else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};
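
/*
 * Apply the pool's caching attribute to a freshly allocated page array:
 * uncached pools use ttm_set_pages_array_uc(), write-combined pools use
 * ttm_set_pages_array_wc(); cached (write-back) pools need no change.
 */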
static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	if (pool->type & IS_UC) {
		r = ttm_set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = ttm_set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}

static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
	unsigned long attrs = 0;
	dma_addr_t dma = d_page->dma;

	d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
	if (pool->type & IS_HUGE)
		attrs = DMA_ATTR_NO_WARN;

	dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs);

	kfree(d_page);
	d_page = NULL;
}
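
/*
 * Allocate one buffer of pool->size bytes with dma_alloc_attrs() and wrap it
 * in a dma_page that records both the struct page and the bus address. Huge
 * pools pass DMA_ATTR_NO_WARN since a failure there is handled by falling
 * back to the normal pool.
 */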
static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
	struct dma_page *d_page;
	unsigned long attrs = 0;
	void *vaddr;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (!d_page)
		return NULL;

	if (pool->type & IS_HUGE)
		attrs = DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
				pool->gfp_flags, attrs);
	if (vaddr) {
		if (is_vmalloc_addr(vaddr))
			d_page->p = vmalloc_to_page(vaddr);
		else
			d_page->p = virt_to_page(vaddr);
		d_page->vaddr = (unsigned long)vaddr;
		if (pool->type & IS_HUGE)
			d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
	} else {
		kfree(d_page);
		d_page = NULL;
	}
	return d_page;
}
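
/*
 * Map the TTM page flags and requested caching state onto a pool type:
 * TTM_PAGE_FLAG_DMA32 adds IS_DMA32, and the caching state selects
 * IS_CACHED, IS_UC or IS_WC.
 */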
static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;
	if (cstate == tt_cached)
		type |= IS_CACHED;
	else if (cstate == tt_uncached)
		type |= IS_UC;
	else
		type |= IS_WC;

	return type;
}

static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;
}

/* set memory back to wb and free the pages. */
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	struct page *page = d_page->p;
	unsigned num_pages;

	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED)) {
		num_pages = pool->size / PAGE_SIZE;
		if (ttm_set_pages_wb(page, num_pages))
			pr_err("%s: Failed to set %d pages to wb!\n",
			       pool->dev_name, num_pages);
	}

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}

static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	if (pool->type & IS_HUGE) {
		list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
			ttm_dma_page_put(pool, d_page);

		return;
	}

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    ttm_set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}

/*
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: If set to FREE_ALL_PAGES, all pages in the pool are freed.
 * @use_static: Safe to use static buffer
 **/
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
				       bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc_array(npages_to_free,
					      sizeof(struct page *),
					      GFP_KERNEL);

	if (!pages_to_free) {
		pr_debug("%s: Failed to allocate memory for pool free operation\n",
			 pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We're picking the oldest ones off the list */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}

static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	if (!dev)
		return;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		/* Takes a spinlock.. */
		/* OK to use static buffer since global mutex is held. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}

/*
 * On freeing of the 'struct device' this destructor is run,
 * even though the pool might have already been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}

static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}
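
/*
 * Create a pool of the given type for @dev: the pool is linked both into
 * the global _manager->pools list (via a struct device_pools entry) and
 * into dev->dma_pools, and a devres destructor is registered so it is
 * torn down together with the device.
 */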
static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
	int ret = -ENODEV;
	char *p;

	if (!dev)
		return NULL;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	ret = -ENOMEM;

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
			    dev_to_node(dev));
	if (!pool)
		goto err_mem;

	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
				dev_to_node(dev));
	if (!sec_pool)
		goto err_mem;

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	if (type & IS_HUGE)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pool->size = HPAGE_PMD_SIZE;
#else
		BUG();
#endif
	else
		pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < ARRAY_SIZE(t); i++) {
		if (type & t[i]) {
			p += scnprintf(p, sizeof(pool->name) - (p - pool->name),
				       "%s", n[i]);
		}
	}
	*p = 0;
	/* We copy the name for pr_ calls because when dma_pool_destroy is
	 * called - the kobj->name has already been deallocated. */
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	_manager->npools++;
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	*ptr = pool;
	devres_add(dev, ptr);

	return pool;
err_mem:
	devres_free(ptr);
	kfree(sec_pool);
	kfree(pool);
	return ERR_PTR(ret);
}

static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					  enum pool_type type)
{
	struct dma_pool *pool, *tmp;

	if (type == IS_UNDEFINED)
		return NULL;

	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphic driver loading - in the drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver which both end up taking a kref
	 * on 'struct device'.
	 *
	 * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
	 * and calling the dev_res destructors: ttm_dma_pool_release. The nice
	 * thing is at that point in time there are no pages associated with the
	 * driver so this function will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
		if (pool->type == type)
			return pool;
	return NULL;
}

/*
 * Free the pages that failed to change the caching state. If there
 * are pages that have already changed their caching state, put them back
 * in the pool.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i < cpages)
			p = failed_pages[i];
		else
			break;
	}
}

/*
 * Allocate 'count' pages and add them all to the 'd_pages' list, setting
 * their caching state as the pool requires.
 * We return zero for success, and negative numbers as errors.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, j, npages, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
				      GFP_KERNEL);

	if (!caching_array) {
		pr_debug("%s: Unable to allocate table for new pages\n",
			 pool->dev_name);
		return -ENOMEM;
	}

	if (count > 1)
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid, count);

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_debug("%s: Unable to get page %u\n",
				 pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
		list_add(&dma_p->page_list, d_pages);

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (PageHighMem(p))
			continue;
#endif

		npages = pool->size / PAGE_SIZE;
		for (j = 0; j < npages; ++j) {
			caching_array[cpages++] = p + j;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}

/*
 * @return count of pages still required to fulfill the request.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns how many more are necessary to fulfill the
		 * request. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh pages to the end.. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
				 pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list) {
				cpages++;
			}
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}

/*
 * The populate list is actually a stack (not that it matters, as TTM
 * allocates one page at a time).
 * Return the dma_page pointer on success, otherwise NULL.
 */
static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
					       struct ttm_dma_tt *ttm_dma,
					       unsigned index)
{
	struct dma_page *d_page = NULL;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return d_page;
}
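
/*
 * Build the gfp mask for an allocation from this ttm_tt: GFP_USER | GFP_DMA32
 * for DMA32 ttms, GFP_HIGHUSER otherwise, plus __GFP_ZERO, transparent
 * hugepage and no-retry modifiers depending on the page flags.
 */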
static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	gfp_t gfp_flags;

	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (huge) {
		gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
			__GFP_KSWAPD_RECLAIM;
		gfp_flags &= ~__GFP_MOVABLE;
		gfp_flags &= ~__GFP_COMP;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	return gfp_flags;
}

/*
 * On success the pages list will hold 'count' correctly cached pages. On
 * failure a negative error code (-ENOMEM, etc.) is returned.
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
		     struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long num_pages = ttm->num_pages;
	struct dma_pool *pool;
	struct dma_page *d_page;
	enum pool_type type;
	unsigned i;
	int ret;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
		return -ENOMEM;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	i = 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		goto skip_huge;

	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);

		pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
		if (IS_ERR_OR_NULL(pool))
			goto skip_huge;
	}

	while (num_pages >= HPAGE_PMD_NR) {
		unsigned j;

		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page)
			break;

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size, ctx);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
			ttm->pages[j] = ttm->pages[j - 1] + 1;
			ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
				PAGE_SIZE;
		}

		i += HPAGE_PMD_NR;
		num_pages -= HPAGE_PMD_NR;
	}

skip_huge:
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);

		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
			return -ENOMEM;
	}

	while (num_pages) {
		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size, ctx);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		++i;
		--num_pages;
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm_tt_set_populated(ttm);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);

/* Put all pages in the pages list back into the correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (pool) {
		count = 0;
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
					 page_list) {
			if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
				continue;

			count++;
			if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
				ttm_mem_global_free_page(mem_glob, d_page->p,
							 pool->size);
				d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
			}
			ttm_dma_page_put(pool, d_page);
		}

		spin_lock_irqsave(&pool->lock, irq_flags);
		pool->npages_in_use -= count;
		pool->nfrees += count;
		spin_unlock_irqrestore(&pool->lock, irq_flags);
	}
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* make sure pages array match list and count number of pages */
	count = 0;
	list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
				 page_list) {
		ttm->pages[count] = d_page->p;
		count++;

		if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
			ttm_mem_global_free_page(mem_glob, d_page->p,
						 pool->size);
			d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
		}

		if (is_cached)
			ttm_dma_page_put(pool, d_page);
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		/*
		 * Wait to have at least NUM_PAGES_TO_ALLOC number of pages
		 * to free in order to minimize calls to set_memory_wb().
		 */
		if (pool->npages_free >= (_manager->options.max_size +
					  NUM_PAGES_TO_ALLOC))
			npages = pool->npages_free - _manager->options.max_size;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink pool if necessary (only on !is_cached pools) */
	if (npages)
		ttm_dma_page_pool_free(pool, npages, false);
	ttm_tt_set_unpopulated(ttm);
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);

/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
 * shrinkers
 */
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static unsigned start_pool;
	unsigned idx = 0;
	unsigned pool_offset;
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;
	unsigned long freed = 0;

	if (list_empty(&_manager->pools))
		return SHRINK_STOP;

	if (!mutex_trylock(&_manager->lock))
		return SHRINK_STOP;
	if (!_manager->npools)
		goto out;
	pool_offset = ++start_pool % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		/* OK to use static buffer since global mutex is held. */
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
		freed += nr_free - shrink_pages;

		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
out:
	mutex_unlock(&_manager->lock);
	return freed;
}
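
/*
 * Report how many pages the shrinker could reclaim: the sum of npages_free
 * across all pools, or 0 if the manager lock is contended.
 */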
static unsigned long
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct device_pools *p;
	unsigned long count = 0;

	if (!mutex_trylock(&_manager->lock))
		return 0;
	list_for_each_entry(p, &_manager->pools, pools)
		count += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return count;
}

static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
	manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	return register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}
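
/*
 * Set up the global pool manager: allocate _manager, initialise the pool
 * limits, expose them in sysfs under "dma_pool" and register the shrinker.
 */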
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0))
		goto error;

	ret = ttm_dma_pool_mm_shrink_init(_manager);
	if (unlikely(ret != 0))
		goto error;
	return 0;

error:
	kobject_put(&_manager->kobj);
	_manager = NULL;
	return ret;
}

void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
				       ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}
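
/*
 * Dump per-pool statistics (refills, frees, pages in use and pages free)
 * into the given debugfs seq_file.
 */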
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "         pool      refills   pages freed    inuse available     name\n");
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;
		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
				pool->name, pool->nrefills,
				pool->nfrees, pool->npages_in_use,
				pool->npages_free,
				pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);