Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.4-rc3, 1242 lines, 33 kB
/*
 * Copyright 2011 (c) Oracle Corp.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */

/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * over the DMA pools:
 * - Pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - Tracks currently in use pages
 * - Tracks whether the page is UC, WB or cached (and reverts to WB
 *   when freed).
 */

#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		4
#define FREE_ALL_PAGES			(~0U)
#define VADDR_FLAG_HUGE_POOL		1UL
#define VADDR_FLAG_UPDATED_COUNT	2UL

enum pool_type {
	IS_UNDEFINED	= 0,
	IS_WC		= 1 << 1,
	IS_UC		= 1 << 2,
	IS_CACHED	= 1 << 3,
	IS_DMA32	= 1 << 4,
	IS_HUGE		= 1 << 5
};

/*
 * The pool structure. There are up to nine pools:
 * - generic (not restricted to DMA32):
 *   - write combined, uncached, cached.
 * - dma32 (up to 2^32 - so up to 4GB):
 *   - write combined, uncached, cached.
 * - huge (not restricted to DMA32):
 *   - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' is for pages that are actively used.
 * The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool
 * @lock: Protects the free_list from concurrent access. Must be
 * used with irqsave/irqrestore variants because the pool allocator may be
 * called from delayed work.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools' link */
	enum pool_type type;
	spinlock_t lock;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};

/*
 * The accounting page keeping track of the allocated page along with
 * the DMA address.
 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
 * @vaddr: The virtual address of the page and a flag if the page belongs to a
 * huge pool
 * @dma: The bus address of the page. If the page is not allocated
 *   via the DMA API, it will be -1.
 */
struct dma_page {
	struct list_head page_list;
	unsigned long vaddr;
	struct page *p;
	dma_addr_t dma;
};

/*
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have immediate effect
 * anyway so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _mutex->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'
 * @dev: The 'struct device' associated with the 'pool'
 * @pool: The 'struct dma_pool' associated with the 'dev'
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};

/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total amount of pools in existence.
 * @shrinker: The structure used by [un|]register_shrinker
 */
struct ttm_pool_manager {
	struct mutex		lock;
	struct list_head	pools;
	struct ttm_pool_opts	options;
	unsigned		npools;
	struct shrinker		mm_shrink;
	struct kobject		kobj;
};

static struct ttm_pool_manager *_manager;

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max) {
		m->options.max_size = val;
	} else if (attr == &ttm_page_pool_small) {
		m->options.small = val;
	} else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	if (pool->type & IS_UC) {
		r = ttm_set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = ttm_set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}

static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
	unsigned long attrs = 0;
	dma_addr_t dma = d_page->dma;
	d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
	if (pool->type & IS_HUGE)
		attrs = DMA_ATTR_NO_WARN;

	dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs);

	kfree(d_page);
	d_page = NULL;
}
static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
	struct dma_page *d_page;
	unsigned long attrs = 0;
	void *vaddr;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (!d_page)
		return NULL;

	if (pool->type & IS_HUGE)
		attrs = DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
				pool->gfp_flags, attrs);
	if (vaddr) {
		if (is_vmalloc_addr(vaddr))
			d_page->p = vmalloc_to_page(vaddr);
		else
			d_page->p = virt_to_page(vaddr);
		d_page->vaddr = (unsigned long)vaddr;
		if (pool->type & IS_HUGE)
			d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
	} else {
		kfree(d_page);
		d_page = NULL;
	}
	return d_page;
}
static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;
	if (cstate == tt_cached)
		type |= IS_CACHED;
	else if (cstate == tt_uncached)
		type |= IS_UC;
	else
		type |= IS_WC;

	return type;
}

static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;

}

/* set memory back to wb and free the pages. */
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	struct page *page = d_page->p;
	unsigned num_pages;

	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED)) {
		num_pages = pool->size / PAGE_SIZE;
		if (ttm_set_pages_wb(page, num_pages))
			pr_err("%s: Failed to set %d pages to wb!\n",
			       pool->dev_name, num_pages);
	}

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}

static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	if (pool->type & IS_HUGE) {
		list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
			ttm_dma_page_put(pool, d_page);

		return;
	}

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    ttm_set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}

/*
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: If set to true will free all pages in pool
 * @use_static: Safe to use static buffer
 **/
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
				       bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc_array(npages_to_free,
					      sizeof(struct page *),
					      GFP_KERNEL);

	if (!pages_to_free) {
		pr_debug("%s: Failed to allocate memory for pool free operation\n",
			 pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We're picking the oldest ones off the list */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}

static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	if (!dev)
		return;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		/* Takes a spinlock.. */
		/* OK to use static buffer since global mutex is held. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}

/*
 * On free-ing of the 'struct device' this deconstructor is run.
 * Albeit the pool might have already been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}

static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
	int ret = -ENODEV;
	char *p;

	if (!dev)
		return NULL;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	ret = -ENOMEM;

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
			    dev_to_node(dev));
	if (!pool)
		goto err_mem;

	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
				dev_to_node(dev));
	if (!sec_pool)
		goto err_mem;

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	if (type & IS_HUGE)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pool->size = HPAGE_PMD_SIZE;
#else
		BUG();
#endif
	else
		pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < ARRAY_SIZE(t); i++) {
		if (type & t[i]) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
		}
	}
	*p = 0;
	/* We copy the name for pr_ calls b/c when dma_pool_destroy is called
	 * - the kobj->name has already been deallocated.*/
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	_manager->npools++;
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	*ptr = pool;
	devres_add(dev, ptr);

	return pool;
err_mem:
	devres_free(ptr);
	kfree(sec_pool);
	kfree(pool);
	return ERR_PTR(ret);
}

static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					  enum pool_type type)
{
	struct dma_pool *pool, *tmp;

	if (type == IS_UNDEFINED)
		return NULL;

	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphic driver loading - in the drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver which both end up taking a kref
	 * on 'struct device'.
	 *
	 * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
	 * and calls the dev_res deconstructors: ttm_dma_pool_release. The nice
	 * thing is at that point of time there are no pages associated with the
	 * driver so this function will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
		if (pool->type == type)
			return pool;
	return NULL;
}

/*
 * Free the pages that failed to change the caching state. If there
 * are pages that have changed their caching state already put them to the
 * pool.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i < cpages)
			p = failed_pages[i];
		else
			break;
	}

}

/*
 * Allocate 'count' pages, and put 'need' number of them on the
 * 'pages' and as well on the 'dma_address' starting at 'dma_offset' offset.
 * The full list of pages should also be on 'd_pages'.
 * We return zero for success, and negative numbers as errors.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, j, npages, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
				      GFP_KERNEL);

	if (!caching_array) {
		pr_debug("%s: Unable to allocate table for new pages\n",
			 pool->dev_name);
		return -ENOMEM;
	}

	if (count > 1)
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid, count);

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_debug("%s: Unable to get page %u\n",
				 pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
		list_add(&dma_p->page_list, d_pages);

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (PageHighMem(p))
			continue;
#endif

		npages = pool->size / PAGE_SIZE;
		for (j = 0; j < npages; ++j) {
			caching_array[cpages++] = p + j;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}

/*
 * @return count of pages still required to fulfill the request.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns how many more are necessary to fulfill the
		 * request. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh to the end.. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
				 pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list) {
				cpages++;
			}
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}

/*
 * The populate list is actually a stack (not that it matters as TTM
 * allocates one page at a time).
 * return dma_page pointer if success, otherwise NULL.
 */
static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
					       struct ttm_dma_tt *ttm_dma,
					       unsigned index)
{
	struct dma_page *d_page = NULL;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return d_page;
}

static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	gfp_t gfp_flags;

	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (huge) {
		gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
			__GFP_KSWAPD_RECLAIM;
		gfp_flags &= ~__GFP_MOVABLE;
		gfp_flags &= ~__GFP_COMP;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	return gfp_flags;
}

/*
 * On success pages list will hold count number of correctly
 * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
		     struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	unsigned long num_pages = ttm->num_pages;
	struct dma_pool *pool;
	struct dma_page *d_page;
	enum pool_type type;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
		return -ENOMEM;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	i = 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		goto skip_huge;

	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);

		pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
		if (IS_ERR_OR_NULL(pool))
			goto skip_huge;
	}

	while (num_pages >= HPAGE_PMD_NR) {
		unsigned j;

		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page)
			break;

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size, ctx);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
			ttm->pages[j] = ttm->pages[j - 1] + 1;
			ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
				PAGE_SIZE;
		}

		i += HPAGE_PMD_NR;
		num_pages -= HPAGE_PMD_NR;
	}

skip_huge:
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);

		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
			return -ENOMEM;
	}

	while (num_pages) {
		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size, ctx);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		++i;
		--num_pages;
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);

/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (pool) {
		count = 0;
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
					 page_list) {
			if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
				continue;

			count++;
			if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
				ttm_mem_global_free_page(mem_glob, d_page->p,
							 pool->size);
				d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
			}
			ttm_dma_page_put(pool, d_page);
		}

		spin_lock_irqsave(&pool->lock, irq_flags);
		pool->npages_in_use -= count;
		pool->nfrees += count;
		spin_unlock_irqrestore(&pool->lock, irq_flags);
	}
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* make sure pages array match list and count number of pages */
	count = 0;
	list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
				 page_list) {
		ttm->pages[count] = d_page->p;
		count++;

		if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
			ttm_mem_global_free_page(mem_glob, d_page->p,
						 pool->size);
			d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
		}

		if (is_cached)
			ttm_dma_page_put(pool, d_page);
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		/*
		 * Wait to have at least NUM_PAGES_TO_ALLOC number of pages
		 * to free in order to minimize calls to set_memory_wb().
		 */
		if (pool->npages_free >= (_manager->options.max_size +
					  NUM_PAGES_TO_ALLOC))
			npages = pool->npages_free - _manager->options.max_size;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink pool if necessary (only on !is_cached pools)*/
	if (npages)
		ttm_dma_page_pool_free(pool, npages, false);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);

/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
 * shrinkers
 */
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static unsigned start_pool;
	unsigned idx = 0;
	unsigned pool_offset;
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;
	unsigned long freed = 0;

	if (list_empty(&_manager->pools))
		return SHRINK_STOP;

	if (!mutex_trylock(&_manager->lock))
		return SHRINK_STOP;
	if (!_manager->npools)
		goto out;
	pool_offset = ++start_pool % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		/* OK to use static buffer since global mutex is held. */
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
		freed += nr_free - shrink_pages;

		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
out:
	mutex_unlock(&_manager->lock);
	return freed;
}

static unsigned long
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct device_pools *p;
	unsigned long count = 0;

	if (!mutex_trylock(&_manager->lock))
		return 0;
	list_for_each_entry(p, &_manager->pools, pools)
		count += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return count;
}

static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
	manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	return register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0))
		goto error;

	ret = ttm_dma_pool_mm_shrink_init(_manager);
	if (unlikely(ret != 0))
		goto error;
	return 0;

error:
	kobject_put(&_manager->kobj);
	_manager = NULL;
	return ret;
}

void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
				       ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "         pool      refills   pages freed    inuse available     name\n");
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;
		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
				pool->name, pool->nrefills,
				pool->nfrees, pool->npages_in_use,
				pool->npages_free,
				pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);

#endif
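
For orientation (this is not part of the file above): ttm_dma_populate() and ttm_dma_unpopulate() are the two entry points a TTM-based driver wires into its ttm_tt populate/unpopulate hooks when this DMA pool allocator is in use, and the sysfs attributes registered earlier (pool_max_size, pool_small_allocation, pool_allocation_size under the dma_pool kobject) are read and written in KiB and converted to pages with PAGE_SIZE >> 10 (with 4 KiB pages, writing 16384 sets a limit of 4096 pages). Below is a minimal, hedged sketch of such a driver hookup; struct my_ttm_tt and the my_* names are hypothetical placeholders for illustration, not code from this file.

/* Illustrative sketch only - not part of ttm_page_alloc_dma.c. */
struct my_ttm_tt {
	struct ttm_dma_tt dma_tt;	/* the ttm_dma_* helpers operate on &dma_tt and &dma_tt.ttm */
	struct device *dev;		/* DMA-capable device whose per-device pools are used */
};

static int my_ttm_tt_populate(struct ttm_tt *ttm,
			      struct ttm_operation_ctx *ctx)
{
	struct my_ttm_tt *gtt = container_of(ttm, struct my_ttm_tt, dma_tt.ttm);

	/* Pull pages from (or grow) the per-device DMA pools. */
	return ttm_dma_populate(&gtt->dma_tt, gtt->dev, ctx);
}

static void my_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct my_ttm_tt *gtt = container_of(ttm, struct my_ttm_tt, dma_tt.ttm);

	/* Hand the pages back to the matching pool so they can be reused. */
	ttm_dma_unpopulate(&gtt->dma_tt, gtt->dev);
}

Real drivers of this era (radeon, amdgpu) follow roughly this shape, choosing between the DMA-pool path and the plain TTM page allocator depending on whether swiotlb or an IOMMU is active.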