Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/ttm: isolate dma data from ttm_tt V4

Move the dma data into a superset ttm_dma_tt structure which inherits
from ttm_tt. This allows drivers that don't use the dma functionality
to avoid wasting memory on it.

V2 Rebase on top of the no-memory-accounting changes (where/when is my
DeLorean when I need it?)
V3 Make sure the page list is initialized empty
V4 Typo/syntax fixes

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
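
For context, the core of the change is the split between struct ttm_tt and the
new struct ttm_dma_tt wrapper (see the include/drm/ttm/ttm_bo_driver.h hunk
below). A driver that needs bus addresses embeds the wrapper, keeps it as the
first member so the struct ttm_tt * passed to its callbacks can be cast back,
and reaches dma_address through it. A minimal sketch of that pattern follows;
the mydrv_* names and mydrv_gart_bind() are hypothetical stand-ins, only the
TTM types and fields are taken from the patch:

#include <drm/ttm/ttm_bo_driver.h>

/* hypothetical driver-side GART helper, not a TTM API */
int mydrv_gart_bind(u64 offset, unsigned long num_pages,
		    struct page **pages, dma_addr_t *dma_addrs);

/* ttm_dma_tt must stay the first member so the ttm_tt pointer TTM
 * hands to the callbacks can be cast back to this wrapper. */
struct mydrv_ttm_tt {
	struct ttm_dma_tt	ttm;
	u64			offset;
};

static int mydrv_ttm_backend_bind(struct ttm_tt *ttm,
				  struct ttm_mem_reg *bo_mem)
{
	struct mydrv_ttm_tt *gtt = (void *)ttm;

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	/* bus addresses now live in the wrapper, not in ttm_tt itself */
	return mydrv_gart_bind(gtt->offset, ttm->num_pages,
			       ttm->pages, gtt->ttm.dma_address);
}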

Authored by Jerome Glisse, committed by Dave Airlie
8e7e7052 3230cfc3

9 files changed: +203 -156
+10 -8
drivers/gpu/drm/nouveau/nouveau_bo.c
···
 static int
 nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 {
+	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct drm_nouveau_private *dev_priv;
 	struct drm_device *dev;
 	unsigned i;
···

 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
-		return ttm_dma_populate(ttm, dev->dev);
+		return ttm_dma_populate((void *)ttm, dev->dev);
 	}
 #endif

···
 	}

 	for (i = 0; i < ttm->num_pages; i++) {
-		ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
 						   0, PAGE_SIZE,
 						   PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
+		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
 			while (--i) {
-				pci_unmap_page(dev->pdev, ttm->dma_address[i],
+				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
 					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-				ttm->dma_address[i] = 0;
+				ttm_dma->dma_address[i] = 0;
 			}
 			ttm_pool_unpopulate(ttm);
 			return -EFAULT;
···
 static void
 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
+	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct drm_nouveau_private *dev_priv;
 	struct drm_device *dev;
 	unsigned i;
···

 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
-		ttm_dma_unpopulate(ttm, dev->dev);
+		ttm_dma_unpopulate((void *)ttm, dev->dev);
 		return;
 	}
 #endif

 	for (i = 0; i < ttm->num_pages; i++) {
-		if (ttm->dma_address[i]) {
-			pci_unmap_page(dev->pdev, ttm->dma_address[i],
+		if (ttm_dma->dma_address[i]) {
+			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
 	}
+14 -8
drivers/gpu/drm/nouveau/nouveau_sgdma.c
···
 #define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

 struct nouveau_sgdma_be {
-	struct ttm_tt ttm;
+	/* this has to be the first field so populate/unpopulate in
+	 * nouveau_bo.c works properly, otherwise have to move them here
+	 */
+	struct ttm_dma_tt ttm;
 	struct drm_device *dev;
 	u64 offset;
 };
···

 	if (ttm) {
 		NV_DEBUG(nvbe->dev, "\n");
+		ttm_dma_tt_fini(&nvbe->ttm);
 		kfree(nvbe);
 	}
 }
···
 	nvbe->offset = mem->start << PAGE_SHIFT;
 	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < ttm->num_pages; i++) {
-		dma_addr_t dma_offset = ttm->dma_address[i];
+		dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
 		uint32_t offset_l = lower_32_bits(dma_offset);

 		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
···
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	dma_addr_t *list = ttm->dma_address;
+	dma_addr_t *list = nvbe->ttm.dma_address;
 	u32 pte = mem->start << 2;
 	u32 cnt = ttm->num_pages;
···
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	dma_addr_t *list = ttm->dma_address;
+	dma_addr_t *list = nvbe->ttm.dma_address;
 	u32 pte = mem->start << 2, tmp[4];
 	u32 cnt = ttm->num_pages;
 	int i;
···
 static int
 nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct nouveau_mem *node = mem->mm_node;

 	/* noop: bound in move_notify() */
-	node->pages = ttm->dma_address;
+	node->pages = nvbe->ttm.dma_address;
 	return 0;
 }

···
 		return NULL;

 	nvbe->dev = dev;
-	nvbe->ttm.func = dev_priv->gart_info.func;
+	nvbe->ttm.ttm.func = dev_priv->gart_info.func;

-	if (ttm_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+		kfree(nvbe);
 		return NULL;
 	}
-	return &nvbe->ttm;
+	return &nvbe->ttm.ttm;
 }

 int
+22 -21
drivers/gpu/drm/radeon/radeon_ttm.c
···
  * TTM backend functions.
  */
 struct radeon_ttm_tt {
-	struct ttm_tt		ttm;
+	struct ttm_dma_tt	ttm;
 	struct radeon_device	*rdev;
 	u64			offset;
 };
···
 static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
 				   struct ttm_mem_reg *bo_mem)
 {
-	struct radeon_ttm_tt *gtt;
+	struct radeon_ttm_tt *gtt = (void*)ttm;
 	int r;

-	gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
 	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
 	if (!ttm->num_pages) {
 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
 		     ttm->num_pages, bo_mem, ttm);
 	}
 	r = radeon_gart_bind(gtt->rdev, gtt->offset,
-			     ttm->num_pages, ttm->pages, ttm->dma_address);
+			     ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
 	if (r) {
 		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
 			  ttm->num_pages, (unsigned)gtt->offset);
···

 static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
 {
-	struct radeon_ttm_tt *gtt;
+	struct radeon_ttm_tt *gtt = (void *)ttm;

-	gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
 	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
 	return 0;
 }

 static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
 {
-	struct radeon_ttm_tt *gtt;
+	struct radeon_ttm_tt *gtt = (void *)ttm;

-	gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
+	ttm_dma_tt_fini(&gtt->ttm);
 	kfree(gtt);
 }

···
 	if (gtt == NULL) {
 		return NULL;
 	}
-	gtt->ttm.func = &radeon_backend_func;
+	gtt->ttm.ttm.func = &radeon_backend_func;
 	gtt->rdev = rdev;
-	if (ttm_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+		kfree(gtt);
 		return NULL;
 	}
-	return &gtt->ttm;
+	return &gtt->ttm.ttm;
 }

 static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 {
 	struct radeon_device *rdev;
+	struct radeon_ttm_tt *gtt = (void *)ttm;
 	unsigned i;
 	int r;

···

 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
-		return ttm_dma_populate(ttm, rdev->dev);
+		return ttm_dma_populate(&gtt->ttm, rdev->dev);
 	}
 #endif

···
 	}

 	for (i = 0; i < ttm->num_pages; i++) {
-		ttm->dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
-						   0, PAGE_SIZE,
-						   PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(rdev->pdev, ttm->dma_address[i])) {
+		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
+						       0, PAGE_SIZE,
+						       PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
 			while (--i) {
-				pci_unmap_page(rdev->pdev, ttm->dma_address[i],
+				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
 					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-				ttm->dma_address[i] = 0;
+				gtt->ttm.dma_address[i] = 0;
 			}
 			ttm_pool_unpopulate(ttm);
 			return -EFAULT;
···
 static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
 	struct radeon_device *rdev;
+	struct radeon_ttm_tt *gtt = (void *)ttm;
 	unsigned i;

 	rdev = radeon_get_rdev(ttm->bdev);

 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
-		ttm_dma_unpopulate(ttm, rdev->dev);
+		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
 		return;
 	}
 #endif

 	for (i = 0; i < ttm->num_pages; i++) {
-		if (ttm->dma_address[i]) {
-			pci_unmap_page(rdev->pdev, ttm->dma_address[i],
+		if (gtt->ttm.dma_address[i]) {
+			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
 	}
+56 -58
drivers/gpu/drm/ttm/ttm_page_alloc.c
···
 	return count;
 }

+/* Put all pages in pages list to correct pool to wait for reuse */
+static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
+			  enum ttm_caching_state cstate)
+{
+	unsigned long irq_flags;
+	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+	unsigned i;
+
+	if (pool == NULL) {
+		/* No pool for this memory type so free the pages */
+		for (i = 0; i < npages; i++) {
+			if (pages[i]) {
+				if (page_count(pages[i]) != 1)
+					printk(KERN_ERR TTM_PFX
+					       "Erroneous page count. "
+					       "Leaking pages.\n");
+				__free_page(pages[i]);
+				pages[i] = NULL;
+			}
+		}
+		return;
+	}
+
+	spin_lock_irqsave(&pool->lock, irq_flags);
+	for (i = 0; i < npages; i++) {
+		if (pages[i]) {
+			if (page_count(pages[i]) != 1)
+				printk(KERN_ERR TTM_PFX
+				       "Erroneous page count. "
+				       "Leaking pages.\n");
+			list_add_tail(&pages[i]->lru, &pool->list);
+			pages[i] = NULL;
+			pool->npages++;
+		}
+	}
+	/* Check that we don't go over the pool limit */
+	npages = 0;
+	if (pool->npages > _manager->options.max_size) {
+		npages = pool->npages - _manager->options.max_size;
+		/* free at least NUM_PAGES_TO_ALLOC number of pages
+		 * to reduce calls to set_memory_wb */
+		if (npages < NUM_PAGES_TO_ALLOC)
+			npages = NUM_PAGES_TO_ALLOC;
+	}
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+	if (npages)
+		ttm_page_pool_free(pool, npages);
+}
+
 /*
  * On success pages list will hold count number of correctly
  * cached pages.
  */
-int ttm_get_pages(struct page **pages, int flags,
-		  enum ttm_caching_state cstate, unsigned npages,
-		  dma_addr_t *dma_address)
+static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
+			 enum ttm_caching_state cstate)
 {
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct list_head plist;
···
 			printk(KERN_ERR TTM_PFX
 			       "Failed to allocate extra pages "
 			       "for large request.");
-			ttm_put_pages(pages, count, flags, cstate, NULL);
+			ttm_put_pages(pages, count, flags, cstate);
 			return r;
 		}
 	}

 	return 0;
-}
-
-/* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_put_pages(struct page **pages, unsigned npages, int flags,
-		   enum ttm_caching_state cstate, dma_addr_t *dma_address)
-{
-	unsigned long irq_flags;
-	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
-	unsigned i;
-
-	if (pool == NULL) {
-		/* No pool for this memory type so free the pages */
-		for (i = 0; i < npages; i++) {
-			if (pages[i]) {
-				if (page_count(pages[i]) != 1)
-					printk(KERN_ERR TTM_PFX
-					       "Erroneous page count. "
-					       "Leaking pages.\n");
-				__free_page(pages[i]);
-				pages[i] = NULL;
-			}
-		}
-		return;
-	}
-
-	spin_lock_irqsave(&pool->lock, irq_flags);
-	for (i = 0; i < npages; i++) {
-		if (pages[i]) {
-			if (page_count(pages[i]) != 1)
-				printk(KERN_ERR TTM_PFX
-				       "Erroneous page count. "
-				       "Leaking pages.\n");
-			list_add_tail(&pages[i]->lru, &pool->list);
-			pages[i] = NULL;
-			pool->npages++;
-		}
-	}
-	/* Check that we don't go over the pool limit */
-	npages = 0;
-	if (pool->npages > _manager->options.max_size) {
-		npages = pool->npages - _manager->options.max_size;
-		/* free at least NUM_PAGES_TO_ALLOC number of pages
-		 * to reduce calls to set_memory_wb */
-		if (npages < NUM_PAGES_TO_ALLOC)
-			npages = NUM_PAGES_TO_ALLOC;
-	}
-	spin_unlock_irqrestore(&pool->lock, irq_flags);
-	if (npages)
-		ttm_page_pool_free(pool, npages);
 }

 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
···
 		return 0;

 	for (i = 0; i < ttm->num_pages; ++i) {
-		ret = ttm_get_pages(&ttm->pages[i], ttm->page_flags,
-				    ttm->caching_state, 1,
-				    &ttm->dma_address[i]);
+		ret = ttm_get_pages(&ttm->pages[i], 1,
+				    ttm->page_flags,
+				    ttm->caching_state);
 		if (ret != 0) {
 			ttm_pool_unpopulate(ttm);
 			return -ENOMEM;
···
 					 ttm->pages[i]);
 			ttm_put_pages(&ttm->pages[i], 1,
 				      ttm->page_flags,
-				      ttm->caching_state,
-				      ttm->dma_address);
+				      ttm->caching_state);
 		}
 	}
 	ttm->state = tt_unpopulated;
+19 -16
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
···

 /*
  * @return count of pages still required to fulfill the request.
-*/
+ */
 static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
 					 unsigned long *irq_flags)
 {
···
  * allocates one page at a time.
  */
 static int ttm_dma_pool_get_pages(struct dma_pool *pool,
-				  struct ttm_tt *ttm,
+				  struct ttm_dma_tt *ttm_dma,
 				  unsigned index)
 {
 	struct dma_page *d_page;
+	struct ttm_tt *ttm = &ttm_dma->ttm;
 	unsigned long irq_flags;
 	int count, r = -ENOMEM;

···
 	if (count) {
 		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
 		ttm->pages[index] = d_page->p;
-		ttm->dma_address[index] = d_page->dma;
-		list_move_tail(&d_page->page_list, &ttm->alloc_list);
+		ttm_dma->dma_address[index] = d_page->dma;
+		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
 		r = 0;
 		pool->npages_in_use += 1;
 		pool->npages_free -= 1;
···
  * On success pages list will hold count number of correctly
  * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
  */
-int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev)
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 {
+	struct ttm_tt *ttm = &ttm_dma->ttm;
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	struct dma_pool *pool;
 	enum pool_type type;
···
 		}
 	}

-	INIT_LIST_HEAD(&ttm->alloc_list);
+	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	for (i = 0; i < ttm->num_pages; ++i) {
-		ret = ttm_dma_pool_get_pages(pool, ttm, i);
+		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
 		if (ret != 0) {
-			ttm_dma_unpopulate(ttm, dev);
+			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;
 		}

 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
 						false, false);
 		if (unlikely(ret != 0)) {
-			ttm_dma_unpopulate(ttm, dev);
+			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;
 		}
 	}
···
 	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
 		ret = ttm_tt_swapin(ttm);
 		if (unlikely(ret != 0)) {
-			ttm_dma_unpopulate(ttm, dev);
+			ttm_dma_unpopulate(ttm_dma, dev);
 			return ret;
 		}
 	}
···
 }

 /* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
+void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 {
+	struct ttm_tt *ttm = &ttm_dma->ttm;
 	struct dma_pool *pool;
 	struct dma_page *d_page, *next;
 	enum pool_type type;
···
 		  ttm_to_type(ttm->page_flags, tt_cached)) == pool);

 	/* make sure pages array match list and count number of pages */
-	list_for_each_entry(d_page, &ttm->alloc_list, page_list) {
+	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
 		ttm->pages[count] = d_page->p;
 		count++;
 	}
···
 		pool->nfrees += count;
 	} else {
 		pool->npages_free += count;
-		list_splice(&ttm->alloc_list, &pool->free_list);
+		list_splice(&ttm_dma->pages_list, &pool->free_list);
 		if (pool->npages_free > _manager->options.max_size) {
 			count = pool->npages_free - _manager->options.max_size;
 		}
···
 	spin_unlock_irqrestore(&pool->lock, irq_flags);

 	if (is_cached) {
-		list_for_each_entry_safe(d_page, next, &ttm->alloc_list, page_list) {
+		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
 			ttm_mem_global_free_page(ttm->glob->mem_glob,
 						 d_page->p);
 			ttm_dma_page_put(pool, d_page);
···
 		}
 	}

-	INIT_LIST_HEAD(&ttm->alloc_list);
+	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	for (i = 0; i < ttm->num_pages; i++) {
 		ttm->pages[i] = NULL;
-		ttm->dma_address[i] = 0;
+		ttm_dma->dma_address[i] = 0;
 	}

 	/* shrink pool if necessary */
+49 -11
drivers/gpu/drm/ttm/ttm_tt.c
···
  */
 static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
-	ttm->dma_address = drm_calloc_large(ttm->num_pages,
-					    sizeof(*ttm->dma_address));
+	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
 }

-static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
+static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 {
-	drm_free_large(ttm->pages);
-	ttm->pages = NULL;
-	drm_free_large(ttm->dma_address);
-	ttm->dma_address = NULL;
+	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
+	ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
+					    sizeof(*ttm->dma_address));
 }

 #ifdef CONFIG_X86
···

 	if (likely(ttm->pages != NULL)) {
 		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
-		ttm_tt_free_page_directory(ttm);
 	}

 	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
···
 	ttm->dummy_read_page = dummy_read_page;
 	ttm->state = tt_unpopulated;

-	INIT_LIST_HEAD(&ttm->alloc_list);
 	ttm_tt_alloc_page_directory(ttm);
-	if (!ttm->pages || !ttm->dma_address) {
+	if (!ttm->pages) {
 		ttm_tt_destroy(ttm);
 		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
 		return -ENOMEM;
···
 	return 0;
 }
 EXPORT_SYMBOL(ttm_tt_init);
+
+void ttm_tt_fini(struct ttm_tt *ttm)
+{
+	drm_free_large(ttm->pages);
+	ttm->pages = NULL;
+}
+EXPORT_SYMBOL(ttm_tt_fini);
+
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+		    unsigned long size, uint32_t page_flags,
+		    struct page *dummy_read_page)
+{
+	struct ttm_tt *ttm = &ttm_dma->ttm;
+
+	ttm->bdev = bdev;
+	ttm->glob = bdev->glob;
+	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	ttm->caching_state = tt_cached;
+	ttm->page_flags = page_flags;
+	ttm->dummy_read_page = dummy_read_page;
+	ttm->state = tt_unpopulated;
+
+	INIT_LIST_HEAD(&ttm_dma->pages_list);
+	ttm_dma_tt_alloc_page_directory(ttm_dma);
+	if (!ttm->pages || !ttm_dma->dma_address) {
+		ttm_tt_destroy(ttm);
+		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ttm_dma_tt_init);
+
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
+{
+	struct ttm_tt *ttm = &ttm_dma->ttm;
+
+	drm_free_large(ttm->pages);
+	ttm->pages = NULL;
+	drm_free_large(ttm_dma->dma_address);
+	ttm_dma->dma_address = NULL;
+}
+EXPORT_SYMBOL(ttm_dma_tt_fini);

 void ttm_tt_unbind(struct ttm_tt *ttm)
 {
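
The ttm_tt.c hunk above also changes the lifetime rules: ttm_tt_init() and
ttm_dma_tt_init() only allocate the page directory (plus the dma_address array
in the DMA case), and the freeing that ttm_tt_destroy() used to do through
ttm_tt_free_page_directory() now lives in ttm_tt_fini()/ttm_dma_tt_fini(). So a
driver's destroy callback has to call the matching fini before freeing its own
wrapper, and a failed init has to free the wrapper itself, as the vmwgfx,
radeon and nouveau hunks do. A condensed sketch of that pairing, continuing the
hypothetical mydrv backend from the note above (mydrv_backend_func is assumed
to exist; only the ttm_* calls are from the patch):

static void mydrv_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct mydrv_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);	/* frees pages[] and dma_address[] */
	kfree(gtt);
}

static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_bo_device *bdev,
					  unsigned long size, uint32_t page_flags,
					  struct page *dummy_read_page)
{
	struct mydrv_ttm_tt *gtt = kzalloc(sizeof(*gtt), GFP_KERNEL);

	if (gtt == NULL)
		return NULL;
	gtt->ttm.ttm.func = &mydrv_backend_func;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(gtt);	/* init failure does not free the wrapper */
		return NULL;
	}
	return &gtt->ttm.ttm;
}
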
+2
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
···
 {
 	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);

+	ttm_tt_fini(ttm);
 	kfree(vmw_be);
 }

···
 	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);

 	if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+		kfree(vmw_be);
 		return NULL;
 	}

+29 -3
include/drm/ttm/ttm_bo_driver.h
···
  * @swap_storage: Pointer to shmem struct file for swap storage.
  * @caching_state: The current caching state of the pages.
  * @state: The current binding state of the pages.
- * @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32)
- * @alloc_list: used by some page allocation backend
  *
  * This is a structure holding the pages, caching- and aperture binding
  * status for a buffer object that isn't backed by fixed (VRAM / AGP)
···
 		tt_unbound,
 		tt_unpopulated,
 	} state;
+};
+
+/**
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
+ * @dma_address: The DMA (bus) addresses of the pages
+ * @pages_list: used by some page allocation backend
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_dma_tt {
+	struct ttm_tt ttm;
 	dma_addr_t *dma_address;
-	struct list_head alloc_list;
+	struct list_head pages_list;
 };

 #define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)	/* Fixed (on-card) PCI memory */
···
 extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
 			unsigned long size, uint32_t page_flags,
 			struct page *dummy_read_page);
+extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+			   unsigned long size, uint32_t page_flags,
+			   struct page *dummy_read_page);
+
+/**
+ * ttm_tt_fini
+ *
+ * @ttm: the ttm_tt structure.
+ *
+ * Free memory of ttm_tt structure
+ */
+extern void ttm_tt_fini(struct ttm_tt *ttm);
+extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);

 /**
  * ttm_ttm_bind:
+2 -31
include/drm/ttm/ttm_page_alloc.h
···
 #include "ttm_memory.h"

 /**
- * Get count number of pages from pool to pages list.
- *
- * @pages: head of empty linked list where pages are filled.
- * @flags: ttm flags for page allocation.
- * @cstate: ttm caching state for the page.
- * @count: number of pages to allocate.
- * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
- */
-int ttm_get_pages(struct page **pages,
-		  int flags,
-		  enum ttm_caching_state cstate,
-		  unsigned npages,
-		  dma_addr_t *dma_address);
-/**
- * Put linked list of pages to pool.
- *
- * @pages: list of pages to free.
- * @page_count: number of pages in the list. Zero can be passed for unknown
- * count.
- * @flags: ttm flags for page allocation.
- * @cstate: ttm caching state.
- * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
- */
-void ttm_put_pages(struct page **pages,
-		   unsigned npages,
-		   int flags,
-		   enum ttm_caching_state cstate,
-		   dma_addr_t *dma_address);
-/**
  * Initialize pool allocator.
  */
 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
···
  */
 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);

-int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev);
-extern void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev);
+extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);

 #else
 static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
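
Taken together, a driver's populate callback after this series chooses between
the DMA-aware pool, which fills both ttm->pages[] and the wrapper's
dma_address[], and the plain pool, whose ttm_get_pages()/ttm_put_pages() are
now internal to ttm_page_alloc.c. A rough sketch condensed from the nouveau and
radeon hunks above; mydrv_device() is a hypothetical stand-in for however the
driver reaches its struct device:

static int mydrv_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct mydrv_ttm_tt *gtt = (void *)ttm;

	if (ttm->state != tt_unpopulated)
		return 0;

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl())
		return ttm_dma_populate(&gtt->ttm, mydrv_device());
#endif
	/* plain pool: no bus addresses; map the pages here if the GART needs them */
	return ttm_pool_populate(ttm);
}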