Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

media: atomisp: remove hmm_page_object

hmm_page_object only stores a struct page pointer, so we can just use
the hmm_bo.pages page pointer array everywhere.

Link: https://lore.kernel.org/linux-media/20220615205037.16549-33-hdegoede@redhat.com
Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>

Authored by Hans de Goede and committed by Mauro Carvalho Chehab (3a68900a, f9599127)
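For context only (not part of the patch; userspace stand-in types): a minimal sketch of the data-structure change described in the commit message. Before, every page pointer was reached through a one-member wrapper (bo->page_obj[i].page); after, the buffer object keeps a plain struct page * array and callers index bo->pages[i] directly.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's struct page; illustration only. */
struct page { unsigned long pfn; };

/* Old layout: a one-member wrapper around the page pointer. */
struct hmm_page_object { struct page *page; };

struct hmm_buffer_object_old {
	unsigned int pgnr;
	struct hmm_page_object *page_obj;	/* kmalloc_array(pgnr, ...) in the driver */
};

/* New layout: the plain page-pointer array is stored directly. */
struct hmm_buffer_object_new {
	unsigned int pgnr;
	struct page **pages;			/* kmalloc_array(pgnr, ...) in the driver */
};

int main(void)
{
	struct page p0 = { .pfn = 42 };
	struct hmm_buffer_object_old bo_old = { .pgnr = 1 };
	struct hmm_buffer_object_new bo_new = { .pgnr = 1 };

	bo_old.page_obj = calloc(bo_old.pgnr, sizeof(*bo_old.page_obj));
	bo_new.pages = calloc(bo_new.pgnr, sizeof(*bo_new.pages));
	if (!bo_old.page_obj || !bo_new.pages)
		return 1;

	bo_old.page_obj[0].page = &p0;
	bo_new.pages[0] = &p0;

	/* Same information, one level of indirection less per access. */
	printf("old: pfn=%lu  new: pfn=%lu\n",
	       bo_old.page_obj[0].page->pfn, bo_new.pages[0]->pfn);

	free(bo_old.page_obj);
	free(bo_new.pages);
	return 0;
}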

+26 -74
+0 -5
drivers/staging/media/atomisp/include/hmm/hmm_bo.h
···
 	struct kmem_cache *bo_cache;
 };
 
-struct hmm_page_object {
-	struct page		*page;
-};
-
 struct hmm_buffer_object {
 	struct hmm_bo_device	*bdev;
 	struct list_head	list;
···
 	/* mutex protecting this BO */
 	struct mutex		mutex;
 	enum hmm_bo_type	type;
-	struct hmm_page_object	*page_obj;	/* physical pages */
 	int		mmap_count;
 	int		status;
 	int		mem_type;
+8 -8
drivers/staging/media/atomisp/pci/hmm/hmm.c
···
 		idx = (virt - bo->start) >> PAGE_SHIFT;
 		offset = (virt - bo->start) - (idx << PAGE_SHIFT);
 
-		src = (char *)kmap(bo->page_obj[idx].page) + offset;
+		src = (char *)kmap(bo->pages[idx]) + offset;
 
 		if ((bytes + offset) >= PAGE_SIZE) {
 			len = PAGE_SIZE - offset;
···
 
 		clflush_cache_range(src, len);
 
-		kunmap(bo->page_obj[idx].page);
+		kunmap(bo->pages[idx]);
 	}
 
 	return 0;
···
 		offset = (virt - bo->start) - (idx << PAGE_SHIFT);
 
 		if (in_atomic())
-			des = (char *)kmap_atomic(bo->page_obj[idx].page);
+			des = (char *)kmap_atomic(bo->pages[idx]);
 		else
-			des = (char *)kmap(bo->page_obj[idx].page);
+			des = (char *)kmap(bo->pages[idx]);
 
 		if (!des) {
 			dev_err(atomisp_dev,
···
 			 */
 			kunmap_atomic(des - offset);
 		else
-			kunmap(bo->page_obj[idx].page);
+			kunmap(bo->pages[idx]);
 	}
 
 	return 0;
···
 		idx = (virt - bo->start) >> PAGE_SHIFT;
 		offset = (virt - bo->start) - (idx << PAGE_SHIFT);
 
-		des = (char *)kmap(bo->page_obj[idx].page) + offset;
+		des = (char *)kmap(bo->pages[idx]) + offset;
 
 		if ((bytes + offset) >= PAGE_SIZE) {
 			len = PAGE_SIZE - offset;
···
 
 		clflush_cache_range(des, len);
 
-		kunmap(bo->page_obj[idx].page);
+		kunmap(bo->pages[idx]);
 	}
 
 	return 0;
···
 	idx = (virt - bo->start) >> PAGE_SHIFT;
 	offset = (virt - bo->start) - (idx << PAGE_SHIFT);
 
-	return page_to_phys(bo->page_obj[idx].page) + offset;
+	return page_to_phys(bo->pages[idx]) + offset;
 }
 
 int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
+18 -61
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
···
 	int i, ret;
 
 	for (i = 0; i < free_pgnr; i++) {
-		ret = set_pages_wb(bo->page_obj[i].page, 1);
+		ret = set_pages_wb(bo->pages[i], 1);
 		if (ret)
 			dev_err(atomisp_dev,
 				"set page to WB err ...ret = %d\n",
···
 		address be valid,it maybe memory corruption by lowmemory
 		*/
 		if (!ret) {
-			__free_pages(bo->page_obj[i].page, 0);
+			__free_pages(bo->pages[i], 0);
 		}
 	}
 }
···
 	bool lack_mem = true;
 
 	pgnr = bo->pgnr;
-
-	bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
-				     GFP_KERNEL);
-	if (unlikely(!bo->page_obj))
-		return -ENOMEM;
 
 	i = 0;
 	alloc_pgnr = 0;
···
 			}
 
 			for (j = 0; j < blk_pgnr; j++, i++) {
-				bo->page_obj[i].page = pages + j;
+				bo->pages[i] = pages + j;
 			}
 
 			pgnr -= blk_pgnr;
···
 cleanup:
 	alloc_pgnr = i;
 	free_private_bo_pages(bo, alloc_pgnr);
-
-	kfree(bo->page_obj);
-
 	return -ENOMEM;
-}
-
-static void free_private_pages(struct hmm_buffer_object *bo)
-{
-	free_private_bo_pages(bo, bo->pgnr);
-	kfree(bo->page_obj);
 }
 
 static void free_user_pages(struct hmm_buffer_object *bo,
···
 		for (i = 0; i < page_nr; i++)
 			put_page(bo->pages[i]);
 	}
-	kfree(bo->pages);
-	kfree(bo->page_obj);
 }
 
 /*
···
 			    const void __user *userptr)
 {
 	int page_nr;
-	int i;
 	struct vm_area_struct *vma;
-	struct page **pages;
-
-	pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
-	if (unlikely(!pages))
-		return -ENOMEM;
-
-	bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
-				     GFP_KERNEL);
-	if (unlikely(!bo->page_obj)) {
-		kfree(pages);
-		return -ENOMEM;
-	}
 
 	mutex_unlock(&bo->mutex);
 	mmap_read_lock(current->mm);
···
 	mmap_read_unlock(current->mm);
 	if (!vma) {
 		dev_err(atomisp_dev, "find_vma failed\n");
-		kfree(bo->page_obj);
-		kfree(pages);
 		mutex_lock(&bo->mutex);
 		return -EFAULT;
 	}
···
 
 	userptr = untagged_addr(userptr);
 
-	bo->pages = pages;
-
 	if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
 		page_nr = pin_user_pages((unsigned long)userptr, bo->pgnr,
 					 FOLL_LONGTERM | FOLL_WRITE,
-					 pages, NULL);
+					 bo->pages, NULL);
 		bo->mem_type = HMM_BO_MEM_TYPE_PFN;
 	} else {
 		/*Handle frame buffer allocated in user space*/
 		mutex_unlock(&bo->mutex);
 		page_nr = get_user_pages_fast((unsigned long)userptr,
-					      (int)(bo->pgnr), 1, pages);
+					      (int)(bo->pgnr), 1, bo->pages);
 		mutex_lock(&bo->mutex);
 		bo->mem_type = HMM_BO_MEM_TYPE_USER;
 	}
···
 		if (page_nr < 0)
 			page_nr = 0;
 		goto out_of_mem;
-	}
-
-	for (i = 0; i < bo->pgnr; i++) {
-		bo->page_obj[i].page = pages[i];
 	}
 
 	return 0;
···
 	mutex_lock(&bo->mutex);
 	check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
 
+	bo->pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
+	if (unlikely(!bo->pages)) {
+		ret = -ENOMEM;
+		goto alloc_err;
+	}
+
 	/*
 	 * TO DO:
 	 * add HMM_BO_USER type
···
 	return 0;
 
 alloc_err:
+	kfree(bo->pages);
 	mutex_unlock(&bo->mutex);
 	dev_err(atomisp_dev, "alloc pages err...\n");
 	return ret;
···
 	bo->status &= (~HMM_BO_PAGE_ALLOCED);
 
 	if (bo->type == HMM_BO_PRIVATE)
-		free_private_pages(bo);
+		free_private_bo_pages(bo, bo->pgnr);
 	else if (bo->type == HMM_BO_USER)
 		free_user_pages(bo, bo->pgnr);
 	else
 		dev_err(atomisp_dev, "invalid buffer type.\n");
+
+	kfree(bo->pages);
 	mutex_unlock(&bo->mutex);
 
 	return;
···
 	for (i = 0; i < bo->pgnr; i++) {
 		ret =
 		    isp_mmu_map(&bdev->mmu, virt,
-				page_to_phys(bo->page_obj[i].page), 1);
+				page_to_phys(bo->pages[i]), 1);
 		if (ret)
 			goto map_err;
 		virt += (1 << PAGE_SHIFT);
···
 
 void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
 {
-	struct page **pages;
-	int i;
-
 	check_bo_null_return(bo, NULL);
 
 	mutex_lock(&bo->mutex);
···
 		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
 	}
 
-	pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
-	if (unlikely(!pages)) {
-		mutex_unlock(&bo->mutex);
-		return NULL;
-	}
-
-	for (i = 0; i < bo->pgnr; i++)
-		pages[i] = bo->page_obj[i].page;
-
-	bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
+	bo->vmap_addr = vmap(bo->pages, bo->pgnr, VM_MAP,
 			     cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
 	if (unlikely(!bo->vmap_addr)) {
-		kfree(pages);
 		mutex_unlock(&bo->mutex);
 		dev_err(atomisp_dev, "vmap failed...\n");
 		return NULL;
 	}
 	bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
-
-	kfree(pages);
 
 	mutex_unlock(&bo->mutex);
 	return bo->vmap_addr;
···
 
 	virt = vma->vm_start;
 	for (i = 0; i < pgnr; i++) {
-		pfn = page_to_pfn(bo->page_obj[i].page);
+		pfn = page_to_pfn(bo->pages[i]);
 		if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
 			dev_warn(atomisp_dev,
 				 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
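A second consequence visible in the hmm_bo.c hunks above: the bo->pages array is now allocated once in hmm_bo_alloc_pages() and freed once (on the alloc_err path and in hmm_bo_free_pages()), so free_private_pages() and the per-path kfree() calls could be dropped. A minimal userspace model of that single-owner pattern, using stand-in types and a hypothetical fill helper rather than the driver code, might look like this:

#include <stdio.h>
#include <stdlib.h>

struct page { unsigned long pfn; };	/* stand-in for the kernel type */

struct buffer_object {
	unsigned int  pgnr;
	struct page **pages;		/* owned by the alloc/free pair below */
};

/* Hypothetical fill helper: only populates the array it is handed. */
static int fill_pages(struct buffer_object *bo)
{
	for (unsigned int i = 0; i < bo->pgnr; i++) {
		bo->pages[i] = malloc(sizeof(*bo->pages[i]));
		if (!bo->pages[i])
			return -1;
		bo->pages[i]->pfn = i;
	}
	return 0;
}

static int bo_alloc_pages(struct buffer_object *bo)
{
	/* Single allocation site for the array (kmalloc_array in the driver). */
	bo->pages = calloc(bo->pgnr, sizeof(*bo->pages));
	if (!bo->pages)
		return -1;

	if (fill_pages(bo) < 0) {
		/* Single error-path cleanup, mirroring the alloc_err label. */
		for (unsigned int i = 0; i < bo->pgnr; i++)
			free(bo->pages[i]);
		free(bo->pages);
		bo->pages = NULL;
		return -1;
	}
	return 0;
}

static void bo_free_pages(struct buffer_object *bo)
{
	for (unsigned int i = 0; i < bo->pgnr; i++)
		free(bo->pages[i]);
	free(bo->pages);		/* single owner of the array */
	bo->pages = NULL;
}

int main(void)
{
	struct buffer_object bo = { .pgnr = 4 };

	if (bo_alloc_pages(&bo))
		return 1;
	printf("allocated %u page slots, first pfn = %lu\n",
	       bo.pgnr, bo.pages[0]->pfn);
	bo_free_pages(&bo);
	return 0;
}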