
x86/mm/pat: Standardize on memtype_*() prefix for APIs

Half of our memtype APIs are memtype_ prefixed, the other half are _memtype suffixed:

reserve_memtype()
free_memtype()
kernel_map_sync_memtype()
io_reserve_memtype()
io_free_memtype()

memtype_check_insert()
memtype_erase()
memtype_lookup()
memtype_copy_nth_element()

Use prefixes consistently, like most other modern kernel APIs:

reserve_memtype() => memtype_reserve()
free_memtype() => memtype_free()
kernel_map_sync_memtype() => memtype_kernel_map_sync()
io_reserve_memtype() => memtype_reserve_io()
io_free_memtype() => memtype_free_io()

The APIs that already use the memtype_ prefix stay unchanged:

memtype_check_insert()
memtype_erase()
memtype_lookup()
memtype_copy_nth_element()
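
For illustration, a call site using the renamed I/O pair reads like the arch_io_reserve_memtype_wc()/arch_io_free_memtype_wc() wrappers updated in this patch. A minimal sketch only; memtype_wc_example() is a hypothetical name, not a function added here:

	#include <asm/pat.h>

	/* Hypothetical caller, mirroring arch_io_reserve_memtype_wc(): */
	static int memtype_wc_example(resource_size_t start, resource_size_t size)
	{
		enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
		int ret;

		/* Reserve [start, start + size) with a write-combining memtype: */
		ret = memtype_reserve_io(start, start + size, &pcm);
		if (ret)
			return ret;

		/* ... set up the mapping; pcm holds the type actually granted ... */

		/* Release the reservation via the matching prefixed API: */
		memtype_free_io(start, start + size);
		return 0;
	}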

Signed-off-by: Ingo Molnar <mingo@kernel.org>

---
 arch/x86/include/asm/pat.h   | 10 +++++-----
 arch/x86/mm/iomap_32.c       |  4 ++--
 arch/x86/mm/ioremap.c        | 10 +++++-----
 arch/x86/mm/pat/memtype.c    | 44 ++++++++++++++++++++++----------------------
 arch/x86/mm/pat/set_memory.c | 16 ++++++++--------
 5 files changed, 42 insertions(+), 42 deletions(-)

diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -10,17 +10,17 @@
 extern void pat_init(void);
 extern void init_cache_modes(void);
 
-extern int reserve_memtype(u64 start, u64 end,
+extern int memtype_reserve(u64 start, u64 end,
 			   enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
-extern int free_memtype(u64 start, u64 end);
+extern int memtype_free(u64 start, u64 end);
 
-extern int kernel_map_sync_memtype(u64 base, unsigned long size,
+extern int memtype_kernel_map_sync(u64 base, unsigned long size,
 				   enum page_cache_mode pcm);
 
-int io_reserve_memtype(resource_size_t start, resource_size_t end,
+int memtype_reserve_io(resource_size_t start, resource_size_t end,
 		       enum page_cache_mode *pcm);
 
-void io_free_memtype(resource_size_t start, resource_size_t end);
+void memtype_free_io(resource_size_t start, resource_size_t end);
 
 bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn);
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -26,7 +26,7 @@
 	if (!is_io_mapping_possible(base, size))
 		return -EINVAL;
 
-	ret = io_reserve_memtype(base, base + size, &pcm);
+	ret = memtype_reserve_io(base, base + size, &pcm);
 	if (ret)
 		return ret;
 
@@ -40,7 +40,7 @@
 
 void iomap_free(resource_size_t base, unsigned long size)
 {
-	io_free_memtype(base, base + size);
+	memtype_free_io(base, base + size);
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -196,10 +196,10 @@
 	phys_addr &= PHYSICAL_PAGE_MASK;
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
-	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
+	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
 						pcm, &new_pcm);
 	if (retval) {
-		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
+		printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
 		return NULL;
 	}
 
@@ -255,7 +255,7 @@
 	area->phys_addr = phys_addr;
 	vaddr = (unsigned long) area->addr;
 
-	if (kernel_map_sync_memtype(phys_addr, size, pcm))
+	if (memtype_kernel_map_sync(phys_addr, size, pcm))
 		goto err_free_area;
 
 	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
@@ -275,7 +275,7 @@
 err_free_area:
 	free_vm_area(area);
 err_free_memtype:
-	free_memtype(phys_addr, phys_addr + size);
+	memtype_free(phys_addr, phys_addr + size);
 	return NULL;
 }
 
@@ -451,7 +451,7 @@
 		return;
 	}
 
-	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
+	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));
 
 	/* Finally remove it */
 	o = remove_vm_area((void __force *)addr);
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -575,7 +575,7 @@
  * available type in new_type in case of no error. In case of any error
  * it will return a negative return value.
  */
-int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
+int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
 		    enum page_cache_mode *new_type)
 {
 	struct memtype *entry_new;
@@ -638,7 +638,7 @@
 
 	err = memtype_check_insert(entry_new, new_type);
 	if (err) {
-		pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
+		pr_info("x86/PAT: memtype_reserve failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
 			start, end - 1,
 			cattr_name(entry_new->type), cattr_name(req_type));
 		kfree(entry_new);
@@ -649,14 +649,14 @@
 
 	spin_unlock(&memtype_lock);
 
-	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
+	dprintk("memtype_reserve added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
 		start, end - 1, cattr_name(entry_new->type), cattr_name(req_type),
 		new_type ? cattr_name(*new_type) : "-");
 
 	return err;
 }
 
-int free_memtype(u64 start, u64 end)
+int memtype_free(u64 start, u64 end)
 {
 	int is_range_ram;
 	struct memtype *entry_old;
@@ -689,7 +689,7 @@
 
 	kfree(entry_old);
 
-	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);
+	dprintk("memtype_free request [mem %#010Lx-%#010Lx]\n", start, end - 1);
 
 	return 0;
 }
@@ -752,7 +752,7 @@
 EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr);
 
 /**
- * io_reserve_memtype - Request a memory type mapping for a region of memory
+ * memtype_reserve_io - Request a memory type mapping for a region of memory
  * @start: start (physical address) of the region
  * @end: end (physical address) of the region
  * @type: A pointer to memtype, with requested type. On success, requested
@@ -761,7 +761,7 @@
  * On success, returns 0
  * On failure, returns non-zero
  */
-int io_reserve_memtype(resource_size_t start, resource_size_t end,
+int memtype_reserve_io(resource_size_t start, resource_size_t end,
 			enum page_cache_mode *type)
 {
 	resource_size_t size = end - start;
@@ -771,47 +771,47 @@
 
 	WARN_ON_ONCE(iomem_map_sanity_check(start, size));
 
-	ret = reserve_memtype(start, end, req_type, &new_type);
+	ret = memtype_reserve(start, end, req_type, &new_type);
 	if (ret)
 		goto out_err;
 
 	if (!is_new_memtype_allowed(start, size, req_type, new_type))
 		goto out_free;
 
-	if (kernel_map_sync_memtype(start, size, new_type) < 0)
+	if (memtype_kernel_map_sync(start, size, new_type) < 0)
 		goto out_free;
 
 	*type = new_type;
 	return 0;
 
 out_free:
-	free_memtype(start, end);
+	memtype_free(start, end);
 	ret = -EBUSY;
 out_err:
 	return ret;
 }
 
 /**
- * io_free_memtype - Release a memory type mapping for a region of memory
+ * memtype_free_io - Release a memory type mapping for a region of memory
  * @start: start (physical address) of the region
  * @end: end (physical address) of the region
 */
-void io_free_memtype(resource_size_t start, resource_size_t end)
+void memtype_free_io(resource_size_t start, resource_size_t end)
 {
-	free_memtype(start, end);
+	memtype_free(start, end);
 }
 
 int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
 {
 	enum page_cache_mode type = _PAGE_CACHE_MODE_WC;
 
-	return io_reserve_memtype(start, start + size, &type);
+	return memtype_reserve_io(start, start + size, &type);
 }
 EXPORT_SYMBOL(arch_io_reserve_memtype_wc);
 
 void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
 {
-	io_free_memtype(start, start + size);
+	memtype_free_io(start, start + size);
 }
 EXPORT_SYMBOL(arch_io_free_memtype_wc);
 
@@ -871,7 +871,7 @@
  * Change the memory type for the physical address range in kernel identity
  * mapping space if that range is a part of identity map.
  */
-int kernel_map_sync_memtype(u64 base, unsigned long size,
+int memtype_kernel_map_sync(u64 base, unsigned long size,
 			    enum page_cache_mode pcm)
 {
 	unsigned long id_sz;
@@ -901,7 +901,7 @@
 
 /*
  * Internal interface to reserve a range of physical memory with prot.
- * Reserved non RAM regions only and after successful reserve_memtype,
+ * Reserved non RAM regions only and after successful memtype_reserve,
  * this func also keeps identity mapping (if any) in sync with this new prot.
 */
 static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
@@ -938,14 +938,14 @@
 		return 0;
 	}
 
-	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
+	ret = memtype_reserve(paddr, paddr + size, want_pcm, &pcm);
 	if (ret)
 		return ret;
 
 	if (pcm != want_pcm) {
 		if (strict_prot ||
 		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
-			free_memtype(paddr, paddr + size);
+			memtype_free(paddr, paddr + size);
 			pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
 				current->comm, current->pid,
 				cattr_name(want_pcm),
@@ -963,8 +963,8 @@
 					     cachemode2protval(pcm));
 	}
 
-	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
-		free_memtype(paddr, paddr + size);
+	if (memtype_kernel_map_sync(paddr, size, pcm) < 0) {
+		memtype_free(paddr, paddr + size);
 		return -EINVAL;
 	}
 	return 0;
@@ -980,7 +980,7 @@
 
 	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 	if (is_ram == 0)
-		free_memtype(paddr, paddr + size);
+		memtype_free(paddr, paddr + size);
 }
 
 /*
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1801,7 +1801,7 @@
 	/*
 	 * for now UC MINUS. see comments in ioremap()
 	 */
-	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
+	ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
 			      _PAGE_CACHE_MODE_UC_MINUS, NULL);
 	if (ret)
 		goto out_err;
@@ -1813,7 +1813,7 @@
 	return 0;
 
 out_free:
-	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+	memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
 out_err:
 	return ret;
 }
@@ -1839,14 +1839,14 @@
 {
 	int ret;
 
-	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
+	ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
 		_PAGE_CACHE_MODE_WC, NULL);
 	if (ret)
 		return ret;
 
 	ret = _set_memory_wc(addr, numpages);
 	if (ret)
-		free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+		memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
 
 	return ret;
 }
@@ -1873,7 +1873,7 @@
 	if (ret)
 		return ret;
 
-	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+	memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
 	return 0;
 }
 EXPORT_SYMBOL(set_memory_wb);
@@ -2014,7 +2014,7 @@
 			continue;
 		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
-		if (reserve_memtype(start, end, new_type, NULL))
+		if (memtype_reserve(start, end, new_type, NULL))
 			goto err_out;
 	}
 
@@ -2040,7 +2040,7 @@
 			continue;
 		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
-		free_memtype(start, end);
+		memtype_free(start, end);
 	}
 	return -EINVAL;
 }
@@ -2089,7 +2089,7 @@
 			continue;
 		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
-		free_memtype(start, end);
+		memtype_free(start, end);
 	}
 
 	return 0;
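
For reference, the reserve/sync/free sequence with the new names, condensed from reserve_pfn_range() above. This is a sketch of the pattern only; example_reserve_and_sync() is a hypothetical name, not a function added by this patch:

	#include <asm/pat.h>

	/* Hypothetical helper following the reserve_pfn_range() pattern: */
	static int example_reserve_and_sync(u64 paddr, unsigned long size,
					    enum page_cache_mode want_pcm)
	{
		enum page_cache_mode pcm;
		int ret;

		/* Reserve the range, getting back the type actually granted: */
		ret = memtype_reserve(paddr, paddr + size, want_pcm, &pcm);
		if (ret)
			return ret;

		/* Keep the kernel identity mapping in sync with the new type: */
		if (memtype_kernel_map_sync(paddr, size, pcm) < 0) {
			memtype_free(paddr, paddr + size);
			return -EINVAL;
		}

		return 0;
	}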