Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/gmap: make gmap memcg aware

gmap allocations can be attributed to a process.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Acked-by: Janosch Frank <frankja@linux.ibm.com>
Acked-by: Cornelia Huck <cohuck@redhat.com>

+15 -15
arch/s390/mm/gmap.c
··· 2 2 /* 3 3 * KVM guest address space mapping code 4 4 * 5 - * Copyright IBM Corp. 2007, 2016, 2018 5 + * Copyright IBM Corp. 2007, 2020 6 6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 7 7 * David Hildenbrand <david@redhat.com> 8 8 * Janosch Frank <frankja@linux.vnet.ibm.com> ··· 56 56 atype = _ASCE_TYPE_REGION1; 57 57 etype = _REGION1_ENTRY_EMPTY; 58 58 } 59 - gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL); 59 + gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL_ACCOUNT); 60 60 if (!gmap) 61 61 goto out; 62 62 INIT_LIST_HEAD(&gmap->crst_list); 63 63 INIT_LIST_HEAD(&gmap->children); 64 64 INIT_LIST_HEAD(&gmap->pt_list); 65 - INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL); 66 - INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC); 67 - INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC); 65 + INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL_ACCOUNT); 66 + INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC | __GFP_ACCOUNT); 67 + INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC | __GFP_ACCOUNT); 68 68 spin_lock_init(&gmap->guest_table_lock); 69 69 spin_lock_init(&gmap->shadow_lock); 70 70 refcount_set(&gmap->ref_count, 1); 71 - page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER); 71 + page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER); 72 72 if (!page) 73 73 goto out_free; 74 74 page->index = 0; ··· 309 309 unsigned long *new; 310 310 311 311 /* since we dont free the gmap table until gmap_free we can unlock */ 312 - page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER); 312 + page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER); 313 313 if (!page) 314 314 return -ENOMEM; 315 315 new = (unsigned long *) page_to_phys(page); ··· 594 594 if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m) 595 595 return -EFAULT; 596 596 /* Link gmap segment table entry location to page table. 
*/ 597 - rc = radix_tree_preload(GFP_KERNEL); 597 + rc = radix_tree_preload(GFP_KERNEL_ACCOUNT); 598 598 if (rc) 599 599 return rc; 600 600 ptl = pmd_lock(mm, pmd); ··· 1218 1218 vmaddr = __gmap_translate(parent, paddr); 1219 1219 if (IS_ERR_VALUE(vmaddr)) 1220 1220 return vmaddr; 1221 - rmap = kzalloc(sizeof(*rmap), GFP_KERNEL); 1221 + rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT); 1222 1222 if (!rmap) 1223 1223 return -ENOMEM; 1224 1224 rmap->raddr = raddr; 1225 - rc = radix_tree_preload(GFP_KERNEL); 1225 + rc = radix_tree_preload(GFP_KERNEL_ACCOUNT); 1226 1226 if (rc) { 1227 1227 kfree(rmap); 1228 1228 return rc; ··· 1741 1741 1742 1742 BUG_ON(!gmap_is_shadow(sg)); 1743 1743 /* Allocate a shadow region second table */ 1744 - page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER); 1744 + page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER); 1745 1745 if (!page) 1746 1746 return -ENOMEM; 1747 1747 page->index = r2t & _REGION_ENTRY_ORIGIN; ··· 1825 1825 1826 1826 BUG_ON(!gmap_is_shadow(sg)); 1827 1827 /* Allocate a shadow region second table */ 1828 - page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER); 1828 + page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER); 1829 1829 if (!page) 1830 1830 return -ENOMEM; 1831 1831 page->index = r3t & _REGION_ENTRY_ORIGIN; ··· 1909 1909 1910 1910 BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE)); 1911 1911 /* Allocate a shadow segment table */ 1912 - page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER); 1912 + page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER); 1913 1913 if (!page) 1914 1914 return -ENOMEM; 1915 1915 page->index = sgt & _REGION_ENTRY_ORIGIN; ··· 2116 2116 parent = sg->parent; 2117 2117 prot = (pte_val(pte) & _PAGE_PROTECT) ? 
PROT_READ : PROT_WRITE; 2118 2118 2119 - rmap = kzalloc(sizeof(*rmap), GFP_KERNEL); 2119 + rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT); 2120 2120 if (!rmap) 2121 2121 return -ENOMEM; 2122 2122 rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE; ··· 2128 2128 rc = vmaddr; 2129 2129 break; 2130 2130 } 2131 - rc = radix_tree_preload(GFP_KERNEL); 2131 + rc = radix_tree_preload(GFP_KERNEL_ACCOUNT); 2132 2132 if (rc) 2133 2133 break; 2134 2134 rc = -EAGAIN;