Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: kill vma flag VM_RESERVED and mm->reserved_vm counter

A long time ago, in v2.4, VM_RESERVED kept the swapout process off the VMA;
it has since lost its original meaning but still has some effects:

| effect | alternative flags
-+------------------------+---------------------------------------------
1| account as reserved_vm | VM_IO
2| skip in core dump | VM_IO, VM_DONTDUMP
3| do not merge or expand | VM_IO, VM_DONTEXPAND, VM_HUGETLB, VM_PFNMAP
4| do not mlock | VM_IO, VM_DONTEXPAND, VM_HUGETLB, VM_PFNMAP

This patch removes the reserved_vm counter from mm_struct. Seemingly nobody
cares about it; it is not exported to userspace directly, it only
reduces the total_vm shown in /proc.

Thus VM_RESERVED can be replaced with VM_IO or pair VM_DONTEXPAND | VM_DONTDUMP.

remap_pfn_range() and io_remap_pfn_range() set VM_IO|VM_DONTEXPAND|VM_DONTDUMP.
remap_vmalloc_range() sets VM_DONTEXPAND | VM_DONTDUMP.

[akpm@linux-foundation.org: drivers/vfio/pci/vfio_pci.c fixup]
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Carsten Otte <cotte@de.ibm.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Eric Paris <eparis@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Morris <james.l.morris@oracle.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Kentaro Takeda <takedakn@nttdata.co.jp>
Cc: Matt Helsley <matthltc@us.ibm.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Venkatesh Pallipadi <venki@google.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Konstantin Khlebnikov and committed by
Linus Torvalds
314e51b9 0103bd16

+77 -105
+2 -2
Documentation/vm/unevictable-lru.txt
··· 371 371 mlock_fixup() will call make_pages_present() in the hugetlbfs VMA range to 372 372 allocate the huge pages and populate the ptes. 373 373 374 - 3) VMAs with VM_DONTEXPAND or VM_RESERVED are generally userspace mappings of 375 - kernel pages, such as the VDSO page, relay channel pages, etc. These pages 374 + 3) VMAs with VM_DONTEXPAND are generally userspace mappings of kernel pages, 375 + such as the VDSO page, relay channel pages, etc. These pages 376 376 are inherently unevictable and are not managed on the LRU lists. 377 377 mlock_fixup() treats these VMAs the same as hugetlbfs VMAs. It calls 378 378 make_pages_present() to populate the ptes.
+1 -1
arch/alpha/kernel/pci-sysfs.c
··· 26 26 base = sparse ? hose->sparse_io_base : hose->dense_io_base; 27 27 28 28 vma->vm_pgoff += base >> PAGE_SHIFT; 29 - vma->vm_flags |= (VM_IO | VM_RESERVED); 29 + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 30 30 31 31 return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 32 32 vma->vm_end - vma->vm_start,
+1 -1
arch/ia64/kernel/perfmon.c
··· 2307 2307 */ 2308 2308 vma->vm_mm = mm; 2309 2309 vma->vm_file = get_file(filp); 2310 - vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED; 2310 + vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP; 2311 2311 vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */ 2312 2312 2313 2313 /*
+2 -1
arch/ia64/mm/init.c
··· 138 138 vma->vm_mm = current->mm; 139 139 vma->vm_end = PAGE_SIZE; 140 140 vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); 141 - vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED; 141 + vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | 142 + VM_DONTEXPAND | VM_DONTDUMP; 142 143 down_write(&current->mm->mmap_sem); 143 144 if (insert_vm_struct(current->mm, vma)) { 144 145 up_write(&current->mm->mmap_sem);
+1 -1
arch/powerpc/kvm/book3s_hv.c
··· 1183 1183 1184 1184 static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma) 1185 1185 { 1186 - vma->vm_flags |= VM_RESERVED; 1186 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 1187 1187 vma->vm_ops = &kvm_rma_vm_ops; 1188 1188 return 0; 1189 1189 }
+1 -1
arch/sparc/kernel/pci.c
··· 779 779 static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma, 780 780 enum pci_mmap_state mmap_state) 781 781 { 782 - vma->vm_flags |= (VM_IO | VM_RESERVED); 782 + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 783 783 } 784 784 785 785 /* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
+1 -1
arch/unicore32/kernel/process.c
··· 380 380 return install_special_mapping(mm, 0xffff0000, PAGE_SIZE, 381 381 VM_READ | VM_EXEC | 382 382 VM_MAYREAD | VM_MAYEXEC | 383 - VM_RESERVED, 383 + VM_DONTEXPAND | VM_DONTDUMP, 384 384 NULL); 385 385 } 386 386
+1 -2
arch/x86/xen/mmu.c
··· 2451 2451 2452 2452 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP); 2453 2453 2454 - BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) == 2455 - (VM_PFNMAP | VM_RESERVED | VM_IO))); 2454 + BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); 2456 2455 2457 2456 rmd.mfn = mfn; 2458 2457 rmd.prot = prot;
+1 -1
drivers/char/mbcs.c
··· 507 507 508 508 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 509 509 510 - /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */ 510 + /* Remap-pfn-range will mark the range VM_IO */ 511 511 if (remap_pfn_range(vma, 512 512 vma->vm_start, 513 513 __pa(soft->gscr_addr) >> PAGE_SHIFT,
+1 -1
drivers/char/mem.c
··· 322 322 323 323 vma->vm_ops = &mmap_mem_ops; 324 324 325 - /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */ 325 + /* Remap-pfn-range will mark the range VM_IO */ 326 326 if (remap_pfn_range(vma, 327 327 vma->vm_start, 328 328 vma->vm_pgoff,
+1 -1
drivers/char/mspec.c
··· 286 286 atomic_set(&vdata->refcnt, 1); 287 287 vma->vm_private_data = vdata; 288 288 289 - vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND); 289 + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; 290 290 if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED) 291 291 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 292 292 vma->vm_ops = &mspec_vm_ops;
+1 -1
drivers/gpu/drm/drm_gem.c
··· 706 706 goto out_unlock; 707 707 } 708 708 709 - vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND; 709 + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; 710 710 vma->vm_ops = obj->dev->driver->gem_vm_ops; 711 711 vma->vm_private_data = map->handle; 712 712 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+2 -8
drivers/gpu/drm/drm_vm.c
··· 514 514 515 515 vma->vm_ops = &drm_vm_dma_ops; 516 516 517 - vma->vm_flags |= VM_RESERVED; /* Don't swap */ 518 - vma->vm_flags |= VM_DONTEXPAND; 517 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 519 518 520 519 drm_vm_open_locked(dev, vma); 521 520 return 0; ··· 642 643 case _DRM_SHM: 643 644 vma->vm_ops = &drm_vm_shm_ops; 644 645 vma->vm_private_data = (void *)map; 645 - /* Don't let this area swap. Change when 646 - DRM_KERNEL advisory is supported. */ 647 - vma->vm_flags |= VM_RESERVED; 648 646 break; 649 647 case _DRM_SCATTER_GATHER: 650 648 vma->vm_ops = &drm_vm_sg_ops; 651 649 vma->vm_private_data = (void *)map; 652 - vma->vm_flags |= VM_RESERVED; 653 650 vma->vm_page_prot = drm_dma_prot(map->type, vma); 654 651 break; 655 652 default: 656 653 return -EINVAL; /* This should never happen. */ 657 654 } 658 - vma->vm_flags |= VM_RESERVED; /* Don't swap */ 659 - vma->vm_flags |= VM_DONTEXPAND; 655 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 660 656 661 657 drm_vm_open_locked(dev, vma); 662 658 return 0;
+1 -1
drivers/gpu/drm/exynos/exynos_drm_gem.c
··· 500 500 501 501 DRM_DEBUG_KMS("%s\n", __FILE__); 502 502 503 - vma->vm_flags |= (VM_IO | VM_RESERVED); 503 + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 504 504 505 505 update_vm_cache_attr(exynos_gem_obj, vma); 506 506
+1 -2
drivers/gpu/drm/gma500/framebuffer.c
··· 178 178 */ 179 179 vma->vm_ops = &psbfb_vm_ops; 180 180 vma->vm_private_data = (void *)psbfb; 181 - vma->vm_flags |= VM_RESERVED | VM_IO | 182 - VM_MIXEDMAP | VM_DONTEXPAND; 181 + vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP; 183 182 return 0; 184 183 } 185 184
+2 -2
drivers/gpu/drm/ttm/ttm_bo_vm.c
··· 285 285 */ 286 286 287 287 vma->vm_private_data = bo; 288 - vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND; 288 + vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP; 289 289 return 0; 290 290 out_unref: 291 291 ttm_bo_unref(&bo); ··· 300 300 301 301 vma->vm_ops = &ttm_bo_vm_ops; 302 302 vma->vm_private_data = ttm_bo_reference(bo); 303 - vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND; 303 + vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND; 304 304 return 0; 305 305 } 306 306 EXPORT_SYMBOL(ttm_fbdev_mmap);
+1 -1
drivers/gpu/drm/udl/udl_fb.c
··· 243 243 size = 0; 244 244 } 245 245 246 - vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */ 246 + /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ 247 247 return 0; 248 248 } 249 249
+2 -2
drivers/infiniband/hw/ehca/ehca_uverbs.c
··· 117 117 physical = galpas->user.fw_handle; 118 118 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 119 119 ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical); 120 - /* VM_IO | VM_RESERVED are set by remap_pfn_range() */ 120 + /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ 121 121 ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT, 122 122 vma->vm_page_prot); 123 123 if (unlikely(ret)) { ··· 139 139 u64 start, ofs; 140 140 struct page *page; 141 141 142 - vma->vm_flags |= VM_RESERVED; 142 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 143 143 start = vma->vm_start; 144 144 for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) { 145 145 u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
+1 -1
drivers/infiniband/hw/ipath/ipath_file_ops.c
··· 1225 1225 1226 1226 vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT; 1227 1227 vma->vm_ops = &ipath_file_vm_ops; 1228 - vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; 1228 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 1229 1229 ret = 1; 1230 1230 1231 1231 bail:
+1 -1
drivers/infiniband/hw/qib/qib_file_ops.c
··· 971 971 972 972 vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT; 973 973 vma->vm_ops = &qib_file_vm_ops; 974 - vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; 974 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 975 975 ret = 1; 976 976 977 977 bail:
+1 -1
drivers/media/pci/meye/meye.c
··· 1647 1647 1648 1648 vma->vm_ops = &meye_vm_ops; 1649 1649 vma->vm_flags &= ~VM_IO; /* not I/O memory */ 1650 - vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */ 1650 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 1651 1651 vma->vm_private_data = (void *) (offset / gbufsize); 1652 1652 meye_vm_open(vma); 1653 1653
+1 -1
drivers/media/platform/omap/omap_vout.c
··· 911 911 912 912 q->bufs[i]->baddr = vma->vm_start; 913 913 914 - vma->vm_flags |= VM_RESERVED; 914 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 915 915 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 916 916 vma->vm_ops = &omap_vout_vm_ops; 917 917 vma->vm_private_data = (void *) vout;
+1 -1
drivers/media/platform/vino.c
··· 3950 3950 3951 3951 fb->map_count = 1; 3952 3952 3953 - vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED; 3953 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3954 3954 vma->vm_flags &= ~VM_IO; 3955 3955 vma->vm_private_data = fb; 3956 3956 vma->vm_file = file;
+1 -2
drivers/media/usb/sn9c102/sn9c102_core.c
··· 2126 2126 return -EINVAL; 2127 2127 } 2128 2128 2129 - vma->vm_flags |= VM_IO; 2130 - vma->vm_flags |= VM_RESERVED; 2129 + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 2131 2130 2132 2131 pos = cam->frame[i].bufmem; 2133 2132 while (size > 0) { /* size is page-aligned */
+1 -2
drivers/media/usb/usbvision/usbvision-video.c
··· 1108 1108 } 1109 1109 1110 1110 /* VM_IO is eventually going to replace PageReserved altogether */ 1111 - vma->vm_flags |= VM_IO; 1112 - vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */ 1111 + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 1113 1112 1114 1113 pos = usbvision->frame[i].data; 1115 1114 while (size > 0) {
+1 -1
drivers/media/v4l2-core/videobuf-dma-sg.c
··· 582 582 map->count = 1; 583 583 map->q = q; 584 584 vma->vm_ops = &videobuf_vm_ops; 585 - vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED; 585 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 586 586 vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */ 587 587 vma->vm_private_data = map; 588 588 dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
+1 -1
drivers/media/v4l2-core/videobuf-vmalloc.c
··· 270 270 } 271 271 272 272 vma->vm_ops = &videobuf_vm_ops; 273 - vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED; 273 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 274 274 vma->vm_private_data = map; 275 275 276 276 dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
+1 -1
drivers/media/v4l2-core/videobuf2-memops.c
··· 163 163 return ret; 164 164 } 165 165 166 - vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED; 166 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 167 167 vma->vm_private_data = priv; 168 168 vma->vm_ops = vm_ops; 169 169
-2
drivers/misc/carma/carma-fpga.c
··· 1243 1243 return -EINVAL; 1244 1244 } 1245 1245 1246 - /* IO memory (stop cacheing) */ 1247 - vma->vm_flags |= VM_IO | VM_RESERVED; 1248 1246 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1249 1247 1250 1248 return io_remap_pfn_range(vma, vma->vm_start, addr, vsize,
+2 -3
drivers/misc/sgi-gru/grufile.c
··· 108 108 vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) 109 109 return -EINVAL; 110 110 111 - vma->vm_flags |= 112 - (VM_IO | VM_DONTCOPY | VM_LOCKED | VM_DONTEXPAND | VM_PFNMAP | 113 - VM_RESERVED); 111 + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED | 112 + VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; 114 113 vma->vm_page_prot = PAGE_SHARED; 115 114 vma->vm_ops = &gru_vm_ops; 116 115
+1 -1
drivers/mtd/mtdchar.c
··· 1182 1182 return -EINVAL; 1183 1183 if (set_vm_offset(vma, off) < 0) 1184 1184 return -EINVAL; 1185 - vma->vm_flags |= VM_IO | VM_RESERVED; 1185 + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 1186 1186 1187 1187 #ifdef pgprot_noncached 1188 1188 if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
+1 -1
drivers/scsi/sg.c
··· 1257 1257 } 1258 1258 1259 1259 sfp->mmap_called = 1; 1260 - vma->vm_flags |= VM_RESERVED; 1260 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 1261 1261 vma->vm_private_data = sfp; 1262 1262 vma->vm_ops = &sg_mmap_vm_ops; 1263 1263 return 0;
+1 -1
drivers/staging/omapdrm/omap_gem_dmabuf.c
··· 160 160 goto out_unlock; 161 161 } 162 162 163 - vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND; 163 + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; 164 164 vma->vm_ops = obj->dev->driver->gem_vm_ops; 165 165 vma->vm_private_data = obj; 166 166 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+1 -1
drivers/staging/tidspbridge/rmgr/drv_interface.c
··· 261 261 { 262 262 u32 status; 263 263 264 - vma->vm_flags |= VM_RESERVED | VM_IO; 264 + /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ 265 265 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 266 266 267 267 dev_dbg(bridge, "%s: vm filp %p start %lx end %lx page_prot %ulx "
+1 -3
drivers/uio/uio.c
··· 653 653 if (mi < 0) 654 654 return -EINVAL; 655 655 656 - vma->vm_flags |= VM_IO | VM_RESERVED; 657 - 658 656 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 659 657 660 658 return remap_pfn_range(vma, ··· 664 666 665 667 static int uio_mmap_logical(struct vm_area_struct *vma) 666 668 { 667 - vma->vm_flags |= VM_RESERVED; 669 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 668 670 vma->vm_ops = &uio_vm_ops; 669 671 uio_vma_open(vma); 670 672 return 0;
+1 -1
drivers/usb/mon/mon_bin.c
··· 1247 1247 { 1248 1248 /* don't do anything here: "fault" will set up page table entries */ 1249 1249 vma->vm_ops = &mon_bin_vm_ops; 1250 - vma->vm_flags |= VM_RESERVED; 1250 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 1251 1251 vma->vm_private_data = filp->private_data; 1252 1252 mon_bin_vma_open(vma); 1253 1253 return 0;
+1 -1
drivers/video/68328fb.c
··· 400 400 #ifndef MMU 401 401 /* this is uClinux (no MMU) specific code */ 402 402 403 - vma->vm_flags |= VM_RESERVED; 403 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 404 404 vma->vm_start = videomemory; 405 405 406 406 return 0;
+1 -2
drivers/video/aty/atyfb_base.c
··· 1942 1942 off = vma->vm_pgoff << PAGE_SHIFT; 1943 1943 size = vma->vm_end - vma->vm_start; 1944 1944 1945 - /* To stop the swapper from even considering these pages. */ 1946 - vma->vm_flags |= (VM_IO | VM_RESERVED); 1945 + /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ 1947 1946 1948 1947 if (((vma->vm_pgoff == 0) && (size == info->fix.smem_len)) || 1949 1948 ((off == info->fix.smem_len) && (size == PAGE_SIZE)))
+1 -2
drivers/video/fb-puv3.c
··· 653 653 vma->vm_page_prot)) 654 654 return -EAGAIN; 655 655 656 - vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */ 656 + /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ 657 657 return 0; 658 - 659 658 } 660 659 661 660 static struct fb_ops unifb_ops = {
+1 -1
drivers/video/fb_defio.c
··· 166 166 static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma) 167 167 { 168 168 vma->vm_ops = &fb_deferred_io_vm_ops; 169 - vma->vm_flags |= ( VM_RESERVED | VM_DONTEXPAND ); 169 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 170 170 if (!(info->flags & FBINFO_VIRTFB)) 171 171 vma->vm_flags |= VM_IO; 172 172 vma->vm_private_data = info;
+1 -2
drivers/video/fbmem.c
··· 1410 1410 return -EINVAL; 1411 1411 off += start; 1412 1412 vma->vm_pgoff = off >> PAGE_SHIFT; 1413 - /* This is an IO map - tell maydump to skip this VMA */ 1414 - vma->vm_flags |= VM_IO | VM_RESERVED; 1413 + /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range()*/ 1415 1414 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 1416 1415 fb_pgprotect(file, vma, off); 1417 1416 if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+1 -1
drivers/video/gbefb.c
··· 1024 1024 pgprot_val(vma->vm_page_prot) = 1025 1025 pgprot_fb(pgprot_val(vma->vm_page_prot)); 1026 1026 1027 - vma->vm_flags |= VM_IO | VM_RESERVED; 1027 + /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ 1028 1028 1029 1029 /* look for the starting tile */ 1030 1030 tile = &gbe_tiles.cpu[offset >> TILE_SHIFT];
+1 -1
drivers/video/omap2/omapfb/omapfb-main.c
··· 1128 1128 DBG("user mmap region start %lx, len %d, off %lx\n", start, len, off); 1129 1129 1130 1130 vma->vm_pgoff = off >> PAGE_SHIFT; 1131 - vma->vm_flags |= VM_IO | VM_RESERVED; 1131 + /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ 1132 1132 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 1133 1133 vma->vm_ops = &mmap_user_ops; 1134 1134 vma->vm_private_data = rg;
+2 -3
drivers/video/sbuslib.c
··· 57 57 58 58 off = vma->vm_pgoff << PAGE_SHIFT; 59 59 60 - /* To stop the swapper from even considering these pages */ 61 - vma->vm_flags |= (VM_IO | VM_RESERVED); 62 - 60 + /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ 61 + 63 62 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 64 63 65 64 /* Each page, see which map applies */
-1
drivers/video/smscufx.c
··· 803 803 size = 0; 804 804 } 805 805 806 - vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */ 807 806 return 0; 808 807 } 809 808
-1
drivers/video/udlfb.c
··· 345 345 size = 0; 346 346 } 347 347 348 - vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */ 349 348 return 0; 350 349 } 351 350
-1
drivers/video/vermilion/vermilion.c
··· 1018 1018 offset += vinfo->vram_start; 1019 1019 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; 1020 1020 pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT; 1021 - vma->vm_flags |= VM_RESERVED | VM_IO; 1022 1021 if (remap_pfn_range(vma, vma->vm_start, offset >> PAGE_SHIFT, 1023 1022 size, vma->vm_page_prot)) 1024 1023 return -EAGAIN;
-1
drivers/video/vfb.c
··· 439 439 size = 0; 440 440 } 441 441 442 - vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */ 443 442 return 0; 444 443 445 444 }
+1 -1
drivers/xen/gntalloc.c
··· 535 535 536 536 vma->vm_private_data = vm_priv; 537 537 538 - vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; 538 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 539 539 540 540 vma->vm_ops = &gntalloc_vmops; 541 541
+1 -1
drivers/xen/gntdev.c
··· 720 720 721 721 vma->vm_ops = &gntdev_vmops; 722 722 723 - vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND; 723 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 724 724 725 725 if (use_ptemod) 726 726 vma->vm_flags |= VM_DONTCOPY;
+2 -1
drivers/xen/privcmd.c
··· 455 455 { 456 456 /* DONTCOPY is essential for Xen because copy_page_range doesn't know 457 457 * how to recreate these mappings */ 458 - vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP; 458 + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY | 459 + VM_DONTEXPAND | VM_DONTDUMP; 459 460 vma->vm_ops = &privcmd_vm_ops; 460 461 vma->vm_private_data = NULL; 461 462
+1 -1
fs/binfmt_elf.c
··· 1135 1135 } 1136 1136 1137 1137 /* Do not dump I/O mapped devices or special mappings */ 1138 - if (vma->vm_flags & (VM_IO | VM_RESERVED)) 1138 + if (vma->vm_flags & VM_IO) 1139 1139 return 0; 1140 1140 1141 1141 /* By default, dump shared memory if mapped from an anonymous file. */
+1 -1
fs/binfmt_elf_fdpic.c
··· 1205 1205 int dump_ok; 1206 1206 1207 1207 /* Do not dump I/O mapped devices or special mappings */ 1208 - if (vma->vm_flags & (VM_IO | VM_RESERVED)) { 1208 + if (vma->vm_flags & VM_IO) { 1209 1209 kdcore("%08lx: %08lx: no (IO)", vma->vm_start, vma->vm_flags); 1210 1210 return 0; 1211 1211 }
+1 -1
fs/hugetlbfs/inode.c
··· 110 110 * way when do_mmap_pgoff unwinds (may be important on powerpc 111 111 * and ia64). 112 112 */ 113 - vma->vm_flags |= VM_HUGETLB | VM_RESERVED; 113 + vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP; 114 114 vma->vm_ops = &hugetlb_vm_ops; 115 115 116 116 if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
+1 -1
fs/proc/task_mmu.c
··· 54 54 "VmPTE:\t%8lu kB\n" 55 55 "VmSwap:\t%8lu kB\n", 56 56 hiwater_vm << (PAGE_SHIFT-10), 57 - (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10), 57 + total_vm << (PAGE_SHIFT-10), 58 58 mm->locked_vm << (PAGE_SHIFT-10), 59 59 mm->pinned_vm << (PAGE_SHIFT-10), 60 60 hiwater_rss << (PAGE_SHIFT-10),
+1 -1
include/linux/mempolicy.h
··· 239 239 /* Check if a vma is migratable */ 240 240 static inline int vma_migratable(struct vm_area_struct *vma) 241 241 { 242 - if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED)) 242 + if (vma->vm_flags & (VM_IO | VM_HUGETLB | VM_PFNMAP)) 243 243 return 0; 244 244 /* 245 245 * Migration allocates pages in the highest zone. If we cannot
+1 -2
include/linux/mm.h
··· 96 96 97 97 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */ 98 98 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ 99 - #define VM_RESERVED 0x00080000 /* Count as reserved_vm like IO */ 100 99 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ 101 100 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ 102 101 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ ··· 147 148 * Special vmas that are non-mergable, non-mlock()able. 148 149 * Note: mm/huge_memory.c VM_NO_THP depends on this definition. 149 150 */ 150 - #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP) 151 + #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP) 151 152 152 153 /* 153 154 * mapping from the currently active vm_flags protection bits (the
-1
include/linux/mm_types.h
··· 349 349 unsigned long shared_vm; /* Shared pages (files) */ 350 350 unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */ 351 351 unsigned long stack_vm; /* VM_GROWSUP/DOWN */ 352 - unsigned long reserved_vm; /* VM_RESERVED|VM_IO pages */ 353 352 unsigned long def_flags; 354 353 unsigned long nr_ptes; /* Page table pages */ 355 354 unsigned long start_code, end_code, start_data, end_data;
+1 -1
kernel/events/core.c
··· 3671 3671 atomic_inc(&event->mmap_count); 3672 3672 mutex_unlock(&event->mmap_mutex); 3673 3673 3674 - vma->vm_flags |= VM_RESERVED; 3674 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3675 3675 vma->vm_ops = &perf_mmap_vmops; 3676 3676 3677 3677 return ret;
+1 -2
mm/ksm.c
··· 1469 1469 */ 1470 1470 if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | 1471 1471 VM_PFNMAP | VM_IO | VM_DONTEXPAND | 1472 - VM_RESERVED | VM_HUGETLB | 1473 - VM_NONLINEAR | VM_MIXEDMAP)) 1472 + VM_HUGETLB | VM_NONLINEAR | VM_MIXEDMAP)) 1474 1473 return 0; /* just ignore the advice */ 1475 1474 1476 1475 #ifdef VM_SAO
+5 -6
mm/memory.c
··· 2297 2297 * rest of the world about it: 2298 2298 * VM_IO tells people not to look at these pages 2299 2299 * (accesses can have side effects). 2300 - * VM_RESERVED is specified all over the place, because 2301 - * in 2.4 it kept swapout's vma scan off this vma; but 2302 - * in 2.6 the LRU scan won't even find its pages, so this 2303 - * flag means no more than count its pages in reserved_vm, 2304 - * and omit it from core dump, even when VM_IO turned off. 2305 2300 * VM_PFNMAP tells the core MM that the base pages are just 2306 2301 * raw PFN mappings, and do not have a "struct page" associated 2307 2302 * with them. 2303 + * VM_DONTEXPAND 2304 + * Disable vma merging and expanding with mremap(). 2305 + * VM_DONTDUMP 2306 + * Omit vma from core dump, even when VM_IO turned off. 2308 2307 * 2309 2308 * There's a horrible special case to handle copy-on-write 2310 2309 * behaviour that some programs depend on. We mark the "original" ··· 2320 2321 if (err) 2321 2322 return -EINVAL; 2322 2323 2323 - vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; 2324 + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; 2324 2325 2325 2326 BUG_ON(addr >= end); 2326 2327 pfn -= addr >> PAGE_SHIFT;
+1 -1
mm/mlock.c
··· 227 227 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 228 228 goto no_mlock; 229 229 230 - if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) || 230 + if (!((vma->vm_flags & VM_DONTEXPAND) || 231 231 is_vm_hugetlb_page(vma) || 232 232 vma == get_gate_vma(current->mm))) { 233 233
-2
mm/mmap.c
··· 945 945 mm->exec_vm += pages; 946 946 } else if (flags & stack_flags) 947 947 mm->stack_vm += pages; 948 - if (flags & (VM_RESERVED|VM_IO)) 949 - mm->reserved_vm += pages; 950 948 } 951 949 #endif /* CONFIG_PROC_FS */ 952 950
+1 -1
mm/nommu.c
··· 1811 1811 if (addr != (pfn << PAGE_SHIFT)) 1812 1812 return -EINVAL; 1813 1813 1814 - vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; 1814 + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; 1815 1815 return 0; 1816 1816 } 1817 1817 EXPORT_SYMBOL(remap_pfn_range);
+1 -2
mm/vmalloc.c
··· 2163 2163 usize -= PAGE_SIZE; 2164 2164 } while (usize > 0); 2165 2165 2166 - /* Prevent "things" like memory migration? VM_flags need a cleanup... */ 2167 - vma->vm_flags |= VM_RESERVED; 2166 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 2168 2167 2169 2168 return 0; 2170 2169 }
+1 -1
security/selinux/selinuxfs.c
··· 485 485 return -EACCES; 486 486 } 487 487 488 - vma->vm_flags |= VM_RESERVED; 488 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 489 489 vma->vm_ops = &sel_mmap_policy_ops; 490 490 491 491 return 0;
+3 -3
sound/core/pcm_native.c
··· 3039 3039 return -EINVAL; 3040 3040 area->vm_ops = &snd_pcm_vm_ops_status; 3041 3041 area->vm_private_data = substream; 3042 - area->vm_flags |= VM_RESERVED; 3042 + area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3043 3043 return 0; 3044 3044 } 3045 3045 ··· 3076 3076 return -EINVAL; 3077 3077 area->vm_ops = &snd_pcm_vm_ops_control; 3078 3078 area->vm_private_data = substream; 3079 - area->vm_flags |= VM_RESERVED; 3079 + area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3080 3080 return 0; 3081 3081 } 3082 3082 #else /* ! coherent mmap */ ··· 3170 3170 int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream, 3171 3171 struct vm_area_struct *area) 3172 3172 { 3173 - area->vm_flags |= VM_RESERVED; 3173 + area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3174 3174 #ifdef ARCH_HAS_DMA_MMAP_COHERENT 3175 3175 if (!substream->ops->page && 3176 3176 substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
+1 -1
sound/usb/usx2y/us122l.c
··· 262 262 } 263 263 264 264 area->vm_ops = &usb_stream_hwdep_vm_ops; 265 - area->vm_flags |= VM_RESERVED; 265 + area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 266 266 area->vm_private_data = us122l; 267 267 atomic_inc(&us122l->mmap_count); 268 268 out:
+1 -1
sound/usb/usx2y/usX2Yhwdep.c
··· 82 82 us428->us428ctls_sharedmem->CtlSnapShotLast = -2; 83 83 } 84 84 area->vm_ops = &us428ctls_vm_ops; 85 - area->vm_flags |= VM_RESERVED | VM_DONTEXPAND; 85 + area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 86 86 area->vm_private_data = hw->private_data; 87 87 return 0; 88 88 }
+1 -1
sound/usb/usx2y/usx2yhwdeppcm.c
··· 723 723 return -ENODEV; 724 724 } 725 725 area->vm_ops = &snd_usX2Y_hwdep_pcm_vm_ops; 726 - area->vm_flags |= VM_RESERVED | VM_DONTEXPAND; 726 + area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 727 727 area->vm_private_data = hw->private_data; 728 728 return 0; 729 729 }