Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fs: introduce f_op->mmap_capabilities for nommu mmap support

Since "BDI: Provide backing device capability information [try #3]" the
backing_dev_info structure also provides flags for the kind of mmap
operation available in a nommu environment, which is entirely unrelated
to its original purpose.

Introduce a new nommu-only file operation to provide this information to
the nommu mmap code instead. Splitting this from the backing_dev_info
structure allows us to remove lots of backing_dev_info instances that aren't
otherwise needed, and entirely gets rid of the concept of providing a
backing_dev_info for a character device. It also removes the need for
the mtd_inodefs filesystem.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Tejun Heo <tj@kernel.org>
Acked-by: Brian Norris <computersforpeace@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>

authored by

Christoph Hellwig and committed by
Jens Axboe
b4caecd4 97b713ba

+169 -346
+4 -4
Documentation/nommu-mmap.txt
··· 43 43 even if this was created by another process. 44 44 45 45 - If possible, the file mapping will be directly on the backing device 46 - if the backing device has the BDI_CAP_MAP_DIRECT capability and 46 + if the backing device has the NOMMU_MAP_DIRECT capability and 47 47 appropriate mapping protection capabilities. Ramfs, romfs, cramfs 48 48 and mtd might all permit this. 49 49 50 50 - If the backing device device can't or won't permit direct sharing, 51 - but does have the BDI_CAP_MAP_COPY capability, then a copy of the 51 + but does have the NOMMU_MAP_COPY capability, then a copy of the 52 52 appropriate bit of the file will be read into a contiguous bit of 53 53 memory and any extraneous space beyond the EOF will be cleared 54 54 ··· 220 220 221 221 The file->f_op->mmap() operation will be called to actually inaugurate the 222 222 mapping. It can be rejected at that point. Returning the ENOSYS error will 223 - cause the mapping to be copied instead if BDI_CAP_MAP_COPY is specified. 223 + cause the mapping to be copied instead if NOMMU_MAP_COPY is specified. 224 224 225 225 The vm_ops->close() routine will be invoked when the last mapping on a chardev 226 226 is removed. An existing mapping will be shared, partially or not, if possible ··· 232 232 might try directing the call to a secondary driver which turns out not to 233 233 implement it. Such is the case for the framebuffer driver which attempts to 234 234 direct the call to the device-specific driver. Under such circumstances, the 235 - mapping request will be rejected if BDI_CAP_MAP_COPY is not specified, and a 235 + mapping request will be rejected if NOMMU_MAP_COPY is not specified, and a 236 236 copy mapped otherwise. 237 237 238 238 IMPORTANT NOTE:
+1 -1
block/blk-core.c
··· 607 607 q->backing_dev_info.ra_pages = 608 608 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 609 609 q->backing_dev_info.state = 0; 610 - q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; 610 + q->backing_dev_info.capabilities = 0; 611 611 q->backing_dev_info.name = "block"; 612 612 q->node = node_id; 613 613
+32 -32
drivers/char/mem.c
··· 287 287 return pgoff << PAGE_SHIFT; 288 288 } 289 289 290 + /* permit direct mmap, for read, write or exec */ 291 + static unsigned memory_mmap_capabilities(struct file *file) 292 + { 293 + return NOMMU_MAP_DIRECT | 294 + NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC; 295 + } 296 + 297 + static unsigned zero_mmap_capabilities(struct file *file) 298 + { 299 + return NOMMU_MAP_COPY; 300 + } 301 + 290 302 /* can't do an in-place private mapping if there's no MMU */ 291 303 static inline int private_mapping_ok(struct vm_area_struct *vma) 292 304 { 293 305 return vma->vm_flags & VM_MAYSHARE; 294 306 } 295 307 #else 296 - #define get_unmapped_area_mem NULL 297 308 298 309 static inline int private_mapping_ok(struct vm_area_struct *vma) 299 310 { ··· 732 721 .write = write_mem, 733 722 .mmap = mmap_mem, 734 723 .open = open_mem, 724 + #ifndef CONFIG_MMU 735 725 .get_unmapped_area = get_unmapped_area_mem, 726 + .mmap_capabilities = memory_mmap_capabilities, 727 + #endif 736 728 }; 737 729 738 730 #ifdef CONFIG_DEVKMEM ··· 745 731 .write = write_kmem, 746 732 .mmap = mmap_kmem, 747 733 .open = open_kmem, 734 + #ifndef CONFIG_MMU 748 735 .get_unmapped_area = get_unmapped_area_mem, 736 + .mmap_capabilities = memory_mmap_capabilities, 737 + #endif 749 738 }; 750 739 #endif 751 740 ··· 777 760 .read_iter = read_iter_zero, 778 761 .aio_write = aio_write_zero, 779 762 .mmap = mmap_zero, 780 - }; 781 - 782 - /* 783 - * capabilities for /dev/zero 784 - * - permits private mappings, "copies" are taken of the source of zeros 785 - * - no writeback happens 786 - */ 787 - static struct backing_dev_info zero_bdi = { 788 - .name = "char/mem", 789 - .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK, 763 + #ifndef CONFIG_MMU 764 + .mmap_capabilities = zero_mmap_capabilities, 765 + #endif 790 766 }; 791 767 792 768 static const struct file_operations full_fops = { ··· 793 783 const char *name; 794 784 umode_t mode; 795 785 const struct file_operations *fops; 796 - 
struct backing_dev_info *dev_info; 786 + fmode_t fmode; 797 787 } devlist[] = { 798 - [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi }, 788 + [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET }, 799 789 #ifdef CONFIG_DEVKMEM 800 - [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi }, 790 + [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET }, 801 791 #endif 802 - [3] = { "null", 0666, &null_fops, NULL }, 792 + [3] = { "null", 0666, &null_fops, 0 }, 803 793 #ifdef CONFIG_DEVPORT 804 - [4] = { "port", 0, &port_fops, NULL }, 794 + [4] = { "port", 0, &port_fops, 0 }, 805 795 #endif 806 - [5] = { "zero", 0666, &zero_fops, &zero_bdi }, 807 - [7] = { "full", 0666, &full_fops, NULL }, 808 - [8] = { "random", 0666, &random_fops, NULL }, 809 - [9] = { "urandom", 0666, &urandom_fops, NULL }, 796 + [5] = { "zero", 0666, &zero_fops, 0 }, 797 + [7] = { "full", 0666, &full_fops, 0 }, 798 + [8] = { "random", 0666, &random_fops, 0 }, 799 + [9] = { "urandom", 0666, &urandom_fops, 0 }, 810 800 #ifdef CONFIG_PRINTK 811 - [11] = { "kmsg", 0644, &kmsg_fops, NULL }, 801 + [11] = { "kmsg", 0644, &kmsg_fops, 0 }, 812 802 #endif 813 803 }; 814 804 ··· 826 816 return -ENXIO; 827 817 828 818 filp->f_op = dev->fops; 829 - if (dev->dev_info) 830 - filp->f_mapping->backing_dev_info = dev->dev_info; 831 - 832 - /* Is /dev/mem or /dev/kmem ? */ 833 - if (dev->dev_info == &directly_mappable_cdev_bdi) 834 - filp->f_mode |= FMODE_UNSIGNED_OFFSET; 819 + filp->f_mode |= dev->fmode; 835 820 836 821 if (dev->fops->open) 837 822 return dev->fops->open(inode, filp); ··· 851 846 static int __init chr_dev_init(void) 852 847 { 853 848 int minor; 854 - int err; 855 - 856 - err = bdi_init(&zero_bdi); 857 - if (err) 858 - return err; 859 849 860 850 if (register_chrdev(MEM_MAJOR, "mem", &memory_fops)) 861 851 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
+10 -62
drivers/mtd/mtdchar.c
··· 49 49 */ 50 50 struct mtd_file_info { 51 51 struct mtd_info *mtd; 52 - struct inode *ino; 53 52 enum mtd_file_modes mode; 54 53 }; 55 54 ··· 58 59 return fixed_size_llseek(file, offset, orig, mfi->mtd->size); 59 60 } 60 61 61 - static int count; 62 - static struct vfsmount *mnt; 63 - static struct file_system_type mtd_inodefs_type; 64 - 65 62 static int mtdchar_open(struct inode *inode, struct file *file) 66 63 { 67 64 int minor = iminor(inode); ··· 65 70 int ret = 0; 66 71 struct mtd_info *mtd; 67 72 struct mtd_file_info *mfi; 68 - struct inode *mtd_ino; 69 73 70 74 pr_debug("MTD_open\n"); 71 75 72 76 /* You can't open the RO devices RW */ 73 77 if ((file->f_mode & FMODE_WRITE) && (minor & 1)) 74 78 return -EACCES; 75 - 76 - ret = simple_pin_fs(&mtd_inodefs_type, &mnt, &count); 77 - if (ret) 78 - return ret; 79 79 80 80 mutex_lock(&mtd_mutex); 81 81 mtd = get_mtd_device(NULL, devnum); ··· 85 95 goto out1; 86 96 } 87 97 88 - mtd_ino = iget_locked(mnt->mnt_sb, devnum); 89 - if (!mtd_ino) { 90 - ret = -ENOMEM; 91 - goto out1; 92 - } 93 - if (mtd_ino->i_state & I_NEW) { 94 - mtd_ino->i_private = mtd; 95 - mtd_ino->i_mode = S_IFCHR; 96 - mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info; 97 - unlock_new_inode(mtd_ino); 98 - } 99 - file->f_mapping = mtd_ino->i_mapping; 100 - 101 98 /* You can't open it RW if it's not a writeable device */ 102 99 if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) { 103 100 ret = -EACCES; 104 - goto out2; 101 + goto out1; 105 102 } 106 103 107 104 mfi = kzalloc(sizeof(*mfi), GFP_KERNEL); 108 105 if (!mfi) { 109 106 ret = -ENOMEM; 110 - goto out2; 107 + goto out1; 111 108 } 112 - mfi->ino = mtd_ino; 113 109 mfi->mtd = mtd; 114 110 file->private_data = mfi; 115 111 mutex_unlock(&mtd_mutex); 116 112 return 0; 117 113 118 - out2: 119 - iput(mtd_ino); 120 114 out1: 121 115 put_mtd_device(mtd); 122 116 out: 123 117 mutex_unlock(&mtd_mutex); 124 - simple_release_fs(&mnt, &count); 125 118 return ret; 126 119 } /* 
mtdchar_open */ 127 120 ··· 121 148 if ((file->f_mode & FMODE_WRITE)) 122 149 mtd_sync(mtd); 123 150 124 - iput(mfi->ino); 125 - 126 151 put_mtd_device(mtd); 127 152 file->private_data = NULL; 128 153 kfree(mfi); 129 - simple_release_fs(&mnt, &count); 130 154 131 155 return 0; 132 156 } /* mtdchar_close */ ··· 1087 1117 ret = mtd_get_unmapped_area(mtd, len, offset, flags); 1088 1118 return ret == -EOPNOTSUPP ? -ENODEV : ret; 1089 1119 } 1120 + 1121 + static unsigned mtdchar_mmap_capabilities(struct file *file) 1122 + { 1123 + struct mtd_file_info *mfi = file->private_data; 1124 + 1125 + return mtd_mmap_capabilities(mfi->mtd); 1126 + } 1090 1127 #endif 1091 1128 1092 1129 /* ··· 1137 1160 .mmap = mtdchar_mmap, 1138 1161 #ifndef CONFIG_MMU 1139 1162 .get_unmapped_area = mtdchar_get_unmapped_area, 1163 + .mmap_capabilities = mtdchar_mmap_capabilities, 1140 1164 #endif 1141 1165 }; 1142 - 1143 - static const struct super_operations mtd_ops = { 1144 - .drop_inode = generic_delete_inode, 1145 - .statfs = simple_statfs, 1146 - }; 1147 - 1148 - static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type, 1149 - int flags, const char *dev_name, void *data) 1150 - { 1151 - return mount_pseudo(fs_type, "mtd_inode:", &mtd_ops, NULL, MTD_INODE_FS_MAGIC); 1152 - } 1153 - 1154 - static struct file_system_type mtd_inodefs_type = { 1155 - .name = "mtd_inodefs", 1156 - .mount = mtd_inodefs_mount, 1157 - .kill_sb = kill_anon_super, 1158 - }; 1159 - MODULE_ALIAS_FS("mtd_inodefs"); 1160 1166 1161 1167 int __init init_mtdchar(void) 1162 1168 { ··· 1153 1193 return ret; 1154 1194 } 1155 1195 1156 - ret = register_filesystem(&mtd_inodefs_type); 1157 - if (ret) { 1158 - pr_err("Can't register mtd_inodefs filesystem, error %d\n", 1159 - ret); 1160 - goto err_unregister_chdev; 1161 - } 1162 - 1163 - return ret; 1164 - 1165 - err_unregister_chdev: 1166 - __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); 1167 1196 return ret; 1168 1197 } 1169 1198 1170 1199 void 
__exit cleanup_mtdchar(void) 1171 1200 { 1172 - unregister_filesystem(&mtd_inodefs_type); 1173 1201 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); 1174 1202 } 1175 1203
-10
drivers/mtd/mtdconcat.c
··· 732 732 733 733 concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks; 734 734 735 - concat->mtd.backing_dev_info = subdev[0]->backing_dev_info; 736 - 737 735 concat->subdev[0] = subdev[0]; 738 736 739 737 for (i = 1; i < num_devs; i++) { ··· 758 760 concat->mtd.flags |= 759 761 subdev[i]->flags & MTD_WRITEABLE; 760 762 } 761 - 762 - /* only permit direct mapping if the BDIs are all the same 763 - * - copy-mapping is still permitted 764 - */ 765 - if (concat->mtd.backing_dev_info != 766 - subdev[i]->backing_dev_info) 767 - concat->mtd.backing_dev_info = 768 - &default_backing_dev_info; 769 763 770 764 concat->mtd.size += subdev[i]->size; 771 765 concat->mtd.ecc_stats.badblocks +=
+22 -58
drivers/mtd/mtdcore.c
··· 43 43 44 44 #include "mtdcore.h" 45 45 46 - /* 47 - * backing device capabilities for non-mappable devices (such as NAND flash) 48 - * - permits private mappings, copies are taken of the data 49 - */ 50 - static struct backing_dev_info mtd_bdi_unmappable = { 51 - .capabilities = BDI_CAP_MAP_COPY, 52 - }; 53 - 54 - /* 55 - * backing device capabilities for R/O mappable devices (such as ROM) 56 - * - permits private mappings, copies are taken of the data 57 - * - permits non-writable shared mappings 58 - */ 59 - static struct backing_dev_info mtd_bdi_ro_mappable = { 60 - .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | 61 - BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP), 62 - }; 63 - 64 - /* 65 - * backing device capabilities for writable mappable devices (such as RAM) 66 - * - permits private mappings, copies are taken of the data 67 - * - permits non-writable shared mappings 68 - */ 69 - static struct backing_dev_info mtd_bdi_rw_mappable = { 70 - .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | 71 - BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP | 72 - BDI_CAP_WRITE_MAP), 46 + static struct backing_dev_info mtd_bdi = { 73 47 }; 74 48 75 49 static int mtd_cls_suspend(struct device *dev, pm_message_t state); ··· 339 365 .release = mtd_release, 340 366 }; 341 367 368 + #ifndef CONFIG_MMU 369 + unsigned mtd_mmap_capabilities(struct mtd_info *mtd) 370 + { 371 + switch (mtd->type) { 372 + case MTD_RAM: 373 + return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC | 374 + NOMMU_MAP_READ | NOMMU_MAP_WRITE; 375 + case MTD_ROM: 376 + return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC | 377 + NOMMU_MAP_READ; 378 + default: 379 + return NOMMU_MAP_COPY; 380 + } 381 + } 382 + #endif 383 + 342 384 /** 343 385 * add_mtd_device - register an MTD device 344 386 * @mtd: pointer to new MTD device info structure ··· 370 380 struct mtd_notifier *not; 371 381 int i, error; 372 382 373 - if (!mtd->backing_dev_info) { 374 - switch (mtd->type) { 375 - case MTD_RAM: 376 - 
mtd->backing_dev_info = &mtd_bdi_rw_mappable; 377 - break; 378 - case MTD_ROM: 379 - mtd->backing_dev_info = &mtd_bdi_ro_mappable; 380 - break; 381 - default: 382 - mtd->backing_dev_info = &mtd_bdi_unmappable; 383 - break; 384 - } 385 - } 383 + mtd->backing_dev_info = &mtd_bdi; 386 384 387 385 BUG_ON(mtd->writesize == 0); 388 386 mutex_lock(&mtd_table_mutex); ··· 1215 1237 if (ret) 1216 1238 goto err_reg; 1217 1239 1218 - ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap"); 1240 + ret = mtd_bdi_init(&mtd_bdi, "mtd"); 1219 1241 if (ret) 1220 - goto err_bdi1; 1221 - 1222 - ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap"); 1223 - if (ret) 1224 - goto err_bdi2; 1225 - 1226 - ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap"); 1227 - if (ret) 1228 - goto err_bdi3; 1242 + goto err_bdi; 1229 1243 1230 1244 proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops); 1231 1245 ··· 1230 1260 out_procfs: 1231 1261 if (proc_mtd) 1232 1262 remove_proc_entry("mtd", NULL); 1233 - err_bdi3: 1234 - bdi_destroy(&mtd_bdi_ro_mappable); 1235 - err_bdi2: 1236 - bdi_destroy(&mtd_bdi_unmappable); 1237 - err_bdi1: 1263 + err_bdi: 1238 1264 class_unregister(&mtd_class); 1239 1265 err_reg: 1240 1266 pr_err("Error registering mtd class or bdi: %d\n", ret); ··· 1243 1277 if (proc_mtd) 1244 1278 remove_proc_entry("mtd", NULL); 1245 1279 class_unregister(&mtd_class); 1246 - bdi_destroy(&mtd_bdi_unmappable); 1247 - bdi_destroy(&mtd_bdi_ro_mappable); 1248 - bdi_destroy(&mtd_bdi_rw_mappable); 1280 + bdi_destroy(&mtd_bdi); 1249 1281 } 1250 1282 1251 1283 module_init(init_mtd);
-1
drivers/mtd/mtdpart.c
··· 378 378 379 379 slave->mtd.name = name; 380 380 slave->mtd.owner = master->owner; 381 - slave->mtd.backing_dev_info = master->backing_dev_info; 382 381 383 382 /* NOTE: we don't arrange MTDs as a tree; it'd be error-prone 384 383 * to have the same data be in two different partitions.
+1 -1
drivers/staging/lustre/lustre/llite/llite_lib.c
··· 987 987 if (err) 988 988 goto out_free; 989 989 lsi->lsi_flags |= LSI_BDI_INITIALIZED; 990 - lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY; 990 + lsi->lsi_bdi.capabilities = 0; 991 991 err = ll_bdi_register(&lsi->lsi_bdi); 992 992 if (err) 993 993 goto out_free;
+1 -1
fs/9p/v9fs.c
··· 335 335 } 336 336 init_rwsem(&v9ses->rename_sem); 337 337 338 - rc = bdi_setup_and_register(&v9ses->bdi, "9p", BDI_CAP_MAP_COPY); 338 + rc = bdi_setup_and_register(&v9ses->bdi, "9p"); 339 339 if (rc) { 340 340 kfree(v9ses->aname); 341 341 kfree(v9ses->uname);
+1 -1
fs/afs/volume.c
··· 106 106 volume->cell = params->cell; 107 107 volume->vid = vlocation->vldb.vid[params->type]; 108 108 109 - ret = bdi_setup_and_register(&volume->bdi, "afs", BDI_CAP_MAP_COPY); 109 + ret = bdi_setup_and_register(&volume->bdi, "afs"); 110 110 if (ret) 111 111 goto error_bdi; 112 112
+1 -13
fs/aio.c
··· 165 165 static const struct file_operations aio_ring_fops; 166 166 static const struct address_space_operations aio_ctx_aops; 167 167 168 - /* Backing dev info for aio fs. 169 - * -no dirty page accounting or writeback happens 170 - */ 171 - static struct backing_dev_info aio_fs_backing_dev_info = { 172 - .name = "aiofs", 173 - .state = 0, 174 - .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY, 175 - }; 176 - 177 168 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) 178 169 { 179 170 struct qstr this = QSTR_INIT("[aio]", 5); ··· 176 185 177 186 inode->i_mapping->a_ops = &aio_ctx_aops; 178 187 inode->i_mapping->private_data = ctx; 179 - inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info; 188 + inode->i_mapping->backing_dev_info = &noop_backing_dev_info; 180 189 inode->i_size = PAGE_SIZE * nr_pages; 181 190 182 191 path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this); ··· 220 229 aio_mnt = kern_mount(&aio_fs); 221 230 if (IS_ERR(aio_mnt)) 222 231 panic("Failed to create aio fs mount."); 223 - 224 - if (bdi_init(&aio_fs_backing_dev_info)) 225 - panic("Failed to init aio fs backing dev info."); 226 232 227 233 kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); 228 234 kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+1 -2
fs/btrfs/disk-io.c
··· 1715 1715 { 1716 1716 int err; 1717 1717 1718 - bdi->capabilities = BDI_CAP_MAP_COPY; 1719 - err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY); 1718 + err = bdi_setup_and_register(bdi, "btrfs"); 1720 1719 if (err) 1721 1720 return err; 1722 1721
-24
fs/char_dev.c
··· 24 24 25 25 #include "internal.h" 26 26 27 - /* 28 - * capabilities for /dev/mem, /dev/kmem and similar directly mappable character 29 - * devices 30 - * - permits shared-mmap for read, write and/or exec 31 - * - does not permit private mmap in NOMMU mode (can't do COW) 32 - * - no readahead or I/O queue unplugging required 33 - */ 34 - struct backing_dev_info directly_mappable_cdev_bdi = { 35 - .name = "char", 36 - .capabilities = ( 37 - #ifdef CONFIG_MMU 38 - /* permit private copies of the data to be taken */ 39 - BDI_CAP_MAP_COPY | 40 - #endif 41 - /* permit direct mmap, for read, write or exec */ 42 - BDI_CAP_MAP_DIRECT | 43 - BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP | 44 - /* no writeback happens */ 45 - BDI_CAP_NO_ACCT_AND_WRITEBACK), 46 - }; 47 - 48 27 static struct kobj_map *cdev_map; 49 28 50 29 static DEFINE_MUTEX(chrdevs_lock); ··· 554 575 void __init chrdev_init(void) 555 576 { 556 577 cdev_map = kobj_map_init(base_probe, &chrdevs_lock); 557 - if (bdi_init(&directly_mappable_cdev_bdi)) 558 - panic("Failed to init directly mappable cdev bdi"); 559 578 } 560 579 561 580 ··· 567 590 EXPORT_SYMBOL(cdev_add); 568 591 EXPORT_SYMBOL(__register_chrdev); 569 592 EXPORT_SYMBOL(__unregister_chrdev); 570 - EXPORT_SYMBOL(directly_mappable_cdev_bdi);
+1 -1
fs/cifs/connect.c
··· 3446 3446 int referral_walks_count = 0; 3447 3447 #endif 3448 3448 3449 - rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); 3449 + rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs"); 3450 3450 if (rc) 3451 3451 return rc; 3452 3452
+1 -1
fs/coda/inode.c
··· 183 183 goto unlock_out; 184 184 } 185 185 186 - error = bdi_setup_and_register(&vc->bdi, "coda", BDI_CAP_MAP_COPY); 186 + error = bdi_setup_and_register(&vc->bdi, "coda"); 187 187 if (error) 188 188 goto unlock_out; 189 189
-2
fs/configfs/configfs_internal.h
··· 70 70 71 71 extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *); 72 72 extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *)); 73 - extern int configfs_inode_init(void); 74 - extern void configfs_inode_exit(void); 75 73 76 74 extern int configfs_create_file(struct config_item *, const struct configfs_attribute *); 77 75 extern int configfs_make_dirent(struct configfs_dirent *,
+1 -17
fs/configfs/inode.c
··· 50 50 .write_end = simple_write_end, 51 51 }; 52 52 53 - static struct backing_dev_info configfs_backing_dev_info = { 54 - .name = "configfs", 55 - .ra_pages = 0, /* No readahead */ 56 - .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, 57 - }; 58 - 59 53 static const struct inode_operations configfs_inode_operations ={ 60 54 .setattr = configfs_setattr, 61 55 }; ··· 131 137 if (inode) { 132 138 inode->i_ino = get_next_ino(); 133 139 inode->i_mapping->a_ops = &configfs_aops; 134 - inode->i_mapping->backing_dev_info = &configfs_backing_dev_info; 140 + inode->i_mapping->backing_dev_info = &noop_backing_dev_info; 135 141 inode->i_op = &configfs_inode_operations; 136 142 137 143 if (sd->s_iattr) { ··· 276 282 } 277 283 } 278 284 mutex_unlock(&dir->d_inode->i_mutex); 279 - } 280 - 281 - int __init configfs_inode_init(void) 282 - { 283 - return bdi_init(&configfs_backing_dev_info); 284 - } 285 - 286 - void configfs_inode_exit(void) 287 - { 288 - bdi_destroy(&configfs_backing_dev_info); 289 285 }
+2 -9
fs/configfs/mount.c
··· 145 145 if (!config_kobj) 146 146 goto out2; 147 147 148 - err = configfs_inode_init(); 148 + err = register_filesystem(&configfs_fs_type); 149 149 if (err) 150 150 goto out3; 151 151 152 - err = register_filesystem(&configfs_fs_type); 153 - if (err) 154 - goto out4; 155 - 156 152 return 0; 157 - out4: 158 - pr_err("Unable to register filesystem!\n"); 159 - configfs_inode_exit(); 160 153 out3: 154 + pr_err("Unable to register filesystem!\n"); 161 155 kobject_put(config_kobj); 162 156 out2: 163 157 kmem_cache_destroy(configfs_dir_cachep); ··· 166 172 kobject_put(config_kobj); 167 173 kmem_cache_destroy(configfs_dir_cachep); 168 174 configfs_dir_cachep = NULL; 169 - configfs_inode_exit(); 170 175 } 171 176 172 177 MODULE_AUTHOR("Oracle");
+1 -1
fs/ecryptfs/main.c
··· 520 520 goto out; 521 521 } 522 522 523 - rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY); 523 + rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs"); 524 524 if (rc) 525 525 goto out1; 526 526
+1 -1
fs/exofs/super.c
··· 836 836 goto free_sbi; 837 837 } 838 838 839 - ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY); 839 + ret = bdi_setup_and_register(&sbi->bdi, "exofs"); 840 840 if (ret) { 841 841 EXOFS_DBGMSG("Failed to bdi_setup_and_register\n"); 842 842 dput(sb->s_root);
+1 -1
fs/ncpfs/inode.c
··· 560 560 server = NCP_SBP(sb); 561 561 memset(server, 0, sizeof(*server)); 562 562 563 - error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY); 563 + error = bdi_setup_and_register(&server->bdi, "ncpfs"); 564 564 if (error) 565 565 goto out_fput; 566 566
+7
fs/ramfs/file-nommu.c
··· 34 34 unsigned long flags); 35 35 static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma); 36 36 37 + static unsigned ramfs_mmap_capabilities(struct file *file) 38 + { 39 + return NOMMU_MAP_DIRECT | NOMMU_MAP_COPY | NOMMU_MAP_READ | 40 + NOMMU_MAP_WRITE | NOMMU_MAP_EXEC; 41 + } 42 + 37 43 const struct file_operations ramfs_file_operations = { 44 + .mmap_capabilities = ramfs_mmap_capabilities, 38 45 .mmap = ramfs_nommu_mmap, 39 46 .get_unmapped_area = ramfs_nommu_get_unmapped_area, 40 47 .read = new_sync_read,
+2 -20
fs/ramfs/inode.c
··· 50 50 .set_page_dirty = __set_page_dirty_no_writeback, 51 51 }; 52 52 53 - static struct backing_dev_info ramfs_backing_dev_info = { 54 - .name = "ramfs", 55 - .ra_pages = 0, /* No readahead */ 56 - .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | 57 - BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY | 58 - BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP, 59 - }; 60 - 61 53 struct inode *ramfs_get_inode(struct super_block *sb, 62 54 const struct inode *dir, umode_t mode, dev_t dev) 63 55 { ··· 59 67 inode->i_ino = get_next_ino(); 60 68 inode_init_owner(inode, dir, mode); 61 69 inode->i_mapping->a_ops = &ramfs_aops; 62 - inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info; 70 + inode->i_mapping->backing_dev_info = &noop_backing_dev_info; 63 71 mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER); 64 72 mapping_set_unevictable(inode->i_mapping); 65 73 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; ··· 259 267 int __init init_ramfs_fs(void) 260 268 { 261 269 static unsigned long once; 262 - int err; 263 270 264 271 if (test_and_set_bit(0, &once)) 265 272 return 0; 266 - 267 - err = bdi_init(&ramfs_backing_dev_info); 268 - if (err) 269 - return err; 270 - 271 - err = register_filesystem(&ramfs_fs_type); 272 - if (err) 273 - bdi_destroy(&ramfs_backing_dev_info); 274 - 275 - return err; 273 + return register_filesystem(&ramfs_fs_type); 276 274 } 277 275 fs_initcall(init_ramfs_fs);
+10
fs/romfs/mmap-nommu.c
··· 70 70 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -ENOSYS; 71 71 } 72 72 73 + static unsigned romfs_mmap_capabilities(struct file *file) 74 + { 75 + struct mtd_info *mtd = file_inode(file)->i_sb->s_mtd; 76 + 77 + if (!mtd) 78 + return NOMMU_MAP_COPY; 79 + return mtd_mmap_capabilities(mtd); 80 + } 81 + 73 82 const struct file_operations romfs_ro_fops = { 74 83 .llseek = generic_file_llseek, 75 84 .read = new_sync_read, ··· 86 77 .splice_read = generic_file_splice_read, 87 78 .mmap = romfs_mmap, 88 79 .get_unmapped_area = romfs_get_unmapped_area, 80 + .mmap_capabilities = romfs_mmap_capabilities, 89 81 };
+1 -1
fs/ubifs/super.c
··· 2017 2017 * Read-ahead will be disabled because @c->bdi.ra_pages is 0. 2018 2018 */ 2019 2019 c->bdi.name = "ubifs", 2020 - c->bdi.capabilities = BDI_CAP_MAP_COPY; 2020 + c->bdi.capabilities = 0; 2021 2021 err = bdi_init(&c->bdi); 2022 2022 if (err) 2023 2023 goto out_close;
+4 -29
include/linux/backing-dev.h
··· 114 114 const char *fmt, ...); 115 115 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); 116 116 void bdi_unregister(struct backing_dev_info *bdi); 117 - int __must_check bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); 117 + int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); 118 118 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, 119 119 enum wb_reason reason); 120 120 void bdi_start_background_writeback(struct backing_dev_info *bdi); ··· 228 228 * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting 229 229 * BDI_CAP_NO_WRITEBACK: Don't write pages back 230 230 * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages 231 - * 232 - * These flags let !MMU mmap() govern direct device mapping vs immediate 233 - * copying more easily for MAP_PRIVATE, especially for ROM filesystems. 234 - * 235 - * BDI_CAP_MAP_COPY: Copy can be mapped (MAP_PRIVATE) 236 - * BDI_CAP_MAP_DIRECT: Can be mapped directly (MAP_SHARED) 237 - * BDI_CAP_READ_MAP: Can be mapped for reading 238 - * BDI_CAP_WRITE_MAP: Can be mapped for writing 239 - * BDI_CAP_EXEC_MAP: Can be mapped for execution 240 - * 241 231 * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold. 
242 232 */ 243 233 #define BDI_CAP_NO_ACCT_DIRTY 0x00000001 244 234 #define BDI_CAP_NO_WRITEBACK 0x00000002 245 - #define BDI_CAP_MAP_COPY 0x00000004 246 - #define BDI_CAP_MAP_DIRECT 0x00000008 247 - #define BDI_CAP_READ_MAP 0x00000010 248 - #define BDI_CAP_WRITE_MAP 0x00000020 249 - #define BDI_CAP_EXEC_MAP 0x00000040 250 - #define BDI_CAP_NO_ACCT_WB 0x00000080 251 - #define BDI_CAP_STABLE_WRITES 0x00000200 252 - #define BDI_CAP_STRICTLIMIT 0x00000400 253 - 254 - #define BDI_CAP_VMFLAGS \ 255 - (BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP) 235 + #define BDI_CAP_NO_ACCT_WB 0x00000004 236 + #define BDI_CAP_STABLE_WRITES 0x00000008 237 + #define BDI_CAP_STRICTLIMIT 0x00000010 256 238 257 239 #define BDI_CAP_NO_ACCT_AND_WRITEBACK \ 258 240 (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB) 259 - 260 - #if defined(VM_MAYREAD) && \ 261 - (BDI_CAP_READ_MAP != VM_MAYREAD || \ 262 - BDI_CAP_WRITE_MAP != VM_MAYWRITE || \ 263 - BDI_CAP_EXEC_MAP != VM_MAYEXEC) 264 - #error please change backing_dev_info::capabilities flags 265 - #endif 266 241 267 242 extern struct backing_dev_info default_backing_dev_info; 268 243 extern struct backing_dev_info noop_backing_dev_info;
-2
include/linux/cdev.h
··· 30 30 31 31 void cd_forget(struct inode *); 32 32 33 - extern struct backing_dev_info directly_mappable_cdev_bdi; 34 - 35 33 #endif
+23
include/linux/fs.h
··· 1502 1502 #define HAVE_COMPAT_IOCTL 1 1503 1503 #define HAVE_UNLOCKED_IOCTL 1 1504 1504 1505 + /* 1506 + * These flags let !MMU mmap() govern direct device mapping vs immediate 1507 + * copying more easily for MAP_PRIVATE, especially for ROM filesystems. 1508 + * 1509 + * NOMMU_MAP_COPY: Copy can be mapped (MAP_PRIVATE) 1510 + * NOMMU_MAP_DIRECT: Can be mapped directly (MAP_SHARED) 1511 + * NOMMU_MAP_READ: Can be mapped for reading 1512 + * NOMMU_MAP_WRITE: Can be mapped for writing 1513 + * NOMMU_MAP_EXEC: Can be mapped for execution 1514 + */ 1515 + #define NOMMU_MAP_COPY 0x00000001 1516 + #define NOMMU_MAP_DIRECT 0x00000008 1517 + #define NOMMU_MAP_READ VM_MAYREAD 1518 + #define NOMMU_MAP_WRITE VM_MAYWRITE 1519 + #define NOMMU_MAP_EXEC VM_MAYEXEC 1520 + 1521 + #define NOMMU_VMFLAGS \ 1522 + (NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC) 1523 + 1524 + 1505 1525 struct iov_iter; 1506 1526 1507 1527 struct file_operations { ··· 1556 1536 long (*fallocate)(struct file *file, int mode, loff_t offset, 1557 1537 loff_t len); 1558 1538 void (*show_fdinfo)(struct seq_file *m, struct file *f); 1539 + #ifndef CONFIG_MMU 1540 + unsigned (*mmap_capabilities)(struct file *); 1541 + #endif 1559 1542 }; 1560 1543 1561 1544 struct inode_operations {
+2
include/linux/mtd/mtd.h
··· 408 408 return mtd_is_bitflip(err) || mtd_is_eccerr(err); 409 409 } 410 410 411 + unsigned mtd_mmap_capabilities(struct mtd_info *mtd); 412 + 411 413 #endif /* __MTD_MTD_H__ */
+2 -5
mm/backing-dev.c
··· 17 17 struct backing_dev_info default_backing_dev_info = { 18 18 .name = "default", 19 19 .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE, 20 - .state = 0, 21 - .capabilities = BDI_CAP_MAP_COPY, 22 20 }; 23 21 EXPORT_SYMBOL_GPL(default_backing_dev_info); 24 22 ··· 511 513 * For use from filesystems to quickly init and register a bdi associated 512 514 * with dirty writeback 513 515 */ 514 - int bdi_setup_and_register(struct backing_dev_info *bdi, char *name, 515 - unsigned int cap) 516 + int bdi_setup_and_register(struct backing_dev_info *bdi, char *name) 516 517 { 517 518 int err; 518 519 519 520 bdi->name = name; 520 - bdi->capabilities = cap; 521 + bdi->capabilities = 0; 521 522 err = bdi_init(bdi); 522 523 if (err) 523 524 return err;
+30 -39
mm/nommu.c
··· 946 946 return -EOVERFLOW; 947 947 948 948 if (file) { 949 - /* validate file mapping requests */ 950 - struct address_space *mapping; 951 - 952 949 /* files must support mmap */ 953 950 if (!file->f_op->mmap) 954 951 return -ENODEV; ··· 954 957 * - we support chardevs that provide their own "memory" 955 958 * - we support files/blockdevs that are memory backed 956 959 */ 957 - mapping = file->f_mapping; 958 - if (!mapping) 959 - mapping = file_inode(file)->i_mapping; 960 - 961 - capabilities = 0; 962 - if (mapping && mapping->backing_dev_info) 963 - capabilities = mapping->backing_dev_info->capabilities; 964 - 965 - if (!capabilities) { 960 + if (file->f_op->mmap_capabilities) { 961 + capabilities = file->f_op->mmap_capabilities(file); 962 + } else { 966 963 /* no explicit capabilities set, so assume some 967 964 * defaults */ 968 965 switch (file_inode(file)->i_mode & S_IFMT) { 969 966 case S_IFREG: 970 967 case S_IFBLK: 971 - capabilities = BDI_CAP_MAP_COPY; 968 + capabilities = NOMMU_MAP_COPY; 972 969 break; 973 970 974 971 case S_IFCHR: 975 972 capabilities = 976 - BDI_CAP_MAP_DIRECT | 977 - BDI_CAP_READ_MAP | 978 - BDI_CAP_WRITE_MAP; 973 + NOMMU_MAP_DIRECT | 974 + NOMMU_MAP_READ | 975 + NOMMU_MAP_WRITE; 979 976 break; 980 977 981 978 default: ··· 980 989 /* eliminate any capabilities that we can't support on this 981 990 * device */ 982 991 if (!file->f_op->get_unmapped_area) 983 - capabilities &= ~BDI_CAP_MAP_DIRECT; 992 + capabilities &= ~NOMMU_MAP_DIRECT; 984 993 if (!file->f_op->read) 985 - capabilities &= ~BDI_CAP_MAP_COPY; 994 + capabilities &= ~NOMMU_MAP_COPY; 986 995 987 996 /* The file shall have been opened with read permission. 
*/ 988 997 if (!(file->f_mode & FMODE_READ)) ··· 1001 1010 if (locks_verify_locked(file)) 1002 1011 return -EAGAIN; 1003 1012 1004 - if (!(capabilities & BDI_CAP_MAP_DIRECT)) 1013 + if (!(capabilities & NOMMU_MAP_DIRECT)) 1005 1014 return -ENODEV; 1006 1015 1007 1016 /* we mustn't privatise shared mappings */ 1008 - capabilities &= ~BDI_CAP_MAP_COPY; 1017 + capabilities &= ~NOMMU_MAP_COPY; 1009 1018 } else { 1010 1019 /* we're going to read the file into private memory we 1011 1020 * allocate */ 1012 - if (!(capabilities & BDI_CAP_MAP_COPY)) 1021 + if (!(capabilities & NOMMU_MAP_COPY)) 1013 1022 return -ENODEV; 1014 1023 1015 1024 /* we don't permit a private writable mapping to be 1016 1025 * shared with the backing device */ 1017 1026 if (prot & PROT_WRITE) 1018 - capabilities &= ~BDI_CAP_MAP_DIRECT; 1027 + capabilities &= ~NOMMU_MAP_DIRECT; 1019 1028 } 1020 1029 1021 - if (capabilities & BDI_CAP_MAP_DIRECT) { 1022 - if (((prot & PROT_READ) && !(capabilities & BDI_CAP_READ_MAP)) || 1023 - ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) || 1024 - ((prot & PROT_EXEC) && !(capabilities & BDI_CAP_EXEC_MAP)) 1030 + if (capabilities & NOMMU_MAP_DIRECT) { 1031 + if (((prot & PROT_READ) && !(capabilities & NOMMU_MAP_READ)) || 1032 + ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) || 1033 + ((prot & PROT_EXEC) && !(capabilities & NOMMU_MAP_EXEC)) 1025 1034 ) { 1026 - capabilities &= ~BDI_CAP_MAP_DIRECT; 1035 + capabilities &= ~NOMMU_MAP_DIRECT; 1027 1036 if (flags & MAP_SHARED) { 1028 1037 printk(KERN_WARNING 1029 1038 "MAP_SHARED not completely supported on !MMU\n"); ··· 1040 1049 } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) { 1041 1050 /* handle implication of PROT_EXEC by PROT_READ */ 1042 1051 if (current->personality & READ_IMPLIES_EXEC) { 1043 - if (capabilities & BDI_CAP_EXEC_MAP) 1052 + if (capabilities & NOMMU_MAP_EXEC) 1044 1053 prot |= PROT_EXEC; 1045 1054 } 1046 1055 } else if ((prot & PROT_READ) && 1047 1056 (prot & PROT_EXEC) 
&& 1048 - !(capabilities & BDI_CAP_EXEC_MAP) 1057 + !(capabilities & NOMMU_MAP_EXEC) 1049 1058 ) { 1050 1059 /* backing file is not executable, try to copy */ 1051 - capabilities &= ~BDI_CAP_MAP_DIRECT; 1060 + capabilities &= ~NOMMU_MAP_DIRECT; 1052 1061 } 1053 1062 } else { 1054 1063 /* anonymous mappings are always memory backed and can be 1055 1064 * privately mapped 1056 1065 */ 1057 - capabilities = BDI_CAP_MAP_COPY; 1066 + capabilities = NOMMU_MAP_COPY; 1058 1067 1059 1068 /* handle PROT_EXEC implication by PROT_READ */ 1060 1069 if ((prot & PROT_READ) && ··· 1086 1095 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags); 1087 1096 /* vm_flags |= mm->def_flags; */ 1088 1097 1089 - if (!(capabilities & BDI_CAP_MAP_DIRECT)) { 1098 + if (!(capabilities & NOMMU_MAP_DIRECT)) { 1090 1099 /* attempt to share read-only copies of mapped file chunks */ 1091 1100 vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; 1092 1101 if (file && !(prot & PROT_WRITE)) ··· 1095 1104 /* overlay a shareable mapping on the backing device or inode 1096 1105 * if possible - used for chardevs, ramfs/tmpfs/shmfs and 1097 1106 * romfs/cramfs */ 1098 - vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS); 1107 + vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS); 1099 1108 if (flags & MAP_SHARED) 1100 1109 vm_flags |= VM_SHARED; 1101 1110 } ··· 1148 1157 * shared mappings on devices or memory 1149 1158 * - VM_MAYSHARE will be set if it may attempt to share 1150 1159 */ 1151 - if (capabilities & BDI_CAP_MAP_DIRECT) { 1160 + if (capabilities & NOMMU_MAP_DIRECT) { 1152 1161 ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); 1153 1162 if (ret == 0) { 1154 1163 /* shouldn't return success if we're not sharing */ ··· 1337 1346 if ((pregion->vm_pgoff != pgoff || rpglen != pglen) && 1338 1347 !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) { 1339 1348 /* new mapping is not a subset of the region */ 1340 - if (!(capabilities & BDI_CAP_MAP_DIRECT)) 1349 + if (!(capabilities & 
NOMMU_MAP_DIRECT)) 1341 1350 goto sharing_violation; 1342 1351 continue; 1343 1352 } ··· 1376 1385 * - this is the hook for quasi-memory character devices to 1377 1386 * tell us the location of a shared mapping 1378 1387 */ 1379 - if (capabilities & BDI_CAP_MAP_DIRECT) { 1388 + if (capabilities & NOMMU_MAP_DIRECT) { 1380 1389 addr = file->f_op->get_unmapped_area(file, addr, len, 1381 1390 pgoff, flags); 1382 1391 if (IS_ERR_VALUE(addr)) { ··· 1388 1397 * the mapping so we'll have to attempt to copy 1389 1398 * it */ 1390 1399 ret = -ENODEV; 1391 - if (!(capabilities & BDI_CAP_MAP_COPY)) 1400 + if (!(capabilities & NOMMU_MAP_COPY)) 1392 1401 goto error_just_free; 1393 1402 1394 - capabilities &= ~BDI_CAP_MAP_DIRECT; 1403 + capabilities &= ~NOMMU_MAP_DIRECT; 1395 1404 } else { 1396 1405 vma->vm_start = region->vm_start = addr; 1397 1406 vma->vm_end = region->vm_end = addr + len; ··· 1402 1411 vma->vm_region = region; 1403 1412 1404 1413 /* set up the mapping 1405 - * - the region is filled in if BDI_CAP_MAP_DIRECT is still set 1414 + * - the region is filled in if NOMMU_MAP_DIRECT is still set 1406 1415 */ 1407 1416 if (file && vma->vm_flags & VM_SHARED) 1408 1417 ret = do_mmap_shared_file(vma);
+6 -7
security/security.c
··· 726 726 return prot | PROT_EXEC; 727 727 /* 728 728 * ditto if it's not on noexec mount, except that on !MMU we need 729 - * BDI_CAP_EXEC_MMAP (== VM_MAYEXEC) in this case 729 + * NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case 730 730 */ 731 731 if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) { 732 732 #ifndef CONFIG_MMU 733 - unsigned long caps = 0; 734 - struct address_space *mapping = file->f_mapping; 735 - if (mapping && mapping->backing_dev_info) 736 - caps = mapping->backing_dev_info->capabilities; 737 - if (!(caps & BDI_CAP_EXEC_MAP)) 738 - return prot; 733 + if (file->f_op->mmap_capabilities) { 734 + unsigned caps = file->f_op->mmap_capabilities(file); 735 + if (!(caps & NOMMU_MAP_EXEC)) 736 + return prot; 737 + } 739 738 #endif 740 739 return prot | PROT_EXEC; 741 740 }