Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

virtiofs: add a mount option to enable dax

Add a mount option to allow using dax with virtio_fs.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>

authored by

Vivek Goyal and committed by
Miklos Szeredi
1dd53957 22f3787e

+151 -17
+13
fs/fuse/Kconfig
··· 38 38 39 39 If you want to share files between guests or with the host, answer Y 40 40 or M. 41 + 42 + config FUSE_DAX 43 + bool "Virtio Filesystem Direct Host Memory Access support" 44 + default y 45 + depends on VIRTIO_FS 46 + depends on FS_DAX 47 + depends on DAX_DRIVER 48 + help 49 + This allows bypassing guest page cache and allows mapping host page 50 + cache directly in guest address space. 51 + 52 + If you want to allow mounting a Virtio Filesystem with the "dax" 53 + option, answer Y.
+4 -2
fs/fuse/Makefile
··· 7 7 obj-$(CONFIG_CUSE) += cuse.o 8 8 obj-$(CONFIG_VIRTIO_FS) += virtiofs.o 9 9 10 - fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o 11 - virtiofs-y += virtio_fs.o 10 + fuse-y := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o 11 + fuse-$(CONFIG_FUSE_DAX) += dax.o 12 + 13 + virtiofs-y := virtio_fs.o
+36
fs/fuse/dax.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * dax: direct host memory access 4 + * Copyright (C) 2020 Red Hat, Inc. 5 + */ 6 + 7 + #include "fuse_i.h" 8 + 9 + #include <linux/dax.h> 10 + 11 + struct fuse_conn_dax { 12 + /* DAX device */ 13 + struct dax_device *dev; 14 + }; 15 + 16 + void fuse_dax_conn_free(struct fuse_conn *fc) 17 + { 18 + kfree(fc->dax); 19 + } 20 + 21 + int fuse_dax_conn_alloc(struct fuse_conn *fc, struct dax_device *dax_dev) 22 + { 23 + struct fuse_conn_dax *fcd; 24 + 25 + if (!dax_dev) 26 + return 0; 27 + 28 + fcd = kzalloc(sizeof(*fcd), GFP_KERNEL); 29 + if (!fcd) 30 + return -ENOMEM; 31 + 32 + fcd->dev = dax_dev; 33 + 34 + fc->dax = fcd; 35 + return 0; 36 + }
+14
fs/fuse/fuse_i.h
··· 483 483 bool no_control:1; 484 484 bool no_force_umount:1; 485 485 bool legacy_opts_show:1; 486 + bool dax:1; 486 487 unsigned int max_read; 487 488 unsigned int blksize; 488 489 const char *subtype; 490 + 491 + /* DAX device, may be NULL */ 492 + struct dax_device *dax_dev; 489 493 490 494 /* fuse_dev pointer to fill in, should contain NULL on entry */ 491 495 void **fudptr; ··· 759 755 760 756 /** List of device instances belonging to this connection */ 761 757 struct list_head devices; 758 + 759 + #ifdef CONFIG_FUSE_DAX 760 + /* Dax specific conn data, non-NULL if DAX is enabled */ 761 + struct fuse_conn_dax *dax; 762 + #endif 762 763 }; 763 764 764 765 static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb) ··· 1101 1092 */ 1102 1093 u64 fuse_get_unique(struct fuse_iqueue *fiq); 1103 1094 void fuse_free_conn(struct fuse_conn *fc); 1095 + 1096 + /* dax.c */ 1097 + 1098 + int fuse_dax_conn_alloc(struct fuse_conn *fc, struct dax_device *dax_dev); 1099 + void fuse_dax_conn_free(struct fuse_conn *fc); 1104 1100 1105 1101 #endif /* _FS_FUSE_I_H */
+17 -1
fs/fuse/inode.c
··· 587 587 if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE) 588 588 seq_printf(m, ",blksize=%lu", sb->s_blocksize); 589 589 } 590 + #ifdef CONFIG_FUSE_DAX 591 + if (fc->dax) 592 + seq_puts(m, ",dax"); 593 + #endif 594 + 590 595 return 0; 591 596 } 592 597 ··· 656 651 if (refcount_dec_and_test(&fc->count)) { 657 652 struct fuse_iqueue *fiq = &fc->iq; 658 653 654 + if (IS_ENABLED(CONFIG_FUSE_DAX)) 655 + fuse_dax_conn_free(fc); 659 656 if (fiq->ops->release) 660 657 fiq->ops->release(fiq); 661 658 put_pid_ns(fc->pid_ns); ··· 1182 1175 if (sb->s_user_ns != &init_user_ns) 1183 1176 sb->s_xattr = fuse_no_acl_xattr_handlers; 1184 1177 1178 + if (IS_ENABLED(CONFIG_FUSE_DAX)) { 1179 + err = fuse_dax_conn_alloc(fc, ctx->dax_dev); 1180 + if (err) 1181 + goto err; 1182 + } 1183 + 1185 1184 if (ctx->fudptr) { 1186 1185 err = -ENOMEM; 1187 1186 fud = fuse_dev_alloc_install(fc); 1188 1187 if (!fud) 1189 - goto err; 1188 + goto err_free_dax; 1190 1189 } 1191 1190 1192 1191 fc->dev = sb->s_dev; ··· 1247 1234 err_dev_free: 1248 1235 if (fud) 1249 1236 fuse_dev_free(fud); 1237 + err_free_dax: 1238 + if (IS_ENABLED(CONFIG_FUSE_DAX)) 1239 + fuse_dax_conn_free(fc); 1250 1240 err: 1251 1241 return err; 1252 1242 }
+67 -14
fs/fuse/virtio_fs.c
··· 13 13 #include <linux/virtio_fs.h> 14 14 #include <linux/delay.h> 15 15 #include <linux/fs_context.h> 16 + #include <linux/fs_parser.h> 16 17 #include <linux/highmem.h> 17 18 #include <linux/uio.h> 18 19 #include "fuse_i.h" ··· 81 80 82 81 static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, 83 82 struct fuse_req *req, bool in_flight); 83 + 84 + enum { 85 + OPT_DAX, 86 + }; 87 + 88 + static const struct fs_parameter_spec virtio_fs_parameters[] = { 89 + fsparam_flag("dax", OPT_DAX), 90 + {} 91 + }; 92 + 93 + static int virtio_fs_parse_param(struct fs_context *fc, 94 + struct fs_parameter *param) 95 + { 96 + struct fs_parse_result result; 97 + struct fuse_fs_context *ctx = fc->fs_private; 98 + int opt; 99 + 100 + opt = fs_parse(fc, virtio_fs_parameters, param, &result); 101 + if (opt < 0) 102 + return opt; 103 + 104 + switch (opt) { 105 + case OPT_DAX: 106 + ctx->dax = 1; 107 + break; 108 + default: 109 + return -EINVAL; 110 + } 111 + 112 + return 0; 113 + } 114 + 115 + static void virtio_fs_free_fc(struct fs_context *fc) 116 + { 117 + struct fuse_fs_context *ctx = fc->fs_private; 118 + 119 + kfree(ctx); 120 + } 84 121 85 122 static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq) 86 123 { ··· 1258 1219 .release = virtio_fs_fiq_release, 1259 1220 }; 1260 1221 1261 - static int virtio_fs_fill_super(struct super_block *sb) 1222 + static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx) 1223 + { 1224 + ctx->rootmode = S_IFDIR; 1225 + ctx->default_permissions = 1; 1226 + ctx->allow_other = 1; 1227 + ctx->max_read = UINT_MAX; 1228 + ctx->blksize = 512; 1229 + ctx->destroy = true; 1230 + ctx->no_control = true; 1231 + ctx->no_force_umount = true; 1232 + } 1233 + 1234 + static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc) 1262 1235 { 1263 1236 struct fuse_conn *fc = get_fuse_conn_super(sb); 1264 1237 struct virtio_fs *fs = fc->iq.priv; 1238 + struct fuse_fs_context *ctx = fsc->fs_private; 1265 1239 unsigned int i; 1266 1240 int err; 1267 - struct fuse_fs_context ctx = { 1268 - .rootmode = S_IFDIR, 1269 - .default_permissions = 1, 1270 - .allow_other = 1, 1271 - .max_read = UINT_MAX, 1272 - .blksize = 512, 1273 - .destroy = true, 1274 - .no_control = true, 1275 - .no_force_umount = true, 1276 - }; 1277 1241 1242 + virtio_fs_ctx_set_defaults(ctx); 1278 1243 mutex_lock(&virtio_fs_mutex); 1279 1244 1280 1245 /* After holding mutex, make sure virtiofs device is still there. ··· 1302 1259 } 1303 1260 1304 1261 /* virtiofs allocates and installs its own fuse devices */ 1305 - ctx.fudptr = NULL; 1306 - err = fuse_fill_super_common(sb, &ctx); 1262 + ctx->fudptr = NULL; 1263 + if (ctx->dax) 1264 + ctx->dax_dev = fs->dax_dev; 1265 + err = fuse_fill_super_common(sb, ctx); 1307 1266 if (err < 0) 1308 1267 goto err_free_fuse_devs; 1309 1268 ··· 1416 1371 return PTR_ERR(sb); 1417 1372 1418 1373 if (!sb->s_root) { 1419 - err = virtio_fs_fill_super(sb); 1374 + err = virtio_fs_fill_super(sb, fsc); 1420 1375 if (err) { 1421 1376 deactivate_locked_super(sb); 1422 1377 return err; ··· 1431 1386 } 1432 1387 1433 1388 static const struct fs_context_operations virtio_fs_context_ops = { 1389 + .free = virtio_fs_free_fc, 1390 + .parse_param = virtio_fs_parse_param, 1434 1391 .get_tree = virtio_fs_get_tree, 1435 1392 }; 1436 1393 1437 1394 static int virtio_fs_init_fs_context(struct fs_context *fsc) 1438 1395 { 1396 + struct fuse_fs_context *ctx; 1397 + 1398 + ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL); 1399 + if (!ctx) 1400 + return -ENOMEM; 1401 + fsc->fs_private = ctx; 1439 1402 fsc->ops = &virtio_fs_context_ops; 1440 1403 return 0; 1441 1404 }