Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'cuse' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse

* 'cuse' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse:
CUSE: implement CUSE - Character device in Userspace
fuse: export symbols to be used by CUSE
fuse: update fuse_conn_init() and separate out fuse_conn_kill()
fuse: don't use inode in fuse_file_poll
fuse: don't use inode in fuse_do_ioctl() helper
fuse: don't use inode in fuse_sync_release()
fuse: create fuse_do_open() helper for CUSE
fuse: clean up args in fuse_finish_open() and fuse_release_fill()
fuse: don't use inode in helpers called by fuse_direct_io()
fuse: add members to struct fuse_file
fuse: prepare fuse_direct_io() for CUSE
fuse: clean up fuse_write_fill()
fuse: use struct path in release structure
fuse: misc cleanups

+985 -236
+10
fs/Kconfig
··· 62 62 source "fs/autofs4/Kconfig" 63 63 source "fs/fuse/Kconfig" 64 64 65 + config CUSE 66 + tristate "Character device in Userspace support" 67 + depends on FUSE_FS 68 + help 69 + This FUSE extension allows character devices to be 70 + implemented in userspace. 71 + 72 + If you want to develop or use a userspace character device 73 + based on CUSE, answer Y or M. 74 + 65 75 config GENERIC_ACL 66 76 bool 67 77 select FS_POSIX_ACL
+1
fs/fuse/Makefile
··· 3 3 # 4 4 5 5 obj-$(CONFIG_FUSE_FS) += fuse.o 6 + obj-$(CONFIG_CUSE) += cuse.o 6 7 7 8 fuse-objs := dev.o dir.o file.o inode.o control.o
+610
fs/fuse/cuse.c
··· 1 + /* 2 + * CUSE: Character device in Userspace 3 + * 4 + * Copyright (C) 2008-2009 SUSE Linux Products GmbH 5 + * Copyright (C) 2008-2009 Tejun Heo <tj@kernel.org> 6 + * 7 + * This file is released under the GPLv2. 8 + * 9 + * CUSE enables character devices to be implemented from userland much 10 + * like FUSE allows filesystems. On initialization /dev/cuse is 11 + * created. By opening the file and replying to the CUSE_INIT request 12 + * userland CUSE server can create a character device. After that the 13 + * operation is very similar to FUSE. 14 + * 15 + * A CUSE instance involves the following objects. 16 + * 17 + * cuse_conn : contains fuse_conn and serves as bonding structure 18 + * channel : file handle connected to the userland CUSE server 19 + * cdev : the implemented character device 20 + * dev : generic device for cdev 21 + * 22 + * Note that 'channel' is what 'dev' is in FUSE. As CUSE deals with 23 + * devices, it's called 'channel' to reduce confusion. 24 + * 25 + * channel determines when the character device dies. When channel is 26 + * closed, everything begins to destruct. The cuse_conn is taken off 27 + * the lookup table preventing further access from cdev, cdev and 28 + * generic device are removed and the base reference of cuse_conn is 29 + * put. 30 + * 31 + * On each open, the matching cuse_conn is looked up and if found an 32 + * additional reference is taken which is released when the file is 33 + * closed. 
34 + */ 35 + 36 + #include <linux/fuse.h> 37 + #include <linux/cdev.h> 38 + #include <linux/device.h> 39 + #include <linux/file.h> 40 + #include <linux/fs.h> 41 + #include <linux/kdev_t.h> 42 + #include <linux/kthread.h> 43 + #include <linux/list.h> 44 + #include <linux/magic.h> 45 + #include <linux/miscdevice.h> 46 + #include <linux/mutex.h> 47 + #include <linux/spinlock.h> 48 + #include <linux/stat.h> 49 + 50 + #include "fuse_i.h" 51 + 52 + #define CUSE_CONNTBL_LEN 64 53 + 54 + struct cuse_conn { 55 + struct list_head list; /* linked on cuse_conntbl */ 56 + struct fuse_conn fc; /* fuse connection */ 57 + struct cdev *cdev; /* associated character device */ 58 + struct device *dev; /* device representing @cdev */ 59 + 60 + /* init parameters, set once during initialization */ 61 + bool unrestricted_ioctl; 62 + }; 63 + 64 + static DEFINE_SPINLOCK(cuse_lock); /* protects cuse_conntbl */ 65 + static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN]; 66 + static struct class *cuse_class; 67 + 68 + static struct cuse_conn *fc_to_cc(struct fuse_conn *fc) 69 + { 70 + return container_of(fc, struct cuse_conn, fc); 71 + } 72 + 73 + static struct list_head *cuse_conntbl_head(dev_t devt) 74 + { 75 + return &cuse_conntbl[(MAJOR(devt) + MINOR(devt)) % CUSE_CONNTBL_LEN]; 76 + } 77 + 78 + 79 + /************************************************************************** 80 + * CUSE frontend operations 81 + * 82 + * These are file operations for the character device. 83 + * 84 + * On open, CUSE opens a file from the FUSE mnt and stores it to 85 + * private_data of the open file. All other ops call FUSE ops on the 86 + * FUSE file. 
87 + */ 88 + 89 + static ssize_t cuse_read(struct file *file, char __user *buf, size_t count, 90 + loff_t *ppos) 91 + { 92 + loff_t pos = 0; 93 + 94 + return fuse_direct_io(file, buf, count, &pos, 0); 95 + } 96 + 97 + static ssize_t cuse_write(struct file *file, const char __user *buf, 98 + size_t count, loff_t *ppos) 99 + { 100 + loff_t pos = 0; 101 + /* 102 + * No locking or generic_write_checks(), the server is 103 + * responsible for locking and sanity checks. 104 + */ 105 + return fuse_direct_io(file, buf, count, &pos, 1); 106 + } 107 + 108 + static int cuse_open(struct inode *inode, struct file *file) 109 + { 110 + dev_t devt = inode->i_cdev->dev; 111 + struct cuse_conn *cc = NULL, *pos; 112 + int rc; 113 + 114 + /* look up and get the connection */ 115 + spin_lock(&cuse_lock); 116 + list_for_each_entry(pos, cuse_conntbl_head(devt), list) 117 + if (pos->dev->devt == devt) { 118 + fuse_conn_get(&pos->fc); 119 + cc = pos; 120 + break; 121 + } 122 + spin_unlock(&cuse_lock); 123 + 124 + /* dead? */ 125 + if (!cc) 126 + return -ENODEV; 127 + 128 + /* 129 + * Generic permission check is already done against the chrdev 130 + * file, proceed to open. 
131 + */ 132 + rc = fuse_do_open(&cc->fc, 0, file, 0); 133 + if (rc) 134 + fuse_conn_put(&cc->fc); 135 + return rc; 136 + } 137 + 138 + static int cuse_release(struct inode *inode, struct file *file) 139 + { 140 + struct fuse_file *ff = file->private_data; 141 + struct fuse_conn *fc = ff->fc; 142 + 143 + fuse_sync_release(ff, file->f_flags); 144 + fuse_conn_put(fc); 145 + 146 + return 0; 147 + } 148 + 149 + static long cuse_file_ioctl(struct file *file, unsigned int cmd, 150 + unsigned long arg) 151 + { 152 + struct fuse_file *ff = file->private_data; 153 + struct cuse_conn *cc = fc_to_cc(ff->fc); 154 + unsigned int flags = 0; 155 + 156 + if (cc->unrestricted_ioctl) 157 + flags |= FUSE_IOCTL_UNRESTRICTED; 158 + 159 + return fuse_do_ioctl(file, cmd, arg, flags); 160 + } 161 + 162 + static long cuse_file_compat_ioctl(struct file *file, unsigned int cmd, 163 + unsigned long arg) 164 + { 165 + struct fuse_file *ff = file->private_data; 166 + struct cuse_conn *cc = fc_to_cc(ff->fc); 167 + unsigned int flags = FUSE_IOCTL_COMPAT; 168 + 169 + if (cc->unrestricted_ioctl) 170 + flags |= FUSE_IOCTL_UNRESTRICTED; 171 + 172 + return fuse_do_ioctl(file, cmd, arg, flags); 173 + } 174 + 175 + static const struct file_operations cuse_frontend_fops = { 176 + .owner = THIS_MODULE, 177 + .read = cuse_read, 178 + .write = cuse_write, 179 + .open = cuse_open, 180 + .release = cuse_release, 181 + .unlocked_ioctl = cuse_file_ioctl, 182 + .compat_ioctl = cuse_file_compat_ioctl, 183 + .poll = fuse_file_poll, 184 + }; 185 + 186 + 187 + /************************************************************************** 188 + * CUSE channel initialization and destruction 189 + */ 190 + 191 + struct cuse_devinfo { 192 + const char *name; 193 + }; 194 + 195 + /** 196 + * cuse_parse_one - parse one key=value pair 197 + * @pp: i/o parameter for the current position 198 + * @end: points to one past the end of the packed string 199 + * @keyp: out parameter for key 200 + * @valp: out parameter for value 201 
+ * 202 + * *@pp points to packed strings - "key0=val0\0key1=val1\0" which ends 203 + * at @end - 1. This function parses one pair and sets *@keyp to the 204 + * start of the key and *@valp to the start of the value. Note that 205 + * the original string is modified such that the key string is 206 + * terminated with '\0'. *@pp is updated to point to the next string. 207 + * 208 + * RETURNS: 209 + * 1 on successful parse, 0 on EOF, -errno on failure. 210 + */ 211 + static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp) 212 + { 213 + char *p = *pp; 214 + char *key, *val; 215 + 216 + while (p < end && *p == '\0') 217 + p++; 218 + if (p == end) 219 + return 0; 220 + 221 + if (end[-1] != '\0') { 222 + printk(KERN_ERR "CUSE: info not properly terminated\n"); 223 + return -EINVAL; 224 + } 225 + 226 + key = val = p; 227 + p += strlen(p); 228 + 229 + if (valp) { 230 + strsep(&val, "="); 231 + if (!val) 232 + val = key + strlen(key); 233 + key = strstrip(key); 234 + val = strstrip(val); 235 + } else 236 + key = strstrip(key); 237 + 238 + if (!strlen(key)) { 239 + printk(KERN_ERR "CUSE: zero length info key specified\n"); 240 + return -EINVAL; 241 + } 242 + 243 + *pp = p; 244 + *keyp = key; 245 + if (valp) 246 + *valp = val; 247 + 248 + return 1; 249 + } 250 + 251 + /** 252 + * cuse_parse_devinfo - parse device info 253 + * @p: device info string 254 + * @len: length of device info string 255 + * @devinfo: out parameter for parsed device info 256 + * 257 + * Parse @p to extract device info and store it into @devinfo. String 258 + * pointed to by @p is modified by parsing and @devinfo points into 259 + * them, so @p shouldn't be freed while @devinfo is in use. 260 + * 261 + * RETURNS: 262 + * 0 on success, -errno on failure. 
263 + */ 264 + static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo) 265 + { 266 + char *end = p + len; 267 + char *key, *val; 268 + int rc; 269 + 270 + while (true) { 271 + rc = cuse_parse_one(&p, end, &key, &val); 272 + if (rc < 0) 273 + return rc; 274 + if (!rc) 275 + break; 276 + if (strcmp(key, "DEVNAME") == 0) 277 + devinfo->name = val; 278 + else 279 + printk(KERN_WARNING "CUSE: unknown device info \"%s\"\n", 280 + key); 281 + } 282 + 283 + if (!devinfo->name || !strlen(devinfo->name)) { 284 + printk(KERN_ERR "CUSE: DEVNAME unspecified\n"); 285 + return -EINVAL; 286 + } 287 + 288 + return 0; 289 + } 290 + 291 + static void cuse_gendev_release(struct device *dev) 292 + { 293 + kfree(dev); 294 + } 295 + 296 + /** 297 + * cuse_process_init_reply - finish initializing CUSE channel 298 + * 299 + * This function creates the character device and sets up all the 300 + * required data structures for it. Please read the comment at the 301 + * top of this file for high level overview. 
302 + */ 303 + static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) 304 + { 305 + struct cuse_conn *cc = fc_to_cc(fc); 306 + struct cuse_init_out *arg = &req->misc.cuse_init_out; 307 + struct page *page = req->pages[0]; 308 + struct cuse_devinfo devinfo = { }; 309 + struct device *dev; 310 + struct cdev *cdev; 311 + dev_t devt; 312 + int rc; 313 + 314 + if (req->out.h.error || 315 + arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) { 316 + goto err; 317 + } 318 + 319 + fc->minor = arg->minor; 320 + fc->max_read = max_t(unsigned, arg->max_read, 4096); 321 + fc->max_write = max_t(unsigned, arg->max_write, 4096); 322 + 323 + /* parse init reply */ 324 + cc->unrestricted_ioctl = arg->flags & CUSE_UNRESTRICTED_IOCTL; 325 + 326 + rc = cuse_parse_devinfo(page_address(page), req->out.args[1].size, 327 + &devinfo); 328 + if (rc) 329 + goto err; 330 + 331 + /* determine and reserve devt */ 332 + devt = MKDEV(arg->dev_major, arg->dev_minor); 333 + if (!MAJOR(devt)) 334 + rc = alloc_chrdev_region(&devt, MINOR(devt), 1, devinfo.name); 335 + else 336 + rc = register_chrdev_region(devt, 1, devinfo.name); 337 + if (rc) { 338 + printk(KERN_ERR "CUSE: failed to register chrdev region\n"); 339 + goto err; 340 + } 341 + 342 + /* devt determined, create device */ 343 + rc = -ENOMEM; 344 + dev = kzalloc(sizeof(*dev), GFP_KERNEL); 345 + if (!dev) 346 + goto err_region; 347 + 348 + device_initialize(dev); 349 + dev_set_uevent_suppress(dev, 1); 350 + dev->class = cuse_class; 351 + dev->devt = devt; 352 + dev->release = cuse_gendev_release; 353 + dev_set_drvdata(dev, cc); 354 + dev_set_name(dev, "%s", devinfo.name); 355 + 356 + rc = device_add(dev); 357 + if (rc) 358 + goto err_device; 359 + 360 + /* register cdev */ 361 + rc = -ENOMEM; 362 + cdev = cdev_alloc(); 363 + if (!cdev) 364 + goto err_device; 365 + 366 + cdev->owner = THIS_MODULE; 367 + cdev->ops = &cuse_frontend_fops; 368 + 369 + rc = cdev_add(cdev, devt, 1); 370 + if (rc) 371 + goto err_cdev; 372 + 
373 + cc->dev = dev; 374 + cc->cdev = cdev; 375 + 376 + /* make the device available */ 377 + spin_lock(&cuse_lock); 378 + list_add(&cc->list, cuse_conntbl_head(devt)); 379 + spin_unlock(&cuse_lock); 380 + 381 + /* announce device availability */ 382 + dev_set_uevent_suppress(dev, 0); 383 + kobject_uevent(&dev->kobj, KOBJ_ADD); 384 + out: 385 + __free_page(page); 386 + return; 387 + 388 + err_cdev: 389 + cdev_del(cdev); 390 + err_device: 391 + put_device(dev); 392 + err_region: 393 + unregister_chrdev_region(devt, 1); 394 + err: 395 + fc->conn_error = 1; 396 + goto out; 397 + } 398 + 399 + static int cuse_send_init(struct cuse_conn *cc) 400 + { 401 + int rc; 402 + struct fuse_req *req; 403 + struct page *page; 404 + struct fuse_conn *fc = &cc->fc; 405 + struct cuse_init_in *arg; 406 + 407 + BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE); 408 + 409 + req = fuse_get_req(fc); 410 + if (IS_ERR(req)) { 411 + rc = PTR_ERR(req); 412 + goto err; 413 + } 414 + 415 + rc = -ENOMEM; 416 + page = alloc_page(GFP_KERNEL | __GFP_ZERO); 417 + if (!page) 418 + goto err_put_req; 419 + 420 + arg = &req->misc.cuse_init_in; 421 + arg->major = FUSE_KERNEL_VERSION; 422 + arg->minor = FUSE_KERNEL_MINOR_VERSION; 423 + arg->flags |= CUSE_UNRESTRICTED_IOCTL; 424 + req->in.h.opcode = CUSE_INIT; 425 + req->in.numargs = 1; 426 + req->in.args[0].size = sizeof(struct cuse_init_in); 427 + req->in.args[0].value = arg; 428 + req->out.numargs = 2; 429 + req->out.args[0].size = sizeof(struct cuse_init_out); 430 + req->out.args[0].value = &req->misc.cuse_init_out; 431 + req->out.args[1].size = CUSE_INIT_INFO_MAX; 432 + req->out.argvar = 1; 433 + req->out.argpages = 1; 434 + req->pages[0] = page; 435 + req->num_pages = 1; 436 + req->end = cuse_process_init_reply; 437 + fuse_request_send_background(fc, req); 438 + 439 + return 0; 440 + 441 + err_put_req: 442 + fuse_put_request(fc, req); 443 + err: 444 + return rc; 445 + } 446 + 447 + static void cuse_fc_release(struct fuse_conn *fc) 448 + { 449 + struct 
cuse_conn *cc = fc_to_cc(fc); 450 + kfree(cc); 451 + } 452 + 453 + /** 454 + * cuse_channel_open - open method for /dev/cuse 455 + * @inode: inode for /dev/cuse 456 + * @file: file struct being opened 457 + * 458 + * Userland CUSE server can create a CUSE device by opening /dev/cuse 459 + * and replying to the initialization request the kernel sends. This 460 + * function is responsible for handling CUSE device initialization. 461 + * Because the fd opened by this function is used during 462 + * initialization, this function only creates cuse_conn and sends 463 + * init. The rest is delegated to a kthread. 464 + * 465 + * RETURNS: 466 + * 0 on success, -errno on failure. 467 + */ 468 + static int cuse_channel_open(struct inode *inode, struct file *file) 469 + { 470 + struct cuse_conn *cc; 471 + int rc; 472 + 473 + /* set up cuse_conn */ 474 + cc = kzalloc(sizeof(*cc), GFP_KERNEL); 475 + if (!cc) 476 + return -ENOMEM; 477 + 478 + fuse_conn_init(&cc->fc); 479 + 480 + INIT_LIST_HEAD(&cc->list); 481 + cc->fc.release = cuse_fc_release; 482 + 483 + cc->fc.connected = 1; 484 + cc->fc.blocked = 0; 485 + rc = cuse_send_init(cc); 486 + if (rc) { 487 + fuse_conn_put(&cc->fc); 488 + return rc; 489 + } 490 + file->private_data = &cc->fc; /* channel owns base reference to cc */ 491 + 492 + return 0; 493 + } 494 + 495 + /** 496 + * cuse_channel_release - release method for /dev/cuse 497 + * @inode: inode for /dev/cuse 498 + * @file: file struct being closed 499 + * 500 + * Disconnect the channel, deregister CUSE device and initiate 501 + * destruction by putting the default reference. 502 + * 503 + * RETURNS: 504 + * 0 on success, -errno on failure. 
505 + */ 506 + static int cuse_channel_release(struct inode *inode, struct file *file) 507 + { 508 + struct cuse_conn *cc = fc_to_cc(file->private_data); 509 + int rc; 510 + 511 + /* remove from the conntbl, no more access from this point on */ 512 + spin_lock(&cuse_lock); 513 + list_del_init(&cc->list); 514 + spin_unlock(&cuse_lock); 515 + 516 + /* remove device */ 517 + if (cc->dev) 518 + device_unregister(cc->dev); 519 + if (cc->cdev) { 520 + unregister_chrdev_region(cc->cdev->dev, 1); 521 + cdev_del(cc->cdev); 522 + } 523 + 524 + /* kill connection and shutdown channel */ 525 + fuse_conn_kill(&cc->fc); 526 + rc = fuse_dev_release(inode, file); /* puts the base reference */ 527 + 528 + return rc; 529 + } 530 + 531 + static struct file_operations cuse_channel_fops; /* initialized during init */ 532 + 533 + 534 + /************************************************************************** 535 + * Misc stuff and module initialization 536 + * 537 + * CUSE exports the same set of attributes to sysfs as fusectl. 
538 + */ 539 + 540 + static ssize_t cuse_class_waiting_show(struct device *dev, 541 + struct device_attribute *attr, char *buf) 542 + { 543 + struct cuse_conn *cc = dev_get_drvdata(dev); 544 + 545 + return sprintf(buf, "%d\n", atomic_read(&cc->fc.num_waiting)); 546 + } 547 + 548 + static ssize_t cuse_class_abort_store(struct device *dev, 549 + struct device_attribute *attr, 550 + const char *buf, size_t count) 551 + { 552 + struct cuse_conn *cc = dev_get_drvdata(dev); 553 + 554 + fuse_abort_conn(&cc->fc); 555 + return count; 556 + } 557 + 558 + static struct device_attribute cuse_class_dev_attrs[] = { 559 + __ATTR(waiting, S_IFREG | 0400, cuse_class_waiting_show, NULL), 560 + __ATTR(abort, S_IFREG | 0200, NULL, cuse_class_abort_store), 561 + { } 562 + }; 563 + 564 + static struct miscdevice cuse_miscdev = { 565 + .minor = MISC_DYNAMIC_MINOR, 566 + .name = "cuse", 567 + .fops = &cuse_channel_fops, 568 + }; 569 + 570 + static int __init cuse_init(void) 571 + { 572 + int i, rc; 573 + 574 + /* init conntbl */ 575 + for (i = 0; i < CUSE_CONNTBL_LEN; i++) 576 + INIT_LIST_HEAD(&cuse_conntbl[i]); 577 + 578 + /* inherit and extend fuse_dev_operations */ 579 + cuse_channel_fops = fuse_dev_operations; 580 + cuse_channel_fops.owner = THIS_MODULE; 581 + cuse_channel_fops.open = cuse_channel_open; 582 + cuse_channel_fops.release = cuse_channel_release; 583 + 584 + cuse_class = class_create(THIS_MODULE, "cuse"); 585 + if (IS_ERR(cuse_class)) 586 + return PTR_ERR(cuse_class); 587 + 588 + cuse_class->dev_attrs = cuse_class_dev_attrs; 589 + 590 + rc = misc_register(&cuse_miscdev); 591 + if (rc) { 592 + class_destroy(cuse_class); 593 + return rc; 594 + } 595 + 596 + return 0; 597 + } 598 + 599 + static void __exit cuse_exit(void) 600 + { 601 + misc_deregister(&cuse_miscdev); 602 + class_destroy(cuse_class); 603 + } 604 + 605 + module_init(cuse_init); 606 + module_exit(cuse_exit); 607 + 608 + MODULE_AUTHOR("Tejun Heo <tj@kernel.org>"); 609 + MODULE_DESCRIPTION("Character device in 
Userspace"); 610 + MODULE_LICENSE("GPL");
+12 -3
fs/fuse/dev.c
··· 46 46 fuse_request_init(req); 47 47 return req; 48 48 } 49 + EXPORT_SYMBOL_GPL(fuse_request_alloc); 49 50 50 51 struct fuse_req *fuse_request_alloc_nofs(void) 51 52 { ··· 125 124 atomic_dec(&fc->num_waiting); 126 125 return ERR_PTR(err); 127 126 } 127 + EXPORT_SYMBOL_GPL(fuse_get_req); 128 128 129 129 /* 130 130 * Return request in fuse_file->reserved_req. However that may ··· 210 208 fuse_request_free(req); 211 209 } 212 210 } 211 + EXPORT_SYMBOL_GPL(fuse_put_request); 213 212 214 213 static unsigned len_args(unsigned numargs, struct fuse_arg *args) 215 214 { ··· 285 282 wake_up_all(&fc->blocked_waitq); 286 283 } 287 284 if (fc->num_background == FUSE_CONGESTION_THRESHOLD && 288 - fc->connected) { 285 + fc->connected && fc->bdi_initialized) { 289 286 clear_bdi_congested(&fc->bdi, READ); 290 287 clear_bdi_congested(&fc->bdi, WRITE); 291 288 } ··· 403 400 } 404 401 spin_unlock(&fc->lock); 405 402 } 403 + EXPORT_SYMBOL_GPL(fuse_request_send); 406 404 407 405 static void fuse_request_send_nowait_locked(struct fuse_conn *fc, 408 406 struct fuse_req *req) ··· 412 408 fc->num_background++; 413 409 if (fc->num_background == FUSE_MAX_BACKGROUND) 414 410 fc->blocked = 1; 415 - if (fc->num_background == FUSE_CONGESTION_THRESHOLD) { 411 + if (fc->num_background == FUSE_CONGESTION_THRESHOLD && 412 + fc->bdi_initialized) { 416 413 set_bdi_congested(&fc->bdi, READ); 417 414 set_bdi_congested(&fc->bdi, WRITE); 418 415 } ··· 444 439 req->isreply = 1; 445 440 fuse_request_send_nowait(fc, req); 446 441 } 442 + EXPORT_SYMBOL_GPL(fuse_request_send_background); 447 443 448 444 /* 449 445 * Called under fc->lock ··· 1111 1105 } 1112 1106 spin_unlock(&fc->lock); 1113 1107 } 1108 + EXPORT_SYMBOL_GPL(fuse_abort_conn); 1114 1109 1115 - static int fuse_dev_release(struct inode *inode, struct file *file) 1110 + int fuse_dev_release(struct inode *inode, struct file *file) 1116 1111 { 1117 1112 struct fuse_conn *fc = fuse_get_conn(file); 1118 1113 if (fc) { ··· 1127 1120 1128 1121 return 0; 
1129 1122 } 1123 + EXPORT_SYMBOL_GPL(fuse_dev_release); 1130 1124 1131 1125 static int fuse_dev_fasync(int fd, struct file *file, int on) 1132 1126 { ··· 1150 1142 .release = fuse_dev_release, 1151 1143 .fasync = fuse_dev_fasync, 1152 1144 }; 1145 + EXPORT_SYMBOL_GPL(fuse_dev_operations); 1153 1146 1154 1147 static struct miscdevice fuse_miscdevice = { 1155 1148 .minor = FUSE_MINOR,
+12 -21
fs/fuse/dir.c
··· 362 362 } 363 363 364 364 /* 365 - * Synchronous release for the case when something goes wrong in CREATE_OPEN 366 - */ 367 - static void fuse_sync_release(struct fuse_conn *fc, struct fuse_file *ff, 368 - u64 nodeid, int flags) 369 - { 370 - fuse_release_fill(ff, nodeid, flags, FUSE_RELEASE); 371 - ff->reserved_req->force = 1; 372 - fuse_request_send(fc, ff->reserved_req); 373 - fuse_put_request(fc, ff->reserved_req); 374 - kfree(ff); 375 - } 376 - 377 - /* 378 365 * Atomic create+open operation 379 366 * 380 367 * If the filesystem doesn't support this, then fall back to separate ··· 432 445 goto out_free_ff; 433 446 434 447 fuse_put_request(fc, req); 448 + ff->fh = outopen.fh; 449 + ff->nodeid = outentry.nodeid; 450 + ff->open_flags = outopen.open_flags; 435 451 inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation, 436 452 &outentry.attr, entry_attr_timeout(&outentry), 0); 437 453 if (!inode) { 438 454 flags &= ~(O_CREAT | O_EXCL | O_TRUNC); 439 - ff->fh = outopen.fh; 440 - fuse_sync_release(fc, ff, outentry.nodeid, flags); 455 + fuse_sync_release(ff, flags); 441 456 fuse_send_forget(fc, forget_req, outentry.nodeid, 1); 442 457 return -ENOMEM; 443 458 } ··· 449 460 fuse_invalidate_attr(dir); 450 461 file = lookup_instantiate_filp(nd, entry, generic_file_open); 451 462 if (IS_ERR(file)) { 452 - ff->fh = outopen.fh; 453 - fuse_sync_release(fc, ff, outentry.nodeid, flags); 463 + fuse_sync_release(ff, flags); 454 464 return PTR_ERR(file); 455 465 } 456 - fuse_finish_open(inode, file, ff, &outopen); 466 + file->private_data = fuse_file_get(ff); 467 + fuse_finish_open(inode, file); 457 468 return 0; 458 469 459 470 out_free_ff: ··· 1024 1035 req->out.argpages = 1; 1025 1036 req->num_pages = 1; 1026 1037 req->pages[0] = page; 1027 - fuse_read_fill(req, file, inode, file->f_pos, PAGE_SIZE, FUSE_READDIR); 1038 + fuse_read_fill(req, file, file->f_pos, PAGE_SIZE, FUSE_READDIR); 1028 1039 fuse_request_send(fc, req); 1029 1040 nbytes = req->out.args[0].size; 
1030 1041 err = req->out.h.error; ··· 1090 1101 1091 1102 static int fuse_dir_open(struct inode *inode, struct file *file) 1092 1103 { 1093 - return fuse_open_common(inode, file, 1); 1104 + return fuse_open_common(inode, file, true); 1094 1105 } 1095 1106 1096 1107 static int fuse_dir_release(struct inode *inode, struct file *file) 1097 1108 { 1098 - return fuse_release_common(inode, file, 1); 1109 + fuse_release_common(file, FUSE_RELEASEDIR); 1110 + 1111 + return 0; 1099 1112 } 1100 1113 1101 1114 static int fuse_dir_fsync(struct file *file, struct dentry *de, int datasync)
+206 -150
fs/fuse/file.c
··· 12 12 #include <linux/slab.h> 13 13 #include <linux/kernel.h> 14 14 #include <linux/sched.h> 15 + #include <linux/module.h> 15 16 16 17 static const struct file_operations fuse_direct_io_file_operations; 17 18 18 - static int fuse_send_open(struct inode *inode, struct file *file, int isdir, 19 - struct fuse_open_out *outargp) 19 + static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file, 20 + int opcode, struct fuse_open_out *outargp) 20 21 { 21 - struct fuse_conn *fc = get_fuse_conn(inode); 22 22 struct fuse_open_in inarg; 23 23 struct fuse_req *req; 24 24 int err; ··· 31 31 inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY); 32 32 if (!fc->atomic_o_trunc) 33 33 inarg.flags &= ~O_TRUNC; 34 - req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; 35 - req->in.h.nodeid = get_node_id(inode); 34 + req->in.h.opcode = opcode; 35 + req->in.h.nodeid = nodeid; 36 36 req->in.numargs = 1; 37 37 req->in.args[0].size = sizeof(inarg); 38 38 req->in.args[0].value = &inarg; ··· 49 49 struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) 50 50 { 51 51 struct fuse_file *ff; 52 + 52 53 ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL); 53 - if (ff) { 54 - ff->reserved_req = fuse_request_alloc(); 55 - if (!ff->reserved_req) { 56 - kfree(ff); 57 - return NULL; 58 - } else { 59 - INIT_LIST_HEAD(&ff->write_entry); 60 - atomic_set(&ff->count, 0); 61 - spin_lock(&fc->lock); 62 - ff->kh = ++fc->khctr; 63 - spin_unlock(&fc->lock); 64 - } 65 - RB_CLEAR_NODE(&ff->polled_node); 66 - init_waitqueue_head(&ff->poll_wait); 54 + if (unlikely(!ff)) 55 + return NULL; 56 + 57 + ff->fc = fc; 58 + ff->reserved_req = fuse_request_alloc(); 59 + if (unlikely(!ff->reserved_req)) { 60 + kfree(ff); 61 + return NULL; 67 62 } 63 + 64 + INIT_LIST_HEAD(&ff->write_entry); 65 + atomic_set(&ff->count, 0); 66 + RB_CLEAR_NODE(&ff->polled_node); 67 + init_waitqueue_head(&ff->poll_wait); 68 + 69 + spin_lock(&fc->lock); 70 + ff->kh = ++fc->khctr; 71 + spin_unlock(&fc->lock); 72 + 68 73 
return ff; 69 74 } 70 75 ··· 79 74 kfree(ff); 80 75 } 81 76 82 - static struct fuse_file *fuse_file_get(struct fuse_file *ff) 77 + struct fuse_file *fuse_file_get(struct fuse_file *ff) 83 78 { 84 79 atomic_inc(&ff->count); 85 80 return ff; ··· 87 82 88 83 static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) 89 84 { 90 - dput(req->misc.release.dentry); 91 - mntput(req->misc.release.vfsmount); 85 + path_put(&req->misc.release.path); 92 86 } 93 87 94 88 static void fuse_file_put(struct fuse_file *ff) 95 89 { 96 90 if (atomic_dec_and_test(&ff->count)) { 97 91 struct fuse_req *req = ff->reserved_req; 98 - struct inode *inode = req->misc.release.dentry->d_inode; 99 - struct fuse_conn *fc = get_fuse_conn(inode); 92 + 100 93 req->end = fuse_release_end; 101 - fuse_request_send_background(fc, req); 94 + fuse_request_send_background(ff->fc, req); 102 95 kfree(ff); 103 96 } 104 97 } 105 98 106 - void fuse_finish_open(struct inode *inode, struct file *file, 107 - struct fuse_file *ff, struct fuse_open_out *outarg) 99 + int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file, 100 + bool isdir) 108 101 { 109 - if (outarg->open_flags & FOPEN_DIRECT_IO) 110 - file->f_op = &fuse_direct_io_file_operations; 111 - if (!(outarg->open_flags & FOPEN_KEEP_CACHE)) 112 - invalidate_inode_pages2(inode->i_mapping); 113 - if (outarg->open_flags & FOPEN_NONSEEKABLE) 114 - nonseekable_open(inode, file); 115 - ff->fh = outarg->fh; 116 - file->private_data = fuse_file_get(ff); 117 - } 118 - 119 - int fuse_open_common(struct inode *inode, struct file *file, int isdir) 120 - { 121 - struct fuse_conn *fc = get_fuse_conn(inode); 122 102 struct fuse_open_out outarg; 123 103 struct fuse_file *ff; 104 + int err; 105 + int opcode = isdir ? 
FUSE_OPENDIR : FUSE_OPEN; 106 + 107 + ff = fuse_file_alloc(fc); 108 + if (!ff) 109 + return -ENOMEM; 110 + 111 + err = fuse_send_open(fc, nodeid, file, opcode, &outarg); 112 + if (err) { 113 + fuse_file_free(ff); 114 + return err; 115 + } 116 + 117 + if (isdir) 118 + outarg.open_flags &= ~FOPEN_DIRECT_IO; 119 + 120 + ff->fh = outarg.fh; 121 + ff->nodeid = nodeid; 122 + ff->open_flags = outarg.open_flags; 123 + file->private_data = fuse_file_get(ff); 124 + 125 + return 0; 126 + } 127 + EXPORT_SYMBOL_GPL(fuse_do_open); 128 + 129 + void fuse_finish_open(struct inode *inode, struct file *file) 130 + { 131 + struct fuse_file *ff = file->private_data; 132 + 133 + if (ff->open_flags & FOPEN_DIRECT_IO) 134 + file->f_op = &fuse_direct_io_file_operations; 135 + if (!(ff->open_flags & FOPEN_KEEP_CACHE)) 136 + invalidate_inode_pages2(inode->i_mapping); 137 + if (ff->open_flags & FOPEN_NONSEEKABLE) 138 + nonseekable_open(inode, file); 139 + } 140 + 141 + int fuse_open_common(struct inode *inode, struct file *file, bool isdir) 142 + { 143 + struct fuse_conn *fc = get_fuse_conn(inode); 124 144 int err; 125 145 126 146 /* VFS checks this, but only _after_ ->open() */ ··· 156 126 if (err) 157 127 return err; 158 128 159 - ff = fuse_file_alloc(fc); 160 - if (!ff) 161 - return -ENOMEM; 162 - 163 - err = fuse_send_open(inode, file, isdir, &outarg); 129 + err = fuse_do_open(fc, get_node_id(inode), file, isdir); 164 130 if (err) 165 - fuse_file_free(ff); 166 - else { 167 - if (isdir) 168 - outarg.open_flags &= ~FOPEN_DIRECT_IO; 169 - fuse_finish_open(inode, file, ff, &outarg); 170 - } 131 + return err; 171 132 172 - return err; 133 + fuse_finish_open(inode, file); 134 + 135 + return 0; 173 136 } 174 137 175 - void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode) 138 + static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode) 176 139 { 140 + struct fuse_conn *fc = ff->fc; 177 141 struct fuse_req *req = ff->reserved_req; 178 142 struct 
fuse_release_in *inarg = &req->misc.release.in; 143 + 144 + spin_lock(&fc->lock); 145 + list_del(&ff->write_entry); 146 + if (!RB_EMPTY_NODE(&ff->polled_node)) 147 + rb_erase(&ff->polled_node, &fc->polled_files); 148 + spin_unlock(&fc->lock); 149 + 150 + wake_up_interruptible_sync(&ff->poll_wait); 179 151 180 152 inarg->fh = ff->fh; 181 153 inarg->flags = flags; 182 154 req->in.h.opcode = opcode; 183 - req->in.h.nodeid = nodeid; 155 + req->in.h.nodeid = ff->nodeid; 184 156 req->in.numargs = 1; 185 157 req->in.args[0].size = sizeof(struct fuse_release_in); 186 158 req->in.args[0].value = inarg; 187 159 } 188 160 189 - int fuse_release_common(struct inode *inode, struct file *file, int isdir) 161 + void fuse_release_common(struct file *file, int opcode) 190 162 { 191 - struct fuse_file *ff = file->private_data; 192 - if (ff) { 193 - struct fuse_conn *fc = get_fuse_conn(inode); 194 - struct fuse_req *req = ff->reserved_req; 163 + struct fuse_file *ff; 164 + struct fuse_req *req; 195 165 196 - fuse_release_fill(ff, get_node_id(inode), file->f_flags, 197 - isdir ? 
FUSE_RELEASEDIR : FUSE_RELEASE); 166 + ff = file->private_data; 167 + if (unlikely(!ff)) 168 + return; 198 169 199 - /* Hold vfsmount and dentry until release is finished */ 200 - req->misc.release.vfsmount = mntget(file->f_path.mnt); 201 - req->misc.release.dentry = dget(file->f_path.dentry); 170 + req = ff->reserved_req; 171 + fuse_prepare_release(ff, file->f_flags, opcode); 202 172 203 - spin_lock(&fc->lock); 204 - list_del(&ff->write_entry); 205 - if (!RB_EMPTY_NODE(&ff->polled_node)) 206 - rb_erase(&ff->polled_node, &fc->polled_files); 207 - spin_unlock(&fc->lock); 173 + /* Hold vfsmount and dentry until release is finished */ 174 + path_get(&file->f_path); 175 + req->misc.release.path = file->f_path; 208 176 209 - wake_up_interruptible_sync(&ff->poll_wait); 210 - /* 211 - * Normally this will send the RELEASE request, 212 - * however if some asynchronous READ or WRITE requests 213 - * are outstanding, the sending will be delayed 214 - */ 215 - fuse_file_put(ff); 216 - } 217 - 218 - /* Return value is ignored by VFS */ 219 - return 0; 177 + /* 178 + * Normally this will send the RELEASE request, however if 179 + * some asynchronous READ or WRITE requests are outstanding, 180 + * the sending will be delayed. 
181 + */ 182 + fuse_file_put(ff); 220 183 } 221 184 222 185 static int fuse_open(struct inode *inode, struct file *file) 223 186 { 224 - return fuse_open_common(inode, file, 0); 187 + return fuse_open_common(inode, file, false); 225 188 } 226 189 227 190 static int fuse_release(struct inode *inode, struct file *file) 228 191 { 229 - return fuse_release_common(inode, file, 0); 192 + fuse_release_common(file, FUSE_RELEASE); 193 + 194 + /* return value is ignored by VFS */ 195 + return 0; 230 196 } 197 + 198 + void fuse_sync_release(struct fuse_file *ff, int flags) 199 + { 200 + WARN_ON(atomic_read(&ff->count) > 1); 201 + fuse_prepare_release(ff, flags, FUSE_RELEASE); 202 + ff->reserved_req->force = 1; 203 + fuse_request_send(ff->fc, ff->reserved_req); 204 + fuse_put_request(ff->fc, ff->reserved_req); 205 + kfree(ff); 206 + } 207 + EXPORT_SYMBOL_GPL(fuse_sync_release); 231 208 232 209 /* 233 210 * Scramble the ID space with XTEA, so that the value of the files_struct ··· 408 371 return fuse_fsync_common(file, de, datasync, 0); 409 372 } 410 373 411 - void fuse_read_fill(struct fuse_req *req, struct file *file, 412 - struct inode *inode, loff_t pos, size_t count, int opcode) 374 + void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, 375 + size_t count, int opcode) 413 376 { 414 377 struct fuse_read_in *inarg = &req->misc.read.in; 415 378 struct fuse_file *ff = file->private_data; ··· 419 382 inarg->size = count; 420 383 inarg->flags = file->f_flags; 421 384 req->in.h.opcode = opcode; 422 - req->in.h.nodeid = get_node_id(inode); 385 + req->in.h.nodeid = ff->nodeid; 423 386 req->in.numargs = 1; 424 387 req->in.args[0].size = sizeof(struct fuse_read_in); 425 388 req->in.args[0].value = inarg; ··· 429 392 } 430 393 431 394 static size_t fuse_send_read(struct fuse_req *req, struct file *file, 432 - struct inode *inode, loff_t pos, size_t count, 433 - fl_owner_t owner) 395 + loff_t pos, size_t count, fl_owner_t owner) 434 396 { 435 - struct fuse_conn *fc 
= get_fuse_conn(inode); 397 + struct fuse_file *ff = file->private_data; 398 + struct fuse_conn *fc = ff->fc; 436 399 437 - fuse_read_fill(req, file, inode, pos, count, FUSE_READ); 400 + fuse_read_fill(req, file, pos, count, FUSE_READ); 438 401 if (owner != NULL) { 439 402 struct fuse_read_in *inarg = &req->misc.read.in; 440 403 ··· 492 455 req->out.argpages = 1; 493 456 req->num_pages = 1; 494 457 req->pages[0] = page; 495 - num_read = fuse_send_read(req, file, inode, pos, count, NULL); 458 + num_read = fuse_send_read(req, file, pos, count, NULL); 496 459 err = req->out.h.error; 497 460 fuse_put_request(fc, req); 498 461 ··· 541 504 fuse_file_put(req->ff); 542 505 } 543 506 544 - static void fuse_send_readpages(struct fuse_req *req, struct file *file, 545 - struct inode *inode) 507 + static void fuse_send_readpages(struct fuse_req *req, struct file *file) 546 508 { 547 - struct fuse_conn *fc = get_fuse_conn(inode); 509 + struct fuse_file *ff = file->private_data; 510 + struct fuse_conn *fc = ff->fc; 548 511 loff_t pos = page_offset(req->pages[0]); 549 512 size_t count = req->num_pages << PAGE_CACHE_SHIFT; 550 513 551 514 req->out.argpages = 1; 552 515 req->out.page_zeroing = 1; 553 - fuse_read_fill(req, file, inode, pos, count, FUSE_READ); 516 + fuse_read_fill(req, file, pos, count, FUSE_READ); 554 517 req->misc.read.attr_ver = fuse_get_attr_version(fc); 555 518 if (fc->async_read) { 556 - struct fuse_file *ff = file->private_data; 557 519 req->ff = fuse_file_get(ff); 558 520 req->end = fuse_readpages_end; 559 521 fuse_request_send_background(fc, req); ··· 582 546 (req->num_pages == FUSE_MAX_PAGES_PER_REQ || 583 547 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || 584 548 req->pages[req->num_pages - 1]->index + 1 != page->index)) { 585 - fuse_send_readpages(req, data->file, inode); 549 + fuse_send_readpages(req, data->file); 586 550 data->req = req = fuse_get_req(fc); 587 551 if (IS_ERR(req)) { 588 552 unlock_page(page); ··· 616 580 err = 
read_cache_pages(mapping, pages, fuse_readpages_fill, &data); 617 581 if (!err) { 618 582 if (data.req->num_pages) 619 - fuse_send_readpages(data.req, file, inode); 583 + fuse_send_readpages(data.req, file); 620 584 else 621 585 fuse_put_request(fc, data.req); 622 586 } ··· 643 607 return generic_file_aio_read(iocb, iov, nr_segs, pos); 644 608 } 645 609 646 - static void fuse_write_fill(struct fuse_req *req, struct file *file, 647 - struct fuse_file *ff, struct inode *inode, 648 - loff_t pos, size_t count, int writepage) 610 + static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff, 611 + loff_t pos, size_t count) 649 612 { 650 - struct fuse_conn *fc = get_fuse_conn(inode); 651 613 struct fuse_write_in *inarg = &req->misc.write.in; 652 614 struct fuse_write_out *outarg = &req->misc.write.out; 653 615 654 - memset(inarg, 0, sizeof(struct fuse_write_in)); 655 616 inarg->fh = ff->fh; 656 617 inarg->offset = pos; 657 618 inarg->size = count; 658 - inarg->write_flags = writepage ? FUSE_WRITE_CACHE : 0; 659 - inarg->flags = file ? 
file->f_flags : 0; 660 619 req->in.h.opcode = FUSE_WRITE; 661 - req->in.h.nodeid = get_node_id(inode); 620 + req->in.h.nodeid = ff->nodeid; 662 621 req->in.numargs = 2; 663 - if (fc->minor < 9) 622 + if (ff->fc->minor < 9) 664 623 req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE; 665 624 else 666 625 req->in.args[0].size = sizeof(struct fuse_write_in); ··· 667 636 } 668 637 669 638 static size_t fuse_send_write(struct fuse_req *req, struct file *file, 670 - struct inode *inode, loff_t pos, size_t count, 671 - fl_owner_t owner) 639 + loff_t pos, size_t count, fl_owner_t owner) 672 640 { 673 - struct fuse_conn *fc = get_fuse_conn(inode); 674 - fuse_write_fill(req, file, file->private_data, inode, pos, count, 0); 641 + struct fuse_file *ff = file->private_data; 642 + struct fuse_conn *fc = ff->fc; 643 + struct fuse_write_in *inarg = &req->misc.write.in; 644 + 645 + fuse_write_fill(req, ff, pos, count); 646 + inarg->flags = file->f_flags; 675 647 if (owner != NULL) { 676 - struct fuse_write_in *inarg = &req->misc.write.in; 677 648 inarg->write_flags |= FUSE_WRITE_LOCKOWNER; 678 649 inarg->lock_owner = fuse_lock_owner_id(fc, owner); 679 650 } ··· 733 700 req->num_pages = 1; 734 701 req->pages[0] = page; 735 702 req->page_offset = offset; 736 - nres = fuse_send_write(req, file, inode, pos, count, NULL); 703 + nres = fuse_send_write(req, file, pos, count, NULL); 737 704 err = req->out.h.error; 738 705 fuse_put_request(fc, req); 739 706 if (!err && !nres) ··· 774 741 for (i = 0; i < req->num_pages; i++) 775 742 fuse_wait_on_page_writeback(inode, req->pages[i]->index); 776 743 777 - res = fuse_send_write(req, file, inode, pos, count, NULL); 744 + res = fuse_send_write(req, file, pos, count, NULL); 778 745 779 746 offset = req->page_offset; 780 747 count = res; ··· 1012 979 return 0; 1013 980 } 1014 981 1015 - static ssize_t fuse_direct_io(struct file *file, const char __user *buf, 1016 - size_t count, loff_t *ppos, int write) 982 + ssize_t fuse_direct_io(struct file *file, 
const char __user *buf, 983 + size_t count, loff_t *ppos, int write) 1017 984 { 1018 - struct inode *inode = file->f_path.dentry->d_inode; 1019 - struct fuse_conn *fc = get_fuse_conn(inode); 985 + struct fuse_file *ff = file->private_data; 986 + struct fuse_conn *fc = ff->fc; 1020 987 size_t nmax = write ? fc->max_write : fc->max_read; 1021 988 loff_t pos = *ppos; 1022 989 ssize_t res = 0; 1023 990 struct fuse_req *req; 1024 - 1025 - if (is_bad_inode(inode)) 1026 - return -EIO; 1027 991 1028 992 req = fuse_get_req(fc); 1029 993 if (IS_ERR(req)) ··· 1028 998 1029 999 while (count) { 1030 1000 size_t nres; 1001 + fl_owner_t owner = current->files; 1031 1002 size_t nbytes = min(count, nmax); 1032 1003 int err = fuse_get_user_pages(req, buf, &nbytes, write); 1033 1004 if (err) { ··· 1037 1006 } 1038 1007 1039 1008 if (write) 1040 - nres = fuse_send_write(req, file, inode, pos, nbytes, 1041 - current->files); 1009 + nres = fuse_send_write(req, file, pos, nbytes, owner); 1042 1010 else 1043 - nres = fuse_send_read(req, file, inode, pos, nbytes, 1044 - current->files); 1011 + nres = fuse_send_read(req, file, pos, nbytes, owner); 1012 + 1045 1013 fuse_release_user_pages(req, !write); 1046 1014 if (req->out.h.error) { 1047 1015 if (!res) ··· 1064 1034 } 1065 1035 } 1066 1036 fuse_put_request(fc, req); 1067 - if (res > 0) { 1068 - if (write) 1069 - fuse_write_update_size(inode, pos); 1037 + if (res > 0) 1070 1038 *ppos = pos; 1071 - } 1072 - fuse_invalidate_attr(inode); 1073 1039 1074 1040 return res; 1075 1041 } 1042 + EXPORT_SYMBOL_GPL(fuse_direct_io); 1076 1043 1077 1044 static ssize_t fuse_direct_read(struct file *file, char __user *buf, 1078 1045 size_t count, loff_t *ppos) 1079 1046 { 1080 - return fuse_direct_io(file, buf, count, ppos, 0); 1047 + ssize_t res; 1048 + struct inode *inode = file->f_path.dentry->d_inode; 1049 + 1050 + if (is_bad_inode(inode)) 1051 + return -EIO; 1052 + 1053 + res = fuse_direct_io(file, buf, count, ppos, 0); 1054 + 1055 + 
fuse_invalidate_attr(inode); 1056 + 1057 + return res; 1081 1058 } 1082 1059 1083 1060 static ssize_t fuse_direct_write(struct file *file, const char __user *buf, ··· 1092 1055 { 1093 1056 struct inode *inode = file->f_path.dentry->d_inode; 1094 1057 ssize_t res; 1058 + 1059 + if (is_bad_inode(inode)) 1060 + return -EIO; 1061 + 1095 1062 /* Don't allow parallel writes to the same file */ 1096 1063 mutex_lock(&inode->i_mutex); 1097 1064 res = generic_write_checks(file, ppos, &count, 0); 1098 - if (!res) 1065 + if (!res) { 1099 1066 res = fuse_direct_io(file, buf, count, ppos, 1); 1067 + if (res > 0) 1068 + fuse_write_update_size(inode, *ppos); 1069 + } 1100 1070 mutex_unlock(&inode->i_mutex); 1071 + 1072 + fuse_invalidate_attr(inode); 1073 + 1101 1074 return res; 1102 1075 } 1103 1076 ··· 1224 1177 req->ff = fuse_file_get(ff); 1225 1178 spin_unlock(&fc->lock); 1226 1179 1227 - fuse_write_fill(req, NULL, ff, inode, page_offset(page), 0, 1); 1180 + fuse_write_fill(req, ff, page_offset(page), 0); 1228 1181 1229 1182 copy_highpage(tmp_page, page); 1183 + req->misc.write.in.write_flags |= FUSE_WRITE_CACHE; 1230 1184 req->in.argpages = 1; 1231 1185 req->num_pages = 1; 1232 1186 req->pages[0] = tmp_page; ··· 1651 1603 * limits ioctl data transfers to well-formed ioctls and is the forced 1652 1604 * behavior for all FUSE servers. 
1653 1605 */ 1654 - static long fuse_file_do_ioctl(struct file *file, unsigned int cmd, 1655 - unsigned long arg, unsigned int flags) 1606 + long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, 1607 + unsigned int flags) 1656 1608 { 1657 - struct inode *inode = file->f_dentry->d_inode; 1658 1609 struct fuse_file *ff = file->private_data; 1659 - struct fuse_conn *fc = get_fuse_conn(inode); 1610 + struct fuse_conn *fc = ff->fc; 1660 1611 struct fuse_ioctl_in inarg = { 1661 1612 .fh = ff->fh, 1662 1613 .cmd = cmd, ··· 1673 1626 1674 1627 /* assume all the iovs returned by client always fits in a page */ 1675 1628 BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); 1676 - 1677 - if (!fuse_allow_task(fc, current)) 1678 - return -EACCES; 1679 - 1680 - err = -EIO; 1681 - if (is_bad_inode(inode)) 1682 - goto out; 1683 1629 1684 1630 err = -ENOMEM; 1685 1631 pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL); ··· 1734 1694 1735 1695 /* okay, let's send it to the client */ 1736 1696 req->in.h.opcode = FUSE_IOCTL; 1737 - req->in.h.nodeid = get_node_id(inode); 1697 + req->in.h.nodeid = ff->nodeid; 1738 1698 req->in.numargs = 1; 1739 1699 req->in.args[0].size = sizeof(inarg); 1740 1700 req->in.args[0].value = &inarg; ··· 1817 1777 1818 1778 return err ? 
err : outarg.result; 1819 1779 } 1780 + EXPORT_SYMBOL_GPL(fuse_do_ioctl); 1781 + 1782 + static long fuse_file_ioctl_common(struct file *file, unsigned int cmd, 1783 + unsigned long arg, unsigned int flags) 1784 + { 1785 + struct inode *inode = file->f_dentry->d_inode; 1786 + struct fuse_conn *fc = get_fuse_conn(inode); 1787 + 1788 + if (!fuse_allow_task(fc, current)) 1789 + return -EACCES; 1790 + 1791 + if (is_bad_inode(inode)) 1792 + return -EIO; 1793 + 1794 + return fuse_do_ioctl(file, cmd, arg, flags); 1795 + } 1820 1796 1821 1797 static long fuse_file_ioctl(struct file *file, unsigned int cmd, 1822 1798 unsigned long arg) 1823 1799 { 1824 - return fuse_file_do_ioctl(file, cmd, arg, 0); 1800 + return fuse_file_ioctl_common(file, cmd, arg, 0); 1825 1801 } 1826 1802 1827 1803 static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd, 1828 1804 unsigned long arg) 1829 1805 { 1830 - return fuse_file_do_ioctl(file, cmd, arg, FUSE_IOCTL_COMPAT); 1806 + return fuse_file_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT); 1831 1807 } 1832 1808 1833 1809 /* ··· 1897 1841 spin_unlock(&fc->lock); 1898 1842 } 1899 1843 1900 - static unsigned fuse_file_poll(struct file *file, poll_table *wait) 1844 + unsigned fuse_file_poll(struct file *file, poll_table *wait) 1901 1845 { 1902 - struct inode *inode = file->f_dentry->d_inode; 1903 1846 struct fuse_file *ff = file->private_data; 1904 - struct fuse_conn *fc = get_fuse_conn(inode); 1847 + struct fuse_conn *fc = ff->fc; 1905 1848 struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh }; 1906 1849 struct fuse_poll_out outarg; 1907 1850 struct fuse_req *req; ··· 1925 1870 return PTR_ERR(req); 1926 1871 1927 1872 req->in.h.opcode = FUSE_POLL; 1928 - req->in.h.nodeid = get_node_id(inode); 1873 + req->in.h.nodeid = ff->nodeid; 1929 1874 req->in.numargs = 1; 1930 1875 req->in.args[0].size = sizeof(inarg); 1931 1876 req->in.args[0].value = &inarg; ··· 1944 1889 } 1945 1890 return POLLERR; 1946 1891 } 1892 + 
EXPORT_SYMBOL_GPL(fuse_file_poll); 1947 1893 1948 1894 /* 1949 1895 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
+35 -12
fs/fuse/fuse_i.h
··· 97 97 struct list_head writepages; 98 98 }; 99 99 100 + struct fuse_conn; 101 + 100 102 /** FUSE specific file data */ 101 103 struct fuse_file { 104 + /** Fuse connection for this file */ 105 + struct fuse_conn *fc; 106 + 102 107 /** Request reserved for flush and release */ 103 108 struct fuse_req *reserved_req; 104 109 ··· 113 108 /** File handle used by userspace */ 114 109 u64 fh; 115 110 111 + /** Node id of this file */ 112 + u64 nodeid; 113 + 116 114 /** Refcount */ 117 115 atomic_t count; 116 + 117 + /** FOPEN_* flags returned by open */ 118 + u32 open_flags; 118 119 119 120 /** Entry on inode's write_files list */ 120 121 struct list_head write_entry; ··· 196 185 FUSE_REQ_FINISHED 197 186 }; 198 187 199 - struct fuse_conn; 200 - 201 188 /** 202 189 * A request to the client 203 190 */ ··· 257 248 struct fuse_forget_in forget_in; 258 249 struct { 259 250 struct fuse_release_in in; 260 - struct vfsmount *vfsmount; 261 - struct dentry *dentry; 251 + struct path path; 262 252 } release; 263 253 struct fuse_init_in init_in; 264 254 struct fuse_init_out init_out; 255 + struct cuse_init_in cuse_init_in; 256 + struct cuse_init_out cuse_init_out; 265 257 struct { 266 258 struct fuse_read_in in; 267 259 u64 attr_ver; ··· 396 386 /** Filesystem supports NFS exporting. 
Only set in INIT */ 397 387 unsigned export_support:1; 398 388 389 + /** Set if bdi is valid */ 390 + unsigned bdi_initialized:1; 391 + 399 392 /* 400 393 * The following bitfields are only for optimization purposes 401 394 * and hence races in setting them will not cause malfunction ··· 528 515 * Initialize READ or READDIR request 529 516 */ 530 517 void fuse_read_fill(struct fuse_req *req, struct file *file, 531 - struct inode *inode, loff_t pos, size_t count, int opcode); 518 + loff_t pos, size_t count, int opcode); 532 519 533 520 /** 534 521 * Send OPEN or OPENDIR request 535 522 */ 536 - int fuse_open_common(struct inode *inode, struct file *file, int isdir); 523 + int fuse_open_common(struct inode *inode, struct file *file, bool isdir); 537 524 538 525 struct fuse_file *fuse_file_alloc(struct fuse_conn *fc); 526 + struct fuse_file *fuse_file_get(struct fuse_file *ff); 539 527 void fuse_file_free(struct fuse_file *ff); 540 - void fuse_finish_open(struct inode *inode, struct file *file, 541 - struct fuse_file *ff, struct fuse_open_out *outarg); 528 + void fuse_finish_open(struct inode *inode, struct file *file); 542 529 543 - /** Fill in ff->reserved_req with a RELEASE request */ 544 - void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode); 530 + void fuse_sync_release(struct fuse_file *ff, int flags); 545 531 546 532 /** 547 533 * Send RELEASE or RELEASEDIR request 548 534 */ 549 - int fuse_release_common(struct inode *inode, struct file *file, int isdir); 535 + void fuse_release_common(struct file *file, int opcode); 550 536 551 537 /** 552 538 * Send FSYNC or FSYNCDIR request ··· 664 652 */ 665 653 struct fuse_conn *fuse_conn_get(struct fuse_conn *fc); 666 654 655 + void fuse_conn_kill(struct fuse_conn *fc); 656 + 667 657 /** 668 658 * Initialize fuse_conn 669 659 */ 670 - int fuse_conn_init(struct fuse_conn *fc, struct super_block *sb); 660 + void fuse_conn_init(struct fuse_conn *fc); 671 661 672 662 /** 673 663 * Release reference 
to fuse_conn ··· 707 693 void fuse_release_nowrite(struct inode *inode); 708 694 709 695 u64 fuse_get_attr_version(struct fuse_conn *fc); 696 + 697 + int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file, 698 + bool isdir); 699 + ssize_t fuse_direct_io(struct file *file, const char __user *buf, 700 + size_t count, loff_t *ppos, int write); 701 + long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, 702 + unsigned int flags); 703 + unsigned fuse_file_poll(struct file *file, poll_table *wait); 704 + int fuse_dev_release(struct inode *inode, struct file *file); 710 705 711 706 #endif /* _FS_FUSE_I_H */
+68 -50
fs/fuse/inode.c
··· 277 277 } 278 278 } 279 279 280 - static void fuse_put_super(struct super_block *sb) 280 + static void fuse_bdi_destroy(struct fuse_conn *fc) 281 281 { 282 - struct fuse_conn *fc = get_fuse_conn_super(sb); 282 + if (fc->bdi_initialized) 283 + bdi_destroy(&fc->bdi); 284 + } 283 285 284 - fuse_send_destroy(fc); 286 + void fuse_conn_kill(struct fuse_conn *fc) 287 + { 285 288 spin_lock(&fc->lock); 286 289 fc->connected = 0; 287 290 fc->blocked = 0; ··· 298 295 list_del(&fc->entry); 299 296 fuse_ctl_remove_conn(fc); 300 297 mutex_unlock(&fuse_mutex); 301 - bdi_destroy(&fc->bdi); 298 + fuse_bdi_destroy(fc); 299 + } 300 + EXPORT_SYMBOL_GPL(fuse_conn_kill); 301 + 302 + static void fuse_put_super(struct super_block *sb) 303 + { 304 + struct fuse_conn *fc = get_fuse_conn_super(sb); 305 + 306 + fuse_send_destroy(fc); 307 + fuse_conn_kill(fc); 302 308 fuse_conn_put(fc); 303 309 } 304 310 ··· 478 466 return 0; 479 467 } 480 468 481 - int fuse_conn_init(struct fuse_conn *fc, struct super_block *sb) 469 + void fuse_conn_init(struct fuse_conn *fc) 482 470 { 483 - int err; 484 - 485 471 memset(fc, 0, sizeof(*fc)); 486 472 spin_lock_init(&fc->lock); 487 473 mutex_init(&fc->inst_mutex); ··· 494 484 INIT_LIST_HEAD(&fc->bg_queue); 495 485 INIT_LIST_HEAD(&fc->entry); 496 486 atomic_set(&fc->num_waiting, 0); 497 - fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 498 - fc->bdi.unplug_io_fn = default_unplug_io_fn; 499 - /* fuse does it's own writeback accounting */ 500 - fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB; 501 487 fc->khctr = 0; 502 488 fc->polled_files = RB_ROOT; 503 - fc->dev = sb->s_dev; 504 - err = bdi_init(&fc->bdi); 505 - if (err) 506 - goto error_mutex_destroy; 507 - if (sb->s_bdev) { 508 - err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk", 509 - MAJOR(fc->dev), MINOR(fc->dev)); 510 - } else { 511 - err = bdi_register_dev(&fc->bdi, fc->dev); 512 - } 513 - if (err) 514 - goto error_bdi_destroy; 515 - /* 516 - * For a single fuse filesystem use max 1% of 
dirty + 517 - * writeback threshold. 518 - * 519 - * This gives about 1M of write buffer for memory maps on a 520 - * machine with 1G and 10% dirty_ratio, which should be more 521 - * than enough. 522 - * 523 - * Privileged users can raise it by writing to 524 - * 525 - * /sys/class/bdi/<bdi>/max_ratio 526 - */ 527 - bdi_set_max_ratio(&fc->bdi, 1); 528 489 fc->reqctr = 0; 529 490 fc->blocked = 1; 530 491 fc->attr_version = 1; 531 492 get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); 532 - 533 - return 0; 534 - 535 - error_bdi_destroy: 536 - bdi_destroy(&fc->bdi); 537 - error_mutex_destroy: 538 - mutex_destroy(&fc->inst_mutex); 539 - return err; 540 493 } 541 494 EXPORT_SYMBOL_GPL(fuse_conn_init); 542 495 ··· 512 539 fc->release(fc); 513 540 } 514 541 } 542 + EXPORT_SYMBOL_GPL(fuse_conn_put); 515 543 516 544 struct fuse_conn *fuse_conn_get(struct fuse_conn *fc) 517 545 { 518 546 atomic_inc(&fc->count); 519 547 return fc; 520 548 } 549 + EXPORT_SYMBOL_GPL(fuse_conn_get); 521 550 522 551 static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode) 523 552 { ··· 772 797 kfree(fc); 773 798 } 774 799 800 + static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb) 801 + { 802 + int err; 803 + 804 + fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 805 + fc->bdi.unplug_io_fn = default_unplug_io_fn; 806 + /* fuse does it's own writeback accounting */ 807 + fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB; 808 + 809 + err = bdi_init(&fc->bdi); 810 + if (err) 811 + return err; 812 + 813 + fc->bdi_initialized = 1; 814 + 815 + if (sb->s_bdev) { 816 + err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk", 817 + MAJOR(fc->dev), MINOR(fc->dev)); 818 + } else { 819 + err = bdi_register_dev(&fc->bdi, fc->dev); 820 + } 821 + 822 + if (err) 823 + return err; 824 + 825 + /* 826 + * For a single fuse filesystem use max 1% of dirty + 827 + * writeback threshold. 
828 + * 829 + * This gives about 1M of write buffer for memory maps on a 830 + * machine with 1G and 10% dirty_ratio, which should be more 831 + * than enough. 832 + * 833 + * Privileged users can raise it by writing to 834 + * 835 + * /sys/class/bdi/<bdi>/max_ratio 836 + */ 837 + bdi_set_max_ratio(&fc->bdi, 1); 838 + 839 + return 0; 840 + } 841 + 775 842 static int fuse_fill_super(struct super_block *sb, void *data, int silent) 776 843 { 777 844 struct fuse_conn *fc; ··· 860 843 if (!fc) 861 844 goto err_fput; 862 845 863 - err = fuse_conn_init(fc, sb); 864 - if (err) { 865 - kfree(fc); 866 - goto err_fput; 867 - } 846 + fuse_conn_init(fc); 847 + 848 + fc->dev = sb->s_dev; 849 + err = fuse_bdi_init(fc, sb); 850 + if (err) 851 + goto err_put_conn; 868 852 869 853 fc->release = fuse_free_conn; 870 854 fc->flags = d.flags; ··· 929 911 err_put_root: 930 912 dput(root_dentry); 931 913 err_put_conn: 932 - bdi_destroy(&fc->bdi); 914 + fuse_bdi_destroy(fc); 933 915 fuse_conn_put(fc); 934 916 err_fput: 935 917 fput(file);
+31
include/linux/fuse.h
··· 121 121 #define FUSE_BIG_WRITES (1 << 5) 122 122 123 123 /** 124 + * CUSE INIT request/reply flags 125 + * 126 + * CUSE_UNRESTRICTED_IOCTL: use unrestricted ioctl 127 + */ 128 + #define CUSE_UNRESTRICTED_IOCTL (1 << 0) 129 + 130 + /** 124 131 * Release flags 125 132 */ 126 133 #define FUSE_RELEASE_FLUSH (1 << 0) ··· 217 210 FUSE_DESTROY = 38, 218 211 FUSE_IOCTL = 39, 219 212 FUSE_POLL = 40, 213 + 214 + /* CUSE specific operations */ 215 + CUSE_INIT = 4096, 220 216 }; 221 217 222 218 enum fuse_notify_code { ··· 409 399 __u32 flags; 410 400 __u32 unused; 411 401 __u32 max_write; 402 + }; 403 + 404 + #define CUSE_INIT_INFO_MAX 4096 405 + 406 + struct cuse_init_in { 407 + __u32 major; 408 + __u32 minor; 409 + __u32 unused; 410 + __u32 flags; 411 + }; 412 + 413 + struct cuse_init_out { 414 + __u32 major; 415 + __u32 minor; 416 + __u32 unused; 417 + __u32 flags; 418 + __u32 max_read; 419 + __u32 max_write; 420 + __u32 dev_major; /* chardev major */ 421 + __u32 dev_minor; /* chardev minor */ 422 + __u32 spare[10]; 412 423 }; 413 424 414 425 struct fuse_interrupt_in {