Merge tag 'fuse-update-6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse

Pull fuse updates from Miklos Szeredi:

- Allow connection to server to time out (Joanne Koong)

- If server doesn't support creating a hard link, return EPERM rather
than ENOSYS (Matt Johnston)

- Allow file names longer than 1024 chars (Bernd Schubert)

- Fix a possible race if request on io_uring queue is interrupted
(Bernd Schubert)

- Misc fixes and cleanups

* tag 'fuse-update-6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse:
fuse: remove unneeded atomic set in uring creation
fuse: fix uring race condition for null dereference of fc
fuse: Increase FUSE_NAME_MAX to PATH_MAX
fuse: Allocate only namelen buf memory in fuse_notify_
fuse: add default_request_timeout and max_request_timeout sysctls
fuse: add kernel-enforced timeout option for requests
fuse: optimize missing FUSE_LINK support
fuse: Return EPERM rather than ENOSYS from link()
fuse: removed unused function fuse_uring_create() from header
fuse: {io-uring} Fix a possible req cancellation race

+356 -41
+25
Documentation/admin-guide/sysctl/fs.rst
··· 347 ``/proc/sys/fs/fuse/max_pages_limit`` is a read/write file for 348 setting/getting the maximum number of pages that can be used for servicing 349 requests in FUSE.
··· 347 ``/proc/sys/fs/fuse/max_pages_limit`` is a read/write file for 348 setting/getting the maximum number of pages that can be used for servicing 349 requests in FUSE. 350 + 351 + ``/proc/sys/fs/fuse/default_request_timeout`` is a read/write file for 352 + setting/getting the default timeout (in seconds) for a fuse server to 353 + reply to a kernel-issued request in the event where the server did not 354 + specify a timeout at mount. If the server set a timeout, 355 + then default_request_timeout will be ignored. The default 356 + "default_request_timeout" is set to 0. 0 indicates no default timeout. 357 + The maximum value that can be set is 65535. 358 + 359 + ``/proc/sys/fs/fuse/max_request_timeout`` is a read/write file for 360 + setting/getting the maximum timeout (in seconds) for a fuse server to 361 + reply to a kernel-issued request. A value greater than 0 automatically opts 362 + the server into a timeout that will be set to at most "max_request_timeout", 363 + even if the server did not specify a timeout and default_request_timeout is 364 + set to 0. If max_request_timeout is greater than 0 and the server set a timeout 365 + greater than max_request_timeout or default_request_timeout is set to a value 366 + greater than max_request_timeout, the system will use max_request_timeout as the 367 + timeout. 0 indicates no max request timeout. The maximum value that can be set 368 + is 65535. 369 + 370 + For timeouts, if the server does not respond to the request by the time 371 + the set timeout elapses, then the connection to the fuse server will be aborted. 372 + Please note that the timeouts are not 100% precise (eg you may set 60 seconds but 373 + the timeout may kick in after 70 seconds). The upper margin of error for the 374 + timeout is roughly FUSE_TIMEOUT_TIMER_FREQ seconds.
+139 -23
fs/fuse/dev.c
··· 32 33 static struct kmem_cache *fuse_req_cachep; 34 35 static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req) 36 { 37 INIT_LIST_HEAD(&req->list); ··· 134 refcount_set(&req->count, 1); 135 __set_bit(FR_PENDING, &req->flags); 136 req->fm = fm; 137 } 138 139 static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags) ··· 502 return 0; 503 } 504 505 static void request_wait_answer(struct fuse_req *req) 506 { 507 struct fuse_conn *fc = req->fm->fc; ··· 541 } 542 543 if (!test_bit(FR_FORCE, &req->flags)) { 544 /* Only fatal signals may interrupt this */ 545 err = wait_event_killable(req->waitq, 546 test_bit(FR_FINISHED, &req->flags)); 547 if (!err) 548 return; 549 550 - spin_lock(&fiq->lock); 551 - /* Request is not yet in userspace, bail out */ 552 - if (test_bit(FR_PENDING, &req->flags)) { 553 - list_del(&req->list); 554 - spin_unlock(&fiq->lock); 555 - __fuse_put_request(req); 556 - req->out.h.error = -EINTR; 557 return; 558 - } 559 - spin_unlock(&fiq->lock); 560 } 561 562 /* ··· 1644 struct fuse_copy_state *cs) 1645 { 1646 struct fuse_notify_inval_entry_out outarg; 1647 - int err = -ENOMEM; 1648 - char *buf; 1649 struct qstr name; 1650 - 1651 - buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL); 1652 - if (!buf) 1653 - goto err; 1654 1655 err = -EINVAL; 1656 if (size < sizeof(outarg)) ··· 1657 goto err; 1658 1659 err = -ENAMETOOLONG; 1660 - if (outarg.namelen > FUSE_NAME_MAX) 1661 goto err; 1662 1663 err = -EINVAL; 1664 if (size != sizeof(outarg) + outarg.namelen + 1) 1665 goto err; 1666 1667 name.name = buf; ··· 1693 struct fuse_copy_state *cs) 1694 { 1695 struct fuse_notify_delete_out outarg; 1696 - int err = -ENOMEM; 1697 - char *buf; 1698 struct qstr name; 1699 - 1700 - buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL); 1701 - if (!buf) 1702 - goto err; 1703 1704 err = -EINVAL; 1705 if (size < sizeof(outarg)) ··· 1706 goto err; 1707 1708 err = -ENAMETOOLONG; 1709 - if (outarg.namelen > FUSE_NAME_MAX) 1710 goto err; 1711 1712 err = 
-EINVAL; 1713 if (size != sizeof(outarg) + outarg.namelen + 1) 1714 goto err; 1715 1716 name.name = buf; ··· 2387 struct fuse_req *req, *next; 2388 LIST_HEAD(to_end); 2389 unsigned int i; 2390 2391 /* Background queuing checks fc->connected under bg_lock */ 2392 spin_lock(&fc->bg_lock);
··· 32 33 static struct kmem_cache *fuse_req_cachep; 34 35 + const unsigned long fuse_timeout_timer_freq = 36 + secs_to_jiffies(FUSE_TIMEOUT_TIMER_FREQ); 37 + 38 + bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list) 39 + { 40 + struct fuse_req *req; 41 + 42 + req = list_first_entry_or_null(list, struct fuse_req, list); 43 + if (!req) 44 + return false; 45 + return time_is_before_jiffies(req->create_time + fc->timeout.req_timeout); 46 + } 47 + 48 + bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing) 49 + { 50 + int i; 51 + 52 + for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) 53 + if (fuse_request_expired(fc, &processing[i])) 54 + return true; 55 + 56 + return false; 57 + } 58 + 59 + /* 60 + * Check if any requests aren't being completed by the time the request timeout 61 + * elapses. To do so, we: 62 + * - check the fiq pending list 63 + * - check the bg queue 64 + * - check the fpq io and processing lists 65 + * 66 + * To make this fast, we only check against the head request on each list since 67 + * these are generally queued in order of creation time (eg newer requests get 68 + * queued to the tail). We might miss a few edge cases (eg requests transitioning 69 + * between lists, re-sent requests at the head of the pending list having a 70 + * later creation time than other requests on that list, etc.) but that is fine 71 + * since if the request never gets fulfilled, it will eventually be caught. 
72 + */ 73 + void fuse_check_timeout(struct work_struct *work) 74 + { 75 + struct delayed_work *dwork = to_delayed_work(work); 76 + struct fuse_conn *fc = container_of(dwork, struct fuse_conn, 77 + timeout.work); 78 + struct fuse_iqueue *fiq = &fc->iq; 79 + struct fuse_dev *fud; 80 + struct fuse_pqueue *fpq; 81 + bool expired = false; 82 + 83 + if (!atomic_read(&fc->num_waiting)) 84 + goto out; 85 + 86 + spin_lock(&fiq->lock); 87 + expired = fuse_request_expired(fc, &fiq->pending); 88 + spin_unlock(&fiq->lock); 89 + if (expired) 90 + goto abort_conn; 91 + 92 + spin_lock(&fc->bg_lock); 93 + expired = fuse_request_expired(fc, &fc->bg_queue); 94 + spin_unlock(&fc->bg_lock); 95 + if (expired) 96 + goto abort_conn; 97 + 98 + spin_lock(&fc->lock); 99 + if (!fc->connected) { 100 + spin_unlock(&fc->lock); 101 + return; 102 + } 103 + list_for_each_entry(fud, &fc->devices, entry) { 104 + fpq = &fud->pq; 105 + spin_lock(&fpq->lock); 106 + if (fuse_request_expired(fc, &fpq->io) || 107 + fuse_fpq_processing_expired(fc, fpq->processing)) { 108 + spin_unlock(&fpq->lock); 109 + spin_unlock(&fc->lock); 110 + goto abort_conn; 111 + } 112 + 113 + spin_unlock(&fpq->lock); 114 + } 115 + spin_unlock(&fc->lock); 116 + 117 + if (fuse_uring_request_expired(fc)) 118 + goto abort_conn; 119 + 120 + out: 121 + queue_delayed_work(system_wq, &fc->timeout.work, 122 + fuse_timeout_timer_freq); 123 + return; 124 + 125 + abort_conn: 126 + fuse_abort_conn(fc); 127 + } 128 + 129 static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req) 130 { 131 INIT_LIST_HEAD(&req->list); ··· 40 refcount_set(&req->count, 1); 41 __set_bit(FR_PENDING, &req->flags); 42 req->fm = fm; 43 + req->create_time = jiffies; 44 } 45 46 static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags) ··· 407 return 0; 408 } 409 410 + bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock) 411 + { 412 + spin_lock(lock); 413 + if (test_bit(FR_PENDING, &req->flags)) { 414 + /* 415 + * 
FR_PENDING does not get cleared as the request will end 416 + * up in destruction anyway. 417 + */ 418 + list_del(&req->list); 419 + spin_unlock(lock); 420 + __fuse_put_request(req); 421 + req->out.h.error = -EINTR; 422 + return true; 423 + } 424 + spin_unlock(lock); 425 + return false; 426 + } 427 + 428 static void request_wait_answer(struct fuse_req *req) 429 { 430 struct fuse_conn *fc = req->fm->fc; ··· 428 } 429 430 if (!test_bit(FR_FORCE, &req->flags)) { 431 + bool removed; 432 + 433 /* Only fatal signals may interrupt this */ 434 err = wait_event_killable(req->waitq, 435 test_bit(FR_FINISHED, &req->flags)); 436 if (!err) 437 return; 438 439 + if (test_bit(FR_URING, &req->flags)) 440 + removed = fuse_uring_remove_pending_req(req); 441 + else 442 + removed = fuse_remove_pending_req(req, &fiq->lock); 443 + if (removed) 444 return; 445 } 446 447 /* ··· 1533 struct fuse_copy_state *cs) 1534 { 1535 struct fuse_notify_inval_entry_out outarg; 1536 + int err; 1537 + char *buf = NULL; 1538 struct qstr name; 1539 1540 err = -EINVAL; 1541 if (size < sizeof(outarg)) ··· 1550 goto err; 1551 1552 err = -ENAMETOOLONG; 1553 + if (outarg.namelen > fc->name_max) 1554 goto err; 1555 1556 err = -EINVAL; 1557 if (size != sizeof(outarg) + outarg.namelen + 1) 1558 + goto err; 1559 + 1560 + err = -ENOMEM; 1561 + buf = kzalloc(outarg.namelen + 1, GFP_KERNEL); 1562 + if (!buf) 1563 goto err; 1564 1565 name.name = buf; ··· 1581 struct fuse_copy_state *cs) 1582 { 1583 struct fuse_notify_delete_out outarg; 1584 + int err; 1585 + char *buf = NULL; 1586 struct qstr name; 1587 1588 err = -EINVAL; 1589 if (size < sizeof(outarg)) ··· 1598 goto err; 1599 1600 err = -ENAMETOOLONG; 1601 + if (outarg.namelen > fc->name_max) 1602 goto err; 1603 1604 err = -EINVAL; 1605 if (size != sizeof(outarg) + outarg.namelen + 1) 1606 + goto err; 1607 + 1608 + err = -ENOMEM; 1609 + buf = kzalloc(outarg.namelen + 1, GFP_KERNEL); 1610 + if (!buf) 1611 goto err; 1612 1613 name.name = buf; ··· 2274 struct fuse_req 
*req, *next; 2275 LIST_HEAD(to_end); 2276 unsigned int i; 2277 + 2278 + if (fc->timeout.req_timeout) 2279 + cancel_delayed_work(&fc->timeout.work); 2280 2281 /* Background queuing checks fc->connected under bg_lock */ 2282 spin_lock(&fc->bg_lock);
+38 -5
fs/fuse/dev_uring.c
··· 140 } 141 } 142 143 void fuse_uring_destruct(struct fuse_conn *fc) 144 { 145 struct fuse_ring *ring = fc->ring; ··· 238 ring->nr_queues = nr_queues; 239 ring->fc = fc; 240 ring->max_payload_sz = max_payload_size; 241 - atomic_set(&ring->queue_refs, 0); 242 smp_store_release(&fc->ring, ring); 243 244 spin_unlock(&fc->lock); ··· 752 struct fuse_req *req) 753 { 754 struct fuse_ring_queue *queue = ent->queue; 755 - struct fuse_conn *fc = req->fm->fc; 756 - struct fuse_iqueue *fiq = &fc->iq; 757 758 lockdep_assert_held(&queue->lock); 759 ··· 761 ent->state); 762 } 763 764 - spin_lock(&fiq->lock); 765 clear_bit(FR_PENDING, &req->flags); 766 - spin_unlock(&fiq->lock); 767 ent->fuse_req = req; 768 ent->state = FRRS_FUSE_REQ; 769 list_move(&ent->list, &queue->ent_w_req_queue); ··· 1260 if (unlikely(queue->stopped)) 1261 goto err_unlock; 1262 1263 ent = list_first_entry_or_null(&queue->ent_avail_queue, 1264 struct fuse_ring_ent, list); 1265 if (ent) ··· 1300 return false; 1301 } 1302 1303 list_add_tail(&req->list, &queue->fuse_req_bg_queue); 1304 1305 ent = list_first_entry_or_null(&queue->ent_avail_queue, ··· 1330 } 1331 1332 return true; 1333 } 1334 1335 static const struct fuse_iqueue_ops fuse_io_uring_ops = {
··· 140 } 141 } 142 143 + bool fuse_uring_request_expired(struct fuse_conn *fc) 144 + { 145 + struct fuse_ring *ring = fc->ring; 146 + struct fuse_ring_queue *queue; 147 + int qid; 148 + 149 + if (!ring) 150 + return false; 151 + 152 + for (qid = 0; qid < ring->nr_queues; qid++) { 153 + queue = READ_ONCE(ring->queues[qid]); 154 + if (!queue) 155 + continue; 156 + 157 + spin_lock(&queue->lock); 158 + if (fuse_request_expired(fc, &queue->fuse_req_queue) || 159 + fuse_request_expired(fc, &queue->fuse_req_bg_queue) || 160 + fuse_fpq_processing_expired(fc, queue->fpq.processing)) { 161 + spin_unlock(&queue->lock); 162 + return true; 163 + } 164 + spin_unlock(&queue->lock); 165 + } 166 + 167 + return false; 168 + } 169 + 170 void fuse_uring_destruct(struct fuse_conn *fc) 171 { 172 struct fuse_ring *ring = fc->ring; ··· 211 ring->nr_queues = nr_queues; 212 ring->fc = fc; 213 ring->max_payload_sz = max_payload_size; 214 smp_store_release(&fc->ring, ring); 215 216 spin_unlock(&fc->lock); ··· 726 struct fuse_req *req) 727 { 728 struct fuse_ring_queue *queue = ent->queue; 729 730 lockdep_assert_held(&queue->lock); 731 ··· 737 ent->state); 738 } 739 740 clear_bit(FR_PENDING, &req->flags); 741 ent->fuse_req = req; 742 ent->state = FRRS_FUSE_REQ; 743 list_move(&ent->list, &queue->ent_w_req_queue); ··· 1238 if (unlikely(queue->stopped)) 1239 goto err_unlock; 1240 1241 + set_bit(FR_URING, &req->flags); 1242 + req->ring_queue = queue; 1243 ent = list_first_entry_or_null(&queue->ent_avail_queue, 1244 struct fuse_ring_ent, list); 1245 if (ent) ··· 1276 return false; 1277 } 1278 1279 + set_bit(FR_URING, &req->flags); 1280 + req->ring_queue = queue; 1281 list_add_tail(&req->list, &queue->fuse_req_bg_queue); 1282 1283 ent = list_first_entry_or_null(&queue->ent_avail_queue, ··· 1304 } 1305 1306 return true; 1307 + } 1308 + 1309 + bool fuse_uring_remove_pending_req(struct fuse_req *req) 1310 + { 1311 + struct fuse_ring_queue *queue = req->ring_queue; 1312 + 1313 + return 
fuse_remove_pending_req(req, &queue->lock); 1314 } 1315 1316 static const struct fuse_iqueue_ops fuse_io_uring_ops = {
+12 -6
fs/fuse/dev_uring_i.h
··· 142 int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags); 143 void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req); 144 bool fuse_uring_queue_bq_req(struct fuse_req *req); 145 146 static inline void fuse_uring_abort(struct fuse_conn *fc) 147 { ··· 174 175 #else /* CONFIG_FUSE_IO_URING */ 176 177 - struct fuse_ring; 178 - 179 - static inline void fuse_uring_create(struct fuse_conn *fc) 180 - { 181 - } 182 - 183 static inline void fuse_uring_destruct(struct fuse_conn *fc) 184 { 185 } ··· 192 } 193 194 static inline bool fuse_uring_ready(struct fuse_conn *fc) 195 { 196 return false; 197 }
··· 142 int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags); 143 void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req); 144 bool fuse_uring_queue_bq_req(struct fuse_req *req); 145 + bool fuse_uring_remove_pending_req(struct fuse_req *req); 146 + bool fuse_uring_request_expired(struct fuse_conn *fc); 147 148 static inline void fuse_uring_abort(struct fuse_conn *fc) 149 { ··· 172 173 #else /* CONFIG_FUSE_IO_URING */ 174 175 static inline void fuse_uring_destruct(struct fuse_conn *fc) 176 { 177 } ··· 196 } 197 198 static inline bool fuse_uring_ready(struct fuse_conn *fc) 199 + { 200 + return false; 201 + } 202 + 203 + static inline bool fuse_uring_remove_pending_req(struct fuse_req *req) 204 + { 205 + return false; 206 + } 207 + 208 + static inline bool fuse_uring_request_expired(struct fuse_conn *fc) 209 { 210 return false; 211 }
+10 -1
fs/fuse/dir.c
··· 370 371 *inode = NULL; 372 err = -ENAMETOOLONG; 373 - if (name->len > FUSE_NAME_MAX) 374 goto out; 375 376 ··· 1137 struct fuse_mount *fm = get_fuse_mount(inode); 1138 FUSE_ARGS(args); 1139 1140 memset(&inarg, 0, sizeof(inarg)); 1141 inarg.oldnodeid = get_node_id(inode); 1142 args.opcode = FUSE_LINK; ··· 1153 fuse_update_ctime_in_cache(inode); 1154 else if (err == -EINTR) 1155 fuse_invalidate_attr(inode); 1156 1157 return err; 1158 }
··· 370 371 *inode = NULL; 372 err = -ENAMETOOLONG; 373 + if (name->len > fm->fc->name_max) 374 goto out; 375 376 ··· 1137 struct fuse_mount *fm = get_fuse_mount(inode); 1138 FUSE_ARGS(args); 1139 1140 + if (fm->fc->no_link) 1141 + goto out; 1142 + 1143 memset(&inarg, 0, sizeof(inarg)); 1144 inarg.oldnodeid = get_node_id(inode); 1145 args.opcode = FUSE_LINK; ··· 1150 fuse_update_ctime_in_cache(inode); 1151 else if (err == -EINTR) 1152 fuse_invalidate_attr(inode); 1153 + 1154 + if (err == -ENOSYS) 1155 + fm->fc->no_link = 1; 1156 + out: 1157 + if (fm->fc->no_link) 1158 + return -EPERM; 1159 1160 return err; 1161 }
+4
fs/fuse/fuse_dev_i.h
··· 61 void fuse_dev_queue_forget(struct fuse_iqueue *fiq, 62 struct fuse_forget_link *forget); 63 void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req); 64 65 #endif 66
··· 61 void fuse_dev_queue_forget(struct fuse_iqueue *fiq, 62 struct fuse_forget_link *forget); 63 void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req); 64 + bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock); 65 + 66 + bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list); 67 + bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing); 68 69 #endif 70
+45 -2
fs/fuse/fuse_i.h
··· 38 /** Bias for fi->writectr, meaning new writepages must not be sent */ 39 #define FUSE_NOWRITE INT_MIN 40 41 - /** It could be as large as PATH_MAX, but would that have any uses? */ 42 - #define FUSE_NAME_MAX 1024 43 44 /** Number of dentries for each connection in the control filesystem */ 45 #define FUSE_CTL_NUM_DENTRIES 5 46 47 /** Maximum of max_pages received in init_out */ 48 extern unsigned int fuse_max_pages_limit; 49 50 /** List of active connections */ 51 extern struct list_head fuse_conn_list; ··· 398 * FR_FINISHED: request is finished 399 * FR_PRIVATE: request is on private list 400 * FR_ASYNC: request is asynchronous 401 */ 402 enum fuse_req_flag { 403 FR_ISREPLY, ··· 413 FR_FINISHED, 414 FR_PRIVATE, 415 FR_ASYNC, 416 }; 417 418 /** ··· 463 464 #ifdef CONFIG_FUSE_IO_URING 465 void *ring_entry; 466 #endif 467 }; 468 469 struct fuse_iqueue; ··· 892 /* Use pages instead of pointer for kernel I/O */ 893 unsigned int use_pages_for_kvec_io:1; 894 895 /* Use io_uring for communication */ 896 unsigned int io_uring; 897 ··· 927 928 /** Version counter for evict inode */ 929 atomic64_t evict_ctr; 930 931 /** Called on final put */ 932 void (*release)(struct fuse_conn *); ··· 966 /** uring connection information*/ 967 struct fuse_ring *ring; 968 #endif 969 }; 970 971 /* ··· 1255 /* Abort all requests */ 1256 void fuse_abort_conn(struct fuse_conn *fc); 1257 void fuse_wait_aborted(struct fuse_conn *fc); 1258 1259 /** 1260 * Invalidate inode attributes
··· 38 /** Bias for fi->writectr, meaning new writepages must not be sent */ 39 #define FUSE_NOWRITE INT_MIN 40 41 + /** Maximum length of a filename, not including terminating null */ 42 + 43 + /* maximum, small enough for FUSE_MIN_READ_BUFFER*/ 44 + #define FUSE_NAME_LOW_MAX 1024 45 + /* maximum, but needs a request buffer > FUSE_MIN_READ_BUFFER */ 46 + #define FUSE_NAME_MAX (PATH_MAX - 1) 47 48 /** Number of dentries for each connection in the control filesystem */ 49 #define FUSE_CTL_NUM_DENTRIES 5 50 51 + /* Frequency (in seconds) of request timeout checks, if opted into */ 52 + #define FUSE_TIMEOUT_TIMER_FREQ 15 53 + 54 + /** Frequency (in jiffies) of request timeout checks, if opted into */ 55 + extern const unsigned long fuse_timeout_timer_freq; 56 + 57 /** Maximum of max_pages received in init_out */ 58 extern unsigned int fuse_max_pages_limit; 59 + /* 60 + * Default timeout (in seconds) for the server to reply to a request 61 + * before the connection is aborted, if no timeout was specified on mount. 62 + */ 63 + extern unsigned int fuse_default_req_timeout; 64 + /* 65 + * Max timeout (in seconds) for the server to reply to a request before 66 + * the connection is aborted. 
67 + */ 68 + extern unsigned int fuse_max_req_timeout; 69 70 /** List of active connections */ 71 extern struct list_head fuse_conn_list; ··· 378 * FR_FINISHED: request is finished 379 * FR_PRIVATE: request is on private list 380 * FR_ASYNC: request is asynchronous 381 + * FR_URING: request is handled through fuse-io-uring 382 */ 383 enum fuse_req_flag { 384 FR_ISREPLY, ··· 392 FR_FINISHED, 393 FR_PRIVATE, 394 FR_ASYNC, 395 + FR_URING, 396 }; 397 398 /** ··· 441 442 #ifdef CONFIG_FUSE_IO_URING 443 void *ring_entry; 444 + void *ring_queue; 445 #endif 446 + /** When (in jiffies) the request was created */ 447 + unsigned long create_time; 448 }; 449 450 struct fuse_iqueue; ··· 867 /* Use pages instead of pointer for kernel I/O */ 868 unsigned int use_pages_for_kvec_io:1; 869 870 + /* Is link not implemented by fs? */ 871 + unsigned int no_link:1; 872 + 873 /* Use io_uring for communication */ 874 unsigned int io_uring; 875 ··· 899 900 /** Version counter for evict inode */ 901 atomic64_t evict_ctr; 902 + 903 + /* maximum file name length */ 904 + u32 name_max; 905 906 /** Called on final put */ 907 void (*release)(struct fuse_conn *); ··· 935 /** uring connection information*/ 936 struct fuse_ring *ring; 937 #endif 938 + 939 + /** Only used if the connection opts into request timeouts */ 940 + struct { 941 + /* Worker for checking if any requests have timed out */ 942 + struct delayed_work work; 943 + 944 + /* Request timeout (in jiffies). 0 = no timeout */ 945 + unsigned int req_timeout; 946 + } timeout; 947 }; 948 949 /* ··· 1215 /* Abort all requests */ 1216 void fuse_abort_conn(struct fuse_conn *fc); 1217 void fuse_wait_aborted(struct fuse_conn *fc); 1218 + 1219 + /* Check if any requests timed out */ 1220 + void fuse_check_timeout(struct work_struct *work); 1221 1222 /** 1223 * Invalidate inode attributes
+50 -1
fs/fuse/inode.c
··· 37 static int set_global_limit(const char *val, const struct kernel_param *kp); 38 39 unsigned int fuse_max_pages_limit = 256; 40 41 unsigned max_user_bgreq; 42 module_param_call(max_user_bgreq, set_global_limit, param_get_uint, ··· 982 fc->user_ns = get_user_ns(user_ns); 983 fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ; 984 fc->max_pages_limit = fuse_max_pages_limit; 985 986 if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH)) 987 fuse_backing_files_init(fc); ··· 1012 1013 if (IS_ENABLED(CONFIG_FUSE_DAX)) 1014 fuse_dax_conn_free(fc); 1015 if (fiq->ops->release) 1016 fiq->ops->release(fiq); 1017 put_pid_ns(fc->pid_ns); ··· 1264 spin_unlock(&fc->bg_lock); 1265 } 1266 1267 struct fuse_init_args { 1268 struct fuse_args args; 1269 struct fuse_init_in in; ··· 1310 ok = false; 1311 else { 1312 unsigned long ra_pages; 1313 1314 process_init_limits(fc, arg); 1315 ··· 1374 fc->max_pages = 1375 min_t(unsigned int, fc->max_pages_limit, 1376 max_t(unsigned int, arg->max_pages, 1)); 1377 } 1378 if (IS_ENABLED(CONFIG_FUSE_DAX)) { 1379 if (flags & FUSE_MAP_ALIGNMENT && ··· 1435 } 1436 if (flags & FUSE_OVER_IO_URING && fuse_uring_enabled()) 1437 fc->io_uring = 1; 1438 } else { 1439 ra_pages = fc->max_read / PAGE_SIZE; 1440 fc->no_lock = 1; 1441 fc->no_flock = 1; 1442 } 1443 1444 fm->sb->s_bdi->ra_pages = 1445 min(fm->sb->s_bdi->ra_pages, ra_pages); ··· 1487 FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT | 1488 FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP | 1489 FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP | 1490 - FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP; 1491 #ifdef CONFIG_FUSE_DAX 1492 if (fm->fc->dax) 1493 flags |= FUSE_MAP_ALIGNMENT;
··· 37 static int set_global_limit(const char *val, const struct kernel_param *kp); 38 39 unsigned int fuse_max_pages_limit = 256; 40 + /* default is no timeout */ 41 + unsigned int fuse_default_req_timeout; 42 + unsigned int fuse_max_req_timeout; 43 44 unsigned max_user_bgreq; 45 module_param_call(max_user_bgreq, set_global_limit, param_get_uint, ··· 979 fc->user_ns = get_user_ns(user_ns); 980 fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ; 981 fc->max_pages_limit = fuse_max_pages_limit; 982 + fc->name_max = FUSE_NAME_LOW_MAX; 983 + fc->timeout.req_timeout = 0; 984 985 if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH)) 986 fuse_backing_files_init(fc); ··· 1007 1008 if (IS_ENABLED(CONFIG_FUSE_DAX)) 1009 fuse_dax_conn_free(fc); 1010 + if (fc->timeout.req_timeout) 1011 + cancel_delayed_work_sync(&fc->timeout.work); 1012 if (fiq->ops->release) 1013 fiq->ops->release(fiq); 1014 put_pid_ns(fc->pid_ns); ··· 1257 spin_unlock(&fc->bg_lock); 1258 } 1259 1260 + static void set_request_timeout(struct fuse_conn *fc, unsigned int timeout) 1261 + { 1262 + fc->timeout.req_timeout = secs_to_jiffies(timeout); 1263 + INIT_DELAYED_WORK(&fc->timeout.work, fuse_check_timeout); 1264 + queue_delayed_work(system_wq, &fc->timeout.work, 1265 + fuse_timeout_timer_freq); 1266 + } 1267 + 1268 + static void init_server_timeout(struct fuse_conn *fc, unsigned int timeout) 1269 + { 1270 + if (!timeout && !fuse_max_req_timeout && !fuse_default_req_timeout) 1271 + return; 1272 + 1273 + if (!timeout) 1274 + timeout = fuse_default_req_timeout; 1275 + 1276 + if (fuse_max_req_timeout) { 1277 + if (timeout) 1278 + timeout = min(fuse_max_req_timeout, timeout); 1279 + else 1280 + timeout = fuse_max_req_timeout; 1281 + } 1282 + 1283 + timeout = max(FUSE_TIMEOUT_TIMER_FREQ, timeout); 1284 + 1285 + set_request_timeout(fc, timeout); 1286 + } 1287 + 1288 struct fuse_init_args { 1289 struct fuse_args args; 1290 struct fuse_init_in in; ··· 1275 ok = false; 1276 else { 1277 unsigned long ra_pages; 1278 + unsigned int 
timeout = 0; 1279 1280 process_init_limits(fc, arg); 1281 ··· 1338 fc->max_pages = 1339 min_t(unsigned int, fc->max_pages_limit, 1340 max_t(unsigned int, arg->max_pages, 1)); 1341 + 1342 + /* 1343 + * PATH_MAX file names might need two pages for 1344 + * ops like rename 1345 + */ 1346 + if (fc->max_pages > 1) 1347 + fc->name_max = FUSE_NAME_MAX; 1348 } 1349 if (IS_ENABLED(CONFIG_FUSE_DAX)) { 1350 if (flags & FUSE_MAP_ALIGNMENT && ··· 1392 } 1393 if (flags & FUSE_OVER_IO_URING && fuse_uring_enabled()) 1394 fc->io_uring = 1; 1395 + 1396 + if (flags & FUSE_REQUEST_TIMEOUT) 1397 + timeout = arg->request_timeout; 1398 } else { 1399 ra_pages = fc->max_read / PAGE_SIZE; 1400 fc->no_lock = 1; 1401 fc->no_flock = 1; 1402 } 1403 + 1404 + init_server_timeout(fc, timeout); 1405 1406 fm->sb->s_bdi->ra_pages = 1407 min(fm->sb->s_bdi->ra_pages, ra_pages); ··· 1439 FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT | 1440 FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP | 1441 FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP | 1442 + FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP | 1443 + FUSE_REQUEST_TIMEOUT; 1444 #ifdef CONFIG_FUSE_DAX 1445 if (fm->fc->dax) 1446 flags |= FUSE_MAP_ALIGNMENT;
+24
fs/fuse/sysctl.c
··· 13 /* Bound by fuse_init_out max_pages, which is a u16 */ 14 static unsigned int sysctl_fuse_max_pages_limit = 65535; 15 16 static const struct ctl_table fuse_sysctl_table[] = { 17 { 18 .procname = "max_pages_limit", ··· 28 .proc_handler = proc_douintvec_minmax, 29 .extra1 = SYSCTL_ONE, 30 .extra2 = &sysctl_fuse_max_pages_limit, 31 }, 32 }; 33
··· 13 /* Bound by fuse_init_out max_pages, which is a u16 */ 14 static unsigned int sysctl_fuse_max_pages_limit = 65535; 15 16 + /* 17 + * fuse_init_out request timeouts are u16. 18 + * This goes up to ~18 hours, which is plenty for a timeout. 19 + */ 20 + static unsigned int sysctl_fuse_req_timeout_limit = 65535; 21 + 22 static const struct ctl_table fuse_sysctl_table[] = { 23 { 24 .procname = "max_pages_limit", ··· 22 .proc_handler = proc_douintvec_minmax, 23 .extra1 = SYSCTL_ONE, 24 .extra2 = &sysctl_fuse_max_pages_limit, 25 + }, 26 + { 27 + .procname = "default_request_timeout", 28 + .data = &fuse_default_req_timeout, 29 + .maxlen = sizeof(fuse_default_req_timeout), 30 + .mode = 0644, 31 + .proc_handler = proc_douintvec_minmax, 32 + .extra1 = SYSCTL_ZERO, 33 + .extra2 = &sysctl_fuse_req_timeout_limit, 34 + }, 35 + { 36 + .procname = "max_request_timeout", 37 + .data = &fuse_max_req_timeout, 38 + .maxlen = sizeof(fuse_max_req_timeout), 39 + .mode = 0644, 40 + .proc_handler = proc_douintvec_minmax, 41 + .extra1 = SYSCTL_ZERO, 42 + .extra2 = &sysctl_fuse_req_timeout_limit, 43 }, 44 }; 45
+9 -3
include/uapi/linux/fuse.h
··· 229 * - FUSE_URING_IN_OUT_HEADER_SZ 230 * - FUSE_URING_OP_IN_OUT_SZ 231 * - enum fuse_uring_cmd 232 */ 233 234 #ifndef _LINUX_FUSE_H ··· 267 #define FUSE_KERNEL_VERSION 7 268 269 /** Minor version number of this interface */ 270 - #define FUSE_KERNEL_MINOR_VERSION 42 271 272 /** The node ID of the root inode */ 273 #define FUSE_ROOT_ID 1 ··· 438 * of the request ID indicates resend requests 439 * FUSE_ALLOW_IDMAP: allow creation of idmapped mounts 440 * FUSE_OVER_IO_URING: Indicate that client supports io-uring 441 */ 442 #define FUSE_ASYNC_READ (1 << 0) 443 #define FUSE_POSIX_LOCKS (1 << 1) ··· 482 #define FUSE_PASSTHROUGH (1ULL << 37) 483 #define FUSE_NO_EXPORT_SUPPORT (1ULL << 38) 484 #define FUSE_HAS_RESEND (1ULL << 39) 485 - 486 /* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ 487 #define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP 488 #define FUSE_ALLOW_IDMAP (1ULL << 40) 489 #define FUSE_OVER_IO_URING (1ULL << 41) 490 491 /** 492 * CUSE INIT request/reply flags ··· 914 uint16_t map_alignment; 915 uint32_t flags2; 916 uint32_t max_stack_depth; 917 - uint32_t unused[6]; 918 }; 919 920 #define CUSE_INIT_INFO_MAX 4096
··· 229 * - FUSE_URING_IN_OUT_HEADER_SZ 230 * - FUSE_URING_OP_IN_OUT_SZ 231 * - enum fuse_uring_cmd 232 + * 233 + * 7.43 234 + * - add FUSE_REQUEST_TIMEOUT 235 */ 236 237 #ifndef _LINUX_FUSE_H ··· 264 #define FUSE_KERNEL_VERSION 7 265 266 /** Minor version number of this interface */ 267 + #define FUSE_KERNEL_MINOR_VERSION 43 268 269 /** The node ID of the root inode */ 270 #define FUSE_ROOT_ID 1 ··· 435 * of the request ID indicates resend requests 436 * FUSE_ALLOW_IDMAP: allow creation of idmapped mounts 437 * FUSE_OVER_IO_URING: Indicate that client supports io-uring 438 + * FUSE_REQUEST_TIMEOUT: kernel supports timing out requests. 439 + * init_out.request_timeout contains the timeout (in secs) 440 */ 441 #define FUSE_ASYNC_READ (1 << 0) 442 #define FUSE_POSIX_LOCKS (1 << 1) ··· 477 #define FUSE_PASSTHROUGH (1ULL << 37) 478 #define FUSE_NO_EXPORT_SUPPORT (1ULL << 38) 479 #define FUSE_HAS_RESEND (1ULL << 39) 480 /* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ 481 #define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP 482 #define FUSE_ALLOW_IDMAP (1ULL << 40) 483 #define FUSE_OVER_IO_URING (1ULL << 41) 484 + #define FUSE_REQUEST_TIMEOUT (1ULL << 42) 485 486 /** 487 * CUSE INIT request/reply flags ··· 909 uint16_t map_alignment; 910 uint32_t flags2; 911 uint32_t max_stack_depth; 912 + uint16_t request_timeout; 913 + uint16_t unused[11]; 914 }; 915 916 #define CUSE_INIT_INFO_MAX 4096