Merge tag 'fuse-update-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse

Pull fuse updates from Miklos Szeredi:

- Add fs-verity support (Richard Fung)

- Add multi-queue support to virtio-fs (Peter-Jan Gootzen)

- Fix a bug in NOTIFY_RESEND handling (Hou Tao)

- page -> folio cleanup (Matthew Wilcox)

* tag 'fuse-update-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse:
virtio-fs: add multi-queue support
virtio-fs: limit number of request queues
fuse: clear FR_SENT when re-adding requests into pending list
fuse: set FR_PENDING atomically in fuse_resend()
fuse: Add initial support for fs-verity
fuse: Convert fuse_readpages_end() to use folio_end_read()

4 files changed, 130 insertions(+), 16 deletions(-)
fs/fuse/dev.c (+2 -1)
···
         spin_unlock(&fc->lock);
 
         list_for_each_entry_safe(req, next, &to_queue, list) {
-                __set_bit(FR_PENDING, &req->flags);
+                set_bit(FR_PENDING, &req->flags);
+                clear_bit(FR_SENT, &req->flags);
                 /* mark the request as resend request */
                 req->in.h.unique |= FUSE_UNIQUE_RESEND;
         }
···
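Two fixes land in this hunk: set_bit() replaces __set_bit() because fuse_resend() can race with other paths doing read-modify-write on the same req->flags word, and clear_bit(FR_SENT) puts that flag back in line with the request sitting on the pending list again. A standalone sketch of the atomic/non-atomic distinction, using C11 atomics in place of the kernel's bitops:

#include <stdatomic.h>
#include <stdio.h>

/* Like __set_bit(): a plain load/or/store.  If another thread does a
 * read-modify-write on a different bit of the same word at the same
 * time, one of the two updates can be lost. */
static void nonatomic_set_bit(unsigned int nr, unsigned long *addr)
{
        *addr |= 1UL << nr;
}

/* Like set_bit()/clear_bit(): a single atomic read-modify-write, safe
 * against concurrent updates to other bits in the same word. */
static void atomic_set_bit(unsigned int nr, _Atomic unsigned long *addr)
{
        atomic_fetch_or_explicit(addr, 1UL << nr, memory_order_relaxed);
}

static void atomic_clear_bit(unsigned int nr, _Atomic unsigned long *addr)
{
        atomic_fetch_and_explicit(addr, ~(1UL << nr), memory_order_relaxed);
}

int main(void)
{
        unsigned long w = 0;
        _Atomic unsigned long flags = 0;

        nonatomic_set_bit(0, &w);      /* safe only if no one else touches w */
        atomic_set_bit(0, &flags);     /* "FR_PENDING" */
        atomic_clear_bit(1, &flags);   /* "FR_SENT" */
        printf("w=%#lx flags=%#lx\n", w, (unsigned long)flags);
        return 0;
}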
fs/fuse/file.c (+3 -7)
···
         }
 
         for (i = 0; i < ap->num_pages; i++) {
-                struct page *page = ap->pages[i];
+                struct folio *folio = page_folio(ap->pages[i]);
 
-                if (!err)
-                        SetPageUptodate(page);
-                else
-                        SetPageError(page);
-                unlock_page(page);
-                put_page(page);
+                folio_end_read(folio, !err);
+                folio_put(folio);
         }
         if (ia->ff)
                 fuse_file_put(ia->ff, false);
···
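folio_end_read() collapses the old four-call sequence into one: on success it marks the folio uptodate, then unlocks it and wakes any waiters. The SetPageError() call is simply dropped, since nothing in this path reads PG_error. Semantically it behaves like the sketch below (not the real mm/filemap.c implementation, which folds both flag updates into a single atomic operation):

#include <linux/pagemap.h>

/* Semantic sketch only, not the mm/filemap.c implementation. */
static inline void folio_end_read_sketch(struct folio *folio, bool success)
{
        if (success)
                folio_mark_uptodate(folio);
        folio_unlock(folio);        /* clears the lock bit and wakes waiters */
}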
fs/fuse/ioctl.c (+60)
···
 #include <linux/uio.h>
 #include <linux/compat.h>
 #include <linux/fileattr.h>
+#include <linux/fsverity.h>
 
 static ssize_t fuse_send_ioctl(struct fuse_mount *fm, struct fuse_args *args,
                                struct fuse_ioctl_out *outarg)
···
         return 0;
 }
 
+/* For fs-verity, determine iov lengths from input */
+static int fuse_setup_measure_verity(unsigned long arg, struct iovec *iov)
+{
+        __u16 digest_size;
+        struct fsverity_digest __user *uarg = (void __user *)arg;
+
+        if (copy_from_user(&digest_size, &uarg->digest_size, sizeof(digest_size)))
+                return -EFAULT;
+
+        if (digest_size > SIZE_MAX - sizeof(struct fsverity_digest))
+                return -EINVAL;
+
+        iov->iov_len = sizeof(struct fsverity_digest) + digest_size;
+
+        return 0;
+}
+
+static int fuse_setup_enable_verity(unsigned long arg, struct iovec *iov,
+                                    unsigned int *in_iovs)
+{
+        struct fsverity_enable_arg enable;
+        struct fsverity_enable_arg __user *uarg = (void __user *)arg;
+        const __u32 max_buffer_len = FUSE_MAX_MAX_PAGES * PAGE_SIZE;
+
+        if (copy_from_user(&enable, uarg, sizeof(enable)))
+                return -EFAULT;
+
+        if (enable.salt_size > max_buffer_len || enable.sig_size > max_buffer_len)
+                return -ENOMEM;
+
+        if (enable.salt_size > 0) {
+                iov++;
+                (*in_iovs)++;
+
+                iov->iov_base = u64_to_user_ptr(enable.salt_ptr);
+                iov->iov_len = enable.salt_size;
+        }
+
+        if (enable.sig_size > 0) {
+                iov++;
+                (*in_iovs)++;
+
+                iov->iov_base = u64_to_user_ptr(enable.sig_ptr);
+                iov->iov_len = enable.sig_size;
+        }
+        return 0;
+}
 
 /*
  * For ioctls, there is no generic way to determine how much memory
···
                         out_iov = iov;
                         out_iovs = 1;
                 }
+
+                err = 0;
+                switch (cmd) {
+                case FS_IOC_MEASURE_VERITY:
+                        err = fuse_setup_measure_verity(arg, iov);
+                        break;
+                case FS_IOC_ENABLE_VERITY:
+                        err = fuse_setup_enable_verity(arg, iov, &in_iovs);
+                        break;
+                }
+                if (err)
+                        goto out;
         }
 
 retry:
···
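For context, these are the two ioctls whose variable-size arguments the new helpers describe to the FUSE server. A minimal userspace sketch of the caller's side, using the fs-verity UAPI (path handling and error checks trimmed for brevity):

#include <fcntl.h>
#include <linux/fsverity.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
        char buf[sizeof(struct fsverity_digest) + 64];
        struct fsverity_digest *d = (struct fsverity_digest *)buf;
        struct fsverity_enable_arg enable = {
                .version = 1,
                .hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
                .block_size = 4096,
                /* salt_ptr/salt_size and sig_ptr/sig_size are the optional
                 * buffers fuse_setup_enable_verity() adds as extra iovecs */
        };
        int fd;

        if (argc != 2)
                return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0)
                return 1;

        if (ioctl(fd, FS_IOC_ENABLE_VERITY, &enable))
                perror("FS_IOC_ENABLE_VERITY");

        /* digest_size is an in/out field, which is why
         * fuse_setup_measure_verity() reads it from userspace
         * to size the output iovec */
        d->digest_size = 64;
        if (ioctl(fd, FS_IOC_MEASURE_VERITY, d) == 0)
                printf("digest: alg %u, %u bytes\n",
                       d->digest_algorithm, d->digest_size);

        close(fd);
        return 0;
}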
fs/fuse/virtio_fs.c (+65 -8)
···
 #include <linux/fs.h>
 #include <linux/dax.h>
 #include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/group_cpus.h>
 #include <linux/pfn_t.h>
 #include <linux/memremap.h>
 #include <linux/module.h>
···
         unsigned int nvqs;               /* number of virtqueues */
         unsigned int num_request_queues; /* number of request queues */
         struct dax_device *dax_dev;
+
+        unsigned int *mq_map; /* index = cpu id, value = request vq id */
 
         /* DAX memory window where file contents are mapped */
         void *window_kaddr;
···
 {
         struct virtio_fs *vfs = container_of(kobj, struct virtio_fs, kobj);
 
+        kfree(vfs->mq_map);
         kfree(vfs->vqs);
         kfree(vfs);
 }
···
         }
 }
 
+static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs)
+{
+        const struct cpumask *mask, *masks;
+        unsigned int q, cpu;
+
+        /* First attempt to map using existing transport layer affinities
+         * e.g. PCIe MSI-X
+         */
+        if (!vdev->config->get_vq_affinity)
+                goto fallback;
+
+        for (q = 0; q < fs->num_request_queues; q++) {
+                mask = vdev->config->get_vq_affinity(vdev, VQ_REQUEST + q);
+                if (!mask)
+                        goto fallback;
+
+                for_each_cpu(cpu, mask)
+                        fs->mq_map[cpu] = q;
+        }
+
+        return;
+fallback:
+        /* Attempt to map evenly in groups over the CPUs */
+        masks = group_cpus_evenly(fs->num_request_queues);
+        /* If even this fails we default to all CPUs use queue zero */
+        if (!masks) {
+                for_each_possible_cpu(cpu)
+                        fs->mq_map[cpu] = 0;
+                return;
+        }
+
+        for (q = 0; q < fs->num_request_queues; q++) {
+                for_each_cpu(cpu, &masks[q])
+                        fs->mq_map[cpu] = q;
+        }
+        kfree(masks);
+}
+
 /* Virtqueue interrupt handler */
 static void virtio_fs_vq_done(struct virtqueue *vq)
 {
···
 {
         struct virtqueue **vqs;
         vq_callback_t **callbacks;
+        /* Specify pre_vectors to ensure that the queues before the
+         * request queues (e.g. hiprio) don't claim any of the CPUs in
+         * the multi-queue mapping and interrupt affinities
+         */
+        struct irq_affinity desc = { .pre_vectors = VQ_REQUEST };
         const char **names;
         unsigned int i;
         int ret = 0;
···
         if (fs->num_request_queues == 0)
                 return -EINVAL;
 
+        /* Truncate nr of request queues to nr_cpu_ids */
+        fs->num_request_queues = min_t(unsigned int, fs->num_request_queues,
+                                       nr_cpu_ids);
         fs->nvqs = VQ_REQUEST + fs->num_request_queues;
         fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
         if (!fs->vqs)
···
         callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
                                   GFP_KERNEL);
         names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
-        if (!vqs || !callbacks || !names) {
+        fs->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*fs->mq_map), GFP_KERNEL,
+                                  dev_to_node(&vdev->dev));
+        if (!vqs || !callbacks || !names || !fs->mq_map) {
                 ret = -ENOMEM;
                 goto out;
         }
···
                 names[i] = fs->vqs[i].name;
         }
 
-        ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
+        ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, &desc);
         if (ret < 0)
                 goto out;
···
         kfree(names);
         kfree(callbacks);
         kfree(vqs);
-        if (ret)
+        if (ret) {
                 kfree(fs->vqs);
+                kfree(fs->mq_map);
+        }
         return ret;
 }
···
         if (ret < 0)
                 goto out;
 
-        /* TODO vq affinity */
+        virtio_fs_map_queues(vdev, fs);
 
         ret = virtio_fs_setup_dax(vdev, fs);
         if (ret < 0)
···
 static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
 __releases(fiq->lock)
 {
-        unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
+        unsigned int queue_id;
         struct virtio_fs *fs;
         struct fuse_req *req;
         struct virtio_fs_vq *fsvq;
···
         spin_unlock(&fiq->lock);
 
         fs = fiq->priv;
+        queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()];
 
-        pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
-                 __func__, req->in.h.opcode, req->in.h.unique,
+        pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u queue_id %u\n",
+                 __func__, req->in.h.opcode, req->in.h.unique,
                  req->in.h.nodeid, req->in.h.len,
-                 fuse_len_args(req->args->out_numargs, req->args->out_args));
+                 fuse_len_args(req->args->out_numargs, req->args->out_args),
+                 queue_id);
 
         fsvq = &fs->vqs[queue_id];
         ret = virtio_fs_enqueue_req(fsvq, req, false);
···
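At submission time the table built above is consulted per CPU: the request goes to fs->vqs[VQ_REQUEST + fs->mq_map[raw_smp_processor_id()]], so a given CPU keeps hitting the same request queue. A toy userspace sketch of the fallback's even grouping (the kernel delegates this to group_cpus_evenly(); the formula here is only an illustration of the resulting shape of mq_map):

#include <stdio.h>

int main(void)
{
        enum { NR_CPUS = 8, NR_QUEUES = 3 };
        unsigned int mq_map[NR_CPUS];

        /* Split CPU ids into NR_QUEUES contiguous, near-equal groups */
        for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
                mq_map[cpu] = cpu * NR_QUEUES / NR_CPUS;

        for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu %u -> request queue %u\n", cpu, mq_map[cpu]);
        return 0;
}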