Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fuse: introduce fc->bg_lock

To reduce contention on fc->lock, this patch introduces bg_lock for
protection of the fields related to the background queue. These are:
max_background, congestion_threshold, num_background, active_background,
bg_queue and blocked.

This allows the next patch to make async reads no longer require fc->lock,
so async reads and writes will perform better when executed in parallel.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>

authored by

Kirill Tkhai and committed by
Miklos Szeredi
ae2dffa3 2b30a533

+26 -15
+4 -4
fs/fuse/control.c
··· 125 125 if (ret > 0) { 126 126 struct fuse_conn *fc = fuse_ctl_file_conn_get(file); 127 127 if (fc) { 128 - spin_lock(&fc->lock); 128 + spin_lock(&fc->bg_lock); 129 129 fc->max_background = val; 130 130 fc->blocked = fc->num_background >= fc->max_background; 131 131 if (!fc->blocked) 132 132 wake_up(&fc->blocked_waitq); 133 - spin_unlock(&fc->lock); 133 + spin_unlock(&fc->bg_lock); 134 134 fuse_conn_put(fc); 135 135 } 136 136 } ··· 171 171 if (!fc) 172 172 goto out; 173 173 174 - spin_lock(&fc->lock); 174 + spin_lock(&fc->bg_lock); 175 175 fc->congestion_threshold = val; 176 176 if (fc->sb) { 177 177 if (fc->num_background < fc->congestion_threshold) { ··· 182 182 set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC); 183 183 } 184 184 } 185 - spin_unlock(&fc->lock); 185 + spin_unlock(&fc->bg_lock); 186 186 fuse_conn_put(fc); 187 187 out: 188 188 return ret;
+12 -8
fs/fuse/dev.c
··· 287 287 * We get here in the unlikely case that a background 288 288 * request was allocated but not sent 289 289 */ 290 - spin_lock(&fc->lock); 290 + spin_lock(&fc->bg_lock); 291 291 if (!fc->blocked) 292 292 wake_up(&fc->blocked_waitq); 293 - spin_unlock(&fc->lock); 293 + spin_unlock(&fc->bg_lock); 294 294 } 295 295 296 296 if (test_bit(FR_WAITING, &req->flags)) { ··· 390 390 WARN_ON(test_bit(FR_PENDING, &req->flags)); 391 391 WARN_ON(test_bit(FR_SENT, &req->flags)); 392 392 if (test_bit(FR_BACKGROUND, &req->flags)) { 393 - spin_lock(&fc->lock); 393 + spin_lock(&fc->bg_lock); 394 394 clear_bit(FR_BACKGROUND, &req->flags); 395 395 if (fc->num_background == fc->max_background) { 396 396 fc->blocked = 0; ··· 413 413 fc->num_background--; 414 414 fc->active_background--; 415 415 flush_bg_queue(fc); 416 - spin_unlock(&fc->lock); 416 + spin_unlock(&fc->bg_lock); 417 417 } 418 418 wake_up(&req->waitq); 419 419 if (req->end) ··· 586 586 * 587 587 * fc->connected must have been checked previously 588 588 */ 589 - void fuse_request_send_background_locked(struct fuse_conn *fc, 590 - struct fuse_req *req) 589 + void fuse_request_send_background_nocheck(struct fuse_conn *fc, 590 + struct fuse_req *req) 591 591 { 592 592 BUG_ON(!test_bit(FR_BACKGROUND, &req->flags)); 593 593 if (!test_bit(FR_WAITING, &req->flags)) { ··· 595 595 atomic_inc(&fc->num_waiting); 596 596 } 597 597 __set_bit(FR_ISREPLY, &req->flags); 598 + spin_lock(&fc->bg_lock); 598 599 fc->num_background++; 599 600 if (fc->num_background == fc->max_background) 600 601 fc->blocked = 1; ··· 605 604 } 606 605 list_add_tail(&req->list, &fc->bg_queue); 607 606 flush_bg_queue(fc); 607 + spin_unlock(&fc->bg_lock); 608 608 } 609 609 610 610 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req) ··· 613 611 BUG_ON(!req->end); 614 612 spin_lock(&fc->lock); 615 613 if (fc->connected) { 616 - fuse_request_send_background_locked(fc, req); 614 + fuse_request_send_background_nocheck(fc, req); 617 615 
spin_unlock(&fc->lock); 618 616 } else { 619 617 spin_unlock(&fc->lock); ··· 2120 2118 LIST_HEAD(to_end); 2121 2119 2122 2120 fc->connected = 0; 2123 - fc->blocked = 0; 2124 2121 fc->aborted = is_abort; 2125 2122 fuse_set_initialized(fc); 2126 2123 list_for_each_entry(fud, &fc->devices, entry) { ··· 2141 2140 list_splice_tail_init(&fpq->processing, &to_end); 2142 2141 spin_unlock(&fpq->lock); 2143 2142 } 2143 + spin_lock(&fc->bg_lock); 2144 + fc->blocked = 0; 2144 2145 fc->max_background = UINT_MAX; 2145 2146 flush_bg_queue(fc); 2147 + spin_unlock(&fc->bg_lock); 2146 2148 2147 2149 spin_lock(&fiq->waitq.lock); 2148 2150 fiq->connected = 0;
+1 -1
fs/fuse/file.c
··· 1502 1502 1503 1503 req->in.args[1].size = inarg->size; 1504 1504 fi->writectr++; 1505 - fuse_request_send_background_locked(fc, req); 1505 + fuse_request_send_background_nocheck(fc, req); 1506 1506 return; 1507 1507 1508 1508 out_free:
+6 -2
fs/fuse/fuse_i.h
··· 500 500 /** The list of background requests set aside for later queuing */ 501 501 struct list_head bg_queue; 502 502 503 + /** Protects: max_background, congestion_threshold, num_background, 504 + * active_background, bg_queue, blocked */ 505 + spinlock_t bg_lock; 506 + 503 507 /** Flag indicating that INIT reply has been received. Allocating 504 508 * any fuse request will be suspended until the flag is set */ 505 509 int initialized; ··· 864 860 */ 865 861 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req); 866 862 867 - void fuse_request_send_background_locked(struct fuse_conn *fc, 868 - struct fuse_req *req); 863 + void fuse_request_send_background_nocheck(struct fuse_conn *fc, 864 + struct fuse_req *req); 869 865 870 866 /* Abort all requests */ 871 867 void fuse_abort_conn(struct fuse_conn *fc, bool is_abort);
+3
fs/fuse/inode.c
··· 605 605 { 606 606 memset(fc, 0, sizeof(*fc)); 607 607 spin_lock_init(&fc->lock); 608 + spin_lock_init(&fc->bg_lock); 608 609 init_rwsem(&fc->killsb); 609 610 refcount_set(&fc->count, 1); 610 611 atomic_set(&fc->dev_count, 1); ··· 853 852 sanitize_global_limit(&max_user_bgreq); 854 853 sanitize_global_limit(&max_user_congthresh); 855 854 855 + spin_lock(&fc->bg_lock); 856 856 if (arg->max_background) { 857 857 fc->max_background = arg->max_background; 858 858 ··· 867 865 fc->congestion_threshold > max_user_congthresh) 868 866 fc->congestion_threshold = max_user_congthresh; 869 867 } 868 + spin_unlock(&fc->bg_lock); 870 869 } 871 870 872 871 static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)