[PATCH] blk: light iocontext ops

get_io_context needlessly turned off interrupts and checked for racing io
context creations.  Neither is needed, because an io context can only be
created while in the process context of the current process.
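
To illustrate the invariant being relied on (this sketch is not part of the
patch and just restates the simplified creation path): only the task itself
ever installs its own io_context, so a plain test and store on current needs
no irq disabling and no re-check for a racing allocation:

	struct io_context *ioc = current->io_context;
	if (!ioc) {
		/* nobody else can install current->io_context, so no race */
		ioc = kmem_cache_alloc(iocontext_cachep, gfp_flags);
		if (ioc)
			current->io_context = ioc;
	}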

Also, split the function in two.  The light version, current_io_context, does
not elevate the reference count; it can be used from process context, because
the process itself holds a reference to its io context.
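
For illustration only (not part of this patch), the intended calling
convention for the two variants is roughly the following; the surrounding
code is made up:

	/*
	 * Process context, context only used while we run: the task's own
	 * reference keeps it alive, so no put_io_context() is needed.
	 */
	ioc = current_io_context(GFP_NOIO);
	ioc_set_batching(q, ioc);

	/*
	 * Anything that keeps the pointer beyond the current task's hands
	 * (or hands it to someone else) still takes and drops a reference.
	 */
	ioc = get_io_context(GFP_NOIO);
	...
	put_io_context(ioc);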

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Nick Piggin, committed by Linus Torvalds (fb3cc432, d6344532)

2 files changed, 27 insertions(+), 32 deletions(-)

drivers/block/ll_rw_blk.c (+26, -32)
···
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
-	struct io_context *ioc = get_io_context(GFP_ATOMIC);
+	struct io_context *ioc = current_io_context(GFP_ATOMIC);
 
 	if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
 		goto out;
···
 	rq_init(q, rq);
 	rq->rl = rl;
 out:
-	put_io_context(ioc);
 	return rq;
 }
 
···
 	 * up to a big batch of them for a small period time.
 	 * See ioc_batching, ioc_set_batching
 	 */
-	ioc = get_io_context(GFP_NOIO);
+	ioc = current_io_context(GFP_NOIO);
 	ioc_set_batching(q, ioc);
-	put_io_context(ioc);
 
 	spin_lock_irq(q->queue_lock);
 }
···
 
 /*
  * If the current task has no IO context then create one and initialise it.
- * If it does have a context, take a ref on it.
+ * Otherwise, return its existing IO context.
  *
- * This is always called in the context of the task which submitted the I/O.
- * But weird things happen, so we disable local interrupts to ensure exclusive
- * access to *current.
+ * This returned IO context doesn't have a specifically elevated refcount,
+ * but since the current task itself holds a reference, the context can be
+ * used in general code, so long as it stays within `current` context.
  */
-struct io_context *get_io_context(int gfp_flags)
+struct io_context *current_io_context(int gfp_flags)
 {
 	struct task_struct *tsk = current;
-	unsigned long flags;
 	struct io_context *ret;
 
-	local_irq_save(flags);
 	ret = tsk->io_context;
-	if (ret)
-		goto out;
-
-	local_irq_restore(flags);
+	if (likely(ret))
+		return ret;
 
 	ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
 	if (ret) {
···
 		ret->nr_batch_requests = 0; /* because this is 0 */
 		ret->aic = NULL;
 		ret->cic = NULL;
-
-		local_irq_save(flags);
-
-		/*
-		 * very unlikely, someone raced with us in setting up the task
-		 * io context. free new context and just grab a reference.
-		 */
-		if (!tsk->io_context)
-			tsk->io_context = ret;
-		else {
-			kmem_cache_free(iocontext_cachep, ret);
-			ret = tsk->io_context;
-		}
-
-out:
-		atomic_inc(&ret->refcount);
-		local_irq_restore(flags);
+		tsk->io_context = ret;
 	}
 
+	return ret;
+}
+EXPORT_SYMBOL(current_io_context);
+
+/*
+ * If the current task has no IO context then create one and initialise it.
+ * If it does have a context, take a ref on it.
+ *
+ * This is always called in the context of the task which submitted the I/O.
+ */
+struct io_context *get_io_context(int gfp_flags)
+{
+	struct io_context *ret;
+	ret = current_io_context(gfp_flags);
+	if (likely(ret))
+		atomic_inc(&ret->refcount);
 	return ret;
 }
 EXPORT_SYMBOL(get_io_context);
include/linux/blkdev.h (+1)
···
 
 void put_io_context(struct io_context *ioc);
 void exit_io_context(void);
+struct io_context *current_io_context(int gfp_flags);
 struct io_context *get_io_context(int gfp_flags);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);