Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[BLOCK] ll_rw_blk: fastpath get_request()

Originally from: Nick Piggin <nickpiggin@yahoo.com.au>

Move current_io_context out of the get_request fastpath. Also try to
streamline a few other things in this area.

Signed-off-by: Jens Axboe <axboe@suse.de>

authored by

Jens Axboe and committed by
Jens Axboe
88ee5ef1 ef9be1d3

+37 -33
+37 -33
block/ll_rw_blk.c
··· 1908 1908 { 1909 1909 struct request *rq = NULL; 1910 1910 struct request_list *rl = &q->rq; 1911 - struct io_context *ioc = current_io_context(GFP_ATOMIC); 1912 - int priv; 1911 + struct io_context *ioc = NULL; 1912 + int may_queue, priv; 1913 1913 1914 - if (rl->count[rw]+1 >= q->nr_requests) { 1915 - /* 1916 - * The queue will fill after this allocation, so set it as 1917 - * full, and mark this process as "batching". This process 1918 - * will be allowed to complete a batch of requests, others 1919 - * will be blocked. 1920 - */ 1921 - if (!blk_queue_full(q, rw)) { 1922 - ioc_set_batching(q, ioc); 1923 - blk_set_queue_full(q, rw); 1914 + may_queue = elv_may_queue(q, rw, bio); 1915 + if (may_queue == ELV_MQUEUE_NO) 1916 + goto rq_starved; 1917 + 1918 + if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) { 1919 + if (rl->count[rw]+1 >= q->nr_requests) { 1920 + ioc = current_io_context(GFP_ATOMIC); 1921 + /* 1922 + * The queue will fill after this allocation, so set 1923 + * it as full, and mark this process as "batching". 1924 + * This process will be allowed to complete a batch of 1925 + * requests, others will be blocked. 
1926 + */ 1927 + if (!blk_queue_full(q, rw)) { 1928 + ioc_set_batching(q, ioc); 1929 + blk_set_queue_full(q, rw); 1930 + } else { 1931 + if (may_queue != ELV_MQUEUE_MUST 1932 + && !ioc_batching(q, ioc)) { 1933 + /* 1934 + * The queue is full and the allocating 1935 + * process is not a "batcher", and not 1936 + * exempted by the IO scheduler 1937 + */ 1938 + goto out; 1939 + } 1940 + } 1924 1941 } 1942 + set_queue_congested(q, rw); 1925 1943 } 1926 1944 1927 - switch (elv_may_queue(q, rw, bio)) { 1928 - case ELV_MQUEUE_NO: 1929 - goto rq_starved; 1930 - case ELV_MQUEUE_MAY: 1931 - break; 1932 - case ELV_MQUEUE_MUST: 1933 - goto get_rq; 1934 - } 1935 - 1936 - if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) { 1937 - /* 1938 - * The queue is full and the allocating process is not a 1939 - * "batcher", and not exempted by the IO scheduler 1940 - */ 1941 - goto out; 1942 - } 1943 - 1944 - get_rq: 1945 1945 /* 1946 1946 * Only allow batching queuers to allocate up to 50% over the defined 1947 1947 * limit of requests, otherwise we could have thousands of requests ··· 1952 1952 1953 1953 rl->count[rw]++; 1954 1954 rl->starved[rw] = 0; 1955 - if (rl->count[rw] >= queue_congestion_on_threshold(q)) 1956 - set_queue_congested(q, rw); 1957 1955 1958 1956 priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); 1959 1957 if (priv) ··· 1960 1962 spin_unlock_irq(q->queue_lock); 1961 1963 1962 1964 rq = blk_alloc_request(q, rw, bio, priv, gfp_mask); 1963 - if (!rq) { 1965 + if (unlikely(!rq)) { 1964 1966 /* 1965 1967 * Allocation failed presumably due to memory. Undo anything 1966 1968 * we might have messed up. ··· 1985 1987 goto out; 1986 1988 } 1987 1989 1990 + /* 1991 + * ioc may be NULL here, and ioc_batching will be false. That's 1992 + * OK, if the queue is under the request limit then requests need 1993 + * not count toward the nr_batch_requests limit. There will always 1994 + * be some limit enforced by BLK_BATCH_TIME. 
1995 + */ 1988 1996 if (ioc_batching(q, ioc)) 1989 1997 ioc->nr_batch_requests--; 1990 1998