Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

block/blk-softirq.c at v4.9-rc3 (179 lines, 4.4 kB)
/*
 * Functions related to softirq rq completions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include "blk.h"

static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

/*
 * Softirq action handler - move entries to local list and loop over them
 * while passing them to the queue registered handler.
 */
static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
        struct list_head *cpu_list, local_list;

        local_irq_disable();
        cpu_list = this_cpu_ptr(&blk_cpu_done);
        list_replace_init(cpu_list, &local_list);
        local_irq_enable();

        while (!list_empty(&local_list)) {
                struct request *rq;

                rq = list_entry(local_list.next, struct request, ipi_list);
                list_del_init(&rq->ipi_list);
                rq->q->softirq_done_fn(rq);
        }
}

#ifdef CONFIG_SMP
static void trigger_softirq(void *data)
{
        struct request *rq = data;
        unsigned long flags;
        struct list_head *list;

        local_irq_save(flags);
        list = this_cpu_ptr(&blk_cpu_done);
        list_add_tail(&rq->ipi_list, list);

        if (list->next == &rq->ipi_list)
                raise_softirq_irqoff(BLOCK_SOFTIRQ);

        local_irq_restore(flags);
}

/*
 * Set up and invoke a run of 'trigger_softirq' on the given CPU.
 */
static int raise_blk_irq(int cpu, struct request *rq)
{
        if (cpu_online(cpu)) {
                struct call_single_data *data = &rq->csd;

                data->func = trigger_softirq;
                data->info = rq;
                data->flags = 0;

                smp_call_function_single_async(cpu, data);
                return 0;
        }

        return 1;
}
#else /* CONFIG_SMP */
static int raise_blk_irq(int cpu, struct request *rq)
{
        return 1;
}
#endif

static int blk_softirq_cpu_dead(unsigned int cpu)
{
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
        local_irq_disable();
        list_splice_init(&per_cpu(blk_cpu_done, cpu),
                         this_cpu_ptr(&blk_cpu_done));
        raise_softirq_irqoff(BLOCK_SOFTIRQ);
        local_irq_enable();

        return 0;
}

void __blk_complete_request(struct request *req)
{
        int ccpu, cpu;
        struct request_queue *q = req->q;
        unsigned long flags;
        bool shared = false;

        BUG_ON(!q->softirq_done_fn);

        local_irq_save(flags);
        cpu = smp_processor_id();

        /*
         * Select completion CPU
         */
        if (req->cpu != -1) {
                ccpu = req->cpu;
                if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
                        shared = cpus_share_cache(cpu, ccpu);
        } else
                ccpu = cpu;

        /*
         * If the current CPU and the requested CPU share a cache, run the
         * softirq on the current CPU. One might worry that this is just like
         * QUEUE_FLAG_SAME_FORCE, but it is not: blk_complete_request() runs
         * in the interrupt handler, and since the I/O controller doesn't
         * support multiple interrupts, the current CPU is unique. This
         * avoids sending an IPI from the current CPU to the first CPU of
         * a group.
         */
        if (ccpu == cpu || shared) {
                struct list_head *list;
do_local:
                list = this_cpu_ptr(&blk_cpu_done);
                list_add_tail(&req->ipi_list, list);

                /*
                 * If the list contains only our just-added request,
                 * raise the softirq. If there are already entries
                 * there, someone already raised the irq but it
                 * hasn't run yet.
                 */
                if (list->next == &req->ipi_list)
                        raise_softirq_irqoff(BLOCK_SOFTIRQ);
        } else if (raise_blk_irq(ccpu, req))
                goto do_local;

        local_irq_restore(flags);
}

/**
 * blk_complete_request - end I/O on a request
 * @req:      the request being processed
 *
 * Description:
 *     Ends all I/O on a request. It does not handle partial completions,
 *     unless the driver actually implements this in its completion callback
 *     through requeueing. The actual completion happens out-of-order,
 *     through a softirq handler. The user must have registered a completion
 *     callback through blk_queue_softirq_done().
 **/
void blk_complete_request(struct request *req)
{
        if (unlikely(blk_should_fake_timeout(req->q)))
                return;
        if (!blk_mark_rq_complete(req))
                __blk_complete_request(req);
}
EXPORT_SYMBOL(blk_complete_request);

static __init int blk_softirq_init(void)
{
        int i;

        for_each_possible_cpu(i)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
        cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
                                  "block/softirq:dead", NULL,
                                  blk_softirq_cpu_dead);
        return 0;
}
subsys_initcall(blk_softirq_init);
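For context, here is a minimal sketch of how a legacy (single-queue) block driver would hook into this completion path. The mydrv_* names and the mydrv_fetch_completed_request() helper are hypothetical; only blk_queue_softirq_done(), blk_complete_request() and blk_end_request_all() are real v4.9-era kernel APIs.

#include <linux/blkdev.h>
#include <linux/interrupt.h>

/* Runs in softirq context as q->softirq_done_fn (called from blk_done_softirq). */
static void mydrv_softirq_done(struct request *rq)
{
        /* The heavier, final completion work happens here, out of hard-irq context. */
        blk_end_request_all(rq, 0);
}

static irqreturn_t mydrv_irq(int irq, void *dev_id)
{
        /* Hypothetical helper: look up the request the hardware just finished. */
        struct request *rq = mydrv_fetch_completed_request(dev_id);

        /*
         * Hard-irq fast path: mark the request complete and queue it on a
         * per-CPU list; blk_done_softirq() will later invoke
         * mydrv_softirq_done(), possibly on the CPU that submitted the I/O.
         */
        blk_complete_request(rq);
        return IRQ_HANDLED;
}

static void mydrv_init_queue(struct request_queue *q)
{
        /* Required: __blk_complete_request() BUG_ONs if softirq_done_fn is unset. */
        blk_queue_softirq_done(q, mydrv_softirq_done);
}

Which CPU the softirq ends up running on depends on the queue's rq_affinity setting: with QUEUE_FLAG_SAME_COMP, req->cpu records the submitting CPU and raise_blk_irq() sends an IPI there, while the shared-cache check in __blk_complete_request() lets completions stay local unless QUEUE_FLAG_SAME_FORCE insists otherwise.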