Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mfd: revise locking for pcf50633 ADC

The current implementation is prone to races; this patch attempts to remove all
of them but one (in pcf50633_adc_sync_read).

The idea is that we need to guard the queue access only when inserting and
removing items. If we insert and there are no other items in the queue, it
means that the last irq already happened and we need to trigger the ADC
manually. If not, then the next conversion will be triggered by the irq
handler upon completion of the previous one.

Signed-off-by: Paul Fertser <fercerpav@gmail.com>
Signed-off-by: Samuel Ortiz <sameo@linux.intel.com>

authored by

Paul Fertser and committed by
Samuel Ortiz
bd8ef102 ed52e62e

+16 -16
+16 -16
drivers/mfd/pcf50633-adc.c
··· 73 73 struct pcf50633_adc *adc = __to_adc(pcf); 74 74 int head; 75 75 76 - mutex_lock(&adc->queue_mutex); 77 - 78 76 head = adc->queue_head; 79 77 80 - if (!adc->queue[head]) { 81 - mutex_unlock(&adc->queue_mutex); 78 + if (!adc->queue[head]) 82 79 return; 83 - } 84 - mutex_unlock(&adc->queue_mutex); 85 80 86 81 adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg); 87 82 } ··· 94 99 95 100 if (adc->queue[tail]) { 96 101 mutex_unlock(&adc->queue_mutex); 102 + dev_err(pcf->dev, "ADC queue is full, dropping request\n"); 97 103 return -EBUSY; 98 104 } 99 105 100 106 adc->queue[tail] = req; 107 + if (head == tail) 108 + trigger_next_adc_job_if_any(pcf); 101 109 adc->queue_tail = (tail + 1) & (PCF50633_MAX_ADC_FIFO_DEPTH - 1); 102 110 103 111 mutex_unlock(&adc->queue_mutex); 104 - 105 - trigger_next_adc_job_if_any(pcf); 106 112 107 113 return 0; 108 114 } ··· 120 124 int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg) 121 125 { 122 126 struct pcf50633_adc_request *req; 127 + int err; 123 128 124 129 /* req is freed when the result is ready, in interrupt handler */ 125 130 req = kzalloc(sizeof(*req), GFP_KERNEL); ··· 133 136 req->callback_param = req; 134 137 135 138 init_completion(&req->completion); 136 - adc_enqueue_request(pcf, req); 139 + err = adc_enqueue_request(pcf, req); 140 + if (err) 141 + return err; 142 + 137 143 wait_for_completion(&req->completion); 138 144 145 + /* FIXME by this time req might be already freed */ 139 146 return req->result; 140 147 } 141 148 EXPORT_SYMBOL_GPL(pcf50633_adc_sync_read); ··· 160 159 req->callback = callback; 161 160 req->callback_param = callback_param; 162 161 163 - adc_enqueue_request(pcf, req); 164 - 165 - return 0; 162 + return adc_enqueue_request(pcf, req); 166 163 } 167 164 EXPORT_SYMBOL_GPL(pcf50633_adc_async_read); 168 165 ··· 183 184 struct pcf50633_adc *adc = data; 184 185 struct pcf50633 *pcf = adc->pcf; 185 186 struct pcf50633_adc_request *req; 186 - int head; 187 + int head, res; 187 188 
188 189 mutex_lock(&adc->queue_mutex); 189 190 head = adc->queue_head; ··· 198 199 adc->queue_head = (head + 1) & 199 200 (PCF50633_MAX_ADC_FIFO_DEPTH - 1); 200 201 202 + res = adc_result(pcf); 203 + trigger_next_adc_job_if_any(pcf); 204 + 201 205 mutex_unlock(&adc->queue_mutex); 202 206 203 - req->callback(pcf, req->callback_param, adc_result(pcf)); 207 + req->callback(pcf, req->callback_param, res); 204 208 kfree(req); 205 - 206 - trigger_next_adc_job_if_any(pcf); 207 209 } 208 210 209 211 static int __devinit pcf50633_adc_probe(struct platform_device *pdev)