crypto: talitos - Preempt overflow interrupts

Add a pending/submitted request count to prevent the request-queue-full
condition by preempting h/w overflow interrupts in software.
We do this because of the delay in the delivery and handling of the
channel overflow error interrupt.

Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Acked-by: Lee Nipper <lee.nipper@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by Kim Phillips, committed by Herbert Xu
ec6644d6 695ad589

+22 -5
drivers/crypto/talitos.c
@@ -99,6 +99,9 @@
 	/* next channel to be assigned next incoming descriptor */
 	atomic_t last_chan;
 
+	/* per-channel number of requests pending in channel h/w fifo */
+	atomic_t *submit_count;
+
 	/* per-channel request fifo */
 	struct talitos_request **fifo;
 
@@ -263,14 +266,14 @@
 
 	spin_lock_irqsave(&priv->head_lock[ch], flags);
 
-	head = priv->head[ch];
-	request = &priv->fifo[ch][head];
-
-	if (request->desc) {
-		/* request queue is full */
+	if (!atomic_inc_not_zero(&priv->submit_count[ch])) {
+		/* h/w fifo is full */
 		spin_unlock_irqrestore(&priv->head_lock[ch], flags);
 		return -EAGAIN;
 	}
+
+	head = priv->head[ch];
+	request = &priv->fifo[ch][head];
 
 	/* map descriptor and save caller data */
 	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
@@ -335,6 +338,9 @@
 	priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);
 
 	spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
+
+	atomic_dec(&priv->submit_count[ch]);
+
 	saved_req.callback(dev, saved_req.desc, saved_req.context,
 			   status);
 	/* channel may resume processing in single desc error case */
@@ -1337,6 +1343,7 @@
 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
 		talitos_unregister_rng(dev);
 
+	kfree(priv->submit_count);
 	kfree(priv->tail);
 	kfree(priv->head);
 
@@ -1500,6 +1507,16 @@
 			goto err_out;
 		}
 	}
+
+	priv->submit_count = kmalloc(sizeof(int) * priv->num_channels,
+				     GFP_KERNEL);
+	if (!priv->submit_count) {
+		dev_err(dev, "failed to allocate fifo submit count space\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+	for (i = 0; i < priv->num_channels; i++)
+		atomic_set(&priv->submit_count[i], -priv->chfifo_len);
 
 	priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
 	priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
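
A note on the counting scheme in the patch above: each channel's submit_count
starts at -priv->chfifo_len (the channel's h/w fifo depth), so
atomic_inc_not_zero() doubles as the full-fifo check. After chfifo_len
successful submissions the counter reaches zero and the next submission fails
with -EAGAIN before the hardware ever gets the chance to raise an overflow
error interrupt; atomic_dec() on completion reopens a slot. Below is a minimal
stand-alone sketch of that pattern using C11 atomics; the fifo depth and the
submit()/complete() helpers are illustrative stand-ins, not the driver's code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define CHFIFO_LEN 4	/* assumed h/w fifo depth, illustrative only */

/* starts negative; reaching zero means the h/w fifo is full */
static atomic_int submit_count = -CHFIFO_LEN;

/* userspace stand-in for the kernel's atomic_inc_not_zero() */
static bool inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;
		/* CAS failed: old was reloaded, re-test against zero */
	}
	return false;
}

static int submit(void)
{
	if (!inc_not_zero(&submit_count))
		return -1;	/* full: the driver returns -EAGAIN here */
	/* ... map the descriptor and hand it to the channel ... */
	return 0;
}

static void complete(void)
{
	/* ... unmap the descriptor, invoke the caller's callback ... */
	atomic_fetch_sub(&submit_count, 1);
}

int main(void)
{
	for (int i = 0; i < CHFIFO_LEN + 1; i++)
		printf("submit %d: %s\n", i, submit() ? "-EAGAIN" : "ok");

	complete();	/* one request retires, freeing a slot */
	printf("after completion: %s\n", submit() ? "-EAGAIN" : "ok");
	return 0;
}

With a depth of 4, the fifth submit fails until complete() runs, which is
exactly the preemption the commit message describes: the software counter
rejects the request instead of letting the channel overflow and signal the
(slower) error interrupt.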