Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: omap-sham - Enable Polling mode if DMA fails

For writing the input buffer into the DATA_IN register, the current driver
has the following state machine:
-> if input buffer < 9 : use fallback driver
-> else if input buffer < block size : Copy input buffer into data_in regs
-> else use dma transfer.

In cases where requesting for DMA channels fails for some reason,
or channel numbers are not provided in DT or platform data, probe
also fails. Instead of returning an error from the driver, fall back to CPU polling mode.
In this mode the processor polls the INPUT_READY bit and writes data into the
data_in registers once the bit is set. This operation is repeated until the
entire message has been written.

Now the state machine looks like:
-> if input buffer < 9 : use fallback driver
-> else if input buffer < block size : Copy input buffer into data_in regs
-> else if dma enabled: use dma transfer
else use cpu polling mode.

Signed-off-by: Lokesh Vutla <lokeshvutla@ti.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Lokesh Vutla and committed by
Herbert Xu
b8411ccd 5bc35703

+42 -19
+42 -19
drivers/crypto/omap-sham.c
··· 225 225 unsigned int dma; 226 226 struct dma_chan *dma_lch; 227 227 struct tasklet_struct done_task; 228 + u8 polling_mode; 228 229 229 230 unsigned long flags; 230 231 struct crypto_queue queue; ··· 511 510 size_t length, int final) 512 511 { 513 512 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 514 - int count, len32; 513 + int count, len32, bs32, offset = 0; 515 514 const u32 *buffer = (const u32 *)buf; 516 515 517 516 dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", ··· 523 522 /* should be non-zero before next lines to disable clocks later */ 524 523 ctx->digcnt += length; 525 524 526 - if (dd->pdata->poll_irq(dd)) 527 - return -ETIMEDOUT; 528 - 529 525 if (final) 530 526 set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ 531 527 532 528 set_bit(FLAGS_CPU, &dd->flags); 533 529 534 530 len32 = DIV_ROUND_UP(length, sizeof(u32)); 531 + bs32 = get_block_size(ctx) / sizeof(u32); 535 532 536 - for (count = 0; count < len32; count++) 537 - omap_sham_write(dd, SHA_REG_DIN(dd, count), buffer[count]); 533 + while (len32) { 534 + if (dd->pdata->poll_irq(dd)) 535 + return -ETIMEDOUT; 536 + 537 + for (count = 0; count < min(len32, bs32); count++, offset++) 538 + omap_sham_write(dd, SHA_REG_DIN(dd, count), 539 + buffer[offset]); 540 + len32 -= min(len32, bs32); 541 + } 538 542 539 543 return -EINPROGRESS; 540 544 } ··· 780 774 static int omap_sham_update_cpu(struct omap_sham_dev *dd) 781 775 { 782 776 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 783 - int bufcnt; 777 + int bufcnt, final; 778 + 779 + if (!ctx->total) 780 + return 0; 784 781 785 782 omap_sham_append_sg(ctx); 783 + 784 + final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; 785 + 786 + dev_dbg(dd->dev, "cpu: bufcnt: %u, digcnt: %d, final: %d\n", 787 + ctx->bufcnt, ctx->digcnt, final); 788 + 786 789 bufcnt = ctx->bufcnt; 787 790 ctx->bufcnt = 0; 788 791 789 - return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1); 792 + return omap_sham_xmit_cpu(dd, 
ctx->buffer, bufcnt, final); 790 793 } 791 794 792 795 static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) ··· 918 903 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 919 904 int err = 0, use_dma = 1; 920 905 921 - if (ctx->bufcnt <= DMA_MIN) 922 - /* faster to handle last block with cpu */ 906 + if ((ctx->bufcnt <= get_block_size(ctx)) || dd->polling_mode) 907 + /* 908 + * faster to handle last block with cpu or 909 + * use cpu when dma is not present. 910 + */ 923 911 use_dma = 0; 924 912 925 913 if (use_dma) ··· 1074 1056 static int omap_sham_update(struct ahash_request *req) 1075 1057 { 1076 1058 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 1059 + struct omap_sham_dev *dd = ctx->dd; 1077 1060 int bs = get_block_size(ctx); 1078 1061 1079 1062 if (!req->nbytes) ··· 1093 1074 */ 1094 1075 omap_sham_append_sg(ctx); 1095 1076 return 0; 1096 - } else if (ctx->bufcnt + ctx->total <= bs) { 1077 + } else if ((ctx->bufcnt + ctx->total <= bs) || 1078 + dd->polling_mode) { 1097 1079 /* 1098 - * faster to use CPU for short transfers 1099 - */ 1080 + * faster to use CPU for short transfers or 1081 + * use cpu when dma is not present. 
1082 + */ 1100 1083 ctx->flags |= BIT(FLAGS_CPU); 1101 1084 } 1102 1085 } else if (ctx->bufcnt + ctx->total < ctx->buflen) { ··· 1610 1589 } 1611 1590 1612 1591 if (test_bit(FLAGS_CPU, &dd->flags)) { 1613 - if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) 1614 - goto finish; 1592 + if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { 1593 + /* hash or semi-hash ready */ 1594 + err = omap_sham_update_cpu(dd); 1595 + if (err != -EINPROGRESS) 1596 + goto finish; 1597 + } 1615 1598 } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { 1616 1599 if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { 1617 1600 omap_sham_update_dma_stop(dd); ··· 1935 1910 dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn, 1936 1911 &dd->dma, dev, "rx"); 1937 1912 if (!dd->dma_lch) { 1938 - dev_err(dev, "unable to obtain RX DMA engine channel %u\n", 1939 - dd->dma); 1940 - err = -ENXIO; 1941 - goto data_err; 1913 + dd->polling_mode = 1; 1914 + dev_dbg(dev, "using polling mode instead of dma\n"); 1942 1915 } 1943 1916 1944 1917 dd->flags |= dd->pdata->flags;