Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:

- self-test failure of crc32c on powerpc

- regressions of ecb(aes) when used with xts/lrw in s5p-sss

- a number of bugs in the omap RNG driver

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
crypto: s5p-sss - Fix spinlock recursion on LRW(AES)
hwrng: omap - Do not access INTMASK_REG on EIP76
hwrng: omap - use devm_clk_get() instead of of_clk_get()
hwrng: omap - write registers after enabling the clock
crypto: s5p-sss - Fix completing crypto request in IRQ handler
crypto: powerpc - Fix initialisation of crc32c context

3 files changed, 100 insertions(+), 50 deletions(-)

arch/powerpc/crypto/crc32c-vpmsum_glue.c  (+1 -1)
···
 {
         u32 *key = crypto_tfm_ctx(tfm);

-        *key = 0;
+        *key = ~0;

         return 0;
 }
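
The fix above seeds the CRC32C context with all-ones instead of zero, which is the starting value the CRC32C (Castagnoli) convention and the kernel's generic crc32c code use, so the vpmsum-accelerated driver again matches the generic result and passes the self-test. As a rough userspace illustration of why the initial register value matters (a plain bitwise CRC32C, not the vpmsum code and not part of this patch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise, reflected CRC32C (Castagnoli polynomial, reflected form
 * 0x82F63B78). 'init' is the starting register value; the standard
 * convention starts from all-ones and inverts the result at the end. */
static uint32_t crc32c(uint32_t init, const void *buf, size_t len)
{
        const uint8_t *p = buf;
        uint32_t crc = init;

        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
        }
        return ~crc;
}

int main(void)
{
        const char *msg = "123456789";

        /* With init = ~0 this prints the standard CRC32C check value
         * e3069283; with init = 0 (the bug fixed above) it does not,
         * which is the kind of mismatch a known-answer self-test reports. */
        printf("init ~0: %08" PRIx32 "\n", crc32c(~0u, msg, strlen(msg)));
        printf("init  0: %08" PRIx32 "\n", crc32c(0u, msg, strlen(msg)));
        return 0;
}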
drivers/char/hw_random/omap-rng.c  (+14 -2)
···
                                 irq, err);
                         return err;
                 }
-                omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);

-                priv->clk = of_clk_get(pdev->dev.of_node, 0);
+                priv->clk = devm_clk_get(&pdev->dev, NULL);
                 if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
                         return -EPROBE_DEFER;
                 if (!IS_ERR(priv->clk)) {
···
                                 dev_err(&pdev->dev, "unable to enable the clk, "
                                         "err = %d\n", err);
                 }
+
+                /*
+                 * On OMAP4, enabling the shutdown_oflo interrupt is
+                 * done in the interrupt mask register. There is no
+                 * such register on EIP76, and it's enabled by the
+                 * same bit in the control register
+                 */
+                if (priv->pdata->regs[RNG_INTMASK_REG])
+                        omap_rng_write(priv, RNG_INTMASK_REG,
+                                       RNG_SHUTDOWN_OFLO_MASK);
+                else
+                        omap_rng_write(priv, RNG_CONTROL_REG,
+                                       RNG_SHUTDOWN_OFLO_MASK);
         }
         return 0;
 }
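
Two of the omap RNG fixes meet in this hunk: the clock is now acquired with the device-managed devm_clk_get() instead of of_clk_get() (so no clk_put() bookkeeping is needed on error or removal paths), and the SHUTDOWN_OFLO setup is written only after the clock has been enabled, going to RNG_INTMASK_REG only on variants that have that register (EIP76 uses the equivalent bit in the control register). Below is a minimal, hypothetical probe fragment sketching the same devm clock pattern; the function name and the choice to fail the probe when clk_prepare_enable() errors are illustrative, not the driver's exact behaviour.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical probe fragment showing the devm clock pattern used above:
 * devm_clk_get() ties the clock to the device's lifetime, -EPROBE_DEFER
 * is propagated so the probe is retried once the clock provider appears,
 * and the IP's registers are only touched after clk_prepare_enable(). */
static int example_rng_probe(struct platform_device *pdev)
{
        struct clk *clk;
        int err;

        clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
                /* Clock provider not ready yet: ask the core to retry. */
                if (PTR_ERR(clk) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                clk = NULL;             /* clock is optional in this sketch */
        }

        if (clk) {
                err = clk_prepare_enable(clk);
                if (err) {
                        dev_err(&pdev->dev, "unable to enable the clk, err = %d\n",
                                err);
                        return err;
                }
        }

        /* Only from here on is it safe to program the interrupt-mask or
         * control registers, as done at the end of the hunk above. */
        return 0;
}

Propagating -EPROBE_DEFER lets the driver core retry the probe once the clock provider has registered, which matters on DT platforms where probe order is not guaranteed.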
drivers/crypto/s5p-sss.c  (+85 -47)
···
         scatterwalk_done(&walk, out, 0);
 }

-static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+static void s5p_sg_done(struct s5p_aes_dev *dev)
 {
         if (dev->sg_dst_cpy) {
                 dev_dbg(dev->dev,
···
         }
         s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
         s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+}

-        /* holding a lock outside */
+/* Calls the completion. Cannot be called with dev->lock hold. */
+static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+{
         dev->req->base.complete(&dev->req->base, err);
         dev->busy = false;
 }
···
 }

 /*
- * Returns true if new transmitting (output) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_outdata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new transmitting (output) data is ready and its address+length
+ *    have to be written to device (by calling s5p_set_dma_outdata()).
  */
-static bool s5p_aes_tx(struct s5p_aes_dev *dev)
+static int s5p_aes_tx(struct s5p_aes_dev *dev)
 {
-        int err = 0;
-        bool ret = false;
+        int ret = 0;

         s5p_unset_outdata(dev);

         if (!sg_is_last(dev->sg_dst)) {
-                err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
-                if (err)
-                        s5p_aes_complete(dev, err);
-                else
-                        ret = true;
-        } else {
-                s5p_aes_complete(dev, err);
-
-                dev->busy = true;
-                tasklet_schedule(&dev->tasklet);
+                ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
+                if (!ret)
+                        ret = 1;
         }

         return ret;
 }

 /*
- * Returns true if new receiving (input) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_indata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new receiving (input) data is ready and its address+length
+ *    have to be written to device (by calling s5p_set_dma_indata()).
  */
-static bool s5p_aes_rx(struct s5p_aes_dev *dev)
+static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
 {
-        int err;
-        bool ret = false;
+        int ret = 0;

         s5p_unset_indata(dev);

         if (!sg_is_last(dev->sg_src)) {
-                err = s5p_set_indata(dev, sg_next(dev->sg_src));
-                if (err)
-                        s5p_aes_complete(dev, err);
-                else
-                        ret = true;
+                ret = s5p_set_indata(dev, sg_next(dev->sg_src));
+                if (!ret)
+                        ret = 1;
         }

         return ret;
···
 {
         struct platform_device *pdev = dev_id;
         struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
-        bool set_dma_tx = false;
-        bool set_dma_rx = false;
+        int err_dma_tx = 0;
+        int err_dma_rx = 0;
+        bool tx_end = false;
         unsigned long flags;
         uint32_t status;
+        int err;

         spin_lock_irqsave(&dev->lock, flags);

+        /*
+         * Handle rx or tx interrupt. If there is still data (scatterlist did not
+         * reach end), then map next scatterlist entry.
+         * In case of such mapping error, s5p_aes_complete() should be called.
+         *
+         * If there is no more data in tx scatter list, call s5p_aes_complete()
+         * and schedule new tasklet.
+         */
         status = SSS_READ(dev, FCINTSTAT);
         if (status & SSS_FCINTSTAT_BRDMAINT)
-                set_dma_rx = s5p_aes_rx(dev);
-        if (status & SSS_FCINTSTAT_BTDMAINT)
-                set_dma_tx = s5p_aes_tx(dev);
+                err_dma_rx = s5p_aes_rx(dev);
+
+        if (status & SSS_FCINTSTAT_BTDMAINT) {
+                if (sg_is_last(dev->sg_dst))
+                        tx_end = true;
+                err_dma_tx = s5p_aes_tx(dev);
+        }

         SSS_WRITE(dev, FCINTPEND, status);

-        /*
-         * Writing length of DMA block (either receiving or transmitting)
-         * will start the operation immediately, so this should be done
-         * at the end (even after clearing pending interrupts to not miss the
-         * interrupt).
-         */
-        if (set_dma_tx)
-                s5p_set_dma_outdata(dev, dev->sg_dst);
-        if (set_dma_rx)
-                s5p_set_dma_indata(dev, dev->sg_src);
+        if (err_dma_rx < 0) {
+                err = err_dma_rx;
+                goto error;
+        }
+        if (err_dma_tx < 0) {
+                err = err_dma_tx;
+                goto error;
+        }

+        if (tx_end) {
+                s5p_sg_done(dev);
+
+                spin_unlock_irqrestore(&dev->lock, flags);
+
+                s5p_aes_complete(dev, 0);
+                dev->busy = true;
+                tasklet_schedule(&dev->tasklet);
+        } else {
+                /*
+                 * Writing length of DMA block (either receiving or
+                 * transmitting) will start the operation immediately, so this
+                 * should be done at the end (even after clearing pending
+                 * interrupts to not miss the interrupt).
+                 */
+                if (err_dma_tx == 1)
+                        s5p_set_dma_outdata(dev, dev->sg_dst);
+                if (err_dma_rx == 1)
+                        s5p_set_dma_indata(dev, dev->sg_src);
+
+                spin_unlock_irqrestore(&dev->lock, flags);
+        }
+
+        return IRQ_HANDLED;
+
+error:
+        s5p_sg_done(dev);
         spin_unlock_irqrestore(&dev->lock, flags);
+        s5p_aes_complete(dev, err);

         return IRQ_HANDLED;
 }
···
         s5p_unset_indata(dev);

 indata_error:
-        s5p_aes_complete(dev, err);
+        s5p_sg_done(dev);
         spin_unlock_irqrestore(&dev->lock, flags);
+        s5p_aes_complete(dev, err);
 }

 static void s5p_tasklet_cb(unsigned long data)
···
                 dev_warn(dev, "feed control interrupt is not available.\n");
                 goto err_irq;
         }
-        err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
-                               IRQF_SHARED, pdev->name, pdev);
+        err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
+                                        s5p_aes_interrupt, IRQF_ONESHOT,
+                                        pdev->name, pdev);
         if (err < 0) {
                 dev_warn(dev, "feed control interrupt is not available.\n");
                 goto err_irq;
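
The core of the s5p-sss change is where the request's completion callback runs. Completing the request from the hard-IRQ handler while dev->lock was held is what caused the spinlock recursion seen with lrw(aes), since the lrw/xts callback can synchronously re-enter the driver and take the same lock again. The patch therefore splits the scatterlist cleanup (s5p_sg_done()) from the callback invocation (s5p_aes_complete()), drops the lock before calling the latter, and requests a threaded IRQ so the handler runs from a kernel thread rather than hard-IRQ context. A stripped-down sketch of that ordering, with illustrative names rather than the driver's, might look like this:

#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* Illustrative device structure; only the fields needed for the sketch.
 * The request type mirrors what the driver used in this era. */
struct example_aes_dev {
        spinlock_t lock;
        struct ablkcipher_request *req;
};

/* Runs as the threaded half of the IRQ (see devm_request_threaded_irq()
 * above). All hardware/scatterlist bookkeeping happens under the lock;
 * the crypto completion runs only after the lock is dropped, because the
 * lrw/xts callback may immediately re-enter the driver and try to take
 * the same lock. */
static irqreturn_t example_aes_irq_thread(int irq, void *dev_id)
{
        struct example_aes_dev *dev = dev_id;
        unsigned long flags;
        int err = 0;

        spin_lock_irqsave(&dev->lock, flags);
        /* ... read/ack status, unmap finished scatterlist entries ... */
        spin_unlock_irqrestore(&dev->lock, flags);

        /* Safe point: no driver lock held, thread context. */
        dev->req->base.complete(&dev->req->base, err);

        return IRQ_HANDLED;
}

With a NULL hard-IRQ handler and IRQF_ONESHOT, as in the devm_request_threaded_irq() call in the last hunk, the interrupt line stays masked until the threaded handler returns, so the completion cannot race with the next interrupt for the same transfer.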