Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: atmel-aes - add support for Device Tree

Add support for Device Tree and use the DMA DT API to
get the needed channels.
Documentation for these DT nodes is also added.

Initial code by: Nicolas Royer and Eukrea.

Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>

+112 -44
+23
Documentation/devicetree/bindings/crypto/atmel-crypto.txt
··· 1 + * Atmel HW cryptographic accelerators 2 + 3 + These are the HW cryptographic accelerators found on some Atmel products. 4 + 5 + * Advanced Encryption Standard (AES) 6 + 7 + Required properties: 8 + - compatible : Should be "atmel,at91sam9g46-aes". 9 + - reg: Should contain AES registers location and length. 10 + - interrupts: Should contain the IRQ line for the AES. 11 + - dmas: List of two DMA specifiers as described in 12 + atmel-dma.txt and dma.txt files. 13 + - dma-names: Contains one identifier string for each DMA specifier 14 + in the dmas property. 15 + 16 + Example: 17 + aes@f8038000 { 18 + compatible = "atmel,at91sam9g46-aes"; 19 + reg = <0xf8038000 0x100>; 20 + interrupts = <43 4 0>; 21 + dmas = <&dma1 2 18>, 22 + <&dma1 2 19>; 23 + dma-names = "tx", "rx";
+89 -44
drivers/crypto/atmel-aes.c
··· 30 30 #include <linux/irq.h> 31 31 #include <linux/scatterlist.h> 32 32 #include <linux/dma-mapping.h> 33 + #include <linux/of_device.h> 33 34 #include <linux/delay.h> 34 35 #include <linux/crypto.h> 35 36 #include <linux/cryptohash.h> ··· 40 39 #include <crypto/hash.h> 41 40 #include <crypto/internal/hash.h> 42 41 #include <linux/platform_data/crypto-atmel.h> 42 + #include <dt-bindings/dma/at91.h> 43 43 #include "atmel-aes-regs.h" 44 44 45 45 #define CFB8_BLOCK_SIZE 1 ··· 749 747 struct crypto_platform_data *pdata) 750 748 { 751 749 int err = -ENOMEM; 752 - dma_cap_mask_t mask_in, mask_out; 750 + dma_cap_mask_t mask; 753 751 754 - if (pdata && pdata->dma_slave->txdata.dma_dev && 755 - pdata->dma_slave->rxdata.dma_dev) { 752 + dma_cap_zero(mask); 753 + dma_cap_set(DMA_SLAVE, mask); 756 754 757 - /* Try to grab 2 DMA channels */ 758 - dma_cap_zero(mask_in); 759 - dma_cap_set(DMA_SLAVE, mask_in); 755 + /* Try to grab 2 DMA channels */ 756 + dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask, 757 + atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx"); 758 + if (!dd->dma_lch_in.chan) 759 + goto err_dma_in; 760 760 761 - dd->dma_lch_in.chan = dma_request_channel(mask_in, 762 - atmel_aes_filter, &pdata->dma_slave->rxdata); 761 + dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; 762 + dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + 763 + AES_IDATAR(0); 764 + dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size; 765 + dd->dma_lch_in.dma_conf.src_addr_width = 766 + DMA_SLAVE_BUSWIDTH_4_BYTES; 767 + dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size; 768 + dd->dma_lch_in.dma_conf.dst_addr_width = 769 + DMA_SLAVE_BUSWIDTH_4_BYTES; 770 + dd->dma_lch_in.dma_conf.device_fc = false; 763 771 764 - if (!dd->dma_lch_in.chan) 765 - goto err_dma_in; 772 + dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask, 773 + atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx"); 774 + if (!dd->dma_lch_out.chan) 775 + goto err_dma_out; 
766 776 767 - dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; 768 - dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + 769 - AES_IDATAR(0); 770 - dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size; 771 - dd->dma_lch_in.dma_conf.src_addr_width = 772 - DMA_SLAVE_BUSWIDTH_4_BYTES; 773 - dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size; 774 - dd->dma_lch_in.dma_conf.dst_addr_width = 775 - DMA_SLAVE_BUSWIDTH_4_BYTES; 776 - dd->dma_lch_in.dma_conf.device_fc = false; 777 + dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM; 778 + dd->dma_lch_out.dma_conf.src_addr = dd->phys_base + 779 + AES_ODATAR(0); 780 + dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size; 781 + dd->dma_lch_out.dma_conf.src_addr_width = 782 + DMA_SLAVE_BUSWIDTH_4_BYTES; 783 + dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size; 784 + dd->dma_lch_out.dma_conf.dst_addr_width = 785 + DMA_SLAVE_BUSWIDTH_4_BYTES; 786 + dd->dma_lch_out.dma_conf.device_fc = false; 777 787 778 - dma_cap_zero(mask_out); 779 - dma_cap_set(DMA_SLAVE, mask_out); 780 - dd->dma_lch_out.chan = dma_request_channel(mask_out, 781 - atmel_aes_filter, &pdata->dma_slave->txdata); 782 - 783 - if (!dd->dma_lch_out.chan) 784 - goto err_dma_out; 785 - 786 - dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM; 787 - dd->dma_lch_out.dma_conf.src_addr = dd->phys_base + 788 - AES_ODATAR(0); 789 - dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size; 790 - dd->dma_lch_out.dma_conf.src_addr_width = 791 - DMA_SLAVE_BUSWIDTH_4_BYTES; 792 - dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size; 793 - dd->dma_lch_out.dma_conf.dst_addr_width = 794 - DMA_SLAVE_BUSWIDTH_4_BYTES; 795 - dd->dma_lch_out.dma_conf.device_fc = false; 796 - 797 - return 0; 798 - } else { 799 - return -ENODEV; 800 - } 788 + return 0; 801 789 802 790 err_dma_out: 803 791 dma_release_channel(dd->dma_lch_in.chan); 804 792 err_dma_in: 793 + dev_warn(dd->dev, "no DMA channel available\n"); 805 794 return err; 
806 795 } 807 796 ··· 1254 1261 } 1255 1262 } 1256 1263 1264 + #if defined(CONFIG_OF) 1265 + static const struct of_device_id atmel_aes_dt_ids[] = { 1266 + { .compatible = "atmel,at91sam9g46-aes" }, 1267 + { /* sentinel */ } 1268 + }; 1269 + MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids); 1270 + 1271 + static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev) 1272 + { 1273 + struct device_node *np = pdev->dev.of_node; 1274 + struct crypto_platform_data *pdata; 1275 + 1276 + if (!np) { 1277 + dev_err(&pdev->dev, "device node not found\n"); 1278 + return ERR_PTR(-EINVAL); 1279 + } 1280 + 1281 + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 1282 + if (!pdata) { 1283 + dev_err(&pdev->dev, "could not allocate memory for pdata\n"); 1284 + return ERR_PTR(-ENOMEM); 1285 + } 1286 + 1287 + pdata->dma_slave = devm_kzalloc(&pdev->dev, 1288 + sizeof(*(pdata->dma_slave)), 1289 + GFP_KERNEL); 1290 + if (!pdata->dma_slave) { 1291 + dev_err(&pdev->dev, "could not allocate memory for dma_slave\n"); 1292 + devm_kfree(&pdev->dev, pdata); 1293 + return ERR_PTR(-ENOMEM); 1294 + } 1295 + 1296 + return pdata; 1297 + } 1298 + #else 1299 + static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev) 1300 + { 1301 + return ERR_PTR(-EINVAL); 1302 + } 1303 + #endif 1304 + 1257 1305 static int atmel_aes_probe(struct platform_device *pdev) 1258 1306 { 1259 1307 struct atmel_aes_dev *aes_dd; ··· 1306 1272 1307 1273 pdata = pdev->dev.platform_data; 1308 1274 if (!pdata) { 1275 + pdata = atmel_aes_of_init(pdev); 1276 + if (IS_ERR(pdata)) { 1277 + err = PTR_ERR(pdata); 1278 + goto aes_dd_err; 1279 + } 1280 + } 1281 + 1282 + if (!pdata->dma_slave) { 1309 1283 err = -ENXIO; 1310 1284 goto aes_dd_err; 1311 1285 } ··· 1400 1358 if (err) 1401 1359 goto err_algs; 1402 1360 1403 - dev_info(dev, "Atmel AES\n"); 1361 + dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n", 1362 + dma_chan_name(aes_dd->dma_lch_in.chan), 1363 + 
dma_chan_name(aes_dd->dma_lch_out.chan)); 1404 1364 1405 1365 return 0; 1406 1366 ··· 1468 1424 .driver = { 1469 1425 .name = "atmel_aes", 1470 1426 .owner = THIS_MODULE, 1427 + .of_match_table = of_match_ptr(atmel_aes_dt_ids), 1471 1428 }, 1472 1429 }; 1473 1430