Linux kernel mirror (for testing), git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v4.14

/*
 * Driver for EIP97 cryptographic accelerator.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "mtk-platform.h"

#define MTK_BURST_SIZE_MSK	GENMASK(7, 4)
#define MTK_BURST_SIZE(x)	((x) << 4)
#define MTK_DESC_SIZE(x)	((x) << 0)
#define MTK_DESC_OFFSET(x)	((x) << 16)
#define MTK_DESC_FETCH_SIZE(x)	((x) << 0)
#define MTK_DESC_FETCH_THRESH(x)	((x) << 16)
#define MTK_DESC_OVL_IRQ_EN	BIT(25)
#define MTK_DESC_ATP_PRESENT	BIT(30)

#define MTK_DFSE_IDLE	GENMASK(3, 0)
#define MTK_DFSE_THR_CTRL_EN	BIT(30)
#define MTK_DFSE_THR_CTRL_RESET	BIT(31)
#define MTK_DFSE_RING_ID(x)	(((x) >> 12) & GENMASK(3, 0))
#define MTK_DFSE_MIN_DATA(x)	((x) << 0)
#define MTK_DFSE_MAX_DATA(x)	((x) << 8)
#define MTK_DFE_MIN_CTRL(x)	((x) << 16)
#define MTK_DFE_MAX_CTRL(x)	((x) << 24)

#define MTK_IN_BUF_MIN_THRESH(x)	((x) << 8)
#define MTK_IN_BUF_MAX_THRESH(x)	((x) << 12)
#define MTK_OUT_BUF_MIN_THRESH(x)	((x) << 0)
#define MTK_OUT_BUF_MAX_THRESH(x)	((x) << 4)
#define MTK_IN_TBUF_SIZE(x)	(((x) >> 4) & GENMASK(3, 0))
#define MTK_IN_DBUF_SIZE(x)	(((x) >> 8) & GENMASK(3, 0))
#define MTK_OUT_DBUF_SIZE(x)	(((x) >> 16) & GENMASK(3, 0))
#define MTK_CMD_FIFO_SIZE(x)	(((x) >> 8) & GENMASK(3, 0))
#define MTK_RES_FIFO_SIZE(x)	(((x) >> 12) & GENMASK(3, 0))

#define MTK_PE_TK_LOC_AVL	BIT(2)
#define MTK_PE_PROC_HELD	BIT(14)
#define MTK_PE_TK_TIMEOUT_EN	BIT(22)
#define MTK_PE_INPUT_DMA_ERR	BIT(0)
#define MTK_PE_OUTPUT_DMA_ERR	BIT(1)
#define MTK_PE_PKT_PORC_ERR	BIT(2)
#define MTK_PE_PKT_TIMEOUT	BIT(3)
#define MTK_PE_FATAL_ERR	BIT(14)
#define MTK_PE_INPUT_DMA_ERR_EN	BIT(16)
#define MTK_PE_OUTPUT_DMA_ERR_EN	BIT(17)
#define MTK_PE_PKT_PORC_ERR_EN	BIT(18)
#define MTK_PE_PKT_TIMEOUT_EN	BIT(19)
#define MTK_PE_FATAL_ERR_EN	BIT(30)
#define MTK_PE_INT_OUT_EN	BIT(31)

#define MTK_HIA_SIGNATURE	((u16)0x35ca)
#define MTK_HIA_DATA_WIDTH(x)	(((x) >> 25) & GENMASK(1, 0))
#define MTK_HIA_DMA_LENGTH(x)	(((x) >> 20) & GENMASK(4, 0))
#define MTK_CDR_STAT_CLR	GENMASK(4, 0)
#define MTK_RDR_STAT_CLR	GENMASK(7, 0)

#define MTK_AIC_INT_MSK	GENMASK(5, 0)
#define MTK_AIC_VER_MSK	(GENMASK(15, 0) | GENMASK(27, 20))
#define MTK_AIC_VER11	0x011036c9
#define MTK_AIC_VER12	0x012036c9
#define MTK_AIC_G_CLR	GENMASK(30, 20)

/**
 * EIP97 is an integrated security subsystem to accelerate cryptographic
 * functions and protocols to offload the host processor.
 * Some important hardware modules are briefly introduced below:
 *
 * Host Interface Adapter(HIA) - the main interface between the host
 * system and the hardware subsystem. It is responsible for attaching the
 * processing engine to the specific host bus interface and provides a
 * standardized software view for offloading tasks to the engine.
 *
 * Command Descriptor Ring Manager(CDR Manager) - keeps track of how many
 * CDs the host has prepared in the CDR. It monitors the fill level of its
 * CD-FIFO and if there's sufficient space for the next block of descriptors,
 * then it fires off a DMA request to fetch a block of CDs.
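 *
 * (In this driver, the per-ring fetch size and fetch threshold used for
 * that prefetching are programmed by mtk_cmd_desc_ring_setup() below.)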
 *
 * Data fetch engine(DFE) - It is responsible for parsing the CD and
 * setting up the required control and packet data DMA transfers from
 * system memory to the processing engine.
 *
 * Result Descriptor Ring Manager(RDR Manager) - same as CDR Manager,
 * but the target is result descriptors. Moreover, it also handles the RD
 * updates under control of the DSE. For each packet data segment
 * processed, the DSE triggers the RDR Manager to write the updated RD.
 * If triggered to update, the RDR Manager sets up a DMA operation to
 * copy the RD from the DSE to the correct location in the RDR.
 *
 * Data Store Engine(DSE) - It is responsible for parsing the prepared RD
 * and setting up the required control and packet data DMA transfers from
 * the processing engine to system memory.
 *
 * Advanced Interrupt Controllers(AICs) - receive interrupt request signals
 * from various sources and combine them into one interrupt output.
 * The AICs are used as follows:
 * - One for the HIA global and processing engine interrupts.
 * - The others for the descriptor ring interrupts.
 */

/* Cryptographic engine capabilities */
struct mtk_sys_cap {
	/* host interface adapter */
	u32 hia_ver;
	u32 hia_opt;
	/* packet engine */
	u32 pkt_eng_opt;
	/* global hardware */
	u32 hw_opt;
};

static void mtk_desc_ring_link(struct mtk_cryp *cryp, u32 mask)
{
	/* Assign rings to DFE/DSE thread and enable it */
	writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DFE_THR_CTRL);
	writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DSE_THR_CTRL);
}

static void mtk_dfe_dse_buf_setup(struct mtk_cryp *cryp,
				  struct mtk_sys_cap *cap)
{
	u32 width = MTK_HIA_DATA_WIDTH(cap->hia_opt) + 2;
	u32 len = MTK_HIA_DMA_LENGTH(cap->hia_opt) - 1;
	u32 ipbuf = min((u32)MTK_IN_DBUF_SIZE(cap->hw_opt) + width, len);
	u32 opbuf = min((u32)MTK_OUT_DBUF_SIZE(cap->hw_opt) + width, len);
	u32 itbuf = min((u32)MTK_IN_TBUF_SIZE(cap->hw_opt) + width, len);

	writel(MTK_DFSE_MIN_DATA(ipbuf - 1) |
	       MTK_DFSE_MAX_DATA(ipbuf) |
	       MTK_DFE_MIN_CTRL(itbuf - 1) |
	       MTK_DFE_MAX_CTRL(itbuf),
	       cryp->base + DFE_CFG);

	writel(MTK_DFSE_MIN_DATA(opbuf - 1) |
	       MTK_DFSE_MAX_DATA(opbuf),
	       cryp->base + DSE_CFG);

	writel(MTK_IN_BUF_MIN_THRESH(ipbuf - 1) |
	       MTK_IN_BUF_MAX_THRESH(ipbuf),
	       cryp->base + PE_IN_DBUF_THRESH);

	writel(MTK_IN_BUF_MIN_THRESH(itbuf - 1) |
	       MTK_IN_BUF_MAX_THRESH(itbuf),
	       cryp->base + PE_IN_TBUF_THRESH);

	writel(MTK_OUT_BUF_MIN_THRESH(opbuf - 1) |
	       MTK_OUT_BUF_MAX_THRESH(opbuf),
	       cryp->base + PE_OUT_DBUF_THRESH);

	writel(0, cryp->base + PE_OUT_TBUF_THRESH);
	writel(0, cryp->base + PE_OUT_BUF_CTRL);
}

static int mtk_dfe_dse_state_check(struct mtk_cryp *cryp)
{
	int ret = -EINVAL;
	u32 val;

	/* Check for completion of all DMA transfers */
	val = readl(cryp->base + DFE_THR_STAT);
	if (MTK_DFSE_RING_ID(val) == MTK_DFSE_IDLE) {
		val = readl(cryp->base + DSE_THR_STAT);
		if (MTK_DFSE_RING_ID(val) == MTK_DFSE_IDLE)
			ret = 0;
	}

	if (ret)
		return -EBUSY;

	/* Take DFE/DSE thread out of reset */
	writel(0, cryp->base + DFE_THR_CTRL);
	writel(0, cryp->base + DSE_THR_CTRL);

	return 0;
}

static int mtk_dfe_dse_reset(struct mtk_cryp *cryp)
{
	int err;

	/* Reset DSE/DFE and correct system priorities for all rings.
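	 * mtk_dfe_dse_state_check() then confirms that both threads report
	 * idle and takes them out of reset.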
	 */
	writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DFE_THR_CTRL);
	writel(0, cryp->base + DFE_PRIO_0);
	writel(0, cryp->base + DFE_PRIO_1);
	writel(0, cryp->base + DFE_PRIO_2);
	writel(0, cryp->base + DFE_PRIO_3);

	writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DSE_THR_CTRL);
	writel(0, cryp->base + DSE_PRIO_0);
	writel(0, cryp->base + DSE_PRIO_1);
	writel(0, cryp->base + DSE_PRIO_2);
	writel(0, cryp->base + DSE_PRIO_3);

	err = mtk_dfe_dse_state_check(cryp);
	if (err)
		return err;

	return 0;
}

static void mtk_cmd_desc_ring_setup(struct mtk_cryp *cryp,
				    int i, struct mtk_sys_cap *cap)
{
	/* Number of full descriptors that fit in the FIFO, minus one */
	u32 count =
		((1 << MTK_CMD_FIFO_SIZE(cap->hia_opt)) / MTK_DESC_SZ) - 1;

	/* Temporarily disable external triggering */
	writel(0, cryp->base + CDR_CFG(i));

	/* Clear CDR count */
	writel(MTK_CNT_RST, cryp->base + CDR_PREP_COUNT(i));
	writel(MTK_CNT_RST, cryp->base + CDR_PROC_COUNT(i));

	writel(0, cryp->base + CDR_PREP_PNTR(i));
	writel(0, cryp->base + CDR_PROC_PNTR(i));
	writel(0, cryp->base + CDR_DMA_CFG(i));

	/* Configure CDR host address space */
	writel(0, cryp->base + CDR_BASE_ADDR_HI(i));
	writel(cryp->ring[i]->cmd_dma, cryp->base + CDR_BASE_ADDR_LO(i));

	writel(MTK_DESC_RING_SZ, cryp->base + CDR_RING_SIZE(i));

	/* Clear and disable all CDR interrupts */
	writel(MTK_CDR_STAT_CLR, cryp->base + CDR_STAT(i));

	/*
	 * Set command descriptor offset and enable additional
	 * token present in descriptor.
	 */
	writel(MTK_DESC_SIZE(MTK_DESC_SZ) |
	       MTK_DESC_OFFSET(MTK_DESC_OFF) |
	       MTK_DESC_ATP_PRESENT,
	       cryp->base + CDR_DESC_SIZE(i));

	writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) |
	       MTK_DESC_FETCH_THRESH(count * MTK_DESC_SZ),
	       cryp->base + CDR_CFG(i));
}

static void mtk_res_desc_ring_setup(struct mtk_cryp *cryp,
				    int i, struct mtk_sys_cap *cap)
{
	u32 rndup = 2;
	u32 count = ((1 << MTK_RES_FIFO_SIZE(cap->hia_opt)) / rndup) - 1;

	/* Temporarily disable external triggering */
	writel(0, cryp->base + RDR_CFG(i));

	/* Clear RDR count */
	writel(MTK_CNT_RST, cryp->base + RDR_PREP_COUNT(i));
	writel(MTK_CNT_RST, cryp->base + RDR_PROC_COUNT(i));

	writel(0, cryp->base + RDR_PREP_PNTR(i));
	writel(0, cryp->base + RDR_PROC_PNTR(i));
	writel(0, cryp->base + RDR_DMA_CFG(i));

	/* Configure RDR host address space */
	writel(0, cryp->base + RDR_BASE_ADDR_HI(i));
	writel(cryp->ring[i]->res_dma, cryp->base + RDR_BASE_ADDR_LO(i));

	writel(MTK_DESC_RING_SZ, cryp->base + RDR_RING_SIZE(i));
	writel(MTK_RDR_STAT_CLR, cryp->base + RDR_STAT(i));

	/*
	 * The RDR manager generates update interrupts on a per-completed-packet
	 * basis; the rd_proc_thresh_irq interrupt fires when proc_pkt_count
	 * for the RDR exceeds the programmed packet threshold.
	 */
	writel(MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE,
	       cryp->base + RDR_THRESH(i));

	/*
	 * Configure a threshold and time-out value for the processed
	 * result descriptors (or complete packets) that are written to
	 * the RDR.
	 */
	writel(MTK_DESC_SIZE(MTK_DESC_SZ) | MTK_DESC_OFFSET(MTK_DESC_OFF),
	       cryp->base + RDR_DESC_SIZE(i));

	/*
	 * Configure HIA fetch size and fetch threshold that are used to
	 * fetch blocks of multiple descriptors.
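	 * Both values are derived from the result FIFO size advertised in
	 * the HIA_OPTIONS register (cap->hia_opt).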
	 */
	writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) |
	       MTK_DESC_FETCH_THRESH(count * rndup) |
	       MTK_DESC_OVL_IRQ_EN,
	       cryp->base + RDR_CFG(i));
}

static int mtk_packet_engine_setup(struct mtk_cryp *cryp)
{
	struct mtk_sys_cap cap;
	int i, err;
	u32 val;

	cap.hia_ver = readl(cryp->base + HIA_VERSION);
	cap.hia_opt = readl(cryp->base + HIA_OPTIONS);
	cap.hw_opt = readl(cryp->base + EIP97_OPTIONS);

	if ((u16)cap.hia_ver != MTK_HIA_SIGNATURE)
		return -EINVAL;

	/* Configure endianness conversion method for master (DMA) interface */
	writel(0, cryp->base + EIP97_MST_CTRL);

	/* Set HIA burst size */
	val = readl(cryp->base + HIA_MST_CTRL);
	val &= ~MTK_BURST_SIZE_MSK;
	val |= MTK_BURST_SIZE(5);
	writel(val, cryp->base + HIA_MST_CTRL);

	err = mtk_dfe_dse_reset(cryp);
	if (err) {
		dev_err(cryp->dev, "Failed to reset DFE and DSE.\n");
		return err;
	}

	mtk_dfe_dse_buf_setup(cryp, &cap);

	/* Enable the 4 rings for the packet engines. */
	mtk_desc_ring_link(cryp, 0xf);

	for (i = 0; i < MTK_RING_MAX; i++) {
		mtk_cmd_desc_ring_setup(cryp, i, &cap);
		mtk_res_desc_ring_setup(cryp, i, &cap);
	}

	writel(MTK_PE_TK_LOC_AVL | MTK_PE_PROC_HELD | MTK_PE_TK_TIMEOUT_EN,
	       cryp->base + PE_TOKEN_CTRL_STAT);

	/* Clear all pending interrupts */
	writel(MTK_AIC_G_CLR, cryp->base + AIC_G_ACK);
	writel(MTK_PE_INPUT_DMA_ERR | MTK_PE_OUTPUT_DMA_ERR |
	       MTK_PE_PKT_PORC_ERR | MTK_PE_PKT_TIMEOUT |
	       MTK_PE_FATAL_ERR | MTK_PE_INPUT_DMA_ERR_EN |
	       MTK_PE_OUTPUT_DMA_ERR_EN | MTK_PE_PKT_PORC_ERR_EN |
	       MTK_PE_PKT_TIMEOUT_EN | MTK_PE_FATAL_ERR_EN |
	       MTK_PE_INT_OUT_EN,
	       cryp->base + PE_INTERRUPT_CTRL_STAT);

	return 0;
}

static int mtk_aic_cap_check(struct mtk_cryp *cryp, int hw)
{
	u32 val;

	if (hw == MTK_RING_MAX)
		val = readl(cryp->base + AIC_G_VERSION);
	else
		val = readl(cryp->base + AIC_VERSION(hw));

	val &= MTK_AIC_VER_MSK;
	if (val != MTK_AIC_VER11 && val != MTK_AIC_VER12)
		return -ENXIO;

	if (hw == MTK_RING_MAX)
		val = readl(cryp->base + AIC_G_OPTIONS);
	else
		val = readl(cryp->base + AIC_OPTIONS(hw));

	val &= MTK_AIC_INT_MSK;
	if (!val || val > 32)
		return -ENXIO;

	return 0;
}

static int mtk_aic_init(struct mtk_cryp *cryp, int hw)
{
	int err;

	err = mtk_aic_cap_check(cryp, hw);
	if (err)
		return err;

	/* Disable all interrupts and set initial configuration */
	if (hw == MTK_RING_MAX) {
		writel(0, cryp->base + AIC_G_ENABLE_CTRL);
		writel(0, cryp->base + AIC_G_POL_CTRL);
		writel(0, cryp->base + AIC_G_TYPE_CTRL);
		writel(0, cryp->base + AIC_G_ENABLE_SET);
	} else {
		writel(0, cryp->base + AIC_ENABLE_CTRL(hw));
		writel(0, cryp->base + AIC_POL_CTRL(hw));
		writel(0, cryp->base + AIC_TYPE_CTRL(hw));
		writel(0, cryp->base + AIC_ENABLE_SET(hw));
	}

	return 0;
}

static int mtk_accelerator_init(struct mtk_cryp *cryp)
{
	int i, err;

	/* Initialize advanced interrupt controller(AIC) */
	for (i = 0; i < MTK_IRQ_NUM; i++) {
		err = mtk_aic_init(cryp, i);
		if (err) {
			dev_err(cryp->dev, "Failed to initialize AIC.\n");
			return err;
		}
	}

	/* Initialize packet engine */
	err = mtk_packet_engine_setup(cryp);
	if (err) {
		dev_err(cryp->dev, "Failed to configure packet engine.\n");
		return err;
	}

	return 0;
}

static void
mtk_desc_dma_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_RING_MAX; i++) {
		dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
				  cryp->ring[i]->res_base,
				  cryp->ring[i]->res_dma);
		dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
				  cryp->ring[i]->cmd_base,
				  cryp->ring[i]->cmd_dma);
		kfree(cryp->ring[i]);
	}
}

static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
{
	struct mtk_ring **ring = cryp->ring;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_RING_MAX; i++) {
		ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
		if (!ring[i])
			goto err_cleanup;

		ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev,
							MTK_DESC_RING_SZ,
							&ring[i]->cmd_dma,
							GFP_KERNEL);
		if (!ring[i]->cmd_base)
			goto err_cleanup;

		ring[i]->res_base = dma_zalloc_coherent(cryp->dev,
							MTK_DESC_RING_SZ,
							&ring[i]->res_dma,
							GFP_KERNEL);
		if (!ring[i]->res_base)
			goto err_cleanup;

		ring[i]->cmd_next = ring[i]->cmd_base;
		ring[i]->res_next = ring[i]->res_base;
	}
	return 0;

err_cleanup:
	for (; i--; ) {
		dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
				  ring[i]->res_base, ring[i]->res_dma);
		dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
				  ring[i]->cmd_base, ring[i]->cmd_dma);
		kfree(ring[i]);
	}
	return err;
}

static int mtk_crypto_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct mtk_cryp *cryp;
	int i, err;

	cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL);
	if (!cryp)
		return -ENOMEM;

	cryp->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(cryp->base))
		return PTR_ERR(cryp->base);

	for (i = 0; i < MTK_IRQ_NUM; i++) {
		cryp->irq[i] = platform_get_irq(pdev, i);
		if (cryp->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ:%d resource info\n", i);
			return cryp->irq[i];
		}
	}

	cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp");
	if (IS_ERR(cryp->clk_cryp))
		return -EPROBE_DEFER;

	cryp->dev = &pdev->dev;
	pm_runtime_enable(cryp->dev);
	pm_runtime_get_sync(cryp->dev);

	err = clk_prepare_enable(cryp->clk_cryp);
	if (err)
		goto err_clk_cryp;

	/* Allocate four command/result descriptor rings */
	err = mtk_desc_ring_alloc(cryp);
	if (err) {
		dev_err(cryp->dev, "Unable to allocate descriptor rings.\n");
		goto err_resource;
	}

	/* Initialize hardware modules */
	err = mtk_accelerator_init(cryp);
	if (err) {
		dev_err(cryp->dev, "Failed to initialize cryptographic engine.\n");
		goto err_engine;
	}

	err = mtk_cipher_alg_register(cryp);
	if (err) {
		dev_err(cryp->dev, "Unable to register cipher algorithm.\n");
		goto err_cipher;
	}

	err = mtk_hash_alg_register(cryp);
	if (err) {
		dev_err(cryp->dev, "Unable to register hash algorithm.\n");
		goto err_hash;
	}

	platform_set_drvdata(pdev, cryp);
	return 0;

err_hash:
	mtk_cipher_alg_release(cryp);
err_cipher:
	mtk_dfe_dse_reset(cryp);
err_engine:
	mtk_desc_dma_free(cryp);
err_resource:
	clk_disable_unprepare(cryp->clk_cryp);
err_clk_cryp:
	pm_runtime_put_sync(cryp->dev);
	pm_runtime_disable(cryp->dev);

	return err;
}

static int mtk_crypto_remove(struct platform_device *pdev)
{
	struct mtk_cryp *cryp = platform_get_drvdata(pdev);

	mtk_hash_alg_release(cryp);
	mtk_cipher_alg_release(cryp);
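	/* Free descriptor ring memory only after the algorithms are unregistered */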
	mtk_desc_dma_free(cryp);

	clk_disable_unprepare(cryp->clk_cryp);

	pm_runtime_put_sync(cryp->dev);
	pm_runtime_disable(cryp->dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static const struct of_device_id of_crypto_id[] = {
	{ .compatible = "mediatek,eip97-crypto" },
	{},
};
MODULE_DEVICE_TABLE(of, of_crypto_id);

static struct platform_driver mtk_crypto_driver = {
	.probe = mtk_crypto_probe,
	.remove = mtk_crypto_remove,
	.driver = {
		.name = "mtk-crypto",
		.of_match_table = of_crypto_id,
	},
};
module_platform_driver(mtk_crypto_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ryder Lee <ryder.lee@mediatek.com>");
MODULE_DESCRIPTION("Cryptographic accelerator driver for EIP97");
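
/*
 * For illustration only: a sketch of the shape of device tree node this
 * driver binds against via the "mediatek,eip97-crypto" compatible above.
 * The unit address, register window, interrupt specifiers and clock
 * provider shown here are hypothetical placeholders; only the compatible
 * string, the "cryp" clock name, and the fact that the driver requests
 * MTK_IRQ_NUM interrupts come from this file.
 *
 *	crypto@0 {
 *		compatible = "mediatek,eip97-crypto";
 *		reg = <0x0 0x10000>;
 *		interrupts = <...>;		// one entry per MTK_IRQ_NUM
 *		clocks = <&example_clk 0>;
 *		clock-names = "cryp";
 *	};
 */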