Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: ux500 - Fix logging, make arrays const, neatening

Logging messages without newlines are possibly interleaved
with other messages. Add terminating newlines to avoid
this.

Other miscellaneous changes:

Make arrays const to reduce data size
Add pr_fmt to prefix pr_<level>, remove now unused DEV_DBG_NAME
Coalesce formats, align arguments
Remove unnecessary OOM messages as dump_stack is already done
Remove unnecessary cast of void *
Change kzalloc(sizeof(struct)...) to kzalloc(sizeof(*var), ...)
Reduce indents in struct definitions

Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Joe Perches; committed by Herbert Xu.
69d2884d 997ad290

+279 -307
drivers/crypto/ux500/hash/hash_core.c
··· 11 11 * License terms: GNU General Public License (GPL) version 2 12 12 */ 13 13 14 + #define pr_fmt(fmt) "hashX hashX: " fmt 15 + 14 16 #include <linux/clk.h> 15 17 #include <linux/device.h> 16 18 #include <linux/err.h> ··· 37 35 38 36 #include "hash_alg.h" 39 37 40 - #define DEV_DBG_NAME "hashX hashX:" 41 - 42 38 static int hash_mode; 43 39 module_param(hash_mode, int, 0); 44 40 MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); ··· 44 44 /** 45 45 * Pre-calculated empty message digests. 46 46 */ 47 - static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { 47 + static const u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { 48 48 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 49 49 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 50 50 0xaf, 0xd8, 0x07, 0x09 51 51 }; 52 52 53 - static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { 53 + static const u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { 54 54 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 55 55 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 56 56 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, ··· 58 58 }; 59 59 60 60 /* HMAC-SHA1, no key */ 61 - static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { 61 + static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { 62 62 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08, 63 63 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63, 64 64 0x70, 0x69, 0x0e, 0x1d 65 65 }; 66 66 67 67 /* HMAC-SHA256, no key */ 68 - static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = { 68 + static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = { 69 69 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec, 70 70 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5, 71 71 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53, ··· 97 97 * 98 98 */ 99 99 static void hash_messagepad(struct hash_device_data *device_data, 100 - const u32 *message, u8 index_bytes); 100 + const u32 *message, u8 index_bytes); 101 101 102 102 /** 103 103 * release_hash_device - 
Releases a previously allocated hash device. ··· 119 119 } 120 120 121 121 static void hash_dma_setup_channel(struct hash_device_data *device_data, 122 - struct device *dev) 122 + struct device *dev) 123 123 { 124 124 struct hash_platform_data *platform_data = dev->platform_data; 125 125 struct dma_slave_config conf = { ··· 127 127 .dst_addr = device_data->phybase + HASH_DMA_FIFO, 128 128 .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, 129 129 .dst_maxburst = 16, 130 - }; 130 + }; 131 131 132 132 dma_cap_zero(device_data->dma.mask); 133 133 dma_cap_set(DMA_SLAVE, device_data->dma.mask); ··· 135 135 device_data->dma.cfg_mem2hash = platform_data->mem_to_engine; 136 136 device_data->dma.chan_mem2hash = 137 137 dma_request_channel(device_data->dma.mask, 138 - platform_data->dma_filter, 139 - device_data->dma.cfg_mem2hash); 138 + platform_data->dma_filter, 139 + device_data->dma.cfg_mem2hash); 140 140 141 141 dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf); 142 142 ··· 145 145 146 146 static void hash_dma_callback(void *data) 147 147 { 148 - struct hash_ctx *ctx = (struct hash_ctx *) data; 148 + struct hash_ctx *ctx = data; 149 149 150 150 complete(&ctx->device->dma.complete); 151 151 } 152 152 153 153 static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, 154 - int len, enum dma_data_direction direction) 154 + int len, enum dma_data_direction direction) 155 155 { 156 156 struct dma_async_tx_descriptor *desc = NULL; 157 157 struct dma_chan *channel = NULL; 158 158 dma_cookie_t cookie; 159 159 160 160 if (direction != DMA_TO_DEVICE) { 161 - dev_err(ctx->device->dev, "[%s] Invalid DMA direction", 162 - __func__); 161 + dev_err(ctx->device->dev, "%s: Invalid DMA direction\n", 162 + __func__); 163 163 return -EFAULT; 164 164 } 165 165 ··· 172 172 direction); 173 173 174 174 if (!ctx->device->dma.sg_len) { 175 - dev_err(ctx->device->dev, 176 - "[%s]: Could not map the sg list (TO_DEVICE)", 177 - __func__); 175 + dev_err(ctx->device->dev, 
"%s: Could not map the sg list (TO_DEVICE)\n", 176 + __func__); 178 177 return -EFAULT; 179 178 } 180 179 181 - dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " 182 - "(TO_DEVICE)", __func__); 180 + dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n", 181 + __func__); 183 182 desc = dmaengine_prep_slave_sg(channel, 184 183 ctx->device->dma.sg, ctx->device->dma.sg_len, 185 184 direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); 186 185 if (!desc) { 187 186 dev_err(ctx->device->dev, 188 - "[%s]: device_prep_slave_sg() failed!", __func__); 187 + "%s: device_prep_slave_sg() failed!\n", __func__); 189 188 return -EFAULT; 190 189 } 191 190 ··· 204 205 chan = ctx->device->dma.chan_mem2hash; 205 206 dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); 206 207 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, 207 - ctx->device->dma.sg_len, DMA_TO_DEVICE); 208 - 208 + ctx->device->dma.sg_len, DMA_TO_DEVICE); 209 209 } 210 210 211 211 static int hash_dma_write(struct hash_ctx *ctx, 212 - struct scatterlist *sg, int len) 212 + struct scatterlist *sg, int len) 213 213 { 214 214 int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); 215 215 if (error) { 216 - dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() " 217 - "failed", __func__); 216 + dev_dbg(ctx->device->dev, 217 + "%s: hash_set_dma_transfer() failed\n", __func__); 218 218 return error; 219 219 } 220 220 ··· 243 245 if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { 244 246 if (HASH_ALGO_SHA1 == ctx->config.algorithm) { 245 247 memcpy(zero_hash, &zero_message_hash_sha1[0], 246 - SHA1_DIGEST_SIZE); 248 + SHA1_DIGEST_SIZE); 247 249 *zero_hash_size = SHA1_DIGEST_SIZE; 248 250 *zero_digest = true; 249 251 } else if (HASH_ALGO_SHA256 == 250 252 ctx->config.algorithm) { 251 253 memcpy(zero_hash, &zero_message_hash_sha256[0], 252 - SHA256_DIGEST_SIZE); 254 + SHA256_DIGEST_SIZE); 253 255 *zero_hash_size = SHA256_DIGEST_SIZE; 254 256 *zero_digest = true; 255 257 } else { 256 - 
dev_err(device_data->dev, "[%s] " 257 - "Incorrect algorithm!" 258 - , __func__); 258 + dev_err(device_data->dev, "%s: Incorrect algorithm!\n", 259 + __func__); 259 260 ret = -EINVAL; 260 261 goto out; 261 262 } ··· 262 265 if (!ctx->keylen) { 263 266 if (HASH_ALGO_SHA1 == ctx->config.algorithm) { 264 267 memcpy(zero_hash, &zero_message_hmac_sha1[0], 265 - SHA1_DIGEST_SIZE); 268 + SHA1_DIGEST_SIZE); 266 269 *zero_hash_size = SHA1_DIGEST_SIZE; 267 270 *zero_digest = true; 268 271 } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) { 269 272 memcpy(zero_hash, &zero_message_hmac_sha256[0], 270 - SHA256_DIGEST_SIZE); 273 + SHA256_DIGEST_SIZE); 271 274 *zero_hash_size = SHA256_DIGEST_SIZE; 272 275 *zero_digest = true; 273 276 } else { 274 - dev_err(device_data->dev, "[%s] " 275 - "Incorrect algorithm!" 276 - , __func__); 277 + dev_err(device_data->dev, "%s: Incorrect algorithm!\n", 278 + __func__); 277 279 ret = -EINVAL; 278 280 goto out; 279 281 } 280 282 } else { 281 - dev_dbg(device_data->dev, "[%s] Continue hash " 282 - "calculation, since hmac key avalable", 283 - __func__); 283 + dev_dbg(device_data->dev, 284 + "%s: Continue hash calculation, since hmac key available\n", 285 + __func__); 284 286 } 285 287 } 286 288 out: ··· 295 299 * This function request for disabling power (regulator) and clock, 296 300 * and could also save current hw state. 
297 301 */ 298 - static int hash_disable_power( 299 - struct hash_device_data *device_data, 300 - bool save_device_state) 302 + static int hash_disable_power(struct hash_device_data *device_data, 303 + bool save_device_state) 301 304 { 302 305 int ret = 0; 303 306 struct device *dev = device_data->dev; ··· 314 319 clk_disable(device_data->clk); 315 320 ret = regulator_disable(device_data->regulator); 316 321 if (ret) 317 - dev_err(dev, "[%s] regulator_disable() failed!", __func__); 322 + dev_err(dev, "%s: regulator_disable() failed!\n", __func__); 318 323 319 324 device_data->power_state = false; 320 325 ··· 332 337 * This function request for enabling power (regulator) and clock, 333 338 * and could also restore a previously saved hw state. 334 339 */ 335 - static int hash_enable_power( 336 - struct hash_device_data *device_data, 337 - bool restore_device_state) 340 + static int hash_enable_power(struct hash_device_data *device_data, 341 + bool restore_device_state) 338 342 { 339 343 int ret = 0; 340 344 struct device *dev = device_data->dev; ··· 342 348 if (!device_data->power_state) { 343 349 ret = regulator_enable(device_data->regulator); 344 350 if (ret) { 345 - dev_err(dev, "[%s]: regulator_enable() failed!", 346 - __func__); 351 + dev_err(dev, "%s: regulator_enable() failed!\n", 352 + __func__); 347 353 goto out; 348 354 } 349 355 ret = clk_enable(device_data->clk); 350 356 if (ret) { 351 - dev_err(dev, "[%s]: clk_enable() failed!", 352 - __func__); 357 + dev_err(dev, "%s: clk_enable() failed!\n", __func__); 353 358 ret = regulator_disable( 354 359 device_data->regulator); 355 360 goto out; ··· 359 366 if (device_data->restore_dev_state) { 360 367 if (restore_device_state) { 361 368 device_data->restore_dev_state = false; 362 - hash_resume_state(device_data, 363 - &device_data->state); 369 + hash_resume_state(device_data, &device_data->state); 364 370 } 365 371 } 366 372 out: ··· 439 447 * spec or due to a bug in the hw. 
440 448 */ 441 449 static void hash_hw_write_key(struct hash_device_data *device_data, 442 - const u8 *key, unsigned int keylen) 450 + const u8 *key, unsigned int keylen) 443 451 { 444 452 u32 word = 0; 445 453 int nwords = 1; ··· 483 491 * calculation. 484 492 */ 485 493 static int init_hash_hw(struct hash_device_data *device_data, 486 - struct hash_ctx *ctx) 494 + struct hash_ctx *ctx) 487 495 { 488 496 int ret = 0; 489 497 490 498 ret = hash_setconfiguration(device_data, &ctx->config); 491 499 if (ret) { 492 - dev_err(device_data->dev, "[%s] hash_setconfiguration() " 493 - "failed!", __func__); 500 + dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n", 501 + __func__); 494 502 return ret; 495 503 } 496 504 ··· 520 528 size -= sg->length; 521 529 522 530 /* hash_set_dma_transfer will align last nent */ 523 - if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) 524 - || (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && 525 - size > 0)) 531 + if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) || 532 + (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0)) 526 533 aligned_data = false; 527 534 528 535 sg = sg_next(sg); ··· 576 585 if (req->nbytes < HASH_DMA_ALIGN_SIZE) { 577 586 req_ctx->dma_mode = false; /* Don't use DMA */ 578 587 579 - pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct " 580 - "to CPU mode for data size < %d", 581 - __func__, HASH_DMA_ALIGN_SIZE); 588 + pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n", 589 + __func__, HASH_DMA_ALIGN_SIZE); 582 590 } else { 583 591 if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE && 584 - hash_dma_valid_data(req->src, 585 - req->nbytes)) { 592 + hash_dma_valid_data(req->src, req->nbytes)) { 586 593 req_ctx->dma_mode = true; 587 594 } else { 588 595 req_ctx->dma_mode = false; 589 - pr_debug(DEV_DBG_NAME " [%s] DMA mode, but use" 590 - " CPU mode for datalength < %d" 591 - " or non-aligned data, except " 592 - "in last nent", __func__, 593 - 
HASH_DMA_PERFORMANCE_MIN_SIZE); 596 + pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n", 597 + __func__, 598 + HASH_DMA_PERFORMANCE_MIN_SIZE); 594 599 } 595 600 } 596 601 } ··· 601 614 * the HASH hardware. 602 615 * 603 616 */ 604 - static void hash_processblock( 605 - struct hash_device_data *device_data, 606 - const u32 *message, int length) 617 + static void hash_processblock(struct hash_device_data *device_data, 618 + const u32 *message, int length) 607 619 { 608 620 int len = length / HASH_BYTES_PER_WORD; 609 621 /* ··· 627 641 * 628 642 */ 629 643 static void hash_messagepad(struct hash_device_data *device_data, 630 - const u32 *message, u8 index_bytes) 644 + const u32 *message, u8 index_bytes) 631 645 { 632 646 int nwords = 1; 633 647 ··· 652 666 653 667 /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ 654 668 HASH_SET_NBLW(index_bytes * 8); 655 - dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__, 656 - readl_relaxed(&device_data->base->din), 657 - (int)(readl_relaxed(&device_data->base->str) & 658 - HASH_STR_NBLW_MASK)); 669 + dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n", 670 + __func__, readl_relaxed(&device_data->base->din), 671 + readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK); 659 672 HASH_SET_DCAL; 660 - dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d", 661 - __func__, readl_relaxed(&device_data->base->din), 662 - (int)(readl_relaxed(&device_data->base->str) & 663 - HASH_STR_NBLW_MASK)); 673 + dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n", 674 + __func__, readl_relaxed(&device_data->base->din), 675 + readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK); 664 676 665 677 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) 666 678 cpu_relax(); ··· 688 704 * @config: Pointer to a configuration structure. 
689 705 */ 690 706 int hash_setconfiguration(struct hash_device_data *device_data, 691 - struct hash_config *config) 707 + struct hash_config *config) 692 708 { 693 709 int ret = 0; 694 710 ··· 715 731 break; 716 732 717 733 default: 718 - dev_err(device_data->dev, "[%s] Incorrect algorithm.", 719 - __func__); 734 + dev_err(device_data->dev, "%s: Incorrect algorithm\n", 735 + __func__); 720 736 return -EPERM; 721 737 } 722 738 ··· 728 744 HASH_CLEAR_BITS(&device_data->base->cr, 729 745 HASH_CR_MODE_MASK); 730 746 else if (HASH_OPER_MODE_HMAC == config->oper_mode) { 731 - HASH_SET_BITS(&device_data->base->cr, 732 - HASH_CR_MODE_MASK); 747 + HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK); 733 748 if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) { 734 749 /* Truncate key to blocksize */ 735 - dev_dbg(device_data->dev, "[%s] LKEY set", __func__); 750 + dev_dbg(device_data->dev, "%s: LKEY set\n", __func__); 736 751 HASH_SET_BITS(&device_data->base->cr, 737 - HASH_CR_LKEY_MASK); 752 + HASH_CR_LKEY_MASK); 738 753 } else { 739 - dev_dbg(device_data->dev, "[%s] LKEY cleared", 740 - __func__); 754 + dev_dbg(device_data->dev, "%s: LKEY cleared\n", 755 + __func__); 741 756 HASH_CLEAR_BITS(&device_data->base->cr, 742 757 HASH_CR_LKEY_MASK); 743 758 } 744 759 } else { /* Wrong hash mode */ 745 760 ret = -EPERM; 746 - dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", 747 - __func__); 761 + dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", 762 + __func__); 748 763 } 749 764 return ret; 750 765 } ··· 776 793 } 777 794 778 795 static int hash_process_data(struct hash_device_data *device_data, 779 - struct hash_ctx *ctx, struct hash_req_ctx *req_ctx, 780 - int msg_length, u8 *data_buffer, u8 *buffer, u8 *index) 796 + struct hash_ctx *ctx, struct hash_req_ctx *req_ctx, 797 + int msg_length, u8 *data_buffer, u8 *buffer, 798 + u8 *index) 781 799 { 782 800 int ret = 0; 783 801 u32 count; ··· 793 809 msg_length = 0; 794 810 } else { 795 811 if 
(req_ctx->updated) { 796 - 797 812 ret = hash_resume_state(device_data, 798 813 &device_data->state); 799 814 memmove(req_ctx->state.buffer, 800 - device_data->state.buffer, 801 - HASH_BLOCK_SIZE / sizeof(u32)); 815 + device_data->state.buffer, 816 + HASH_BLOCK_SIZE / sizeof(u32)); 802 817 if (ret) { 803 - dev_err(device_data->dev, "[%s] " 804 - "hash_resume_state()" 805 - " failed!", __func__); 818 + dev_err(device_data->dev, 819 + "%s: hash_resume_state() failed!\n", 820 + __func__); 806 821 goto out; 807 822 } 808 823 } else { 809 824 ret = init_hash_hw(device_data, ctx); 810 825 if (ret) { 811 - dev_err(device_data->dev, "[%s] " 812 - "init_hash_hw()" 813 - " failed!", __func__); 826 + dev_err(device_data->dev, 827 + "%s: init_hash_hw() failed!\n", 828 + __func__); 814 829 goto out; 815 830 } 816 831 req_ctx->updated = 1; ··· 821 838 * HW peripheral, otherwise we first copy data 822 839 * to a local buffer 823 840 */ 824 - if ((0 == (((u32)data_buffer) % 4)) 825 - && (0 == *index)) 841 + if ((0 == (((u32)data_buffer) % 4)) && 842 + (0 == *index)) 826 843 hash_processblock(device_data, 827 - (const u32 *) 828 - data_buffer, HASH_BLOCK_SIZE); 844 + (const u32 *)data_buffer, 845 + HASH_BLOCK_SIZE); 829 846 else { 830 - for (count = 0; count < 831 - (u32)(HASH_BLOCK_SIZE - 832 - *index); 833 - count++) { 847 + for (count = 0; 848 + count < (u32)(HASH_BLOCK_SIZE - *index); 849 + count++) { 834 850 buffer[*index + count] = 835 851 *(data_buffer + count); 836 852 } 837 853 hash_processblock(device_data, 838 - (const u32 *)buffer, 839 - HASH_BLOCK_SIZE); 854 + (const u32 *)buffer, 855 + HASH_BLOCK_SIZE); 840 856 } 841 857 hash_incrementlength(req_ctx, HASH_BLOCK_SIZE); 842 858 data_buffer += (HASH_BLOCK_SIZE - *index); ··· 847 865 &device_data->state); 848 866 849 867 memmove(device_data->state.buffer, 850 - req_ctx->state.buffer, 851 - HASH_BLOCK_SIZE / sizeof(u32)); 868 + req_ctx->state.buffer, 869 + HASH_BLOCK_SIZE / sizeof(u32)); 852 870 if (ret) { 853 - 
dev_err(device_data->dev, "[%s] " 854 - "hash_save_state()" 855 - " failed!", __func__); 871 + dev_err(device_data->dev, "%s: hash_save_state() failed!\n", 872 + __func__); 856 873 goto out; 857 874 } 858 875 } ··· 879 898 if (ret) 880 899 return ret; 881 900 882 - dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); 901 + dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); 883 902 884 903 if (req_ctx->updated) { 885 904 ret = hash_resume_state(device_data, &device_data->state); 886 905 887 906 if (ret) { 888 - dev_err(device_data->dev, "[%s] hash_resume_state() " 889 - "failed!", __func__); 907 + dev_err(device_data->dev, "%s: hash_resume_state() failed!\n", 908 + __func__); 890 909 goto out; 891 910 } 892 - 893 911 } 894 912 895 913 if (!req_ctx->updated) { 896 914 ret = hash_setconfiguration(device_data, &ctx->config); 897 915 if (ret) { 898 - dev_err(device_data->dev, "[%s] " 899 - "hash_setconfiguration() failed!", 900 - __func__); 916 + dev_err(device_data->dev, 917 + "%s: hash_setconfiguration() failed!\n", 918 + __func__); 901 919 goto out; 902 920 } 903 921 ··· 906 926 HASH_CR_DMAE_MASK); 907 927 } else { 908 928 HASH_SET_BITS(&device_data->base->cr, 909 - HASH_CR_DMAE_MASK); 929 + HASH_CR_DMAE_MASK); 910 930 HASH_SET_BITS(&device_data->base->cr, 911 - HASH_CR_PRIVN_MASK); 931 + HASH_CR_PRIVN_MASK); 912 932 } 913 933 914 934 HASH_INITIALIZE; ··· 924 944 /* Store the nents in the dma struct. 
*/ 925 945 ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL); 926 946 if (!ctx->device->dma.nents) { 927 - dev_err(device_data->dev, "[%s] " 928 - "ctx->device->dma.nents = 0", __func__); 947 + dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n", 948 + __func__); 929 949 ret = ctx->device->dma.nents; 930 950 goto out; 931 951 } 932 952 933 953 bytes_written = hash_dma_write(ctx, req->src, req->nbytes); 934 954 if (bytes_written != req->nbytes) { 935 - dev_err(device_data->dev, "[%s] " 936 - "hash_dma_write() failed!", __func__); 955 + dev_err(device_data->dev, "%s: hash_dma_write() failed!\n", 956 + __func__); 937 957 ret = bytes_written; 938 958 goto out; 939 959 } ··· 948 968 unsigned int keylen = ctx->keylen; 949 969 u8 *key = ctx->key; 950 970 951 - dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, 952 - ctx->keylen); 971 + dev_dbg(device_data->dev, "%s: keylen: %d\n", 972 + __func__, ctx->keylen); 953 973 hash_hw_write_key(device_data, key, keylen); 954 974 } 955 975 ··· 984 1004 if (ret) 985 1005 return ret; 986 1006 987 - dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); 1007 + dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); 988 1008 989 1009 if (req_ctx->updated) { 990 1010 ret = hash_resume_state(device_data, &device_data->state); 991 1011 992 1012 if (ret) { 993 - dev_err(device_data->dev, "[%s] hash_resume_state() " 994 - "failed!", __func__); 1013 + dev_err(device_data->dev, 1014 + "%s: hash_resume_state() failed!\n", __func__); 995 1015 goto out; 996 1016 } 997 1017 } else if (req->nbytes == 0 && ctx->keylen == 0) { ··· 1005 1025 ret = get_empty_message_digest(device_data, &zero_hash[0], 1006 1026 &zero_hash_size, &zero_digest); 1007 1027 if (!ret && likely(zero_hash_size == ctx->digestsize) && 1008 - zero_digest) { 1028 + zero_digest) { 1009 1029 memcpy(req->result, &zero_hash[0], ctx->digestsize); 1010 1030 goto out; 1011 1031 } else if (!ret && !zero_digest) { 1012 - 
dev_dbg(device_data->dev, "[%s] HMAC zero msg with " 1013 - "key, continue...", __func__); 1032 + dev_dbg(device_data->dev, 1033 + "%s: HMAC zero msg with key, continue...\n", 1034 + __func__); 1014 1035 } else { 1015 - dev_err(device_data->dev, "[%s] ret=%d, or wrong " 1016 - "digest size? %s", __func__, ret, 1017 - (zero_hash_size == ctx->digestsize) ? 1018 - "true" : "false"); 1036 + dev_err(device_data->dev, 1037 + "%s: ret=%d, or wrong digest size? %s\n", 1038 + __func__, ret, 1039 + zero_hash_size == ctx->digestsize ? 1040 + "true" : "false"); 1019 1041 /* Return error */ 1020 1042 goto out; 1021 1043 } 1022 1044 } else if (req->nbytes == 0 && ctx->keylen > 0) { 1023 - dev_err(device_data->dev, "[%s] Empty message with " 1024 - "keylength > 0, NOT supported.", __func__); 1045 + dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n", 1046 + __func__); 1025 1047 goto out; 1026 1048 } 1027 1049 1028 1050 if (!req_ctx->updated) { 1029 1051 ret = init_hash_hw(device_data, ctx); 1030 1052 if (ret) { 1031 - dev_err(device_data->dev, "[%s] init_hash_hw() " 1032 - "failed!", __func__); 1053 + dev_err(device_data->dev, 1054 + "%s: init_hash_hw() failed!\n", __func__); 1033 1055 goto out; 1034 1056 } 1035 1057 } ··· 1049 1067 unsigned int keylen = ctx->keylen; 1050 1068 u8 *key = ctx->key; 1051 1069 1052 - dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, 1053 - ctx->keylen); 1070 + dev_dbg(device_data->dev, "%s: keylen: %d\n", 1071 + __func__, ctx->keylen); 1054 1072 hash_hw_write_key(device_data, key, keylen); 1055 1073 } 1056 1074 ··· 1097 1115 /* Check if ctx->state.length + msg_length 1098 1116 overflows */ 1099 1117 if (msg_length > (req_ctx->state.length.low_word + msg_length) && 1100 - HASH_HIGH_WORD_MAX_VAL == 1101 - req_ctx->state.length.high_word) { 1102 - pr_err(DEV_DBG_NAME " [%s] HASH_MSG_LENGTH_OVERFLOW!", 1103 - __func__); 1118 + HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) { 1119 + pr_err("%s: 
HASH_MSG_LENGTH_OVERFLOW!\n", __func__); 1104 1120 return -EPERM; 1105 1121 } 1106 1122 ··· 1113 1133 data_buffer, buffer, &index); 1114 1134 1115 1135 if (ret) { 1116 - dev_err(device_data->dev, "[%s] hash_internal_hw_" 1117 - "update() failed!", __func__); 1136 + dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n", 1137 + __func__); 1118 1138 goto out; 1119 1139 } 1120 1140 ··· 1122 1142 } 1123 1143 1124 1144 req_ctx->state.index = index; 1125 - dev_dbg(device_data->dev, "[%s] indata length=%d, bin=%d))", 1126 - __func__, req_ctx->state.index, 1127 - req_ctx->state.bit_index); 1145 + dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n", 1146 + __func__, req_ctx->state.index, req_ctx->state.bit_index); 1128 1147 1129 1148 out: 1130 1149 release_hash_device(device_data); ··· 1137 1158 * @device_state: The state to be restored in the hash hardware 1138 1159 */ 1139 1160 int hash_resume_state(struct hash_device_data *device_data, 1140 - const struct hash_state *device_state) 1161 + const struct hash_state *device_state) 1141 1162 { 1142 1163 u32 temp_cr; 1143 1164 s32 count; 1144 1165 int hash_mode = HASH_OPER_MODE_HASH; 1145 1166 1146 1167 if (NULL == device_state) { 1147 - dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", 1148 - __func__); 1168 + dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", 1169 + __func__); 1149 1170 return -EPERM; 1150 1171 } 1151 1172 1152 1173 /* Check correctness of index and length members */ 1153 - if (device_state->index > HASH_BLOCK_SIZE 1154 - || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { 1155 - dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", 1156 - __func__); 1174 + if (device_state->index > HASH_BLOCK_SIZE || 1175 + (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { 1176 + dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", 1177 + __func__); 1157 1178 return -EPERM; 1158 1179 } 1159 1180 ··· 1177 1198 break; 1178 1199 1179 1200 
writel_relaxed(device_state->csr[count], 1180 - &device_data->base->csrx[count]); 1201 + &device_data->base->csrx[count]); 1181 1202 } 1182 1203 1183 1204 writel_relaxed(device_state->csfull, &device_data->base->csfull); ··· 1195 1216 * @device_state: The strucure where the hardware state should be saved. 1196 1217 */ 1197 1218 int hash_save_state(struct hash_device_data *device_data, 1198 - struct hash_state *device_state) 1219 + struct hash_state *device_state) 1199 1220 { 1200 1221 u32 temp_cr; 1201 1222 u32 count; 1202 1223 int hash_mode = HASH_OPER_MODE_HASH; 1203 1224 1204 1225 if (NULL == device_state) { 1205 - dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", 1206 - __func__); 1226 + dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", 1227 + __func__); 1207 1228 return -ENOTSUPP; 1208 1229 } 1209 1230 ··· 1249 1270 int hash_check_hw(struct hash_device_data *device_data) 1250 1271 { 1251 1272 /* Checking Peripheral Ids */ 1252 - if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) 1253 - && HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) 1254 - && HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) 1255 - && HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) 1256 - && HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) 1257 - && HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) 1258 - && HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) 1259 - && HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3) 1260 - ) { 1273 + if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) && 1274 + HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) && 1275 + HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) && 1276 + HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) && 1277 + HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) && 1278 + HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) && 1279 + HASH_CELL_ID2 == 
readl_relaxed(&device_data->base->cellid2) && 1280 + HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) { 1261 1281 return 0; 1262 1282 } 1263 1283 1264 - dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!", 1265 - __func__); 1284 + dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__); 1266 1285 return -ENOTSUPP; 1267 1286 } 1268 1287 ··· 1271 1294 * @algorithm: The algorithm in use. 1272 1295 */ 1273 1296 void hash_get_digest(struct hash_device_data *device_data, 1274 - u8 *digest, int algorithm) 1297 + u8 *digest, int algorithm) 1275 1298 { 1276 1299 u32 temp_hx_val, count; 1277 1300 int loop_ctr; 1278 1301 1279 1302 if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) { 1280 - dev_err(device_data->dev, "[%s] Incorrect algorithm %d", 1281 - __func__, algorithm); 1303 + dev_err(device_data->dev, "%s: Incorrect algorithm %d\n", 1304 + __func__, algorithm); 1282 1305 return; 1283 1306 } 1284 1307 ··· 1287 1310 else 1288 1311 loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32); 1289 1312 1290 - dev_dbg(device_data->dev, "[%s] digest array:(0x%x)", 1291 - __func__, (u32) digest); 1313 + dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n", 1314 + __func__, (u32) digest); 1292 1315 1293 1316 /* Copy result into digest array */ 1294 1317 for (count = 0; count < loop_ctr; count++) { ··· 1314 1337 /* Skip update for DMA, all data will be passed to DMA in final */ 1315 1338 1316 1339 if (ret) { 1317 - pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!", 1318 - __func__); 1340 + pr_err("%s: hash_hw_update() failed!\n", __func__); 1319 1341 } 1320 1342 1321 1343 return ret; ··· 1329 1353 int ret = 0; 1330 1354 struct hash_req_ctx *req_ctx = ahash_request_ctx(req); 1331 1355 1332 - pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes); 1356 + pr_debug("%s: data size: %d\n", __func__, req->nbytes); 1333 1357 1334 1358 if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode) 1335 1359 ret = hash_dma_final(req); ··· 1337 1361 ret = 
hash_hw_final(req); 1338 1362 1339 1363 if (ret) { 1340 - pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed", 1341 - __func__); 1364 + pr_err("%s: hash_hw/dma_final() failed\n", __func__); 1342 1365 } 1343 1366 1344 1367 return ret; 1345 1368 } 1346 1369 1347 1370 static int hash_setkey(struct crypto_ahash *tfm, 1348 - const u8 *key, unsigned int keylen, int alg) 1371 + const u8 *key, unsigned int keylen, int alg) 1349 1372 { 1350 1373 int ret = 0; 1351 1374 struct hash_ctx *ctx = crypto_ahash_ctx(tfm); ··· 1354 1379 */ 1355 1380 ctx->key = kmemdup(key, keylen, GFP_KERNEL); 1356 1381 if (!ctx->key) { 1357 - pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key " 1358 - "for %d\n", __func__, alg); 1382 + pr_err("%s: Failed to allocate ctx->key for %d\n", 1383 + __func__, alg); 1359 1384 return -ENOMEM; 1360 1385 } 1361 1386 ctx->keylen = keylen; ··· 1476 1501 } 1477 1502 1478 1503 static int hmac_sha1_setkey(struct crypto_ahash *tfm, 1479 - const u8 *key, unsigned int keylen) 1504 + const u8 *key, unsigned int keylen) 1480 1505 { 1481 1506 return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1); 1482 1507 } 1483 1508 1484 1509 static int hmac_sha256_setkey(struct crypto_ahash *tfm, 1485 - const u8 *key, unsigned int keylen) 1510 + const u8 *key, unsigned int keylen) 1486 1511 { 1487 1512 return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256); 1488 1513 } ··· 1503 1528 hash); 1504 1529 1505 1530 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1506 - sizeof(struct hash_req_ctx)); 1531 + sizeof(struct hash_req_ctx)); 1507 1532 1508 1533 ctx->config.data_format = HASH_DATA_8_BITS; 1509 1534 ctx->config.algorithm = hash_alg->conf.algorithm; ··· 1516 1541 1517 1542 static struct hash_algo_template hash_algs[] = { 1518 1543 { 1519 - .conf.algorithm = HASH_ALGO_SHA1, 1520 - .conf.oper_mode = HASH_OPER_MODE_HASH, 1521 - .hash = { 1522 - .init = hash_init, 1523 - .update = ahash_update, 1524 - .final = ahash_final, 1525 - .digest = ahash_sha1_digest, 1526 - 
.halg.digestsize = SHA1_DIGEST_SIZE, 1527 - .halg.statesize = sizeof(struct hash_ctx), 1528 - .halg.base = { 1529 - .cra_name = "sha1", 1530 - .cra_driver_name = "sha1-ux500", 1531 - .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1532 - CRYPTO_ALG_ASYNC, 1533 - .cra_blocksize = SHA1_BLOCK_SIZE, 1534 - .cra_ctxsize = sizeof(struct hash_ctx), 1535 - .cra_init = hash_cra_init, 1536 - .cra_module = THIS_MODULE, 1544 + .conf.algorithm = HASH_ALGO_SHA1, 1545 + .conf.oper_mode = HASH_OPER_MODE_HASH, 1546 + .hash = { 1547 + .init = hash_init, 1548 + .update = ahash_update, 1549 + .final = ahash_final, 1550 + .digest = ahash_sha1_digest, 1551 + .halg.digestsize = SHA1_DIGEST_SIZE, 1552 + .halg.statesize = sizeof(struct hash_ctx), 1553 + .halg.base = { 1554 + .cra_name = "sha1", 1555 + .cra_driver_name = "sha1-ux500", 1556 + .cra_flags = (CRYPTO_ALG_TYPE_AHASH | 1557 + CRYPTO_ALG_ASYNC), 1558 + .cra_blocksize = SHA1_BLOCK_SIZE, 1559 + .cra_ctxsize = sizeof(struct hash_ctx), 1560 + .cra_init = hash_cra_init, 1561 + .cra_module = THIS_MODULE, 1537 1562 } 1538 1563 } 1539 1564 }, 1540 1565 { 1541 - .conf.algorithm = HASH_ALGO_SHA256, 1542 - .conf.oper_mode = HASH_OPER_MODE_HASH, 1543 - .hash = { 1544 - .init = hash_init, 1545 - .update = ahash_update, 1546 - .final = ahash_final, 1547 - .digest = ahash_sha256_digest, 1548 - .halg.digestsize = SHA256_DIGEST_SIZE, 1549 - .halg.statesize = sizeof(struct hash_ctx), 1550 - .halg.base = { 1551 - .cra_name = "sha256", 1552 - .cra_driver_name = "sha256-ux500", 1553 - .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1554 - CRYPTO_ALG_ASYNC, 1555 - .cra_blocksize = SHA256_BLOCK_SIZE, 1556 - .cra_ctxsize = sizeof(struct hash_ctx), 1557 - .cra_type = &crypto_ahash_type, 1558 - .cra_init = hash_cra_init, 1559 - .cra_module = THIS_MODULE, 1560 - } 1566 + .conf.algorithm = HASH_ALGO_SHA256, 1567 + .conf.oper_mode = HASH_OPER_MODE_HASH, 1568 + .hash = { 1569 + .init = hash_init, 1570 + .update = ahash_update, 1571 + .final = ahash_final, 1572 + .digest = 
ahash_sha256_digest, 1573 + .halg.digestsize = SHA256_DIGEST_SIZE, 1574 + .halg.statesize = sizeof(struct hash_ctx), 1575 + .halg.base = { 1576 + .cra_name = "sha256", 1577 + .cra_driver_name = "sha256-ux500", 1578 + .cra_flags = (CRYPTO_ALG_TYPE_AHASH | 1579 + CRYPTO_ALG_ASYNC), 1580 + .cra_blocksize = SHA256_BLOCK_SIZE, 1581 + .cra_ctxsize = sizeof(struct hash_ctx), 1582 + .cra_type = &crypto_ahash_type, 1583 + .cra_init = hash_cra_init, 1584 + .cra_module = THIS_MODULE, 1561 1585 } 1562 - 1586 + } 1563 1587 }, 1564 1588 { 1565 - .conf.algorithm = HASH_ALGO_SHA1, 1566 - .conf.oper_mode = HASH_OPER_MODE_HMAC, 1589 + .conf.algorithm = HASH_ALGO_SHA1, 1590 + .conf.oper_mode = HASH_OPER_MODE_HMAC, 1567 1591 .hash = { 1568 - .init = hash_init, 1569 - .update = ahash_update, 1570 - .final = ahash_final, 1571 - .digest = hmac_sha1_digest, 1572 - .setkey = hmac_sha1_setkey, 1573 - .halg.digestsize = SHA1_DIGEST_SIZE, 1574 - .halg.statesize = sizeof(struct hash_ctx), 1575 - .halg.base = { 1576 - .cra_name = "hmac(sha1)", 1577 - .cra_driver_name = "hmac-sha1-ux500", 1578 - .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1579 - CRYPTO_ALG_ASYNC, 1580 - .cra_blocksize = SHA1_BLOCK_SIZE, 1581 - .cra_ctxsize = sizeof(struct hash_ctx), 1582 - .cra_type = &crypto_ahash_type, 1583 - .cra_init = hash_cra_init, 1584 - .cra_module = THIS_MODULE, 1585 - } 1592 + .init = hash_init, 1593 + .update = ahash_update, 1594 + .final = ahash_final, 1595 + .digest = hmac_sha1_digest, 1596 + .setkey = hmac_sha1_setkey, 1597 + .halg.digestsize = SHA1_DIGEST_SIZE, 1598 + .halg.statesize = sizeof(struct hash_ctx), 1599 + .halg.base = { 1600 + .cra_name = "hmac(sha1)", 1601 + .cra_driver_name = "hmac-sha1-ux500", 1602 + .cra_flags = (CRYPTO_ALG_TYPE_AHASH | 1603 + CRYPTO_ALG_ASYNC), 1604 + .cra_blocksize = SHA1_BLOCK_SIZE, 1605 + .cra_ctxsize = sizeof(struct hash_ctx), 1606 + .cra_type = &crypto_ahash_type, 1607 + .cra_init = hash_cra_init, 1608 + .cra_module = THIS_MODULE, 1586 1609 } 1610 + } 1587 1611 }, 
1588 1612 { 1589 - .conf.algorithm = HASH_ALGO_SHA256, 1590 - .conf.oper_mode = HASH_OPER_MODE_HMAC, 1591 - .hash = { 1592 - .init = hash_init, 1593 - .update = ahash_update, 1594 - .final = ahash_final, 1595 - .digest = hmac_sha256_digest, 1596 - .setkey = hmac_sha256_setkey, 1597 - .halg.digestsize = SHA256_DIGEST_SIZE, 1598 - .halg.statesize = sizeof(struct hash_ctx), 1599 - .halg.base = { 1600 - .cra_name = "hmac(sha256)", 1601 - .cra_driver_name = "hmac-sha256-ux500", 1602 - .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1603 - CRYPTO_ALG_ASYNC, 1604 - .cra_blocksize = SHA256_BLOCK_SIZE, 1605 - .cra_ctxsize = sizeof(struct hash_ctx), 1606 - .cra_type = &crypto_ahash_type, 1607 - .cra_init = hash_cra_init, 1608 - .cra_module = THIS_MODULE, 1609 - } 1613 + .conf.algorithm = HASH_ALGO_SHA256, 1614 + .conf.oper_mode = HASH_OPER_MODE_HMAC, 1615 + .hash = { 1616 + .init = hash_init, 1617 + .update = ahash_update, 1618 + .final = ahash_final, 1619 + .digest = hmac_sha256_digest, 1620 + .setkey = hmac_sha256_setkey, 1621 + .halg.digestsize = SHA256_DIGEST_SIZE, 1622 + .halg.statesize = sizeof(struct hash_ctx), 1623 + .halg.base = { 1624 + .cra_name = "hmac(sha256)", 1625 + .cra_driver_name = "hmac-sha256-ux500", 1626 + .cra_flags = (CRYPTO_ALG_TYPE_AHASH | 1627 + CRYPTO_ALG_ASYNC), 1628 + .cra_blocksize = SHA256_BLOCK_SIZE, 1629 + .cra_ctxsize = sizeof(struct hash_ctx), 1630 + .cra_type = &crypto_ahash_type, 1631 + .cra_init = hash_cra_init, 1632 + .cra_module = THIS_MODULE, 1610 1633 } 1634 + } 1611 1635 } 1612 1636 }; 1613 1637 ··· 1623 1649 ret = crypto_register_ahash(&hash_algs[i].hash); 1624 1650 if (ret) { 1625 1651 count = i; 1626 - dev_err(device_data->dev, "[%s] alg registration failed", 1652 + dev_err(device_data->dev, "%s: alg registration failed\n", 1627 1653 hash_algs[i].hash.halg.base.cra_driver_name); 1628 1654 goto unreg; 1629 1655 } ··· 1657 1683 struct hash_device_data *device_data; 1658 1684 struct device *dev = &pdev->dev; 1659 1685 1660 - device_data = 
kzalloc(sizeof(struct hash_device_data), GFP_ATOMIC); 1686 + device_data = kzalloc(sizeof(*device_data), GFP_ATOMIC); 1661 1687 if (!device_data) { 1662 - dev_dbg(dev, "[%s] kzalloc() failed!", __func__); 1663 1688 ret = -ENOMEM; 1664 1689 goto out; 1665 1690 } ··· 1668 1695 1669 1696 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1670 1697 if (!res) { 1671 - dev_dbg(dev, "[%s] platform_get_resource() failed!", __func__); 1698 + dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__); 1672 1699 ret = -ENODEV; 1673 1700 goto out_kfree; 1674 1701 } 1675 1702 1676 1703 res = request_mem_region(res->start, resource_size(res), pdev->name); 1677 1704 if (res == NULL) { 1678 - dev_dbg(dev, "[%s] request_mem_region() failed!", __func__); 1705 + dev_dbg(dev, "%s: request_mem_region() failed!\n", __func__); 1679 1706 ret = -EBUSY; 1680 1707 goto out_kfree; 1681 1708 } ··· 1683 1710 device_data->phybase = res->start; 1684 1711 device_data->base = ioremap(res->start, resource_size(res)); 1685 1712 if (!device_data->base) { 1686 - dev_err(dev, "[%s] ioremap() failed!", 1687 - __func__); 1713 + dev_err(dev, "%s: ioremap() failed!\n", __func__); 1688 1714 ret = -ENOMEM; 1689 1715 goto out_free_mem; 1690 1716 } ··· 1693 1721 /* Enable power for HASH1 hardware block */ 1694 1722 device_data->regulator = regulator_get(dev, "v-ape"); 1695 1723 if (IS_ERR(device_data->regulator)) { 1696 - dev_err(dev, "[%s] regulator_get() failed!", __func__); 1724 + dev_err(dev, "%s: regulator_get() failed!\n", __func__); 1697 1725 ret = PTR_ERR(device_data->regulator); 1698 1726 device_data->regulator = NULL; 1699 1727 goto out_unmap; ··· 1702 1730 /* Enable the clock for HASH1 hardware block */ 1703 1731 device_data->clk = clk_get(dev, NULL); 1704 1732 if (IS_ERR(device_data->clk)) { 1705 - dev_err(dev, "[%s] clk_get() failed!", __func__); 1733 + dev_err(dev, "%s: clk_get() failed!\n", __func__); 1706 1734 ret = PTR_ERR(device_data->clk); 1707 1735 goto out_regulator; 1708 1736 } 
1709 1737 1710 1738 ret = clk_prepare(device_data->clk); 1711 1739 if (ret) { 1712 - dev_err(dev, "[%s] clk_prepare() failed!", __func__); 1740 + dev_err(dev, "%s: clk_prepare() failed!\n", __func__); 1713 1741 goto out_clk; 1714 1742 } 1715 1743 1716 1744 /* Enable device power (and clock) */ 1717 1745 ret = hash_enable_power(device_data, false); 1718 1746 if (ret) { 1719 - dev_err(dev, "[%s]: hash_enable_power() failed!", __func__); 1747 + dev_err(dev, "%s: hash_enable_power() failed!\n", __func__); 1720 1748 goto out_clk_unprepare; 1721 1749 } 1722 1750 1723 1751 ret = hash_check_hw(device_data); 1724 1752 if (ret) { 1725 - dev_err(dev, "[%s] hash_check_hw() failed!", __func__); 1753 + dev_err(dev, "%s: hash_check_hw() failed!\n", __func__); 1726 1754 goto out_power; 1727 1755 } 1728 1756 ··· 1738 1766 1739 1767 ret = ahash_algs_register_all(device_data); 1740 1768 if (ret) { 1741 - dev_err(dev, "[%s] ahash_algs_register_all() " 1742 - "failed!", __func__); 1769 + dev_err(dev, "%s: ahash_algs_register_all() failed!\n", 1770 + __func__); 1743 1771 goto out_power; 1744 1772 } 1745 1773 ··· 1782 1810 1783 1811 device_data = platform_get_drvdata(pdev); 1784 1812 if (!device_data) { 1785 - dev_err(dev, "[%s]: platform_get_drvdata() failed!", 1786 - __func__); 1813 + dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__); 1787 1814 return -ENOMEM; 1788 1815 } 1789 1816 ··· 1812 1841 ahash_algs_unregister_all(device_data); 1813 1842 1814 1843 if (hash_disable_power(device_data, false)) 1815 - dev_err(dev, "[%s]: hash_disable_power() failed", 1844 + dev_err(dev, "%s: hash_disable_power() failed\n", 1816 1845 __func__); 1817 1846 1818 1847 clk_unprepare(device_data->clk); ··· 1841 1870 1842 1871 device_data = platform_get_drvdata(pdev); 1843 1872 if (!device_data) { 1844 - dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", 1845 - __func__); 1873 + dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n", 1874 + __func__); 1846 1875 return; 1847 1876 } 
1848 1877 ··· 1851 1880 /* current_ctx allocates a device, NULL = unallocated */ 1852 1881 if (!device_data->current_ctx) { 1853 1882 if (down_trylock(&driver_data.device_allocation)) 1854 - dev_dbg(&pdev->dev, "[%s]: Cryp still in use!" 1855 - "Shutting down anyway...", __func__); 1883 + dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n", 1884 + __func__); 1856 1885 /** 1857 1886 * (Allocate the device) 1858 1887 * Need to set this to non-null (dummy) value, ··· 1877 1906 release_mem_region(res->start, resource_size(res)); 1878 1907 1879 1908 if (hash_disable_power(device_data, false)) 1880 - dev_err(&pdev->dev, "[%s] hash_disable_power() failed", 1881 - __func__); 1909 + dev_err(&pdev->dev, "%s: hash_disable_power() failed\n", 1910 + __func__); 1882 1911 } 1883 1912 1884 1913 /** ··· 1893 1922 1894 1923 device_data = dev_get_drvdata(dev); 1895 1924 if (!device_data) { 1896 - dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__); 1925 + dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__); 1897 1926 return -ENOMEM; 1898 1927 } 1899 1928 ··· 1904 1933 1905 1934 if (device_data->current_ctx == ++temp_ctx) { 1906 1935 if (down_interruptible(&driver_data.device_allocation)) 1907 - dev_dbg(dev, "[%s]: down_interruptible() failed", 1936 + dev_dbg(dev, "%s: down_interruptible() failed\n", 1908 1937 __func__); 1909 1938 ret = hash_disable_power(device_data, false); 1910 1939 1911 - } else 1940 + } else { 1912 1941 ret = hash_disable_power(device_data, true); 1942 + } 1913 1943 1914 1944 if (ret) 1915 - dev_err(dev, "[%s]: hash_disable_power()", __func__); 1945 + dev_err(dev, "%s: hash_disable_power()\n", __func__); 1916 1946 1917 1947 return ret; 1918 1948 } ··· 1930 1958 1931 1959 device_data = dev_get_drvdata(dev); 1932 1960 if (!device_data) { 1933 - dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__); 1961 + dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__); 1934 1962 return -ENOMEM; 1935 1963 } 1936 1964 
··· 1945 1973 ret = hash_enable_power(device_data, true); 1946 1974 1947 1975 if (ret) 1948 - dev_err(dev, "[%s]: hash_enable_power() failed!", __func__); 1976 + dev_err(dev, "%s: hash_enable_power() failed!\n", __func__); 1949 1977 1950 1978 return ret; 1951 1979 } ··· 1953 1981 static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume); 1954 1982 1955 1983 static const struct of_device_id ux500_hash_match[] = { 1956 - { .compatible = "stericsson,ux500-hash" }, 1957 - { }, 1984 + { .compatible = "stericsson,ux500-hash" }, 1985 + { }, 1958 1986 }; 1959 1987 1960 1988 static struct platform_driver hash_driver = {