Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: sep: SEP update

This is basically a rewrite so there isn't a nice easy to present way of
providing this as a patch series. This patch is a pull of Mark's new driver into
the upstream staging area. On top of that are a series of patches by
Andy Shevchenko to make it build on the current tree, fix a few things and
even get it past sparse.

The new driver supports the kernel crypto layer, passes the coding style checks,
passes human taste checks and has proper kernel-doc formatted comments.

I've then folded back in some later fixes it was missing that got applied
to the kernel tree.

This should be ready for more serious review with a view to migration from
the staging tree shortly.

Signed-off-by: Mark Allyn <mark.a.allyn@intel.com>
[Forward port and some bug fixing]
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
[Fold and tweaks for 3.2]
Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

authored by

Mark Allyn and committed by
Greg Kroah-Hartman
ff3d9c3c dcd6c922

+8958 -3213
+2 -1
drivers/staging/sep/Kconfig
··· 3 3 depends on PCI 4 4 help 5 5 Discretix SEP driver; used for the security processor subsystem 6 - on bard the Intel Mobile Internet Device. 6 + on board the Intel Mobile Internet Device and adds SEP availability 7 + to the kernel crypto infrastructure 7 8 8 9 The driver's name is sep_driver. 9 10
+3 -2
drivers/staging/sep/Makefile
··· 1 - obj-$(CONFIG_DX_SEP) := sep_driver.o 2 - 1 + ccflags-y += -I$(srctree)/$(src) 2 + obj-$(CONFIG_DX_SEP) += sep_driver.o 3 + sep_driver-objs := sep_crypto.o sep_main.o
+2 -3
drivers/staging/sep/TODO
··· 1 1 Todo's so far (from Alan Cox) 2 - - Check whether it can be plugged into any of the kernel crypto API 3 - interfaces - Crypto API 'glue' is still not ready to submit 4 - - Clean up un-needed debug prints - Started to work on this 2 + - Clean up unused ioctls 3 + - Clean up unused fields in ioctl structures
+3768
drivers/staging/sep/sep_crypto.c
··· 1 + /* 2 + * 3 + * sep_crypto.c - Crypto interface structures 4 + * 5 + * Copyright(c) 2009-2011 Intel Corporation. All rights reserved. 6 + * Contributions(c) 2009-2010 Discretix. All rights reserved. 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms of the GNU General Public License as published by the Free 10 + * Software Foundation; version 2 of the License. 11 + * 12 + * This program is distributed in the hope that it will be useful, but WITHOUT 13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 + * more details. 16 + * 17 + * You should have received a copy of the GNU General Public License along with 18 + * this program; if not, write to the Free Software Foundation, Inc., 59 19 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 20 + * 21 + * CONTACTS: 22 + * 23 + * Mark Allyn mark.a.allyn@intel.com 24 + * Jayant Mangalampalli jayant.mangalampalli@intel.com 25 + * 26 + * CHANGES: 27 + * 28 + * 2009.06.26 Initial publish 29 + * 2010.09.14 Upgrade to Medfield 30 + * 2011.02.22 Enable Kernel Crypto 31 + * 32 + */ 33 + 34 + /* #define DEBUG */ 35 + #include <linux/init.h> 36 + #include <linux/module.h> 37 + #include <linux/miscdevice.h> 38 + #include <linux/fs.h> 39 + #include <linux/cdev.h> 40 + #include <linux/kdev_t.h> 41 + #include <linux/mutex.h> 42 + #include <linux/sched.h> 43 + #include <linux/mm.h> 44 + #include <linux/poll.h> 45 + #include <linux/wait.h> 46 + #include <linux/pci.h> 47 + #include <linux/pci.h> 48 + #include <linux/pm_runtime.h> 49 + #include <linux/err.h> 50 + #include <linux/device.h> 51 + #include <linux/errno.h> 52 + #include <linux/interrupt.h> 53 + #include <linux/kernel.h> 54 + #include <linux/clk.h> 55 + #include <linux/irq.h> 56 + #include <linux/io.h> 57 + #include <linux/platform_device.h> 58 + #include <linux/list.h> 59 + #include <linux/dma-mapping.h> 
60 + #include <linux/delay.h> 61 + #include <linux/jiffies.h> 62 + #include <linux/workqueue.h> 63 + #include <linux/crypto.h> 64 + #include <crypto/internal/hash.h> 65 + #include <crypto/scatterwalk.h> 66 + #include <crypto/sha.h> 67 + #include <crypto/md5.h> 68 + #include <crypto/aes.h> 69 + #include <crypto/des.h> 70 + #include <crypto/hash.h> 71 + #include "sep_driver_hw_defs.h" 72 + #include "sep_driver_config.h" 73 + #include "sep_driver_api.h" 74 + #include "sep_dev.h" 75 + #include "sep_crypto.h" 76 + 77 + /* Globals for queuing */ 78 + static spinlock_t queue_lock; 79 + static struct crypto_queue sep_queue; 80 + 81 + /* Declare of dequeuer */ 82 + static void sep_dequeuer(void *data); 83 + 84 + /* TESTING */ 85 + /** 86 + * crypto_sep_dump_message - dump the message that is pending 87 + * @sep: SEP device 88 + * This will only print dump if DEBUG is set; it does 89 + * follow kernel debug print enabling 90 + */ 91 + static void crypto_sep_dump_message(struct sep_system_ctx *sctx) 92 + { 93 + #if 0 94 + u32 *p; 95 + u32 *i; 96 + int count; 97 + 98 + p = sctx->sep_used->shared_addr; 99 + i = (u32 *)sctx->msg; 100 + for (count = 0; count < 40 * 4; count += 4) 101 + dev_dbg(&sctx->sep_used->pdev->dev, 102 + "[PID%d] Word %d of the message is %x (local)%x\n", 103 + current->pid, count/4, *p++, *i++); 104 + #endif 105 + } 106 + 107 + /** 108 + * sep_do_callback 109 + * @work: pointer to work_struct 110 + * This is what is called by the queue; it is generic so that it 111 + * can be used by any type of operation as each different callback 112 + * function can use the data parameter in its own way 113 + */ 114 + static void sep_do_callback(struct work_struct *work) 115 + { 116 + struct sep_work_struct *sep_work = container_of(work, 117 + struct sep_work_struct, work); 118 + if (sep_work != NULL) { 119 + (sep_work->callback)(sep_work->data); 120 + kfree(sep_work); 121 + } else { 122 + pr_debug("sep crypto: do callback - NULL container\n"); 123 + } 124 + } 125 + 126 
+ /** 127 + * sep_submit_work 128 + * @work_queue: pointer to struct_workqueue 129 + * @funct: pointer to function to execute 130 + * @data: pointer to data; function will know 131 + * how to use it 132 + * This is a generic API to submit something to 133 + * the queue. The callback function will depend 134 + * on what operation is to be done 135 + */ 136 + static int sep_submit_work(struct workqueue_struct *work_queue, 137 + void(*funct)(void *), 138 + void *data) 139 + { 140 + struct sep_work_struct *sep_work; 141 + int result; 142 + 143 + sep_work = kmalloc(sizeof(struct sep_work_struct), GFP_ATOMIC); 144 + 145 + if (sep_work == NULL) { 146 + pr_debug("sep crypto: cant allocate work structure\n"); 147 + return -ENOMEM; 148 + } 149 + 150 + sep_work->callback = funct; 151 + sep_work->data = data; 152 + INIT_WORK(&sep_work->work, sep_do_callback); 153 + result = queue_work(work_queue, &sep_work->work); 154 + if (!result) { 155 + pr_debug("sep_crypto: queue_work failed\n"); 156 + return -EINVAL; 157 + } 158 + return 0; 159 + } 160 + 161 + /** 162 + * sep_alloc_sg_buf - 163 + * @sep: pointer to struct sep_device 164 + * @size: total size of area 165 + * @block_size: minimum size of chunks 166 + * each page is minimum or modulo this size 167 + * @returns: pointer to struct scatterlist for new 168 + * buffer 169 + **/ 170 + static struct scatterlist *sep_alloc_sg_buf( 171 + struct sep_device *sep, 172 + size_t size, 173 + size_t block_size) 174 + { 175 + u32 nbr_pages; 176 + u32 ct1; 177 + void *buf; 178 + size_t current_size; 179 + size_t real_page_size; 180 + 181 + struct scatterlist *sg, *sg_temp; 182 + 183 + if (size == 0) 184 + return NULL; 185 + 186 + dev_dbg(&sep->pdev->dev, "sep alloc sg buf\n"); 187 + 188 + current_size = 0; 189 + nbr_pages = 0; 190 + real_page_size = PAGE_SIZE - (PAGE_SIZE % block_size); 191 + /** 192 + * The size of each page must be modulo of the operation 193 + * block size; increment by the modified page size until 194 + * the total size 
is reached, then you have the number of 195 + * pages 196 + */ 197 + while (current_size < size) { 198 + current_size += real_page_size; 199 + nbr_pages += 1; 200 + } 201 + 202 + sg = kmalloc((sizeof(struct scatterlist) * nbr_pages), GFP_ATOMIC); 203 + if (!sg) { 204 + dev_warn(&sep->pdev->dev, "Cannot allocate page for new sg\n"); 205 + return NULL; 206 + } 207 + 208 + sg_init_table(sg, nbr_pages); 209 + 210 + current_size = 0; 211 + sg_temp = sg; 212 + for (ct1 = 0; ct1 < nbr_pages; ct1 += 1) { 213 + buf = (void *)get_zeroed_page(GFP_ATOMIC); 214 + if (!buf) { 215 + dev_warn(&sep->pdev->dev, 216 + "Cannot allocate page for new buffer\n"); 217 + kfree(sg); 218 + return NULL; 219 + } 220 + 221 + sg_set_buf(sg_temp, buf, real_page_size); 222 + if ((size - current_size) > real_page_size) { 223 + sg_temp->length = real_page_size; 224 + current_size += real_page_size; 225 + } else { 226 + sg_temp->length = (size - current_size); 227 + current_size = size; 228 + } 229 + sg_temp = sg_next(sg); 230 + } 231 + return sg; 232 + } 233 + 234 + /** 235 + * sep_free_sg_buf - 236 + * @sg: pointer to struct scatterlist; points to area to free 237 + */ 238 + static void sep_free_sg_buf(struct scatterlist *sg) 239 + { 240 + struct scatterlist *sg_temp = sg; 241 + while (sg_temp) { 242 + free_page((unsigned long)sg_virt(sg_temp)); 243 + sg_temp = sg_next(sg_temp); 244 + } 245 + kfree(sg); 246 + } 247 + 248 + /** 249 + * sep_copy_sg - 250 + * @sep: pointer to struct sep_device 251 + * @sg_src: pointer to struct scatterlist for source 252 + * @sg_dst: pointer to struct scatterlist for destination 253 + * @size: size (in bytes) of data to copy 254 + * 255 + * Copy data from one scatterlist to another; both must 256 + * be the same size 257 + */ 258 + static void sep_copy_sg( 259 + struct sep_device *sep, 260 + struct scatterlist *sg_src, 261 + struct scatterlist *sg_dst, 262 + size_t size) 263 + { 264 + u32 seg_size; 265 + u32 in_offset, out_offset; 266 + 267 + u32 count = 0; 268 + 
struct scatterlist *sg_src_tmp = sg_src; 269 + struct scatterlist *sg_dst_tmp = sg_dst; 270 + in_offset = 0; 271 + out_offset = 0; 272 + 273 + dev_dbg(&sep->pdev->dev, "sep copy sg\n"); 274 + 275 + if ((sg_src == NULL) || (sg_dst == NULL) || (size == 0)) 276 + return; 277 + 278 + dev_dbg(&sep->pdev->dev, "sep copy sg not null\n"); 279 + 280 + while (count < size) { 281 + if ((sg_src_tmp->length - in_offset) > 282 + (sg_dst_tmp->length - out_offset)) 283 + seg_size = sg_dst_tmp->length - out_offset; 284 + else 285 + seg_size = sg_src_tmp->length - in_offset; 286 + 287 + if (seg_size > (size - count)) 288 + seg_size = (size = count); 289 + 290 + memcpy(sg_virt(sg_dst_tmp) + out_offset, 291 + sg_virt(sg_src_tmp) + in_offset, 292 + seg_size); 293 + 294 + in_offset += seg_size; 295 + out_offset += seg_size; 296 + count += seg_size; 297 + 298 + if (in_offset >= sg_src_tmp->length) { 299 + sg_src_tmp = sg_next(sg_src_tmp); 300 + in_offset = 0; 301 + } 302 + 303 + if (out_offset >= sg_dst_tmp->length) { 304 + sg_dst_tmp = sg_next(sg_dst_tmp); 305 + out_offset = 0; 306 + } 307 + } 308 + } 309 + 310 + /** 311 + * sep_oddball_pages - 312 + * @sep: pointer to struct sep_device 313 + * @sg: pointer to struct scatterlist - buffer to check 314 + * @size: total data size 315 + * @blocksize: minimum block size; must be multiples of this size 316 + * @to_copy: 1 means do copy, 0 means do not copy 317 + * @new_sg: pointer to location to put pointer to new sg area 318 + * @returns: 1 if new scatterlist is needed; 0 if not needed; 319 + * error value if operation failed 320 + * 321 + * The SEP device requires all pages to be multiples of the 322 + * minimum block size appropriate for the operation 323 + * This function check all pages; if any are oddball sizes 324 + * (not multiple of block sizes), it creates a new scatterlist. 325 + * If the to_copy parameter is set to 1, then a scatter list 326 + * copy is performed. 
The pointer to the new scatterlist is 327 + * put into the address supplied by the new_sg parameter; if 328 + * no new scatterlist is needed, then a NULL is put into 329 + * the location at new_sg. 330 + * 331 + */ 332 + static int sep_oddball_pages( 333 + struct sep_device *sep, 334 + struct scatterlist *sg, 335 + size_t data_size, 336 + u32 block_size, 337 + struct scatterlist **new_sg, 338 + u32 do_copy) 339 + { 340 + struct scatterlist *sg_temp; 341 + u32 flag; 342 + u32 nbr_pages, page_count; 343 + 344 + dev_dbg(&sep->pdev->dev, "sep oddball\n"); 345 + if ((sg == NULL) || (data_size == 0) || (data_size < block_size)) 346 + return 0; 347 + 348 + dev_dbg(&sep->pdev->dev, "sep oddball not null\n"); 349 + flag = 0; 350 + nbr_pages = 0; 351 + page_count = 0; 352 + sg_temp = sg; 353 + 354 + while (sg_temp) { 355 + nbr_pages += 1; 356 + sg_temp = sg_next(sg_temp); 357 + } 358 + 359 + sg_temp = sg; 360 + while ((sg_temp) && (flag == 0)) { 361 + page_count += 1; 362 + if (sg_temp->length % block_size) 363 + flag = 1; 364 + else 365 + sg_temp = sg_next(sg_temp); 366 + } 367 + 368 + /* Do not process if last (or only) page is oddball */ 369 + if (nbr_pages == page_count) 370 + flag = 0; 371 + 372 + if (flag) { 373 + dev_dbg(&sep->pdev->dev, "sep oddball processing\n"); 374 + *new_sg = sep_alloc_sg_buf(sep, data_size, block_size); 375 + if (*new_sg == NULL) { 376 + dev_warn(&sep->pdev->dev, "cannot allocate new sg\n"); 377 + return -ENOMEM; 378 + } 379 + 380 + if (do_copy) 381 + sep_copy_sg(sep, sg, *new_sg, data_size); 382 + 383 + return 1; 384 + } else { 385 + return 0; 386 + } 387 + } 388 + 389 + /** 390 + * sep_copy_offset_sg - 391 + * @sep: pointer to struct sep_device; 392 + * @sg: pointer to struct scatterlist 393 + * @offset: offset into scatterlist memory 394 + * @dst: place to put data 395 + * @len: length of data 396 + * @returns: number of bytes copies 397 + * 398 + * This copies data from scatterlist buffer 399 + * offset from beginning - it is needed for 400 
+ * handling tail data in hash 401 + */ 402 + static size_t sep_copy_offset_sg( 403 + struct sep_device *sep, 404 + struct scatterlist *sg, 405 + u32 offset, 406 + void *dst, 407 + u32 len) 408 + { 409 + size_t page_start; 410 + size_t page_end; 411 + size_t offset_within_page; 412 + size_t length_within_page; 413 + size_t length_remaining; 414 + size_t current_offset; 415 + 416 + /* Find which page is beginning of segment */ 417 + page_start = 0; 418 + page_end = sg->length; 419 + while ((sg) && (offset > page_end)) { 420 + page_start += sg->length; 421 + sg = sg_next(sg); 422 + if (sg) 423 + page_end += sg->length; 424 + } 425 + 426 + if (sg == NULL) 427 + return -ENOMEM; 428 + 429 + offset_within_page = offset - page_start; 430 + if ((sg->length - offset_within_page) >= len) { 431 + /* All within this page */ 432 + memcpy(dst, sg_virt(sg) + offset_within_page, len); 433 + return len; 434 + } else { 435 + /* Scattered multiple pages */ 436 + current_offset = 0; 437 + length_remaining = len; 438 + while ((sg) && (current_offset < len)) { 439 + length_within_page = sg->length - offset_within_page; 440 + if (length_within_page >= length_remaining) { 441 + memcpy(dst+current_offset, 442 + sg_virt(sg) + offset_within_page, 443 + length_remaining); 444 + length_remaining = 0; 445 + current_offset = len; 446 + } else { 447 + memcpy(dst+current_offset, 448 + sg_virt(sg) + offset_within_page, 449 + length_within_page); 450 + length_remaining -= length_within_page; 451 + current_offset += length_within_page; 452 + offset_within_page = 0; 453 + sg = sg_next(sg); 454 + } 455 + } 456 + 457 + if (sg == NULL) 458 + return -ENOMEM; 459 + } 460 + return len; 461 + } 462 + 463 + /** 464 + * partial_overlap - 465 + * @src_ptr: source pointer 466 + * @dst_ptr: destination pointer 467 + * @nbytes: number of bytes 468 + * @returns: 0 for success; -1 for failure 469 + * We cannot have any partial overlap. 
Total overlap 470 + * where src is the same as dst is okay 471 + */ 472 + static int partial_overlap(void *src_ptr, void *dst_ptr, u32 nbytes) 473 + { 474 + /* Check for partial overlap */ 475 + if (src_ptr != dst_ptr) { 476 + if (src_ptr < dst_ptr) { 477 + if ((src_ptr + nbytes) > dst_ptr) 478 + return -EINVAL; 479 + } else { 480 + if ((dst_ptr + nbytes) > src_ptr) 481 + return -EINVAL; 482 + } 483 + } 484 + 485 + return 0; 486 + } 487 + 488 + /* Debug - prints only if DEBUG is defined; follows kernel debug model */ 489 + static void sep_dump(struct sep_device *sep, char *stg, void *start, int len) 490 + { 491 + #if 0 492 + int ct1; 493 + u8 *ptt; 494 + 495 + dev_dbg(&sep->pdev->dev, 496 + "Dump of %s starting at %08lx for %08x bytes\n", 497 + stg, (unsigned long)start, len); 498 + for (ct1 = 0; ct1 < len; ct1 += 1) { 499 + ptt = (u8 *)(start + ct1); 500 + dev_dbg(&sep->pdev->dev, "%02x ", *ptt); 501 + if (ct1 % 16 == 15) 502 + dev_dbg(&sep->pdev->dev, "\n"); 503 + } 504 + dev_dbg(&sep->pdev->dev, "\n"); 505 + #endif 506 + } 507 + 508 + /* Debug - prints only if DEBUG is defined; follows kernel debug model */ 509 + static void sep_dump_sg(struct sep_device *sep, char *stg, 510 + struct scatterlist *sg) 511 + { 512 + #if 0 513 + int ct1, ct2; 514 + u8 *ptt; 515 + 516 + dev_dbg(&sep->pdev->dev, "Dump of scatterlist %s\n", stg); 517 + 518 + ct1 = 0; 519 + while (sg) { 520 + dev_dbg(&sep->pdev->dev, "page %x\n size %x", ct1, 521 + sg->length); 522 + dev_dbg(&sep->pdev->dev, "phys addr is %lx", 523 + (unsigned long)sg_phys(sg)); 524 + ptt = sg_virt(sg); 525 + for (ct2 = 0; ct2 < sg->length; ct2 += 1) { 526 + dev_dbg(&sep->pdev->dev, "byte %x is %02x\n", 527 + ct2, (unsigned char)*(ptt + ct2)); 528 + } 529 + 530 + ct1 += 1; 531 + sg = sg_next(sg); 532 + } 533 + dev_dbg(&sep->pdev->dev, "\n"); 534 + #endif 535 + } 536 + 537 + /** 538 + * RFC2451: Weak key check 539 + * Returns: 1 (weak), 0 (not weak) 540 + */ 541 + static int sep_weak_key(const u8 *key, unsigned int 
keylen) 542 + { 543 + static const u8 parity[] = { 544 + 8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8, 545 + 0, 8, 8, 0, 8, 0, 0, 8, 8, 546 + 0, 0, 8, 0, 8, 8, 3, 547 + 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0, 548 + 8, 0, 0, 8, 0, 8, 8, 0, 0, 549 + 8, 8, 0, 8, 0, 0, 8, 550 + 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0, 551 + 8, 0, 0, 8, 0, 8, 8, 0, 0, 552 + 8, 8, 0, 8, 0, 0, 8, 553 + 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8, 554 + 0, 8, 8, 0, 8, 0, 0, 8, 8, 555 + 0, 0, 8, 0, 8, 8, 0, 556 + 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0, 557 + 8, 0, 0, 8, 0, 8, 8, 0, 0, 558 + 8, 8, 0, 8, 0, 0, 8, 559 + 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8, 560 + 0, 8, 8, 0, 8, 0, 0, 8, 8, 561 + 0, 0, 8, 0, 8, 8, 0, 562 + 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8, 563 + 0, 8, 8, 0, 8, 0, 0, 8, 8, 564 + 0, 0, 8, 0, 8, 8, 0, 565 + 4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0, 566 + 8, 5, 0, 8, 0, 8, 8, 0, 0, 567 + 8, 8, 0, 8, 0, 6, 8, 568 + }; 569 + 570 + u32 n, w; 571 + 572 + n = parity[key[0]]; n <<= 4; 573 + n |= parity[key[1]]; n <<= 4; 574 + n |= parity[key[2]]; n <<= 4; 575 + n |= parity[key[3]]; n <<= 4; 576 + n |= parity[key[4]]; n <<= 4; 577 + n |= parity[key[5]]; n <<= 4; 578 + n |= parity[key[6]]; n <<= 4; 579 + n |= parity[key[7]]; 580 + w = 0x88888888L; 581 + 582 + /* 1 in 10^10 keys passes this test */ 583 + if (!((n - (w >> 3)) & w)) { 584 + if (n < 0x41415151) { 585 + if (n < 0x31312121) { 586 + if (n < 0x14141515) { 587 + /* 01 01 01 01 01 01 01 01 */ 588 + if (n == 0x11111111) 589 + goto weak; 590 + /* 01 1F 01 1F 01 0E 01 0E */ 591 + if (n == 0x13131212) 592 + goto weak; 593 + } else { 594 + /* 01 E0 01 E0 01 F1 01 F1 */ 595 + if (n == 0x14141515) 596 + goto weak; 597 + /* 01 FE 01 FE 01 FE 01 FE */ 598 + if (n == 0x16161616) 599 + goto weak; 600 + } 601 + } else { 602 + if (n < 0x34342525) { 603 + /* 1F 01 1F 01 0E 01 0E 01 */ 604 + if (n == 0x31312121) 605 + goto weak; 606 + /* 1F 1F 1F 1F 0E 0E 0E 0E (?) 
*/ 607 + if (n == 0x33332222) 608 + goto weak; 609 + } else { 610 + /* 1F E0 1F E0 0E F1 0E F1 */ 611 + if (n == 0x34342525) 612 + goto weak; 613 + /* 1F FE 1F FE 0E FE 0E FE */ 614 + if (n == 0x36362626) 615 + goto weak; 616 + } 617 + } 618 + } else { 619 + if (n < 0x61616161) { 620 + if (n < 0x44445555) { 621 + /* E0 01 E0 01 F1 01 F1 01 */ 622 + if (n == 0x41415151) 623 + goto weak; 624 + /* E0 1F E0 1F F1 0E F1 0E */ 625 + if (n == 0x43435252) 626 + goto weak; 627 + } else { 628 + /* E0 E0 E0 E0 F1 F1 F1 F1 (?) */ 629 + if (n == 0x44445555) 630 + goto weak; 631 + /* E0 FE E0 FE F1 FE F1 FE */ 632 + if (n == 0x46465656) 633 + goto weak; 634 + } 635 + } else { 636 + if (n < 0x64646565) { 637 + /* FE 01 FE 01 FE 01 FE 01 */ 638 + if (n == 0x61616161) 639 + goto weak; 640 + /* FE 1F FE 1F FE 0E FE 0E */ 641 + if (n == 0x63636262) 642 + goto weak; 643 + } else { 644 + /* FE E0 FE E0 FE F1 FE F1 */ 645 + if (n == 0x64646565) 646 + goto weak; 647 + /* FE FE FE FE FE FE FE FE */ 648 + if (n == 0x66666666) 649 + goto weak; 650 + } 651 + } 652 + } 653 + } 654 + return 0; 655 + weak: 656 + return 1; 657 + } 658 + /** 659 + * sep_sg_nents 660 + */ 661 + static u32 sep_sg_nents(struct scatterlist *sg) 662 + { 663 + u32 ct1 = 0; 664 + while (sg) { 665 + ct1 += 1; 666 + sg = sg_next(sg); 667 + } 668 + 669 + return ct1; 670 + } 671 + 672 + /** 673 + * sep_start_msg - 674 + * @sctx: pointer to struct sep_system_ctx 675 + * @returns: offset to place for the next word in the message 676 + * Set up pointer in message pool for new message 677 + */ 678 + static u32 sep_start_msg(struct sep_system_ctx *sctx) 679 + { 680 + u32 *word_ptr; 681 + sctx->msg_len_words = 2; 682 + sctx->msgptr = sctx->msg; 683 + memset(sctx->msg, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); 684 + sctx->msgptr += sizeof(u32) * 2; 685 + word_ptr = (u32 *)sctx->msgptr; 686 + *word_ptr = SEP_START_MSG_TOKEN; 687 + return sizeof(u32) * 2; 688 + } 689 + 690 + /** 691 + * sep_end_msg - 692 + * @sctx: pointer 
to struct sep_system_ctx 693 + * @messages_offset: current message offset 694 + * Returns: 0 for success; <0 otherwise 695 + * End message; set length and CRC; and 696 + * send interrupt to the SEP 697 + */ 698 + static void sep_end_msg(struct sep_system_ctx *sctx, u32 msg_offset) 699 + { 700 + u32 *word_ptr; 701 + /* Msg size goes into msg after token */ 702 + sctx->msg_len_words = msg_offset / sizeof(u32) + 1; 703 + word_ptr = (u32 *)sctx->msgptr; 704 + word_ptr += 1; 705 + *word_ptr = sctx->msg_len_words; 706 + 707 + /* CRC (currently 0) goes at end of msg */ 708 + word_ptr = (u32 *)(sctx->msgptr + msg_offset); 709 + *word_ptr = 0; 710 + } 711 + 712 + /** 713 + * sep_start_inbound_msg - 714 + * @sctx: pointer to struct sep_system_ctx 715 + * @msg_offset: offset to place for the next word in the message 716 + * @returns: 0 for success; error value for failure 717 + * Set up pointer in message pool for inbound message 718 + */ 719 + static u32 sep_start_inbound_msg(struct sep_system_ctx *sctx, u32 *msg_offset) 720 + { 721 + u32 *word_ptr; 722 + u32 token; 723 + u32 error = SEP_OK; 724 + 725 + *msg_offset = sizeof(u32) * 2; 726 + word_ptr = (u32 *)sctx->msgptr; 727 + token = *word_ptr; 728 + sctx->msg_len_words = *(word_ptr + 1); 729 + 730 + if (token != SEP_START_MSG_TOKEN) { 731 + error = SEP_INVALID_START; 732 + goto end_function; 733 + } 734 + 735 + end_function: 736 + 737 + return error; 738 + } 739 + 740 + /** 741 + * sep_write_msg - 742 + * @sctx: pointer to struct sep_system_ctx 743 + * @in_addr: pointer to start of parameter 744 + * @size: size of parameter to copy (in bytes) 745 + * @max_size: size to move up offset; SEP mesg is in word sizes 746 + * @msg_offset: pointer to current offset (is updated) 747 + * @byte_array: flag ti indicate wheter endian must be changed 748 + * Copies data into the message area from caller 749 + */ 750 + static void sep_write_msg(struct sep_system_ctx *sctx, void *in_addr, 751 + u32 size, u32 max_size, u32 *msg_offset, u32 
byte_array) 752 + { 753 + u32 *word_ptr; 754 + void *void_ptr; 755 + void_ptr = sctx->msgptr + *msg_offset; 756 + word_ptr = (u32 *)void_ptr; 757 + memcpy(void_ptr, in_addr, size); 758 + *msg_offset += max_size; 759 + 760 + /* Do we need to manipulate endian? */ 761 + if (byte_array) { 762 + u32 i; 763 + for (i = 0; i < ((size + 3) / 4); i += 1) 764 + *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i)); 765 + } 766 + } 767 + 768 + /** 769 + * sep_make_header 770 + * @sctx: pointer to struct sep_system_ctx 771 + * @msg_offset: pointer to current offset (is updated) 772 + * @op_code: op code to put into message 773 + * Puts op code into message and updates offset 774 + */ 775 + static void sep_make_header(struct sep_system_ctx *sctx, u32 *msg_offset, 776 + u32 op_code) 777 + { 778 + u32 *word_ptr; 779 + 780 + *msg_offset = sep_start_msg(sctx); 781 + word_ptr = (u32 *)(sctx->msgptr + *msg_offset); 782 + *word_ptr = op_code; 783 + *msg_offset += sizeof(u32); 784 + } 785 + 786 + 787 + 788 + /** 789 + * sep_read_msg - 790 + * @sctx: pointer to struct sep_system_ctx 791 + * @in_addr: pointer to start of parameter 792 + * @size: size of parameter to copy (in bytes) 793 + * @max_size: size to move up offset; SEP mesg is in word sizes 794 + * @msg_offset: pointer to current offset (is updated) 795 + * @byte_array: flag ti indicate wheter endian must be changed 796 + * Copies data out of the message area to caller 797 + */ 798 + static void sep_read_msg(struct sep_system_ctx *sctx, void *in_addr, 799 + u32 size, u32 max_size, u32 *msg_offset, u32 byte_array) 800 + { 801 + u32 *word_ptr; 802 + void *void_ptr; 803 + void_ptr = sctx->msgptr + *msg_offset; 804 + word_ptr = (u32 *)void_ptr; 805 + 806 + /* Do we need to manipulate endian? 
*/ 807 + if (byte_array) { 808 + u32 i; 809 + for (i = 0; i < ((size + 3) / 4); i += 1) 810 + *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i)); 811 + } 812 + 813 + memcpy(in_addr, void_ptr, size); 814 + *msg_offset += max_size; 815 + } 816 + 817 + /** 818 + * sep_verify_op - 819 + * @sctx: pointer to struct sep_system_ctx 820 + * @op_code: expected op_code 821 + * @msg_offset: pointer to current offset (is updated) 822 + * @returns: 0 for success; error for failure 823 + */ 824 + static u32 sep_verify_op(struct sep_system_ctx *sctx, u32 op_code, 825 + u32 *msg_offset) 826 + { 827 + u32 error; 828 + u32 in_ary[2]; 829 + 830 + struct sep_device *sep = sctx->sep_used; 831 + 832 + dev_dbg(&sep->pdev->dev, "dumping return message\n"); 833 + error = sep_start_inbound_msg(sctx, msg_offset); 834 + if (error) { 835 + dev_warn(&sep->pdev->dev, 836 + "sep_start_inbound_msg error\n"); 837 + return error; 838 + } 839 + 840 + sep_read_msg(sctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2, 841 + msg_offset, 0); 842 + 843 + if (in_ary[0] != op_code) { 844 + dev_warn(&sep->pdev->dev, 845 + "sep got back wrong opcode\n"); 846 + dev_warn(&sep->pdev->dev, 847 + "got back %x; expected %x\n", 848 + in_ary[0], op_code); 849 + return SEP_WRONG_OPCODE; 850 + } 851 + 852 + if (in_ary[1] != SEP_OK) { 853 + dev_warn(&sep->pdev->dev, 854 + "sep execution error\n"); 855 + dev_warn(&sep->pdev->dev, 856 + "got back %x; expected %x\n", 857 + in_ary[1], SEP_OK); 858 + return in_ary[0]; 859 + } 860 + 861 + return 0; 862 + } 863 + 864 + /** 865 + * sep_read_context - 866 + * @sctx: pointer to struct sep_system_ctx 867 + * @msg_offset: point to current place in SEP msg; is updated 868 + * @dst: pointer to place to put the context 869 + * @len: size of the context structure (differs for crypro/hash) 870 + * This function reads the context from the msg area 871 + * There is a special way the vendor needs to have the maximum 872 + * length calculated so that the msg_offset is updated properly; 873 + * it skips 
over some words in the msg area depending on the size 874 + * of the context 875 + */ 876 + static void sep_read_context(struct sep_system_ctx *sctx, u32 *msg_offset, 877 + void *dst, u32 len) 878 + { 879 + u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32); 880 + sep_read_msg(sctx, dst, len, max_length, msg_offset, 0); 881 + } 882 + 883 + /** 884 + * sep_write_context - 885 + * @sctx: pointer to struct sep_system_ctx 886 + * @msg_offset: point to current place in SEP msg; is updated 887 + * @src: pointer to the current context 888 + * @len: size of the context structure (differs for crypro/hash) 889 + * This function writes the context to the msg area 890 + * There is a special way the vendor needs to have the maximum 891 + * length calculated so that the msg_offset is updated properly; 892 + * it skips over some words in the msg area depending on the size 893 + * of the context 894 + */ 895 + static void sep_write_context(struct sep_system_ctx *sctx, u32 *msg_offset, 896 + void *src, u32 len) 897 + { 898 + u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32); 899 + sep_write_msg(sctx, src, len, max_length, msg_offset, 0); 900 + } 901 + 902 + /** 903 + * sep_clear_out - 904 + * @sctx: pointer to struct sep_system_ctx 905 + * Clear out crypto related values in sep device structure 906 + * to enable device to be used by anyone; either kernel 907 + * crypto or userspace app via middleware 908 + */ 909 + static void sep_clear_out(struct sep_system_ctx *sctx) 910 + { 911 + if (sctx->src_sg_hold) { 912 + sep_free_sg_buf(sctx->src_sg_hold); 913 + sctx->src_sg_hold = NULL; 914 + } 915 + 916 + if (sctx->dst_sg_hold) { 917 + sep_free_sg_buf(sctx->dst_sg_hold); 918 + sctx->dst_sg_hold = NULL; 919 + } 920 + 921 + sctx->src_sg = NULL; 922 + sctx->dst_sg = NULL; 923 + 924 + sep_free_dma_table_data_handler(sctx->sep_used, &sctx->dma_ctx); 925 + 926 + if (sctx->i_own_sep) { 927 + /** 928 + * The following unlocks the sep and makes it available 929 + * to any other 
application 930 + * First, null out crypto entries in sep before relesing it 931 + */ 932 + sctx->sep_used->current_hash_req = NULL; 933 + sctx->sep_used->current_cypher_req = NULL; 934 + sctx->sep_used->current_request = 0; 935 + sctx->sep_used->current_hash_stage = 0; 936 + sctx->sep_used->sctx = NULL; 937 + sctx->sep_used->in_kernel = 0; 938 + 939 + sctx->call_status.status = 0; 940 + 941 + /* Remove anything confidentail */ 942 + memset(sctx->sep_used->shared_addr, 0, 943 + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); 944 + 945 + sep_queue_status_remove(sctx->sep_used, &sctx->queue_elem); 946 + 947 + #ifdef SEP_ENABLE_RUNTIME_PM 948 + sctx->sep_used->in_use = 0; 949 + pm_runtime_mark_last_busy(&sctx->sep_used->pdev->dev); 950 + pm_runtime_put_autosuspend(&sctx->sep_used->pdev->dev); 951 + #endif 952 + 953 + clear_bit(SEP_WORKING_LOCK_BIT, &sctx->sep_used->in_use_flags); 954 + sctx->sep_used->pid_doing_transaction = 0; 955 + 956 + dev_dbg(&sctx->sep_used->pdev->dev, 957 + "[PID%d] waking up next transaction\n", 958 + current->pid); 959 + 960 + clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, 961 + &sctx->sep_used->in_use_flags); 962 + wake_up(&sctx->sep_used->event_transactions); 963 + 964 + sctx->i_own_sep = 0; 965 + } 966 + } 967 + 968 + /** 969 + * Release crypto infrastructure from EINPROGRESS and 970 + * clear sep_dev so that SEP is available to anyone 971 + */ 972 + static void sep_crypto_release(struct sep_system_ctx *sctx, u32 error) 973 + { 974 + struct ahash_request *hash_req = sctx->current_hash_req; 975 + struct ablkcipher_request *cypher_req = 976 + sctx->current_cypher_req; 977 + struct sep_device *sep = sctx->sep_used; 978 + 979 + sep_clear_out(sctx); 980 + 981 + if (cypher_req != NULL) { 982 + if (cypher_req->base.complete == NULL) { 983 + dev_dbg(&sep->pdev->dev, 984 + "release is null for cypher!"); 985 + } else { 986 + cypher_req->base.complete( 987 + &cypher_req->base, error); 988 + } 989 + } 990 + 991 + if (hash_req != NULL) { 992 + if 
(hash_req->base.complete == NULL) { 993 + dev_dbg(&sep->pdev->dev, 994 + "release is null for hash!"); 995 + } else { 996 + hash_req->base.complete( 997 + &hash_req->base, error); 998 + } 999 + } 1000 + } 1001 + 1002 + /** 1003 + * This is where we grab the sep itself and tell it to do something. 1004 + * It will sleep if the sep is currently busy 1005 + * and it will return 0 if sep is now ours; error value if there 1006 + * were problems 1007 + */ 1008 + static int sep_crypto_take_sep(struct sep_system_ctx *sctx) 1009 + { 1010 + struct sep_device *sep = sctx->sep_used; 1011 + int result; 1012 + struct sep_msgarea_hdr *my_msg_header; 1013 + 1014 + my_msg_header = (struct sep_msgarea_hdr *)sctx->msg; 1015 + 1016 + /* add to status queue */ 1017 + sctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode, 1018 + sctx->nbytes, current->pid, 1019 + current->comm, sizeof(current->comm)); 1020 + 1021 + if (!sctx->queue_elem) { 1022 + dev_dbg(&sep->pdev->dev, "[PID%d] updating queue" 1023 + " status error\n", current->pid); 1024 + return -EINVAL; 1025 + } 1026 + 1027 + /* get the device; this can sleep */ 1028 + result = sep_wait_transaction(sep); 1029 + if (result) 1030 + return result; 1031 + 1032 + if (sep_dev->power_save_setup == 1) 1033 + pm_runtime_get_sync(&sep_dev->pdev->dev); 1034 + 1035 + /* Copy in the message */ 1036 + memcpy(sep->shared_addr, sctx->msg, 1037 + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); 1038 + 1039 + /* Copy in the dcb information if there is any */ 1040 + if (sctx->dcb_region) { 1041 + result = sep_activate_dcb_dmatables_context(sep, 1042 + &sctx->dcb_region, &sctx->dmatables_region, 1043 + sctx->dma_ctx); 1044 + if (result) 1045 + return result; 1046 + } 1047 + 1048 + /* Mark the device so we know how to finish the job in the tasklet */ 1049 + if (sctx->current_hash_req) 1050 + sep->current_hash_req = sctx->current_hash_req; 1051 + else 1052 + sep->current_cypher_req = sctx->current_cypher_req; 1053 + 1054 + 
sep->current_request = sctx->current_request; 1055 + sep->current_hash_stage = sctx->current_hash_stage; 1056 + sep->sctx = sctx; 1057 + sep->in_kernel = 1; 1058 + sctx->i_own_sep = 1; 1059 + 1060 + result = sep_send_command_handler(sep); 1061 + 1062 + dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n", 1063 + current->pid); 1064 + 1065 + if (!result) { 1066 + set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, 1067 + &sctx->call_status.status); 1068 + dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n", 1069 + current->pid); 1070 + } 1071 + 1072 + return result; 1073 + } 1074 + 1075 + /* This needs to be run as a work queue as it can be put asleep */ 1076 + static void sep_crypto_block(void *data) 1077 + { 1078 + int int_error; 1079 + u32 msg_offset; 1080 + static u32 msg[10]; 1081 + void *src_ptr; 1082 + void *dst_ptr; 1083 + 1084 + static char small_buf[100]; 1085 + ssize_t copy_result; 1086 + int result; 1087 + 1088 + u32 max_length; 1089 + struct scatterlist *new_sg; 1090 + struct ablkcipher_request *req; 1091 + struct sep_block_ctx *bctx; 1092 + struct crypto_ablkcipher *tfm; 1093 + struct sep_system_ctx *sctx; 1094 + 1095 + req = (struct ablkcipher_request *)data; 1096 + bctx = ablkcipher_request_ctx(req); 1097 + tfm = crypto_ablkcipher_reqtfm(req); 1098 + sctx = crypto_ablkcipher_ctx(tfm); 1099 + 1100 + /* start the walk on scatterlists */ 1101 + ablkcipher_walk_init(&bctx->walk, req->src, req->dst, req->nbytes); 1102 + dev_dbg(&sctx->sep_used->pdev->dev, "sep crypto block data size of %x\n", 1103 + req->nbytes); 1104 + 1105 + int_error = ablkcipher_walk_phys(req, &bctx->walk); 1106 + if (int_error) { 1107 + dev_warn(&sctx->sep_used->pdev->dev, "walk phys error %x\n", 1108 + int_error); 1109 + sep_crypto_release(sctx, -ENOMEM); 1110 + return; 1111 + } 1112 + 1113 + /* check iv */ 1114 + if (bctx->des_opmode == SEP_DES_CBC) { 1115 + if (!bctx->walk.iv) { 1116 + dev_warn(&sctx->sep_used->pdev->dev, "no iv found\n"); 1117 + sep_crypto_release(sctx, 
-EINVAL); 1118 + return; 1119 + } 1120 + 1121 + memcpy(bctx->iv, bctx->walk.iv, SEP_DES_IV_SIZE_BYTES); 1122 + sep_dump(sctx->sep_used, "iv", bctx->iv, SEP_DES_IV_SIZE_BYTES); 1123 + } 1124 + 1125 + if (bctx->aes_opmode == SEP_AES_CBC) { 1126 + if (!bctx->walk.iv) { 1127 + dev_warn(&sctx->sep_used->pdev->dev, "no iv found\n"); 1128 + sep_crypto_release(sctx, -EINVAL); 1129 + return; 1130 + } 1131 + 1132 + memcpy(bctx->iv, bctx->walk.iv, SEP_AES_IV_SIZE_BYTES); 1133 + sep_dump(sctx->sep_used, "iv", bctx->iv, SEP_AES_IV_SIZE_BYTES); 1134 + } 1135 + 1136 + dev_dbg(&sctx->sep_used->pdev->dev, 1137 + "crypto block: src is %lx dst is %lx\n", 1138 + (unsigned long)req->src, (unsigned long)req->dst); 1139 + 1140 + /* Make sure all pages are even block */ 1141 + int_error = sep_oddball_pages(sctx->sep_used, req->src, 1142 + req->nbytes, bctx->walk.blocksize, &new_sg, 1); 1143 + 1144 + if (int_error < 0) { 1145 + dev_warn(&sctx->sep_used->pdev->dev, "oddball page eerror\n"); 1146 + sep_crypto_release(sctx, -ENOMEM); 1147 + return; 1148 + } else if (int_error == 1) { 1149 + sctx->src_sg = new_sg; 1150 + sctx->src_sg_hold = new_sg; 1151 + } else { 1152 + sctx->src_sg = req->src; 1153 + sctx->src_sg_hold = NULL; 1154 + } 1155 + 1156 + int_error = sep_oddball_pages(sctx->sep_used, req->dst, 1157 + req->nbytes, bctx->walk.blocksize, &new_sg, 0); 1158 + 1159 + if (int_error < 0) { 1160 + dev_warn(&sctx->sep_used->pdev->dev, "walk phys error %x\n", 1161 + int_error); 1162 + sep_crypto_release(sctx, -ENOMEM); 1163 + return; 1164 + } else if (int_error == 1) { 1165 + sctx->dst_sg = new_sg; 1166 + sctx->dst_sg_hold = new_sg; 1167 + } else { 1168 + sctx->dst_sg = req->dst; 1169 + sctx->dst_sg_hold = NULL; 1170 + } 1171 + 1172 + /* Do we need to perform init; ie; send key to sep? 
*/ 1173 + if (sctx->key_sent == 0) { 1174 + 1175 + dev_dbg(&sctx->sep_used->pdev->dev, "sending key\n"); 1176 + 1177 + /* put together message to SEP */ 1178 + /* Start with op code */ 1179 + sep_make_header(sctx, &msg_offset, bctx->init_opcode); 1180 + 1181 + /* now deal with IV */ 1182 + if (bctx->init_opcode == SEP_DES_INIT_OPCODE) { 1183 + if (bctx->des_opmode == SEP_DES_CBC) { 1184 + sep_write_msg(sctx, bctx->iv, 1185 + SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4, 1186 + &msg_offset, 1); 1187 + sep_dump(sctx->sep_used, "initial IV", 1188 + bctx->walk.iv, SEP_DES_IV_SIZE_BYTES); 1189 + } else { 1190 + /* Skip if ECB */ 1191 + msg_offset += 4 * sizeof(u32); 1192 + } 1193 + } else { 1194 + max_length = ((SEP_AES_IV_SIZE_BYTES + 3) / 1195 + sizeof(u32)) * sizeof(u32); 1196 + if (bctx->aes_opmode == SEP_AES_CBC) { 1197 + sep_write_msg(sctx, bctx->iv, 1198 + SEP_AES_IV_SIZE_BYTES, max_length, 1199 + &msg_offset, 1); 1200 + sep_dump(sctx->sep_used, "initial IV", 1201 + bctx->walk.iv, SEP_AES_IV_SIZE_BYTES); 1202 + } else { 1203 + /* Skip if ECB */ 1204 + msg_offset += max_length; 1205 + } 1206 + } 1207 + 1208 + /* load the key */ 1209 + if (bctx->init_opcode == SEP_DES_INIT_OPCODE) { 1210 + sep_write_msg(sctx, (void *)&sctx->key.des.key1, 1211 + sizeof(u32) * 8, sizeof(u32) * 8, 1212 + &msg_offset, 1); 1213 + 1214 + msg[0] = (u32)sctx->des_nbr_keys; 1215 + msg[1] = (u32)bctx->des_encmode; 1216 + msg[2] = (u32)bctx->des_opmode; 1217 + 1218 + sep_write_msg(sctx, (void *)msg, 1219 + sizeof(u32) * 3, sizeof(u32) * 3, 1220 + &msg_offset, 0); 1221 + } else { 1222 + sep_write_msg(sctx, (void *)&sctx->key.aes, 1223 + sctx->keylen, 1224 + SEP_AES_MAX_KEY_SIZE_BYTES, 1225 + &msg_offset, 1); 1226 + 1227 + msg[0] = (u32)sctx->aes_key_size; 1228 + msg[1] = (u32)bctx->aes_encmode; 1229 + msg[2] = (u32)bctx->aes_opmode; 1230 + msg[3] = (u32)0; /* Secret key is not used */ 1231 + sep_write_msg(sctx, (void *)msg, 1232 + sizeof(u32) * 4, sizeof(u32) * 4, 1233 + &msg_offset, 0); 1234 + } 
1235 + 1236 + } else { 1237 + 1238 + /* set nbytes for queue status */ 1239 + sctx->nbytes = req->nbytes; 1240 + 1241 + /* Key already done; this is for data */ 1242 + dev_dbg(&sctx->sep_used->pdev->dev, "sending data\n"); 1243 + 1244 + sep_dump_sg(sctx->sep_used, 1245 + "block sg in", sctx->src_sg); 1246 + 1247 + /* check for valid data and proper spacing */ 1248 + src_ptr = sg_virt(sctx->src_sg); 1249 + dst_ptr = sg_virt(sctx->dst_sg); 1250 + 1251 + if (!src_ptr || !dst_ptr || 1252 + (sctx->current_cypher_req->nbytes % 1253 + crypto_ablkcipher_blocksize(tfm))) { 1254 + 1255 + dev_warn(&sctx->sep_used->pdev->dev, 1256 + "cipher block size odd\n"); 1257 + dev_warn(&sctx->sep_used->pdev->dev, 1258 + "cipher block size is %x\n", 1259 + crypto_ablkcipher_blocksize(tfm)); 1260 + dev_warn(&sctx->sep_used->pdev->dev, 1261 + "cipher data size is %x\n", 1262 + sctx->current_cypher_req->nbytes); 1263 + sep_crypto_release(sctx, -EINVAL); 1264 + return; 1265 + } 1266 + 1267 + if (partial_overlap(src_ptr, dst_ptr, 1268 + sctx->current_cypher_req->nbytes)) { 1269 + dev_warn(&sctx->sep_used->pdev->dev, 1270 + "block partial overlap\n"); 1271 + sep_crypto_release(sctx, -EINVAL); 1272 + return; 1273 + } 1274 + 1275 + /* Put together the message */ 1276 + sep_make_header(sctx, &msg_offset, bctx->block_opcode); 1277 + 1278 + /* If des, and size is 1 block, put directly in msg */ 1279 + if ((bctx->block_opcode == SEP_DES_BLOCK_OPCODE) && 1280 + (req->nbytes == crypto_ablkcipher_blocksize(tfm))) { 1281 + 1282 + dev_dbg(&sctx->sep_used->pdev->dev, 1283 + "writing out one block des\n"); 1284 + 1285 + copy_result = sg_copy_to_buffer( 1286 + sctx->src_sg, sep_sg_nents(sctx->src_sg), 1287 + small_buf, crypto_ablkcipher_blocksize(tfm)); 1288 + 1289 + if (copy_result != crypto_ablkcipher_blocksize(tfm)) { 1290 + dev_warn(&sctx->sep_used->pdev->dev, 1291 + "des block copy faild\n"); 1292 + sep_crypto_release(sctx, -ENOMEM); 1293 + return; 1294 + } 1295 + 1296 + /* Put data into message */ 
1297 + sep_write_msg(sctx, small_buf, 1298 + crypto_ablkcipher_blocksize(tfm), 1299 + crypto_ablkcipher_blocksize(tfm) * 2, 1300 + &msg_offset, 1); 1301 + 1302 + /* Put size into message */ 1303 + sep_write_msg(sctx, &req->nbytes, 1304 + sizeof(u32), sizeof(u32), &msg_offset, 0); 1305 + } else { 1306 + /* Otherwise, fill out dma tables */ 1307 + sctx->dcb_input_data.app_in_address = src_ptr; 1308 + sctx->dcb_input_data.data_in_size = req->nbytes; 1309 + sctx->dcb_input_data.app_out_address = dst_ptr; 1310 + sctx->dcb_input_data.block_size = 1311 + crypto_ablkcipher_blocksize(tfm); 1312 + sctx->dcb_input_data.tail_block_size = 0; 1313 + sctx->dcb_input_data.is_applet = 0; 1314 + sctx->dcb_input_data.src_sg = sctx->src_sg; 1315 + sctx->dcb_input_data.dst_sg = sctx->dst_sg; 1316 + 1317 + result = sep_create_dcb_dmatables_context_kernel( 1318 + sctx->sep_used, 1319 + &sctx->dcb_region, 1320 + &sctx->dmatables_region, 1321 + &sctx->dma_ctx, 1322 + &sctx->dcb_input_data, 1323 + 1); 1324 + if (result) { 1325 + dev_warn(&sctx->sep_used->pdev->dev, 1326 + "crypto dma table create failed\n"); 1327 + sep_crypto_release(sctx, -EINVAL); 1328 + return; 1329 + } 1330 + 1331 + /* Portion of msg is nulled (no data) */ 1332 + msg[0] = (u32)0; 1333 + msg[1] = (u32)0; 1334 + msg[2] = (u32)0; 1335 + msg[3] = (u32)0; 1336 + msg[4] = (u32)0; 1337 + sep_write_msg(sctx, (void *)msg, 1338 + sizeof(u32) * 5, 1339 + sizeof(u32) * 5, 1340 + &msg_offset, 0); 1341 + } 1342 + 1343 + /* Write context into message */ 1344 + if (bctx->block_opcode == SEP_DES_BLOCK_OPCODE) { 1345 + sep_write_context(sctx, &msg_offset, 1346 + &bctx->des_private_ctx, 1347 + sizeof(struct sep_des_private_context)); 1348 + sep_dump(sctx->sep_used, "ctx to block des", 1349 + &bctx->des_private_ctx, 40); 1350 + } else { 1351 + sep_write_context(sctx, &msg_offset, 1352 + &bctx->aes_private_ctx, 1353 + sizeof(struct sep_aes_private_context)); 1354 + sep_dump(sctx->sep_used, "ctx to block aes", 1355 + &bctx->aes_private_ctx, 
20); 1356 + } 1357 + } 1358 + 1359 + /* conclude message and then tell sep to do its thing */ 1360 + sctx->done_with_transaction = 0; 1361 + 1362 + sep_end_msg(sctx, msg_offset); 1363 + result = sep_crypto_take_sep(sctx); 1364 + if (result) { 1365 + dev_warn(&sctx->sep_used->pdev->dev, 1366 + "sep_crypto_take_sep failed\n"); 1367 + sep_crypto_release(sctx, -EINVAL); 1368 + return; 1369 + } 1370 + 1371 + /** 1372 + * Sep is now working. Lets wait up to 5 seconds 1373 + * for completion. If it does not complete, we will do 1374 + * a crypto release with -EINVAL to release the 1375 + * kernel crypto infrastructure and let the system 1376 + * continue to boot up 1377 + * We have to wait this long because some crypto 1378 + * operations can take a while 1379 + */ 1380 + 1381 + dev_dbg(&sctx->sep_used->pdev->dev, 1382 + "waiting for done with transaction\n"); 1383 + 1384 + sctx->end_time = jiffies + (SEP_TRANSACTION_WAIT_TIME * HZ); 1385 + while ((time_before(jiffies, sctx->end_time)) && 1386 + (!sctx->done_with_transaction)) 1387 + schedule(); 1388 + 1389 + dev_dbg(&sctx->sep_used->pdev->dev, 1390 + "done waiting for done with transaction\n"); 1391 + 1392 + /* are we done? 
*/ 1393 + if (!sctx->done_with_transaction) { 1394 + /* Nope, lets release and tell crypto no */ 1395 + dev_warn(&sctx->sep_used->pdev->dev, 1396 + "[PID%d] sep_crypto_block never finished\n", 1397 + current->pid); 1398 + sep_crypto_release(sctx, -EINVAL); 1399 + } 1400 + } 1401 + 1402 + /** 1403 + * Post operation (after interrupt) for crypto block 1404 + */ 1405 + static u32 crypto_post_op(struct sep_device *sep) 1406 + { 1407 + /* HERE */ 1408 + int int_error; 1409 + u32 u32_error; 1410 + u32 msg_offset; 1411 + 1412 + ssize_t copy_result; 1413 + static char small_buf[100]; 1414 + 1415 + struct ablkcipher_request *req; 1416 + struct sep_block_ctx *bctx; 1417 + struct sep_system_ctx *sctx; 1418 + struct crypto_ablkcipher *tfm; 1419 + 1420 + if (!sep->current_cypher_req) 1421 + return -EINVAL; 1422 + 1423 + /* hold req since we need to submit work after clearing sep */ 1424 + req = sep->current_cypher_req; 1425 + 1426 + bctx = ablkcipher_request_ctx(sep->current_cypher_req); 1427 + tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req); 1428 + sctx = crypto_ablkcipher_ctx(tfm); 1429 + 1430 + dev_dbg(&sctx->sep_used->pdev->dev, "crypto post_op\n"); 1431 + dev_dbg(&sctx->sep_used->pdev->dev, "crypto post_op message dump\n"); 1432 + crypto_sep_dump_message(sctx); 1433 + 1434 + sctx->done_with_transaction = 1; 1435 + 1436 + /* first bring msg from shared area to local area */ 1437 + memcpy(sctx->msg, sep->shared_addr, 1438 + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); 1439 + 1440 + /* Is this the result of performing init (key to SEP */ 1441 + if (sctx->key_sent == 0) { 1442 + 1443 + /* Did SEP do it okay */ 1444 + u32_error = sep_verify_op(sctx, bctx->init_opcode, 1445 + &msg_offset); 1446 + if (u32_error) { 1447 + dev_warn(&sctx->sep_used->pdev->dev, 1448 + "aes init error %x\n", u32_error); 1449 + sep_crypto_release(sctx, u32_error); 1450 + return u32_error; 1451 + } 1452 + 1453 + /* Read Context */ 1454 + if (bctx->init_opcode == SEP_DES_INIT_OPCODE) { 1455 + 
sep_read_context(sctx, &msg_offset, 1456 + &bctx->des_private_ctx, 1457 + sizeof(struct sep_des_private_context)); 1458 + 1459 + sep_dump(sctx->sep_used, "ctx init des", 1460 + &bctx->des_private_ctx, 40); 1461 + } else { 1462 + sep_read_context(sctx, &msg_offset, 1463 + &bctx->aes_private_ctx, 1464 + sizeof(struct sep_des_private_context)); 1465 + 1466 + sep_dump(sctx->sep_used, "ctx init aes", 1467 + &bctx->aes_private_ctx, 20); 1468 + } 1469 + 1470 + /* We are done with init. Now send out the data */ 1471 + /* first release the sep */ 1472 + sctx->key_sent = 1; 1473 + sep_crypto_release(sctx, -EINPROGRESS); 1474 + 1475 + spin_lock_irq(&queue_lock); 1476 + int_error = crypto_enqueue_request(&sep_queue, &req->base); 1477 + spin_unlock_irq(&queue_lock); 1478 + 1479 + if ((int_error != 0) && (int_error != -EINPROGRESS)) { 1480 + dev_warn(&sctx->sep_used->pdev->dev, 1481 + "spe cypher post op cant queue\n"); 1482 + sep_crypto_release(sctx, int_error); 1483 + return int_error; 1484 + } 1485 + 1486 + /* schedule the data send */ 1487 + int_error = sep_submit_work(sep->workqueue, sep_dequeuer, 1488 + (void *)&sep_queue); 1489 + 1490 + if (int_error) { 1491 + dev_warn(&sep->pdev->dev, 1492 + "cant submit work sep_crypto_block\n"); 1493 + sep_crypto_release(sctx, -EINVAL); 1494 + return -EINVAL; 1495 + } 1496 + 1497 + } else { 1498 + 1499 + /** 1500 + * This is the result of a block request 1501 + */ 1502 + dev_dbg(&sctx->sep_used->pdev->dev, 1503 + "crypto_post_op block response\n"); 1504 + 1505 + u32_error = sep_verify_op(sctx, bctx->block_opcode, 1506 + &msg_offset); 1507 + 1508 + if (u32_error) { 1509 + dev_warn(&sctx->sep_used->pdev->dev, 1510 + "sep block error %x\n", u32_error); 1511 + sep_crypto_release(sctx, u32_error); 1512 + return -EINVAL; 1513 + } 1514 + 1515 + if (bctx->block_opcode == SEP_DES_BLOCK_OPCODE) { 1516 + 1517 + dev_dbg(&sctx->sep_used->pdev->dev, 1518 + "post op for DES\n"); 1519 + 1520 + /* special case for 1 block des */ 1521 + if 
(sep->current_cypher_req->nbytes == 1522 + crypto_ablkcipher_blocksize(tfm)) { 1523 + 1524 + sep_read_msg(sctx, small_buf, 1525 + crypto_ablkcipher_blocksize(tfm), 1526 + crypto_ablkcipher_blocksize(tfm) * 2, 1527 + &msg_offset, 1); 1528 + 1529 + dev_dbg(&sctx->sep_used->pdev->dev, 1530 + "reading in block des\n"); 1531 + 1532 + copy_result = sg_copy_from_buffer( 1533 + sctx->dst_sg, 1534 + sep_sg_nents(sctx->dst_sg), 1535 + small_buf, 1536 + crypto_ablkcipher_blocksize(tfm)); 1537 + 1538 + if (copy_result != 1539 + crypto_ablkcipher_blocksize(tfm)) { 1540 + 1541 + dev_warn(&sctx->sep_used->pdev->dev, 1542 + "des block copy faild\n"); 1543 + sep_crypto_release(sctx, -ENOMEM); 1544 + return -ENOMEM; 1545 + } 1546 + } 1547 + 1548 + /* Read Context */ 1549 + sep_read_context(sctx, &msg_offset, 1550 + &bctx->des_private_ctx, 1551 + sizeof(struct sep_des_private_context)); 1552 + } else { 1553 + 1554 + dev_dbg(&sctx->sep_used->pdev->dev, 1555 + "post op for AES\n"); 1556 + 1557 + /* Skip the MAC Output */ 1558 + msg_offset += (sizeof(u32) * 4); 1559 + 1560 + /* Read Context */ 1561 + sep_read_context(sctx, &msg_offset, 1562 + &bctx->aes_private_ctx, 1563 + sizeof(struct sep_aes_private_context)); 1564 + } 1565 + 1566 + sep_dump_sg(sctx->sep_used, 1567 + "block sg out", sctx->dst_sg); 1568 + 1569 + /* Copy to correct sg if this block had oddball pages */ 1570 + if (sctx->dst_sg_hold) 1571 + sep_copy_sg(sctx->sep_used, 1572 + sctx->dst_sg, 1573 + sctx->current_cypher_req->dst, 1574 + sctx->current_cypher_req->nbytes); 1575 + 1576 + /* finished, release everything */ 1577 + sep_crypto_release(sctx, 0); 1578 + } 1579 + return 0; 1580 + } 1581 + 1582 + static u32 hash_init_post_op(struct sep_device *sep) 1583 + { 1584 + u32 u32_error; 1585 + u32 msg_offset; 1586 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req); 1587 + struct sep_hash_ctx *ctx = ahash_request_ctx(sep->current_hash_req); 1588 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 1589 + 
dev_dbg(&sctx->sep_used->pdev->dev, 1590 + "hash init post op\n"); 1591 + 1592 + sctx->done_with_transaction = 1; 1593 + 1594 + /* first bring msg from shared area to local area */ 1595 + memcpy(sctx->msg, sep->shared_addr, 1596 + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); 1597 + 1598 + u32_error = sep_verify_op(sctx, SEP_HASH_INIT_OPCODE, 1599 + &msg_offset); 1600 + 1601 + if (u32_error) { 1602 + dev_warn(&sctx->sep_used->pdev->dev, "hash init error %x\n", 1603 + u32_error); 1604 + sep_crypto_release(sctx, u32_error); 1605 + return u32_error; 1606 + } 1607 + 1608 + /* Read Context */ 1609 + sep_read_context(sctx, &msg_offset, 1610 + &ctx->hash_private_ctx, 1611 + sizeof(struct sep_hash_private_context)); 1612 + 1613 + /* Signal to crypto infrastructure and clear out */ 1614 + dev_dbg(&sctx->sep_used->pdev->dev, "hash init post op done\n"); 1615 + sep_crypto_release(sctx, 0); 1616 + return 0; 1617 + } 1618 + 1619 + static u32 hash_update_post_op(struct sep_device *sep) 1620 + { 1621 + u32 u32_error; 1622 + u32 msg_offset; 1623 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req); 1624 + struct sep_hash_ctx *ctx = ahash_request_ctx(sep->current_hash_req); 1625 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 1626 + dev_dbg(&sctx->sep_used->pdev->dev, 1627 + "hash update post op\n"); 1628 + 1629 + sctx->done_with_transaction = 1; 1630 + 1631 + /* first bring msg from shared area to local area */ 1632 + memcpy(sctx->msg, sep->shared_addr, 1633 + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); 1634 + 1635 + u32_error = sep_verify_op(sctx, SEP_HASH_UPDATE_OPCODE, 1636 + &msg_offset); 1637 + 1638 + if (u32_error) { 1639 + dev_warn(&sctx->sep_used->pdev->dev, "hash init error %x\n", 1640 + u32_error); 1641 + sep_crypto_release(sctx, u32_error); 1642 + return u32_error; 1643 + } 1644 + 1645 + /* Read Context */ 1646 + sep_read_context(sctx, &msg_offset, 1647 + &ctx->hash_private_ctx, 1648 + sizeof(struct sep_hash_private_context)); 1649 + 1650 + 
sep_crypto_release(sctx, 0); 1651 + return 0; 1652 + } 1653 + 1654 + static u32 hash_final_post_op(struct sep_device *sep) 1655 + { 1656 + int max_length; 1657 + u32 u32_error; 1658 + u32 msg_offset; 1659 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req); 1660 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 1661 + dev_dbg(&sctx->sep_used->pdev->dev, 1662 + "hash final post op\n"); 1663 + 1664 + sctx->done_with_transaction = 1; 1665 + 1666 + /* first bring msg from shared area to local area */ 1667 + memcpy(sctx->msg, sep->shared_addr, 1668 + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); 1669 + 1670 + u32_error = sep_verify_op(sctx, SEP_HASH_FINISH_OPCODE, 1671 + &msg_offset); 1672 + 1673 + if (u32_error) { 1674 + dev_warn(&sctx->sep_used->pdev->dev, "hash finish error %x\n", 1675 + u32_error); 1676 + sep_crypto_release(sctx, u32_error); 1677 + return u32_error; 1678 + } 1679 + 1680 + /* Grab the result */ 1681 + if (sctx->current_hash_req->result == NULL) { 1682 + /* Oops, null buffer; error out here */ 1683 + dev_warn(&sctx->sep_used->pdev->dev, 1684 + "hash finish null buffer\n"); 1685 + sep_crypto_release(sctx, (u32)-ENOMEM); 1686 + return -ENOMEM; 1687 + } 1688 + 1689 + max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) / 1690 + sizeof(u32)) * sizeof(u32); 1691 + 1692 + sep_read_msg(sctx, 1693 + sctx->current_hash_req->result, 1694 + crypto_ahash_digestsize(tfm), max_length, 1695 + &msg_offset, 0); 1696 + 1697 + /* Signal to crypto infrastructure and clear out */ 1698 + dev_dbg(&sctx->sep_used->pdev->dev, "hash finish post op done\n"); 1699 + sep_crypto_release(sctx, 0); 1700 + return 0; 1701 + } 1702 + 1703 + static u32 hash_digest_post_op(struct sep_device *sep) 1704 + { 1705 + int max_length; 1706 + u32 u32_error; 1707 + u32 msg_offset; 1708 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req); 1709 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 1710 + dev_dbg(&sctx->sep_used->pdev->dev, 
1711 + "hash digest post op\n"); 1712 + 1713 + sctx->done_with_transaction = 1; 1714 + 1715 + /* first bring msg from shared area to local area */ 1716 + memcpy(sctx->msg, sep->shared_addr, 1717 + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); 1718 + 1719 + u32_error = sep_verify_op(sctx, SEP_HASH_SINGLE_OPCODE, 1720 + &msg_offset); 1721 + 1722 + if (u32_error) { 1723 + dev_warn(&sctx->sep_used->pdev->dev, 1724 + "hash digest finish error %x\n", u32_error); 1725 + 1726 + sep_crypto_release(sctx, u32_error); 1727 + return u32_error; 1728 + } 1729 + 1730 + /* Grab the result */ 1731 + if (sctx->current_hash_req->result == NULL) { 1732 + /* Oops, null buffer; error out here */ 1733 + dev_warn(&sctx->sep_used->pdev->dev, 1734 + "hash digest finish null buffer\n"); 1735 + sep_crypto_release(sctx, (u32)-ENOMEM); 1736 + return -ENOMEM; 1737 + } 1738 + 1739 + max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) / 1740 + sizeof(u32)) * sizeof(u32); 1741 + 1742 + sep_read_msg(sctx, 1743 + sctx->current_hash_req->result, 1744 + crypto_ahash_digestsize(tfm), max_length, 1745 + &msg_offset, 0); 1746 + 1747 + /* Signal to crypto infrastructure and clear out */ 1748 + dev_dbg(&sctx->sep_used->pdev->dev, 1749 + "hash digest finish post op done\n"); 1750 + 1751 + sep_crypto_release(sctx, 0); 1752 + return 0; 1753 + } 1754 + 1755 + /** 1756 + * The sep_finish function is the function that is schedule (via tasket) 1757 + * by the interrupt service routine when the SEP sends and interrupt 1758 + * This is only called by the interrupt handler as a tasklet. 
1759 + */ 1760 + static void sep_finish(unsigned long data) 1761 + { 1762 + unsigned long flags; 1763 + struct sep_device *sep_dev; 1764 + int res; 1765 + 1766 + res = 0; 1767 + 1768 + if (data == 0) { 1769 + pr_debug("sep_finish called with null data\n"); 1770 + return; 1771 + } 1772 + 1773 + sep_dev = (struct sep_device *)data; 1774 + if (sep_dev == NULL) { 1775 + pr_debug("sep_finish; sep_dev is NULL\n"); 1776 + return; 1777 + } 1778 + 1779 + spin_lock_irqsave(&sep_dev->busy_lock, flags); 1780 + if (sep_dev->in_kernel == (u32)0) { 1781 + spin_unlock_irqrestore(&sep_dev->busy_lock, flags); 1782 + dev_warn(&sep_dev->pdev->dev, 1783 + "sep_finish; not in kernel operation\n"); 1784 + return; 1785 + } 1786 + spin_unlock_irqrestore(&sep_dev->busy_lock, flags); 1787 + 1788 + /* Did we really do a sep command prior to this? */ 1789 + if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, 1790 + &sep_dev->sctx->call_status.status)) { 1791 + 1792 + dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n", 1793 + current->pid); 1794 + return; 1795 + } 1796 + 1797 + if (sep_dev->send_ct != sep_dev->reply_ct) { 1798 + dev_warn(&sep_dev->pdev->dev, 1799 + "[PID%d] poll; no message came back\n", 1800 + current->pid); 1801 + return; 1802 + } 1803 + 1804 + /* Check for error (In case time ran out) */ 1805 + if ((res != 0x0) && (res != 0x8)) { 1806 + dev_warn(&sep_dev->pdev->dev, 1807 + "[PID%d] poll; poll error GPR3 is %x\n", 1808 + current->pid, res); 1809 + return; 1810 + } 1811 + 1812 + /* What kind of interrupt from sep was this? */ 1813 + res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR); 1814 + 1815 + dev_dbg(&sep_dev->pdev->dev, "[PID%d] GPR2 at crypto finish is %x\n", 1816 + current->pid, res); 1817 + 1818 + /* Print request? 
*/ 1819 + if ((res >> 30) & 0x1) { 1820 + dev_dbg(&sep_dev->pdev->dev, "[PID%d] sep print req\n", 1821 + current->pid); 1822 + dev_dbg(&sep_dev->pdev->dev, "[PID%d] contents: %s\n", 1823 + current->pid, 1824 + (char *)(sep_dev->shared_addr + 1825 + SEP_DRIVER_PRINTF_OFFSET_IN_BYTES)); 1826 + return; 1827 + } 1828 + 1829 + /* Request for daemon (not currently in POR)? */ 1830 + if (res >> 31) { 1831 + dev_dbg(&sep_dev->pdev->dev, 1832 + "[PID%d] sep request; ignoring\n", 1833 + current->pid); 1834 + return; 1835 + } 1836 + 1837 + /* If we got here, then we have a replay to a sep command */ 1838 + 1839 + dev_dbg(&sep_dev->pdev->dev, 1840 + "[PID%d] sep reply to command; processing request: %x\n", 1841 + current->pid, sep_dev->current_request); 1842 + 1843 + switch (sep_dev->current_request) { 1844 + case AES_CBC: 1845 + case AES_ECB: 1846 + case DES_CBC: 1847 + case DES_ECB: 1848 + res = crypto_post_op(sep_dev); 1849 + break; 1850 + case SHA1: 1851 + case MD5: 1852 + case SHA224: 1853 + case SHA256: 1854 + switch (sep_dev->current_hash_stage) { 1855 + case HASH_INIT: 1856 + res = hash_init_post_op(sep_dev); 1857 + break; 1858 + case HASH_UPDATE: 1859 + res = hash_update_post_op(sep_dev); 1860 + break; 1861 + case HASH_FINISH: 1862 + res = hash_final_post_op(sep_dev); 1863 + break; 1864 + case HASH_DIGEST: 1865 + res = hash_digest_post_op(sep_dev); 1866 + break; 1867 + default: 1868 + dev_warn(&sep_dev->pdev->dev, 1869 + "invalid stage for hash finish\n"); 1870 + } 1871 + break; 1872 + default: 1873 + dev_warn(&sep_dev->pdev->dev, 1874 + "invalid request for finish\n"); 1875 + } 1876 + 1877 + if (res) { 1878 + dev_warn(&sep_dev->pdev->dev, 1879 + "finish returned error %x\n", res); 1880 + } 1881 + } 1882 + 1883 + static int sep_hash_cra_init(struct crypto_tfm *tfm) 1884 + { 1885 + struct sep_system_ctx *sctx = crypto_tfm_ctx(tfm); 1886 + const char *alg_name = crypto_tfm_alg_name(tfm); 1887 + 1888 + sctx->sep_used = sep_dev; 1889 + 1890 + 
dev_dbg(&sctx->sep_used->pdev->dev, 1891 + "sep_hash_cra_init name is %s\n", alg_name); 1892 + 1893 + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1894 + sizeof(struct sep_hash_ctx)); 1895 + return 0; 1896 + } 1897 + 1898 + static void sep_hash_cra_exit(struct crypto_tfm *tfm) 1899 + { 1900 + struct sep_system_ctx *sctx = crypto_tfm_ctx(tfm); 1901 + 1902 + dev_dbg(&sctx->sep_used->pdev->dev, 1903 + "sep_hash_cra_exit\n"); 1904 + sctx->sep_used = NULL; 1905 + } 1906 + 1907 + static void sep_hash_init(void *data) 1908 + { 1909 + u32 msg_offset; 1910 + int result; 1911 + struct ahash_request *req; 1912 + struct crypto_ahash *tfm; 1913 + struct sep_hash_ctx *ctx; 1914 + struct sep_system_ctx *sctx; 1915 + 1916 + req = (struct ahash_request *)data; 1917 + tfm = crypto_ahash_reqtfm(req); 1918 + ctx = ahash_request_ctx(req); 1919 + sctx = crypto_ahash_ctx(tfm); 1920 + 1921 + dev_dbg(&sctx->sep_used->pdev->dev, 1922 + "sep_hash_init\n"); 1923 + sctx->current_hash_stage = HASH_INIT; 1924 + /* opcode and mode */ 1925 + sep_make_header(sctx, &msg_offset, SEP_HASH_INIT_OPCODE); 1926 + sep_write_msg(sctx, &ctx->hash_opmode, 1927 + sizeof(u32), sizeof(u32), &msg_offset, 0); 1928 + sep_end_msg(sctx, msg_offset); 1929 + 1930 + sctx->done_with_transaction = 0; 1931 + 1932 + result = sep_crypto_take_sep(sctx); 1933 + if (result) { 1934 + dev_warn(&sctx->sep_used->pdev->dev, 1935 + "sep_hash_init take sep failed\n"); 1936 + sep_crypto_release(sctx, -EINVAL); 1937 + } 1938 + 1939 + /** 1940 + * Sep is now working. Lets wait up to 5 seconds 1941 + * for completion. 
If it does not complete, we will do 1942 + * a crypto release with -EINVAL to release the 1943 + * kernel crypto infrastructure and let the system 1944 + * continue to boot up 1945 + * We have to wait this long because some crypto 1946 + * operations can take a while 1947 + */ 1948 + dev_dbg(&sctx->sep_used->pdev->dev, 1949 + "waiting for done with transaction\n"); 1950 + 1951 + sctx->end_time = jiffies + (SEP_TRANSACTION_WAIT_TIME * HZ); 1952 + while ((time_before(jiffies, sctx->end_time)) && 1953 + (!sctx->done_with_transaction)) 1954 + schedule(); 1955 + 1956 + dev_dbg(&sctx->sep_used->pdev->dev, 1957 + "done waiting for done with transaction\n"); 1958 + 1959 + /* are we done? */ 1960 + if (!sctx->done_with_transaction) { 1961 + /* Nope, lets release and tell crypto no */ 1962 + dev_warn(&sctx->sep_used->pdev->dev, 1963 + "[PID%d] sep_hash_init never finished\n", 1964 + current->pid); 1965 + sep_crypto_release(sctx, -EINVAL); 1966 + } 1967 + } 1968 + 1969 + static void sep_hash_update(void *data) 1970 + { 1971 + int int_error; 1972 + u32 msg_offset; 1973 + u32 len; 1974 + struct sep_hash_internal_context *int_ctx; 1975 + u32 block_size; 1976 + u32 head_len; 1977 + u32 tail_len; 1978 + static u32 msg[10]; 1979 + static char small_buf[100]; 1980 + void *src_ptr; 1981 + struct scatterlist *new_sg; 1982 + ssize_t copy_result; 1983 + struct ahash_request *req; 1984 + struct crypto_ahash *tfm; 1985 + struct sep_hash_ctx *ctx; 1986 + struct sep_system_ctx *sctx; 1987 + 1988 + req = (struct ahash_request *)data; 1989 + tfm = crypto_ahash_reqtfm(req); 1990 + ctx = ahash_request_ctx(req); 1991 + sctx = crypto_ahash_ctx(tfm); 1992 + 1993 + /* length for queue status */ 1994 + sctx->nbytes = req->nbytes; 1995 + 1996 + dev_dbg(&sctx->sep_used->pdev->dev, 1997 + "sep_hash_update\n"); 1998 + sctx->current_hash_stage = HASH_UPDATE; 1999 + len = req->nbytes; 2000 + 2001 + block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); 2002 + tail_len = req->nbytes % block_size; 
2003 + dev_dbg(&sctx->sep_used->pdev->dev, "length is %x\n", len); 2004 + dev_dbg(&sctx->sep_used->pdev->dev, "block_size is %x\n", block_size); 2005 + dev_dbg(&sctx->sep_used->pdev->dev, "tail len is %x\n", tail_len); 2006 + 2007 + /* Compute header/tail sizes */ 2008 + int_ctx = (struct sep_hash_internal_context *)&ctx-> 2009 + hash_private_ctx.internal_context; 2010 + head_len = (block_size - int_ctx->prev_update_bytes) % block_size; 2011 + tail_len = (req->nbytes - head_len) % block_size; 2012 + 2013 + /* Make sure all pages are even block */ 2014 + int_error = sep_oddball_pages(sctx->sep_used, req->src, 2015 + req->nbytes, 2016 + block_size, &new_sg, 1); 2017 + 2018 + if (int_error < 0) { 2019 + dev_warn(&sctx->sep_used->pdev->dev, 2020 + "oddball pages error in crash update\n"); 2021 + sep_crypto_release(sctx, -ENOMEM); 2022 + return; 2023 + } else if (int_error == 1) { 2024 + sctx->src_sg = new_sg; 2025 + sctx->src_sg_hold = new_sg; 2026 + } else { 2027 + sctx->src_sg = req->src; 2028 + sctx->src_sg_hold = NULL; 2029 + } 2030 + 2031 + src_ptr = sg_virt(sctx->src_sg); 2032 + 2033 + if ((!req->nbytes) || (!ctx->sg)) { 2034 + /* null data */ 2035 + src_ptr = NULL; 2036 + } 2037 + 2038 + sep_dump_sg(sctx->sep_used, "hash block sg in", sctx->src_sg); 2039 + 2040 + sctx->dcb_input_data.app_in_address = src_ptr; 2041 + sctx->dcb_input_data.data_in_size = req->nbytes - (head_len + tail_len); 2042 + sctx->dcb_input_data.app_out_address = NULL; 2043 + sctx->dcb_input_data.block_size = block_size; 2044 + sctx->dcb_input_data.tail_block_size = 0; 2045 + sctx->dcb_input_data.is_applet = 0; 2046 + sctx->dcb_input_data.src_sg = sctx->src_sg; 2047 + sctx->dcb_input_data.dst_sg = NULL; 2048 + 2049 + int_error = sep_create_dcb_dmatables_context_kernel( 2050 + sctx->sep_used, 2051 + &sctx->dcb_region, 2052 + &sctx->dmatables_region, 2053 + &sctx->dma_ctx, 2054 + &sctx->dcb_input_data, 2055 + 1); 2056 + if (int_error) { 2057 + dev_warn(&sctx->sep_used->pdev->dev, 2058 + "hash 
update dma table create failed\n"); 2059 + sep_crypto_release(sctx, -EINVAL); 2060 + return; 2061 + } 2062 + 2063 + /* Construct message to SEP */ 2064 + sep_make_header(sctx, &msg_offset, SEP_HASH_UPDATE_OPCODE); 2065 + 2066 + msg[0] = (u32)0; 2067 + msg[1] = (u32)0; 2068 + msg[2] = (u32)0; 2069 + 2070 + sep_write_msg(sctx, msg, sizeof(u32) * 3, sizeof(u32) * 3, 2071 + &msg_offset, 0); 2072 + 2073 + /* Handle remainders */ 2074 + 2075 + /* Head */ 2076 + sep_write_msg(sctx, &head_len, sizeof(u32), 2077 + sizeof(u32), &msg_offset, 0); 2078 + 2079 + if (head_len) { 2080 + copy_result = sg_copy_to_buffer( 2081 + req->src, 2082 + sep_sg_nents(sctx->src_sg), 2083 + small_buf, head_len); 2084 + 2085 + if (copy_result != head_len) { 2086 + dev_warn(&sctx->sep_used->pdev->dev, 2087 + "sg head copy failure in hash block\n"); 2088 + sep_crypto_release(sctx, -ENOMEM); 2089 + return; 2090 + } 2091 + 2092 + sep_write_msg(sctx, small_buf, head_len, 2093 + sizeof(u32) * 32, &msg_offset, 1); 2094 + } else { 2095 + msg_offset += sizeof(u32) * 32; 2096 + } 2097 + 2098 + /* Tail */ 2099 + sep_write_msg(sctx, &tail_len, sizeof(u32), 2100 + sizeof(u32), &msg_offset, 0); 2101 + 2102 + if (tail_len) { 2103 + copy_result = sep_copy_offset_sg( 2104 + sctx->sep_used, 2105 + sctx->src_sg, 2106 + req->nbytes - tail_len, 2107 + small_buf, tail_len); 2108 + 2109 + if (copy_result != tail_len) { 2110 + dev_warn(&sctx->sep_used->pdev->dev, 2111 + "sg tail copy failure in hash block\n"); 2112 + sep_crypto_release(sctx, -ENOMEM); 2113 + return; 2114 + } 2115 + 2116 + sep_write_msg(sctx, small_buf, tail_len, 2117 + sizeof(u32) * 32, &msg_offset, 1); 2118 + } else { 2119 + msg_offset += sizeof(u32) * 32; 2120 + } 2121 + 2122 + /* Context */ 2123 + sep_write_context(sctx, &msg_offset, &ctx->hash_private_ctx, 2124 + sizeof(struct sep_hash_private_context)); 2125 + 2126 + sep_end_msg(sctx, msg_offset); 2127 + sctx->done_with_transaction = 0; 2128 + int_error = sep_crypto_take_sep(sctx); 2129 + if 
(int_error) { 2130 + dev_warn(&sctx->sep_used->pdev->dev, 2131 + "sep_hash_update take sep failed\n"); 2132 + sep_crypto_release(sctx, -EINVAL); 2133 + } 2134 + 2135 + /** 2136 + * Sep is now working. Lets wait up to 5 seconds 2137 + * for completion. If it does not complete, we will do 2138 + * a crypto release with -EINVAL to release the 2139 + * kernel crypto infrastructure and let the system 2140 + * continue to boot up 2141 + * We have to wait this long because some crypto 2142 + * operations can take a while 2143 + */ 2144 + dev_dbg(&sctx->sep_used->pdev->dev, 2145 + "waiting for done with transaction\n"); 2146 + 2147 + sctx->end_time = jiffies + (SEP_TRANSACTION_WAIT_TIME * HZ); 2148 + while ((time_before(jiffies, sctx->end_time)) && 2149 + (!sctx->done_with_transaction)) 2150 + schedule(); 2151 + 2152 + dev_dbg(&sctx->sep_used->pdev->dev, 2153 + "done waiting for done with transaction\n"); 2154 + 2155 + /* are we done? */ 2156 + if (!sctx->done_with_transaction) { 2157 + /* Nope, lets release and tell crypto no */ 2158 + dev_warn(&sctx->sep_used->pdev->dev, 2159 + "[PID%d] sep_hash_update never finished\n", 2160 + current->pid); 2161 + sep_crypto_release(sctx, -EINVAL); 2162 + } 2163 + } 2164 + 2165 + static void sep_hash_final(void *data) 2166 + { 2167 + u32 msg_offset; 2168 + struct ahash_request *req; 2169 + struct crypto_ahash *tfm; 2170 + struct sep_hash_ctx *ctx; 2171 + struct sep_system_ctx *sctx; 2172 + int result; 2173 + 2174 + req = (struct ahash_request *)data; 2175 + tfm = crypto_ahash_reqtfm(req); 2176 + ctx = ahash_request_ctx(req); 2177 + sctx = crypto_ahash_ctx(tfm); 2178 + 2179 + dev_dbg(&sctx->sep_used->pdev->dev, 2180 + "sep_hash_final\n"); 2181 + sctx->current_hash_stage = HASH_FINISH; 2182 + 2183 + /* opcode and mode */ 2184 + sep_make_header(sctx, &msg_offset, SEP_HASH_FINISH_OPCODE); 2185 + 2186 + /* Context */ 2187 + sep_write_context(sctx, &msg_offset, &ctx->hash_private_ctx, 2188 + sizeof(struct sep_hash_private_context)); 2189 + 
2190 + sep_end_msg(sctx, msg_offset); 2191 + sctx->done_with_transaction = 0; 2192 + result = sep_crypto_take_sep(sctx); 2193 + if (result) { 2194 + dev_warn(&sctx->sep_used->pdev->dev, 2195 + "sep_hash_final take sep failed\n"); 2196 + sep_crypto_release(sctx, -EINVAL); 2197 + } 2198 + 2199 + /** 2200 + * Sep is now working. Lets wait up to 5 seconds 2201 + * for completion. If it does not complete, we will do 2202 + * a crypto release with -EINVAL to release the 2203 + * kernel crypto infrastructure and let the system 2204 + * continue to boot up 2205 + * We have to wait this long because some crypto 2206 + * operations can take a while 2207 + */ 2208 + dev_dbg(&sctx->sep_used->pdev->dev, 2209 + "waiting for done with transaction\n"); 2210 + 2211 + sctx->end_time = jiffies + (SEP_TRANSACTION_WAIT_TIME * HZ); 2212 + while ((time_before(jiffies, sctx->end_time)) && 2213 + (!sctx->done_with_transaction)) 2214 + schedule(); 2215 + 2216 + dev_dbg(&sctx->sep_used->pdev->dev, 2217 + "done waiting for done with transaction\n"); 2218 + 2219 + /* are we done? 
*/ 2220 + if (!sctx->done_with_transaction) { 2221 + /* Nope, lets release and tell crypto no */ 2222 + dev_warn(&sctx->sep_used->pdev->dev, 2223 + "[PID%d] sep_hash_final never finished\n", 2224 + current->pid); 2225 + sep_crypto_release(sctx, -EINVAL); 2226 + } 2227 + } 2228 + 2229 + static void sep_hash_digest(void *data) 2230 + { 2231 + int int_error; 2232 + u32 msg_offset; 2233 + u32 block_size; 2234 + u32 msg[10]; 2235 + size_t copy_result; 2236 + int result; 2237 + u32 tail_len; 2238 + static char small_buf[100]; 2239 + struct scatterlist *new_sg; 2240 + void *src_ptr; 2241 + 2242 + struct ahash_request *req; 2243 + struct crypto_ahash *tfm; 2244 + struct sep_hash_ctx *ctx; 2245 + struct sep_system_ctx *sctx; 2246 + 2247 + req = (struct ahash_request *)data; 2248 + tfm = crypto_ahash_reqtfm(req); 2249 + ctx = ahash_request_ctx(req); 2250 + sctx = crypto_ahash_ctx(tfm); 2251 + 2252 + dev_dbg(&sctx->sep_used->pdev->dev, 2253 + "sep_hash_digest\n"); 2254 + sctx->current_hash_stage = HASH_DIGEST; 2255 + 2256 + /* length for queue status */ 2257 + sctx->nbytes = req->nbytes; 2258 + 2259 + block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); 2260 + tail_len = req->nbytes % block_size; 2261 + dev_dbg(&sctx->sep_used->pdev->dev, "length is %x\n", req->nbytes); 2262 + dev_dbg(&sctx->sep_used->pdev->dev, "block_size is %x\n", block_size); 2263 + dev_dbg(&sctx->sep_used->pdev->dev, "tail len is %x\n", tail_len); 2264 + 2265 + /* Make sure all pages are even block */ 2266 + int_error = sep_oddball_pages(sctx->sep_used, req->src, 2267 + req->nbytes, 2268 + block_size, &new_sg, 1); 2269 + 2270 + if (int_error < 0) { 2271 + dev_warn(&sctx->sep_used->pdev->dev, 2272 + "oddball pages error in crash update\n"); 2273 + sep_crypto_release(sctx, -ENOMEM); 2274 + return; 2275 + } else if (int_error == 1) { 2276 + sctx->src_sg = new_sg; 2277 + sctx->src_sg_hold = new_sg; 2278 + } else { 2279 + sctx->src_sg = req->src; 2280 + sctx->src_sg_hold = NULL; 2281 + } 2282 + 2283 
+ src_ptr = sg_virt(sctx->src_sg); 2284 + 2285 + if ((!req->nbytes) || (!ctx->sg)) { 2286 + /* null data */ 2287 + src_ptr = NULL; 2288 + } 2289 + 2290 + sep_dump_sg(sctx->sep_used, "hash block sg in", sctx->src_sg); 2291 + 2292 + sctx->dcb_input_data.app_in_address = src_ptr; 2293 + sctx->dcb_input_data.data_in_size = req->nbytes - tail_len; 2294 + sctx->dcb_input_data.app_out_address = NULL; 2295 + sctx->dcb_input_data.block_size = block_size; 2296 + sctx->dcb_input_data.tail_block_size = 0; 2297 + sctx->dcb_input_data.is_applet = 0; 2298 + sctx->dcb_input_data.src_sg = sctx->src_sg; 2299 + sctx->dcb_input_data.dst_sg = NULL; 2300 + 2301 + int_error = sep_create_dcb_dmatables_context_kernel( 2302 + sctx->sep_used, 2303 + &sctx->dcb_region, 2304 + &sctx->dmatables_region, 2305 + &sctx->dma_ctx, 2306 + &sctx->dcb_input_data, 2307 + 1); 2308 + if (int_error) { 2309 + dev_warn(&sctx->sep_used->pdev->dev, 2310 + "hash update dma table create failed\n"); 2311 + sep_crypto_release(sctx, -EINVAL); 2312 + return; 2313 + } 2314 + 2315 + /* Construct message to SEP */ 2316 + sep_make_header(sctx, &msg_offset, SEP_HASH_SINGLE_OPCODE); 2317 + sep_write_msg(sctx, &ctx->hash_opmode, 2318 + sizeof(u32), sizeof(u32), &msg_offset, 0); 2319 + 2320 + msg[0] = (u32)0; 2321 + msg[1] = (u32)0; 2322 + msg[2] = (u32)0; 2323 + 2324 + sep_write_msg(sctx, msg, sizeof(u32) * 3, sizeof(u32) * 3, 2325 + &msg_offset, 0); 2326 + 2327 + /* Tail */ 2328 + sep_write_msg(sctx, &tail_len, sizeof(u32), 2329 + sizeof(u32), &msg_offset, 0); 2330 + 2331 + if (tail_len) { 2332 + copy_result = sep_copy_offset_sg( 2333 + sctx->sep_used, 2334 + sctx->src_sg, 2335 + req->nbytes - tail_len, 2336 + small_buf, tail_len); 2337 + 2338 + if (copy_result != tail_len) { 2339 + dev_warn(&sctx->sep_used->pdev->dev, 2340 + "sg tail copy failure in hash block\n"); 2341 + sep_crypto_release(sctx, -ENOMEM); 2342 + return; 2343 + } 2344 + 2345 + sep_write_msg(sctx, small_buf, tail_len, 2346 + sizeof(u32) * 32, &msg_offset, 
1); 2347 + } else { 2348 + msg_offset += sizeof(u32) * 32; 2349 + } 2350 + 2351 + sep_end_msg(sctx, msg_offset); 2352 + 2353 + sctx->done_with_transaction = 0; 2354 + 2355 + result = sep_crypto_take_sep(sctx); 2356 + if (result) { 2357 + dev_warn(&sctx->sep_used->pdev->dev, 2358 + "sep_hash_digest take sep failed\n"); 2359 + sep_crypto_release(sctx, -EINVAL); 2360 + } 2361 + 2362 + /** 2363 + * Sep is now working. Lets wait up to 5 seconds 2364 + * for completion. If it does not complete, we will do 2365 + * a crypto release with -EINVAL to release the 2366 + * kernel crypto infrastructure and let the system 2367 + * continue to boot up 2368 + * We have to wait this long because some crypto 2369 + * operations can take a while 2370 + */ 2371 + dev_dbg(&sctx->sep_used->pdev->dev, 2372 + "waiting for done with transaction\n"); 2373 + 2374 + sctx->end_time = jiffies + (SEP_TRANSACTION_WAIT_TIME * HZ); 2375 + while ((time_before(jiffies, sctx->end_time)) && 2376 + (!sctx->done_with_transaction)) 2377 + schedule(); 2378 + 2379 + dev_dbg(&sctx->sep_used->pdev->dev, 2380 + "done waiting for done with transaction\n"); 2381 + 2382 + /* are we done? */ 2383 + if (!sctx->done_with_transaction) { 2384 + /* Nope, lets release and tell crypto no */ 2385 + dev_warn(&sctx->sep_used->pdev->dev, 2386 + "[PID%d] sep_hash_digest never finished\n", 2387 + current->pid); 2388 + sep_crypto_release(sctx, -EINVAL); 2389 + } 2390 + } 2391 + 2392 + /** 2393 + * This is what is called by each of the API's provided 2394 + * in the kernel crypto descriptors. It is run in a process 2395 + * context using the kernel workqueues. Therefore it can 2396 + * be put to sleep. 
2397 + */ 2398 + static void sep_dequeuer(void *data) 2399 + { 2400 + struct crypto_queue *this_queue; 2401 + struct crypto_async_request *async_req; 2402 + struct crypto_async_request *backlog; 2403 + struct ablkcipher_request *cypher_req; 2404 + struct ahash_request *hash_req; 2405 + struct sep_system_ctx *sctx; 2406 + struct crypto_ahash *hash_tfm; 2407 + 2408 + 2409 + this_queue = (struct crypto_queue *)data; 2410 + 2411 + spin_lock_irq(&queue_lock); 2412 + backlog = crypto_get_backlog(this_queue); 2413 + async_req = crypto_dequeue_request(this_queue); 2414 + spin_unlock_irq(&queue_lock); 2415 + 2416 + if (!async_req) { 2417 + pr_debug("sep crypto queue is empty\n"); 2418 + return; 2419 + } 2420 + 2421 + if (backlog) { 2422 + pr_debug("sep crypto backlog set\n"); 2423 + if (backlog->complete) 2424 + backlog->complete(backlog, -EINPROGRESS); 2425 + backlog = NULL; 2426 + } 2427 + 2428 + if (!async_req->tfm) { 2429 + pr_debug("sep crypto queue null tfm\n"); 2430 + return; 2431 + } 2432 + 2433 + if (!async_req->tfm->__crt_alg) { 2434 + pr_debug("sep crypto queue null __crt_alg\n"); 2435 + return; 2436 + } 2437 + 2438 + if (!async_req->tfm->__crt_alg->cra_type) { 2439 + pr_debug("sep crypto queue null cra_type\n"); 2440 + return; 2441 + } 2442 + 2443 + /* we have stuff in the queue */ 2444 + if (async_req->tfm->__crt_alg->cra_type != 2445 + &crypto_ahash_type) { 2446 + /* This is for a cypher */ 2447 + pr_debug("sep crypto queue doing cipher\n"); 2448 + cypher_req = container_of(async_req, 2449 + struct ablkcipher_request, 2450 + base); 2451 + if (!cypher_req) { 2452 + pr_debug("sep crypto queue null cypher_req\n"); 2453 + return; 2454 + } 2455 + 2456 + sep_crypto_block((void *)cypher_req); 2457 + return; 2458 + } else { 2459 + /* This is a hash */ 2460 + pr_debug("sep crypto queue doing hash\n"); 2461 + /** 2462 + * This is a bit more complex than cipher; we 2463 + * need to figure out what type of operation 2464 + */ 2465 + hash_req = 
ahash_request_cast(async_req); 2466 + if (!hash_req) { 2467 + pr_debug("sep crypto queue null hash_req\n"); 2468 + return; 2469 + } 2470 + 2471 + hash_tfm = crypto_ahash_reqtfm(hash_req); 2472 + if (!hash_tfm) { 2473 + pr_debug("sep crypto queue null hash_tfm\n"); 2474 + return; 2475 + } 2476 + 2477 + 2478 + sctx = crypto_ahash_ctx(hash_tfm); 2479 + if (!sctx) { 2480 + pr_debug("sep crypto queue null sctx\n"); 2481 + return; 2482 + } 2483 + 2484 + if (sctx->current_hash_stage == HASH_INIT) { 2485 + pr_debug("sep crypto queue hash init\n"); 2486 + sep_hash_init((void *)hash_req); 2487 + return; 2488 + } else if (sctx->current_hash_stage == HASH_UPDATE) { 2489 + pr_debug("sep crypto queue hash update\n"); 2490 + sep_hash_update((void *)hash_req); 2491 + return; 2492 + } else if (sctx->current_hash_stage == HASH_FINISH) { 2493 + pr_debug("sep crypto queue hash final\n"); 2494 + sep_hash_final((void *)hash_req); 2495 + return; 2496 + } else if (sctx->current_hash_stage == HASH_DIGEST) { 2497 + pr_debug("sep crypto queue hash digest\n"); 2498 + sep_hash_digest((void *)hash_req); 2499 + return; 2500 + } else { 2501 + pr_debug("sep crypto queue hash oops nothing\n"); 2502 + return; 2503 + } 2504 + } 2505 + } 2506 + 2507 + static int sep_sha1_init(struct ahash_request *req) 2508 + { 2509 + int error; 2510 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2511 + struct sep_hash_ctx *ctx = ahash_request_ctx(req); 2512 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 2513 + 2514 + dev_dbg(&sctx->sep_used->pdev->dev, "doing sha1 init\n"); 2515 + sctx->current_request = SHA1; 2516 + sctx->current_hash_req = req; 2517 + sctx->current_cypher_req = NULL; 2518 + ctx->hash_opmode = SEP_HASH_SHA1; 2519 + sctx->current_hash_stage = HASH_INIT; 2520 + 2521 + spin_lock_irq(&queue_lock); 2522 + error = crypto_enqueue_request(&sep_queue, &req->base); 2523 + spin_unlock_irq(&queue_lock); 2524 + 2525 + if ((error != 0) && (error != -EINPROGRESS)) { 2526 + 
dev_warn(&sctx->sep_used->pdev->dev, 2527 + "sep sha1 init cant enqueue\n"); 2528 + sep_crypto_release(sctx, error); 2529 + return error; 2530 + } 2531 + 2532 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 2533 + (void *)&sep_queue); 2534 + if (error) { 2535 + dev_warn(&sctx->sep_used->pdev->dev, 2536 + "sha1 init cannot submit queue\n"); 2537 + sep_crypto_release(sctx, -EINVAL); 2538 + return -EINVAL; 2539 + } 2540 + return -EINPROGRESS; 2541 + } 2542 + 2543 + static int sep_sha1_update(struct ahash_request *req) 2544 + { 2545 + int error; 2546 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2547 + struct sep_hash_ctx *ctx = ahash_request_ctx(req); 2548 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 2549 + 2550 + dev_dbg(&sctx->sep_used->pdev->dev, "doing sha1 update\n"); 2551 + sctx->current_request = SHA1; 2552 + sctx->current_hash_req = req; 2553 + sctx->current_cypher_req = NULL; 2554 + ctx->hash_opmode = SEP_HASH_SHA1; 2555 + sctx->current_hash_stage = HASH_INIT; 2556 + 2557 + spin_lock_irq(&queue_lock); 2558 + error = crypto_enqueue_request(&sep_queue, &req->base); 2559 + spin_unlock_irq(&queue_lock); 2560 + 2561 + if ((error != 0) && (error != -EINPROGRESS)) { 2562 + dev_warn(&sctx->sep_used->pdev->dev, 2563 + "sep sha1 update cant enqueue\n"); 2564 + sep_crypto_release(sctx, error); 2565 + return error; 2566 + } 2567 + 2568 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 2569 + (void *)&sep_queue); 2570 + if (error) { 2571 + dev_warn(&sctx->sep_used->pdev->dev, 2572 + "sha1 update cannot submit queue\n"); 2573 + sep_crypto_release(sctx, -EINVAL); 2574 + return -EINVAL; 2575 + } 2576 + return -EINPROGRESS; 2577 + } 2578 + 2579 + static int sep_sha1_final(struct ahash_request *req) 2580 + { 2581 + int error; 2582 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2583 + struct sep_hash_ctx *ctx = ahash_request_ctx(req); 2584 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 2585 + 
dev_dbg(&sctx->sep_used->pdev->dev, "doing sha1 final\n"); 2586 + 2587 + sctx->current_request = SHA1; 2588 + sctx->current_hash_req = req; 2589 + sctx->current_cypher_req = NULL; 2590 + ctx->hash_opmode = SEP_HASH_SHA1; 2591 + sctx->current_hash_stage = HASH_FINISH; 2592 + 2593 + spin_lock_irq(&queue_lock); 2594 + error = crypto_enqueue_request(&sep_queue, &req->base); 2595 + spin_unlock_irq(&queue_lock); 2596 + 2597 + if ((error != 0) && (error != -EINPROGRESS)) { 2598 + dev_warn(&sctx->sep_used->pdev->dev, 2599 + "sep sha1 final cant enqueue\n"); 2600 + sep_crypto_release(sctx, error); 2601 + return error; 2602 + } 2603 + 2604 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 2605 + (void *)&sep_queue); 2606 + if (error) { 2607 + dev_warn(&sctx->sep_used->pdev->dev, 2608 + "sha1 final cannot submit queue\n"); 2609 + sep_crypto_release(sctx, -EINVAL); 2610 + return -EINVAL; 2611 + } 2612 + return -EINPROGRESS; 2613 + 2614 + } 2615 + 2616 + static int sep_sha1_digest(struct ahash_request *req) 2617 + { 2618 + int error; 2619 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2620 + struct sep_hash_ctx *ctx = ahash_request_ctx(req); 2621 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 2622 + dev_dbg(&sctx->sep_used->pdev->dev, "doing sha1 digest\n"); 2623 + 2624 + sctx->current_request = SHA1; 2625 + sctx->current_hash_req = req; 2626 + sctx->current_cypher_req = NULL; 2627 + ctx->hash_opmode = SEP_HASH_SHA1; 2628 + sctx->current_hash_stage = HASH_DIGEST; 2629 + 2630 + spin_lock_irq(&queue_lock); 2631 + error = crypto_enqueue_request(&sep_queue, &req->base); 2632 + spin_unlock_irq(&queue_lock); 2633 + 2634 + if ((error != 0) && (error != -EINPROGRESS)) { 2635 + dev_warn(&sctx->sep_used->pdev->dev, 2636 + "sep sha1 digest cant enqueue\n"); 2637 + sep_crypto_release(sctx, error); 2638 + return error; 2639 + } 2640 + 2641 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 2642 + (void *)&sep_queue); 2643 + if (error) { 2644 
+ dev_warn(&sctx->sep_used->pdev->dev, 2645 + "sha1 digest cannot submit queue\n"); 2646 + sep_crypto_release(sctx, -EINVAL); 2647 + return -EINVAL; 2648 + } 2649 + return -EINPROGRESS; 2650 + 2651 + } 2652 + 2653 + static int sep_md5_init(struct ahash_request *req) 2654 + { 2655 + int error; 2656 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2657 + struct sep_hash_ctx *ctx = ahash_request_ctx(req); 2658 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 2659 + dev_dbg(&sctx->sep_used->pdev->dev, "doing md5 init\n"); 2660 + 2661 + sctx->current_request = MD5; 2662 + sctx->current_hash_req = req; 2663 + sctx->current_cypher_req = NULL; 2664 + ctx->hash_opmode = SEP_HASH_MD5; 2665 + sctx->current_hash_stage = HASH_INIT; 2666 + 2667 + spin_lock_irq(&queue_lock); 2668 + error = crypto_enqueue_request(&sep_queue, &req->base); 2669 + spin_unlock_irq(&queue_lock); 2670 + 2671 + if ((error != 0) && (error != -EINPROGRESS)) { 2672 + dev_warn(&sctx->sep_used->pdev->dev, 2673 + "sep md5 init cant enqueue\n"); 2674 + sep_crypto_release(sctx, error); 2675 + return error; 2676 + } 2677 + 2678 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 2679 + (void *)&sep_queue); 2680 + if (error) { 2681 + dev_warn(&sctx->sep_used->pdev->dev, 2682 + "md5 init cannot submit queue\n"); 2683 + sep_crypto_release(sctx, -EINVAL); 2684 + return -EINVAL; 2685 + } 2686 + return -EINPROGRESS; 2687 + 2688 + } 2689 + 2690 + static int sep_md5_update(struct ahash_request *req) 2691 + { 2692 + int error; 2693 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2694 + struct sep_hash_ctx *ctx = ahash_request_ctx(req); 2695 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 2696 + dev_dbg(&sctx->sep_used->pdev->dev, "doing md5 update\n"); 2697 + 2698 + sctx->current_request = MD5; 2699 + sctx->current_hash_req = req; 2700 + sctx->current_cypher_req = NULL; 2701 + ctx->hash_opmode = SEP_HASH_MD5; 2702 + sctx->current_hash_stage = HASH_UPDATE; 2703 + 2704 + 
spin_lock_irq(&queue_lock); 2705 + error = crypto_enqueue_request(&sep_queue, &req->base); 2706 + spin_unlock_irq(&queue_lock); 2707 + 2708 + if ((error != 0) && (error != -EINPROGRESS)) { 2709 + dev_warn(&sctx->sep_used->pdev->dev, 2710 + "md5 update cant enqueue\n"); 2711 + sep_crypto_release(sctx, error); 2712 + return error; 2713 + } 2714 + 2715 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 2716 + (void *)&sep_queue); 2717 + if (error) { 2718 + dev_warn(&sctx->sep_used->pdev->dev, 2719 + "md5 update cannot submit queue\n"); 2720 + sep_crypto_release(sctx, -EINVAL); 2721 + return -EINVAL; 2722 + } 2723 + return -EINPROGRESS; 2724 + } 2725 + 2726 + static int sep_md5_final(struct ahash_request *req) 2727 + { 2728 + int error; 2729 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2730 + struct sep_hash_ctx *ctx = ahash_request_ctx(req); 2731 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 2732 + dev_dbg(&sctx->sep_used->pdev->dev, "doing md5 final\n"); 2733 + 2734 + sctx->current_request = MD5; 2735 + sctx->current_hash_req = req; 2736 + sctx->current_cypher_req = NULL; 2737 + ctx->hash_opmode = SEP_HASH_MD5; 2738 + sctx->current_hash_stage = HASH_FINISH; 2739 + 2740 + spin_lock_irq(&queue_lock); 2741 + error = crypto_enqueue_request(&sep_queue, &req->base); 2742 + spin_unlock_irq(&queue_lock); 2743 + 2744 + if ((error != 0) && (error != -EINPROGRESS)) { 2745 + dev_warn(&sctx->sep_used->pdev->dev, 2746 + "sep md5 final cant enqueue\n"); 2747 + sep_crypto_release(sctx, error); 2748 + return error; 2749 + } 2750 + 2751 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 2752 + (void *)&sep_queue); 2753 + if (error) { 2754 + dev_warn(&sctx->sep_used->pdev->dev, 2755 + "md5 final cannot submit queue\n"); 2756 + sep_crypto_release(sctx, -EINVAL); 2757 + return -EINVAL; 2758 + } 2759 + return -EINPROGRESS; 2760 + 2761 + } 2762 + 2763 + static int sep_md5_digest(struct ahash_request *req) 2764 + { 2765 + int error; 2766 + 
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2767 + struct sep_hash_ctx *ctx = ahash_request_ctx(req); 2768 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 2769 + 2770 + dev_dbg(&sctx->sep_used->pdev->dev, "doing md5 digest\n"); 2771 + sctx->current_request = MD5; 2772 + sctx->current_hash_req = req; 2773 + sctx->current_cypher_req = NULL; 2774 + ctx->hash_opmode = SEP_HASH_MD5; 2775 + sctx->current_hash_stage = HASH_DIGEST; 2776 + 2777 + spin_lock_irq(&queue_lock); 2778 + error = crypto_enqueue_request(&sep_queue, &req->base); 2779 + spin_unlock_irq(&queue_lock); 2780 + 2781 + if ((error != 0) && (error != -EINPROGRESS)) { 2782 + dev_warn(&sctx->sep_used->pdev->dev, 2783 + "sep md5 digest cant enqueue\n"); 2784 + sep_crypto_release(sctx, error); 2785 + return error; 2786 + } 2787 + 2788 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 2789 + (void *)&sep_queue); 2790 + if (error) { 2791 + dev_warn(&sctx->sep_used->pdev->dev, 2792 + "md5 digest cannot submit queue\n"); 2793 + sep_crypto_release(sctx, -EINVAL); 2794 + return -EINVAL; 2795 + } 2796 + return -EINPROGRESS; 2797 + } 2798 + 2799 + static int sep_sha224_init(struct ahash_request *req) 2800 + { 2801 + int error; 2802 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2803 + struct sep_hash_ctx *ctx = ahash_request_ctx(req); 2804 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 2805 + dev_dbg(&sctx->sep_used->pdev->dev, "doing sha224 init\n"); 2806 + 2807 + sctx->current_request = SHA224; 2808 + sctx->current_hash_req = req; 2809 + sctx->current_cypher_req = NULL; 2810 + ctx->hash_opmode = SEP_HASH_SHA224; 2811 + sctx->current_hash_stage = HASH_INIT; 2812 + 2813 + spin_lock_irq(&queue_lock); 2814 + error = crypto_enqueue_request(&sep_queue, &req->base); 2815 + spin_unlock_irq(&queue_lock); 2816 + 2817 + if ((error != 0) && (error != -EINPROGRESS)) { 2818 + dev_warn(&sctx->sep_used->pdev->dev, 2819 + "sep sha224 init cant enqueue\n"); 2820 + sep_crypto_release(sctx, 
error); 2821 + return error; 2822 + } 2823 + 2824 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 2825 + (void *)&sep_queue); 2826 + if (error) { 2827 + dev_warn(&sctx->sep_used->pdev->dev, 2828 + "sha224 init cannot submit queue\n"); 2829 + sep_crypto_release(sctx, -EINVAL); 2830 + return -EINVAL; 2831 + } 2832 + return -EINPROGRESS; 2833 + } 2834 + 2835 + static int sep_sha224_update(struct ahash_request *req) 2836 + { 2837 + int error; 2838 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2839 + struct sep_hash_ctx *ctx = ahash_request_ctx(req); 2840 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 2841 + dev_dbg(&sctx->sep_used->pdev->dev, "doing sha224 update\n"); 2842 + 2843 + sctx->current_request = SHA224; 2844 + sctx->current_hash_req = req; 2845 + sctx->current_cypher_req = NULL; 2846 + ctx->hash_opmode = SEP_HASH_SHA224; 2847 + sctx->current_hash_stage = HASH_UPDATE; 2848 + 2849 + spin_lock_irq(&queue_lock); 2850 + error = crypto_enqueue_request(&sep_queue, &req->base); 2851 + spin_unlock_irq(&queue_lock); 2852 + 2853 + if ((error != 0) && (error != -EINPROGRESS)) { 2854 + dev_warn(&sctx->sep_used->pdev->dev, 2855 + "sep sha224 update cant enqueue\n"); 2856 + sep_crypto_release(sctx, error); 2857 + return error; 2858 + } 2859 + 2860 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 2861 + (void *)&sep_queue); 2862 + if (error) { 2863 + dev_warn(&sctx->sep_used->pdev->dev, 2864 + "sha224 update cannot submit queue\n"); 2865 + sep_crypto_release(sctx, -EINVAL); 2866 + return -EINVAL; 2867 + } 2868 + return -EINPROGRESS; 2869 + } 2870 + 2871 + static int sep_sha224_final(struct ahash_request *req) 2872 + { 2873 + int error; 2874 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2875 + struct sep_hash_ctx *ctx = ahash_request_ctx(req); 2876 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 2877 + dev_dbg(&sctx->sep_used->pdev->dev, "doing sha224 final\n"); 2878 + 2879 + sctx->current_request = SHA224; 
2880 + sctx->current_hash_req = req; 2881 + sctx->current_cypher_req = NULL; 2882 + ctx->hash_opmode = SEP_HASH_SHA224; 2883 + sctx->current_hash_stage = HASH_FINISH; 2884 + 2885 + spin_lock_irq(&queue_lock); 2886 + error = crypto_enqueue_request(&sep_queue, &req->base); 2887 + spin_unlock_irq(&queue_lock); 2888 + 2889 + if ((error != 0) && (error != -EINPROGRESS)) { 2890 + dev_warn(&sctx->sep_used->pdev->dev, 2891 + "sep sha224 final cant enqueue\n"); 2892 + sep_crypto_release(sctx, error); 2893 + return error; 2894 + } 2895 + 2896 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 2897 + (void *)&sep_queue); 2898 + if (error) { 2899 + dev_warn(&sctx->sep_used->pdev->dev, 2900 + "sha224 final cannot submit queue\n"); 2901 + sep_crypto_release(sctx, -EINVAL); 2902 + return -EINVAL; 2903 + } 2904 + return -EINPROGRESS; 2905 + } 2906 + 2907 + static int sep_sha224_digest(struct ahash_request *req) 2908 + { 2909 + int error; 2910 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2911 + struct sep_hash_ctx *ctx = ahash_request_ctx(req); 2912 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 2913 + 2914 + dev_dbg(&sctx->sep_used->pdev->dev, "doing 224 digest\n"); 2915 + sctx->current_request = SHA224; 2916 + sctx->current_hash_req = req; 2917 + sctx->current_cypher_req = NULL; 2918 + ctx->hash_opmode = SEP_HASH_SHA224; 2919 + sctx->current_hash_stage = HASH_DIGEST; 2920 + 2921 + spin_lock_irq(&queue_lock); 2922 + error = crypto_enqueue_request(&sep_queue, &req->base); 2923 + spin_unlock_irq(&queue_lock); 2924 + 2925 + if ((error != 0) && (error != -EINPROGRESS)) { 2926 + dev_warn(&sctx->sep_used->pdev->dev, 2927 + "sep sha224 digest cant enqueue\n"); 2928 + sep_crypto_release(sctx, error); 2929 + return error; 2930 + } 2931 + 2932 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 2933 + (void *)&sep_queue); 2934 + if (error) { 2935 + dev_warn(&sctx->sep_used->pdev->dev, 2936 + "sha256 digest cannot submit queue\n"); 2937 + 
sep_crypto_release(sctx, -EINVAL); 2938 + return -EINVAL; 2939 + } 2940 + return -EINPROGRESS; 2941 + } 2942 + 2943 + static int sep_sha256_init(struct ahash_request *req) 2944 + { 2945 + int error; 2946 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2947 + struct sep_hash_ctx *ctx = ahash_request_ctx(req); 2948 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 2949 + dev_dbg(&sctx->sep_used->pdev->dev, "doing sha256 init\n"); 2950 + 2951 + sctx->current_request = SHA256; 2952 + sctx->current_hash_req = req; 2953 + sctx->current_cypher_req = NULL; 2954 + ctx->hash_opmode = SEP_HASH_SHA256; 2955 + sctx->current_hash_stage = HASH_INIT; 2956 + 2957 + spin_lock_irq(&queue_lock); 2958 + error = crypto_enqueue_request(&sep_queue, &req->base); 2959 + spin_unlock_irq(&queue_lock); 2960 + 2961 + if ((error != 0) && (error != -EINPROGRESS)) { 2962 + dev_warn(&sctx->sep_used->pdev->dev, 2963 + "sep sha256 init cant enqueue\n"); 2964 + sep_crypto_release(sctx, error); 2965 + return error; 2966 + } 2967 + 2968 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 2969 + (void *)&sep_queue); 2970 + if (error) { 2971 + dev_warn(&sctx->sep_used->pdev->dev, 2972 + "sha256 init cannot submit queue\n"); 2973 + sep_crypto_release(sctx, -EINVAL); 2974 + return -EINVAL; 2975 + } 2976 + return -EINPROGRESS; 2977 + } 2978 + 2979 + static int sep_sha256_update(struct ahash_request *req) 2980 + { 2981 + int error; 2982 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2983 + struct sep_hash_ctx *ctx = ahash_request_ctx(req); 2984 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 2985 + dev_dbg(&sctx->sep_used->pdev->dev, "doing sha256 update\n"); 2986 + 2987 + sctx->current_request = SHA256; 2988 + sctx->current_hash_req = req; 2989 + sctx->current_cypher_req = NULL; 2990 + ctx->hash_opmode = SEP_HASH_SHA256; 2991 + sctx->current_hash_stage = HASH_UPDATE; 2992 + 2993 + spin_lock_irq(&queue_lock); 2994 + error = crypto_enqueue_request(&sep_queue, 
&req->base); 2995 + spin_unlock_irq(&queue_lock); 2996 + 2997 + if ((error != 0) && (error != -EINPROGRESS)) { 2998 + dev_warn(&sctx->sep_used->pdev->dev, 2999 + "sep sha256 update cant enqueue\n"); 3000 + sep_crypto_release(sctx, error); 3001 + return error; 3002 + } 3003 + 3004 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 3005 + (void *)&sep_queue); 3006 + if (error) { 3007 + dev_warn(&sctx->sep_used->pdev->dev, 3008 + "sha256 update cannot submit queue\n"); 3009 + sep_crypto_release(sctx, -EINVAL); 3010 + return -EINVAL; 3011 + } 3012 + return -EINPROGRESS; 3013 + } 3014 + 3015 + static int sep_sha256_final(struct ahash_request *req) 3016 + { 3017 + int error; 3018 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 3019 + struct sep_hash_ctx *ctx = ahash_request_ctx(req); 3020 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 3021 + dev_dbg(&sctx->sep_used->pdev->dev, "doing sha256 final\n"); 3022 + 3023 + sctx->current_request = SHA256; 3024 + sctx->current_hash_req = req; 3025 + sctx->current_cypher_req = NULL; 3026 + ctx->hash_opmode = SEP_HASH_SHA256; 3027 + sctx->current_hash_stage = HASH_FINISH; 3028 + 3029 + spin_lock_irq(&queue_lock); 3030 + error = crypto_enqueue_request(&sep_queue, &req->base); 3031 + spin_unlock_irq(&queue_lock); 3032 + 3033 + if ((error != 0) && (error != -EINPROGRESS)) { 3034 + dev_warn(&sctx->sep_used->pdev->dev, 3035 + "sep sha256 final cant enqueue\n"); 3036 + sep_crypto_release(sctx, error); 3037 + return error; 3038 + } 3039 + 3040 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 3041 + (void *)&sep_queue); 3042 + if (error) { 3043 + dev_warn(&sctx->sep_used->pdev->dev, 3044 + "sha256 final cannot submit queue\n"); 3045 + sep_crypto_release(sctx, -EINVAL); 3046 + return -EINVAL; 3047 + } 3048 + return -EINPROGRESS; 3049 + } 3050 + 3051 + static int sep_sha256_digest(struct ahash_request *req) 3052 + { 3053 + int error; 3054 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 
3055 + struct sep_hash_ctx *ctx = ahash_request_ctx(req); 3056 + struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); 3057 + 3058 + dev_dbg(&sctx->sep_used->pdev->dev, "doing sha256 digest\n"); 3059 + sctx->current_request = SHA256; 3060 + sctx->current_hash_req = req; 3061 + sctx->current_cypher_req = NULL; 3062 + ctx->hash_opmode = SEP_HASH_SHA256; 3063 + sctx->current_hash_stage = HASH_DIGEST; 3064 + 3065 + spin_lock_irq(&queue_lock); 3066 + error = crypto_enqueue_request(&sep_queue, &req->base); 3067 + spin_unlock_irq(&queue_lock); 3068 + 3069 + if ((error != 0) && (error != -EINPROGRESS)) { 3070 + dev_warn(&sctx->sep_used->pdev->dev, 3071 + "sep sha256 digest cant enqueue\n"); 3072 + sep_crypto_release(sctx, error); 3073 + return error; 3074 + } 3075 + 3076 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 3077 + (void *)&sep_queue); 3078 + if (error) { 3079 + dev_warn(&sctx->sep_used->pdev->dev, 3080 + "sha256 digest cannot submit queue\n"); 3081 + sep_crypto_release(sctx, -EINVAL); 3082 + return -EINVAL; 3083 + } 3084 + return -EINPROGRESS; 3085 + } 3086 + 3087 + static int sep_crypto_init(struct crypto_tfm *tfm) 3088 + { 3089 + struct sep_system_ctx *sctx = crypto_tfm_ctx(tfm); 3090 + const char *alg_name = crypto_tfm_alg_name(tfm); 3091 + 3092 + sctx->sep_used = sep_dev; 3093 + 3094 + if (alg_name == NULL) 3095 + dev_dbg(&sctx->sep_used->pdev->dev, "alg is NULL\n"); 3096 + else 3097 + dev_dbg(&sctx->sep_used->pdev->dev, "alg is %s\n", alg_name); 3098 + 3099 + tfm->crt_ablkcipher.reqsize = sizeof(struct sep_block_ctx); 3100 + dev_dbg(&sctx->sep_used->pdev->dev, "sep_crypto_init\n"); 3101 + return 0; 3102 + } 3103 + 3104 + static void sep_crypto_exit(struct crypto_tfm *tfm) 3105 + { 3106 + struct sep_system_ctx *sctx = crypto_tfm_ctx(tfm); 3107 + dev_dbg(&sctx->sep_used->pdev->dev, "sep_crypto_exit\n"); 3108 + sctx->sep_used = NULL; 3109 + } 3110 + 3111 + static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 3112 + unsigned 
int keylen) 3113 + { 3114 + struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm); 3115 + 3116 + dev_dbg(&sctx->sep_used->pdev->dev, "sep aes setkey\n"); 3117 + 3118 + switch (keylen) { 3119 + case SEP_AES_KEY_128_SIZE: 3120 + sctx->aes_key_size = AES_128; 3121 + break; 3122 + case SEP_AES_KEY_192_SIZE: 3123 + sctx->aes_key_size = AES_192; 3124 + break; 3125 + case SEP_AES_KEY_256_SIZE: 3126 + sctx->aes_key_size = AES_256; 3127 + break; 3128 + case SEP_AES_KEY_512_SIZE: 3129 + sctx->aes_key_size = AES_512; 3130 + break; 3131 + default: 3132 + dev_warn(&sctx->sep_used->pdev->dev, "sep aes key size %x\n", 3133 + keylen); 3134 + return -EINVAL; 3135 + } 3136 + 3137 + memset(&sctx->key.aes, 0, sizeof(u32) * 3138 + SEP_AES_MAX_KEY_SIZE_WORDS); 3139 + memcpy(&sctx->key.aes, key, keylen); 3140 + sctx->keylen = keylen; 3141 + /* Indicate to encrypt/decrypt function to send key to SEP */ 3142 + sctx->key_sent = 0; 3143 + sctx->last_block = 0; 3144 + 3145 + return 0; 3146 + } 3147 + 3148 + static int sep_aes_ecb_encrypt(struct ablkcipher_request *req) 3149 + { 3150 + int error; 3151 + struct sep_block_ctx *bctx = ablkcipher_request_ctx(req); 3152 + struct sep_system_ctx *sctx = crypto_ablkcipher_ctx( 3153 + crypto_ablkcipher_reqtfm(req)); 3154 + 3155 + dev_dbg(&sctx->sep_used->pdev->dev, "sep aes ecb encrypt\n"); 3156 + sctx->current_request = AES_ECB; 3157 + sctx->current_hash_req = NULL; 3158 + sctx->current_cypher_req = req; 3159 + bctx->aes_encmode = SEP_AES_ENCRYPT; 3160 + bctx->aes_opmode = SEP_AES_ECB; 3161 + bctx->init_opcode = SEP_AES_INIT_OPCODE; 3162 + bctx->block_opcode = SEP_AES_BLOCK_OPCODE; 3163 + 3164 + spin_lock_irq(&queue_lock); 3165 + error = crypto_enqueue_request(&sep_queue, &req->base); 3166 + spin_unlock_irq(&queue_lock); 3167 + 3168 + if ((error != 0) && (error != -EINPROGRESS)) { 3169 + dev_warn(&sctx->sep_used->pdev->dev, 3170 + "sep_aes_ecb_encrypt cant enqueue\n"); 3171 + sep_crypto_release(sctx, error); 3172 + return error; 3173 + } 3174 + 
3175 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 3176 + (void *)&sep_queue); 3177 + if (error) { 3178 + dev_warn(&sctx->sep_used->pdev->dev, 3179 + "sep_aes_ecb_encrypt cannot submit queue\n"); 3180 + sep_crypto_release(sctx, -EINVAL); 3181 + return -EINVAL; 3182 + } 3183 + return -EINPROGRESS; 3184 + } 3185 + 3186 + static int sep_aes_ecb_decrypt(struct ablkcipher_request *req) 3187 + { 3188 + int error; 3189 + struct sep_block_ctx *bctx = ablkcipher_request_ctx(req); 3190 + struct sep_system_ctx *sctx = crypto_ablkcipher_ctx( 3191 + crypto_ablkcipher_reqtfm(req)); 3192 + 3193 + dev_dbg(&sctx->sep_used->pdev->dev, "sep aes ecb decrypt\n"); 3194 + sctx->current_request = AES_ECB; 3195 + sctx->current_hash_req = NULL; 3196 + sctx->current_cypher_req = req; 3197 + bctx->aes_encmode = SEP_AES_DECRYPT; 3198 + bctx->aes_opmode = SEP_AES_ECB; 3199 + bctx->init_opcode = SEP_AES_INIT_OPCODE; 3200 + bctx->block_opcode = SEP_AES_BLOCK_OPCODE; 3201 + 3202 + spin_lock_irq(&queue_lock); 3203 + error = crypto_enqueue_request(&sep_queue, &req->base); 3204 + spin_unlock_irq(&queue_lock); 3205 + 3206 + if ((error != 0) && (error != -EINPROGRESS)) { 3207 + dev_warn(&sctx->sep_used->pdev->dev, 3208 + "sep_aes_ecb_decrypt cant enqueue\n"); 3209 + sep_crypto_release(sctx, error); 3210 + return error; 3211 + } 3212 + 3213 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 3214 + (void *)&sep_queue); 3215 + if (error) { 3216 + dev_warn(&sctx->sep_used->pdev->dev, 3217 + "sep_aes_ecb_decrypt cannot submit queue\n"); 3218 + sep_crypto_release(sctx, -EINVAL); 3219 + return -EINVAL; 3220 + } 3221 + return -EINPROGRESS; 3222 + } 3223 + 3224 + static int sep_aes_cbc_encrypt(struct ablkcipher_request *req) 3225 + { 3226 + int error; 3227 + struct sep_block_ctx *bctx = ablkcipher_request_ctx(req); 3228 + struct sep_system_ctx *sctx = crypto_ablkcipher_ctx( 3229 + crypto_ablkcipher_reqtfm(req)); 3230 + 3231 + dev_dbg(&sctx->sep_used->pdev->dev, "sep aes cbc 
encrypt\n"); 3232 + sctx->current_request = AES_CBC; 3233 + sctx->current_hash_req = NULL; 3234 + sctx->current_cypher_req = req; 3235 + bctx->aes_encmode = SEP_AES_ENCRYPT; 3236 + bctx->aes_opmode = SEP_AES_CBC; 3237 + bctx->init_opcode = SEP_AES_INIT_OPCODE; 3238 + bctx->block_opcode = SEP_AES_BLOCK_OPCODE; 3239 + 3240 + spin_lock_irq(&queue_lock); 3241 + error = crypto_enqueue_request(&sep_queue, &req->base); 3242 + spin_unlock_irq(&queue_lock); 3243 + 3244 + if ((error != 0) && (error != -EINPROGRESS)) { 3245 + dev_warn(&sctx->sep_used->pdev->dev, 3246 + "sep_aes_cbc_encrypt cant enqueue\n"); 3247 + sep_crypto_release(sctx, error); 3248 + return error; 3249 + } 3250 + 3251 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 3252 + (void *)&sep_queue); 3253 + if (error) { 3254 + dev_warn(&sctx->sep_used->pdev->dev, 3255 + "sep_aes_cbc_encrypt cannot submit queue\n"); 3256 + sep_crypto_release(sctx, -EINVAL); 3257 + return -EINVAL; 3258 + } 3259 + return -EINPROGRESS; 3260 + } 3261 + 3262 + static int sep_aes_cbc_decrypt(struct ablkcipher_request *req) 3263 + { 3264 + int error; 3265 + struct sep_block_ctx *bctx = ablkcipher_request_ctx(req); 3266 + struct sep_system_ctx *sctx = crypto_ablkcipher_ctx( 3267 + crypto_ablkcipher_reqtfm(req)); 3268 + 3269 + dev_dbg(&sctx->sep_used->pdev->dev, "sep aes cbc decrypt\n"); 3270 + sctx->current_request = AES_CBC; 3271 + sctx->current_hash_req = NULL; 3272 + sctx->current_cypher_req = req; 3273 + bctx->aes_encmode = SEP_AES_DECRYPT; 3274 + bctx->aes_opmode = SEP_AES_CBC; 3275 + bctx->init_opcode = SEP_AES_INIT_OPCODE; 3276 + bctx->block_opcode = SEP_AES_BLOCK_OPCODE; 3277 + 3278 + spin_lock_irq(&queue_lock); 3279 + error = crypto_enqueue_request(&sep_queue, &req->base); 3280 + spin_unlock_irq(&queue_lock); 3281 + 3282 + if ((error != 0) && (error != -EINPROGRESS)) { 3283 + dev_warn(&sctx->sep_used->pdev->dev, 3284 + "sep_aes_cbc_decrypt cant enqueue\n"); 3285 + sep_crypto_release(sctx, error); 3286 + return 
error; 3287 + } 3288 + 3289 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 3290 + (void *)&sep_queue); 3291 + if (error) { 3292 + dev_warn(&sctx->sep_used->pdev->dev, 3293 + "sep_aes_cbc_decrypt cannot submit queue\n"); 3294 + sep_crypto_release(sctx, -EINVAL); 3295 + return -EINVAL; 3296 + } 3297 + return -EINPROGRESS; 3298 + } 3299 + 3300 + static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 3301 + unsigned int keylen) 3302 + { 3303 + struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm); 3304 + struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm); 3305 + u32 *flags = &ctfm->crt_flags; 3306 + 3307 + dev_dbg(&sctx->sep_used->pdev->dev, "sep des setkey\n"); 3308 + 3309 + switch (keylen) { 3310 + case DES_KEY_SIZE: 3311 + sctx->des_nbr_keys = DES_KEY_1; 3312 + break; 3313 + case DES_KEY_SIZE * 2: 3314 + sctx->des_nbr_keys = DES_KEY_2; 3315 + break; 3316 + case DES_KEY_SIZE * 3: 3317 + sctx->des_nbr_keys = DES_KEY_3; 3318 + break; 3319 + default: 3320 + dev_dbg(&sctx->sep_used->pdev->dev, "invalid key size %x\n", 3321 + keylen); 3322 + return -EINVAL; 3323 + } 3324 + 3325 + if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY) && 3326 + (sep_weak_key(key, keylen))) { 3327 + 3328 + *flags |= CRYPTO_TFM_RES_WEAK_KEY; 3329 + dev_warn(&sctx->sep_used->pdev->dev, "weak key\n"); 3330 + return -EINVAL; 3331 + } 3332 + 3333 + memset(&sctx->key.des, 0, sizeof(struct sep_des_key)); 3334 + memcpy(&sctx->key.des.key1, key, keylen); 3335 + sctx->keylen = keylen; 3336 + /* Indicate to encrypt/decrypt function to send key to SEP */ 3337 + sctx->key_sent = 0; 3338 + sctx->last_block = 0; 3339 + 3340 + return 0; 3341 + } 3342 + 3343 + static int sep_des_ebc_encrypt(struct ablkcipher_request *req) 3344 + { 3345 + int error; 3346 + struct sep_block_ctx *bctx = ablkcipher_request_ctx(req); 3347 + struct sep_system_ctx *sctx = crypto_ablkcipher_ctx( 3348 + crypto_ablkcipher_reqtfm(req)); 3349 + 3350 + dev_dbg(&sctx->sep_used->pdev->dev, "sep des ecb 
encrypt\n"); 3351 + sctx->current_request = DES_ECB; 3352 + sctx->current_hash_req = NULL; 3353 + sctx->current_cypher_req = req; 3354 + bctx->des_encmode = SEP_DES_ENCRYPT; 3355 + bctx->des_opmode = SEP_DES_ECB; 3356 + bctx->init_opcode = SEP_DES_INIT_OPCODE; 3357 + bctx->block_opcode = SEP_DES_BLOCK_OPCODE; 3358 + 3359 + spin_lock_irq(&queue_lock); 3360 + error = crypto_enqueue_request(&sep_queue, &req->base); 3361 + spin_unlock_irq(&queue_lock); 3362 + 3363 + if ((error != 0) && (error != -EINPROGRESS)) { 3364 + dev_warn(&sctx->sep_used->pdev->dev, 3365 + "sep_des_ecb_encrypt cant enqueue\n"); 3366 + sep_crypto_release(sctx, error); 3367 + return error; 3368 + } 3369 + 3370 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 3371 + (void *)&sep_queue); 3372 + if (error) { 3373 + dev_warn(&sctx->sep_used->pdev->dev, 3374 + "sep_des_ecb_encrypt cannot submit queue\n"); 3375 + sep_crypto_release(sctx, -EINVAL); 3376 + return -EINVAL; 3377 + } 3378 + return -EINPROGRESS; 3379 + } 3380 + 3381 + static int sep_des_ebc_decrypt(struct ablkcipher_request *req) 3382 + { 3383 + int error; 3384 + struct sep_block_ctx *bctx = ablkcipher_request_ctx(req); 3385 + struct sep_system_ctx *sctx = crypto_ablkcipher_ctx( 3386 + crypto_ablkcipher_reqtfm(req)); 3387 + 3388 + dev_dbg(&sctx->sep_used->pdev->dev, "sep des ecb decrypt\n"); 3389 + sctx->current_request = DES_ECB; 3390 + sctx->current_hash_req = NULL; 3391 + sctx->current_cypher_req = req; 3392 + bctx->des_encmode = SEP_DES_DECRYPT; 3393 + bctx->des_opmode = SEP_DES_ECB; 3394 + bctx->init_opcode = SEP_DES_INIT_OPCODE; 3395 + bctx->block_opcode = SEP_DES_BLOCK_OPCODE; 3396 + 3397 + spin_lock_irq(&queue_lock); 3398 + error = crypto_enqueue_request(&sep_queue, &req->base); 3399 + spin_unlock_irq(&queue_lock); 3400 + 3401 + if ((error != 0) && (error != -EINPROGRESS)) { 3402 + dev_warn(&sctx->sep_used->pdev->dev, 3403 + "sep_des_ecb_decrypt cant enqueue\n"); 3404 + sep_crypto_release(sctx, error); 3405 + return 
error; 3406 + } 3407 + 3408 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 3409 + (void *)&sep_queue); 3410 + if (error) { 3411 + dev_warn(&sctx->sep_used->pdev->dev, 3412 + "sep_des_ecb_decrypt cannot submit queue\n"); 3413 + sep_crypto_release(sctx, -EINVAL); 3414 + return -EINVAL; 3415 + } 3416 + return -EINPROGRESS; 3417 + } 3418 + 3419 + static int sep_des_cbc_encrypt(struct ablkcipher_request *req) 3420 + { 3421 + int error; 3422 + struct sep_block_ctx *bctx = ablkcipher_request_ctx(req); 3423 + struct sep_system_ctx *sctx = crypto_ablkcipher_ctx( 3424 + crypto_ablkcipher_reqtfm(req)); 3425 + 3426 + dev_dbg(&sctx->sep_used->pdev->dev, "sep des cbc encrypt\n"); 3427 + sctx->current_request = DES_CBC; 3428 + sctx->current_hash_req = NULL; 3429 + sctx->current_cypher_req = req; 3430 + bctx->des_encmode = SEP_DES_ENCRYPT; 3431 + bctx->des_opmode = SEP_DES_CBC; 3432 + bctx->init_opcode = SEP_DES_INIT_OPCODE; 3433 + bctx->block_opcode = SEP_DES_BLOCK_OPCODE; 3434 + 3435 + spin_lock_irq(&queue_lock); 3436 + error = crypto_enqueue_request(&sep_queue, &req->base); 3437 + spin_unlock_irq(&queue_lock); 3438 + 3439 + if ((error != 0) && (error != -EINPROGRESS)) { 3440 + dev_warn(&sctx->sep_used->pdev->dev, 3441 + "sep_des_cbc_encrypt cant enqueue\n"); 3442 + sep_crypto_release(sctx, error); 3443 + return error; 3444 + } 3445 + 3446 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 3447 + (void *)&sep_queue); 3448 + if (error) { 3449 + dev_warn(&sctx->sep_used->pdev->dev, 3450 + "sep_des_cbc_encrypt cannot submit queue\n"); 3451 + sep_crypto_release(sctx, -EINVAL); 3452 + return -EINVAL; 3453 + } 3454 + return -EINPROGRESS; 3455 + } 3456 + 3457 + static int sep_des_cbc_decrypt(struct ablkcipher_request *req) 3458 + { 3459 + int error; 3460 + struct sep_block_ctx *bctx = ablkcipher_request_ctx(req); 3461 + struct sep_system_ctx *sctx = crypto_ablkcipher_ctx( 3462 + crypto_ablkcipher_reqtfm(req)); 3463 + 3464 + 
dev_dbg(&sctx->sep_used->pdev->dev, "sep des cbc decrypt\n"); 3465 + sctx->current_request = DES_CBC; 3466 + sctx->current_hash_req = NULL; 3467 + sctx->current_cypher_req = req; 3468 + bctx->des_encmode = SEP_DES_DECRYPT; 3469 + bctx->des_opmode = SEP_DES_CBC; 3470 + bctx->init_opcode = SEP_DES_INIT_OPCODE; 3471 + bctx->block_opcode = SEP_DES_BLOCK_OPCODE; 3472 + 3473 + spin_lock_irq(&queue_lock); 3474 + error = crypto_enqueue_request(&sep_queue, &req->base); 3475 + spin_unlock_irq(&queue_lock); 3476 + 3477 + if ((error != 0) && (error != -EINPROGRESS)) { 3478 + dev_warn(&sctx->sep_used->pdev->dev, 3479 + "sep_des_cbc_decrypt cant enqueue\n"); 3480 + sep_crypto_release(sctx, error); 3481 + return error; 3482 + } 3483 + 3484 + error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, 3485 + (void *)&sep_queue); 3486 + if (error) { 3487 + dev_warn(&sctx->sep_used->pdev->dev, 3488 + "sep_des_cbc_decrypt cannot submit queue\n"); 3489 + sep_crypto_release(sctx, -EINVAL); 3490 + return -EINVAL; 3491 + } 3492 + return -EINPROGRESS; 3493 + } 3494 + 3495 + static struct ahash_alg hash_algs[] = { 3496 + { 3497 + .init = sep_sha1_init, 3498 + .update = sep_sha1_update, 3499 + .final = sep_sha1_final, 3500 + .digest = sep_sha1_digest, 3501 + .halg = { 3502 + .digestsize = SHA1_DIGEST_SIZE, 3503 + .base = { 3504 + .cra_name = "sha1", 3505 + .cra_driver_name = "sha1-sep", 3506 + .cra_priority = 100, 3507 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 3508 + CRYPTO_ALG_ASYNC, 3509 + .cra_blocksize = SHA1_BLOCK_SIZE, 3510 + .cra_ctxsize = sizeof(struct sep_system_ctx), 3511 + .cra_alignmask = 0, 3512 + .cra_module = THIS_MODULE, 3513 + .cra_init = sep_hash_cra_init, 3514 + .cra_exit = sep_hash_cra_exit, 3515 + } 3516 + } 3517 + }, 3518 + { 3519 + .init = sep_md5_init, 3520 + .update = sep_md5_update, 3521 + .final = sep_md5_final, 3522 + .digest = sep_md5_digest, 3523 + .halg = { 3524 + .digestsize = MD5_DIGEST_SIZE, 3525 + .base = { 3526 + .cra_name = "md5", 3527 + 
.cra_driver_name = "md5-sep", 3528 + .cra_priority = 100, 3529 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 3530 + CRYPTO_ALG_ASYNC, 3531 + .cra_blocksize = SHA1_BLOCK_SIZE, 3532 + .cra_ctxsize = sizeof(struct sep_system_ctx), 3533 + .cra_alignmask = 0, 3534 + .cra_module = THIS_MODULE, 3535 + .cra_init = sep_hash_cra_init, 3536 + .cra_exit = sep_hash_cra_exit, 3537 + } 3538 + } 3539 + }, 3540 + { 3541 + .init = sep_sha224_init, 3542 + .update = sep_sha224_update, 3543 + .final = sep_sha224_final, 3544 + .digest = sep_sha224_digest, 3545 + .halg = { 3546 + .digestsize = SHA224_DIGEST_SIZE, 3547 + .base = { 3548 + .cra_name = "sha224", 3549 + .cra_driver_name = "sha224-sep", 3550 + .cra_priority = 100, 3551 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 3552 + CRYPTO_ALG_ASYNC, 3553 + .cra_blocksize = SHA224_BLOCK_SIZE, 3554 + .cra_ctxsize = sizeof(struct sep_system_ctx), 3555 + .cra_alignmask = 0, 3556 + .cra_module = THIS_MODULE, 3557 + .cra_init = sep_hash_cra_init, 3558 + .cra_exit = sep_hash_cra_exit, 3559 + } 3560 + } 3561 + }, 3562 + { 3563 + .init = sep_sha256_init, 3564 + .update = sep_sha256_update, 3565 + .final = sep_sha256_final, 3566 + .digest = sep_sha256_digest, 3567 + .halg = { 3568 + .digestsize = SHA256_DIGEST_SIZE, 3569 + .base = { 3570 + .cra_name = "sha256", 3571 + .cra_driver_name = "sha256-sep", 3572 + .cra_priority = 100, 3573 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 3574 + CRYPTO_ALG_ASYNC, 3575 + .cra_blocksize = SHA256_BLOCK_SIZE, 3576 + .cra_ctxsize = sizeof(struct sep_system_ctx), 3577 + .cra_alignmask = 0, 3578 + .cra_module = THIS_MODULE, 3579 + .cra_init = sep_hash_cra_init, 3580 + .cra_exit = sep_hash_cra_exit, 3581 + } 3582 + } 3583 + } 3584 + }; 3585 + 3586 + static struct crypto_alg crypto_algs[] = { 3587 + { 3588 + .cra_name = "ecb(aes)", 3589 + .cra_driver_name = "ecb-aes-sep", 3590 + .cra_priority = 100, 3591 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 3592 + .cra_blocksize = AES_BLOCK_SIZE, 3593 + .cra_ctxsize = 
sizeof(struct sep_system_ctx), 3594 + .cra_alignmask = 0, 3595 + .cra_type = &crypto_ablkcipher_type, 3596 + .cra_module = THIS_MODULE, 3597 + .cra_init = sep_crypto_init, 3598 + .cra_exit = sep_crypto_exit, 3599 + .cra_u.ablkcipher = { 3600 + .min_keysize = AES_MIN_KEY_SIZE, 3601 + .max_keysize = AES_MAX_KEY_SIZE, 3602 + .setkey = sep_aes_setkey, 3603 + .encrypt = sep_aes_ecb_encrypt, 3604 + .decrypt = sep_aes_ecb_decrypt, 3605 + } 3606 + }, 3607 + { 3608 + .cra_name = "cbc(aes)", 3609 + .cra_driver_name = "cbc-aes-sep", 3610 + .cra_priority = 100, 3611 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 3612 + .cra_blocksize = AES_BLOCK_SIZE, 3613 + .cra_ctxsize = sizeof(struct sep_system_ctx), 3614 + .cra_alignmask = 0, 3615 + .cra_type = &crypto_ablkcipher_type, 3616 + .cra_module = THIS_MODULE, 3617 + .cra_init = sep_crypto_init, 3618 + .cra_exit = sep_crypto_exit, 3619 + .cra_u.ablkcipher = { 3620 + .min_keysize = AES_MIN_KEY_SIZE, 3621 + .max_keysize = AES_MAX_KEY_SIZE, 3622 + .setkey = sep_aes_setkey, 3623 + .encrypt = sep_aes_cbc_encrypt, 3624 + .decrypt = sep_aes_cbc_decrypt, 3625 + } 3626 + }, 3627 + { 3628 + .cra_name = "ebc(des)", 3629 + .cra_driver_name = "ebc-des-sep", 3630 + .cra_priority = 100, 3631 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 3632 + .cra_blocksize = DES_BLOCK_SIZE, 3633 + .cra_ctxsize = sizeof(struct sep_system_ctx), 3634 + .cra_alignmask = 0, 3635 + .cra_type = &crypto_ablkcipher_type, 3636 + .cra_module = THIS_MODULE, 3637 + .cra_init = sep_crypto_init, 3638 + .cra_exit = sep_crypto_exit, 3639 + .cra_u.ablkcipher = { 3640 + .min_keysize = DES_KEY_SIZE, 3641 + .max_keysize = DES_KEY_SIZE, 3642 + .setkey = sep_des_setkey, 3643 + .encrypt = sep_des_ebc_encrypt, 3644 + .decrypt = sep_des_ebc_decrypt, 3645 + } 3646 + }, 3647 + { 3648 + .cra_name = "cbc(des)", 3649 + .cra_driver_name = "cbc-des-sep", 3650 + .cra_priority = 100, 3651 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 3652 + 
.cra_blocksize = DES_BLOCK_SIZE, 3653 + .cra_ctxsize = sizeof(struct sep_system_ctx), 3654 + .cra_alignmask = 0, 3655 + .cra_type = &crypto_ablkcipher_type, 3656 + .cra_module = THIS_MODULE, 3657 + .cra_init = sep_crypto_init, 3658 + .cra_exit = sep_crypto_exit, 3659 + .cra_u.ablkcipher = { 3660 + .min_keysize = DES_KEY_SIZE, 3661 + .max_keysize = DES_KEY_SIZE, 3662 + .setkey = sep_des_setkey, 3663 + .encrypt = sep_des_cbc_encrypt, 3664 + .decrypt = sep_des_cbc_decrypt, 3665 + } 3666 + }, 3667 + { 3668 + .cra_name = "ebc(des3-ede)", 3669 + .cra_driver_name = "ebc-des3-ede-sep", 3670 + .cra_priority = 100, 3671 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 3672 + .cra_blocksize = DES_BLOCK_SIZE, 3673 + .cra_ctxsize = sizeof(struct sep_system_ctx), 3674 + .cra_alignmask = 0, 3675 + .cra_type = &crypto_ablkcipher_type, 3676 + .cra_module = THIS_MODULE, 3677 + .cra_init = sep_crypto_init, 3678 + .cra_exit = sep_crypto_exit, 3679 + .cra_u.ablkcipher = { 3680 + .min_keysize = DES3_EDE_KEY_SIZE, 3681 + .max_keysize = DES3_EDE_KEY_SIZE, 3682 + .setkey = sep_des_setkey, 3683 + .encrypt = sep_des_ebc_encrypt, 3684 + .decrypt = sep_des_ebc_decrypt, 3685 + } 3686 + }, 3687 + { 3688 + .cra_name = "cbc(des3-ede)", 3689 + .cra_driver_name = "cbc-des3--ede-sep", 3690 + .cra_priority = 100, 3691 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 3692 + .cra_blocksize = DES_BLOCK_SIZE, 3693 + .cra_ctxsize = sizeof(struct sep_system_ctx), 3694 + .cra_alignmask = 0, 3695 + .cra_type = &crypto_ablkcipher_type, 3696 + .cra_module = THIS_MODULE, 3697 + .cra_init = sep_crypto_init, 3698 + .cra_exit = sep_crypto_exit, 3699 + .cra_u.ablkcipher = { 3700 + .min_keysize = DES3_EDE_KEY_SIZE, 3701 + .max_keysize = DES3_EDE_KEY_SIZE, 3702 + .setkey = sep_des_setkey, 3703 + .encrypt = sep_des_cbc_encrypt, 3704 + .decrypt = sep_des_cbc_decrypt, 3705 + } 3706 + } 3707 + }; 3708 + 3709 + int sep_crypto_setup(void) 3710 + { 3711 + int err, i, j, k; 3712 + 
tasklet_init(&sep_dev->finish_tasklet, sep_finish, 3713 + (unsigned long)sep_dev); 3714 + 3715 + crypto_init_queue(&sep_queue, SEP_QUEUE_LENGTH); 3716 + 3717 + sep_dev->workqueue = create_workqueue("sep_crypto_workqueue"); 3718 + if (!sep_dev->workqueue) { 3719 + dev_warn(&sep_dev->pdev->dev, "cant create workqueue\n"); 3720 + return -ENOMEM; 3721 + } 3722 + 3723 + i = 0; 3724 + j = 0; 3725 + 3726 + spin_lock_init(&sep_dev->busy_lock); 3727 + spin_lock_init(&queue_lock); 3728 + 3729 + err = 0; 3730 + 3731 + for (i = 0; i < ARRAY_SIZE(hash_algs); i++) { 3732 + err = crypto_register_ahash(&hash_algs[i]); 3733 + if (err) 3734 + goto err_algs; 3735 + } 3736 + 3737 + err = 0; 3738 + for (j = 0; j < ARRAY_SIZE(crypto_algs); j++) { 3739 + err = crypto_register_alg(&crypto_algs[j]); 3740 + if (err) 3741 + goto err_crypto_algs; 3742 + } 3743 + 3744 + return err; 3745 + 3746 + err_algs: 3747 + for (k = 0; k < i; k++) 3748 + crypto_unregister_ahash(&hash_algs[k]); 3749 + return err; 3750 + 3751 + err_crypto_algs: 3752 + for (k = 0; k < j; k++) 3753 + crypto_unregister_alg(&crypto_algs[k]); 3754 + goto err_algs; 3755 + } 3756 + 3757 + void sep_crypto_takedown(void) 3758 + { 3759 + 3760 + int i; 3761 + 3762 + for (i = 0; i < ARRAY_SIZE(hash_algs); i++) 3763 + crypto_unregister_ahash(&hash_algs[i]); 3764 + for (i = 0; i < ARRAY_SIZE(crypto_algs); i++) 3765 + crypto_unregister_alg(&crypto_algs[i]); 3766 + 3767 + tasklet_kill(&sep_dev->finish_tasklet); 3768 + }
+348
drivers/staging/sep/sep_crypto.h
/*
 *
 * sep_crypto.h - Crypto interface structures
 *
 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
 * Contributions(c) 2009-2010 Discretix. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * CONTACTS:
 *
 * Mark Allyn mark.a.allyn@intel.com
 * Jayant Mangalampalli jayant.mangalampalli@intel.com
 *
 * CHANGES:
 *
 * 2009.06.26 Initial publish
 * 2011.02.22 Enable Kernel Crypto
 *
 */

/*
 * NOTE(review): this header has no include guard; add one if it is ever
 * included from more than one translation unit.
 */

/* Constants for SEP (from vendor) */
#define SEP_START_MSG_TOKEN 0x02558808

/* DES geometry, expressed in 32-bit words and in bytes */
#define SEP_DES_IV_SIZE_WORDS 2
#define SEP_DES_IV_SIZE_BYTES (SEP_DES_IV_SIZE_WORDS * \
	sizeof(u32))
#define SEP_DES_KEY_SIZE_WORDS 2
#define SEP_DES_KEY_SIZE_BYTES (SEP_DES_KEY_SIZE_WORDS * \
	sizeof(u32))
#define SEP_DES_BLOCK_SIZE 8
#define SEP_DES_DUMMY_SIZE 16

/* Opcodes sent to the SEP firmware for DES operations */
#define SEP_DES_INIT_OPCODE 0x10
#define SEP_DES_BLOCK_OPCODE 0x11

/* AES geometry */
#define SEP_AES_BLOCK_SIZE_WORDS 4
#define SEP_AES_BLOCK_SIZE_BYTES \
	(SEP_AES_BLOCK_SIZE_WORDS * sizeof(u32))

#define SEP_AES_DUMMY_BLOCK_SIZE 16
#define SEP_AES_IV_SIZE_WORDS SEP_AES_BLOCK_SIZE_WORDS
#define SEP_AES_IV_SIZE_BYTES \
	(SEP_AES_IV_SIZE_WORDS * sizeof(u32))

/* Accepted AES key lengths in bytes (see sep_aes_setkey()) */
#define SEP_AES_KEY_128_SIZE 16
#define SEP_AES_KEY_192_SIZE 24
#define SEP_AES_KEY_256_SIZE 32
#define SEP_AES_KEY_512_SIZE 64
#define SEP_AES_MAX_KEY_SIZE_WORDS 16
#define SEP_AES_MAX_KEY_SIZE_BYTES \
	(SEP_AES_MAX_KEY_SIZE_WORDS * sizeof(u32))

/* AES key-wrap limits (bytes) */
#define SEP_AES_WRAP_MIN_SIZE 8
#define SEP_AES_WRAP_MAX_SIZE 0x10000000

#define SEP_AES_WRAP_BLOCK_SIZE_WORDS 2
#define SEP_AES_WRAP_BLOCK_SIZE_BYTES \
	(SEP_AES_WRAP_BLOCK_SIZE_WORDS * sizeof(u32))

/* Selectors for the SEP's built-in secret keys */
#define SEP_AES_SECRET_RKEK1 0x1
#define SEP_AES_SECRET_RKEK2 0x2

/* Opcodes sent to the SEP firmware for AES operations */
#define SEP_AES_INIT_OPCODE 0x2
#define SEP_AES_BLOCK_OPCODE 0x3
#define SEP_AES_FINISH_OPCODE 0x4
#define SEP_AES_WRAP_OPCODE 0x6
#define SEP_AES_UNWRAP_OPCODE 0x7
#define SEP_AES_XTS_FINISH_OPCODE 0x8

/* Hash digest/block sizes, in 32-bit words and in bytes */
#define SEP_HASH_RESULT_SIZE_WORDS 16
#define SEP_MD5_DIGEST_SIZE_WORDS 4
#define SEP_MD5_DIGEST_SIZE_BYTES \
	(SEP_MD5_DIGEST_SIZE_WORDS * sizeof(u32))
#define SEP_SHA1_DIGEST_SIZE_WORDS 5
#define SEP_SHA1_DIGEST_SIZE_BYTES \
	(SEP_SHA1_DIGEST_SIZE_WORDS * sizeof(u32))
#define SEP_SHA224_DIGEST_SIZE_WORDS 7
#define SEP_SHA224_DIGEST_SIZE_BYTES \
	(SEP_SHA224_DIGEST_SIZE_WORDS * sizeof(u32))
#define SEP_SHA256_DIGEST_SIZE_WORDS 8
#define SEP_SHA256_DIGEST_SIZE_BYTES \
	(SEP_SHA256_DIGEST_SIZE_WORDS * sizeof(u32))
#define SEP_SHA384_DIGEST_SIZE_WORDS 12
#define SEP_SHA384_DIGEST_SIZE_BYTES \
	(SEP_SHA384_DIGEST_SIZE_WORDS * sizeof(u32))
#define SEP_SHA512_DIGEST_SIZE_WORDS 16
#define SEP_SHA512_DIGEST_SIZE_BYTES \
	(SEP_SHA512_DIGEST_SIZE_WORDS * sizeof(u32))
#define SEP_HASH_BLOCK_SIZE_WORDS 16
#define SEP_HASH_BLOCK_SIZE_BYTES \
	(SEP_HASH_BLOCK_SIZE_WORDS * sizeof(u32))
#define SEP_SHA2_BLOCK_SIZE_WORDS 32
#define SEP_SHA2_BLOCK_SIZE_BYTES \
	(SEP_SHA2_BLOCK_SIZE_WORDS * sizeof(u32))

/* Opcodes sent to the SEP firmware for hash operations */
#define SEP_HASH_INIT_OPCODE 0x20
#define SEP_HASH_UPDATE_OPCODE 0x21
#define SEP_HASH_FINISH_OPCODE 0x22
#define SEP_HASH_SINGLE_OPCODE 0x23

/* Status values returned by the SEP firmware */
#define SEP_HOST_ERROR 0x0b000000
#define SEP_OK 0x0
#define SEP_INVALID_START (SEP_HOST_ERROR + 0x3)
#define SEP_WRONG_OPCODE (SEP_HOST_ERROR + 0x1)

#define SEP_TRANSACTION_WAIT_TIME 5

/* Depth of the crypto request queue (see sep_crypto_setup()) */
#define SEP_QUEUE_LENGTH 10
/* Macros */
/* 32-bit byte swap on big-endian hosts; identity on little-endian */
#ifndef __LITTLE_ENDIAN
#define CHG_ENDIAN(val) \
	(((val) >> 24) | \
	(((val) & 0x00FF0000) >> 8) | \
	(((val) & 0x0000FF00) << 8) | \
	(((val) & 0x000000FF) << 24))
#else
#define CHG_ENDIAN(val) val
#endif
/*
 * Enums for SEP (from vendor).  The 0x7fffffff "LAST" members presumably
 * pin each enum to a 32-bit representation to match the firmware ABI —
 * TODO confirm against the vendor specification.
 */
enum des_numkey {
	DES_KEY_1 = 1,
	DES_KEY_2 = 2,
	DES_KEY_3 = 3,
	SEP_NUMKEY_OPTIONS,
	SEP_NUMKEY_LAST = 0x7fffffff,
};

enum des_enc_mode {
	SEP_DES_ENCRYPT = 0,
	SEP_DES_DECRYPT = 1,
	SEP_DES_ENC_OPTIONS,
	SEP_DES_ENC_LAST = 0x7fffffff,
};

enum des_op_mode {
	SEP_DES_ECB = 0,
	SEP_DES_CBC = 1,
	SEP_OP_OPTIONS,
	SEP_OP_LAST = 0x7fffffff,
};

enum aes_keysize {
	AES_128 = 0,
	AES_192 = 1,
	AES_256 = 2,
	AES_512 = 3,
	AES_SIZE_OPTIONS,
	AEA_SIZE_LAST = 0x7FFFFFFF, /* NOTE(review): "AEA" is likely a typo for "AES" */
};

enum aes_enc_mode {
	SEP_AES_ENCRYPT = 0,
	SEP_AES_DECRYPT = 1,
	SEP_AES_ENC_OPTIONS,
	SEP_AES_ENC_LAST = 0x7FFFFFFF,
};

enum aes_op_mode {
	SEP_AES_ECB = 0,
	SEP_AES_CBC = 1,
	SEP_AES_MAC = 2,
	SEP_AES_CTR = 3,
	SEP_AES_XCBC = 4,
	SEP_AES_CMAC = 5,
	SEP_AES_XTS = 6,
	SEP_AES_OP_OPTIONS,
	SEP_AES_OP_LAST = 0x7FFFFFFF,
};

enum hash_op_mode {
	SEP_HASH_SHA1 = 0,
	SEP_HASH_SHA224 = 1,
	SEP_HASH_SHA256 = 2,
	SEP_HASH_SHA384 = 3,
	SEP_HASH_SHA512 = 4,
	SEP_HASH_MD5 = 5,
	SEP_HASH_OPTIONS,
	SEP_HASH_LAST_MODE = 0x7FFFFFFF,
};

/*
 * Structures for SEP (from vendor).  These mirror firmware-side layouts;
 * do not reorder or resize fields.
 */
struct sep_des_internal_key {
	u32 key1[SEP_DES_KEY_SIZE_WORDS];
	u32 key2[SEP_DES_KEY_SIZE_WORDS];
	u32 key3[SEP_DES_KEY_SIZE_WORDS];
};

/* DES state as maintained by the SEP firmware */
struct sep_des_internal_context {
	u32 iv_context[SEP_DES_IV_SIZE_WORDS];
	struct sep_des_internal_key context_key;
	enum des_numkey nbr_keys;
	enum des_enc_mode encryption;
	enum des_op_mode operation;
	u8 dummy_block[SEP_DES_DUMMY_SIZE];
};

/* Opaque wrapper for the firmware DES context */
struct sep_des_private_context {
	u32 valid_tag;
	u32 iv;
	u8 ctx_buf[sizeof(struct sep_des_internal_context)];
};

/* This is the structure passed to SEP via msg area */
struct sep_des_key {
	u32 key1[SEP_DES_KEY_SIZE_WORDS];
	u32 key2[SEP_DES_KEY_SIZE_WORDS];
	u32 key3[SEP_DES_KEY_SIZE_WORDS];
	u32 pad[SEP_DES_KEY_SIZE_WORDS];
};

/* AES state as maintained by the SEP firmware */
struct sep_aes_internal_context {
	u32 aes_ctx_iv[SEP_AES_IV_SIZE_WORDS];
	u32 aes_ctx_key[SEP_AES_MAX_KEY_SIZE_WORDS / 2];
	enum aes_keysize keysize;
	enum aes_enc_mode encmode;
	enum aes_op_mode opmode;
	u8 secret_key;
	u32 no_add_blocks;
	u32 last_block_size;
	u32 last_block[SEP_AES_BLOCK_SIZE_WORDS];
	u32 prev_iv[SEP_AES_BLOCK_SIZE_WORDS];
	u32 remaining_size;
	union {
		struct {
			u32 dkey1[SEP_AES_BLOCK_SIZE_WORDS];
			u32 dkey2[SEP_AES_BLOCK_SIZE_WORDS];
			u32 dkey3[SEP_AES_BLOCK_SIZE_WORDS];
		} cmac_data;
		struct {
			u32 xts_key[SEP_AES_MAX_KEY_SIZE_WORDS / 2];
			u32 temp1[SEP_AES_BLOCK_SIZE_WORDS];
			u32 temp2[SEP_AES_BLOCK_SIZE_WORDS];
		} xtx_data; /* NOTE(review): presumably meant "xts_data"; kept as vendor named it */
	} s_data;
	u8 dummy_block[SEP_AES_DUMMY_BLOCK_SIZE];
};

/* Opaque wrapper for the firmware AES context */
struct sep_aes_private_context {
	u32 valid_tag;
	u32 aes_iv;
	u32 op_mode;
	u8 cbuff[sizeof(struct sep_aes_internal_context)];
};

/* Hash state as maintained by the SEP firmware */
struct sep_hash_internal_context {
	u32 hash_result[SEP_HASH_RESULT_SIZE_WORDS];
	enum hash_op_mode hash_opmode;
	u32 previous_data[SEP_SHA2_BLOCK_SIZE_WORDS];
	u16 prev_update_bytes;
	u32 total_proc_128bit[4];
	u16 op_mode_block_size;
	u8 dummy_aes_block[SEP_AES_DUMMY_BLOCK_SIZE];
};

/* Opaque wrapper for the firmware hash context */
struct sep_hash_private_context {
	u32 valid_tag;
	u32 iv;
	u8 internal_context[sizeof(struct sep_hash_internal_context)];
};

/* Context structures for crypto API */
/* Per-request state for block cipher operations */
struct sep_block_ctx {
	struct sep_device *sep;
	u32 done;
	unsigned char iv[100];
	enum des_enc_mode des_encmode;
	enum des_op_mode des_opmode;
	enum aes_enc_mode aes_encmode;
	enum aes_op_mode aes_opmode;
	u32 init_opcode;	/* SEP_{AES,DES}_INIT_OPCODE for this request */
	u32 block_opcode;	/* SEP_{AES,DES}_BLOCK_OPCODE for this request */
	size_t data_length;
	size_t ivlen;
	struct ablkcipher_walk walk;
	struct sep_des_private_context des_private_ctx;
	struct sep_aes_private_context aes_private_ctx;
};

/* Per-request state for hash operations */
struct sep_hash_ctx {
	u32 done;
	unsigned char *buf;
	size_t buflen;
	unsigned char *dgst;
	int digest_size_words;
	int digest_size_bytes;
	int block_size_words;
	int block_size_bytes;
	struct scatterlist *sg;
	enum hash_op_mode hash_opmode;
	struct sep_hash_private_context hash_private_ctx;
};

/*
 * Per-tfm context (cra_ctxsize for every SEP algorithm).  Tracks the
 * cached key, the request currently in flight and the DMA/message state
 * used to talk to the SEP device.
 */
struct sep_system_ctx {
	struct sep_device *sep_used;
	union key_t {
		struct sep_des_key des;
		u32 aes[SEP_AES_MAX_KEY_SIZE_WORDS];
	} key;
	int i_own_sep; /* Do I have custody of the sep? */
	size_t keylen;
	enum des_numkey des_nbr_keys;
	enum aes_keysize aes_key_size;
	u32 key_sent; /* Indicate if key is sent to sep */
	u32 last_block; /* Indicate that this is the final block */
	struct sep_call_status call_status;
	struct build_dcb_struct_kernel dcb_input_data;
	struct sep_dma_context *dma_ctx;
	void *dmatables_region;
	size_t nbytes;
	struct sep_dcblock *dcb_region;
	struct sep_queue_info *queue_elem;
	int msg_len_words;
	unsigned char msg[SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES];
	void *msgptr;
	struct scatterlist *src_sg;
	struct scatterlist *dst_sg;
	struct scatterlist *src_sg_hold;
	struct scatterlist *dst_sg_hold;
	struct ahash_request *current_hash_req;
	struct ablkcipher_request *current_cypher_req;
	enum type_of_request current_request;
	enum hash_stage current_hash_stage;
	int done_with_transaction;
	unsigned long end_time;
};

/* work queue structures */
struct sep_work_struct {
	struct work_struct work;
	void (*callback)(void *);	/* invoked by the work handler */
	void *data;			/* opaque argument for callback */
};

/* Functions */
int sep_crypto_setup(void);
void sep_crypto_takedown(void);
+65 -34
drivers/staging/sep/sep_dev.h
··· 5 5 * 6 6 * sep_dev.h - Security Processor Device Structures 7 7 * 8 - * Copyright(c) 2009,2010 Intel Corporation. All rights reserved. 9 - * Contributions(c) 2009,2010 Discretix. All rights reserved. 8 + * Copyright(c) 2009-2011 Intel Corporation. All rights reserved. 9 + * Contributions(c) 2009-2011 Discretix. All rights reserved. 10 10 * 11 11 * This program is free software; you can redistribute it and/or modify it 12 12 * under the terms of the GNU General Public License as published by the Free ··· 28 28 * 29 29 * CHANGES 30 30 * 2010.09.14 upgrade to Medfield 31 + * 2011.02.22 enable kernel crypto 31 32 */ 32 33 33 34 struct sep_device { ··· 37 36 38 37 /* character device file */ 39 38 struct cdev sep_cdev; 40 - struct cdev sep_daemon_cdev; 41 - struct cdev sep_singleton_cdev; 42 39 43 40 /* devices (using misc dev) */ 44 41 struct miscdevice miscdev_sep; 45 - struct miscdevice miscdev_singleton; 46 - struct miscdevice miscdev_daemon; 47 42 48 43 /* major / minor numbers of device */ 49 44 dev_t sep_devno; 50 - dev_t sep_daemon_devno; 51 - dev_t sep_singleton_devno; 52 - 53 - struct mutex sep_mutex; 54 - struct mutex ioctl_mutex; 45 + /* guards command sent counter */ 55 46 spinlock_t snd_rply_lck; 47 + /* guards driver memory usage in fastcall if */ 48 + struct semaphore sep_doublebuf; 56 49 57 50 /* flags to indicate use and lock status of sep */ 58 51 u32 pid_doing_transaction; 59 52 unsigned long in_use_flags; 60 - 61 - /* request daemon alread open */ 62 - unsigned long request_daemon_open; 63 - 64 - /* 1 = Moorestown; 0 = Medfield */ 65 - int mrst; 66 53 67 54 /* address of the shared memory allocated during init for SEP driver 68 55 (coherent alloc) */ ··· 63 74 dma_addr_t reg_physical_end; 64 75 void __iomem *reg_addr; 65 76 66 - /* wait queue head (event) of the driver */ 67 - wait_queue_head_t event; 68 - wait_queue_head_t event_request_daemon; 69 - wait_queue_head_t event_mmap; 77 + /* wait queue heads of the driver */ 78 + wait_queue_head_t 
event_interrupt; 79 + wait_queue_head_t event_transactions; 70 80 71 - struct sep_caller_id_entry 72 - caller_id_table[SEP_CALLER_ID_TABLE_NUM_ENTRIES]; 81 + struct list_head sep_queue_status; 82 + u32 sep_queue_num; 83 + spinlock_t sep_queue_lock; 73 84 74 - /* access flag for singleton device */ 75 - unsigned long singleton_access_flag; 85 + /* Is this in use? */ 86 + u32 in_use; 87 + 88 + /* indicates whether power save is set up */ 89 + u32 power_save_setup; 90 + 91 + /* Power state */ 92 + u32 power_state; 76 93 77 94 /* transaction counter that coordinates the 78 95 transactions between SEP and HOST */ 79 96 unsigned long send_ct; 80 97 /* counter for the messages from sep */ 81 98 unsigned long reply_ct; 82 - /* counter for the number of bytes allocated in the pool for the 83 - current transaction */ 84 - long data_pool_bytes_allocated; 85 99 86 - u32 num_of_data_allocations; 100 + /* The following are used for kernel crypto client requests */ 101 + u32 in_kernel; /* Set for kernel client request */ 102 + struct tasklet_struct finish_tasklet; 103 + enum type_of_request current_request; 104 + enum hash_stage current_hash_stage; 105 + struct ahash_request *current_hash_req; 106 + struct ablkcipher_request *current_cypher_req; 107 + struct sep_system_ctx *sctx; 108 + spinlock_t busy_lock; 109 + struct workqueue_struct *workqueue; 110 + }; 87 111 88 - /* number of the lli tables created in the current transaction */ 89 - u32 num_lli_tables_created; 112 + extern struct sep_device *sep_dev; 90 113 91 - /* number of data control blocks */ 92 - u32 nr_dcb_creat; 114 + /** 115 + * SEP message header for a transaction 116 + * @reserved: reserved memory (two words) 117 + * @token: SEP message token 118 + * @msg_len: message length 119 + * @opcpde: message opcode 120 + */ 121 + struct sep_msgarea_hdr { 122 + u32 reserved[2]; 123 + u32 token; 124 + u32 msg_len; 125 + u32 opcode; 126 + }; 93 127 94 - struct sep_dma_resource dma_res_arr[SEP_MAX_NUM_SYNC_DMA_OPS]; 128 + /** 
129 + * sep_queue_data - data to be maintained in status queue for a transaction 130 + * @opcode : transaction opcode 131 + * @size : message size 132 + * @pid: owner process 133 + * @name: owner process name 134 + */ 135 + struct sep_queue_data { 136 + u32 opcode; 137 + u32 size; 138 + s32 pid; 139 + u8 name[TASK_COMM_LEN]; 140 + }; 95 141 142 + /** sep_queue_info - maintains status info of all transactions 143 + * @list: head of list 144 + * @sep_queue_data : data for transaction 145 + */ 146 + struct sep_queue_info { 147 + struct list_head list; 148 + struct sep_queue_data data; 96 149 }; 97 150 98 151 static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value)
-2932
drivers/staging/sep/sep_driver.c
··· 1 - /* 2 - * 3 - * sep_driver.c - Security Processor Driver main group of functions 4 - * 5 - * Copyright(c) 2009,2010 Intel Corporation. All rights reserved. 6 - * Contributions(c) 2009,2010 Discretix. All rights reserved. 7 - * 8 - * This program is free software; you can redistribute it and/or modify it 9 - * under the terms of the GNU General Public License as published by the Free 10 - * Software Foundation; version 2 of the License. 11 - * 12 - * This program is distributed in the hope that it will be useful, but WITHOUT 13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 - * more details. 16 - * 17 - * You should have received a copy of the GNU General Public License along with 18 - * this program; if not, write to the Free Software Foundation, Inc., 59 19 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 20 - * 21 - * CONTACTS: 22 - * 23 - * Mark Allyn mark.a.allyn@intel.com 24 - * Jayant Mangalampalli jayant.mangalampalli@intel.com 25 - * 26 - * CHANGES: 27 - * 28 - * 2009.06.26 Initial publish 29 - * 2010.09.14 Upgrade to Medfield 30 - * 31 - */ 32 - #include <linux/init.h> 33 - #include <linux/module.h> 34 - #include <linux/miscdevice.h> 35 - #include <linux/fs.h> 36 - #include <linux/cdev.h> 37 - #include <linux/kdev_t.h> 38 - #include <linux/mutex.h> 39 - #include <linux/sched.h> 40 - #include <linux/mm.h> 41 - #include <linux/poll.h> 42 - #include <linux/wait.h> 43 - #include <linux/pci.h> 44 - #include <linux/firmware.h> 45 - #include <linux/slab.h> 46 - #include <linux/ioctl.h> 47 - #include <asm/current.h> 48 - #include <linux/ioport.h> 49 - #include <linux/io.h> 50 - #include <linux/interrupt.h> 51 - #include <linux/pagemap.h> 52 - #include <asm/cacheflush.h> 53 - #include <linux/delay.h> 54 - #include <linux/jiffies.h> 55 - #include <linux/rar_register.h> 56 - 57 - #include "sep_driver_hw_defs.h" 58 - #include 
"sep_driver_config.h" 59 - #include "sep_driver_api.h" 60 - #include "sep_dev.h" 61 - 62 - /*---------------------------------------- 63 - DEFINES 64 - -----------------------------------------*/ 65 - 66 - #define SEP_RAR_IO_MEM_REGION_SIZE 0x40000 67 - 68 - /*-------------------------------------------- 69 - GLOBAL variables 70 - --------------------------------------------*/ 71 - 72 - /* Keep this a single static object for now to keep the conversion easy */ 73 - 74 - static struct sep_device *sep_dev; 75 - 76 - /** 77 - * sep_dump_message - dump the message that is pending 78 - * @sep: SEP device 79 - */ 80 - static void sep_dump_message(struct sep_device *sep) 81 - { 82 - int count; 83 - u32 *p = sep->shared_addr; 84 - for (count = 0; count < 12 * 4; count += 4) 85 - dev_dbg(&sep->pdev->dev, "Word %d of the message is %x\n", 86 - count, *p++); 87 - } 88 - 89 - /** 90 - * sep_map_and_alloc_shared_area - allocate shared block 91 - * @sep: security processor 92 - * @size: size of shared area 93 - */ 94 - static int sep_map_and_alloc_shared_area(struct sep_device *sep) 95 - { 96 - sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, 97 - sep->shared_size, 98 - &sep->shared_bus, GFP_KERNEL); 99 - 100 - if (!sep->shared_addr) { 101 - dev_warn(&sep->pdev->dev, 102 - "shared memory dma_alloc_coherent failed\n"); 103 - return -ENOMEM; 104 - } 105 - dev_dbg(&sep->pdev->dev, 106 - "shared_addr %zx bytes @%p (bus %llx)\n", 107 - sep->shared_size, sep->shared_addr, 108 - (unsigned long long)sep->shared_bus); 109 - return 0; 110 - } 111 - 112 - /** 113 - * sep_unmap_and_free_shared_area - free shared block 114 - * @sep: security processor 115 - */ 116 - static void sep_unmap_and_free_shared_area(struct sep_device *sep) 117 - { 118 - dma_free_coherent(&sep->pdev->dev, sep->shared_size, 119 - sep->shared_addr, sep->shared_bus); 120 - } 121 - 122 - /** 123 - * sep_shared_bus_to_virt - convert bus/virt addresses 124 - * @sep: pointer to struct sep_device 125 - * @bus_address: 
address to convert 126 - * 127 - * Returns virtual address inside the shared area according 128 - * to the bus address. 129 - */ 130 - static void *sep_shared_bus_to_virt(struct sep_device *sep, 131 - dma_addr_t bus_address) 132 - { 133 - return sep->shared_addr + (bus_address - sep->shared_bus); 134 - } 135 - 136 - /** 137 - * open function for the singleton driver 138 - * @inode_ptr struct inode * 139 - * @file_ptr struct file * 140 - * 141 - * Called when the user opens the singleton device interface 142 - */ 143 - static int sep_singleton_open(struct inode *inode_ptr, struct file *file_ptr) 144 - { 145 - struct sep_device *sep; 146 - 147 - /* 148 - * Get the SEP device structure and use it for the 149 - * private_data field in filp for other methods 150 - */ 151 - sep = sep_dev; 152 - 153 - file_ptr->private_data = sep; 154 - 155 - if (test_and_set_bit(0, &sep->singleton_access_flag)) 156 - return -EBUSY; 157 - return 0; 158 - } 159 - 160 - /** 161 - * sep_open - device open method 162 - * @inode: inode of SEP device 163 - * @filp: file handle to SEP device 164 - * 165 - * Open method for the SEP device. Called when userspace opens 166 - * the SEP device node. 167 - * 168 - * Returns zero on success otherwise an error code. 169 - */ 170 - static int sep_open(struct inode *inode, struct file *filp) 171 - { 172 - struct sep_device *sep; 173 - 174 - /* 175 - * Get the SEP device structure and use it for the 176 - * private_data field in filp for other methods 177 - */ 178 - sep = sep_dev; 179 - filp->private_data = sep; 180 - 181 - /* Anyone can open; locking takes place at transaction level */ 182 - return 0; 183 - } 184 - 185 - /** 186 - * sep_singleton_release - close a SEP singleton device 187 - * @inode: inode of SEP device 188 - * @filp: file handle being closed 189 - * 190 - * Called on the final close of a SEP device. 
As the open protects against 191 - * multiple simultaenous opens that means this method is called when the 192 - * final reference to the open handle is dropped. 193 - */ 194 - static int sep_singleton_release(struct inode *inode, struct file *filp) 195 - { 196 - struct sep_device *sep = filp->private_data; 197 - 198 - clear_bit(0, &sep->singleton_access_flag); 199 - return 0; 200 - } 201 - 202 - /** 203 - * sep_request_daemon_open - request daemon open method 204 - * @inode: inode of SEP device 205 - * @filp: file handle to SEP device 206 - * 207 - * Open method for the SEP request daemon. Called when 208 - * request daemon in userspace opens the SEP device node. 209 - * 210 - * Returns zero on success otherwise an error code. 211 - */ 212 - static int sep_request_daemon_open(struct inode *inode, struct file *filp) 213 - { 214 - struct sep_device *sep = sep_dev; 215 - int error = 0; 216 - 217 - filp->private_data = sep; 218 - 219 - /* There is supposed to be only one request daemon */ 220 - if (test_and_set_bit(0, &sep->request_daemon_open)) 221 - error = -EBUSY; 222 - return error; 223 - } 224 - 225 - /** 226 - * sep_request_daemon_release - close a SEP daemon 227 - * @inode: inode of SEP device 228 - * @filp: file handle being closed 229 - * 230 - * Called on the final close of a SEP daemon. 
231 - */ 232 - static int sep_request_daemon_release(struct inode *inode, struct file *filp) 233 - { 234 - struct sep_device *sep = filp->private_data; 235 - 236 - dev_dbg(&sep->pdev->dev, "Request daemon release for pid %d\n", 237 - current->pid); 238 - 239 - /* Clear the request_daemon_open flag */ 240 - clear_bit(0, &sep->request_daemon_open); 241 - return 0; 242 - } 243 - 244 - /** 245 - * sep_req_daemon_send_reply_command_handler - poke the SEP 246 - * @sep: struct sep_device * 247 - * 248 - * This function raises interrupt to SEPm that signals that is has a 249 - * new command from HOST 250 - */ 251 - static int sep_req_daemon_send_reply_command_handler(struct sep_device *sep) 252 - { 253 - unsigned long lck_flags; 254 - 255 - sep_dump_message(sep); 256 - 257 - /* Counters are lockable region */ 258 - spin_lock_irqsave(&sep->snd_rply_lck, lck_flags); 259 - sep->send_ct++; 260 - sep->reply_ct++; 261 - 262 - /* Send the interrupt to SEP */ 263 - sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct); 264 - sep->send_ct++; 265 - 266 - spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags); 267 - 268 - dev_dbg(&sep->pdev->dev, 269 - "sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n", 270 - sep->send_ct, sep->reply_ct); 271 - 272 - return 0; 273 - } 274 - 275 - 276 - /** 277 - * sep_free_dma_table_data_handler - free DMA table 278 - * @sep: pointere to struct sep_device 279 - * 280 - * Handles the request to free DMA table for synchronic actions 281 - */ 282 - static int sep_free_dma_table_data_handler(struct sep_device *sep) 283 - { 284 - int count; 285 - int dcb_counter; 286 - /* Pointer to the current dma_resource struct */ 287 - struct sep_dma_resource *dma; 288 - 289 - for (dcb_counter = 0; dcb_counter < sep->nr_dcb_creat; dcb_counter++) { 290 - dma = &sep->dma_res_arr[dcb_counter]; 291 - 292 - /* Unmap and free input map array */ 293 - if (dma->in_map_array) { 294 - for (count = 0; count < dma->in_num_pages; count++) { 295 - 
dma_unmap_page(&sep->pdev->dev, 296 - dma->in_map_array[count].dma_addr, 297 - dma->in_map_array[count].size, 298 - DMA_TO_DEVICE); 299 - } 300 - kfree(dma->in_map_array); 301 - } 302 - 303 - /* Unmap output map array, DON'T free it yet */ 304 - if (dma->out_map_array) { 305 - for (count = 0; count < dma->out_num_pages; count++) { 306 - dma_unmap_page(&sep->pdev->dev, 307 - dma->out_map_array[count].dma_addr, 308 - dma->out_map_array[count].size, 309 - DMA_FROM_DEVICE); 310 - } 311 - kfree(dma->out_map_array); 312 - } 313 - 314 - /* Free page cache for output */ 315 - if (dma->in_page_array) { 316 - for (count = 0; count < dma->in_num_pages; count++) { 317 - flush_dcache_page(dma->in_page_array[count]); 318 - page_cache_release(dma->in_page_array[count]); 319 - } 320 - kfree(dma->in_page_array); 321 - } 322 - 323 - if (dma->out_page_array) { 324 - for (count = 0; count < dma->out_num_pages; count++) { 325 - if (!PageReserved(dma->out_page_array[count])) 326 - SetPageDirty(dma->out_page_array[count]); 327 - flush_dcache_page(dma->out_page_array[count]); 328 - page_cache_release(dma->out_page_array[count]); 329 - } 330 - kfree(dma->out_page_array); 331 - } 332 - 333 - /* Reset all the values */ 334 - dma->in_page_array = NULL; 335 - dma->out_page_array = NULL; 336 - dma->in_num_pages = 0; 337 - dma->out_num_pages = 0; 338 - dma->in_map_array = NULL; 339 - dma->out_map_array = NULL; 340 - dma->in_map_num_entries = 0; 341 - dma->out_map_num_entries = 0; 342 - } 343 - 344 - sep->nr_dcb_creat = 0; 345 - sep->num_lli_tables_created = 0; 346 - 347 - return 0; 348 - } 349 - 350 - /** 351 - * sep_request_daemon_mmap - maps the shared area to user space 352 - * @filp: pointer to struct file 353 - * @vma: pointer to vm_area_struct 354 - * 355 - * Called by the kernel when the daemon attempts an mmap() syscall 356 - * using our handle. 
357 - */ 358 - static int sep_request_daemon_mmap(struct file *filp, 359 - struct vm_area_struct *vma) 360 - { 361 - struct sep_device *sep = filp->private_data; 362 - dma_addr_t bus_address; 363 - int error = 0; 364 - 365 - if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) { 366 - error = -EINVAL; 367 - goto end_function; 368 - } 369 - 370 - /* Get physical address */ 371 - bus_address = sep->shared_bus; 372 - 373 - if (remap_pfn_range(vma, vma->vm_start, bus_address >> PAGE_SHIFT, 374 - vma->vm_end - vma->vm_start, vma->vm_page_prot)) { 375 - 376 - dev_warn(&sep->pdev->dev, "remap_page_range failed\n"); 377 - error = -EAGAIN; 378 - goto end_function; 379 - } 380 - 381 - end_function: 382 - return error; 383 - } 384 - 385 - /** 386 - * sep_request_daemon_poll - poll implementation 387 - * @sep: struct sep_device * for current SEP device 388 - * @filp: struct file * for open file 389 - * @wait: poll_table * for poll 390 - * 391 - * Called when our device is part of a poll() or select() syscall 392 - */ 393 - static unsigned int sep_request_daemon_poll(struct file *filp, 394 - poll_table *wait) 395 - { 396 - u32 mask = 0; 397 - /* GPR2 register */ 398 - u32 retval2; 399 - unsigned long lck_flags; 400 - struct sep_device *sep = filp->private_data; 401 - 402 - poll_wait(filp, &sep->event_request_daemon, wait); 403 - 404 - dev_dbg(&sep->pdev->dev, "daemon poll: send_ct is %lx reply ct is %lx\n", 405 - sep->send_ct, sep->reply_ct); 406 - 407 - spin_lock_irqsave(&sep->snd_rply_lck, lck_flags); 408 - /* Check if the data is ready */ 409 - if (sep->send_ct == sep->reply_ct) { 410 - spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags); 411 - 412 - retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); 413 - dev_dbg(&sep->pdev->dev, 414 - "daemon poll: data check (GPR2) is %x\n", retval2); 415 - 416 - /* Check if PRINT request */ 417 - if ((retval2 >> 30) & 0x1) { 418 - dev_dbg(&sep->pdev->dev, "daemon poll: PRINTF request in\n"); 419 - mask |= POLLIN; 
420 - goto end_function; 421 - } 422 - /* Check if NVS request */ 423 - if (retval2 >> 31) { 424 - dev_dbg(&sep->pdev->dev, "daemon poll: NVS request in\n"); 425 - mask |= POLLPRI | POLLWRNORM; 426 - } 427 - } else { 428 - spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags); 429 - dev_dbg(&sep->pdev->dev, 430 - "daemon poll: no reply received; returning 0\n"); 431 - mask = 0; 432 - } 433 - end_function: 434 - return mask; 435 - } 436 - 437 - /** 438 - * sep_release - close a SEP device 439 - * @inode: inode of SEP device 440 - * @filp: file handle being closed 441 - * 442 - * Called on the final close of a SEP device. 443 - */ 444 - static int sep_release(struct inode *inode, struct file *filp) 445 - { 446 - struct sep_device *sep = filp->private_data; 447 - 448 - dev_dbg(&sep->pdev->dev, "Release for pid %d\n", current->pid); 449 - 450 - mutex_lock(&sep->sep_mutex); 451 - /* Is this the process that has a transaction open? 452 - * If so, lets reset pid_doing_transaction to 0 and 453 - * clear the in use flags, and then wake up sep_event 454 - * so that other processes can do transactions 455 - */ 456 - if (sep->pid_doing_transaction == current->pid) { 457 - clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags); 458 - clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags); 459 - sep_free_dma_table_data_handler(sep); 460 - wake_up(&sep->event); 461 - sep->pid_doing_transaction = 0; 462 - } 463 - 464 - mutex_unlock(&sep->sep_mutex); 465 - return 0; 466 - } 467 - 468 - /** 469 - * sep_mmap - maps the shared area to user space 470 - * @filp: pointer to struct file 471 - * @vma: pointer to vm_area_struct 472 - * 473 - * Called on an mmap of our space via the normal SEP device 474 - */ 475 - static int sep_mmap(struct file *filp, struct vm_area_struct *vma) 476 - { 477 - dma_addr_t bus_addr; 478 - struct sep_device *sep = filp->private_data; 479 - unsigned long error = 0; 480 - 481 - /* Set the transaction busy (own the device) */ 482 - wait_event_interruptible(sep->event, 
483 - test_and_set_bit(SEP_MMAP_LOCK_BIT, 484 - &sep->in_use_flags) == 0); 485 - 486 - if (signal_pending(current)) { 487 - error = -EINTR; 488 - goto end_function_with_error; 489 - } 490 - /* 491 - * The pid_doing_transaction indicates that this process 492 - * now owns the facilities to performa a transaction with 493 - * the SEP. While this process is performing a transaction, 494 - * no other process who has the SEP device open can perform 495 - * any transactions. This method allows more than one process 496 - * to have the device open at any given time, which provides 497 - * finer granularity for device utilization by multiple 498 - * processes. 499 - */ 500 - mutex_lock(&sep->sep_mutex); 501 - sep->pid_doing_transaction = current->pid; 502 - mutex_unlock(&sep->sep_mutex); 503 - 504 - /* Zero the pools and the number of data pool alocation pointers */ 505 - sep->data_pool_bytes_allocated = 0; 506 - sep->num_of_data_allocations = 0; 507 - 508 - /* 509 - * Check that the size of the mapped range is as the size of the message 510 - * shared area 511 - */ 512 - if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) { 513 - error = -EINVAL; 514 - goto end_function_with_error; 515 - } 516 - 517 - dev_dbg(&sep->pdev->dev, "shared_addr is %p\n", sep->shared_addr); 518 - 519 - /* Get bus address */ 520 - bus_addr = sep->shared_bus; 521 - 522 - if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, 523 - vma->vm_end - vma->vm_start, vma->vm_page_prot)) { 524 - dev_warn(&sep->pdev->dev, "remap_page_range failed\n"); 525 - error = -EAGAIN; 526 - goto end_function_with_error; 527 - } 528 - goto end_function; 529 - 530 - end_function_with_error: 531 - /* Clear the bit */ 532 - clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags); 533 - mutex_lock(&sep->sep_mutex); 534 - sep->pid_doing_transaction = 0; 535 - mutex_unlock(&sep->sep_mutex); 536 - 537 - /* Raise event for stuck contextes */ 538 - 539 - wake_up(&sep->event); 540 - 541 - end_function: 542 - return 
error; 543 - } 544 - 545 - /** 546 - * sep_poll - poll handler 547 - * @filp: pointer to struct file 548 - * @wait: pointer to poll_table 549 - * 550 - * Called by the OS when the kernel is asked to do a poll on 551 - * a SEP file handle. 552 - */ 553 - static unsigned int sep_poll(struct file *filp, poll_table *wait) 554 - { 555 - u32 mask = 0; 556 - u32 retval = 0; 557 - u32 retval2 = 0; 558 - unsigned long lck_flags; 559 - 560 - struct sep_device *sep = filp->private_data; 561 - 562 - /* Am I the process that owns the transaction? */ 563 - mutex_lock(&sep->sep_mutex); 564 - if (current->pid != sep->pid_doing_transaction) { 565 - dev_dbg(&sep->pdev->dev, "poll; wrong pid\n"); 566 - mask = POLLERR; 567 - mutex_unlock(&sep->sep_mutex); 568 - goto end_function; 569 - } 570 - mutex_unlock(&sep->sep_mutex); 571 - 572 - /* Check if send command or send_reply were activated previously */ 573 - if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) { 574 - mask = POLLERR; 575 - goto end_function; 576 - } 577 - 578 - /* Add the event to the polling wait table */ 579 - dev_dbg(&sep->pdev->dev, "poll: calling wait sep_event\n"); 580 - 581 - poll_wait(filp, &sep->event, wait); 582 - 583 - dev_dbg(&sep->pdev->dev, "poll: send_ct is %lx reply ct is %lx\n", 584 - sep->send_ct, sep->reply_ct); 585 - 586 - /* Check if error occurred during poll */ 587 - retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR); 588 - if (retval2 != 0x0) { 589 - dev_warn(&sep->pdev->dev, "poll; poll error %x\n", retval2); 590 - mask |= POLLERR; 591 - goto end_function; 592 - } 593 - 594 - spin_lock_irqsave(&sep->snd_rply_lck, lck_flags); 595 - 596 - if (sep->send_ct == sep->reply_ct) { 597 - spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags); 598 - retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); 599 - dev_dbg(&sep->pdev->dev, "poll: data ready check (GPR2) %x\n", 600 - retval); 601 - 602 - /* Check if printf request */ 603 - if ((retval >> 30) & 0x1) { 604 - 
dev_dbg(&sep->pdev->dev, "poll: SEP printf request\n"); 605 - wake_up(&sep->event_request_daemon); 606 - goto end_function; 607 - } 608 - 609 - /* Check if the this is SEP reply or request */ 610 - if (retval >> 31) { 611 - dev_dbg(&sep->pdev->dev, "poll: SEP request\n"); 612 - wake_up(&sep->event_request_daemon); 613 - } else { 614 - dev_dbg(&sep->pdev->dev, "poll: normal return\n"); 615 - /* In case it is again by send_reply_comand */ 616 - clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags); 617 - sep_dump_message(sep); 618 - dev_dbg(&sep->pdev->dev, 619 - "poll; SEP reply POLLIN | POLLRDNORM\n"); 620 - mask |= POLLIN | POLLRDNORM; 621 - } 622 - } else { 623 - spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags); 624 - dev_dbg(&sep->pdev->dev, 625 - "poll; no reply received; returning mask of 0\n"); 626 - mask = 0; 627 - } 628 - 629 - end_function: 630 - return mask; 631 - } 632 - 633 - /** 634 - * sep_time_address - address in SEP memory of time 635 - * @sep: SEP device we want the address from 636 - * 637 - * Return the address of the two dwords in memory used for time 638 - * setting. 639 - */ 640 - static u32 *sep_time_address(struct sep_device *sep) 641 - { 642 - return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES; 643 - } 644 - 645 - /** 646 - * sep_set_time - set the SEP time 647 - * @sep: the SEP we are setting the time for 648 - * 649 - * Calculates time and sets it at the predefined address. 650 - * Called with the SEP mutex held. 
651 - */ 652 - static unsigned long sep_set_time(struct sep_device *sep) 653 - { 654 - struct timeval time; 655 - u32 *time_addr; /* Address of time as seen by the kernel */ 656 - 657 - 658 - do_gettimeofday(&time); 659 - 660 - /* Set value in the SYSTEM MEMORY offset */ 661 - time_addr = sep_time_address(sep); 662 - 663 - time_addr[0] = SEP_TIME_VAL_TOKEN; 664 - time_addr[1] = time.tv_sec; 665 - 666 - dev_dbg(&sep->pdev->dev, "time.tv_sec is %lu\n", time.tv_sec); 667 - dev_dbg(&sep->pdev->dev, "time_addr is %p\n", time_addr); 668 - dev_dbg(&sep->pdev->dev, "sep->shared_addr is %p\n", sep->shared_addr); 669 - 670 - return time.tv_sec; 671 - } 672 - 673 - /** 674 - * sep_set_caller_id_handler - insert caller id entry 675 - * @sep: SEP device 676 - * @arg: pointer to struct caller_id_struct 677 - * 678 - * Inserts the data into the caller id table. Note that this function 679 - * falls under the ioctl lock 680 - */ 681 - static int sep_set_caller_id_handler(struct sep_device *sep, unsigned long arg) 682 - { 683 - void __user *hash; 684 - int error = 0; 685 - int i; 686 - struct caller_id_struct command_args; 687 - 688 - for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) { 689 - if (sep->caller_id_table[i].pid == 0) 690 - break; 691 - } 692 - 693 - if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) { 694 - dev_dbg(&sep->pdev->dev, "no more caller id entries left\n"); 695 - dev_dbg(&sep->pdev->dev, "maximum number is %d\n", 696 - SEP_CALLER_ID_TABLE_NUM_ENTRIES); 697 - error = -EUSERS; 698 - goto end_function; 699 - } 700 - 701 - /* Copy the data */ 702 - if (copy_from_user(&command_args, (void __user *)arg, 703 - sizeof(command_args))) { 704 - error = -EFAULT; 705 - goto end_function; 706 - } 707 - 708 - hash = (void __user *)(unsigned long)command_args.callerIdAddress; 709 - 710 - if (!command_args.pid || !command_args.callerIdSizeInBytes) { 711 - error = -EINVAL; 712 - goto end_function; 713 - } 714 - 715 - dev_dbg(&sep->pdev->dev, "pid is %x\n", command_args.pid); 716 - 
dev_dbg(&sep->pdev->dev, "callerIdSizeInBytes is %x\n", 717 - command_args.callerIdSizeInBytes); 718 - 719 - if (command_args.callerIdSizeInBytes > 720 - SEP_CALLER_ID_HASH_SIZE_IN_BYTES) { 721 - error = -EMSGSIZE; 722 - goto end_function; 723 - } 724 - 725 - sep->caller_id_table[i].pid = command_args.pid; 726 - 727 - if (copy_from_user(sep->caller_id_table[i].callerIdHash, 728 - hash, command_args.callerIdSizeInBytes)) 729 - error = -EFAULT; 730 - end_function: 731 - return error; 732 - } 733 - 734 - /** 735 - * sep_set_current_caller_id - set the caller id 736 - * @sep: pointer to struct_sep_device 737 - * 738 - * Set the caller ID (if it exists) to the SEP. Note that this 739 - * function falls under the ioctl lock 740 - */ 741 - static int sep_set_current_caller_id(struct sep_device *sep) 742 - { 743 - int i; 744 - u32 *hash_buf_ptr; 745 - 746 - /* Zero the previous value */ 747 - memset(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES, 748 - 0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES); 749 - 750 - for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) { 751 - if (sep->caller_id_table[i].pid == current->pid) { 752 - dev_dbg(&sep->pdev->dev, "Caller Id found\n"); 753 - 754 - memcpy(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES, 755 - (void *)(sep->caller_id_table[i].callerIdHash), 756 - SEP_CALLER_ID_HASH_SIZE_IN_BYTES); 757 - break; 758 - } 759 - } 760 - /* Ensure data is in little endian */ 761 - hash_buf_ptr = (u32 *)sep->shared_addr + 762 - SEP_CALLER_ID_OFFSET_BYTES; 763 - 764 - for (i = 0; i < SEP_CALLER_ID_HASH_SIZE_IN_WORDS; i++) 765 - hash_buf_ptr[i] = cpu_to_le32(hash_buf_ptr[i]); 766 - 767 - return 0; 768 - } 769 - 770 - /** 771 - * sep_send_command_handler - kick off a command 772 - * @sep: SEP being signalled 773 - * 774 - * This function raises interrupt to SEP that signals that is has a new 775 - * command from the host 776 - * 777 - * Note that this function does fall under the ioctl lock 778 - */ 779 - static int sep_send_command_handler(struct sep_device 
*sep) 780 - { 781 - unsigned long lck_flags; 782 - int error = 0; 783 - 784 - if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) { 785 - error = -EPROTO; 786 - goto end_function; 787 - } 788 - sep_set_time(sep); 789 - 790 - sep_set_current_caller_id(sep); 791 - 792 - sep_dump_message(sep); 793 - 794 - /* Update counter */ 795 - spin_lock_irqsave(&sep->snd_rply_lck, lck_flags); 796 - sep->send_ct++; 797 - spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags); 798 - 799 - dev_dbg(&sep->pdev->dev, 800 - "sep_send_command_handler send_ct %lx reply_ct %lx\n", 801 - sep->send_ct, sep->reply_ct); 802 - 803 - /* Send interrupt to SEP */ 804 - sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2); 805 - 806 - end_function: 807 - return error; 808 - } 809 - 810 - /** 811 - * sep_allocate_data_pool_memory_handler -allocate pool memory 812 - * @sep: pointer to struct sep_device 813 - * @arg: pointer to struct alloc_struct 814 - * 815 - * This function handles the allocate data pool memory request 816 - * This function returns calculates the bus address of the 817 - * allocated memory, and the offset of this area from the mapped address. 
818 - * Therefore, the FVOs in user space can calculate the exact virtual 819 - * address of this allocated memory 820 - */ 821 - static int sep_allocate_data_pool_memory_handler(struct sep_device *sep, 822 - unsigned long arg) 823 - { 824 - int error = 0; 825 - struct alloc_struct command_args; 826 - 827 - /* Holds the allocated buffer address in the system memory pool */ 828 - u32 *token_addr; 829 - 830 - if (copy_from_user(&command_args, (void __user *)arg, 831 - sizeof(struct alloc_struct))) { 832 - error = -EFAULT; 833 - goto end_function; 834 - } 835 - 836 - /* Allocate memory */ 837 - if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > 838 - SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) { 839 - error = -ENOMEM; 840 - goto end_function; 841 - } 842 - 843 - dev_dbg(&sep->pdev->dev, 844 - "data pool bytes_allocated: %x\n", (int)sep->data_pool_bytes_allocated); 845 - dev_dbg(&sep->pdev->dev, 846 - "offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES); 847 - /* Set the virtual and bus address */ 848 - command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + 849 - sep->data_pool_bytes_allocated; 850 - 851 - /* Place in the shared area that is known by the SEP */ 852 - token_addr = (u32 *)(sep->shared_addr + 853 - SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES + 854 - (sep->num_of_data_allocations)*2*sizeof(u32)); 855 - 856 - token_addr[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN; 857 - token_addr[1] = (u32)sep->shared_bus + 858 - SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + 859 - sep->data_pool_bytes_allocated; 860 - 861 - /* Write the memory back to the user space */ 862 - error = copy_to_user((void *)arg, (void *)&command_args, 863 - sizeof(struct alloc_struct)); 864 - if (error) { 865 - error = -EFAULT; 866 - goto end_function; 867 - } 868 - 869 - /* Update the allocation */ 870 - sep->data_pool_bytes_allocated += command_args.num_bytes; 871 - sep->num_of_data_allocations += 1; 872 - 873 - end_function: 874 - return error; 875 - } 876 - 
877 - /** 878 - * sep_lock_kernel_pages - map kernel pages for DMA 879 - * @sep: pointer to struct sep_device 880 - * @kernel_virt_addr: address of data buffer in kernel 881 - * @data_size: size of data 882 - * @lli_array_ptr: lli array 883 - * @in_out_flag: input into device or output from device 884 - * 885 - * This function locks all the physical pages of the kernel virtual buffer 886 - * and construct a basic lli array, where each entry holds the physical 887 - * page address and the size that application data holds in this page 888 - * This function is used only during kernel crypto mod calls from within 889 - * the kernel (when ioctl is not used) 890 - */ 891 - static int sep_lock_kernel_pages(struct sep_device *sep, 892 - unsigned long kernel_virt_addr, 893 - u32 data_size, 894 - struct sep_lli_entry **lli_array_ptr, 895 - int in_out_flag) 896 - 897 - { 898 - int error = 0; 899 - /* Array of lli */ 900 - struct sep_lli_entry *lli_array; 901 - /* Map array */ 902 - struct sep_dma_map *map_array; 903 - 904 - dev_dbg(&sep->pdev->dev, "lock kernel pages kernel_virt_addr is %08lx\n", 905 - (unsigned long)kernel_virt_addr); 906 - dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size); 907 - 908 - lli_array = kmalloc(sizeof(struct sep_lli_entry), GFP_ATOMIC); 909 - if (!lli_array) { 910 - error = -ENOMEM; 911 - goto end_function; 912 - } 913 - map_array = kmalloc(sizeof(struct sep_dma_map), GFP_ATOMIC); 914 - if (!map_array) { 915 - error = -ENOMEM; 916 - goto end_function_with_error; 917 - } 918 - 919 - map_array[0].dma_addr = 920 - dma_map_single(&sep->pdev->dev, (void *)kernel_virt_addr, 921 - data_size, DMA_BIDIRECTIONAL); 922 - map_array[0].size = data_size; 923 - 924 - 925 - /* 926 - * Set the start address of the first page - app data may start not at 927 - * the beginning of the page 928 - */ 929 - lli_array[0].bus_address = (u32)map_array[0].dma_addr; 930 - lli_array[0].block_size = map_array[0].size; 931 - 932 - dev_dbg(&sep->pdev->dev, 933 - 
"lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n", 934 - (unsigned long)lli_array[0].bus_address, 935 - lli_array[0].block_size); 936 - 937 - /* Set the output parameters */ 938 - if (in_out_flag == SEP_DRIVER_IN_FLAG) { 939 - *lli_array_ptr = lli_array; 940 - sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 1; 941 - sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL; 942 - sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array; 943 - sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1; 944 - } else { 945 - *lli_array_ptr = lli_array; 946 - sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = 1; 947 - sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL; 948 - sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array; 949 - sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1; 950 - } 951 - goto end_function; 952 - 953 - end_function_with_error: 954 - kfree(lli_array); 955 - 956 - end_function: 957 - return error; 958 - } 959 - 960 - /** 961 - * sep_lock_user_pages - lock and map user pages for DMA 962 - * @sep: pointer to struct sep_device 963 - * @app_virt_addr: user memory data buffer 964 - * @data_size: size of data buffer 965 - * @lli_array_ptr: lli array 966 - * @in_out_flag: input or output to device 967 - * 968 - * This function locks all the physical pages of the application 969 - * virtual buffer and construct a basic lli array, where each entry 970 - * holds the physical page address and the size that application 971 - * data holds in this physical pages 972 - */ 973 - static int sep_lock_user_pages(struct sep_device *sep, 974 - u32 app_virt_addr, 975 - u32 data_size, 976 - struct sep_lli_entry **lli_array_ptr, 977 - int in_out_flag) 978 - 979 - { 980 - int error = 0; 981 - u32 count; 982 - int result; 983 - /* The the page of the end address of the user space buffer */ 984 - u32 end_page; 985 - /* The page of the start address of the user space buffer */ 986 - u32 start_page; 987 - /* The range in 
pages */ 988 - u32 num_pages; 989 - /* Array of pointers to page */ 990 - struct page **page_array; 991 - /* Array of lli */ 992 - struct sep_lli_entry *lli_array; 993 - /* Map array */ 994 - struct sep_dma_map *map_array; 995 - /* Direction of the DMA mapping for locked pages */ 996 - enum dma_data_direction dir; 997 - 998 - /* Set start and end pages and num pages */ 999 - end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT; 1000 - start_page = app_virt_addr >> PAGE_SHIFT; 1001 - num_pages = end_page - start_page + 1; 1002 - 1003 - dev_dbg(&sep->pdev->dev, "lock user pages app_virt_addr is %x\n", app_virt_addr); 1004 - dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size); 1005 - dev_dbg(&sep->pdev->dev, "start_page is %x\n", start_page); 1006 - dev_dbg(&sep->pdev->dev, "end_page is %x\n", end_page); 1007 - dev_dbg(&sep->pdev->dev, "num_pages is %x\n", num_pages); 1008 - 1009 - /* Allocate array of pages structure pointers */ 1010 - page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC); 1011 - if (!page_array) { 1012 - error = -ENOMEM; 1013 - goto end_function; 1014 - } 1015 - map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC); 1016 - if (!map_array) { 1017 - dev_warn(&sep->pdev->dev, "kmalloc for map_array failed\n"); 1018 - error = -ENOMEM; 1019 - goto end_function_with_error1; 1020 - } 1021 - 1022 - lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages, 1023 - GFP_ATOMIC); 1024 - 1025 - if (!lli_array) { 1026 - dev_warn(&sep->pdev->dev, "kmalloc for lli_array failed\n"); 1027 - error = -ENOMEM; 1028 - goto end_function_with_error2; 1029 - } 1030 - 1031 - /* Convert the application virtual address into a set of physical */ 1032 - down_read(&current->mm->mmap_sem); 1033 - result = get_user_pages(current, current->mm, app_virt_addr, 1034 - num_pages, 1035 - ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 
0 : 1), 1036 - 0, page_array, NULL); 1037 - 1038 - up_read(&current->mm->mmap_sem); 1039 - 1040 - /* Check the number of pages locked - if not all then exit with error */ 1041 - if (result != num_pages) { 1042 - dev_warn(&sep->pdev->dev, 1043 - "not all pages locked by get_user_pages\n"); 1044 - error = -ENOMEM; 1045 - goto end_function_with_error3; 1046 - } 1047 - 1048 - dev_dbg(&sep->pdev->dev, "get_user_pages succeeded\n"); 1049 - 1050 - /* Set direction */ 1051 - if (in_out_flag == SEP_DRIVER_IN_FLAG) 1052 - dir = DMA_TO_DEVICE; 1053 - else 1054 - dir = DMA_FROM_DEVICE; 1055 - 1056 - /* 1057 - * Fill the array using page array data and 1058 - * map the pages - this action will also flush the cache as needed 1059 - */ 1060 - for (count = 0; count < num_pages; count++) { 1061 - /* Fill the map array */ 1062 - map_array[count].dma_addr = 1063 - dma_map_page(&sep->pdev->dev, page_array[count], 1064 - 0, PAGE_SIZE, /*dir*/DMA_BIDIRECTIONAL); 1065 - 1066 - map_array[count].size = PAGE_SIZE; 1067 - 1068 - /* Fill the lli array entry */ 1069 - lli_array[count].bus_address = (u32)map_array[count].dma_addr; 1070 - lli_array[count].block_size = PAGE_SIZE; 1071 - 1072 - dev_warn(&sep->pdev->dev, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n", 1073 - count, (unsigned long)lli_array[count].bus_address, 1074 - count, lli_array[count].block_size); 1075 - } 1076 - 1077 - /* Check the offset for the first page */ 1078 - lli_array[0].bus_address = 1079 - lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK)); 1080 - 1081 - /* Check that not all the data is in the first page only */ 1082 - if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size) 1083 - lli_array[0].block_size = data_size; 1084 - else 1085 - lli_array[0].block_size = 1086 - PAGE_SIZE - (app_virt_addr & (~PAGE_MASK)); 1087 - 1088 - dev_dbg(&sep->pdev->dev, 1089 - "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n", 1090 - (unsigned long)lli_array[count].bus_address, 
1091 - lli_array[count].block_size); 1092 - 1093 - /* Check the size of the last page */ 1094 - if (num_pages > 1) { 1095 - lli_array[num_pages - 1].block_size = 1096 - (app_virt_addr + data_size) & (~PAGE_MASK); 1097 - if (lli_array[num_pages - 1].block_size == 0) 1098 - lli_array[num_pages - 1].block_size = PAGE_SIZE; 1099 - 1100 - dev_warn(&sep->pdev->dev, 1101 - "lli_array[%x].bus_address is " 1102 - "%08lx, lli_array[%x].block_size is %x\n", 1103 - num_pages - 1, 1104 - (unsigned long)lli_array[num_pages - 1].bus_address, 1105 - num_pages - 1, 1106 - lli_array[num_pages - 1].block_size); 1107 - } 1108 - 1109 - /* Set output params according to the in_out flag */ 1110 - if (in_out_flag == SEP_DRIVER_IN_FLAG) { 1111 - *lli_array_ptr = lli_array; 1112 - sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = num_pages; 1113 - sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = page_array; 1114 - sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array; 1115 - sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1116 - num_pages; 1117 - } else { 1118 - *lli_array_ptr = lli_array; 1119 - sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = num_pages; 1120 - sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = 1121 - page_array; 1122 - sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array; 1123 - sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1124 - num_pages; 1125 - } 1126 - goto end_function; 1127 - 1128 - end_function_with_error3: 1129 - /* Free lli array */ 1130 - kfree(lli_array); 1131 - 1132 - end_function_with_error2: 1133 - kfree(map_array); 1134 - 1135 - end_function_with_error1: 1136 - /* Free page array */ 1137 - kfree(page_array); 1138 - 1139 - end_function: 1140 - return error; 1141 - } 1142 - 1143 - /** 1144 - * u32 sep_calculate_lli_table_max_size - size the LLI table 1145 - * @sep: pointer to struct sep_device 1146 - * @lli_in_array_ptr 1147 - * @num_array_entries 1148 - * @last_table_flag 1149 - * 1150 - * This function 
calculates the size of data that can be inserted into 1151 - * the lli table from this array, such that either the table is full 1152 - * (all entries are entered), or there are no more entries in the 1153 - * lli array 1154 - */ 1155 - static u32 sep_calculate_lli_table_max_size(struct sep_device *sep, 1156 - struct sep_lli_entry *lli_in_array_ptr, 1157 - u32 num_array_entries, 1158 - u32 *last_table_flag) 1159 - { 1160 - u32 counter; 1161 - /* Table data size */ 1162 - u32 table_data_size = 0; 1163 - /* Data size for the next table */ 1164 - u32 next_table_data_size; 1165 - 1166 - *last_table_flag = 0; 1167 - 1168 - /* 1169 - * Calculate the data in the out lli table till we fill the whole 1170 - * table or till the data has ended 1171 - */ 1172 - for (counter = 0; 1173 - (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && 1174 - (counter < num_array_entries); counter++) 1175 - table_data_size += lli_in_array_ptr[counter].block_size; 1176 - 1177 - /* 1178 - * Check if we reached the last entry, 1179 - * meaning this ia the last table to build, 1180 - * and no need to check the block alignment 1181 - */ 1182 - if (counter == num_array_entries) { 1183 - /* Set the last table flag */ 1184 - *last_table_flag = 1; 1185 - goto end_function; 1186 - } 1187 - 1188 - /* 1189 - * Calculate the data size of the next table. 1190 - * Stop if no entries left or if data size is more the DMA restriction 1191 - */ 1192 - next_table_data_size = 0; 1193 - for (; counter < num_array_entries; counter++) { 1194 - next_table_data_size += lli_in_array_ptr[counter].block_size; 1195 - if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) 1196 - break; 1197 - } 1198 - 1199 - /* 1200 - * Check if the next table data size is less then DMA rstriction. 
1201 - * if it is - recalculate the current table size, so that the next 1202 - * table data size will be adaquete for DMA 1203 - */ 1204 - if (next_table_data_size && 1205 - next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) 1206 - 1207 - table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE - 1208 - next_table_data_size); 1209 - 1210 - end_function: 1211 - return table_data_size; 1212 - } 1213 - 1214 - /** 1215 - * sep_build_lli_table - build an lli array for the given table 1216 - * @sep: pointer to struct sep_device 1217 - * @lli_array_ptr: pointer to lli array 1218 - * @lli_table_ptr: pointer to lli table 1219 - * @num_processed_entries_ptr: pointer to number of entries 1220 - * @num_table_entries_ptr: pointer to number of tables 1221 - * @table_data_size: total data size 1222 - * 1223 - * Builds ant lli table from the lli_array according to 1224 - * the given size of data 1225 - */ 1226 - static void sep_build_lli_table(struct sep_device *sep, 1227 - struct sep_lli_entry *lli_array_ptr, 1228 - struct sep_lli_entry *lli_table_ptr, 1229 - u32 *num_processed_entries_ptr, 1230 - u32 *num_table_entries_ptr, 1231 - u32 table_data_size) 1232 - { 1233 - /* Current table data size */ 1234 - u32 curr_table_data_size; 1235 - /* Counter of lli array entry */ 1236 - u32 array_counter; 1237 - 1238 - /* Init current table data size and lli array entry counter */ 1239 - curr_table_data_size = 0; 1240 - array_counter = 0; 1241 - *num_table_entries_ptr = 1; 1242 - 1243 - dev_dbg(&sep->pdev->dev, "build lli table table_data_size is %x\n", table_data_size); 1244 - 1245 - /* Fill the table till table size reaches the needed amount */ 1246 - while (curr_table_data_size < table_data_size) { 1247 - /* Update the number of entries in table */ 1248 - (*num_table_entries_ptr)++; 1249 - 1250 - lli_table_ptr->bus_address = 1251 - cpu_to_le32(lli_array_ptr[array_counter].bus_address); 1252 - 1253 - lli_table_ptr->block_size = 1254 - 
cpu_to_le32(lli_array_ptr[array_counter].block_size); 1255 - 1256 - curr_table_data_size += lli_array_ptr[array_counter].block_size; 1257 - 1258 - dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n", 1259 - lli_table_ptr); 1260 - dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n", 1261 - (unsigned long)lli_table_ptr->bus_address); 1262 - dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n", 1263 - lli_table_ptr->block_size); 1264 - 1265 - /* Check for overflow of the table data */ 1266 - if (curr_table_data_size > table_data_size) { 1267 - dev_dbg(&sep->pdev->dev, 1268 - "curr_table_data_size too large\n"); 1269 - 1270 - /* Update the size of block in the table */ 1271 - lli_table_ptr->block_size -= 1272 - cpu_to_le32((curr_table_data_size - table_data_size)); 1273 - 1274 - /* Update the physical address in the lli array */ 1275 - lli_array_ptr[array_counter].bus_address += 1276 - cpu_to_le32(lli_table_ptr->block_size); 1277 - 1278 - /* Update the block size left in the lli array */ 1279 - lli_array_ptr[array_counter].block_size = 1280 - (curr_table_data_size - table_data_size); 1281 - } else 1282 - /* Advance to the next entry in the lli_array */ 1283 - array_counter++; 1284 - 1285 - dev_dbg(&sep->pdev->dev, 1286 - "lli_table_ptr->bus_address is %08lx\n", 1287 - (unsigned long)lli_table_ptr->bus_address); 1288 - dev_dbg(&sep->pdev->dev, 1289 - "lli_table_ptr->block_size is %x\n", 1290 - lli_table_ptr->block_size); 1291 - 1292 - /* Move to the next entry in table */ 1293 - lli_table_ptr++; 1294 - } 1295 - 1296 - /* Set the info entry to default */ 1297 - lli_table_ptr->bus_address = 0xffffffff; 1298 - lli_table_ptr->block_size = 0; 1299 - 1300 - /* Set the output parameter */ 1301 - *num_processed_entries_ptr += array_counter; 1302 - 1303 - } 1304 - 1305 - /** 1306 - * sep_shared_area_virt_to_bus - map shared area to bus address 1307 - * @sep: pointer to struct sep_device 1308 - * @virt_address: virtual address to convert 1309 - * 1310 - * This 
functions returns the physical address inside shared area according 1311 - * to the virtual address. It can be either on the externa RAM device 1312 - * (ioremapped), or on the system RAM 1313 - * This implementation is for the external RAM 1314 - */ 1315 - static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep, 1316 - void *virt_address) 1317 - { 1318 - dev_dbg(&sep->pdev->dev, "sh virt to phys v %p\n", virt_address); 1319 - dev_dbg(&sep->pdev->dev, "sh virt to phys p %08lx\n", 1320 - (unsigned long) 1321 - sep->shared_bus + (virt_address - sep->shared_addr)); 1322 - 1323 - return sep->shared_bus + (size_t)(virt_address - sep->shared_addr); 1324 - } 1325 - 1326 - /** 1327 - * sep_shared_area_bus_to_virt - map shared area bus address to kernel 1328 - * @sep: pointer to struct sep_device 1329 - * @bus_address: bus address to convert 1330 - * 1331 - * This functions returns the virtual address inside shared area 1332 - * according to the physical address. It can be either on the 1333 - * externa RAM device (ioremapped), or on the system RAM 1334 - * This implementation is for the external RAM 1335 - */ 1336 - static void *sep_shared_area_bus_to_virt(struct sep_device *sep, 1337 - dma_addr_t bus_address) 1338 - { 1339 - dev_dbg(&sep->pdev->dev, "shared bus to virt b=%lx v=%lx\n", 1340 - (unsigned long)bus_address, (unsigned long)(sep->shared_addr + 1341 - (size_t)(bus_address - sep->shared_bus))); 1342 - 1343 - return sep->shared_addr + (size_t)(bus_address - sep->shared_bus); 1344 - } 1345 - 1346 - /** 1347 - * sep_debug_print_lli_tables - dump LLI table 1348 - * @sep: pointer to struct sep_device 1349 - * @lli_table_ptr: pointer to sep_lli_entry 1350 - * @num_table_entries: number of entries 1351 - * @table_data_size: total data size 1352 - * 1353 - * Walk the the list of the print created tables and print all the data 1354 - */ 1355 - static void sep_debug_print_lli_tables(struct sep_device *sep, 1356 - struct sep_lli_entry *lli_table_ptr, 1357 - 
unsigned long num_table_entries, 1358 - unsigned long table_data_size) 1359 - { 1360 - unsigned long table_count = 1; 1361 - unsigned long entries_count = 0; 1362 - 1363 - dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables start\n"); 1364 - 1365 - while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) { 1366 - dev_dbg(&sep->pdev->dev, 1367 - "lli table %08lx, table_data_size is %lu\n", 1368 - table_count, table_data_size); 1369 - dev_dbg(&sep->pdev->dev, "num_table_entries is %lu\n", 1370 - num_table_entries); 1371 - 1372 - /* Print entries of the table (without info entry) */ 1373 - for (entries_count = 0; entries_count < num_table_entries; 1374 - entries_count++, lli_table_ptr++) { 1375 - 1376 - dev_dbg(&sep->pdev->dev, 1377 - "lli_table_ptr address is %08lx\n", 1378 - (unsigned long) lli_table_ptr); 1379 - 1380 - dev_dbg(&sep->pdev->dev, 1381 - "phys address is %08lx block size is %x\n", 1382 - (unsigned long)lli_table_ptr->bus_address, 1383 - lli_table_ptr->block_size); 1384 - } 1385 - /* Point to the info entry */ 1386 - lli_table_ptr--; 1387 - 1388 - dev_dbg(&sep->pdev->dev, 1389 - "phys lli_table_ptr->block_size is %x\n", 1390 - lli_table_ptr->block_size); 1391 - 1392 - dev_dbg(&sep->pdev->dev, 1393 - "phys lli_table_ptr->physical_address is %08lu\n", 1394 - (unsigned long)lli_table_ptr->bus_address); 1395 - 1396 - 1397 - table_data_size = lli_table_ptr->block_size & 0xffffff; 1398 - num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff; 1399 - 1400 - dev_dbg(&sep->pdev->dev, 1401 - "phys table_data_size is %lu num_table_entries is" 1402 - " %lu bus_address is%lu\n", table_data_size, 1403 - num_table_entries, (unsigned long)lli_table_ptr->bus_address); 1404 - 1405 - if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff) 1406 - lli_table_ptr = (struct sep_lli_entry *) 1407 - sep_shared_bus_to_virt(sep, 1408 - (unsigned long)lli_table_ptr->bus_address); 1409 - 1410 - table_count++; 1411 - } 1412 - dev_dbg(&sep->pdev->dev, 
"sep_debug_print_lli_tables end\n"); 1413 - } 1414 - 1415 - 1416 - /** 1417 - * sep_prepare_empty_lli_table - create a blank LLI table 1418 - * @sep: pointer to struct sep_device 1419 - * @lli_table_addr_ptr: pointer to lli table 1420 - * @num_entries_ptr: pointer to number of entries 1421 - * @table_data_size_ptr: point to table data size 1422 - * 1423 - * This function creates empty lli tables when there is no data 1424 - */ 1425 - static void sep_prepare_empty_lli_table(struct sep_device *sep, 1426 - dma_addr_t *lli_table_addr_ptr, 1427 - u32 *num_entries_ptr, 1428 - u32 *table_data_size_ptr) 1429 - { 1430 - struct sep_lli_entry *lli_table_ptr; 1431 - 1432 - /* Find the area for new table */ 1433 - lli_table_ptr = 1434 - (struct sep_lli_entry *)(sep->shared_addr + 1435 - SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + 1436 - sep->num_lli_tables_created * sizeof(struct sep_lli_entry) * 1437 - SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP); 1438 - 1439 - lli_table_ptr->bus_address = 0; 1440 - lli_table_ptr->block_size = 0; 1441 - 1442 - lli_table_ptr++; 1443 - lli_table_ptr->bus_address = 0xFFFFFFFF; 1444 - lli_table_ptr->block_size = 0; 1445 - 1446 - /* Set the output parameter value */ 1447 - *lli_table_addr_ptr = sep->shared_bus + 1448 - SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + 1449 - sep->num_lli_tables_created * 1450 - sizeof(struct sep_lli_entry) * 1451 - SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; 1452 - 1453 - /* Set the num of entries and table data size for empty table */ 1454 - *num_entries_ptr = 2; 1455 - *table_data_size_ptr = 0; 1456 - 1457 - /* Update the number of created tables */ 1458 - sep->num_lli_tables_created++; 1459 - } 1460 - 1461 - /** 1462 - * sep_prepare_input_dma_table - prepare input DMA mappings 1463 - * @sep: pointer to struct sep_device 1464 - * @data_size: 1465 - * @block_size: 1466 - * @lli_table_ptr: 1467 - * @num_entries_ptr: 1468 - * @table_data_size_ptr: 1469 - * @is_kva: set for kernel data (kernel cryptio call) 1470 - * 1471 - * This function 
prepares only input DMA table for synhronic symmetric 1472 - * operations (HASH) 1473 - * Note that all bus addresses that are passed to the SEP 1474 - * are in 32 bit format; the SEP is a 32 bit device 1475 - */ 1476 - static int sep_prepare_input_dma_table(struct sep_device *sep, 1477 - unsigned long app_virt_addr, 1478 - u32 data_size, 1479 - u32 block_size, 1480 - dma_addr_t *lli_table_ptr, 1481 - u32 *num_entries_ptr, 1482 - u32 *table_data_size_ptr, 1483 - bool is_kva) 1484 - { 1485 - int error = 0; 1486 - /* Pointer to the info entry of the table - the last entry */ 1487 - struct sep_lli_entry *info_entry_ptr; 1488 - /* Array of pointers to page */ 1489 - struct sep_lli_entry *lli_array_ptr; 1490 - /* Points to the first entry to be processed in the lli_in_array */ 1491 - u32 current_entry = 0; 1492 - /* Num entries in the virtual buffer */ 1493 - u32 sep_lli_entries = 0; 1494 - /* Lli table pointer */ 1495 - struct sep_lli_entry *in_lli_table_ptr; 1496 - /* The total data in one table */ 1497 - u32 table_data_size = 0; 1498 - /* Flag for last table */ 1499 - u32 last_table_flag = 0; 1500 - /* Number of entries in lli table */ 1501 - u32 num_entries_in_table = 0; 1502 - /* Next table address */ 1503 - void *lli_table_alloc_addr = 0; 1504 - 1505 - dev_dbg(&sep->pdev->dev, "prepare intput dma table data_size is %x\n", data_size); 1506 - dev_dbg(&sep->pdev->dev, "block_size is %x\n", block_size); 1507 - 1508 - /* Initialize the pages pointers */ 1509 - sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL; 1510 - sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 0; 1511 - 1512 - /* Set the kernel address for first table to be allocated */ 1513 - lli_table_alloc_addr = (void *)(sep->shared_addr + 1514 - SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + 1515 - sep->num_lli_tables_created * sizeof(struct sep_lli_entry) * 1516 - SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP); 1517 - 1518 - if (data_size == 0) { 1519 - /* Special case - create meptu table - 2 entries, zero data 
*/ 1520 - sep_prepare_empty_lli_table(sep, lli_table_ptr, 1521 - num_entries_ptr, table_data_size_ptr); 1522 - goto update_dcb_counter; 1523 - } 1524 - 1525 - /* Check if the pages are in Kernel Virtual Address layout */ 1526 - if (is_kva == true) 1527 - /* Lock the pages in the kernel */ 1528 - error = sep_lock_kernel_pages(sep, app_virt_addr, 1529 - data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG); 1530 - else 1531 - /* 1532 - * Lock the pages of the user buffer 1533 - * and translate them to pages 1534 - */ 1535 - error = sep_lock_user_pages(sep, app_virt_addr, 1536 - data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG); 1537 - 1538 - if (error) 1539 - goto end_function; 1540 - 1541 - dev_dbg(&sep->pdev->dev, "output sep_in_num_pages is %x\n", 1542 - sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages); 1543 - 1544 - current_entry = 0; 1545 - info_entry_ptr = NULL; 1546 - 1547 - sep_lli_entries = sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages; 1548 - 1549 - /* Loop till all the entries in in array are not processed */ 1550 - while (current_entry < sep_lli_entries) { 1551 - 1552 - /* Set the new input and output tables */ 1553 - in_lli_table_ptr = 1554 - (struct sep_lli_entry *)lli_table_alloc_addr; 1555 - 1556 - lli_table_alloc_addr += sizeof(struct sep_lli_entry) * 1557 - SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; 1558 - 1559 - if (lli_table_alloc_addr > 1560 - ((void *)sep->shared_addr + 1561 - SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + 1562 - SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) { 1563 - 1564 - error = -ENOMEM; 1565 - goto end_function_error; 1566 - 1567 - } 1568 - 1569 - /* Update the number of created tables */ 1570 - sep->num_lli_tables_created++; 1571 - 1572 - /* Calculate the maximum size of data for input table */ 1573 - table_data_size = sep_calculate_lli_table_max_size(sep, 1574 - &lli_array_ptr[current_entry], 1575 - (sep_lli_entries - current_entry), 1576 - &last_table_flag); 1577 - 1578 - /* 1579 - * If this is not the last table - 1580 - * then align it to the 
block size 1581 - */ 1582 - if (!last_table_flag) 1583 - table_data_size = 1584 - (table_data_size / block_size) * block_size; 1585 - 1586 - dev_dbg(&sep->pdev->dev, "output table_data_size is %x\n", 1587 - table_data_size); 1588 - 1589 - /* Construct input lli table */ 1590 - sep_build_lli_table(sep, &lli_array_ptr[current_entry], 1591 - in_lli_table_ptr, 1592 - &current_entry, &num_entries_in_table, table_data_size); 1593 - 1594 - if (info_entry_ptr == NULL) { 1595 - 1596 - /* Set the output parameters to physical addresses */ 1597 - *lli_table_ptr = sep_shared_area_virt_to_bus(sep, 1598 - in_lli_table_ptr); 1599 - *num_entries_ptr = num_entries_in_table; 1600 - *table_data_size_ptr = table_data_size; 1601 - 1602 - dev_dbg(&sep->pdev->dev, 1603 - "output lli_table_in_ptr is %08lx\n", 1604 - (unsigned long)*lli_table_ptr); 1605 - 1606 - } else { 1607 - /* Update the info entry of the previous in table */ 1608 - info_entry_ptr->bus_address = 1609 - sep_shared_area_virt_to_bus(sep, 1610 - in_lli_table_ptr); 1611 - info_entry_ptr->block_size = 1612 - ((num_entries_in_table) << 24) | 1613 - (table_data_size); 1614 - } 1615 - /* Save the pointer to the info entry of the current tables */ 1616 - info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1; 1617 - } 1618 - /* Print input tables */ 1619 - sep_debug_print_lli_tables(sep, (struct sep_lli_entry *) 1620 - sep_shared_area_bus_to_virt(sep, *lli_table_ptr), 1621 - *num_entries_ptr, *table_data_size_ptr); 1622 - /* The array of the pages */ 1623 - kfree(lli_array_ptr); 1624 - 1625 - update_dcb_counter: 1626 - /* Update DCB counter */ 1627 - sep->nr_dcb_creat++; 1628 - goto end_function; 1629 - 1630 - end_function_error: 1631 - /* Free all the allocated resources */ 1632 - kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array); 1633 - kfree(lli_array_ptr); 1634 - kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array); 1635 - 1636 - end_function: 1637 - return error; 1638 - 1639 - } 1640 - /** 1641 - * 
sep_construct_dma_tables_from_lli - prepare AES/DES mappings 1642 - * @sep: pointer to struct sep_device 1643 - * @lli_in_array: 1644 - * @sep_in_lli_entries: 1645 - * @lli_out_array: 1646 - * @sep_out_lli_entries 1647 - * @block_size 1648 - * @lli_table_in_ptr 1649 - * @lli_table_out_ptr 1650 - * @in_num_entries_ptr 1651 - * @out_num_entries_ptr 1652 - * @table_data_size_ptr 1653 - * 1654 - * This function creates the input and output DMA tables for 1655 - * symmetric operations (AES/DES) according to the block 1656 - * size from LLI arays 1657 - * Note that all bus addresses that are passed to the SEP 1658 - * are in 32 bit format; the SEP is a 32 bit device 1659 - */ 1660 - static int sep_construct_dma_tables_from_lli( 1661 - struct sep_device *sep, 1662 - struct sep_lli_entry *lli_in_array, 1663 - u32 sep_in_lli_entries, 1664 - struct sep_lli_entry *lli_out_array, 1665 - u32 sep_out_lli_entries, 1666 - u32 block_size, 1667 - dma_addr_t *lli_table_in_ptr, 1668 - dma_addr_t *lli_table_out_ptr, 1669 - u32 *in_num_entries_ptr, 1670 - u32 *out_num_entries_ptr, 1671 - u32 *table_data_size_ptr) 1672 - { 1673 - /* Points to the area where next lli table can be allocated */ 1674 - void *lli_table_alloc_addr = 0; 1675 - /* Input lli table */ 1676 - struct sep_lli_entry *in_lli_table_ptr = NULL; 1677 - /* Output lli table */ 1678 - struct sep_lli_entry *out_lli_table_ptr = NULL; 1679 - /* Pointer to the info entry of the table - the last entry */ 1680 - struct sep_lli_entry *info_in_entry_ptr = NULL; 1681 - /* Pointer to the info entry of the table - the last entry */ 1682 - struct sep_lli_entry *info_out_entry_ptr = NULL; 1683 - /* Points to the first entry to be processed in the lli_in_array */ 1684 - u32 current_in_entry = 0; 1685 - /* Points to the first entry to be processed in the lli_out_array */ 1686 - u32 current_out_entry = 0; 1687 - /* Max size of the input table */ 1688 - u32 in_table_data_size = 0; 1689 - /* Max size of the output table */ 1690 - u32 
out_table_data_size = 0; 1691 - /* Flag te signifies if this is the last tables build */ 1692 - u32 last_table_flag = 0; 1693 - /* The data size that should be in table */ 1694 - u32 table_data_size = 0; 1695 - /* Number of etnries in the input table */ 1696 - u32 num_entries_in_table = 0; 1697 - /* Number of etnries in the output table */ 1698 - u32 num_entries_out_table = 0; 1699 - 1700 - /* Initiate to point after the message area */ 1701 - lli_table_alloc_addr = (void *)(sep->shared_addr + 1702 - SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + 1703 - (sep->num_lli_tables_created * 1704 - (sizeof(struct sep_lli_entry) * 1705 - SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP))); 1706 - 1707 - /* Loop till all the entries in in array are not processed */ 1708 - while (current_in_entry < sep_in_lli_entries) { 1709 - /* Set the new input and output tables */ 1710 - in_lli_table_ptr = 1711 - (struct sep_lli_entry *)lli_table_alloc_addr; 1712 - 1713 - lli_table_alloc_addr += sizeof(struct sep_lli_entry) * 1714 - SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; 1715 - 1716 - /* Set the first output tables */ 1717 - out_lli_table_ptr = 1718 - (struct sep_lli_entry *)lli_table_alloc_addr; 1719 - 1720 - /* Check if the DMA table area limit was overrun */ 1721 - if ((lli_table_alloc_addr + sizeof(struct sep_lli_entry) * 1722 - SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) > 1723 - ((void *)sep->shared_addr + 1724 - SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + 1725 - SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) { 1726 - 1727 - dev_warn(&sep->pdev->dev, "dma table limit overrun\n"); 1728 - return -ENOMEM; 1729 - } 1730 - 1731 - /* Update the number of the lli tables created */ 1732 - sep->num_lli_tables_created += 2; 1733 - 1734 - lli_table_alloc_addr += sizeof(struct sep_lli_entry) * 1735 - SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; 1736 - 1737 - /* Calculate the maximum size of data for input table */ 1738 - in_table_data_size = 1739 - sep_calculate_lli_table_max_size(sep, 1740 - &lli_in_array[current_in_entry], 1741 - 
(sep_in_lli_entries - current_in_entry), 1742 - &last_table_flag); 1743 - 1744 - /* Calculate the maximum size of data for output table */ 1745 - out_table_data_size = 1746 - sep_calculate_lli_table_max_size(sep, 1747 - &lli_out_array[current_out_entry], 1748 - (sep_out_lli_entries - current_out_entry), 1749 - &last_table_flag); 1750 - 1751 - dev_dbg(&sep->pdev->dev, 1752 - "construct tables from lli in_table_data_size is %x\n", 1753 - in_table_data_size); 1754 - 1755 - dev_dbg(&sep->pdev->dev, 1756 - "construct tables from lli out_table_data_size is %x\n", 1757 - out_table_data_size); 1758 - 1759 - table_data_size = in_table_data_size; 1760 - 1761 - if (!last_table_flag) { 1762 - /* 1763 - * If this is not the last table, 1764 - * then must check where the data is smallest 1765 - * and then align it to the block size 1766 - */ 1767 - if (table_data_size > out_table_data_size) 1768 - table_data_size = out_table_data_size; 1769 - 1770 - /* 1771 - * Now calculate the table size so that 1772 - * it will be module block size 1773 - */ 1774 - table_data_size = (table_data_size / block_size) * 1775 - block_size; 1776 - } 1777 - 1778 - /* Construct input lli table */ 1779 - sep_build_lli_table(sep, &lli_in_array[current_in_entry], 1780 - in_lli_table_ptr, 1781 - &current_in_entry, 1782 - &num_entries_in_table, 1783 - table_data_size); 1784 - 1785 - /* Construct output lli table */ 1786 - sep_build_lli_table(sep, &lli_out_array[current_out_entry], 1787 - out_lli_table_ptr, 1788 - &current_out_entry, 1789 - &num_entries_out_table, 1790 - table_data_size); 1791 - 1792 - /* If info entry is null - this is the first table built */ 1793 - if (info_in_entry_ptr == NULL) { 1794 - /* Set the output parameters to physical addresses */ 1795 - *lli_table_in_ptr = 1796 - sep_shared_area_virt_to_bus(sep, in_lli_table_ptr); 1797 - 1798 - *in_num_entries_ptr = num_entries_in_table; 1799 - 1800 - *lli_table_out_ptr = 1801 - sep_shared_area_virt_to_bus(sep, 1802 - out_lli_table_ptr); 1803 
- 1804 - *out_num_entries_ptr = num_entries_out_table; 1805 - *table_data_size_ptr = table_data_size; 1806 - 1807 - dev_dbg(&sep->pdev->dev, 1808 - "output lli_table_in_ptr is %08lx\n", 1809 - (unsigned long)*lli_table_in_ptr); 1810 - dev_dbg(&sep->pdev->dev, 1811 - "output lli_table_out_ptr is %08lx\n", 1812 - (unsigned long)*lli_table_out_ptr); 1813 - } else { 1814 - /* Update the info entry of the previous in table */ 1815 - info_in_entry_ptr->bus_address = 1816 - sep_shared_area_virt_to_bus(sep, 1817 - in_lli_table_ptr); 1818 - 1819 - info_in_entry_ptr->block_size = 1820 - ((num_entries_in_table) << 24) | 1821 - (table_data_size); 1822 - 1823 - /* Update the info entry of the previous in table */ 1824 - info_out_entry_ptr->bus_address = 1825 - sep_shared_area_virt_to_bus(sep, 1826 - out_lli_table_ptr); 1827 - 1828 - info_out_entry_ptr->block_size = 1829 - ((num_entries_out_table) << 24) | 1830 - (table_data_size); 1831 - 1832 - dev_dbg(&sep->pdev->dev, 1833 - "output lli_table_in_ptr:%08lx %08x\n", 1834 - (unsigned long)info_in_entry_ptr->bus_address, 1835 - info_in_entry_ptr->block_size); 1836 - 1837 - dev_dbg(&sep->pdev->dev, 1838 - "output lli_table_out_ptr:%08lx %08x\n", 1839 - (unsigned long)info_out_entry_ptr->bus_address, 1840 - info_out_entry_ptr->block_size); 1841 - } 1842 - 1843 - /* Save the pointer to the info entry of the current tables */ 1844 - info_in_entry_ptr = in_lli_table_ptr + 1845 - num_entries_in_table - 1; 1846 - info_out_entry_ptr = out_lli_table_ptr + 1847 - num_entries_out_table - 1; 1848 - 1849 - dev_dbg(&sep->pdev->dev, 1850 - "output num_entries_out_table is %x\n", 1851 - (u32)num_entries_out_table); 1852 - dev_dbg(&sep->pdev->dev, 1853 - "output info_in_entry_ptr is %lx\n", 1854 - (unsigned long)info_in_entry_ptr); 1855 - dev_dbg(&sep->pdev->dev, 1856 - "output info_out_entry_ptr is %lx\n", 1857 - (unsigned long)info_out_entry_ptr); 1858 - } 1859 - 1860 - /* Print input tables */ 1861 - sep_debug_print_lli_tables(sep, 1862 - 
(struct sep_lli_entry *) 1863 - sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr), 1864 - *in_num_entries_ptr, 1865 - *table_data_size_ptr); 1866 - 1867 - /* Print output tables */ 1868 - sep_debug_print_lli_tables(sep, 1869 - (struct sep_lli_entry *) 1870 - sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr), 1871 - *out_num_entries_ptr, 1872 - *table_data_size_ptr); 1873 - 1874 - return 0; 1875 - } 1876 - 1877 - /** 1878 - * sep_prepare_input_output_dma_table - prepare DMA I/O table 1879 - * @app_virt_in_addr: 1880 - * @app_virt_out_addr: 1881 - * @data_size: 1882 - * @block_size: 1883 - * @lli_table_in_ptr: 1884 - * @lli_table_out_ptr: 1885 - * @in_num_entries_ptr: 1886 - * @out_num_entries_ptr: 1887 - * @table_data_size_ptr: 1888 - * @is_kva: set for kernel data; used only for kernel crypto module 1889 - * 1890 - * This function builds input and output DMA tables for synhronic 1891 - * symmetric operations (AES, DES, HASH). It also checks that each table 1892 - * is of the modular block size 1893 - * Note that all bus addresses that are passed to the SEP 1894 - * are in 32 bit format; the SEP is a 32 bit device 1895 - */ 1896 - static int sep_prepare_input_output_dma_table(struct sep_device *sep, 1897 - unsigned long app_virt_in_addr, 1898 - unsigned long app_virt_out_addr, 1899 - u32 data_size, 1900 - u32 block_size, 1901 - dma_addr_t *lli_table_in_ptr, 1902 - dma_addr_t *lli_table_out_ptr, 1903 - u32 *in_num_entries_ptr, 1904 - u32 *out_num_entries_ptr, 1905 - u32 *table_data_size_ptr, 1906 - bool is_kva) 1907 - 1908 - { 1909 - int error = 0; 1910 - /* Array of pointers of page */ 1911 - struct sep_lli_entry *lli_in_array; 1912 - /* Array of pointers of page */ 1913 - struct sep_lli_entry *lli_out_array; 1914 - 1915 - if (data_size == 0) { 1916 - /* Prepare empty table for input and output */ 1917 - sep_prepare_empty_lli_table(sep, lli_table_in_ptr, 1918 - in_num_entries_ptr, table_data_size_ptr); 1919 - 1920 - sep_prepare_empty_lli_table(sep, 
lli_table_out_ptr, 1921 - out_num_entries_ptr, table_data_size_ptr); 1922 - 1923 - goto update_dcb_counter; 1924 - } 1925 - 1926 - /* Initialize the pages pointers */ 1927 - sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL; 1928 - sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL; 1929 - 1930 - /* Lock the pages of the buffer and translate them to pages */ 1931 - if (is_kva == true) { 1932 - error = sep_lock_kernel_pages(sep, app_virt_in_addr, 1933 - data_size, &lli_in_array, SEP_DRIVER_IN_FLAG); 1934 - 1935 - if (error) { 1936 - dev_warn(&sep->pdev->dev, 1937 - "lock kernel for in failed\n"); 1938 - goto end_function; 1939 - } 1940 - 1941 - error = sep_lock_kernel_pages(sep, app_virt_out_addr, 1942 - data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG); 1943 - 1944 - if (error) { 1945 - dev_warn(&sep->pdev->dev, 1946 - "lock kernel for out failed\n"); 1947 - goto end_function; 1948 - } 1949 - } 1950 - 1951 - else { 1952 - error = sep_lock_user_pages(sep, app_virt_in_addr, 1953 - data_size, &lli_in_array, SEP_DRIVER_IN_FLAG); 1954 - if (error) { 1955 - dev_warn(&sep->pdev->dev, 1956 - "sep_lock_user_pages for input virtual buffer failed\n"); 1957 - goto end_function; 1958 - } 1959 - 1960 - error = sep_lock_user_pages(sep, app_virt_out_addr, 1961 - data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG); 1962 - 1963 - if (error) { 1964 - dev_warn(&sep->pdev->dev, 1965 - "sep_lock_user_pages for output virtual buffer failed\n"); 1966 - goto end_function_free_lli_in; 1967 - } 1968 - } 1969 - 1970 - dev_dbg(&sep->pdev->dev, "prep input output dma table sep_in_num_pages is %x\n", 1971 - sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages); 1972 - dev_dbg(&sep->pdev->dev, "sep_out_num_pages is %x\n", 1973 - sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages); 1974 - dev_dbg(&sep->pdev->dev, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", 1975 - SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP); 1976 - 1977 - /* Call the function that creates table from the lli arrays */ 1978 - error = 
sep_construct_dma_tables_from_lli(sep, lli_in_array, 1979 - sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages, 1980 - lli_out_array, 1981 - sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages, 1982 - block_size, lli_table_in_ptr, lli_table_out_ptr, 1983 - in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr); 1984 - 1985 - if (error) { 1986 - dev_warn(&sep->pdev->dev, 1987 - "sep_construct_dma_tables_from_lli failed\n"); 1988 - goto end_function_with_error; 1989 - } 1990 - 1991 - kfree(lli_out_array); 1992 - kfree(lli_in_array); 1993 - 1994 - update_dcb_counter: 1995 - /* Update DCB counter */ 1996 - sep->nr_dcb_creat++; 1997 - 1998 - goto end_function; 1999 - 2000 - end_function_with_error: 2001 - kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_map_array); 2002 - kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_page_array); 2003 - kfree(lli_out_array); 2004 - 2005 - 2006 - end_function_free_lli_in: 2007 - kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array); 2008 - kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array); 2009 - kfree(lli_in_array); 2010 - 2011 - end_function: 2012 - 2013 - return error; 2014 - 2015 - } 2016 - 2017 - /** 2018 - * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks 2019 - * @app_in_address: unsigned long; for data buffer in (user space) 2020 - * @app_out_address: unsigned long; for data buffer out (user space) 2021 - * @data_in_size: u32; for size of data 2022 - * @block_size: u32; for block size 2023 - * @tail_block_size: u32; for size of tail block 2024 - * @isapplet: bool; to indicate external app 2025 - * @is_kva: bool; kernel buffer; only used for kernel crypto module 2026 - * 2027 - * This function prepares the linked DMA tables and puts the 2028 - * address for the linked list of tables inta a DCB (data control 2029 - * block) the address of which is known by the SEP hardware 2030 - * Note that all bus addresses that are passed to the SEP 2031 - * are in 32 bit format; the SEP is a 32 bit device 2032 - */ 
2033 - static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep, 2034 - unsigned long app_in_address, 2035 - unsigned long app_out_address, 2036 - u32 data_in_size, 2037 - u32 block_size, 2038 - u32 tail_block_size, 2039 - bool isapplet, 2040 - bool is_kva) 2041 - { 2042 - int error = 0; 2043 - /* Size of tail */ 2044 - u32 tail_size = 0; 2045 - /* Address of the created DCB table */ 2046 - struct sep_dcblock *dcb_table_ptr = NULL; 2047 - /* The physical address of the first input DMA table */ 2048 - dma_addr_t in_first_mlli_address = 0; 2049 - /* Number of entries in the first input DMA table */ 2050 - u32 in_first_num_entries = 0; 2051 - /* The physical address of the first output DMA table */ 2052 - dma_addr_t out_first_mlli_address = 0; 2053 - /* Number of entries in the first output DMA table */ 2054 - u32 out_first_num_entries = 0; 2055 - /* Data in the first input/output table */ 2056 - u32 first_data_size = 0; 2057 - 2058 - if (sep->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) { 2059 - /* No more DCBs to allocate */ 2060 - dev_warn(&sep->pdev->dev, "no more DCBs available\n"); 2061 - error = -ENOSPC; 2062 - goto end_function; 2063 - } 2064 - 2065 - /* Allocate new DCB */ 2066 - dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr + 2067 - SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES + 2068 - (sep->nr_dcb_creat * sizeof(struct sep_dcblock))); 2069 - 2070 - /* Set the default values in the DCB */ 2071 - dcb_table_ptr->input_mlli_address = 0; 2072 - dcb_table_ptr->input_mlli_num_entries = 0; 2073 - dcb_table_ptr->input_mlli_data_size = 0; 2074 - dcb_table_ptr->output_mlli_address = 0; 2075 - dcb_table_ptr->output_mlli_num_entries = 0; 2076 - dcb_table_ptr->output_mlli_data_size = 0; 2077 - dcb_table_ptr->tail_data_size = 0; 2078 - dcb_table_ptr->out_vr_tail_pt = 0; 2079 - 2080 - if (isapplet == true) { 2081 - 2082 - /* Check if there is enough data for DMA operation */ 2083 - if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) { 2084 - if 
(is_kva == true) { 2085 - memcpy(dcb_table_ptr->tail_data, 2086 - (void *)app_in_address, data_in_size); 2087 - } else { 2088 - if (copy_from_user(dcb_table_ptr->tail_data, 2089 - (void __user *)app_in_address, 2090 - data_in_size)) { 2091 - error = -EFAULT; 2092 - goto end_function; 2093 - } 2094 - } 2095 - 2096 - dcb_table_ptr->tail_data_size = data_in_size; 2097 - 2098 - /* Set the output user-space address for mem2mem op */ 2099 - if (app_out_address) 2100 - dcb_table_ptr->out_vr_tail_pt = 2101 - (aligned_u64)app_out_address; 2102 - 2103 - /* 2104 - * Update both data length parameters in order to avoid 2105 - * second data copy and allow building of empty mlli 2106 - * tables 2107 - */ 2108 - tail_size = 0x0; 2109 - data_in_size = 0x0; 2110 - 2111 - } else { 2112 - if (!app_out_address) { 2113 - tail_size = data_in_size % block_size; 2114 - if (!tail_size) { 2115 - if (tail_block_size == block_size) 2116 - tail_size = block_size; 2117 - } 2118 - } else { 2119 - tail_size = 0; 2120 - } 2121 - } 2122 - if (tail_size) { 2123 - if (tail_size > sizeof(dcb_table_ptr->tail_data)) 2124 - return -EINVAL; 2125 - if (is_kva == true) { 2126 - memcpy(dcb_table_ptr->tail_data, 2127 - (void *)(app_in_address + data_in_size - 2128 - tail_size), tail_size); 2129 - } else { 2130 - /* We have tail data - copy it to DCB */ 2131 - if (copy_from_user(dcb_table_ptr->tail_data, 2132 - (void *)(app_in_address + 2133 - data_in_size - tail_size), tail_size)) { 2134 - error = -EFAULT; 2135 - goto end_function; 2136 - } 2137 - } 2138 - if (app_out_address) 2139 - /* 2140 - * Calculate the output address 2141 - * according to tail data size 2142 - */ 2143 - dcb_table_ptr->out_vr_tail_pt = 2144 - (aligned_u64)app_out_address + data_in_size 2145 - - tail_size; 2146 - 2147 - /* Save the real tail data size */ 2148 - dcb_table_ptr->tail_data_size = tail_size; 2149 - /* 2150 - * Update the data size without the tail 2151 - * data size AKA data for the dma 2152 - */ 2153 - data_in_size = 
(data_in_size - tail_size); 2154 - } 2155 - } 2156 - /* Check if we need to build only input table or input/output */ 2157 - if (app_out_address) { 2158 - /* Prepare input/output tables */ 2159 - error = sep_prepare_input_output_dma_table(sep, 2160 - app_in_address, 2161 - app_out_address, 2162 - data_in_size, 2163 - block_size, 2164 - &in_first_mlli_address, 2165 - &out_first_mlli_address, 2166 - &in_first_num_entries, 2167 - &out_first_num_entries, 2168 - &first_data_size, 2169 - is_kva); 2170 - } else { 2171 - /* Prepare input tables */ 2172 - error = sep_prepare_input_dma_table(sep, 2173 - app_in_address, 2174 - data_in_size, 2175 - block_size, 2176 - &in_first_mlli_address, 2177 - &in_first_num_entries, 2178 - &first_data_size, 2179 - is_kva); 2180 - } 2181 - 2182 - if (error) { 2183 - dev_warn(&sep->pdev->dev, "prepare DMA table call failed from prepare DCB call\n"); 2184 - goto end_function; 2185 - } 2186 - 2187 - /* Set the DCB values */ 2188 - dcb_table_ptr->input_mlli_address = in_first_mlli_address; 2189 - dcb_table_ptr->input_mlli_num_entries = in_first_num_entries; 2190 - dcb_table_ptr->input_mlli_data_size = first_data_size; 2191 - dcb_table_ptr->output_mlli_address = out_first_mlli_address; 2192 - dcb_table_ptr->output_mlli_num_entries = out_first_num_entries; 2193 - dcb_table_ptr->output_mlli_data_size = first_data_size; 2194 - 2195 - end_function: 2196 - return error; 2197 - 2198 - } 2199 - 2200 - /** 2201 - * sep_free_dma_tables_and_dcb - free DMA tables and DCBs 2202 - * @sep: pointer to struct sep_device 2203 - * @isapplet: indicates external application (used for kernel access) 2204 - * @is_kva: indicates kernel addresses (only used for kernel crypto) 2205 - * 2206 - * This function frees the DMA tables and DCB 2207 - */ 2208 - static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet, 2209 - bool is_kva) 2210 - { 2211 - int i = 0; 2212 - int error = 0; 2213 - int error_temp = 0; 2214 - struct sep_dcblock *dcb_table_ptr; 2215 
- unsigned long pt_hold; 2216 - void *tail_pt; 2217 - 2218 - if (isapplet == true) { 2219 - /* Set pointer to first DCB table */ 2220 - dcb_table_ptr = (struct sep_dcblock *) 2221 - (sep->shared_addr + 2222 - SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES); 2223 - 2224 - /* Go over each DCB and see if tail pointer must be updated */ 2225 - for (i = 0; i < sep->nr_dcb_creat; i++, dcb_table_ptr++) { 2226 - if (dcb_table_ptr->out_vr_tail_pt) { 2227 - pt_hold = (unsigned long)dcb_table_ptr->out_vr_tail_pt; 2228 - tail_pt = (void *)pt_hold; 2229 - if (is_kva == true) { 2230 - memcpy(tail_pt, 2231 - dcb_table_ptr->tail_data, 2232 - dcb_table_ptr->tail_data_size); 2233 - } else { 2234 - error_temp = copy_to_user( 2235 - tail_pt, 2236 - dcb_table_ptr->tail_data, 2237 - dcb_table_ptr->tail_data_size); 2238 - } 2239 - if (error_temp) { 2240 - /* Release the DMA resource */ 2241 - error = -EFAULT; 2242 - break; 2243 - } 2244 - } 2245 - } 2246 - } 2247 - /* Free the output pages, if any */ 2248 - sep_free_dma_table_data_handler(sep); 2249 - 2250 - return error; 2251 - } 2252 - 2253 - /** 2254 - * sep_get_static_pool_addr_handler - get static pool address 2255 - * @sep: pointer to struct sep_device 2256 - * 2257 - * This function sets the bus and virtual addresses of the static pool 2258 - */ 2259 - static int sep_get_static_pool_addr_handler(struct sep_device *sep) 2260 - { 2261 - u32 *static_pool_addr = NULL; 2262 - 2263 - static_pool_addr = (u32 *)(sep->shared_addr + 2264 - SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES); 2265 - 2266 - static_pool_addr[0] = SEP_STATIC_POOL_VAL_TOKEN; 2267 - static_pool_addr[1] = (u32)sep->shared_bus + 2268 - SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES; 2269 - 2270 - dev_dbg(&sep->pdev->dev, "static pool segment: physical %x\n", 2271 - (u32)static_pool_addr[1]); 2272 - 2273 - return 0; 2274 - } 2275 - 2276 - /** 2277 - * sep_end_transaction_handler - end transaction 2278 - * @sep: pointer to struct sep_device 2279 - * 2280 - * This API handles the end 
transaction request 2281 - */ 2282 - static int sep_end_transaction_handler(struct sep_device *sep) 2283 - { 2284 - /* Clear the data pool pointers Token */ 2285 - memset((void *)(sep->shared_addr + 2286 - SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES), 2287 - 0, sep->num_of_data_allocations*2*sizeof(u32)); 2288 - 2289 - /* Check that all the DMA resources were freed */ 2290 - sep_free_dma_table_data_handler(sep); 2291 - 2292 - clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags); 2293 - 2294 - /* 2295 - * We are now through with the transaction. Let's 2296 - * allow other processes who have the device open 2297 - * to perform transactions 2298 - */ 2299 - mutex_lock(&sep->sep_mutex); 2300 - sep->pid_doing_transaction = 0; 2301 - mutex_unlock(&sep->sep_mutex); 2302 - /* Raise event for stuck contextes */ 2303 - wake_up(&sep->event); 2304 - 2305 - return 0; 2306 - } 2307 - 2308 - /** 2309 - * sep_prepare_dcb_handler - prepare a control block 2310 - * @sep: pointer to struct sep_device 2311 - * @arg: pointer to user parameters 2312 - * 2313 - * This function will retrieve the RAR buffer physical addresses, type 2314 - * & size corresponding to the RAR handles provided in the buffers vector. 
2315 - */ 2316 - static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg) 2317 - { 2318 - int error; 2319 - /* Command arguments */ 2320 - struct build_dcb_struct command_args; 2321 - 2322 - /* Get the command arguments */ 2323 - if (copy_from_user(&command_args, (void __user *)arg, 2324 - sizeof(struct build_dcb_struct))) { 2325 - error = -EFAULT; 2326 - goto end_function; 2327 - } 2328 - 2329 - dev_dbg(&sep->pdev->dev, "prep dcb handler app_in_address is %08llx\n", 2330 - command_args.app_in_address); 2331 - dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n", 2332 - command_args.app_out_address); 2333 - dev_dbg(&sep->pdev->dev, "data_size is %x\n", 2334 - command_args.data_in_size); 2335 - dev_dbg(&sep->pdev->dev, "block_size is %x\n", 2336 - command_args.block_size); 2337 - dev_dbg(&sep->pdev->dev, "tail block_size is %x\n", 2338 - command_args.tail_block_size); 2339 - 2340 - error = sep_prepare_input_output_dma_table_in_dcb(sep, 2341 - (unsigned long)command_args.app_in_address, 2342 - (unsigned long)command_args.app_out_address, 2343 - command_args.data_in_size, command_args.block_size, 2344 - command_args.tail_block_size, true, false); 2345 - 2346 - end_function: 2347 - return error; 2348 - 2349 - } 2350 - 2351 - /** 2352 - * sep_free_dcb_handler - free control block resources 2353 - * @sep: pointer to struct sep_device 2354 - * 2355 - * This function frees the DCB resources and updates the needed 2356 - * user-space buffers. 2357 - */ 2358 - static int sep_free_dcb_handler(struct sep_device *sep) 2359 - { 2360 - return sep_free_dma_tables_and_dcb(sep, false, false); 2361 - } 2362 - 2363 - /** 2364 - * sep_rar_prepare_output_msg_handler - prepare an output message 2365 - * @sep: pointer to struct sep_device 2366 - * @arg: pointer to user parameters 2367 - * 2368 - * This function will retrieve the RAR buffer physical addresses, type 2369 - * & size corresponding to the RAR handles provided in the buffers vector. 
2370 - */ 2371 - static int sep_rar_prepare_output_msg_handler(struct sep_device *sep, 2372 - unsigned long arg) 2373 - { 2374 - int error = 0; 2375 - /* Command args */ 2376 - struct rar_hndl_to_bus_struct command_args; 2377 - /* Bus address */ 2378 - dma_addr_t rar_bus = 0; 2379 - /* Holds the RAR address in the system memory offset */ 2380 - u32 *rar_addr; 2381 - 2382 - /* Copy the data */ 2383 - if (copy_from_user(&command_args, (void __user *)arg, 2384 - sizeof(command_args))) { 2385 - error = -EFAULT; 2386 - goto end_function; 2387 - } 2388 - 2389 - /* Call to translation function only if user handle is not NULL */ 2390 - if (command_args.rar_handle) 2391 - return -EOPNOTSUPP; 2392 - dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus); 2393 - 2394 - /* Set value in the SYSTEM MEMORY offset */ 2395 - rar_addr = (u32 *)(sep->shared_addr + 2396 - SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES); 2397 - 2398 - /* Copy the physical address to the System Area for the SEP */ 2399 - rar_addr[0] = SEP_RAR_VAL_TOKEN; 2400 - rar_addr[1] = rar_bus; 2401 - 2402 - end_function: 2403 - return error; 2404 - } 2405 - 2406 - /** 2407 - * sep_ioctl - ioctl api 2408 - * @filp: pointer to struct file 2409 - * @cmd: command 2410 - * @arg: pointer to argument structure 2411 - * 2412 - * Implement the ioctl methods available on the SEP device. 
2413 - */ 2414 - static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 2415 - { 2416 - int error = 0; 2417 - struct sep_device *sep = filp->private_data; 2418 - 2419 - /* Make sure we own this device */ 2420 - mutex_lock(&sep->sep_mutex); 2421 - if ((current->pid != sep->pid_doing_transaction) && 2422 - (sep->pid_doing_transaction != 0)) { 2423 - dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n"); 2424 - error = -EACCES; 2425 - } 2426 - mutex_unlock(&sep->sep_mutex); 2427 - 2428 - if (error) 2429 - return error; 2430 - 2431 - if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) 2432 - return -ENOTTY; 2433 - 2434 - /* Lock to prevent the daemon to interfere with operation */ 2435 - mutex_lock(&sep->ioctl_mutex); 2436 - 2437 - switch (cmd) { 2438 - case SEP_IOCSENDSEPCOMMAND: 2439 - /* Send command to SEP */ 2440 - error = sep_send_command_handler(sep); 2441 - break; 2442 - case SEP_IOCALLOCDATAPOLL: 2443 - /* Allocate data pool */ 2444 - error = sep_allocate_data_pool_memory_handler(sep, arg); 2445 - break; 2446 - case SEP_IOCGETSTATICPOOLADDR: 2447 - /* Inform the SEP the bus address of the static pool */ 2448 - error = sep_get_static_pool_addr_handler(sep); 2449 - break; 2450 - case SEP_IOCENDTRANSACTION: 2451 - error = sep_end_transaction_handler(sep); 2452 - break; 2453 - case SEP_IOCRARPREPAREMESSAGE: 2454 - error = sep_rar_prepare_output_msg_handler(sep, arg); 2455 - break; 2456 - case SEP_IOCPREPAREDCB: 2457 - error = sep_prepare_dcb_handler(sep, arg); 2458 - break; 2459 - case SEP_IOCFREEDCB: 2460 - error = sep_free_dcb_handler(sep); 2461 - break; 2462 - default: 2463 - error = -ENOTTY; 2464 - break; 2465 - } 2466 - 2467 - mutex_unlock(&sep->ioctl_mutex); 2468 - return error; 2469 - } 2470 - 2471 - /** 2472 - * sep_singleton_ioctl - ioctl api for singleton interface 2473 - * @filp: pointer to struct file 2474 - * @cmd: command 2475 - * @arg: pointer to argument structure 2476 - * 2477 - * Implement the additional ioctls for the singleton 
device 2478 - */ 2479 - static long sep_singleton_ioctl(struct file *filp, u32 cmd, unsigned long arg) 2480 - { 2481 - long error = 0; 2482 - struct sep_device *sep = filp->private_data; 2483 - 2484 - /* Check that the command is for the SEP device */ 2485 - if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) 2486 - return -ENOTTY; 2487 - 2488 - /* Make sure we own this device */ 2489 - mutex_lock(&sep->sep_mutex); 2490 - if ((current->pid != sep->pid_doing_transaction) && 2491 - (sep->pid_doing_transaction != 0)) { 2492 - dev_dbg(&sep->pdev->dev, "singleton ioctl pid is not owner\n"); 2493 - mutex_unlock(&sep->sep_mutex); 2494 - return -EACCES; 2495 - } 2496 - 2497 - mutex_unlock(&sep->sep_mutex); 2498 - 2499 - switch (cmd) { 2500 - case SEP_IOCTLSETCALLERID: 2501 - mutex_lock(&sep->ioctl_mutex); 2502 - error = sep_set_caller_id_handler(sep, arg); 2503 - mutex_unlock(&sep->ioctl_mutex); 2504 - break; 2505 - default: 2506 - error = sep_ioctl(filp, cmd, arg); 2507 - break; 2508 - } 2509 - return error; 2510 - } 2511 - 2512 - /** 2513 - * sep_request_daemon_ioctl - ioctl for daemon 2514 - * @filp: pointer to struct file 2515 - * @cmd: command 2516 - * @arg: pointer to argument structure 2517 - * 2518 - * Called by the request daemon to perform ioctls on the daemon device 2519 - */ 2520 - static long sep_request_daemon_ioctl(struct file *filp, u32 cmd, 2521 - unsigned long arg) 2522 - { 2523 - 2524 - long error; 2525 - struct sep_device *sep = filp->private_data; 2526 - 2527 - /* Check that the command is for SEP device */ 2528 - if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) 2529 - return -ENOTTY; 2530 - 2531 - /* Only one process can access ioctl at any given time */ 2532 - mutex_lock(&sep->ioctl_mutex); 2533 - 2534 - switch (cmd) { 2535 - case SEP_IOCSENDSEPRPLYCOMMAND: 2536 - /* Send reply command to SEP */ 2537 - error = sep_req_daemon_send_reply_command_handler(sep); 2538 - break; 2539 - case SEP_IOCENDTRANSACTION: 2540 - /* 2541 - * End req daemon transaction, do nothing 
2542 - * will be removed upon update in middleware 2543 - * API library 2544 - */ 2545 - error = 0; 2546 - break; 2547 - default: 2548 - error = -ENOTTY; 2549 - } 2550 - mutex_unlock(&sep->ioctl_mutex); 2551 - return error; 2552 - } 2553 - 2554 - /** 2555 - * sep_inthandler - interrupt handler 2556 - * @irq: interrupt 2557 - * @dev_id: device id 2558 - */ 2559 - static irqreturn_t sep_inthandler(int irq, void *dev_id) 2560 - { 2561 - irqreturn_t int_error = IRQ_HANDLED; 2562 - unsigned long lck_flags; 2563 - u32 reg_val, reg_val2 = 0; 2564 - struct sep_device *sep = dev_id; 2565 - 2566 - /* Read the IRR register to check if this is SEP interrupt */ 2567 - reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR); 2568 - 2569 - if (reg_val & (0x1 << 13)) { 2570 - /* Lock and update the counter of reply messages */ 2571 - spin_lock_irqsave(&sep->snd_rply_lck, lck_flags); 2572 - sep->reply_ct++; 2573 - spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags); 2574 - 2575 - dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n", 2576 - sep->send_ct, sep->reply_ct); 2577 - 2578 - /* Is this printf or daemon request? 
*/ 2579 - reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); 2580 - dev_dbg(&sep->pdev->dev, 2581 - "SEP Interrupt - reg2 is %08x\n", reg_val2); 2582 - 2583 - if ((reg_val2 >> 30) & 0x1) { 2584 - dev_dbg(&sep->pdev->dev, "int: printf request\n"); 2585 - wake_up(&sep->event_request_daemon); 2586 - } else if (reg_val2 >> 31) { 2587 - dev_dbg(&sep->pdev->dev, "int: daemon request\n"); 2588 - wake_up(&sep->event_request_daemon); 2589 - } else { 2590 - dev_dbg(&sep->pdev->dev, "int: SEP reply\n"); 2591 - wake_up(&sep->event); 2592 - } 2593 - } else { 2594 - dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n"); 2595 - int_error = IRQ_NONE; 2596 - } 2597 - if (int_error == IRQ_HANDLED) 2598 - sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val); 2599 - 2600 - return int_error; 2601 - } 2602 - 2603 - /** 2604 - * sep_reconfig_shared_area - reconfigure shared area 2605 - * @sep: pointer to struct sep_device 2606 - * 2607 - * Reconfig the shared area between HOST and SEP - needed in case 2608 - * the DX_CC_Init function was called before OS loading. 
2609 - */ 2610 - static int sep_reconfig_shared_area(struct sep_device *sep) 2611 - { 2612 - int ret_val; 2613 - 2614 - /* use to limit waiting for SEP */ 2615 - unsigned long end_time; 2616 - 2617 - /* Send the new SHARED MESSAGE AREA to the SEP */ 2618 - dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n", 2619 - (unsigned long long)sep->shared_bus); 2620 - 2621 - sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus); 2622 - 2623 - /* Poll for SEP response */ 2624 - ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR); 2625 - 2626 - end_time = jiffies + (WAIT_TIME * HZ); 2627 - 2628 - while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) && 2629 - (ret_val != sep->shared_bus)) 2630 - ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR); 2631 - 2632 - /* Check the return value (register) */ 2633 - if (ret_val != sep->shared_bus) { 2634 - dev_warn(&sep->pdev->dev, "could not reconfig shared area\n"); 2635 - dev_warn(&sep->pdev->dev, "result was %x\n", ret_val); 2636 - ret_val = -ENOMEM; 2637 - } else 2638 - ret_val = 0; 2639 - 2640 - dev_dbg(&sep->pdev->dev, "reconfig shared area end\n"); 2641 - return ret_val; 2642 - } 2643 - 2644 - /* File operation for singleton SEP operations */ 2645 - static const struct file_operations singleton_file_operations = { 2646 - .owner = THIS_MODULE, 2647 - .unlocked_ioctl = sep_singleton_ioctl, 2648 - .poll = sep_poll, 2649 - .open = sep_singleton_open, 2650 - .release = sep_singleton_release, 2651 - .mmap = sep_mmap, 2652 - }; 2653 - 2654 - /* File operation for daemon operations */ 2655 - static const struct file_operations daemon_file_operations = { 2656 - .owner = THIS_MODULE, 2657 - .unlocked_ioctl = sep_request_daemon_ioctl, 2658 - .poll = sep_request_daemon_poll, 2659 - .open = sep_request_daemon_open, 2660 - .release = sep_request_daemon_release, 2661 - .mmap = sep_request_daemon_mmap, 2662 - }; 2663 - 2664 - /* The files operations structure of the driver */ 2665 - 
static const struct file_operations sep_file_operations = { 2666 - .owner = THIS_MODULE, 2667 - .unlocked_ioctl = sep_ioctl, 2668 - .poll = sep_poll, 2669 - .open = sep_open, 2670 - .release = sep_release, 2671 - .mmap = sep_mmap, 2672 - }; 2673 - 2674 - /** 2675 - * sep_register_driver_with_fs - register misc devices 2676 - * @sep: pointer to struct sep_device 2677 - * 2678 - * This function registers the driver with the file system 2679 - */ 2680 - static int sep_register_driver_with_fs(struct sep_device *sep) 2681 - { 2682 - int ret_val; 2683 - 2684 - sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR; 2685 - sep->miscdev_sep.name = SEP_DEV_NAME; 2686 - sep->miscdev_sep.fops = &sep_file_operations; 2687 - 2688 - sep->miscdev_singleton.minor = MISC_DYNAMIC_MINOR; 2689 - sep->miscdev_singleton.name = SEP_DEV_SINGLETON; 2690 - sep->miscdev_singleton.fops = &singleton_file_operations; 2691 - 2692 - sep->miscdev_daemon.minor = MISC_DYNAMIC_MINOR; 2693 - sep->miscdev_daemon.name = SEP_DEV_DAEMON; 2694 - sep->miscdev_daemon.fops = &daemon_file_operations; 2695 - 2696 - ret_val = misc_register(&sep->miscdev_sep); 2697 - if (ret_val) { 2698 - dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n", 2699 - ret_val); 2700 - return ret_val; 2701 - } 2702 - 2703 - ret_val = misc_register(&sep->miscdev_singleton); 2704 - if (ret_val) { 2705 - dev_warn(&sep->pdev->dev, "misc reg fails for sing %x\n", 2706 - ret_val); 2707 - misc_deregister(&sep->miscdev_sep); 2708 - return ret_val; 2709 - } 2710 - 2711 - ret_val = misc_register(&sep->miscdev_daemon); 2712 - if (ret_val) { 2713 - dev_warn(&sep->pdev->dev, "misc reg fails for dmn %x\n", 2714 - ret_val); 2715 - misc_deregister(&sep->miscdev_sep); 2716 - misc_deregister(&sep->miscdev_singleton); 2717 - 2718 - return ret_val; 2719 - } 2720 - return ret_val; 2721 - } 2722 - 2723 - 2724 - /** 2725 - * sep_probe - probe a matching PCI device 2726 - * @pdev: pci_device 2727 - * @end: pci_device_id 2728 - * 2729 - * Attempt to set up and 
configure a SEP device that has been 2730 - * discovered by the PCI layer. 2731 - */ 2732 - static int __devinit sep_probe(struct pci_dev *pdev, 2733 - const struct pci_device_id *ent) 2734 - { 2735 - int error = 0; 2736 - struct sep_device *sep; 2737 - 2738 - if (sep_dev != NULL) { 2739 - dev_warn(&pdev->dev, "only one SEP supported.\n"); 2740 - return -EBUSY; 2741 - } 2742 - 2743 - /* Enable the device */ 2744 - error = pci_enable_device(pdev); 2745 - if (error) { 2746 - dev_warn(&pdev->dev, "error enabling pci device\n"); 2747 - goto end_function; 2748 - } 2749 - 2750 - /* Allocate the sep_device structure for this device */ 2751 - sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC); 2752 - if (sep_dev == NULL) { 2753 - dev_warn(&pdev->dev, 2754 - "can't kmalloc the sep_device structure\n"); 2755 - error = -ENOMEM; 2756 - goto end_function_disable_device; 2757 - } 2758 - 2759 - /* 2760 - * We're going to use another variable for actually 2761 - * working with the device; this way, if we have 2762 - * multiple devices in the future, it would be easier 2763 - * to make appropriate changes 2764 - */ 2765 - sep = sep_dev; 2766 - 2767 - sep->pdev = pci_dev_get(pdev); 2768 - 2769 - init_waitqueue_head(&sep->event); 2770 - init_waitqueue_head(&sep->event_request_daemon); 2771 - spin_lock_init(&sep->snd_rply_lck); 2772 - mutex_init(&sep->sep_mutex); 2773 - mutex_init(&sep->ioctl_mutex); 2774 - 2775 - dev_dbg(&sep->pdev->dev, "sep probe: PCI obtained, device being prepared\n"); 2776 - dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision); 2777 - 2778 - /* Set up our register area */ 2779 - sep->reg_physical_addr = pci_resource_start(sep->pdev, 0); 2780 - if (!sep->reg_physical_addr) { 2781 - dev_warn(&sep->pdev->dev, "Error getting register start\n"); 2782 - error = -ENODEV; 2783 - goto end_function_free_sep_dev; 2784 - } 2785 - 2786 - sep->reg_physical_end = pci_resource_end(sep->pdev, 0); 2787 - if (!sep->reg_physical_end) { 2788 - 
dev_warn(&sep->pdev->dev, "Error getting register end\n"); 2789 - error = -ENODEV; 2790 - goto end_function_free_sep_dev; 2791 - } 2792 - 2793 - sep->reg_addr = ioremap_nocache(sep->reg_physical_addr, 2794 - (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1)); 2795 - if (!sep->reg_addr) { 2796 - dev_warn(&sep->pdev->dev, "Error getting register virtual\n"); 2797 - error = -ENODEV; 2798 - goto end_function_free_sep_dev; 2799 - } 2800 - 2801 - dev_dbg(&sep->pdev->dev, 2802 - "Register area start %llx end %llx virtual %p\n", 2803 - (unsigned long long)sep->reg_physical_addr, 2804 - (unsigned long long)sep->reg_physical_end, 2805 - sep->reg_addr); 2806 - 2807 - /* Allocate the shared area */ 2808 - sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + 2809 - SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES + 2810 - SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + 2811 - SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + 2812 - SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES; 2813 - 2814 - if (sep_map_and_alloc_shared_area(sep)) { 2815 - error = -ENOMEM; 2816 - /* Allocation failed */ 2817 - goto end_function_error; 2818 - } 2819 - 2820 - /* Clear ICR register */ 2821 - sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF); 2822 - 2823 - /* Set the IMR register - open only GPR 2 */ 2824 - sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13))); 2825 - 2826 - /* Read send/receive counters from SEP */ 2827 - sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); 2828 - sep->reply_ct &= 0x3FFFFFFF; 2829 - sep->send_ct = sep->reply_ct; 2830 - 2831 - /* Get the interrupt line */ 2832 - error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, 2833 - "sep_driver", sep); 2834 - 2835 - if (error) 2836 - goto end_function_deallocate_sep_shared_area; 2837 - 2838 - /* The new chip requires a shared area reconfigure */ 2839 - if (sep->pdev->revision == 4) { /* Only for new chip */ 2840 - error = sep_reconfig_shared_area(sep); 2841 - if (error) 2842 - goto 
end_function_free_irq; 2843 - } 2844 - /* Finally magic up the device nodes */ 2845 - /* Register driver with the fs */ 2846 - error = sep_register_driver_with_fs(sep); 2847 - if (error == 0) 2848 - /* Success */ 2849 - return 0; 2850 - 2851 - end_function_free_irq: 2852 - free_irq(pdev->irq, sep); 2853 - 2854 - end_function_deallocate_sep_shared_area: 2855 - /* De-allocate shared area */ 2856 - sep_unmap_and_free_shared_area(sep); 2857 - 2858 - end_function_error: 2859 - iounmap(sep->reg_addr); 2860 - 2861 - end_function_free_sep_dev: 2862 - pci_dev_put(sep_dev->pdev); 2863 - kfree(sep_dev); 2864 - sep_dev = NULL; 2865 - 2866 - end_function_disable_device: 2867 - pci_disable_device(pdev); 2868 - 2869 - end_function: 2870 - return error; 2871 - } 2872 - 2873 - static void sep_remove(struct pci_dev *pdev) 2874 - { 2875 - struct sep_device *sep = sep_dev; 2876 - 2877 - /* Unregister from fs */ 2878 - misc_deregister(&sep->miscdev_sep); 2879 - misc_deregister(&sep->miscdev_singleton); 2880 - misc_deregister(&sep->miscdev_daemon); 2881 - 2882 - /* Free the irq */ 2883 - free_irq(sep->pdev->irq, sep); 2884 - 2885 - /* Free the shared area */ 2886 - sep_unmap_and_free_shared_area(sep_dev); 2887 - iounmap((void *) sep_dev->reg_addr); 2888 - } 2889 - 2890 - static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = { 2891 - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)}, 2892 - {0} 2893 - }; 2894 - 2895 - MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl); 2896 - 2897 - /* Field for registering driver to PCI device */ 2898 - static struct pci_driver sep_pci_driver = { 2899 - .name = "sep_sec_driver", 2900 - .id_table = sep_pci_id_tbl, 2901 - .probe = sep_probe, 2902 - .remove = sep_remove 2903 - }; 2904 - 2905 - 2906 - /** 2907 - * sep_init - init function 2908 - * 2909 - * Module load time. Register the PCI device driver. 
2910 - */ 2911 - static int __init sep_init(void) 2912 - { 2913 - return pci_register_driver(&sep_pci_driver); 2914 - } 2915 - 2916 - 2917 - /** 2918 - * sep_exit - called to unload driver 2919 - * 2920 - * Drop the misc devices then remove and unmap the various resources 2921 - * that are not released by the driver remove method. 2922 - */ 2923 - static void __exit sep_exit(void) 2924 - { 2925 - pci_unregister_driver(&sep_pci_driver); 2926 - } 2927 - 2928 - 2929 - module_init(sep_init); 2930 - module_exit(sep_exit); 2931 - 2932 - MODULE_LICENSE("GPL");
+225 -51
drivers/staging/sep/sep_driver_api.h
··· 2 2 * 3 3 * sep_driver_api.h - Security Processor Driver api definitions 4 4 * 5 - * Copyright(c) 2009,2010 Intel Corporation. All rights reserved. 6 - * Contributions(c) 2009,2010 Discretix. All rights reserved. 5 + * Copyright(c) 2009-2011 Intel Corporation. All rights reserved. 6 + * Contributions(c) 2009-2011 Discretix. All rights reserved. 7 7 * 8 8 * This program is free software; you can redistribute it and/or modify it 9 9 * under the terms of the GNU General Public License as published by the Free ··· 26 26 * CHANGES: 27 27 * 28 28 * 2010.09.14 Upgrade to Medfield 29 + * 2011.02.22 Enable kernel crypto 29 30 * 30 31 */ 31 32 ··· 38 37 #define SEP_DRIVER_SRC_REQ 2 39 38 #define SEP_DRIVER_SRC_PRINTF 3 40 39 40 + /* Power state */ 41 + #define SEP_DRIVER_POWERON 1 42 + #define SEP_DRIVER_POWEROFF 2 41 43 42 - /*------------------------------------------- 43 - TYPEDEFS 44 - ----------------------------------------------*/ 44 + /* Following enums are used only for kernel crypto api */ 45 + enum type_of_request { 46 + NO_REQUEST, 47 + AES_CBC, 48 + AES_ECB, 49 + DES_CBC, 50 + DES_ECB, 51 + DES3_ECB, 52 + DES3_CBC, 53 + SHA1, 54 + MD5, 55 + SHA224, 56 + SHA256 57 + }; 45 58 46 - struct alloc_struct { 47 - /* offset from start of shared pool area */ 48 - u32 offset; 49 - /* number of bytes to allocate */ 50 - u32 num_bytes; 51 - }; 52 - 53 - /* command struct for getting caller id value and address */ 54 - struct caller_id_struct { 55 - /* pid of the process */ 56 - u32 pid; 57 - /* virtual address of the caller id hash */ 58 - aligned_u64 callerIdAddress; 59 - /* caller id hash size in bytes */ 60 - u32 callerIdSizeInBytes; 59 + enum hash_stage { 60 + HASH_INIT, 61 + HASH_UPDATE, 62 + HASH_FINISH, 63 + HASH_DIGEST 61 64 }; 62 65 63 66 /* ··· 88 83 u8 tail_data[68]; 89 84 }; 90 85 91 - struct sep_caller_id_entry { 92 - int pid; 93 - unsigned char callerIdHash[SEP_CALLER_ID_HASH_SIZE_IN_BYTES]; 94 - }; 95 - 96 86 /* 97 87 command structure for building dcb 
block (currently for ext app only 98 88 */ ··· 104 104 /* the size of the block of the operation - if needed, 105 105 every table will be modulo this parameter */ 106 106 u32 tail_block_size; 107 + 108 + /* which application calls the driver DX or applet */ 109 + u32 is_applet; 110 + }; 111 + 112 + /* 113 + command structure for building dcb block for kernel crypto 114 + */ 115 + struct build_dcb_struct_kernel { 116 + /* address value of the data in */ 117 + void *app_in_address; 118 + /* size of data in */ 119 + ssize_t data_in_size; 120 + /* address of the data out */ 121 + void *app_out_address; 122 + /* the size of the block of the operation - if needed, 123 + every table will be modulo this parameter */ 124 + u32 block_size; 125 + /* the size of the block of the operation - if needed, 126 + every table will be modulo this parameter */ 127 + u32 tail_block_size; 128 + 129 + /* which application calls the driver DX or applet */ 130 + u32 is_applet; 131 + 132 + struct scatterlist *src_sg; 133 + struct scatterlist *dst_sg; 107 134 }; 108 135 109 136 /** ··· 174 147 175 148 /* number of entries of the output mapp array */ 176 149 u32 out_map_num_entries; 150 + 151 + /* Scatter list for kernel operations */ 152 + struct scatterlist *src_sg; 153 + struct scatterlist *dst_sg; 177 154 }; 178 155 179 156 ··· 200 169 u32 block_size; 201 170 }; 202 171 203 - /*---------------------------------------------------------------- 204 - IOCTL command defines 205 - -----------------------------------------------------------------*/ 172 + /* 173 + * header format for each fastcall write operation 174 + */ 175 + struct sep_fastcall_hdr { 176 + u32 magic; 177 + u32 msg_len; 178 + u32 num_dcbs; 179 + }; 206 180 181 + /* 182 + * structure used in file pointer's private data field 183 + * to track the status of the calls to the various 184 + * driver interface 185 + */ 186 + struct sep_call_status { 187 + unsigned long status; 188 + }; 189 + 190 + /* 191 + * format of dma context 
buffer used to store all DMA-related 192 + * context information of a particular transaction 193 + */ 194 + struct sep_dma_context { 195 + /* number of data control blocks */ 196 + u32 nr_dcb_creat; 197 + /* number of the lli tables created in the current transaction */ 198 + u32 num_lli_tables_created; 199 + /* size of currently allocated dma tables region */ 200 + u32 dmatables_len; 201 + /* size of input data */ 202 + u32 input_data_len; 203 + struct sep_dma_resource dma_res_arr[SEP_MAX_NUM_SYNC_DMA_OPS]; 204 + /* Scatter gather for kernel crypto */ 205 + struct scatterlist *src_sg; 206 + struct scatterlist *dst_sg; 207 + }; 208 + 209 + /* 210 + * format for file pointer's private_data field 211 + */ 212 + struct sep_private_data { 213 + struct sep_queue_info *my_queue_elem; 214 + struct sep_device *device; 215 + struct sep_call_status call_status; 216 + struct sep_dma_context *dma_ctx; 217 + }; 218 + 219 + 220 + /* Functions used by sep_crypto */ 221 + 222 + /** 223 + * sep_queue_status_remove - Removes transaction from status queue 224 + * @sep: SEP device 225 + * @sep_queue_info: pointer to status queue 226 + * 227 + * This function will removes information about transaction from the queue. 228 + */ 229 + void sep_queue_status_remove(struct sep_device *sep, 230 + struct sep_queue_info **queue_elem); 231 + /** 232 + * sep_queue_status_add - Adds transaction to status queue 233 + * @sep: SEP device 234 + * @opcode: transaction opcode 235 + * @size: input data size 236 + * @pid: pid of current process 237 + * @name: current process name 238 + * @name_len: length of name (current process) 239 + * 240 + * This function adds information about about transaction started to the status 241 + * queue. 
242 + */ 243 + struct sep_queue_info *sep_queue_status_add( 244 + struct sep_device *sep, 245 + u32 opcode, 246 + u32 size, 247 + u32 pid, 248 + u8 *name, size_t name_len); 249 + 250 + /** 251 + * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context 252 + * for kernel crypto 253 + * @sep: SEP device 254 + * @dcb_region: DCB region buf to create for current transaction 255 + * @dmatables_region: MLLI/DMA tables buf to create for current transaction 256 + * @dma_ctx: DMA context buf to create for current transaction 257 + * @user_dcb_args: User arguments for DCB/MLLI creation 258 + * @num_dcbs: Number of DCBs to create 259 + */ 260 + int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep, 261 + struct sep_dcblock **dcb_region, 262 + void **dmatables_region, 263 + struct sep_dma_context **dma_ctx, 264 + const struct build_dcb_struct_kernel *dcb_data, 265 + const u32 num_dcbs); 266 + 267 + /** 268 + * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables 269 + * contexts into use 270 + * @sep: SEP device 271 + * @dcb_region: DCB region copy 272 + * @dmatables_region: MLLI/DMA tables copy 273 + * @dma_ctx: DMA context for current transaction 274 + */ 275 + ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep, 276 + struct sep_dcblock **dcb_region, 277 + void **dmatables_region, 278 + struct sep_dma_context *dma_ctx); 279 + 280 + /** 281 + * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks 282 + * @app_in_address: unsigned long; for data buffer in (user space) 283 + * @app_out_address: unsigned long; for data buffer out (user space) 284 + * @data_in_size: u32; for size of data 285 + * @block_size: u32; for block size 286 + * @tail_block_size: u32; for size of tail block 287 + * @isapplet: bool; to indicate external app 288 + * @is_kva: bool; kernel buffer; only used for kernel crypto module 289 + * 290 + * This function prepares the linked DMA tables and puts the 291 + * address for the linked 
list of tables inta a DCB (data control 292 + * block) the address of which is known by the SEP hardware 293 + * Note that all bus addresses that are passed to the SEP 294 + * are in 32 bit format; the SEP is a 32 bit device 295 + */ 296 + int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep, 297 + unsigned long app_in_address, 298 + unsigned long app_out_address, 299 + u32 data_in_size, 300 + u32 block_size, 301 + u32 tail_block_size, 302 + bool isapplet, 303 + bool is_kva, 304 + struct sep_dcblock *dcb_region, 305 + void **dmatables_region, 306 + struct sep_dma_context **dma_ctx, 307 + struct scatterlist *src_sg, 308 + struct scatterlist *dst_sg); 309 + 310 + /** 311 + * sep_free_dma_table_data_handler - free DMA table 312 + * @sep: pointere to struct sep_device 313 + * @dma_ctx: dma context 314 + * 315 + * Handles the request to free DMA table for synchronic actions 316 + */ 317 + int sep_free_dma_table_data_handler(struct sep_device *sep, 318 + struct sep_dma_context **dma_ctx); 319 + /** 320 + * sep_send_command_handler - kick off a command 321 + * @sep: SEP being signalled 322 + * 323 + * This function raises interrupt to SEP that signals that is has a new 324 + * command from the host 325 + * 326 + * Note that this function does fall under the ioctl lock 327 + */ 328 + int sep_send_command_handler(struct sep_device *sep); 329 + 330 + /** 331 + * sep_wait_transaction - Used for synchronizing transactions 332 + * @sep: SEP device 333 + */ 334 + int sep_wait_transaction(struct sep_device *sep); 335 + 336 + /** 337 + * IOCTL command defines 338 + */ 207 339 /* magic number 1 of the sep IOCTL command */ 208 - #define SEP_IOC_MAGIC_NUMBER 's' 340 + #define SEP_IOC_MAGIC_NUMBER 's' 209 341 210 342 /* sends interrupt to sep that message is ready */ 211 343 #define SEP_IOCSENDSEPCOMMAND \ 212 344 _IO(SEP_IOC_MAGIC_NUMBER, 0) 213 345 214 - /* sends interrupt to sep that message is ready */ 215 - #define SEP_IOCSENDSEPRPLYCOMMAND \ 216 - 
_IO(SEP_IOC_MAGIC_NUMBER, 1) 217 - 218 - /* allocate memory in data pool */ 219 - #define SEP_IOCALLOCDATAPOLL \ 220 - _IOW(SEP_IOC_MAGIC_NUMBER, 2, struct alloc_struct) 221 - 222 - /* free dynamic data aalocated during table creation */ 223 - #define SEP_IOCFREEDMATABLEDATA \ 224 - _IO(SEP_IOC_MAGIC_NUMBER, 7) 225 - 226 - /* get the static pool area addersses (physical and virtual) */ 227 - #define SEP_IOCGETSTATICPOOLADDR \ 228 - _IO(SEP_IOC_MAGIC_NUMBER, 8) 229 - 230 346 /* end transaction command */ 231 347 #define SEP_IOCENDTRANSACTION \ 232 348 _IO(SEP_IOC_MAGIC_NUMBER, 15) 233 - 234 - #define SEP_IOCRARPREPAREMESSAGE \ 235 - _IOW(SEP_IOC_MAGIC_NUMBER, 20, struct rar_hndl_to_bus_struct) 236 - 237 - #define SEP_IOCTLSETCALLERID \ 238 - _IOW(SEP_IOC_MAGIC_NUMBER, 34, struct caller_id_struct) 239 349 240 350 #define SEP_IOCPREPAREDCB \ 241 351 _IOW(SEP_IOC_MAGIC_NUMBER, 35, struct build_dcb_struct) 242 352 243 353 #define SEP_IOCFREEDCB \ 244 354 _IO(SEP_IOC_MAGIC_NUMBER, 36) 355 + 356 + struct sep_device; 245 357 246 358 #endif
+67 -12
drivers/staging/sep/sep_driver_config.h
··· 2 2 * 3 3 * sep_driver_config.h - Security Processor Driver configuration 4 4 * 5 - * Copyright(c) 2009,2010 Intel Corporation. All rights reserved. 6 - * Contributions(c) 2009,2010 Discretix. All rights reserved. 5 + * Copyright(c) 2009-2011 Intel Corporation. All rights reserved. 6 + * Contributions(c) 2009-2011 Discretix. All rights reserved. 7 7 * 8 8 * This program is free software; you can redistribute it and/or modify it 9 9 * under the terms of the GNU General Public License as published by the Free ··· 26 26 * CHANGES: 27 27 * 28 28 * 2010.06.26 Upgrade to Medfield 29 + * 2011.02.22 Enable kernel crypto 29 30 * 30 31 */ 31 32 ··· 49 48 /* the mode for running on the ARM1172 Evaluation platform (flag is 1) */ 50 49 #define SEP_DRIVER_ARM_DEBUG_MODE 0 51 50 51 + /* Critical message area contents for sanity checking */ 52 + #define SEP_START_MSG_TOKEN 0x02558808 52 53 /*------------------------------------------- 53 54 INTERNAL DATA CONFIGURATION 54 55 -------------------------------------------*/ ··· 68 65 #define SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE 16 69 66 70 67 /* flag that signifies tah the lock is 71 - currently held by the process (struct file) */ 68 + currently held by the proccess (struct file) */ 72 69 #define SEP_DRIVER_OWN_LOCK_FLAG 1 73 70 74 71 /* flag that signifies tah the lock is currently NOT 75 - held by the process (struct file) */ 72 + held by the proccess (struct file) */ 76 73 #define SEP_DRIVER_DISOWN_LOCK_FLAG 0 77 74 78 75 /* indicates whether driver has mapped/unmapped shared area */ 79 76 #define SEP_REQUEST_DAEMON_MAPPED 1 80 77 #define SEP_REQUEST_DAEMON_UNMAPPED 0 81 - 82 - #define SEP_DEV_NAME "sep_sec_driver" 83 - #define SEP_DEV_SINGLETON "sep_sec_singleton_driver" 84 - #define SEP_DEV_DAEMON "sep_req_daemon_driver" 85 78 86 79 /*-------------------------------------------------------- 87 80 SHARED AREA memory total size is 36K ··· 89 90 } 90 91 DATA_POOL_AREA 12K } 91 92 92 - SYNCHRONIC_DMA_TABLES_AREA 5K 93 + 
SYNCHRONIC_DMA_TABLES_AREA 29K 93 94 94 95 placeholder until drver changes 95 96 FLOW_DMA_TABLES_AREA 4K ··· 108 109 109 110 110 111 /* 112 + the minimum length of the message - includes 2 reserved fields 113 + at the start, then token, message size and opcode fields. all dwords 114 + */ 115 + #define SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES (5*sizeof(u32)) 116 + 117 + /* 111 118 the maximum length of the message - the rest of the message shared 112 119 area will be dedicated to the dma lli tables 113 120 */ ··· 129 124 #define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES (16 * 1024) 130 125 131 126 /* the size of the message shared area in pages */ 132 - #define SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES (1024 * 5) 127 + #define SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES (1024 * 29) 133 128 134 129 /* Placeholder until driver changes */ 135 130 #define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 4) 136 131 137 132 /* system data (time, caller id etc') pool */ 138 133 #define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES (1024 * 3) 134 + 135 + /* Offset of the sep printf buffer in the message area */ 136 + #define SEP_DRIVER_PRINTF_OFFSET_IN_BYTES (5888) 139 137 140 138 /* the size in bytes of the time memory */ 141 139 #define SEP_DRIVER_TIME_MEMORY_SIZE_IN_BYTES 8 ··· 231 223 #define SEP_ALREADY_INITIALIZED_ERR 12 232 224 233 225 /* bit that locks access to the shared area */ 234 - #define SEP_MMAP_LOCK_BIT 0 226 + #define SEP_TRANSACTION_STARTED_LOCK_BIT 0 235 227 236 228 /* bit that lock access to the poll - after send_command */ 237 - #define SEP_SEND_MSG_LOCK_BIT 1 229 + #define SEP_WORKING_LOCK_BIT 1 238 230 239 231 /* the token that defines the static pool address address */ 240 232 #define SEP_STATIC_POOL_VAL_TOKEN 0xABBAABBA ··· 247 239 248 240 /* Time limit for SEP to finish */ 249 241 #define WAIT_TIME 10 242 + 243 + /* Delay for pm runtime suspend (reduces pm thrashing with bursty traffic */ 244 + #define SUSPEND_DELAY 10 245 + 246 + /* Number of delays to 
wait until scu boots after runtime resume */ 247 + #define SCU_DELAY_MAX 50 248 + 249 + /* Delay for each iteration (usec) wait for scu boots after runtime resume */ 250 + #define SCU_DELAY_ITERATION 10 251 + 252 + 253 + /* 254 + * Bits used in struct sep_call_status to check that 255 + * driver's APIs are called in valid order 256 + */ 257 + 258 + /* Bit offset which indicates status of sep_write() */ 259 + #define SEP_FASTCALL_WRITE_DONE_OFFSET 0 260 + 261 + /* Bit offset which indicates status of sep_mmap() */ 262 + #define SEP_LEGACY_MMAP_DONE_OFFSET 1 263 + 264 + /* Bit offset which indicates status of the SEP_IOCSENDSEPCOMMAND ioctl */ 265 + #define SEP_LEGACY_SENDMSG_DONE_OFFSET 2 266 + 267 + /* Bit offset which indicates status of sep_poll() */ 268 + #define SEP_LEGACY_POLL_DONE_OFFSET 3 269 + 270 + /* Bit offset which indicates status of the SEP_IOCENDTRANSACTION ioctl */ 271 + #define SEP_LEGACY_ENDTRANSACTION_DONE_OFFSET 4 272 + 273 + /* 274 + * Used to limit number of concurrent processes 275 + * allowed to allocte dynamic buffers in fastcall 276 + * interface. 277 + */ 278 + #define SEP_DOUBLEBUF_USERS_LIMIT 3 279 + 280 + /* Identifier for valid fastcall header */ 281 + #define SEP_FC_MAGIC 0xFFAACCAA 282 + 283 + /* 284 + * Used for enabling driver runtime power management. 285 + * Useful for enabling/disabling it during performance 286 + * testing 287 + */ 288 + #define SEP_ENABLE_RUNTIME_PM 250 289 251 290 #endif /* SEP DRIVER CONFIG */
+4 -178
drivers/staging/sep/sep_driver_hw_defs.h
··· 2 2 * 3 3 * sep_driver_hw_defs.h - Security Processor Driver hardware definitions 4 4 * 5 - * Copyright(c) 2009,2010 Intel Corporation. All rights reserved. 6 - * Contributions(c) 2009,2010 Discretix. All rights reserved. 5 + * Copyright(c) 2009-2011 Intel Corporation. All rights reserved. 6 + * Contributions(c) 2009-2011 Discretix. All rights reserved. 7 7 * 8 8 * This program is free software; you can redistribute it and/or modify it 9 9 * under the terms of the GNU General Public License as published by the Free ··· 26 26 * CHANGES: 27 27 * 28 28 * 2010.09.20 Upgrade to Medfield 29 + * 2011.02.22 Enable kernel crypto 29 30 * 30 31 */ 31 32 ··· 43 42 44 43 45 44 /* cf registers */ 46 - #define HW_R0B_ADDR_0_REG_ADDR 0x0000UL 47 - #define HW_R0B_ADDR_1_REG_ADDR 0x0004UL 48 - #define HW_R0B_ADDR_2_REG_ADDR 0x0008UL 49 - #define HW_R0B_ADDR_3_REG_ADDR 0x000cUL 50 - #define HW_R0B_ADDR_4_REG_ADDR 0x0010UL 51 - #define HW_R0B_ADDR_5_REG_ADDR 0x0014UL 52 - #define HW_R0B_ADDR_6_REG_ADDR 0x0018UL 53 - #define HW_R0B_ADDR_7_REG_ADDR 0x001cUL 54 - #define HW_R0B_ADDR_8_REG_ADDR 0x0020UL 55 - #define HW_R2B_ADDR_0_REG_ADDR 0x0080UL 56 - #define HW_R2B_ADDR_1_REG_ADDR 0x0084UL 57 - #define HW_R2B_ADDR_2_REG_ADDR 0x0088UL 58 - #define HW_R2B_ADDR_3_REG_ADDR 0x008cUL 59 - #define HW_R2B_ADDR_4_REG_ADDR 0x0090UL 60 - #define HW_R2B_ADDR_5_REG_ADDR 0x0094UL 61 - #define HW_R2B_ADDR_6_REG_ADDR 0x0098UL 62 - #define HW_R2B_ADDR_7_REG_ADDR 0x009cUL 63 - #define HW_R2B_ADDR_8_REG_ADDR 0x00a0UL 64 - #define HW_R3B_REG_ADDR 0x00C0UL 65 - #define HW_R4B_REG_ADDR 0x0100UL 66 - #define HW_CSA_ADDR_0_REG_ADDR 0x0140UL 67 - #define HW_CSA_ADDR_1_REG_ADDR 0x0144UL 68 - #define HW_CSA_ADDR_2_REG_ADDR 0x0148UL 69 - #define HW_CSA_ADDR_3_REG_ADDR 0x014cUL 70 - #define HW_CSA_ADDR_4_REG_ADDR 0x0150UL 71 - #define HW_CSA_ADDR_5_REG_ADDR 0x0154UL 72 - #define HW_CSA_ADDR_6_REG_ADDR 0x0158UL 73 - #define HW_CSA_ADDR_7_REG_ADDR 0x015cUL 74 - #define HW_CSA_ADDR_8_REG_ADDR 0x0160UL 75 - #define 
HW_CSA_REG_ADDR 0x0140UL 76 - #define HW_SINB_REG_ADDR 0x0180UL 77 - #define HW_SOUTB_REG_ADDR 0x0184UL 78 - #define HW_PKI_CONTROL_REG_ADDR 0x01C0UL 79 - #define HW_PKI_STATUS_REG_ADDR 0x01C4UL 80 - #define HW_PKI_BUSY_REG_ADDR 0x01C8UL 81 - #define HW_PKI_A_1025_REG_ADDR 0x01CCUL 82 - #define HW_PKI_SDMA_CTL_REG_ADDR 0x01D0UL 83 - #define HW_PKI_SDMA_OFFSET_REG_ADDR 0x01D4UL 84 - #define HW_PKI_SDMA_POINTERS_REG_ADDR 0x01D8UL 85 - #define HW_PKI_SDMA_DLENG_REG_ADDR 0x01DCUL 86 - #define HW_PKI_SDMA_EXP_POINTERS_REG_ADDR 0x01E0UL 87 - #define HW_PKI_SDMA_RES_POINTERS_REG_ADDR 0x01E4UL 88 - #define HW_PKI_CLR_REG_ADDR 0x01E8UL 89 - #define HW_PKI_SDMA_BUSY_REG_ADDR 0x01E8UL 90 - #define HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR 0x01ECUL 91 - #define HW_PKI_SDMA_MUL_BY1_REG_ADDR 0x01F0UL 92 - #define HW_PKI_SDMA_RMUL_SEL_REG_ADDR 0x01F4UL 93 - #define HW_DES_KEY_0_REG_ADDR 0x0208UL 94 - #define HW_DES_KEY_1_REG_ADDR 0x020CUL 95 - #define HW_DES_KEY_2_REG_ADDR 0x0210UL 96 - #define HW_DES_KEY_3_REG_ADDR 0x0214UL 97 - #define HW_DES_KEY_4_REG_ADDR 0x0218UL 98 - #define HW_DES_KEY_5_REG_ADDR 0x021CUL 99 - #define HW_DES_CONTROL_0_REG_ADDR 0x0220UL 100 - #define HW_DES_CONTROL_1_REG_ADDR 0x0224UL 101 - #define HW_DES_IV_0_REG_ADDR 0x0228UL 102 - #define HW_DES_IV_1_REG_ADDR 0x022CUL 103 - #define HW_AES_KEY_0_ADDR_0_REG_ADDR 0x0400UL 104 - #define HW_AES_KEY_0_ADDR_1_REG_ADDR 0x0404UL 105 - #define HW_AES_KEY_0_ADDR_2_REG_ADDR 0x0408UL 106 - #define HW_AES_KEY_0_ADDR_3_REG_ADDR 0x040cUL 107 - #define HW_AES_KEY_0_ADDR_4_REG_ADDR 0x0410UL 108 - #define HW_AES_KEY_0_ADDR_5_REG_ADDR 0x0414UL 109 - #define HW_AES_KEY_0_ADDR_6_REG_ADDR 0x0418UL 110 - #define HW_AES_KEY_0_ADDR_7_REG_ADDR 0x041cUL 111 - #define HW_AES_KEY_0_REG_ADDR 0x0400UL 112 - #define HW_AES_IV_0_ADDR_0_REG_ADDR 0x0440UL 113 - #define HW_AES_IV_0_ADDR_1_REG_ADDR 0x0444UL 114 - #define HW_AES_IV_0_ADDR_2_REG_ADDR 0x0448UL 115 - #define HW_AES_IV_0_ADDR_3_REG_ADDR 0x044cUL 116 - #define HW_AES_IV_0_REG_ADDR 0x0440UL 
117 - #define HW_AES_CTR1_ADDR_0_REG_ADDR 0x0460UL 118 - #define HW_AES_CTR1_ADDR_1_REG_ADDR 0x0464UL 119 - #define HW_AES_CTR1_ADDR_2_REG_ADDR 0x0468UL 120 - #define HW_AES_CTR1_ADDR_3_REG_ADDR 0x046cUL 121 - #define HW_AES_CTR1_REG_ADDR 0x0460UL 122 - #define HW_AES_SK_REG_ADDR 0x0478UL 123 - #define HW_AES_MAC_OK_REG_ADDR 0x0480UL 124 - #define HW_AES_PREV_IV_0_ADDR_0_REG_ADDR 0x0490UL 125 - #define HW_AES_PREV_IV_0_ADDR_1_REG_ADDR 0x0494UL 126 - #define HW_AES_PREV_IV_0_ADDR_2_REG_ADDR 0x0498UL 127 - #define HW_AES_PREV_IV_0_ADDR_3_REG_ADDR 0x049cUL 128 - #define HW_AES_PREV_IV_0_REG_ADDR 0x0490UL 129 - #define HW_AES_CONTROL_REG_ADDR 0x04C0UL 130 - #define HW_HASH_H0_REG_ADDR 0x0640UL 131 - #define HW_HASH_H1_REG_ADDR 0x0644UL 132 - #define HW_HASH_H2_REG_ADDR 0x0648UL 133 - #define HW_HASH_H3_REG_ADDR 0x064CUL 134 - #define HW_HASH_H4_REG_ADDR 0x0650UL 135 - #define HW_HASH_H5_REG_ADDR 0x0654UL 136 - #define HW_HASH_H6_REG_ADDR 0x0658UL 137 - #define HW_HASH_H7_REG_ADDR 0x065CUL 138 - #define HW_HASH_H8_REG_ADDR 0x0660UL 139 - #define HW_HASH_H9_REG_ADDR 0x0664UL 140 - #define HW_HASH_H10_REG_ADDR 0x0668UL 141 - #define HW_HASH_H11_REG_ADDR 0x066CUL 142 - #define HW_HASH_H12_REG_ADDR 0x0670UL 143 - #define HW_HASH_H13_REG_ADDR 0x0674UL 144 - #define HW_HASH_H14_REG_ADDR 0x0678UL 145 - #define HW_HASH_H15_REG_ADDR 0x067CUL 146 - #define HW_HASH_CONTROL_REG_ADDR 0x07C0UL 147 - #define HW_HASH_PAD_EN_REG_ADDR 0x07C4UL 148 - #define HW_HASH_PAD_CFG_REG_ADDR 0x07C8UL 149 - #define HW_HASH_CUR_LEN_0_REG_ADDR 0x07CCUL 150 - #define HW_HASH_CUR_LEN_1_REG_ADDR 0x07D0UL 151 - #define HW_HASH_CUR_LEN_2_REG_ADDR 0x07D4UL 152 - #define HW_HASH_CUR_LEN_3_REG_ADDR 0x07D8UL 153 - #define HW_HASH_PARAM_REG_ADDR 0x07DCUL 154 - #define HW_HASH_INT_BUSY_REG_ADDR 0x07E0UL 155 - #define HW_HASH_SW_RESET_REG_ADDR 0x07E4UL 156 - #define HW_HASH_ENDIANESS_REG_ADDR 0x07E8UL 157 - #define HW_HASH_DATA_REG_ADDR 0x07ECUL 158 - #define HW_DRNG_CONTROL_REG_ADDR 0x0800UL 159 - #define 
HW_DRNG_VALID_REG_ADDR 0x0804UL 160 - #define HW_DRNG_DATA_REG_ADDR 0x0808UL 161 - #define HW_RND_SRC_EN_REG_ADDR 0x080CUL 162 - #define HW_AES_CLK_ENABLE_REG_ADDR 0x0810UL 163 - #define HW_DES_CLK_ENABLE_REG_ADDR 0x0814UL 164 - #define HW_HASH_CLK_ENABLE_REG_ADDR 0x0818UL 165 - #define HW_PKI_CLK_ENABLE_REG_ADDR 0x081CUL 166 - #define HW_CLK_STATUS_REG_ADDR 0x0824UL 167 - #define HW_CLK_ENABLE_REG_ADDR 0x0828UL 168 - #define HW_DRNG_SAMPLE_REG_ADDR 0x0850UL 169 - #define HW_RND_SRC_CTL_REG_ADDR 0x0858UL 170 - #define HW_CRYPTO_CTL_REG_ADDR 0x0900UL 171 - #define HW_CRYPTO_STATUS_REG_ADDR 0x090CUL 172 - #define HW_CRYPTO_BUSY_REG_ADDR 0x0910UL 173 - #define HW_AES_BUSY_REG_ADDR 0x0914UL 174 - #define HW_DES_BUSY_REG_ADDR 0x0918UL 175 - #define HW_HASH_BUSY_REG_ADDR 0x091CUL 176 - #define HW_CONTENT_REG_ADDR 0x0924UL 177 - #define HW_VERSION_REG_ADDR 0x0928UL 178 - #define HW_CONTEXT_ID_REG_ADDR 0x0930UL 179 - #define HW_DIN_BUFFER_REG_ADDR 0x0C00UL 180 - #define HW_DIN_MEM_DMA_BUSY_REG_ADDR 0x0c20UL 181 - #define HW_SRC_LLI_MEM_ADDR_REG_ADDR 0x0c24UL 182 - #define HW_SRC_LLI_WORD0_REG_ADDR 0x0C28UL 183 - #define HW_SRC_LLI_WORD1_REG_ADDR 0x0C2CUL 184 - #define HW_SRAM_SRC_ADDR_REG_ADDR 0x0c30UL 185 - #define HW_DIN_SRAM_BYTES_LEN_REG_ADDR 0x0c34UL 186 - #define HW_DIN_SRAM_DMA_BUSY_REG_ADDR 0x0C38UL 187 - #define HW_WRITE_ALIGN_REG_ADDR 0x0C3CUL 188 - #define HW_OLD_DATA_REG_ADDR 0x0C48UL 189 - #define HW_WRITE_ALIGN_LAST_REG_ADDR 0x0C4CUL 190 - #define HW_DOUT_BUFFER_REG_ADDR 0x0C00UL 191 - #define HW_DST_LLI_WORD0_REG_ADDR 0x0D28UL 192 - #define HW_DST_LLI_WORD1_REG_ADDR 0x0D2CUL 193 - #define HW_DST_LLI_MEM_ADDR_REG_ADDR 0x0D24UL 194 - #define HW_DOUT_MEM_DMA_BUSY_REG_ADDR 0x0D20UL 195 - #define HW_SRAM_DEST_ADDR_REG_ADDR 0x0D30UL 196 - #define HW_DOUT_SRAM_BYTES_LEN_REG_ADDR 0x0D34UL 197 - #define HW_DOUT_SRAM_DMA_BUSY_REG_ADDR 0x0D38UL 198 - #define HW_READ_ALIGN_REG_ADDR 0x0D3CUL 199 - #define HW_READ_LAST_DATA_REG_ADDR 0x0D44UL 200 - #define 
HW_RC4_THRU_CPU_REG_ADDR 0x0D4CUL 201 - #define HW_AHB_SINGLE_REG_ADDR 0x0E00UL 202 - #define HW_SRAM_DATA_REG_ADDR 0x0F00UL 203 - #define HW_SRAM_ADDR_REG_ADDR 0x0F04UL 204 - #define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL 205 45 #define HW_HOST_IRR_REG_ADDR 0x0A00UL 206 46 #define HW_HOST_IMR_REG_ADDR 0x0A04UL 207 47 #define HW_HOST_ICR_REG_ADDR 0x0A08UL 208 - #define HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR 0x0A10UL 209 - #define HW_HOST_SEP_BUSY_REG_ADDR 0x0A14UL 210 - #define HW_HOST_SEP_LCS_REG_ADDR 0x0A18UL 211 - #define HW_HOST_CC_SW_RST_REG_ADDR 0x0A40UL 212 - #define HW_HOST_SEP_SW_RST_REG_ADDR 0x0A44UL 213 - #define HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR 0x0A80UL 214 - #define HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR 0x0A84UL 215 - #define HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR 0x0A88UL 216 - #define HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR 0x0A8cUL 217 - #define HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR 0x0A90UL 218 - #define HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR 0x0A94UL 219 - #define HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR 0x0A98UL 220 - #define HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR 0x0A9cUL 221 48 #define HW_HOST_SEP_HOST_GPR0_REG_ADDR 0x0B00UL 222 49 #define HW_HOST_SEP_HOST_GPR1_REG_ADDR 0x0B04UL 223 50 #define HW_HOST_SEP_HOST_GPR2_REG_ADDR 0x0B08UL ··· 54 225 #define HW_HOST_HOST_SEP_GPR1_REG_ADDR 0x0B84UL 55 226 #define HW_HOST_HOST_SEP_GPR2_REG_ADDR 0x0B88UL 56 227 #define HW_HOST_HOST_SEP_GPR3_REG_ADDR 0x0B8CUL 57 - #define HW_HOST_HOST_ENDIAN_REG_ADDR 0x0B90UL 58 - #define HW_HOST_HOST_COMM_CLK_EN_REG_ADDR 0x0B94UL 59 - #define HW_CLR_SRAM_BUSY_REG_REG_ADDR 0x0F0CUL 60 - #define HW_CC_SRAM_BASE_ADDRESS 0x5800UL 228 + #define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL 61 229 62 230 #endif /* ifndef HW_DEFS */
+4286
drivers/staging/sep/sep_main.c
··· 1 + /* 2 + * 3 + * sep_main.c - Security Processor Driver main group of functions 4 + * 5 + * Copyright(c) 2009-2011 Intel Corporation. All rights reserved. 6 + * Contributions(c) 2009-2011 Discretix. All rights reserved. 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms of the GNU General Public License as published by the Free 10 + * Software Foundation; version 2 of the License. 11 + * 12 + * This program is distributed in the hope that it will be useful, but WITHOUT 13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 + * more details. 16 + * 17 + * You should have received a copy of the GNU General Public License along with 18 + * this program; if not, write to the Free Software Foundation, Inc., 59 19 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 20 + * 21 + * CONTACTS: 22 + * 23 + * Mark Allyn mark.a.allyn@intel.com 24 + * Jayant Mangalampalli jayant.mangalampalli@intel.com 25 + * 26 + * CHANGES: 27 + * 28 + * 2009.06.26 Initial publish 29 + * 2010.09.14 Upgrade to Medfield 30 + * 2011.01.21 Move to sep_main.c to allow for sep_crypto.c 31 + * 2011.02.22 Enable kernel crypto operation 32 + * 33 + * Please note that this driver is based on information in the Discretix 34 + * CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2 35 + * Integration Intel Medfield appendix; the Discretix CryptoCell 5.2 36 + * Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System 37 + * Overview and Integration Guide. 
38 + */ 39 + /* #define DEBUG */ 40 + /* #define SEP_PERF_DEBUG */ 41 + 42 + #include <linux/init.h> 43 + #include <linux/module.h> 44 + #include <linux/miscdevice.h> 45 + #include <linux/fs.h> 46 + #include <linux/cdev.h> 47 + #include <linux/kdev_t.h> 48 + #include <linux/mutex.h> 49 + #include <linux/sched.h> 50 + #include <linux/mm.h> 51 + #include <linux/poll.h> 52 + #include <linux/wait.h> 53 + #include <linux/pci.h> 54 + #include <linux/pm_runtime.h> 55 + #include <linux/slab.h> 56 + #include <linux/ioctl.h> 57 + #include <asm/current.h> 58 + #include <linux/ioport.h> 59 + #include <linux/io.h> 60 + #include <linux/interrupt.h> 61 + #include <linux/pagemap.h> 62 + #include <asm/cacheflush.h> 63 + #include <linux/sched.h> 64 + #include <linux/delay.h> 65 + #include <linux/jiffies.h> 66 + #include <linux/async.h> 67 + #include <linux/crypto.h> 68 + #include <crypto/internal/hash.h> 69 + #include <crypto/scatterwalk.h> 70 + #include <crypto/sha.h> 71 + #include <crypto/md5.h> 72 + #include <crypto/aes.h> 73 + #include <crypto/des.h> 74 + #include <crypto/hash.h> 75 + 76 + #include "sep_driver_hw_defs.h" 77 + #include "sep_driver_config.h" 78 + #include "sep_driver_api.h" 79 + #include "sep_dev.h" 80 + #include "sep_crypto.h" 81 + 82 + #define CREATE_TRACE_POINTS 83 + #include "sep_trace_events.h" 84 + 85 + /* 86 + * Let's not spend cycles iterating over message 87 + * area contents if debugging not enabled 88 + */ 89 + #ifdef DEBUG 90 + #define sep_dump_message(sep) _sep_dump_message(sep) 91 + #else 92 + #define sep_dump_message(sep) 93 + #endif 94 + 95 + /** 96 + * Currenlty, there is only one SEP device per platform; 97 + * In event platforms in the future have more than one SEP 98 + * device, this will be a linked list 99 + */ 100 + 101 + struct sep_device *sep_dev; 102 + 103 + /** 104 + * sep_queue_status_remove - Removes transaction from status queue 105 + * @sep: SEP device 106 + * @sep_queue_info: pointer to status queue 107 + * 108 + * This function 
will removes information about transaction from the queue. 109 + */ 110 + void sep_queue_status_remove(struct sep_device *sep, 111 + struct sep_queue_info **queue_elem) 112 + { 113 + unsigned long lck_flags; 114 + 115 + dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n", 116 + current->pid); 117 + 118 + if (!queue_elem || !(*queue_elem)) { 119 + dev_dbg(&sep->pdev->dev, "PID%d %s null\n", 120 + current->pid, __func__); 121 + return; 122 + } 123 + 124 + spin_lock_irqsave(&sep->sep_queue_lock, lck_flags); 125 + list_del(&(*queue_elem)->list); 126 + sep->sep_queue_num--; 127 + spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags); 128 + 129 + kfree(*queue_elem); 130 + *queue_elem = NULL; 131 + 132 + dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n", 133 + current->pid); 134 + return; 135 + } 136 + 137 + /** 138 + * sep_queue_status_add - Adds transaction to status queue 139 + * @sep: SEP device 140 + * @opcode: transaction opcode 141 + * @size: input data size 142 + * @pid: pid of current process 143 + * @name: current process name 144 + * @name_len: length of name (current process) 145 + * 146 + * This function adds information about about transaction started to the status 147 + * queue. 
148 + */ 149 + struct sep_queue_info *sep_queue_status_add( 150 + struct sep_device *sep, 151 + u32 opcode, 152 + u32 size, 153 + u32 pid, 154 + u8 *name, size_t name_len) 155 + { 156 + unsigned long lck_flags; 157 + struct sep_queue_info *my_elem = NULL; 158 + 159 + my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL); 160 + 161 + if (!my_elem) 162 + return NULL; 163 + 164 + dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid); 165 + 166 + my_elem->data.opcode = opcode; 167 + my_elem->data.size = size; 168 + my_elem->data.pid = pid; 169 + 170 + if (name_len > TASK_COMM_LEN) 171 + name_len = TASK_COMM_LEN; 172 + 173 + memcpy(&my_elem->data.name, name, name_len); 174 + 175 + spin_lock_irqsave(&sep->sep_queue_lock, lck_flags); 176 + 177 + list_add_tail(&my_elem->list, &sep->sep_queue_status); 178 + sep->sep_queue_num++; 179 + 180 + spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags); 181 + 182 + return my_elem; 183 + } 184 + 185 + /** 186 + * sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables 187 + * @sep: SEP device 188 + * @dmatables_region: Destination pointer for the buffer 189 + * @dma_ctx: DMA context for the transaction 190 + * @table_count: Number of MLLI/DMA tables to create 191 + * The buffer created will not work as-is for DMA operations, 192 + * it needs to be copied over to the appropriate place in the 193 + * shared area. 
194 + */ 195 + static int sep_allocate_dmatables_region(struct sep_device *sep, 196 + void **dmatables_region, 197 + struct sep_dma_context *dma_ctx, 198 + const u32 table_count) 199 + { 200 + const size_t new_len = table_count * 201 + sizeof(struct sep_lli_entry) * 202 + SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; 203 + void *tmp_region = NULL; 204 + 205 + dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n", 206 + current->pid, dma_ctx); 207 + dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n", 208 + current->pid, dmatables_region); 209 + 210 + if (!dma_ctx || !dmatables_region) { 211 + dev_warn(&sep->pdev->dev, 212 + "[PID%d] dma context/region uninitialized\n", 213 + current->pid); 214 + return -EINVAL; 215 + } 216 + 217 + dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08X\n", 218 + current->pid, new_len); 219 + dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid, 220 + dma_ctx->dmatables_len); 221 + tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL); 222 + if (!tmp_region) { 223 + dev_warn(&sep->pdev->dev, 224 + "[PID%d] no mem for dma tables region\n", 225 + current->pid); 226 + return -ENOMEM; 227 + } 228 + 229 + /* Were there any previous tables that need to be preserved ? 
*/ 230 + if (*dmatables_region) { 231 + memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len); 232 + kfree(*dmatables_region); 233 + } 234 + 235 + *dmatables_region = tmp_region; 236 + 237 + dma_ctx->dmatables_len += new_len; 238 + 239 + return 0; 240 + } 241 + 242 + /** 243 + * sep_wait_transaction - Used for synchronizing transactions 244 + * @sep: SEP device 245 + */ 246 + int sep_wait_transaction(struct sep_device *sep) 247 + { 248 + int error = 0; 249 + DEFINE_WAIT(wait); 250 + 251 + if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, 252 + &sep->in_use_flags)) { 253 + dev_dbg(&sep->pdev->dev, 254 + "[PID%d] no transactions, returning\n", 255 + current->pid); 256 + goto end_function_setpid; 257 + } 258 + 259 + /* 260 + * Looping needed even for exclusive waitq entries 261 + * due to process wakeup latencies, previous process 262 + * might have already created another transaction. 263 + */ 264 + for (;;) { 265 + /* 266 + * Exclusive waitq entry, so that only one process is 267 + * woken up from the queue at a time. 268 + */ 269 + prepare_to_wait_exclusive(&sep->event_transactions, 270 + &wait, 271 + TASK_INTERRUPTIBLE); 272 + if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, 273 + &sep->in_use_flags)) { 274 + dev_dbg(&sep->pdev->dev, 275 + "[PID%d] no transactions, breaking\n", 276 + current->pid); 277 + break; 278 + } 279 + dev_dbg(&sep->pdev->dev, 280 + "[PID%d] transactions ongoing, sleeping\n", 281 + current->pid); 282 + schedule(); 283 + dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid); 284 + 285 + if (signal_pending(current)) { 286 + dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n", 287 + current->pid); 288 + error = -EINTR; 289 + goto end_function; 290 + } 291 + } 292 + end_function_setpid: 293 + /* 294 + * The pid_doing_transaction indicates that this process 295 + * now owns the facilities to performa a transaction with 296 + * the SEP. 
While this process is performing a transaction, 297 + * no other process who has the SEP device open can perform 298 + * any transactions. This method allows more than one process 299 + * to have the device open at any given time, which provides 300 + * finer granularity for device utilization by multiple 301 + * processes. 302 + */ 303 + /* Only one process is able to progress here at a time */ 304 + sep->pid_doing_transaction = current->pid; 305 + 306 + end_function: 307 + finish_wait(&sep->event_transactions, &wait); 308 + 309 + return error; 310 + } 311 + 312 + /** 313 + * sep_check_transaction_owner - Checks if current process owns transaction 314 + * @sep: SEP device 315 + */ 316 + static inline int sep_check_transaction_owner(struct sep_device *sep) 317 + { 318 + dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n", 319 + current->pid, 320 + sep->pid_doing_transaction); 321 + 322 + if ((sep->pid_doing_transaction == 0) || 323 + (current->pid != sep->pid_doing_transaction)) { 324 + return -EACCES; 325 + } 326 + 327 + /* We own the transaction */ 328 + return 0; 329 + } 330 + 331 + #ifdef DEBUG 332 + 333 + /** 334 + * sep_dump_message - dump the message that is pending 335 + * @sep: SEP device 336 + * This will only print dump if DEBUG is set; it does 337 + * follow kernel debug print enabling 338 + */ 339 + static void _sep_dump_message(struct sep_device *sep) 340 + { 341 + int count; 342 + 343 + u32 *p = sep->shared_addr; 344 + 345 + for (count = 0; count < 40 * 4; count += 4) 346 + dev_dbg(&sep->pdev->dev, 347 + "[PID%d] Word %d of the message is %x\n", 348 + current->pid, count/4, *p++); 349 + } 350 + #endif 351 + 352 + /** 353 + * sep_map_and_alloc_shared_area -allocate shared block 354 + * @sep: security processor 355 + * @size: size of shared area 356 + */ 357 + static int sep_map_and_alloc_shared_area(struct sep_device *sep) 358 + { 359 + sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, 360 + sep->shared_size, 361 + &sep->shared_bus, 
GFP_KERNEL); 362 + 363 + if (!sep->shared_addr) { 364 + dev_dbg(&sep->pdev->dev, 365 + "[PID%d] shared memory dma_alloc_coherent failed\n", 366 + current->pid); 367 + return -ENOMEM; 368 + } 369 + dev_dbg(&sep->pdev->dev, 370 + "[PID%d] shared_addr %zx bytes @%p (bus %llx)\n", 371 + current->pid, 372 + sep->shared_size, sep->shared_addr, 373 + (unsigned long long)sep->shared_bus); 374 + return 0; 375 + } 376 + 377 + /** 378 + * sep_unmap_and_free_shared_area - free shared block 379 + * @sep: security processor 380 + */ 381 + static void sep_unmap_and_free_shared_area(struct sep_device *sep) 382 + { 383 + dma_free_coherent(&sep->pdev->dev, sep->shared_size, 384 + sep->shared_addr, sep->shared_bus); 385 + } 386 + 387 + /** 388 + * sep_shared_bus_to_virt - convert bus/virt addresses 389 + * @sep: pointer to struct sep_device 390 + * @bus_address: address to convert 391 + * 392 + * Returns virtual address inside the shared area according 393 + * to the bus address. 394 + */ 395 + static void *sep_shared_bus_to_virt(struct sep_device *sep, 396 + dma_addr_t bus_address) 397 + { 398 + return sep->shared_addr + (bus_address - sep->shared_bus); 399 + } 400 + 401 + /** 402 + * sep_open - device open method 403 + * @inode: inode of SEP device 404 + * @filp: file handle to SEP device 405 + * 406 + * Open method for the SEP device. Called when userspace opens 407 + * the SEP device node. 408 + * 409 + * Returns zero on success otherwise an error code. 
410 + */ 411 + static int sep_open(struct inode *inode, struct file *filp) 412 + { 413 + struct sep_device *sep; 414 + struct sep_private_data *priv; 415 + 416 + dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid); 417 + 418 + if (filp->f_flags & O_NONBLOCK) 419 + return -ENOTSUPP; 420 + 421 + /* 422 + * Get the SEP device structure and use it for the 423 + * private_data field in filp for other methods 424 + */ 425 + 426 + priv = kzalloc(sizeof(*priv), GFP_KERNEL); 427 + if (!priv) 428 + return -ENOMEM; 429 + 430 + sep = sep_dev; 431 + priv->device = sep; 432 + filp->private_data = priv; 433 + 434 + dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n", 435 + current->pid, priv); 436 + 437 + /* Anyone can open; locking takes place at transaction level */ 438 + return 0; 439 + } 440 + 441 + /** 442 + * sep_free_dma_table_data_handler - free DMA table 443 + * @sep: pointere to struct sep_device 444 + * @dma_ctx: dma context 445 + * 446 + * Handles the request to free DMA table for synchronic actions 447 + */ 448 + int sep_free_dma_table_data_handler(struct sep_device *sep, 449 + struct sep_dma_context **dma_ctx) 450 + { 451 + int count; 452 + int dcb_counter; 453 + /* Pointer to the current dma_resource struct */ 454 + struct sep_dma_resource *dma; 455 + 456 + dev_dbg(&sep->pdev->dev, 457 + "[PID%d] sep_free_dma_table_data_handler\n", 458 + current->pid); 459 + 460 + if (!dma_ctx || !(*dma_ctx)) { 461 + /* No context or context already freed */ 462 + dev_dbg(&sep->pdev->dev, 463 + "[PID%d] no DMA context or context already freed\n", 464 + current->pid); 465 + 466 + return 0; 467 + } 468 + 469 + dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n", 470 + current->pid, 471 + (*dma_ctx)->nr_dcb_creat); 472 + 473 + for (dcb_counter = 0; 474 + dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) { 475 + dma = &(*dma_ctx)->dma_res_arr[dcb_counter]; 476 + 477 + /* Unmap and free input map array */ 478 + if (dma->in_map_array) { 479 + for (count = 
0; count < dma->in_num_pages; count++) { 480 + dma_unmap_page(&sep->pdev->dev, 481 + dma->in_map_array[count].dma_addr, 482 + dma->in_map_array[count].size, 483 + DMA_TO_DEVICE); 484 + } 485 + kfree(dma->in_map_array); 486 + } 487 + 488 + /* Unmap output map array, DON'T free it yet */ 489 + if (dma->out_map_array) { 490 + for (count = 0; count < dma->out_num_pages; count++) { 491 + dma_unmap_page(&sep->pdev->dev, 492 + dma->out_map_array[count].dma_addr, 493 + dma->out_map_array[count].size, 494 + DMA_FROM_DEVICE); 495 + } 496 + kfree(dma->out_map_array); 497 + } 498 + 499 + /* Free page cache for output */ 500 + if (dma->in_page_array) { 501 + for (count = 0; count < dma->in_num_pages; count++) { 502 + flush_dcache_page(dma->in_page_array[count]); 503 + page_cache_release(dma->in_page_array[count]); 504 + } 505 + kfree(dma->in_page_array); 506 + } 507 + 508 + if (dma->out_page_array) { 509 + for (count = 0; count < dma->out_num_pages; count++) { 510 + if (!PageReserved(dma->out_page_array[count])) 511 + 512 + SetPageDirty(dma-> 513 + out_page_array[count]); 514 + 515 + flush_dcache_page(dma->out_page_array[count]); 516 + page_cache_release(dma->out_page_array[count]); 517 + } 518 + kfree(dma->out_page_array); 519 + } 520 + 521 + /** 522 + * Note that here we use in_map_num_entries because we 523 + * don't have a page array; the page array is generated 524 + * only in the lock_user_pages, which is not called 525 + * for kernel crypto, which is what the sg (scatter gather 526 + * is used for exclusively 527 + */ 528 + if (dma->src_sg) { 529 + dma_unmap_sg(&sep->pdev->dev, dma->src_sg, 530 + dma->in_map_num_entries, DMA_TO_DEVICE); 531 + dma->src_sg = NULL; 532 + } 533 + 534 + if (dma->dst_sg) { 535 + dma_unmap_sg(&sep->pdev->dev, dma->dst_sg, 536 + dma->in_map_num_entries, DMA_FROM_DEVICE); 537 + dma->dst_sg = NULL; 538 + } 539 + 540 + /* Reset all the values */ 541 + dma->in_page_array = NULL; 542 + dma->out_page_array = NULL; 543 + dma->in_num_pages = 0; 544 + 
dma->out_num_pages = 0; 545 + dma->in_map_array = NULL; 546 + dma->out_map_array = NULL; 547 + dma->in_map_num_entries = 0; 548 + dma->out_map_num_entries = 0; 549 + } 550 + 551 + (*dma_ctx)->nr_dcb_creat = 0; 552 + (*dma_ctx)->num_lli_tables_created = 0; 553 + 554 + kfree(*dma_ctx); 555 + *dma_ctx = NULL; 556 + 557 + dev_dbg(&sep->pdev->dev, 558 + "[PID%d] sep_free_dma_table_data_handler end\n", 559 + current->pid); 560 + 561 + return 0; 562 + } 563 + 564 + /** 565 + * sep_end_transaction_handler - end transaction 566 + * @sep: pointer to struct sep_device 567 + * @dma_ctx: DMA context 568 + * @call_status: Call status 569 + * 570 + * This API handles the end transaction request. 571 + */ 572 + static int sep_end_transaction_handler(struct sep_device *sep, 573 + struct sep_dma_context **dma_ctx, 574 + struct sep_call_status *call_status, 575 + struct sep_queue_info **my_queue_elem) 576 + { 577 + dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid); 578 + 579 + /* 580 + * Extraneous transaction clearing would mess up PM 581 + * device usage counters and SEP would get suspended 582 + * just before we send a command to SEP in the next 583 + * transaction 584 + * */ 585 + if (sep_check_transaction_owner(sep)) { 586 + dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n", 587 + current->pid); 588 + return 0; 589 + } 590 + 591 + /* Update queue status */ 592 + sep_queue_status_remove(sep, my_queue_elem); 593 + 594 + /* Check that all the DMA resources were freed */ 595 + if (dma_ctx) 596 + sep_free_dma_table_data_handler(sep, dma_ctx); 597 + 598 + /* Reset call status for next transaction */ 599 + if (call_status) 600 + call_status->status = 0; 601 + 602 + /* Clear the message area to avoid next transaction reading 603 + * sensitive results from previous transaction */ 604 + memset(sep->shared_addr, 0, 605 + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); 606 + 607 + /* start suspend delay */ 608 + #ifdef SEP_ENABLE_RUNTIME_PM 609 + if 
(sep->in_use) { 610 + sep->in_use = 0; 611 + pm_runtime_mark_last_busy(&sep->pdev->dev); 612 + pm_runtime_put_autosuspend(&sep->pdev->dev); 613 + } 614 + #endif 615 + 616 + clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags); 617 + sep->pid_doing_transaction = 0; 618 + 619 + /* Now it's safe for next process to proceed */ 620 + dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n", 621 + current->pid); 622 + clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags); 623 + wake_up(&sep->event_transactions); 624 + 625 + return 0; 626 + } 627 + 628 + 629 + /** 630 + * sep_release - close a SEP device 631 + * @inode: inode of SEP device 632 + * @filp: file handle being closed 633 + * 634 + * Called on the final close of a SEP device. 635 + */ 636 + static int sep_release(struct inode *inode, struct file *filp) 637 + { 638 + struct sep_private_data * const private_data = filp->private_data; 639 + struct sep_call_status *call_status = &private_data->call_status; 640 + struct sep_device *sep = private_data->device; 641 + struct sep_dma_context **dma_ctx = &private_data->dma_ctx; 642 + struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem; 643 + 644 + dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid); 645 + 646 + sep_end_transaction_handler(sep, dma_ctx, call_status, 647 + my_queue_elem); 648 + 649 + kfree(filp->private_data); 650 + 651 + return 0; 652 + } 653 + 654 + /** 655 + * sep_mmap - maps the shared area to user space 656 + * @filp: pointer to struct file 657 + * @vma: pointer to vm_area_struct 658 + * 659 + * Called on an mmap of our space via the normal SEP device 660 + */ 661 + static int sep_mmap(struct file *filp, struct vm_area_struct *vma) 662 + { 663 + struct sep_private_data * const private_data = filp->private_data; 664 + struct sep_call_status *call_status = &private_data->call_status; 665 + struct sep_device *sep = private_data->device; 666 + struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem; 
667 + dma_addr_t bus_addr; 668 + unsigned long error = 0; 669 + 670 + dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid); 671 + 672 + /* Set the transaction busy (own the device) */ 673 + /* 674 + * Problem for multithreaded applications is that here we're 675 + * possibly going to sleep while holding a write lock on 676 + * current->mm->mmap_sem, which will cause deadlock for ongoing 677 + * transaction trying to create DMA tables 678 + */ 679 + error = sep_wait_transaction(sep); 680 + if (error) 681 + /* Interrupted by signal, don't clear transaction */ 682 + goto end_function; 683 + 684 + /* Clear the message area to avoid next transaction reading 685 + * sensitive results from previous transaction */ 686 + memset(sep->shared_addr, 0, 687 + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); 688 + 689 + /* 690 + * Check that the size of the mapped range is as the size of the message 691 + * shared area 692 + */ 693 + if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) { 694 + error = -EINVAL; 695 + goto end_function_with_error; 696 + } 697 + 698 + dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n", 699 + current->pid, sep->shared_addr); 700 + 701 + /* Get bus address */ 702 + bus_addr = sep->shared_bus; 703 + 704 + if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, 705 + vma->vm_end - vma->vm_start, vma->vm_page_prot)) { 706 + dev_dbg(&sep->pdev->dev, "[PID%d] remap_page_range failed\n", 707 + current->pid); 708 + error = -EAGAIN; 709 + goto end_function_with_error; 710 + } 711 + 712 + /* Update call status */ 713 + set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status); 714 + 715 + goto end_function; 716 + 717 + end_function_with_error: 718 + /* Clear our transaction */ 719 + sep_end_transaction_handler(sep, NULL, call_status, 720 + my_queue_elem); 721 + 722 + end_function: 723 + return error; 724 + } 725 + 726 + /** 727 + * sep_poll - poll handler 728 + * @filp: pointer to struct file 729 + * @wait: pointer to poll_table 
730 + * 731 + * Called by the OS when the kernel is asked to do a poll on 732 + * a SEP file handle. 733 + */ 734 + static unsigned int sep_poll(struct file *filp, poll_table *wait) 735 + { 736 + struct sep_private_data * const private_data = filp->private_data; 737 + struct sep_call_status *call_status = &private_data->call_status; 738 + struct sep_device *sep = private_data->device; 739 + u32 mask = 0; 740 + u32 retval = 0; 741 + u32 retval2 = 0; 742 + unsigned long lock_irq_flag; 743 + 744 + /* Am I the process that owns the transaction? */ 745 + if (sep_check_transaction_owner(sep)) { 746 + dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n", 747 + current->pid); 748 + mask = POLLERR; 749 + goto end_function; 750 + } 751 + 752 + /* Check if send command or send_reply were activated previously */ 753 + if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, 754 + &call_status->status)) { 755 + dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n", 756 + current->pid); 757 + mask = POLLERR; 758 + goto end_function; 759 + } 760 + 761 + 762 + /* Add the event to the polling wait table */ 763 + dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n", 764 + current->pid); 765 + 766 + poll_wait(filp, &sep->event_interrupt, wait); 767 + 768 + dev_dbg(&sep->pdev->dev, 769 + "[PID%d] poll: send_ct is %lx reply ct is %lx\n", 770 + current->pid, sep->send_ct, sep->reply_ct); 771 + 772 + /* Check if error occured during poll */ 773 + retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR); 774 + if ((retval2 != 0x0) && (retval2 != 0x8)) { 775 + dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n", 776 + current->pid, retval2); 777 + mask |= POLLERR; 778 + goto end_function; 779 + } 780 + 781 + spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag); 782 + 783 + if (sep->send_ct == sep->reply_ct) { 784 + spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag); 785 + retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); 786 + dev_dbg(&sep->pdev->dev, 
787 + "[PID%d] poll: data ready check (GPR2) %x\n", 788 + current->pid, retval); 789 + 790 + /* Check if printf request */ 791 + if ((retval >> 30) & 0x1) { 792 + dev_dbg(&sep->pdev->dev, 793 + "[PID%d] poll: SEP printf request\n", 794 + current->pid); 795 + goto end_function; 796 + } 797 + 798 + /* Check if the this is SEP reply or request */ 799 + if (retval >> 31) { 800 + dev_dbg(&sep->pdev->dev, 801 + "[PID%d] poll: SEP request\n", 802 + current->pid); 803 + } else { 804 + dev_dbg(&sep->pdev->dev, 805 + "[PID%d] poll: normal return\n", 806 + current->pid); 807 + sep_dump_message(sep); 808 + dev_dbg(&sep->pdev->dev, 809 + "[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n", 810 + current->pid); 811 + mask |= POLLIN | POLLRDNORM; 812 + } 813 + set_bit(SEP_LEGACY_POLL_DONE_OFFSET, &call_status->status); 814 + } else { 815 + spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag); 816 + dev_dbg(&sep->pdev->dev, 817 + "[PID%d] poll; no reply; returning mask of 0\n", 818 + current->pid); 819 + mask = 0; 820 + } 821 + 822 + end_function: 823 + return mask; 824 + } 825 + 826 + /** 827 + * sep_time_address - address in SEP memory of time 828 + * @sep: SEP device we want the address from 829 + * 830 + * Return the address of the two dwords in memory used for time 831 + * setting. 832 + */ 833 + static u32 *sep_time_address(struct sep_device *sep) 834 + { 835 + return sep->shared_addr + 836 + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES; 837 + } 838 + 839 + /** 840 + * sep_set_time - set the SEP time 841 + * @sep: the SEP we are setting the time for 842 + * 843 + * Calculates time and sets it at the predefined address. 844 + * Called with the SEP mutex held. 
845 + */ 846 + static unsigned long sep_set_time(struct sep_device *sep) 847 + { 848 + struct timeval time; 849 + u32 *time_addr; /* Address of time as seen by the kernel */ 850 + 851 + 852 + do_gettimeofday(&time); 853 + 854 + /* Set value in the SYSTEM MEMORY offset */ 855 + time_addr = sep_time_address(sep); 856 + 857 + time_addr[0] = SEP_TIME_VAL_TOKEN; 858 + time_addr[1] = time.tv_sec; 859 + 860 + dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n", 861 + current->pid, time.tv_sec); 862 + dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n", 863 + current->pid, time_addr); 864 + dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n", 865 + current->pid, sep->shared_addr); 866 + 867 + return time.tv_sec; 868 + } 869 + 870 + /** 871 + * sep_send_command_handler - kick off a command 872 + * @sep: SEP being signalled 873 + * 874 + * This function raises interrupt to SEP that signals that is has a new 875 + * command from the host 876 + * 877 + * Note that this function does fall under the ioctl lock 878 + */ 879 + int sep_send_command_handler(struct sep_device *sep) 880 + { 881 + unsigned long lock_irq_flag; 882 + u32 *msg_pool; 883 + int error = 0; 884 + 885 + /* Basic sanity check; set msg pool to start of shared area */ 886 + msg_pool = (u32 *)sep->shared_addr; 887 + msg_pool += 2; 888 + 889 + /* Look for start msg token */ 890 + if (*msg_pool != SEP_START_MSG_TOKEN) { 891 + dev_warn(&sep->pdev->dev, "start message token not present\n"); 892 + error = -EPROTO; 893 + goto end_function; 894 + } 895 + 896 + /* Do we have a reasonable size? */ 897 + msg_pool += 1; 898 + if ((*msg_pool < 2) || 899 + (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) { 900 + 901 + dev_warn(&sep->pdev->dev, "invalid message size\n"); 902 + error = -EPROTO; 903 + goto end_function; 904 + } 905 + 906 + /* Does the command look reasonable? 
*/ 907 + msg_pool += 1; 908 + if (*msg_pool < 2) { 909 + dev_warn(&sep->pdev->dev, "invalid message opcode\n"); 910 + error = -EPROTO; 911 + goto end_function; 912 + } 913 + 914 + #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM) 915 + dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n", 916 + current->pid, 917 + sep->pdev->dev.power.runtime_status); 918 + sep->in_use = 1; /* device is about to be used */ 919 + pm_runtime_get_sync(&sep->pdev->dev); 920 + #endif 921 + 922 + if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) { 923 + error = -EPROTO; 924 + goto end_function; 925 + } 926 + sep->in_use = 1; /* device is about to be used */ 927 + sep_set_time(sep); 928 + 929 + sep_dump_message(sep); 930 + 931 + /* Update counter */ 932 + spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag); 933 + sep->send_ct++; 934 + spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag); 935 + 936 + dev_dbg(&sep->pdev->dev, 937 + "[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n", 938 + current->pid, sep->send_ct, sep->reply_ct); 939 + 940 + /* Send interrupt to SEP */ 941 + sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2); 942 + 943 + end_function: 944 + return error; 945 + } 946 + 947 + /** 948 + * sep_crypto_dma - 949 + * @sep: pointer to struct sep_device 950 + * @sg: pointer to struct scatterlist 951 + * @direction: 952 + * @dma_maps: pointer to place a pointer to array of dma maps 953 + * This is filled in; anything previous there will be lost 954 + * The structure for dma maps is sep_dma_map 955 + * @returns number of dma maps on success; negative on error 956 + * 957 + * This creates the dma table from the scatterlist 958 + * It is used only for kernel crypto as it works with scatterlists 959 + * representation of data buffers 960 + * 961 + */ 962 + static int sep_crypto_dma( 963 + struct sep_device *sep, 964 + struct scatterlist *sg, 965 + struct sep_dma_map **dma_maps, 966 + enum dma_data_direction direction) 967 + 
{ 968 + struct scatterlist *temp_sg; 969 + 970 + u32 count_segment; 971 + u32 count_mapped; 972 + struct sep_dma_map *sep_dma; 973 + int ct1; 974 + 975 + if (sg->length == 0) 976 + return 0; 977 + 978 + /* Count the segments */ 979 + temp_sg = sg; 980 + count_segment = 0; 981 + while (temp_sg) { 982 + count_segment += 1; 983 + temp_sg = scatterwalk_sg_next(temp_sg); 984 + } 985 + dev_dbg(&sep->pdev->dev, 986 + "There are (hex) %x segments in sg\n", count_segment); 987 + 988 + /* DMA map segments */ 989 + count_mapped = dma_map_sg(&sep->pdev->dev, sg, 990 + count_segment, direction); 991 + 992 + dev_dbg(&sep->pdev->dev, 993 + "There are (hex) %x maps in sg\n", count_mapped); 994 + 995 + if (count_mapped == 0) { 996 + dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n"); 997 + return -ENOMEM; 998 + } 999 + 1000 + sep_dma = kmalloc(sizeof(struct sep_dma_map) * 1001 + count_mapped, GFP_ATOMIC); 1002 + 1003 + if (sep_dma == NULL) { 1004 + dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n"); 1005 + return -ENOMEM; 1006 + } 1007 + 1008 + for_each_sg(sg, temp_sg, count_mapped, ct1) { 1009 + sep_dma[ct1].dma_addr = sg_dma_address(temp_sg); 1010 + sep_dma[ct1].size = sg_dma_len(temp_sg); 1011 + dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n", 1012 + ct1, (unsigned long)sep_dma[ct1].dma_addr, 1013 + (unsigned long)sep_dma[ct1].size); 1014 + } 1015 + 1016 + *dma_maps = sep_dma; 1017 + return count_mapped; 1018 + 1019 + } 1020 + 1021 + /** 1022 + * sep_crypto_lli - 1023 + * @sep: pointer to struct sep_device 1024 + * @sg: pointer to struct scatterlist 1025 + * @data_size: total data size 1026 + * @direction: 1027 + * @dma_maps: pointer to place a pointer to array of dma maps 1028 + * This is filled in; anything previous there will be lost 1029 + * The structure for dma maps is sep_dma_map 1030 + * @lli_maps: pointer to place a pointer to array of lli maps 1031 + * This is filled in; anything previous there will be lost 1032 + * The structure for dma maps is 
sep_dma_map 1033 + * @returns number of dma maps on success; negative on error 1034 + * 1035 + * This creates the LLI table from the scatterlist 1036 + * It is only used for kernel crypto as it works exclusively 1037 + * with scatterlists (struct scatterlist) representation of 1038 + * data buffers 1039 + */ 1040 + static int sep_crypto_lli( 1041 + struct sep_device *sep, 1042 + struct scatterlist *sg, 1043 + struct sep_dma_map **maps, 1044 + struct sep_lli_entry **llis, 1045 + u32 data_size, 1046 + enum dma_data_direction direction) 1047 + { 1048 + 1049 + int ct1; 1050 + struct sep_lli_entry *sep_lli; 1051 + struct sep_dma_map *sep_map; 1052 + 1053 + int nbr_ents; 1054 + 1055 + nbr_ents = sep_crypto_dma(sep, sg, maps, direction); 1056 + if (nbr_ents <= 0) { 1057 + dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n", 1058 + nbr_ents); 1059 + return nbr_ents; 1060 + } 1061 + 1062 + sep_map = *maps; 1063 + 1064 + sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC); 1065 + 1066 + if (sep_lli == NULL) { 1067 + dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n"); 1068 + 1069 + kfree(*maps); 1070 + *maps = NULL; 1071 + return -ENOMEM; 1072 + } 1073 + 1074 + for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) { 1075 + sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr; 1076 + 1077 + /* Maximum for page is total data size */ 1078 + if (sep_map[ct1].size > data_size) 1079 + sep_map[ct1].size = data_size; 1080 + 1081 + sep_lli[ct1].block_size = (u32)sep_map[ct1].size; 1082 + } 1083 + 1084 + *llis = sep_lli; 1085 + return nbr_ents; 1086 + } 1087 + 1088 + /** 1089 + * sep_lock_kernel_pages - map kernel pages for DMA 1090 + * @sep: pointer to struct sep_device 1091 + * @kernel_virt_addr: address of data buffer in kernel 1092 + * @data_size: size of data 1093 + * @lli_array_ptr: lli array 1094 + * @in_out_flag: input into device or output from device 1095 + * 1096 + * This function locks all the physical pages of the kernel virtual buffer 1097 + * and construct a 
basic lli array, where each entry holds the physical 1098 + * page address and the size that application data holds in this page 1099 + * This function is used only during kernel crypto mod calls from within 1100 + * the kernel (when ioctl is not used) 1101 + * 1102 + * This is used only for kernel crypto. Kernel pages 1103 + * are handled differently as they are done via 1104 + * scatter gather lists (struct scatterlist) 1105 + */ 1106 + static int sep_lock_kernel_pages(struct sep_device *sep, 1107 + unsigned long kernel_virt_addr, 1108 + u32 data_size, 1109 + struct sep_lli_entry **lli_array_ptr, 1110 + int in_out_flag, 1111 + struct sep_dma_context *dma_ctx) 1112 + 1113 + { 1114 + u32 num_pages; 1115 + struct scatterlist *sg; 1116 + 1117 + /* Array of lli */ 1118 + struct sep_lli_entry *lli_array; 1119 + /* Map array */ 1120 + struct sep_dma_map *map_array; 1121 + 1122 + enum dma_data_direction direction; 1123 + 1124 + lli_array = NULL; 1125 + map_array = NULL; 1126 + 1127 + if (in_out_flag == SEP_DRIVER_IN_FLAG) { 1128 + direction = DMA_TO_DEVICE; 1129 + sg = dma_ctx->src_sg; 1130 + } else { 1131 + direction = DMA_FROM_DEVICE; 1132 + sg = dma_ctx->dst_sg; 1133 + } 1134 + 1135 + num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array, 1136 + data_size, direction); 1137 + 1138 + if (num_pages <= 0) { 1139 + dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n", 1140 + num_pages); 1141 + return -ENOMEM; 1142 + } 1143 + 1144 + /* Put mapped kernel sg into kernel resource array */ 1145 + 1146 + /* Set output params acording to the in_out flag */ 1147 + if (in_out_flag == SEP_DRIVER_IN_FLAG) { 1148 + *lli_array_ptr = lli_array; 1149 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 1150 + num_pages; 1151 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = 1152 + NULL; 1153 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = 1154 + map_array; 1155 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries = 1156 + 
num_pages; 1157 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = 1158 + dma_ctx->src_sg; 1159 + } else { 1160 + *lli_array_ptr = lli_array; 1161 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = 1162 + num_pages; 1163 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = 1164 + NULL; 1165 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = 1166 + map_array; 1167 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat]. 1168 + out_map_num_entries = num_pages; 1169 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = 1170 + dma_ctx->dst_sg; 1171 + } 1172 + 1173 + return 0; 1174 + } 1175 + 1176 + /** 1177 + * sep_lock_user_pages - lock and map user pages for DMA 1178 + * @sep: pointer to struct sep_device 1179 + * @app_virt_addr: user memory data buffer 1180 + * @data_size: size of data buffer 1181 + * @lli_array_ptr: lli array 1182 + * @in_out_flag: input or output to device 1183 + * 1184 + * This function locks all the physical pages of the application 1185 + * virtual buffer and construct a basic lli array, where each entry 1186 + * holds the physical page address and the size that application 1187 + * data holds in this physical pages 1188 + */ 1189 + static int sep_lock_user_pages(struct sep_device *sep, 1190 + u32 app_virt_addr, 1191 + u32 data_size, 1192 + struct sep_lli_entry **lli_array_ptr, 1193 + int in_out_flag, 1194 + struct sep_dma_context *dma_ctx) 1195 + 1196 + { 1197 + int error = 0; 1198 + u32 count; 1199 + int result; 1200 + /* The the page of the end address of the user space buffer */ 1201 + u32 end_page; 1202 + /* The page of the start address of the user space buffer */ 1203 + u32 start_page; 1204 + /* The range in pages */ 1205 + u32 num_pages; 1206 + /* Array of pointers to page */ 1207 + struct page **page_array; 1208 + /* Array of lli */ 1209 + struct sep_lli_entry *lli_array; 1210 + /* Map array */ 1211 + struct sep_dma_map *map_array; 1212 + 1213 + /* Set start and end pages and num pages */ 1214 + end_page = 
(app_virt_addr + data_size - 1) >> PAGE_SHIFT; 1215 + start_page = app_virt_addr >> PAGE_SHIFT; 1216 + num_pages = end_page - start_page + 1; 1217 + 1218 + dev_dbg(&sep->pdev->dev, 1219 + "[PID%d] lock user pages app_virt_addr is %x\n", 1220 + current->pid, app_virt_addr); 1221 + 1222 + dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n", 1223 + current->pid, data_size); 1224 + dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n", 1225 + current->pid, start_page); 1226 + dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n", 1227 + current->pid, end_page); 1228 + dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n", 1229 + current->pid, num_pages); 1230 + 1231 + /* Allocate array of pages structure pointers */ 1232 + page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC); 1233 + if (!page_array) { 1234 + error = -ENOMEM; 1235 + goto end_function; 1236 + } 1237 + map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC); 1238 + if (!map_array) { 1239 + dev_warn(&sep->pdev->dev, 1240 + "[PID%d] kmalloc for map_array failed\n", 1241 + current->pid); 1242 + error = -ENOMEM; 1243 + goto end_function_with_error1; 1244 + } 1245 + 1246 + lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages, 1247 + GFP_ATOMIC); 1248 + 1249 + if (!lli_array) { 1250 + dev_warn(&sep->pdev->dev, 1251 + "[PID%d] kmalloc for lli_array failed\n", 1252 + current->pid); 1253 + error = -ENOMEM; 1254 + goto end_function_with_error2; 1255 + } 1256 + 1257 + /* Convert the application virtual address into a set of physical */ 1258 + down_read(&current->mm->mmap_sem); 1259 + result = get_user_pages(current, current->mm, app_virt_addr, 1260 + num_pages, 1261 + ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 
0 : 1), 1262 + 0, page_array, NULL); 1263 + 1264 + up_read(&current->mm->mmap_sem); 1265 + 1266 + /* Check the number of pages locked - if not all then exit with error */ 1267 + if (result != num_pages) { 1268 + dev_warn(&sep->pdev->dev, 1269 + "[PID%d] not all pages locked by get_user_pages, " 1270 + "result 0x%X, num_pages 0x%X\n", 1271 + current->pid, result, num_pages); 1272 + error = -ENOMEM; 1273 + goto end_function_with_error3; 1274 + } 1275 + 1276 + dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n", 1277 + current->pid); 1278 + 1279 + /* 1280 + * Fill the array using page array data and 1281 + * map the pages - this action will also flush the cache as needed 1282 + */ 1283 + for (count = 0; count < num_pages; count++) { 1284 + /* Fill the map array */ 1285 + map_array[count].dma_addr = 1286 + dma_map_page(&sep->pdev->dev, page_array[count], 1287 + 0, PAGE_SIZE, DMA_BIDIRECTIONAL); 1288 + 1289 + map_array[count].size = PAGE_SIZE; 1290 + 1291 + /* Fill the lli array entry */ 1292 + lli_array[count].bus_address = (u32)map_array[count].dma_addr; 1293 + lli_array[count].block_size = PAGE_SIZE; 1294 + 1295 + dev_dbg(&sep->pdev->dev, 1296 + "[PID%d] lli_array[%x].bus_address is %08lx, " 1297 + "lli_array[%x].block_size is (hex) %x\n", current->pid, 1298 + count, (unsigned long)lli_array[count].bus_address, 1299 + count, lli_array[count].block_size); 1300 + } 1301 + 1302 + /* Check the offset for the first page */ 1303 + lli_array[0].bus_address = 1304 + lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK)); 1305 + 1306 + /* Check that not all the data is in the first page only */ 1307 + if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size) 1308 + lli_array[0].block_size = data_size; 1309 + else 1310 + lli_array[0].block_size = 1311 + PAGE_SIZE - (app_virt_addr & (~PAGE_MASK)); 1312 + 1313 + dev_dbg(&sep->pdev->dev, 1314 + "[PID%d] After check if page 0 has all data\n", 1315 + current->pid); 1316 + dev_dbg(&sep->pdev->dev, 1317 + "[PID%d] 
lli_array[0].bus_address is (hex) %08lx, " 1318 + "lli_array[0].block_size is (hex) %x\n", 1319 + current->pid, 1320 + (unsigned long)lli_array[0].bus_address, 1321 + lli_array[0].block_size); 1322 + 1323 + 1324 + /* Check the size of the last page */ 1325 + if (num_pages > 1) { 1326 + lli_array[num_pages - 1].block_size = 1327 + (app_virt_addr + data_size) & (~PAGE_MASK); 1328 + if (lli_array[num_pages - 1].block_size == 0) 1329 + lli_array[num_pages - 1].block_size = PAGE_SIZE; 1330 + 1331 + dev_dbg(&sep->pdev->dev, 1332 + "[PID%d] After last page size adjustment\n", 1333 + current->pid); 1334 + dev_dbg(&sep->pdev->dev, 1335 + "[PID%d] lli_array[%x].bus_address is (hex) %08lx, " 1336 + "lli_array[%x].block_size is (hex) %x\n", 1337 + current->pid, 1338 + num_pages - 1, 1339 + (unsigned long)lli_array[num_pages - 1].bus_address, 1340 + num_pages - 1, 1341 + lli_array[num_pages - 1].block_size); 1342 + } 1343 + 1344 + /* Set output params acording to the in_out flag */ 1345 + if (in_out_flag == SEP_DRIVER_IN_FLAG) { 1346 + *lli_array_ptr = lli_array; 1347 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 1348 + num_pages; 1349 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = 1350 + page_array; 1351 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = 1352 + map_array; 1353 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries = 1354 + num_pages; 1355 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL; 1356 + } else { 1357 + *lli_array_ptr = lli_array; 1358 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = 1359 + num_pages; 1360 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = 1361 + page_array; 1362 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = 1363 + map_array; 1364 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat]. 
1365 + out_map_num_entries = num_pages; 1366 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL; 1367 + } 1368 + goto end_function; 1369 + 1370 + end_function_with_error3: 1371 + /* Free lli array */ 1372 + kfree(lli_array); 1373 + 1374 + end_function_with_error2: 1375 + kfree(map_array); 1376 + 1377 + end_function_with_error1: 1378 + /* Free page array */ 1379 + kfree(page_array); 1380 + 1381 + end_function: 1382 + return error; 1383 + } 1384 + 1385 + /** 1386 + * sep_calculate_lli_table_max_size - size the LLI table 1387 + * @sep: pointer to struct sep_device 1388 + * @lli_in_array_ptr 1389 + * @num_array_entries 1390 + * @last_table_flag 1391 + * 1392 + * This function calculates the size of data that can be inserted into 1393 + * the lli table from this array, such that either the table is full 1394 + * (all entries are entered), or there are no more entries in the 1395 + * lli array 1396 + */ 1397 + static u32 sep_calculate_lli_table_max_size(struct sep_device *sep, 1398 + struct sep_lli_entry *lli_in_array_ptr, 1399 + u32 num_array_entries, 1400 + u32 *last_table_flag) 1401 + { 1402 + u32 counter; 1403 + /* Table data size */ 1404 + u32 table_data_size = 0; 1405 + /* Data size for the next table */ 1406 + u32 next_table_data_size; 1407 + 1408 + *last_table_flag = 0; 1409 + 1410 + /* 1411 + * Calculate the data in the out lli table till we fill the whole 1412 + * table or till the data has ended 1413 + */ 1414 + for (counter = 0; 1415 + (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && 1416 + (counter < num_array_entries); counter++) 1417 + table_data_size += lli_in_array_ptr[counter].block_size; 1418 + 1419 + /* 1420 + * Check if we reached the last entry, 1421 + * meaning this ia the last table to build, 1422 + * and no need to check the block alignment 1423 + */ 1424 + if (counter == num_array_entries) { 1425 + /* Set the last table flag */ 1426 + *last_table_flag = 1; 1427 + goto end_function; 1428 + } 1429 + 1430 + /* 1431 + * Calculate 
the data size of the next table. 1432 + * Stop if no entries left or if data size is more the DMA restriction 1433 + */ 1434 + next_table_data_size = 0; 1435 + for (; counter < num_array_entries; counter++) { 1436 + next_table_data_size += lli_in_array_ptr[counter].block_size; 1437 + if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) 1438 + break; 1439 + } 1440 + 1441 + /* 1442 + * Check if the next table data size is less then DMA rstriction. 1443 + * if it is - recalculate the current table size, so that the next 1444 + * table data size will be adaquete for DMA 1445 + */ 1446 + if (next_table_data_size && 1447 + next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) 1448 + 1449 + table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE - 1450 + next_table_data_size); 1451 + 1452 + end_function: 1453 + return table_data_size; 1454 + } 1455 + 1456 + /** 1457 + * sep_build_lli_table - build an lli array for the given table 1458 + * @sep: pointer to struct sep_device 1459 + * @lli_array_ptr: pointer to lli array 1460 + * @lli_table_ptr: pointer to lli table 1461 + * @num_processed_entries_ptr: pointer to number of entries 1462 + * @num_table_entries_ptr: pointer to number of tables 1463 + * @table_data_size: total data size 1464 + * 1465 + * Builds ant lli table from the lli_array according to 1466 + * the given size of data 1467 + */ 1468 + static void sep_build_lli_table(struct sep_device *sep, 1469 + struct sep_lli_entry *lli_array_ptr, 1470 + struct sep_lli_entry *lli_table_ptr, 1471 + u32 *num_processed_entries_ptr, 1472 + u32 *num_table_entries_ptr, 1473 + u32 table_data_size) 1474 + { 1475 + /* Current table data size */ 1476 + u32 curr_table_data_size; 1477 + /* Counter of lli array entry */ 1478 + u32 array_counter; 1479 + 1480 + /* Init current table data size and lli array entry counter */ 1481 + curr_table_data_size = 0; 1482 + array_counter = 0; 1483 + *num_table_entries_ptr = 1; 1484 + 1485 + dev_dbg(&sep->pdev->dev, 1486 + "[PID%d] 
build lli table table_data_size: (hex) %x\n", 1487 + current->pid, table_data_size); 1488 + 1489 + /* Fill the table till table size reaches the needed amount */ 1490 + while (curr_table_data_size < table_data_size) { 1491 + /* Update the number of entries in table */ 1492 + (*num_table_entries_ptr)++; 1493 + 1494 + lli_table_ptr->bus_address = 1495 + cpu_to_le32(lli_array_ptr[array_counter].bus_address); 1496 + 1497 + lli_table_ptr->block_size = 1498 + cpu_to_le32(lli_array_ptr[array_counter].block_size); 1499 + 1500 + curr_table_data_size += lli_array_ptr[array_counter].block_size; 1501 + 1502 + dev_dbg(&sep->pdev->dev, 1503 + "[PID%d] lli_table_ptr is %p\n", 1504 + current->pid, lli_table_ptr); 1505 + dev_dbg(&sep->pdev->dev, 1506 + "[PID%d] lli_table_ptr->bus_address: %08lx\n", 1507 + current->pid, 1508 + (unsigned long)lli_table_ptr->bus_address); 1509 + 1510 + dev_dbg(&sep->pdev->dev, 1511 + "[PID%d] lli_table_ptr->block_size is (hex) %x\n", 1512 + current->pid, lli_table_ptr->block_size); 1513 + 1514 + /* Check for overflow of the table data */ 1515 + if (curr_table_data_size > table_data_size) { 1516 + dev_dbg(&sep->pdev->dev, 1517 + "[PID%d] curr_table_data_size too large\n", 1518 + current->pid); 1519 + 1520 + /* Update the size of block in the table */ 1521 + lli_table_ptr->block_size = 1522 + cpu_to_le32(lli_table_ptr->block_size) - 1523 + (curr_table_data_size - table_data_size); 1524 + 1525 + /* Update the physical address in the lli array */ 1526 + lli_array_ptr[array_counter].bus_address += 1527 + cpu_to_le32(lli_table_ptr->block_size); 1528 + 1529 + /* Update the block size left in the lli array */ 1530 + lli_array_ptr[array_counter].block_size = 1531 + (curr_table_data_size - table_data_size); 1532 + } else 1533 + /* Advance to the next entry in the lli_array */ 1534 + array_counter++; 1535 + 1536 + dev_dbg(&sep->pdev->dev, 1537 + "[PID%d] lli_table_ptr->bus_address is %08lx\n", 1538 + current->pid, 1539 + (unsigned 
long)lli_table_ptr->bus_address); 1540 + dev_dbg(&sep->pdev->dev, 1541 + "[PID%d] lli_table_ptr->block_size is (hex) %x\n", 1542 + current->pid, 1543 + lli_table_ptr->block_size); 1544 + 1545 + /* Move to the next entry in table */ 1546 + lli_table_ptr++; 1547 + } 1548 + 1549 + /* Set the info entry to default */ 1550 + lli_table_ptr->bus_address = 0xffffffff; 1551 + lli_table_ptr->block_size = 0; 1552 + 1553 + /* Set the output parameter */ 1554 + *num_processed_entries_ptr += array_counter; 1555 + 1556 + } 1557 + 1558 + /** 1559 + * sep_shared_area_virt_to_bus - map shared area to bus address 1560 + * @sep: pointer to struct sep_device 1561 + * @virt_address: virtual address to convert 1562 + * 1563 + * This functions returns the physical address inside shared area according 1564 + * to the virtual address. It can be either on the externa RAM device 1565 + * (ioremapped), or on the system RAM 1566 + * This implementation is for the external RAM 1567 + */ 1568 + static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep, 1569 + void *virt_address) 1570 + { 1571 + dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n", 1572 + current->pid, virt_address); 1573 + dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n", 1574 + current->pid, 1575 + (unsigned long) 1576 + sep->shared_bus + (virt_address - sep->shared_addr)); 1577 + 1578 + return sep->shared_bus + (size_t)(virt_address - sep->shared_addr); 1579 + } 1580 + 1581 + /** 1582 + * sep_shared_area_bus_to_virt - map shared area bus address to kernel 1583 + * @sep: pointer to struct sep_device 1584 + * @bus_address: bus address to convert 1585 + * 1586 + * This functions returns the virtual address inside shared area 1587 + * according to the physical address. 
It can be either on the 1588 + * externa RAM device (ioremapped), or on the system RAM 1589 + * This implementation is for the external RAM 1590 + */ 1591 + static void *sep_shared_area_bus_to_virt(struct sep_device *sep, 1592 + dma_addr_t bus_address) 1593 + { 1594 + dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n", 1595 + current->pid, 1596 + (unsigned long)bus_address, (unsigned long)(sep->shared_addr + 1597 + (size_t)(bus_address - sep->shared_bus))); 1598 + 1599 + return sep->shared_addr + (size_t)(bus_address - sep->shared_bus); 1600 + } 1601 + 1602 + /** 1603 + * sep_debug_print_lli_tables - dump LLI table 1604 + * @sep: pointer to struct sep_device 1605 + * @lli_table_ptr: pointer to sep_lli_entry 1606 + * @num_table_entries: number of entries 1607 + * @table_data_size: total data size 1608 + * 1609 + * Walk the the list of the print created tables and print all the data 1610 + */ 1611 + static void sep_debug_print_lli_tables(struct sep_device *sep, 1612 + struct sep_lli_entry *lli_table_ptr, 1613 + unsigned long num_table_entries, 1614 + unsigned long table_data_size) 1615 + { 1616 + unsigned long table_count = 1; 1617 + unsigned long entries_count = 0; 1618 + 1619 + dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n", 1620 + current->pid); 1621 + if (num_table_entries == 0) { 1622 + dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n", 1623 + current->pid); 1624 + return; 1625 + } 1626 + 1627 + while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) { 1628 + dev_dbg(&sep->pdev->dev, 1629 + "[PID%d] lli table %08lx, " 1630 + "table_data_size is (hex) %lx\n", 1631 + current->pid, table_count, table_data_size); 1632 + dev_dbg(&sep->pdev->dev, 1633 + "[PID%d] num_table_entries is (hex) %lx\n", 1634 + current->pid, num_table_entries); 1635 + 1636 + /* Print entries of the table (without info entry) */ 1637 + for (entries_count = 0; entries_count < num_table_entries; 1638 + entries_count++, lli_table_ptr++) { 1639 
+ 1640 + dev_dbg(&sep->pdev->dev, 1641 + "[PID%d] lli_table_ptr address is %08lx\n", 1642 + current->pid, 1643 + (unsigned long) lli_table_ptr); 1644 + 1645 + dev_dbg(&sep->pdev->dev, 1646 + "[PID%d] phys address is %08lx " 1647 + "block size is (hex) %x\n", current->pid, 1648 + (unsigned long)lli_table_ptr->bus_address, 1649 + lli_table_ptr->block_size); 1650 + } 1651 + 1652 + /* Point to the info entry */ 1653 + lli_table_ptr--; 1654 + 1655 + dev_dbg(&sep->pdev->dev, 1656 + "[PID%d] phys lli_table_ptr->block_size " 1657 + "is (hex) %x\n", 1658 + current->pid, 1659 + lli_table_ptr->block_size); 1660 + 1661 + dev_dbg(&sep->pdev->dev, 1662 + "[PID%d] phys lli_table_ptr->physical_address " 1663 + "is %08lx\n", 1664 + current->pid, 1665 + (unsigned long)lli_table_ptr->bus_address); 1666 + 1667 + 1668 + table_data_size = lli_table_ptr->block_size & 0xffffff; 1669 + num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff; 1670 + 1671 + dev_dbg(&sep->pdev->dev, 1672 + "[PID%d] phys table_data_size is " 1673 + "(hex) %lx num_table_entries is" 1674 + " %lx bus_address is%lx\n", 1675 + current->pid, 1676 + table_data_size, 1677 + num_table_entries, 1678 + (unsigned long)lli_table_ptr->bus_address); 1679 + 1680 + if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff) 1681 + lli_table_ptr = (struct sep_lli_entry *) 1682 + sep_shared_bus_to_virt(sep, 1683 + (unsigned long)lli_table_ptr->bus_address); 1684 + 1685 + table_count++; 1686 + } 1687 + dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n", 1688 + current->pid); 1689 + } 1690 + 1691 + 1692 + /** 1693 + * sep_prepare_empty_lli_table - create a blank LLI table 1694 + * @sep: pointer to struct sep_device 1695 + * @lli_table_addr_ptr: pointer to lli table 1696 + * @num_entries_ptr: pointer to number of entries 1697 + * @table_data_size_ptr: point to table data size 1698 + * @dmatables_region: Optional buffer for DMA tables 1699 + * @dma_ctx: DMA context 1700 + * 1701 + * This function creates empty 
lli tables when there is no data 1702 + */ 1703 + static void sep_prepare_empty_lli_table(struct sep_device *sep, 1704 + dma_addr_t *lli_table_addr_ptr, 1705 + u32 *num_entries_ptr, 1706 + u32 *table_data_size_ptr, 1707 + void **dmatables_region, 1708 + struct sep_dma_context *dma_ctx) 1709 + { 1710 + struct sep_lli_entry *lli_table_ptr; 1711 + 1712 + /* Find the area for new table */ 1713 + lli_table_ptr = 1714 + (struct sep_lli_entry *)(sep->shared_addr + 1715 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + 1716 + dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) * 1717 + SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP); 1718 + 1719 + if (dmatables_region && *dmatables_region) 1720 + lli_table_ptr = *dmatables_region; 1721 + 1722 + lli_table_ptr->bus_address = 0; 1723 + lli_table_ptr->block_size = 0; 1724 + 1725 + lli_table_ptr++; 1726 + lli_table_ptr->bus_address = 0xFFFFFFFF; 1727 + lli_table_ptr->block_size = 0; 1728 + 1729 + /* Set the output parameter value */ 1730 + *lli_table_addr_ptr = sep->shared_bus + 1731 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + 1732 + dma_ctx->num_lli_tables_created * 1733 + sizeof(struct sep_lli_entry) * 1734 + SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; 1735 + 1736 + /* Set the num of entries and table data size for empty table */ 1737 + *num_entries_ptr = 2; 1738 + *table_data_size_ptr = 0; 1739 + 1740 + /* Update the number of created tables */ 1741 + dma_ctx->num_lli_tables_created++; 1742 + } 1743 + 1744 + /** 1745 + * sep_prepare_input_dma_table - prepare input DMA mappings 1746 + * @sep: pointer to struct sep_device 1747 + * @data_size: 1748 + * @block_size: 1749 + * @lli_table_ptr: 1750 + * @num_entries_ptr: 1751 + * @table_data_size_ptr: 1752 + * @is_kva: set for kernel data (kernel cryptio call) 1753 + * 1754 + * This function prepares only input DMA table for synhronic symmetric 1755 + * operations (HASH) 1756 + * Note that all bus addresses that are passed to the SEP 1757 + * are in 32 bit format; the SEP is a 32 bit device 1758 
+ */ 1759 + static int sep_prepare_input_dma_table(struct sep_device *sep, 1760 + unsigned long app_virt_addr, 1761 + u32 data_size, 1762 + u32 block_size, 1763 + dma_addr_t *lli_table_ptr, 1764 + u32 *num_entries_ptr, 1765 + u32 *table_data_size_ptr, 1766 + bool is_kva, 1767 + void **dmatables_region, 1768 + struct sep_dma_context *dma_ctx 1769 + ) 1770 + { 1771 + int error = 0; 1772 + /* Pointer to the info entry of the table - the last entry */ 1773 + struct sep_lli_entry *info_entry_ptr; 1774 + /* Array of pointers to page */ 1775 + struct sep_lli_entry *lli_array_ptr; 1776 + /* Points to the first entry to be processed in the lli_in_array */ 1777 + u32 current_entry = 0; 1778 + /* Num entries in the virtual buffer */ 1779 + u32 sep_lli_entries = 0; 1780 + /* Lli table pointer */ 1781 + struct sep_lli_entry *in_lli_table_ptr; 1782 + /* The total data in one table */ 1783 + u32 table_data_size = 0; 1784 + /* Flag for last table */ 1785 + u32 last_table_flag = 0; 1786 + /* Number of entries in lli table */ 1787 + u32 num_entries_in_table = 0; 1788 + /* Next table address */ 1789 + void *lli_table_alloc_addr = NULL; 1790 + void *dma_lli_table_alloc_addr = NULL; 1791 + void *dma_in_lli_table_ptr = NULL; 1792 + 1793 + dev_dbg(&sep->pdev->dev, "[PID%d] prepare intput dma " 1794 + "tbl data size: (hex) %x\n", 1795 + current->pid, data_size); 1796 + 1797 + dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n", 1798 + current->pid, block_size); 1799 + 1800 + /* Initialize the pages pointers */ 1801 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL; 1802 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0; 1803 + 1804 + /* Set the kernel address for first table to be allocated */ 1805 + lli_table_alloc_addr = (void *)(sep->shared_addr + 1806 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + 1807 + dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) * 1808 + SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP); 1809 + 1810 + if (data_size == 0) { 
1811 + if (dmatables_region) { 1812 + error = sep_allocate_dmatables_region(sep, 1813 + dmatables_region, 1814 + dma_ctx, 1815 + 1); 1816 + if (error) 1817 + return error; 1818 + } 1819 + /* Special case - create meptu table - 2 entries, zero data */ 1820 + sep_prepare_empty_lli_table(sep, lli_table_ptr, 1821 + num_entries_ptr, table_data_size_ptr, 1822 + dmatables_region, dma_ctx); 1823 + goto update_dcb_counter; 1824 + } 1825 + 1826 + /* Check if the pages are in Kernel Virtual Address layout */ 1827 + if (is_kva == true) 1828 + error = sep_lock_kernel_pages(sep, app_virt_addr, 1829 + data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG, 1830 + dma_ctx); 1831 + else 1832 + /* 1833 + * Lock the pages of the user buffer 1834 + * and translate them to pages 1835 + */ 1836 + error = sep_lock_user_pages(sep, app_virt_addr, 1837 + data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG, 1838 + dma_ctx); 1839 + 1840 + if (error) 1841 + goto end_function; 1842 + 1843 + dev_dbg(&sep->pdev->dev, 1844 + "[PID%d] output sep_in_num_pages is (hex) %x\n", 1845 + current->pid, 1846 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages); 1847 + 1848 + current_entry = 0; 1849 + info_entry_ptr = NULL; 1850 + 1851 + sep_lli_entries = 1852 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages; 1853 + 1854 + dma_lli_table_alloc_addr = lli_table_alloc_addr; 1855 + if (dmatables_region) { 1856 + error = sep_allocate_dmatables_region(sep, 1857 + dmatables_region, 1858 + dma_ctx, 1859 + sep_lli_entries); 1860 + if (error) 1861 + return error; 1862 + lli_table_alloc_addr = *dmatables_region; 1863 + } 1864 + 1865 + /* Loop till all the entries in in array are processed */ 1866 + while (current_entry < sep_lli_entries) { 1867 + 1868 + /* Set the new input and output tables */ 1869 + in_lli_table_ptr = 1870 + (struct sep_lli_entry *)lli_table_alloc_addr; 1871 + dma_in_lli_table_ptr = 1872 + (struct sep_lli_entry *)dma_lli_table_alloc_addr; 1873 + 1874 + lli_table_alloc_addr += sizeof(struct 
sep_lli_entry) * 1875 + SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; 1876 + dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) * 1877 + SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; 1878 + 1879 + if (dma_lli_table_alloc_addr > 1880 + ((void *)sep->shared_addr + 1881 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + 1882 + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) { 1883 + 1884 + error = -ENOMEM; 1885 + goto end_function_error; 1886 + 1887 + } 1888 + 1889 + /* Update the number of created tables */ 1890 + dma_ctx->num_lli_tables_created++; 1891 + 1892 + /* Calculate the maximum size of data for input table */ 1893 + table_data_size = sep_calculate_lli_table_max_size(sep, 1894 + &lli_array_ptr[current_entry], 1895 + (sep_lli_entries - current_entry), 1896 + &last_table_flag); 1897 + 1898 + /* 1899 + * If this is not the last table - 1900 + * then allign it to the block size 1901 + */ 1902 + if (!last_table_flag) 1903 + table_data_size = 1904 + (table_data_size / block_size) * block_size; 1905 + 1906 + dev_dbg(&sep->pdev->dev, 1907 + "[PID%d] output table_data_size is (hex) %x\n", 1908 + current->pid, 1909 + table_data_size); 1910 + 1911 + /* Construct input lli table */ 1912 + sep_build_lli_table(sep, &lli_array_ptr[current_entry], 1913 + in_lli_table_ptr, 1914 + &current_entry, &num_entries_in_table, table_data_size); 1915 + 1916 + if (info_entry_ptr == NULL) { 1917 + 1918 + /* Set the output parameters to physical addresses */ 1919 + *lli_table_ptr = sep_shared_area_virt_to_bus(sep, 1920 + dma_in_lli_table_ptr); 1921 + *num_entries_ptr = num_entries_in_table; 1922 + *table_data_size_ptr = table_data_size; 1923 + 1924 + dev_dbg(&sep->pdev->dev, 1925 + "[PID%d] output lli_table_in_ptr is %08lx\n", 1926 + current->pid, 1927 + (unsigned long)*lli_table_ptr); 1928 + 1929 + } else { 1930 + /* Update the info entry of the previous in table */ 1931 + info_entry_ptr->bus_address = 1932 + sep_shared_area_virt_to_bus(sep, 1933 + dma_in_lli_table_ptr); 1934 + info_entry_ptr->block_size = 1935 + 
((num_entries_in_table) << 24) | 1936 + (table_data_size); 1937 + } 1938 + /* Save the pointer to the info entry of the current tables */ 1939 + info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1; 1940 + } 1941 + /* Print input tables */ 1942 + if (!dmatables_region) { 1943 + sep_debug_print_lli_tables(sep, (struct sep_lli_entry *) 1944 + sep_shared_area_bus_to_virt(sep, *lli_table_ptr), 1945 + *num_entries_ptr, *table_data_size_ptr); 1946 + } 1947 + 1948 + /* The array of the pages */ 1949 + kfree(lli_array_ptr); 1950 + 1951 + update_dcb_counter: 1952 + /* Update DCB counter */ 1953 + dma_ctx->nr_dcb_creat++; 1954 + goto end_function; 1955 + 1956 + end_function_error: 1957 + /* Free all the allocated resources */ 1958 + kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array); 1959 + kfree(lli_array_ptr); 1960 + kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array); 1961 + 1962 + end_function: 1963 + return error; 1964 + 1965 + } 1966 + 1967 + /** 1968 + * sep_construct_dma_tables_from_lli - prepare AES/DES mappings 1969 + * @sep: pointer to struct sep_device 1970 + * @lli_in_array: 1971 + * @sep_in_lli_entries: 1972 + * @lli_out_array: 1973 + * @sep_out_lli_entries 1974 + * @block_size 1975 + * @lli_table_in_ptr 1976 + * @lli_table_out_ptr 1977 + * @in_num_entries_ptr 1978 + * @out_num_entries_ptr 1979 + * @table_data_size_ptr 1980 + * 1981 + * This function creates the input and output DMA tables for 1982 + * symmetric operations (AES/DES) according to the block 1983 + * size from LLI arays 1984 + * Note that all bus addresses that are passed to the SEP 1985 + * are in 32 bit format; the SEP is a 32 bit device 1986 + */ 1987 + static int sep_construct_dma_tables_from_lli( 1988 + struct sep_device *sep, 1989 + struct sep_lli_entry *lli_in_array, 1990 + u32 sep_in_lli_entries, 1991 + struct sep_lli_entry *lli_out_array, 1992 + u32 sep_out_lli_entries, 1993 + u32 block_size, 1994 + dma_addr_t *lli_table_in_ptr, 1995 + dma_addr_t 
*lli_table_out_ptr, 1996 + u32 *in_num_entries_ptr, 1997 + u32 *out_num_entries_ptr, 1998 + u32 *table_data_size_ptr, 1999 + void **dmatables_region, 2000 + struct sep_dma_context *dma_ctx) 2001 + { 2002 + /* Points to the area where next lli table can be allocated */ 2003 + void *lli_table_alloc_addr = NULL; 2004 + /* 2005 + * Points to the area in shared region where next lli table 2006 + * can be allocated 2007 + */ 2008 + void *dma_lli_table_alloc_addr = NULL; 2009 + /* Input lli table in dmatables_region or shared region */ 2010 + struct sep_lli_entry *in_lli_table_ptr = NULL; 2011 + /* Input lli table location in the shared region */ 2012 + struct sep_lli_entry *dma_in_lli_table_ptr = NULL; 2013 + /* Output lli table in dmatables_region or shared region */ 2014 + struct sep_lli_entry *out_lli_table_ptr = NULL; 2015 + /* Output lli table location in the shared region */ 2016 + struct sep_lli_entry *dma_out_lli_table_ptr = NULL; 2017 + /* Pointer to the info entry of the table - the last entry */ 2018 + struct sep_lli_entry *info_in_entry_ptr = NULL; 2019 + /* Pointer to the info entry of the table - the last entry */ 2020 + struct sep_lli_entry *info_out_entry_ptr = NULL; 2021 + /* Points to the first entry to be processed in the lli_in_array */ 2022 + u32 current_in_entry = 0; 2023 + /* Points to the first entry to be processed in the lli_out_array */ 2024 + u32 current_out_entry = 0; 2025 + /* Max size of the input table */ 2026 + u32 in_table_data_size = 0; 2027 + /* Max size of the output table */ 2028 + u32 out_table_data_size = 0; 2029 + /* Flag te signifies if this is the last tables build */ 2030 + u32 last_table_flag = 0; 2031 + /* The data size that should be in table */ 2032 + u32 table_data_size = 0; 2033 + /* Number of etnries in the input table */ 2034 + u32 num_entries_in_table = 0; 2035 + /* Number of etnries in the output table */ 2036 + u32 num_entries_out_table = 0; 2037 + 2038 + if (!dma_ctx) { 2039 + dev_warn(&sep->pdev->dev, "DMA context 
uninitialized\n"); 2040 + return -EINVAL; 2041 + } 2042 + 2043 + /* Initiate to point after the message area */ 2044 + lli_table_alloc_addr = (void *)(sep->shared_addr + 2045 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + 2046 + (dma_ctx->num_lli_tables_created * 2047 + (sizeof(struct sep_lli_entry) * 2048 + SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP))); 2049 + dma_lli_table_alloc_addr = lli_table_alloc_addr; 2050 + 2051 + if (dmatables_region) { 2052 + /* 2 for both in+out table */ 2053 + if (sep_allocate_dmatables_region(sep, 2054 + dmatables_region, 2055 + dma_ctx, 2056 + 2*sep_in_lli_entries)) 2057 + return -ENOMEM; 2058 + lli_table_alloc_addr = *dmatables_region; 2059 + } 2060 + 2061 + /* Loop till all the entries in in array are not processed */ 2062 + while (current_in_entry < sep_in_lli_entries) { 2063 + /* Set the new input and output tables */ 2064 + in_lli_table_ptr = 2065 + (struct sep_lli_entry *)lli_table_alloc_addr; 2066 + dma_in_lli_table_ptr = 2067 + (struct sep_lli_entry *)dma_lli_table_alloc_addr; 2068 + 2069 + lli_table_alloc_addr += sizeof(struct sep_lli_entry) * 2070 + SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; 2071 + dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) * 2072 + SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; 2073 + 2074 + /* Set the first output tables */ 2075 + out_lli_table_ptr = 2076 + (struct sep_lli_entry *)lli_table_alloc_addr; 2077 + dma_out_lli_table_ptr = 2078 + (struct sep_lli_entry *)dma_lli_table_alloc_addr; 2079 + 2080 + /* Check if the DMA table area limit was overrun */ 2081 + if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) * 2082 + SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) > 2083 + ((void *)sep->shared_addr + 2084 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + 2085 + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) { 2086 + 2087 + dev_warn(&sep->pdev->dev, "dma table limit overrun\n"); 2088 + return -ENOMEM; 2089 + } 2090 + 2091 + /* Update the number of the lli tables created */ 2092 + dma_ctx->num_lli_tables_created += 2; 2093 + 2094 
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry) * 2095 + SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; 2096 + dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) * 2097 + SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; 2098 + 2099 + /* Calculate the maximum size of data for input table */ 2100 + in_table_data_size = 2101 + sep_calculate_lli_table_max_size(sep, 2102 + &lli_in_array[current_in_entry], 2103 + (sep_in_lli_entries - current_in_entry), 2104 + &last_table_flag); 2105 + 2106 + /* Calculate the maximum size of data for output table */ 2107 + out_table_data_size = 2108 + sep_calculate_lli_table_max_size(sep, 2109 + &lli_out_array[current_out_entry], 2110 + (sep_out_lli_entries - current_out_entry), 2111 + &last_table_flag); 2112 + 2113 + if (!last_table_flag) { 2114 + in_table_data_size = (in_table_data_size / 2115 + block_size) * block_size; 2116 + out_table_data_size = (out_table_data_size / 2117 + block_size) * block_size; 2118 + } 2119 + 2120 + table_data_size = in_table_data_size; 2121 + if (table_data_size > out_table_data_size) 2122 + table_data_size = out_table_data_size; 2123 + 2124 + dev_dbg(&sep->pdev->dev, 2125 + "[PID%d] construct tables from lli" 2126 + " in_table_data_size is (hex) %x\n", current->pid, 2127 + in_table_data_size); 2128 + 2129 + dev_dbg(&sep->pdev->dev, 2130 + "[PID%d] construct tables from lli" 2131 + "out_table_data_size is (hex) %x\n", current->pid, 2132 + out_table_data_size); 2133 + 2134 + /* Construct input lli table */ 2135 + sep_build_lli_table(sep, &lli_in_array[current_in_entry], 2136 + in_lli_table_ptr, 2137 + &current_in_entry, 2138 + &num_entries_in_table, 2139 + table_data_size); 2140 + 2141 + /* Construct output lli table */ 2142 + sep_build_lli_table(sep, &lli_out_array[current_out_entry], 2143 + out_lli_table_ptr, 2144 + &current_out_entry, 2145 + &num_entries_out_table, 2146 + table_data_size); 2147 + 2148 + /* If info entry is null - this is the first table built */ 2149 + if (info_in_entry_ptr == NULL) { 2150 + /* 
Set the output parameters to physical addresses */ 2151 + *lli_table_in_ptr = 2152 + sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr); 2153 + 2154 + *in_num_entries_ptr = num_entries_in_table; 2155 + 2156 + *lli_table_out_ptr = 2157 + sep_shared_area_virt_to_bus(sep, 2158 + dma_out_lli_table_ptr); 2159 + 2160 + *out_num_entries_ptr = num_entries_out_table; 2161 + *table_data_size_ptr = table_data_size; 2162 + 2163 + dev_dbg(&sep->pdev->dev, 2164 + "[PID%d] output lli_table_in_ptr is %08lx\n", 2165 + current->pid, 2166 + (unsigned long)*lli_table_in_ptr); 2167 + dev_dbg(&sep->pdev->dev, 2168 + "[PID%d] output lli_table_out_ptr is %08lx\n", 2169 + current->pid, 2170 + (unsigned long)*lli_table_out_ptr); 2171 + } else { 2172 + /* Update the info entry of the previous in table */ 2173 + info_in_entry_ptr->bus_address = 2174 + sep_shared_area_virt_to_bus(sep, 2175 + dma_in_lli_table_ptr); 2176 + 2177 + info_in_entry_ptr->block_size = 2178 + ((num_entries_in_table) << 24) | 2179 + (table_data_size); 2180 + 2181 + /* Update the info entry of the previous in table */ 2182 + info_out_entry_ptr->bus_address = 2183 + sep_shared_area_virt_to_bus(sep, 2184 + dma_out_lli_table_ptr); 2185 + 2186 + info_out_entry_ptr->block_size = 2187 + ((num_entries_out_table) << 24) | 2188 + (table_data_size); 2189 + 2190 + dev_dbg(&sep->pdev->dev, 2191 + "[PID%d] output lli_table_in_ptr:%08lx %08x\n", 2192 + current->pid, 2193 + (unsigned long)info_in_entry_ptr->bus_address, 2194 + info_in_entry_ptr->block_size); 2195 + 2196 + dev_dbg(&sep->pdev->dev, 2197 + "[PID%d] output lli_table_out_ptr:" 2198 + "%08lx %08x\n", 2199 + current->pid, 2200 + (unsigned long)info_out_entry_ptr->bus_address, 2201 + info_out_entry_ptr->block_size); 2202 + } 2203 + 2204 + /* Save the pointer to the info entry of the current tables */ 2205 + info_in_entry_ptr = in_lli_table_ptr + 2206 + num_entries_in_table - 1; 2207 + info_out_entry_ptr = out_lli_table_ptr + 2208 + num_entries_out_table - 1; 2209 + 2210 + 
dev_dbg(&sep->pdev->dev, 2211 + "[PID%d] output num_entries_out_table is %x\n", 2212 + current->pid, 2213 + (u32)num_entries_out_table); 2214 + dev_dbg(&sep->pdev->dev, 2215 + "[PID%d] output info_in_entry_ptr is %lx\n", 2216 + current->pid, 2217 + (unsigned long)info_in_entry_ptr); 2218 + dev_dbg(&sep->pdev->dev, 2219 + "[PID%d] output info_out_entry_ptr is %lx\n", 2220 + current->pid, 2221 + (unsigned long)info_out_entry_ptr); 2222 + } 2223 + 2224 + /* Print input tables */ 2225 + if (!dmatables_region) { 2226 + sep_debug_print_lli_tables( 2227 + sep, 2228 + (struct sep_lli_entry *) 2229 + sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr), 2230 + *in_num_entries_ptr, 2231 + *table_data_size_ptr); 2232 + } 2233 + 2234 + /* Print output tables */ 2235 + if (!dmatables_region) { 2236 + sep_debug_print_lli_tables( 2237 + sep, 2238 + (struct sep_lli_entry *) 2239 + sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr), 2240 + *out_num_entries_ptr, 2241 + *table_data_size_ptr); 2242 + } 2243 + 2244 + return 0; 2245 + } 2246 + 2247 + /** 2248 + * sep_prepare_input_output_dma_table - prepare DMA I/O table 2249 + * @app_virt_in_addr: 2250 + * @app_virt_out_addr: 2251 + * @data_size: 2252 + * @block_size: 2253 + * @lli_table_in_ptr: 2254 + * @lli_table_out_ptr: 2255 + * @in_num_entries_ptr: 2256 + * @out_num_entries_ptr: 2257 + * @table_data_size_ptr: 2258 + * @is_kva: set for kernel data; used only for kernel crypto module 2259 + * 2260 + * This function builds input and output DMA tables for synhronic 2261 + * symmetric operations (AES, DES, HASH). 
It also checks that each table 2262 + * is of the modular block size 2263 + * Note that all bus addresses that are passed to the SEP 2264 + * are in 32 bit format; the SEP is a 32 bit device 2265 + */ 2266 + static int sep_prepare_input_output_dma_table(struct sep_device *sep, 2267 + unsigned long app_virt_in_addr, 2268 + unsigned long app_virt_out_addr, 2269 + u32 data_size, 2270 + u32 block_size, 2271 + dma_addr_t *lli_table_in_ptr, 2272 + dma_addr_t *lli_table_out_ptr, 2273 + u32 *in_num_entries_ptr, 2274 + u32 *out_num_entries_ptr, 2275 + u32 *table_data_size_ptr, 2276 + bool is_kva, 2277 + void **dmatables_region, 2278 + struct sep_dma_context *dma_ctx) 2279 + 2280 + { 2281 + int error = 0; 2282 + /* Array of pointers of page */ 2283 + struct sep_lli_entry *lli_in_array; 2284 + /* Array of pointers of page */ 2285 + struct sep_lli_entry *lli_out_array; 2286 + 2287 + if (!dma_ctx) { 2288 + error = -EINVAL; 2289 + goto end_function; 2290 + } 2291 + 2292 + if (data_size == 0) { 2293 + /* Prepare empty table for input and output */ 2294 + if (dmatables_region) { 2295 + error = sep_allocate_dmatables_region( 2296 + sep, 2297 + dmatables_region, 2298 + dma_ctx, 2299 + 2); 2300 + if (error) 2301 + goto end_function; 2302 + } 2303 + sep_prepare_empty_lli_table(sep, lli_table_in_ptr, 2304 + in_num_entries_ptr, table_data_size_ptr, 2305 + dmatables_region, dma_ctx); 2306 + 2307 + sep_prepare_empty_lli_table(sep, lli_table_out_ptr, 2308 + out_num_entries_ptr, table_data_size_ptr, 2309 + dmatables_region, dma_ctx); 2310 + 2311 + goto update_dcb_counter; 2312 + } 2313 + 2314 + /* Initialize the pages pointers */ 2315 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL; 2316 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL; 2317 + 2318 + /* Lock the pages of the buffer and translate them to pages */ 2319 + if (is_kva == true) { 2320 + dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n", 2321 + current->pid); 2322 + error = 
sep_lock_kernel_pages(sep, app_virt_in_addr, 2323 + data_size, &lli_in_array, SEP_DRIVER_IN_FLAG, 2324 + dma_ctx); 2325 + if (error) { 2326 + dev_warn(&sep->pdev->dev, 2327 + "[PID%d] sep_lock_kernel_pages for input " 2328 + "virtual buffer failed\n", current->pid); 2329 + 2330 + goto end_function; 2331 + } 2332 + 2333 + dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n", 2334 + current->pid); 2335 + error = sep_lock_kernel_pages(sep, app_virt_out_addr, 2336 + data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG, 2337 + dma_ctx); 2338 + 2339 + if (error) { 2340 + dev_warn(&sep->pdev->dev, 2341 + "[PID%d] sep_lock_kernel_pages for output " 2342 + "virtual buffer failed\n", current->pid); 2343 + 2344 + goto end_function_free_lli_in; 2345 + } 2346 + 2347 + } 2348 + 2349 + else { 2350 + dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n", 2351 + current->pid); 2352 + error = sep_lock_user_pages(sep, app_virt_in_addr, 2353 + data_size, &lli_in_array, SEP_DRIVER_IN_FLAG, 2354 + dma_ctx); 2355 + if (error) { 2356 + dev_warn(&sep->pdev->dev, 2357 + "[PID%d] sep_lock_user_pages for input " 2358 + "virtual buffer failed\n", current->pid); 2359 + 2360 + goto end_function; 2361 + } 2362 + 2363 + dev_dbg(&sep->pdev->dev, "[PID%d] Locking user output pages\n", 2364 + current->pid); 2365 + 2366 + error = sep_lock_user_pages(sep, app_virt_out_addr, 2367 + data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG, 2368 + dma_ctx); 2369 + 2370 + if (error) { 2371 + dev_warn(&sep->pdev->dev, 2372 + "[PID%d] sep_lock_user_pages" 2373 + " for output virtual buffer failed\n", 2374 + current->pid); 2375 + 2376 + goto end_function_free_lli_in; 2377 + } 2378 + } 2379 + 2380 + dev_dbg(&sep->pdev->dev, "[PID%d] After lock; prep input output dma " 2381 + "table sep_in_num_pages is (hex) %x\n", current->pid, 2382 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages); 2383 + 2384 + dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n", 2385 + current->pid, 2386 + 
dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages); 2387 + 2388 + dev_dbg(&sep->pdev->dev, "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP" 2389 + " is (hex) %x\n", current->pid, 2390 + SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP); 2391 + 2392 + /* Call the fucntion that creates table from the lli arrays */ 2393 + dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n", 2394 + current->pid); 2395 + error = sep_construct_dma_tables_from_lli( 2396 + sep, lli_in_array, 2397 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat]. 2398 + in_num_pages, 2399 + lli_out_array, 2400 + dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat]. 2401 + out_num_pages, 2402 + block_size, lli_table_in_ptr, lli_table_out_ptr, 2403 + in_num_entries_ptr, out_num_entries_ptr, 2404 + table_data_size_ptr, dmatables_region, dma_ctx); 2405 + 2406 + if (error) { 2407 + dev_warn(&sep->pdev->dev, 2408 + "[PID%d] sep_construct_dma_tables_from_lli failed\n", 2409 + current->pid); 2410 + goto end_function_with_error; 2411 + } 2412 + 2413 + kfree(lli_out_array); 2414 + kfree(lli_in_array); 2415 + 2416 + update_dcb_counter: 2417 + /* Update DCB counter */ 2418 + dma_ctx->nr_dcb_creat++; 2419 + 2420 + goto end_function; 2421 + 2422 + end_function_with_error: 2423 + kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array); 2424 + kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array); 2425 + kfree(lli_out_array); 2426 + 2427 + 2428 + end_function_free_lli_in: 2429 + kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array); 2430 + kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array); 2431 + kfree(lli_in_array); 2432 + 2433 + end_function: 2434 + 2435 + return error; 2436 + 2437 + } 2438 + 2439 + /** 2440 + * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks 2441 + * @app_in_address: unsigned long; for data buffer in (user space) 2442 + * @app_out_address: unsigned long; for data buffer out (user space) 2443 + * @data_in_size: u32; for size of data 2444 + * 
@block_size: u32; for block size 2445 + * @tail_block_size: u32; for size of tail block 2446 + * @isapplet: bool; to indicate external app 2447 + * @is_kva: bool; kernel buffer; only used for kernel crypto module 2448 + * 2449 + * This function prepares the linked DMA tables and puts the 2450 + * address for the linked list of tables inta a DCB (data control 2451 + * block) the address of which is known by the SEP hardware 2452 + * Note that all bus addresses that are passed to the SEP 2453 + * are in 32 bit format; the SEP is a 32 bit device 2454 + */ 2455 + int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep, 2456 + unsigned long app_in_address, 2457 + unsigned long app_out_address, 2458 + u32 data_in_size, 2459 + u32 block_size, 2460 + u32 tail_block_size, 2461 + bool isapplet, 2462 + bool is_kva, 2463 + struct sep_dcblock *dcb_region, 2464 + void **dmatables_region, 2465 + struct sep_dma_context **dma_ctx, 2466 + struct scatterlist *src_sg, 2467 + struct scatterlist *dst_sg) 2468 + { 2469 + int error = 0; 2470 + /* Size of tail */ 2471 + u32 tail_size = 0; 2472 + /* Address of the created DCB table */ 2473 + struct sep_dcblock *dcb_table_ptr = NULL; 2474 + /* The physical address of the first input DMA table */ 2475 + dma_addr_t in_first_mlli_address = 0; 2476 + /* Number of entries in the first input DMA table */ 2477 + u32 in_first_num_entries = 0; 2478 + /* The physical address of the first output DMA table */ 2479 + dma_addr_t out_first_mlli_address = 0; 2480 + /* Number of entries in the first output DMA table */ 2481 + u32 out_first_num_entries = 0; 2482 + /* Data in the first input/output table */ 2483 + u32 first_data_size = 0; 2484 + 2485 + dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n", 2486 + current->pid, app_in_address); 2487 + 2488 + dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n", 2489 + current->pid, app_out_address); 2490 + 2491 + dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n", 2492 + current->pid, 
data_in_size); 2493 + 2494 + dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n", 2495 + current->pid, block_size); 2496 + 2497 + dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n", 2498 + current->pid, tail_block_size); 2499 + 2500 + dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n", 2501 + current->pid, isapplet); 2502 + 2503 + dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n", 2504 + current->pid, is_kva); 2505 + 2506 + dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n", 2507 + current->pid, src_sg); 2508 + 2509 + dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n", 2510 + current->pid, dst_sg); 2511 + 2512 + if (!dma_ctx) { 2513 + dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n", 2514 + current->pid); 2515 + error = -EINVAL; 2516 + goto end_function; 2517 + } 2518 + 2519 + if (*dma_ctx) { 2520 + /* In case there are multiple DCBs for this transaction */ 2521 + dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n", 2522 + current->pid); 2523 + } else { 2524 + *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL); 2525 + if (!(*dma_ctx)) { 2526 + dev_dbg(&sep->pdev->dev, 2527 + "[PID%d] Not enough memory for DMA context\n", 2528 + current->pid); 2529 + error = -ENOMEM; 2530 + goto end_function; 2531 + } 2532 + dev_dbg(&sep->pdev->dev, 2533 + "[PID%d] Created DMA context addr at 0x%p\n", 2534 + current->pid, *dma_ctx); 2535 + } 2536 + 2537 + /* these are for kernel crypto only */ 2538 + (*dma_ctx)->src_sg = src_sg; 2539 + (*dma_ctx)->dst_sg = dst_sg; 2540 + 2541 + if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) { 2542 + /* No more DCBs to allocate */ 2543 + dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n", 2544 + current->pid); 2545 + error = -ENOSPC; 2546 + goto end_function_error; 2547 + } 2548 + 2549 + /* Allocate new DCB */ 2550 + if (dcb_region) { 2551 + dcb_table_ptr = dcb_region; 2552 + } else { 2553 + dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr + 2554 + SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES + 2555 + 
((*dma_ctx)->nr_dcb_creat * 2556 + sizeof(struct sep_dcblock))); 2557 + } 2558 + 2559 + /* Set the default values in the DCB */ 2560 + dcb_table_ptr->input_mlli_address = 0; 2561 + dcb_table_ptr->input_mlli_num_entries = 0; 2562 + dcb_table_ptr->input_mlli_data_size = 0; 2563 + dcb_table_ptr->output_mlli_address = 0; 2564 + dcb_table_ptr->output_mlli_num_entries = 0; 2565 + dcb_table_ptr->output_mlli_data_size = 0; 2566 + dcb_table_ptr->tail_data_size = 0; 2567 + dcb_table_ptr->out_vr_tail_pt = 0; 2568 + 2569 + if (isapplet == true) { 2570 + 2571 + /* Check if there is enough data for DMA operation */ 2572 + if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) { 2573 + if (is_kva == true) { 2574 + error = -ENODEV; 2575 + goto end_function_error; 2576 + } else { 2577 + if (copy_from_user(dcb_table_ptr->tail_data, 2578 + (void __user *)app_in_address, 2579 + data_in_size)) { 2580 + error = -EFAULT; 2581 + goto end_function_error; 2582 + } 2583 + } 2584 + 2585 + dcb_table_ptr->tail_data_size = data_in_size; 2586 + 2587 + /* Set the output user-space address for mem2mem op */ 2588 + if (app_out_address) 2589 + dcb_table_ptr->out_vr_tail_pt = 2590 + (aligned_u64)app_out_address; 2591 + 2592 + /* 2593 + * Update both data length parameters in order to avoid 2594 + * second data copy and allow building of empty mlli 2595 + * tables 2596 + */ 2597 + tail_size = 0x0; 2598 + data_in_size = 0x0; 2599 + 2600 + } else { 2601 + if (!app_out_address) { 2602 + tail_size = data_in_size % block_size; 2603 + if (!tail_size) { 2604 + if (tail_block_size == block_size) 2605 + tail_size = block_size; 2606 + } 2607 + } else { 2608 + tail_size = 0; 2609 + } 2610 + } 2611 + if (tail_size) { 2612 + if (tail_size > sizeof(dcb_table_ptr->tail_data)) 2613 + return -EINVAL; 2614 + if (is_kva == true) { 2615 + error = -ENODEV; 2616 + goto end_function_error; 2617 + } else { 2618 + /* We have tail data - copy it to DCB */ 2619 + if (copy_from_user(dcb_table_ptr->tail_data, 2620 + (void __user 
*)(app_in_address + 2621 + data_in_size - tail_size), tail_size)) { 2622 + error = -EFAULT; 2623 + goto end_function_error; 2624 + } 2625 + } 2626 + if (app_out_address) 2627 + /* 2628 + * Calculate the output address 2629 + * according to tail data size 2630 + */ 2631 + dcb_table_ptr->out_vr_tail_pt = 2632 + (aligned_u64)app_out_address + 2633 + data_in_size - tail_size; 2634 + 2635 + /* Save the real tail data size */ 2636 + dcb_table_ptr->tail_data_size = tail_size; 2637 + /* 2638 + * Update the data size without the tail 2639 + * data size AKA data for the dma 2640 + */ 2641 + data_in_size = (data_in_size - tail_size); 2642 + } 2643 + } 2644 + /* Check if we need to build only input table or input/output */ 2645 + if (app_out_address) { 2646 + /* Prepare input/output tables */ 2647 + error = sep_prepare_input_output_dma_table(sep, 2648 + app_in_address, 2649 + app_out_address, 2650 + data_in_size, 2651 + block_size, 2652 + &in_first_mlli_address, 2653 + &out_first_mlli_address, 2654 + &in_first_num_entries, 2655 + &out_first_num_entries, 2656 + &first_data_size, 2657 + is_kva, 2658 + dmatables_region, 2659 + *dma_ctx); 2660 + } else { 2661 + /* Prepare input tables */ 2662 + error = sep_prepare_input_dma_table(sep, 2663 + app_in_address, 2664 + data_in_size, 2665 + block_size, 2666 + &in_first_mlli_address, 2667 + &in_first_num_entries, 2668 + &first_data_size, 2669 + is_kva, 2670 + dmatables_region, 2671 + *dma_ctx); 2672 + } 2673 + 2674 + if (error) { 2675 + dev_warn(&sep->pdev->dev, 2676 + "prepare DMA table call failed " 2677 + "from prepare DCB call\n"); 2678 + goto end_function_error; 2679 + } 2680 + 2681 + /* Set the DCB values */ 2682 + dcb_table_ptr->input_mlli_address = in_first_mlli_address; 2683 + dcb_table_ptr->input_mlli_num_entries = in_first_num_entries; 2684 + dcb_table_ptr->input_mlli_data_size = first_data_size; 2685 + dcb_table_ptr->output_mlli_address = out_first_mlli_address; 2686 + dcb_table_ptr->output_mlli_num_entries = 
out_first_num_entries; 2687 + dcb_table_ptr->output_mlli_data_size = first_data_size; 2688 + 2689 + goto end_function; 2690 + 2691 + end_function_error: 2692 + kfree(*dma_ctx); 2693 + 2694 + end_function: 2695 + return error; 2696 + 2697 + } 2698 + 2699 + 2700 + /** 2701 + * sep_free_dma_tables_and_dcb - free DMA tables and DCBs 2702 + * @sep: pointer to struct sep_device 2703 + * @isapplet: indicates external application (used for kernel access) 2704 + * @is_kva: indicates kernel addresses (only used for kernel crypto) 2705 + * 2706 + * This function frees the DMA tables and DCB 2707 + */ 2708 + static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet, 2709 + bool is_kva, struct sep_dma_context **dma_ctx) 2710 + { 2711 + struct sep_dcblock *dcb_table_ptr; 2712 + unsigned long pt_hold; 2713 + void *tail_pt; 2714 + 2715 + int i = 0; 2716 + int error = 0; 2717 + int error_temp = 0; 2718 + 2719 + dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n", 2720 + current->pid); 2721 + 2722 + if (isapplet == true) { 2723 + /* Set pointer to first DCB table */ 2724 + dcb_table_ptr = (struct sep_dcblock *) 2725 + (sep->shared_addr + 2726 + SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES); 2727 + 2728 + /* Go over each DCB and see if tail pointer must be updated */ 2729 + for (i = 0; 2730 + i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) { 2731 + if (dcb_table_ptr->out_vr_tail_pt) { 2732 + pt_hold = (unsigned long)dcb_table_ptr-> 2733 + out_vr_tail_pt; 2734 + tail_pt = (void *)pt_hold; 2735 + if (is_kva == true) { 2736 + error = -ENODEV; 2737 + break; 2738 + } else { 2739 + error_temp = copy_to_user( 2740 + (void __user *)tail_pt, 2741 + dcb_table_ptr->tail_data, 2742 + dcb_table_ptr->tail_data_size); 2743 + } 2744 + if (error_temp) { 2745 + /* Release the DMA resource */ 2746 + error = -EFAULT; 2747 + break; 2748 + } 2749 + } 2750 + } 2751 + } 2752 + /* Free the output pages, if any */ 2753 + sep_free_dma_table_data_handler(sep, dma_ctx); 2754 + 
2755 + dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n", 2756 + current->pid); 2757 + 2758 + return error; 2759 + } 2760 + 2761 + /** 2762 + * sep_prepare_dcb_handler - prepare a control block 2763 + * @sep: pointer to struct sep_device 2764 + * @arg: pointer to user parameters 2765 + * 2766 + * This function will retrieve the RAR buffer physical addresses, type 2767 + * & size corresponding to the RAR handles provided in the buffers vector. 2768 + */ 2769 + static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg, 2770 + struct sep_dma_context **dma_ctx) 2771 + { 2772 + int error; 2773 + /* Command arguments */ 2774 + static struct build_dcb_struct command_args; 2775 + 2776 + /* Get the command arguments */ 2777 + if (copy_from_user(&command_args, (void __user *)arg, 2778 + sizeof(struct build_dcb_struct))) { 2779 + error = -EFAULT; 2780 + goto end_function; 2781 + } 2782 + 2783 + dev_dbg(&sep->pdev->dev, 2784 + "[PID%d] prep dcb handler app_in_address is %08llx\n", 2785 + current->pid, command_args.app_in_address); 2786 + dev_dbg(&sep->pdev->dev, 2787 + "[PID%d] app_out_address is %08llx\n", 2788 + current->pid, command_args.app_out_address); 2789 + dev_dbg(&sep->pdev->dev, 2790 + "[PID%d] data_size is %x\n", 2791 + current->pid, command_args.data_in_size); 2792 + dev_dbg(&sep->pdev->dev, 2793 + "[PID%d] block_size is %x\n", 2794 + current->pid, command_args.block_size); 2795 + dev_dbg(&sep->pdev->dev, 2796 + "[PID%d] tail block_size is %x\n", 2797 + current->pid, command_args.tail_block_size); 2798 + dev_dbg(&sep->pdev->dev, 2799 + "[PID%d] is_applet is %x\n", 2800 + current->pid, command_args.is_applet); 2801 + 2802 + if (!command_args.app_in_address) { 2803 + dev_warn(&sep->pdev->dev, 2804 + "[PID%d] null app_in_address\n", current->pid); 2805 + error = -EINVAL; 2806 + goto end_function; 2807 + } 2808 + 2809 + error = sep_prepare_input_output_dma_table_in_dcb(sep, 2810 + (unsigned long)command_args.app_in_address, 2811 + 
(unsigned long)command_args.app_out_address, 2812 + command_args.data_in_size, command_args.block_size, 2813 + command_args.tail_block_size, 2814 + command_args.is_applet, false, 2815 + NULL, NULL, dma_ctx, NULL, NULL); 2816 + 2817 + end_function: 2818 + return error; 2819 + 2820 + } 2821 + 2822 + /** 2823 + * sep_free_dcb_handler - free control block resources 2824 + * @sep: pointer to struct sep_device 2825 + * 2826 + * This function frees the DCB resources and updates the needed 2827 + * user-space buffers. 2828 + */ 2829 + static int sep_free_dcb_handler(struct sep_device *sep, 2830 + struct sep_dma_context **dma_ctx) 2831 + { 2832 + int error = 0; 2833 + 2834 + if (!dma_ctx || !(*dma_ctx)) { 2835 + dev_dbg(&sep->pdev->dev, "[PID%d] no dma context defined, nothing to free\n", 2836 + current->pid); 2837 + return error; 2838 + } 2839 + 2840 + dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n", 2841 + current->pid, 2842 + (*dma_ctx)->nr_dcb_creat); 2843 + 2844 + error = sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx); 2845 + 2846 + return error; 2847 + } 2848 + 2849 + /** 2850 + * sep_ioctl - ioctl handler for sep device 2851 + * @filp: pointer to struct file 2852 + * @cmd: command 2853 + * @arg: pointer to argument structure 2854 + * 2855 + * Implement the ioctl methods availble on the SEP device. 
2856 + */ 2857 + static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 2858 + { 2859 + struct sep_private_data * const private_data = filp->private_data; 2860 + struct sep_call_status *call_status = &private_data->call_status; 2861 + struct sep_device *sep = private_data->device; 2862 + struct sep_dma_context **dma_ctx = &private_data->dma_ctx; 2863 + struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem; 2864 + int error = 0; 2865 + 2866 + dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n", 2867 + current->pid, cmd); 2868 + dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n", 2869 + current->pid, *dma_ctx); 2870 + 2871 + /* Make sure we own this device */ 2872 + error = sep_check_transaction_owner(sep); 2873 + if (error) { 2874 + dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n", 2875 + current->pid); 2876 + goto end_function; 2877 + } 2878 + 2879 + /* Check that sep_mmap has been called before */ 2880 + if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET, 2881 + &call_status->status)) { 2882 + dev_dbg(&sep->pdev->dev, 2883 + "[PID%d] mmap not called\n", current->pid); 2884 + error = -EPROTO; 2885 + goto end_function; 2886 + } 2887 + 2888 + /* Check that the command is for SEP device */ 2889 + if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) { 2890 + error = -ENOTTY; 2891 + goto end_function; 2892 + } 2893 + 2894 + switch (cmd) { 2895 + case SEP_IOCSENDSEPCOMMAND: 2896 + if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, 2897 + &call_status->status)) { 2898 + dev_dbg(&sep->pdev->dev, "[PID%d] send msg already done\n", 2899 + current->pid); 2900 + error = -EPROTO; 2901 + goto end_function; 2902 + } 2903 + /* Send command to SEP */ 2904 + error = sep_send_command_handler(sep); 2905 + if (!error) 2906 + set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, 2907 + &call_status->status); 2908 + dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCSENDSEPCOMMAND end\n", 2909 + current->pid); 2910 + break; 2911 + case SEP_IOCENDTRANSACTION: 2912 + 
error = sep_end_transaction_handler(sep, dma_ctx, call_status, 2913 + my_queue_elem); 2914 + dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCENDTRANSACTION end\n", 2915 + current->pid); 2916 + break; 2917 + case SEP_IOCPREPAREDCB: 2918 + if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, 2919 + &call_status->status)) { 2920 + dev_dbg(&sep->pdev->dev, 2921 + "[PID%d] dcb preparation needed before send msg\n", 2922 + current->pid); 2923 + error = -EPROTO; 2924 + goto end_function; 2925 + } 2926 + 2927 + if (!arg) { 2928 + dev_dbg(&sep->pdev->dev, 2929 + "[PID%d] dcb prep null arg\n", current->pid); 2930 + error = -EINVAL; 2931 + goto end_function; 2932 + } 2933 + 2934 + error = sep_prepare_dcb_handler(sep, arg, dma_ctx); 2935 + dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCPREPAREDCB end\n", 2936 + current->pid); 2937 + break; 2938 + case SEP_IOCFREEDCB: 2939 + error = sep_free_dcb_handler(sep, dma_ctx); 2940 + dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n", 2941 + current->pid); 2942 + break; 2943 + default: 2944 + error = -ENOTTY; 2945 + dev_dbg(&sep->pdev->dev, "[PID%d] default end\n", 2946 + current->pid); 2947 + break; 2948 + } 2949 + 2950 + end_function: 2951 + dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid); 2952 + 2953 + return error; 2954 + } 2955 + 2956 + /** 2957 + * sep_inthandler - interrupt handler for sep device 2958 + * @irq: interrupt 2959 + * @dev_id: device id 2960 + */ 2961 + static irqreturn_t sep_inthandler(int irq, void *dev_id) 2962 + { 2963 + unsigned long lock_irq_flag; 2964 + u32 reg_val, reg_val2 = 0; 2965 + struct sep_device *sep = dev_id; 2966 + irqreturn_t int_error = IRQ_HANDLED; 2967 + 2968 + /* Are we in power save? 
 */
#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
	if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
		dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
		return IRQ_NONE;
	}
#endif

	/* IRQ line is shared; if no transaction is running it isn't ours */
	if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
		dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
		return IRQ_NONE;
	}

	/* Read the IRR register to check if this is SEP interrupt */
	reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);

	dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);

	/* Bit 13 is the GPR2 "reply ready" interrupt source */
	if (reg_val & (0x1 << 13)) {

		/* Lock and update the counter of reply messages */
		spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
		sep->reply_ct++;
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

		dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
			sep->send_ct, sep->reply_ct);

		/* Is this a kernel client request */
		if (sep->in_kernel) {
			/* Defer completion handling to the tasklet */
			tasklet_schedule(&sep->finish_tasklet);
			goto finished_interrupt;
		}

		/* Is this printf or daemon request?
		 */
		reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"SEP Interrupt - GPR2 is %08x\n", reg_val2);

		clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);

		/* GPR2 bits 30/31 distinguish printf/daemon from a reply */
		if ((reg_val2 >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "int: printf request\n");
		} else if (reg_val2 >> 31) {
			dev_dbg(&sep->pdev->dev, "int: daemon request\n");
		} else {
			dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
			wake_up(&sep->event_interrupt);
		}
	} else {
		dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
		int_error = IRQ_NONE;
	}

finished_interrupt:

	/* Acknowledge only interrupts we actually handled */
	if (int_error == IRQ_HANDLED)
		sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);

	return int_error;
}

/**
 * sep_reconfig_shared_area - reconfigure shared area
 * @sep: pointer to struct sep_device
 *
 * Reconfig the shared area between HOST and SEP - needed in case
 * the DX_CC_Init function was called before OS loading.
3036 + */ 3037 + static int sep_reconfig_shared_area(struct sep_device *sep) 3038 + { 3039 + int ret_val; 3040 + 3041 + /* use to limit waiting for SEP */ 3042 + unsigned long end_time; 3043 + 3044 + /* Send the new SHARED MESSAGE AREA to the SEP */ 3045 + dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n", 3046 + (unsigned long long)sep->shared_bus); 3047 + 3048 + sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus); 3049 + 3050 + /* Poll for SEP response */ 3051 + ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR); 3052 + 3053 + end_time = jiffies + (WAIT_TIME * HZ); 3054 + 3055 + while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) && 3056 + (ret_val != sep->shared_bus)) 3057 + ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR); 3058 + 3059 + /* Check the return value (register) */ 3060 + if (ret_val != sep->shared_bus) { 3061 + dev_warn(&sep->pdev->dev, "could not reconfig shared area\n"); 3062 + dev_warn(&sep->pdev->dev, "result was %x\n", ret_val); 3063 + ret_val = -ENOMEM; 3064 + } else 3065 + ret_val = 0; 3066 + 3067 + dev_dbg(&sep->pdev->dev, "reconfig shared area end\n"); 3068 + 3069 + return ret_val; 3070 + } 3071 + 3072 + /** 3073 + * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables 3074 + * contexts into use 3075 + * @sep: SEP device 3076 + * @dcb_region: DCB region copy 3077 + * @dmatables_region: MLLI/DMA tables copy 3078 + * @dma_ctx: DMA context for current transaction 3079 + */ 3080 + ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep, 3081 + struct sep_dcblock **dcb_region, 3082 + void **dmatables_region, 3083 + struct sep_dma_context *dma_ctx) 3084 + { 3085 + void *dmaregion_free_start = NULL; 3086 + void *dmaregion_free_end = NULL; 3087 + void *dcbregion_free_start = NULL; 3088 + void *dcbregion_free_end = NULL; 3089 + ssize_t error = 0; 3090 + 3091 + dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n", 3092 + current->pid); 3093 + 3094 + if (1 
	    > dma_ctx->nr_dcb_creat) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid number of dcbs to activate 0x%08X\n",
			 current->pid, dma_ctx->nr_dcb_creat);
		error = -EINVAL;
		goto end_function;
	}

	/* Bounds of the shared-area slot reserved for MLLI/DMA tables */
	dmaregion_free_start = sep->shared_addr
				+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
	dmaregion_free_end = dmaregion_free_start
				+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;

	/* Reject tables that would overflow the reserved slot */
	if (dmaregion_free_start
	     + dma_ctx->dmatables_len > dmaregion_free_end) {
		error = -ENOMEM;
		goto end_function;
	}
	memcpy(dmaregion_free_start,
	       *dmatables_region,
	       dma_ctx->dmatables_len);
	/* Free MLLI table copy */
	kfree(*dmatables_region);
	*dmatables_region = NULL;

	/* Copy thread's DCB table copy to DCB table region */
	dcbregion_free_start = sep->shared_addr +
				SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
	dcbregion_free_end = dcbregion_free_start +
				(SEP_MAX_NUM_SYNC_DMA_OPS *
					sizeof(struct sep_dcblock)) - 1;

	if (dcbregion_free_start
	     + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
	     > dcbregion_free_end) {
		error = -ENOMEM;
		goto end_function;
	}

	memcpy(dcbregion_free_start,
	       *dcb_region,
	       dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));

	/* Print the tables */
	dev_dbg(&sep->pdev->dev, "activate: input table\n");
	sep_debug_print_lli_tables(sep,
		(struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
		(*dcb_region)->input_mlli_address),
		(*dcb_region)->input_mlli_num_entries,
		(*dcb_region)->input_mlli_data_size);

	dev_dbg(&sep->pdev->dev, "activate: output table\n");
	sep_debug_print_lli_tables(sep,
		(struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
		(*dcb_region)->output_mlli_address),
		(*dcb_region)->output_mlli_num_entries,
		(*dcb_region)->output_mlli_data_size);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] printing activated tables\n", current->pid);

end_function:
	/* kfree(NULL) is a no-op, so the success path (pointer already
	 * freed and cleared above) is safe here as well */
	kfree(*dmatables_region);
	*dmatables_region = NULL;

	kfree(*dcb_region);
	*dcb_region = NULL;

	return error;
}

/**
 * sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
 * @sep: SEP device
 * @dcb_region: DCB region buf to create for current transaction
 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
 * @dma_ctx: DMA context buf to create for current transaction
 * @user_dcb_args: User arguments for DCB/MLLI creation
 * @num_dcbs: Number of DCBs to create
 */
static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
			struct sep_dcblock **dcb_region,
			void **dmatables_region,
			struct sep_dma_context **dma_ctx,
			const struct build_dcb_struct __user *user_dcb_args,
			const u32 num_dcbs)
{
	int error = 0;
	int i = 0;
	struct build_dcb_struct *dcb_args = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
		current->pid);

	if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
		error = -EINVAL;
		goto end_function;
	}

	if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid number of dcbs 0x%08X\n",
			 current->pid, num_dcbs);
		error = -EINVAL;
		goto end_function;
	}

	/* Temporary kernel copy of the user's DCB build arguments */
	dcb_args = kzalloc(num_dcbs * sizeof(struct build_dcb_struct),
			   GFP_KERNEL);
	if (!dcb_args) {
		dev_warn(&sep->pdev->dev, "[PID%d] no memory for dcb args\n",
			 current->pid);
		error = -ENOMEM;
		goto end_function;
	}

	if (copy_from_user(dcb_args,
			user_dcb_args,
			num_dcbs * 
sizeof(struct build_dcb_struct))) {
		error = -EINVAL;
		goto end_function;
	}

	/* Allocate thread-specific memory for DCB */
	*dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
			      GFP_KERNEL);
	if (!(*dcb_region)) {
		error = -ENOMEM;
		goto end_function;
	}

	/* Prepare DCB and MLLI table into the allocated regions */
	for (i = 0; i < num_dcbs; i++) {
		/* is_kva = false: addresses come from user space */
		error = sep_prepare_input_output_dma_table_in_dcb(sep,
				(unsigned long)dcb_args[i].app_in_address,
				(unsigned long)dcb_args[i].app_out_address,
				dcb_args[i].data_in_size,
				dcb_args[i].block_size,
				dcb_args[i].tail_block_size,
				dcb_args[i].is_applet,
				false,
				*dcb_region, dmatables_region,
				dma_ctx,
				NULL,
				NULL);
		if (error) {
			dev_warn(&sep->pdev->dev,
				 "[PID%d] dma table creation failed\n",
				 current->pid);
			goto end_function;
		}
	}

end_function:
	kfree(dcb_args);
	return error;

}

/**
 * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
 * for kernel crypto
 * @sep: SEP device
 * @dcb_region: DCB region buf to create for current transaction
 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
 * @dma_ctx: DMA context buf to create for current transaction
 * @dcb_data: Kernel-space arguments for DCB/MLLI creation
 * @num_dcbs: Number of DCBs to create
 * This does the same thing as sep_create_dcb_dmatables_context
 * except that it is used only for the kernel crypto operation.
It is
 * separate because there is no user data involved; the dcb data structure
 * is specific for kernel crypto (build_dcb_struct_kernel)
 */
int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
			struct sep_dcblock **dcb_region,
			void **dmatables_region,
			struct sep_dma_context **dma_ctx,
			const struct build_dcb_struct_kernel *dcb_data,
			const u32 num_dcbs)
{
	int error = 0;
	int i = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
		current->pid);

	if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
		error = -EINVAL;
		goto end_function;
	}

	if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid number of dcbs 0x%08X\n",
			 current->pid, num_dcbs);
		error = -EINVAL;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
		current->pid, num_dcbs);

	/* Allocate thread-specific memory for DCB */
	*dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
			      GFP_KERNEL);
	if (!(*dcb_region)) {
		error = -ENOMEM;
		goto end_function;
	}

	/* Prepare DCB and MLLI table into the allocated regions */
	for (i = 0; i < num_dcbs; i++) {
		/* is_kva = true: kernel crypto supplies scatterlists, not
		 * user addresses */
		error = sep_prepare_input_output_dma_table_in_dcb(sep,
				(unsigned long)dcb_data->app_in_address,
				(unsigned long)dcb_data->app_out_address,
				dcb_data->data_in_size,
				dcb_data->block_size,
				dcb_data->tail_block_size,
				dcb_data->is_applet,
				true,
				*dcb_region, dmatables_region,
				dma_ctx,
				dcb_data->src_sg,
				dcb_data->dst_sg);
		if (error) {
			dev_warn(&sep->pdev->dev,
				 "[PID%d] dma table creation failed\n",
				 current->pid);
			goto end_function;
		}
	}

end_function:
return error; 3328 + 3329 + } 3330 + 3331 + /** 3332 + * sep_activate_msgarea_context - Takes the message area context into use 3333 + * @sep: SEP device 3334 + * @msg_region: Message area context buf 3335 + * @msg_len: Message area context buffer size 3336 + */ 3337 + static ssize_t sep_activate_msgarea_context(struct sep_device *sep, 3338 + void **msg_region, 3339 + const size_t msg_len) 3340 + { 3341 + dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n", 3342 + current->pid); 3343 + 3344 + if (!msg_region || !(*msg_region) || 3345 + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) { 3346 + dev_warn(&sep->pdev->dev, 3347 + "[PID%d] invalid act msgarea len 0x%08X\n", 3348 + current->pid, msg_len); 3349 + return -EINVAL; 3350 + } 3351 + 3352 + memcpy(sep->shared_addr, *msg_region, msg_len); 3353 + 3354 + return 0; 3355 + } 3356 + 3357 + /** 3358 + * sep_create_msgarea_context - Creates message area context 3359 + * @sep: SEP device 3360 + * @msg_region: Msg area region buf to create for current transaction 3361 + * @msg_user: Content for msg area region from user 3362 + * @msg_len: Message area size 3363 + */ 3364 + static ssize_t sep_create_msgarea_context(struct sep_device *sep, 3365 + void **msg_region, 3366 + const void __user *msg_user, 3367 + const size_t msg_len) 3368 + { 3369 + int error = 0; 3370 + 3371 + dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n", 3372 + current->pid); 3373 + 3374 + if (!msg_region || 3375 + !msg_user || 3376 + SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len || 3377 + SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) { 3378 + dev_warn(&sep->pdev->dev, 3379 + "[PID%d] invalid creat msgarea len 0x%08X\n", 3380 + current->pid, msg_len); 3381 + error = -EINVAL; 3382 + goto end_function; 3383 + } 3384 + 3385 + /* Allocate thread-specific memory for message buffer */ 3386 + *msg_region = kzalloc(msg_len, GFP_KERNEL); 3387 + if (!(*msg_region)) { 3388 + dev_warn(&sep->pdev->dev, 3389 + "[PID%d] no mem for msgarea 
context\n", 3390 + current->pid); 3391 + error = -ENOMEM; 3392 + goto end_function; 3393 + } 3394 + 3395 + /* Copy input data to write() to allocated message buffer */ 3396 + if (copy_from_user(*msg_region, msg_user, msg_len)) { 3397 + error = -EINVAL; 3398 + goto end_function; 3399 + } 3400 + 3401 + end_function: 3402 + if (error && msg_region) { 3403 + kfree(*msg_region); 3404 + *msg_region = NULL; 3405 + } 3406 + 3407 + return error; 3408 + } 3409 + 3410 + 3411 + /** 3412 + * sep_read - Returns results of an operation for fastcall interface 3413 + * @filp: File pointer 3414 + * @buf_user: User buffer for storing results 3415 + * @count_user: User buffer size 3416 + * @offset: File offset, not supported 3417 + * 3418 + * The implementation does not support reading in chunks, all data must be 3419 + * consumed during a single read system call. 3420 + */ 3421 + static ssize_t sep_read(struct file *filp, 3422 + char __user *buf_user, size_t count_user, 3423 + loff_t *offset) 3424 + { 3425 + struct sep_private_data * const private_data = filp->private_data; 3426 + struct sep_call_status *call_status = &private_data->call_status; 3427 + struct sep_device *sep = private_data->device; 3428 + struct sep_dma_context **dma_ctx = &private_data->dma_ctx; 3429 + struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem; 3430 + ssize_t error = 0, error_tmp = 0; 3431 + 3432 + /* Am I the process that owns the transaction? 
*/ 3433 + error = sep_check_transaction_owner(sep); 3434 + if (error) { 3435 + dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n", 3436 + current->pid); 3437 + goto end_function; 3438 + } 3439 + 3440 + /* Checks that user has called necessarry apis */ 3441 + if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, 3442 + &call_status->status)) { 3443 + dev_warn(&sep->pdev->dev, 3444 + "[PID%d] fastcall write not called\n", 3445 + current->pid); 3446 + error = -EPROTO; 3447 + goto end_function_error; 3448 + } 3449 + 3450 + if (!buf_user) { 3451 + dev_warn(&sep->pdev->dev, 3452 + "[PID%d] null user buffer\n", 3453 + current->pid); 3454 + error = -EINVAL; 3455 + goto end_function_error; 3456 + } 3457 + 3458 + 3459 + /* Wait for SEP to finish */ 3460 + wait_event(sep->event_interrupt, 3461 + test_bit(SEP_WORKING_LOCK_BIT, 3462 + &sep->in_use_flags) == 0); 3463 + 3464 + sep_dump_message(sep); 3465 + 3466 + dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08X\n", 3467 + current->pid, count_user); 3468 + 3469 + /* In case user has allocated bigger buffer */ 3470 + if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES) 3471 + count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES; 3472 + 3473 + if (copy_to_user(buf_user, sep->shared_addr, count_user)) { 3474 + error = -EFAULT; 3475 + goto end_function_error; 3476 + } 3477 + 3478 + dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid); 3479 + error = count_user; 3480 + 3481 + end_function_error: 3482 + /* Copy possible tail data to user and free DCB and MLLIs */ 3483 + error_tmp = sep_free_dcb_handler(sep, dma_ctx); 3484 + if (error_tmp) 3485 + dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n", 3486 + current->pid); 3487 + 3488 + /* End the transaction, wakeup pending ones */ 3489 + error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status, 3490 + my_queue_elem); 3491 + if (error_tmp) 3492 + dev_warn(&sep->pdev->dev, 3493 + "[PID%d] ending transaction failed\n", 3494 + current->pid); 
3495 + 3496 + end_function: 3497 + return error; 3498 + } 3499 + 3500 + /** 3501 + * sep_fastcall_args_get - Gets fastcall params from user 3502 + * sep: SEP device 3503 + * @args: Parameters buffer 3504 + * @buf_user: User buffer for operation parameters 3505 + * @count_user: User buffer size 3506 + */ 3507 + static inline ssize_t sep_fastcall_args_get(struct sep_device *sep, 3508 + struct sep_fastcall_hdr *args, 3509 + const char __user *buf_user, 3510 + const size_t count_user) 3511 + { 3512 + ssize_t error = 0; 3513 + size_t actual_count = 0; 3514 + 3515 + if (!buf_user) { 3516 + dev_warn(&sep->pdev->dev, 3517 + "[PID%d] null user buffer\n", 3518 + current->pid); 3519 + error = -EINVAL; 3520 + goto end_function; 3521 + } 3522 + 3523 + if (count_user < sizeof(struct sep_fastcall_hdr)) { 3524 + dev_warn(&sep->pdev->dev, 3525 + "[PID%d] too small message size 0x%08X\n", 3526 + current->pid, count_user); 3527 + error = -EINVAL; 3528 + goto end_function; 3529 + } 3530 + 3531 + 3532 + if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) { 3533 + error = -EFAULT; 3534 + goto end_function; 3535 + } 3536 + 3537 + if (SEP_FC_MAGIC != args->magic) { 3538 + dev_warn(&sep->pdev->dev, 3539 + "[PID%d] invalid fastcall magic 0x%08X\n", 3540 + current->pid, args->magic); 3541 + error = -EINVAL; 3542 + goto end_function; 3543 + } 3544 + 3545 + dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n", 3546 + current->pid, args->num_dcbs); 3547 + dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n", 3548 + current->pid, args->msg_len); 3549 + 3550 + if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len || 3551 + SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) { 3552 + dev_warn(&sep->pdev->dev, 3553 + "[PID%d] invalid message length\n", 3554 + current->pid); 3555 + error = -EINVAL; 3556 + goto end_function; 3557 + } 3558 + 3559 + actual_count = sizeof(struct sep_fastcall_hdr) 3560 + + args->msg_len 3561 + + (args->num_dcbs * 
sizeof(struct build_dcb_struct)); 3562 + 3563 + if (actual_count != count_user) { 3564 + dev_warn(&sep->pdev->dev, 3565 + "[PID%d] inconsistent message " 3566 + "sizes 0x%08X vs 0x%08X\n", 3567 + current->pid, actual_count, count_user); 3568 + error = -EMSGSIZE; 3569 + goto end_function; 3570 + } 3571 + 3572 + end_function: 3573 + return error; 3574 + } 3575 + 3576 + /** 3577 + * sep_write - Starts an operation for fastcall interface 3578 + * @filp: File pointer 3579 + * @buf_user: User buffer for operation parameters 3580 + * @count_user: User buffer size 3581 + * @offset: File offset, not supported 3582 + * 3583 + * The implementation does not support writing in chunks, 3584 + * all data must be given during a single write system call. 3585 + */ 3586 + static ssize_t sep_write(struct file *filp, 3587 + const char __user *buf_user, size_t count_user, 3588 + loff_t *offset) 3589 + { 3590 + struct sep_private_data * const private_data = filp->private_data; 3591 + struct sep_call_status *call_status = &private_data->call_status; 3592 + struct sep_device *sep = private_data->device; 3593 + struct sep_dma_context *dma_ctx = NULL; 3594 + struct sep_fastcall_hdr call_hdr = {0}; 3595 + void *msg_region = NULL; 3596 + void *dmatables_region = NULL; 3597 + struct sep_dcblock *dcb_region = NULL; 3598 + ssize_t error = 0; 3599 + struct sep_queue_info *my_queue_elem = NULL; 3600 + 3601 + dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n", 3602 + current->pid, sep); 3603 + dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n", 3604 + current->pid, private_data); 3605 + 3606 + error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user); 3607 + if (error) 3608 + goto end_function; 3609 + 3610 + buf_user += sizeof(struct sep_fastcall_hdr); 3611 + 3612 + /* 3613 + * Controlling driver memory usage by limiting amount of 3614 + * buffers created. 
Only SEP_DOUBLEBUF_USERS_LIMIT number
	 * of threads can progress further at a time
	 */
	dev_dbg(&sep->pdev->dev, "[PID%d] waiting for double buffering "
		"region access\n", current->pid);
	error = down_interruptible(&sep->sep_doublebuf);
	dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
		current->pid);
	if (error) {
		/* Signal received */
		goto end_function_error;
	}


	/*
	 * Prepare contents of the shared area regions for
	 * the operation into temporary buffers
	 */
	if (0 < call_hdr.num_dcbs) {
		error = sep_create_dcb_dmatables_context(sep,
				&dcb_region,
				&dmatables_region,
				&dma_ctx,
				(const struct build_dcb_struct __user *)
					buf_user,
				call_hdr.num_dcbs);
		if (error)
			goto end_function_error_doublebuf;

		buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
	}

	error = sep_create_msgarea_context(sep,
					   &msg_region,
					   buf_user,
					   call_hdr.msg_len);
	if (error)
		goto end_function_error_doublebuf;

	dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
		current->pid);
	my_queue_elem = sep_queue_status_add(sep,
			((struct sep_msgarea_hdr *)msg_region)->opcode,
			(dma_ctx) ? dma_ctx->input_data_len : 0,
			current->pid,
			current->comm, sizeof(current->comm));

	if (!my_queue_elem) {
		dev_dbg(&sep->pdev->dev, "[PID%d] updating queue"
			"status error\n", current->pid);
		error = -ENOMEM;
		goto end_function_error_doublebuf;
	}

	/* Wait until current process gets the transaction */
	error = sep_wait_transaction(sep);

	if (error) {
		/* Interrupted by signal, don't clear transaction */
		dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
			current->pid);
		sep_queue_status_remove(sep, &my_queue_elem);
		goto end_function_error_doublebuf;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
		current->pid);
	private_data->my_queue_elem = my_queue_elem;

	/* Activate shared area regions for the transaction */
	error = sep_activate_msgarea_context(sep, &msg_region,
					     call_hdr.msg_len);
	if (error)
		goto end_function_error_clear_transact;

	sep_dump_message(sep);

	if (0 < call_hdr.num_dcbs) {
		error = sep_activate_dcb_dmatables_context(sep,
				&dcb_region,
				&dmatables_region,
				dma_ctx);
		if (error)
			goto end_function_error_clear_transact;
	}

	/* Send command to SEP */
	error = sep_send_command_handler(sep);
	if (error)
		goto end_function_error_clear_transact;

	/* Store DMA context for the transaction */
	private_data->dma_ctx = dma_ctx;
	/* Update call status */
	set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
	error = count_user;

	up(&sep->sep_doublebuf);
	dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
		current->pid);

	goto end_function;

	/* Error unwind: each label releases strictly what was acquired
	 * after the previous one */
end_function_error_clear_transact:
	sep_end_transaction_handler(sep, &dma_ctx, call_status,
				    &private_data->my_queue_elem);

end_function_error_doublebuf:
	up(&sep->sep_doublebuf);
	dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
		current->pid);

end_function_error:
	if (dma_ctx)
		sep_free_dma_table_data_handler(sep, &dma_ctx);

end_function:
	/* kfree(NULL) is a no-op; any region consumed above was cleared */
	kfree(dcb_region);
	kfree(dmatables_region);
	kfree(msg_region);

	return error;
}
/**
 * sep_seek - Handler for seek system call
 * @filp: File pointer
 * @offset: File offset
 * @origin: Options for offset
 *
 * Fastcall interface does not support seeking, all reads
 * and writes are from/to offset zero
 */
static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
{
	return -ENOSYS;
}



/**
 * sep_file_operations - file operation on sep device
 * @sep_ioctl: ioctl handler from user space call
 * @sep_poll: poll handler
 * @sep_open: handles sep device open request
 * @sep_release:handles sep device release request
 * @sep_mmap: handles memory mapping requests
 * @sep_read: handles read request on sep device
 * @sep_write: handles write request on sep device
 * @sep_seek: handles seek request on sep device
 */
static const struct file_operations sep_file_operations = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = sep_ioctl,
	.poll = sep_poll,
	.open = sep_open,
	.release = sep_release,
	.mmap = sep_mmap,
	.read = sep_read,
	.write = sep_write,
	.llseek = sep_seek,
};

/**
 * sep_sysfs_read - read sysfs entry per given arguments
 * @filp: file pointer
 * @kobj: kobject pointer
 * @attr: binary file attributes
 * @buf: read to this buffer
 * @pos: offset to read
 * @count: amount of data to read
 *
 * This function is to read sysfs entries for sep driver per given arguments.
 */
static ssize_t
sep_sysfs_read(struct file *filp, struct kobject *kobj,
	       struct bin_attribute *attr,
	       char *buf, loff_t pos, size_t count)
{
	unsigned long lck_flags;
	size_t nleft = count;
	struct sep_device *sep = sep_dev;
	struct sep_queue_info *queue_elem = NULL;
	u32 queue_num = 0;
	u32 i = 1;

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);

	/* Report at most SEP_DOUBLEBUF_USERS_LIMIT queue entries */
	queue_num = sep->sep_queue_num;
	if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
		queue_num = SEP_DOUBLEBUF_USERS_LIMIT;


	/* Caller's buffer must hold the count plus every entry */
	if (count < sizeof(queue_num)
			+ (queue_num * sizeof(struct sep_queue_data))) {
		spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
		return -EINVAL;
	}

	memcpy(buf, &queue_num, sizeof(queue_num));
	buf += sizeof(queue_num);
	nleft -= sizeof(queue_num);

	list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
		if (i++ > queue_num)
			break;

		memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
		nleft -= sizeof(queue_elem->data);
		buf += sizeof(queue_elem->data);
	}
	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	/* Number of bytes actually written */
	return count - nleft;
}

/**
 * bin_attributes - defines attributes for queue_status
 * @attr: attributes (name & permissions)
 * @read: function pointer to read this file
 * @size: maximum size of binary attribute
 */
static const struct bin_attribute queue_status = {
	.attr = {.name = "queue_status", .mode = 0444},
	.read = sep_sysfs_read,
	.size = sizeof(u32)
		+ (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
};

/**
 * sep_register_driver_with_fs - register misc devices
 * @sep: pointer to struct
sep_device 3845 + * 3846 + * This function registers the driver with the file system 3847 + */ 3848 + static int sep_register_driver_with_fs(struct sep_device *sep) 3849 + { 3850 + int ret_val; 3851 + 3852 + sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR; 3853 + sep->miscdev_sep.name = SEP_DEV_NAME; 3854 + sep->miscdev_sep.fops = &sep_file_operations; 3855 + 3856 + ret_val = misc_register(&sep->miscdev_sep); 3857 + if (ret_val) { 3858 + dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n", 3859 + ret_val); 3860 + return ret_val; 3861 + } 3862 + 3863 + ret_val = device_create_bin_file(sep->miscdev_sep.this_device, 3864 + &queue_status); 3865 + if (ret_val) { 3866 + dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n", 3867 + ret_val); 3868 + return ret_val; 3869 + } 3870 + 3871 + return ret_val; 3872 + } 3873 + 3874 + 3875 + /** 3876 + *sep_probe - probe a matching PCI device 3877 + *@pdev: pci_device 3878 + *@ent: pci_device_id 3879 + * 3880 + *Attempt to set up and configure a SEP device that has been 3881 + *discovered by the PCI layer. Allocates all required resources. 
3882 + */ 3883 + static int __devinit sep_probe(struct pci_dev *pdev, 3884 + const struct pci_device_id *ent) 3885 + { 3886 + int error = 0; 3887 + struct sep_device *sep = NULL; 3888 + 3889 + if (sep_dev != NULL) { 3890 + dev_dbg(&pdev->dev, "only one SEP supported.\n"); 3891 + return -EBUSY; 3892 + } 3893 + 3894 + /* Enable the device */ 3895 + error = pci_enable_device(pdev); 3896 + if (error) { 3897 + dev_warn(&pdev->dev, "error enabling pci device\n"); 3898 + goto end_function; 3899 + } 3900 + 3901 + /* Allocate the sep_device structure for this device */ 3902 + sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC); 3903 + if (sep_dev == NULL) { 3904 + dev_warn(&pdev->dev, 3905 + "can't kmalloc the sep_device structure\n"); 3906 + error = -ENOMEM; 3907 + goto end_function_disable_device; 3908 + } 3909 + 3910 + /* 3911 + * We're going to use another variable for actually 3912 + * working with the device; this way, if we have 3913 + * multiple devices in the future, it would be easier 3914 + * to make appropriate changes 3915 + */ 3916 + sep = sep_dev; 3917 + 3918 + sep->pdev = pci_dev_get(pdev); 3919 + 3920 + init_waitqueue_head(&sep->event_transactions); 3921 + init_waitqueue_head(&sep->event_interrupt); 3922 + spin_lock_init(&sep->snd_rply_lck); 3923 + spin_lock_init(&sep->sep_queue_lock); 3924 + sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT); 3925 + 3926 + INIT_LIST_HEAD(&sep->sep_queue_status); 3927 + 3928 + dev_dbg(&sep->pdev->dev, "sep probe: PCI obtained, " 3929 + "device being prepared\n"); 3930 + dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision); 3931 + 3932 + /* Set up our register area */ 3933 + sep->reg_physical_addr = pci_resource_start(sep->pdev, 0); 3934 + if (!sep->reg_physical_addr) { 3935 + dev_warn(&sep->pdev->dev, "Error getting register start\n"); 3936 + error = -ENODEV; 3937 + goto end_function_free_sep_dev; 3938 + } 3939 + 3940 + sep->reg_physical_end = pci_resource_end(sep->pdev, 0); 3941 + if 
(!sep->reg_physical_end) { 3942 + dev_warn(&sep->pdev->dev, "Error getting register end\n"); 3943 + error = -ENODEV; 3944 + goto end_function_free_sep_dev; 3945 + } 3946 + 3947 + sep->reg_addr = ioremap_nocache(sep->reg_physical_addr, 3948 + (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1)); 3949 + if (!sep->reg_addr) { 3950 + dev_warn(&sep->pdev->dev, "Error getting register virtual\n"); 3951 + error = -ENODEV; 3952 + goto end_function_free_sep_dev; 3953 + } 3954 + 3955 + dev_dbg(&sep->pdev->dev, 3956 + "Register area start %llx end %llx virtual %p\n", 3957 + (unsigned long long)sep->reg_physical_addr, 3958 + (unsigned long long)sep->reg_physical_end, 3959 + sep->reg_addr); 3960 + 3961 + /* Allocate the shared area */ 3962 + sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + 3963 + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES + 3964 + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + 3965 + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + 3966 + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES; 3967 + 3968 + if (sep_map_and_alloc_shared_area(sep)) { 3969 + error = -ENOMEM; 3970 + /* Allocation failed */ 3971 + goto end_function_error; 3972 + } 3973 + 3974 + /* Clear ICR register */ 3975 + sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF); 3976 + 3977 + /* Set the IMR register - open only GPR 2 */ 3978 + sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13))); 3979 + 3980 + /* Read send/receive counters from SEP */ 3981 + sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); 3982 + sep->reply_ct &= 0x3FFFFFFF; 3983 + sep->send_ct = sep->reply_ct; 3984 + 3985 + /* Get the interrupt line */ 3986 + error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, 3987 + "sep_driver", sep); 3988 + 3989 + if (error) 3990 + goto end_function_deallocate_sep_shared_area; 3991 + 3992 + /* The new chip requires a shared area reconfigure */ 3993 + if (sep->pdev->revision != 0) { /* Only for new chip */ 3994 + error = sep_reconfig_shared_area(sep); 3995 + if 
(error) 3996 + goto end_function_free_irq; 3997 + } 3998 + 3999 + sep->in_use = 1; 4000 + 4001 + /* Finally magic up the device nodes */ 4002 + /* Register driver with the fs */ 4003 + error = sep_register_driver_with_fs(sep); 4004 + if (error) 4005 + goto end_function_free_irq; 4006 + 4007 + #ifdef SEP_ENABLE_RUNTIME_PM 4008 + pm_runtime_put_noidle(&sep->pdev->dev); 4009 + pm_runtime_allow(&sep->pdev->dev); 4010 + pm_runtime_set_autosuspend_delay(&sep->pdev->dev, 4011 + SUSPEND_DELAY); 4012 + pm_runtime_use_autosuspend(&sep->pdev->dev); 4013 + sep->power_save_setup = 1; 4014 + #endif 4015 + 4016 + sep->in_use = 0; 4017 + 4018 + /* register kernel crypto driver */ 4019 + error = sep_crypto_setup(); 4020 + if (error) { 4021 + dev_dbg(&sep->pdev->dev, "crypto setup fail\n"); 4022 + goto end_function_free_irq; 4023 + } 4024 + 4025 + goto end_function; 4026 + 4027 + end_function_free_irq: 4028 + free_irq(pdev->irq, sep); 4029 + 4030 + end_function_deallocate_sep_shared_area: 4031 + /* De-allocate shared area */ 4032 + sep_unmap_and_free_shared_area(sep); 4033 + 4034 + end_function_error: 4035 + iounmap(sep->reg_addr); 4036 + 4037 + end_function_free_sep_dev: 4038 + pci_dev_put(sep_dev->pdev); 4039 + kfree(sep_dev); 4040 + sep_dev = NULL; 4041 + 4042 + end_function_disable_device: 4043 + pci_disable_device(pdev); 4044 + 4045 + end_function: 4046 + return error; 4047 + } 4048 + 4049 + /** 4050 + * sep_remove - handles removing device from pci subsystem 4051 + * @pdev: pointer to pci device 4052 + * 4053 + * This function will handle removing our sep device from pci subsystem on exit 4054 + * or unloading this module. It should free up all used resources, and unmap if 4055 + * any memory regions mapped. 
4056 + */ 4057 + static void sep_remove(struct pci_dev *pdev) 4058 + { 4059 + struct sep_device *sep = sep_dev; 4060 + 4061 + /* Unregister from fs */ 4062 + misc_deregister(&sep->miscdev_sep); 4063 + 4064 + /* Unregister from kernel crypto */ 4065 + sep_crypto_takedown(); 4066 + 4067 + /* Free the irq */ 4068 + free_irq(sep->pdev->irq, sep); 4069 + 4070 + /* Free the shared area */ 4071 + sep_unmap_and_free_shared_area(sep_dev); 4072 + iounmap((void __iomem *)sep_dev->reg_addr); 4073 + 4074 + #ifdef SEP_ENABLE_RUNTIME_PM 4075 + if (sep->in_use) { 4076 + sep->in_use = 0; 4077 + pm_runtime_forbid(&sep->pdev->dev); 4078 + pm_runtime_get_noresume(&sep->pdev->dev); 4079 + } 4080 + #endif 4081 + pci_dev_put(sep_dev->pdev); 4082 + kfree(sep_dev); 4083 + sep_dev = NULL; 4084 + } 4085 + 4086 + /* Initialize struct pci_device_id for our driver */ 4087 + static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = { 4088 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)}, 4089 + {0} 4090 + }; 4091 + 4092 + /* Export our pci_device_id structure to user space */ 4093 + MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl); 4094 + 4095 + #ifdef SEP_ENABLE_RUNTIME_PM 4096 + 4097 + /** 4098 + * sep_pm_resume - rsume routine while waking up from S3 state 4099 + * @dev: pointer to sep device 4100 + * 4101 + * This function is to be used to wake up sep driver while system awakes from S3 4102 + * state i.e. suspend to ram. The RAM in intact. 4103 + * Notes - revisit with more understanding of pm, ICR/IMR & counters. 
4104 + */ 4105 + static int sep_pci_resume(struct device *dev) 4106 + { 4107 + struct sep_device *sep = sep_dev; 4108 + 4109 + dev_dbg(&sep->pdev->dev, "pci resume called\n"); 4110 + 4111 + if (sep->power_state == SEP_DRIVER_POWERON) 4112 + return 0; 4113 + 4114 + /* Clear ICR register */ 4115 + sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF); 4116 + 4117 + /* Set the IMR register - open only GPR 2 */ 4118 + sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13))); 4119 + 4120 + /* Read send/receive counters from SEP */ 4121 + sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); 4122 + sep->reply_ct &= 0x3FFFFFFF; 4123 + sep->send_ct = sep->reply_ct; 4124 + 4125 + sep->power_state = SEP_DRIVER_POWERON; 4126 + 4127 + return 0; 4128 + } 4129 + 4130 + /** 4131 + * sep_pm_suspend - suspend routine while going to S3 state 4132 + * @dev: pointer to sep device 4133 + * 4134 + * This function is to be used to suspend sep driver while system goes to S3 4135 + * state i.e. suspend to ram. The RAM in intact and ON during this suspend. 
4136 + * Notes - revisit with more understanding of pm, ICR/IMR 4137 + */ 4138 + static int sep_pci_suspend(struct device *dev) 4139 + { 4140 + struct sep_device *sep = sep_dev; 4141 + 4142 + dev_dbg(&sep->pdev->dev, "pci suspend called\n"); 4143 + if (sep->in_use == 1) 4144 + return -EAGAIN; 4145 + 4146 + sep->power_state = SEP_DRIVER_POWEROFF; 4147 + 4148 + /* Clear ICR register */ 4149 + sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF); 4150 + 4151 + /* Set the IMR to block all */ 4152 + sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF); 4153 + 4154 + return 0; 4155 + } 4156 + 4157 + /** 4158 + * sep_pm_runtime_resume - runtime resume routine 4159 + * @dev: pointer to sep device 4160 + * 4161 + * Notes - revisit with more understanding of pm, ICR/IMR & counters 4162 + */ 4163 + static int sep_pm_runtime_resume(struct device *dev) 4164 + { 4165 + 4166 + u32 retval2; 4167 + u32 delay_count; 4168 + struct sep_device *sep = sep_dev; 4169 + 4170 + dev_dbg(&sep->pdev->dev, "pm runtime resume called\n"); 4171 + 4172 + /** 4173 + * Wait until the SCU boot is ready 4174 + * This is done by iterating SCU_DELAY_ITERATION (10 4175 + * microseconds each) up to SCU_DELAY_MAX (50) times. 
4176 + * This bit can be set in a random time that is less 4177 + * than 500 microseconds after each power resume 4178 + */ 4179 + retval2 = 0; 4180 + delay_count = 0; 4181 + while ((!retval2) && (delay_count < SCU_DELAY_MAX)) { 4182 + retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR); 4183 + retval2 &= 0x00000008; 4184 + if (!retval2) { 4185 + udelay(SCU_DELAY_ITERATION); 4186 + delay_count += 1; 4187 + } 4188 + } 4189 + 4190 + if (!retval2) { 4191 + dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n"); 4192 + return -EINVAL; 4193 + } 4194 + 4195 + /* Clear ICR register */ 4196 + sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF); 4197 + 4198 + /* Set the IMR register - open only GPR 2 */ 4199 + sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13))); 4200 + 4201 + /* Read send/receive counters from SEP */ 4202 + sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); 4203 + sep->reply_ct &= 0x3FFFFFFF; 4204 + sep->send_ct = sep->reply_ct; 4205 + 4206 + return 0; 4207 + } 4208 + 4209 + /** 4210 + * sep_pm_runtime_suspend - runtime suspend routine 4211 + * @dev: pointer to sep device 4212 + * 4213 + * Notes - revisit with more understanding of pm 4214 + */ 4215 + static int sep_pm_runtime_suspend(struct device *dev) 4216 + { 4217 + struct sep_device *sep = sep_dev; 4218 + 4219 + dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n"); 4220 + 4221 + /* Clear ICR register */ 4222 + sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF); 4223 + return 0; 4224 + } 4225 + 4226 + /** 4227 + * sep_pm - power management for sep driver 4228 + * @sep_pm_runtime_resume: resume- no communication with cpu & main memory 4229 + * @sep_pm_runtime_suspend: suspend- no communication with cpu & main memory 4230 + * @sep_pci_suspend: suspend - main memory is still ON 4231 + * @sep_pci_resume: resume - main meory is still ON 4232 + */ 4233 + static const struct dev_pm_ops sep_pm = { 4234 + .runtime_resume = sep_pm_runtime_resume, 4235 + .runtime_suspend = 
sep_pm_runtime_suspend, 4236 + .resume = sep_pci_resume, 4237 + .suspend = sep_pci_suspend, 4238 + }; 4239 + #endif /* SEP_ENABLE_RUNTIME_PM */ 4240 + 4241 + /** 4242 + * sep_pci_driver - registers this device with pci subsystem 4243 + * @name: name identifier for this driver 4244 + * @sep_pci_id_tbl: pointer to struct pci_device_id table 4245 + * @sep_probe: pointer to probe function in PCI driver 4246 + * @sep_remove: pointer to remove function in PCI driver 4247 + */ 4248 + static struct pci_driver sep_pci_driver = { 4249 + #ifdef SEP_ENABLE_RUNTIME_PM 4250 + .driver = { 4251 + .pm = &sep_pm, 4252 + }, 4253 + #endif 4254 + .name = "sep_sec_driver", 4255 + .id_table = sep_pci_id_tbl, 4256 + .probe = sep_probe, 4257 + .remove = sep_remove 4258 + }; 4259 + 4260 + /** 4261 + * sep_init - init function 4262 + * 4263 + * Module load time. Register the PCI device driver. 4264 + */ 4265 + 4266 + static int __init sep_init(void) 4267 + { 4268 + return pci_register_driver(&sep_pci_driver); 4269 + } 4270 + 4271 + 4272 + /** 4273 + * sep_exit - called to unload driver 4274 + * 4275 + * Unregister the driver The device will perform all the cleanup required. 4276 + */ 4277 + static void __exit sep_exit(void) 4278 + { 4279 + pci_unregister_driver(&sep_pci_driver); 4280 + } 4281 + 4282 + 4283 + module_init(sep_init); 4284 + module_exit(sep_exit); 4285 + 4286 + MODULE_LICENSE("GPL");
+188
drivers/staging/sep/sep_trace_events.h
··· 1 + /* 2 + * If TRACE_SYSTEM is defined, that will be the directory created 3 + * in the ftrace directory under /sys/kernel/debug/tracing/events/<system> 4 + * 5 + * The define_trace.h below will also look for a file name of 6 + * TRACE_SYSTEM.h where TRACE_SYSTEM is what is defined here. 7 + * In this case, it would look for sample.h 8 + * 9 + * If the header name will be different than the system name 10 + * (as in this case), then you can override the header name that 11 + * define_trace.h will look up by defining TRACE_INCLUDE_FILE 12 + * 13 + * This file is called trace-events-sample.h but we want the system 14 + * to be called "sample". Therefore we must define the name of this 15 + * file: 16 + * 17 + * #define TRACE_INCLUDE_FILE trace-events-sample 18 + * 19 + * As we do an the bottom of this file. 20 + * 21 + * Notice that TRACE_SYSTEM should be defined outside of #if 22 + * protection, just like TRACE_INCLUDE_FILE. 23 + */ 24 + #undef TRACE_SYSTEM 25 + #define TRACE_SYSTEM sep 26 + 27 + /* 28 + * Notice that this file is not protected like a normal header. 29 + * We also must allow for rereading of this file. The 30 + * 31 + * || defined(TRACE_HEADER_MULTI_READ) 32 + * 33 + * serves this purpose. 34 + */ 35 + #if !defined(_TRACE_SEP_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ) 36 + #define _TRACE_SEP_EVENTS_H 37 + 38 + #ifdef SEP_PERF_DEBUG 39 + #define SEP_TRACE_FUNC_IN() trace_sep_func_start(__func__, 0) 40 + #define SEP_TRACE_FUNC_OUT(branch) trace_sep_func_end(__func__, branch) 41 + #define SEP_TRACE_EVENT(branch) trace_sep_misc_event(__func__, branch) 42 + #else 43 + #define SEP_TRACE_FUNC_IN() 44 + #define SEP_TRACE_FUNC_OUT(branch) 45 + #define SEP_TRACE_EVENT(branch) 46 + #endif 47 + 48 + 49 + /* 50 + * All trace headers should include tracepoint.h, until we finally 51 + * make it into a standard header. 52 + */ 53 + #include <linux/tracepoint.h> 54 + 55 + /* 56 + * The TRACE_EVENT macro is broken up into 5 parts. 
57 + * 58 + * name: name of the trace point. This is also how to enable the tracepoint. 59 + * A function called trace_foo_bar() will be created. 60 + * 61 + * proto: the prototype of the function trace_foo_bar() 62 + * Here it is trace_foo_bar(char *foo, int bar). 63 + * 64 + * args: must match the arguments in the prototype. 65 + * Here it is simply "foo, bar". 66 + * 67 + * struct: This defines the way the data will be stored in the ring buffer. 68 + * There are currently two types of elements. __field and __array. 69 + * a __field is broken up into (type, name). Where type can be any 70 + * type but an array. 71 + * For an array. there are three fields. (type, name, size). The 72 + * type of elements in the array, the name of the field and the size 73 + * of the array. 74 + * 75 + * __array( char, foo, 10) is the same as saying char foo[10]. 76 + * 77 + * fast_assign: This is a C like function that is used to store the items 78 + * into the ring buffer. 79 + * 80 + * printk: This is a way to print out the data in pretty print. This is 81 + * useful if the system crashes and you are logging via a serial line, 82 + * the data can be printed to the console using this "printk" method. 83 + * 84 + * Note, that for both the assign and the printk, __entry is the handler 85 + * to the data structure in the ring buffer, and is defined by the 86 + * TP_STRUCT__entry. 
87 + */ 88 + TRACE_EVENT(sep_func_start, 89 + 90 + TP_PROTO(const char *name, int branch), 91 + 92 + TP_ARGS(name, branch), 93 + 94 + TP_STRUCT__entry( 95 + __array(char, name, 20) 96 + __field(int, branch) 97 + ), 98 + 99 + TP_fast_assign( 100 + strncpy(__entry->name, name, 20); 101 + __entry->branch = branch; 102 + ), 103 + 104 + TP_printk("func_start %s %d", __entry->name, __entry->branch) 105 + ); 106 + 107 + TRACE_EVENT(sep_func_end, 108 + 109 + TP_PROTO(const char *name, int branch), 110 + 111 + TP_ARGS(name, branch), 112 + 113 + TP_STRUCT__entry( 114 + __array(char, name, 20) 115 + __field(int, branch) 116 + ), 117 + 118 + TP_fast_assign( 119 + strncpy(__entry->name, name, 20); 120 + __entry->branch = branch; 121 + ), 122 + 123 + TP_printk("func_end %s %d", __entry->name, __entry->branch) 124 + ); 125 + 126 + TRACE_EVENT(sep_misc_event, 127 + 128 + TP_PROTO(const char *name, int branch), 129 + 130 + TP_ARGS(name, branch), 131 + 132 + TP_STRUCT__entry( 133 + __array(char, name, 20) 134 + __field(int, branch) 135 + ), 136 + 137 + TP_fast_assign( 138 + strncpy(__entry->name, name, 20); 139 + __entry->branch = branch; 140 + ), 141 + 142 + TP_printk("misc_event %s %d", __entry->name, __entry->branch) 143 + ); 144 + 145 + 146 + #endif 147 + 148 + /***** NOTICE! The #if protection ends here. *****/ 149 + 150 + 151 + /* 152 + * There are several ways I could have done this. If I left out the 153 + * TRACE_INCLUDE_PATH, then it would default to the kernel source 154 + * include/trace/events directory. 155 + * 156 + * I could specify a path from the define_trace.h file back to this 157 + * file. 
158 + * 159 + * #define TRACE_INCLUDE_PATH ../../samples/trace_events 160 + * 161 + * But the safest and easiest way to simply make it use the directory 162 + * that the file is in is to add in the Makefile: 163 + * 164 + * CFLAGS_trace-events-sample.o := -I$(src) 165 + * 166 + * This will make sure the current path is part of the include 167 + * structure for our file so that define_trace.h can find it. 168 + * 169 + * I could have made only the top level directory the include: 170 + * 171 + * CFLAGS_trace-events-sample.o := -I$(PWD) 172 + * 173 + * And then let the path to this directory be the TRACE_INCLUDE_PATH: 174 + * 175 + * #define TRACE_INCLUDE_PATH samples/trace_events 176 + * 177 + * But then if something defines "samples" or "trace_events" as a macro 178 + * then we could risk that being converted too, and give us an unexpected 179 + * result. 180 + */ 181 + #undef TRACE_INCLUDE_PATH 182 + #undef TRACE_INCLUDE_FILE 183 + #define TRACE_INCLUDE_PATH . 184 + /* 185 + * TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal 186 + */ 187 + #define TRACE_INCLUDE_FILE sep_trace_events 188 + #include <trace/define_trace.h>