Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/zcrypt: add multi domain support

Currently the ap infrastructure only supports one domain at a time.
This feature extends the generic cryptographic device driver to
support multiple cryptographic domains simultaneously.

There are now card and queue devices on the AP bus with independent
card and queue drivers. The new /sys layout is as follows:

/sys/bus/ap
devices
<xx>.<yyyy> -> ../../../devices/ap/card<xx>/<xx>.<yyyy>
...
card<xx> -> ../../../devices/ap/card<xx>
...
drivers
<drv>card
card<xx> -> ../../../../devices/ap/card<xx>
<drv>queue
<xx>.<yyyy> -> ../../../../devices/ap/card<xx>/<xx>.<yyyy>
...

/sys/devices/ap
card<xx>
<xx>.<yyyy>
driver -> ../../../../bus/ap/drivers/<zzz>queue
...
driver -> ../../../bus/ap/drivers/<drv>card
...

The two digit <xx> field is the card number, the four digit <yyyy>
field is the queue number and <drv> is the name of the device driver,
e.g. "cex4".

For compatibility with the old layout, /sys/bus/ap/card<xx> has to
continue to exist, including the attributes that used to reside there.

With additional contributions from Harald Freudenberger and
Martin Schwidefsky.

Signed-off-by: Ingo Tuchscherer <ingo.tuchscherer@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

authored by

Ingo Tuchscherer and committed by
Martin Schwidefsky
e28d2af4 34a15167

+2795 -2017
+3 -2
drivers/s390/crypto/Makefile
··· 2 2 # S/390 crypto devices 3 3 # 4 4 5 - ap-objs := ap_bus.o 5 + ap-objs := ap_bus.o ap_card.o ap_queue.o 6 6 obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o 7 7 # zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o 8 - zcrypt-objs := zcrypt_api.o zcrypt_msgtype6.o zcrypt_msgtype50.o 8 + zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o 9 + zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o 9 10 obj-$(CONFIG_ZCRYPT) += zcrypt.o 10 11 # adapter drivers depend on ap.o and zcrypt.o 11 12 obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
+269 -820
drivers/s390/crypto/ap_bus.c
··· 46 46 #include <linux/ktime.h> 47 47 #include <asm/facility.h> 48 48 #include <linux/crypto.h> 49 + #include <linux/mod_devicetable.h> 49 50 50 51 #include "ap_bus.h" 51 52 #include "ap_asm.h" ··· 73 72 module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP); 74 73 MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); 75 74 76 - static struct device *ap_root_device = NULL; 75 + static struct device *ap_root_device; 76 + 77 + DEFINE_SPINLOCK(ap_list_lock); 78 + LIST_HEAD(ap_card_list); 79 + 77 80 static struct ap_config_info *ap_configuration; 78 - static DEFINE_SPINLOCK(ap_device_list_lock); 79 - static LIST_HEAD(ap_device_list); 80 81 static bool initialised; 81 82 82 83 /* ··· 130 127 static inline int ap_using_interrupts(void) 131 128 { 132 129 return ap_airq_flag; 130 + } 131 + 132 + /** 133 + * ap_airq_ptr() - Get the address of the adapter interrupt indicator 134 + * 135 + * Returns the address of the local-summary-indicator of the adapter 136 + * interrupt handler for AP, or NULL if adapter interrupts are not 137 + * available. 138 + */ 139 + void *ap_airq_ptr(void) 140 + { 141 + if (ap_using_interrupts()) 142 + return ap_airq.lsi_ptr; 143 + return NULL; 133 144 } 134 145 135 146 /** ··· 247 230 } 248 231 249 232 /** 250 - * ap_queue_enable_interruption(): Enable interruption on an AP. 251 - * @qid: The AP queue number 252 - * @ind: the notification indicator byte 253 - * 254 - * Enables interruption on AP queue via ap_aqic(). Based on the return 255 - * value it waits a while and tests the AP queue if interrupts 256 - * have been switched on using ap_test_queue(). 
257 - */ 258 - static int ap_queue_enable_interruption(struct ap_device *ap_dev, void *ind) 259 - { 260 - struct ap_queue_status status; 261 - 262 - status = ap_aqic(ap_dev->qid, ind); 263 - switch (status.response_code) { 264 - case AP_RESPONSE_NORMAL: 265 - case AP_RESPONSE_OTHERWISE_CHANGED: 266 - return 0; 267 - case AP_RESPONSE_Q_NOT_AVAIL: 268 - case AP_RESPONSE_DECONFIGURED: 269 - case AP_RESPONSE_CHECKSTOPPED: 270 - case AP_RESPONSE_INVALID_ADDRESS: 271 - pr_err("Registering adapter interrupts for AP %d failed\n", 272 - AP_QID_DEVICE(ap_dev->qid)); 273 - return -EOPNOTSUPP; 274 - case AP_RESPONSE_RESET_IN_PROGRESS: 275 - case AP_RESPONSE_BUSY: 276 - default: 277 - return -EBUSY; 278 - } 279 - } 280 - 281 - /** 282 - * __ap_send(): Send message to adjunct processor queue. 283 - * @qid: The AP queue number 284 - * @psmid: The program supplied message identifier 285 - * @msg: The message text 286 - * @length: The message length 287 - * @special: Special Bit 288 - * 289 - * Returns AP queue status structure. 290 - * Condition code 1 on NQAP can't happen because the L bit is 1. 291 - * Condition code 2 on NQAP also means the send is incomplete, 292 - * because a segment boundary was reached. The NQAP is repeated. 293 - */ 294 - static inline struct ap_queue_status 295 - __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length, 296 - unsigned int special) 297 - { 298 - if (special == 1) 299 - qid |= 0x400000UL; 300 - return ap_nqap(qid, psmid, msg, length); 301 - } 302 - 303 - int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) 304 - { 305 - struct ap_queue_status status; 306 - 307 - status = __ap_send(qid, psmid, msg, length, 0); 308 - switch (status.response_code) { 309 - case AP_RESPONSE_NORMAL: 310 - return 0; 311 - case AP_RESPONSE_Q_FULL: 312 - case AP_RESPONSE_RESET_IN_PROGRESS: 313 - return -EBUSY; 314 - case AP_RESPONSE_REQ_FAC_NOT_INST: 315 - return -EINVAL; 316 - default: /* Device is gone. 
*/ 317 - return -ENODEV; 318 - } 319 - } 320 - EXPORT_SYMBOL(ap_send); 321 - 322 - int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) 323 - { 324 - struct ap_queue_status status; 325 - 326 - if (msg == NULL) 327 - return -EINVAL; 328 - status = ap_dqap(qid, psmid, msg, length); 329 - switch (status.response_code) { 330 - case AP_RESPONSE_NORMAL: 331 - return 0; 332 - case AP_RESPONSE_NO_PENDING_REPLY: 333 - if (status.queue_empty) 334 - return -ENOENT; 335 - return -EBUSY; 336 - case AP_RESPONSE_RESET_IN_PROGRESS: 337 - return -EBUSY; 338 - default: 339 - return -ENODEV; 340 - } 341 - } 342 - EXPORT_SYMBOL(ap_recv); 343 - 344 - /** 345 233 * ap_query_queue(): Check if an AP queue is available. 346 234 * @qid: The AP queue number 347 235 * @queue_depth: Pointer to queue depth value ··· 260 338 unsigned long info; 261 339 int nd; 262 340 263 - if (!ap_test_config_card_id(AP_QID_DEVICE(qid))) 341 + if (!ap_test_config_card_id(AP_QID_CARD(qid))) 264 342 return -ENODEV; 265 343 266 344 status = ap_test_queue(qid, &info); ··· 288 366 } 289 367 } 290 368 291 - /* State machine definitions and helpers */ 292 - 293 - static void ap_sm_wait(enum ap_wait wait) 369 + void ap_wait(enum ap_wait wait) 294 370 { 295 371 ktime_t hr_time; 296 372 ··· 317 397 } 318 398 } 319 399 320 - static enum ap_wait ap_sm_nop(struct ap_device *ap_dev) 321 - { 322 - return AP_WAIT_NONE; 323 - } 324 - 325 - /** 326 - * ap_sm_recv(): Receive pending reply messages from an AP device but do 327 - * not change the state of the device. 
328 - * @ap_dev: pointer to the AP device 329 - * 330 - * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 331 - */ 332 - static struct ap_queue_status ap_sm_recv(struct ap_device *ap_dev) 333 - { 334 - struct ap_queue_status status; 335 - struct ap_message *ap_msg; 336 - 337 - status = ap_dqap(ap_dev->qid, &ap_dev->reply->psmid, 338 - ap_dev->reply->message, ap_dev->reply->length); 339 - switch (status.response_code) { 340 - case AP_RESPONSE_NORMAL: 341 - ap_dev->queue_count--; 342 - if (ap_dev->queue_count > 0) 343 - mod_timer(&ap_dev->timeout, 344 - jiffies + ap_dev->drv->request_timeout); 345 - list_for_each_entry(ap_msg, &ap_dev->pendingq, list) { 346 - if (ap_msg->psmid != ap_dev->reply->psmid) 347 - continue; 348 - list_del_init(&ap_msg->list); 349 - ap_dev->pendingq_count--; 350 - ap_msg->receive(ap_dev, ap_msg, ap_dev->reply); 351 - break; 352 - } 353 - case AP_RESPONSE_NO_PENDING_REPLY: 354 - if (!status.queue_empty || ap_dev->queue_count <= 0) 355 - break; 356 - /* The card shouldn't forget requests but who knows. */ 357 - ap_dev->queue_count = 0; 358 - list_splice_init(&ap_dev->pendingq, &ap_dev->requestq); 359 - ap_dev->requestq_count += ap_dev->pendingq_count; 360 - ap_dev->pendingq_count = 0; 361 - break; 362 - default: 363 - break; 364 - } 365 - return status; 366 - } 367 - 368 - /** 369 - * ap_sm_read(): Receive pending reply messages from an AP device. 
370 - * @ap_dev: pointer to the AP device 371 - * 372 - * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 373 - */ 374 - static enum ap_wait ap_sm_read(struct ap_device *ap_dev) 375 - { 376 - struct ap_queue_status status; 377 - 378 - if (!ap_dev->reply) 379 - return AP_WAIT_NONE; 380 - status = ap_sm_recv(ap_dev); 381 - switch (status.response_code) { 382 - case AP_RESPONSE_NORMAL: 383 - if (ap_dev->queue_count > 0) { 384 - ap_dev->state = AP_STATE_WORKING; 385 - return AP_WAIT_AGAIN; 386 - } 387 - ap_dev->state = AP_STATE_IDLE; 388 - return AP_WAIT_NONE; 389 - case AP_RESPONSE_NO_PENDING_REPLY: 390 - if (ap_dev->queue_count > 0) 391 - return AP_WAIT_INTERRUPT; 392 - ap_dev->state = AP_STATE_IDLE; 393 - return AP_WAIT_NONE; 394 - default: 395 - ap_dev->state = AP_STATE_BORKED; 396 - return AP_WAIT_NONE; 397 - } 398 - } 399 - 400 - /** 401 - * ap_sm_suspend_read(): Receive pending reply messages from an AP device 402 - * without changing the device state in between. In suspend mode we don't 403 - * allow sending new requests, therefore just fetch pending replies. 404 - * @ap_dev: pointer to the AP device 405 - * 406 - * Returns AP_WAIT_NONE or AP_WAIT_AGAIN 407 - */ 408 - static enum ap_wait ap_sm_suspend_read(struct ap_device *ap_dev) 409 - { 410 - struct ap_queue_status status; 411 - 412 - if (!ap_dev->reply) 413 - return AP_WAIT_NONE; 414 - status = ap_sm_recv(ap_dev); 415 - switch (status.response_code) { 416 - case AP_RESPONSE_NORMAL: 417 - if (ap_dev->queue_count > 0) 418 - return AP_WAIT_AGAIN; 419 - /* fall through */ 420 - default: 421 - return AP_WAIT_NONE; 422 - } 423 - } 424 - 425 - /** 426 - * ap_sm_write(): Send messages from the request queue to an AP device. 
427 - * @ap_dev: pointer to the AP device 428 - * 429 - * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 430 - */ 431 - static enum ap_wait ap_sm_write(struct ap_device *ap_dev) 432 - { 433 - struct ap_queue_status status; 434 - struct ap_message *ap_msg; 435 - 436 - if (ap_dev->requestq_count <= 0) 437 - return AP_WAIT_NONE; 438 - /* Start the next request on the queue. */ 439 - ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list); 440 - status = __ap_send(ap_dev->qid, ap_msg->psmid, 441 - ap_msg->message, ap_msg->length, ap_msg->special); 442 - switch (status.response_code) { 443 - case AP_RESPONSE_NORMAL: 444 - ap_dev->queue_count++; 445 - if (ap_dev->queue_count == 1) 446 - mod_timer(&ap_dev->timeout, 447 - jiffies + ap_dev->drv->request_timeout); 448 - list_move_tail(&ap_msg->list, &ap_dev->pendingq); 449 - ap_dev->requestq_count--; 450 - ap_dev->pendingq_count++; 451 - if (ap_dev->queue_count < ap_dev->queue_depth) { 452 - ap_dev->state = AP_STATE_WORKING; 453 - return AP_WAIT_AGAIN; 454 - } 455 - /* fall through */ 456 - case AP_RESPONSE_Q_FULL: 457 - ap_dev->state = AP_STATE_QUEUE_FULL; 458 - return AP_WAIT_INTERRUPT; 459 - case AP_RESPONSE_RESET_IN_PROGRESS: 460 - ap_dev->state = AP_STATE_RESET_WAIT; 461 - return AP_WAIT_TIMEOUT; 462 - case AP_RESPONSE_MESSAGE_TOO_BIG: 463 - case AP_RESPONSE_REQ_FAC_NOT_INST: 464 - list_del_init(&ap_msg->list); 465 - ap_dev->requestq_count--; 466 - ap_msg->rc = -EINVAL; 467 - ap_msg->receive(ap_dev, ap_msg, NULL); 468 - return AP_WAIT_AGAIN; 469 - default: 470 - ap_dev->state = AP_STATE_BORKED; 471 - return AP_WAIT_NONE; 472 - } 473 - } 474 - 475 - /** 476 - * ap_sm_read_write(): Send and receive messages to/from an AP device. 
477 - * @ap_dev: pointer to the AP device 478 - * 479 - * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 480 - */ 481 - static enum ap_wait ap_sm_read_write(struct ap_device *ap_dev) 482 - { 483 - return min(ap_sm_read(ap_dev), ap_sm_write(ap_dev)); 484 - } 485 - 486 - /** 487 - * ap_sm_reset(): Reset an AP queue. 488 - * @qid: The AP queue number 489 - * 490 - * Submit the Reset command to an AP queue. 491 - */ 492 - static enum ap_wait ap_sm_reset(struct ap_device *ap_dev) 493 - { 494 - struct ap_queue_status status; 495 - 496 - status = ap_rapq(ap_dev->qid); 497 - switch (status.response_code) { 498 - case AP_RESPONSE_NORMAL: 499 - case AP_RESPONSE_RESET_IN_PROGRESS: 500 - ap_dev->state = AP_STATE_RESET_WAIT; 501 - ap_dev->interrupt = AP_INTR_DISABLED; 502 - return AP_WAIT_TIMEOUT; 503 - case AP_RESPONSE_BUSY: 504 - return AP_WAIT_TIMEOUT; 505 - case AP_RESPONSE_Q_NOT_AVAIL: 506 - case AP_RESPONSE_DECONFIGURED: 507 - case AP_RESPONSE_CHECKSTOPPED: 508 - default: 509 - ap_dev->state = AP_STATE_BORKED; 510 - return AP_WAIT_NONE; 511 - } 512 - } 513 - 514 - /** 515 - * ap_sm_reset_wait(): Test queue for completion of the reset operation 516 - * @ap_dev: pointer to the AP device 517 - * 518 - * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0. 519 - */ 520 - static enum ap_wait ap_sm_reset_wait(struct ap_device *ap_dev) 521 - { 522 - struct ap_queue_status status; 523 - unsigned long info; 524 - 525 - if (ap_dev->queue_count > 0 && ap_dev->reply) 526 - /* Try to read a completed message and get the status */ 527 - status = ap_sm_recv(ap_dev); 528 - else 529 - /* Get the status with TAPQ */ 530 - status = ap_test_queue(ap_dev->qid, &info); 531 - 532 - switch (status.response_code) { 533 - case AP_RESPONSE_NORMAL: 534 - if (ap_using_interrupts() && 535 - ap_queue_enable_interruption(ap_dev, 536 - ap_airq.lsi_ptr) == 0) 537 - ap_dev->state = AP_STATE_SETIRQ_WAIT; 538 - else 539 - ap_dev->state = (ap_dev->queue_count > 0) ? 
540 - AP_STATE_WORKING : AP_STATE_IDLE; 541 - return AP_WAIT_AGAIN; 542 - case AP_RESPONSE_BUSY: 543 - case AP_RESPONSE_RESET_IN_PROGRESS: 544 - return AP_WAIT_TIMEOUT; 545 - case AP_RESPONSE_Q_NOT_AVAIL: 546 - case AP_RESPONSE_DECONFIGURED: 547 - case AP_RESPONSE_CHECKSTOPPED: 548 - default: 549 - ap_dev->state = AP_STATE_BORKED; 550 - return AP_WAIT_NONE; 551 - } 552 - } 553 - 554 - /** 555 - * ap_sm_setirq_wait(): Test queue for completion of the irq enablement 556 - * @ap_dev: pointer to the AP device 557 - * 558 - * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0. 559 - */ 560 - static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev) 561 - { 562 - struct ap_queue_status status; 563 - unsigned long info; 564 - 565 - if (ap_dev->queue_count > 0 && ap_dev->reply) 566 - /* Try to read a completed message and get the status */ 567 - status = ap_sm_recv(ap_dev); 568 - else 569 - /* Get the status with TAPQ */ 570 - status = ap_test_queue(ap_dev->qid, &info); 571 - 572 - if (status.int_enabled == 1) { 573 - /* Irqs are now enabled */ 574 - ap_dev->interrupt = AP_INTR_ENABLED; 575 - ap_dev->state = (ap_dev->queue_count > 0) ? 
576 - AP_STATE_WORKING : AP_STATE_IDLE; 577 - } 578 - 579 - switch (status.response_code) { 580 - case AP_RESPONSE_NORMAL: 581 - if (ap_dev->queue_count > 0) 582 - return AP_WAIT_AGAIN; 583 - /* fallthrough */ 584 - case AP_RESPONSE_NO_PENDING_REPLY: 585 - return AP_WAIT_TIMEOUT; 586 - default: 587 - ap_dev->state = AP_STATE_BORKED; 588 - return AP_WAIT_NONE; 589 - } 590 - } 591 - 592 - /* 593 - * AP state machine jump table 594 - */ 595 - static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = { 596 - [AP_STATE_RESET_START] = { 597 - [AP_EVENT_POLL] = ap_sm_reset, 598 - [AP_EVENT_TIMEOUT] = ap_sm_nop, 599 - }, 600 - [AP_STATE_RESET_WAIT] = { 601 - [AP_EVENT_POLL] = ap_sm_reset_wait, 602 - [AP_EVENT_TIMEOUT] = ap_sm_nop, 603 - }, 604 - [AP_STATE_SETIRQ_WAIT] = { 605 - [AP_EVENT_POLL] = ap_sm_setirq_wait, 606 - [AP_EVENT_TIMEOUT] = ap_sm_nop, 607 - }, 608 - [AP_STATE_IDLE] = { 609 - [AP_EVENT_POLL] = ap_sm_write, 610 - [AP_EVENT_TIMEOUT] = ap_sm_nop, 611 - }, 612 - [AP_STATE_WORKING] = { 613 - [AP_EVENT_POLL] = ap_sm_read_write, 614 - [AP_EVENT_TIMEOUT] = ap_sm_reset, 615 - }, 616 - [AP_STATE_QUEUE_FULL] = { 617 - [AP_EVENT_POLL] = ap_sm_read, 618 - [AP_EVENT_TIMEOUT] = ap_sm_reset, 619 - }, 620 - [AP_STATE_SUSPEND_WAIT] = { 621 - [AP_EVENT_POLL] = ap_sm_suspend_read, 622 - [AP_EVENT_TIMEOUT] = ap_sm_nop, 623 - }, 624 - [AP_STATE_BORKED] = { 625 - [AP_EVENT_POLL] = ap_sm_nop, 626 - [AP_EVENT_TIMEOUT] = ap_sm_nop, 627 - }, 628 - }; 629 - 630 - static inline enum ap_wait ap_sm_event(struct ap_device *ap_dev, 631 - enum ap_event event) 632 - { 633 - return ap_jumptable[ap_dev->state][event](ap_dev); 634 - } 635 - 636 - static inline enum ap_wait ap_sm_event_loop(struct ap_device *ap_dev, 637 - enum ap_event event) 638 - { 639 - enum ap_wait wait; 640 - 641 - while ((wait = ap_sm_event(ap_dev, event)) == AP_WAIT_AGAIN) 642 - ; 643 - return wait; 644 - } 645 - 646 400 /** 647 401 * ap_request_timeout(): Handling of request timeouts 648 402 * @data: Holds the AP 
device. 649 403 * 650 404 * Handles request timeouts. 651 405 */ 652 - static void ap_request_timeout(unsigned long data) 406 + void ap_request_timeout(unsigned long data) 653 407 { 654 - struct ap_device *ap_dev = (struct ap_device *) data; 408 + struct ap_queue *aq = (struct ap_queue *) data; 655 409 656 410 if (ap_suspend_flag) 657 411 return; 658 - spin_lock_bh(&ap_dev->lock); 659 - ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_TIMEOUT)); 660 - spin_unlock_bh(&ap_dev->lock); 412 + spin_lock_bh(&aq->lock); 413 + ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT)); 414 + spin_unlock_bh(&aq->lock); 661 415 } 662 416 663 417 /** ··· 366 772 */ 367 773 static void ap_tasklet_fn(unsigned long dummy) 368 774 { 369 - struct ap_device *ap_dev; 775 + struct ap_card *ac; 776 + struct ap_queue *aq; 370 777 enum ap_wait wait = AP_WAIT_NONE; 371 778 372 779 /* Reset the indicator if interrupts are used. Thus new interrupts can ··· 377 782 if (ap_using_interrupts()) 378 783 xchg(ap_airq.lsi_ptr, 0); 379 784 380 - spin_lock(&ap_device_list_lock); 381 - list_for_each_entry(ap_dev, &ap_device_list, list) { 382 - spin_lock_bh(&ap_dev->lock); 383 - wait = min(wait, ap_sm_event_loop(ap_dev, AP_EVENT_POLL)); 384 - spin_unlock_bh(&ap_dev->lock); 785 + spin_lock_bh(&ap_list_lock); 786 + for_each_ap_card(ac) { 787 + for_each_ap_queue(aq, ac) { 788 + spin_lock_bh(&aq->lock); 789 + wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL)); 790 + spin_unlock_bh(&aq->lock); 791 + } 385 792 } 386 - spin_unlock(&ap_device_list_lock); 387 - ap_sm_wait(wait); 793 + spin_unlock_bh(&ap_list_lock); 794 + 795 + ap_wait(wait); 388 796 } 389 797 390 798 static int ap_pending_requests(void) 391 799 { 392 - struct ap_device *ap_dev; 393 - int id, pending = 0; 800 + struct ap_card *ac; 801 + struct ap_queue *aq; 394 802 395 - for (id = 0; pending == 0 && id < AP_DEVICES; id++) { 396 - spin_lock_bh(&ap_device_list_lock); 397 - list_for_each_entry(ap_dev, &ap_device_list, list) { 398 - spin_lock_bh(&ap_dev->lock); 399 - if 
(ap_dev->queue_count) 400 - pending = 1; 401 - spin_unlock_bh(&ap_dev->lock); 402 - if (pending) 403 - break; 803 + spin_lock_bh(&ap_list_lock); 804 + for_each_ap_card(ac) { 805 + for_each_ap_queue(aq, ac) { 806 + if (aq->queue_count == 0) 807 + continue; 808 + spin_unlock_bh(&ap_list_lock); 809 + return 1; 404 810 } 405 - spin_unlock_bh(&ap_device_list_lock); 406 811 } 407 - 408 - return pending; 812 + spin_unlock_bh(&ap_list_lock); 813 + return 0; 409 814 } 410 815 411 816 /** ··· 469 874 mutex_unlock(&ap_poll_thread_mutex); 470 875 } 471 876 472 - /** 473 - * ap_queue_message(): Queue a request to an AP device. 474 - * @ap_dev: The AP device to queue the message to 475 - * @ap_msg: The message that is to be added 476 - */ 477 - void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 478 - { 479 - /* For asynchronous message handling a valid receive-callback 480 - * is required. */ 481 - BUG_ON(!ap_msg->receive); 482 - 483 - spin_lock_bh(&ap_dev->lock); 484 - /* Queue the message. */ 485 - list_add_tail(&ap_msg->list, &ap_dev->requestq); 486 - ap_dev->requestq_count++; 487 - ap_dev->total_request_count++; 488 - /* Send/receive as many request from the queue as possible. */ 489 - ap_sm_wait(ap_sm_event_loop(ap_dev, AP_EVENT_POLL)); 490 - spin_unlock_bh(&ap_dev->lock); 491 - } 492 - EXPORT_SYMBOL(ap_queue_message); 493 - 494 - /** 495 - * ap_cancel_message(): Cancel a crypto request. 496 - * @ap_dev: The AP device that has the message queued 497 - * @ap_msg: The message that is to be removed 498 - * 499 - * Cancel a crypto request. This is done by removing the request 500 - * from the device pending or request queue. Note that the 501 - * request stays on the AP queue. When it finishes the message 502 - * reply will be discarded because the psmid can't be found. 
503 - */ 504 - void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 505 - { 506 - struct ap_message *tmp; 507 - 508 - spin_lock_bh(&ap_dev->lock); 509 - if (!list_empty(&ap_msg->list)) { 510 - list_for_each_entry(tmp, &ap_dev->pendingq, list) 511 - if (tmp->psmid == ap_msg->psmid) { 512 - ap_dev->pendingq_count--; 513 - goto found; 514 - } 515 - ap_dev->requestq_count--; 516 - found: 517 - list_del_init(&ap_msg->list); 518 - } 519 - spin_unlock_bh(&ap_dev->lock); 520 - } 521 - EXPORT_SYMBOL(ap_cancel_message); 522 - 523 - /* 524 - * AP device related attributes. 525 - */ 526 - static ssize_t ap_hwtype_show(struct device *dev, 527 - struct device_attribute *attr, char *buf) 528 - { 529 - struct ap_device *ap_dev = to_ap_dev(dev); 530 - return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type); 531 - } 532 - 533 - static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); 534 - 535 - static ssize_t ap_raw_hwtype_show(struct device *dev, 536 - struct device_attribute *attr, char *buf) 537 - { 538 - struct ap_device *ap_dev = to_ap_dev(dev); 539 - 540 - return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->raw_hwtype); 541 - } 542 - 543 - static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL); 544 - 545 - static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, 546 - char *buf) 547 - { 548 - struct ap_device *ap_dev = to_ap_dev(dev); 549 - return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth); 550 - } 551 - 552 - static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); 553 - static ssize_t ap_request_count_show(struct device *dev, 554 - struct device_attribute *attr, 555 - char *buf) 556 - { 557 - struct ap_device *ap_dev = to_ap_dev(dev); 558 - int rc; 559 - 560 - spin_lock_bh(&ap_dev->lock); 561 - rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count); 562 - spin_unlock_bh(&ap_dev->lock); 563 - return rc; 564 - } 565 - 566 - static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); 567 - 568 
- static ssize_t ap_requestq_count_show(struct device *dev, 569 - struct device_attribute *attr, char *buf) 570 - { 571 - struct ap_device *ap_dev = to_ap_dev(dev); 572 - int rc; 573 - 574 - spin_lock_bh(&ap_dev->lock); 575 - rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count); 576 - spin_unlock_bh(&ap_dev->lock); 577 - return rc; 578 - } 579 - 580 - static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL); 581 - 582 - static ssize_t ap_pendingq_count_show(struct device *dev, 583 - struct device_attribute *attr, char *buf) 584 - { 585 - struct ap_device *ap_dev = to_ap_dev(dev); 586 - int rc; 587 - 588 - spin_lock_bh(&ap_dev->lock); 589 - rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count); 590 - spin_unlock_bh(&ap_dev->lock); 591 - return rc; 592 - } 593 - 594 - static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL); 595 - 596 - static ssize_t ap_reset_show(struct device *dev, 597 - struct device_attribute *attr, char *buf) 598 - { 599 - struct ap_device *ap_dev = to_ap_dev(dev); 600 - int rc = 0; 601 - 602 - spin_lock_bh(&ap_dev->lock); 603 - switch (ap_dev->state) { 604 - case AP_STATE_RESET_START: 605 - case AP_STATE_RESET_WAIT: 606 - rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n"); 607 - break; 608 - case AP_STATE_WORKING: 609 - case AP_STATE_QUEUE_FULL: 610 - rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n"); 611 - break; 612 - default: 613 - rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n"); 614 - } 615 - spin_unlock_bh(&ap_dev->lock); 616 - return rc; 617 - } 618 - 619 - static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL); 620 - 621 - static ssize_t ap_interrupt_show(struct device *dev, 622 - struct device_attribute *attr, char *buf) 623 - { 624 - struct ap_device *ap_dev = to_ap_dev(dev); 625 - int rc = 0; 626 - 627 - spin_lock_bh(&ap_dev->lock); 628 - if (ap_dev->state == AP_STATE_SETIRQ_WAIT) 629 - rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n"); 630 - else if 
(ap_dev->interrupt == AP_INTR_ENABLED) 631 - rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n"); 632 - else 633 - rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n"); 634 - spin_unlock_bh(&ap_dev->lock); 635 - return rc; 636 - } 637 - 638 - static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL); 639 - 640 - static ssize_t ap_modalias_show(struct device *dev, 641 - struct device_attribute *attr, char *buf) 642 - { 643 - return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type); 644 - } 645 - 646 - static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL); 647 - 648 - static ssize_t ap_functions_show(struct device *dev, 649 - struct device_attribute *attr, char *buf) 650 - { 651 - struct ap_device *ap_dev = to_ap_dev(dev); 652 - return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions); 653 - } 654 - 655 - static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL); 656 - 657 - static struct attribute *ap_dev_attrs[] = { 658 - &dev_attr_hwtype.attr, 659 - &dev_attr_raw_hwtype.attr, 660 - &dev_attr_depth.attr, 661 - &dev_attr_request_count.attr, 662 - &dev_attr_requestq_count.attr, 663 - &dev_attr_pendingq_count.attr, 664 - &dev_attr_reset.attr, 665 - &dev_attr_interrupt.attr, 666 - &dev_attr_modalias.attr, 667 - &dev_attr_ap_functions.attr, 668 - NULL 669 - }; 670 - static struct attribute_group ap_dev_attr_group = { 671 - .attrs = ap_dev_attrs 672 - }; 877 + #define is_card_dev(x) ((x)->parent == ap_root_device) 878 + #define is_queue_dev(x) ((x)->parent != ap_root_device) 673 879 674 880 /** 675 881 * ap_bus_match() ··· 481 1085 */ 482 1086 static int ap_bus_match(struct device *dev, struct device_driver *drv) 483 1087 { 484 - struct ap_device *ap_dev = to_ap_dev(dev); 485 1088 struct ap_driver *ap_drv = to_ap_drv(drv); 486 1089 struct ap_device_id *id; 487 1090 ··· 489 1094 * supported types of the device_driver. 
490 1095 */ 491 1096 for (id = ap_drv->ids; id->match_flags; id++) { 492 - if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) && 493 - (id->dev_type != ap_dev->device_type)) 494 - continue; 495 - return 1; 1097 + if (is_card_dev(dev) && 1098 + id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE && 1099 + id->dev_type == to_ap_dev(dev)->device_type) 1100 + return 1; 1101 + if (is_queue_dev(dev) && 1102 + id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE && 1103 + id->dev_type == to_ap_dev(dev)->device_type) 1104 + return 1; 496 1105 } 497 1106 return 0; 498 1107 } ··· 532 1133 { 533 1134 struct ap_device *ap_dev = to_ap_dev(dev); 534 1135 535 - /* Poll on the device until all requests are finished. */ 536 - spin_lock_bh(&ap_dev->lock); 537 - ap_dev->state = AP_STATE_SUSPEND_WAIT; 538 - while (ap_sm_event(ap_dev, AP_EVENT_POLL) != AP_WAIT_NONE) 539 - ; 540 - ap_dev->state = AP_STATE_BORKED; 541 - spin_unlock_bh(&ap_dev->lock); 1136 + if (ap_dev->drv && ap_dev->drv->suspend) 1137 + ap_dev->drv->suspend(ap_dev); 1138 + return 0; 1139 + } 1140 + 1141 + static int ap_dev_resume(struct device *dev) 1142 + { 1143 + struct ap_device *ap_dev = to_ap_dev(dev); 1144 + 1145 + if (ap_dev->drv && ap_dev->drv->resume) 1146 + ap_dev->drv->resume(ap_dev); 542 1147 return 0; 543 1148 } 544 1149 ··· 557 1154 tasklet_disable(&ap_tasklet); 558 1155 } 559 1156 560 - static int __ap_devices_unregister(struct device *dev, void *dummy) 1157 + static int __ap_card_devices_unregister(struct device *dev, void *dummy) 561 1158 { 562 - device_unregister(dev); 1159 + if (is_card_dev(dev)) 1160 + device_unregister(dev); 1161 + return 0; 1162 + } 1163 + 1164 + static int __ap_queue_devices_unregister(struct device *dev, void *dummy) 1165 + { 1166 + if (is_queue_dev(dev)) 1167 + device_unregister(dev); 1168 + return 0; 1169 + } 1170 + 1171 + static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data) 1172 + { 1173 + if (is_queue_dev(dev) && 1174 + AP_QID_CARD(to_ap_queue(dev)->qid) 
== (int)(long) data) 1175 + device_unregister(dev); 563 1176 return 0; 564 1177 } 565 1178 ··· 583 1164 { 584 1165 int rc; 585 1166 586 - /* Unconditionally remove all AP devices */ 587 - bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister); 1167 + /* remove all queue devices */ 1168 + bus_for_each_dev(&ap_bus_type, NULL, NULL, 1169 + __ap_queue_devices_unregister); 1170 + /* remove all card devices */ 1171 + bus_for_each_dev(&ap_bus_type, NULL, NULL, 1172 + __ap_card_devices_unregister); 1173 + 588 1174 /* Reset thin interrupt setting */ 589 1175 if (ap_interrupts_available() && !ap_using_interrupts()) { 590 1176 rc = register_adapter_interrupt(&ap_airq); ··· 631 1207 .notifier_call = ap_power_event, 632 1208 }; 633 1209 634 - static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, NULL); 1210 + static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, ap_dev_resume); 635 1211 636 1212 static struct bus_type ap_bus_type = { 637 1213 .name = "ap", ··· 639 1215 .uevent = &ap_uevent, 640 1216 .pm = &ap_bus_pm_ops, 641 1217 }; 642 - 643 - void ap_device_init_reply(struct ap_device *ap_dev, 644 - struct ap_message *reply) 645 - { 646 - ap_dev->reply = reply; 647 - 648 - spin_lock_bh(&ap_dev->lock); 649 - ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL)); 650 - spin_unlock_bh(&ap_dev->lock); 651 - } 652 - EXPORT_SYMBOL(ap_device_init_reply); 653 1218 654 1219 static int ap_device_probe(struct device *dev) 655 1220 { ··· 653 1240 return rc; 654 1241 } 655 1242 656 - /** 657 - * __ap_flush_queue(): Flush requests. 658 - * @ap_dev: Pointer to the AP device 659 - * 660 - * Flush all requests from the request/pending queue of an AP device. 
661 - */ 662 - static void __ap_flush_queue(struct ap_device *ap_dev) 663 - { 664 - struct ap_message *ap_msg, *next; 665 - 666 - list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) { 667 - list_del_init(&ap_msg->list); 668 - ap_dev->pendingq_count--; 669 - ap_msg->rc = -EAGAIN; 670 - ap_msg->receive(ap_dev, ap_msg, NULL); 671 - } 672 - list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) { 673 - list_del_init(&ap_msg->list); 674 - ap_dev->requestq_count--; 675 - ap_msg->rc = -EAGAIN; 676 - ap_msg->receive(ap_dev, ap_msg, NULL); 677 - } 678 - } 679 - 680 - void ap_flush_queue(struct ap_device *ap_dev) 681 - { 682 - spin_lock_bh(&ap_dev->lock); 683 - __ap_flush_queue(ap_dev); 684 - spin_unlock_bh(&ap_dev->lock); 685 - } 686 - EXPORT_SYMBOL(ap_flush_queue); 687 - 688 1243 static int ap_device_remove(struct device *dev) 689 1244 { 690 1245 struct ap_device *ap_dev = to_ap_dev(dev); 691 1246 struct ap_driver *ap_drv = ap_dev->drv; 692 1247 693 - ap_flush_queue(ap_dev); 694 - del_timer_sync(&ap_dev->timeout); 695 - spin_lock_bh(&ap_device_list_lock); 696 - list_del_init(&ap_dev->list); 697 - spin_unlock_bh(&ap_device_list_lock); 1248 + spin_lock_bh(&ap_list_lock); 1249 + if (is_card_dev(dev)) 1250 + list_del_init(&to_ap_card(dev)->list); 1251 + else 1252 + list_del_init(&to_ap_queue(dev)->list); 1253 + spin_unlock_bh(&ap_list_lock); 698 1254 if (ap_drv->remove) 699 1255 ap_drv->remove(ap_dev); 700 1256 return 0; 701 - } 702 - 703 - static void ap_device_release(struct device *dev) 704 - { 705 - kfree(to_ap_dev(dev)); 706 1257 } 707 1258 708 1259 int ap_driver_register(struct ap_driver *ap_drv, struct module *owner, ··· 731 1354 { 732 1355 if (!ap_configuration) /* QCI not supported */ 733 1356 return snprintf(buf, PAGE_SIZE, "not supported\n"); 734 - if (!test_facility(76)) 735 - /* format 0 - 16 bit domain field */ 736 - return snprintf(buf, PAGE_SIZE, "%08x%08x\n", 737 - ap_configuration->adm[0], 738 - ap_configuration->adm[1]); 739 - /* format 
1 - 256 bit domain field */ 1357 + 740 1358 return snprintf(buf, PAGE_SIZE, 741 1359 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 742 1360 ap_configuration->adm[0], ap_configuration->adm[1], ··· 742 1370 743 1371 static BUS_ATTR(ap_control_domain_mask, 0444, 744 1372 ap_control_domain_mask_show, NULL); 1373 + 1374 + static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf) 1375 + { 1376 + if (!ap_configuration) /* QCI not supported */ 1377 + return snprintf(buf, PAGE_SIZE, "not supported\n"); 1378 + 1379 + return snprintf(buf, PAGE_SIZE, 1380 + "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 1381 + ap_configuration->aqm[0], ap_configuration->aqm[1], 1382 + ap_configuration->aqm[2], ap_configuration->aqm[3], 1383 + ap_configuration->aqm[4], ap_configuration->aqm[5], 1384 + ap_configuration->aqm[6], ap_configuration->aqm[7]); 1385 + } 1386 + 1387 + static BUS_ATTR(ap_usage_domain_mask, 0444, 1388 + ap_usage_domain_mask_show, NULL); 745 1389 746 1390 static ssize_t ap_config_time_show(struct bus_type *bus, char *buf) 747 1391 { ··· 854 1466 static struct bus_attribute *const ap_bus_attrs[] = { 855 1467 &bus_attr_ap_domain, 856 1468 &bus_attr_ap_control_domain_mask, 1469 + &bus_attr_ap_usage_domain_mask, 857 1470 &bus_attr_config_time, 858 1471 &bus_attr_poll_thread, 859 1472 &bus_attr_ap_interrupts, ··· 913 1524 return -ENODEV; 914 1525 } 915 1526 916 - /** 917 - * __ap_scan_bus(): Scan the AP bus. 918 - * @dev: Pointer to device 919 - * @data: Pointer to data 920 - * 921 - * Scan the AP bus for new devices. 
1527 + /* 1528 + * helper function to be used with bus_find_dev 1529 + * matches for the card device with the given id 922 1530 */ 923 - static int __ap_scan_bus(struct device *dev, void *data) 1531 + static int __match_card_device_with_id(struct device *dev, void *data) 924 1532 { 925 - return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data; 1533 + return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long) data; 926 1534 } 927 1535 1536 + /* helper function to be used with bus_find_dev 1537 + * matches for the queue device with a given qid 1538 + */ 1539 + static int __match_queue_device_with_qid(struct device *dev, void *data) 1540 + { 1541 + return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data; 1542 + } 1543 + 1544 + /** 1545 + * ap_scan_bus(): Scan the AP bus for new devices 1546 + * Runs periodically, workqueue timer (ap_config_time) 1547 + */ 928 1548 static void ap_scan_bus(struct work_struct *unused) 929 1549 { 930 - struct ap_device *ap_dev; 1550 + struct ap_queue *aq; 1551 + struct ap_card *ac; 931 1552 struct device *dev; 932 1553 ap_qid_t qid; 933 - int queue_depth = 0, device_type = 0; 934 - unsigned int device_functions = 0; 935 - int rc, i, borked; 1554 + int depth = 0, type = 0; 1555 + unsigned int functions = 0; 1556 + int rc, id, dom, borked, domains; 936 1557 937 1558 ap_query_configuration(); 938 1559 if (ap_select_domain() != 0) 939 1560 goto out; 940 1561 941 - 942 - spin_lock_bh(&ap_domain_lock); 943 - for (i = 0; i < AP_DEVICES; i++) { 944 - qid = AP_MKQID(i, ap_domain_index); 1562 + for (id = 0; id < AP_DEVICES; id++) { 1563 + /* check if device is registered */ 945 1564 dev = bus_find_device(&ap_bus_type, NULL, 946 - (void *)(unsigned long)qid, 947 - __ap_scan_bus); 948 - rc = ap_query_queue(qid, &queue_depth, &device_type, 949 - &device_functions); 950 - if (dev) { 951 - ap_dev = to_ap_dev(dev); 952 - spin_lock_bh(&ap_dev->lock); 953 - if (rc == -ENODEV) 954 - ap_dev->state = AP_STATE_BORKED; 955 - borked = 
ap_dev->state == AP_STATE_BORKED; 956 - spin_unlock_bh(&ap_dev->lock); 957 - if (borked) /* Remove broken device */ 1565 + (void *)(long) id, 1566 + __match_card_device_with_id); 1567 + ac = dev ? to_ap_card(dev) : NULL; 1568 + if (!ap_test_config_card_id(id)) { 1569 + if (dev) { 1570 + /* Card device has been removed from 1571 + * configuration, remove the belonging 1572 + * queue devices. 1573 + */ 1574 + bus_for_each_dev(&ap_bus_type, NULL, 1575 + (void *)(long) id, 1576 + __ap_queue_devices_with_id_unregister); 1577 + /* now remove the card device */ 958 1578 device_unregister(dev); 959 - put_device(dev); 960 - if (!borked) 1579 + put_device(dev); 1580 + } 1581 + continue; 1582 + } 1583 + /* According to the configuration there should be a card 1584 + * device, so check if there is at least one valid queue 1585 + * and maybe create queue devices and the card device. 1586 + */ 1587 + domains = 0; 1588 + for (dom = 0; dom < AP_DOMAINS; dom++) { 1589 + qid = AP_MKQID(id, dom); 1590 + dev = bus_find_device(&ap_bus_type, NULL, 1591 + (void *)(long) qid, 1592 + __match_queue_device_with_qid); 1593 + aq = dev ? to_ap_queue(dev) : NULL; 1594 + if (!ap_test_config_domain(dom)) { 1595 + if (dev) { 1596 + /* Queue device exists but has been 1597 + * removed from configuration. 
1598 + */ 1599 + device_unregister(dev); 1600 + put_device(dev); 1601 + } 961 1602 continue; 1603 + } 1604 + rc = ap_query_queue(qid, &depth, &type, &functions); 1605 + if (dev) { 1606 + spin_lock_bh(&aq->lock); 1607 + if (rc == -ENODEV || 1608 + /* adapter reconfiguration */ 1609 + (ac && ac->functions != functions)) 1610 + aq->state = AP_STATE_BORKED; 1611 + borked = aq->state == AP_STATE_BORKED; 1612 + spin_unlock_bh(&aq->lock); 1613 + if (borked) /* Remove broken device */ 1614 + device_unregister(dev); 1615 + put_device(dev); 1616 + if (!borked) { 1617 + domains++; 1618 + continue; 1619 + } 1620 + } 1621 + if (rc) 1622 + continue; 1623 + /* new queue device needed */ 1624 + if (!ac) { 1625 + /* but first create the card device */ 1626 + ac = ap_card_create(id, depth, 1627 + type, functions); 1628 + if (!ac) 1629 + continue; 1630 + ac->ap_dev.device.bus = &ap_bus_type; 1631 + ac->ap_dev.device.parent = ap_root_device; 1632 + dev_set_name(&ac->ap_dev.device, 1633 + "card%02x", id); 1634 + /* Register card with AP bus */ 1635 + rc = device_register(&ac->ap_dev.device); 1636 + if (rc) { 1637 + put_device(&ac->ap_dev.device); 1638 + ac = NULL; 1639 + break; 1640 + } 1641 + /* get it and thus adjust reference counter */ 1642 + get_device(&ac->ap_dev.device); 1643 + /* Add card device to card list */ 1644 + spin_lock_bh(&ap_list_lock); 1645 + list_add(&ac->list, &ap_card_list); 1646 + spin_unlock_bh(&ap_list_lock); 1647 + } 1648 + /* now create the new queue device */ 1649 + aq = ap_queue_create(qid, type); 1650 + if (!aq) 1651 + continue; 1652 + aq->card = ac; 1653 + aq->ap_dev.device.bus = &ap_bus_type; 1654 + aq->ap_dev.device.parent = &ac->ap_dev.device; 1655 + dev_set_name(&aq->ap_dev.device, 1656 + "%02x.%04x", id, dom); 1657 + /* Add queue device to card queue list */ 1658 + spin_lock_bh(&ap_list_lock); 1659 + list_add(&aq->list, &ac->queues); 1660 + spin_unlock_bh(&ap_list_lock); 1661 + /* Start with a device reset */ 1662 + spin_lock_bh(&aq->lock); 1663 + 
ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); 1664 + spin_unlock_bh(&aq->lock); 1665 + /* Register device */ 1666 + rc = device_register(&aq->ap_dev.device); 1667 + if (rc) { 1668 + spin_lock_bh(&ap_list_lock); 1669 + list_del_init(&aq->list); 1670 + spin_unlock_bh(&ap_list_lock); 1671 + put_device(&aq->ap_dev.device); 1672 + continue; 1673 + } 1674 + domains++; 1675 + } /* end domain loop */ 1676 + if (ac) { 1677 + /* remove card dev if there are no queue devices */ 1678 + if (!domains) 1679 + device_unregister(&ac->ap_dev.device); 1680 + put_device(&ac->ap_dev.device); 962 1681 } 963 - if (rc) 964 - continue; 965 - ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL); 966 - if (!ap_dev) 967 - break; 968 - ap_dev->qid = qid; 969 - ap_dev->state = AP_STATE_RESET_START; 970 - ap_dev->interrupt = AP_INTR_DISABLED; 971 - ap_dev->queue_depth = queue_depth; 972 - ap_dev->raw_hwtype = device_type; 973 - ap_dev->device_type = device_type; 974 - /* CEX6 toleration: map to CEX5 */ 975 - if (device_type == AP_DEVICE_TYPE_CEX6) 976 - ap_dev->device_type = AP_DEVICE_TYPE_CEX5; 977 - ap_dev->functions = device_functions; 978 - spin_lock_init(&ap_dev->lock); 979 - INIT_LIST_HEAD(&ap_dev->pendingq); 980 - INIT_LIST_HEAD(&ap_dev->requestq); 981 - INIT_LIST_HEAD(&ap_dev->list); 982 - setup_timer(&ap_dev->timeout, ap_request_timeout, 983 - (unsigned long) ap_dev); 984 - 985 - ap_dev->device.bus = &ap_bus_type; 986 - ap_dev->device.parent = ap_root_device; 987 - rc = dev_set_name(&ap_dev->device, "card%02x", 988 - AP_QID_DEVICE(ap_dev->qid)); 989 - if (rc) { 990 - kfree(ap_dev); 991 - continue; 992 - } 993 - /* Add to list of devices */ 994 - spin_lock_bh(&ap_device_list_lock); 995 - list_add(&ap_dev->list, &ap_device_list); 996 - spin_unlock_bh(&ap_device_list_lock); 997 - /* Start with a device reset */ 998 - spin_lock_bh(&ap_dev->lock); 999 - ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL)); 1000 - spin_unlock_bh(&ap_dev->lock); 1001 - /* Register device */ 1002 - ap_dev->device.release = 
ap_device_release; 1003 - rc = device_register(&ap_dev->device); 1004 - if (rc) { 1005 - spin_lock_bh(&ap_dev->lock); 1006 - list_del_init(&ap_dev->list); 1007 - spin_unlock_bh(&ap_dev->lock); 1008 - put_device(&ap_dev->device); 1009 - continue; 1010 - } 1011 - /* Add device attributes. */ 1012 - rc = sysfs_create_group(&ap_dev->device.kobj, 1013 - &ap_dev_attr_group); 1014 - if (rc) { 1015 - device_unregister(&ap_dev->device); 1016 - continue; 1017 - } 1018 - } 1019 - spin_unlock_bh(&ap_domain_lock); 1682 + } /* end device loop */ 1020 1683 out: 1021 1684 mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); 1022 1685 } ··· 1228 1787 del_timer_sync(&ap_config_timer); 1229 1788 hrtimer_cancel(&ap_poll_timer); 1230 1789 tasklet_kill(&ap_tasklet); 1231 - bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister); 1790 + 1791 + /* first remove queue devices */ 1792 + bus_for_each_dev(&ap_bus_type, NULL, NULL, 1793 + __ap_queue_devices_unregister); 1794 + /* now remove the card devices */ 1795 + bus_for_each_dev(&ap_bus_type, NULL, NULL, 1796 + __ap_card_devices_unregister); 1797 + 1798 + /* remove bus attributes */ 1232 1799 for (i = 0; ap_bus_attrs[i]; i++) 1233 1800 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); 1234 1801 unregister_pm_notifier(&ap_power_notifier);
+72 -40
drivers/s390/crypto/ap_bus.h
··· 27 27 #define _AP_BUS_H_ 28 28 29 29 #include <linux/device.h> 30 - #include <linux/mod_devicetable.h> 31 30 #include <linux/types.h> 32 31 33 32 #define AP_DEVICES 64 /* Number of AP devices. */ ··· 37 38 38 39 extern int ap_domain_index; 39 40 41 + extern spinlock_t ap_list_lock; 42 + extern struct list_head ap_card_list; 43 + 40 44 /** 41 45 * The ap_qid_t identifier of an ap queue. It contains a 42 - * 6 bit device index and a 4 bit queue index (domain). 46 + * 6 bit card index and a 4 bit queue index (domain). 43 47 */ 44 48 typedef unsigned int ap_qid_t; 45 49 46 - #define AP_MKQID(_device, _queue) (((_device) & 63) << 8 | ((_queue) & 255)) 47 - #define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63) 50 + #define AP_MKQID(_card, _queue) (((_card) & 63) << 8 | ((_queue) & 255)) 51 + #define AP_QID_CARD(_qid) (((_qid) >> 8) & 63) 48 52 #define AP_QID_QUEUE(_qid) ((_qid) & 255) 49 53 50 54 /** ··· 57 55 * @queue_full: Is 1 if the queue is full 58 56 * @pad: A 4 bit pad 59 57 * @int_enabled: Shows if interrupts are enabled for the AP 60 - * @response_conde: Holds the 8 bit response code 58 + * @response_code: Holds the 8 bit response code 61 59 * @pad2: A 16 bit pad 62 60 * 63 61 * The ap queue status word is returned by all three AP functions ··· 169 167 170 168 int (*probe)(struct ap_device *); 171 169 void (*remove)(struct ap_device *); 172 - int request_timeout; /* request timeout in jiffies */ 170 + void (*suspend)(struct ap_device *); 171 + void (*resume)(struct ap_device *); 173 172 }; 174 173 175 174 #define to_ap_drv(x) container_of((x), struct ap_driver, driver) ··· 178 175 int ap_driver_register(struct ap_driver *, struct module *, char *); 179 176 void ap_driver_unregister(struct ap_driver *); 180 177 181 - typedef enum ap_wait (ap_func_t)(struct ap_device *ap_dev); 182 - 183 178 struct ap_device { 184 179 struct device device; 185 180 struct ap_driver *drv; /* Pointer to AP device driver. */ 186 - spinlock_t lock; /* Per device lock. 
*/ 187 - struct list_head list; /* private list of all AP devices. */ 188 - 189 - enum ap_state state; /* State of the AP device. */ 190 - 191 - ap_qid_t qid; /* AP queue id. */ 192 - int queue_depth; /* AP queue depth.*/ 193 181 int device_type; /* AP device type. */ 194 - int raw_hwtype; /* AP raw hardware type. */ 195 - unsigned int functions; /* AP device function bitfield. */ 196 - struct timer_list timeout; /* Timer for request timeouts. */ 197 - 198 - int interrupt; /* indicate if interrupts are enabled */ 199 - int queue_count; /* # messages currently on AP queue. */ 200 - 201 - struct list_head pendingq; /* List of message sent to AP queue. */ 202 - int pendingq_count; /* # requests on pendingq list. */ 203 - struct list_head requestq; /* List of message yet to be sent. */ 204 - int requestq_count; /* # requests on requestq list. */ 205 - int total_request_count; /* # requests ever for this AP device. */ 206 - 207 - struct ap_message *reply; /* Per device reply message. */ 208 - 209 - void *private; /* ap driver private pointer. */ 210 182 }; 211 183 212 184 #define to_ap_dev(x) container_of((x), struct ap_device, device) 185 + 186 + struct ap_card { 187 + struct ap_device ap_dev; 188 + struct list_head list; /* Private list of AP cards. */ 189 + struct list_head queues; /* List of assoc. AP queues */ 190 + void *private; /* ap driver private pointer. */ 191 + int raw_hwtype; /* AP raw hardware type. */ 192 + unsigned int functions; /* AP device function bitfield. */ 193 + int queue_depth; /* AP queue depth.*/ 194 + int id; /* AP card number. */ 195 + }; 196 + 197 + #define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device) 198 + 199 + struct ap_queue { 200 + struct ap_device ap_dev; 201 + struct list_head list; /* Private list of AP queues. */ 202 + struct ap_card *card; /* Ptr to assoc. AP card. */ 203 + spinlock_t lock; /* Per device lock. */ 204 + void *private; /* ap driver private pointer. */ 205 + ap_qid_t qid; /* AP queue id. 
*/ 206 + int interrupt; /* indicate if interrupts are enabled */ 207 + int queue_count; /* # messages currently on AP queue. */ 208 + enum ap_state state; /* State of the AP device. */ 209 + int pendingq_count; /* # requests on pendingq list. */ 210 + int requestq_count; /* # requests on requestq list. */ 211 + int total_request_count; /* # requests ever for this AP device. */ 212 + int request_timeout; /* Request timout in jiffies. */ 213 + struct timer_list timeout; /* Timer for request timeouts. */ 214 + struct list_head pendingq; /* List of message sent to AP queue. */ 215 + struct list_head requestq; /* List of message yet to be sent. */ 216 + struct ap_message *reply; /* Per device reply message. */ 217 + }; 218 + 219 + #define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device) 220 + 221 + typedef enum ap_wait (ap_func_t)(struct ap_queue *queue); 213 222 214 223 struct ap_message { 215 224 struct list_head list; /* Request queueing. */ ··· 233 218 void *private; /* ap driver private pointer. */ 234 219 unsigned int special:1; /* Used for special commands. */ 235 220 /* receive is called from tasklet context */ 236 - void (*receive)(struct ap_device *, struct ap_message *, 221 + void (*receive)(struct ap_queue *, struct ap_message *, 237 222 struct ap_message *); 238 223 }; 239 224 ··· 247 232 unsigned int adm[8]; /* AP domain mask */ 248 233 unsigned char reserved4[16]; 249 234 } __packed; 250 - 251 - #define AP_DEVICE(dt) \ 252 - .dev_type=(dt), \ 253 - .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE, 254 235 255 236 /** 256 237 * ap_init_message() - Initialize ap_message. ··· 262 251 ap_msg->receive = NULL; 263 252 } 264 253 254 + #define for_each_ap_card(_ac) \ 255 + list_for_each_entry(_ac, &ap_card_list, list) 256 + 257 + #define for_each_ap_queue(_aq, _ac) \ 258 + list_for_each_entry(_aq, &(_ac)->queues, list) 259 + 265 260 /* 266 261 * Note: don't use ap_send/ap_recv after using ap_queue_message 267 262 * for the first time. 
Otherwise the ap message queue will get ··· 276 259 int ap_send(ap_qid_t, unsigned long long, void *, size_t); 277 260 int ap_recv(ap_qid_t, unsigned long long *, void *, size_t); 278 261 279 - void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg); 280 - void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg); 281 - void ap_flush_queue(struct ap_device *ap_dev); 262 + enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event); 263 + enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event); 264 + 265 + void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg); 266 + void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg); 267 + void ap_flush_queue(struct ap_queue *aq); 268 + 269 + void *ap_airq_ptr(void); 270 + void ap_wait(enum ap_wait wait); 271 + void ap_request_timeout(unsigned long data); 282 272 void ap_bus_force_rescan(void); 283 - void ap_device_init_reply(struct ap_device *ap_dev, struct ap_message *ap_msg); 273 + 274 + void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg); 275 + struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); 276 + void ap_queue_remove(struct ap_queue *aq); 277 + void ap_queue_suspend(struct ap_device *ap_dev); 278 + void ap_queue_resume(struct ap_device *ap_dev); 279 + 280 + struct ap_card *ap_card_create(int id, int queue_depth, int device_type, 281 + unsigned int device_functions); 284 282 285 283 int ap_module_init(void); 286 284 void ap_module_exit(void);
+172
drivers/s390/crypto/ap_card.c
··· 1 + /* 2 + * Copyright IBM Corp. 2016 3 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 4 + * 5 + * Adjunct processor bus, card related code. 6 + */ 7 + 8 + #define KMSG_COMPONENT "ap" 9 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + 11 + #include <linux/init.h> 12 + #include <linux/slab.h> 13 + #include <asm/facility.h> 14 + 15 + #include "ap_bus.h" 16 + #include "ap_asm.h" 17 + 18 + /* 19 + * AP card related attributes. 20 + */ 21 + static ssize_t ap_hwtype_show(struct device *dev, 22 + struct device_attribute *attr, char *buf) 23 + { 24 + struct ap_card *ac = to_ap_card(dev); 25 + 26 + return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type); 27 + } 28 + 29 + static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); 30 + 31 + static ssize_t ap_raw_hwtype_show(struct device *dev, 32 + struct device_attribute *attr, char *buf) 33 + { 34 + struct ap_card *ac = to_ap_card(dev); 35 + 36 + return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype); 37 + } 38 + 39 + static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL); 40 + 41 + static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, 42 + char *buf) 43 + { 44 + struct ap_card *ac = to_ap_card(dev); 45 + 46 + return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth); 47 + } 48 + 49 + static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); 50 + 51 + static ssize_t ap_functions_show(struct device *dev, 52 + struct device_attribute *attr, char *buf) 53 + { 54 + struct ap_card *ac = to_ap_card(dev); 55 + 56 + return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions); 57 + } 58 + 59 + static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL); 60 + 61 + static ssize_t ap_request_count_show(struct device *dev, 62 + struct device_attribute *attr, 63 + char *buf) 64 + { 65 + struct ap_card *ac = to_ap_card(dev); 66 + struct ap_queue *aq; 67 + unsigned int req_cnt; 68 + 69 + req_cnt = 0; 70 + spin_lock_bh(&ap_list_lock); 71 + for_each_ap_queue(aq, ac) 72 + req_cnt += 
aq->total_request_count; 73 + spin_unlock_bh(&ap_list_lock); 74 + return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt); 75 + } 76 + 77 + static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); 78 + 79 + static ssize_t ap_requestq_count_show(struct device *dev, 80 + struct device_attribute *attr, char *buf) 81 + { 82 + struct ap_card *ac = to_ap_card(dev); 83 + struct ap_queue *aq; 84 + unsigned int reqq_cnt; 85 + 86 + reqq_cnt = 0; 87 + spin_lock_bh(&ap_list_lock); 88 + for_each_ap_queue(aq, ac) 89 + reqq_cnt += aq->requestq_count; 90 + spin_unlock_bh(&ap_list_lock); 91 + return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt); 92 + } 93 + 94 + static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL); 95 + 96 + static ssize_t ap_pendingq_count_show(struct device *dev, 97 + struct device_attribute *attr, char *buf) 98 + { 99 + struct ap_card *ac = to_ap_card(dev); 100 + struct ap_queue *aq; 101 + unsigned int penq_cnt; 102 + 103 + penq_cnt = 0; 104 + spin_lock_bh(&ap_list_lock); 105 + for_each_ap_queue(aq, ac) 106 + penq_cnt += aq->pendingq_count; 107 + spin_unlock_bh(&ap_list_lock); 108 + return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt); 109 + } 110 + 111 + static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL); 112 + 113 + static ssize_t ap_modalias_show(struct device *dev, 114 + struct device_attribute *attr, char *buf) 115 + { 116 + return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type); 117 + } 118 + 119 + static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL); 120 + 121 + static struct attribute *ap_card_dev_attrs[] = { 122 + &dev_attr_hwtype.attr, 123 + &dev_attr_raw_hwtype.attr, 124 + &dev_attr_depth.attr, 125 + &dev_attr_ap_functions.attr, 126 + &dev_attr_request_count.attr, 127 + &dev_attr_requestq_count.attr, 128 + &dev_attr_pendingq_count.attr, 129 + &dev_attr_modalias.attr, 130 + NULL 131 + }; 132 + 133 + static struct attribute_group ap_card_dev_attr_group = { 134 + .attrs = ap_card_dev_attrs 135 + }; 136 
+ 137 + static const struct attribute_group *ap_card_dev_attr_groups[] = { 138 + &ap_card_dev_attr_group, 139 + NULL 140 + }; 141 + 142 + struct device_type ap_card_type = { 143 + .name = "ap_card", 144 + .groups = ap_card_dev_attr_groups, 145 + }; 146 + 147 + static void ap_card_device_release(struct device *dev) 148 + { 149 + kfree(to_ap_card(dev)); 150 + } 151 + 152 + struct ap_card *ap_card_create(int id, int queue_depth, int device_type, 153 + unsigned int functions) 154 + { 155 + struct ap_card *ac; 156 + 157 + ac = kzalloc(sizeof(*ac), GFP_KERNEL); 158 + if (!ac) 159 + return NULL; 160 + INIT_LIST_HEAD(&ac->queues); 161 + ac->ap_dev.device.release = ap_card_device_release; 162 + ac->ap_dev.device.type = &ap_card_type; 163 + ac->ap_dev.device_type = device_type; 164 + /* CEX6 toleration: map to CEX5 */ 165 + if (device_type == AP_DEVICE_TYPE_CEX6) 166 + ac->ap_dev.device_type = AP_DEVICE_TYPE_CEX5; 167 + ac->raw_hwtype = device_type; 168 + ac->queue_depth = queue_depth; 169 + ac->functions = functions; 170 + ac->id = id; 171 + return ac; 172 + }
+700
drivers/s390/crypto/ap_queue.c
··· 1 + /* 2 + * Copyright IBM Corp. 2016 3 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 4 + * 5 + * Adjunct processor bus, queue related code. 6 + */ 7 + 8 + #define KMSG_COMPONENT "ap" 9 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + 11 + #include <linux/init.h> 12 + #include <linux/slab.h> 13 + #include <asm/facility.h> 14 + 15 + #include "ap_bus.h" 16 + #include "ap_asm.h" 17 + 18 + /** 19 + * ap_queue_enable_interruption(): Enable interruption on an AP queue. 20 + * @qid: The AP queue number 21 + * @ind: the notification indicator byte 22 + * 23 + * Enables interruption on AP queue via ap_aqic(). Based on the return 24 + * value it waits a while and tests the AP queue if interrupts 25 + * have been switched on using ap_test_queue(). 26 + */ 27 + static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind) 28 + { 29 + struct ap_queue_status status; 30 + 31 + status = ap_aqic(aq->qid, ind); 32 + switch (status.response_code) { 33 + case AP_RESPONSE_NORMAL: 34 + case AP_RESPONSE_OTHERWISE_CHANGED: 35 + return 0; 36 + case AP_RESPONSE_Q_NOT_AVAIL: 37 + case AP_RESPONSE_DECONFIGURED: 38 + case AP_RESPONSE_CHECKSTOPPED: 39 + case AP_RESPONSE_INVALID_ADDRESS: 40 + pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n", 41 + AP_QID_CARD(aq->qid), 42 + AP_QID_QUEUE(aq->qid)); 43 + return -EOPNOTSUPP; 44 + case AP_RESPONSE_RESET_IN_PROGRESS: 45 + case AP_RESPONSE_BUSY: 46 + default: 47 + return -EBUSY; 48 + } 49 + } 50 + 51 + /** 52 + * __ap_send(): Send message to adjunct processor queue. 53 + * @qid: The AP queue number 54 + * @psmid: The program supplied message identifier 55 + * @msg: The message text 56 + * @length: The message length 57 + * @special: Special Bit 58 + * 59 + * Returns AP queue status structure. 60 + * Condition code 1 on NQAP can't happen because the L bit is 1. 61 + * Condition code 2 on NQAP also means the send is incomplete, 62 + * because a segment boundary was reached. The NQAP is repeated. 
63 + */ 64 + static inline struct ap_queue_status 65 + __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length, 66 + unsigned int special) 67 + { 68 + if (special == 1) 69 + qid |= 0x400000UL; 70 + return ap_nqap(qid, psmid, msg, length); 71 + } 72 + 73 + int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) 74 + { 75 + struct ap_queue_status status; 76 + 77 + status = __ap_send(qid, psmid, msg, length, 0); 78 + switch (status.response_code) { 79 + case AP_RESPONSE_NORMAL: 80 + return 0; 81 + case AP_RESPONSE_Q_FULL: 82 + case AP_RESPONSE_RESET_IN_PROGRESS: 83 + return -EBUSY; 84 + case AP_RESPONSE_REQ_FAC_NOT_INST: 85 + return -EINVAL; 86 + default: /* Device is gone. */ 87 + return -ENODEV; 88 + } 89 + } 90 + EXPORT_SYMBOL(ap_send); 91 + 92 + int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) 93 + { 94 + struct ap_queue_status status; 95 + 96 + if (msg == NULL) 97 + return -EINVAL; 98 + status = ap_dqap(qid, psmid, msg, length); 99 + switch (status.response_code) { 100 + case AP_RESPONSE_NORMAL: 101 + return 0; 102 + case AP_RESPONSE_NO_PENDING_REPLY: 103 + if (status.queue_empty) 104 + return -ENOENT; 105 + return -EBUSY; 106 + case AP_RESPONSE_RESET_IN_PROGRESS: 107 + return -EBUSY; 108 + default: 109 + return -ENODEV; 110 + } 111 + } 112 + EXPORT_SYMBOL(ap_recv); 113 + 114 + /* State machine definitions and helpers */ 115 + 116 + static enum ap_wait ap_sm_nop(struct ap_queue *aq) 117 + { 118 + return AP_WAIT_NONE; 119 + } 120 + 121 + /** 122 + * ap_sm_recv(): Receive pending reply messages from an AP queue but do 123 + * not change the state of the device. 
124 + * @aq: pointer to the AP queue 125 + * 126 + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 127 + */ 128 + static struct ap_queue_status ap_sm_recv(struct ap_queue *aq) 129 + { 130 + struct ap_queue_status status; 131 + struct ap_message *ap_msg; 132 + 133 + status = ap_dqap(aq->qid, &aq->reply->psmid, 134 + aq->reply->message, aq->reply->length); 135 + switch (status.response_code) { 136 + case AP_RESPONSE_NORMAL: 137 + aq->queue_count--; 138 + if (aq->queue_count > 0) 139 + mod_timer(&aq->timeout, 140 + jiffies + aq->request_timeout); 141 + list_for_each_entry(ap_msg, &aq->pendingq, list) { 142 + if (ap_msg->psmid != aq->reply->psmid) 143 + continue; 144 + list_del_init(&ap_msg->list); 145 + aq->pendingq_count--; 146 + ap_msg->receive(aq, ap_msg, aq->reply); 147 + break; 148 + } 149 + case AP_RESPONSE_NO_PENDING_REPLY: 150 + if (!status.queue_empty || aq->queue_count <= 0) 151 + break; 152 + /* The card shouldn't forget requests but who knows. */ 153 + aq->queue_count = 0; 154 + list_splice_init(&aq->pendingq, &aq->requestq); 155 + aq->requestq_count += aq->pendingq_count; 156 + aq->pendingq_count = 0; 157 + break; 158 + default: 159 + break; 160 + } 161 + return status; 162 + } 163 + 164 + /** 165 + * ap_sm_read(): Receive pending reply messages from an AP queue. 
166 + * @aq: pointer to the AP queue 167 + * 168 + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 169 + */ 170 + static enum ap_wait ap_sm_read(struct ap_queue *aq) 171 + { 172 + struct ap_queue_status status; 173 + 174 + if (!aq->reply) 175 + return AP_WAIT_NONE; 176 + status = ap_sm_recv(aq); 177 + switch (status.response_code) { 178 + case AP_RESPONSE_NORMAL: 179 + if (aq->queue_count > 0) { 180 + aq->state = AP_STATE_WORKING; 181 + return AP_WAIT_AGAIN; 182 + } 183 + aq->state = AP_STATE_IDLE; 184 + return AP_WAIT_NONE; 185 + case AP_RESPONSE_NO_PENDING_REPLY: 186 + if (aq->queue_count > 0) 187 + return AP_WAIT_INTERRUPT; 188 + aq->state = AP_STATE_IDLE; 189 + return AP_WAIT_NONE; 190 + default: 191 + aq->state = AP_STATE_BORKED; 192 + return AP_WAIT_NONE; 193 + } 194 + } 195 + 196 + /** 197 + * ap_sm_suspend_read(): Receive pending reply messages from an AP queue 198 + * without changing the device state in between. In suspend mode we don't 199 + * allow sending new requests, therefore just fetch pending replies. 200 + * @aq: pointer to the AP queue 201 + * 202 + * Returns AP_WAIT_NONE or AP_WAIT_AGAIN 203 + */ 204 + static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq) 205 + { 206 + struct ap_queue_status status; 207 + 208 + if (!aq->reply) 209 + return AP_WAIT_NONE; 210 + status = ap_sm_recv(aq); 211 + switch (status.response_code) { 212 + case AP_RESPONSE_NORMAL: 213 + if (aq->queue_count > 0) 214 + return AP_WAIT_AGAIN; 215 + /* fall through */ 216 + default: 217 + return AP_WAIT_NONE; 218 + } 219 + } 220 + 221 + /** 222 + * ap_sm_write(): Send messages from the request queue to an AP queue. 
223 + * @aq: pointer to the AP queue 224 + * 225 + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 226 + */ 227 + static enum ap_wait ap_sm_write(struct ap_queue *aq) 228 + { 229 + struct ap_queue_status status; 230 + struct ap_message *ap_msg; 231 + 232 + if (aq->requestq_count <= 0) 233 + return AP_WAIT_NONE; 234 + /* Start the next request on the queue. */ 235 + ap_msg = list_entry(aq->requestq.next, struct ap_message, list); 236 + status = __ap_send(aq->qid, ap_msg->psmid, 237 + ap_msg->message, ap_msg->length, ap_msg->special); 238 + switch (status.response_code) { 239 + case AP_RESPONSE_NORMAL: 240 + aq->queue_count++; 241 + if (aq->queue_count == 1) 242 + mod_timer(&aq->timeout, jiffies + aq->request_timeout); 243 + list_move_tail(&ap_msg->list, &aq->pendingq); 244 + aq->requestq_count--; 245 + aq->pendingq_count++; 246 + if (aq->queue_count < aq->card->queue_depth) { 247 + aq->state = AP_STATE_WORKING; 248 + return AP_WAIT_AGAIN; 249 + } 250 + /* fall through */ 251 + case AP_RESPONSE_Q_FULL: 252 + aq->state = AP_STATE_QUEUE_FULL; 253 + return AP_WAIT_INTERRUPT; 254 + case AP_RESPONSE_RESET_IN_PROGRESS: 255 + aq->state = AP_STATE_RESET_WAIT; 256 + return AP_WAIT_TIMEOUT; 257 + case AP_RESPONSE_MESSAGE_TOO_BIG: 258 + case AP_RESPONSE_REQ_FAC_NOT_INST: 259 + list_del_init(&ap_msg->list); 260 + aq->requestq_count--; 261 + ap_msg->rc = -EINVAL; 262 + ap_msg->receive(aq, ap_msg, NULL); 263 + return AP_WAIT_AGAIN; 264 + default: 265 + aq->state = AP_STATE_BORKED; 266 + return AP_WAIT_NONE; 267 + } 268 + } 269 + 270 + /** 271 + * ap_sm_read_write(): Send and receive messages to/from an AP queue. 272 + * @aq: pointer to the AP queue 273 + * 274 + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 275 + */ 276 + static enum ap_wait ap_sm_read_write(struct ap_queue *aq) 277 + { 278 + return min(ap_sm_read(aq), ap_sm_write(aq)); 279 + } 280 + 281 + /** 282 + * ap_sm_reset(): Reset an AP queue. 
283 + * @qid: The AP queue number 284 + * 285 + * Submit the Reset command to an AP queue. 286 + */ 287 + static enum ap_wait ap_sm_reset(struct ap_queue *aq) 288 + { 289 + struct ap_queue_status status; 290 + 291 + status = ap_rapq(aq->qid); 292 + switch (status.response_code) { 293 + case AP_RESPONSE_NORMAL: 294 + case AP_RESPONSE_RESET_IN_PROGRESS: 295 + aq->state = AP_STATE_RESET_WAIT; 296 + aq->interrupt = AP_INTR_DISABLED; 297 + return AP_WAIT_TIMEOUT; 298 + case AP_RESPONSE_BUSY: 299 + return AP_WAIT_TIMEOUT; 300 + case AP_RESPONSE_Q_NOT_AVAIL: 301 + case AP_RESPONSE_DECONFIGURED: 302 + case AP_RESPONSE_CHECKSTOPPED: 303 + default: 304 + aq->state = AP_STATE_BORKED; 305 + return AP_WAIT_NONE; 306 + } 307 + } 308 + 309 + /** 310 + * ap_sm_reset_wait(): Test queue for completion of the reset operation 311 + * @aq: pointer to the AP queue 312 + * 313 + * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0. 314 + */ 315 + static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq) 316 + { 317 + struct ap_queue_status status; 318 + void *lsi_ptr; 319 + 320 + if (aq->queue_count > 0 && aq->reply) 321 + /* Try to read a completed message and get the status */ 322 + status = ap_sm_recv(aq); 323 + else 324 + /* Get the status with TAPQ */ 325 + status = ap_tapq(aq->qid, NULL); 326 + 327 + switch (status.response_code) { 328 + case AP_RESPONSE_NORMAL: 329 + lsi_ptr = ap_airq_ptr(); 330 + if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0) 331 + aq->state = AP_STATE_SETIRQ_WAIT; 332 + else 333 + aq->state = (aq->queue_count > 0) ? 
334 + AP_STATE_WORKING : AP_STATE_IDLE; 335 + return AP_WAIT_AGAIN; 336 + case AP_RESPONSE_BUSY: 337 + case AP_RESPONSE_RESET_IN_PROGRESS: 338 + return AP_WAIT_TIMEOUT; 339 + case AP_RESPONSE_Q_NOT_AVAIL: 340 + case AP_RESPONSE_DECONFIGURED: 341 + case AP_RESPONSE_CHECKSTOPPED: 342 + default: 343 + aq->state = AP_STATE_BORKED; 344 + return AP_WAIT_NONE; 345 + } 346 + } 347 + 348 + /** 349 + * ap_sm_setirq_wait(): Test queue for completion of the irq enablement 350 + * @aq: pointer to the AP queue 351 + * 352 + * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0. 353 + */ 354 + static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq) 355 + { 356 + struct ap_queue_status status; 357 + 358 + if (aq->queue_count > 0 && aq->reply) 359 + /* Try to read a completed message and get the status */ 360 + status = ap_sm_recv(aq); 361 + else 362 + /* Get the status with TAPQ */ 363 + status = ap_tapq(aq->qid, NULL); 364 + 365 + if (status.int_enabled == 1) { 366 + /* Irqs are now enabled */ 367 + aq->interrupt = AP_INTR_ENABLED; 368 + aq->state = (aq->queue_count > 0) ? 
369 + AP_STATE_WORKING : AP_STATE_IDLE; 370 + } 371 + 372 + switch (status.response_code) { 373 + case AP_RESPONSE_NORMAL: 374 + if (aq->queue_count > 0) 375 + return AP_WAIT_AGAIN; 376 + /* fallthrough */ 377 + case AP_RESPONSE_NO_PENDING_REPLY: 378 + return AP_WAIT_TIMEOUT; 379 + default: 380 + aq->state = AP_STATE_BORKED; 381 + return AP_WAIT_NONE; 382 + } 383 + } 384 + 385 + /* 386 + * AP state machine jump table 387 + */ 388 + static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = { 389 + [AP_STATE_RESET_START] = { 390 + [AP_EVENT_POLL] = ap_sm_reset, 391 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 392 + }, 393 + [AP_STATE_RESET_WAIT] = { 394 + [AP_EVENT_POLL] = ap_sm_reset_wait, 395 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 396 + }, 397 + [AP_STATE_SETIRQ_WAIT] = { 398 + [AP_EVENT_POLL] = ap_sm_setirq_wait, 399 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 400 + }, 401 + [AP_STATE_IDLE] = { 402 + [AP_EVENT_POLL] = ap_sm_write, 403 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 404 + }, 405 + [AP_STATE_WORKING] = { 406 + [AP_EVENT_POLL] = ap_sm_read_write, 407 + [AP_EVENT_TIMEOUT] = ap_sm_reset, 408 + }, 409 + [AP_STATE_QUEUE_FULL] = { 410 + [AP_EVENT_POLL] = ap_sm_read, 411 + [AP_EVENT_TIMEOUT] = ap_sm_reset, 412 + }, 413 + [AP_STATE_SUSPEND_WAIT] = { 414 + [AP_EVENT_POLL] = ap_sm_suspend_read, 415 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 416 + }, 417 + [AP_STATE_BORKED] = { 418 + [AP_EVENT_POLL] = ap_sm_nop, 419 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 420 + }, 421 + }; 422 + 423 + enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event) 424 + { 425 + return ap_jumptable[aq->state][event](aq); 426 + } 427 + 428 + enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event) 429 + { 430 + enum ap_wait wait; 431 + 432 + while ((wait = ap_sm_event(aq, event)) == AP_WAIT_AGAIN) 433 + ; 434 + return wait; 435 + } 436 + 437 + /* 438 + * Power management for queue devices 439 + */ 440 + void ap_queue_suspend(struct ap_device *ap_dev) 441 + { 442 + struct ap_queue *aq = 
to_ap_queue(&ap_dev->device); 443 + 444 + /* Poll on the device until all requests are finished. */ 445 + spin_lock_bh(&aq->lock); 446 + aq->state = AP_STATE_SUSPEND_WAIT; 447 + while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE) 448 + ; 449 + aq->state = AP_STATE_BORKED; 450 + spin_unlock_bh(&aq->lock); 451 + } 452 + EXPORT_SYMBOL(ap_queue_suspend); 453 + 454 + void ap_queue_resume(struct ap_device *ap_dev) 455 + { 456 + } 457 + EXPORT_SYMBOL(ap_queue_resume); 458 + 459 + /* 460 + * AP queue related attributes. 461 + */ 462 + static ssize_t ap_request_count_show(struct device *dev, 463 + struct device_attribute *attr, 464 + char *buf) 465 + { 466 + struct ap_queue *aq = to_ap_queue(dev); 467 + unsigned int req_cnt; 468 + 469 + spin_lock_bh(&aq->lock); 470 + req_cnt = aq->total_request_count; 471 + spin_unlock_bh(&aq->lock); 472 + return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt); 473 + } 474 + 475 + static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); 476 + 477 + static ssize_t ap_requestq_count_show(struct device *dev, 478 + struct device_attribute *attr, char *buf) 479 + { 480 + struct ap_queue *aq = to_ap_queue(dev); 481 + unsigned int reqq_cnt = 0; 482 + 483 + spin_lock_bh(&aq->lock); 484 + reqq_cnt = aq->requestq_count; 485 + spin_unlock_bh(&aq->lock); 486 + return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt); 487 + } 488 + 489 + static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL); 490 + 491 + static ssize_t ap_pendingq_count_show(struct device *dev, 492 + struct device_attribute *attr, char *buf) 493 + { 494 + struct ap_queue *aq = to_ap_queue(dev); 495 + unsigned int penq_cnt = 0; 496 + 497 + spin_lock_bh(&aq->lock); 498 + penq_cnt = aq->pendingq_count; 499 + spin_unlock_bh(&aq->lock); 500 + return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt); 501 + } 502 + 503 + static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL); 504 + 505 + static ssize_t ap_reset_show(struct device *dev, 506 + struct device_attribute 
*attr, char *buf) 507 + { 508 + struct ap_queue *aq = to_ap_queue(dev); 509 + int rc = 0; 510 + 511 + spin_lock_bh(&aq->lock); 512 + switch (aq->state) { 513 + case AP_STATE_RESET_START: 514 + case AP_STATE_RESET_WAIT: 515 + rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n"); 516 + break; 517 + case AP_STATE_WORKING: 518 + case AP_STATE_QUEUE_FULL: 519 + rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n"); 520 + break; 521 + default: 522 + rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n"); 523 + } 524 + spin_unlock_bh(&aq->lock); 525 + return rc; 526 + } 527 + 528 + static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL); 529 + 530 + static ssize_t ap_interrupt_show(struct device *dev, 531 + struct device_attribute *attr, char *buf) 532 + { 533 + struct ap_queue *aq = to_ap_queue(dev); 534 + int rc = 0; 535 + 536 + spin_lock_bh(&aq->lock); 537 + if (aq->state == AP_STATE_SETIRQ_WAIT) 538 + rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n"); 539 + else if (aq->interrupt == AP_INTR_ENABLED) 540 + rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n"); 541 + else 542 + rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n"); 543 + spin_unlock_bh(&aq->lock); 544 + return rc; 545 + } 546 + 547 + static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL); 548 + 549 + static struct attribute *ap_queue_dev_attrs[] = { 550 + &dev_attr_request_count.attr, 551 + &dev_attr_requestq_count.attr, 552 + &dev_attr_pendingq_count.attr, 553 + &dev_attr_reset.attr, 554 + &dev_attr_interrupt.attr, 555 + NULL 556 + }; 557 + 558 + static struct attribute_group ap_queue_dev_attr_group = { 559 + .attrs = ap_queue_dev_attrs 560 + }; 561 + 562 + static const struct attribute_group *ap_queue_dev_attr_groups[] = { 563 + &ap_queue_dev_attr_group, 564 + NULL 565 + }; 566 + 567 + struct device_type ap_queue_type = { 568 + .name = "ap_queue", 569 + .groups = ap_queue_dev_attr_groups, 570 + }; 571 + 572 + static void ap_queue_device_release(struct device *dev) 573 + { 574 + 
kfree(to_ap_queue(dev)); 575 + } 576 + 577 + struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) 578 + { 579 + struct ap_queue *aq; 580 + 581 + aq = kzalloc(sizeof(*aq), GFP_KERNEL); 582 + if (!aq) 583 + return NULL; 584 + aq->ap_dev.device.release = ap_queue_device_release; 585 + aq->ap_dev.device.type = &ap_queue_type; 586 + aq->ap_dev.device_type = device_type; 587 + /* CEX6 toleration: map to CEX5 */ 588 + if (device_type == AP_DEVICE_TYPE_CEX6) 589 + aq->ap_dev.device_type = AP_DEVICE_TYPE_CEX5; 590 + aq->qid = qid; 591 + aq->state = AP_STATE_RESET_START; 592 + aq->interrupt = AP_INTR_DISABLED; 593 + spin_lock_init(&aq->lock); 594 + INIT_LIST_HEAD(&aq->pendingq); 595 + INIT_LIST_HEAD(&aq->requestq); 596 + setup_timer(&aq->timeout, ap_request_timeout, (unsigned long) aq); 597 + 598 + return aq; 599 + } 600 + 601 + void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply) 602 + { 603 + aq->reply = reply; 604 + 605 + spin_lock_bh(&aq->lock); 606 + ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); 607 + spin_unlock_bh(&aq->lock); 608 + } 609 + EXPORT_SYMBOL(ap_queue_init_reply); 610 + 611 + /** 612 + * ap_queue_message(): Queue a request to an AP device. 613 + * @aq: The AP device to queue the message to 614 + * @ap_msg: The message that is to be added 615 + */ 616 + void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg) 617 + { 618 + /* For asynchronous message handling a valid receive-callback 619 + * is required. 620 + */ 621 + BUG_ON(!ap_msg->receive); 622 + 623 + spin_lock_bh(&aq->lock); 624 + /* Queue the message. */ 625 + list_add_tail(&ap_msg->list, &aq->requestq); 626 + aq->requestq_count++; 627 + aq->total_request_count++; 628 + /* Send/receive as many request from the queue as possible. */ 629 + ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL)); 630 + spin_unlock_bh(&aq->lock); 631 + } 632 + EXPORT_SYMBOL(ap_queue_message); 633 + 634 + /** 635 + * ap_cancel_message(): Cancel a crypto request. 
636 + * @aq: The AP device that has the message queued 637 + * @ap_msg: The message that is to be removed 638 + * 639 + * Cancel a crypto request. This is done by removing the request 640 + * from the device pending or request queue. Note that the 641 + * request stays on the AP queue. When it finishes the message 642 + * reply will be discarded because the psmid can't be found. 643 + */ 644 + void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg) 645 + { 646 + struct ap_message *tmp; 647 + 648 + spin_lock_bh(&aq->lock); 649 + if (!list_empty(&ap_msg->list)) { 650 + list_for_each_entry(tmp, &aq->pendingq, list) 651 + if (tmp->psmid == ap_msg->psmid) { 652 + aq->pendingq_count--; 653 + goto found; 654 + } 655 + aq->requestq_count--; 656 + found: 657 + list_del_init(&ap_msg->list); 658 + } 659 + spin_unlock_bh(&aq->lock); 660 + } 661 + EXPORT_SYMBOL(ap_cancel_message); 662 + 663 + /** 664 + * __ap_flush_queue(): Flush requests. 665 + * @aq: Pointer to the AP queue 666 + * 667 + * Flush all requests from the request/pending queue of an AP device. 668 + */ 669 + static void __ap_flush_queue(struct ap_queue *aq) 670 + { 671 + struct ap_message *ap_msg, *next; 672 + 673 + list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) { 674 + list_del_init(&ap_msg->list); 675 + aq->pendingq_count--; 676 + ap_msg->rc = -EAGAIN; 677 + ap_msg->receive(aq, ap_msg, NULL); 678 + } 679 + list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) { 680 + list_del_init(&ap_msg->list); 681 + aq->requestq_count--; 682 + ap_msg->rc = -EAGAIN; 683 + ap_msg->receive(aq, ap_msg, NULL); 684 + } 685 + } 686 + 687 + void ap_flush_queue(struct ap_queue *aq) 688 + { 689 + spin_lock_bh(&aq->lock); 690 + __ap_flush_queue(aq); 691 + spin_unlock_bh(&aq->lock); 692 + } 693 + EXPORT_SYMBOL(ap_flush_queue); 694 + 695 + void ap_queue_remove(struct ap_queue *aq) 696 + { 697 + ap_flush_queue(aq); 698 + del_timer_sync(&aq->timeout); 699 + } 700 + EXPORT_SYMBOL(ap_queue_remove);
+382 -557
drivers/s390/crypto/zcrypt_api.c
··· 59 59 module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP); 60 60 MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on)."); 61 61 62 - static DEFINE_SPINLOCK(zcrypt_device_lock); 63 - static LIST_HEAD(zcrypt_device_list); 64 - static int zcrypt_device_count = 0; 62 + DEFINE_SPINLOCK(zcrypt_list_lock); 63 + LIST_HEAD(zcrypt_card_list); 64 + int zcrypt_device_count; 65 + 65 66 static atomic_t zcrypt_open_count = ATOMIC_INIT(0); 66 67 static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0); 67 68 68 69 atomic_t zcrypt_rescan_req = ATOMIC_INIT(0); 69 70 EXPORT_SYMBOL(zcrypt_rescan_req); 70 71 71 - static int zcrypt_rng_device_add(void); 72 - static void zcrypt_rng_device_remove(void); 73 - 74 72 static LIST_HEAD(zcrypt_ops_list); 75 73 76 - static debug_info_t *zcrypt_dbf_common; 77 - static debug_info_t *zcrypt_dbf_devices; 78 74 static struct dentry *debugfs_root; 79 - 80 - /* 81 - * Device attributes common for all crypto devices. 82 - */ 83 - static ssize_t zcrypt_type_show(struct device *dev, 84 - struct device_attribute *attr, char *buf) 85 - { 86 - struct zcrypt_device *zdev = to_ap_dev(dev)->private; 87 - return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string); 88 - } 89 - 90 - static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL); 91 - 92 - static ssize_t zcrypt_online_show(struct device *dev, 93 - struct device_attribute *attr, char *buf) 94 - { 95 - struct zcrypt_device *zdev = to_ap_dev(dev)->private; 96 - return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online); 97 - } 98 - 99 - static ssize_t zcrypt_online_store(struct device *dev, 100 - struct device_attribute *attr, 101 - const char *buf, size_t count) 102 - { 103 - struct zcrypt_device *zdev = to_ap_dev(dev)->private; 104 - int online; 105 - 106 - if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) 107 - return -EINVAL; 108 - zdev->online = online; 109 - ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dman", zdev->ap_dev->qid, 110 - zdev->online); 111 
- if (!online) 112 - ap_flush_queue(zdev->ap_dev); 113 - return count; 114 - } 115 - 116 - static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store); 117 - 118 - static struct attribute * zcrypt_device_attrs[] = { 119 - &dev_attr_type.attr, 120 - &dev_attr_online.attr, 121 - NULL, 122 - }; 123 - 124 - static struct attribute_group zcrypt_device_attr_group = { 125 - .attrs = zcrypt_device_attrs, 126 - }; 75 + debug_info_t *zcrypt_dbf_common; 76 + debug_info_t *zcrypt_dbf_devices; 77 + debug_info_t *zcrypt_dbf_cards; 127 78 128 79 /** 129 80 * Process a rescan of the transport layer. ··· 93 142 } 94 143 return 0; 95 144 } 96 - 97 - /** 98 - * __zcrypt_increase_preference(): Increase preference of a crypto device. 99 - * @zdev: Pointer the crypto device 100 - * 101 - * Move the device towards the head of the device list. 102 - * Need to be called while holding the zcrypt device list lock. 103 - * Note: cards with speed_rating of 0 are kept at the end of the list. 104 - */ 105 - static void __zcrypt_increase_preference(struct zcrypt_device *zdev, 106 - unsigned int weight) 107 - { 108 - struct zcrypt_device *tmp; 109 - struct list_head *l; 110 - 111 - zdev->load -= weight; 112 - for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) { 113 - tmp = list_entry(l, struct zcrypt_device, list); 114 - if (tmp->load <= zdev->load) 115 - break; 116 - } 117 - if (l == zdev->list.prev) 118 - return; 119 - /* Move zdev behind l */ 120 - list_move(&zdev->list, l); 121 - } 122 - 123 - /** 124 - * __zcrypt_decrease_preference(): Decrease preference of a crypto device. 125 - * @zdev: Pointer to a crypto device. 126 - * 127 - * Move the device towards the tail of the device list. 128 - * Need to be called while holding the zcrypt device list lock. 129 - * Note: cards with speed_rating of 0 are kept at the end of the list. 
130 - */ 131 - static void __zcrypt_decrease_preference(struct zcrypt_device *zdev, 132 - unsigned int weight) 133 - { 134 - struct zcrypt_device *tmp; 135 - struct list_head *l; 136 - 137 - zdev->load += weight; 138 - for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) { 139 - tmp = list_entry(l, struct zcrypt_device, list); 140 - if (tmp->load > zdev->load) 141 - break; 142 - } 143 - if (l == zdev->list.next) 144 - return; 145 - /* Move zdev before l */ 146 - list_move_tail(&zdev->list, l); 147 - } 148 - 149 - static void zcrypt_device_release(struct kref *kref) 150 - { 151 - struct zcrypt_device *zdev = 152 - container_of(kref, struct zcrypt_device, refcount); 153 - zcrypt_device_free(zdev); 154 - } 155 - 156 - void zcrypt_device_get(struct zcrypt_device *zdev) 157 - { 158 - kref_get(&zdev->refcount); 159 - } 160 - EXPORT_SYMBOL(zcrypt_device_get); 161 - 162 - int zcrypt_device_put(struct zcrypt_device *zdev) 163 - { 164 - return kref_put(&zdev->refcount, zcrypt_device_release); 165 - } 166 - EXPORT_SYMBOL(zcrypt_device_put); 167 - 168 - struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size) 169 - { 170 - struct zcrypt_device *zdev; 171 - 172 - zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL); 173 - if (!zdev) 174 - return NULL; 175 - zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL); 176 - if (!zdev->reply.message) 177 - goto out_free; 178 - zdev->reply.length = max_response_size; 179 - spin_lock_init(&zdev->lock); 180 - INIT_LIST_HEAD(&zdev->list); 181 - zdev->dbf_area = zcrypt_dbf_devices; 182 - return zdev; 183 - 184 - out_free: 185 - kfree(zdev); 186 - return NULL; 187 - } 188 - EXPORT_SYMBOL(zcrypt_device_alloc); 189 - 190 - void zcrypt_device_free(struct zcrypt_device *zdev) 191 - { 192 - kfree(zdev->reply.message); 193 - kfree(zdev); 194 - } 195 - EXPORT_SYMBOL(zcrypt_device_free); 196 - 197 - /** 198 - * zcrypt_device_register() - Register a crypto device. 
199 - * @zdev: Pointer to a crypto device 200 - * 201 - * Register a crypto device. Returns 0 if successful. 202 - */ 203 - int zcrypt_device_register(struct zcrypt_device *zdev) 204 - { 205 - int rc; 206 - 207 - if (!zdev->ops) 208 - return -ENODEV; 209 - rc = sysfs_create_group(&zdev->ap_dev->device.kobj, 210 - &zcrypt_device_attr_group); 211 - if (rc) 212 - goto out; 213 - get_device(&zdev->ap_dev->device); 214 - kref_init(&zdev->refcount); 215 - spin_lock_bh(&zcrypt_device_lock); 216 - zdev->online = 1; /* New devices are online by default. */ 217 - ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dreg", zdev->ap_dev->qid, 218 - zdev->online); 219 - list_add_tail(&zdev->list, &zcrypt_device_list); 220 - __zcrypt_increase_preference(zdev, 0); /* sort devices acc. weight */ 221 - zcrypt_device_count++; 222 - spin_unlock_bh(&zcrypt_device_lock); 223 - if (zdev->ops->rng) { 224 - rc = zcrypt_rng_device_add(); 225 - if (rc) 226 - goto out_unregister; 227 - } 228 - return 0; 229 - 230 - out_unregister: 231 - spin_lock_bh(&zcrypt_device_lock); 232 - zcrypt_device_count--; 233 - list_del_init(&zdev->list); 234 - spin_unlock_bh(&zcrypt_device_lock); 235 - sysfs_remove_group(&zdev->ap_dev->device.kobj, 236 - &zcrypt_device_attr_group); 237 - put_device(&zdev->ap_dev->device); 238 - zcrypt_device_put(zdev); 239 - out: 240 - return rc; 241 - } 242 - EXPORT_SYMBOL(zcrypt_device_register); 243 - 244 - /** 245 - * zcrypt_device_unregister(): Unregister a crypto device. 246 - * @zdev: Pointer to crypto device 247 - * 248 - * Unregister a crypto device. 
249 - */ 250 - void zcrypt_device_unregister(struct zcrypt_device *zdev) 251 - { 252 - if (zdev->ops->rng) 253 - zcrypt_rng_device_remove(); 254 - spin_lock_bh(&zcrypt_device_lock); 255 - zcrypt_device_count--; 256 - list_del_init(&zdev->list); 257 - spin_unlock_bh(&zcrypt_device_lock); 258 - sysfs_remove_group(&zdev->ap_dev->device.kobj, 259 - &zcrypt_device_attr_group); 260 - put_device(&zdev->ap_dev->device); 261 - zcrypt_device_put(zdev); 262 - } 263 - EXPORT_SYMBOL(zcrypt_device_unregister); 264 145 265 146 void zcrypt_msgtype_register(struct zcrypt_ops *zops) 266 147 { ··· 160 377 return 0; 161 378 } 162 379 380 + static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, 381 + struct zcrypt_queue *zq, 382 + unsigned int weight) 383 + { 384 + if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner)) 385 + return NULL; 386 + zcrypt_queue_get(zq); 387 + get_device(&zq->queue->ap_dev.device); 388 + atomic_add(weight, &zc->load); 389 + atomic_add(weight, &zq->load); 390 + zq->request_count++; 391 + return zq; 392 + } 393 + 394 + static inline void zcrypt_drop_queue(struct zcrypt_card *zc, 395 + struct zcrypt_queue *zq, 396 + unsigned int weight) 397 + { 398 + struct module *mod = zq->queue->ap_dev.drv->driver.owner; 399 + 400 + zq->request_count--; 401 + atomic_sub(weight, &zc->load); 402 + atomic_sub(weight, &zq->load); 403 + put_device(&zq->queue->ap_dev.device); 404 + zcrypt_queue_put(zq); 405 + module_put(mod); 406 + } 407 + 163 408 /* 164 409 * zcrypt ioctls. 
165 410 */ 166 411 static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) 167 412 { 168 - struct zcrypt_device *zdev, *pref_zdev = NULL; 413 + struct zcrypt_card *zc, *pref_zc; 414 + struct zcrypt_queue *zq, *pref_zq; 415 + unsigned int weight, pref_weight; 416 + unsigned int func_code; 169 417 int rc; 170 - unsigned int weight, func_code, pref_weight = 0; 171 418 172 419 if (mex->outputdatalength < mex->inputdatalength) 173 420 return -EINVAL; ··· 212 399 if (rc) 213 400 return rc; 214 401 215 - spin_lock_bh(&zcrypt_device_lock); 216 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 217 - if (!zdev->online || 218 - !zdev->ops->rsa_modexpo || 219 - zdev->min_mod_size > mex->inputdatalength || 220 - zdev->max_mod_size < mex->inputdatalength) 402 + pref_zc = NULL; 403 + pref_zq = NULL; 404 + spin_lock(&zcrypt_list_lock); 405 + for_each_zcrypt_card(zc) { 406 + /* Check for online accelarator and CCA cards */ 407 + if (!zc->online || !(zc->card->functions & 0x18000000)) 221 408 continue; 222 - weight = zdev->speed_rating[func_code]; 223 - if (!pref_zdev) { 224 - pref_zdev = zdev; 409 + /* Check for size limits */ 410 + if (zc->min_mod_size > mex->inputdatalength || 411 + zc->max_mod_size < mex->inputdatalength) 412 + continue; 413 + /* get weight index of the card device */ 414 + weight = zc->speed_rating[func_code]; 415 + if (pref_zc && atomic_read(&zc->load) + weight >= 416 + atomic_read(&pref_zc->load) + pref_weight) 417 + continue; 418 + for_each_zcrypt_queue(zq, zc) { 419 + /* check if device is online and eligible */ 420 + if (!zq->online) 421 + continue; 422 + if (pref_zq && atomic_read(&zq->load) + weight >= 423 + atomic_read(&pref_zq->load) + pref_weight) 424 + continue; 425 + pref_zc = zc; 426 + pref_zq = zq; 225 427 pref_weight = weight; 226 - continue; 227 428 } 228 - if ((pref_zdev->load + pref_weight) > (zdev->load + weight)) { 229 - pref_zdev = zdev; 230 - pref_weight = weight; 231 - continue; 232 - } 233 - if ((pref_zdev->load + pref_weight) 
<= zdev->load) 234 - break; /* Load on remaining devices too high - abort */ 235 429 } 430 + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 431 + spin_unlock(&zcrypt_list_lock); 236 432 237 - if (!pref_zdev) { 238 - spin_unlock_bh(&zcrypt_device_lock); 433 + if (!pref_zq) 239 434 return -ENODEV; 240 - } 241 - __zcrypt_decrease_preference(pref_zdev, pref_weight); 242 - zcrypt_device_get(pref_zdev); 243 - get_device(&pref_zdev->ap_dev->device); 244 - pref_zdev->request_count++; 245 - if (try_module_get(pref_zdev->ap_dev->drv->driver.owner)) { 246 - spin_unlock_bh(&zcrypt_device_lock); 247 - rc = -ENODEV; 248 - rc = pref_zdev->ops->rsa_modexpo(pref_zdev, mex); 249 - spin_lock_bh(&zcrypt_device_lock); 250 - module_put(pref_zdev->ap_dev->drv->driver.owner); 251 - } else 252 - rc = -EAGAIN; 253 435 254 - pref_zdev->request_count--; 255 - __zcrypt_increase_preference(pref_zdev, pref_weight); 256 - put_device(&pref_zdev->ap_dev->device); 257 - zcrypt_device_put(pref_zdev); 258 - spin_unlock_bh(&zcrypt_device_lock); 436 + rc = pref_zq->ops->rsa_modexpo(pref_zq, mex); 437 + 438 + spin_lock(&zcrypt_list_lock); 439 + zcrypt_drop_queue(pref_zc, pref_zq, weight); 440 + spin_unlock(&zcrypt_list_lock); 441 + 259 442 return rc; 260 443 } 261 444 262 445 static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) 263 446 { 264 - struct zcrypt_device *zdev, *pref_zdev = NULL; 265 - unsigned long long z1, z2, z3; 266 - int rc, copied; 267 - unsigned int weight, func_code, pref_weight = 0; 447 + struct zcrypt_card *zc, *pref_zc; 448 + struct zcrypt_queue *zq, *pref_zq; 449 + unsigned int weight, pref_weight; 450 + unsigned int func_code; 451 + int rc; 268 452 269 453 if (crt->outputdatalength < crt->inputdatalength) 270 454 return -EINVAL; ··· 276 466 if (rc) 277 467 return rc; 278 468 279 - copied = 0; 280 - restart: 281 - spin_lock_bh(&zcrypt_device_lock); 282 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 283 - if (!zdev->online || 284 - !zdev->ops->rsa_modexpo_crt 
|| 285 - zdev->min_mod_size > crt->inputdatalength || 286 - zdev->max_mod_size < crt->inputdatalength) 469 + pref_zc = NULL; 470 + pref_zq = NULL; 471 + spin_lock(&zcrypt_list_lock); 472 + for_each_zcrypt_card(zc) { 473 + /* Check for online accelarator and CCA cards */ 474 + if (!zc->online || !(zc->card->functions & 0x18000000)) 287 475 continue; 288 - if (zdev->short_crt && crt->inputdatalength > 240) { 289 - /* 290 - * Check inputdata for leading zeros for cards 291 - * that can't handle np_prime, bp_key, or 292 - * u_mult_inv > 128 bytes. 293 - */ 294 - if (copied == 0) { 295 - unsigned int len; 296 - spin_unlock_bh(&zcrypt_device_lock); 297 - /* len is max 256 / 2 - 120 = 8 298 - * For bigger device just assume len of leading 299 - * 0s is 8 as stated in the requirements for 300 - * ica_rsa_modexpo_crt struct in zcrypt.h. 301 - */ 302 - if (crt->inputdatalength <= 256) 303 - len = crt->inputdatalength / 2 - 120; 304 - else 305 - len = 8; 306 - if (len > sizeof(z1)) 307 - return -EFAULT; 308 - z1 = z2 = z3 = 0; 309 - if (copy_from_user(&z1, crt->np_prime, len) || 310 - copy_from_user(&z2, crt->bp_key, len) || 311 - copy_from_user(&z3, crt->u_mult_inv, len)) 312 - return -EFAULT; 313 - z1 = z2 = z3 = 0; 314 - copied = 1; 315 - /* 316 - * We have to restart device lookup - 317 - * the device list may have changed by now. 318 - */ 319 - goto restart; 320 - } 321 - if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL) 322 - /* The device can't handle this request. 
*/ 476 + /* Check for size limits */ 477 + if (zc->min_mod_size > crt->inputdatalength || 478 + zc->max_mod_size < crt->inputdatalength) 479 + continue; 480 + /* get weight index of the card device */ 481 + weight = zc->speed_rating[func_code]; 482 + if (pref_zc && atomic_read(&zc->load) + weight >= 483 + atomic_read(&pref_zc->load) + pref_weight) 484 + continue; 485 + for_each_zcrypt_queue(zq, zc) { 486 + /* check if device is online and eligible */ 487 + if (!zq->online) 323 488 continue; 489 + if (pref_zq && atomic_read(&zq->load) + weight >= 490 + atomic_read(&pref_zq->load) + pref_weight) 491 + continue; 492 + pref_zc = zc; 493 + pref_zq = zq; 494 + pref_weight = weight; 324 495 } 496 + } 497 + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 498 + spin_unlock(&zcrypt_list_lock); 325 499 326 - weight = zdev->speed_rating[func_code]; 327 - if (!pref_zdev) { 328 - pref_zdev = zdev; 329 - pref_weight = weight; 330 - continue; 331 - } 332 - if ((pref_zdev->load + pref_weight) > (zdev->load + weight)) { 333 - pref_zdev = zdev; 334 - pref_weight = weight; 335 - continue; 336 - } 337 - if ((pref_zdev->load + pref_weight) <= zdev->load) 338 - break; /* Load on remaining devices too high - abort */ 339 - } 340 - if (!pref_zdev) { 341 - spin_unlock_bh(&zcrypt_device_lock); 500 + if (!pref_zq) 342 501 return -ENODEV; 343 - } 344 - __zcrypt_decrease_preference(pref_zdev, pref_weight); 345 - zcrypt_device_get(pref_zdev); 346 - get_device(&pref_zdev->ap_dev->device); 347 - pref_zdev->request_count++; 348 - if (try_module_get(pref_zdev->ap_dev->drv->driver.owner)) { 349 - spin_unlock_bh(&zcrypt_device_lock); 350 - rc = pref_zdev->ops->rsa_modexpo_crt(pref_zdev, crt); 351 - spin_lock_bh(&zcrypt_device_lock); 352 - module_put(pref_zdev->ap_dev->drv->driver.owner); 353 - } else 354 - rc = -EAGAIN; 355 - pref_zdev->request_count--; 356 - __zcrypt_increase_preference(pref_zdev, pref_weight); 357 - put_device(&pref_zdev->ap_dev->device); 358 - zcrypt_device_put(pref_zdev); 
359 - spin_unlock_bh(&zcrypt_device_lock); 502 + 503 + rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt); 504 + 505 + spin_lock(&zcrypt_list_lock); 506 + zcrypt_drop_queue(pref_zc, pref_zq, weight); 507 + spin_unlock(&zcrypt_list_lock); 508 + 360 509 return rc; 361 510 } 362 511 363 512 static long zcrypt_send_cprb(struct ica_xcRB *xcRB) 364 513 { 365 - struct zcrypt_device *zdev, *pref_zdev = NULL; 366 - unsigned int weight = 0, func_code = 0, pref_weight = 0; 367 - int rc; 514 + struct zcrypt_card *zc, *pref_zc; 515 + struct zcrypt_queue *zq, *pref_zq; 368 516 struct ap_message ap_msg; 517 + unsigned int weight, pref_weight; 518 + unsigned int func_code; 519 + unsigned short *domain; 520 + int rc; 369 521 370 - rc = get_cprb_fc(xcRB, &ap_msg, &func_code); 522 + rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain); 371 523 if (rc) 372 524 return rc; 373 525 374 - spin_lock_bh(&zcrypt_device_lock); 375 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 376 - if (!zdev->online || !zdev->ops->send_cprb || 377 - (zdev->ops->variant == MSGTYPE06_VARIANT_EP11) || 378 - (xcRB->user_defined != AUTOSELECT && 379 - AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)) 526 + pref_zc = NULL; 527 + pref_zq = NULL; 528 + spin_lock(&zcrypt_list_lock); 529 + for_each_zcrypt_card(zc) { 530 + /* Check for online CCA cards */ 531 + if (!zc->online || !(zc->card->functions & 0x10000000)) 380 532 continue; 533 + /* Check for user selected CCA card */ 534 + if (xcRB->user_defined != AUTOSELECT && 535 + xcRB->user_defined != zc->card->id) 536 + continue; 537 + /* get weight index of the card device */ 538 + weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY]; 539 + if (pref_zc && atomic_read(&zc->load) + weight >= 540 + atomic_read(&pref_zc->load) + pref_weight) 541 + continue; 542 + for_each_zcrypt_queue(zq, zc) { 543 + /* check if device is online and eligible */ 544 + if (!zq->online || 545 + ((*domain != (unsigned short) AUTOSELECT) && 546 + (*domain != 
AP_QID_QUEUE(zq->queue->qid)))) 547 + continue; 548 + if (pref_zq && atomic_read(&zq->load) + weight >= 549 + atomic_read(&pref_zq->load) + pref_weight) 550 + continue; 551 + pref_zc = zc; 552 + pref_zq = zq; 553 + pref_weight = weight; 554 + } 555 + } 556 + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 557 + spin_unlock(&zcrypt_list_lock); 381 558 382 - weight = speed_idx_cca(func_code) * zdev->speed_rating[SECKEY]; 383 - if (!pref_zdev) { 384 - pref_zdev = zdev; 385 - pref_weight = weight; 386 - continue; 387 - } 388 - if ((pref_zdev->load + pref_weight) > (zdev->load + weight)) { 389 - pref_zdev = zdev; 390 - pref_weight = weight; 391 - continue; 392 - } 393 - if ((pref_zdev->load + pref_weight) <= zdev->load) 394 - break; /* Load on remaining devices too high - abort */ 395 - } 396 - if (!pref_zdev) { 397 - spin_unlock_bh(&zcrypt_device_lock); 559 + if (!pref_zq) 398 560 return -ENODEV; 399 - } 400 - __zcrypt_decrease_preference(pref_zdev, pref_weight); 401 - zcrypt_device_get(pref_zdev); 402 - get_device(&pref_zdev->ap_dev->device); 403 - pref_zdev->request_count++; 404 - if (try_module_get(pref_zdev->ap_dev->drv->driver.owner)) { 405 - spin_unlock_bh(&zcrypt_device_lock); 406 - rc = pref_zdev->ops->send_cprb(pref_zdev, xcRB, &ap_msg); 407 - spin_lock_bh(&zcrypt_device_lock); 408 - module_put(pref_zdev->ap_dev->drv->driver.owner); 409 - } else 410 - rc = -EAGAIN; 411 - pref_zdev->request_count--; 412 - __zcrypt_increase_preference(pref_zdev, pref_weight); 413 - put_device(&pref_zdev->ap_dev->device); 414 - zcrypt_device_put(pref_zdev); 415 - spin_unlock_bh(&zcrypt_device_lock); 561 + 562 + /* in case of auto select, provide the correct domain */ 563 + if (*domain == (unsigned short) AUTOSELECT) 564 + *domain = AP_QID_QUEUE(pref_zq->queue->qid); 565 + 566 + rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); 567 + 568 + spin_lock(&zcrypt_list_lock); 569 + zcrypt_drop_queue(pref_zc, pref_zq, weight); 570 + spin_unlock(&zcrypt_list_lock); 416 571 
return rc; 417 572 } 418 573 419 - struct ep11_target_dev_list { 420 - unsigned short targets_num; 421 - struct ep11_target_dev *targets; 422 - }; 423 - 424 - static bool is_desired_ep11dev(unsigned int dev_qid, 425 - struct ep11_target_dev_list dev_list) 574 + static bool is_desired_ep11_card(unsigned int dev_id, 575 + unsigned short target_num, 576 + struct ep11_target_dev *targets) 426 577 { 427 - int n; 428 - 429 - for (n = 0; n < dev_list.targets_num; n++, dev_list.targets++) { 430 - if ((AP_QID_DEVICE(dev_qid) == dev_list.targets->ap_id) && 431 - (AP_QID_QUEUE(dev_qid) == dev_list.targets->dom_id)) { 578 + while (target_num-- > 0) { 579 + if (dev_id == targets->ap_id) 432 580 return true; 433 - } 581 + targets++; 582 + } 583 + return false; 584 + } 585 + 586 + static bool is_desired_ep11_queue(unsigned int dev_qid, 587 + unsigned short target_num, 588 + struct ep11_target_dev *targets) 589 + { 590 + while (target_num-- > 0) { 591 + if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid) 592 + return true; 593 + targets++; 434 594 } 435 595 return false; 436 596 } 437 597 438 598 static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) 439 599 { 440 - struct zcrypt_device *zdev, *pref_zdev = NULL; 600 + struct zcrypt_card *zc, *pref_zc; 601 + struct zcrypt_queue *zq, *pref_zq; 602 + struct ep11_target_dev *targets; 603 + unsigned short target_num; 604 + unsigned int weight, pref_weight; 605 + unsigned int func_code; 441 606 struct ap_message ap_msg; 442 - unsigned int weight = 0, func_code = 0, pref_weight = 0; 443 - bool autoselect = false; 444 607 int rc; 445 - struct ep11_target_dev_list ep11_dev_list = { 446 - .targets_num = 0x00, 447 - .targets = NULL, 448 - }; 449 608 450 - ep11_dev_list.targets_num = (unsigned short) xcrb->targets_num; 609 + target_num = (unsigned short) xcrb->targets_num; 451 610 452 611 /* empty list indicates autoselect (all available targets) */ 453 - if (ep11_dev_list.targets_num == 0) 454 - autoselect = true; 455 - else { 456 - 
ep11_dev_list.targets = kcalloc((unsigned short) 457 - xcrb->targets_num, 458 - sizeof(struct ep11_target_dev), 459 - GFP_KERNEL); 460 - if (!ep11_dev_list.targets) 612 + targets = NULL; 613 + if (target_num != 0) { 614 + struct ep11_target_dev __user *uptr; 615 + 616 + targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); 617 + if (!targets) 461 618 return -ENOMEM; 462 619 463 - if (copy_from_user(ep11_dev_list.targets, 464 - (struct ep11_target_dev __force __user *) 465 - xcrb->targets, xcrb->targets_num * 466 - sizeof(struct ep11_target_dev))) 620 + uptr = (struct ep11_target_dev __force __user *) xcrb->targets; 621 + if (copy_from_user(targets, uptr, 622 + target_num * sizeof(*targets))) 467 623 return -EFAULT; 468 624 } 469 625 470 626 rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code); 471 627 if (rc) 472 - return rc; 628 + goto out_free; 473 629 474 - spin_lock_bh(&zcrypt_device_lock); 475 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 476 - /* check if device is eligible */ 477 - if (!zdev->online || 478 - zdev->ops->variant != MSGTYPE06_VARIANT_EP11) 630 + pref_zc = NULL; 631 + pref_zq = NULL; 632 + spin_lock(&zcrypt_list_lock); 633 + for_each_zcrypt_card(zc) { 634 + /* Check for online EP11 cards */ 635 + if (!zc->online || !(zc->card->functions & 0x04000000)) 479 636 continue; 480 - 481 - /* check if device is selected as valid target */ 482 - if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) && 483 - !autoselect) 637 + /* Check for user selected EP11 card */ 638 + if (targets && 639 + !is_desired_ep11_card(zc->card->id, target_num, targets)) 484 640 continue; 485 - 486 - weight = speed_idx_ep11(func_code) * zdev->speed_rating[SECKEY]; 487 - if (!pref_zdev) { 488 - pref_zdev = zdev; 641 + /* get weight index of the card device */ 642 + weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY]; 643 + if (pref_zc && atomic_read(&zc->load) + weight >= 644 + atomic_read(&pref_zc->load) + pref_weight) 645 + continue; 646 + 
for_each_zcrypt_queue(zq, zc) { 647 + /* check if device is online and eligible */ 648 + if (!zq->online || 649 + (targets && 650 + !is_desired_ep11_queue(zq->queue->qid, 651 + target_num, targets))) 652 + continue; 653 + if (pref_zq && atomic_read(&zq->load) + weight >= 654 + atomic_read(&pref_zq->load) + pref_weight) 655 + continue; 656 + pref_zc = zc; 657 + pref_zq = zq; 489 658 pref_weight = weight; 490 - continue; 491 659 } 492 - if ((pref_zdev->load + pref_weight) > (zdev->load + weight)) { 493 - pref_zdev = zdev; 494 - pref_weight = weight; 495 - continue; 496 - } 497 - if ((pref_zdev->load + pref_weight) <= zdev->load) 498 - break; /* Load on remaining devices too high - abort */ 499 660 } 500 - if (!pref_zdev) { 501 - spin_unlock_bh(&zcrypt_device_lock); 502 - return -ENODEV; 661 + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 662 + spin_unlock(&zcrypt_list_lock); 663 + 664 + if (!pref_zq) { 665 + rc = -ENODEV; 666 + goto out_free; 503 667 } 504 668 505 - zcrypt_device_get(pref_zdev); 506 - get_device(&pref_zdev->ap_dev->device); 507 - pref_zdev->request_count++; 508 - if (try_module_get(pref_zdev->ap_dev->drv->driver.owner)) { 509 - spin_unlock_bh(&zcrypt_device_lock); 510 - rc = pref_zdev->ops->send_ep11_cprb(pref_zdev, xcrb, &ap_msg); 511 - spin_lock_bh(&zcrypt_device_lock); 512 - module_put(pref_zdev->ap_dev->drv->driver.owner); 513 - } else { 514 - rc = -EAGAIN; 515 - } 516 - pref_zdev->request_count--; 517 - put_device(&pref_zdev->ap_dev->device); 518 - zcrypt_device_put(pref_zdev); 519 - spin_unlock_bh(&zcrypt_device_lock); 669 + rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg); 670 + 671 + spin_lock(&zcrypt_list_lock); 672 + zcrypt_drop_queue(pref_zc, pref_zq, weight); 673 + spin_unlock(&zcrypt_list_lock); 674 + 675 + out_free: 676 + kfree(targets); 520 677 return rc; 521 678 } 522 679 523 680 static long zcrypt_rng(char *buffer) 524 681 { 525 - struct zcrypt_device *zdev, *pref_zdev = NULL; 682 + struct zcrypt_card *zc, *pref_zc; 
683 + struct zcrypt_queue *zq, *pref_zq; 684 + unsigned int weight, pref_weight; 685 + unsigned int func_code; 526 686 struct ap_message ap_msg; 527 - unsigned int weight = 0, func_code = 0, pref_weight = 0; 687 + unsigned int domain; 528 688 int rc; 529 689 530 - rc = get_rng_fc(&ap_msg, &func_code); 690 + rc = get_rng_fc(&ap_msg, &func_code, &domain); 531 691 if (rc) 532 692 return rc; 533 693 534 - spin_lock_bh(&zcrypt_device_lock); 535 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 536 - if (!zdev->online || !zdev->ops->rng) 694 + pref_zc = NULL; 695 + pref_zq = NULL; 696 + spin_lock(&zcrypt_list_lock); 697 + for_each_zcrypt_card(zc) { 698 + /* Check for online CCA cards */ 699 + if (!zc->online || !(zc->card->functions & 0x10000000)) 537 700 continue; 538 - 539 - weight = zdev->speed_rating[func_code]; 540 - if (!pref_zdev) { 541 - pref_zdev = zdev; 701 + /* get weight index of the card device */ 702 + weight = zc->speed_rating[func_code]; 703 + if (pref_zc && atomic_read(&zc->load) + weight >= 704 + atomic_read(&pref_zc->load) + pref_weight) 705 + continue; 706 + for_each_zcrypt_queue(zq, zc) { 707 + /* check if device is online and eligible */ 708 + if (!zq->online) 709 + continue; 710 + if (pref_zq && atomic_read(&zq->load) + weight >= 711 + atomic_read(&pref_zq->load) + pref_weight) 712 + continue; 713 + pref_zc = zc; 714 + pref_zq = zq; 542 715 pref_weight = weight; 543 - continue; 544 716 } 545 - if ((pref_zdev->load + pref_weight) > (zdev->load + weight)) { 546 - pref_zdev = zdev; 547 - pref_weight = weight; 548 - continue; 549 - } 550 - if ((pref_zdev->load + pref_weight) <= zdev->load) 551 - break; /* Load on remaining devices too high - abort */ 552 717 } 553 - if (!pref_zdev) { 554 - spin_unlock_bh(&zcrypt_device_lock); 718 + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 719 + spin_unlock(&zcrypt_list_lock); 720 + 721 + if (!pref_zq) 555 722 return -ENODEV; 556 - } 557 723 558 - zcrypt_device_get(pref_zdev); 559 - 
get_device(&pref_zdev->ap_dev->device); 560 - pref_zdev->request_count++; 561 - if (try_module_get(pref_zdev->ap_dev->drv->driver.owner)) { 562 - spin_unlock_bh(&zcrypt_device_lock); 563 - rc = pref_zdev->ops->rng(pref_zdev, buffer, &ap_msg); 564 - spin_lock_bh(&zcrypt_device_lock); 565 - module_put(pref_zdev->ap_dev->drv->driver.owner); 566 - } else 567 - rc = -EAGAIN; 568 - pref_zdev->request_count--; 569 - put_device(&pref_zdev->ap_dev->device); 570 - zcrypt_device_put(pref_zdev); 571 - spin_unlock_bh(&zcrypt_device_lock); 724 + rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); 725 + 726 + spin_lock(&zcrypt_list_lock); 727 + zcrypt_drop_queue(pref_zc, pref_zq, weight); 728 + spin_unlock(&zcrypt_list_lock); 572 729 return rc; 573 730 } 574 731 575 732 static void zcrypt_status_mask(char status[AP_DEVICES]) 576 733 { 577 - struct zcrypt_device *zdev; 734 + struct zcrypt_card *zc; 735 + struct zcrypt_queue *zq; 578 736 579 737 memset(status, 0, sizeof(char) * AP_DEVICES); 580 - spin_lock_bh(&zcrypt_device_lock); 581 - list_for_each_entry(zdev, &zcrypt_device_list, list) 582 - status[AP_QID_DEVICE(zdev->ap_dev->qid)] = 583 - zdev->online ? zdev->user_space_type : 0x0d; 584 - spin_unlock_bh(&zcrypt_device_lock); 738 + spin_lock(&zcrypt_list_lock); 739 + for_each_zcrypt_card(zc) { 740 + for_each_zcrypt_queue(zq, zc) { 741 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 742 + continue; 743 + status[AP_QID_CARD(zq->queue->qid)] = 744 + zc->online ? 
zc->user_space_type : 0x0d; 745 + } 746 + } 747 + spin_unlock(&zcrypt_list_lock); 585 748 } 586 749 587 750 static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES]) 588 751 { 589 - struct zcrypt_device *zdev; 752 + struct zcrypt_card *zc; 753 + struct zcrypt_queue *zq; 590 754 591 755 memset(qdepth, 0, sizeof(char) * AP_DEVICES); 592 - spin_lock_bh(&zcrypt_device_lock); 593 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 594 - spin_lock(&zdev->ap_dev->lock); 595 - qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] = 596 - zdev->ap_dev->pendingq_count + 597 - zdev->ap_dev->requestq_count; 598 - spin_unlock(&zdev->ap_dev->lock); 756 + spin_lock(&zcrypt_list_lock); 757 + for_each_zcrypt_card(zc) { 758 + for_each_zcrypt_queue(zq, zc) { 759 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 760 + continue; 761 + spin_lock(&zq->queue->lock); 762 + qdepth[AP_QID_CARD(zq->queue->qid)] = 763 + zq->queue->pendingq_count + 764 + zq->queue->requestq_count; 765 + spin_unlock(&zq->queue->lock); 766 + } 599 767 } 600 - spin_unlock_bh(&zcrypt_device_lock); 768 + spin_unlock(&zcrypt_list_lock); 601 769 } 602 770 603 771 static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES]) 604 772 { 605 - struct zcrypt_device *zdev; 773 + struct zcrypt_card *zc; 774 + struct zcrypt_queue *zq; 606 775 607 776 memset(reqcnt, 0, sizeof(int) * AP_DEVICES); 608 - spin_lock_bh(&zcrypt_device_lock); 609 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 610 - spin_lock(&zdev->ap_dev->lock); 611 - reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] = 612 - zdev->ap_dev->total_request_count; 613 - spin_unlock(&zdev->ap_dev->lock); 777 + spin_lock(&zcrypt_list_lock); 778 + for_each_zcrypt_card(zc) { 779 + for_each_zcrypt_queue(zq, zc) { 780 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 781 + continue; 782 + spin_lock(&zq->queue->lock); 783 + reqcnt[AP_QID_CARD(zq->queue->qid)] = 784 + zq->queue->total_request_count; 785 + spin_unlock(&zq->queue->lock); 786 + } 614 787 } 615 - 
spin_unlock_bh(&zcrypt_device_lock); 788 + spin_unlock(&zcrypt_list_lock); 616 789 } 617 790 618 791 static int zcrypt_pendingq_count(void) 619 792 { 620 - struct zcrypt_device *zdev; 621 - int pendingq_count = 0; 793 + struct zcrypt_card *zc; 794 + struct zcrypt_queue *zq; 795 + int pendingq_count; 622 796 623 - spin_lock_bh(&zcrypt_device_lock); 624 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 625 - spin_lock(&zdev->ap_dev->lock); 626 - pendingq_count += zdev->ap_dev->pendingq_count; 627 - spin_unlock(&zdev->ap_dev->lock); 797 + pendingq_count = 0; 798 + spin_lock(&zcrypt_list_lock); 799 + for_each_zcrypt_card(zc) { 800 + for_each_zcrypt_queue(zq, zc) { 801 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 802 + continue; 803 + spin_lock(&zq->queue->lock); 804 + pendingq_count += zq->queue->pendingq_count; 805 + spin_unlock(&zq->queue->lock); 806 + } 628 807 } 629 - spin_unlock_bh(&zcrypt_device_lock); 808 + spin_unlock(&zcrypt_list_lock); 630 809 return pendingq_count; 631 810 } 632 811 633 812 static int zcrypt_requestq_count(void) 634 813 { 635 - struct zcrypt_device *zdev; 636 - int requestq_count = 0; 814 + struct zcrypt_card *zc; 815 + struct zcrypt_queue *zq; 816 + int requestq_count; 637 817 638 - spin_lock_bh(&zcrypt_device_lock); 639 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 640 - spin_lock(&zdev->ap_dev->lock); 641 - requestq_count += zdev->ap_dev->requestq_count; 642 - spin_unlock(&zdev->ap_dev->lock); 818 + requestq_count = 0; 819 + spin_lock(&zcrypt_list_lock); 820 + for_each_zcrypt_card(zc) { 821 + for_each_zcrypt_queue(zq, zc) { 822 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 823 + continue; 824 + spin_lock(&zq->queue->lock); 825 + requestq_count += zq->queue->requestq_count; 826 + spin_unlock(&zq->queue->lock); 827 + } 643 828 } 644 - spin_unlock_bh(&zcrypt_device_lock); 829 + spin_unlock(&zcrypt_list_lock); 645 830 return requestq_count; 646 831 } 647 832 648 833 static int zcrypt_count_type(int type) 
649 834 { 650 - struct zcrypt_device *zdev; 651 - int device_count = 0; 835 + struct zcrypt_card *zc; 836 + struct zcrypt_queue *zq; 837 + int device_count; 652 838 653 - spin_lock_bh(&zcrypt_device_lock); 654 - list_for_each_entry(zdev, &zcrypt_device_list, list) 655 - if (zdev->user_space_type == type) 839 + device_count = 0; 840 + spin_lock(&zcrypt_list_lock); 841 + for_each_zcrypt_card(zc) { 842 + if (zc->card->id != type) 843 + continue; 844 + for_each_zcrypt_queue(zq, zc) { 845 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 846 + continue; 656 847 device_count++; 657 - spin_unlock_bh(&zcrypt_device_lock); 848 + } 849 + } 850 + spin_unlock(&zcrypt_list_lock); 658 851 return device_count; 659 852 } 660 853 ··· 1126 1313 1127 1314 static void zcrypt_disable_card(int index) 1128 1315 { 1129 - struct zcrypt_device *zdev; 1316 + struct zcrypt_card *zc; 1317 + struct zcrypt_queue *zq; 1130 1318 1131 - spin_lock_bh(&zcrypt_device_lock); 1132 - list_for_each_entry(zdev, &zcrypt_device_list, list) 1133 - if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { 1134 - zdev->online = 0; 1135 - ap_flush_queue(zdev->ap_dev); 1136 - break; 1319 + spin_lock(&zcrypt_list_lock); 1320 + for_each_zcrypt_card(zc) { 1321 + for_each_zcrypt_queue(zq, zc) { 1322 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 1323 + continue; 1324 + zq->online = 0; 1325 + ap_flush_queue(zq->queue); 1137 1326 } 1138 - spin_unlock_bh(&zcrypt_device_lock); 1327 + } 1328 + spin_unlock(&zcrypt_list_lock); 1139 1329 } 1140 1330 1141 1331 static void zcrypt_enable_card(int index) 1142 1332 { 1143 - struct zcrypt_device *zdev; 1333 + struct zcrypt_card *zc; 1334 + struct zcrypt_queue *zq; 1144 1335 1145 - spin_lock_bh(&zcrypt_device_lock); 1146 - list_for_each_entry(zdev, &zcrypt_device_list, list) 1147 - if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { 1148 - zdev->online = 1; 1149 - break; 1336 + spin_lock(&zcrypt_list_lock); 1337 + for_each_zcrypt_card(zc) { 1338 + for_each_zcrypt_queue(zq, zc) { 
1339 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 1340 + continue; 1341 + zq->online = 1; 1342 + ap_flush_queue(zq->queue); 1150 1343 } 1151 - spin_unlock_bh(&zcrypt_device_lock); 1344 + } 1345 + spin_unlock(&zcrypt_list_lock); 1152 1346 } 1153 1347 1154 1348 static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer, ··· 1253 1433 .quality = 990, 1254 1434 }; 1255 1435 1256 - static int zcrypt_rng_device_add(void) 1436 + int zcrypt_rng_device_add(void) 1257 1437 { 1258 1438 int rc = 0; 1259 1439 ··· 1283 1463 return rc; 1284 1464 } 1285 1465 1286 - static void zcrypt_rng_device_remove(void) 1466 + void zcrypt_rng_device_remove(void) 1287 1467 { 1288 1468 mutex_lock(&zcrypt_rng_mutex); 1289 1469 zcrypt_rng_device_count--; ··· 1305 1485 zcrypt_dbf_devices = debug_register("zcrypt_devices", 1, 1, 16); 1306 1486 debug_register_view(zcrypt_dbf_devices, &debug_hex_ascii_view); 1307 1487 debug_set_level(zcrypt_dbf_devices, DBF_ERR); 1488 + 1489 + zcrypt_dbf_cards = debug_register("zcrypt_cards", 1, 1, 16); 1490 + debug_register_view(zcrypt_dbf_cards, &debug_hex_ascii_view); 1491 + debug_set_level(zcrypt_dbf_cards, DBF_ERR); 1308 1492 1309 1493 return 0; 1310 1494 } ··· 1341 1517 goto out; 1342 1518 1343 1519 /* Set up the proc file system */ 1344 - zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops); 1520 + zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, 1521 + &zcrypt_proc_fops); 1345 1522 if (!zcrypt_entry) { 1346 1523 rc = -ENOMEM; 1347 1524 goto out_misc;
+64 -20
drivers/s390/crypto/zcrypt_api.h
··· 88 88 * Identifier for Crypto Request Performance Index 89 89 */ 90 90 enum crypto_ops { 91 - MEX_1K = 0, 91 + MEX_1K, 92 92 MEX_2K, 93 93 MEX_4K, 94 94 CRT_1K, ··· 99 99 NUM_OPS 100 100 }; 101 101 102 - struct zcrypt_device; 102 + struct zcrypt_queue; 103 103 104 104 struct zcrypt_ops { 105 - long (*rsa_modexpo)(struct zcrypt_device *, struct ica_rsa_modexpo *); 106 - long (*rsa_modexpo_crt)(struct zcrypt_device *, 105 + long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *); 106 + long (*rsa_modexpo_crt)(struct zcrypt_queue *, 107 107 struct ica_rsa_modexpo_crt *); 108 - long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *, 108 + long (*send_cprb)(struct zcrypt_queue *, struct ica_xcRB *, 109 109 struct ap_message *); 110 - long (*send_ep11_cprb)(struct zcrypt_device *, struct ep11_urb *, 110 + long (*send_ep11_cprb)(struct zcrypt_queue *, struct ep11_urb *, 111 111 struct ap_message *); 112 - long (*rng)(struct zcrypt_device *, char *, struct ap_message *); 112 + long (*rng)(struct zcrypt_queue *, char *, struct ap_message *); 113 113 struct list_head list; /* zcrypt ops list. */ 114 114 struct module *owner; 115 115 int variant; 116 116 char name[128]; 117 117 }; 118 118 119 - struct zcrypt_device { 119 + struct zcrypt_card { 120 120 struct list_head list; /* Device list. */ 121 - spinlock_t lock; /* Per device lock. */ 121 + struct list_head zqueues; /* List of zcrypt queues */ 122 122 struct kref refcount; /* device refcounting */ 123 - struct ap_device *ap_dev; /* The "real" ap device. */ 124 - struct zcrypt_ops *ops; /* Crypto operations. */ 123 + struct ap_card *card; /* The "real" ap card device. */ 125 124 int online; /* User online/offline */ 126 125 127 126 int user_space_type; /* User space device id. */ 128 127 char *type_string; /* User space device name. */ 129 128 int min_mod_size; /* Min number of bits. */ 130 129 int max_mod_size; /* Max number of bits. */ 131 - int short_crt; /* Card has crt length restriction. 
*/ 130 + int max_exp_bit_length; 132 131 int speed_rating[NUM_OPS]; /* Speed idx of crypto ops. */ 133 - int load; /* Utilization of the crypto device */ 132 + atomic_t load; /* Utilization of the crypto device */ 133 + 134 + int request_count; /* # current requests. */ 135 + 136 + debug_info_t *dbf_area; /* debugging */ 137 + }; 138 + 139 + struct zcrypt_queue { 140 + struct list_head list; /* Device list. */ 141 + struct kref refcount; /* device refcounting */ 142 + struct zcrypt_card *zcard; 143 + struct zcrypt_ops *ops; /* Crypto operations. */ 144 + struct ap_queue *queue; /* The "real" ap queue device. */ 145 + int online; /* User online/offline */ 146 + 147 + atomic_t load; /* Utilization of the crypto device */ 134 148 135 149 int request_count; /* # current requests. */ 136 150 137 151 struct ap_message reply; /* Per-device reply structure. */ 138 - int max_exp_bit_length; 139 152 140 153 debug_info_t *dbf_area; /* debugging */ 141 154 }; ··· 156 143 /* transport layer rescanning */ 157 144 extern atomic_t zcrypt_rescan_req; 158 145 159 - struct zcrypt_device *zcrypt_device_alloc(size_t); 160 - void zcrypt_device_free(struct zcrypt_device *); 161 - void zcrypt_device_get(struct zcrypt_device *); 162 - int zcrypt_device_put(struct zcrypt_device *); 163 - int zcrypt_device_register(struct zcrypt_device *); 164 - void zcrypt_device_unregister(struct zcrypt_device *); 146 + extern spinlock_t zcrypt_list_lock; 147 + extern int zcrypt_device_count; 148 + extern struct list_head zcrypt_card_list; 149 + 150 + extern debug_info_t *zcrypt_dbf_common; 151 + extern debug_info_t *zcrypt_dbf_devices; 152 + extern debug_info_t *zcrypt_dbf_cards; 153 + 154 + #define for_each_zcrypt_card(_zc) \ 155 + list_for_each_entry(_zc, &zcrypt_card_list, list) 156 + 157 + #define for_each_zcrypt_queue(_zq, _zc) \ 158 + list_for_each_entry(_zq, &(_zc)->zqueues, list) 159 + 160 + struct zcrypt_card *zcrypt_card_alloc(void); 161 + void zcrypt_card_free(struct zcrypt_card *); 162 + void 
zcrypt_card_get(struct zcrypt_card *); 163 + int zcrypt_card_put(struct zcrypt_card *); 164 + int zcrypt_card_register(struct zcrypt_card *); 165 + void zcrypt_card_unregister(struct zcrypt_card *); 166 + struct zcrypt_card *zcrypt_card_get_best(unsigned int *, 167 + unsigned int, unsigned int); 168 + void zcrypt_card_put_best(struct zcrypt_card *, unsigned int); 169 + 170 + struct zcrypt_queue *zcrypt_queue_alloc(size_t); 171 + void zcrypt_queue_free(struct zcrypt_queue *); 172 + void zcrypt_queue_get(struct zcrypt_queue *); 173 + int zcrypt_queue_put(struct zcrypt_queue *); 174 + int zcrypt_queue_register(struct zcrypt_queue *); 175 + void zcrypt_queue_unregister(struct zcrypt_queue *); 176 + void zcrypt_queue_force_online(struct zcrypt_queue *, int); 177 + struct zcrypt_queue *zcrypt_queue_get_best(unsigned int, unsigned int); 178 + void zcrypt_queue_put_best(struct zcrypt_queue *, unsigned int); 179 + 180 + int zcrypt_rng_device_add(void); 181 + void zcrypt_rng_device_remove(void); 182 + 165 183 void zcrypt_msgtype_register(struct zcrypt_ops *); 166 184 void zcrypt_msgtype_unregister(struct zcrypt_ops *); 167 185 struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
+181
drivers/s390/crypto/zcrypt_card.c
··· 1 + /* 2 + * zcrypt 2.1.0 3 + * 4 + * Copyright IBM Corp. 2001, 2012 5 + * Author(s): Robert Burroughs 6 + * Eric Rossman (edrossma@us.ibm.com) 7 + * Cornelia Huck <cornelia.huck@de.ibm.com> 8 + * 9 + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 10 + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 11 + * Ralph Wuerthner <rwuerthn@de.ibm.com> 12 + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 13 + * 14 + * This program is free software; you can redistribute it and/or modify 15 + * it under the terms of the GNU General Public License as published by 16 + * the Free Software Foundation; either version 2, or (at your option) 17 + * any later version. 18 + * 19 + * This program is distributed in the hope that it will be useful, 20 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 + * GNU General Public License for more details. 23 + */ 24 + 25 + #include <linux/module.h> 26 + #include <linux/init.h> 27 + #include <linux/interrupt.h> 28 + #include <linux/miscdevice.h> 29 + #include <linux/fs.h> 30 + #include <linux/proc_fs.h> 31 + #include <linux/seq_file.h> 32 + #include <linux/compat.h> 33 + #include <linux/slab.h> 34 + #include <linux/atomic.h> 35 + #include <linux/uaccess.h> 36 + #include <linux/hw_random.h> 37 + #include <linux/debugfs.h> 38 + #include <asm/debug.h> 39 + 40 + #include "zcrypt_debug.h" 41 + #include "zcrypt_api.h" 42 + 43 + #include "zcrypt_msgtype6.h" 44 + #include "zcrypt_msgtype50.h" 45 + 46 + /* 47 + * Device attributes common for all crypto card devices. 
48 + */ 49 + 50 + static ssize_t zcrypt_card_type_show(struct device *dev, 51 + struct device_attribute *attr, char *buf) 52 + { 53 + struct zcrypt_card *zc = to_ap_card(dev)->private; 54 + 55 + return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string); 56 + } 57 + 58 + static DEVICE_ATTR(type, 0444, zcrypt_card_type_show, NULL); 59 + 60 + static ssize_t zcrypt_card_online_show(struct device *dev, 61 + struct device_attribute *attr, 62 + char *buf) 63 + { 64 + struct zcrypt_card *zc = to_ap_card(dev)->private; 65 + 66 + return snprintf(buf, PAGE_SIZE, "%d\n", zc->online); 67 + } 68 + 69 + static ssize_t zcrypt_card_online_store(struct device *dev, 70 + struct device_attribute *attr, 71 + const char *buf, size_t count) 72 + { 73 + struct zcrypt_card *zc = to_ap_card(dev)->private; 74 + struct zcrypt_queue *zq; 75 + int online, id; 76 + 77 + if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) 78 + return -EINVAL; 79 + 80 + zc->online = online; 81 + id = zc->card->id; 82 + ZCRYPT_DBF_DEV(DBF_INFO, zc, "card%02xo%dman", id, online); 83 + spin_lock(&zcrypt_list_lock); 84 + list_for_each_entry(zq, &zc->zqueues, list) 85 + zcrypt_queue_force_online(zq, online); 86 + spin_unlock(&zcrypt_list_lock); 87 + return count; 88 + } 89 + 90 + static DEVICE_ATTR(online, 0644, zcrypt_card_online_show, 91 + zcrypt_card_online_store); 92 + 93 + static struct attribute *zcrypt_card_attrs[] = { 94 + &dev_attr_type.attr, 95 + &dev_attr_online.attr, 96 + NULL, 97 + }; 98 + 99 + static struct attribute_group zcrypt_card_attr_group = { 100 + .attrs = zcrypt_card_attrs, 101 + }; 102 + 103 + struct zcrypt_card *zcrypt_card_alloc(void) 104 + { 105 + struct zcrypt_card *zc; 106 + 107 + zc = kzalloc(sizeof(struct zcrypt_card), GFP_KERNEL); 108 + if (!zc) 109 + return NULL; 110 + INIT_LIST_HEAD(&zc->list); 111 + INIT_LIST_HEAD(&zc->zqueues); 112 + zc->dbf_area = zcrypt_dbf_cards; 113 + kref_init(&zc->refcount); 114 + return zc; 115 + } 116 + EXPORT_SYMBOL(zcrypt_card_alloc); 117 + 
118 + void zcrypt_card_free(struct zcrypt_card *zc) 119 + { 120 + kfree(zc); 121 + } 122 + EXPORT_SYMBOL(zcrypt_card_free); 123 + 124 + static void zcrypt_card_release(struct kref *kref) 125 + { 126 + struct zcrypt_card *zdev = 127 + container_of(kref, struct zcrypt_card, refcount); 128 + zcrypt_card_free(zdev); 129 + } 130 + 131 + void zcrypt_card_get(struct zcrypt_card *zc) 132 + { 133 + kref_get(&zc->refcount); 134 + } 135 + EXPORT_SYMBOL(zcrypt_card_get); 136 + 137 + int zcrypt_card_put(struct zcrypt_card *zc) 138 + { 139 + return kref_put(&zc->refcount, zcrypt_card_release); 140 + } 141 + EXPORT_SYMBOL(zcrypt_card_put); 142 + 143 + /** 144 + * zcrypt_card_register() - Register a crypto card device. 145 + * @zc: Pointer to a crypto card device 146 + * 147 + * Register a crypto card device. Returns 0 if successful. 148 + */ 149 + int zcrypt_card_register(struct zcrypt_card *zc) 150 + { 151 + int rc; 152 + 153 + rc = sysfs_create_group(&zc->card->ap_dev.device.kobj, 154 + &zcrypt_card_attr_group); 155 + if (rc) 156 + return rc; 157 + 158 + spin_lock(&zcrypt_list_lock); 159 + list_add_tail(&zc->list, &zcrypt_card_list); 160 + spin_unlock(&zcrypt_list_lock); 161 + 162 + zc->online = 1; 163 + return rc; 164 + } 165 + EXPORT_SYMBOL(zcrypt_card_register); 166 + 167 + /** 168 + * zcrypt_card_unregister(): Unregister a crypto card device. 169 + * @zc: Pointer to crypto card device 170 + * 171 + * Unregister a crypto card device. 172 + */ 173 + void zcrypt_card_unregister(struct zcrypt_card *zc) 174 + { 175 + spin_lock(&zcrypt_list_lock); 176 + list_del_init(&zc->list); 177 + spin_unlock(&zcrypt_list_lock); 178 + sysfs_remove_group(&zc->card->ap_dev.device.kobj, 179 + &zcrypt_card_attr_group); 180 + } 181 + EXPORT_SYMBOL(zcrypt_card_unregister);
+157 -69
drivers/s390/crypto/zcrypt_cex2a.c
··· 31 31 #include <linux/err.h> 32 32 #include <linux/atomic.h> 33 33 #include <asm/uaccess.h> 34 + #include <linux/mod_devicetable.h> 34 35 35 36 #include "ap_bus.h" 36 37 #include "zcrypt_api.h" ··· 55 54 #define CEX2A_CLEANUP_TIME (15*HZ) 56 55 #define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME 57 56 58 - static struct ap_device_id zcrypt_cex2a_ids[] = { 59 - { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) }, 60 - { AP_DEVICE(AP_DEVICE_TYPE_CEX3A) }, 61 - { /* end of list */ }, 62 - }; 63 - 64 - MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids); 65 57 MODULE_AUTHOR("IBM Corporation"); 66 58 MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \ 67 59 "Copyright IBM Corp. 2001, 2012"); 68 60 MODULE_LICENSE("GPL"); 69 61 70 - static int zcrypt_cex2a_probe(struct ap_device *ap_dev); 71 - static void zcrypt_cex2a_remove(struct ap_device *ap_dev); 72 - 73 - static struct ap_driver zcrypt_cex2a_driver = { 74 - .probe = zcrypt_cex2a_probe, 75 - .remove = zcrypt_cex2a_remove, 76 - .ids = zcrypt_cex2a_ids, 77 - .request_timeout = CEX2A_CLEANUP_TIME, 62 + static struct ap_device_id zcrypt_cex2a_card_ids[] = { 63 + { .dev_type = AP_DEVICE_TYPE_CEX2A, 64 + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 65 + { .dev_type = AP_DEVICE_TYPE_CEX3A, 66 + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 67 + { /* end of list */ }, 78 68 }; 79 69 70 + MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_card_ids); 71 + 72 + static struct ap_device_id zcrypt_cex2a_queue_ids[] = { 73 + { .dev_type = AP_DEVICE_TYPE_CEX2A, 74 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 75 + { .dev_type = AP_DEVICE_TYPE_CEX3A, 76 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 77 + { /* end of list */ }, 78 + }; 79 + 80 + MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids); 81 + 80 82 /** 81 - * Probe function for CEX2A cards. It always accepts the AP device 82 - * since the bus_match already checked the hardware type. 83 + * Probe function for CEX2A card devices. 
It always accepts the AP device 84 + * since the bus_match already checked the card type. 83 85 * @ap_dev: pointer to the AP device. 84 86 */ 85 - static int zcrypt_cex2a_probe(struct ap_device *ap_dev) 87 + static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev) 86 88 { 87 - struct zcrypt_device *zdev = NULL; 88 - int CEX2A_SPEED_IDX[] = { 800, 1000, 2000, 900, 1200, 2400, 0}; 89 - int CEX3A_SPEED_IDX[] = { 400, 500, 1000, 450, 550, 1200, 0}; 89 + /* 90 + * Normalized speed ratings per crypto adapter 91 + * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY 92 + */ 93 + static const int CEX2A_SPEED_IDX[] = { 94 + 800, 1000, 2000, 900, 1200, 2400, 0, 0}; 95 + static const int CEX3A_SPEED_IDX[] = { 96 + 400, 500, 1000, 450, 550, 1200, 0, 0}; 97 + 98 + struct ap_card *ac = to_ap_card(&ap_dev->device); 99 + struct zcrypt_card *zc; 90 100 int rc = 0; 91 101 92 - switch (ap_dev->device_type) { 93 - case AP_DEVICE_TYPE_CEX2A: 94 - zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE); 95 - if (!zdev) 96 - return -ENOMEM; 97 - zdev->user_space_type = ZCRYPT_CEX2A; 98 - zdev->type_string = "CEX2A"; 99 - zdev->min_mod_size = CEX2A_MIN_MOD_SIZE; 100 - zdev->max_mod_size = CEX2A_MAX_MOD_SIZE; 101 - zdev->short_crt = 1; 102 - memcpy(zdev->speed_rating, CEX2A_SPEED_IDX, 102 + zc = zcrypt_card_alloc(); 103 + if (!zc) 104 + return -ENOMEM; 105 + zc->card = ac; 106 + ac->private = zc; 107 + 108 + if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) { 109 + zc->min_mod_size = CEX2A_MIN_MOD_SIZE; 110 + zc->max_mod_size = CEX2A_MAX_MOD_SIZE; 111 + memcpy(zc->speed_rating, CEX2A_SPEED_IDX, 103 112 sizeof(CEX2A_SPEED_IDX)); 104 - zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; 105 - break; 106 - case AP_DEVICE_TYPE_CEX3A: 107 - zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE); 108 - if (!zdev) 109 - return -ENOMEM; 110 - zdev->user_space_type = ZCRYPT_CEX3A; 111 - zdev->type_string = "CEX3A"; 112 - zdev->min_mod_size = CEX2A_MIN_MOD_SIZE; 113 - zdev->max_mod_size = 
CEX2A_MAX_MOD_SIZE; 114 - zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; 115 - if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) && 116 - ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) { 117 - zdev->max_mod_size = CEX3A_MAX_MOD_SIZE; 118 - zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE; 113 + zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; 114 + zc->type_string = "CEX2A"; 115 + zc->user_space_type = ZCRYPT_CEX2A; 116 + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX3A) { 117 + zc->min_mod_size = CEX2A_MIN_MOD_SIZE; 118 + zc->max_mod_size = CEX2A_MAX_MOD_SIZE; 119 + zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; 120 + if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) && 121 + ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) { 122 + zc->max_mod_size = CEX3A_MAX_MOD_SIZE; 123 + zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE; 119 124 } 120 - zdev->short_crt = 1; 121 - memcpy(zdev->speed_rating, CEX3A_SPEED_IDX, 125 + memcpy(zc->speed_rating, CEX3A_SPEED_IDX, 122 126 sizeof(CEX3A_SPEED_IDX)); 123 - break; 124 - } 125 - if (!zdev) 127 + zc->type_string = "CEX3A"; 128 + zc->user_space_type = ZCRYPT_CEX3A; 129 + } else { 130 + zcrypt_card_free(zc); 126 131 return -ENODEV; 127 - zdev->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT); 128 - zdev->ap_dev = ap_dev; 129 - zdev->online = 1; 130 - zdev->load = zdev->speed_rating[0]; 131 - ap_device_init_reply(ap_dev, &zdev->reply); 132 - ap_dev->private = zdev; 133 - rc = zcrypt_device_register(zdev); 134 - if (rc) { 135 - ap_dev->private = NULL; 136 - zcrypt_device_free(zdev); 137 132 } 133 + zc->online = 1; 134 + 135 + rc = zcrypt_card_register(zc); 136 + if (rc) { 137 + ac->private = NULL; 138 + zcrypt_card_free(zc); 139 + } 140 + 138 141 return rc; 139 142 } 140 143 141 144 /** 142 - * This is called to remove the extended CEX2A driver information 143 - * if an AP device is removed. 145 + * This is called to remove the CEX2A card driver information 146 + * if an AP card device is removed. 
144 147 */ 145 - static void zcrypt_cex2a_remove(struct ap_device *ap_dev) 148 + static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev) 146 149 { 147 - struct zcrypt_device *zdev = ap_dev->private; 150 + struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private; 148 151 149 - zcrypt_device_unregister(zdev); 152 + if (zc) 153 + zcrypt_card_unregister(zc); 150 154 } 155 + 156 + static struct ap_driver zcrypt_cex2a_card_driver = { 157 + .probe = zcrypt_cex2a_card_probe, 158 + .remove = zcrypt_cex2a_card_remove, 159 + .ids = zcrypt_cex2a_card_ids, 160 + }; 161 + 162 + /** 163 + * Probe function for CEX2A queue devices. It always accepts the AP device 164 + * since the bus_match already checked the queue type. 165 + * @ap_dev: pointer to the AP device. 166 + */ 167 + static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev) 168 + { 169 + struct ap_queue *aq = to_ap_queue(&ap_dev->device); 170 + struct zcrypt_queue *zq = NULL; 171 + int rc; 172 + 173 + switch (ap_dev->device_type) { 174 + case AP_DEVICE_TYPE_CEX2A: 175 + zq = zcrypt_queue_alloc(CEX2A_MAX_RESPONSE_SIZE); 176 + if (!zq) 177 + return -ENOMEM; 178 + break; 179 + case AP_DEVICE_TYPE_CEX3A: 180 + zq = zcrypt_queue_alloc(CEX3A_MAX_RESPONSE_SIZE); 181 + if (!zq) 182 + return -ENOMEM; 183 + break; 184 + } 185 + if (!zq) 186 + return -ENODEV; 187 + zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT); 188 + zq->queue = aq; 189 + zq->online = 1; 190 + atomic_set(&zq->load, 0); 191 + ap_queue_init_reply(aq, &zq->reply); 192 + aq->request_timeout = CEX2A_CLEANUP_TIME, 193 + aq->private = zq; 194 + rc = zcrypt_queue_register(zq); 195 + if (rc) { 196 + aq->private = NULL; 197 + zcrypt_queue_free(zq); 198 + } 199 + 200 + return rc; 201 + } 202 + 203 + /** 204 + * This is called to remove the CEX2A queue driver information 205 + * if an AP queue device is removed. 
206 + */ 207 + static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev) 208 + { 209 + struct ap_queue *aq = to_ap_queue(&ap_dev->device); 210 + struct zcrypt_queue *zq = aq->private; 211 + 212 + ap_queue_remove(aq); 213 + if (zq) 214 + zcrypt_queue_unregister(zq); 215 + } 216 + 217 + static struct ap_driver zcrypt_cex2a_queue_driver = { 218 + .probe = zcrypt_cex2a_queue_probe, 219 + .remove = zcrypt_cex2a_queue_remove, 220 + .suspend = ap_queue_suspend, 221 + .resume = ap_queue_resume, 222 + .ids = zcrypt_cex2a_queue_ids, 223 + }; 151 224 152 225 int __init zcrypt_cex2a_init(void) 153 226 { 154 - return ap_driver_register(&zcrypt_cex2a_driver, THIS_MODULE, "cex2a"); 227 + int rc; 228 + 229 + rc = ap_driver_register(&zcrypt_cex2a_card_driver, 230 + THIS_MODULE, "cex2acard"); 231 + if (rc) 232 + return rc; 233 + 234 + rc = ap_driver_register(&zcrypt_cex2a_queue_driver, 235 + THIS_MODULE, "cex2aqueue"); 236 + if (rc) 237 + ap_driver_unregister(&zcrypt_cex2a_card_driver); 238 + 239 + return rc; 155 240 } 156 241 157 242 void __exit zcrypt_cex2a_exit(void) 158 243 { 159 - ap_driver_unregister(&zcrypt_cex2a_driver); 244 + ap_driver_unregister(&zcrypt_cex2a_queue_driver); 245 + ap_driver_unregister(&zcrypt_cex2a_card_driver); 160 246 } 161 247 162 248 module_init(zcrypt_cex2a_init);
+205 -118
drivers/s390/crypto/zcrypt_cex4.c
··· 9 9 #include <linux/err.h> 10 10 #include <linux/atomic.h> 11 11 #include <linux/uaccess.h> 12 + #include <linux/mod_devicetable.h> 12 13 13 14 #include "ap_bus.h" 14 15 #include "zcrypt_api.h" ··· 35 34 */ 36 35 #define CEX4_CLEANUP_TIME (900*HZ) 37 36 38 - static struct ap_device_id zcrypt_cex4_ids[] = { 39 - { AP_DEVICE(AP_DEVICE_TYPE_CEX4) }, 40 - { AP_DEVICE(AP_DEVICE_TYPE_CEX5) }, 41 - { /* end of list */ }, 42 - }; 43 - 44 - MODULE_DEVICE_TABLE(ap, zcrypt_cex4_ids); 45 37 MODULE_AUTHOR("IBM Corporation"); 46 38 MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \ 47 39 "Copyright IBM Corp. 2012"); 48 40 MODULE_LICENSE("GPL"); 49 41 50 - static int zcrypt_cex4_probe(struct ap_device *ap_dev); 51 - static void zcrypt_cex4_remove(struct ap_device *ap_dev); 52 - 53 - static struct ap_driver zcrypt_cex4_driver = { 54 - .probe = zcrypt_cex4_probe, 55 - .remove = zcrypt_cex4_remove, 56 - .ids = zcrypt_cex4_ids, 57 - .request_timeout = CEX4_CLEANUP_TIME, 42 + static struct ap_device_id zcrypt_cex4_card_ids[] = { 43 + { .dev_type = AP_DEVICE_TYPE_CEX4, 44 + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 45 + { .dev_type = AP_DEVICE_TYPE_CEX5, 46 + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 47 + { /* end of list */ }, 58 48 }; 59 49 50 + MODULE_DEVICE_TABLE(ap, zcrypt_cex4_card_ids); 51 + 52 + static struct ap_device_id zcrypt_cex4_queue_ids[] = { 53 + { .dev_type = AP_DEVICE_TYPE_CEX4, 54 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 55 + { .dev_type = AP_DEVICE_TYPE_CEX5, 56 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 57 + { /* end of list */ }, 58 + }; 59 + 60 + MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids); 61 + 60 62 /** 61 - * Probe function for CEX4 cards. It always accepts the AP device 63 + * Probe function for CEX4 card device. It always accepts the AP device 62 64 * since the bus_match already checked the hardware type. 63 65 * @ap_dev: pointer to the AP device. 
64 66 */ 65 - static int zcrypt_cex4_probe(struct ap_device *ap_dev) 67 + static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) 66 68 { 67 - struct zcrypt_device *zdev = NULL; 68 69 /* 69 70 * Normalized speed ratings per crypto adapter 70 71 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY 71 72 */ 72 - int CEX4A_SPEED_IDX[] = { 5, 6, 59, 20, 115, 581, 0, 0}; 73 - int CEX5A_SPEED_IDX[] = { 3, 3, 6, 8, 32, 218, 0, 0}; 74 - int CEX4C_SPEED_IDX[] = { 24, 25, 82, 41, 138, 1111, 79, 8}; 75 - int CEX5C_SPEED_IDX[] = { 10, 14, 23, 17, 45, 242, 63, 4}; 76 - int CEX4P_SPEED_IDX[] = {142, 198, 1852, 203, 331, 1563, 0, 8}; 77 - int CEX5P_SPEED_IDX[] = { 49, 67, 131, 52, 85, 287, 0, 4}; 73 + static const int CEX4A_SPEED_IDX[] = { 74 + 5, 6, 59, 20, 115, 581, 0, 0}; 75 + static const int CEX5A_SPEED_IDX[] = { 76 + 3, 3, 6, 8, 32, 218, 0, 0}; 77 + static const int CEX4C_SPEED_IDX[] = { 78 + 24, 25, 82, 41, 138, 1111, 79, 8}; 79 + static const int CEX5C_SPEED_IDX[] = { 80 + 10, 14, 23, 17, 45, 242, 63, 4}; 81 + static const int CEX4P_SPEED_IDX[] = { 82 + 142, 198, 1852, 203, 331, 1563, 0, 8}; 83 + static const int CEX5P_SPEED_IDX[] = { 84 + 49, 67, 131, 52, 85, 287, 0, 4}; 85 + 86 + struct ap_card *ac = to_ap_card(&ap_dev->device); 87 + struct zcrypt_card *zc; 78 88 int rc = 0; 79 89 80 - switch (ap_dev->device_type) { 81 - case AP_DEVICE_TYPE_CEX4: 82 - case AP_DEVICE_TYPE_CEX5: 83 - if (ap_test_bit(&ap_dev->functions, AP_FUNC_ACCEL)) { 84 - zdev = zcrypt_device_alloc(CEX4A_MAX_MESSAGE_SIZE); 85 - if (!zdev) 86 - return -ENOMEM; 87 - if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) { 88 - zdev->type_string = "CEX4A"; 89 - memcpy(zdev->speed_rating, CEX4A_SPEED_IDX, 90 - sizeof(CEX4A_SPEED_IDX)); 91 - } else { 92 - zdev->type_string = "CEX5A"; 93 - memcpy(zdev->speed_rating, CEX5A_SPEED_IDX, 94 - sizeof(CEX5A_SPEED_IDX)); 95 - } 96 - zdev->user_space_type = ZCRYPT_CEX3A; 97 - zdev->min_mod_size = CEX4A_MIN_MOD_SIZE; 98 - if (ap_test_bit(&ap_dev->functions, 
AP_FUNC_MEX4K) && 99 - ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) { 100 - zdev->max_mod_size = 101 - CEX4A_MAX_MOD_SIZE_4K; 102 - zdev->max_exp_bit_length = 103 - CEX4A_MAX_MOD_SIZE_4K; 104 - } else { 105 - zdev->max_mod_size = 106 - CEX4A_MAX_MOD_SIZE_2K; 107 - zdev->max_exp_bit_length = 108 - CEX4A_MAX_MOD_SIZE_2K; 109 - } 110 - zdev->short_crt = 1; 111 - zdev->ops = zcrypt_msgtype(MSGTYPE50_NAME, 112 - MSGTYPE50_VARIANT_DEFAULT); 113 - } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_COPRO)) { 114 - zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE); 115 - if (!zdev) 116 - return -ENOMEM; 117 - if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) { 118 - zdev->type_string = "CEX4C"; 119 - memcpy(zdev->speed_rating, CEX4C_SPEED_IDX, 120 - sizeof(CEX4C_SPEED_IDX)); 121 - } else { 122 - zdev->type_string = "CEX5C"; 123 - memcpy(zdev->speed_rating, CEX5C_SPEED_IDX, 124 - sizeof(CEX5C_SPEED_IDX)); 125 - } 126 - zdev->user_space_type = ZCRYPT_CEX3C; 127 - zdev->min_mod_size = CEX4C_MIN_MOD_SIZE; 128 - zdev->max_mod_size = CEX4C_MAX_MOD_SIZE; 129 - zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; 130 - zdev->short_crt = 0; 131 - zdev->ops = zcrypt_msgtype(MSGTYPE06_NAME, 132 - MSGTYPE06_VARIANT_DEFAULT); 133 - } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_EP11)) { 134 - zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE); 135 - if (!zdev) 136 - return -ENOMEM; 137 - if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) { 138 - zdev->type_string = "CEX4P"; 139 - memcpy(zdev->speed_rating, CEX4P_SPEED_IDX, 140 - sizeof(CEX4P_SPEED_IDX)); 141 - } else { 142 - zdev->type_string = "CEX5P"; 143 - memcpy(zdev->speed_rating, CEX5P_SPEED_IDX, 144 - sizeof(CEX5P_SPEED_IDX)); 145 - } 146 - zdev->user_space_type = ZCRYPT_CEX4; 147 - zdev->min_mod_size = CEX4C_MIN_MOD_SIZE; 148 - zdev->max_mod_size = CEX4C_MAX_MOD_SIZE; 149 - zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; 150 - zdev->short_crt = 0; 151 - zdev->ops = zcrypt_msgtype(MSGTYPE06_NAME, 152 - MSGTYPE06_VARIANT_EP11); 90 + 
zc = zcrypt_card_alloc(); 91 + if (!zc) 92 + return -ENOMEM; 93 + zc->card = ac; 94 + ac->private = zc; 95 + if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) { 96 + if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { 97 + zc->type_string = "CEX4A"; 98 + zc->user_space_type = ZCRYPT_CEX4; 99 + memcpy(zc->speed_rating, CEX4A_SPEED_IDX, 100 + sizeof(CEX4A_SPEED_IDX)); 101 + } else { 102 + zc->type_string = "CEX5A"; 103 + zc->user_space_type = ZCRYPT_CEX5; 104 + memcpy(zc->speed_rating, CEX5A_SPEED_IDX, 105 + sizeof(CEX5A_SPEED_IDX)); 153 106 } 154 - break; 155 - } 156 - if (!zdev) 107 + zc->min_mod_size = CEX4A_MIN_MOD_SIZE; 108 + if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) && 109 + ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) { 110 + zc->max_mod_size = CEX4A_MAX_MOD_SIZE_4K; 111 + zc->max_exp_bit_length = 112 + CEX4A_MAX_MOD_SIZE_4K; 113 + } else { 114 + zc->max_mod_size = CEX4A_MAX_MOD_SIZE_2K; 115 + zc->max_exp_bit_length = 116 + CEX4A_MAX_MOD_SIZE_2K; 117 + } 118 + } else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) { 119 + if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { 120 + zc->type_string = "CEX4C"; 121 + /* wrong user space type, must be CEX4 122 + * just keep it for cca compatibility 123 + */ 124 + zc->user_space_type = ZCRYPT_CEX3C; 125 + memcpy(zc->speed_rating, CEX4C_SPEED_IDX, 126 + sizeof(CEX4C_SPEED_IDX)); 127 + } else { 128 + zc->type_string = "CEX5C"; 129 + /* wrong user space type, must be CEX5 130 + * just keep it for cca compatibility 131 + */ 132 + zc->user_space_type = ZCRYPT_CEX3C; 133 + memcpy(zc->speed_rating, CEX5C_SPEED_IDX, 134 + sizeof(CEX5C_SPEED_IDX)); 135 + } 136 + zc->min_mod_size = CEX4C_MIN_MOD_SIZE; 137 + zc->max_mod_size = CEX4C_MAX_MOD_SIZE; 138 + zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; 139 + } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) { 140 + if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { 141 + zc->type_string = "CEX4P"; 142 + zc->user_space_type = ZCRYPT_CEX4; 143 + memcpy(zc->speed_rating, 
CEX4P_SPEED_IDX, 144 + sizeof(CEX4P_SPEED_IDX)); 145 + } else { 146 + zc->type_string = "CEX5P"; 147 + zc->user_space_type = ZCRYPT_CEX5; 148 + memcpy(zc->speed_rating, CEX5P_SPEED_IDX, 149 + sizeof(CEX5P_SPEED_IDX)); 150 + } 151 + zc->min_mod_size = CEX4C_MIN_MOD_SIZE; 152 + zc->max_mod_size = CEX4C_MAX_MOD_SIZE; 153 + zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; 154 + } else { 155 + zcrypt_card_free(zc); 157 156 return -ENODEV; 158 - zdev->ap_dev = ap_dev; 159 - zdev->online = 1; 160 - zdev->load = zdev->speed_rating[0]; 161 - ap_device_init_reply(ap_dev, &zdev->reply); 162 - ap_dev->private = zdev; 163 - rc = zcrypt_device_register(zdev); 164 - if (rc) { 165 - ap_dev->private = NULL; 166 - zcrypt_device_free(zdev); 167 157 } 158 + zc->online = 1; 159 + 160 + rc = zcrypt_card_register(zc); 161 + if (rc) { 162 + ac->private = NULL; 163 + zcrypt_card_free(zc); 164 + } 165 + 168 166 return rc; 169 167 } 170 168 171 169 /** 172 - * This is called to remove the extended CEX4 driver information 173 - * if an AP device is removed. 170 + * This is called to remove the CEX4 card driver information 171 + * if an AP card device is removed. 174 172 */ 175 - static void zcrypt_cex4_remove(struct ap_device *ap_dev) 173 + static void zcrypt_cex4_card_remove(struct ap_device *ap_dev) 176 174 { 177 - struct zcrypt_device *zdev = ap_dev->private; 175 + struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private; 178 176 179 - if (zdev) { 180 - zcrypt_device_unregister(zdev); 181 - } 177 + if (zc) 178 + zcrypt_card_unregister(zc); 182 179 } 180 + 181 + static struct ap_driver zcrypt_cex4_card_driver = { 182 + .probe = zcrypt_cex4_card_probe, 183 + .remove = zcrypt_cex4_card_remove, 184 + .ids = zcrypt_cex4_card_ids, 185 + }; 186 + 187 + /** 188 + * Probe function for CEX4 queue device. It always accepts the AP device 189 + * since the bus_match already checked the hardware type. 190 + * @ap_dev: pointer to the AP device. 
191 + */ 192 + static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev) 193 + { 194 + struct ap_queue *aq = to_ap_queue(&ap_dev->device); 195 + struct zcrypt_queue *zq; 196 + int rc; 197 + 198 + if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) { 199 + zq = zcrypt_queue_alloc(CEX4A_MAX_MESSAGE_SIZE); 200 + if (!zq) 201 + return -ENOMEM; 202 + zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, 203 + MSGTYPE50_VARIANT_DEFAULT); 204 + } else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) { 205 + zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE); 206 + if (!zq) 207 + return -ENOMEM; 208 + zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, 209 + MSGTYPE06_VARIANT_DEFAULT); 210 + } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) { 211 + zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE); 212 + if (!zq) 213 + return -ENOMEM; 214 + zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, 215 + MSGTYPE06_VARIANT_EP11); 216 + } else { 217 + return -ENODEV; 218 + } 219 + zq->queue = aq; 220 + zq->online = 1; 221 + atomic_set(&zq->load, 0); 222 + ap_queue_init_reply(aq, &zq->reply); 223 + aq->request_timeout = CEX4_CLEANUP_TIME, 224 + aq->private = zq; 225 + rc = zcrypt_queue_register(zq); 226 + if (rc) { 227 + aq->private = NULL; 228 + zcrypt_queue_free(zq); 229 + } 230 + 231 + return rc; 232 + } 233 + 234 + /** 235 + * This is called to remove the CEX4 queue driver information 236 + * if an AP queue device is removed. 
237 + */ 238 + static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev) 239 + { 240 + struct ap_queue *aq = to_ap_queue(&ap_dev->device); 241 + struct zcrypt_queue *zq = aq->private; 242 + 243 + ap_queue_remove(aq); 244 + if (zq) 245 + zcrypt_queue_unregister(zq); 246 + } 247 + 248 + static struct ap_driver zcrypt_cex4_queue_driver = { 249 + .probe = zcrypt_cex4_queue_probe, 250 + .remove = zcrypt_cex4_queue_remove, 251 + .suspend = ap_queue_suspend, 252 + .resume = ap_queue_resume, 253 + .ids = zcrypt_cex4_queue_ids, 254 + }; 183 255 184 256 int __init zcrypt_cex4_init(void) 185 257 { 186 - return ap_driver_register(&zcrypt_cex4_driver, THIS_MODULE, "cex4"); 258 + int rc; 259 + 260 + rc = ap_driver_register(&zcrypt_cex4_card_driver, 261 + THIS_MODULE, "cex4card"); 262 + if (rc) 263 + return rc; 264 + 265 + rc = ap_driver_register(&zcrypt_cex4_queue_driver, 266 + THIS_MODULE, "cex4queue"); 267 + if (rc) 268 + ap_driver_unregister(&zcrypt_cex4_card_driver); 269 + 270 + return rc; 187 271 } 188 272 189 273 void __exit zcrypt_cex4_exit(void) 190 274 { 191 - ap_driver_unregister(&zcrypt_cex4_driver); 275 + ap_driver_unregister(&zcrypt_cex4_queue_driver); 276 + ap_driver_unregister(&zcrypt_cex4_card_driver); 192 277 } 193 278 194 279 module_init(zcrypt_cex4_init);
+22 -16
drivers/s390/crypto/zcrypt_error.h
··· 87 87 #define REP88_ERROR_OPERAND 0x84 /* CEX2A */ 88 88 #define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */ 89 89 90 - static inline int convert_error(struct zcrypt_device *zdev, 90 + static inline int convert_error(struct zcrypt_queue *zq, 91 91 struct ap_message *reply) 92 92 { 93 93 struct error_hdr *ehdr = reply->message; ··· 110 110 * and then repeat the request. 111 111 */ 112 112 atomic_set(&zcrypt_rescan_req, 1); 113 - zdev->online = 0; 114 - pr_err("Cryptographic device %x failed and was set offline\n", 115 - AP_QID_DEVICE(zdev->ap_dev->qid)); 116 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 117 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, 113 + zq->online = 0; 114 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 115 + AP_QID_CARD(zq->queue->qid), 116 + AP_QID_QUEUE(zq->queue->qid)); 117 + ZCRYPT_DBF_DEV(DBF_ERR, zq, "dev%02x%04xo%drc%d", 118 + AP_QID_CARD(zq->queue->qid), 119 + AP_QID_QUEUE(zq->queue->qid), zq->online, 118 120 ehdr->reply_code); 119 121 return -EAGAIN; 120 122 case REP82_ERROR_TRANSPORT_FAIL: ··· 124 122 // REP88_ERROR_MODULE_FAILURE // '10' CEX2A 125 123 /* If a card fails disable it and repeat the request. 
*/ 126 124 atomic_set(&zcrypt_rescan_req, 1); 127 - zdev->online = 0; 128 - pr_err("Cryptographic device %x failed and was set offline\n", 129 - AP_QID_DEVICE(zdev->ap_dev->qid)); 130 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 131 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, 125 + zq->online = 0; 126 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 127 + AP_QID_CARD(zq->queue->qid), 128 + AP_QID_QUEUE(zq->queue->qid)); 129 + ZCRYPT_DBF_DEV(DBF_ERR, zq, "dev%02x%04xo%drc%d", 130 + AP_QID_CARD(zq->queue->qid), 131 + AP_QID_QUEUE(zq->queue->qid), zq->online, 132 132 ehdr->reply_code); 133 133 return -EAGAIN; 134 134 default: 135 - zdev->online = 0; 136 - pr_err("Cryptographic device %x failed and was set offline\n", 137 - AP_QID_DEVICE(zdev->ap_dev->qid)); 138 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 139 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, 135 + zq->online = 0; 136 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 137 + AP_QID_CARD(zq->queue->qid), 138 + AP_QID_QUEUE(zq->queue->qid)); 139 + ZCRYPT_DBF_DEV(DBF_ERR, zq, "dev%02x%04xo%drc%d", 140 + AP_QID_CARD(zq->queue->qid), 141 + AP_QID_QUEUE(zq->queue->qid), zq->online, 140 142 ehdr->reply_code); 141 143 return -EAGAIN; /* repeat the request on a different device. */ 142 144 }
+46 -44
drivers/s390/crypto/zcrypt_msgtype50.c
··· 53 53 "Copyright IBM Corp. 2001, 2012"); 54 54 MODULE_LICENSE("GPL"); 55 55 56 - static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *, 57 - struct ap_message *); 58 - 59 56 /** 60 57 * The type 50 message family is associated with a CEX2A card. 61 58 * ··· 205 208 /** 206 209 * Convert a ICAMEX message to a type50 MEX message. 207 210 * 208 - * @zdev: crypto device pointer 209 - * @zreq: crypto request pointer 211 + * @zq: crypto queue pointer 212 + * @ap_msg: crypto request pointer 210 213 * @mex: pointer to user input data 211 214 * 212 215 * Returns 0 on success or -EFAULT. 213 216 */ 214 - static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev, 217 + static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq, 215 218 struct ap_message *ap_msg, 216 219 struct ica_rsa_modexpo *mex) 217 220 { ··· 263 266 /** 264 267 * Convert a ICACRT message to a type50 CRT message. 265 268 * 266 - * @zdev: crypto device pointer 267 - * @zreq: crypto request pointer 269 + * @zq: crypto queue pointer 270 + * @ap_msg: crypto request pointer 268 271 * @crt: pointer to user input data 269 272 * 270 273 * Returns 0 on success or -EFAULT. 271 274 */ 272 - static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev, 275 + static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq, 273 276 struct ap_message *ap_msg, 274 277 struct ica_rsa_modexpo_crt *crt) 275 278 { ··· 312 315 u = crb2->u + sizeof(crb2->u) - short_len; 313 316 inp = crb2->message + sizeof(crb2->message) - mod_len; 314 317 } else if ((mod_len <= 512) && /* up to 4096 bit key size */ 315 - (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)) { /* >= CEX3A */ 318 + (zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) { 316 319 struct type50_crb3_msg *crb3 = ap_msg->message; 317 320 memset(crb3, 0, sizeof(*crb3)); 318 321 ap_msg->length = sizeof(*crb3); ··· 346 349 /** 347 350 * Copy results from a type 80 reply message back to user space. 
348 351 * 349 - * @zdev: crypto device pointer 352 + * @zq: crypto device pointer 350 353 * @reply: reply AP message. 351 354 * @data: pointer to user output data 352 355 * @length: size of user output data 353 356 * 354 357 * Returns 0 on success or -EFAULT. 355 358 */ 356 - static int convert_type80(struct zcrypt_device *zdev, 359 + static int convert_type80(struct zcrypt_queue *zq, 357 360 struct ap_message *reply, 358 361 char __user *outputdata, 359 362 unsigned int outputdatalength) ··· 363 366 364 367 if (t80h->len < sizeof(*t80h) + outputdatalength) { 365 368 /* The result is too short, the CEX2A card may not do that.. */ 366 - zdev->online = 0; 367 - pr_err("Cryptographic device %x failed and was set offline\n", 368 - AP_QID_DEVICE(zdev->ap_dev->qid)); 369 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 370 - AP_QID_DEVICE(zdev->ap_dev->qid), 371 - zdev->online, t80h->code); 369 + zq->online = 0; 370 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 371 + AP_QID_CARD(zq->queue->qid), 372 + AP_QID_QUEUE(zq->queue->qid)); 373 + ZCRYPT_DBF_DEV(DBF_ERR, zq, "dev%02x%04xo%drc%d", 374 + AP_QID_CARD(zq->queue->qid), 375 + AP_QID_QUEUE(zq->queue->qid), 376 + zq->online, t80h->code); 372 377 373 378 return -EAGAIN; /* repeat the request on a different device. 
*/ 374 379 } 375 - if (zdev->user_space_type == ZCRYPT_CEX2A) 380 + if (zq->zcard->user_space_type == ZCRYPT_CEX2A) 376 381 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE); 377 382 else 378 383 BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE); ··· 384 385 return 0; 385 386 } 386 387 387 - static int convert_response(struct zcrypt_device *zdev, 388 + static int convert_response(struct zcrypt_queue *zq, 388 389 struct ap_message *reply, 389 390 char __user *outputdata, 390 391 unsigned int outputdatalength) ··· 393 394 switch (((unsigned char *) reply->message)[1]) { 394 395 case TYPE82_RSP_CODE: 395 396 case TYPE88_RSP_CODE: 396 - return convert_error(zdev, reply); 397 + return convert_error(zq, reply); 397 398 case TYPE80_RSP_CODE: 398 - return convert_type80(zdev, reply, 399 + return convert_type80(zq, reply, 399 400 outputdata, outputdatalength); 400 401 default: /* Unknown response type, this should NEVER EVER happen */ 401 - zdev->online = 0; 402 - pr_err("Cryptographic device %x failed and was set offline\n", 403 - AP_QID_DEVICE(zdev->ap_dev->qid)); 404 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 405 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); 402 + zq->online = 0; 403 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 404 + AP_QID_CARD(zq->queue->qid), 405 + AP_QID_QUEUE(zq->queue->qid)); 406 + ZCRYPT_DBF_DEV(DBF_ERR, zq, "dev%02x%04xo%dfail", 407 + AP_QID_CARD(zq->queue->qid), 408 + AP_QID_QUEUE(zq->queue->qid), 409 + zq->online); 406 410 return -EAGAIN; /* repeat the request on a different device. */ 407 411 } 408 412 } ··· 414 412 * This function is called from the AP bus code after a crypto request 415 413 * "msg" has finished with the reply message "reply". 416 414 * It is called from tasklet context. 
417 - * @ap_dev: pointer to the AP device 415 + * @aq: pointer to the AP device 418 416 * @msg: pointer to the AP message 419 417 * @reply: pointer to the AP reply message 420 418 */ 421 - static void zcrypt_cex2a_receive(struct ap_device *ap_dev, 419 + static void zcrypt_cex2a_receive(struct ap_queue *aq, 422 420 struct ap_message *msg, 423 421 struct ap_message *reply) 424 422 { ··· 434 432 goto out; /* ap_msg->rc indicates the error */ 435 433 t80h = reply->message; 436 434 if (t80h->type == TYPE80_RSP_CODE) { 437 - if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A) 435 + if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) 438 436 length = min_t(int, 439 437 CEX2A_MAX_RESPONSE_SIZE, t80h->len); 440 438 else ··· 452 450 /** 453 451 * The request distributor calls this function if it picked the CEX2A 454 452 * device to handle a modexpo request. 455 - * @zdev: pointer to zcrypt_device structure that identifies the 453 + * @zq: pointer to zcrypt_queue structure that identifies the 456 454 * CEX2A device to the request distributor 457 455 * @mex: pointer to the modexpo request buffer 458 456 */ 459 - static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev, 457 + static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq, 460 458 struct ica_rsa_modexpo *mex) 461 459 { 462 460 struct ap_message ap_msg; ··· 464 462 int rc; 465 463 466 464 ap_init_message(&ap_msg); 467 - if (zdev->user_space_type == ZCRYPT_CEX2A) 465 + if (zq->zcard->user_space_type == ZCRYPT_CEX2A) 468 466 ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, 469 467 GFP_KERNEL); 470 468 else ··· 476 474 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 477 475 atomic_inc_return(&zcrypt_step); 478 476 ap_msg.private = &work; 479 - rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex); 477 + rc = ICAMEX_msg_to_type50MEX_msg(zq, &ap_msg, mex); 480 478 if (rc) 481 479 goto out_free; 482 480 init_completion(&work); 483 - ap_queue_message(zdev->ap_dev, &ap_msg); 481 + ap_queue_message(zq->queue, 
&ap_msg); 484 482 rc = wait_for_completion_interruptible(&work); 485 483 if (rc == 0) { 486 484 rc = ap_msg.rc; 487 485 if (rc == 0) 488 - rc = convert_response(zdev, &ap_msg, mex->outputdata, 486 + rc = convert_response(zq, &ap_msg, mex->outputdata, 489 487 mex->outputdatalength); 490 488 } else 491 489 /* Signal pending. */ 492 - ap_cancel_message(zdev->ap_dev, &ap_msg); 490 + ap_cancel_message(zq->queue, &ap_msg); 493 491 out_free: 494 492 kfree(ap_msg.message); 495 493 return rc; ··· 498 496 /** 499 497 * The request distributor calls this function if it picked the CEX2A 500 498 * device to handle a modexpo_crt request. 501 - * @zdev: pointer to zcrypt_device structure that identifies the 499 + * @zq: pointer to zcrypt_queue structure that identifies the 502 500 * CEX2A device to the request distributor 503 501 * @crt: pointer to the modexpoc_crt request buffer 504 502 */ 505 - static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev, 503 + static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq, 506 504 struct ica_rsa_modexpo_crt *crt) 507 505 { 508 506 struct ap_message ap_msg; ··· 510 508 int rc; 511 509 512 510 ap_init_message(&ap_msg); 513 - if (zdev->user_space_type == ZCRYPT_CEX2A) 511 + if (zq->zcard->user_space_type == ZCRYPT_CEX2A) 514 512 ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, 515 513 GFP_KERNEL); 516 514 else ··· 522 520 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 523 521 atomic_inc_return(&zcrypt_step); 524 522 ap_msg.private = &work; 525 - rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt); 523 + rc = ICACRT_msg_to_type50CRT_msg(zq, &ap_msg, crt); 526 524 if (rc) 527 525 goto out_free; 528 526 init_completion(&work); 529 - ap_queue_message(zdev->ap_dev, &ap_msg); 527 + ap_queue_message(zq->queue, &ap_msg); 530 528 rc = wait_for_completion_interruptible(&work); 531 529 if (rc == 0) { 532 530 rc = ap_msg.rc; 533 531 if (rc == 0) 534 - rc = convert_response(zdev, &ap_msg, crt->outputdata, 532 + rc = 
convert_response(zq, &ap_msg, crt->outputdata, 535 533 crt->outputdatalength); 536 534 } else 537 535 /* Signal pending. */ 538 - ap_cancel_message(zdev->ap_dev, &ap_msg); 536 + ap_cancel_message(zq->queue, &ap_msg); 539 537 out_free: 540 538 kfree(ap_msg.message); 541 539 return rc;
+138 -115
drivers/s390/crypto/zcrypt_msgtype6.c
··· 60 60 "Copyright IBM Corp. 2001, 2012"); 61 61 MODULE_LICENSE("GPL"); 62 62 63 - static void zcrypt_msgtype6_receive(struct ap_device *, struct ap_message *, 64 - struct ap_message *); 65 - 66 63 /** 67 64 * CPRB 68 65 * Note that all shorts, ints and longs are little-endian. ··· 255 258 /** 256 259 * Convert a ICAMEX message to a type6 MEX message. 257 260 * 258 - * @zdev: crypto device pointer 261 + * @zq: crypto device pointer 259 262 * @ap_msg: pointer to AP message 260 263 * @mex: pointer to user input data 261 264 * 262 265 * Returns 0 on success or -EFAULT. 263 266 */ 264 - static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev, 267 + static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq, 265 268 struct ap_message *ap_msg, 266 269 struct ica_rsa_modexpo *mex) 267 270 { ··· 275 278 .function_code = {'P', 'K'}, 276 279 .ulen = 10, 277 280 .only_rule = {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '} 278 - }; 279 - static struct function_and_rules_block static_pke_fnr_MCL2 = { 280 - .function_code = {'P', 'K'}, 281 - .ulen = 10, 282 - .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'} 283 281 }; 284 282 struct { 285 283 struct type6_hdr hdr; ··· 302 310 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 303 311 304 312 msg->cprbx = static_cprbx; 305 - msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); 313 + msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); 306 314 msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1; 307 315 308 - msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? 309 - static_pke_fnr_MCL2 : static_pke_fnr; 316 + msg->fr = static_pke_fnr; 310 317 311 318 msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx); 312 319 ··· 316 325 /** 317 326 * Convert a ICACRT message to a type6 CRT message. 
318 327 * 319 - * @zdev: crypto device pointer 328 + * @zq: crypto device pointer 320 329 * @ap_msg: pointer to AP message 321 330 * @crt: pointer to user input data 322 331 * 323 332 * Returns 0 on success or -EFAULT. 324 333 */ 325 - static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev, 334 + static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq, 326 335 struct ap_message *ap_msg, 327 336 struct ica_rsa_modexpo_crt *crt) 328 337 { ··· 338 347 .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'} 339 348 }; 340 349 341 - static struct function_and_rules_block static_pkd_fnr_MCL2 = { 342 - .function_code = {'P', 'D'}, 343 - .ulen = 10, 344 - .only_rule = {'P', 'K', 'C', 'S', '-', '1', '.', '2'} 345 - }; 346 350 struct { 347 351 struct type6_hdr hdr; 348 352 struct CPRBX cprbx; ··· 364 378 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 365 379 366 380 msg->cprbx = static_cprbx; 367 - msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); 381 + msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); 368 382 msg->cprbx.req_parml = msg->cprbx.rpl_msgbl = 369 383 size - sizeof(msg->hdr) - sizeof(msg->cprbx); 370 384 371 - msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? 372 - static_pkd_fnr_MCL2 : static_pkd_fnr; 385 + msg->fr = static_pkd_fnr; 373 386 374 387 ap_msg->length = size; 375 388 return 0; ··· 377 392 /** 378 393 * Convert a XCRB message to a type6 CPRB message. 
379 394 * 380 - * @zdev: crypto device pointer 395 + * @zq: crypto device pointer 381 396 * @ap_msg: pointer to AP message 382 397 * @xcRB: pointer to user input data 383 398 * ··· 390 405 391 406 static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg, 392 407 struct ica_xcRB *xcRB, 393 - unsigned int *fcode) 408 + unsigned int *fcode, 409 + unsigned short **dom) 394 410 { 395 411 static struct type6_hdr static_type6_hdrX = { 396 412 .type = 0x06, ··· 472 486 sizeof(msg->hdr.function_code)); 473 487 474 488 *fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1]; 489 + *dom = (unsigned short *)&msg->cprbx.domain; 475 490 476 491 if (memcmp(function_code, "US", 2) == 0) 477 492 ap_msg->special = 1; ··· 484 497 copy_from_user(req_data, xcRB->request_data_address, 485 498 xcRB->request_data_length)) 486 499 return -EFAULT; 500 + 487 501 return 0; 488 502 } 489 503 ··· 492 504 struct ep11_urb *xcRB, 493 505 unsigned int *fcode) 494 506 { 507 + unsigned int lfmt; 495 508 static struct type6_hdr static_type6_ep11_hdr = { 496 509 .type = 0x06, 497 510 .rqid = {0x00, 0x01}, ··· 545 556 return -EFAULT; 546 557 } 547 558 548 - *fcode = speed_idx_ep11(payload_hdr->func_val & 0xFFFF); 559 + if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/ 560 + switch (msg->pld_lenfmt & 0x03) { 561 + case 1: 562 + lfmt = 2; 563 + break; 564 + case 2: 565 + lfmt = 3; 566 + break; 567 + default: 568 + return -EINVAL; 569 + } 570 + } else { 571 + lfmt = 1; /* length format #1 */ 572 + } 573 + payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); 574 + *fcode = payload_hdr->func_val & 0xFFFF; 575 + 549 576 return 0; 550 577 } 551 578 552 579 /** 553 580 * Copy results from a type 86 ICA reply message back to user space. 554 581 * 555 - * @zdev: crypto device pointer 582 + * @zq: crypto device pointer 556 583 * @reply: reply AP message. 
557 584 * @data: pointer to user output data 558 585 * @length: size of user output data ··· 590 585 struct ep11_cprb cprbx; 591 586 } __packed; 592 587 593 - static int convert_type86_ica(struct zcrypt_device *zdev, 588 + static int convert_type86_ica(struct zcrypt_queue *zq, 594 589 struct ap_message *reply, 595 590 char __user *outputdata, 596 591 unsigned int outputdatalength) ··· 645 640 if (service_rc == 8 && service_rs == 770) 646 641 return -EINVAL; 647 642 if (service_rc == 8 && service_rs == 783) { 648 - zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; 643 + zq->zcard->min_mod_size = 644 + PCIXCC_MIN_MOD_SIZE_OLD; 649 645 return -EAGAIN; 650 646 } 651 647 if (service_rc == 12 && service_rs == 769) 652 648 return -EINVAL; 653 649 if (service_rc == 8 && service_rs == 72) 654 650 return -EINVAL; 655 - zdev->online = 0; 656 - pr_err("Cryptographic device %x failed and was set offline\n", 657 - AP_QID_DEVICE(zdev->ap_dev->qid)); 658 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 659 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, 651 + zq->online = 0; 652 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 653 + AP_QID_CARD(zq->queue->qid), 654 + AP_QID_QUEUE(zq->queue->qid)); 655 + ZCRYPT_DBF_DEV(DBF_ERR, zq, "dev%02x%04xo%drc%d", 656 + AP_QID_CARD(zq->queue->qid), 657 + AP_QID_QUEUE(zq->queue->qid), 658 + zq->online, 660 659 msg->hdr.reply_code); 661 660 return -EAGAIN; /* repeat the request on a different device. */ 662 661 } ··· 697 688 /** 698 689 * Copy results from a type 86 XCRB reply message back to user space. 699 690 * 700 - * @zdev: crypto device pointer 691 + * @zq: crypto device pointer 701 692 * @reply: reply AP message. 702 693 * @xcRB: pointer to XCRB 703 694 * 704 695 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. 
705 696 */ 706 - static int convert_type86_xcrb(struct zcrypt_device *zdev, 697 + static int convert_type86_xcrb(struct zcrypt_queue *zq, 707 698 struct ap_message *reply, 708 699 struct ica_xcRB *xcRB) 709 700 { ··· 728 719 /** 729 720 * Copy results from a type 86 EP11 XCRB reply message back to user space. 730 721 * 731 - * @zdev: crypto device pointer 722 + * @zq: crypto device pointer 732 723 * @reply: reply AP message. 733 724 * @xcRB: pointer to EP11 user request block 734 725 * 735 726 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. 736 727 */ 737 - static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev, 728 + static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq, 738 729 struct ap_message *reply, 739 730 struct ep11_urb *xcRB) 740 731 { ··· 752 743 return 0; 753 744 } 754 745 755 - static int convert_type86_rng(struct zcrypt_device *zdev, 746 + static int convert_type86_rng(struct zcrypt_queue *zq, 756 747 struct ap_message *reply, 757 748 char *buffer) 758 749 { ··· 769 760 return msg->fmt2.count2; 770 761 } 771 762 772 - static int convert_response_ica(struct zcrypt_device *zdev, 763 + static int convert_response_ica(struct zcrypt_queue *zq, 773 764 struct ap_message *reply, 774 765 char __user *outputdata, 775 766 unsigned int outputdatalength) ··· 780 771 switch (((unsigned char *) reply->message)[1]) { 781 772 case TYPE82_RSP_CODE: 782 773 case TYPE88_RSP_CODE: 783 - return convert_error(zdev, reply); 774 + return convert_error(zq, reply); 784 775 case TYPE86_RSP_CODE: 785 776 if (msg->cprbx.ccp_rtcode && 786 777 (msg->cprbx.ccp_rscode == 0x14f) && 787 778 (outputdatalength > 256)) { 788 - if (zdev->max_exp_bit_length <= 17) { 789 - zdev->max_exp_bit_length = 17; 779 + if (zq->zcard->max_exp_bit_length <= 17) { 780 + zq->zcard->max_exp_bit_length = 17; 790 781 return -EAGAIN; 791 782 } else 792 783 return -EINVAL; 793 784 } 794 785 if (msg->hdr.reply_code) 795 - return convert_error(zdev, reply); 786 + return 
convert_error(zq, reply); 796 787 if (msg->cprbx.cprb_ver_id == 0x02) 797 - return convert_type86_ica(zdev, reply, 788 + return convert_type86_ica(zq, reply, 798 789 outputdata, outputdatalength); 799 790 /* Fall through, no break, incorrect cprb version is an unknown 800 791 * response */ 801 792 default: /* Unknown response type, this should NEVER EVER happen */ 802 - zdev->online = 0; 803 - pr_err("Cryptographic device %x failed and was set offline\n", 804 - AP_QID_DEVICE(zdev->ap_dev->qid)); 805 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 806 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); 793 + zq->online = 0; 794 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 795 + AP_QID_CARD(zq->queue->qid), 796 + AP_QID_QUEUE(zq->queue->qid)); 797 + ZCRYPT_DBF_DEV(DBF_ERR, zq, "dev%02x%04xo%dfail", 798 + AP_QID_CARD(zq->queue->qid), 799 + AP_QID_QUEUE(zq->queue->qid), 800 + zq->online); 807 801 return -EAGAIN; /* repeat the request on a different device. */ 808 802 } 809 803 } 810 804 811 - static int convert_response_xcrb(struct zcrypt_device *zdev, 805 + static int convert_response_xcrb(struct zcrypt_queue *zq, 812 806 struct ap_message *reply, 813 807 struct ica_xcRB *xcRB) 814 808 { ··· 822 810 case TYPE82_RSP_CODE: 823 811 case TYPE88_RSP_CODE: 824 812 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ 825 - return convert_error(zdev, reply); 813 + return convert_error(zq, reply); 826 814 case TYPE86_RSP_CODE: 827 815 if (msg->hdr.reply_code) { 828 816 memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32)); 829 - return convert_error(zdev, reply); 817 + return convert_error(zq, reply); 830 818 } 831 819 if (msg->cprbx.cprb_ver_id == 0x02) 832 - return convert_type86_xcrb(zdev, reply, xcRB); 820 + return convert_type86_xcrb(zq, reply, xcRB); 833 821 /* Fall through, no break, incorrect cprb version is an unknown 834 822 * response */ 835 823 default: /* Unknown response type, this should NEVER EVER happen */ 836 824 xcRB->status = 
0x0008044DL; /* HDD_InvalidParm */ 837 - zdev->online = 0; 838 - pr_err("Cryptographic device %x failed and was set offline\n", 839 - AP_QID_DEVICE(zdev->ap_dev->qid)); 840 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 841 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); 825 + zq->online = 0; 826 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 827 + AP_QID_CARD(zq->queue->qid), 828 + AP_QID_QUEUE(zq->queue->qid)); 829 + ZCRYPT_DBF_DEV(DBF_ERR, zq, "dev%02x%04xo%dfail", 830 + AP_QID_CARD(zq->queue->qid), 831 + AP_QID_QUEUE(zq->queue->qid), 832 + zq->online); 842 833 return -EAGAIN; /* repeat the request on a different device. */ 843 834 } 844 835 } 845 836 846 - static int convert_response_ep11_xcrb(struct zcrypt_device *zdev, 837 + static int convert_response_ep11_xcrb(struct zcrypt_queue *zq, 847 838 struct ap_message *reply, struct ep11_urb *xcRB) 848 839 { 849 840 struct type86_ep11_reply *msg = reply->message; ··· 855 840 switch (((unsigned char *)reply->message)[1]) { 856 841 case TYPE82_RSP_CODE: 857 842 case TYPE87_RSP_CODE: 858 - return convert_error(zdev, reply); 843 + return convert_error(zq, reply); 859 844 case TYPE86_RSP_CODE: 860 845 if (msg->hdr.reply_code) 861 - return convert_error(zdev, reply); 846 + return convert_error(zq, reply); 862 847 if (msg->cprbx.cprb_ver_id == 0x04) 863 - return convert_type86_ep11_xcrb(zdev, reply, xcRB); 848 + return convert_type86_ep11_xcrb(zq, reply, xcRB); 864 849 /* Fall through, no break, incorrect cprb version is an unknown resp.*/ 865 850 default: /* Unknown response type, this should NEVER EVER happen */ 866 - zdev->online = 0; 867 - pr_err("Cryptographic device %x failed and was set offline\n", 868 - AP_QID_DEVICE(zdev->ap_dev->qid)); 869 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 870 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); 851 + zq->online = 0; 852 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 853 + AP_QID_CARD(zq->queue->qid), 854 + 
AP_QID_QUEUE(zq->queue->qid)); 855 + ZCRYPT_DBF_DEV(DBF_ERR, zq, "dev%02x%04xo%dfail", 856 + AP_QID_CARD(zq->queue->qid), 857 + AP_QID_QUEUE(zq->queue->qid), 858 + zq->online); 871 859 return -EAGAIN; /* repeat the request on a different device. */ 872 860 } 873 861 } 874 862 875 - static int convert_response_rng(struct zcrypt_device *zdev, 863 + static int convert_response_rng(struct zcrypt_queue *zq, 876 864 struct ap_message *reply, 877 865 char *data) 878 866 { ··· 889 871 if (msg->hdr.reply_code) 890 872 return -EINVAL; 891 873 if (msg->cprbx.cprb_ver_id == 0x02) 892 - return convert_type86_rng(zdev, reply, data); 874 + return convert_type86_rng(zq, reply, data); 893 875 /* Fall through, no break, incorrect cprb version is an unknown 894 876 * response */ 895 877 default: /* Unknown response type, this should NEVER EVER happen */ 896 - zdev->online = 0; 897 - pr_err("Cryptographic device %x failed and was set offline\n", 898 - AP_QID_DEVICE(zdev->ap_dev->qid)); 899 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 900 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); 878 + zq->online = 0; 879 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 880 + AP_QID_CARD(zq->queue->qid), 881 + AP_QID_QUEUE(zq->queue->qid)); 882 + ZCRYPT_DBF_DEV(DBF_ERR, zq, "dev%02x%04xo%dfail", 883 + AP_QID_CARD(zq->queue->qid), 884 + AP_QID_QUEUE(zq->queue->qid), 885 + zq->online); 901 886 return -EAGAIN; /* repeat the request on a different device. */ 902 887 } 903 888 } ··· 909 888 * This function is called from the AP bus code after a crypto request 910 889 * "msg" has finished with the reply message "reply". 911 890 * It is called from tasklet context. 
912 - * @ap_dev: pointer to the AP device 891 + * @aq: pointer to the AP queue 913 892 * @msg: pointer to the AP message 914 893 * @reply: pointer to the AP reply message 915 894 */ 916 - static void zcrypt_msgtype6_receive(struct ap_device *ap_dev, 895 + static void zcrypt_msgtype6_receive(struct ap_queue *aq, 917 896 struct ap_message *msg, 918 897 struct ap_message *reply) 919 898 { ··· 958 937 * This function is called from the AP bus code after a crypto request 959 938 * "msg" has finished with the reply message "reply". 960 939 * It is called from tasklet context. 961 - * @ap_dev: pointer to the AP device 940 + * @aq: pointer to the AP queue 962 941 * @msg: pointer to the AP message 963 942 * @reply: pointer to the AP reply message 964 943 */ 965 - static void zcrypt_msgtype6_receive_ep11(struct ap_device *ap_dev, 944 + static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq, 966 945 struct ap_message *msg, 967 946 struct ap_message *reply) 968 947 { ··· 1002 981 /** 1003 982 * The request distributor calls this function if it picked the PCIXCC/CEX2C 1004 983 * device to handle a modexpo request. 
1005 - * @zdev: pointer to zcrypt_device structure that identifies the 984 + * @zq: pointer to zcrypt_queue structure that identifies the 1006 985 * PCIXCC/CEX2C device to the request distributor 1007 986 * @mex: pointer to the modexpo request buffer 1008 987 */ 1009 - static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev, 988 + static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, 1010 989 struct ica_rsa_modexpo *mex) 1011 990 { 1012 991 struct ap_message ap_msg; ··· 1023 1002 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 1024 1003 atomic_inc_return(&zcrypt_step); 1025 1004 ap_msg.private = &resp_type; 1026 - rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex); 1005 + rc = ICAMEX_msg_to_type6MEX_msgX(zq, &ap_msg, mex); 1027 1006 if (rc) 1028 1007 goto out_free; 1029 1008 init_completion(&resp_type.work); 1030 - ap_queue_message(zdev->ap_dev, &ap_msg); 1009 + ap_queue_message(zq->queue, &ap_msg); 1031 1010 rc = wait_for_completion_interruptible(&resp_type.work); 1032 1011 if (rc == 0) { 1033 1012 rc = ap_msg.rc; 1034 1013 if (rc == 0) 1035 - rc = convert_response_ica(zdev, &ap_msg, 1014 + rc = convert_response_ica(zq, &ap_msg, 1036 1015 mex->outputdata, 1037 1016 mex->outputdatalength); 1038 1017 } else 1039 1018 /* Signal pending. */ 1040 - ap_cancel_message(zdev->ap_dev, &ap_msg); 1019 + ap_cancel_message(zq->queue, &ap_msg); 1041 1020 out_free: 1042 1021 free_page((unsigned long) ap_msg.message); 1043 1022 return rc; ··· 1046 1025 /** 1047 1026 * The request distributor calls this function if it picked the PCIXCC/CEX2C 1048 1027 * device to handle a modexpo_crt request. 
1049 - * @zdev: pointer to zcrypt_device structure that identifies the 1028 + * @zq: pointer to zcrypt_queue structure that identifies the 1050 1029 * PCIXCC/CEX2C device to the request distributor 1051 1030 * @crt: pointer to the modexpoc_crt request buffer 1052 1031 */ 1053 - static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev, 1032 + static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, 1054 1033 struct ica_rsa_modexpo_crt *crt) 1055 1034 { 1056 1035 struct ap_message ap_msg; ··· 1067 1046 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 1068 1047 atomic_inc_return(&zcrypt_step); 1069 1048 ap_msg.private = &resp_type; 1070 - rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt); 1049 + rc = ICACRT_msg_to_type6CRT_msgX(zq, &ap_msg, crt); 1071 1050 if (rc) 1072 1051 goto out_free; 1073 1052 init_completion(&resp_type.work); 1074 - ap_queue_message(zdev->ap_dev, &ap_msg); 1053 + ap_queue_message(zq->queue, &ap_msg); 1075 1054 rc = wait_for_completion_interruptible(&resp_type.work); 1076 1055 if (rc == 0) { 1077 1056 rc = ap_msg.rc; 1078 1057 if (rc == 0) 1079 - rc = convert_response_ica(zdev, &ap_msg, 1058 + rc = convert_response_ica(zq, &ap_msg, 1080 1059 crt->outputdata, 1081 1060 crt->outputdatalength); 1082 - } else 1061 + } else { 1083 1062 /* Signal pending. 
*/ 1084 - ap_cancel_message(zdev->ap_dev, &ap_msg); 1063 + ap_cancel_message(zq->queue, &ap_msg); 1064 + } 1085 1065 out_free: 1086 1066 free_page((unsigned long) ap_msg.message); 1087 1067 return rc; ··· 1090 1068 1091 1069 unsigned int get_cprb_fc(struct ica_xcRB *xcRB, 1092 1070 struct ap_message *ap_msg, 1093 - int *func_code) 1071 + unsigned int *func_code, unsigned short **dom) 1094 1072 { 1095 1073 struct response_type resp_type = { 1096 1074 .type = PCIXCC_RESPONSE_TYPE_XCRB, ··· 1110 1088 return -ENOMEM; 1111 1089 } 1112 1090 memcpy(ap_msg->private, &resp_type, sizeof(resp_type)); 1113 - rc = XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code); 1091 + rc = XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom); 1114 1092 if (rc) { 1115 1093 kzfree(ap_msg->message); 1116 1094 kzfree(ap_msg->private); ··· 1121 1099 /** 1122 1100 * The request distributor calls this function if it picked the PCIXCC/CEX2C 1123 1101 * device to handle a send_cprb request. 1124 - * @zdev: pointer to zcrypt_device structure that identifies the 1102 + * @zq: pointer to zcrypt_queue structure that identifies the 1125 1103 * PCIXCC/CEX2C device to the request distributor 1126 1104 * @xcRB: pointer to the send_cprb request buffer 1127 1105 */ 1128 - static long zcrypt_msgtype6_send_cprb(struct zcrypt_device *zdev, 1106 + static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq, 1129 1107 struct ica_xcRB *xcRB, 1130 1108 struct ap_message *ap_msg) 1131 1109 { ··· 1133 1111 struct response_type *rtype = (struct response_type *)(ap_msg->private); 1134 1112 1135 1113 init_completion(&rtype->work); 1136 - ap_queue_message(zdev->ap_dev, ap_msg); 1114 + ap_queue_message(zq->queue, ap_msg); 1137 1115 rc = wait_for_completion_interruptible(&rtype->work); 1138 1116 if (rc == 0) { 1139 1117 rc = ap_msg->rc; 1140 1118 if (rc == 0) 1141 - rc = convert_response_xcrb(zdev, ap_msg, xcRB); 1119 + rc = convert_response_xcrb(zq, ap_msg, xcRB); 1142 1120 } else 1143 1121 /* Signal pending. 
*/ 1144 - ap_cancel_message(zdev->ap_dev, ap_msg); 1122 + ap_cancel_message(zq->queue, ap_msg); 1145 1123 1146 1124 kzfree(ap_msg->message); 1147 1125 kzfree(ap_msg->private); ··· 1150 1128 1151 1129 unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb, 1152 1130 struct ap_message *ap_msg, 1153 - int *func_code) 1131 + unsigned int *func_code) 1154 1132 { 1155 1133 struct response_type resp_type = { 1156 1134 .type = PCIXCC_RESPONSE_TYPE_EP11, ··· 1181 1159 /** 1182 1160 * The request distributor calls this function if it picked the CEX4P 1183 1161 * device to handle a send_ep11_cprb request. 1184 - * @zdev: pointer to zcrypt_device structure that identifies the 1162 + * @zq: pointer to zcrypt_queue structure that identifies the 1185 1163 * CEX4P device to the request distributor 1186 1164 * @xcRB: pointer to the ep11 user request block 1187 1165 */ 1188 - static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_device *zdev, 1166 + static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq, 1189 1167 struct ep11_urb *xcrb, 1190 1168 struct ap_message *ap_msg) 1191 1169 { ··· 1218 1196 */ 1219 1197 if (!((msg->cprbx.flags & 0x80) == 0x80)) { 1220 1198 msg->cprbx.target_id = (unsigned int) 1221 - AP_QID_QUEUE(zdev->ap_dev->qid); 1199 + AP_QID_QUEUE(zq->queue->qid); 1222 1200 1223 1201 if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/ 1224 1202 switch (msg->pld_lenfmt & 0x03) { ··· 1236 1214 } 1237 1215 payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); 1238 1216 payload_hdr->dom_val = (unsigned int) 1239 - AP_QID_QUEUE(zdev->ap_dev->qid); 1217 + AP_QID_QUEUE(zq->queue->qid); 1240 1218 } 1241 1219 1242 1220 init_completion(&rtype->work); 1243 - ap_queue_message(zdev->ap_dev, ap_msg); 1221 + ap_queue_message(zq->queue, ap_msg); 1244 1222 rc = wait_for_completion_interruptible(&rtype->work); 1245 1223 if (rc == 0) { 1246 1224 rc = ap_msg->rc; 1247 1225 if (rc == 0) 1248 - rc = convert_response_ep11_xcrb(zdev, ap_msg, xcrb); 1226 + rc = 
convert_response_ep11_xcrb(zq, ap_msg, xcrb); 1249 1227 } else 1250 1228 /* Signal pending. */ 1251 - ap_cancel_message(zdev->ap_dev, ap_msg); 1229 + ap_cancel_message(zq->queue, ap_msg); 1252 1230 1253 1231 kzfree(ap_msg->message); 1254 1232 kzfree(ap_msg->private); 1255 1233 return rc; 1256 1234 } 1257 1235 1258 - unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code) 1236 + unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code, 1237 + unsigned int *domain) 1259 1238 { 1260 1239 struct response_type resp_type = { 1261 1240 .type = PCIXCC_RESPONSE_TYPE_XCRB, ··· 1276 1253 } 1277 1254 memcpy(ap_msg->private, &resp_type, sizeof(resp_type)); 1278 1255 1279 - rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE); 1256 + rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain); 1280 1257 1281 1258 *func_code = HWRNG; 1282 1259 return 0; ··· 1285 1262 /** 1286 1263 * The request distributor calls this function if it picked the PCIXCC/CEX2C 1287 1264 * device to generate random data. 
1288 - * @zdev: pointer to zcrypt_device structure that identifies the 1265 + * @zq: pointer to zcrypt_queue structure that identifies the 1289 1266 * PCIXCC/CEX2C device to the request distributor 1290 1267 * @buffer: pointer to a memory page to return random data 1291 1268 */ 1292 - static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev, 1269 + static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq, 1293 1270 char *buffer, struct ap_message *ap_msg) 1294 1271 { 1295 1272 struct { ··· 1304 1281 struct response_type *rtype = (struct response_type *)(ap_msg->private); 1305 1282 int rc; 1306 1283 1307 - msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); 1284 + msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); 1308 1285 1309 1286 init_completion(&rtype->work); 1310 - ap_queue_message(zdev->ap_dev, ap_msg); 1287 + ap_queue_message(zq->queue, ap_msg); 1311 1288 rc = wait_for_completion_interruptible(&rtype->work); 1312 1289 if (rc == 0) { 1313 1290 rc = ap_msg->rc; 1314 1291 if (rc == 0) 1315 - rc = convert_response_rng(zdev, ap_msg, buffer); 1292 + rc = convert_response_rng(zq, ap_msg, buffer); 1316 1293 } else 1317 1294 /* Signal pending. */ 1318 - ap_cancel_message(zdev->ap_dev, ap_msg); 1295 + ap_cancel_message(zq->queue, ap_msg); 1319 1296 1320 1297 kzfree(ap_msg->message); 1321 1298 kzfree(ap_msg->private);
+8 -4
drivers/s390/crypto/zcrypt_msgtype6.h
··· 116 116 unsigned int offset4; /* 0x00000000 */ 117 117 } __packed; 118 118 119 - unsigned int get_cprb_fc(struct ica_xcRB *, struct ap_message *, int *); 120 - unsigned int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *, int *); 121 - unsigned int get_rng_fc(struct ap_message *, int *); 119 + unsigned int get_cprb_fc(struct ica_xcRB *, struct ap_message *, 120 + unsigned int *, unsigned short **); 121 + unsigned int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *, 122 + unsigned int *); 123 + unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *); 122 124 123 125 #define LOW 10 124 126 #define MEDIUM 100 ··· 136 134 * @ap_msg: pointer to AP message 137 135 */ 138 136 static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg, 139 - unsigned int random_number_length) 137 + unsigned int random_number_length, 138 + unsigned int *domain) 140 139 { 141 140 struct { 142 141 struct type6_hdr hdr; ··· 175 172 msg->verb_length = 0x02; 176 173 msg->key_length = 0x02; 177 174 ap_msg->length = sizeof(*msg); 175 + *domain = (unsigned short)msg->cprbx.domain; 178 176 } 179 177 180 178 void zcrypt_msgtype6_init(void);
+153 -211
drivers/s390/crypto/zcrypt_pcixcc.c
··· 32 32 #include <linux/slab.h> 33 33 #include <linux/atomic.h> 34 34 #include <asm/uaccess.h> 35 + #include <linux/mod_devicetable.h> 35 36 36 37 #include "ap_bus.h" 37 38 #include "zcrypt_api.h" ··· 63 62 #define PCIXCC_RESPONSE_TYPE_ICA 0 64 63 #define PCIXCC_RESPONSE_TYPE_XCRB 1 65 64 66 - static struct ap_device_id zcrypt_pcixcc_ids[] = { 67 - { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) }, 68 - { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) }, 69 - { AP_DEVICE(AP_DEVICE_TYPE_CEX3C) }, 70 - { /* end of list */ }, 71 - }; 72 - 73 - MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids); 74 65 MODULE_AUTHOR("IBM Corporation"); 75 66 MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \ 76 67 "Copyright IBM Corp. 2001, 2012"); 77 68 MODULE_LICENSE("GPL"); 78 69 79 - static int zcrypt_pcixcc_probe(struct ap_device *ap_dev); 80 - static void zcrypt_pcixcc_remove(struct ap_device *ap_dev); 81 - 82 - static struct ap_driver zcrypt_pcixcc_driver = { 83 - .probe = zcrypt_pcixcc_probe, 84 - .remove = zcrypt_pcixcc_remove, 85 - .ids = zcrypt_pcixcc_ids, 86 - .request_timeout = PCIXCC_CLEANUP_TIME, 70 + static struct ap_device_id zcrypt_pcixcc_card_ids[] = { 71 + { .dev_type = AP_DEVICE_TYPE_PCIXCC, 72 + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 73 + { .dev_type = AP_DEVICE_TYPE_CEX2C, 74 + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 75 + { .dev_type = AP_DEVICE_TYPE_CEX3C, 76 + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 77 + { /* end of list */ }, 87 78 }; 88 79 89 - /** 90 - * Micro-code detection function. Its sends a message to a pcixcc card 91 - * to find out the microcode level. 92 - * @ap_dev: pointer to the AP device. 
93 - */ 94 - static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev) 95 - { 96 - static unsigned char msg[] = { 97 - 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00, 98 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 99 - 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00, 100 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 101 - 0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00, 102 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 103 - 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00, 104 - 0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00, 105 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 106 - 0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00, 107 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 108 - 0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32, 109 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8, 110 - 0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24, 111 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 112 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 113 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 114 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 115 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 116 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 117 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 118 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 119 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 120 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 121 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 122 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 123 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 124 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 125 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 126 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 127 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 128 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 129 - 0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00, 130 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 131 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 132 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 133 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 134 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 135 - 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A, 136 - 0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20, 137 - 
0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05, 138 - 0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D, 139 - 0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55, 140 - 0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD, 141 - 0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA, 142 - 0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22, 143 - 0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB, 144 - 0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54, 145 - 0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00, 146 - 0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00, 147 - 0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40, 148 - 0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C, 149 - 0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF, 150 - 0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9, 151 - 0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63, 152 - 0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5, 153 - 0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A, 154 - 0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01, 155 - 0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28, 156 - 0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91, 157 - 0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5, 158 - 0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C, 159 - 0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98, 160 - 0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96, 161 - 0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19, 162 - 0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47, 163 - 0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36, 164 - 0xF1,0x3D,0x93,0x53 165 - }; 166 - unsigned long long psmid; 167 - struct CPRBX *cprbx; 168 - char *reply; 169 - int rc, i; 80 + MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_card_ids); 170 81 171 - reply = (void *) get_zeroed_page(GFP_KERNEL); 172 - if (!reply) 173 - return -ENOMEM; 82 + static struct ap_device_id zcrypt_pcixcc_queue_ids[] = { 83 + { .dev_type = AP_DEVICE_TYPE_PCIXCC, 84 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 85 + { .dev_type = AP_DEVICE_TYPE_CEX2C, 86 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 87 + { .dev_type = AP_DEVICE_TYPE_CEX3C, 88 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 89 + { /* end of list */ }, 90 + }; 174 91 175 - rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, 
sizeof(msg)); 176 - if (rc) 177 - goto out_free; 178 - 179 - /* Wait for the test message to complete. */ 180 - for (i = 0; i < 6; i++) { 181 - msleep(300); 182 - rc = ap_recv(ap_dev->qid, &psmid, reply, 4096); 183 - if (rc == 0 && psmid == 0x0102030405060708ULL) 184 - break; 185 - } 186 - 187 - if (i >= 6) { 188 - /* Got no answer. */ 189 - rc = -ENODEV; 190 - goto out_free; 191 - } 192 - 193 - cprbx = (struct CPRBX *) (reply + 48); 194 - if (cprbx->ccp_rtcode == 8 && cprbx->ccp_rscode == 33) 195 - rc = ZCRYPT_PCIXCC_MCL2; 196 - else 197 - rc = ZCRYPT_PCIXCC_MCL3; 198 - out_free: 199 - free_page((unsigned long) reply); 200 - return rc; 201 - } 92 + MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_queue_ids); 202 93 203 94 /** 204 95 * Large random number detection function. Its sends a message to a pcixcc ··· 99 206 * 100 207 * Returns 1 if large random numbers are supported, 0 if not and < 0 on error. 101 208 */ 102 - static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev) 209 + static int zcrypt_pcixcc_rng_supported(struct ap_queue *aq) 103 210 { 104 211 struct ap_message ap_msg; 105 212 unsigned long long psmid; 213 + unsigned int domain; 106 214 struct { 107 215 struct type86_hdr hdr; 108 216 struct type86_fmt2_ext fmt2; ··· 125 231 if (!ap_msg.message) 126 232 return -ENOMEM; 127 233 128 - rng_type6CPRB_msgX(&ap_msg, 4); 234 + rng_type6CPRB_msgX(&ap_msg, 4, &domain); 129 235 130 236 msg = ap_msg.message; 131 - msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid); 237 + msg->cprbx.domain = AP_QID_QUEUE(aq->qid); 132 238 133 - rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message, 239 + rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.message, 134 240 ap_msg.length); 135 241 if (rc) 136 242 goto out_free; ··· 138 244 /* Wait for the test message to complete. 
*/ 139 245 for (i = 0; i < 2 * HZ; i++) { 140 246 msleep(1000 / HZ); 141 - rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096); 247 + rc = ap_recv(aq->qid, &psmid, ap_msg.message, 4096); 142 248 if (rc == 0 && psmid == 0x0102030405060708ULL) 143 249 break; 144 250 } ··· 160 266 } 161 267 162 268 /** 163 - * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device 164 - * since the bus_match already checked the hardware type. The PCIXCC 165 - * cards come in two flavours: micro code level 2 and micro code level 3. 166 - * This is checked by sending a test message to the device. 167 - * @ap_dev: pointer to the AP device. 269 + * Probe function for PCIXCC/CEX2C card devices. It always accepts the 270 + * AP device since the bus_match already checked the hardware type. The 271 + * PCIXCC cards come in two flavours: micro code level 2 and micro code 272 + * level 3. This is checked by sending a test message to the device. 273 + * @ap_dev: pointer to the AP card device. 168 274 */ 169 - static int zcrypt_pcixcc_probe(struct ap_device *ap_dev) 275 + static int zcrypt_pcixcc_card_probe(struct ap_device *ap_dev) 170 276 { 171 - struct zcrypt_device *zdev; 172 277 /* 173 278 * Normalized speed ratings per crypto adapter 174 279 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY 175 280 */ 176 - int PCIXCC_MCL2_SPEED_IDX[] = {10, 10, 10, 10, 10, 10, 10, 10}; 177 - int PCIXCC_MCL3_SPEED_IDX[] = { 8, 8, 8, 8, 8, 8, 8, 8}; 178 - int CEX2C_SPEED_IDX[] = {1000, 1400, 2400, 1100, 1500, 2600, 100, 12}; 179 - int CEX3C_SPEED_IDX[] = { 500, 700, 1400, 550, 800, 1500, 80, 10}; 281 + static const int CEX2C_SPEED_IDX[] = { 282 + 1000, 1400, 2400, 1100, 1500, 2600, 100, 12}; 283 + static const int CEX3C_SPEED_IDX[] = { 284 + 500, 700, 1400, 550, 800, 1500, 80, 10}; 285 + 286 + struct ap_card *ac = to_ap_card(&ap_dev->device); 287 + struct zcrypt_card *zc; 180 288 int rc = 0; 181 289 182 - zdev = zcrypt_device_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE); 183 - if 
(!zdev) 290 + zc = zcrypt_card_alloc(); 291 + if (!zc) 184 292 return -ENOMEM; 185 - zdev->ap_dev = ap_dev; 186 - zdev->online = 1; 187 - switch (ap_dev->device_type) { 188 - case AP_DEVICE_TYPE_PCIXCC: 189 - rc = zcrypt_pcixcc_mcl(ap_dev); 190 - if (rc < 0) { 191 - zcrypt_device_free(zdev); 192 - return rc; 193 - } 194 - zdev->user_space_type = rc; 195 - if (rc == ZCRYPT_PCIXCC_MCL2) { 196 - zdev->type_string = "PCIXCC_MCL2"; 197 - memcpy(zdev->speed_rating, PCIXCC_MCL2_SPEED_IDX, 198 - sizeof(PCIXCC_MCL2_SPEED_IDX)); 199 - zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; 200 - zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; 201 - zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE; 202 - } else { 203 - zdev->type_string = "PCIXCC_MCL3"; 204 - memcpy(zdev->speed_rating, PCIXCC_MCL3_SPEED_IDX, 205 - sizeof(PCIXCC_MCL3_SPEED_IDX)); 206 - zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; 207 - zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; 208 - zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE; 209 - } 210 - break; 293 + zc->card = ac; 294 + ac->private = zc; 295 + switch (ac->ap_dev.device_type) { 211 296 case AP_DEVICE_TYPE_CEX2C: 212 - zdev->user_space_type = ZCRYPT_CEX2C; 213 - zdev->type_string = "CEX2C"; 214 - memcpy(zdev->speed_rating, CEX2C_SPEED_IDX, 297 + zc->user_space_type = ZCRYPT_CEX2C; 298 + zc->type_string = "CEX2C"; 299 + memcpy(zc->speed_rating, CEX2C_SPEED_IDX, 215 300 sizeof(CEX2C_SPEED_IDX)); 216 - zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; 217 - zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; 218 - zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE; 301 + zc->min_mod_size = PCIXCC_MIN_MOD_SIZE; 302 + zc->max_mod_size = PCIXCC_MAX_MOD_SIZE; 303 + zc->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE; 219 304 break; 220 305 case AP_DEVICE_TYPE_CEX3C: 221 - zdev->user_space_type = ZCRYPT_CEX3C; 222 - zdev->type_string = "CEX3C"; 223 - memcpy(zdev->speed_rating, CEX3C_SPEED_IDX, 306 + zc->user_space_type = ZCRYPT_CEX3C; 307 + zc->type_string = "CEX3C"; 308 + memcpy(zc->speed_rating, 
CEX3C_SPEED_IDX, 224 309 sizeof(CEX3C_SPEED_IDX)); 225 - zdev->min_mod_size = CEX3C_MIN_MOD_SIZE; 226 - zdev->max_mod_size = CEX3C_MAX_MOD_SIZE; 227 - zdev->max_exp_bit_length = CEX3C_MAX_MOD_SIZE; 310 + zc->min_mod_size = CEX3C_MIN_MOD_SIZE; 311 + zc->max_mod_size = CEX3C_MAX_MOD_SIZE; 312 + zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE; 228 313 break; 229 314 default: 230 - goto out_free; 315 + zcrypt_card_free(zc); 316 + return -ENODEV; 231 317 } 232 - zdev->load = zdev->speed_rating[0]; 318 + zc->online = 1; 233 319 234 - rc = zcrypt_pcixcc_rng_supported(ap_dev); 235 - if (rc < 0) { 236 - zcrypt_device_free(zdev); 237 - return rc; 320 + rc = zcrypt_card_register(zc); 321 + if (rc) { 322 + ac->private = NULL; 323 + zcrypt_card_free(zc); 238 324 } 239 - if (rc) 240 - zdev->ops = zcrypt_msgtype(MSGTYPE06_NAME, 241 - MSGTYPE06_VARIANT_DEFAULT); 242 - else 243 - zdev->ops = zcrypt_msgtype(MSGTYPE06_NAME, 244 - MSGTYPE06_VARIANT_NORNG); 245 - ap_device_init_reply(ap_dev, &zdev->reply); 246 - ap_dev->private = zdev; 247 - rc = zcrypt_device_register(zdev); 248 - if (rc) 249 - goto out_free; 250 - return 0; 251 325 252 - out_free: 253 - ap_dev->private = NULL; 254 - zcrypt_device_free(zdev); 255 326 return rc; 256 327 } 257 328 258 329 /** 259 - * This is called to remove the extended PCIXCC/CEX2C driver information 260 - * if an AP device is removed. 330 + * This is called to remove the PCIXCC/CEX2C card driver information 331 + * if an AP card device is removed. 
261 332 */ 262 - static void zcrypt_pcixcc_remove(struct ap_device *ap_dev) 333 + static void zcrypt_pcixcc_card_remove(struct ap_device *ap_dev) 263 334 { 264 - struct zcrypt_device *zdev = ap_dev->private; 335 + struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private; 265 336 266 - zcrypt_device_unregister(zdev); 337 + if (zc) 338 + zcrypt_card_unregister(zc); 267 339 } 340 + 341 + static struct ap_driver zcrypt_pcixcc_card_driver = { 342 + .probe = zcrypt_pcixcc_card_probe, 343 + .remove = zcrypt_pcixcc_card_remove, 344 + .ids = zcrypt_pcixcc_card_ids, 345 + }; 346 + 347 + /** 348 + * Probe function for PCIXCC/CEX2C queue devices. It always accepts the 349 + * AP device since the bus_match already checked the hardware type. The 350 + * PCIXCC cards come in two flavours: micro code level 2 and micro code 351 + * level 3. This is checked by sending a test message to the device. 352 + * @ap_dev: pointer to the AP card device. 353 + */ 354 + static int zcrypt_pcixcc_queue_probe(struct ap_device *ap_dev) 355 + { 356 + struct ap_queue *aq = to_ap_queue(&ap_dev->device); 357 + struct zcrypt_queue *zq; 358 + int rc; 359 + 360 + zq = zcrypt_queue_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE); 361 + if (!zq) 362 + return -ENOMEM; 363 + zq->queue = aq; 364 + zq->online = 1; 365 + atomic_set(&zq->load, 0); 366 + rc = zcrypt_pcixcc_rng_supported(aq); 367 + if (rc < 0) { 368 + zcrypt_queue_free(zq); 369 + return rc; 370 + } 371 + if (rc) 372 + zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, 373 + MSGTYPE06_VARIANT_DEFAULT); 374 + else 375 + zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, 376 + MSGTYPE06_VARIANT_NORNG); 377 + ap_queue_init_reply(aq, &zq->reply); 378 + aq->request_timeout = PCIXCC_CLEANUP_TIME, 379 + aq->private = zq; 380 + rc = zcrypt_queue_register(zq); 381 + if (rc) { 382 + aq->private = NULL; 383 + zcrypt_queue_free(zq); 384 + } 385 + return rc; 386 + } 387 + 388 + /** 389 + * This is called to remove the PCIXCC/CEX2C queue driver information 390 + * if an AP queue device is 
removed. 391 + */ 392 + static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev) 393 + { 394 + struct ap_queue *aq = to_ap_queue(&ap_dev->device); 395 + struct zcrypt_queue *zq = aq->private; 396 + 397 + ap_queue_remove(aq); 398 + if (zq) 399 + zcrypt_queue_unregister(zq); 400 + } 401 + 402 + static struct ap_driver zcrypt_pcixcc_queue_driver = { 403 + .probe = zcrypt_pcixcc_queue_probe, 404 + .remove = zcrypt_pcixcc_queue_remove, 405 + .suspend = ap_queue_suspend, 406 + .resume = ap_queue_resume, 407 + .ids = zcrypt_pcixcc_queue_ids, 408 + }; 268 409 269 410 int __init zcrypt_pcixcc_init(void) 270 411 { 271 - return ap_driver_register(&zcrypt_pcixcc_driver, THIS_MODULE, "pcixcc"); 412 + int rc; 413 + 414 + rc = ap_driver_register(&zcrypt_pcixcc_card_driver, 415 + THIS_MODULE, "pcixcccard"); 416 + if (rc) 417 + return rc; 418 + 419 + rc = ap_driver_register(&zcrypt_pcixcc_queue_driver, 420 + THIS_MODULE, "pcixccqueue"); 421 + if (rc) 422 + ap_driver_unregister(&zcrypt_pcixcc_card_driver); 423 + 424 + return rc; 272 425 } 273 426 274 427 void zcrypt_pcixcc_exit(void) 275 428 { 276 - ap_driver_unregister(&zcrypt_pcixcc_driver); 429 + ap_driver_unregister(&zcrypt_pcixcc_queue_driver); 430 + ap_driver_unregister(&zcrypt_pcixcc_card_driver); 277 431 } 278 432 279 433 module_init(zcrypt_pcixcc_init);
+221
drivers/s390/crypto/zcrypt_queue.c
··· 1 + /* 2 + * zcrypt 2.1.0 3 + * 4 + * Copyright IBM Corp. 2001, 2012 5 + * Author(s): Robert Burroughs 6 + * Eric Rossman (edrossma@us.ibm.com) 7 + * Cornelia Huck <cornelia.huck@de.ibm.com> 8 + * 9 + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 10 + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 11 + * Ralph Wuerthner <rwuerthn@de.ibm.com> 12 + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 13 + * 14 + * This program is free software; you can redistribute it and/or modify 15 + * it under the terms of the GNU General Public License as published by 16 + * the Free Software Foundation; either version 2, or (at your option) 17 + * any later version. 18 + * 19 + * This program is distributed in the hope that it will be useful, 20 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 + * GNU General Public License for more details. 23 + */ 24 + 25 + #include <linux/module.h> 26 + #include <linux/init.h> 27 + #include <linux/interrupt.h> 28 + #include <linux/miscdevice.h> 29 + #include <linux/fs.h> 30 + #include <linux/proc_fs.h> 31 + #include <linux/seq_file.h> 32 + #include <linux/compat.h> 33 + #include <linux/slab.h> 34 + #include <linux/atomic.h> 35 + #include <linux/uaccess.h> 36 + #include <linux/hw_random.h> 37 + #include <linux/debugfs.h> 38 + #include <asm/debug.h> 39 + 40 + #include "zcrypt_debug.h" 41 + #include "zcrypt_api.h" 42 + 43 + #include "zcrypt_msgtype6.h" 44 + #include "zcrypt_msgtype50.h" 45 + 46 + /* 47 + * Device attributes common for all crypto queue devices. 
48 + */ 49 + 50 + static ssize_t zcrypt_queue_online_show(struct device *dev, 51 + struct device_attribute *attr, 52 + char *buf) 53 + { 54 + struct zcrypt_queue *zq = to_ap_queue(dev)->private; 55 + 56 + return snprintf(buf, PAGE_SIZE, "%d\n", zq->online); 57 + } 58 + 59 + static ssize_t zcrypt_queue_online_store(struct device *dev, 60 + struct device_attribute *attr, 61 + const char *buf, size_t count) 62 + { 63 + struct zcrypt_queue *zq = to_ap_queue(dev)->private; 64 + struct zcrypt_card *zc = zq->zcard; 65 + int online; 66 + 67 + if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) 68 + return -EINVAL; 69 + 70 + if (online && !zc->online) 71 + return -EINVAL; 72 + zq->online = online; 73 + ZCRYPT_DBF_DEV(DBF_INFO, zq, "dev%02x%04xo%dman", 74 + AP_QID_CARD(zq->queue->qid), 75 + AP_QID_QUEUE(zq->queue->qid), online); 76 + if (!online) 77 + ap_flush_queue(zq->queue); 78 + return count; 79 + } 80 + 81 + static DEVICE_ATTR(online, 0644, zcrypt_queue_online_show, 82 + zcrypt_queue_online_store); 83 + 84 + static struct attribute *zcrypt_queue_attrs[] = { 85 + &dev_attr_online.attr, 86 + NULL, 87 + }; 88 + 89 + static struct attribute_group zcrypt_queue_attr_group = { 90 + .attrs = zcrypt_queue_attrs, 91 + }; 92 + 93 + void zcrypt_queue_force_online(struct zcrypt_queue *zq, int online) 94 + { 95 + zq->online = online; 96 + if (!online) 97 + ap_flush_queue(zq->queue); 98 + } 99 + 100 + struct zcrypt_queue *zcrypt_queue_alloc(size_t max_response_size) 101 + { 102 + struct zcrypt_queue *zq; 103 + 104 + zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL); 105 + if (!zq) 106 + return NULL; 107 + zq->reply.message = kmalloc(max_response_size, GFP_KERNEL); 108 + if (!zq->reply.message) 109 + goto out_free; 110 + zq->reply.length = max_response_size; 111 + INIT_LIST_HEAD(&zq->list); 112 + zq->dbf_area = zcrypt_dbf_devices; 113 + kref_init(&zq->refcount); 114 + return zq; 115 + 116 + out_free: 117 + kfree(zq); 118 + return NULL; 119 + } 120 + 
EXPORT_SYMBOL(zcrypt_queue_alloc); 121 + 122 + void zcrypt_queue_free(struct zcrypt_queue *zq) 123 + { 124 + kfree(zq->reply.message); 125 + kfree(zq); 126 + } 127 + EXPORT_SYMBOL(zcrypt_queue_free); 128 + 129 + static void zcrypt_queue_release(struct kref *kref) 130 + { 131 + struct zcrypt_queue *zq = 132 + container_of(kref, struct zcrypt_queue, refcount); 133 + zcrypt_queue_free(zq); 134 + } 135 + 136 + void zcrypt_queue_get(struct zcrypt_queue *zq) 137 + { 138 + kref_get(&zq->refcount); 139 + } 140 + EXPORT_SYMBOL(zcrypt_queue_get); 141 + 142 + int zcrypt_queue_put(struct zcrypt_queue *zq) 143 + { 144 + return kref_put(&zq->refcount, zcrypt_queue_release); 145 + } 146 + EXPORT_SYMBOL(zcrypt_queue_put); 147 + 148 + /** 149 + * zcrypt_queue_register() - Register a crypto queue device. 150 + * @zq: Pointer to a crypto queue device 151 + * 152 + * Register a crypto queue device. Returns 0 if successful. 153 + */ 154 + int zcrypt_queue_register(struct zcrypt_queue *zq) 155 + { 156 + struct zcrypt_card *zc; 157 + int rc; 158 + 159 + spin_lock(&zcrypt_list_lock); 160 + zc = zq->queue->card->private; 161 + zcrypt_card_get(zc); 162 + zq->zcard = zc; 163 + zq->online = 1; /* New devices are online by default. 
*/ 164 + ZCRYPT_DBF_DEV(DBF_INFO, zq, "dev%02x%04xo%dreg", 165 + AP_QID_CARD(zq->queue->qid), 166 + AP_QID_QUEUE(zq->queue->qid), 167 + zq->online); 168 + list_add_tail(&zq->list, &zc->zqueues); 169 + zcrypt_device_count++; 170 + spin_unlock(&zcrypt_list_lock); 171 + 172 + rc = sysfs_create_group(&zq->queue->ap_dev.device.kobj, 173 + &zcrypt_queue_attr_group); 174 + if (rc) 175 + goto out; 176 + get_device(&zq->queue->ap_dev.device); 177 + 178 + if (zq->ops->rng) { 179 + rc = zcrypt_rng_device_add(); 180 + if (rc) 181 + goto out_unregister; 182 + } 183 + return 0; 184 + 185 + out_unregister: 186 + sysfs_remove_group(&zq->queue->ap_dev.device.kobj, 187 + &zcrypt_queue_attr_group); 188 + put_device(&zq->queue->ap_dev.device); 189 + out: 190 + spin_lock(&zcrypt_list_lock); 191 + list_del_init(&zq->list); 192 + spin_unlock(&zcrypt_list_lock); 193 + zcrypt_card_put(zc); 194 + return rc; 195 + } 196 + EXPORT_SYMBOL(zcrypt_queue_register); 197 + 198 + /** 199 + * zcrypt_queue_unregister(): Unregister a crypto queue device. 200 + * @zq: Pointer to crypto queue device 201 + * 202 + * Unregister a crypto queue device. 203 + */ 204 + void zcrypt_queue_unregister(struct zcrypt_queue *zq) 205 + { 206 + struct zcrypt_card *zc; 207 + 208 + zc = zq->zcard; 209 + spin_lock(&zcrypt_list_lock); 210 + list_del_init(&zq->list); 211 + zcrypt_device_count--; 212 + spin_unlock(&zcrypt_list_lock); 213 + zcrypt_card_put(zc); 214 + if (zq->ops->rng) 215 + zcrypt_rng_device_remove(); 216 + sysfs_remove_group(&zq->queue->ap_dev.device.kobj, 217 + &zcrypt_queue_attr_group); 218 + put_device(&zq->queue->ap_dev.device); 219 + zcrypt_queue_put(zq); 220 + } 221 + EXPORT_SYMBOL(zcrypt_queue_unregister);
+2 -1
include/linux/mod_devicetable.h
··· 175 175 kernel_ulong_t driver_info; 176 176 }; 177 177 178 - #define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01 178 + #define AP_DEVICE_ID_MATCH_CARD_TYPE 0x01 179 + #define AP_DEVICE_ID_MATCH_QUEUE_TYPE 0x02 179 180 180 181 /* s390 css bus devices (subchannels) */ 181 182 struct css_device_id {