Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.17 3380 lines 92 kB view raw
1/* 2 * linux/drivers/s390/crypto/z90main.c 3 * 4 * z90crypt 1.3.3 5 * 6 * Copyright (C) 2001, 2005 IBM Corporation 7 * Author(s): Robert Burroughs (burrough@us.ibm.com) 8 * Eric Rossman (edrossma@us.ibm.com) 9 * 10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2, or (at your option) 15 * any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; if not, write to the Free Software 24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 25 */ 26 27#include <asm/uaccess.h> // copy_(from|to)_user 28#include <linux/compat.h> 29#include <linux/compiler.h> 30#include <linux/delay.h> // mdelay 31#include <linux/init.h> 32#include <linux/interrupt.h> // for tasklets 33#include <linux/miscdevice.h> 34#include <linux/module.h> 35#include <linux/moduleparam.h> 36#include <linux/proc_fs.h> 37#include <linux/syscalls.h> 38#include "z90crypt.h" 39#include "z90common.h" 40 41/** 42 * Defaults that may be modified. 43 */ 44 45/** 46 * You can specify a different minor at compile time. 47 */ 48#ifndef Z90CRYPT_MINOR 49#define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR 50#endif 51 52/** 53 * You can specify a different domain at compile time or on the insmod 54 * command line. 55 */ 56#ifndef DOMAIN_INDEX 57#define DOMAIN_INDEX -1 58#endif 59 60/** 61 * This is the name under which the device is registered in /proc/modules. 
62 */ 63#define REG_NAME "z90crypt" 64 65/** 66 * Cleanup should run every CLEANUPTIME seconds and should clean up requests 67 * older than CLEANUPTIME seconds in the past. 68 */ 69#ifndef CLEANUPTIME 70#define CLEANUPTIME 15 71#endif 72 73/** 74 * Config should run every CONFIGTIME seconds 75 */ 76#ifndef CONFIGTIME 77#define CONFIGTIME 30 78#endif 79 80/** 81 * The first execution of the config task should take place 82 * immediately after initialization 83 */ 84#ifndef INITIAL_CONFIGTIME 85#define INITIAL_CONFIGTIME 1 86#endif 87 88/** 89 * Reader should run every READERTIME milliseconds 90 * With the 100Hz patch for s390, z90crypt can lock the system solid while 91 * under heavy load. We'll try to avoid that. 92 */ 93#ifndef READERTIME 94#if HZ > 1000 95#define READERTIME 2 96#else 97#define READERTIME 10 98#endif 99#endif 100 101/** 102 * turn long device array index into device pointer 103 */ 104#define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)]) 105 106/** 107 * turn short device array index into long device array index 108 */ 109#define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)]) 110 111/** 112 * turn short device array index into device pointer 113 */ 114#define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx)) 115 116/** 117 * Status for a work-element 118 */ 119#define STAT_DEFAULT 0x00 // request has not been processed 120 121#define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device 122 // else, device is determined each write 123#define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed 124 // before being sent to the hardware. 
125#define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device 126// 0x20 // UNUSED state 127#define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now 128#define STAT_NOWORK 0x00 // bits off: no work on any queue 129#define STAT_RDWRMASK 0x30 // mask for bits 5-4 130 131/** 132 * Macros to check the status RDWRMASK 133 */ 134#define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK) 135#define SET_RDWRMASK(statbyte, newval) \ 136 {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;} 137 138/** 139 * Audit Trail. Progress of a Work element 140 * audit[0]: Unless noted otherwise, these bits are all set by the process 141 */ 142#define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element 143#define FP_BUFFREQ 0x40 // Low Level buffer requested 144#define FP_BUFFGOT 0x20 // Low Level buffer obtained 145#define FP_SENT 0x10 // Work element sent to a crypto device 146 // (may be set by process or by reader task) 147#define FP_PENDING 0x08 // Work element placed on pending queue 148 // (may be set by process or by reader task) 149#define FP_REQUEST 0x04 // Work element placed on request queue 150#define FP_ASLEEP 0x02 // Work element about to sleep 151#define FP_AWAKE 0x01 // Work element has been awakened 152 153/** 154 * audit[1]: These bits are set by the reader task and/or the cleanup task 155 */ 156#define FP_NOTPENDING 0x80 // Work element removed from pending queue 157#define FP_AWAKENING 0x40 // Caller about to be awakened 158#define FP_TIMEDOUT 0x20 // Caller timed out 159#define FP_RESPSIZESET 0x10 // Response size copied to work element 160#define FP_RESPADDRCOPIED 0x08 // Response address copied to work element 161#define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element 162#define FP_REMREQUEST 0x02 // Work element removed from request queue 163#define FP_SIGNALED 0x01 // Work element was awakened by a signal 164 165/** 166 * audit[2]: unused 167 */ 168 169/** 170 * state of the file handle in 
private_data.status 171 */ 172#define STAT_OPEN 0 173#define STAT_CLOSED 1 174 175/** 176 * PID() expands to the process ID of the current process 177 */ 178#define PID() (current->pid) 179 180/** 181 * Selected Constants. The number of APs and the number of devices 182 */ 183#ifndef Z90CRYPT_NUM_APS 184#define Z90CRYPT_NUM_APS 64 185#endif 186#ifndef Z90CRYPT_NUM_DEVS 187#define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS 188#endif 189 190/** 191 * Buffer size for receiving responses. The maximum Response Size 192 * is actually the maximum request size, since in an error condition 193 * the request itself may be returned unchanged. 194 */ 195#define MAX_RESPONSE_SIZE 0x0000077C 196 197/** 198 * A count and status-byte mask 199 */ 200struct status { 201 int st_count; // # of enabled devices 202 int disabled_count; // # of disabled devices 203 int user_disabled_count; // # of devices disabled via proc fs 204 unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask 205}; 206 207/** 208 * The array of device indexes is a mechanism for fast indexing into 209 * a long (and sparse) array. For instance, if APs 3, 9 and 47 are 210 * installed, z90CDeviceIndex[0] is 3, z90CDeviceIndex[1] is 9, and 211 * z90CDeviceIndex[2] is 47. 
212 */ 213struct device_x { 214 int device_index[Z90CRYPT_NUM_DEVS]; 215}; 216 217/** 218 * All devices are arranged in a single array: 64 APs 219 */ 220struct device { 221 int dev_type; // PCICA, PCICC, PCIXCC_MCL2, 222 // PCIXCC_MCL3, CEX2C, CEX2A 223 enum devstat dev_stat; // current device status 224 int dev_self_x; // Index in array 225 int disabled; // Set when device is in error 226 int user_disabled; // Set when device is disabled by user 227 int dev_q_depth; // q depth 228 unsigned char * dev_resp_p; // Response buffer address 229 int dev_resp_l; // Response Buffer length 230 int dev_caller_count; // Number of callers 231 int dev_total_req_cnt; // # requests for device since load 232 struct list_head dev_caller_list; // List of callers 233}; 234 235/** 236 * There's a struct status and a struct device_x for each device type. 237 */ 238struct hdware_block { 239 struct status hdware_mask; 240 struct status type_mask[Z90CRYPT_NUM_TYPES]; 241 struct device_x type_x_addr[Z90CRYPT_NUM_TYPES]; 242 unsigned char device_type_array[Z90CRYPT_NUM_APS]; 243}; 244 245/** 246 * z90crypt is the topmost data structure in the hierarchy. 247 */ 248struct z90crypt { 249 int max_count; // Nr of possible crypto devices 250 struct status mask; 251 int q_depth_array[Z90CRYPT_NUM_DEVS]; 252 int dev_type_array[Z90CRYPT_NUM_DEVS]; 253 struct device_x overall_device_x; // array device indexes 254 struct device * device_p[Z90CRYPT_NUM_DEVS]; 255 int terminating; 256 int domain_established;// TRUE: domain has been found 257 int cdx; // Crypto Domain Index 258 int len; // Length of this data structure 259 struct hdware_block *hdware_info; 260}; 261 262/** 263 * An array of these structures is pointed to from dev_caller 264 * The length of the array depends on the device type. For APs, 265 * there are 8. 266 * 267 * The caller buffer is allocated to the user at OPEN. At WRITE, 268 * it contains the request; at READ, the response. 
The function 269 * send_to_crypto_device converts the request to device-dependent 270 * form and use the caller's OPEN-allocated buffer for the response. 271 * 272 * For the contents of caller_dev_dep_req and caller_dev_dep_req_p 273 * because that points to it, see the discussion in z90hardware.c. 274 * Search for "extended request message block". 275 */ 276struct caller { 277 int caller_buf_l; // length of original request 278 unsigned char * caller_buf_p; // Original request on WRITE 279 int caller_dev_dep_req_l; // len device dependent request 280 unsigned char * caller_dev_dep_req_p; // Device dependent form 281 unsigned char caller_id[8]; // caller-supplied message id 282 struct list_head caller_liste; 283 unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE]; 284}; 285 286/** 287 * Function prototypes from z90hardware.c 288 */ 289enum hdstat query_online(int deviceNr, int cdx, int resetNr, int *q_depth, 290 int *dev_type); 291enum devstat reset_device(int deviceNr, int cdx, int resetNr); 292enum devstat send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext); 293enum devstat receive_from_AP(int dev_nr, int cdx, int resplen, 294 unsigned char *resp, unsigned char *psmid); 295int convert_request(unsigned char *buffer, int func, unsigned short function, 296 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p); 297int convert_response(unsigned char *response, unsigned char *buffer, 298 int *respbufflen_p, unsigned char *resp_buff); 299 300/** 301 * Low level function prototypes 302 */ 303static int create_z90crypt(int *cdx_p); 304static int refresh_z90crypt(int *cdx_p); 305static int find_crypto_devices(struct status *deviceMask); 306static int create_crypto_device(int index); 307static int destroy_crypto_device(int index); 308static void destroy_z90crypt(void); 309static int refresh_index_array(struct status *status_str, 310 struct device_x *index_array); 311static int probe_device_type(struct device *devPtr); 312static int 
probe_PCIXCC_type(struct device *devPtr); 313 314/** 315 * proc fs definitions 316 */ 317static struct proc_dir_entry *z90crypt_entry; 318 319/** 320 * data structures 321 */ 322 323/** 324 * work_element.opener points back to this structure 325 */ 326struct priv_data { 327 pid_t opener_pid; 328 unsigned char status; // 0: open 1: closed 329}; 330 331/** 332 * A work element is allocated for each request 333 */ 334struct work_element { 335 struct priv_data *priv_data; 336 pid_t pid; 337 int devindex; // index of device processing this w_e 338 // (If request did not specify device, 339 // -1 until placed onto a queue) 340 int devtype; 341 struct list_head liste; // used for requestq and pendingq 342 char buffer[128]; // local copy of user request 343 int buff_size; // size of the buffer for the request 344 char resp_buff[RESPBUFFSIZE]; 345 int resp_buff_size; 346 char __user * resp_addr; // address of response in user space 347 unsigned int funccode; // function code of request 348 wait_queue_head_t waitq; 349 unsigned long requestsent; // time at which the request was sent 350 atomic_t alarmrung; // wake-up signal 351 unsigned char caller_id[8]; // pid + counter, for this w_e 352 unsigned char status[1]; // bits to mark status of the request 353 unsigned char audit[3]; // record of work element's progress 354 unsigned char * requestptr; // address of request buffer 355 int retcode; // return code of request 356}; 357 358/** 359 * High level function prototypes 360 */ 361static int z90crypt_open(struct inode *, struct file *); 362static int z90crypt_release(struct inode *, struct file *); 363static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *); 364static ssize_t z90crypt_write(struct file *, const char __user *, 365 size_t, loff_t *); 366static long z90crypt_unlocked_ioctl(struct file *, unsigned int, unsigned long); 367static long z90crypt_compat_ioctl(struct file *, unsigned int, unsigned long); 368 369static void 
z90crypt_reader_task(unsigned long); 370static void z90crypt_schedule_reader_task(unsigned long); 371static void z90crypt_config_task(unsigned long); 372static void z90crypt_cleanup_task(unsigned long); 373 374static int z90crypt_status(char *, char **, off_t, int, int *, void *); 375static int z90crypt_status_write(struct file *, const char __user *, 376 unsigned long, void *); 377 378/** 379 * Storage allocated at initialization and used throughout the life of 380 * this insmod 381 */ 382static int domain = DOMAIN_INDEX; 383static struct z90crypt z90crypt; 384static int quiesce_z90crypt; 385static spinlock_t queuespinlock; 386static struct list_head request_list; 387static int requestq_count; 388static struct list_head pending_list; 389static int pendingq_count; 390 391static struct tasklet_struct reader_tasklet; 392static struct timer_list reader_timer; 393static struct timer_list config_timer; 394static struct timer_list cleanup_timer; 395static atomic_t total_open; 396static atomic_t z90crypt_step; 397 398static struct file_operations z90crypt_fops = { 399 .owner = THIS_MODULE, 400 .read = z90crypt_read, 401 .write = z90crypt_write, 402 .unlocked_ioctl = z90crypt_unlocked_ioctl, 403#ifdef CONFIG_COMPAT 404 .compat_ioctl = z90crypt_compat_ioctl, 405#endif 406 .open = z90crypt_open, 407 .release = z90crypt_release 408}; 409 410static struct miscdevice z90crypt_misc_device = { 411 .minor = Z90CRYPT_MINOR, 412 .name = DEV_NAME, 413 .fops = &z90crypt_fops, 414 .devfs_name = DEV_NAME 415}; 416 417/** 418 * Documentation values. 419 */ 420MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. 
Rossman" 421 "and Jochen Roehrig"); 422MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, " 423 "Copyright 2001, 2005 IBM Corporation"); 424MODULE_LICENSE("GPL"); 425module_param(domain, int, 0); 426MODULE_PARM_DESC(domain, "domain index for device"); 427 428#ifdef CONFIG_COMPAT 429/** 430 * ioctl32 conversion routines 431 */ 432struct ica_rsa_modexpo_32 { // For 32-bit callers 433 compat_uptr_t inputdata; 434 unsigned int inputdatalength; 435 compat_uptr_t outputdata; 436 unsigned int outputdatalength; 437 compat_uptr_t b_key; 438 compat_uptr_t n_modulus; 439}; 440 441static long 442trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg) 443{ 444 struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg); 445 struct ica_rsa_modexpo_32 mex32k; 446 struct ica_rsa_modexpo __user *mex64; 447 long ret = 0; 448 unsigned int i; 449 450 if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32))) 451 return -EFAULT; 452 mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo)); 453 if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo))) 454 return -EFAULT; 455 if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32))) 456 return -EFAULT; 457 if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) || 458 __put_user(mex32k.inputdatalength, &mex64->inputdatalength) || 459 __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) || 460 __put_user(mex32k.outputdatalength, &mex64->outputdatalength) || 461 __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) || 462 __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus)) 463 return -EFAULT; 464 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)mex64); 465 if (!ret) 466 if (__get_user(i, &mex64->outputdatalength) || 467 __put_user(i, &mex32u->outputdatalength)) 468 ret = -EFAULT; 469 return ret; 470} 471 472struct ica_rsa_modexpo_crt_32 { // For 32-bit callers 473 compat_uptr_t inputdata; 474 unsigned int inputdatalength; 475 
compat_uptr_t outputdata; 476 unsigned int outputdatalength; 477 compat_uptr_t bp_key; 478 compat_uptr_t bq_key; 479 compat_uptr_t np_prime; 480 compat_uptr_t nq_prime; 481 compat_uptr_t u_mult_inv; 482}; 483 484static long 485trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg) 486{ 487 struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg); 488 struct ica_rsa_modexpo_crt_32 crt32k; 489 struct ica_rsa_modexpo_crt __user *crt64; 490 long ret = 0; 491 unsigned int i; 492 493 if (!access_ok(VERIFY_WRITE, crt32u, 494 sizeof(struct ica_rsa_modexpo_crt_32))) 495 return -EFAULT; 496 crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt)); 497 if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt))) 498 return -EFAULT; 499 if (copy_from_user(&crt32k, crt32u, 500 sizeof(struct ica_rsa_modexpo_crt_32))) 501 return -EFAULT; 502 if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) || 503 __put_user(crt32k.inputdatalength, &crt64->inputdatalength) || 504 __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) || 505 __put_user(crt32k.outputdatalength, &crt64->outputdatalength) || 506 __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) || 507 __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) || 508 __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) || 509 __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) || 510 __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv)) 511 return -EFAULT; 512 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)crt64); 513 if (!ret) 514 if (__get_user(i, &crt64->outputdatalength) || 515 __put_user(i, &crt32u->outputdatalength)) 516 ret = -EFAULT; 517 return ret; 518} 519 520static long 521z90crypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 522{ 523 switch (cmd) { 524 case ICAZ90STATUS: 525 case Z90QUIESCE: 526 case Z90STAT_TOTALCOUNT: 527 case Z90STAT_PCICACOUNT: 528 case Z90STAT_PCICCCOUNT: 529 case 
Z90STAT_PCIXCCCOUNT: 530 case Z90STAT_PCIXCCMCL2COUNT: 531 case Z90STAT_PCIXCCMCL3COUNT: 532 case Z90STAT_CEX2CCOUNT: 533 case Z90STAT_REQUESTQ_COUNT: 534 case Z90STAT_PENDINGQ_COUNT: 535 case Z90STAT_TOTALOPEN_COUNT: 536 case Z90STAT_DOMAIN_INDEX: 537 case Z90STAT_STATUS_MASK: 538 case Z90STAT_QDEPTH_MASK: 539 case Z90STAT_PERDEV_REQCNT: 540 return z90crypt_unlocked_ioctl(filp, cmd, arg); 541 case ICARSAMODEXPO: 542 return trans_modexpo32(filp, cmd, arg); 543 case ICARSACRT: 544 return trans_modexpo_crt32(filp, cmd, arg); 545 default: 546 return -ENOIOCTLCMD; 547 } 548} 549#endif 550 551/** 552 * The module initialization code. 553 */ 554static int __init 555z90crypt_init_module(void) 556{ 557 int result, nresult; 558 struct proc_dir_entry *entry; 559 560 PDEBUG("PID %d\n", PID()); 561 562 if ((domain < -1) || (domain > 15)) { 563 PRINTKW("Invalid param: domain = %d. Not loading.\n", domain); 564 return -EINVAL; 565 } 566 567 /* Register as misc device with given minor (or get a dynamic one). */ 568 result = misc_register(&z90crypt_misc_device); 569 if (result < 0) { 570 PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n", 571 z90crypt_misc_device.minor, result); 572 return result; 573 } 574 575 PDEBUG("Registered " DEV_NAME " with result %d\n", result); 576 577 result = create_z90crypt(&domain); 578 if (result != 0) { 579 PRINTKW("create_z90crypt (domain index %d) failed with %d.\n", 580 domain, result); 581 result = -ENOMEM; 582 goto init_module_cleanup; 583 } 584 585 if (result == 0) { 586 PRINTKN("Version %d.%d.%d loaded, built on %s %s\n", 587 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT, 588 __DATE__, __TIME__); 589 PDEBUG("create_z90crypt (domain index %d) successful.\n", 590 domain); 591 } else 592 PRINTK("No devices at startup\n"); 593 594 /* Initialize globals. 
*/ 595 spin_lock_init(&queuespinlock); 596 597 INIT_LIST_HEAD(&pending_list); 598 pendingq_count = 0; 599 600 INIT_LIST_HEAD(&request_list); 601 requestq_count = 0; 602 603 quiesce_z90crypt = 0; 604 605 atomic_set(&total_open, 0); 606 atomic_set(&z90crypt_step, 0); 607 608 /* Set up the cleanup task. */ 609 init_timer(&cleanup_timer); 610 cleanup_timer.function = z90crypt_cleanup_task; 611 cleanup_timer.data = 0; 612 cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ); 613 add_timer(&cleanup_timer); 614 615 /* Set up the proc file system */ 616 entry = create_proc_entry("driver/z90crypt", 0644, 0); 617 if (entry) { 618 entry->nlink = 1; 619 entry->data = 0; 620 entry->read_proc = z90crypt_status; 621 entry->write_proc = z90crypt_status_write; 622 } 623 else 624 PRINTK("Couldn't create z90crypt proc entry\n"); 625 z90crypt_entry = entry; 626 627 /* Set up the configuration task. */ 628 init_timer(&config_timer); 629 config_timer.function = z90crypt_config_task; 630 config_timer.data = 0; 631 config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ); 632 add_timer(&config_timer); 633 634 /* Set up the reader task */ 635 tasklet_init(&reader_tasklet, z90crypt_reader_task, 0); 636 init_timer(&reader_timer); 637 reader_timer.function = z90crypt_schedule_reader_task; 638 reader_timer.data = 0; 639 reader_timer.expires = jiffies + (READERTIME * HZ / 1000); 640 add_timer(&reader_timer); 641 642 return 0; // success 643 644init_module_cleanup: 645 if ((nresult = misc_deregister(&z90crypt_misc_device))) 646 PRINTK("misc_deregister failed with %d.\n", nresult); 647 else 648 PDEBUG("misc_deregister successful.\n"); 649 650 return result; // failure 651} 652 653/** 654 * The module termination code 655 */ 656static void __exit 657z90crypt_cleanup_module(void) 658{ 659 int nresult; 660 661 PDEBUG("PID %d\n", PID()); 662 663 remove_proc_entry("driver/z90crypt", 0); 664 665 if ((nresult = misc_deregister(&z90crypt_misc_device))) 666 PRINTK("misc_deregister failed with %d.\n", 
nresult); 667 else 668 PDEBUG("misc_deregister successful.\n"); 669 670 /* Remove the tasks */ 671 tasklet_kill(&reader_tasklet); 672 del_timer(&reader_timer); 673 del_timer(&config_timer); 674 del_timer(&cleanup_timer); 675 676 destroy_z90crypt(); 677 678 PRINTKN("Unloaded.\n"); 679} 680 681/** 682 * Functions running under a process id 683 * 684 * The I/O functions: 685 * z90crypt_open 686 * z90crypt_release 687 * z90crypt_read 688 * z90crypt_write 689 * z90crypt_unlocked_ioctl 690 * z90crypt_status 691 * z90crypt_status_write 692 * disable_card 693 * enable_card 694 * 695 * Helper functions: 696 * z90crypt_rsa 697 * z90crypt_prepare 698 * z90crypt_send 699 * z90crypt_process_results 700 * 701 */ 702static int 703z90crypt_open(struct inode *inode, struct file *filp) 704{ 705 struct priv_data *private_data_p; 706 707 if (quiesce_z90crypt) 708 return -EQUIESCE; 709 710 private_data_p = kzalloc(sizeof(struct priv_data), GFP_KERNEL); 711 if (!private_data_p) { 712 PRINTK("Memory allocate failed\n"); 713 return -ENOMEM; 714 } 715 716 private_data_p->status = STAT_OPEN; 717 private_data_p->opener_pid = PID(); 718 filp->private_data = private_data_p; 719 atomic_inc(&total_open); 720 721 return 0; 722} 723 724static int 725z90crypt_release(struct inode *inode, struct file *filp) 726{ 727 struct priv_data *private_data_p = filp->private_data; 728 729 PDEBUG("PID %d (filp %p)\n", PID(), filp); 730 731 private_data_p->status = STAT_CLOSED; 732 memset(private_data_p, 0, sizeof(struct priv_data)); 733 kfree(private_data_p); 734 atomic_dec(&total_open); 735 736 return 0; 737} 738 739/* 740 * there are two read functions, of which compile options will choose one 741 * without USE_GET_RANDOM_BYTES 742 * => read() always returns -EPERM; 743 * otherwise 744 * => read() uses get_random_bytes() kernel function 745 */ 746#ifndef USE_GET_RANDOM_BYTES 747/** 748 * z90crypt_read will not be supported beyond z90crypt 1.3.1 749 */ 750static ssize_t 751z90crypt_read(struct file *filp, char 
__user *buf, size_t count, loff_t *f_pos) 752{ 753 PDEBUG("filp %p (PID %d)\n", filp, PID()); 754 return -EPERM; 755} 756#else // we want to use get_random_bytes 757/** 758 * read() just returns a string of random bytes. Since we have no way 759 * to generate these cryptographically, we just execute get_random_bytes 760 * for the length specified. 761 */ 762#include <linux/random.h> 763static ssize_t 764z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) 765{ 766 unsigned char *temp_buff; 767 768 PDEBUG("filp %p (PID %d)\n", filp, PID()); 769 770 if (quiesce_z90crypt) 771 return -EQUIESCE; 772 if (count < 0) { 773 PRINTK("Requested random byte count negative: %ld\n", count); 774 return -EINVAL; 775 } 776 if (count > RESPBUFFSIZE) { 777 PDEBUG("count[%d] > RESPBUFFSIZE", count); 778 return -EINVAL; 779 } 780 if (count == 0) 781 return 0; 782 temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL); 783 if (!temp_buff) { 784 PRINTK("Memory allocate failed\n"); 785 return -ENOMEM; 786 } 787 get_random_bytes(temp_buff, count); 788 789 if (copy_to_user(buf, temp_buff, count) != 0) { 790 kfree(temp_buff); 791 return -EFAULT; 792 } 793 kfree(temp_buff); 794 return count; 795} 796#endif 797 798/** 799 * Write is is not allowed 800 */ 801static ssize_t 802z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos) 803{ 804 PDEBUG("filp %p (PID %d)\n", filp, PID()); 805 return -EPERM; 806} 807 808/** 809 * New status functions 810 */ 811static inline int 812get_status_totalcount(void) 813{ 814 return z90crypt.hdware_info->hdware_mask.st_count; 815} 816 817static inline int 818get_status_PCICAcount(void) 819{ 820 return z90crypt.hdware_info->type_mask[PCICA].st_count; 821} 822 823static inline int 824get_status_PCICCcount(void) 825{ 826 return z90crypt.hdware_info->type_mask[PCICC].st_count; 827} 828 829static inline int 830get_status_PCIXCCcount(void) 831{ 832 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count + 833 
z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count; 834} 835 836static inline int 837get_status_PCIXCCMCL2count(void) 838{ 839 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count; 840} 841 842static inline int 843get_status_PCIXCCMCL3count(void) 844{ 845 return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count; 846} 847 848static inline int 849get_status_CEX2Ccount(void) 850{ 851 return z90crypt.hdware_info->type_mask[CEX2C].st_count; 852} 853 854static inline int 855get_status_CEX2Acount(void) 856{ 857 return z90crypt.hdware_info->type_mask[CEX2A].st_count; 858} 859 860static inline int 861get_status_requestq_count(void) 862{ 863 return requestq_count; 864} 865 866static inline int 867get_status_pendingq_count(void) 868{ 869 return pendingq_count; 870} 871 872static inline int 873get_status_totalopen_count(void) 874{ 875 return atomic_read(&total_open); 876} 877 878static inline int 879get_status_domain_index(void) 880{ 881 return z90crypt.cdx; 882} 883 884static inline unsigned char * 885get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS]) 886{ 887 int i, ix; 888 889 memcpy(status, z90crypt.hdware_info->device_type_array, 890 Z90CRYPT_NUM_APS); 891 892 for (i = 0; i < get_status_totalcount(); i++) { 893 ix = SHRT2LONG(i); 894 if (LONG2DEVPTR(ix)->user_disabled) 895 status[ix] = 0x0d; 896 } 897 898 return status; 899} 900 901static inline unsigned char * 902get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS]) 903{ 904 int i, ix; 905 906 memset(qdepth, 0, Z90CRYPT_NUM_APS); 907 908 for (i = 0; i < get_status_totalcount(); i++) { 909 ix = SHRT2LONG(i); 910 qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count; 911 } 912 913 return qdepth; 914} 915 916static inline unsigned int * 917get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS]) 918{ 919 int i, ix; 920 921 memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int)); 922 923 for (i = 0; i < get_status_totalcount(); i++) { 924 ix = SHRT2LONG(i); 925 reqcnt[ix] = 
LONG2DEVPTR(ix)->dev_total_req_cnt; 926 } 927 928 return reqcnt; 929} 930 931static inline void 932init_work_element(struct work_element *we_p, 933 struct priv_data *priv_data, pid_t pid) 934{ 935 int step; 936 937 we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element); 938 /* Come up with a unique id for this caller. */ 939 step = atomic_inc_return(&z90crypt_step); 940 memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid)); 941 memcpy(we_p->caller_id+4, (void *) &step, sizeof(step)); 942 we_p->pid = pid; 943 we_p->priv_data = priv_data; 944 we_p->status[0] = STAT_DEFAULT; 945 we_p->audit[0] = 0x00; 946 we_p->audit[1] = 0x00; 947 we_p->audit[2] = 0x00; 948 we_p->resp_buff_size = 0; 949 we_p->retcode = 0; 950 we_p->devindex = -1; 951 we_p->devtype = -1; 952 atomic_set(&we_p->alarmrung, 0); 953 init_waitqueue_head(&we_p->waitq); 954 INIT_LIST_HEAD(&(we_p->liste)); 955} 956 957static inline int 958allocate_work_element(struct work_element **we_pp, 959 struct priv_data *priv_data_p, pid_t pid) 960{ 961 struct work_element *we_p; 962 963 we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL); 964 if (!we_p) 965 return -ENOMEM; 966 init_work_element(we_p, priv_data_p, pid); 967 *we_pp = we_p; 968 return 0; 969} 970 971static inline void 972remove_device(struct device *device_p) 973{ 974 if (!device_p || (device_p->disabled != 0)) 975 return; 976 device_p->disabled = 1; 977 z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++; 978 z90crypt.hdware_info->hdware_mask.disabled_count++; 979} 980 981/** 982 * Bitlength limits for each card 983 * 984 * There are new MCLs which allow more bitlengths. See the table for details. 985 * The MCL must be applied and the newer bitlengths enabled for these to work. 986 * 987 * Card Type Old limit New limit 988 * PCICA ??-2048 same (the lower limit is less than 128 bit...) 
989 * PCICC 512-1024 512-2048 990 * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card) 991 * PCIXCC_MCL3 ----- 128-2048 992 * CEX2C 512-2048 128-2048 993 * CEX2A ??-2048 same (the lower limit is less than 128 bit...) 994 * 995 * ext_bitlens (extended bitlengths) is a global, since you should not apply an 996 * MCL to just one card in a machine. We assume, at first, that all cards have 997 * these capabilities. 998 */ 999int ext_bitlens = 1; // This is global 1000#define PCIXCC_MIN_MOD_SIZE 16 // 128 bits 1001#define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits 1002#define PCICC_MIN_MOD_SIZE 64 // 512 bits 1003#define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits 1004#define MAX_MOD_SIZE 256 // 2048 bits 1005 1006static inline int 1007select_device_type(int *dev_type_p, int bytelength) 1008{ 1009 static int count = 0; 1010 int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, CEX2A_avail, 1011 index_to_use; 1012 struct status *stat; 1013 if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) && 1014 (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) && 1015 (*dev_type_p != CEX2C) && (*dev_type_p != CEX2A) && 1016 (*dev_type_p != ANYDEV)) 1017 return -1; 1018 if (*dev_type_p != ANYDEV) { 1019 stat = &z90crypt.hdware_info->type_mask[*dev_type_p]; 1020 if (stat->st_count > 1021 (stat->disabled_count + stat->user_disabled_count)) 1022 return 0; 1023 return -1; 1024 } 1025 1026 /** 1027 * Assumption: PCICA, PCIXCC_MCL3, CEX2C, and CEX2A are all similar in 1028 * speed. 1029 * 1030 * PCICA and CEX2A do NOT co-exist, so it would be either one or the 1031 * other present. 
1032 */ 1033 stat = &z90crypt.hdware_info->type_mask[PCICA]; 1034 PCICA_avail = stat->st_count - 1035 (stat->disabled_count + stat->user_disabled_count); 1036 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3]; 1037 PCIXCC_MCL3_avail = stat->st_count - 1038 (stat->disabled_count + stat->user_disabled_count); 1039 stat = &z90crypt.hdware_info->type_mask[CEX2C]; 1040 CEX2C_avail = stat->st_count - 1041 (stat->disabled_count + stat->user_disabled_count); 1042 stat = &z90crypt.hdware_info->type_mask[CEX2A]; 1043 CEX2A_avail = stat->st_count - 1044 (stat->disabled_count + stat->user_disabled_count); 1045 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail || CEX2A_avail) { 1046 /** 1047 * bitlength is a factor, PCICA or CEX2A are the most capable, 1048 * even with the new MCL for PCIXCC. 1049 */ 1050 if ((bytelength < PCIXCC_MIN_MOD_SIZE) || 1051 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) { 1052 if (PCICA_avail) { 1053 *dev_type_p = PCICA; 1054 return 0; 1055 } 1056 if (CEX2A_avail) { 1057 *dev_type_p = CEX2A; 1058 return 0; 1059 } 1060 return -1; 1061 } 1062 1063 index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail + 1064 CEX2C_avail + CEX2A_avail); 1065 if (index_to_use < PCICA_avail) 1066 *dev_type_p = PCICA; 1067 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail)) 1068 *dev_type_p = PCIXCC_MCL3; 1069 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail + 1070 CEX2C_avail)) 1071 *dev_type_p = CEX2C; 1072 else 1073 *dev_type_p = CEX2A; 1074 count++; 1075 return 0; 1076 } 1077 1078 /* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */ 1079 if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE) 1080 return -1; 1081 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2]; 1082 if (stat->st_count > 1083 (stat->disabled_count + stat->user_disabled_count)) { 1084 *dev_type_p = PCIXCC_MCL2; 1085 return 0; 1086 } 1087 1088 /** 1089 * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE 1090 * (if we don't have the MCL applied and the 
newer bitlengths enabled) 1091 * cannot go to a PCICC 1092 */ 1093 if ((bytelength < PCICC_MIN_MOD_SIZE) || 1094 (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) { 1095 return -1; 1096 } 1097 stat = &z90crypt.hdware_info->type_mask[PCICC]; 1098 if (stat->st_count > 1099 (stat->disabled_count + stat->user_disabled_count)) { 1100 *dev_type_p = PCICC; 1101 return 0; 1102 } 1103 1104 return -1; 1105} 1106 1107/** 1108 * Try the selected number, then the selected type (can be ANYDEV) 1109 */ 1110static inline int 1111select_device(int *dev_type_p, int *device_nr_p, int bytelength) 1112{ 1113 int i, indx, devTp, low_count, low_indx; 1114 struct device_x *index_p; 1115 struct device *dev_ptr; 1116 1117 PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p); 1118 if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) { 1119 PDEBUG("trying index = %d\n", *device_nr_p); 1120 dev_ptr = z90crypt.device_p[*device_nr_p]; 1121 1122 if (dev_ptr && 1123 (dev_ptr->dev_stat != DEV_GONE) && 1124 (dev_ptr->disabled == 0) && 1125 (dev_ptr->user_disabled == 0)) { 1126 PDEBUG("selected by number, index = %d\n", 1127 *device_nr_p); 1128 *dev_type_p = dev_ptr->dev_type; 1129 return *device_nr_p; 1130 } 1131 } 1132 *device_nr_p = -1; 1133 PDEBUG("trying type = %d\n", *dev_type_p); 1134 devTp = *dev_type_p; 1135 if (select_device_type(&devTp, bytelength) == -1) { 1136 PDEBUG("failed to select by type\n"); 1137 return -1; 1138 } 1139 PDEBUG("selected type = %d\n", devTp); 1140 index_p = &z90crypt.hdware_info->type_x_addr[devTp]; 1141 low_count = 0x0000FFFF; 1142 low_indx = -1; 1143 for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) { 1144 indx = index_p->device_index[i]; 1145 dev_ptr = z90crypt.device_p[indx]; 1146 if (dev_ptr && 1147 (dev_ptr->dev_stat != DEV_GONE) && 1148 (dev_ptr->disabled == 0) && 1149 (dev_ptr->user_disabled == 0) && 1150 (devTp == dev_ptr->dev_type) && 1151 (low_count > dev_ptr->dev_caller_count)) { 1152 low_count = 
dev_ptr->dev_caller_count; 1153 low_indx = indx; 1154 } 1155 } 1156 *device_nr_p = low_indx; 1157 return low_indx; 1158} 1159 1160static inline int 1161send_to_crypto_device(struct work_element *we_p) 1162{ 1163 struct caller *caller_p; 1164 struct device *device_p; 1165 int dev_nr; 1166 int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength; 1167 1168 if (!we_p->requestptr) 1169 return SEN_FATAL_ERROR; 1170 caller_p = (struct caller *)we_p->requestptr; 1171 dev_nr = we_p->devindex; 1172 if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) { 1173 if (z90crypt.hdware_info->hdware_mask.st_count != 0) 1174 return SEN_RETRY; 1175 else 1176 return SEN_NOT_AVAIL; 1177 } 1178 we_p->devindex = dev_nr; 1179 device_p = z90crypt.device_p[dev_nr]; 1180 if (!device_p) 1181 return SEN_NOT_AVAIL; 1182 if (device_p->dev_type != we_p->devtype) 1183 return SEN_RETRY; 1184 if (device_p->dev_caller_count >= device_p->dev_q_depth) 1185 return SEN_QUEUE_FULL; 1186 PDEBUG("device number prior to send: %d\n", dev_nr); 1187 switch (send_to_AP(dev_nr, z90crypt.cdx, 1188 caller_p->caller_dev_dep_req_l, 1189 caller_p->caller_dev_dep_req_p)) { 1190 case DEV_SEN_EXCEPTION: 1191 PRINTKC("Exception during send to device %d\n", dev_nr); 1192 z90crypt.terminating = 1; 1193 return SEN_FATAL_ERROR; 1194 case DEV_GONE: 1195 PRINTK("Device %d not available\n", dev_nr); 1196 remove_device(device_p); 1197 return SEN_NOT_AVAIL; 1198 case DEV_EMPTY: 1199 return SEN_NOT_AVAIL; 1200 case DEV_NO_WORK: 1201 return SEN_FATAL_ERROR; 1202 case DEV_BAD_MESSAGE: 1203 return SEN_USER_ERROR; 1204 case DEV_QUEUE_FULL: 1205 return SEN_QUEUE_FULL; 1206 default: 1207 case DEV_ONLINE: 1208 break; 1209 } 1210 list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list)); 1211 device_p->dev_caller_count++; 1212 return 0; 1213} 1214 1215/** 1216 * Send puts the user's work on one of two queues: 1217 * the pending queue if the send was successful 1218 * the request queue if the send failed 
because device full or busy 1219 */ 1220static inline int 1221z90crypt_send(struct work_element *we_p, const char *buf) 1222{ 1223 int rv; 1224 1225 PDEBUG("PID %d\n", PID()); 1226 1227 if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) { 1228 PDEBUG("PID %d tried to send more work but has outstanding " 1229 "work.\n", PID()); 1230 return -EWORKPEND; 1231 } 1232 we_p->devindex = -1; // Reset device number 1233 spin_lock_irq(&queuespinlock); 1234 rv = send_to_crypto_device(we_p); 1235 switch (rv) { 1236 case 0: 1237 we_p->requestsent = jiffies; 1238 we_p->audit[0] |= FP_SENT; 1239 list_add_tail(&we_p->liste, &pending_list); 1240 ++pendingq_count; 1241 we_p->audit[0] |= FP_PENDING; 1242 break; 1243 case SEN_BUSY: 1244 case SEN_QUEUE_FULL: 1245 rv = 0; 1246 we_p->devindex = -1; // any device will do 1247 we_p->requestsent = jiffies; 1248 list_add_tail(&we_p->liste, &request_list); 1249 ++requestq_count; 1250 we_p->audit[0] |= FP_REQUEST; 1251 break; 1252 case SEN_RETRY: 1253 rv = -ERESTARTSYS; 1254 break; 1255 case SEN_NOT_AVAIL: 1256 PRINTK("*** No devices available.\n"); 1257 rv = we_p->retcode = -ENODEV; 1258 we_p->status[0] |= STAT_FAILED; 1259 break; 1260 case REC_OPERAND_INV: 1261 case REC_OPERAND_SIZE: 1262 case REC_EVEN_MOD: 1263 case REC_INVALID_PAD: 1264 rv = we_p->retcode = -EINVAL; 1265 we_p->status[0] |= STAT_FAILED; 1266 break; 1267 default: 1268 we_p->retcode = rv; 1269 we_p->status[0] |= STAT_FAILED; 1270 break; 1271 } 1272 if (rv != -ERESTARTSYS) 1273 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN); 1274 spin_unlock_irq(&queuespinlock); 1275 if (rv == 0) 1276 tasklet_schedule(&reader_tasklet); 1277 return rv; 1278} 1279 1280/** 1281 * process_results copies the user's work from kernel space. 
/**
 * Recognize a PKCS#1 block-type-1 (a.k.a. "PKCS 1.1") padded operand:
 * 0x00 0x01, at least eight 0xFF pad bytes, then a 0x00 separator
 * before the end of the buffer. Used only for PCICC support.
 */
static inline int
is_PKCS11_padded(unsigned char *buffer, int length)
{
	int idx = 2;

	if (buffer[0] != 0x00 || buffer[1] != 0x01)
		return 0;
	/* Skip the run of 0xFF padding bytes. */
	while (idx < length && buffer[idx] == 0xFF)
		idx++;
	/* Need >= 8 pad bytes and a 0x00 separator before the end. */
	if (idx < 10 || idx == length)
		return 0;
	if (buffer[idx] != 0x00)
		return 0;
	return 1;
}
device-dependent format 1367 * func is ICARSAMODEXPO or ICARSACRT 1368 * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT 1369 */ 1370static inline int 1371build_caller(struct work_element *we_p, short function) 1372{ 1373 int rv; 1374 struct caller *caller_p = (struct caller *)we_p->requestptr; 1375 1376 if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) && 1377 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) && 1378 (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A)) 1379 return SEN_NOT_AVAIL; 1380 1381 memcpy(caller_p->caller_id, we_p->caller_id, 1382 sizeof(caller_p->caller_id)); 1383 caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req; 1384 caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE; 1385 caller_p->caller_buf_p = we_p->buffer; 1386 INIT_LIST_HEAD(&(caller_p->caller_liste)); 1387 1388 rv = convert_request(we_p->buffer, we_p->funccode, function, 1389 z90crypt.cdx, we_p->devtype, 1390 &caller_p->caller_dev_dep_req_l, 1391 caller_p->caller_dev_dep_req_p); 1392 if (rv) { 1393 if (rv == SEN_NOT_AVAIL) 1394 PDEBUG("request can't be processed on hdwr avail\n"); 1395 else 1396 PRINTK("Error from convert_request: %d\n", rv); 1397 } 1398 else 1399 memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id,8); 1400 return rv; 1401} 1402 1403static inline void 1404unbuild_caller(struct device *device_p, struct caller *caller_p) 1405{ 1406 if (!caller_p) 1407 return; 1408 if (caller_p->caller_liste.next && caller_p->caller_liste.prev) 1409 if (!list_empty(&caller_p->caller_liste)) { 1410 list_del_init(&caller_p->caller_liste); 1411 device_p->dev_caller_count--; 1412 } 1413 memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id)); 1414} 1415 1416static inline int 1417get_crypto_request_buffer(struct work_element *we_p) 1418{ 1419 struct ica_rsa_modexpo *mex_p; 1420 struct ica_rsa_modexpo_crt *crt_p; 1421 unsigned char *temp_buffer; 1422 short function; 1423 int rv; 1424 1425 mex_p = (struct ica_rsa_modexpo *) 
we_p->buffer; 1426 crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer; 1427 1428 PDEBUG("device type input = %d\n", we_p->devtype); 1429 1430 if (z90crypt.terminating) 1431 return REC_NO_RESPONSE; 1432 if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) { 1433 PRINTK("psmid zeroes\n"); 1434 return SEN_FATAL_ERROR; 1435 } 1436 if (!we_p->buffer) { 1437 PRINTK("buffer pointer NULL\n"); 1438 return SEN_USER_ERROR; 1439 } 1440 if (!we_p->requestptr) { 1441 PRINTK("caller pointer NULL\n"); 1442 return SEN_USER_ERROR; 1443 } 1444 1445 if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) && 1446 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) && 1447 (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A) && 1448 (we_p->devtype != ANYDEV)) { 1449 PRINTK("invalid device type\n"); 1450 return SEN_USER_ERROR; 1451 } 1452 1453 if ((mex_p->inputdatalength < 1) || 1454 (mex_p->inputdatalength > MAX_MOD_SIZE)) { 1455 PRINTK("inputdatalength[%d] is not valid\n", 1456 mex_p->inputdatalength); 1457 return SEN_USER_ERROR; 1458 } 1459 1460 if (mex_p->outputdatalength < mex_p->inputdatalength) { 1461 PRINTK("outputdatalength[%d] < inputdatalength[%d]\n", 1462 mex_p->outputdatalength, mex_p->inputdatalength); 1463 return SEN_USER_ERROR; 1464 } 1465 1466 if (!mex_p->inputdata || !mex_p->outputdata) { 1467 PRINTK("inputdata[%p] or outputdata[%p] is NULL\n", 1468 mex_p->outputdata, mex_p->inputdata); 1469 return SEN_USER_ERROR; 1470 } 1471 1472 /** 1473 * As long as outputdatalength is big enough, we can set the 1474 * outputdatalength equal to the inputdatalength, since that is the 1475 * number of bytes we will copy in any case 1476 */ 1477 mex_p->outputdatalength = mex_p->inputdatalength; 1478 1479 rv = 0; 1480 switch (we_p->funccode) { 1481 case ICARSAMODEXPO: 1482 if (!mex_p->b_key || !mex_p->n_modulus) 1483 rv = SEN_USER_ERROR; 1484 break; 1485 case ICARSACRT: 1486 if (!IS_EVEN(crt_p->inputdatalength)) { 1487 PRINTK("inputdatalength[%d] is odd, CRT form\n", 1488 
crt_p->inputdatalength); 1489 rv = SEN_USER_ERROR; 1490 break; 1491 } 1492 if (!crt_p->bp_key || 1493 !crt_p->bq_key || 1494 !crt_p->np_prime || 1495 !crt_p->nq_prime || 1496 !crt_p->u_mult_inv) { 1497 PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n", 1498 crt_p->bp_key, crt_p->bq_key, 1499 crt_p->np_prime, crt_p->nq_prime, 1500 crt_p->u_mult_inv); 1501 rv = SEN_USER_ERROR; 1502 } 1503 break; 1504 default: 1505 PRINTK("bad func = %d\n", we_p->funccode); 1506 rv = SEN_USER_ERROR; 1507 break; 1508 } 1509 if (rv != 0) 1510 return rv; 1511 1512 if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0) 1513 return SEN_NOT_AVAIL; 1514 1515 temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) + 1516 sizeof(struct caller); 1517 if (copy_from_user(temp_buffer, mex_p->inputdata, 1518 mex_p->inputdatalength) != 0) 1519 return SEN_RELEASED; 1520 1521 function = PCI_FUNC_KEY_ENCRYPT; 1522 switch (we_p->devtype) { 1523 /* PCICA and CEX2A do everything with a simple RSA mod-expo operation */ 1524 case PCICA: 1525 case CEX2A: 1526 function = PCI_FUNC_KEY_ENCRYPT; 1527 break; 1528 /** 1529 * PCIXCC_MCL2 does all Mod-Expo form with a simple RSA mod-expo 1530 * operation, and all CRT forms with a PKCS-1.2 format decrypt. 
1531 * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple RSA 1532 * mod-expo operation 1533 */ 1534 case PCIXCC_MCL2: 1535 if (we_p->funccode == ICARSAMODEXPO) 1536 function = PCI_FUNC_KEY_ENCRYPT; 1537 else 1538 function = PCI_FUNC_KEY_DECRYPT; 1539 break; 1540 case PCIXCC_MCL3: 1541 case CEX2C: 1542 if (we_p->funccode == ICARSAMODEXPO) 1543 function = PCI_FUNC_KEY_ENCRYPT; 1544 else 1545 function = PCI_FUNC_KEY_DECRYPT; 1546 break; 1547 /** 1548 * PCICC does everything as a PKCS-1.2 format request 1549 */ 1550 case PCICC: 1551 /* PCICC cannot handle input that is is PKCS#1.1 padded */ 1552 if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) { 1553 return SEN_NOT_AVAIL; 1554 } 1555 if (we_p->funccode == ICARSAMODEXPO) { 1556 if (is_PKCS12_padded(temp_buffer, 1557 mex_p->inputdatalength)) 1558 function = PCI_FUNC_KEY_ENCRYPT; 1559 else 1560 function = PCI_FUNC_KEY_DECRYPT; 1561 } else 1562 /* all CRT forms are decrypts */ 1563 function = PCI_FUNC_KEY_DECRYPT; 1564 break; 1565 } 1566 PDEBUG("function: %04x\n", function); 1567 rv = build_caller(we_p, function); 1568 PDEBUG("rv from build_caller = %d\n", rv); 1569 return rv; 1570} 1571 1572static inline int 1573z90crypt_prepare(struct work_element *we_p, unsigned int funccode, 1574 const char __user *buffer) 1575{ 1576 int rv; 1577 1578 we_p->devindex = -1; 1579 if (funccode == ICARSAMODEXPO) 1580 we_p->buff_size = sizeof(struct ica_rsa_modexpo); 1581 else 1582 we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt); 1583 1584 if (copy_from_user(we_p->buffer, buffer, we_p->buff_size)) 1585 return -EFAULT; 1586 1587 we_p->audit[0] |= FP_COPYFROM; 1588 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN); 1589 we_p->funccode = funccode; 1590 we_p->devtype = -1; 1591 we_p->audit[0] |= FP_BUFFREQ; 1592 rv = get_crypto_request_buffer(we_p); 1593 switch (rv) { 1594 case 0: 1595 we_p->audit[0] |= FP_BUFFGOT; 1596 break; 1597 case SEN_USER_ERROR: 1598 rv = -EINVAL; 1599 break; 1600 case SEN_QUEUE_FULL: 1601 rv = 0; 
1602 break; 1603 case SEN_RELEASED: 1604 rv = -EFAULT; 1605 break; 1606 case REC_NO_RESPONSE: 1607 rv = -ENODEV; 1608 break; 1609 case SEN_NOT_AVAIL: 1610 case EGETBUFF: 1611 rv = -EGETBUFF; 1612 break; 1613 default: 1614 PRINTK("rv = %d\n", rv); 1615 rv = -EGETBUFF; 1616 break; 1617 } 1618 if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN) 1619 SET_RDWRMASK(we_p->status[0], STAT_DEFAULT); 1620 return rv; 1621} 1622 1623static inline void 1624purge_work_element(struct work_element *we_p) 1625{ 1626 struct list_head *lptr; 1627 1628 spin_lock_irq(&queuespinlock); 1629 list_for_each(lptr, &request_list) { 1630 if (lptr == &we_p->liste) { 1631 list_del_init(lptr); 1632 requestq_count--; 1633 break; 1634 } 1635 } 1636 list_for_each(lptr, &pending_list) { 1637 if (lptr == &we_p->liste) { 1638 list_del_init(lptr); 1639 pendingq_count--; 1640 break; 1641 } 1642 } 1643 spin_unlock_irq(&queuespinlock); 1644} 1645 1646/** 1647 * Build the request and send it. 1648 */ 1649static inline int 1650z90crypt_rsa(struct priv_data *private_data_p, pid_t pid, 1651 unsigned int cmd, unsigned long arg) 1652{ 1653 struct work_element *we_p; 1654 int rv; 1655 1656 if ((rv = allocate_work_element(&we_p, private_data_p, pid))) { 1657 PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid); 1658 return rv; 1659 } 1660 if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg))) 1661 PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv); 1662 if (!rv) 1663 if ((rv = z90crypt_send(we_p, (const char *)arg))) 1664 PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv); 1665 if (!rv) { 1666 we_p->audit[0] |= FP_ASLEEP; 1667 wait_event(we_p->waitq, atomic_read(&we_p->alarmrung)); 1668 we_p->audit[0] |= FP_AWAKE; 1669 rv = we_p->retcode; 1670 } 1671 if (!rv) 1672 rv = z90crypt_process_results(we_p, (char __user *)arg); 1673 1674 if ((we_p->status[0] & STAT_FAILED)) { 1675 switch (rv) { 1676 /** 1677 * EINVAL *after* receive is almost always a padding error or 1678 * length error 
issued by a coprocessor (not an accelerator). 1679 * We convert this return value to -EGETBUFF which should 1680 * trigger a fallback to software. 1681 */ 1682 case -EINVAL: 1683 if ((we_p->devtype != PCICA) && 1684 (we_p->devtype != CEX2A)) 1685 rv = -EGETBUFF; 1686 break; 1687 case -ETIMEOUT: 1688 if (z90crypt.mask.st_count > 0) 1689 rv = -ERESTARTSYS; // retry with another 1690 else 1691 rv = -ENODEV; // no cards left 1692 /* fall through to clean up request queue */ 1693 case -ERESTARTSYS: 1694 case -ERELEASED: 1695 switch (CHK_RDWRMASK(we_p->status[0])) { 1696 case STAT_WRITTEN: 1697 purge_work_element(we_p); 1698 break; 1699 case STAT_READPEND: 1700 case STAT_NOWORK: 1701 default: 1702 break; 1703 } 1704 break; 1705 default: 1706 we_p->status[0] ^= STAT_FAILED; 1707 break; 1708 } 1709 } 1710 free_page((long)we_p); 1711 return rv; 1712} 1713 1714/** 1715 * This function is a little long, but it's really just one large switch 1716 * statement. 1717 */ 1718static long 1719z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 1720{ 1721 struct priv_data *private_data_p = filp->private_data; 1722 unsigned char *status; 1723 unsigned char *qdepth; 1724 unsigned int *reqcnt; 1725 struct ica_z90_status *pstat; 1726 int ret, i, loopLim, tempstat; 1727 static int deprecated_msg_count1 = 0; 1728 static int deprecated_msg_count2 = 0; 1729 1730 PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd); 1731 PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n", 1732 cmd, 1733 !_IOC_DIR(cmd) ? "NO" 1734 : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW" 1735 : ((_IOC_DIR(cmd) == _IOC_READ) ? 
"RD" 1736 : "WR")), 1737 _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd)); 1738 1739 if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) { 1740 PRINTK("cmd 0x%08X contains bad magic\n", cmd); 1741 return -ENOTTY; 1742 } 1743 1744 ret = 0; 1745 switch (cmd) { 1746 case ICARSAMODEXPO: 1747 case ICARSACRT: 1748 if (quiesce_z90crypt) { 1749 ret = -EQUIESCE; 1750 break; 1751 } 1752 ret = -ENODEV; // Default if no devices 1753 loopLim = z90crypt.hdware_info->hdware_mask.st_count - 1754 (z90crypt.hdware_info->hdware_mask.disabled_count + 1755 z90crypt.hdware_info->hdware_mask.user_disabled_count); 1756 for (i = 0; i < loopLim; i++) { 1757 ret = z90crypt_rsa(private_data_p, PID(), cmd, arg); 1758 if (ret != -ERESTARTSYS) 1759 break; 1760 } 1761 if (ret == -ERESTARTSYS) 1762 ret = -ENODEV; 1763 break; 1764 1765 case Z90STAT_TOTALCOUNT: 1766 tempstat = get_status_totalcount(); 1767 if (copy_to_user((int __user *)arg, &tempstat,sizeof(int)) != 0) 1768 ret = -EFAULT; 1769 break; 1770 1771 case Z90STAT_PCICACOUNT: 1772 tempstat = get_status_PCICAcount(); 1773 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) 1774 ret = -EFAULT; 1775 break; 1776 1777 case Z90STAT_PCICCCOUNT: 1778 tempstat = get_status_PCICCcount(); 1779 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) 1780 ret = -EFAULT; 1781 break; 1782 1783 case Z90STAT_PCIXCCMCL2COUNT: 1784 tempstat = get_status_PCIXCCMCL2count(); 1785 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) 1786 ret = -EFAULT; 1787 break; 1788 1789 case Z90STAT_PCIXCCMCL3COUNT: 1790 tempstat = get_status_PCIXCCMCL3count(); 1791 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) 1792 ret = -EFAULT; 1793 break; 1794 1795 case Z90STAT_CEX2CCOUNT: 1796 tempstat = get_status_CEX2Ccount(); 1797 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) 1798 ret = -EFAULT; 1799 break; 1800 1801 case Z90STAT_CEX2ACOUNT: 1802 tempstat = get_status_CEX2Acount(); 1803 if (copy_to_user((int __user *)arg, 
&tempstat, sizeof(int)) != 0) 1804 ret = -EFAULT; 1805 break; 1806 1807 case Z90STAT_REQUESTQ_COUNT: 1808 tempstat = get_status_requestq_count(); 1809 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) 1810 ret = -EFAULT; 1811 break; 1812 1813 case Z90STAT_PENDINGQ_COUNT: 1814 tempstat = get_status_pendingq_count(); 1815 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) 1816 ret = -EFAULT; 1817 break; 1818 1819 case Z90STAT_TOTALOPEN_COUNT: 1820 tempstat = get_status_totalopen_count(); 1821 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) 1822 ret = -EFAULT; 1823 break; 1824 1825 case Z90STAT_DOMAIN_INDEX: 1826 tempstat = get_status_domain_index(); 1827 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) 1828 ret = -EFAULT; 1829 break; 1830 1831 case Z90STAT_STATUS_MASK: 1832 status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL); 1833 if (!status) { 1834 PRINTK("kmalloc for status failed!\n"); 1835 ret = -ENOMEM; 1836 break; 1837 } 1838 get_status_status_mask(status); 1839 if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS) 1840 != 0) 1841 ret = -EFAULT; 1842 kfree(status); 1843 break; 1844 1845 case Z90STAT_QDEPTH_MASK: 1846 qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL); 1847 if (!qdepth) { 1848 PRINTK("kmalloc for qdepth failed!\n"); 1849 ret = -ENOMEM; 1850 break; 1851 } 1852 get_status_qdepth_mask(qdepth); 1853 if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0) 1854 ret = -EFAULT; 1855 kfree(qdepth); 1856 break; 1857 1858 case Z90STAT_PERDEV_REQCNT: 1859 reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL); 1860 if (!reqcnt) { 1861 PRINTK("kmalloc for reqcnt failed!\n"); 1862 ret = -ENOMEM; 1863 break; 1864 } 1865 get_status_perdevice_reqcnt(reqcnt); 1866 if (copy_to_user((char __user *) arg, reqcnt, 1867 Z90CRYPT_NUM_APS * sizeof(int)) != 0) 1868 ret = -EFAULT; 1869 kfree(reqcnt); 1870 break; 1871 1872 /* THIS IS DEPRECATED. 
USE THE NEW STATUS CALLS */ 1873 case ICAZ90STATUS: 1874 if (deprecated_msg_count1 < 20) { 1875 PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n"); 1876 deprecated_msg_count1++; 1877 if (deprecated_msg_count1 == 20) 1878 PRINTK("No longer issuing messages related to " 1879 "deprecated call to ICAZ90STATUS.\n"); 1880 } 1881 1882 pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL); 1883 if (!pstat) { 1884 PRINTK("kmalloc for pstat failed!\n"); 1885 ret = -ENOMEM; 1886 break; 1887 } 1888 1889 pstat->totalcount = get_status_totalcount(); 1890 pstat->leedslitecount = get_status_PCICAcount(); 1891 pstat->leeds2count = get_status_PCICCcount(); 1892 pstat->requestqWaitCount = get_status_requestq_count(); 1893 pstat->pendingqWaitCount = get_status_pendingq_count(); 1894 pstat->totalOpenCount = get_status_totalopen_count(); 1895 pstat->cryptoDomain = get_status_domain_index(); 1896 get_status_status_mask(pstat->status); 1897 get_status_qdepth_mask(pstat->qdepth); 1898 1899 if (copy_to_user((struct ica_z90_status __user *) arg, pstat, 1900 sizeof(struct ica_z90_status)) != 0) 1901 ret = -EFAULT; 1902 kfree(pstat); 1903 break; 1904 1905 /* THIS IS DEPRECATED. 
/**
 * Append len bytes as hex digits plus a trailing blank to outaddr;
 * return the number of characters written.
 */
static inline int
sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
{
	unsigned char *out = outaddr;
	unsigned int i;

	for (i = 0; i < len; i++)
		out += sprintf(out, "%01x", (unsigned int) addr[i]);
	out += sprintf(out, " ");

	return out - outaddr;
}
/**
 * Print a title line followed by len 32-bit words in hex, eight words
 * per row; return the number of characters written.
 */
static inline int
sprinthx4(unsigned char *title, unsigned char *outaddr,
	  unsigned int *array, unsigned int len)
{
	unsigned char *pos = outaddr;
	unsigned int r;

	pos += sprintf(pos, "\n%s\n", title);

	for (r = 0; r < len; r++) {
		if ((r % 8) == 0)
			pos += sprintf(pos, " ");
		pos += sprintf(pos, "%08X ", array[r]);
		if ((r % 8) == 7)
			pos += sprintf(pos, "\n");
	}

	pos += sprintf(pos, "\n");

	return pos - outaddr;
}
%d\n", 2052 get_status_pendingq_count()); 2053 len += sprintf(resp_buff+len, "Total open handles: %d\n\n", 2054 get_status_totalopen_count()); 2055 len += sprinthx( 2056 "Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " 2057 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A", 2058 resp_buff+len, 2059 get_status_status_mask(workarea), 2060 Z90CRYPT_NUM_APS); 2061 len += sprinthx("Waiting work element counts", 2062 resp_buff+len, 2063 get_status_qdepth_mask(workarea), 2064 Z90CRYPT_NUM_APS); 2065 len += sprinthx4( 2066 "Per-device successfully completed request counts", 2067 resp_buff+len, 2068 get_status_perdevice_reqcnt((unsigned int *)workarea), 2069 Z90CRYPT_NUM_APS); 2070 *eof = 1; 2071 memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int)); 2072 return len; 2073} 2074 2075static inline void 2076disable_card(int card_index) 2077{ 2078 struct device *devp; 2079 2080 devp = LONG2DEVPTR(card_index); 2081 if (!devp || devp->user_disabled) 2082 return; 2083 devp->user_disabled = 1; 2084 z90crypt.hdware_info->hdware_mask.user_disabled_count++; 2085 if (devp->dev_type == -1) 2086 return; 2087 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++; 2088} 2089 2090static inline void 2091enable_card(int card_index) 2092{ 2093 struct device *devp; 2094 2095 devp = LONG2DEVPTR(card_index); 2096 if (!devp || !devp->user_disabled) 2097 return; 2098 devp->user_disabled = 0; 2099 z90crypt.hdware_info->hdware_mask.user_disabled_count--; 2100 if (devp->dev_type == -1) 2101 return; 2102 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--; 2103} 2104 2105static int 2106z90crypt_status_write(struct file *file, const char __user *buffer, 2107 unsigned long count, void *data) 2108{ 2109 int j, eol; 2110 unsigned char *lbuf, *ptr; 2111 unsigned int local_count; 2112 2113#define LBUFSIZE 1200 2114 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); 2115 if (!lbuf) { 2116 PRINTK("kmalloc failed!\n"); 2117 return 0; 2118 } 2119 2120 if (count <= 0) 2121 return 0; 2122 2123 
local_count = UMIN((unsigned int)count, LBUFSIZE-1); 2124 2125 if (copy_from_user(lbuf, buffer, local_count) != 0) { 2126 kfree(lbuf); 2127 return -EFAULT; 2128 } 2129 2130 lbuf[local_count] = '\0'; 2131 2132 ptr = strstr(lbuf, "Online devices"); 2133 if (ptr == 0) { 2134 PRINTK("Unable to parse data (missing \"Online devices\")\n"); 2135 kfree(lbuf); 2136 return count; 2137 } 2138 2139 ptr = strstr(ptr, "\n"); 2140 if (ptr == 0) { 2141 PRINTK("Unable to parse data (missing newline after \"Online devices\")\n"); 2142 kfree(lbuf); 2143 return count; 2144 } 2145 ptr++; 2146 2147 if (strstr(ptr, "Waiting work element counts") == NULL) { 2148 PRINTK("Unable to parse data (missing \"Waiting work element counts\")\n"); 2149 kfree(lbuf); 2150 return count; 2151 } 2152 2153 j = 0; 2154 eol = 0; 2155 while ((j < 64) && (*ptr != '\0')) { 2156 switch (*ptr) { 2157 case '\t': 2158 case ' ': 2159 break; 2160 case '\n': 2161 default: 2162 eol = 1; 2163 break; 2164 case '0': // no device 2165 case '1': // PCICA 2166 case '2': // PCICC 2167 case '3': // PCIXCC_MCL2 2168 case '4': // PCIXCC_MCL3 2169 case '5': // CEX2C 2170 case '6': // CEX2A 2171 j++; 2172 break; 2173 case 'd': 2174 case 'D': 2175 disable_card(j); 2176 j++; 2177 break; 2178 case 'e': 2179 case 'E': 2180 enable_card(j); 2181 j++; 2182 break; 2183 } 2184 if (eol) 2185 break; 2186 ptr++; 2187 } 2188 2189 kfree(lbuf); 2190 return count; 2191} 2192 2193/** 2194 * Functions that run under a timer, with no process id 2195 * 2196 * The task functions: 2197 * z90crypt_reader_task 2198 * helper_send_work 2199 * helper_handle_work_element 2200 * helper_receive_rc 2201 * z90crypt_config_task 2202 * z90crypt_cleanup_task 2203 * 2204 * Helper functions: 2205 * z90crypt_schedule_reader_timer 2206 * z90crypt_schedule_reader_task 2207 * z90crypt_schedule_config_task 2208 * z90crypt_schedule_cleanup_task 2209 */ 2210static inline int 2211receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p, 2212 unsigned char 
*buff, unsigned char __user **dest_p_p)
{
	int dv, rv;
	struct device *dev_ptr;
	struct caller *caller_p;
	struct ica_rsa_modexpo *icaMsg_p;
	struct list_head *ptr, *tptr;

	/* Start with a null PSMID; it is overwritten on a successful dequeue */
	memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));

	if (z90crypt.terminating)
		return REC_FATAL_ERROR;

	caller_p = 0;
	dev_ptr = z90crypt.device_p[index];
	rv = 0;
	do {
		if (!dev_ptr || dev_ptr->disabled) {
			rv = REC_NO_WORK; // a disabled device can't return work
			break;
		}
		if (dev_ptr->dev_self_x != index) {
			/* self-index mismatch means the table is corrupt;
			 * shut the whole driver down */
			PRINTKC("Corrupt dev ptr\n");
			z90crypt.terminating = 1;
			rv = REC_FATAL_ERROR;
			break;
		}
		if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
			/* no response buffer: treat like a receive exception */
			dv = DEV_REC_EXCEPTION;
			PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
			       dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
		} else {
			PDEBUG("Dequeue called for device %d\n", index);
			dv = receive_from_AP(index, z90crypt.cdx,
					     dev_ptr->dev_resp_l,
					     dev_ptr->dev_resp_p, psmid);
		}
		/* map the low-level device status to a REC_* return code */
		switch (dv) {
		case DEV_REC_EXCEPTION:
			rv = REC_FATAL_ERROR;
			z90crypt.terminating = 1;
			PRINTKC("Exception in receive from device %d\n",
				index);
			break;
		case DEV_ONLINE:
			rv = 0;
			break;
		case DEV_EMPTY:
			rv = REC_EMPTY;
			break;
		case DEV_NO_WORK:
			rv = REC_NO_WORK;
			break;
		case DEV_BAD_MESSAGE:
		case DEV_GONE:
		case REC_HARDWAR_ERR:
		default:
			rv = REC_NO_RESPONSE;
			break;
		}
		if (rv)
			break;
		if (dev_ptr->dev_caller_count <= 0) {
			rv = REC_USER_GONE;
			break;
		}

		/* find the caller entry whose PSMID matches the response */
		list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
			caller_p = list_entry(ptr, struct caller, caller_liste);
			if (!memcmp(caller_p->caller_id, psmid,
				    sizeof(caller_p->caller_id))) {
				if (!list_empty(&caller_p->caller_liste)) {
					list_del_init(ptr);
					dev_ptr->dev_caller_count--;
					break;
				}
			}
			caller_p = 0;
		}
		if (!caller_p) {
			/* response arrived for a request we no longer track */
			PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
				"%02X%02X%02X in device list\n",
				psmid[0], psmid[1], psmid[2], psmid[3],
				psmid[4], psmid[5], psmid[6], psmid[7]);
			rv = REC_USER_GONE;
			break;
		}

		PDEBUG("caller_p after successful receive: %p\n", caller_p);
		rv = convert_response(dev_ptr->dev_resp_p,
				      caller_p->caller_buf_p, buff_len_p, buff);
		switch (rv) {
		case REC_USE_PCICA:
			break;
		case REC_OPERAND_INV:
		case REC_OPERAND_SIZE:
		case REC_EVEN_MOD:
		case REC_INVALID_PAD:
			/* caller-supplied operands were bad, not the card */
			PDEBUG("device %d: 'user error' %d\n", index, rv);
			break;
		case WRONG_DEVICE_TYPE:
		case REC_HARDWAR_ERR:
		case REC_BAD_MESSAGE:
			PRINTKW("device %d: hardware error %d\n", index, rv);
			rv = REC_NO_RESPONSE;
			break;
		default:
			PDEBUG("device %d: rv = %d\n", index, rv);
			break;
		}
	} while (0);

	switch (rv) {
	case 0:
		PDEBUG("Successful receive from device %d\n", index);
		icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
		*dest_p_p = icaMsg_p->outputdata;
		if (*buff_len_p == 0)
			PRINTK("Zero *buff_len_p\n");
		break;
	case REC_NO_RESPONSE:
		/* hardware error: take the device out of service */
		PRINTKW("Removing device %d from availability\n", index);
		remove_device(dev_ptr);
		break;
	}

	if (caller_p)
		unbuild_caller(dev_ptr, caller_p);

	return rv;
}

/**
 * Pull the next request off request_list and hand it to the device at
 * short index 'index'.  On send failure (or a device-type mismatch) the
 * request is failed and its waiter woken.  Caller holds queuespinlock.
 */
static inline void
helper_send_work(int index)
{
	struct work_element *rq_p;
	int rv;

	if (list_empty(&request_list))
		return;
	requestq_count--;
	rq_p = list_entry(request_list.next, struct work_element, liste);
	list_del_init(&rq_p->liste);
	rq_p->audit[1] |= FP_REMREQUEST;
	if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
		rq_p->devindex = SHRT2LONG(index);
		rv = send_to_crypto_device(rq_p);
		if (rv == 0) {
			rq_p->requestsent = jiffies;
			rq_p->audit[0] |= FP_SENT;
			list_add_tail(&rq_p->liste, &pending_list);
			++pendingq_count;
			rq_p->audit[0] |= FP_PENDING;
		} else {
			switch (rv) {
			case REC_OPERAND_INV:
			case REC_OPERAND_SIZE:
			case REC_EVEN_MOD:
			case REC_INVALID_PAD:
				rq_p->retcode = -EINVAL;
				break;
			case SEN_NOT_AVAIL:
			case SEN_RETRY:
			case REC_NO_RESPONSE:
			default:
				/* with another device left, let the caller
				 * retry; otherwise report no device */
				if (z90crypt.mask.st_count > 1)
					rq_p->retcode =
						-ERESTARTSYS;
				else
					rq_p->retcode = -ENODEV;
				break;
			}
			rq_p->status[0] |= STAT_FAILED;
			rq_p->audit[1] |= FP_AWAKENING;
			atomic_set(&rq_p->alarmrung, 1);
			wake_up(&rq_p->waitq);
		}
	} else {
		/* request was queued for a different device type */
		if (z90crypt.mask.st_count > 1)
			rq_p->retcode = -ERESTARTSYS;
		else
			rq_p->retcode = -ENODEV;
		rq_p->status[0] |= STAT_FAILED;
		rq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&rq_p->alarmrung, 1);
		wake_up(&rq_p->waitq);
	}
}

/**
 * Match a completed response (identified by psmid) to its work element on
 * pending_list, copy the result in, set the return code from 'rc', and
 * wake the waiting caller.  Caller holds queuespinlock.
 */
static inline void
helper_handle_work_element(int index, unsigned char psmid[8], int rc,
			   int buff_len, unsigned char *buff,
			   unsigned char __user *resp_addr)
{
	struct work_element *pq_p;
	struct list_head *lptr, *tptr;

	pq_p = 0;
	list_for_each_safe(lptr, tptr, &pending_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
			list_del_init(lptr);
			pendingq_count--;
			pq_p->audit[1] |= FP_NOTPENDING;
			break;
		}
		pq_p = 0;
	}

	if (!pq_p) {
		PRINTK("device %d has work but no caller exists on pending Q\n",
		       SHRT2LONG(index));
		return;
	}

	switch (rc) {
	case 0:
		pq_p->resp_buff_size = buff_len;
		pq_p->audit[1] |= FP_RESPSIZESET;
		if (buff_len) {
			pq_p->resp_addr = resp_addr;
			pq_p->audit[1] |= FP_RESPADDRCOPIED;
			memcpy(pq_p->resp_buff, buff, buff_len);
			pq_p->audit[1] |= FP_RESPBUFFCOPIED;
		}
		break;
	case REC_OPERAND_INV:
	case REC_OPERAND_SIZE:
	case REC_EVEN_MOD:
	case REC_INVALID_PAD:
		PDEBUG("-EINVAL after application error %d\n", rc);
		pq_p->retcode = -EINVAL;
		pq_p->status[0] |= STAT_FAILED;
		break;
	case REC_USE_PCICA:
		/* request must be redriven on a PCICA device */
		pq_p->retcode = -ERESTARTSYS;
		pq_p->status[0] |= STAT_FAILED;
		break;
	case REC_NO_RESPONSE:
	default:
		if (z90crypt.mask.st_count > 1)
			pq_p->retcode = -ERESTARTSYS;
		else
			pq_p->retcode = -ENODEV;
		pq_p->status[0] |= STAT_FAILED;
		break;
	}
	/* wake the waiter unless it both failed AND was already released */
	if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	}
}

/**
 * return TRUE if the work element should be removed from the queue
 */
static inline int
helper_receive_rc(int index, int *rc_p)
{
	switch (*rc_p) {
	case 0:
	case REC_OPERAND_INV:
	case REC_OPERAND_SIZE:
	case REC_EVEN_MOD:
	case REC_INVALID_PAD:
	case REC_USE_PCICA:
		break;

	case REC_BUSY:
	case REC_NO_WORK:
	case REC_EMPTY:
	case REC_RETRY_DEV:
	case REC_FATAL_ERROR:
		return 0;

	case REC_NO_RESPONSE:
		break;

	default:
		PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
		       *rc_p, SHRT2LONG(index));
		*rc_p = REC_NO_RESPONSE;
		break;
	}
	return 1;
}

/* (Re)arm the reader timer unless it is already pending. */
static inline void
z90crypt_schedule_reader_timer(void)
{
	if (timer_pending(&reader_timer))
		return;
	if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
		PRINTK("Timer pending while modifying reader timer\n");
}

/**
 * Tasklet body: poll every device round-robin, dispatch completed
 * responses to their waiters and feed queued requests to freed-up
 * devices.  Reschedules itself (via the reader timer) while work remains.
 */
static void
z90crypt_reader_task(unsigned long ptr)
{
	int workavail, index, rc, buff_len;
	unsigned char psmid[8];
	unsigned char __user *resp_addr;
	static unsigned char buff[1024];

	/**
	 * we use workavail = 2 to ensure 2 passes with nothing dequeued before
	 * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
	 * loop, there is no work remaining on the queues.
	 */
	resp_addr = 0;
	workavail = 2;
	buff_len = 0;
	while (workavail) {
		workavail--;
		rc = 0;
		spin_lock_irq(&queuespinlock);
		memset(buff, 0x00, sizeof(buff));

		/* Dequeue once from each device in round robin. */
		for (index = 0; index < z90crypt.mask.st_count; index++) {
			PDEBUG("About to receive.\n");
			rc = receive_from_crypto_device(SHRT2LONG(index),
							psmid,
							&buff_len,
							buff,
							&resp_addr);
			PDEBUG("Dequeued: rc = %d.\n", rc);

			if (helper_receive_rc(index, &rc)) {
				if (rc != REC_NO_RESPONSE) {
					/* device freed a slot; send it more */
					helper_send_work(index);
					workavail = 2;
				}

				helper_handle_work_element(index, psmid, rc,
							   buff_len, buff,
							   resp_addr);
			}

			if (rc == REC_FATAL_ERROR)
				PRINTKW("REC_FATAL_ERROR from device %d!\n",
					SHRT2LONG(index));
		}
		spin_unlock_irq(&queuespinlock);
	}

	if (pendingq_count + requestq_count)
		z90crypt_schedule_reader_timer();
}

/* (Re)arm the periodic configuration timer; 'expiration' is in seconds. */
static inline void
z90crypt_schedule_config_task(unsigned int expiration)
{
	if (timer_pending(&config_timer))
		return;
	if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
		PRINTK("Timer pending while modifying config timer\n");
}

/* Periodic re-scan of the hardware configuration. */
static void
z90crypt_config_task(unsigned long ptr)
{
	int rc;

	PDEBUG("jiffies %ld\n", jiffies);

	if ((rc = refresh_z90crypt(&z90crypt.cdx)))
		PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
	/* If return was fatal, don't bother reconfiguring */
	if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
		z90crypt_schedule_config_task(CONFIGTIME);
}

/* (Re)arm the periodic cleanup timer unless it is already pending. */
static inline void
z90crypt_schedule_cleanup_task(void)
{
	if (timer_pending(&cleanup_timer))
		return;
	if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
		PRINTK("Timer pending while modifying cleanup timer\n");
}

/**
 * Fail every pending and queued request with -ENODEV and wake the
 * waiters.  Used when no devices remain.  Caller holds queuespinlock.
 */
static inline void
helper_drain_queues(void)
{
	struct work_element *pq_p;
	struct list_head *lptr, *tptr;

	list_for_each_safe(lptr, tptr, &pending_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		pq_p->retcode = -ENODEV;
		pq_p->status[0] |= STAT_FAILED;
		unbuild_caller(LONG2DEVPTR(pq_p->devindex),
			       (struct caller *)pq_p->requestptr);
		list_del_init(lptr);
		pendingq_count--;
		pq_p->audit[1] |= FP_NOTPENDING;
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	}

	list_for_each_safe(lptr, tptr, &request_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		pq_p->retcode = -ENODEV;
		pq_p->status[0] |= STAT_FAILED;
		list_del_init(lptr);
		requestq_count--;
		pq_p->audit[1] |= FP_REMREQUEST;
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	}
}

/**
 * Time out requests older than CLEANUPTIME seconds.
 * NOTE(review): -ETIMEOUT is not a standard errno (the usual name is
 * ETIMEDOUT) — presumably defined in z90crypt's own headers; verify.
 * Caller holds queuespinlock.
 */
static inline void
helper_timeout_requests(void)
{
	struct work_element *pq_p;
	struct list_head *lptr, *tptr;
	long timelimit;

	timelimit = jiffies - (CLEANUPTIME * HZ);
	/* The list is in strict chronological order */
	list_for_each_safe(lptr, tptr, &pending_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		if (pq_p->requestsent >= timelimit)
			break;
		PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
			((struct caller *)pq_p->requestptr)->caller_id[0],
			((struct caller *)pq_p->requestptr)->caller_id[1],
			((struct caller *)pq_p->requestptr)->caller_id[2],
			((struct caller *)pq_p->requestptr)->caller_id[3],
			((struct caller *)pq_p->requestptr)->caller_id[4],
			((struct caller *)pq_p->requestptr)->caller_id[5],
			((struct caller *)pq_p->requestptr)->caller_id[6],
			((struct caller *)pq_p->requestptr)->caller_id[7]);
		pq_p->retcode = -ETIMEOUT;
		pq_p->status[0] |= STAT_FAILED;
		/* get this off any caller queue it may be on */
		unbuild_caller(LONG2DEVPTR(pq_p->devindex),
			       (struct caller *) pq_p->requestptr);
		list_del_init(lptr);
		pendingq_count--;
		pq_p->audit[1] |= FP_TIMEDOUT;
		pq_p->audit[1] |= FP_NOTPENDING;
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	}

	/**
	 * If pending count is zero, items left on the request queue may
	 * never be processed.
	 */
	if (pendingq_count <= 0) {
		list_for_each_safe(lptr, tptr, &request_list) {
			pq_p = list_entry(lptr, struct work_element, liste);
			if (pq_p->requestsent >= timelimit)
				break;
			PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
				((struct caller *)pq_p->requestptr)->caller_id[0],
				((struct caller *)pq_p->requestptr)->caller_id[1],
				((struct caller *)pq_p->requestptr)->caller_id[2],
				((struct caller *)pq_p->requestptr)->caller_id[3],
				((struct caller *)pq_p->requestptr)->caller_id[4],
				((struct caller *)pq_p->requestptr)->caller_id[5],
				((struct caller *)pq_p->requestptr)->caller_id[6],
				((struct caller *)pq_p->requestptr)->caller_id[7]);
			pq_p->retcode = -ETIMEOUT;
			pq_p->status[0] |= STAT_FAILED;
			list_del_init(lptr);
			requestq_count--;
			pq_p->audit[1] |= FP_TIMEDOUT;
			pq_p->audit[1] |= FP_REMREQUEST;
			pq_p->audit[1] |= FP_AWAKENING;
			atomic_set(&pq_p->alarmrung, 1);
			wake_up(&pq_p->waitq);
		}
	}
}

/* Periodic cleanup: drain everything if no devices remain, else time
 * out stale requests. */
static void
z90crypt_cleanup_task(unsigned long ptr)
{
	PDEBUG("jiffies %ld\n", jiffies);
	spin_lock_irq(&queuespinlock);
	if (z90crypt.mask.st_count <= 0) // no devices!
		helper_drain_queues();
	else
		helper_timeout_requests();
	spin_unlock_irq(&queuespinlock);
	z90crypt_schedule_cleanup_task();
}

/* Timer callback that defers the real work to the reader tasklet. */
static void
z90crypt_schedule_reader_task(unsigned long ptr)
{
	tasklet_schedule(&reader_tasklet);
}

/**
 * Lowlevel Functions:
 *
 *   create_z90crypt:     creates and initializes basic data structures
 *   refresh_z90crypt:    re-initializes basic data structures
 *   find_crypto_devices: returns a count and mask of hardware status
 *   create_crypto_device:  builds the descriptor for a device
 *   destroy_crypto_device: unallocates the descriptor for a device
 *   destroy_z90crypt:    drains all work, unallocates structs
 */

/**
 * build the z90crypt root structure using the given domain index
 *
 * Returns 0 on success or (positive, per this driver's convention)
 * ENOMEM if the hardware block cannot be allocated.
 */
static int
create_z90crypt(int *cdx_p)
{
	struct hdware_block *hdware_blk_p;

	memset(&z90crypt, 0x00, sizeof(struct z90crypt));
	z90crypt.domain_established = 0;
	z90crypt.len = sizeof(struct z90crypt);
	z90crypt.max_count = Z90CRYPT_NUM_DEVS;
	z90crypt.cdx = *cdx_p;

	hdware_blk_p = kzalloc(sizeof(struct hdware_block), GFP_ATOMIC);
	if (!hdware_blk_p) {
		PDEBUG("kmalloc for hardware block failed\n");
		return ENOMEM;
	}
	z90crypt.hdware_info = hdware_blk_p;

	return 0;
}

/**
 * Scan every device index across all 16 domains, recording in cdx_array
 * each domain that has an online device.  Sets *correct_cdx_found when
 * the requested domain *cdx_p is seen online.  Returns the number of
 * online domains found on the first adapter that had any.
 */
static inline int
helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
{
	enum hdstat hd_stat;
	int q_depth, dev_type;
	int indx, chkdom, numdomains;

	q_depth = dev_type = numdomains = 0;
	for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
	for (indx = 0; indx < z90crypt.max_count; indx++) {
		hd_stat = HD_NOT_THERE;
		numdomains = 0;
		for (chkdom = 0; chkdom <= 15; chkdom++) {
			hd_stat = query_online(indx, chkdom, MAX_RESET,
					       &q_depth, &dev_type);
			if (hd_stat == HD_TSQ_EXCEPTION) {
				z90crypt.terminating = 1;
2766 PRINTKC("exception taken!\n"); 2767 break; 2768 } 2769 if (hd_stat == HD_ONLINE) { 2770 cdx_array[numdomains++] = chkdom; 2771 if (*cdx_p == chkdom) { 2772 *correct_cdx_found = 1; 2773 break; 2774 } 2775 } 2776 } 2777 if ((*correct_cdx_found == 1) || (numdomains != 0)) 2778 break; 2779 if (z90crypt.terminating) 2780 break; 2781 } 2782 return numdomains; 2783} 2784 2785static inline int 2786probe_crypto_domain(int *cdx_p) 2787{ 2788 int cdx_array[16]; 2789 char cdx_array_text[53], temp[5]; 2790 int correct_cdx_found, numdomains; 2791 2792 correct_cdx_found = 0; 2793 numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found); 2794 2795 if (z90crypt.terminating) 2796 return TSQ_FATAL_ERROR; 2797 2798 if (correct_cdx_found) 2799 return 0; 2800 2801 if (numdomains == 0) { 2802 PRINTKW("Unable to find crypto domain: No devices found\n"); 2803 return Z90C_NO_DEVICES; 2804 } 2805 2806 if (numdomains == 1) { 2807 if (*cdx_p == -1) { 2808 *cdx_p = cdx_array[0]; 2809 return 0; 2810 } 2811 PRINTKW("incorrect domain: specified = %d, found = %d\n", 2812 *cdx_p, cdx_array[0]); 2813 return Z90C_INCORRECT_DOMAIN; 2814 } 2815 2816 numdomains--; 2817 sprintf(cdx_array_text, "%d", cdx_array[numdomains]); 2818 while (numdomains) { 2819 numdomains--; 2820 sprintf(temp, ", %d", cdx_array[numdomains]); 2821 strcat(cdx_array_text, temp); 2822 } 2823 2824 PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n", 2825 *cdx_p, cdx_array_text); 2826 return Z90C_AMBIGUOUS_DOMAIN; 2827} 2828 2829static int 2830refresh_z90crypt(int *cdx_p) 2831{ 2832 int i, j, indx, rv; 2833 static struct status local_mask; 2834 struct device *devPtr; 2835 unsigned char oldStat, newStat; 2836 int return_unchanged; 2837 2838 if (z90crypt.len != sizeof(z90crypt)) 2839 return ENOTINIT; 2840 if (z90crypt.terminating) 2841 return TSQ_FATAL_ERROR; 2842 rv = 0; 2843 if (!z90crypt.hdware_info->hdware_mask.st_count && 2844 !z90crypt.domain_established) { 2845 rv = 
probe_crypto_domain(cdx_p); 2846 if (z90crypt.terminating) 2847 return TSQ_FATAL_ERROR; 2848 if (rv == Z90C_NO_DEVICES) 2849 return 0; // try later 2850 if (rv) 2851 return rv; 2852 z90crypt.cdx = *cdx_p; 2853 z90crypt.domain_established = 1; 2854 } 2855 rv = find_crypto_devices(&local_mask); 2856 if (rv) { 2857 PRINTK("find crypto devices returned %d\n", rv); 2858 return rv; 2859 } 2860 if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask, 2861 sizeof(struct status))) { 2862 return_unchanged = 1; 2863 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) { 2864 /** 2865 * Check for disabled cards. If any device is marked 2866 * disabled, destroy it. 2867 */ 2868 for (j = 0; 2869 j < z90crypt.hdware_info->type_mask[i].st_count; 2870 j++) { 2871 indx = z90crypt.hdware_info->type_x_addr[i]. 2872 device_index[j]; 2873 devPtr = z90crypt.device_p[indx]; 2874 if (devPtr && devPtr->disabled) { 2875 local_mask.st_mask[indx] = HD_NOT_THERE; 2876 return_unchanged = 0; 2877 } 2878 } 2879 } 2880 if (return_unchanged == 1) 2881 return 0; 2882 } 2883 2884 spin_lock_irq(&queuespinlock); 2885 for (i = 0; i < z90crypt.max_count; i++) { 2886 oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i]; 2887 newStat = local_mask.st_mask[i]; 2888 if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE)) 2889 destroy_crypto_device(i); 2890 else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) { 2891 rv = create_crypto_device(i); 2892 if (rv >= REC_FATAL_ERROR) 2893 return rv; 2894 if (rv != 0) { 2895 local_mask.st_mask[i] = HD_NOT_THERE; 2896 local_mask.st_count--; 2897 } 2898 } 2899 } 2900 memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask, 2901 sizeof(local_mask.st_mask)); 2902 z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count; 2903 z90crypt.hdware_info->hdware_mask.disabled_count = 2904 local_mask.disabled_count; 2905 refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x); 2906 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) 2907 
refresh_index_array(&(z90crypt.hdware_info->type_mask[i]), 2908 &(z90crypt.hdware_info->type_x_addr[i])); 2909 spin_unlock_irq(&queuespinlock); 2910 2911 return rv; 2912} 2913 2914static int 2915find_crypto_devices(struct status *deviceMask) 2916{ 2917 int i, q_depth, dev_type; 2918 enum hdstat hd_stat; 2919 2920 deviceMask->st_count = 0; 2921 deviceMask->disabled_count = 0; 2922 deviceMask->user_disabled_count = 0; 2923 2924 for (i = 0; i < z90crypt.max_count; i++) { 2925 hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth, 2926 &dev_type); 2927 if (hd_stat == HD_TSQ_EXCEPTION) { 2928 z90crypt.terminating = 1; 2929 PRINTKC("Exception during probe for crypto devices\n"); 2930 return TSQ_FATAL_ERROR; 2931 } 2932 deviceMask->st_mask[i] = hd_stat; 2933 if (hd_stat == HD_ONLINE) { 2934 PDEBUG("Got an online crypto!: %d\n", i); 2935 PDEBUG("Got a queue depth of %d\n", q_depth); 2936 PDEBUG("Got a device type of %d\n", dev_type); 2937 if (q_depth <= 0) 2938 return TSQ_FATAL_ERROR; 2939 deviceMask->st_count++; 2940 z90crypt.q_depth_array[i] = q_depth; 2941 z90crypt.dev_type_array[i] = dev_type; 2942 } 2943 } 2944 2945 return 0; 2946} 2947 2948static int 2949refresh_index_array(struct status *status_str, struct device_x *index_array) 2950{ 2951 int i, count; 2952 enum devstat stat; 2953 2954 i = -1; 2955 count = 0; 2956 do { 2957 stat = status_str->st_mask[++i]; 2958 if (stat == DEV_ONLINE) 2959 index_array->device_index[count++] = i; 2960 } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count)); 2961 2962 return count; 2963} 2964 2965static int 2966create_crypto_device(int index) 2967{ 2968 int rv, devstat, total_size; 2969 struct device *dev_ptr; 2970 struct status *type_str_p; 2971 int deviceType; 2972 2973 dev_ptr = z90crypt.device_p[index]; 2974 if (!dev_ptr) { 2975 total_size = sizeof(struct device) + 2976 z90crypt.q_depth_array[index] * sizeof(int); 2977 2978 dev_ptr = kzalloc(total_size, GFP_ATOMIC); 2979 if (!dev_ptr) { 2980 PRINTK("kmalloc 
device %d failed\n", index); 2981 return ENOMEM; 2982 } 2983 dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC); 2984 if (!dev_ptr->dev_resp_p) { 2985 kfree(dev_ptr); 2986 PRINTK("kmalloc device %d rec buffer failed\n", index); 2987 return ENOMEM; 2988 } 2989 dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE; 2990 INIT_LIST_HEAD(&(dev_ptr->dev_caller_list)); 2991 } 2992 2993 devstat = reset_device(index, z90crypt.cdx, MAX_RESET); 2994 if (devstat == DEV_RSQ_EXCEPTION) { 2995 PRINTK("exception during reset device %d\n", index); 2996 kfree(dev_ptr->dev_resp_p); 2997 kfree(dev_ptr); 2998 return RSQ_FATAL_ERROR; 2999 } 3000 if (devstat == DEV_ONLINE) { 3001 dev_ptr->dev_self_x = index; 3002 dev_ptr->dev_type = z90crypt.dev_type_array[index]; 3003 if (dev_ptr->dev_type == NILDEV) { 3004 rv = probe_device_type(dev_ptr); 3005 if (rv) { 3006 PRINTK("rv = %d from probe_device_type %d\n", 3007 rv, index); 3008 kfree(dev_ptr->dev_resp_p); 3009 kfree(dev_ptr); 3010 return rv; 3011 } 3012 } 3013 if (dev_ptr->dev_type == PCIXCC_UNK) { 3014 rv = probe_PCIXCC_type(dev_ptr); 3015 if (rv) { 3016 PRINTK("rv = %d from probe_PCIXCC_type %d\n", 3017 rv, index); 3018 kfree(dev_ptr->dev_resp_p); 3019 kfree(dev_ptr); 3020 return rv; 3021 } 3022 } 3023 deviceType = dev_ptr->dev_type; 3024 z90crypt.dev_type_array[index] = deviceType; 3025 if (deviceType == PCICA) 3026 z90crypt.hdware_info->device_type_array[index] = 1; 3027 else if (deviceType == PCICC) 3028 z90crypt.hdware_info->device_type_array[index] = 2; 3029 else if (deviceType == PCIXCC_MCL2) 3030 z90crypt.hdware_info->device_type_array[index] = 3; 3031 else if (deviceType == PCIXCC_MCL3) 3032 z90crypt.hdware_info->device_type_array[index] = 4; 3033 else if (deviceType == CEX2C) 3034 z90crypt.hdware_info->device_type_array[index] = 5; 3035 else if (deviceType == CEX2A) 3036 z90crypt.hdware_info->device_type_array[index] = 6; 3037 else // No idea how this would happen. 
			z90crypt.hdware_info->device_type_array[index] = -1;
	}

	/**
	 * 'q_depth' returned by the hardware is one less than
	 * the actual depth
	 */
	dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
	dev_ptr->dev_type = z90crypt.dev_type_array[index];
	dev_ptr->dev_stat = devstat;
	dev_ptr->disabled = 0;
	z90crypt.device_p[index] = dev_ptr;

	if (devstat == DEV_ONLINE) {
		/* record the device in the overall and per-type masks */
		if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
			z90crypt.mask.st_mask[index] = DEV_ONLINE;
			z90crypt.mask.st_count++;
		}
		deviceType = dev_ptr->dev_type;
		type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
		if (type_str_p->st_mask[index] != DEV_ONLINE) {
			type_str_p->st_mask[index] = DEV_ONLINE;
			type_str_p->st_count++;
		}
	}

	return 0;
}

/**
 * Free the descriptor of the device at 'index' and remove it from the
 * overall and per-type status masks.  Safe to call when no descriptor
 * exists.  Always returns 0.
 */
static int
destroy_crypto_device(int index)
{
	struct device *dev_ptr;
	int t, disabledFlag;

	dev_ptr = z90crypt.device_p[index];

	/* remember device type; get rid of device struct */
	if (dev_ptr) {
		disabledFlag = dev_ptr->disabled;
		t = dev_ptr->dev_type;
		kfree(dev_ptr->dev_resp_p);
		kfree(dev_ptr);
	} else {
		disabledFlag = 0;
		t = -1;
	}
	z90crypt.device_p[index] = 0;

	/* if the type is valid, remove the device from the type_mask */
	if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
		z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
		z90crypt.hdware_info->type_mask[t].st_count--;
		if (disabledFlag == 1)
			z90crypt.hdware_info->type_mask[t].disabled_count--;
	}
	if (z90crypt.mask.st_mask[index] != DEV_GONE) {
		z90crypt.mask.st_mask[index] = DEV_GONE;
		z90crypt.mask.st_count--;
	}
	z90crypt.hdware_info->device_type_array[index] = 0;

	return 0;
}

/* Tear down every device and wipe the driver's root structure. */
static void
destroy_z90crypt(void)
{
	int i;

	for (i = 0; i < z90crypt.max_count; i++)
		if (z90crypt.device_p[i])
			destroy_crypto_device(i);
	kfree(z90crypt.hdware_info);
	memset((void *)&z90crypt, 0, sizeof(z90crypt));
}

/* Canned request used by probe_device_type to tell PCICC from PCICA
 * apart by the response it produces.  Opaque hardware message bytes —
 * do not modify. */
static unsigned char static_testmsg[384] = {
0x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
0x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
0x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
0x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
0x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
0x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
0x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
0x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
0x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
0xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
0x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
0x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
};

/**
 * Send static_testmsg to the device and classify it from the reply:
 * a 0x0086 response marks a PCICC, anything else a PCICA.  Retries the
 * dequeue up to 6 times at 300ms intervals while the device reports
 * no work yet.  Returns 0 on success or a SEN_/REC_ failure code.
 */
static int
probe_device_type(struct device *devPtr)
{
	int rv, dv, i, index, length;
	unsigned char psmid[8];
	static unsigned char loc_testmsg[sizeof(static_testmsg)];

	index = devPtr->dev_self_x;
	rv = 0;
	do {
		memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
		length = sizeof(static_testmsg) - 24;
		/* the -24 allows for the header */
		dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
		if (dv) {
			PDEBUG("dv returned by send during probe: %d\n", dv);
			if (dv == DEV_SEN_EXCEPTION) {
				rv = SEN_FATAL_ERROR;
				PRINTKC("exception in send to AP %d\n", index);
				break;
			}
			PDEBUG("return value from send_to_AP: %d\n", rv);
			switch (dv) {
			case DEV_GONE:
				PDEBUG("dev %d not available\n", index);
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_NO_WORK:
				rv = SEN_FATAL_ERROR;
				break;
			case DEV_BAD_MESSAGE:
				rv = SEN_USER_ERROR;
				break;
			case DEV_QUEUE_FULL:
				rv = SEN_QUEUE_FULL;
				break;
			default:
				PRINTK("unknown dv=%d for dev %d\n", dv, index);
				rv = SEN_NOT_AVAIL;
				break;
			}
		}

		if (rv)
			break;

		/* poll for the reply; give the card up to 6 x 300ms */
		for (i = 0; i < 6; i++) {
			mdelay(300);
			dv = receive_from_AP(index, z90crypt.cdx,
					     devPtr->dev_resp_l,
					     devPtr->dev_resp_p, psmid);
			PDEBUG("dv returned by DQ = %d\n", dv);
			if (dv == DEV_REC_EXCEPTION) {
				rv = REC_FATAL_ERROR;
				PRINTKC("exception in dequeue %d\n",
					index);
				break;
			}
			switch (dv) {
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = REC_EMPTY;
				break;
			case DEV_NO_WORK:
				rv = REC_NO_WORK;
				break;
			case DEV_BAD_MESSAGE:
			case DEV_GONE:
			default:
				rv = REC_NO_RESPONSE;
				break;
			}
			/* retry only on REC_NO_WORK; anything else ends
			 * the poll loop (success or hard failure) */
			if ((rv != 0) && (rv != REC_NO_WORK))
				break;
			if (rv == 0)
				break;
		}
		if (rv)
			break;
		/* a leading 0x0086 in the reply identifies a PCICC */
		rv = (devPtr->dev_resp_p[0] == 0x00) &&
		     (devPtr->dev_resp_p[1] == 0x86);
		if (rv)
			devPtr->dev_type = PCICC;
		else
			devPtr->dev_type = PCICA;
		rv = 0;
	} while (0);
	/* In a general error case, the card is not marked online */
	return rv;
}

/* Canned request used by probe_PCIXCC_type to distinguish MCL2 from
 * MCL3 microcode levels.  Opaque hardware message bytes — do not
 * modify. */
static unsigned char MCL3_testmsg[] = {
0x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
};

/**
 * Distinguish an MCL2 from an MCL3 PCIXCC card: send MCL3_testmsg and
 * inspect the CPRBX in the reply — return/reason codes 8/33 identify
 * MCL2, anything else MCL3.  Same send/poll structure as
 * probe_device_type.  Returns 0 on success or a SEN_/REC_ failure code.
 */
static int
probe_PCIXCC_type(struct device *devPtr)
{
	int rv, dv, i, index, length;
	unsigned char psmid[8];
	static unsigned char loc_testmsg[548];
	struct CPRBX *cprbx_p;

	index = devPtr->dev_self_x;
	rv = 0;
	do {
		memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
		length = sizeof(MCL3_testmsg) - 0x0C;
		dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
		if (dv) {
			PDEBUG("dv returned = %d\n", dv);
			if (dv == DEV_SEN_EXCEPTION) {
				rv = SEN_FATAL_ERROR;
				PRINTKC("exception in send to AP %d\n", index);
				break;
			}
			PDEBUG("return value from send_to_AP: %d\n", rv);
			switch (dv) {
			case DEV_GONE:
				PDEBUG("dev %d not available\n", index);
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_NO_WORK:
				rv = SEN_FATAL_ERROR;
				break;
			case DEV_BAD_MESSAGE:
				rv = SEN_USER_ERROR;
				break;
			case DEV_QUEUE_FULL:
				rv = SEN_QUEUE_FULL;
				break;
			default:
				PRINTK("unknown dv=%d for dev %d\n", dv, index);
				rv = SEN_NOT_AVAIL;
				break;
			}
		}

		if (rv)
			break;

		/* poll for the reply; give the card up to 6 x 300ms */
		for (i = 0; i < 6; i++) {
			mdelay(300);
			dv = receive_from_AP(index, z90crypt.cdx,
					     devPtr->dev_resp_l,
					     devPtr->dev_resp_p, psmid);
			PDEBUG("dv returned by DQ = %d\n", dv);
			if (dv == DEV_REC_EXCEPTION) {
				rv = REC_FATAL_ERROR;
				PRINTKC("exception in dequeue %d\n",
					index);
				break;
			}
			switch (dv) {
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = REC_EMPTY;
				break;
			case DEV_NO_WORK:
				rv = REC_NO_WORK;
				break;
			case DEV_BAD_MESSAGE:
			case DEV_GONE:
			default:
				rv = REC_NO_RESPONSE;
				break;
			}
			/* retry only on REC_NO_WORK; anything else ends
			 * the poll loop (success or hard failure) */
			if ((rv != 0) && (rv != REC_NO_WORK))
				break;
			if (rv == 0)
				break;
		}
		if (rv)
			break;
		/* CPRBX sits 48 bytes into the response */
		cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
		if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
			devPtr->dev_type = PCIXCC_MCL2;
			PDEBUG("device %d is MCL2\n", index);
		} else {
			devPtr->dev_type = PCIXCC_MCL3;
			PDEBUG("device %d is MCL3\n", index);
		}
	} while (0);
	/* In a general error case, the card is not marked online */
	return rv;
}

module_init(z90crypt_init_module);
module_exit(z90crypt_cleanup_module);