Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/s390/cio/chsc.c at v2.6.20 (1608 lines, 37 kB)
/*
 * drivers/s390/cio/chsc.c
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *                         IBM Corporation
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 *            Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"

static void *sei_page;

static int new_channel_path(int chpid);

static inline void
set_chp_logically_online(int chp, int onoff)
{
	css[0]->chps[chp]->state = onoff;
}

static int
get_chp_status(int chp)
{
	return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
}

void
chsc_validate_chpids(struct subchannel *sch)
{
	int mask, chp;

	for (chp = 0; chp <= 7; chp++) {
		mask = 0x80 >> chp;
		if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
			/* disable using this path */
			sch->opm &= ~mask;
	}
}

void
chpid_is_actually_online(int chp)
{
	int state;

	state = get_chp_status(chp);
	if (state < 0) {
		need_rescan = 1;
		queue_work(slow_path_wq, &slow_path_work);
	} else
		WARN_ON(!state);
}

/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *	  process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
	int ccode, j;

	struct {
		struct chsc_header request;
		u16 reserved1a:10;
		u16 ssid:2;
		u16 reserved1b:4;
		u16 f_sch;	/* first subchannel */
		u16 reserved2;
		u16 l_sch;	/* last subchannel */
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8 sch_valid : 1;
		u8 dev_valid : 1;
		u8 st	     : 3;	/* subchannel type */
		u8 zeroes    : 3;
		u8 unit_addr;	/* unit address */
		u16 devno;	/* device number */
		u8 path_mask;
		u8 fla_valid_mask;
		u16 sch;	/* subchannel */
		u8 chpid[8];	/* chpids 0-7 */
		u16 fla[8];	/* full link addresses 0-7 */
	} *ssd_area;

	ssd_area = page;

	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;

	ssd_area->ssid = sch->schid.ssid;
	ssd_area->f_sch = sch->schid.sch_no;
	ssd_area->l_sch = sch->schid.sch_no;

	ccode = chsc(ssd_area);
	if (ccode > 0) {
		pr_debug("chsc returned with ccode = %d\n", ccode);
		return (ccode == 3) ? -ENODEV : -EBUSY;
	}

	switch (ssd_area->response.code) {
	case 0x0001: /* everything ok */
		break;
	case 0x0002:
		CIO_CRW_EVENT(2, "Invalid command!\n");
		return -EINVAL;
	case 0x0003:
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		return -EINVAL;
	case 0x0004:
		CIO_CRW_EVENT(2, "Model does not provide ssd\n");
		return -EOPNOTSUPP;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      ssd_area->response.code);
		return -EIO;
	}

	/*
	 * ssd_area->st stores the type of the detected
	 * subchannel, with the following definitions:
	 *
	 * 0: I/O subchannel:	  All fields have meaning
	 * 1: CHSC subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 * 2: Message subchannel: All fields except unit_addr
	 *			  have meaning
	 * 3: ADM subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 *
	 * Other types are currently undefined.
	 */
	if (ssd_area->st > 3) { /* uhm, that looks strange... */
		CIO_CRW_EVENT(0, "Strange subchannel type %d"
			      " for sch 0.%x.%04x\n", ssd_area->st,
			      sch->schid.ssid, sch->schid.sch_no);
		/*
		 * There may have been a new subchannel type defined in the
		 * time since this code was written; since we don't know which
		 * fields have meaning and what to do with it we just jump out
		 */
		return 0;
	} else {
		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
		CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
			      sch->schid.ssid, sch->schid.sch_no,
			      type[ssd_area->st]);

		sch->ssd_info.valid = 1;
		sch->ssd_info.type = ssd_area->st;
	}

	if (ssd_area->st == 0 || ssd_area->st == 2) {
		for (j = 0; j < 8; j++) {
			if (!((0x80 >> j) & ssd_area->path_mask &
			      ssd_area->fla_valid_mask))
				continue;
			sch->ssd_info.chpid[j] = ssd_area->chpid[j];
			sch->ssd_info.fla[j] = ssd_area->fla[j];
		}
	}
	return 0;
}

int
css_get_ssd_info(struct subchannel *sch)
{
	int ret;
	void *page;

	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	spin_lock_irq(sch->lock);
	ret = chsc_get_sch_desc_irq(sch, page);
	if (ret) {
		static int cio_chsc_err_msg;

		if (!cio_chsc_err_msg) {
			printk(KERN_ERR
			       "chsc_get_sch_descriptions:"
			       " Error %d while doing chsc; "
			       "processing some machine checks may "
			       "not work\n", ret);
			cio_chsc_err_msg = 1;
		}
	}
	spin_unlock_irq(sch->lock);
	free_page((unsigned long)page);
	if (!ret) {
		int j, chpid, mask;
		/* Allocate channel path structures, if needed. */
		for (j = 0; j < 8; j++) {
			mask = 0x80 >> j;
			chpid = sch->ssd_info.chpid[j];
			if ((sch->schib.pmcw.pim & mask) &&
			    (get_chp_status(chpid) < 0))
				new_channel_path(chpid);
		}
	}
	return ret;
}

static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	struct channel_path *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	for (j = 0; j < 8; j++) {
		mask = 0x80 >> j;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[j] == chpid->id))
			break;
	}
	if (j >= 8)
		return 0;

	spin_lock_irq(sch->lock);

	stsch(sch->schid, &schib);
	if (!schib.pmcw.dnv)
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;

	if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
	    (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
	    (sch->schib.pmcw.lpum == mask)) {
		int cc;

		cc = cio_clear(sch);
		if (cc == -ENODEV)
			goto out_unreg;
		/* Request retry of internal operation. */
		device_set_intretry(sch);
		/* Call handler. */
		if (sch->driver && sch->driver->termination)
			sch->driver->termination(&sch->dev);
		goto out_unlock;
	}

	/* trigger path verification. */
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
	else if (sch->lpm == mask)
		goto out_unreg;
out_unlock:
	spin_unlock_irq(sch->lock);
	return 0;
out_unreg:
	spin_unlock_irq(sch->lock);
	sch->lpm = 0;
	if (css_enqueue_subchannel_slow(sch->schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
	}
	return 0;
}

static inline void
s390_set_chpid_offline( __u8 chpid)
{
	char dbf_txt[15];
	struct device *dev;

	sprintf(dbf_txt, "chpr%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (get_chp_status(chpid) <= 0)
		return;
	dev = get_device(&css[0]->chps[chpid]->dev);
	bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
			 s390_subchannel_remove_chpid);

	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	put_device(dev);
}

struct res_acc_data {
	struct channel_path *chp;
	u32 fla_mask;
	u16 fla;
};

static int
s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
{
	int found;
	int chp;
	int ccode;

	found = 0;
	for (chp = 0; chp <= 7; chp++)
		/*
		 * check if chpid is in information updated by ssd
		 */
		if (sch->ssd_info.valid &&
		    sch->ssd_info.chpid[chp] == res_data->chp->id &&
		    (sch->ssd_info.fla[chp] & res_data->fla_mask)
		    == res_data->fla) {
			found = 1;
			break;
		}

	if (found == 0)
		return 0;

	/*
	 * Do a stsch to update our subchannel structure with the
	 * new path information and eventually check for logically
	 * offline chpids.
	 */
	ccode = stsch(sch->schid, &sch->schib);
	if (ccode > 0)
		return 0;

	return 0x80 >> chp;
}

static inline int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}

static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data;
	struct subchannel *sch;

	res_data = data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

	spin_lock_irq(sch->lock);

	chp_mask = s390_process_res_acc_sch(res_data, sch);

	if (chp_mask == 0) {
		spin_unlock_irq(sch->lock);
		put_device(&sch->dev);
		return 0;
	}
	old_lpm = sch->lpm;
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
}


static int
s390_process_res_acc (struct res_acc_data *res_data)
{
	int rc;
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x", res_data->chp->id);
	CIO_TRACE_EVENT( 2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT( 2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */
	rc = for_each_subchannel(__s390_process_res_acc, res_data);
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	else if (rc != -EAGAIN)
		rc = 0;
	return rc;
}

static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8 iq;
		u8 ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} *lir;

	lir = data;
	if (!(lir->iq&0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0]&0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0]&0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0]&0x000000ff);
}

int
chsc_process_crw(void)
{
	int chpid, ret;
	struct res_acc_data res_data;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8 flags;
		u8 vf;		/* validity flags */
		u8 rs;		/* reporting source */
		u8 cc;		/* content code */
		u16 fla;	/* full link address */
		u16 rsid;	/* reporting source id */
		u32 reserved5;
		u32 reserved6;
		u32 ccdf[96];	/* content-code dependent field */
		/* ccdf has to be big enough for a link-incident record */
	} *sei_area;

	if (!sei_page)
		return 0;
	/*
	 * build the chsc request block for store event information
	 * and do the call
	 * This function is only called by the machine check handler thread,
	 * so we don't need locking for the sei_page.
	 */
	sei_area = sei_page;

	CIO_TRACE_EVENT( 2, "prcss");
	ret = 0;
	do {
		int ccode, status;
		struct device *dev;
		memset(sei_area, 0, sizeof(*sei_area));
		memset(&res_data, 0, sizeof(struct res_acc_data));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;

		ccode = chsc(sei_area);
		if (ccode > 0)
			return 0;

		switch (sei_area->response.code) {
			/* for debug purposes, check for problems */
		case 0x0001:
			CIO_CRW_EVENT(4, "chsc_process_crw: event information "
				      "successfully stored\n");
			break; /* everything ok */
		case 0x0002:
			CIO_CRW_EVENT(2,
				      "chsc_process_crw: invalid command!\n");
			return 0;
		case 0x0003:
			CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
				      "request block!\n");
			return 0;
		case 0x0005:
			CIO_CRW_EVENT(2, "chsc_process_crw: no event "
				      "information stored\n");
			return 0;
		default:
			CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
				      sei_area->response.code);
			return 0;
		}

		/* Check if we might have lost some information. */
		if (sei_area->flags & 0x40)
			CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
				      "has been lost due to overflow!\n");

		if (sei_area->rs != 4) {
			CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
				      "(%04X) isn't a chpid!\n",
				      sei_area->rsid);
			continue;
		}

		/* which kind of information was stored? */
		switch (sei_area->cc) {
		case 1: /* link incident*/
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports link incident,"
				      " reporting source is chpid %x\n",
				      sei_area->rsid);
			chpid = __get_chpid_from_lir(sei_area->ccdf);
			if (chpid < 0)
				CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
					      __FUNCTION__);
			else
				s390_set_chpid_offline(chpid);
			break;

		case 2: /* i/o resource accessibiliy */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports some I/O "
				      "devices may have become accessible\n");
			pr_debug("Data received after sei: \n");
			pr_debug("Validity flags: %x\n", sei_area->vf);

			/* allocate a new channel path structure, if needed */
			status = get_chp_status(sei_area->rsid);
			if (status < 0)
				new_channel_path(sei_area->rsid);
			else if (!status)
				break;
			dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
			res_data.chp = to_channelpath(dev);
			pr_debug("chpid: %x", sei_area->rsid);
			if ((sei_area->vf & 0xc0) != 0) {
				res_data.fla = sei_area->fla;
				if ((sei_area->vf & 0xc0) == 0xc0) {
					pr_debug(" full link addr: %x",
						 sei_area->fla);
					res_data.fla_mask = 0xffff;
				} else {
					pr_debug(" link addr: %x",
						 sei_area->fla);
					res_data.fla_mask = 0xff00;
				}
			}
			ret = s390_process_res_acc(&res_data);
			pr_debug("\n\n");
			put_device(dev);
			break;

		default: /* other stuff */
			CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
				      sei_area->cc);
			break;
		}
	} while (sei_area->flags & 0x80);
	return ret;
}

static inline int
__chp_add_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;

	if (stsch(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}


static int
__chp_add(struct subchannel_id schid, void *data)
{
	int i, mask;
	struct channel_path *chp;
	struct subchannel *sch;

	chp = data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if the subchannel is now available. */
		return __chp_add_new_sch(schid);
	spin_lock_irq(sch->lock);
	for (i=0; i<8; i++) {
		mask = 0x80 >> i;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[i] == chp->id)) {
			if (stsch(sch->schid, &sch->schib) != 0) {
				/* Endgame. */
				spin_unlock_irq(sch->lock);
				return -ENXIO;
			}
			break;
		}
	}
	if (i==8) {
		spin_unlock_irq(sch->lock);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | mask) & sch->opm;

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
}

static int
chp_add(int chpid)
{
	int rc;
	char dbf_txt[15];
	struct device *dev;

	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	sprintf(dbf_txt, "cadd%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	dev = get_device(&css[0]->chps[chpid]->dev);
	rc = for_each_subchannel(__chp_add, to_channelpath(dev));
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	if (rc != -EAGAIN)
		rc = 0;
	put_device(dev);
	return rc;
}

/*
 * Handling of crw machine checks with channel path source.
 */
int
chp_process_crw(int chpid, int on)
{
	if (on == 0) {
		/* Path has gone. We use the link incident routine.*/
		s390_set_chpid_offline(chpid);
		return 0; /* De-register is async anyway. */
	}
	/*
	 * Path has come. Allocate a new channel path structure,
	 * if needed.
	 */
	if (get_chp_status(chpid) < 0)
		new_channel_path(chpid);
	/* Avoid the extra overhead in process_rec_acc. */
	return chp_add(chpid);
}

static inline int check_for_io_on_path(struct subchannel *sch, int index)
{
	int cc;

	cc = stsch(sch->schid, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
		return 1;
	return 0;
}

static void terminate_internal_io(struct subchannel *sch)
{
	if (cio_clear(sch)) {
		/* Recheck device in case clear failed. */
		sch->lpm = 0;
		if (device_trigger_verify(sch) != 0) {
			if(css_enqueue_subchannel_slow(sch->schid)) {
				css_clear_subchannel_slow_list();
				need_rescan = 1;
			}
		}
		return;
	}
	/* Request retry of internal operation. */
	device_set_intretry(sch);
	/* Call handler. */
	if (sch->driver && sch->driver->termination)
		sch->driver->termination(&sch->dev);
}

static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
	int chp, old_lpm;
	unsigned long flags;

	if (!sch->ssd_info.valid)
		return;

	spin_lock_irqsave(sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid)
			continue;

		if (on) {
			sch->opm |= (0x80 >> chp);
			sch->lpm |= (0x80 >> chp);
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
			break;
		}
		sch->opm &= ~(0x80 >> chp);
		sch->lpm &= ~(0x80 >> chp);
		if (check_for_io_on_path(sch, chp)) {
			if (device_is_online(sch))
				/* Path verification is done after killing. */
				device_kill_io(sch);
			else
				/* Kill and retry internal I/O. */
				terminate_internal_io(sch);
		} else if (!sch->lpm) {
			if (device_trigger_verify(sch) != 0) {
				if (css_enqueue_subchannel_slow(sch->schid)) {
					css_clear_subchannel_slow_list();
					need_rescan = 1;
				}
			}
		} else if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
}

static int
s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int
s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct subchannel *sch;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		put_device(&sch->dev);
		return 0;
	}
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	if (css_enqueue_subchannel_slow(schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}

/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int
s390_vary_chpid( __u8 chpid, int on)
{
	char dbf_text[15];
	int status;

	sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
	CIO_TRACE_EVENT( 2, dbf_text);

	status = get_chp_status(chpid);
	if (status < 0) {
		printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
		return -EINVAL;
	}

	if (!on && !status) {
		printk(KERN_ERR "chpid %x is already offline\n", chpid);
		return -EINVAL;
	}

	set_chp_logically_online(chpid, on);

	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */

	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
			 s390_subchannel_vary_chpid_on :
			 s390_subchannel_vary_chpid_off);
	if (on)
		/* Scan for new devices on varied on path. */
		for_each_subchannel(__s390_vary_chpid_on, NULL);
	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	return 0;
}

/*
 * Channel measurement related functions
 */
static ssize_t
chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
			   size_t count)
{
	struct channel_path *chp;
	unsigned int size;

	chp = to_channelpath(container_of(kobj, struct device, kobj));
	if (!chp->cmg_chars)
		return 0;

	size = sizeof(struct cmg_chars);

	if (off > size)
		return 0;
	if (off + count > size)
		count = size - off;
	memcpy(buf, chp->cmg_chars + off, count);
	return count;
}

static struct bin_attribute chp_measurement_chars_attr = {
	.attr = {
		.name = "measurement_chars",
		.mode = S_IRUSR,
		.owner = THIS_MODULE,
	},
	.size = sizeof(struct cmg_chars),
	.read = chp_measurement_chars_read,
};

static void
chp_measurement_copy_block(struct cmg_entry *buf,
			   struct channel_subsystem *css, int chpid)
{
	void *area;
	struct cmg_entry *entry, reference_buf;
	int idx;

	if (chpid < 128) {
		area = css->cub_addr1;
		idx = chpid;
	} else {
		area = css->cub_addr2;
		idx = chpid - 128;
	}
	entry = area + (idx * sizeof(struct cmg_entry));
	do {
		memcpy(buf, entry, sizeof(*entry));
		memcpy(&reference_buf, entry, sizeof(*entry));
	} while (reference_buf.values[0] != buf->values[0]);
}

static ssize_t
chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
{
	struct channel_path *chp;
	struct channel_subsystem *css;
	unsigned int size;

	chp = to_channelpath(container_of(kobj, struct device, kobj));
	css = to_css(chp->dev.parent);

	size = sizeof(struct cmg_entry);

	/* Only allow single reads. */
	if (off || count < size)
		return 0;
	chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
	count = size;
	return count;
}

static struct bin_attribute chp_measurement_attr = {
	.attr = {
		.name = "measurement",
		.mode = S_IRUSR,
		.owner = THIS_MODULE,
	},
	.size = sizeof(struct cmg_entry),
	.read = chp_measurement_read,
};

static void
chsc_remove_chp_cmg_attr(struct channel_path *chp)
{
	sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr);
	sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr);
}

static int
chsc_add_chp_cmg_attr(struct channel_path *chp)
{
	int ret;

	ret = sysfs_create_bin_file(&chp->dev.kobj,
				    &chp_measurement_chars_attr);
	if (ret)
		return ret;
	ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr);
	if (ret)
		sysfs_remove_bin_file(&chp->dev.kobj,
				      &chp_measurement_chars_attr);
	return ret;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chsc_remove_chp_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chsc_add_chp_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chsc_remove_chp_cmg_attr(css->chps[i]);
	}
	return ret;
}


static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0001: /* Success. */
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide secm\n");
		ret = -EOPNOTSUPP;
		break;
	case 0x0102: /* cub adresses incorrect */
		CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
		ret = -EINVAL;
		break;
	case 0x0103: /* key error */
		CIO_CRW_EVENT(2, "Access key error in secm\n");
		ret = -EINVAL;
		break;
	case 0x0105: /* error while starting */
		CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
		ret = -EIO;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      secm_area->response.code);
		ret = -EIO;
	}
	return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	mutex_lock(&css->mutex);
	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			mutex_unlock(&css->mutex);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (enable && !css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	mutex_unlock(&css->mutex);
	free_page((unsigned long)secm_area);
	return ret;
}

/*
 * Files for the channel path entries.
 */
static ssize_t
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
		sprintf(buf, "offline\n"));
}

static ssize_t
chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct channel_path *cp = container_of(dev, struct channel_path, dev);
	char cmd[10];
	int num_args;
	int error;

	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	if (!strnicmp(cmd, "on", 2))
		error = s390_vary_chpid(cp->id, 1);
	else if (!strnicmp(cmd, "off", 3))
		error = s390_vary_chpid(cp->id, 0);
	else
		error = -EINVAL;

	return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);

static ssize_t
chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return sprintf(buf, "%x\n", chp->desc.desc);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static ssize_t
chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->cmg == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->cmg);
}

static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);

static ssize_t
chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->shared == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->shared);
}

static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);

static struct attribute * chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_type.attr,
	&dev_attr_cmg.attr,
	&dev_attr_shared.attr,
	NULL,
};

static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
};

static void
chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = container_of(dev, struct channel_path, dev);
	kfree(cp);
}

static int
chsc_determine_channel_path_description(int chpid,
					struct channel_path_desc *desc)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->first_chpid = chpid;
	scpd_area->last_chpid = chpid;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] = chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}

static int
chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->id;
	scmc_area->last_chpid = chp->id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scmc_area->response.code) {
	case 0x0001: /* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Invalid bit combination. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided. */
		CIO_CRW_EVENT(2, "Model does not provide scmc\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scmc_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}

/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
static int
new_channel_path(int chpid)
{
	struct channel_path *chp;
	int ret;

	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp)
		return -ENOMEM;

	/* fill in status, etc. */
	chp->id = chpid;
	chp->state = 1;
	chp->dev.parent = &css[0]->device;
	chp->dev.release = chp_release;
	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);

	/* Obtain channel path description and fill it in. */
	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
	if (ret)
		goto out_free;
	/* Get channel-measurement characteristics. */
	if (css_characteristics_avail && css_chsc_characteristics.scmc
	    && css_chsc_characteristics.secm) {
		ret = chsc_get_channel_measurement_chars(chp);
		if (ret)
			goto out_free;
	} else {
		static int msg_done;

		if (!msg_done) {
			printk(KERN_WARNING "cio: Channel measurements not "
			       "available, continuing.\n");
			msg_done = 1;
		}
		chp->cmg = -1;
	}

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		printk(KERN_WARNING "%s: could not register %02x\n",
		       __func__, chpid);
		goto out_free;
	}
	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
	if (ret) {
		device_unregister(&chp->dev);
		goto out_free;
	}
	mutex_lock(&css[0]->mutex);
	if (css[0]->cm_enabled) {
		ret = chsc_add_chp_cmg_attr(chp);
		if (ret) {
			sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
			device_unregister(&chp->dev);
			mutex_unlock(&css[0]->mutex);
			goto out_free;
		}
	}
	css[0]->chps[chpid] = chp;
	mutex_unlock(&css[0]->mutex);
	return ret;
out_free:
	kfree(chp);
	return ret;
}

void *
chsc_get_chp_desc(struct subchannel *sch, int chp_no)
{
	struct channel_path *chp;
	struct channel_path_desc *desc;

	chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
	if (!chp)
		return NULL;
	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
	if (!desc)
		return NULL;
	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
	return desc;
}

static int __init
chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		printk(KERN_WARNING"Can't allocate page for processing of " \
		       "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}

int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	switch (sda_area->response.code) {
	case 0x0001: /* everything ok */
		ret = 0;
		break;
	case 0x0003: /* invalid request block */
	case 0x0007:
		ret = -EINVAL;
		break;
	case 0x0004: /* command not provided */
	case 0x0101: /* facility not provided */
		ret = -EOPNOTSUPP;
		break;
	default: /* something went wrong */
		ret = -EIO;
	}
 out:
	free_page((unsigned long)sda_area);
	return ret;
}

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area) {
		printk(KERN_WARNING"cio: Was not able to determine available" \
		       "CHSCs due to no memory.\n");
		return -ENOMEM;
	}

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		printk(KERN_WARNING"cio: Was not able to determine " \
		       "available CHSCs, cc=%i.\n", result);
		result = -EIO;
		goto exit;
	}

	if (scsc_area->response.code != 1) {
		printk(KERN_WARNING"cio: Was not able to determine " \
		       "available CHSCs.\n");
		result = -EIO;
		goto exit;
	}
	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));
exit:
	free_page ((unsigned long) scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);