Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IPMI: Style fixes in the system interface code

Lots of style fixes for the IPMI system interface driver. No functional
changes. Basically fixes everything reported by checkpatch and fixes the
comment style.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Corey Minyard <cminyard@mvista.com>
Cc: Rocky Craig <rocky.craig@hp.com>
Cc: Hannes Schulz <schulz@schwaar.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Corey Minyard and committed by Linus Torvalds.
c305e3d3 c70d7499

+574 -436
+94 -59
drivers/char/ipmi/ipmi_bt_sm.c
··· 37 37 #define BT_DEBUG_ENABLE 1 /* Generic messages */ 38 38 #define BT_DEBUG_MSG 2 /* Prints all request/response buffers */ 39 39 #define BT_DEBUG_STATES 4 /* Verbose look at state changes */ 40 - /* BT_DEBUG_OFF must be zero to correspond to the default uninitialized 41 - value */ 40 + /* 41 + * BT_DEBUG_OFF must be zero to correspond to the default uninitialized 42 + * value 43 + */ 42 44 43 45 static int bt_debug; /* 0 == BT_DEBUG_OFF */ 44 46 45 47 module_param(bt_debug, int, 0644); 46 48 MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); 47 49 48 - /* Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds, 49 - and 64 byte buffers. However, one HP implementation wants 255 bytes of 50 - buffer (with a documented message of 160 bytes) so go for the max. 51 - Since the Open IPMI architecture is single-message oriented at this 52 - stage, the queue depth of BT is of no concern. */ 50 + /* 51 + * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds, 52 + * and 64 byte buffers. However, one HP implementation wants 255 bytes of 53 + * buffer (with a documented message of 160 bytes) so go for the max. 54 + * Since the Open IPMI architecture is single-message oriented at this 55 + * stage, the queue depth of BT is of no concern. 56 + */ 53 57 54 58 #define BT_NORMAL_TIMEOUT 5 /* seconds */ 55 59 #define BT_NORMAL_RETRY_LIMIT 2 56 60 #define BT_RESET_DELAY 6 /* seconds after warm reset */ 57 61 58 - /* States are written in chronological order and usually cover 59 - multiple rows of the state table discussion in the IPMI spec. */ 62 + /* 63 + * States are written in chronological order and usually cover 64 + * multiple rows of the state table discussion in the IPMI spec. 65 + */ 60 66 61 67 enum bt_states { 62 68 BT_STATE_IDLE = 0, /* Order is critical in this list */ ··· 82 76 BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ 83 77 }; 84 78 85 - /* Macros seen at the end of state "case" blocks. 
They help with legibility 86 - and debugging. */ 79 + /* 80 + * Macros seen at the end of state "case" blocks. They help with legibility 81 + * and debugging. 82 + */ 87 83 88 - #define BT_STATE_CHANGE(X,Y) { bt->state = X; return Y; } 84 + #define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; } 89 85 90 86 #define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; } 91 87 ··· 118 110 #define BT_H_BUSY 0x40 119 111 #define BT_B_BUSY 0x80 120 112 121 - /* Some bits are toggled on each write: write once to set it, once 122 - more to clear it; writing a zero does nothing. To absolutely 123 - clear it, check its state and write if set. This avoids the "get 124 - current then use as mask" scheme to modify one bit. Note that the 125 - variable "bt" is hardcoded into these macros. */ 113 + /* 114 + * Some bits are toggled on each write: write once to set it, once 115 + * more to clear it; writing a zero does nothing. To absolutely 116 + * clear it, check its state and write if set. This avoids the "get 117 + * current then use as mask" scheme to modify one bit. Note that the 118 + * variable "bt" is hardcoded into these macros. 119 + */ 126 120 127 121 #define BT_STATUS bt->io->inputb(bt->io, 0) 128 122 #define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x) ··· 135 125 #define BT_INTMASK_R bt->io->inputb(bt->io, 2) 136 126 #define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x) 137 127 138 - /* Convenience routines for debugging. These are not multi-open safe! 139 - Note the macros have hardcoded variables in them. */ 128 + /* 129 + * Convenience routines for debugging. These are not multi-open safe! 130 + * Note the macros have hardcoded variables in them. 
131 + */ 140 132 141 133 static char *state2txt(unsigned char state) 142 134 { ··· 194 182 static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io) 195 183 { 196 184 memset(bt, 0, sizeof(struct si_sm_data)); 197 - if (bt->io != io) { /* external: one-time only things */ 185 + if (bt->io != io) { 186 + /* external: one-time only things */ 198 187 bt->io = io; 199 188 bt->seq = 0; 200 189 } ··· 242 229 printk(KERN_WARNING "BT: +++++++++++++++++ New command\n"); 243 230 printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2); 244 231 for (i = 0; i < size; i ++) 245 - printk (" %02x", data[i]); 232 + printk(" %02x", data[i]); 246 233 printk("\n"); 247 234 } 248 235 bt->write_data[0] = size + 1; /* all data plus seq byte */ ··· 259 246 return 0; 260 247 } 261 248 262 - /* After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE 263 - it calls this. Strip out the length and seq bytes. */ 249 + /* 250 + * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE 251 + * it calls this. Strip out the length and seq bytes. 252 + */ 264 253 265 254 static int bt_get_result(struct si_sm_data *bt, 266 255 unsigned char *data, ··· 284 269 memcpy(data + 2, bt->read_data + 4, msg_len - 2); 285 270 286 271 if (bt_debug & BT_DEBUG_MSG) { 287 - printk (KERN_WARNING "BT: result %d bytes:", msg_len); 272 + printk(KERN_WARNING "BT: result %d bytes:", msg_len); 288 273 for (i = 0; i < msg_len; i++) 289 274 printk(" %02x", data[i]); 290 - printk ("\n"); 275 + printk("\n"); 291 276 } 292 277 return msg_len; 293 278 } ··· 307 292 BT_INTMASK_W(BT_BMC_HWRST); 308 293 } 309 294 310 - /* Get rid of an unwanted/stale response. This should only be needed for 311 - BMCs that support multiple outstanding requests. */ 295 + /* 296 + * Get rid of an unwanted/stale response. This should only be needed for 297 + * BMCs that support multiple outstanding requests. 
298 + */ 312 299 313 300 static void drain_BMC2HOST(struct si_sm_data *bt) 314 301 { ··· 343 326 printk(KERN_WARNING "BT: write %d bytes seq=0x%02X", 344 327 bt->write_count, bt->seq); 345 328 for (i = 0; i < bt->write_count; i++) 346 - printk (" %02x", bt->write_data[i]); 347 - printk ("\n"); 329 + printk(" %02x", bt->write_data[i]); 330 + printk("\n"); 348 331 } 349 332 for (i = 0; i < bt->write_count; i++) 350 333 HOST2BMC(bt->write_data[i]); ··· 354 337 { 355 338 unsigned char i; 356 339 357 - /* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode. 358 - Keep layout of first four bytes aligned with write_data[] */ 340 + /* 341 + * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode. 342 + * Keep layout of first four bytes aligned with write_data[] 343 + */ 359 344 360 345 bt->read_data[0] = BMC2HOST; 361 346 bt->read_count = bt->read_data[0]; ··· 381 362 if (max > 16) 382 363 max = 16; 383 364 for (i = 0; i < max; i++) 384 - printk (" %02x", bt->read_data[i]); 385 - printk ("%s\n", bt->read_count == max ? "" : " ..."); 365 + printk(KERN_CONT " %02x", bt->read_data[i]); 366 + printk(KERN_CONT "%s\n", bt->read_count == max ? "" : " ..."); 386 367 } 387 368 388 369 /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */ ··· 421 402 printk(KERN_WARNING "IPMI BT: %s in %s %s ", /* open-ended line */ 422 403 reason, STATE2TXT, STATUS2TXT); 423 404 424 - /* Per the IPMI spec, retries are based on the sequence number 425 - known only to this module, so manage a restart here. */ 405 + /* 406 + * Per the IPMI spec, retries are based on the sequence number 407 + * known only to this module, so manage a restart here. 
408 + */ 426 409 (bt->error_retries)++; 427 410 if (bt->error_retries < bt->BT_CAP_retries) { 428 411 printk("%d retries left\n", ··· 433 412 return SI_SM_CALL_WITHOUT_DELAY; 434 413 } 435 414 436 - printk("failed %d retries, sending error response\n", 437 - bt->BT_CAP_retries); 415 + printk(KERN_WARNING "failed %d retries, sending error response\n", 416 + bt->BT_CAP_retries); 438 417 if (!bt->nonzero_status) 439 418 printk(KERN_ERR "IPMI BT: stuck, try power cycle\n"); 440 419 ··· 445 424 return SI_SM_CALL_WITHOUT_DELAY; 446 425 } 447 426 448 - /* Concoct a useful error message, set up the next state, and 449 - be done with this sequence. */ 427 + /* 428 + * Concoct a useful error message, set up the next state, and 429 + * be done with this sequence. 430 + */ 450 431 451 432 bt->state = BT_STATE_IDLE; 452 433 switch (cCode) { ··· 484 461 last_printed = bt->state; 485 462 } 486 463 487 - /* Commands that time out may still (eventually) provide a response. 488 - This stale response will get in the way of a new response so remove 489 - it if possible (hopefully during IDLE). Even if it comes up later 490 - it will be rejected by its (now-forgotten) seq number. */ 464 + /* 465 + * Commands that time out may still (eventually) provide a response. 466 + * This stale response will get in the way of a new response so remove 467 + * it if possible (hopefully during IDLE). Even if it comes up later 468 + * it will be rejected by its (now-forgotten) seq number. 
469 + */ 491 470 492 471 if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) { 493 472 drain_BMC2HOST(bt); ··· 497 472 } 498 473 499 474 if ((bt->state != BT_STATE_IDLE) && 500 - (bt->state < BT_STATE_PRINTME)) { /* check timeout */ 475 + (bt->state < BT_STATE_PRINTME)) { 476 + /* check timeout */ 501 477 bt->timeout -= time; 502 478 if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) 503 479 return error_recovery(bt, ··· 508 482 509 483 switch (bt->state) { 510 484 511 - /* Idle state first checks for asynchronous messages from another 512 - channel, then does some opportunistic housekeeping. */ 485 + /* 486 + * Idle state first checks for asynchronous messages from another 487 + * channel, then does some opportunistic housekeeping. 488 + */ 513 489 514 490 case BT_STATE_IDLE: 515 491 if (status & BT_SMS_ATN) { ··· 559 531 BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); 560 532 BT_CONTROL(BT_H_BUSY); /* set */ 561 533 562 - /* Uncached, ordered writes should just proceeed serially but 563 - some BMCs don't clear B2H_ATN with one hit. Fast-path a 564 - workaround without too much penalty to the general case. */ 534 + /* 535 + * Uncached, ordered writes should just proceeed serially but 536 + * some BMCs don't clear B2H_ATN with one hit. Fast-path a 537 + * workaround without too much penalty to the general case. 
538 + */ 565 539 566 540 BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */ 567 541 BT_STATE_CHANGE(BT_STATE_CLEAR_B2H, 568 542 SI_SM_CALL_WITHOUT_DELAY); 569 543 570 544 case BT_STATE_CLEAR_B2H: 571 - if (status & BT_B2H_ATN) { /* keep hitting it */ 545 + if (status & BT_B2H_ATN) { 546 + /* keep hitting it */ 572 547 BT_CONTROL(BT_B2H_ATN); 573 548 BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); 574 549 } ··· 579 548 SI_SM_CALL_WITHOUT_DELAY); 580 549 581 550 case BT_STATE_READ_BYTES: 582 - if (!(status & BT_H_BUSY)) /* check in case of retry */ 551 + if (!(status & BT_H_BUSY)) 552 + /* check in case of retry */ 583 553 BT_CONTROL(BT_H_BUSY); 584 554 BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */ 585 555 i = read_all_bytes(bt); /* true == packet seq match */ ··· 631 599 BT_STATE_CHANGE(BT_STATE_XACTION_START, 632 600 SI_SM_CALL_WITH_DELAY); 633 601 634 - /* Get BT Capabilities, using timing of upper level state machine. 635 - Set outreqs to prevent infinite loop on timeout. */ 602 + /* 603 + * Get BT Capabilities, using timing of upper level state machine. 604 + * Set outreqs to prevent infinite loop on timeout. 605 + */ 636 606 case BT_STATE_CAPABILITIES_BEGIN: 637 607 bt->BT_CAP_outreqs = 1; 638 608 { ··· 672 638 673 639 static int bt_detect(struct si_sm_data *bt) 674 640 { 675 - /* It's impossible for the BT status and interrupt registers to be 676 - all 1's, (assuming a properly functioning, self-initialized BMC) 677 - but that's what you get from reading a bogus address, so we 678 - test that first. The calling routine uses negative logic. */ 641 + /* 642 + * It's impossible for the BT status and interrupt registers to be 643 + * all 1's, (assuming a properly functioning, self-initialized BMC) 644 + * but that's what you get from reading a bogus address, so we 645 + * test that first. The calling routine uses negative logic. 
646 + */ 679 647 680 648 if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) 681 649 return 1; ··· 694 658 return sizeof(struct si_sm_data); 695 659 } 696 660 697 - struct si_sm_handlers bt_smi_handlers = 698 - { 661 + struct si_sm_handlers bt_smi_handlers = { 699 662 .init_data = bt_init_data, 700 663 .start_transaction = bt_start_transaction, 701 664 .get_result = bt_get_result,
+91 -62
drivers/char/ipmi/ipmi_kcs_sm.c
··· 60 60 61 61 /* The states the KCS driver may be in. */ 62 62 enum kcs_states { 63 - KCS_IDLE, /* The KCS interface is currently 64 - doing nothing. */ 65 - KCS_START_OP, /* We are starting an operation. The 66 - data is in the output buffer, but 67 - nothing has been done to the 68 - interface yet. This was added to 69 - the state machine in the spec to 70 - wait for the initial IBF. */ 71 - KCS_WAIT_WRITE_START, /* We have written a write cmd to the 72 - interface. */ 73 - KCS_WAIT_WRITE, /* We are writing bytes to the 74 - interface. */ 75 - KCS_WAIT_WRITE_END, /* We have written the write end cmd 76 - to the interface, and still need to 77 - write the last byte. */ 78 - KCS_WAIT_READ, /* We are waiting to read data from 79 - the interface. */ 80 - KCS_ERROR0, /* State to transition to the error 81 - handler, this was added to the 82 - state machine in the spec to be 83 - sure IBF was there. */ 84 - KCS_ERROR1, /* First stage error handler, wait for 85 - the interface to respond. */ 86 - KCS_ERROR2, /* The abort cmd has been written, 87 - wait for the interface to 88 - respond. */ 89 - KCS_ERROR3, /* We wrote some data to the 90 - interface, wait for it to switch to 91 - read mode. */ 92 - KCS_HOSED /* The hardware failed to follow the 93 - state machine. */ 63 + /* The KCS interface is currently doing nothing. */ 64 + KCS_IDLE, 65 + 66 + /* 67 + * We are starting an operation. The data is in the output 68 + * buffer, but nothing has been done to the interface yet. This 69 + * was added to the state machine in the spec to wait for the 70 + * initial IBF. 71 + */ 72 + KCS_START_OP, 73 + 74 + /* We have written a write cmd to the interface. */ 75 + KCS_WAIT_WRITE_START, 76 + 77 + /* We are writing bytes to the interface. */ 78 + KCS_WAIT_WRITE, 79 + 80 + /* 81 + * We have written the write end cmd to the interface, and 82 + * still need to write the last byte. 83 + */ 84 + KCS_WAIT_WRITE_END, 85 + 86 + /* We are waiting to read data from the interface. 
*/ 87 + KCS_WAIT_READ, 88 + 89 + /* 90 + * State to transition to the error handler, this was added to 91 + * the state machine in the spec to be sure IBF was there. 92 + */ 93 + KCS_ERROR0, 94 + 95 + /* 96 + * First stage error handler, wait for the interface to 97 + * respond. 98 + */ 99 + KCS_ERROR1, 100 + 101 + /* 102 + * The abort cmd has been written, wait for the interface to 103 + * respond. 104 + */ 105 + KCS_ERROR2, 106 + 107 + /* 108 + * We wrote some data to the interface, wait for it to switch 109 + * to read mode. 110 + */ 111 + KCS_ERROR3, 112 + 113 + /* The hardware failed to follow the state machine. */ 114 + KCS_HOSED 94 115 }; 95 116 96 117 #define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH ··· 123 102 #define MAX_ERROR_RETRIES 10 124 103 #define ERROR0_OBF_WAIT_JIFFIES (2*HZ) 125 104 126 - struct si_sm_data 127 - { 105 + struct si_sm_data { 128 106 enum kcs_states state; 129 107 struct si_sm_io *io; 130 108 unsigned char write_data[MAX_KCS_WRITE_SIZE]; ··· 207 187 (kcs->error_retries)++; 208 188 if (kcs->error_retries > MAX_ERROR_RETRIES) { 209 189 if (kcs_debug & KCS_DEBUG_ENABLE) 210 - printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n", reason); 190 + printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n", 191 + reason); 211 192 kcs->state = KCS_HOSED; 212 193 } else { 213 194 kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES; ··· 292 271 293 272 if (kcs_debug & KCS_DEBUG_MSG) { 294 273 printk(KERN_DEBUG "start_kcs_transaction -"); 295 - for (i = 0; i < size; i ++) { 274 + for (i = 0; i < size; i++) 296 275 printk(" %02x", (unsigned char) (data [i])); 297 - } 298 - printk ("\n"); 276 + printk("\n"); 299 277 } 300 278 kcs->error_retries = 0; 301 279 memcpy(kcs->write_data, data, size); ··· 325 305 kcs->read_pos = 3; 326 306 } 327 307 if (kcs->truncated) { 328 - /* Report a truncated error. We might overwrite 329 - another error, but that's too bad, the user needs 330 - to know it was truncated. */ 308 + /* 309 + * Report a truncated error. 
We might overwrite 310 + * another error, but that's too bad, the user needs 311 + * to know it was truncated. 312 + */ 331 313 data[2] = IPMI_ERR_MSG_TRUNCATED; 332 314 kcs->truncated = 0; 333 315 } ··· 337 315 return kcs->read_pos; 338 316 } 339 317 340 - /* This implements the state machine defined in the IPMI manual, see 341 - that for details on how this works. Divide that flowchart into 342 - sections delimited by "Wait for IBF" and this will become clear. */ 318 + /* 319 + * This implements the state machine defined in the IPMI manual, see 320 + * that for details on how this works. Divide that flowchart into 321 + * sections delimited by "Wait for IBF" and this will become clear. 322 + */ 343 323 static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) 344 324 { 345 325 unsigned char status; ··· 412 388 write_next_byte(kcs); 413 389 } 414 390 break; 415 - 391 + 416 392 case KCS_WAIT_WRITE_END: 417 393 if (state != KCS_WRITE_STATE) { 418 394 start_error_recovery(kcs, 419 - "Not in write state for write end"); 395 + "Not in write state" 396 + " for write end"); 420 397 break; 421 398 } 422 399 clear_obf(kcs, status); ··· 438 413 return SI_SM_CALL_WITH_DELAY; 439 414 read_next_byte(kcs); 440 415 } else { 441 - /* We don't implement this exactly like the state 442 - machine in the spec. Some broken hardware 443 - does not write the final dummy byte to the 444 - read register. Thus obf will never go high 445 - here. We just go straight to idle, and we 446 - handle clearing out obf in idle state if it 447 - happens to come in. */ 416 + /* 417 + * We don't implement this exactly like the state 418 + * machine in the spec. Some broken hardware 419 + * does not write the final dummy byte to the 420 + * read register. Thus obf will never go high 421 + * here. We just go straight to idle, and we 422 + * handle clearing out obf in idle state if it 423 + * happens to come in. 
424 + */ 448 425 clear_obf(kcs, status); 449 426 kcs->orig_write_count = 0; 450 427 kcs->state = KCS_IDLE; ··· 457 430 case KCS_ERROR0: 458 431 clear_obf(kcs, status); 459 432 status = read_status(kcs); 460 - if (GET_STATUS_OBF(status)) /* controller isn't responding */ 433 + if (GET_STATUS_OBF(status)) 434 + /* controller isn't responding */ 461 435 if (time_before(jiffies, kcs->error0_timeout)) 462 436 return SI_SM_CALL_WITH_TICK_DELAY; 463 437 write_cmd(kcs, KCS_GET_STATUS_ABORT); ··· 470 442 write_data(kcs, 0); 471 443 kcs->state = KCS_ERROR2; 472 444 break; 473 - 445 + 474 446 case KCS_ERROR2: 475 447 if (state != KCS_READ_STATE) { 476 448 start_error_recovery(kcs, ··· 484 456 write_data(kcs, KCS_READ_BYTE); 485 457 kcs->state = KCS_ERROR3; 486 458 break; 487 - 459 + 488 460 case KCS_ERROR3: 489 461 if (state != KCS_IDLE_STATE) { 490 462 start_error_recovery(kcs, ··· 503 475 return SI_SM_TRANSACTION_COMPLETE; 504 476 } 505 477 break; 506 - 478 + 507 479 case KCS_HOSED: 508 480 break; 509 481 } ··· 523 495 524 496 static int kcs_detect(struct si_sm_data *kcs) 525 497 { 526 - /* It's impossible for the KCS status register to be all 1's, 527 - (assuming a properly functioning, self-initialized BMC) 528 - but that's what you get from reading a bogus address, so we 529 - test that first. */ 498 + /* 499 + * It's impossible for the KCS status register to be all 1's, 500 + * (assuming a properly functioning, self-initialized BMC) 501 + * but that's what you get from reading a bogus address, so we 502 + * test that first. 503 + */ 530 504 if (read_status(kcs) == 0xff) 531 505 return 1; 532 506 ··· 539 509 { 540 510 } 541 511 542 - struct si_sm_handlers kcs_smi_handlers = 543 - { 512 + struct si_sm_handlers kcs_smi_handlers = { 544 513 .init_data = init_kcs_data, 545 514 .start_transaction = start_kcs_transaction, 546 515 .get_result = get_kcs_result,
+264 -202
drivers/char/ipmi/ipmi_si_intf.c
··· 80 80 #define SI_USEC_PER_JIFFY (1000000/HZ) 81 81 #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY) 82 82 #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a 83 - short timeout */ 83 + short timeout */ 84 84 85 85 /* Bit for BMC global enables. */ 86 86 #define IPMI_BMC_RCV_MSG_INTR 0x01 ··· 114 114 115 115 #define DEVICE_NAME "ipmi_si" 116 116 117 - static struct device_driver ipmi_driver = 118 - { 117 + static struct device_driver ipmi_driver = { 119 118 .name = DEVICE_NAME, 120 119 .bus = &platform_bus_type 121 120 }; ··· 168 169 SI_NUM_STATS 169 170 }; 170 171 171 - struct smi_info 172 - { 172 + struct smi_info { 173 173 int intf_num; 174 174 ipmi_smi_t intf; 175 175 struct si_sm_data *si_sm; ··· 181 183 struct ipmi_smi_msg *curr_msg; 182 184 enum si_intf_state si_state; 183 185 184 - /* Used to handle the various types of I/O that can occur with 185 - IPMI */ 186 + /* 187 + * Used to handle the various types of I/O that can occur with 188 + * IPMI 189 + */ 186 190 struct si_sm_io io; 187 191 int (*io_setup)(struct smi_info *info); 188 192 void (*io_cleanup)(struct smi_info *info); ··· 195 195 void (*addr_source_cleanup)(struct smi_info *info); 196 196 void *addr_source_data; 197 197 198 - /* Per-OEM handler, called from handle_flags(). 199 - Returns 1 when handle_flags() needs to be re-run 200 - or 0 indicating it set si_state itself. 201 - */ 198 + /* 199 + * Per-OEM handler, called from handle_flags(). Returns 1 200 + * when handle_flags() needs to be re-run or 0 indicating it 201 + * set si_state itself. 202 + */ 202 203 int (*oem_data_avail_handler)(struct smi_info *smi_info); 203 204 204 - /* Flags from the last GET_MSG_FLAGS command, used when an ATTN 205 - is set to hold the flags until we are done handling everything 206 - from the flags. */ 205 + /* 206 + * Flags from the last GET_MSG_FLAGS command, used when an ATTN 207 + * is set to hold the flags until we are done handling everything 208 + * from the flags. 
209 + */ 207 210 #define RECEIVE_MSG_AVAIL 0x01 208 211 #define EVENT_MSG_BUFFER_FULL 0x02 209 212 #define WDT_PRE_TIMEOUT_INT 0x08 ··· 214 211 #define OEM1_DATA_AVAIL 0x40 215 212 #define OEM2_DATA_AVAIL 0x80 216 213 #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \ 217 - OEM1_DATA_AVAIL | \ 218 - OEM2_DATA_AVAIL) 214 + OEM1_DATA_AVAIL | \ 215 + OEM2_DATA_AVAIL) 219 216 unsigned char msg_flags; 220 217 221 - /* If set to true, this will request events the next time the 222 - state machine is idle. */ 218 + /* 219 + * If set to true, this will request events the next time the 220 + * state machine is idle. 221 + */ 223 222 atomic_t req_events; 224 223 225 - /* If true, run the state machine to completion on every send 226 - call. Generally used after a panic to make sure stuff goes 227 - out. */ 224 + /* 225 + * If true, run the state machine to completion on every send 226 + * call. Generally used after a panic to make sure stuff goes 227 + * out. 228 + */ 228 229 int run_to_completion; 229 230 230 231 /* The I/O port of an SI interface. */ 231 232 int port; 232 233 233 - /* The space between start addresses of the two ports. For 234 - instance, if the first port is 0xca2 and the spacing is 4, then 235 - the second port is 0xca6. */ 234 + /* 235 + * The space between start addresses of the two ports. For 236 + * instance, if the first port is 0xca2 and the spacing is 4, then 237 + * the second port is 0xca6. 238 + */ 236 239 unsigned int spacing; 237 240 238 241 /* zero if no irq; */ ··· 253 244 /* Used to gracefully stop the timer without race conditions. */ 254 245 atomic_t stop_operation; 255 246 256 - /* The driver will disable interrupts when it gets into a 257 - situation where it cannot handle messages due to lack of 258 - memory. Once that situation clears up, it will re-enable 259 - interrupts. */ 247 + /* 248 + * The driver will disable interrupts when it gets into a 249 + * situation where it cannot handle messages due to lack of 250 + * memory. 
Once that situation clears up, it will re-enable 251 + * interrupts. 252 + */ 260 253 int interrupt_disabled; 261 254 262 255 /* From the get device id response... */ ··· 268 257 struct device *dev; 269 258 struct platform_device *pdev; 270 259 271 - /* True if we allocated the device, false if it came from 272 - * someplace else (like PCI). */ 260 + /* 261 + * True if we allocated the device, false if it came from 262 + * someplace else (like PCI). 263 + */ 273 264 int dev_registered; 274 265 275 266 /* Slave address, could be reported from DMI. */ ··· 280 267 /* Counters and things for the proc filesystem. */ 281 268 atomic_t stats[SI_NUM_STATS]; 282 269 283 - struct task_struct *thread; 270 + struct task_struct *thread; 284 271 285 272 struct list_head link; 286 273 }; ··· 301 288 static void cleanup_one_si(struct smi_info *to_clean); 302 289 303 290 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); 304 - static int register_xaction_notifier(struct notifier_block * nb) 291 + static int register_xaction_notifier(struct notifier_block *nb) 305 292 { 306 293 return atomic_notifier_chain_register(&xaction_notifier_list, nb); 307 294 } ··· 310 297 struct ipmi_smi_msg *msg) 311 298 { 312 299 /* Deliver the message to the upper layer with the lock 313 - released. */ 300 + released. */ 314 301 spin_unlock(&(smi_info->si_lock)); 315 302 ipmi_smi_msg_received(smi_info->intf, msg); 316 303 spin_lock(&(smi_info->si_lock)); ··· 342 329 struct timeval t; 343 330 #endif 344 331 345 - /* No need to save flags, we aleady have interrupts off and we 346 - already hold the SMI lock. */ 332 + /* 333 + * No need to save flags, we aleady have interrupts off and we 334 + * already hold the SMI lock. 
335 + */ 347 336 if (!smi_info->run_to_completion) 348 337 spin_lock(&(smi_info->msg_lock)); 349 338 ··· 368 353 link); 369 354 #ifdef DEBUG_TIMING 370 355 do_gettimeofday(&t); 371 - printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); 356 + printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); 372 357 #endif 373 358 err = atomic_notifier_call_chain(&xaction_notifier_list, 374 359 0, smi_info); ··· 380 365 smi_info->si_sm, 381 366 smi_info->curr_msg->data, 382 367 smi_info->curr_msg->data_size); 383 - if (err) { 368 + if (err) 384 369 return_hosed_msg(smi_info, err); 385 - } 386 370 387 371 rv = SI_SM_CALL_WITHOUT_DELAY; 388 372 } 389 - out: 373 + out: 390 374 if (!smi_info->run_to_completion) 391 375 spin_unlock(&(smi_info->msg_lock)); 392 376 ··· 396 382 { 397 383 unsigned char msg[2]; 398 384 399 - /* If we are enabling interrupts, we have to tell the 400 - BMC to use them. */ 385 + /* 386 + * If we are enabling interrupts, we have to tell the 387 + * BMC to use them. 388 + */ 401 389 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 402 390 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; 403 391 ··· 431 415 smi_info->si_state = SI_CLEARING_FLAGS; 432 416 } 433 417 434 - /* When we have a situtaion where we run out of memory and cannot 435 - allocate messages, we just leave them in the BMC and run the system 436 - polled until we can allocate some memory. Once we have some 437 - memory, we will re-enable the interrupt. */ 418 + /* 419 + * When we have a situtaion where we run out of memory and cannot 420 + * allocate messages, we just leave them in the BMC and run the system 421 + * polled until we can allocate some memory. Once we have some 422 + * memory, we will re-enable the interrupt. 
423 + */ 438 424 static inline void disable_si_irq(struct smi_info *smi_info) 439 425 { 440 426 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { ··· 504 486 smi_info->curr_msg->data_size); 505 487 smi_info->si_state = SI_GETTING_EVENTS; 506 488 } else if (smi_info->msg_flags & OEM_DATA_AVAIL && 507 - smi_info->oem_data_avail_handler) { 489 + smi_info->oem_data_avail_handler) { 508 490 if (smi_info->oem_data_avail_handler(smi_info)) 509 491 goto retry; 510 - } else { 492 + } else 511 493 smi_info->si_state = SI_NORMAL; 512 - } 513 494 } 514 495 515 496 static void handle_transaction_done(struct smi_info *smi_info) ··· 518 501 struct timeval t; 519 502 520 503 do_gettimeofday(&t); 521 - printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec); 504 + printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec); 522 505 #endif 523 506 switch (smi_info->si_state) { 524 507 case SI_NORMAL: ··· 531 514 smi_info->curr_msg->rsp, 532 515 IPMI_MAX_MSG_LENGTH); 533 516 534 - /* Do this here becase deliver_recv_msg() releases the 535 - lock, and a new message can be put in during the 536 - time the lock is released. */ 517 + /* 518 + * Do this here becase deliver_recv_msg() releases the 519 + * lock, and a new message can be put in during the 520 + * time the lock is released. 521 + */ 537 522 msg = smi_info->curr_msg; 538 523 smi_info->curr_msg = NULL; 539 524 deliver_recv_msg(smi_info, msg); ··· 549 530 /* We got the flags from the SMI, now handle them. */ 550 531 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 551 532 if (msg[2] != 0) { 552 - /* Error fetching flags, just give up for 553 - now. */ 533 + /* Error fetching flags, just give up for now. */ 554 534 smi_info->si_state = SI_NORMAL; 555 535 } else if (len < 4) { 556 - /* Hmm, no flags. That's technically illegal, but 557 - don't use uninitialized data. */ 536 + /* 537 + * Hmm, no flags. That's technically illegal, but 538 + * don't use uninitialized data. 
539 + */ 558 540 smi_info->si_state = SI_NORMAL; 559 541 } else { 560 542 smi_info->msg_flags = msg[3]; ··· 592 572 smi_info->curr_msg->rsp, 593 573 IPMI_MAX_MSG_LENGTH); 594 574 595 - /* Do this here becase deliver_recv_msg() releases the 596 - lock, and a new message can be put in during the 597 - time the lock is released. */ 575 + /* 576 + * Do this here becase deliver_recv_msg() releases the 577 + * lock, and a new message can be put in during the 578 + * time the lock is released. 579 + */ 598 580 msg = smi_info->curr_msg; 599 581 smi_info->curr_msg = NULL; 600 582 if (msg->rsp[2] != 0) { ··· 609 587 } else { 610 588 smi_inc_stat(smi_info, events); 611 589 612 - /* Do this before we deliver the message 613 - because delivering the message releases the 614 - lock and something else can mess with the 615 - state. */ 590 + /* 591 + * Do this before we deliver the message 592 + * because delivering the message releases the 593 + * lock and something else can mess with the 594 + * state. 595 + */ 616 596 handle_flags(smi_info); 617 597 618 598 deliver_recv_msg(smi_info, msg); ··· 630 606 smi_info->curr_msg->rsp, 631 607 IPMI_MAX_MSG_LENGTH); 632 608 633 - /* Do this here becase deliver_recv_msg() releases the 634 - lock, and a new message can be put in during the 635 - time the lock is released. */ 609 + /* 610 + * Do this here becase deliver_recv_msg() releases the 611 + * lock, and a new message can be put in during the 612 + * time the lock is released. 613 + */ 636 614 msg = smi_info->curr_msg; 637 615 smi_info->curr_msg = NULL; 638 616 if (msg->rsp[2] != 0) { ··· 647 621 } else { 648 622 smi_inc_stat(smi_info, incoming_messages); 649 623 650 - /* Do this before we deliver the message 651 - because delivering the message releases the 652 - lock and something else can mess with the 653 - state. 
*/ 624 + /* 625 + * Do this before we deliver the message 626 + * because delivering the message releases the 627 + * lock and something else can mess with the 628 + * state. 629 + */ 654 630 handle_flags(smi_info); 655 631 656 632 deliver_recv_msg(smi_info, msg); ··· 740 712 } 741 713 } 742 714 743 - /* Called on timeouts and events. Timeouts should pass the elapsed 744 - time, interrupts should pass in zero. Must be called with 745 - si_lock held and interrupts disabled. */ 715 + /* 716 + * Called on timeouts and events. Timeouts should pass the elapsed 717 + * time, interrupts should pass in zero. Must be called with 718 + * si_lock held and interrupts disabled. 719 + */ 746 720 static enum si_sm_result smi_event_handler(struct smi_info *smi_info, 747 721 int time) 748 722 { 749 723 enum si_sm_result si_sm_result; 750 724 751 725 restart: 752 - /* There used to be a loop here that waited a little while 753 - (around 25us) before giving up. That turned out to be 754 - pointless, the minimum delays I was seeing were in the 300us 755 - range, which is far too long to wait in an interrupt. So 756 - we just run until the state machine tells us something 757 - happened or it needs a delay. */ 726 + /* 727 + * There used to be a loop here that waited a little while 728 + * (around 25us) before giving up. That turned out to be 729 + * pointless, the minimum delays I was seeing were in the 300us 730 + * range, which is far too long to wait in an interrupt. So 731 + * we just run until the state machine tells us something 732 + * happened or it needs a delay. 
733 + */ 758 734 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time); 759 735 time = 0; 760 736 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY) 761 - { 762 737 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); 763 - } 764 738 765 - if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) 766 - { 739 + if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) { 767 740 smi_inc_stat(smi_info, complete_transactions); 768 741 769 742 handle_transaction_done(smi_info); 770 743 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); 771 - } 772 - else if (si_sm_result == SI_SM_HOSED) 773 - { 744 + } else if (si_sm_result == SI_SM_HOSED) { 774 745 smi_inc_stat(smi_info, hosed_count); 775 746 776 - /* Do the before return_hosed_msg, because that 777 - releases the lock. */ 747 + /* 748 + * Do the before return_hosed_msg, because that 749 + * releases the lock. 750 + */ 778 751 smi_info->si_state = SI_NORMAL; 779 752 if (smi_info->curr_msg != NULL) { 780 - /* If we were handling a user message, format 781 - a response to send to the upper layer to 782 - tell it about the error. */ 753 + /* 754 + * If we were handling a user message, format 755 + * a response to send to the upper layer to 756 + * tell it about the error. 757 + */ 783 758 return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED); 784 759 } 785 760 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); ··· 792 761 * We prefer handling attn over new messages. But don't do 793 762 * this if there is not yet an upper layer to handle anything. 794 763 */ 795 - if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) 796 - { 764 + if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) { 797 765 unsigned char msg[2]; 798 766 799 767 smi_inc_stat(smi_info, attentions); 800 768 801 - /* Got a attn, send down a get message flags to see 802 - what's causing it. 
It would be better to handle 803 - this in the upper layer, but due to the way 804 - interrupts work with the SMI, that's not really 805 - possible. */ 769 + /* 770 + * Got a attn, send down a get message flags to see 771 + * what's causing it. It would be better to handle 772 + * this in the upper layer, but due to the way 773 + * interrupts work with the SMI, that's not really 774 + * possible. 775 + */ 806 776 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 807 777 msg[1] = IPMI_GET_MSG_FLAGS_CMD; 808 778 ··· 820 788 si_sm_result = start_next_msg(smi_info); 821 789 if (si_sm_result != SI_SM_IDLE) 822 790 goto restart; 823 - } 791 + } 824 792 825 793 if ((si_sm_result == SI_SM_IDLE) 826 - && (atomic_read(&smi_info->req_events))) 827 - { 828 - /* We are idle and the upper layer requested that I fetch 829 - events, so do so. */ 794 + && (atomic_read(&smi_info->req_events))) { 795 + /* 796 + * We are idle and the upper layer requested that I fetch 797 + * events, so do so. 798 + */ 830 799 atomic_set(&smi_info->req_events, 0); 831 800 832 801 smi_info->curr_msg = ipmi_alloc_smi_msg(); ··· 904 871 spin_unlock_irqrestore(&smi_info->msg_lock, flags); 905 872 906 873 spin_lock_irqsave(&smi_info->si_lock, flags); 907 - if ((smi_info->si_state == SI_NORMAL) 908 - && (smi_info->curr_msg == NULL)) 909 - { 874 + if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) 910 875 start_next_msg(smi_info); 911 - } 912 876 spin_unlock_irqrestore(&smi_info->si_lock, flags); 913 877 } 914 878 ··· 936 906 spin_lock_irqsave(&(smi_info->si_lock), flags); 937 907 smi_result = smi_event_handler(smi_info, 0); 938 908 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 939 - if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { 940 - /* do nothing */ 941 - } 909 + if (smi_result == SI_SM_CALL_WITHOUT_DELAY) 910 + ; /* do nothing */ 942 911 else if (smi_result == SI_SM_CALL_WITH_DELAY) 943 912 schedule(); 944 913 else ··· 988 959 spin_lock_irqsave(&(smi_info->si_lock), flags); 989 960 #ifdef 
DEBUG_TIMING 990 961 do_gettimeofday(&t); 991 - printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); 962 + printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); 992 963 #endif 993 964 jiffies_now = jiffies; 994 965 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) ··· 1006 977 goto do_add_timer; 1007 978 } 1008 979 1009 - /* If the state machine asks for a short delay, then shorten 1010 - the timer timeout. */ 980 + /* 981 + * If the state machine asks for a short delay, then shorten 982 + * the timer timeout. 983 + */ 1011 984 if (smi_result == SI_SM_CALL_WITH_DELAY) { 1012 985 smi_inc_stat(smi_info, short_timeouts); 1013 986 smi_info->si_timer.expires = jiffies + 1; ··· 1036 1005 1037 1006 #ifdef DEBUG_TIMING 1038 1007 do_gettimeofday(&t); 1039 - printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec); 1008 + printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec); 1040 1009 #endif 1041 1010 smi_event_handler(smi_info, 0); 1042 1011 spin_unlock_irqrestore(&(smi_info->si_lock), flags); ··· 1079 1048 * The BT interface is efficient enough to not need a thread, 1080 1049 * and there is no need for a thread if we have interrupts. 1081 1050 */ 1082 - else if ((new_smi->si_type != SI_BT) && (!new_smi->irq)) 1051 + else if ((new_smi->si_type != SI_BT) && (!new_smi->irq)) 1083 1052 enable = 1; 1084 1053 1085 1054 if (enable) { ··· 1105 1074 atomic_set(&smi_info->req_events, 0); 1106 1075 } 1107 1076 1108 - static struct ipmi_smi_handlers handlers = 1109 - { 1077 + static struct ipmi_smi_handlers handlers = { 1110 1078 .owner = THIS_MODULE, 1111 1079 .start_processing = smi_start_processing, 1112 1080 .sender = sender, ··· 1115 1085 .poll = poll, 1116 1086 }; 1117 1087 1118 - /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses, 1119 - a default IO port, and 1 ACPI/SPMI address. 
That sets SI_MAX_DRIVERS */ 1088 + /* 1089 + * There can be 4 IO ports passed in (with or without IRQs), 4 addresses, 1090 + * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS. 1091 + */ 1120 1092 1121 1093 static LIST_HEAD(smi_infos); 1122 1094 static DEFINE_MUTEX(smi_infos_lock); ··· 1309 1277 int idx; 1310 1278 1311 1279 if (addr) { 1312 - for (idx = 0; idx < info->io_size; idx++) { 1280 + for (idx = 0; idx < info->io_size; idx++) 1313 1281 release_region(addr + idx * info->io.regspacing, 1314 1282 info->io.regsize); 1315 - } 1316 1283 } 1317 1284 } 1318 1285 ··· 1325 1294 1326 1295 info->io_cleanup = port_cleanup; 1327 1296 1328 - /* Figure out the actual inb/inw/inl/etc routine to use based 1329 - upon the register size. */ 1297 + /* 1298 + * Figure out the actual inb/inw/inl/etc routine to use based 1299 + * upon the register size. 1300 + */ 1330 1301 switch (info->io.regsize) { 1331 1302 case 1: 1332 1303 info->io.inputb = port_inb; ··· 1343 1310 info->io.outputb = port_outl; 1344 1311 break; 1345 1312 default: 1346 - printk("ipmi_si: Invalid register size: %d\n", 1313 + printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", 1347 1314 info->io.regsize); 1348 1315 return -EINVAL; 1349 1316 } 1350 1317 1351 - /* Some BIOSes reserve disjoint I/O regions in their ACPI 1318 + /* 1319 + * Some BIOSes reserve disjoint I/O regions in their ACPI 1352 1320 * tables. This causes problems when trying to register the 1353 1321 * entire I/O region. Therefore we must register each I/O 1354 1322 * port separately. 1355 1323 */ 1356 - for (idx = 0; idx < info->io_size; idx++) { 1324 + for (idx = 0; idx < info->io_size; idx++) { 1357 1325 if (request_region(addr + idx * info->io.regspacing, 1358 1326 info->io.regsize, DEVICE_NAME) == NULL) { 1359 1327 /* Undo allocations */ ··· 1442 1408 1443 1409 info->io_cleanup = mem_cleanup; 1444 1410 1445 - /* Figure out the actual readb/readw/readl/etc routine to use based 1446 - upon the register size. 
*/ 1411 + /* 1412 + * Figure out the actual readb/readw/readl/etc routine to use based 1413 + * upon the register size. 1414 + */ 1447 1415 switch (info->io.regsize) { 1448 1416 case 1: 1449 1417 info->io.inputb = intf_mem_inb; ··· 1466 1430 break; 1467 1431 #endif 1468 1432 default: 1469 - printk("ipmi_si: Invalid register size: %d\n", 1433 + printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", 1470 1434 info->io.regsize); 1471 1435 return -EINVAL; 1472 1436 } 1473 1437 1474 - /* Calculate the total amount of memory to claim. This is an 1438 + /* 1439 + * Calculate the total amount of memory to claim. This is an 1475 1440 * unusual looking calculation, but it avoids claiming any 1476 1441 * more memory than it has to. It will claim everything 1477 1442 * between the first address to the end of the last full 1478 - * register. */ 1443 + * register. 1444 + */ 1479 1445 mapsize = ((info->io_size * info->io.regspacing) 1480 1446 - (info->io.regspacing - info->io.regsize)); 1481 1447 ··· 1807 1769 1808 1770 #include <linux/acpi.h> 1809 1771 1810 - /* Once we get an ACPI failure, we don't try any more, because we go 1811 - through the tables sequentially. Once we don't find a table, there 1812 - are no more. */ 1772 + /* 1773 + * Once we get an ACPI failure, we don't try any more, because we go 1774 + * through the tables sequentially. Once we don't find a table, there 1775 + * are no more. 1776 + */ 1813 1777 static int acpi_failure; 1814 1778 1815 1779 /* For GPE-type interrupts. 
*/ ··· 1874 1834 1875 1835 /* 1876 1836 * Defined at 1877 - * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf 1837 + * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/ 1838 + * Docs/TechPapers/IA64/hpspmi.pdf 1878 1839 */ 1879 1840 struct SPMITable { 1880 1841 s8 Signature[4]; ··· 1897 1856 */ 1898 1857 u8 InterruptType; 1899 1858 1900 - /* If bit 0 of InterruptType is set, then this is the SCI 1901 - interrupt in the GPEx_STS register. */ 1859 + /* 1860 + * If bit 0 of InterruptType is set, then this is the SCI 1861 + * interrupt in the GPEx_STS register. 1862 + */ 1902 1863 u8 GPE; 1903 1864 1904 1865 s16 Reserved; 1905 1866 1906 - /* If bit 1 of InterruptType is set, then this is the I/O 1907 - APIC/SAPIC interrupt. */ 1867 + /* 1868 + * If bit 1 of InterruptType is set, then this is the I/O 1869 + * APIC/SAPIC interrupt. 1870 + */ 1908 1871 u32 GlobalSystemInterrupt; 1909 1872 1910 1873 /* The actual register address. */ ··· 1926 1881 1927 1882 if (spmi->IPMIlegacy != 1) { 1928 1883 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); 1929 - return -ENODEV; 1884 + return -ENODEV; 1930 1885 } 1931 1886 1932 1887 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) ··· 1943 1898 info->addr_source = "ACPI"; 1944 1899 1945 1900 /* Figure out the interface type. 
*/ 1946 - switch (spmi->InterfaceType) 1947 - { 1901 + switch (spmi->InterfaceType) { 1948 1902 case 1: /* KCS */ 1949 1903 info->si_type = SI_KCS; 1950 1904 break; ··· 1991 1947 info->io.addr_type = IPMI_IO_ADDR_SPACE; 1992 1948 } else { 1993 1949 kfree(info); 1994 - printk("ipmi_si: Unknown ACPI I/O Address type\n"); 1950 + printk(KERN_WARNING 1951 + "ipmi_si: Unknown ACPI I/O Address type\n"); 1995 1952 return -EIO; 1996 1953 } 1997 1954 info->io.addr_data = spmi->addr.address; ··· 2026 1981 #endif 2027 1982 2028 1983 #ifdef CONFIG_DMI 2029 - struct dmi_ipmi_data 2030 - { 1984 + struct dmi_ipmi_data { 2031 1985 u8 type; 2032 1986 u8 addr_space; 2033 1987 unsigned long base_addr; ··· 2051 2007 /* I/O */ 2052 2008 base_addr &= 0xFFFE; 2053 2009 dmi->addr_space = IPMI_IO_ADDR_SPACE; 2054 - } 2055 - else { 2010 + } else 2056 2011 /* Memory */ 2057 2012 dmi->addr_space = IPMI_MEM_ADDR_SPACE; 2058 - } 2013 + 2059 2014 /* If bit 4 of byte 0x10 is set, then the lsb for the address 2060 2015 is odd. */ 2061 2016 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); ··· 2063 2020 2064 2021 /* The top two bits of byte 0x10 hold the register spacing. */ 2065 2022 reg_spacing = (data[0x10] & 0xC0) >> 6; 2066 - switch(reg_spacing){ 2023 + switch (reg_spacing) { 2067 2024 case 0x00: /* Byte boundaries */ 2068 2025 dmi->offset = 1; 2069 2026 break; ··· 2079 2036 } 2080 2037 } else { 2081 2038 /* Old DMI spec. */ 2082 - /* Note that technically, the lower bit of the base 2039 + /* 2040 + * Note that technically, the lower bit of the base 2083 2041 * address should be 1 if the address is I/O and 0 if 2084 2042 * the address is in memory. So many systems get that 2085 2043 * wrong (and all that I have seen are I/O) so we just 2086 2044 * ignore that bit and assume I/O. Systems that use 2087 - * memory should use the newer spec, anyway. */ 2045 + * memory should use the newer spec, anyway. 
2046 + */ 2088 2047 dmi->base_addr = base_addr & 0xfffe; 2089 2048 dmi->addr_space = IPMI_IO_ADDR_SPACE; 2090 2049 dmi->offset = 1; ··· 2293 2248 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices); 2294 2249 2295 2250 static struct pci_driver ipmi_pci_driver = { 2296 - .name = DEVICE_NAME, 2297 - .id_table = ipmi_pci_devices, 2298 - .probe = ipmi_pci_probe, 2299 - .remove = __devexit_p(ipmi_pci_remove), 2251 + .name = DEVICE_NAME, 2252 + .id_table = ipmi_pci_devices, 2253 + .probe = ipmi_pci_probe, 2254 + .remove = __devexit_p(ipmi_pci_remove), 2300 2255 #ifdef CONFIG_PM 2301 - .suspend = ipmi_pci_suspend, 2302 - .resume = ipmi_pci_resume, 2256 + .suspend = ipmi_pci_suspend, 2257 + .resume = ipmi_pci_resume, 2303 2258 #endif 2304 2259 }; 2305 2260 #endif /* CONFIG_PCI */ ··· 2369 2324 info->io.addr_data, info->io.regsize, info->io.regspacing, 2370 2325 info->irq); 2371 2326 2372 - dev->dev.driver_data = (void*) info; 2327 + dev->dev.driver_data = (void *) info; 2373 2328 2374 2329 return try_smi_init(info); 2375 2330 } ··· 2382 2337 2383 2338 static struct of_device_id ipmi_match[] = 2384 2339 { 2385 - { .type = "ipmi", .compatible = "ipmi-kcs", .data = (void *)(unsigned long) SI_KCS }, 2386 - { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC }, 2387 - { .type = "ipmi", .compatible = "ipmi-bt", .data = (void *)(unsigned long) SI_BT }, 2340 + { .type = "ipmi", .compatible = "ipmi-kcs", 2341 + .data = (void *)(unsigned long) SI_KCS }, 2342 + { .type = "ipmi", .compatible = "ipmi-smic", 2343 + .data = (void *)(unsigned long) SI_SMIC }, 2344 + { .type = "ipmi", .compatible = "ipmi-bt", 2345 + .data = (void *)(unsigned long) SI_BT }, 2388 2346 {}, 2389 2347 }; 2390 2348 2391 - static struct of_platform_driver ipmi_of_platform_driver = 2392 - { 2349 + static struct of_platform_driver ipmi_of_platform_driver = { 2393 2350 .name = "ipmi", 2394 2351 .match_table = ipmi_match, 2395 2352 .probe = ipmi_of_probe, ··· 2412 2365 if (!resp) 2413 2366 
return -ENOMEM; 2414 2367 2415 - /* Do a Get Device ID command, since it comes back with some 2416 - useful info. */ 2368 + /* 2369 + * Do a Get Device ID command, since it comes back with some 2370 + * useful info. 2371 + */ 2417 2372 msg[0] = IPMI_NETFN_APP_REQUEST << 2; 2418 2373 msg[1] = IPMI_GET_DEVICE_ID_CMD; 2419 2374 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 2420 2375 2421 2376 smi_result = smi_info->handlers->event(smi_info->si_sm, 0); 2422 - for (;;) 2423 - { 2377 + for (;;) { 2424 2378 if (smi_result == SI_SM_CALL_WITH_DELAY || 2425 2379 smi_result == SI_SM_CALL_WITH_TICK_DELAY) { 2426 2380 schedule_timeout_uninterruptible(1); 2427 2381 smi_result = smi_info->handlers->event( 2428 2382 smi_info->si_sm, 100); 2429 - } 2430 - else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) 2431 - { 2383 + } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { 2432 2384 smi_result = smi_info->handlers->event( 2433 2385 smi_info->si_sm, 0); 2434 - } 2435 - else 2386 + } else 2436 2387 break; 2437 2388 } 2438 2389 if (smi_result == SI_SM_HOSED) { 2439 - /* We couldn't get the state machine to run, so whatever's at 2440 - the port is probably not an IPMI SMI interface. */ 2390 + /* 2391 + * We couldn't get the state machine to run, so whatever's at 2392 + * the port is probably not an IPMI SMI interface. 
2393 + */ 2441 2394 rv = -ENODEV; 2442 2395 goto out; 2443 2396 } ··· 2523 2476 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) 2524 2477 { 2525 2478 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | 2526 - RECEIVE_MSG_AVAIL); 2479 + RECEIVE_MSG_AVAIL); 2527 2480 return 1; 2528 2481 } 2529 2482 ··· 2565 2518 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { 2566 2519 smi_info->oem_data_avail_handler = 2567 2520 oem_data_avail_to_receive_msg_avail; 2568 - } 2569 - else if (ipmi_version_major(id) < 1 || 2570 - (ipmi_version_major(id) == 1 && 2571 - ipmi_version_minor(id) < 5)) { 2521 + } else if (ipmi_version_major(id) < 1 || 2522 + (ipmi_version_major(id) == 1 && 2523 + ipmi_version_minor(id) < 5)) { 2572 2524 smi_info->oem_data_avail_handler = 2573 2525 oem_data_avail_to_receive_msg_avail; 2574 2526 } ··· 2659 2613 static inline void wait_for_timer_and_thread(struct smi_info *smi_info) 2660 2614 { 2661 2615 if (smi_info->intf) { 2662 - /* The timer and thread are only running if the 2663 - interface has been started up and registered. */ 2616 + /* 2617 + * The timer and thread are only running if the 2618 + * interface has been started up and registered. 2619 + */ 2664 2620 if (smi_info->thread != NULL) 2665 2621 kthread_stop(smi_info->thread); 2666 2622 del_timer_sync(&smi_info->si_timer); ··· 2787 2739 /* Allocate the state machine's data and initialize it. */ 2788 2740 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); 2789 2741 if (!new_smi->si_sm) { 2790 - printk(" Could not allocate state machine memory\n"); 2742 + printk(KERN_ERR "Could not allocate state machine memory\n"); 2791 2743 rv = -ENOMEM; 2792 2744 goto out_err; 2793 2745 } ··· 2797 2749 /* Now that we know the I/O size, we can set up the I/O. 
*/ 2798 2750 rv = new_smi->io_setup(new_smi); 2799 2751 if (rv) { 2800 - printk(" Could not set up I/O space\n"); 2752 + printk(KERN_ERR "Could not set up I/O space\n"); 2801 2753 goto out_err; 2802 2754 } 2803 2755 ··· 2813 2765 goto out_err; 2814 2766 } 2815 2767 2816 - /* Attempt a get device id command. If it fails, we probably 2817 - don't have a BMC here. */ 2768 + /* 2769 + * Attempt a get device id command. If it fails, we probably 2770 + * don't have a BMC here. 2771 + */ 2818 2772 rv = try_get_dev_id(new_smi); 2819 2773 if (rv) { 2820 2774 if (new_smi->addr_source) ··· 2841 2791 new_smi->intf_num = smi_num; 2842 2792 smi_num++; 2843 2793 2844 - /* Start clearing the flags before we enable interrupts or the 2845 - timer to avoid racing with the timer. */ 2794 + /* 2795 + * Start clearing the flags before we enable interrupts or the 2796 + * timer to avoid racing with the timer. 2797 + */ 2846 2798 start_clear_flags(new_smi); 2847 2799 /* IRQ is defined to be set when non-zero. */ 2848 2800 if (new_smi->irq) 2849 2801 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ; 2850 2802 2851 2803 if (!new_smi->dev) { 2852 - /* If we don't already have a device from something 2853 - * else (like PCI), then register a new one. */ 2804 + /* 2805 + * If we don't already have a device from something 2806 + * else (like PCI), then register a new one. 
2807 + */ 2854 2808 new_smi->pdev = platform_device_alloc("ipmi_si", 2855 2809 new_smi->intf_num); 2856 2810 if (rv) { ··· 2925 2871 2926 2872 mutex_unlock(&smi_infos_lock); 2927 2873 2928 - printk(KERN_INFO "IPMI %s interface initialized\n",si_to_str[new_smi->si_type]); 2874 + printk(KERN_INFO "IPMI %s interface initialized\n", 2875 + si_to_str[new_smi->si_type]); 2929 2876 2930 2877 return 0; 2931 2878 ··· 2941 2886 if (new_smi->irq_cleanup) 2942 2887 new_smi->irq_cleanup(new_smi); 2943 2888 2944 - /* Wait until we know that we are out of any interrupt 2945 - handlers might have been running before we freed the 2946 - interrupt. */ 2889 + /* 2890 + * Wait until we know that we are out of any interrupt 2891 + * handlers might have been running before we freed the 2892 + * interrupt. 2893 + */ 2947 2894 synchronize_sched(); 2948 2895 2949 2896 if (new_smi->si_sm) { ··· 3017 2960 3018 2961 #ifdef CONFIG_PCI 3019 2962 rv = pci_register_driver(&ipmi_pci_driver); 3020 - if (rv){ 2963 + if (rv) 3021 2964 printk(KERN_ERR 3022 2965 "init_ipmi_si: Unable to register PCI driver: %d\n", 3023 2966 rv); 3024 - } 3025 2967 #endif 3026 2968 3027 2969 #ifdef CONFIG_PPC_OF ··· 3049 2993 of_unregister_platform_driver(&ipmi_of_platform_driver); 3050 2994 #endif 3051 2995 driver_unregister(&ipmi_driver); 3052 - printk("ipmi_si: Unable to find any System Interface(s)\n"); 2996 + printk(KERN_WARNING 2997 + "ipmi_si: Unable to find any System Interface(s)\n"); 3053 2998 return -ENODEV; 3054 2999 } else { 3055 3000 mutex_unlock(&smi_infos_lock); ··· 3072 3015 /* Tell the driver that we are shutting down. */ 3073 3016 atomic_inc(&to_clean->stop_operation); 3074 3017 3075 - /* Make sure the timer and thread are stopped and will not run 3076 - again. */ 3018 + /* 3019 + * Make sure the timer and thread are stopped and will not run 3020 + * again. 
3021 + */ 3077 3022 wait_for_timer_and_thread(to_clean); 3078 3023 3079 - /* Timeouts are stopped, now make sure the interrupts are off 3080 - for the device. A little tricky with locks to make sure 3081 - there are no races. */ 3024 + /* 3025 + * Timeouts are stopped, now make sure the interrupts are off 3026 + * for the device. A little tricky with locks to make sure 3027 + * there are no races. 3028 + */ 3082 3029 spin_lock_irqsave(&to_clean->si_lock, flags); 3083 3030 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { 3084 3031 spin_unlock_irqrestore(&to_clean->si_lock, flags); ··· 3153 3092 3154 3093 MODULE_LICENSE("GPL"); 3155 3094 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 3156 - MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces."); 3095 + MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT" 3096 + " system interfaces.");
+54 -35
drivers/char/ipmi/ipmi_si_sm.h
··· 34 34 * 675 Mass Ave, Cambridge, MA 02139, USA. 35 35 */ 36 36 37 - /* This is defined by the state machines themselves, it is an opaque 38 - data type for them to use. */ 37 + /* 38 + * This is defined by the state machines themselves, it is an opaque 39 + * data type for them to use. 40 + */ 39 41 struct si_sm_data; 40 42 41 - /* The structure for doing I/O in the state machine. The state 42 - machine doesn't have the actual I/O routines, they are done through 43 - this interface. */ 44 - struct si_sm_io 45 - { 43 + /* 44 + * The structure for doing I/O in the state machine. The state 45 + * machine doesn't have the actual I/O routines, they are done through 46 + * this interface. 47 + */ 48 + struct si_sm_io { 46 49 unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset); 47 50 void (*outputb)(struct si_sm_io *io, 48 51 unsigned int offset, 49 52 unsigned char b); 50 53 51 - /* Generic info used by the actual handling routines, the 52 - state machine shouldn't touch these. */ 54 + /* 55 + * Generic info used by the actual handling routines, the 56 + * state machine shouldn't touch these. 57 + */ 53 58 void __iomem *addr; 54 59 int regspacing; 55 60 int regsize; ··· 64 59 }; 65 60 66 61 /* Results of SMI events. */ 67 - enum si_sm_result 68 - { 62 + enum si_sm_result { 69 63 SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */ 70 64 SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */ 71 - SI_SM_CALL_WITH_TICK_DELAY, /* Delay at least 1 tick before calling again. */ 65 + SI_SM_CALL_WITH_TICK_DELAY,/* Delay >=1 tick before calling again. */ 72 66 SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */ 73 67 SI_SM_IDLE, /* The SM is in idle state. */ 74 68 SI_SM_HOSED, /* The hardware violated the state machine. */ 75 - SI_SM_ATTN /* The hardware is asserting attn and the 76 - state machine is idle. */ 69 + 70 + /* 71 + * The hardware is asserting attn and the state machine is 72 + * idle. 
73 + */ 74 + SI_SM_ATTN 77 75 }; 78 76 79 77 /* Handlers for the SMI state machine. */ 80 - struct si_sm_handlers 81 - { 82 - /* Put the version number of the state machine here so the 83 - upper layer can print it. */ 78 + struct si_sm_handlers { 79 + /* 80 + * Put the version number of the state machine here so the 81 + * upper layer can print it. 82 + */ 84 83 char *version; 85 84 86 - /* Initialize the data and return the amount of I/O space to 87 - reserve for the space. */ 85 + /* 86 + * Initialize the data and return the amount of I/O space to 87 + * reserve for the space. 88 + */ 88 89 unsigned int (*init_data)(struct si_sm_data *smi, 89 90 struct si_sm_io *io); 90 91 91 - /* Start a new transaction in the state machine. This will 92 - return -2 if the state machine is not idle, -1 if the size 93 - is invalid (to large or too small), or 0 if the transaction 94 - is successfully completed. */ 92 + /* 93 + * Start a new transaction in the state machine. This will 94 + * return -2 if the state machine is not idle, -1 if the size 95 + * is invalid (to large or too small), or 0 if the transaction 96 + * is successfully completed. 97 + */ 95 98 int (*start_transaction)(struct si_sm_data *smi, 96 99 unsigned char *data, unsigned int size); 97 100 98 - /* Return the results after the transaction. This will return 99 - -1 if the buffer is too small, zero if no transaction is 100 - present, or the actual length of the result data. */ 101 + /* 102 + * Return the results after the transaction. This will return 103 + * -1 if the buffer is too small, zero if no transaction is 104 + * present, or the actual length of the result data. 105 + */ 101 106 int (*get_result)(struct si_sm_data *smi, 102 107 unsigned char *data, unsigned int length); 103 108 104 - /* Call this periodically (for a polled interface) or upon 105 - receiving an interrupt (for a interrupt-driven interface). 
106 - If interrupt driven, you should probably poll this 107 - periodically when not in idle state. This should be called 108 - with the time that passed since the last call, if it is 109 - significant. Time is in microseconds. */ 109 + /* 110 + * Call this periodically (for a polled interface) or upon 111 + * receiving an interrupt (for a interrupt-driven interface). 112 + * If interrupt driven, you should probably poll this 113 + * periodically when not in idle state. This should be called 114 + * with the time that passed since the last call, if it is 115 + * significant. Time is in microseconds. 116 + */ 110 117 enum si_sm_result (*event)(struct si_sm_data *smi, long time); 111 118 112 - /* Attempt to detect an SMI. Returns 0 on success or nonzero 113 - on failure. */ 119 + /* 120 + * Attempt to detect an SMI. Returns 0 on success or nonzero 121 + * on failure. 122 + */ 114 123 int (*detect)(struct si_sm_data *smi); 115 124 116 125 /* The interface is shutting down, so clean it up. */
+71 -78
drivers/char/ipmi/ipmi_smic_sm.c
··· 85 85 /* SMIC Flags Register Bits */ 86 86 #define SMIC_RX_DATA_READY 0x80 87 87 #define SMIC_TX_DATA_READY 0x40 88 + 88 89 /* 89 90 * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by 90 91 * a few systems, and then only by Systems Management ··· 105 104 #define EC_ILLEGAL_COMMAND 0x04 106 105 #define EC_BUFFER_FULL 0x05 107 106 108 - struct si_sm_data 109 - { 107 + struct si_sm_data { 110 108 enum smic_states state; 111 109 struct si_sm_io *io; 112 - unsigned char write_data[MAX_SMIC_WRITE_SIZE]; 113 - int write_pos; 114 - int write_count; 115 - int orig_write_count; 116 - unsigned char read_data[MAX_SMIC_READ_SIZE]; 117 - int read_pos; 118 - int truncated; 119 - unsigned int error_retries; 120 - long smic_timeout; 110 + unsigned char write_data[MAX_SMIC_WRITE_SIZE]; 111 + int write_pos; 112 + int write_count; 113 + int orig_write_count; 114 + unsigned char read_data[MAX_SMIC_READ_SIZE]; 115 + int read_pos; 116 + int truncated; 117 + unsigned int error_retries; 118 + long smic_timeout; 121 119 }; 122 120 123 - static unsigned int init_smic_data (struct si_sm_data *smic, 124 - struct si_sm_io *io) 121 + static unsigned int init_smic_data(struct si_sm_data *smic, 122 + struct si_sm_io *io) 125 123 { 126 124 smic->state = SMIC_IDLE; 127 125 smic->io = io; ··· 150 150 return IPMI_NOT_IN_MY_STATE_ERR; 151 151 152 152 if (smic_debug & SMIC_DEBUG_MSG) { 153 - printk(KERN_INFO "start_smic_transaction -"); 154 - for (i = 0; i < size; i ++) { 155 - printk (" %02x", (unsigned char) (data [i])); 156 - } 157 - printk ("\n"); 153 + printk(KERN_DEBUG "start_smic_transaction -"); 154 + for (i = 0; i < size; i++) 155 + printk(" %02x", (unsigned char) data[i]); 156 + printk("\n"); 158 157 } 159 158 smic->error_retries = 0; 160 159 memcpy(smic->write_data, data, size); ··· 172 173 int i; 173 174 174 175 if (smic_debug & SMIC_DEBUG_MSG) { 175 - printk (KERN_INFO "smic_get result -"); 176 - for (i = 0; i < smic->read_pos; i ++) { 177 - printk (" %02x", (smic->read_data [i])); 178 
- } 179 - printk ("\n"); 176 + printk(KERN_DEBUG "smic_get result -"); 177 + for (i = 0; i < smic->read_pos; i++) 178 + printk(" %02x", smic->read_data[i]); 179 + printk("\n"); 180 180 } 181 181 if (length < smic->read_pos) { 182 182 smic->read_pos = length; ··· 221 223 smic->io->outputb(smic->io, 1, control); 222 224 } 223 225 224 - static inline void write_si_sm_data (struct si_sm_data *smic, 225 - unsigned char data) 226 + static inline void write_si_sm_data(struct si_sm_data *smic, 227 + unsigned char data) 226 228 { 227 229 smic->io->outputb(smic->io, 0, data); 228 230 } ··· 231 233 { 232 234 (smic->error_retries)++; 233 235 if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) { 234 - if (smic_debug & SMIC_DEBUG_ENABLE) { 236 + if (smic_debug & SMIC_DEBUG_ENABLE) 235 237 printk(KERN_WARNING 236 238 "ipmi_smic_drv: smic hosed: %s\n", reason); 237 - } 238 239 smic->state = SMIC_HOSED; 239 240 } else { 240 241 smic->write_count = smic->orig_write_count; ··· 251 254 (smic->write_count)--; 252 255 } 253 256 254 - static inline void read_next_byte (struct si_sm_data *smic) 257 + static inline void read_next_byte(struct si_sm_data *smic) 255 258 { 256 259 if (smic->read_pos >= MAX_SMIC_READ_SIZE) { 257 - read_smic_data (smic); 260 + read_smic_data(smic); 258 261 smic->truncated = 1; 259 262 } else { 260 263 smic->read_data[smic->read_pos] = read_smic_data(smic); 261 - (smic->read_pos)++; 264 + smic->read_pos++; 262 265 } 263 266 } 264 267 ··· 333 336 SMIC_SC_SMS_RD_END 0xC6 334 337 */ 335 338 336 - static enum si_sm_result smic_event (struct si_sm_data *smic, long time) 339 + static enum si_sm_result smic_event(struct si_sm_data *smic, long time) 337 340 { 338 341 unsigned char status; 339 342 unsigned char flags; ··· 344 347 return SI_SM_HOSED; 345 348 } 346 349 if (smic->state != SMIC_IDLE) { 347 - if (smic_debug & SMIC_DEBUG_STATES) { 348 - printk(KERN_INFO 350 + if (smic_debug & SMIC_DEBUG_STATES) 351 + printk(KERN_DEBUG 349 352 "smic_event - smic->smic_timeout = 
%ld," 350 353 " time = %ld\n", 351 354 smic->smic_timeout, time); 352 - } 353 - /* FIXME: smic_event is sometimes called with time > SMIC_RETRY_TIMEOUT */ 355 + /* 356 + * FIXME: smic_event is sometimes called with time > 357 + * SMIC_RETRY_TIMEOUT 358 + */ 354 359 if (time < SMIC_RETRY_TIMEOUT) { 355 360 smic->smic_timeout -= time; 356 361 if (smic->smic_timeout < 0) { ··· 365 366 if (flags & SMIC_FLAG_BSY) 366 367 return SI_SM_CALL_WITH_DELAY; 367 368 368 - status = read_smic_status (smic); 369 + status = read_smic_status(smic); 369 370 if (smic_debug & SMIC_DEBUG_STATES) 370 - printk(KERN_INFO 371 + printk(KERN_DEBUG 371 372 "smic_event - state = %d, flags = 0x%02x," 372 373 " status = 0x%02x\n", 373 374 smic->state, flags, status); ··· 376 377 case SMIC_IDLE: 377 378 /* in IDLE we check for available messages */ 378 379 if (flags & SMIC_SMS_DATA_AVAIL) 379 - { 380 380 return SI_SM_ATTN; 381 - } 382 381 return SI_SM_IDLE; 383 382 384 383 case SMIC_START_OP: ··· 388 391 389 392 case SMIC_OP_OK: 390 393 if (status != SMIC_SC_SMS_READY) { 391 - /* this should not happen */ 394 + /* this should not happen */ 392 395 start_error_recovery(smic, 393 396 "state = SMIC_OP_OK," 394 397 " status != SMIC_SC_SMS_READY"); ··· 408 411 "status != SMIC_SC_SMS_WR_START"); 409 412 return SI_SM_CALL_WITH_DELAY; 410 413 } 411 - /* we must not issue WR_(NEXT|END) unless 412 - TX_DATA_READY is set */ 414 + /* 415 + * we must not issue WR_(NEXT|END) unless 416 + * TX_DATA_READY is set 417 + * */ 413 418 if (flags & SMIC_TX_DATA_READY) { 414 419 if (smic->write_count == 1) { 415 420 /* last byte */ ··· 423 424 } 424 425 write_next_byte(smic); 425 426 write_smic_flags(smic, flags | SMIC_FLAG_BSY); 426 - } 427 - else { 427 + } else 428 428 return SI_SM_CALL_WITH_DELAY; 429 - } 430 429 break; 431 430 432 431 case SMIC_WRITE_NEXT: ··· 439 442 if (smic->write_count == 1) { 440 443 write_smic_control(smic, SMIC_CC_SMS_WR_END); 441 444 smic->state = SMIC_WRITE_END; 442 - } 443 - else { 445 + } 
else { 444 446 write_smic_control(smic, SMIC_CC_SMS_WR_NEXT); 445 447 smic->state = SMIC_WRITE_NEXT; 446 448 } 447 449 write_next_byte(smic); 448 450 write_smic_flags(smic, flags | SMIC_FLAG_BSY); 449 - } 450 - else { 451 + } else 451 452 return SI_SM_CALL_WITH_DELAY; 452 - } 453 453 break; 454 454 455 455 case SMIC_WRITE_END: 456 456 if (status != SMIC_SC_SMS_WR_END) { 457 - start_error_recovery (smic, 458 - "state = SMIC_WRITE_END, " 459 - "status != SMIC_SC_SMS_WR_END"); 457 + start_error_recovery(smic, 458 + "state = SMIC_WRITE_END, " 459 + "status != SMIC_SC_SMS_WR_END"); 460 460 return SI_SM_CALL_WITH_DELAY; 461 461 } 462 462 /* data register holds an error code */ 463 463 data = read_smic_data(smic); 464 464 if (data != 0) { 465 - if (smic_debug & SMIC_DEBUG_ENABLE) { 466 - printk(KERN_INFO 465 + if (smic_debug & SMIC_DEBUG_ENABLE) 466 + printk(KERN_DEBUG 467 467 "SMIC_WRITE_END: data = %02x\n", data); 468 - } 469 468 start_error_recovery(smic, 470 469 "state = SMIC_WRITE_END, " 471 470 "data != SUCCESS"); 472 471 return SI_SM_CALL_WITH_DELAY; 473 - } else { 472 + } else 474 473 smic->state = SMIC_WRITE2READ; 475 - } 476 474 break; 477 475 478 476 case SMIC_WRITE2READ: 479 - /* we must wait for RX_DATA_READY to be set before we 480 - can continue */ 477 + /* 478 + * we must wait for RX_DATA_READY to be set before we 479 + * can continue 480 + */ 481 481 if (flags & SMIC_RX_DATA_READY) { 482 482 write_smic_control(smic, SMIC_CC_SMS_RD_START); 483 483 write_smic_flags(smic, flags | SMIC_FLAG_BSY); 484 484 smic->state = SMIC_READ_START; 485 - } else { 485 + } else 486 486 return SI_SM_CALL_WITH_DELAY; 487 - } 488 487 break; 489 488 490 489 case SMIC_READ_START: ··· 495 502 write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); 496 503 write_smic_flags(smic, flags | SMIC_FLAG_BSY); 497 504 smic->state = SMIC_READ_NEXT; 498 - } else { 505 + } else 499 506 return SI_SM_CALL_WITH_DELAY; 500 - } 501 507 break; 502 508 503 509 case SMIC_READ_NEXT: 504 510 switch (status) { 
505 - /* smic tells us that this is the last byte to be read 506 - --> clean up */ 511 + /* 512 + * smic tells us that this is the last byte to be read 513 + * --> clean up 514 + */ 507 515 case SMIC_SC_SMS_RD_END: 508 516 read_next_byte(smic); 509 517 write_smic_control(smic, SMIC_CC_SMS_RD_END); ··· 517 523 write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); 518 524 write_smic_flags(smic, flags | SMIC_FLAG_BSY); 519 525 smic->state = SMIC_READ_NEXT; 520 - } else { 526 + } else 521 527 return SI_SM_CALL_WITH_DELAY; 522 - } 523 528 break; 524 529 default: 525 530 start_error_recovery( ··· 539 546 data = read_smic_data(smic); 540 547 /* data register holds an error code */ 541 548 if (data != 0) { 542 - if (smic_debug & SMIC_DEBUG_ENABLE) { 543 - printk(KERN_INFO 549 + if (smic_debug & SMIC_DEBUG_ENABLE) 550 + printk(KERN_DEBUG 544 551 "SMIC_READ_END: data = %02x\n", data); 545 - } 546 552 start_error_recovery(smic, 547 553 "state = SMIC_READ_END, " 548 554 "data != SUCCESS"); ··· 557 565 558 566 default: 559 567 if (smic_debug & SMIC_DEBUG_ENABLE) { 560 - printk(KERN_WARNING "smic->state = %d\n", smic->state); 568 + printk(KERN_DEBUG "smic->state = %d\n", smic->state); 561 569 start_error_recovery(smic, "state = UNKNOWN"); 562 570 return SI_SM_CALL_WITH_DELAY; 563 571 } ··· 568 576 569 577 static int smic_detect(struct si_sm_data *smic) 570 578 { 571 - /* It's impossible for the SMIC fnags register to be all 1's, 572 - (assuming a properly functioning, self-initialized BMC) 573 - but that's what you get from reading a bogus address, so we 574 - test that first. */ 579 + /* 580 + * It's impossible for the SMIC fnags register to be all 1's, 581 + * (assuming a properly functioning, self-initialized BMC) 582 + * but that's what you get from reading a bogus address, so we 583 + * test that first. 
584 + */ 575 585 if (read_smic_flags(smic) == 0xff) 576 586 return 1; 577 587 ··· 589 595 return sizeof(struct si_sm_data); 590 596 } 591 597 592 - struct si_sm_handlers smic_smi_handlers = 593 - { 598 + struct si_sm_handlers smic_smi_handlers = { 594 599 .init_data = init_smic_data, 595 600 .start_transaction = start_smic_transaction, 596 601 .get_result = smic_get_result,