Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

firewire: Clean up comment style.

Drop filenames from file preamble, drop editor annotations and
use standard indent style for block comments.

Signed-off-by: Kristian Hoegsberg <krh@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de> (fixed typo)

Authored by Kristian Høgsberg; committed by Stefan Richter.
c781c06d e175569c

+439 -253
+66 -39
drivers/firewire/fw-card.c
··· 1 - /* -*- c-basic-offset: 8 -*- 2 - * 3 - * fw-card.c - card level functions 4 - * 5 - * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net> 1 + /* 2 + * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> 6 3 * 7 4 * This program is free software; you can redistribute it and/or modify 8 5 * it under the terms of the GNU General Public License as published by ··· 66 69 static u32 config_rom[256]; 67 70 int i, j, length; 68 71 69 - /* Initialize contents of config rom buffer. On the OHCI 72 + /* 73 + * Initialize contents of config rom buffer. On the OHCI 70 74 * controller, block reads to the config rom accesses the host 71 75 * memory, but quadlet read access the hardware bus info block 72 76 * registers. That's just crack, but it means we should make 73 77 * sure the contents of bus info block in host memory mathces 74 - * the version stored in the OHCI registers. */ 78 + * the version stored in the OHCI registers. 79 + */ 75 80 76 81 memset(config_rom, 0, sizeof config_rom); 77 82 config_rom[0] = bib_crc_length(4) | bib_info_length(4) | bib_crc(0); ··· 142 143 { 143 144 size_t i; 144 145 145 - /* Check descriptor is valid; the length of all blocks in the 146 + /* 147 + * Check descriptor is valid; the length of all blocks in the 146 148 * descriptor has to add up to exactly the length of the 147 - * block. */ 149 + * block. 150 + */ 148 151 i = 0; 149 152 while (i < desc->length) 150 153 i += (desc->data[i] >> 16) + 1; ··· 229 228 230 229 if (card->bm_generation + 1 == generation || 231 230 (card->bm_generation != generation && grace)) { 232 - /* This first step is to figure out who is IRM and 231 + /* 232 + * This first step is to figure out who is IRM and 233 233 * then try to become bus manager. If the IRM is not 234 234 * well defined (e.g. 
does not have an active link 235 235 * layer or does not responds to our lock request, we ··· 238 236 * In that case, we do a goto into the gap count logic 239 237 * so that when we do the reset, we still optimize the 240 238 * gap count. That could well save a reset in the 241 - * next generation. */ 239 + * next generation. 240 + */ 242 241 243 242 irm_id = card->irm_node->node_id; 244 243 if (!card->irm_node->link_on) { ··· 263 260 wait_for_completion(&bmd.done); 264 261 265 262 if (bmd.rcode == RCODE_GENERATION) { 266 - /* Another bus reset happened. Just return, 267 - * the BM work has been rescheduled. */ 263 + /* 264 + * Another bus reset happened. Just return, 265 + * the BM work has been rescheduled. 266 + */ 268 267 return; 269 268 } 270 269 ··· 276 271 277 272 spin_lock_irqsave(&card->lock, flags); 278 273 if (bmd.rcode != RCODE_COMPLETE) { 279 - /* The lock request failed, maybe the IRM 274 + /* 275 + * The lock request failed, maybe the IRM 280 276 * isn't really IRM capable after all. Let's 281 277 * do a bus reset and pick the local node as 282 - * root, and thus, IRM. */ 278 + * root, and thus, IRM. 279 + */ 283 280 new_root_id = card->local_node->node_id; 284 281 fw_notify("BM lock failed, making local node (%02x) root.\n", 285 282 new_root_id); 286 283 goto pick_me; 287 284 } 288 285 } else if (card->bm_generation != generation) { 289 - /* OK, we weren't BM in the last generation, and it's 286 + /* 287 + * OK, we weren't BM in the last generation, and it's 290 288 * less than 100ms since last bus reset. Reschedule 291 - * this task 100ms from now. */ 289 + * this task 100ms from now. 
290 + */ 292 291 spin_unlock_irqrestore(&card->lock, flags); 293 292 schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10)); 294 293 return; 295 294 } 296 295 297 - /* We're bus manager for this generation, so next step is to 296 + /* 297 + * We're bus manager for this generation, so next step is to 298 298 * make sure we have an active cycle master and do gap count 299 - * optimization. */ 299 + * optimization. 300 + */ 300 301 card->bm_generation = generation; 301 302 302 303 if (root == NULL) { 303 - /* Either link_on is false, or we failed to read the 304 - * config rom. In either case, pick another root. */ 304 + /* 305 + * Either link_on is false, or we failed to read the 306 + * config rom. In either case, pick another root. 307 + */ 305 308 new_root_id = card->local_node->node_id; 306 309 } else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) { 307 - /* If we haven't probed this device yet, bail out now 308 - * and let's try again once that's done. */ 310 + /* 311 + * If we haven't probed this device yet, bail out now 312 + * and let's try again once that's done. 313 + */ 309 314 spin_unlock_irqrestore(&card->lock, flags); 310 315 return; 311 316 } else if (root->config_rom[2] & bib_cmc) { 312 - /* FIXME: I suppose we should set the cmstr bit in the 317 + /* 318 + * FIXME: I suppose we should set the cmstr bit in the 313 319 * STATE_CLEAR register of this node, as described in 314 320 * 1394-1995, 8.4.2.6. Also, send out a force root 315 - * packet for this node. */ 321 + * packet for this node. 322 + */ 316 323 new_root_id = root_id; 317 324 } else { 318 - /* Current root has an active link layer and we 325 + /* 326 + * Current root has an active link layer and we 319 327 * successfully read the config rom, but it's not 320 - * cycle master capable. */ 328 + * cycle master capable. 
329 + */ 321 330 new_root_id = card->local_node->node_id; 322 331 } 323 332 ··· 343 324 else 344 325 gap_count = 63; 345 326 346 - /* Finally, figure out if we should do a reset or not. If we've 327 + /* 328 + * Finally, figure out if we should do a reset or not. If we've 347 329 * done less that 5 resets with the same physical topology and we 348 - * have either a new root or a new gap count setting, let's do it. */ 330 + * have either a new root or a new gap count setting, let's do it. 331 + */ 349 332 350 333 if (card->bm_retries++ < 5 && 351 334 (card->gap_count != gap_count || new_root_id != root_id)) ··· 412 391 PHY_LINK_ACTIVE | PHY_CONTENDER) < 0) 413 392 return -EIO; 414 393 415 - /* The subsystem grabs a reference when the card is added and 416 - * drops it when the driver calls fw_core_remove_card. */ 394 + /* 395 + * The subsystem grabs a reference when the card is added and 396 + * drops it when the driver calls fw_core_remove_card. 397 + */ 417 398 fw_card_get(card); 418 399 419 400 down_write(&card_rwsem); ··· 428 405 EXPORT_SYMBOL(fw_card_add); 429 406 430 407 431 - /* The next few functions implements a dummy driver that use once a 408 + /* 409 + * The next few functions implements a dummy driver that use once a 432 410 * card driver shuts down an fw_card. This allows the driver to 433 411 * cleanly unload, as all IO to the card will be handled by the dummy 434 412 * driver instead of calling into the (possibly) unloaded module. The 435 - * dummy driver just fails all IO. */ 413 + * dummy driver just fails all IO. 414 + */ 436 415 437 416 static int 438 417 dummy_enable(struct fw_card *card, u32 *config_rom, size_t length) ··· 454 429 dummy_set_config_rom(struct fw_card *card, 455 430 u32 *config_rom, size_t length) 456 431 { 457 - /* We take the card out of card_list before setting the dummy 458 - * driver, so this should never get called. 
*/ 432 + /* 433 + * We take the card out of card_list before setting the dummy 434 + * driver, so this should never get called. 435 + */ 459 436 BUG(); 460 437 return -1; 461 438 } ··· 537 510 kfree(card); 538 511 } 539 512 540 - /* An assumption for fw_card_put() is that the card driver allocates 513 + /* 514 + * An assumption for fw_card_put() is that the card driver allocates 541 515 * the fw_card struct with kalloc and that it has been shut down 542 - * before the last ref is dropped. */ 516 + * before the last ref is dropped. 517 + */ 543 518 void 544 519 fw_card_put(struct fw_card *card) 545 520 { ··· 553 524 fw_core_initiate_bus_reset(struct fw_card *card, int short_reset) 554 525 { 555 526 int reg = short_reset ? 5 : 1; 556 - /* The following values happen to be the same bit. However be 557 - * explicit for clarity. */ 558 527 int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET; 559 528 560 529 return card->driver->update_phy_reg(card, reg, 0, bit);
+20 -13
drivers/firewire/fw-cdev.c
··· 1 - /* -*- c-basic-offset: 8 -*- 1 + /* 2 + * Char device for device raw access 2 3 * 3 - * fw-device-cdev.c - Char device for device raw access 4 - * 5 - * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net> 4 + * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> 6 5 * 7 6 * This program is free software; you can redistribute it and/or modify 8 7 * it under the terms of the GNU General Public License as published by ··· 35 36 #include "fw-topology.h" 36 37 #include "fw-device.h" 37 38 38 - /* dequeue_event() just kfree()'s the event, so the event has to be 39 - * the first field in the struct. */ 40 - 41 39 struct client; 42 40 struct client_resource { 43 41 struct list_head link; 44 42 void (*release)(struct client *client, struct client_resource *r); 45 43 u32 handle; 46 44 }; 45 + 46 + /* 47 + * dequeue_event() just kfree()'s the event, so the event has to be 48 + * the first field in the struct. 49 + */ 47 50 48 51 struct event { 49 52 struct { void *data; size_t size; } v[2]; ··· 692 691 if (ctx == NULL || request->handle != 0) 693 692 return -EINVAL; 694 693 695 - /* If the user passes a non-NULL data pointer, has mmap()'ed 694 + /* 695 + * If the user passes a non-NULL data pointer, has mmap()'ed 696 696 * the iso buffer, and the pointer points inside the buffer, 697 697 * we setup the payload pointers accordingly. Otherwise we 698 698 * set them both to 0, which will still let packets with 699 699 * payload_length == 0 through. In other words, if no packets 700 700 * use the indirect payload, the iso buffer need not be mapped 701 - * and the request->data pointer is ignored.*/ 701 + * and the request->data pointer is ignored. 
702 + */ 702 703 703 704 payload = (unsigned long)request->data - client->vm_start; 704 705 buffer_end = client->buffer.page_count << PAGE_SHIFT; ··· 723 720 if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) { 724 721 header_length = u.packet.header_length; 725 722 } else { 726 - /* We require that header_length is a multiple of 727 - * the fixed header size, ctx->header_size */ 723 + /* 724 + * We require that header_length is a multiple of 725 + * the fixed header size, ctx->header_size. 726 + */ 728 727 if (ctx->header_size == 0) { 729 728 if (u.packet.header_length > 0) 730 729 return -EINVAL; ··· 913 908 list_for_each_entry_safe(r, next_r, &client->resource_list, link) 914 909 r->release(client, r); 915 910 916 - /* FIXME: We should wait for the async tasklets to stop 917 - * running before freeing the memory. */ 911 + /* 912 + * FIXME: We should wait for the async tasklets to stop 913 + * running before freeing the memory. 914 + */ 918 915 919 916 list_for_each_entry_safe(e, next_e, &client->event_list, link) 920 917 kfree(e);
+66 -35
drivers/firewire/fw-device.c
··· 1 - /* -*- c-basic-offset: 8 -*- 2 - * 3 - * fw-device.c - Device probing and sysfs code. 1 + /* 2 + * Device probing and sysfs code. 4 3 * 5 4 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net> 6 5 * ··· 173 174 struct fw_device *device = fw_device(dev); 174 175 unsigned long flags; 175 176 176 - /* Take the card lock so we don't set this to NULL while a 177 - * FW_NODE_UPDATED callback is being handled. */ 177 + /* 178 + * Take the card lock so we don't set this to NULL while a 179 + * FW_NODE_UPDATED callback is being handled. 180 + */ 178 181 spin_lock_irqsave(&device->card->lock, flags); 179 182 device->node->data = NULL; 180 183 spin_unlock_irqrestore(&device->card->lock, flags); ··· 422 421 for (i = 0; i < 5; i++) { 423 422 if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE) 424 423 return -1; 425 - /* As per IEEE1212 7.2, during power-up, devices can 424 + /* 425 + * As per IEEE1212 7.2, during power-up, devices can 426 426 * reply with a 0 for the first quadlet of the config 427 427 * rom to indicate that they are booting (for example, 428 428 * if the firmware is on the disk of a external 429 429 * harddisk). In that case we just fail, and the 430 - * retry mechanism will try again later. */ 430 + * retry mechanism will try again later. 431 + */ 431 432 if (i == 0 && rom[i] == 0) 432 433 return -1; 433 434 } 434 435 435 - /* Now parse the config rom. The config rom is a recursive 436 + /* 437 + * Now parse the config rom. The config rom is a recursive 436 438 * directory structure so we parse it using a stack of 437 439 * references to the blocks that make up the structure. We 438 440 * push a reference to the root directory on the stack to 439 - * start things off. */ 441 + * start things off. 442 + */ 440 443 length = i; 441 444 sp = 0; 442 445 stack[sp++] = 0xc0000005; 443 446 while (sp > 0) { 444 - /* Pop the next block reference of the stack. The 447 + /* 448 + * Pop the next block reference of the stack. 
The 445 449 * lower 24 bits is the offset into the config rom, 446 450 * the upper 8 bits are the type of the reference the 447 - * block. */ 451 + * block. 452 + */ 448 453 key = stack[--sp]; 449 454 i = key & 0xffffff; 450 455 if (i >= ARRAY_SIZE(rom)) 451 - /* The reference points outside the standard 452 - * config rom area, something's fishy. */ 456 + /* 457 + * The reference points outside the standard 458 + * config rom area, something's fishy. 459 + */ 453 460 return -1; 454 461 455 462 /* Read header quadlet for the block to get the length. */ ··· 466 457 end = i + (rom[i] >> 16) + 1; 467 458 i++; 468 459 if (end > ARRAY_SIZE(rom)) 469 - /* This block extends outside standard config 460 + /* 461 + * This block extends outside standard config 470 462 * area (and the array we're reading it 471 463 * into). That's broken, so ignore this 472 - * device. */ 464 + * device. 465 + */ 473 466 return -1; 474 467 475 - /* Now read in the block. If this is a directory 468 + /* 469 + * Now read in the block. If this is a directory 476 470 * block, check the entries as we read them to see if 477 - * it references another block, and push it in that case. */ 471 + * it references another block, and push it in that case. 472 + */ 478 473 while (i < end) { 479 474 if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE) 480 475 return -1; ··· 529 516 if (key != (CSR_UNIT | CSR_DIRECTORY)) 530 517 continue; 531 518 532 - /* Get the address of the unit directory and try to 533 - * match the drivers id_tables against it. */ 519 + /* 520 + * Get the address of the unit directory and try to 521 + * match the drivers id_tables against it. 
522 + */ 534 523 unit = kzalloc(sizeof *unit, GFP_KERNEL); 535 524 if (unit == NULL) { 536 525 fw_error("failed to allocate memory for unit\n"); ··· 600 585 .release = fw_device_release, 601 586 }; 602 587 603 - /* These defines control the retry behavior for reading the config 588 + /* 589 + * These defines control the retry behavior for reading the config 604 590 * rom. It shouldn't be necessary to tweak these; if the device 605 591 * doesn't respond to a config rom read within 10 seconds, it's not 606 592 * going to respond at all. As for the initial delay, a lot of 607 593 * devices will be able to respond within half a second after bus 608 594 * reset. On the other hand, it's not really worth being more 609 595 * aggressive than that, since it scales pretty well; if 10 devices 610 - * are plugged in, they're all getting read within one second. */ 596 + * are plugged in, they're all getting read within one second. 597 + */ 611 598 612 599 #define MAX_RETRIES 10 613 600 #define RETRY_DELAY (3 * HZ) ··· 621 604 container_of(work, struct fw_device, work.work); 622 605 int minor, err; 623 606 624 - /* All failure paths here set node->data to NULL, so that we 607 + /* 608 + * All failure paths here set node->data to NULL, so that we 625 609 * don't try to do device_for_each_child() on a kfree()'d 626 - * device. */ 610 + * device. 611 + */ 627 612 628 613 if (read_bus_info_block(device) < 0) { 629 614 if (device->config_rom_retries < MAX_RETRIES) { ··· 666 647 667 648 create_units(device); 668 649 669 - /* Transition the device to running state. If it got pulled 650 + /* 651 + * Transition the device to running state. If it got pulled 670 652 * out from under us while we did the intialization work, we 671 653 * have to shut down the device again here. Normally, though, 672 654 * fw_node_event will be responsible for shutting it down when 673 655 * necessary. 
We have to use the atomic cmpxchg here to avoid 674 656 * racing with the FW_NODE_DESTROYED case in 675 - * fw_node_event(). */ 657 + * fw_node_event(). 658 + */ 676 659 if (atomic_cmpxchg(&device->state, 677 660 FW_DEVICE_INITIALIZING, 678 661 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) ··· 683 662 fw_notify("created new fw device %s (%d config rom retries)\n", 684 663 device->device.bus_id, device->config_rom_retries); 685 664 686 - /* Reschedule the IRM work if we just finished reading the 665 + /* 666 + * Reschedule the IRM work if we just finished reading the 687 667 * root node config rom. If this races with a bus reset we 688 668 * just end up running the IRM work a couple of extra times - 689 - * pretty harmless. */ 669 + * pretty harmless. 670 + */ 690 671 if (device->node == device->card->root_node) 691 672 schedule_delayed_work(&device->card->work, 0); 692 673 ··· 739 716 if (device == NULL) 740 717 break; 741 718 742 - /* Do minimal intialization of the device here, the 719 + /* 720 + * Do minimal intialization of the device here, the 743 721 * rest will happen in fw_device_init(). We need the 744 722 * card and node so we can read the config rom and we 745 723 * need to do device_initialize() now so 746 724 * device_for_each_child() in FW_NODE_UPDATED is 747 - * doesn't freak out. */ 725 + * doesn't freak out. 726 + */ 748 727 device_initialize(&device->device); 749 728 atomic_set(&device->state, FW_DEVICE_INITIALIZING); 750 729 device->card = fw_card_get(card); ··· 755 730 device->generation = card->generation; 756 731 INIT_LIST_HEAD(&device->client_list); 757 732 758 - /* Set the node data to point back to this device so 733 + /* 734 + * Set the node data to point back to this device so 759 735 * FW_NODE_UPDATED callbacks can update the node_id 760 - * and generation for the device. */ 736 + * and generation for the device. 
737 + */ 761 738 node->data = device; 762 739 763 - /* Many devices are slow to respond after bus resets, 740 + /* 741 + * Many devices are slow to respond after bus resets, 764 742 * especially if they are bus powered and go through 765 743 * power-up after getting plugged in. We schedule the 766 - * first config rom scan half a second after bus reset. */ 744 + * first config rom scan half a second after bus reset. 745 + */ 767 746 INIT_DELAYED_WORK(&device->work, fw_device_init); 768 747 schedule_delayed_work(&device->work, INITIAL_DELAY); 769 748 break; ··· 790 761 if (!node->data) 791 762 break; 792 763 793 - /* Destroy the device associated with the node. There 764 + /* 765 + * Destroy the device associated with the node. There 794 766 * are two cases here: either the device is fully 795 767 * initialized (FW_DEVICE_RUNNING) or we're in the 796 768 * process of reading its config rom ··· 800 770 * full fw_device_shutdown(). If not, there's work 801 771 * scheduled to read it's config rom, and we just put 802 772 * the device in shutdown state to have that code fail 803 - * to create the device. */ 773 + * to create the device. 774 + */ 804 775 device = node->data; 805 776 if (atomic_xchg(&device->state, 806 777 FW_DEVICE_SHUTDOWN) == FW_DEVICE_RUNNING) {
+1 -4
drivers/firewire/fw-device.h
··· 1 - /* -*- c-basic-offset: 8 -*- 2 - * 3 - * fw-device.h - Device probing and sysfs code. 4 - * 1 + /* 5 2 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net> 6 3 * 7 4 * This program is free software; you can redistribute it and/or modify
+2 -2
drivers/firewire/fw-iso.c
··· 1 - /* -*- c-basic-offset: 8 -*- 1 + /* 2 + * Isochronous IO functionality 2 3 * 3 - * fw-iso.c - Isochronous IO 4 4 * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net> 5 5 * 6 6 * This program is free software; you can redistribute it and/or modify
+94 -50
drivers/firewire/fw-ohci.c
··· 1 - /* -*- c-basic-offset: 8 -*- 1 + /* 2 + * Driver for OHCI 1394 controllers 2 3 * 3 - * fw-ohci.c - Driver for OHCI 1394 boards 4 4 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net> 5 5 * 6 6 * This program is free software; you can redistribute it and/or modify ··· 141 141 int request_generation; 142 142 u32 bus_seconds; 143 143 144 - /* Spinlock for accessing fw_ohci data. Never call out of 145 - * this driver with this lock held. */ 144 + /* 145 + * Spinlock for accessing fw_ohci data. Never call out of 146 + * this driver with this lock held. 147 + */ 146 148 spinlock_t lock; 147 149 u32 self_id_buffer[512]; 148 150 ··· 330 328 p.timestamp = status & 0xffff; 331 329 p.generation = ohci->request_generation; 332 330 333 - /* The OHCI bus reset handler synthesizes a phy packet with 331 + /* 332 + * The OHCI bus reset handler synthesizes a phy packet with 334 333 * the new generation number when a bus reset happens (see 335 334 * section 8.4.2.3). This helps us determine when a request 336 335 * was received and make sure we send the response in the same 337 336 * generation. We only need this for requests; for responses 338 337 * we use the unique tlabel for finding the matching 339 - * request. */ 338 + * request. 339 + */ 340 340 341 341 if (p.ack + 16 == 0x09) 342 342 ohci->request_generation = (buffer[2] >> 16) & 0xff; ··· 364 360 if (d->res_count == 0) { 365 361 size_t size, rest, offset; 366 362 367 - /* This descriptor is finished and we may have a 363 + /* 364 + * This descriptor is finished and we may have a 368 365 * packet split across this and the next buffer. We 369 - * reuse the page for reassembling the split packet. */ 366 + * reuse the page for reassembling the split packet. 
367 + */ 370 368 371 369 offset = offsetof(struct ar_buffer, data); 372 370 dma_unmap_single(ohci->card.device, ··· 479 473 ctx->tail_descriptor = ctx->buffer; 480 474 ctx->tail_descriptor_last = ctx->buffer; 481 475 482 - /* We put a dummy descriptor in the buffer that has a NULL 476 + /* 477 + * We put a dummy descriptor in the buffer that has a NULL 483 478 * branch address and looks like it's been sent. That way we 484 479 * have a descriptor to append DMA programs to. Also, the 485 480 * ring buffer invariant is that it always has at least one 486 - * element so that head == tail means buffer full. */ 481 + * element so that head == tail means buffer full. 482 + */ 487 483 488 484 memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor); 489 485 ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last); ··· 583 575 struct fw_packet *packet; 584 576 }; 585 577 586 - /* This function apppends a packet to the DMA queue for transmission. 578 + /* 579 + * This function apppends a packet to the DMA queue for transmission. 587 580 * Must always be called with the ochi->lock held to ensure proper 588 - * generation handling and locking around packet queue manipulation. */ 581 + * generation handling and locking around packet queue manipulation. 582 + */ 589 583 static int 590 584 at_context_queue_packet(struct context *ctx, struct fw_packet *packet) 591 585 { ··· 608 598 d[0].control = cpu_to_le16(descriptor_key_immediate); 609 599 d[0].res_count = cpu_to_le16(packet->timestamp); 610 600 611 - /* The DMA format for asyncronous link packets is different 601 + /* 602 + * The DMA format for asyncronous link packets is different 612 603 * from the IEEE1394 layout, so shift the fields around 613 604 * accordingly. If header_length is 8, it's a PHY packet, to 614 - * which we need to prepend an extra quadlet. */ 605 + * which we need to prepend an extra quadlet. 
606 + */ 615 607 616 608 header = (__le32 *) &d[1]; 617 609 if (packet->header_length > 8) { ··· 715 703 break; 716 704 717 705 case OHCI1394_evt_flushed: 718 - /* The packet was flushed should give same error as 719 - * when we try to use a stale generation count. */ 706 + /* 707 + * The packet was flushed should give same error as 708 + * when we try to use a stale generation count. 709 + */ 720 710 packet->ack = RCODE_GENERATION; 721 711 break; 722 712 723 713 case OHCI1394_evt_missing_ack: 724 - /* Using a valid (current) generation count, but the 725 - * node is not on the bus or not sending acks. */ 714 + /* 715 + * Using a valid (current) generation count, but the 716 + * node is not on the bus or not sending acks. 717 + */ 726 718 packet->ack = RCODE_NO_ACK; 727 719 break; 728 720 ··· 903 887 } 904 888 ohci->node_id = reg & 0xffff; 905 889 906 - /* The count in the SelfIDCount register is the number of 890 + /* 891 + * The count in the SelfIDCount register is the number of 907 892 * bytes in the self ID receive buffer. Since we also receive 908 893 * the inverted quadlets and a header quadlet, we shift one 909 - * bit extra to get the actual number of self IDs. */ 894 + * bit extra to get the actual number of self IDs. 895 + */ 910 896 911 897 self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff; 912 898 generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; ··· 919 901 ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]); 920 902 } 921 903 922 - /* Check the consistency of the self IDs we just read. The 904 + /* 905 + * Check the consistency of the self IDs we just read. The 923 906 * problem we face is that a new bus reset can start while we 924 907 * read out the self IDs from the DMA buffer. If this happens, 925 908 * the DMA buffer will be overwritten with new self IDs and we ··· 930 911 * self IDs in the buffer before reading them out and compare 931 912 * it to the current generation after reading them out. 
If 932 913 * the two generations match we know we have a consistent set 933 - * of self IDs. */ 914 + * of self IDs. 915 + */ 934 916 935 917 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; 936 918 if (new_generation != generation) { ··· 948 928 context_stop(&ohci->at_response_ctx); 949 929 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); 950 930 951 - /* This next bit is unrelated to the AT context stuff but we 931 + /* 932 + * This next bit is unrelated to the AT context stuff but we 952 933 * have to do it under the spinlock also. If a new config rom 953 934 * was set up before this reset, the old one is now no longer 954 935 * in use and we can free it. Update the config rom pointers 955 936 * to point to the current config rom and clear the 956 - * next_config_rom pointer so a new udpate can take place. */ 937 + * next_config_rom pointer so a new udpate can take place. 938 + */ 957 939 958 940 if (ohci->next_config_rom != NULL) { 959 941 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, ··· 964 942 ohci->config_rom_bus = ohci->next_config_rom_bus; 965 943 ohci->next_config_rom = NULL; 966 944 967 - /* Restore config_rom image and manually update 945 + /* 946 + * Restore config_rom image and manually update 968 947 * config_rom registers. Writing the header quadlet 969 948 * will indicate that the config rom is ready, so we 970 - * do that last. */ 949 + * do that last. 950 + */ 971 951 reg_write(ohci, OHCI1394_BusOptions, 972 952 be32_to_cpu(ohci->config_rom[2])); 973 953 ohci->config_rom[0] = cpu_to_be32(ohci->next_header); ··· 1042 1018 struct fw_ohci *ohci = fw_ohci(card); 1043 1019 struct pci_dev *dev = to_pci_dev(card->device); 1044 1020 1045 - /* When the link is not yet enabled, the atomic config rom 1021 + /* 1022 + * When the link is not yet enabled, the atomic config rom 1046 1023 * update mechanism described below in ohci_set_config_rom() 1047 1024 * is not active. 
We have to update ConfigRomHeader and 1048 1025 * BusOptions manually, and the write to ConfigROMmap takes ··· 1092 1067 OHCI1394_HCControl_BIBimageValid); 1093 1068 flush_writes(ohci); 1094 1069 1095 - /* We are ready to go, initiate bus reset to finish the 1096 - * initialization. */ 1070 + /* 1071 + * We are ready to go, initiate bus reset to finish the 1072 + * initialization. 1073 + */ 1097 1074 1098 1075 fw_core_initiate_bus_reset(&ohci->card, 1); 1099 1076 ··· 1113 1086 1114 1087 ohci = fw_ohci(card); 1115 1088 1116 - /* When the OHCI controller is enabled, the config rom update 1089 + /* 1090 + * When the OHCI controller is enabled, the config rom update 1117 1091 * mechanism is a bit tricky, but easy enough to use. See 1118 1092 * section 5.5.6 in the OHCI specification. 1119 1093 * ··· 1169 1141 1170 1142 spin_unlock_irqrestore(&ohci->lock, flags); 1171 1143 1172 - /* Now initiate a bus reset to have the changes take 1144 + /* 1145 + * Now initiate a bus reset to have the changes take 1173 1146 * effect. We clean up the old config rom memory and DMA 1174 1147 * mappings in the bus reset tasklet, since the OHCI 1175 1148 * controller could need to access it before the bus reset 1176 - * takes effect. */ 1149 + * takes effect. 1150 + */ 1177 1151 if (retval == 0) 1178 1152 fw_core_initiate_bus_reset(&ohci->card, 1); 1179 1153 ··· 1226 1196 unsigned long flags; 1227 1197 int n, retval = 0; 1228 1198 1229 - /* FIXME: Make sure this bitmask is cleared when we clear the busReset 1230 - * interrupt bit. Clear physReqResourceAllBuses on bus reset. */ 1199 + /* 1200 + * FIXME: Make sure this bitmask is cleared when we clear the busReset 1201 + * interrupt bit. Clear physReqResourceAllBuses on bus reset. 1202 + */ 1231 1203 1232 1204 spin_lock_irqsave(&ohci->lock, flags); 1233 1205 ··· 1238 1206 goto out; 1239 1207 } 1240 1208 1241 - /* NOTE, if the node ID contains a non-local bus ID, physical DMA is 1242 - * enabled for _all_ nodes on remote buses. 
*/ 1209 + /* 1210 + * Note, if the node ID contains a non-local bus ID, physical DMA is 1211 + * enabled for _all_ nodes on remote buses. 1212 + */ 1243 1213 1244 1214 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63; 1245 1215 if (n < 32) ··· 1291 1257 p = db + 1; 1292 1258 end = p + header_length; 1293 1259 while (p < end && i + ctx->base.header_size <= PAGE_SIZE) { 1294 - /* The iso header is byteswapped to little endian by 1260 + /* 1261 + * The iso header is byteswapped to little endian by 1295 1262 * the controller, but the remaining header quadlets 1296 1263 * are big endian. We want to present all the headers 1297 1264 * as big endian, so we have to swap the first 1298 - * quadlet. */ 1265 + * quadlet. 1266 + */ 1299 1267 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); 1300 1268 memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4); 1301 1269 i += ctx->base.header_size; ··· 1493 1457 u32 payload_index, payload_end_index, next_page_index; 1494 1458 int page, end_page, i, length, offset; 1495 1459 1496 - /* FIXME: Cycle lost behavior should be configurable: lose 1497 - * packet, retransmit or terminate.. */ 1460 + /* 1461 + * FIXME: Cycle lost behavior should be configurable: lose 1462 + * packet, retransmit or terminate.. 1463 + */ 1498 1464 1499 1465 p = packet; 1500 1466 payload_index = payload; ··· 1591 1553 u32 z, header_z, length, rest; 1592 1554 int page, offset, packet_count, header_size; 1593 1555 1594 - /* FIXME: Cycle lost behavior should be configurable: lose 1595 - * packet, retransmit or terminate.. */ 1556 + /* 1557 + * FIXME: Cycle lost behavior should be configurable: lose 1558 + * packet, retransmit or terminate.. 1559 + */ 1596 1560 1597 1561 if (packet->skip) { 1598 1562 d = context_get_descriptors(&ctx->context, 2, &d_bus); ··· 1612 1572 p = packet; 1613 1573 z = 2; 1614 1574 1615 - /* The OHCI controller puts the status word in the header 1616 - * buffer too, so we need 4 extra bytes per packet. 
*/ 1575 + /* 1576 + * The OHCI controller puts the status word in the header 1577 + * buffer too, so we need 4 extra bytes per packet. 1578 + */ 1617 1579 packet_count = p->header_length / ctx->base.header_size; 1618 1580 header_size = packet_count * (ctx->base.header_size + 4); 1619 1581 ··· 1715 1673 return -EBUSY; 1716 1674 } 1717 1675 1718 - /* ---------- pci subsystem interface ---------- */ 1719 - 1720 1676 enum { 1721 1677 CLEANUP_SELF_ID, 1722 1678 CLEANUP_REGISTERS, ··· 1793 1753 return cleanup(ohci, CLEANUP_REGISTERS, -EBUSY); 1794 1754 } 1795 1755 1796 - /* Now enable LPS, which we need in order to start accessing 1756 + /* 1757 + * Now enable LPS, which we need in order to start accessing 1797 1758 * most of the registers. In fact, on some cards (ALI M5251), 1798 1759 * accessing registers in the SClk domain without LPS enabled 1799 1760 * will lock up the machine. Wait 50msec to make sure we have 1800 - * full link enabled. */ 1761 + * full link enabled. 1762 + */ 1801 1763 reg_write(ohci, OHCI1394_HCControlSet, 1802 1764 OHCI1394_HCControl_LPS | 1803 1765 OHCI1394_HCControl_postedWriteEnable); ··· 1896 1854 flush_writes(ohci); 1897 1855 fw_core_remove_card(&ohci->card); 1898 1856 1899 - /* FIXME: Fail all pending packets here, now that the upper 1900 - * layers can't queue any more. */ 1857 + /* 1858 + * FIXME: Fail all pending packets here, now that the upper 1859 + * layers can't queue any more. 1860 + */ 1901 1861 1902 1862 software_reset(ohci); 1903 1863 free_irq(dev->irq, ohci);
+66 -35
drivers/firewire/fw-sbp2.c
··· 1 - /* -*- c-basic-offset: 8 -*- 2 - * fw-spb2.c -- SBP2 driver (SCSI over IEEE1394) 1 + /* 2 + * SBP2 driver (SCSI over IEEE1394) 3 3 * 4 4 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> 5 5 * ··· 18 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 19 */ 20 20 21 - /* The basic structure of this driver is based the old storage driver, 21 + /* 22 + * The basic structure of this driver is based on the old storage driver, 22 23 * drivers/ieee1394/sbp2.c, originally written by 23 24 * James Goodwin <jamesg@filanet.com> 24 25 * with later contributions and ongoing maintenance from ··· 61 60 u32 workarounds; 62 61 int login_id; 63 62 64 - /* We cache these addresses and only update them once we've 63 + /* 64 + * We cache these addresses and only update them once we've 65 65 * logged in or reconnected to the sbp2 device. That way, any 66 66 * IO to the device will automatically fail and get retried if 67 67 * it happens in a window where the device is not ready to 68 - * handle it (e.g. after a bus reset but before we reconnect). */ 68 + * handle it (e.g. after a bus reset but before we reconnect). 69 + */ 69 70 int node_id; 70 71 int address_high; 71 72 int generation; ··· 242 239 .model = ~0, 243 240 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, 244 241 }, 245 - /* There are iPods (2nd gen, 3rd gen) with model_id == 0, but 242 + 243 + /* 244 + * There are iPods (2nd gen, 3rd gen) with model_id == 0, but 246 245 * these iPods do not feature the read_capacity bug according 247 246 * to one report. Read_capacity behaviour as well as model_id 248 - * could change due to Apple-supplied firmware updates though. */ 247 + * could change due to Apple-supplied firmware updates though. 248 + */ 249 + 249 250 /* iPod 4th generation. 
*/ { 250 251 .firmware_revision = 0x0a2700, 251 252 .model = 0x000021, ··· 405 398 if (orb == NULL) 406 399 return -ENOMEM; 407 400 408 - /* The sbp2 device is going to send a block read request to 409 - * read out the request from host memory, so map it for 410 - * dma. */ 401 + /* 402 + * The sbp2 device is going to send a block read request to 403 + * read out the request from host memory, so map it for dma. 404 + */ 411 405 orb->base.request_bus = 412 406 dma_map_single(device->card->device, &orb->request, 413 407 sizeof orb->request, DMA_TO_DEVICE); ··· 434 426 orb->request.status_fifo.high = sd->address_handler.offset >> 32; 435 427 orb->request.status_fifo.low = sd->address_handler.offset; 436 428 437 - /* FIXME: Yeah, ok this isn't elegant, we hardwire exclusive 429 + /* 430 + * FIXME: Yeah, ok this isn't elegant, we hardwire exclusive 438 431 * login and 1 second reconnect time. The reconnect setting 439 - * is probably fine, but the exclusive login should be an 440 - * option. */ 432 + * is probably fine, but the exclusive login should be an option. 433 + */ 441 434 if (function == SBP2_LOGIN_REQUEST) { 442 435 orb->request.misc |= 443 436 management_orb_exclusive | ··· 601 592 sbp2_send_management_orb(unit, sd->node_id, sd->generation, 602 593 SBP2_LOGOUT_REQUEST, sd->login_id, 603 594 NULL); 604 - /* Set this back to sbp2_login so we fall back and 605 - * retry login on bus reset. */ 595 + /* 596 + * Set this back to sbp2_login so we fall back and 597 + * retry login on bus reset. 598 + */ 606 599 PREPARE_DELAYED_WORK(&sd->work, sbp2_login); 607 600 } 608 601 kref_put(&sd->kref, release_sbp2_device); ··· 644 633 return -EBUSY; 645 634 } 646 635 647 - /* Scan unit directory to get management agent address, 636 + /* 637 + * Scan unit directory to get management agent address, 648 638 * firmware revison and model. Initialize firmware_revision 649 - * and model to values that wont match anything in our table. 
*/ 639 + * and model to values that wont match anything in our table. 640 + */ 650 641 firmware_revision = 0xff000000; 651 642 model = 0xff000000; 652 643 fw_csr_iterator_init(&ci, unit->directory); ··· 686 673 687 674 get_device(&unit->device); 688 675 689 - /* We schedule work to do the login so we can easily 676 + /* 677 + * We schedule work to do the login so we can easily 690 678 * reschedule retries. Always get the ref before scheduling 691 - * work.*/ 679 + * work. 680 + */ 692 681 INIT_DELAYED_WORK(&sd->work, sbp2_login); 693 682 if (schedule_delayed_work(&sd->work, 0)) 694 683 kref_get(&sd->kref); ··· 849 834 result = sbp2_status_to_sense_data(status_get_data(*status), 850 835 orb->cmd->sense_buffer); 851 836 } else { 852 - /* If the orb completes with status == NULL, something 837 + /* 838 + * If the orb completes with status == NULL, something 853 839 * went wrong, typically a bus reset happened mid-orb 854 - * or when sending the write (less likely). */ 840 + * or when sending the write (less likely). 841 + */ 855 842 result = DID_BUS_BUSY << 16; 856 843 } 857 844 ··· 895 878 count = dma_map_sg(device->card->device, sg, orb->cmd->use_sg, 896 879 orb->cmd->sc_data_direction); 897 880 898 - /* Handle the special case where there is only one element in 881 + /* 882 + * Handle the special case where there is only one element in 899 883 * the scatter list by converting it to an immediate block 900 884 * request. This is also a workaround for broken devices such 901 885 * as the second generation iPod which doesn't support page 902 - * tables. */ 886 + * tables. 887 + */ 903 888 if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) { 904 889 orb->request.data_descriptor.high = sd->address_high; 905 890 orb->request.data_descriptor.low = sg_dma_address(sg); ··· 910 891 return; 911 892 } 912 893 913 - /* Convert the scatterlist to an sbp2 page table. If any 914 - * scatterlist entries are too big for sbp2 we split the as we go. 
*/ 894 + /* 895 + * Convert the scatterlist to an sbp2 page table. If any 896 + * scatterlist entries are too big for sbp2 we split the as we go. 897 + */ 915 898 for (i = 0, j = 0; i < count; i++) { 916 899 sg_len = sg_dma_len(sg + i); 917 900 sg_addr = sg_dma_address(sg + i); ··· 929 908 930 909 size = sizeof orb->page_table[0] * j; 931 910 932 - /* The data_descriptor pointer is the one case where we need 911 + /* 912 + * The data_descriptor pointer is the one case where we need 933 913 * to fill in the node ID part of the address. All other 934 914 * pointers assume that the data referenced reside on the 935 915 * initiator (i.e. us), but data_descriptor can refer to data 936 - * on other nodes so we need to put our ID in descriptor.high. */ 916 + * on other nodes so we need to put our ID in descriptor.high. 917 + */ 937 918 938 919 orb->page_table_bus = 939 920 dma_map_single(device->card->device, orb->page_table, ··· 956 933 struct fw_device *device = fw_device(unit->device.parent); 957 934 struct sbp2_device *sd = unit->device.driver_data; 958 935 959 - /* As for map_scatterlist, we need to fill in the high bits of 960 - * the data_descriptor pointer. */ 936 + /* 937 + * As for map_scatterlist, we need to fill in the high bits of 938 + * the data_descriptor pointer. 939 + */ 961 940 962 941 orb->request_buffer_bus = 963 942 dma_map_single(device->card->device, ··· 981 956 struct sbp2_device *sd = unit->device.driver_data; 982 957 struct sbp2_command_orb *orb; 983 958 984 - /* Bidirectional commands are not yet implemented, and unknown 985 - * transfer direction not handled. */ 959 + /* 960 + * Bidirectional commands are not yet implemented, and unknown 961 + * transfer direction not handled. 
962 + */ 986 963 if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) { 987 964 fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command"); 988 965 goto fail_alloc; ··· 1010 983 1011 984 orb->request.next.high = SBP2_ORB_NULL; 1012 985 orb->request.next.low = 0x0; 1013 - /* At speed 100 we can do 512 bytes per packet, at speed 200, 986 + /* 987 + * At speed 100 we can do 512 bytes per packet, at speed 200, 1014 988 * 1024 bytes per packet etc. The SBP-2 max_payload field 1015 989 * specifies the max payload size as 2 ^ (max_payload + 2), so 1016 - * if we set this to max_speed + 7, we get the right value. */ 990 + * if we set this to max_speed + 7, we get the right value. 991 + */ 1017 992 orb->request.misc = 1018 993 command_orb_max_payload(device->node->max_speed + 7) | 1019 994 command_orb_speed(device->node->max_speed) | ··· 1031 1002 if (cmd->use_sg) { 1032 1003 sbp2_command_orb_map_scatterlist(orb); 1033 1004 } else if (cmd->request_bufflen > SBP2_MAX_SG_ELEMENT_LENGTH) { 1034 - /* FIXME: Need to split this into a sg list... but 1005 + /* 1006 + * FIXME: Need to split this into a sg list... but 1035 1007 * could we get the scsi or blk layer to do that by 1036 - * reporting our max supported block size? */ 1008 + * reporting our max supported block size? 1009 + */ 1037 1010 fw_error("command > 64k\n"); 1038 1011 goto fail_bufflen; 1039 1012 } else if (cmd->request_bufflen > 0) {
+42 -23
drivers/firewire/fw-topology.c
··· 1 - /* -*- c-basic-offset: 8 -*- 2 - * 3 - * fw-topology.c - Incremental bus scan, based on bus topology 1 + /* 2 + * Incremental bus scan, based on bus topology 4 3 * 5 4 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net> 6 5 * ··· 68 69 sid++; 69 70 q = *sid; 70 71 71 - /* Check that the extra packets actually are 72 + /* 73 + * Check that the extra packets actually are 72 74 * extended self ID packets and that the 73 75 * sequence numbers in the extended self ID 74 - * packets increase as expected. */ 76 + * packets increase as expected. 77 + */ 75 78 76 79 if (!self_id_extended(q) || 77 80 seq != self_id_ext_sequence(q)) ··· 114 113 return node; 115 114 } 116 115 117 - /* Compute the maximum hop count for this node and it's children. The 116 + /* 117 + * Compute the maximum hop count for this node and it's children. The 118 118 * maximum hop count is the maximum number of connections between any 119 119 * two nodes in the subtree rooted at this node. We need this for 120 120 * setting the gap count. As we build the tree bottom up in ··· 204 202 return NULL; 205 203 } 206 204 207 - /* Seek back from the top of our stack to find the 208 - * start of the child nodes for this node. */ 205 + /* 206 + * Seek back from the top of our stack to find the 207 + * start of the child nodes for this node. 208 + */ 209 209 for (i = 0, h = &stack; i < child_port_count; i++) 210 210 h = h->prev; 211 211 child = fw_node(h); ··· 234 230 for (i = 0; i < port_count; i++) { 235 231 switch (get_port_type(sid, i)) { 236 232 case SELFID_PORT_PARENT: 237 - /* Who's your daddy? We dont know the 233 + /* 234 + * Who's your daddy? We dont know the 238 235 * parent node at this time, so we 239 236 * temporarily abuse node->color for 240 237 * remembering the entry in the ··· 250 245 251 246 case SELFID_PORT_CHILD: 252 247 node->ports[i].node = child; 253 - /* Fix up parent reference for this 254 - * child node. 
*/ 248 + /* 249 + * Fix up parent reference for this 250 + * child node. 251 + */ 255 252 child->ports[child->color].node = node; 256 253 child->color = card->color; 257 254 child = fw_node(child->link.next); ··· 261 254 } 262 255 } 263 256 264 - /* Check that the node reports exactly one parent 257 + /* 258 + * Check that the node reports exactly one parent 265 259 * port, except for the root, which of course should 266 - * have no parents. */ 260 + * have no parents. 261 + */ 267 262 if ((next_sid == end && parent_count != 0) || 268 263 (next_sid < end && parent_count != 1)) { 269 264 fw_error("Parent port inconsistency for node %d: " ··· 278 269 list_add_tail(&node->link, &stack); 279 270 stack_depth += 1 - child_port_count; 280 271 281 - /* If all PHYs does not report the same gap count 272 + /* 273 + * If all PHYs does not report the same gap count 282 274 * setting, we fall back to 63 which will force a gap 283 - * count reconfiguration and a reset. */ 275 + * count reconfiguration and a reset. 276 + */ 284 277 if (self_id_gap_count(q) != gap_count) 285 278 gap_count = 63; 286 279 ··· 438 427 439 428 for (i = 0; i < node0->port_count; i++) { 440 429 if (node0->ports[i].node && node1->ports[i].node) { 441 - /* This port didn't change, queue the 430 + /* 431 + * This port didn't change, queue the 442 432 * connected node for further 443 - * investigation. */ 433 + * investigation. 434 + */ 444 435 if (node0->ports[i].node->color == card->color) 445 436 continue; 446 437 list_add_tail(&node0->ports[i].node->link, ··· 450 437 list_add_tail(&node1->ports[i].node->link, 451 438 &list1); 452 439 } else if (node0->ports[i].node) { 453 - /* The nodes connected here were 440 + /* 441 + * The nodes connected here were 454 442 * unplugged; unref the lost nodes and 455 443 * queue FW_NODE_LOST callbacks for 456 - * them. */ 444 + * them. 
445 + */ 457 446 458 447 for_each_fw_node(card, node0->ports[i].node, 459 448 report_lost_node); 460 449 node0->ports[i].node = NULL; 461 450 } else if (node1->ports[i].node) { 462 - /* One or more node were connected to 451 + /* 452 + * One or more node were connected to 463 453 * this port. Move the new nodes into 464 454 * the tree and queue FW_NODE_CREATED 465 - * callbacks for them. */ 455 + * callbacks for them. 456 + */ 466 457 move_tree(node0, node1, i); 467 458 for_each_fw_node(card, node0->ports[i].node, 468 459 report_found_node); ··· 503 486 504 487 spin_lock_irqsave(&card->lock, flags); 505 488 506 - /* If the new topology has a different self_id_count the topology 489 + /* 490 + * If the new topology has a different self_id_count the topology 507 491 * changed, either nodes were added or removed. In that case we 508 - * reset the IRM reset counter. */ 492 + * reset the IRM reset counter. 493 + */ 509 494 if (card->self_id_count != self_id_count) 510 495 card->bm_retries = 0; 511 496
+1 -4
drivers/firewire/fw-topology.h
··· 1 - /* -*- c-basic-offset: 8 -*- 2 - * 3 - * fw-topology.h -- Incremental bus scan, based on bus topology 4 - * 1 + /* 5 2 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net> 6 3 * 7 4 * This program is free software; you can redistribute it and/or modify
+46 -25
drivers/firewire/fw-transaction.c
··· 1 - /* -*- c-basic-offset: 8 -*- 2 - * 3 - * fw-transaction.c - core IEEE1394 transaction logic 1 + /* 2 + * Core IEEE1394 transaction logic 4 3 * 5 4 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net> 6 5 * ··· 84 85 return -ENOENT; 85 86 } 86 87 87 - /* Only valid for transactions that are potentially pending (ie have 88 - * been sent). */ 88 + /* 89 + * Only valid for transactions that are potentially pending (ie have 90 + * been sent). 91 + */ 89 92 int 90 93 fw_cancel_transaction(struct fw_card *card, 91 94 struct fw_transaction *transaction) 92 95 { 93 - /* Cancel the packet transmission if it's still queued. That 96 + /* 97 + * Cancel the packet transmission if it's still queued. That 94 98 * will call the packet transmission callback which cancels 95 - * the transaction. */ 99 + * the transaction. 100 + */ 96 101 97 102 if (card->driver->cancel_packet(card, &transaction->packet) == 0) 98 103 return 0; 99 104 100 - /* If the request packet has already been sent, we need to see 101 - * if the transaction is still pending and remove it in that case. */ 105 + /* 106 + * If the request packet has already been sent, we need to see 107 + * if the transaction is still pending and remove it in that case. 108 + */ 102 109 103 110 return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0); 104 111 } ··· 136 131 close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0); 137 132 break; 138 133 default: 139 - /* In this case the ack is really a juju specific 140 - * rcode, so just forward that to the callback. */ 134 + /* 135 + * In this case the ack is really a juju specific 136 + * rcode, so just forward that to the callback. 137 + */ 141 138 close_transaction(t, card, status, NULL, 0); 142 139 break; 143 140 } ··· 250 243 unsigned long flags; 251 244 int tlabel, source; 252 245 253 - /* Bump the flush timer up 100ms first of all so we 254 - * don't race with a flush timer callback. 
*/ 246 + /* 247 + * Bump the flush timer up 100ms first of all so we 248 + * don't race with a flush timer callback. 249 + */ 255 250 256 251 mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10)); 257 252 258 - /* Allocate tlabel from the bitmap and put the transaction on 259 - * the list while holding the card spinlock. */ 253 + /* 254 + * Allocate tlabel from the bitmap and put the transaction on 255 + * the list while holding the card spinlock. 256 + */ 260 257 261 258 spin_lock_irqsave(&card->lock, flags); 262 259 ··· 347 336 list_for_each_entry_safe(t, next, &list, link) { 348 337 card->driver->cancel_packet(card, &t->packet); 349 338 350 - /* At this point cancel_packet will never call the 339 + /* 340 + * At this point cancel_packet will never call the 351 341 * transaction callback, since we just took all the 352 - * transactions out of the list. So do it here.*/ 342 + * transactions out of the list. So do it here. 343 + */ 353 344 t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data); 354 345 } 355 346 } ··· 600 587 void 601 588 fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) 602 589 { 603 - /* Broadcast packets are reported as ACK_COMPLETE, so this 590 + /* 591 + * Broadcast packets are reported as ACK_COMPLETE, so this 604 592 * check is sufficient to ensure we don't send response to 605 - * broadcast packets or posted writes. */ 593 + * broadcast packets or posted writes. 594 + */ 606 595 if (request->ack != ACK_PENDING) 607 596 return; 608 597 ··· 654 639 offset, request->length); 655 640 spin_unlock_irqrestore(&address_handler_lock, flags); 656 641 657 - /* FIXME: lookup the fw_node corresponding to the sender of 642 + /* 643 + * FIXME: lookup the fw_node corresponding to the sender of 658 644 * this request and pass that to the address handler instead 659 645 * of the node ID. 
We may also want to move the address 660 646 * allocations to fw_node so we only do this callback if the 661 - * upper layers registered it for this node. */ 647 + * upper layers registered it for this node. 648 + */ 662 649 663 650 if (handler == NULL) 664 651 fw_send_response(card, request, RCODE_ADDRESS_ERROR); ··· 704 687 return; 705 688 } 706 689 707 - /* FIXME: sanity check packet, is length correct, does tcodes 708 - * and addresses match. */ 690 + /* 691 + * FIXME: sanity check packet, is length correct, does tcodes 692 + * and addresses match. 693 + */ 709 694 710 695 switch (tcode) { 711 696 case TCODE_READ_QUADLET_RESPONSE: ··· 809 790 case CSR_BANDWIDTH_AVAILABLE: 810 791 case CSR_CHANNELS_AVAILABLE_HI: 811 792 case CSR_CHANNELS_AVAILABLE_LO: 812 - /* FIXME: these are handled by the OHCI hardware and 793 + /* 794 + * FIXME: these are handled by the OHCI hardware and 813 795 * the stack never sees these request. If we add 814 796 * support for a new type of controller that doesn't 815 797 * handle this in hardware we need to deal with these 816 - * transactions. */ 798 + * transactions. 799 + */ 817 800 BUG(); 818 801 break; 819 802
+35 -23
drivers/firewire/fw-transaction.h
··· 1 - /* -*- c-basic-offset: 8 -*- 2 - * 3 - * fw-transaction.h - Header for IEEE1394 transaction logic 4 - * 1 + /* 5 2 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net> 6 3 * 7 4 * This program is free software; you can redistribute it and/or modify ··· 206 209 size_t payload_length; 207 210 u32 timestamp; 208 211 209 - /* This callback is called when the packet transmission has 212 + /* 213 + * This callback is called when the packet transmission has 210 214 * completed; for successful transmission, the status code is 211 215 * the ack received from the destination, otherwise it's a 212 216 * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO. ··· 228 230 229 231 struct fw_packet packet; 230 232 231 - /* The data passed to the callback is valid only during the 232 - * callback. */ 233 + /* 234 + * The data passed to the callback is valid only during the 235 + * callback. 236 + */ 233 237 fw_transaction_callback_t callback; 234 238 void *callback_data; 235 239 }; ··· 291 291 int link_speed; 292 292 int config_rom_generation; 293 293 294 - /* We need to store up to 4 self ID for a maximum of 63 295 - * devices plus 3 words for the topology map header. */ 294 + /* 295 + * We need to store up to 4 self ID for a maximum of 63 296 + * devices plus 3 words for the topology map header. 297 + */ 296 298 int self_id_count; 297 299 u32 topology_map[252 + 3]; 298 300 ··· 320 318 struct fw_card *fw_card_get(struct fw_card *card); 321 319 void fw_card_put(struct fw_card *card); 322 320 323 - /* The iso packet format allows for an immediate header/payload part 321 + /* 322 + * The iso packet format allows for an immediate header/payload part 324 323 * stored in 'header' immediately after the packet info plus an 325 324 * indirect payload part that is pointer to by the 'payload' field. 326 325 * Applications can use one or the other or both to implement simple 327 326 * low-bandwidth streaming (e.g. 
audio) or more advanced 328 - * scatter-gather streaming (e.g. assembling video frame automatically). */ 327 + * scatter-gather streaming (e.g. assembling video frame automatically). 328 + */ 329 329 330 330 struct fw_iso_packet { 331 331 u16 payload_length; /* Length of indirect payload. */ ··· 356 352 void *header, 357 353 void *data); 358 354 359 - /* An iso buffer is just a set of pages mapped for DMA in the 355 + /* 356 + * An iso buffer is just a set of pages mapped for DMA in the 360 357 * specified direction. Since the pages are to be used for DMA, they 361 358 * are not mapped into the kernel virtual address space. We store the 362 359 * DMA address in the page private. The helper function 363 - * fw_iso_buffer_map() will map the pages into a given vma. */ 360 + * fw_iso_buffer_map() will map the pages into a given vma. 361 + */ 364 362 365 363 struct fw_iso_buffer { 366 364 enum dma_data_direction direction; ··· 414 408 struct fw_card_driver { 415 409 const char *name; 416 410 417 - /* Enable the given card with the given initial config rom. 411 + /* 412 + * Enable the given card with the given initial config rom. 418 413 * This function is expected to activate the card, and either 419 414 * enable the PHY or set the link_on bit and initiate a bus 420 - * reset. */ 415 + * reset. 416 + */ 421 417 int (*enable) (struct fw_card *card, u32 *config_rom, size_t length); 422 418 423 419 int (*update_phy_reg) (struct fw_card *card, int address, 424 420 int clear_bits, int set_bits); 425 421 426 - /* Update the config rom for an enabled card. This function 422 + /* 423 + * Update the config rom for an enabled card. This function 427 424 * should change the config rom that is presented on the bus 428 - * an initiate a bus reset. */ 425 + * an initiate a bus reset. 426 + */ 429 427 int (*set_config_rom) (struct fw_card *card, 430 428 u32 *config_rom, size_t length); 431 429 ··· 438 428 /* Calling cancel is valid once a packet has been submitted. 
*/ 439 429 int (*cancel_packet) (struct fw_card *card, struct fw_packet *packet); 440 430 441 - /* Allow the specified node ID to do direct DMA out and in of 431 + /* 432 + * Allow the specified node ID to do direct DMA out and in of 442 433 * host memory. The card will disable this for all node when 443 434 * a bus reset happens, so driver need to reenable this after 444 435 * bus reset. Returns 0 on success, -ENODEV if the card 445 436 * doesn't support this, -ESTALE if the generation doesn't 446 - * match. */ 437 + * match. 438 + */ 447 439 int (*enable_phys_dma) (struct fw_card *card, 448 440 int node_id, int generation); 449 441 ··· 485 473 void fw_send_phy_config(struct fw_card *card, 486 474 int node_id, int generation, int gap_count); 487 475 488 - /* Called by the topology code to inform the device code of node 489 - * activity; found, lost, or updated nodes */ 476 + /* 477 + * Called by the topology code to inform the device code of node 478 + * activity; found, lost, or updated nodes. 479 + */ 490 480 void 491 481 fw_node_event(struct fw_card *card, struct fw_node *node, int event); 492 482 493 483 /* API used by card level drivers */ 494 484 495 - /* Do we need phy speed here also? If we add more args, maybe we 496 - should go back to struct fw_card_info. */ 497 485 void 498 486 fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, 499 487 struct device *device);