Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6:
firewire: core: ignore link-active bit of new nodes, fix device recognition
firewire: sbp2: revert obsolete 'fix stall with "Unsolicited response"'
firewire: core: increase default SPLIT_TIMEOUT value
firewire: ohci: Misleading kfree in ohci.c::pci_probe/remove
firewire: ohci: omit IntEvent.busReset check from AT queueing
firewire: ohci: prevent starting of iso contexts with empty queue
firewire: ohci: prevent iso completion callbacks after context stop
firewire: core: rename some variables
firewire: nosy: should work on Power Mac G4 PCI too
firewire: core: fix card->reset_jiffies overflow
firewire: cdev: remove unneeded reference
firewire: cdev: always wait for outbound transactions to complete
firewire: cdev: remove unneeded idr_find() from complete_transaction()
firewire: ohci: log dead DMA contexts

+140 -82
+2 -1
drivers/firewire/Kconfig
···
 	  The following cards are known to be based on PCILynx or PCILynx-2:
 	  IOI IOI-1394TT (PCI card), Unibrain Fireboard 400 PCI Lynx-2
 	  (PCI card), Newer Technology FireWire 2 Go (CardBus card),
-	  Apple Power Mac G3 blue & white (onboard controller).
+	  Apple Power Mac G3 blue & white and G4 with PCI graphics
+	  (onboard controller).
 
 	  To compile this driver as a module, say M here: The module will be
 	  called nosy. Source code of a userspace interface to nosy, called

+15 -6
drivers/firewire/core-card.c
···
 #define BIB_IRMC ((1) << 31)
 #define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
 
+/*
+ * IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms),
+ * but we have to make it longer because there are many devices whose firmware
+ * is just too slow for that.
+ */
+#define DEFAULT_SPLIT_TIMEOUT (2 * 8000)
+
 #define CANON_OUI 0x000085
 
 static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
···
 
 	/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
 	if (card->reset_jiffies != 0 &&
-	    time_is_after_jiffies(card->reset_jiffies + 2 * HZ)) {
+	    time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
 		if (!schedule_delayed_work(&card->br_work, 2 * HZ))
 			fw_card_put(card);
 		return;
···
 	irm_id = card->irm_node->node_id;
 	local_id = card->local_node->node_id;
 
-	grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
+	grace = time_after64(get_jiffies_64(),
+			     card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
 
 	if ((is_next_generation(generation, card->bm_generation) &&
 	     !card->bm_abdicate) ||
···
 	card->device = device;
 	card->current_tlabel = 0;
 	card->tlabel_mask = 0;
-	card->split_timeout_hi = 0;
-	card->split_timeout_lo = 800 << 19;
-	card->split_timeout_cycles = 800;
-	card->split_timeout_jiffies = DIV_ROUND_UP(HZ, 10);
+	card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
+	card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
+	card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
+	card->split_timeout_jiffies =
+		DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
 	card->color = 0;
 	card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
 

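A note on the new SPLIT_TIMEOUT default: split_timeout_hi carries whole seconds and split_timeout_lo carries the remaining 1/8000-second cycles shifted into the upper 13 bits, which is what the / 8000, % 8000 << 19 and DIV_ROUND_UP(... * HZ, 8000) expressions above compute. Below is a standalone userspace sketch of that arithmetic, not part of the patch; the HZ value is picked arbitrarily for illustration.

/* Sketch only: reproduces the split-timeout arithmetic from core-card.c
 * above, assuming 8000 isochronous cycles per second. */
#include <stdio.h>

#define CYCLES_PER_SECOND 8000

static void show(unsigned int cycles, unsigned int hz)
{
	unsigned int hi = cycles / CYCLES_PER_SECOND;          /* whole seconds     */
	unsigned int lo = (cycles % CYCLES_PER_SECOND) << 19;  /* fractional cycles */
	unsigned int jiffies = (cycles * hz + CYCLES_PER_SECOND - 1)
			       / CYCLES_PER_SECOND;            /* rounded up        */

	printf("%5u cycles -> hi=%u lo=0x%08x jiffies=%u (HZ=%u)\n",
	       cycles, hi, lo, jiffies, hz);
}

int main(void)
{
	show(800, 250);      /* old default, 100 ms: hi=0, lo=800<<19, ~HZ/10 */
	show(2 * 8000, 250); /* new default, 2 s:    hi=2, lo=0,       2 * HZ */
	return 0;
}
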
+29 -25
drivers/firewire/core-cdev.c
···
 	struct idr resource_idr;
 	struct list_head event_list;
 	wait_queue_head_t wait;
+	wait_queue_head_t tx_flush_wait;
 	u64 bus_reset_closure;
 
 	struct fw_iso_context *iso_context;
···
 	idr_init(&client->resource_idr);
 	INIT_LIST_HEAD(&client->event_list);
 	init_waitqueue_head(&client->wait);
+	init_waitqueue_head(&client->tx_flush_wait);
 	INIT_LIST_HEAD(&client->phy_receiver_link);
 	kref_init(&client->kref);
 
···
 static void release_transaction(struct client *client,
 				struct client_resource *resource)
 {
-	struct outbound_transaction_resource *r = container_of(resource,
-			struct outbound_transaction_resource, resource);
-
-	fw_cancel_transaction(client->device->card, &r->transaction);
 }
 
 static void complete_transaction(struct fw_card *card, int rcode,
···
 		memcpy(rsp->data, payload, rsp->length);
 
 	spin_lock_irqsave(&client->lock, flags);
-	/*
-	 * 1. If called while in shutdown, the idr tree must be left untouched.
-	 * The idr handle will be removed and the client reference will be
-	 * dropped later.
-	 * 2. If the call chain was release_client_resource ->
-	 * release_transaction -> complete_transaction (instead of a normal
-	 * conclusion of the transaction), i.e. if this resource was already
-	 * unregistered from the idr, the client reference will be dropped
-	 * by release_client_resource and we must not drop it here.
-	 */
-	if (!client->in_shutdown &&
-	    idr_find(&client->resource_idr, e->r.resource.handle)) {
-		idr_remove(&client->resource_idr, e->r.resource.handle);
-		/* Drop the idr's reference */
-		client_put(client);
-	}
+	idr_remove(&client->resource_idr, e->r.resource.handle);
+	if (client->in_shutdown)
+		wake_up(&client->tx_flush_wait);
 	spin_unlock_irqrestore(&client->lock, flags);
 
 	rsp->type = FW_CDEV_EVENT_RESPONSE;
···
 		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
 			    NULL, 0);
 
-	/* Drop the transaction callback's reference */
+	/* Drop the idr's reference */
 	client_put(client);
 }
···
 	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
 	if (ret < 0)
 		goto failed;
-
-	/* Get a reference for the transaction callback */
-	client_get(client);
 
 	fw_send_request(client->device->card, &e->r.transaction,
 			request->tcode, destination_id, request->generation,
···
 		todo = r->todo;
 		/* Allow 1000ms grace period for other reallocations. */
 		if (todo == ISO_RES_ALLOC &&
-		    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
+		    time_before64(get_jiffies_64(),
+				  client->device->card->reset_jiffies + HZ)) {
 			schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
 			skip = true;
 		} else {
···
 	return ret;
 }
 
+static int is_outbound_transaction_resource(int id, void *p, void *data)
+{
+	struct client_resource *resource = p;
+
+	return resource->release == release_transaction;
+}
+
+static int has_outbound_transactions(struct client *client)
+{
+	int ret;
+
+	spin_lock_irq(&client->lock);
+	ret = idr_for_each(&client->resource_idr,
+			   is_outbound_transaction_resource, NULL);
+	spin_unlock_irq(&client->lock);
+
+	return ret;
+}
+
 static int shutdown_resource(int id, void *p, void *data)
 {
 	struct client_resource *resource = p;
···
 	spin_lock_irq(&client->lock);
 	client->in_shutdown = true;
 	spin_unlock_irq(&client->lock);
+
+	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
 
 	idr_for_each(&client->resource_idr, shutdown_resource, client);
 	idr_remove_all(&client->resource_idr);

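The release path above is a small drain-on-close pattern: the release handler sets client->in_shutdown under client->lock and then sleeps on tx_flush_wait until no outbound transaction resources remain, while complete_transaction() drops its idr entry and wakes the waiter once the flag is set. A stripped-down sketch of the same pattern with hypothetical names (a sketch only, not the cdev code itself):

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

struct flusher {
	spinlock_t lock;
	unsigned int in_flight;		/* operations not yet completed */
	bool shutting_down;
	wait_queue_head_t flush_wait;
};

static void flusher_init(struct flusher *f)
{
	spin_lock_init(&f->lock);
	f->in_flight = 0;
	f->shutting_down = false;
	init_waitqueue_head(&f->flush_wait);
}

/* called before an asynchronous operation is submitted */
static void flusher_op_start(struct flusher *f)
{
	spin_lock_irq(&f->lock);
	f->in_flight++;
	spin_unlock_irq(&f->lock);
}

/* called from the completion path, possibly in atomic context */
static void flusher_op_done(struct flusher *f)
{
	unsigned long flags;

	spin_lock_irqsave(&f->lock, flags);
	f->in_flight--;
	if (f->shutting_down)
		wake_up(&f->flush_wait);
	spin_unlock_irqrestore(&f->lock, flags);
}

static bool flusher_idle(struct flusher *f)
{
	bool idle;

	spin_lock_irq(&f->lock);
	idle = f->in_flight == 0;
	spin_unlock_irq(&f->lock);

	return idle;
}

/* called from release(): block until every outstanding operation finished */
static void flusher_shutdown(struct flusher *f)
{
	spin_lock_irq(&f->lock);
	f->shutting_down = true;
	spin_unlock_irq(&f->lock);

	wait_event(f->flush_wait, flusher_idle(f));
}
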
+14 -8
drivers/firewire/core-device.c
···
 		container_of(work, struct fw_device, work.work);
 	int minor = MINOR(device->device.devt);
 
-	if (time_is_after_jiffies(device->card->reset_jiffies + SHUTDOWN_DELAY)
+	if (time_before64(get_jiffies_64(),
+			  device->card->reset_jiffies + SHUTDOWN_DELAY)
 	    && !list_empty(&device->card->link)) {
 		schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
 		return;
···
 		device->config_rom_retries++;
 		schedule_delayed_work(&device->work, RETRY_DELAY);
 	} else {
-		fw_notify("giving up on config rom for node id %x\n",
-			  device->node_id);
+		if (device->node->link_on)
+			fw_notify("giving up on config rom for node id %x\n",
+				  device->node_id);
 		if (device->node == device->card->root_node)
 			fw_schedule_bm_work(device->card, 0);
 		fw_device_release(&device->device);
···
 
 	switch (event) {
 	case FW_NODE_CREATED:
-	case FW_NODE_LINK_ON:
-		if (!node->link_on)
-			break;
+		/*
+		 * Attempt to scan the node, regardless whether its self ID has
+		 * the L (link active) flag set or not. Some broken devices
+		 * send L=0 but have an up-and-running link; others send L=1
+		 * without actually having a link.
+		 */
 create:
 		device = kzalloc(sizeof(*device), GFP_ATOMIC);
 		if (device == NULL)
···
 		break;
 
 	case FW_NODE_INITIATED_RESET:
+	case FW_NODE_LINK_ON:
 		device = node->data;
 		if (device == NULL)
 			goto create;
···
 		break;
 
 	case FW_NODE_UPDATED:
-		if (!node->link_on || node->data == NULL)
+		device = node->data;
+		if (device == NULL)
 			break;
 
-		device = node->data;
 		device->node_id = node->node_id;
 		smp_wmb(); /* update node_id before generation */
 		device->generation = card->generation;

+11 -11
drivers/firewire/core-iso.c
···
 static int manage_channel(struct fw_card *card, int irm_id, int generation,
 			  u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
 {
-	__be32 c, all, old;
-	int i, ret = -EIO, retry = 5;
+	__be32 bit, all, old;
+	int channel, ret = -EIO, retry = 5;
 
 	old = all = allocate ? cpu_to_be32(~0) : 0;
 
-	for (i = 0; i < 32; i++) {
-		if (!(channels_mask & 1 << i))
+	for (channel = 0; channel < 32; channel++) {
+		if (!(channels_mask & 1 << channel))
 			continue;
 
 		ret = -EBUSY;
 
-		c = cpu_to_be32(1 << (31 - i));
-		if ((old & c) != (all & c))
+		bit = cpu_to_be32(1 << (31 - channel));
+		if ((old & bit) != (all & bit))
 			continue;
 
 		data[0] = old;
-		data[1] = old ^ c;
+		data[1] = old ^ bit;
 		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
 					   irm_id, generation, SCODE_100,
 					   offset, data, 8)) {
 		case RCODE_GENERATION:
 			/* A generation change frees all channels. */
-			return allocate ? -EAGAIN : i;
+			return allocate ? -EAGAIN : channel;
 
 		case RCODE_COMPLETE:
 			if (data[0] == old)
-				return i;
+				return channel;
 
 			old = data[0];
 
 			/* Is the IRM 1394a-2000 compliant? */
-			if ((data[0] & c) == (data[1] & c))
+			if ((data[0] & bit) == (data[1] & bit))
 				continue;
 
 			/* 1394-1995 IRM, fall through to retry. */
 		default:
 			if (retry) {
 				retry--;
-				i--;
+				channel--;
 			} else {
 				ret = -EIO;
 			}

+1 -1
drivers/firewire/core-topology.c
···
 	 */
 	smp_wmb();
 	card->generation = generation;
-	card->reset_jiffies = jiffies;
+	card->reset_jiffies = get_jiffies_64();
 	card->bm_node_id = 0xffff;
 	card->bm_abdicate = bm_abdicate;
 	fw_schedule_bm_work(card, 0);

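The reset_jiffies conversions in this merge (jiffies and time_is_after_jiffies()/time_after() replaced by get_jiffies_64() and time_before64()/time_after64(), with the field itself widened to u64 in include/linux/firewire.h below) are about 32-bit wraparound: with HZ=1000 the 32-bit jiffies counter wraps roughly every 49.7 days, and a comparison against a stamp more than 2^31 ticks in the past silently gives the wrong answer, whereas the 64-bit counter never wraps in any realistic uptime. A standalone userspace demo of the effect, not kernel code, with HZ=1000 assumed:

/* Demo only: open-codes the kernel's time_after()-style comparisons to show
 * how a stale 32-bit timestamp misbehaves while a 64-bit one does not. */
#include <stdint.h>
#include <stdio.h>

#define HZ 1000u

static int after32(uint32_t a, uint32_t b) { return (int32_t)(b - a) < 0; }
static int after64(uint64_t a, uint64_t b) { return (int64_t)(b - a) < 0; }

int main(void)
{
	uint64_t reset = 0;				/* last bus reset */
	uint64_t now = (uint64_t)30 * 24 * 3600 * HZ;	/* 30 days later  */

	/* 32-bit stamps: prints 0, i.e. "not yet 2 s since the reset" */
	printf("32-bit: %d\n", after32((uint32_t)now, (uint32_t)(reset + 2 * HZ)));
	/* 64-bit stamps: prints 1, the correct answer */
	printf("64-bit: %d\n", after64(now, reset + 2 * HZ));
	return 0;
}
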
+64 -21
drivers/firewire/ohci.c
···
 	struct context at_request_ctx;
 	struct context at_response_ctx;
 
+	u32 it_context_support;
 	u32 it_context_mask; /* unoccupied IT contexts */
 	struct iso_context *it_context_list;
 	u64 ir_context_channels; /* unoccupied channels */
+	u32 ir_context_support;
 	u32 ir_context_mask; /* unoccupied IR contexts */
 	struct iso_context *ir_context_list;
 	u64 mc_channels; /* channels in use by the multichannel IR context */
···
 	    !(evt & OHCI1394_busReset))
 		return;
 
-	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
 		  evt & OHCI1394_selfIDComplete ? " selfID" : "",
 		  evt & OHCI1394_RQPkt ? " AR_req" : "",
 		  evt & OHCI1394_RSPkt ? " AR_resp" : "",
···
 		  evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
 		  evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
 		  evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
+		  evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "",
 		  evt & OHCI1394_busReset ? " busReset" : "",
 		  evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
 			  OHCI1394_RSPkt | OHCI1394_reqTxComplete |
···
 				     DESCRIPTOR_IRQ_ALWAYS |
 				     DESCRIPTOR_BRANCH_ALWAYS);
 
-	/*
-	 * If the controller and packet generations don't match, we need to
-	 * bail out and try again. If IntEvent.busReset is set, the AT context
-	 * is halted, so appending to the context and trying to run it is
-	 * futile. Most controllers do the right thing and just flush the AT
-	 * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
-	 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
-	 * up stalling out. So we just bail out in software and try again
-	 * later, and everyone is happy.
-	 * FIXME: Test of IntEvent.busReset may no longer be necessary since we
-	 * flush AT queues in bus_reset_tasklet.
-	 * FIXME: Document how the locking works.
-	 */
-	if (ohci->generation != packet->generation ||
-	    reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
+	/* FIXME: Document how the locking works. */
+	if (ohci->generation != packet->generation) {
 		if (packet->payload_mapped)
 			dma_unmap_single(ohci->card.device, payload_bus,
 					 packet->payload_length, DMA_TO_DEVICE);
···
 	if (ret < 0)
 		packet->callback(packet, &ctx->ohci->card, packet->ack);
 
+}
+
+static void detect_dead_context(struct fw_ohci *ohci,
+				const char *name, unsigned int regs)
+{
+	u32 ctl;
+
+	ctl = reg_read(ohci, CONTROL_SET(regs));
+	if (ctl & CONTEXT_DEAD) {
+#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
+		fw_error("DMA context %s has stopped, error code: %s\n",
+			 name, evts[ctl & 0x1f]);
+#else
+		fw_error("DMA context %s has stopped, error code: %#x\n",
+			 name, ctl & 0x1f);
+#endif
+	}
+}
+
+static void handle_dead_contexts(struct fw_ohci *ohci)
+{
+	unsigned int i;
+	char name[8];
+
+	detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
+	detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
+	detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
+	detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
+	for (i = 0; i < 32; ++i) {
+		if (!(ohci->it_context_support & (1 << i)))
+			continue;
+		sprintf(name, "IT%u", i);
+		detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
+	}
+	for (i = 0; i < 32; ++i) {
+		if (!(ohci->ir_context_support & (1 << i)))
+			continue;
+		sprintf(name, "IR%u", i);
+		detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
+	}
+	/* TODO: maybe try to flush and restart the dead contexts */
 }
 
 static u32 cycle_timer_ticks(u32 cycle_timer)
···
 		fw_notify("isochronous cycle inconsistent\n");
 	}
 
+	if (unlikely(event & OHCI1394_unrecoverableError))
+		handle_dead_contexts(ohci);
+
 	if (event & OHCI1394_cycle64Seconds) {
 		spin_lock(&ohci->lock);
 		update_bus_time(ohci);
···
 		OHCI1394_selfIDComplete |
 		OHCI1394_regAccessFail |
 		OHCI1394_cycle64Seconds |
-		OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong |
+		OHCI1394_cycleInconsistent |
+		OHCI1394_unrecoverableError |
+		OHCI1394_cycleTooLong |
 		OHCI1394_masterIntEnable;
 	if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
 		irqs |= OHCI1394_busReset;
···
 	u32 control = IR_CONTEXT_ISOCH_HEADER, match;
 	int index;
 
+	/* the controller cannot start without any queued packets */
+	if (ctx->context.last->branch_address == 0)
+		return -ENODATA;
+
 	switch (ctx->base.type) {
 	case FW_ISO_CONTEXT_TRANSMIT:
 		index = ctx - ohci->it_context_list;
···
 	}
 	flush_writes(ohci);
 	context_stop(&ctx->context);
+	tasklet_kill(&ctx->context.tasklet);
 
 	return 0;
 }
···
 
 	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
 	ohci->ir_context_channels = ~0ULL;
-	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
+	ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
 	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
+	ohci->ir_context_mask = ohci->ir_context_support;
 	ohci->n_ir = hweight32(ohci->ir_context_mask);
 	size = sizeof(struct iso_context) * ohci->n_ir;
 	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
 
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
-	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
+	ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
+	ohci->it_context_mask = ohci->it_context_support;
 	ohci->n_it = hweight32(ohci->it_context_mask);
 	size = sizeof(struct iso_context) * ohci->n_it;
 	ohci->it_context_list = kzalloc(size, GFP_KERNEL);
···
 fail_disable:
 	pci_disable_device(dev);
 fail_free:
-	kfree(&ohci->card);
+	kfree(ohci);
 	pmac_ohci_off(dev);
 fail:
 	if (err == -ENOMEM)
···
 	pci_iounmap(dev, ohci->registers);
 	pci_release_region(dev, 0);
 	pci_disable_device(dev);
-	kfree(&ohci->card);
+	kfree(ohci);
 	pmac_ohci_off(dev);
 
 	fw_notify("Removed fw-ohci device.\n");

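For reference on the dead-context logging added above: in the OHCI 1.1 ContextControl register, the run/wake/dead/active flags live in bits 15/12/11/10 and the most recent event code in bits 4..0, which is why detect_dead_context() tests CONTEXT_DEAD and logs ctl & 0x1f. A standalone decoder sketch (not part of the patch):

/* Sketch only: decode an OHCI ContextControl value the way the new
 * detect_dead_context() interprets it. */
#include <stdint.h>
#include <stdio.h>

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

static void decode_context_control(const char *name, uint32_t ctl)
{
	printf("%s:%s%s%s%s event code 0x%02x\n", name,
	       ctl & CONTEXT_RUN    ? " run"    : "",
	       ctl & CONTEXT_WAKE   ? " wake"   : "",
	       ctl & CONTEXT_DEAD   ? " DEAD"   : "",
	       ctl & CONTEXT_ACTIVE ? " active" : "",
	       ctl & 0x1f);
}

int main(void)
{
	decode_context_control("ATReq", 0x8400);	/* running and active   */
	decode_context_control("IT3", 0x8800 | 0x07);	/* dead, event code 0x7 */
	return 0;
}
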
+3 -8
drivers/firewire/sbp2.c
···
 	 * So this callback only sets the rcode if it hasn't already
 	 * been set and only does the cleanup if the transaction
 	 * failed and we didn't already get a status write.
-	 *
-	 * Here we treat RCODE_CANCELLED like RCODE_COMPLETE because some
-	 * OXUF936QSE firmwares occasionally respond after Split_Timeout and
-	 * complete the ORB just fine. Note, we also get RCODE_CANCELLED
-	 * from sbp2_cancel_orbs() if fw_cancel_transaction() == 0.
 	 */
 	spin_lock_irqsave(&card->lock, flags);
 
 	if (orb->rcode == -1)
 		orb->rcode = rcode;
-
-	if (orb->rcode != RCODE_COMPLETE && orb->rcode != RCODE_CANCELLED) {
+	if (orb->rcode != RCODE_COMPLETE) {
 		list_del(&orb->link);
 		spin_unlock_irqrestore(&card->lock, flags);
 
···
 
 	list_for_each_entry_safe(orb, next, &list, link) {
 		retval = 0;
-		fw_cancel_transaction(device->card, &orb->t);
+		if (fw_cancel_transaction(device->card, &orb->t) == 0)
+			continue;
 
 		orb->rcode = RCODE_CANCELLED;
 		orb->callback(orb, NULL);

+1 -1
include/linux/firewire.h
···
 	int current_tlabel;
 	u64 tlabel_mask;
 	struct list_head transaction_list;
-	unsigned long reset_jiffies;
+	u64 reset_jiffies;
 
 	u32 split_timeout_hi;
 	u32 split_timeout_lo;