Merge firewire branches to be released post v2.6.35

Conflicts:
drivers/firewire/core-card.c
drivers/firewire/core-cdev.c

and a forgotten #include <linux/time.h> in drivers/firewire/ohci.c

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>

+4308 -576
+1
Documentation/ioctl/ioctl-number.txt
··· 79 0x22 all scsi/sg.h 80 '#' 00-3F IEEE 1394 Subsystem Block for the entire subsystem 81 '$' 00-0F linux/perf_counter.h, linux/perf_event.h 82 '1' 00-1F <linux/timepps.h> PPS kit from Ulrich Windl 83 <ftp://ftp.de.kernel.org/pub/linux/daemons/ntp/PPS/> 84 '2' 01-04 linux/i2o.h
··· 79 0x22 all scsi/sg.h 80 '#' 00-3F IEEE 1394 Subsystem Block for the entire subsystem 81 '$' 00-0F linux/perf_counter.h, linux/perf_event.h 82 + '&' 00-07 drivers/firewire/nosy-user.h 83 '1' 00-1F <linux/timepps.h> PPS kit from Ulrich Windl 84 <ftp://ftp.de.kernel.org/pub/linux/daemons/ntp/PPS/> 85 '2' 01-04 linux/i2o.h
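For reference, commands in a newly registered block are built with the standard ioctl encoding macros and the block's magic character. A minimal sketch of what a command in the '&' 00-07 block looks like, using hypothetical command names and numbers rather than the actual contents of drivers/firewire/nosy-user.h:

    #include <linux/ioctl.h>
    #include <linux/types.h>

    #define NOSY_IOC_MAGIC '&'    /* the 00-07 block registered above */

    /* Hypothetical commands, for illustration only: */
    #define EXAMPLE_IOC_START  _IO(NOSY_IOC_MAGIC, 0)          /* no argument */
    #define EXAMPLE_IOC_FILTER _IOW(NOSY_IOC_MAGIC, 1, __u32)  /* pass a mask */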
+1
MAINTAINERS
··· 2307 S: Maintained 2308 F: drivers/firewire/ 2309 F: include/linux/firewire*.h 2310 2311 FIRMWARE LOADER (request_firmware) 2312 S: Orphan
··· 2307 S: Maintained 2308 F: drivers/firewire/ 2309 F: include/linux/firewire*.h 2310 + F: tools/firewire/ 2311 2312 FIRMWARE LOADER (request_firmware) 2313 S: Orphan
+24
drivers/firewire/Kconfig
··· 66 67 source "drivers/ieee1394/Kconfig" 68 69 endmenu
··· 66 67 source "drivers/ieee1394/Kconfig" 68 69 + config FIREWIRE_NOSY 70 + tristate "Nosy - a FireWire traffic sniffer for PCILynx cards" 71 + depends on PCI 72 + help 73 + Nosy is an IEEE 1394 packet sniffer that is used for protocol 74 + analysis and in the development of IEEE 1394 drivers, applications, 75 + or firmware. 76 + 77 + This driver lets you use a Texas Instruments PCILynx 1394 to PCI 78 + link layer controller TSB12LV21/A/B as a low-budget bus analyzer. 79 + PCILynx is a now very rare IEEE 1394 controller that is 80 + not OHCI 1394 compliant. 81 + 82 + The following cards are known to be based on PCILynx or PCILynx-2: 83 + IOI IOI-1394TT (PCI card), Unibrain Fireboard 400 PCI Lynx-2 84 + (PCI card), Newer Technology FireWire 2 Go (CardBus card), 85 + Apple Power Mac G3 blue & white (onboard controller). 86 + 87 + To compile this driver as a module, say M here: the module will be 88 + called nosy. Source code of a userspace interface to nosy, called 89 + nosy-dump, can be found in tools/firewire/ of the kernel sources. 90 + 91 + If unsure, say N. 92 + 93 endmenu
+1
drivers/firewire/Makefile
··· 12 obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o 13 obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o 14 obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o
··· 12 obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o 13 obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o 14 obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o 15 + obj-$(CONFIG_FIREWIRE_NOSY) += nosy.o
+152 -66
drivers/firewire/core-card.c
··· 204 } 205 EXPORT_SYMBOL(fw_core_remove_descriptor); 206 207 static void allocate_broadcast_channel(struct fw_card *card, int generation) 208 { 209 int channel, bandwidth = 0; 210 211 - fw_iso_resource_manage(card, generation, 1ULL << 31, &channel, 212 - &bandwidth, true, card->bm_transaction_data); 213 - if (channel == 31) { 214 card->broadcast_channel_allocated = true; 215 - device_for_each_child(card->device, (void *)(long)generation, 216 - fw_device_set_broadcast_channel); 217 } 218 } 219 220 static const char gap_count_table[] = { ··· 269 void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) 270 { 271 fw_card_get(card); 272 - if (!schedule_delayed_work(&card->work, delay)) 273 fw_card_put(card); 274 } 275 276 - static void fw_card_bm_work(struct work_struct *work) 277 { 278 - struct fw_card *card = container_of(work, struct fw_card, work.work); 279 struct fw_device *root_device, *irm_device; 280 struct fw_node *root_node; 281 - unsigned long flags; 282 - int root_id, new_root_id, irm_id, local_id; 283 int gap_count, generation, grace, rcode; 284 bool do_reset = false; 285 bool root_device_is_running; 286 bool root_device_is_cmc; 287 bool irm_is_1394_1995_only; 288 289 - spin_lock_irqsave(&card->lock, flags); 290 291 if (card->local_node == NULL) { 292 - spin_unlock_irqrestore(&card->lock, flags); 293 goto out_put_card; 294 } 295 ··· 311 312 grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8)); 313 314 - if (is_next_generation(generation, card->bm_generation) || 315 (card->bm_generation != generation && grace)) { 316 /* 317 * This first step is to figure out who is IRM and ··· 343 card->bm_transaction_data[0] = cpu_to_be32(0x3f); 344 card->bm_transaction_data[1] = cpu_to_be32(local_id); 345 346 - spin_unlock_irqrestore(&card->lock, flags); 347 348 rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, 349 irm_id, generation, SCODE_100, 350 CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, 351 - card->bm_transaction_data, 352 - sizeof(card->bm_transaction_data)); 353 354 if (rcode == RCODE_GENERATION) 355 /* Another bus reset, BM work has been rescheduled. */ 356 goto out; 357 358 - if (rcode == RCODE_COMPLETE && 359 - card->bm_transaction_data[0] != cpu_to_be32(0x3f)) { 360 361 /* Somebody else is BM. Only act as IRM. */ 362 if (local_id == irm_id) 363 allocate_broadcast_channel(card, generation); ··· 370 goto out; 371 } 372 373 - spin_lock_irqsave(&card->lock, flags); 374 375 if (rcode != RCODE_COMPLETE) { 376 /* ··· 399 * We weren't BM in the last generation, and the last 400 * bus reset is less than 125ms ago. Reschedule this job. 401 */ 402 - spin_unlock_irqrestore(&card->lock, flags); 403 fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); 404 goto out; 405 } ··· 422 * If we haven't probed this device yet, bail out now 423 * and let's try again once that's done. 424 */ 425 - spin_unlock_irqrestore(&card->lock, flags); 426 goto out; 427 } else if (root_device_is_cmc) { 428 /* 429 - * FIXME: I suppose we should set the cmstr bit in the 430 - * STATE_CLEAR register of this node, as described in 431 - * 1394-1995, 8.4.2.6. Also, send out a force root 432 - * packet for this node. 
433 */ 434 new_root_id = root_id; 435 } else { ··· 460 (card->gap_count != gap_count || new_root_id != root_id)) 461 do_reset = true; 462 463 - spin_unlock_irqrestore(&card->lock, flags); 464 465 if (do_reset) { 466 fw_notify("phy config: card %d, new root=%x, gap_count=%d\n", 467 card->index, new_root_id, gap_count); 468 fw_send_phy_config(card, new_root_id, generation, gap_count); 469 - fw_core_initiate_bus_reset(card, 1); 470 /* Will allocate broadcast channel after the reset. */ 471 - } else { 472 - if (local_id == irm_id) 473 - allocate_broadcast_channel(card, generation); 474 } 475 476 out: 477 fw_node_put(root_node); ··· 504 card->device = device; 505 card->current_tlabel = 0; 506 card->tlabel_mask = 0; 507 card->color = 0; 508 card->broadcast_channel = BROADCAST_CHANNEL_INITIAL; 509 510 kref_init(&card->kref); 511 init_completion(&card->done); 512 INIT_LIST_HEAD(&card->transaction_list); 513 spin_lock_init(&card->lock); 514 515 card->local_node = NULL; 516 517 - INIT_DELAYED_WORK(&card->work, fw_card_bm_work); 518 } 519 EXPORT_SYMBOL(fw_card_initialize); 520 ··· 546 } 547 EXPORT_SYMBOL(fw_card_add); 548 549 - 550 /* 551 * The next few functions implement a dummy driver that is used once a card 552 * driver shuts down an fw_card. This allows the driver to cleanly unload, 553 * as all IO to the card will be handled (and failed) by the dummy driver 554 * instead of calling into the module. Only functions for iso context 555 * shutdown still need to be provided by the card driver. 556 */ 557 558 - static int dummy_enable(struct fw_card *card, 559 - const __be32 *config_rom, size_t length) 560 { 561 - BUG(); 562 - return -1; 563 } 564 565 static int dummy_update_phy_reg(struct fw_card *card, int address, ··· 570 return -ENODEV; 571 } 572 573 - static int dummy_set_config_rom(struct fw_card *card, 574 - const __be32 *config_rom, size_t length) 575 - { 576 - /* 577 - * We take the card out of card_list before setting the dummy 578 - * driver, so this should never get called. 579 - */ 580 - BUG(); 581 - return -1; 582 - } 583 - 584 static void dummy_send_request(struct fw_card *card, struct fw_packet *packet) 585 { 586 - packet->callback(packet, card, -ENODEV); 587 } 588 589 static void dummy_send_response(struct fw_card *card, struct fw_packet *packet) 590 { 591 - packet->callback(packet, card, -ENODEV); 592 } 593 594 static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet) ··· 591 return -ENODEV; 592 } 593 594 static const struct fw_card_driver dummy_driver_template = { 595 - .enable = dummy_enable, 596 - .update_phy_reg = dummy_update_phy_reg, 597 - .set_config_rom = dummy_set_config_rom, 598 - .send_request = dummy_send_request, 599 - .cancel_packet = dummy_cancel_packet, 600 - .send_response = dummy_send_response, 601 - .enable_phys_dma = dummy_enable_phys_dma, 602 }; 603 604 void fw_card_release(struct kref *kref) ··· 640 641 card->driver->update_phy_reg(card, 4, 642 PHY_LINK_ACTIVE | PHY_CONTENDER, 0); 643 - fw_core_initiate_bus_reset(card, 1); 644 645 mutex_lock(&card_mutex); 646 list_del_init(&card->link); ··· 660 WARN_ON(!list_empty(&card->transaction_list)); 661 } 662 EXPORT_SYMBOL(fw_core_remove_card); 663 - 664 - int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset) 665 - { 666 - int reg = short_reset ? 5 : 1; 667 - int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET; 668 - 669 - return card->driver->update_phy_reg(card, reg, 0, bit); 670 - } 671 - EXPORT_SYMBOL(fw_core_initiate_bus_reset);
··· 204 } 205 EXPORT_SYMBOL(fw_core_remove_descriptor); 206 207 + static int reset_bus(struct fw_card *card, bool short_reset) 208 + { 209 + int reg = short_reset ? 5 : 1; 210 + int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET; 211 + 212 + return card->driver->update_phy_reg(card, reg, 0, bit); 213 + } 214 + 215 + void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset) 216 + { 217 + /* We don't try hard to sort out requests of long vs. short resets. */ 218 + card->br_short = short_reset; 219 + 220 + /* Use an arbitrary short delay to combine multiple reset requests. */ 221 + fw_card_get(card); 222 + if (!schedule_delayed_work(&card->br_work, 223 + delayed ? DIV_ROUND_UP(HZ, 100) : 0)) 224 + fw_card_put(card); 225 + } 226 + EXPORT_SYMBOL(fw_schedule_bus_reset); 227 + 228 + static void br_work(struct work_struct *work) 229 + { 230 + struct fw_card *card = container_of(work, struct fw_card, br_work.work); 231 + 232 + /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */ 233 + if (card->reset_jiffies != 0 && 234 + time_is_after_jiffies(card->reset_jiffies + 2 * HZ)) { 235 + if (!schedule_delayed_work(&card->br_work, 2 * HZ)) 236 + fw_card_put(card); 237 + return; 238 + } 239 + 240 + fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation, 241 + FW_PHY_CONFIG_CURRENT_GAP_COUNT); 242 + reset_bus(card, card->br_short); 243 + fw_card_put(card); 244 + } 245 + 246 static void allocate_broadcast_channel(struct fw_card *card, int generation) 247 { 248 int channel, bandwidth = 0; 249 250 + if (!card->broadcast_channel_allocated) { 251 + fw_iso_resource_manage(card, generation, 1ULL << 31, 252 + &channel, &bandwidth, true, 253 + card->bm_transaction_data); 254 + if (channel != 31) { 255 + fw_notify("failed to allocate broadcast channel\n"); 256 + return; 257 + } 258 card->broadcast_channel_allocated = true; 259 } 260 + 261 + device_for_each_child(card->device, (void *)(long)generation, 262 + fw_device_set_broadcast_channel); 263 } 264 265 static const char gap_count_table[] = { ··· 224 void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) 225 { 226 fw_card_get(card); 227 + if (!schedule_delayed_work(&card->bm_work, delay)) 228 fw_card_put(card); 229 } 230 231 + static void bm_work(struct work_struct *work) 232 { 233 + struct fw_card *card = container_of(work, struct fw_card, bm_work.work); 234 struct fw_device *root_device, *irm_device; 235 struct fw_node *root_node; 236 + int root_id, new_root_id, irm_id, bm_id, local_id; 237 int gap_count, generation, grace, rcode; 238 bool do_reset = false; 239 bool root_device_is_running; 240 bool root_device_is_cmc; 241 bool irm_is_1394_1995_only; 242 243 + spin_lock_irq(&card->lock); 244 245 if (card->local_node == NULL) { 246 + spin_unlock_irq(&card->lock); 247 goto out_put_card; 248 } 249 ··· 267 268 grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8)); 269 270 + if ((is_next_generation(generation, card->bm_generation) && 271 + !card->bm_abdicate) || 272 (card->bm_generation != generation && grace)) { 273 /* 274 * This first step is to figure out who is IRM and ··· 298 card->bm_transaction_data[0] = cpu_to_be32(0x3f); 299 card->bm_transaction_data[1] = cpu_to_be32(local_id); 300 301 + spin_unlock_irq(&card->lock); 302 303 rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, 304 irm_id, generation, SCODE_100, 305 CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, 306 + card->bm_transaction_data, 8); 307 308 if (rcode == RCODE_GENERATION) 309 /* Another bus reset, BM work has been 
rescheduled. */ 310 goto out; 311 312 + bm_id = be32_to_cpu(card->bm_transaction_data[0]); 313 314 + spin_lock_irq(&card->lock); 315 + if (rcode == RCODE_COMPLETE && generation == card->generation) 316 + card->bm_node_id = 317 + bm_id == 0x3f ? local_id : 0xffc0 | bm_id; 318 + spin_unlock_irq(&card->lock); 319 + 320 + if (rcode == RCODE_COMPLETE && bm_id != 0x3f) { 321 /* Somebody else is BM. Only act as IRM. */ 322 if (local_id == irm_id) 323 allocate_broadcast_channel(card, generation); ··· 320 goto out; 321 } 322 323 + if (rcode == RCODE_SEND_ERROR) { 324 + /* 325 + * We have been unable to send the lock request due to 326 + * some local problem. Let's try again later and hope 327 + * that the problem has gone away by then. 328 + */ 329 + fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); 330 + goto out; 331 + } 332 + 333 + spin_lock_irq(&card->lock); 334 335 if (rcode != RCODE_COMPLETE) { 336 /* ··· 339 * We weren't BM in the last generation, and the last 340 * bus reset is less than 125ms ago. Reschedule this job. 341 */ 342 + spin_unlock_irq(&card->lock); 343 fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); 344 goto out; 345 } ··· 362 * If we haven't probed this device yet, bail out now 363 * and let's try again once that's done. 364 */ 365 + spin_unlock_irq(&card->lock); 366 goto out; 367 } else if (root_device_is_cmc) { 368 /* 369 + * We will send out a force root packet for this 370 + * node as part of the gap count optimization. 371 */ 372 new_root_id = root_id; 373 } else { ··· 402 (card->gap_count != gap_count || new_root_id != root_id)) 403 do_reset = true; 404 405 + spin_unlock_irq(&card->lock); 406 407 if (do_reset) { 408 fw_notify("phy config: card %d, new root=%x, gap_count=%d\n", 409 card->index, new_root_id, gap_count); 410 fw_send_phy_config(card, new_root_id, generation, gap_count); 411 + reset_bus(card, true); 412 /* Will allocate broadcast channel after the reset. */ 413 + goto out; 414 } 415 + 416 + if (root_device_is_cmc) { 417 + /* 418 + * Make sure that the cycle master sends cycle start packets. 419 + */ 420 + card->bm_transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR); 421 + rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST, 422 + root_id, generation, SCODE_100, 423 + CSR_REGISTER_BASE + CSR_STATE_SET, 424 + card->bm_transaction_data, 4); 425 + if (rcode == RCODE_GENERATION) 426 + goto out; 427 + } 428 + 429 + if (local_id == irm_id) 430 + allocate_broadcast_channel(card, generation); 431 432 out: 433 fw_node_put(root_node); ··· 432 card->device = device; 433 card->current_tlabel = 0; 434 card->tlabel_mask = 0; 435 + card->split_timeout_hi = 0; 436 + card->split_timeout_lo = 800 << 19; 437 + card->split_timeout_cycles = 800; 438 + card->split_timeout_jiffies = DIV_ROUND_UP(HZ, 10); 439 card->color = 0; 440 card->broadcast_channel = BROADCAST_CHANNEL_INITIAL; 441 442 kref_init(&card->kref); 443 init_completion(&card->done); 444 INIT_LIST_HEAD(&card->transaction_list); 445 + INIT_LIST_HEAD(&card->phy_receiver_list); 446 spin_lock_init(&card->lock); 447 448 card->local_node = NULL; 449 450 + INIT_DELAYED_WORK(&card->br_work, br_work); 451 + INIT_DELAYED_WORK(&card->bm_work, bm_work); 452 } 453 EXPORT_SYMBOL(fw_card_initialize); 454 ··· 468 } 469 EXPORT_SYMBOL(fw_card_add); 470 471 /* 472 * The next few functions implement a dummy driver that is used once a card 473 * driver shuts down an fw_card. 
This allows the driver to cleanly unload, 474 * as all IO to the card will be handled (and failed) by the dummy driver 475 * instead of calling into the module. Only functions for iso context 476 * shutdown still need to be provided by the card driver. 477 + * 478 + * .read/write_csr() should never be called anymore after the dummy driver 479 + * was bound since they are only used within request handler context. 480 + * .set_config_rom() is never called since the card is taken out of card_list 481 + * before switching to the dummy driver. 482 */ 483 484 + static int dummy_read_phy_reg(struct fw_card *card, int address) 485 { 486 + return -ENODEV; 487 } 488 489 static int dummy_update_phy_reg(struct fw_card *card, int address, ··· 490 return -ENODEV; 491 } 492 493 static void dummy_send_request(struct fw_card *card, struct fw_packet *packet) 494 { 495 + packet->callback(packet, card, RCODE_CANCELLED); 496 } 497 498 static void dummy_send_response(struct fw_card *card, struct fw_packet *packet) 499 { 500 + packet->callback(packet, card, RCODE_CANCELLED); 501 } 502 503 static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet) ··· 522 return -ENODEV; 523 } 524 525 + static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card, 526 + int type, int channel, size_t header_size) 527 + { 528 + return ERR_PTR(-ENODEV); 529 + } 530 + 531 + static int dummy_start_iso(struct fw_iso_context *ctx, 532 + s32 cycle, u32 sync, u32 tags) 533 + { 534 + return -ENODEV; 535 + } 536 + 537 + static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels) 538 + { 539 + return -ENODEV; 540 + } 541 + 542 + static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p, 543 + struct fw_iso_buffer *buffer, unsigned long payload) 544 + { 545 + return -ENODEV; 546 + } 547 + 548 static const struct fw_card_driver dummy_driver_template = { 549 + .read_phy_reg = dummy_read_phy_reg, 550 + .update_phy_reg = dummy_update_phy_reg, 551 + .send_request = dummy_send_request, 552 + .send_response = dummy_send_response, 553 + .cancel_packet = dummy_cancel_packet, 554 + .enable_phys_dma = dummy_enable_phys_dma, 555 + .allocate_iso_context = dummy_allocate_iso_context, 556 + .start_iso = dummy_start_iso, 557 + .set_iso_channels = dummy_set_iso_channels, 558 + .queue_iso = dummy_queue_iso, 559 }; 560 561 void fw_card_release(struct kref *kref) ··· 545 546 card->driver->update_phy_reg(card, 4, 547 PHY_LINK_ACTIVE | PHY_CONTENDER, 0); 548 + fw_schedule_bus_reset(card, false, true); 549 550 mutex_lock(&card_mutex); 551 list_del_init(&card->link); ··· 565 WARN_ON(!list_empty(&card->transaction_list)); 566 } 567 EXPORT_SYMBOL(fw_core_remove_card);
+327 -82
drivers/firewire/core-cdev.c
··· 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 */ 20 21 #include <linux/compat.h> 22 #include <linux/delay.h> 23 #include <linux/device.h> ··· 34 #include <linux/module.h> 35 #include <linux/mutex.h> 36 #include <linux/poll.h> 37 - #include <linux/sched.h> 38 #include <linux/slab.h> 39 #include <linux/spinlock.h> 40 #include <linux/string.h> ··· 47 #include <asm/system.h> 48 49 #include "core.h" 50 51 struct client { 52 u32 version; ··· 70 u64 iso_closure; 71 struct fw_iso_buffer buffer; 72 unsigned long vm_start; 73 74 struct list_head link; 75 struct kref kref; ··· 118 119 struct inbound_transaction_resource { 120 struct client_resource resource; 121 struct fw_request *request; 122 void *data; 123 size_t length; ··· 183 184 struct inbound_transaction_event { 185 struct event event; 186 - struct fw_cdev_event_request request; 187 }; 188 189 struct iso_interrupt_event { ··· 194 struct fw_cdev_event_iso_interrupt interrupt; 195 }; 196 197 struct iso_resource_event { 198 struct event event; 199 struct fw_cdev_event_iso_resource iso_resource; 200 }; 201 202 static inline void __user *u64_to_uptr(__u64 value) ··· 251 idr_init(&client->resource_idr); 252 INIT_LIST_HEAD(&client->event_list); 253 init_waitqueue_head(&client->wait); 254 kref_init(&client->kref); 255 256 file->private_data = client; ··· 342 event->generation = client->device->generation; 343 event->node_id = client->device->node_id; 344 event->local_node_id = card->local_node->node_id; 345 - event->bm_node_id = 0; /* FIXME: We don't track the BM. */ 346 event->irm_node_id = card->irm_node->node_id; 347 event->root_node_id = card->root_node->node_id; 348 ··· 373 374 e = kzalloc(sizeof(*e), GFP_KERNEL); 375 if (e == NULL) { 376 - fw_notify("Out of memory when allocating bus reset event\n"); 377 return; 378 } 379 ··· 419 struct fw_cdev_allocate_iso_resource allocate_iso_resource; 420 struct fw_cdev_send_stream_packet send_stream_packet; 421 struct fw_cdev_get_cycle_timer2 get_cycle_timer2; 422 }; 423 424 static int ioctl_get_info(struct client *client, union ioctl_arg *arg) ··· 431 unsigned long ret = 0; 432 433 client->version = a->version; 434 - a->version = FW_CDEV_VERSION; 435 a->card = client->device->card->index; 436 437 down_read(&fw_device_rwsem); ··· 590 (request->length > 4096 || request->length > 512 << speed)) 591 return -EIO; 592 593 e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); 594 if (e == NULL) 595 return -ENOMEM; ··· 666 if (is_fcp_request(r->request)) 667 kfree(r->data); 668 else 669 - fw_send_response(client->device->card, r->request, 670 - RCODE_CONFLICT_ERROR); 671 kfree(r); 672 } 673 674 static void handle_request(struct fw_card *card, struct fw_request *request, 675 int tcode, int destination, int source, 676 - int generation, int speed, 677 - unsigned long long offset, 678 void *payload, size_t length, void *callback_data) 679 { 680 struct address_handler_resource *handler = callback_data; 681 struct inbound_transaction_resource *r; 682 struct inbound_transaction_event *e; 683 void *fcp_frame = NULL; 684 int ret; 685 686 r = kmalloc(sizeof(*r), GFP_ATOMIC); 687 e = kmalloc(sizeof(*e), GFP_ATOMIC); 688 - if (r == NULL || e == NULL) 689 goto failed; 690 - 691 r->request = request; 692 r->data = payload; 693 r->length = length; ··· 715 if (ret < 0) 716 goto failed; 717 718 - e->request.type = FW_CDEV_EVENT_REQUEST; 719 - e->request.tcode = tcode; 720 - e->request.offset = offset; 721 - e->request.length = length; 722 - e->request.handle = r->resource.handle; 723 - 
e->request.closure = handler->closure; 724 725 queue_event(handler->client, &e->event, 726 - &e->request, sizeof(e->request), r->data, length); 727 return; 728 729 failed: ··· 755 756 if (!is_fcp_request(request)) 757 fw_send_response(card, request, RCODE_CONFLICT_ERROR); 758 } 759 760 static void release_address_handler(struct client *client, ··· 781 return -ENOMEM; 782 783 region.start = a->offset; 784 - region.end = a->offset + a->length; 785 r->handler.length = a->length; 786 r->handler.address_callback = handle_request; 787 r->handler.callback_data = r; ··· 797 kfree(r); 798 return ret; 799 } 800 801 r->resource.release = release_address_handler; 802 ret = add_client_resource(client, &r->resource, GFP_KERNEL); ··· 832 if (is_fcp_request(r->request)) 833 goto out; 834 835 - if (a->length < r->length) 836 - r->length = a->length; 837 - if (copy_from_user(r->data, u64_to_uptr(a->data), r->length)) { 838 ret = -EFAULT; 839 kfree(r->request); 840 goto out; 841 } 842 - fw_send_response(client->device->card, r->request, a->rcode); 843 out: 844 kfree(r); 845 846 return ret; ··· 852 853 static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg) 854 { 855 - return fw_core_initiate_bus_reset(client->device->card, 856 arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET); 857 } 858 859 static void release_descriptor(struct client *client, ··· 925 struct client *client = data; 926 struct iso_interrupt_event *e; 927 928 - e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC); 929 - if (e == NULL) 930 return; 931 - 932 e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; 933 e->interrupt.closure = client->iso_closure; 934 e->interrupt.cycle = cycle; ··· 939 sizeof(e->interrupt) + header_length, NULL, 0); 940 } 941 942 static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) 943 { 944 struct fw_cdev_create_iso_context *a = &arg->create_iso_context; 945 struct fw_iso_context *context; 946 947 - /* We only support one context at this time. */ 948 - if (client->iso_context != NULL) 949 - return -EBUSY; 950 - 951 - if (a->channel > 63) 952 - return -EINVAL; 953 954 switch (a->type) { 955 - case FW_ISO_CONTEXT_RECEIVE: 956 - if (a->header_size < 4 || (a->header_size & 3)) 957 return -EINVAL; 958 break; 959 960 - case FW_ISO_CONTEXT_TRANSMIT: 961 - if (a->speed > SCODE_3200) 962 return -EINVAL; 963 break; 964 965 default: ··· 994 } 995 996 context = fw_iso_context_create(client->device->card, a->type, 997 - a->channel, a->speed, a->header_size, 998 - iso_callback, client); 999 if (IS_ERR(context)) 1000 return PTR_ERR(context); 1001 1002 client->iso_closure = a->closure; 1003 client->iso_context = context; 1004 1005 - /* We only support one context at this time. */ 1006 a->handle = 0; 1007 1008 return 0; 1009 } 1010 1011 /* Macros for decoding the iso packet control header. */ ··· 1038 struct fw_cdev_queue_iso *a = &arg->queue_iso; 1039 struct fw_cdev_iso_packet __user *p, *end, *next; 1040 struct fw_iso_context *ctx = client->iso_context; 1041 - unsigned long payload, buffer_end, header_length; 1042 u32 control; 1043 int count; 1044 struct { ··· 1058 * use the indirect payload, the iso buffer need not be mapped 1059 * and the a->data pointer is ignored. 
1060 */ 1061 - 1062 payload = (unsigned long)a->data - client->vm_start; 1063 buffer_end = client->buffer.page_count << PAGE_SHIFT; 1064 if (a->data == 0 || client->buffer.pages == NULL || ··· 1066 buffer_end = 0; 1067 } 1068 1069 - p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets); 1070 1071 if (!access_ok(VERIFY_READ, p, a->size)) 1072 return -EFAULT; 1073 ··· 1085 u.packet.sy = GET_SY(control); 1086 u.packet.header_length = GET_HEADER_LENGTH(control); 1087 1088 - if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) { 1089 - if (u.packet.header_length % 4 != 0) 1090 return -EINVAL; 1091 - header_length = u.packet.header_length; 1092 - } else { 1093 - /* 1094 - * We require that header_length is a multiple of 1095 - * the fixed header size, ctx->header_size. 1096 - */ 1097 - if (ctx->header_size == 0) { 1098 - if (u.packet.header_length > 0) 1099 - return -EINVAL; 1100 - } else if (u.packet.header_length == 0 || 1101 - u.packet.header_length % ctx->header_size != 0) { 1102 return -EINVAL; 1103 - } 1104 - header_length = 0; 1105 } 1106 1107 next = (struct fw_cdev_iso_packet __user *) 1108 - &p->header[header_length / 4]; 1109 if (next > end) 1110 return -EINVAL; 1111 if (__copy_from_user 1112 - (u.packet.header, p->header, header_length)) 1113 return -EFAULT; 1114 if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT && 1115 u.packet.header_length + u.packet.payload_length > 0) ··· 1137 static int ioctl_start_iso(struct client *client, union ioctl_arg *arg) 1138 { 1139 struct fw_cdev_start_iso *a = &arg->start_iso; 1140 1141 if (client->iso_context == NULL || a->handle != 0) 1142 return -EINVAL; ··· 1176 1177 local_irq_disable(); 1178 1179 - cycle_time = card->driver->get_cycle_time(card); 1180 1181 switch (a->clk_id) { 1182 case CLOCK_REALTIME: getnstimeofday(&ts); break; ··· 1457 return init_request(client, &request, dest, a->speed); 1458 } 1459 1460 static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = { 1461 - ioctl_get_info, 1462 - ioctl_send_request, 1463 - ioctl_allocate, 1464 - ioctl_deallocate, 1465 - ioctl_send_response, 1466 - ioctl_initiate_bus_reset, 1467 - ioctl_add_descriptor, 1468 - ioctl_remove_descriptor, 1469 - ioctl_create_iso_context, 1470 - ioctl_queue_iso, 1471 - ioctl_start_iso, 1472 - ioctl_stop_iso, 1473 - ioctl_get_cycle_timer, 1474 - ioctl_allocate_iso_resource, 1475 - ioctl_deallocate_iso_resource, 1476 - ioctl_allocate_iso_resource_once, 1477 - ioctl_deallocate_iso_resource_once, 1478 - ioctl_get_speed, 1479 - ioctl_send_broadcast_request, 1480 - ioctl_send_stream_packet, 1481 - ioctl_get_cycle_timer2, 1482 }; 1483 1484 static int dispatch_ioctl(struct client *client, ··· 1692 { 1693 struct client *client = file->private_data; 1694 struct event *event, *next_event; 1695 1696 mutex_lock(&client->device->client_list_mutex); 1697 list_del(&client->link);
··· 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 */ 20 21 + #include <linux/bug.h> 22 #include <linux/compat.h> 23 #include <linux/delay.h> 24 #include <linux/device.h> ··· 33 #include <linux/module.h> 34 #include <linux/mutex.h> 35 #include <linux/poll.h> 36 + #include <linux/sched.h> /* required for linux/wait.h */ 37 #include <linux/slab.h> 38 #include <linux/spinlock.h> 39 #include <linux/string.h> ··· 46 #include <asm/system.h> 47 48 #include "core.h" 49 + 50 + /* 51 + * ABI version history is documented in linux/firewire-cdev.h. 52 + */ 53 + #define FW_CDEV_KERNEL_VERSION 4 54 + #define FW_CDEV_VERSION_EVENT_REQUEST2 4 55 + #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 56 57 struct client { 58 u32 version; ··· 62 u64 iso_closure; 63 struct fw_iso_buffer buffer; 64 unsigned long vm_start; 65 + 66 + struct list_head phy_receiver_link; 67 + u64 phy_receiver_closure; 68 69 struct list_head link; 70 struct kref kref; ··· 107 108 struct inbound_transaction_resource { 109 struct client_resource resource; 110 + struct fw_card *card; 111 struct fw_request *request; 112 void *data; 113 size_t length; ··· 171 172 struct inbound_transaction_event { 173 struct event event; 174 + union { 175 + struct fw_cdev_event_request request; 176 + struct fw_cdev_event_request2 request2; 177 + } req; 178 }; 179 180 struct iso_interrupt_event { ··· 179 struct fw_cdev_event_iso_interrupt interrupt; 180 }; 181 182 + struct iso_interrupt_mc_event { 183 + struct event event; 184 + struct fw_cdev_event_iso_interrupt_mc interrupt; 185 + }; 186 + 187 struct iso_resource_event { 188 struct event event; 189 struct fw_cdev_event_iso_resource iso_resource; 190 + }; 191 + 192 + struct outbound_phy_packet_event { 193 + struct event event; 194 + struct client *client; 195 + struct fw_packet p; 196 + struct fw_cdev_event_phy_packet phy_packet; 197 + }; 198 + 199 + struct inbound_phy_packet_event { 200 + struct event event; 201 + struct fw_cdev_event_phy_packet phy_packet; 202 }; 203 204 static inline void __user *u64_to_uptr(__u64 value) ··· 219 idr_init(&client->resource_idr); 220 INIT_LIST_HEAD(&client->event_list); 221 init_waitqueue_head(&client->wait); 222 + INIT_LIST_HEAD(&client->phy_receiver_link); 223 kref_init(&client->kref); 224 225 file->private_data = client; ··· 309 event->generation = client->device->generation; 310 event->node_id = client->device->node_id; 311 event->local_node_id = card->local_node->node_id; 312 + event->bm_node_id = card->bm_node_id; 313 event->irm_node_id = card->irm_node->node_id; 314 event->root_node_id = card->root_node->node_id; 315 ··· 340 341 e = kzalloc(sizeof(*e), GFP_KERNEL); 342 if (e == NULL) { 343 + fw_notify("Out of memory when allocating event\n"); 344 return; 345 } 346 ··· 386 struct fw_cdev_allocate_iso_resource allocate_iso_resource; 387 struct fw_cdev_send_stream_packet send_stream_packet; 388 struct fw_cdev_get_cycle_timer2 get_cycle_timer2; 389 + struct fw_cdev_send_phy_packet send_phy_packet; 390 + struct fw_cdev_receive_phy_packets receive_phy_packets; 391 + struct fw_cdev_set_iso_channels set_iso_channels; 392 }; 393 394 static int ioctl_get_info(struct client *client, union ioctl_arg *arg) ··· 395 unsigned long ret = 0; 396 397 client->version = a->version; 398 + a->version = FW_CDEV_KERNEL_VERSION; 399 a->card = client->device->card->index; 400 401 down_read(&fw_device_rwsem); ··· 554 (request->length > 4096 || request->length > 512 << speed)) 555 return -EIO; 556 557 + if (request->tcode == TCODE_WRITE_QUADLET_REQUEST && 558 + 
request->length < 4) 559 + return -EINVAL; 560 + 561 e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); 562 if (e == NULL) 563 return -ENOMEM; ··· 626 if (is_fcp_request(r->request)) 627 kfree(r->data); 628 else 629 + fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR); 630 + 631 + fw_card_put(r->card); 632 kfree(r); 633 } 634 635 static void handle_request(struct fw_card *card, struct fw_request *request, 636 int tcode, int destination, int source, 637 + int generation, unsigned long long offset, 638 void *payload, size_t length, void *callback_data) 639 { 640 struct address_handler_resource *handler = callback_data; 641 struct inbound_transaction_resource *r; 642 struct inbound_transaction_event *e; 643 + size_t event_size0; 644 void *fcp_frame = NULL; 645 int ret; 646 647 + /* card may be different from handler->client->device->card */ 648 + fw_card_get(card); 649 + 650 r = kmalloc(sizeof(*r), GFP_ATOMIC); 651 e = kmalloc(sizeof(*e), GFP_ATOMIC); 652 + if (r == NULL || e == NULL) { 653 + fw_notify("Out of memory when allocating event\n"); 654 goto failed; 655 + } 656 + r->card = card; 657 r->request = request; 658 r->data = payload; 659 r->length = length; ··· 669 if (ret < 0) 670 goto failed; 671 672 + if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) { 673 + struct fw_cdev_event_request *req = &e->req.request; 674 + 675 + if (tcode & 0x10) 676 + tcode = TCODE_LOCK_REQUEST; 677 + 678 + req->type = FW_CDEV_EVENT_REQUEST; 679 + req->tcode = tcode; 680 + req->offset = offset; 681 + req->length = length; 682 + req->handle = r->resource.handle; 683 + req->closure = handler->closure; 684 + event_size0 = sizeof(*req); 685 + } else { 686 + struct fw_cdev_event_request2 *req = &e->req.request2; 687 + 688 + req->type = FW_CDEV_EVENT_REQUEST2; 689 + req->tcode = tcode; 690 + req->offset = offset; 691 + req->source_node_id = source; 692 + req->destination_node_id = destination; 693 + req->card = card->index; 694 + req->generation = generation; 695 + req->length = length; 696 + req->handle = r->resource.handle; 697 + req->closure = handler->closure; 698 + event_size0 = sizeof(*req); 699 + } 700 701 queue_event(handler->client, &e->event, 702 + &e->req, event_size0, r->data, length); 703 return; 704 705 failed: ··· 687 688 if (!is_fcp_request(request)) 689 fw_send_response(card, request, RCODE_CONFLICT_ERROR); 690 + 691 + fw_card_put(card); 692 } 693 694 static void release_address_handler(struct client *client, ··· 711 return -ENOMEM; 712 713 region.start = a->offset; 714 + if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END) 715 + region.end = a->offset + a->length; 716 + else 717 + region.end = a->region_end; 718 + 719 r->handler.length = a->length; 720 r->handler.address_callback = handle_request; 721 r->handler.callback_data = r; ··· 723 kfree(r); 724 return ret; 725 } 726 + a->offset = r->handler.offset; 727 728 r->resource.release = release_address_handler; 729 ret = add_client_resource(client, &r->resource, GFP_KERNEL); ··· 757 if (is_fcp_request(r->request)) 758 goto out; 759 760 + if (a->length != fw_get_response_length(r->request)) { 761 + ret = -EINVAL; 762 + kfree(r->request); 763 + goto out; 764 + } 765 + if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) { 766 ret = -EFAULT; 767 kfree(r->request); 768 goto out; 769 } 770 + fw_send_response(r->card, r->request, a->rcode); 771 out: 772 + fw_card_put(r->card); 773 kfree(r); 774 775 return ret; ··· 773 774 static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg) 775 { 776 + 
fw_schedule_bus_reset(client->device->card, true, 777 arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET); 778 + return 0; 779 } 780 781 static void release_descriptor(struct client *client, ··· 845 struct client *client = data; 846 struct iso_interrupt_event *e; 847 848 + e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC); 849 + if (e == NULL) { 850 + fw_notify("Out of memory when allocating event\n"); 851 return; 852 + } 853 e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; 854 e->interrupt.closure = client->iso_closure; 855 e->interrupt.cycle = cycle; ··· 858 sizeof(e->interrupt) + header_length, NULL, 0); 859 } 860 861 + static void iso_mc_callback(struct fw_iso_context *context, 862 + dma_addr_t completed, void *data) 863 + { 864 + struct client *client = data; 865 + struct iso_interrupt_mc_event *e; 866 + 867 + e = kmalloc(sizeof(*e), GFP_ATOMIC); 868 + if (e == NULL) { 869 + fw_notify("Out of memory when allocating event\n"); 870 + return; 871 + } 872 + e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL; 873 + e->interrupt.closure = client->iso_closure; 874 + e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer, 875 + completed); 876 + queue_event(client, &e->event, &e->interrupt, 877 + sizeof(e->interrupt), NULL, 0); 878 + } 879 + 880 static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) 881 { 882 struct fw_cdev_create_iso_context *a = &arg->create_iso_context; 883 struct fw_iso_context *context; 884 + fw_iso_callback_t cb; 885 886 + BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT || 887 + FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE || 888 + FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL != 889 + FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL); 890 891 switch (a->type) { 892 + case FW_ISO_CONTEXT_TRANSMIT: 893 + if (a->speed > SCODE_3200 || a->channel > 63) 894 return -EINVAL; 895 + 896 + cb = iso_callback; 897 break; 898 899 + case FW_ISO_CONTEXT_RECEIVE: 900 + if (a->header_size < 4 || (a->header_size & 3) || 901 + a->channel > 63) 902 return -EINVAL; 903 + 904 + cb = iso_callback; 905 + break; 906 + 907 + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 908 + cb = (fw_iso_callback_t)iso_mc_callback; 909 break; 910 911 default: ··· 886 } 887 888 context = fw_iso_context_create(client->device->card, a->type, 889 + a->channel, a->speed, a->header_size, cb, client); 890 if (IS_ERR(context)) 891 return PTR_ERR(context); 892 893 + /* We only support one context at this time. */ 894 + spin_lock_irq(&client->lock); 895 + if (client->iso_context != NULL) { 896 + spin_unlock_irq(&client->lock); 897 + fw_iso_context_destroy(context); 898 + return -EBUSY; 899 + } 900 client->iso_closure = a->closure; 901 client->iso_context = context; 902 + spin_unlock_irq(&client->lock); 903 904 a->handle = 0; 905 906 return 0; 907 + } 908 + 909 + static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg) 910 + { 911 + struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels; 912 + struct fw_iso_context *ctx = client->iso_context; 913 + 914 + if (ctx == NULL || a->handle != 0) 915 + return -EINVAL; 916 + 917 + return fw_iso_context_set_channels(ctx, &a->channels); 918 } 919 920 /* Macros for decoding the iso packet control header. 
*/ ··· 913 struct fw_cdev_queue_iso *a = &arg->queue_iso; 914 struct fw_cdev_iso_packet __user *p, *end, *next; 915 struct fw_iso_context *ctx = client->iso_context; 916 + unsigned long payload, buffer_end, transmit_header_bytes = 0; 917 u32 control; 918 int count; 919 struct { ··· 933 * use the indirect payload, the iso buffer need not be mapped 934 * and the a->data pointer is ignored. 935 */ 936 payload = (unsigned long)a->data - client->vm_start; 937 buffer_end = client->buffer.page_count << PAGE_SHIFT; 938 if (a->data == 0 || client->buffer.pages == NULL || ··· 942 buffer_end = 0; 943 } 944 945 + if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3) 946 + return -EINVAL; 947 948 + p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets); 949 if (!access_ok(VERIFY_READ, p, a->size)) 950 return -EFAULT; 951 ··· 959 u.packet.sy = GET_SY(control); 960 u.packet.header_length = GET_HEADER_LENGTH(control); 961 962 + switch (ctx->type) { 963 + case FW_ISO_CONTEXT_TRANSMIT: 964 + if (u.packet.header_length & 3) 965 return -EINVAL; 966 + transmit_header_bytes = u.packet.header_length; 967 + break; 968 + 969 + case FW_ISO_CONTEXT_RECEIVE: 970 + if (u.packet.header_length == 0 || 971 + u.packet.header_length % ctx->header_size != 0) 972 return -EINVAL; 973 + break; 974 + 975 + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 976 + if (u.packet.payload_length == 0 || 977 + u.packet.payload_length & 3) 978 + return -EINVAL; 979 + break; 980 } 981 982 next = (struct fw_cdev_iso_packet __user *) 983 + &p->header[transmit_header_bytes / 4]; 984 if (next > end) 985 return -EINVAL; 986 if (__copy_from_user 987 + (u.packet.header, p->header, transmit_header_bytes)) 988 return -EFAULT; 989 if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT && 990 u.packet.header_length + u.packet.payload_length > 0) ··· 1010 static int ioctl_start_iso(struct client *client, union ioctl_arg *arg) 1011 { 1012 struct fw_cdev_start_iso *a = &arg->start_iso; 1013 + 1014 + BUILD_BUG_ON( 1015 + FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 || 1016 + FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 || 1017 + FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 || 1018 + FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 || 1019 + FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS); 1020 1021 if (client->iso_context == NULL || a->handle != 0) 1022 return -EINVAL; ··· 1042 1043 local_irq_disable(); 1044 1045 + cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME); 1046 1047 switch (a->clk_id) { 1048 case CLOCK_REALTIME: getnstimeofday(&ts); break; ··· 1323 return init_request(client, &request, dest, a->speed); 1324 } 1325 1326 + static void outbound_phy_packet_callback(struct fw_packet *packet, 1327 + struct fw_card *card, int status) 1328 + { 1329 + struct outbound_phy_packet_event *e = 1330 + container_of(packet, struct outbound_phy_packet_event, p); 1331 + 1332 + switch (status) { 1333 + /* expected: */ 1334 + case ACK_COMPLETE: e->phy_packet.rcode = RCODE_COMPLETE; break; 1335 + /* should never happen with PHY packets: */ 1336 + case ACK_PENDING: e->phy_packet.rcode = RCODE_COMPLETE; break; 1337 + case ACK_BUSY_X: 1338 + case ACK_BUSY_A: 1339 + case ACK_BUSY_B: e->phy_packet.rcode = RCODE_BUSY; break; 1340 + case ACK_DATA_ERROR: e->phy_packet.rcode = RCODE_DATA_ERROR; break; 1341 + case ACK_TYPE_ERROR: e->phy_packet.rcode = RCODE_TYPE_ERROR; break; 1342 + /* stale generation; cancelled; on certain controllers: no ack */ 1343 + default: 
e->phy_packet.rcode = status; break; 1344 + } 1345 + e->phy_packet.data[0] = packet->timestamp; 1346 + 1347 + queue_event(e->client, &e->event, &e->phy_packet, 1348 + sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0); 1349 + client_put(e->client); 1350 + } 1351 + 1352 + static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) 1353 + { 1354 + struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet; 1355 + struct fw_card *card = client->device->card; 1356 + struct outbound_phy_packet_event *e; 1357 + 1358 + /* Access policy: Allow this ioctl only on local nodes' device files. */ 1359 + if (!client->device->is_local) 1360 + return -ENOSYS; 1361 + 1362 + e = kzalloc(sizeof(*e) + 4, GFP_KERNEL); 1363 + if (e == NULL) 1364 + return -ENOMEM; 1365 + 1366 + client_get(client); 1367 + e->client = client; 1368 + e->p.speed = SCODE_100; 1369 + e->p.generation = a->generation; 1370 + e->p.header[0] = a->data[0]; 1371 + e->p.header[1] = a->data[1]; 1372 + e->p.header_length = 8; 1373 + e->p.callback = outbound_phy_packet_callback; 1374 + e->phy_packet.closure = a->closure; 1375 + e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT; 1376 + if (is_ping_packet(a->data)) 1377 + e->phy_packet.length = 4; 1378 + 1379 + card->driver->send_request(card, &e->p); 1380 + 1381 + return 0; 1382 + } 1383 + 1384 + static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg) 1385 + { 1386 + struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets; 1387 + struct fw_card *card = client->device->card; 1388 + 1389 + /* Access policy: Allow this ioctl only on local nodes' device files. */ 1390 + if (!client->device->is_local) 1391 + return -ENOSYS; 1392 + 1393 + spin_lock_irq(&card->lock); 1394 + 1395 + list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list); 1396 + client->phy_receiver_closure = a->closure; 1397 + 1398 + spin_unlock_irq(&card->lock); 1399 + 1400 + return 0; 1401 + } 1402 + 1403 + void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p) 1404 + { 1405 + struct client *client; 1406 + struct inbound_phy_packet_event *e; 1407 + unsigned long flags; 1408 + 1409 + spin_lock_irqsave(&card->lock, flags); 1410 + 1411 + list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) { 1412 + e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC); 1413 + if (e == NULL) { 1414 + fw_notify("Out of memory when allocating event\n"); 1415 + break; 1416 + } 1417 + e->phy_packet.closure = client->phy_receiver_closure; 1418 + e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED; 1419 + e->phy_packet.rcode = RCODE_COMPLETE; 1420 + e->phy_packet.length = 8; 1421 + e->phy_packet.data[0] = p->header[1]; 1422 + e->phy_packet.data[1] = p->header[2]; 1423 + queue_event(client, &e->event, 1424 + &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0); 1425 + } 1426 + 1427 + spin_unlock_irqrestore(&card->lock, flags); 1428 + } 1429 + 1430 static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = { 1431 + [0x00] = ioctl_get_info, 1432 + [0x01] = ioctl_send_request, 1433 + [0x02] = ioctl_allocate, 1434 + [0x03] = ioctl_deallocate, 1435 + [0x04] = ioctl_send_response, 1436 + [0x05] = ioctl_initiate_bus_reset, 1437 + [0x06] = ioctl_add_descriptor, 1438 + [0x07] = ioctl_remove_descriptor, 1439 + [0x08] = ioctl_create_iso_context, 1440 + [0x09] = ioctl_queue_iso, 1441 + [0x0a] = ioctl_start_iso, 1442 + [0x0b] = ioctl_stop_iso, 1443 + [0x0c] = ioctl_get_cycle_timer, 1444 + [0x0d] = ioctl_allocate_iso_resource, 1445 + [0x0e] = 
ioctl_deallocate_iso_resource, 1446 + [0x0f] = ioctl_allocate_iso_resource_once, 1447 + [0x10] = ioctl_deallocate_iso_resource_once, 1448 + [0x11] = ioctl_get_speed, 1449 + [0x12] = ioctl_send_broadcast_request, 1450 + [0x13] = ioctl_send_stream_packet, 1451 + [0x14] = ioctl_get_cycle_timer2, 1452 + [0x15] = ioctl_send_phy_packet, 1453 + [0x16] = ioctl_receive_phy_packets, 1454 + [0x17] = ioctl_set_iso_channels, 1455 }; 1456 1457 static int dispatch_ioctl(struct client *client, ··· 1451 { 1452 struct client *client = file->private_data; 1453 struct event *event, *next_event; 1454 + 1455 + spin_lock_irq(&client->device->card->lock); 1456 + list_del(&client->phy_receiver_link); 1457 + spin_unlock_irq(&client->device->card->lock); 1458 1459 mutex_lock(&client->device->client_list_mutex); 1460 list_del(&client->link);
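The new ioctls occupy indices 0x15-0x17 of the character device's '#' command block. A sketch of sending a PHY packet from userspace, assuming the struct fw_cdev_send_phy_packet ABI that accompanies this series and a caller-supplied quadlet value:

    #include <sys/ioctl.h>
    #include <linux/firewire-cdev.h>

    /* fd is an open /dev/fw* file of a local node; per
     * ioctl_send_phy_packet() above, the ioctl is rejected with ENOSYS
     * on remote nodes' device files. */
    static int send_phy_packet(int fd, __u32 quadlet, __u32 generation)
    {
            struct fw_cdev_send_phy_packet p = {
                    .closure    = 0,                     /* echoed in the event */
                    .data       = { quadlet, ~quadlet }, /* 2nd quadlet = inverse */
                    .generation = generation,
            };

            /* Completion is reported asynchronously as a
             * FW_CDEV_EVENT_PHY_PACKET_SENT event. */
            return ioctl(fd, FW_CDEV_IOC_SEND_PHY_PACKET, &p);
    }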
+6 -5
drivers/firewire/core-device.c
··· 107 } 108 109 /** 110 - * fw_csr_string - reads a string from the configuration ROM 111 - * @directory: e.g. root directory or unit directory 112 - * @key: the key of the preceding directory entry 113 - * @buf: where to put the string 114 - * @size: size of @buf, in bytes 115 * 116 * The string is taken from a minimal ASCII text descriptor leaf after 117 * the immediate entry with @key. The string is zero-terminated. ··· 1136 goto give_up; 1137 } 1138 1139 create_units(device); 1140 1141 /* Userspace may want to re-read attributes. */
··· 107 } 108 109 /** 110 + * fw_csr_string() - reads a string from the configuration ROM 111 + * @directory: e.g. root directory or unit directory 112 + * @key: the key of the preceding directory entry 113 + * @buf: where to put the string 114 + * @size: size of @buf, in bytes 115 * 116 * The string is taken from a minimal ASCII text descriptor leaf after 117 * the immediate entry with @key. The string is zero-terminated. ··· 1136 goto give_up; 1137 } 1138 1139 + fw_device_cdev_update(device); 1140 create_units(device); 1141 1142 /* Userspace may want to re-read attributes. */
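The kernel-doc header merely gains the function() - description form used elsewhere. For context, a sketch of typical use of this helper from a unit driver; the buffer size and log message are illustrative:

    static void log_model_name(struct fw_unit *unit)
    {
            char model[32];

            /* Fetch the text descriptor following the CSR_MODEL entry of
             * the unit directory; the result is zero-terminated on success. */
            if (fw_csr_string(unit->directory, CSR_MODEL, model,
                              sizeof(model)) >= 0)
                    dev_info(&unit->device, "model: %s\n", model);
    }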
+26 -8
drivers/firewire/core-iso.c
··· 118 } 119 EXPORT_SYMBOL(fw_iso_buffer_destroy); 120 121 struct fw_iso_context *fw_iso_context_create(struct fw_card *card, 122 int type, int channel, int speed, size_t header_size, 123 fw_iso_callback_t callback, void *callback_data) ··· 151 ctx->channel = channel; 152 ctx->speed = speed; 153 ctx->header_size = header_size; 154 - ctx->callback = callback; 155 ctx->callback_data = callback_data; 156 157 return ctx; ··· 160 161 void fw_iso_context_destroy(struct fw_iso_context *ctx) 162 { 163 - struct fw_card *card = ctx->card; 164 - 165 - card->driver->free_iso_context(ctx); 166 } 167 EXPORT_SYMBOL(fw_iso_context_destroy); 168 ··· 171 } 172 EXPORT_SYMBOL(fw_iso_context_start); 173 174 int fw_iso_context_queue(struct fw_iso_context *ctx, 175 struct fw_iso_packet *packet, 176 struct fw_iso_buffer *buffer, 177 unsigned long payload) 178 { 179 - struct fw_card *card = ctx->card; 180 - 181 - return card->driver->queue_iso(ctx, packet, buffer, payload); 182 } 183 EXPORT_SYMBOL(fw_iso_context_queue); 184 ··· 297 } 298 299 /** 300 - * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth 301 * 302 * In parameters: card, generation, channels_mask, bandwidth, allocate 303 * Out parameters: channel, bandwidth
··· 118 } 119 EXPORT_SYMBOL(fw_iso_buffer_destroy); 120 121 + /* Convert DMA address to offset into virtually contiguous buffer. */ 122 + size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed) 123 + { 124 + int i; 125 + dma_addr_t address; 126 + ssize_t offset; 127 + 128 + for (i = 0; i < buffer->page_count; i++) { 129 + address = page_private(buffer->pages[i]); 130 + offset = (ssize_t)completed - (ssize_t)address; 131 + if (offset > 0 && offset <= PAGE_SIZE) 132 + return (i << PAGE_SHIFT) + offset; 133 + } 134 + 135 + return 0; 136 + } 137 + 138 struct fw_iso_context *fw_iso_context_create(struct fw_card *card, 139 int type, int channel, int speed, size_t header_size, 140 fw_iso_callback_t callback, void *callback_data) ··· 134 ctx->channel = channel; 135 ctx->speed = speed; 136 ctx->header_size = header_size; 137 + ctx->callback.sc = callback; 138 ctx->callback_data = callback_data; 139 140 return ctx; ··· 143 144 void fw_iso_context_destroy(struct fw_iso_context *ctx) 145 { 146 + ctx->card->driver->free_iso_context(ctx); 147 } 148 EXPORT_SYMBOL(fw_iso_context_destroy); 149 ··· 156 } 157 EXPORT_SYMBOL(fw_iso_context_start); 158 159 + int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels) 160 + { 161 + return ctx->card->driver->set_iso_channels(ctx, channels); 162 + } 163 + 164 int fw_iso_context_queue(struct fw_iso_context *ctx, 165 struct fw_iso_packet *packet, 166 struct fw_iso_buffer *buffer, 167 unsigned long payload) 168 { 169 + return ctx->card->driver->queue_iso(ctx, packet, buffer, payload); 170 } 171 EXPORT_SYMBOL(fw_iso_context_queue); 172 ··· 279 } 280 281 /** 282 + * fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth 283 * 284 * In parameters: card, generation, channels_mask, bandwidth, allocate 285 * Out parameters: channel, bandwidth
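fw_iso_context_set_channels() is the in-kernel counterpart of the ioctl_set_iso_channels handler added above. A sketch of selecting channels on a multichannel receive context, with arbitrary example channel numbers:

    static int listen_on_example_channels(struct fw_iso_context *ctx)
    {
            /* Bit n selects channel n; channels 10 and 11 are arbitrary
             * examples. Only valid for a context of type
             * FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL. */
            u64 channels = (1ULL << 10) | (1ULL << 11);

            return fw_iso_context_set_channels(ctx, &channels);
    }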
+9 -13
drivers/firewire/core-topology.c
··· 174 return list_entry(l, struct fw_node, link); 175 } 176 177 - /** 178 - * build_tree - Build the tree representation of the topology 179 - * @self_ids: array of self IDs to create the tree from 180 - * @self_id_count: the length of the self_ids array 181 - * @local_id: the node ID of the local node 182 - * 183 * This function builds the tree representation of the topology given 184 * by the self IDs from the latest bus reset. During the construction 185 * of the tree, the function checks that the self IDs are valid and ··· 415 } 416 } 417 418 - /** 419 - * update_tree - compare the old topology tree for card with the new 420 - * one specified by root. Queue the nodes and mark them as either 421 - * found, lost or updated. Update the nodes in the card topology tree 422 - * as we go. 423 */ 424 static void update_tree(struct fw_card *card, struct fw_node *root) 425 { ··· 518 } 519 520 void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, 521 - int self_id_count, u32 *self_ids) 522 { 523 struct fw_node *local_node; 524 unsigned long flags; ··· 537 538 spin_lock_irqsave(&card->lock, flags); 539 540 - card->broadcast_channel_allocated = false; 541 card->node_id = node_id; 542 /* 543 * Update node_id before generation to prevent anybody from using ··· 546 smp_wmb(); 547 card->generation = generation; 548 card->reset_jiffies = jiffies; 549 fw_schedule_bm_work(card, 0); 550 551 local_node = build_tree(card, self_ids, self_id_count);
··· 174 return list_entry(l, struct fw_node, link); 175 } 176 177 + /* 178 * This function builds the tree representation of the topology given 179 * by the self IDs from the latest bus reset. During the construction 180 * of the tree, the function checks that the self IDs are valid and ··· 420 } 421 } 422 423 + /* 424 + * Compare the old topology tree for card with the new one specified by root. 425 + * Queue the nodes and mark them as either found, lost or updated. 426 + * Update the nodes in the card topology tree as we go. 427 */ 428 static void update_tree(struct fw_card *card, struct fw_node *root) 429 { ··· 524 } 525 526 void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, 527 + int self_id_count, u32 *self_ids, bool bm_abdicate) 528 { 529 struct fw_node *local_node; 530 unsigned long flags; ··· 543 544 spin_lock_irqsave(&card->lock, flags); 545 546 + card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated; 547 card->node_id = node_id; 548 /* 549 * Update node_id before generation to prevent anybody from using ··· 552 smp_wmb(); 553 card->generation = generation; 554 card->reset_jiffies = jiffies; 555 + card->bm_node_id = 0xffff; 556 + card->bm_abdicate = bm_abdicate; 557 fw_schedule_bm_work(card, 0); 558 559 local_node = build_tree(card, self_ids, self_id_count);
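The smp_wmb() between the node_id and generation stores implies a paired read barrier wherever a consumer snapshots the two fields. A sketch of that reader side (the helper name is hypothetical; the pattern matches the existing fw_device users):

    /* Read a consistent pair: fetching generation first and issuing
     * smp_rmb() guarantees the node_id read afterwards is at least as
     * new as the generation; pairs with the smp_wmb() in
     * fw_core_handle_bus_reset(). */
    static int get_ids(struct fw_card *card, int *generation)
    {
            *generation = card->generation;
            smp_rmb();
            return card->node_id;
    }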
+228 -78
drivers/firewire/core-transaction.c
··· 246 break; 247 248 default: 249 - WARN(1, KERN_ERR "wrong tcode %d", tcode); 250 } 251 common: 252 packet->speed = speed; ··· 273 } 274 275 /** 276 - * This function provides low-level access to the IEEE1394 transaction 277 - * logic. Most C programs would use either fw_read(), fw_write() or 278 - * fw_lock() instead - those function are convenience wrappers for 279 - * this function. The fw_send_request() function is primarily 280 - * provided as a flexible, one-stop entry point for languages bindings 281 - * and protocol bindings. 282 * 283 - * FIXME: Document this function further, in particular the possible 284 - * values for rcode in the callback. In short, we map ACK_COMPLETE to 285 - * RCODE_COMPLETE, internal errors set errno and set rcode to 286 - * RCODE_SEND_ERROR (which is out of range for standard ieee1394 287 - * rcodes). All other rcodes are forwarded unchanged. For all 288 - * errors, payload is NULL, length is 0. 289 * 290 - * Can not expect the callback to be called before the function 291 - * returns, though this does happen in some cases (ACK_COMPLETE and 292 - * errors). 293 * 294 - * The payload is only used for write requests and must not be freed 295 - * until the callback has been called. 296 * 297 - * @param card the card from which to send the request 298 - * @param tcode the tcode for this transaction. Do not use 299 - * TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP 300 - * etc. to specify tcode and ext_tcode. 301 - * @param node_id the destination node ID (bus ID and PHY ID concatenated) 302 - * @param generation the generation for which node_id is valid 303 - * @param speed the speed to use for sending the request 304 - * @param offset the 48 bit offset on the destination node 305 - * @param payload the data payload for the request subaction 306 - * @param length the length in bytes of the data to read 307 - * @param callback function to be called when the transaction is completed 308 - * @param callback_data pointer to arbitrary data, which will be 309 - * passed to the callback 310 - * 311 - * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller 312 * needs to synthesize @destination_id with fw_stream_packet_destination_id(). 313 */ 314 void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, 315 int destination_id, int generation, int speed, ··· 348 setup_timer(&t->split_timeout_timer, 349 split_transaction_timeout_callback, (unsigned long)t); 350 /* FIXME: start this timer later, relative to t->timestamp */ 351 - mod_timer(&t->split_timeout_timer, jiffies + DIV_ROUND_UP(HZ, 10)); 352 t->callback = callback; 353 t->callback_data = callback_data; 354 ··· 384 } 385 386 /** 387 - * fw_run_transaction - send request and sleep until transaction is completed 388 * 389 - * Returns the RCODE. 390 */ 391 int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, 392 int generation, int speed, unsigned long long offset, ··· 429 int node_id, int generation, int gap_count) 430 { 431 long timeout = DIV_ROUND_UP(HZ, 10); 432 - u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) | 433 - PHY_CONFIG_ROOT_ID(node_id) | 434 - PHY_CONFIG_GAP_COUNT(gap_count); 435 436 mutex_lock(&phy_config_mutex); 437 ··· 518 } 519 520 /** 521 - * fw_core_add_address_handler - register for incoming requests 522 - * @handler: callback 523 - * @region: region in the IEEE 1212 node space address range 524 * 525 * region->start, ->end, and handler->length have to be quadlet-aligned. 
526 * ··· 543 int ret = -EBUSY; 544 545 if (region->start & 0xffff000000000003ULL || 546 - region->end & 0xffff000000000003ULL || 547 region->start >= region->end || 548 handler->length & 3 || 549 handler->length == 0) 550 return -EINVAL; ··· 575 EXPORT_SYMBOL(fw_core_add_address_handler); 576 577 /** 578 - * fw_core_remove_address_handler - unregister an address handler 579 */ 580 void fw_core_remove_address_handler(struct fw_address_handler *handler) 581 { ··· 602 603 request = container_of(packet, struct fw_request, response); 604 kfree(request); 605 } 606 607 void fw_fill_response(struct fw_packet *response, u32 *request_header, ··· 690 break; 691 692 default: 693 - WARN(1, KERN_ERR "wrong tcode %d", tcode); 694 } 695 696 response->payload_mapped = false; 697 } 698 EXPORT_SYMBOL(fw_fill_response); 699 700 - static struct fw_request *allocate_request(struct fw_packet *p) 701 { 702 struct fw_request *request; 703 u32 *data, length; 704 - int request_tcode, t; 705 706 request_tcode = HEADER_GET_TCODE(p->header[0]); 707 switch (request_tcode) { ··· 753 if (request == NULL) 754 return NULL; 755 756 - t = (p->timestamp & 0x1fff) + 4000; 757 - if (t >= 8000) 758 - t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000; 759 - else 760 - t = (p->timestamp & ~0x1fff) + t; 761 - 762 request->response.speed = p->speed; 763 - request->response.timestamp = t; 764 request->response.generation = p->generation; 765 request->response.ack = 0; 766 request->response.callback = free_response_callback; ··· 784 785 if (rcode == RCODE_COMPLETE) 786 fw_fill_response(&request->response, request->request_header, 787 - rcode, request->data, request->length); 788 else 789 fw_fill_response(&request->response, request->request_header, 790 rcode, NULL, 0); ··· 803 unsigned long flags; 804 int tcode, destination, source; 805 806 - tcode = HEADER_GET_TCODE(p->header[0]); 807 destination = HEADER_GET_DESTINATION(p->header[0]); 808 source = HEADER_GET_SOURCE(p->header[1]); 809 810 spin_lock_irqsave(&address_handler_lock, flags); 811 handler = lookup_enclosing_address_handler(&address_handler_list, ··· 827 else 828 handler->address_callback(card, request, 829 tcode, destination, source, 830 - p->generation, p->speed, offset, 831 request->data, request->length, 832 handler->callback_data); 833 } ··· 865 if (is_enclosing_handler(handler, offset, request->length)) 866 handler->address_callback(card, NULL, tcode, 867 destination, source, 868 - p->generation, p->speed, 869 - offset, request->data, 870 request->length, 871 handler->callback_data); 872 } ··· 883 if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE) 884 return; 885 886 - request = allocate_request(p); 887 if (request == NULL) { 888 /* FIXME: send statically allocated busy packet. 
*/ 889 return; ··· 911 unsigned long flags; 912 u32 *data; 913 size_t data_length; 914 - int tcode, tlabel, destination, source, rcode; 915 916 - tcode = HEADER_GET_TCODE(p->header[0]); 917 - tlabel = HEADER_GET_TLABEL(p->header[0]); 918 - destination = HEADER_GET_DESTINATION(p->header[0]); 919 - source = HEADER_GET_SOURCE(p->header[1]); 920 - rcode = HEADER_GET_RCODE(p->header[1]); 921 922 spin_lock_irqsave(&card->lock, flags); 923 list_for_each_entry(t, &card->transaction_list, link) { ··· 981 982 static void handle_topology_map(struct fw_card *card, struct fw_request *request, 983 int tcode, int destination, int source, int generation, 984 - int speed, unsigned long long offset, 985 - void *payload, size_t length, void *callback_data) 986 { 987 int start; 988 ··· 1011 { .start = CSR_REGISTER_BASE, 1012 .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; 1013 1014 static void handle_registers(struct fw_card *card, struct fw_request *request, 1015 int tcode, int destination, int source, int generation, 1016 - int speed, unsigned long long offset, 1017 - void *payload, size_t length, void *callback_data) 1018 { 1019 int reg = offset & ~CSR_REGISTER_BASE; 1020 __be32 *data = payload; 1021 int rcode = RCODE_COMPLETE; 1022 1023 switch (reg) { 1024 case CSR_CYCLE_TIME: 1025 - if (TCODE_IS_READ_REQUEST(tcode) && length == 4) 1026 - *data = cpu_to_be32(card->driver->get_cycle_time(card)); 1027 else 1028 rcode = RCODE_TYPE_ERROR; 1029 break; ··· 1130 */ 1131 BUG(); 1132 break; 1133 - 1134 - case CSR_BUSY_TIMEOUT: 1135 - /* FIXME: Implement this. */ 1136 - 1137 - case CSR_BUS_TIME: 1138 - /* Useless without initialization by the bus manager. */ 1139 1140 default: 1141 rcode = RCODE_ADDRESS_ERROR;
··· 246 break; 247 248 default: 249 + WARN(1, "wrong tcode %d", tcode); 250 } 251 common: 252 packet->speed = speed; ··· 273 } 274 275 /** 276 + * fw_send_request() - submit a request packet for transmission 277 + * @card: interface to send the request at 278 + * @t: transaction instance to which the request belongs 279 + * @tcode: transaction code 280 + * @destination_id: destination node ID, consisting of bus_ID and phy_ID 281 + * @generation: bus generation in which request and response are valid 282 + * @speed: transmission speed 283 + * @offset: 48bit wide offset into destination's address space 284 + * @payload: data payload for the request subaction 285 + * @length: length of the payload, in bytes 286 + * @callback: function to be called when the transaction is completed 287 + * @callback_data: data to be passed to the transaction completion callback 288 * 289 + * Submit a request packet into the asynchronous request transmission queue. 290 + * Can be called from atomic context. If you prefer a blocking API, use 291 + * fw_run_transaction() in a context that can sleep. 292 * 293 + * In case of lock requests, specify one of the firewire-core specific %TCODE_ 294 + * constants instead of %TCODE_LOCK_REQUEST in @tcode. 295 * 296 + * Make sure that the value in @destination_id is not older than the one in 297 + * @generation. Otherwise the request is in danger to be sent to a wrong node. 298 * 299 + * In case of asynchronous stream packets i.e. %TCODE_STREAM_DATA, the caller 300 * needs to synthesize @destination_id with fw_stream_packet_destination_id(). 301 + * It will contain tag, channel, and sy data instead of a node ID then. 302 + * 303 + * The payload buffer at @data is going to be DMA-mapped except in case of 304 + * quadlet-sized payload or of local (loopback) requests. Hence make sure that 305 + * the buffer complies with the restrictions for DMA-mapped memory. The 306 + * @payload must not be freed before the @callback is called. 307 + * 308 + * In case of request types without payload, @data is NULL and @length is 0. 309 + * 310 + * After the transaction is completed successfully or unsuccessfully, the 311 + * @callback will be called. Among its parameters is the response code which 312 + * is either one of the rcodes per IEEE 1394 or, in case of internal errors, 313 + * the firewire-core specific %RCODE_SEND_ERROR. The other firewire-core 314 + * specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION, 315 + * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request 316 + * generation, or missing ACK respectively. 317 + * 318 + * Note some timing corner cases: fw_send_request() may complete much earlier 319 + * than when the request packet actually hits the wire. On the other hand, 320 + * transaction completion and hence execution of @callback may happen even 321 + * before fw_send_request() returns. 322 */ 323 void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, 324 int destination_id, int generation, int speed, ··· 339 setup_timer(&t->split_timeout_timer, 340 split_transaction_timeout_callback, (unsigned long)t); 341 /* FIXME: start this timer later, relative to t->timestamp */ 342 + mod_timer(&t->split_timeout_timer, 343 + jiffies + card->split_timeout_jiffies); 344 t->callback = callback; 345 t->callback_data = callback_data; 346 ··· 374 } 375 376 /** 377 + * fw_run_transaction() - send request and sleep until transaction is completed 378 * 379 + * Returns the RCODE. See fw_send_request() for parameter documentation. 
380 + * Unlike fw_send_request(), @data points to the payload of the request or/and 381 + * to the payload of the response. 382 */ 383 int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, 384 int generation, int speed, unsigned long long offset, ··· 417 int node_id, int generation, int gap_count) 418 { 419 long timeout = DIV_ROUND_UP(HZ, 10); 420 + u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG); 421 + 422 + if (node_id != FW_PHY_CONFIG_NO_NODE_ID) 423 + data |= PHY_CONFIG_ROOT_ID(node_id); 424 + 425 + if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) { 426 + gap_count = card->driver->read_phy_reg(card, 1); 427 + if (gap_count < 0) 428 + return; 429 + 430 + gap_count &= 63; 431 + if (gap_count == 63) 432 + return; 433 + } 434 + data |= PHY_CONFIG_GAP_COUNT(gap_count); 435 436 mutex_lock(&phy_config_mutex); 437 ··· 494 } 495 496 /** 497 + * fw_core_add_address_handler() - register for incoming requests 498 + * @handler: callback 499 + * @region: region in the IEEE 1212 node space address range 500 * 501 * region->start, ->end, and handler->length have to be quadlet-aligned. 502 * ··· 519 int ret = -EBUSY; 520 521 if (region->start & 0xffff000000000003ULL || 522 region->start >= region->end || 523 + region->end > 0x0001000000000000ULL || 524 handler->length & 3 || 525 handler->length == 0) 526 return -EINVAL; ··· 551 EXPORT_SYMBOL(fw_core_add_address_handler); 552 553 /** 554 + * fw_core_remove_address_handler() - unregister an address handler 555 */ 556 void fw_core_remove_address_handler(struct fw_address_handler *handler) 557 { ··· 578 579 request = container_of(packet, struct fw_request, response); 580 kfree(request); 581 + } 582 + 583 + int fw_get_response_length(struct fw_request *r) 584 + { 585 + int tcode, ext_tcode, data_length; 586 + 587 + tcode = HEADER_GET_TCODE(r->request_header[0]); 588 + 589 + switch (tcode) { 590 + case TCODE_WRITE_QUADLET_REQUEST: 591 + case TCODE_WRITE_BLOCK_REQUEST: 592 + return 0; 593 + 594 + case TCODE_READ_QUADLET_REQUEST: 595 + return 4; 596 + 597 + case TCODE_READ_BLOCK_REQUEST: 598 + data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]); 599 + return data_length; 600 + 601 + case TCODE_LOCK_REQUEST: 602 + ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]); 603 + data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]); 604 + switch (ext_tcode) { 605 + case EXTCODE_FETCH_ADD: 606 + case EXTCODE_LITTLE_ADD: 607 + return data_length; 608 + default: 609 + return data_length / 2; 610 + } 611 + 612 + default: 613 + WARN(1, "wrong tcode %d", tcode); 614 + return 0; 615 + } 616 } 617 618 void fw_fill_response(struct fw_packet *response, u32 *request_header, ··· 631 break; 632 633 default: 634 + WARN(1, "wrong tcode %d", tcode); 635 } 636 637 response->payload_mapped = false; 638 } 639 EXPORT_SYMBOL(fw_fill_response); 640 641 + static u32 compute_split_timeout_timestamp(struct fw_card *card, 642 + u32 request_timestamp) 643 + { 644 + unsigned int cycles; 645 + u32 timestamp; 646 + 647 + cycles = card->split_timeout_cycles; 648 + cycles += request_timestamp & 0x1fff; 649 + 650 + timestamp = request_timestamp & ~0x1fff; 651 + timestamp += (cycles / 8000) << 13; 652 + timestamp |= cycles % 8000; 653 + 654 + return timestamp; 655 + } 656 + 657 + static struct fw_request *allocate_request(struct fw_card *card, 658 + struct fw_packet *p) 659 { 660 struct fw_request *request; 661 u32 *data, length; 662 + int request_tcode; 663 664 request_tcode = HEADER_GET_TCODE(p->header[0]); 665 switch (request_tcode) { ··· 677 if (request == 
NULL) 678 return NULL; 679 680 request->response.speed = p->speed; 681 + request->response.timestamp = 682 + compute_split_timeout_timestamp(card, p->timestamp); 683 request->response.generation = p->generation; 684 request->response.ack = 0; 685 request->response.callback = free_response_callback; ··· 713 714 if (rcode == RCODE_COMPLETE) 715 fw_fill_response(&request->response, request->request_header, 716 + rcode, request->data, 717 + fw_get_response_length(request)); 718 else 719 fw_fill_response(&request->response, request->request_header, 720 rcode, NULL, 0); ··· 731 unsigned long flags; 732 int tcode, destination, source; 733 734 destination = HEADER_GET_DESTINATION(p->header[0]); 735 source = HEADER_GET_SOURCE(p->header[1]); 736 + tcode = HEADER_GET_TCODE(p->header[0]); 737 + if (tcode == TCODE_LOCK_REQUEST) 738 + tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]); 739 740 spin_lock_irqsave(&address_handler_lock, flags); 741 handler = lookup_enclosing_address_handler(&address_handler_list, ··· 753 else 754 handler->address_callback(card, request, 755 tcode, destination, source, 756 + p->generation, offset, 757 request->data, request->length, 758 handler->callback_data); 759 } ··· 791 if (is_enclosing_handler(handler, offset, request->length)) 792 handler->address_callback(card, NULL, tcode, 793 destination, source, 794 + p->generation, offset, 795 + request->data, 796 request->length, 797 handler->callback_data); 798 } ··· 809 if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE) 810 return; 811 812 + if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) { 813 + fw_cdev_handle_phy_packet(card, p); 814 + return; 815 + } 816 + 817 + request = allocate_request(card, p); 818 if (request == NULL) { 819 /* FIXME: send statically allocated busy packet. 
*/ 820 return; ··· 832 unsigned long flags; 833 u32 *data; 834 size_t data_length; 835 + int tcode, tlabel, source, rcode; 836 837 + tcode = HEADER_GET_TCODE(p->header[0]); 838 + tlabel = HEADER_GET_TLABEL(p->header[0]); 839 + source = HEADER_GET_SOURCE(p->header[1]); 840 + rcode = HEADER_GET_RCODE(p->header[1]); 841 842 spin_lock_irqsave(&card->lock, flags); 843 list_for_each_entry(t, &card->transaction_list, link) { ··· 903 904 static void handle_topology_map(struct fw_card *card, struct fw_request *request, 905 int tcode, int destination, int source, int generation, 906 + unsigned long long offset, void *payload, size_t length, 907 + void *callback_data) 908 { 909 int start; 910 ··· 933 { .start = CSR_REGISTER_BASE, 934 .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; 935 936 + static void update_split_timeout(struct fw_card *card) 937 + { 938 + unsigned int cycles; 939 + 940 + cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19); 941 + 942 + cycles = max(cycles, 800u); /* minimum as per the spec */ 943 + cycles = min(cycles, 3u * 8000u); /* maximum OHCI timeout */ 944 + 945 + card->split_timeout_cycles = cycles; 946 + card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000); 947 + } 948 + 949 static void handle_registers(struct fw_card *card, struct fw_request *request, 950 int tcode, int destination, int source, int generation, 951 + unsigned long long offset, void *payload, size_t length, 952 + void *callback_data) 953 { 954 int reg = offset & ~CSR_REGISTER_BASE; 955 __be32 *data = payload; 956 int rcode = RCODE_COMPLETE; 957 + unsigned long flags; 958 959 switch (reg) { 960 + case CSR_PRIORITY_BUDGET: 961 + if (!card->priority_budget_implemented) { 962 + rcode = RCODE_ADDRESS_ERROR; 963 + break; 964 + } 965 + /* else fall through */ 966 + 967 + case CSR_NODE_IDS: 968 + /* 969 + * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8 970 + * and 9.6, but interoperable with IEEE 1394.1-2004 bridges 971 + */ 972 + /* fall through */ 973 + 974 + case CSR_STATE_CLEAR: 975 + case CSR_STATE_SET: 976 case CSR_CYCLE_TIME: 977 + case CSR_BUS_TIME: 978 + case CSR_BUSY_TIMEOUT: 979 + if (tcode == TCODE_READ_QUADLET_REQUEST) 980 + *data = cpu_to_be32(card->driver->read_csr(card, reg)); 981 + else if (tcode == TCODE_WRITE_QUADLET_REQUEST) 982 + card->driver->write_csr(card, reg, be32_to_cpu(*data)); 983 + else 984 + rcode = RCODE_TYPE_ERROR; 985 + break; 986 + 987 + case CSR_RESET_START: 988 + if (tcode == TCODE_WRITE_QUADLET_REQUEST) 989 + card->driver->write_csr(card, CSR_STATE_CLEAR, 990 + CSR_STATE_BIT_ABDICATE); 991 + else 992 + rcode = RCODE_TYPE_ERROR; 993 + break; 994 + 995 + case CSR_SPLIT_TIMEOUT_HI: 996 + if (tcode == TCODE_READ_QUADLET_REQUEST) { 997 + *data = cpu_to_be32(card->split_timeout_hi); 998 + } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) { 999 + spin_lock_irqsave(&card->lock, flags); 1000 + card->split_timeout_hi = be32_to_cpu(*data) & 7; 1001 + update_split_timeout(card); 1002 + spin_unlock_irqrestore(&card->lock, flags); 1003 + } else { 1004 + rcode = RCODE_TYPE_ERROR; 1005 + } 1006 + break; 1007 + 1008 + case CSR_SPLIT_TIMEOUT_LO: 1009 + if (tcode == TCODE_READ_QUADLET_REQUEST) { 1010 + *data = cpu_to_be32(card->split_timeout_lo); 1011 + } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) { 1012 + spin_lock_irqsave(&card->lock, flags); 1013 + card->split_timeout_lo = 1014 + be32_to_cpu(*data) & 0xfff80000; 1015 + update_split_timeout(card); 1016 + spin_unlock_irqrestore(&card->lock, flags); 1017 + } else { 1018 + rcode = RCODE_TYPE_ERROR; 1019 + } 
1020 + break; 1021 + 1022 + case CSR_MAINT_UTILITY: 1023 + if (tcode == TCODE_READ_QUADLET_REQUEST) 1024 + *data = card->maint_utility_register; 1025 + else if (tcode == TCODE_WRITE_QUADLET_REQUEST) 1026 + card->maint_utility_register = *data; 1027 else 1028 rcode = RCODE_TYPE_ERROR; 1029 break; ··· 974 */ 975 BUG(); 976 break; 977 978 default: 979 rcode = RCODE_ADDRESS_ERROR;
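To make the rewritten fw_send_request() documentation concrete, here is a minimal sketch of an asynchronous quadlet read as a caller might issue it. The my_* names and the choice of CSR_CYCLE_TIME as the target offset are illustration only, not part of this patch; the completion callback follows fw_transaction_callback_t from <linux/firewire.h>.

        #include <linux/firewire.h>
        #include <linux/firewire-constants.h>
        #include <linux/kernel.h>

        struct my_unit {
                struct fw_transaction t;        /* must stay valid until completion */
        };

        static void my_read_done(struct fw_card *card, int rcode,
                                 void *payload, size_t length, void *data)
        {
                /* rcode is an IEEE 1394 rcode or one of the firewire-core
                 * specific codes listed above (RCODE_SEND_ERROR etc.) */
                if (rcode == RCODE_COMPLETE)
                        pr_info("CYCLE_TIME: %08x\n", be32_to_cpup(payload));
        }

        static void my_start_read(struct fw_card *card, struct my_unit *u,
                                  int destination_id, int generation)
        {
                /* read requests carry no payload: @payload NULL, @length 0;
                 * note that my_read_done() may run before this call returns */
                fw_send_request(card, &u->t, TCODE_READ_QUADLET_REQUEST,
                                destination_id, generation, SCODE_100,
                                CSR_REGISTER_BASE + CSR_CYCLE_TIME,
                                NULL, 0, my_read_done, u);
        }

A blocking equivalent is the fw_run_transaction() wrapper documented in the same hunk.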
+21 -3
drivers/firewire/core.h
··· 38 #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) 39 #define BROADCAST_CHANNEL_VALID (1 << 30) 40 41 struct fw_card_driver { 42 /* 43 * Enable the given card with the given initial config rom. ··· 51 int (*enable)(struct fw_card *card, 52 const __be32 *config_rom, size_t length); 53 54 int (*update_phy_reg)(struct fw_card *card, int address, 55 int clear_bits, int set_bits); 56 ··· 79 int (*enable_phys_dma)(struct fw_card *card, 80 int node_id, int generation); 81 82 - u32 (*get_cycle_time)(struct fw_card *card); 83 84 struct fw_iso_context * 85 (*allocate_iso_context)(struct fw_card *card, ··· 89 90 int (*start_iso)(struct fw_iso_context *ctx, 91 s32 cycle, u32 sync, u32 tags); 92 93 int (*queue_iso)(struct fw_iso_context *ctx, 94 struct fw_iso_packet *packet, ··· 105 int fw_card_add(struct fw_card *card, 106 u32 max_receive, u32 link_speed, u64 guid); 107 void fw_core_remove_card(struct fw_card *card); 108 - int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset); 109 int fw_compute_block_crc(__be32 *block); 110 void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); 111 112 static inline struct fw_card *fw_card_get(struct fw_card *card) ··· 130 131 void fw_device_cdev_update(struct fw_device *device); 132 void fw_device_cdev_remove(struct fw_device *device); 133 134 135 /* -device */ ··· 200 } 201 202 void fw_core_handle_bus_reset(struct fw_card *card, int node_id, 203 - int generation, int self_id_count, u32 *self_ids); 204 void fw_destroy_nodes(struct fw_card *card); 205 206 /* ··· 217 218 #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) 219 #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) 220 #define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0) 221 #define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0) 222 #define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4) ··· 227 228 void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); 229 void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); 230 void fw_fill_response(struct fw_packet *response, u32 *request_header, 231 int rcode, void *payload, size_t length); 232 void fw_send_phy_config(struct fw_card *card, 233 int node_id, int generation, int gap_count); 234 235 #endif /* _FIREWIRE_CORE_H */
··· 38 #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) 39 #define BROADCAST_CHANNEL_VALID (1 << 30) 40 41 + #define CSR_STATE_BIT_CMSTR (1 << 8) 42 + #define CSR_STATE_BIT_ABDICATE (1 << 10) 43 + 44 struct fw_card_driver { 45 /* 46 * Enable the given card with the given initial config rom. ··· 48 int (*enable)(struct fw_card *card, 49 const __be32 *config_rom, size_t length); 50 51 + int (*read_phy_reg)(struct fw_card *card, int address); 52 int (*update_phy_reg)(struct fw_card *card, int address, 53 int clear_bits, int set_bits); 54 ··· 75 int (*enable_phys_dma)(struct fw_card *card, 76 int node_id, int generation); 77 78 + u32 (*read_csr)(struct fw_card *card, int csr_offset); 79 + void (*write_csr)(struct fw_card *card, int csr_offset, u32 value); 80 81 struct fw_iso_context * 82 (*allocate_iso_context)(struct fw_card *card, ··· 84 85 int (*start_iso)(struct fw_iso_context *ctx, 86 s32 cycle, u32 sync, u32 tags); 87 + 88 + int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels); 89 90 int (*queue_iso)(struct fw_iso_context *ctx, 91 struct fw_iso_packet *packet, ··· 98 int fw_card_add(struct fw_card *card, 99 u32 max_receive, u32 link_speed, u64 guid); 100 void fw_core_remove_card(struct fw_card *card); 101 int fw_compute_block_crc(__be32 *block); 102 + void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset); 103 void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); 104 105 static inline struct fw_card *fw_card_get(struct fw_card *card) ··· 123 124 void fw_device_cdev_update(struct fw_device *device); 125 void fw_device_cdev_remove(struct fw_device *device); 126 + void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p); 127 128 129 /* -device */ ··· 192 } 193 194 void fw_core_handle_bus_reset(struct fw_card *card, int node_id, 195 + int generation, int self_id_count, u32 *self_ids, bool bm_abdicate); 196 void fw_destroy_nodes(struct fw_card *card); 197 198 /* ··· 209 210 #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) 211 #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) 212 + #define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == 0xe) 213 #define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0) 214 #define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0) 215 #define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4) ··· 218 219 void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); 220 void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); 221 + int fw_get_response_length(struct fw_request *request); 222 void fw_fill_response(struct fw_packet *response, u32 *request_header, 223 int rcode, void *payload, size_t length); 224 + 225 + #define FW_PHY_CONFIG_NO_NODE_ID -1 226 + #define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1 227 void fw_send_phy_config(struct fw_card *card, 228 int node_id, int generation, int gap_count); 229 + 230 + static inline bool is_ping_packet(u32 *data) 231 + { 232 + return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1]; 233 + } 234 235 #endif /* _FIREWIRE_CORE_H */
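The new is_ping_packet() helper relies on the cabled-PHY packet format: a PHY packet is a single quadlet followed by its bitwise inverse, and in a ping packet only the phy_ID field (bits 29..24) of the first quadlet is non-zero. A hedged illustration; fill_ping_packet() is hypothetical and not part of the patch:

        #include <linux/types.h>

        static void fill_ping_packet(u32 packet[2], int phy_id)
        {
                packet[0] = (phy_id & 0x3f) << 24;      /* identifier 00, all other fields 0 */
                packet[1] = ~packet[0];                 /* mandatory inverse quadlet */
        }

For such a buffer, is_ping_packet(packet) evaluates to true: (packet[0] & 0xc0ffffff) == 0 and ~packet[0] == packet[1].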
+2 -2
drivers/firewire/net.c
··· 806 807 static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, 808 int tcode, int destination, int source, int generation, 809 - int speed, unsigned long long offset, void *payload, 810 - size_t length, void *callback_data) 811 { 812 struct fwnet_device *dev = callback_data; 813 int rcode;
··· 806 807 static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, 808 int tcode, int destination, int source, int generation, 809 + unsigned long long offset, void *payload, size_t length, 810 + void *callback_data) 811 { 812 struct fwnet_device *dev = callback_data; 813 int rcode;
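This hunk only adapts fwnet_receive_packet() to the fw_address_callback_t signature change made in core-transaction.c: the unused speed parameter is dropped. For reference, a registration sketch against the new signature; my_callback, my_handler, my_region, and the address range are made up for illustration:

        #include <linux/firewire.h>

        static void my_callback(struct fw_card *card, struct fw_request *request,
                                int tcode, int destination, int source,
                                int generation, unsigned long long offset,
                                void *payload, size_t length, void *callback_data)
        {
                /* acknowledge everything; a real handler inspects tcode/payload */
                fw_send_response(card, request, RCODE_COMPLETE);
        }

        static const struct fw_address_region my_region =
                { .start = 0xffffe0000000ULL, .end = 0xffffe0000400ULL, };

        static struct fw_address_handler my_handler = {
                .length           = 0x400,      /* quadlet-aligned, non-zero */
                .address_callback = my_callback,
        };

        /* ret = fw_core_add_address_handler(&my_handler, &my_region); */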
+25
drivers/firewire/nosy-user.h
···
··· 1 + #ifndef __nosy_user_h 2 + #define __nosy_user_h 3 + 4 + #include <linux/ioctl.h> 5 + #include <linux/types.h> 6 + 7 + #define NOSY_IOC_GET_STATS _IOR('&', 0, struct nosy_stats) 8 + #define NOSY_IOC_START _IO('&', 1) 9 + #define NOSY_IOC_STOP _IO('&', 2) 10 + #define NOSY_IOC_FILTER _IOW('&', 2, __u32) 11 + 12 + struct nosy_stats { 13 + __u32 total_packet_count; 14 + __u32 lost_packet_count; 15 + }; 16 + 17 + /* 18 + * Format of packets returned from the kernel driver: 19 + * 20 + * quadlet with timestamp (microseconds, CPU endian) 21 + * quadlet-padded packet data... (little endian) 22 + * quadlet with ack (little endian) 23 + */ 24 + 25 + #endif /* __nosy_user_h */
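The comment above fully specifies the record format that read() returns, so a user-space client is short. The following sketch is hypothetical (the real client is nosy-dump in tools/firewire/); it assumes the misc device appears as /dev/nosy and, like nosy-dump, passes the NOSY_IOC_FILTER mask by value:

        #include <stdio.h>
        #include <stdint.h>
        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include "nosy-user.h"

        int main(void)
        {
                uint32_t buf[4096];
                ssize_t len;
                int fd = open("/dev/nosy", O_RDONLY);

                if (fd < 0)
                        return 1;
                ioctl(fd, NOSY_IOC_FILTER, ~0);         /* accept every tcode */
                ioctl(fd, NOSY_IOC_START);

                /* one record per read(): CPU-endian timestamp quadlet, then
                 * quadlet-padded packet data, then the (little-endian) ack */
                while ((len = read(fd, buf, sizeof buf)) > 0)
                        printf("%u us: %zd-byte record\n", buf[0], len);

                ioctl(fd, NOSY_IOC_STOP);
                close(fd);
                return 0;
        }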
+721
drivers/firewire/nosy.c
···
··· 1 + /* 2 + * nosy - Snoop mode driver for TI PCILynx 1394 controllers 3 + * Copyright (C) 2002-2007 Kristian Høgsberg 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License as published by 7 + * the Free Software Foundation; either version 2 of the License, or 8 + * (at your option) any later version. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program; if not, write to the Free Software Foundation, 17 + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 + */ 19 + 20 + #include <linux/device.h> 21 + #include <linux/errno.h> 22 + #include <linux/fs.h> 23 + #include <linux/init.h> 24 + #include <linux/interrupt.h> 25 + #include <linux/io.h> 26 + #include <linux/kernel.h> 27 + #include <linux/kref.h> 28 + #include <linux/miscdevice.h> 29 + #include <linux/module.h> 30 + #include <linux/mutex.h> 31 + #include <linux/pci.h> 32 + #include <linux/poll.h> 33 + #include <linux/sched.h> /* required for linux/wait.h */ 34 + #include <linux/slab.h> 35 + #include <linux/spinlock.h> 36 + #include <linux/timex.h> 37 + #include <linux/uaccess.h> 38 + #include <linux/wait.h> 39 + 40 + #include <asm/atomic.h> 41 + #include <asm/byteorder.h> 42 + 43 + #include "nosy.h" 44 + #include "nosy-user.h" 45 + 46 + #define TCODE_PHY_PACKET 0x10 47 + #define PCI_DEVICE_ID_TI_PCILYNX 0x8000 48 + 49 + static char driver_name[] = KBUILD_MODNAME; 50 + 51 + /* this is the physical layout of a PCL, its size is 128 bytes */ 52 + struct pcl { 53 + __le32 next; 54 + __le32 async_error_next; 55 + u32 user_data; 56 + __le32 pcl_status; 57 + __le32 remaining_transfer_count; 58 + __le32 next_data_buffer; 59 + struct { 60 + __le32 control; 61 + __le32 pointer; 62 + } buffer[13]; 63 + }; 64 + 65 + struct packet { 66 + unsigned int length; 67 + char data[0]; 68 + }; 69 + 70 + struct packet_buffer { 71 + char *data; 72 + size_t capacity; 73 + long total_packet_count, lost_packet_count; 74 + atomic_t size; 75 + struct packet *head, *tail; 76 + wait_queue_head_t wait; 77 + }; 78 + 79 + struct pcilynx { 80 + struct pci_dev *pci_device; 81 + __iomem char *registers; 82 + 83 + struct pcl *rcv_start_pcl, *rcv_pcl; 84 + __le32 *rcv_buffer; 85 + 86 + dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus; 87 + 88 + spinlock_t client_list_lock; 89 + struct list_head client_list; 90 + 91 + struct miscdevice misc; 92 + struct list_head link; 93 + struct kref kref; 94 + }; 95 + 96 + static inline struct pcilynx * 97 + lynx_get(struct pcilynx *lynx) 98 + { 99 + kref_get(&lynx->kref); 100 + 101 + return lynx; 102 + } 103 + 104 + static void 105 + lynx_release(struct kref *kref) 106 + { 107 + kfree(container_of(kref, struct pcilynx, kref)); 108 + } 109 + 110 + static inline void 111 + lynx_put(struct pcilynx *lynx) 112 + { 113 + kref_put(&lynx->kref, lynx_release); 114 + } 115 + 116 + struct client { 117 + struct pcilynx *lynx; 118 + u32 tcode_mask; 119 + struct packet_buffer buffer; 120 + struct list_head link; 121 + }; 122 + 123 + static DEFINE_MUTEX(card_mutex); 124 + static LIST_HEAD(card_list); 125 + 126 + static int 127 + packet_buffer_init(struct packet_buffer *buffer, size_t capacity) 128 + { 129 + buffer->data = 
kmalloc(capacity, GFP_KERNEL); 130 + if (buffer->data == NULL) 131 + return -ENOMEM; 132 + buffer->head = (struct packet *) buffer->data; 133 + buffer->tail = (struct packet *) buffer->data; 134 + buffer->capacity = capacity; 135 + buffer->lost_packet_count = 0; 136 + atomic_set(&buffer->size, 0); 137 + init_waitqueue_head(&buffer->wait); 138 + 139 + return 0; 140 + } 141 + 142 + static void 143 + packet_buffer_destroy(struct packet_buffer *buffer) 144 + { 145 + kfree(buffer->data); 146 + } 147 + 148 + static int 149 + packet_buffer_get(struct client *client, char __user *data, size_t user_length) 150 + { 151 + struct packet_buffer *buffer = &client->buffer; 152 + size_t length; 153 + char *end; 154 + 155 + if (wait_event_interruptible(buffer->wait, 156 + atomic_read(&buffer->size) > 0) || 157 + list_empty(&client->lynx->link)) 158 + return -ERESTARTSYS; 159 + 160 + if (atomic_read(&buffer->size) == 0) 161 + return -ENODEV; 162 + 163 + /* FIXME: Check length <= user_length. */ 164 + 165 + end = buffer->data + buffer->capacity; 166 + length = buffer->head->length; 167 + 168 + if (&buffer->head->data[length] < end) { 169 + if (copy_to_user(data, buffer->head->data, length)) 170 + return -EFAULT; 171 + buffer->head = (struct packet *) &buffer->head->data[length]; 172 + } else { 173 + size_t split = end - buffer->head->data; 174 + 175 + if (copy_to_user(data, buffer->head->data, split)) 176 + return -EFAULT; 177 + if (copy_to_user(data + split, buffer->data, length - split)) 178 + return -EFAULT; 179 + buffer->head = (struct packet *) &buffer->data[length - split]; 180 + } 181 + 182 + /* 183 + * Decrease buffer->size as the last thing, since this is what 184 + * keeps the interrupt from overwriting the packet we are 185 + * retrieving from the buffer. 186 + */ 187 + atomic_sub(sizeof(struct packet) + length, &buffer->size); 188 + 189 + return length; 190 + } 191 + 192 + static void 193 + packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length) 194 + { 195 + char *end; 196 + 197 + buffer->total_packet_count++; 198 + 199 + if (buffer->capacity < 200 + atomic_read(&buffer->size) + sizeof(struct packet) + length) { 201 + buffer->lost_packet_count++; 202 + return; 203 + } 204 + 205 + end = buffer->data + buffer->capacity; 206 + buffer->tail->length = length; 207 + 208 + if (&buffer->tail->data[length] < end) { 209 + memcpy(buffer->tail->data, data, length); 210 + buffer->tail = (struct packet *) &buffer->tail->data[length]; 211 + } else { 212 + size_t split = end - buffer->tail->data; 213 + 214 + memcpy(buffer->tail->data, data, split); 215 + memcpy(buffer->data, data + split, length - split); 216 + buffer->tail = (struct packet *) &buffer->data[length - split]; 217 + } 218 + 219 + /* Finally, adjust buffer size and wake up userspace reader. */ 220 + 221 + atomic_add(sizeof(struct packet) + length, &buffer->size); 222 + wake_up_interruptible(&buffer->wait); 223 + } 224 + 225 + static inline void 226 + reg_write(struct pcilynx *lynx, int offset, u32 data) 227 + { 228 + writel(data, lynx->registers + offset); 229 + } 230 + 231 + static inline u32 232 + reg_read(struct pcilynx *lynx, int offset) 233 + { 234 + return readl(lynx->registers + offset); 235 + } 236 + 237 + static inline void 238 + reg_set_bits(struct pcilynx *lynx, int offset, u32 mask) 239 + { 240 + reg_write(lynx, offset, (reg_read(lynx, offset) | mask)); 241 + } 242 + 243 + /* 244 + * Maybe the pcl programs could be set up to just append data instead 245 + * of using a whole packet. 
246 + */ 247 + static inline void 248 + run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus, 249 + int dmachan) 250 + { 251 + reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus); 252 + reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20, 253 + DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK); 254 + } 255 + 256 + static int 257 + set_phy_reg(struct pcilynx *lynx, int addr, int val) 258 + { 259 + if (addr > 15) { 260 + dev_err(&lynx->pci_device->dev, 261 + "PHY register address %d out of range\n", addr); 262 + return -1; 263 + } 264 + if (val > 0xff) { 265 + dev_err(&lynx->pci_device->dev, 266 + "PHY register value %d out of range\n", val); 267 + return -1; 268 + } 269 + reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | 270 + LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val)); 271 + 272 + return 0; 273 + } 274 + 275 + static int 276 + nosy_open(struct inode *inode, struct file *file) 277 + { 278 + int minor = iminor(inode); 279 + struct client *client; 280 + struct pcilynx *tmp, *lynx = NULL; 281 + 282 + mutex_lock(&card_mutex); 283 + list_for_each_entry(tmp, &card_list, link) 284 + if (tmp->misc.minor == minor) { 285 + lynx = lynx_get(tmp); 286 + break; 287 + } 288 + mutex_unlock(&card_mutex); 289 + if (lynx == NULL) 290 + return -ENODEV; 291 + 292 + client = kmalloc(sizeof *client, GFP_KERNEL); 293 + if (client == NULL) 294 + goto fail; 295 + 296 + client->tcode_mask = ~0; 297 + client->lynx = lynx; 298 + INIT_LIST_HEAD(&client->link); 299 + 300 + if (packet_buffer_init(&client->buffer, 128 * 1024) < 0) 301 + goto fail; 302 + 303 + file->private_data = client; 304 + 305 + return 0; 306 + fail: 307 + kfree(client); 308 + lynx_put(lynx); 309 + 310 + return -ENOMEM; 311 + } 312 + 313 + static int 314 + nosy_release(struct inode *inode, struct file *file) 315 + { 316 + struct client *client = file->private_data; 317 + struct pcilynx *lynx = client->lynx; 318 + 319 + spin_lock_irq(&lynx->client_list_lock); 320 + list_del_init(&client->link); 321 + spin_unlock_irq(&lynx->client_list_lock); 322 + 323 + packet_buffer_destroy(&client->buffer); 324 + kfree(client); 325 + lynx_put(lynx); 326 + 327 + return 0; 328 + } 329 + 330 + static unsigned int 331 + nosy_poll(struct file *file, poll_table *pt) 332 + { 333 + struct client *client = file->private_data; 334 + unsigned int ret = 0; 335 + 336 + poll_wait(file, &client->buffer.wait, pt); 337 + 338 + if (atomic_read(&client->buffer.size) > 0) 339 + ret = POLLIN | POLLRDNORM; 340 + 341 + if (list_empty(&client->lynx->link)) 342 + ret |= POLLHUP; 343 + 344 + return ret; 345 + } 346 + 347 + static ssize_t 348 + nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset) 349 + { 350 + struct client *client = file->private_data; 351 + 352 + return packet_buffer_get(client, buffer, count); 353 + } 354 + 355 + static long 356 + nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 357 + { 358 + struct client *client = file->private_data; 359 + spinlock_t *client_list_lock = &client->lynx->client_list_lock; 360 + struct nosy_stats stats; 361 + 362 + switch (cmd) { 363 + case NOSY_IOC_GET_STATS: 364 + spin_lock_irq(client_list_lock); 365 + stats.total_packet_count = client->buffer.total_packet_count; 366 + stats.lost_packet_count = client->buffer.lost_packet_count; 367 + spin_unlock_irq(client_list_lock); 368 + 369 + if (copy_to_user((void __user *) arg, &stats, sizeof stats)) 370 + return -EFAULT; 371 + else 372 + return 0; 373 + 374 + case NOSY_IOC_START: 375 + spin_lock_irq(client_list_lock); 376 + list_add_tail(&client->link, 
&client->lynx->client_list); 377 + spin_unlock_irq(client_list_lock); 378 + 379 + return 0; 380 + 381 + case NOSY_IOC_STOP: 382 + spin_lock_irq(client_list_lock); 383 + list_del_init(&client->link); 384 + spin_unlock_irq(client_list_lock); 385 + 386 + return 0; 387 + 388 + case NOSY_IOC_FILTER: 389 + spin_lock_irq(client_list_lock); 390 + client->tcode_mask = arg; 391 + spin_unlock_irq(client_list_lock); 392 + 393 + return 0; 394 + 395 + default: 396 + return -EINVAL; 397 + /* Flush buffer, configure filter. */ 398 + } 399 + } 400 + 401 + static const struct file_operations nosy_ops = { 402 + .owner = THIS_MODULE, 403 + .read = nosy_read, 404 + .unlocked_ioctl = nosy_ioctl, 405 + .poll = nosy_poll, 406 + .open = nosy_open, 407 + .release = nosy_release, 408 + }; 409 + 410 + #define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */ 411 + 412 + static void 413 + packet_irq_handler(struct pcilynx *lynx) 414 + { 415 + struct client *client; 416 + u32 tcode_mask, tcode; 417 + size_t length; 418 + struct timeval tv; 419 + 420 + /* FIXME: Also report rcv_speed. */ 421 + 422 + length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff; 423 + tcode = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf; 424 + 425 + do_gettimeofday(&tv); 426 + lynx->rcv_buffer[0] = (__force __le32)tv.tv_usec; 427 + 428 + if (length == PHY_PACKET_SIZE) 429 + tcode_mask = 1 << TCODE_PHY_PACKET; 430 + else 431 + tcode_mask = 1 << tcode; 432 + 433 + spin_lock(&lynx->client_list_lock); 434 + 435 + list_for_each_entry(client, &lynx->client_list, link) 436 + if (client->tcode_mask & tcode_mask) 437 + packet_buffer_put(&client->buffer, 438 + lynx->rcv_buffer, length + 4); 439 + 440 + spin_unlock(&lynx->client_list_lock); 441 + } 442 + 443 + static void 444 + bus_reset_irq_handler(struct pcilynx *lynx) 445 + { 446 + struct client *client; 447 + struct timeval tv; 448 + 449 + do_gettimeofday(&tv); 450 + 451 + spin_lock(&lynx->client_list_lock); 452 + 453 + list_for_each_entry(client, &lynx->client_list, link) 454 + packet_buffer_put(&client->buffer, &tv.tv_usec, 4); 455 + 456 + spin_unlock(&lynx->client_list_lock); 457 + } 458 + 459 + static irqreturn_t 460 + irq_handler(int irq, void *device) 461 + { 462 + struct pcilynx *lynx = device; 463 + u32 pci_int_status; 464 + 465 + pci_int_status = reg_read(lynx, PCI_INT_STATUS); 466 + 467 + if (pci_int_status == ~0) 468 + /* Card was ejected. */ 469 + return IRQ_NONE; 470 + 471 + if ((pci_int_status & PCI_INT_INT_PEND) == 0) 472 + /* Not our interrupt, bail out quickly. */ 473 + return IRQ_NONE; 474 + 475 + if ((pci_int_status & PCI_INT_P1394_INT) != 0) { 476 + u32 link_int_status; 477 + 478 + link_int_status = reg_read(lynx, LINK_INT_STATUS); 479 + reg_write(lynx, LINK_INT_STATUS, link_int_status); 480 + 481 + if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0) 482 + bus_reset_irq_handler(lynx); 483 + } 484 + 485 + /* Clear the PCI_INT_STATUS register only after clearing the 486 + * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will 487 + * be set again immediately. 
*/ 488 + 489 + reg_write(lynx, PCI_INT_STATUS, pci_int_status); 490 + 491 + if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) { 492 + packet_irq_handler(lynx); 493 + run_pcl(lynx, lynx->rcv_start_pcl_bus, 0); 494 + } 495 + 496 + return IRQ_HANDLED; 497 + } 498 + 499 + static void 500 + remove_card(struct pci_dev *dev) 501 + { 502 + struct pcilynx *lynx = pci_get_drvdata(dev); 503 + struct client *client; 504 + 505 + mutex_lock(&card_mutex); 506 + list_del_init(&lynx->link); 507 + misc_deregister(&lynx->misc); 508 + mutex_unlock(&card_mutex); 509 + 510 + reg_write(lynx, PCI_INT_ENABLE, 0); 511 + free_irq(lynx->pci_device->irq, lynx); 512 + 513 + spin_lock_irq(&lynx->client_list_lock); 514 + list_for_each_entry(client, &lynx->client_list, link) 515 + wake_up_interruptible(&client->buffer.wait); 516 + spin_unlock_irq(&lynx->client_list_lock); 517 + 518 + pci_free_consistent(lynx->pci_device, sizeof(struct pcl), 519 + lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); 520 + pci_free_consistent(lynx->pci_device, sizeof(struct pcl), 521 + lynx->rcv_pcl, lynx->rcv_pcl_bus); 522 + pci_free_consistent(lynx->pci_device, PAGE_SIZE, 523 + lynx->rcv_buffer, lynx->rcv_buffer_bus); 524 + 525 + iounmap(lynx->registers); 526 + pci_disable_device(dev); 527 + lynx_put(lynx); 528 + } 529 + 530 + #define RCV_BUFFER_SIZE (16 * 1024) 531 + 532 + static int __devinit 533 + add_card(struct pci_dev *dev, const struct pci_device_id *unused) 534 + { 535 + struct pcilynx *lynx; 536 + u32 p, end; 537 + int ret, i; 538 + 539 + if (pci_set_dma_mask(dev, 0xffffffff)) { 540 + dev_err(&dev->dev, 541 + "DMA address limits not supported for PCILynx hardware\n"); 542 + return -ENXIO; 543 + } 544 + if (pci_enable_device(dev)) { 545 + dev_err(&dev->dev, "Failed to enable PCILynx hardware\n"); 546 + return -ENXIO; 547 + } 548 + pci_set_master(dev); 549 + 550 + lynx = kzalloc(sizeof *lynx, GFP_KERNEL); 551 + if (lynx == NULL) { 552 + dev_err(&dev->dev, "Failed to allocate control structure\n"); 553 + ret = -ENOMEM; 554 + goto fail_disable; 555 + } 556 + lynx->pci_device = dev; 557 + pci_set_drvdata(dev, lynx); 558 + 559 + spin_lock_init(&lynx->client_list_lock); 560 + INIT_LIST_HEAD(&lynx->client_list); 561 + kref_init(&lynx->kref); 562 + 563 + lynx->registers = ioremap_nocache(pci_resource_start(dev, 0), 564 + PCILYNX_MAX_REGISTER); 565 + 566 + lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device, 567 + sizeof(struct pcl), &lynx->rcv_start_pcl_bus); 568 + lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device, 569 + sizeof(struct pcl), &lynx->rcv_pcl_bus); 570 + lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device, 571 + RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus); 572 + if (lynx->rcv_start_pcl == NULL || 573 + lynx->rcv_pcl == NULL || 574 + lynx->rcv_buffer == NULL) { 575 + dev_err(&dev->dev, "Failed to allocate receive buffer\n"); 576 + ret = -ENOMEM; 577 + goto fail_deallocate; 578 + } 579 + lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus); 580 + lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID); 581 + lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID); 582 + 583 + lynx->rcv_pcl->buffer[0].control = 584 + cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044); 585 + lynx->rcv_pcl->buffer[0].pointer = 586 + cpu_to_le32(lynx->rcv_buffer_bus + 4); 587 + p = lynx->rcv_buffer_bus + 2048; 588 + end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE; 589 + for (i = 1; p < end; i++, p += 2048) { 590 + lynx->rcv_pcl->buffer[i].control = 591 + cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048); 592 + lynx->rcv_pcl->buffer[i].pointer = 
cpu_to_le32(p); 593 + } 594 + lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF); 595 + 596 + reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET); 597 + /* Fix buggy cards with autoboot pin not tied low: */ 598 + reg_write(lynx, DMA0_CHAN_CTRL, 0); 599 + reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24); 600 + 601 + #if 0 602 + /* now, looking for PHY register set */ 603 + if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) { 604 + lynx->phyic.reg_1394a = 1; 605 + PRINT(KERN_INFO, lynx->id, 606 + "found 1394a conform PHY (using extended register set)"); 607 + lynx->phyic.vendor = get_phy_vendorid(lynx); 608 + lynx->phyic.product = get_phy_productid(lynx); 609 + } else { 610 + lynx->phyic.reg_1394a = 0; 611 + PRINT(KERN_INFO, lynx->id, "found old 1394 PHY"); 612 + } 613 + #endif 614 + 615 + /* Setup the general receive FIFO max size. */ 616 + reg_write(lynx, FIFO_SIZES, 255); 617 + 618 + reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL); 619 + 620 + reg_write(lynx, LINK_INT_ENABLE, 621 + LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD | 622 + LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK | 623 + LINK_INT_AT_STUCK | LINK_INT_SNTRJ | 624 + LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW | 625 + LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW); 626 + 627 + /* Disable the L flag in self ID packets. */ 628 + set_phy_reg(lynx, 4, 0); 629 + 630 + /* Put this baby into snoop mode */ 631 + reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE); 632 + 633 + run_pcl(lynx, lynx->rcv_start_pcl_bus, 0); 634 + 635 + if (request_irq(dev->irq, irq_handler, IRQF_SHARED, 636 + driver_name, lynx)) { 637 + dev_err(&dev->dev, 638 + "Failed to allocate shared interrupt %d\n", dev->irq); 639 + ret = -EIO; 640 + goto fail_deallocate; 641 + } 642 + 643 + lynx->misc.parent = &dev->dev; 644 + lynx->misc.minor = MISC_DYNAMIC_MINOR; 645 + lynx->misc.name = "nosy"; 646 + lynx->misc.fops = &nosy_ops; 647 + 648 + mutex_lock(&card_mutex); 649 + ret = misc_register(&lynx->misc); 650 + if (ret) { 651 + dev_err(&dev->dev, "Failed to register misc char device\n"); 652 + mutex_unlock(&card_mutex); 653 + goto fail_free_irq; 654 + } 655 + list_add_tail(&lynx->link, &card_list); 656 + mutex_unlock(&card_mutex); 657 + 658 + dev_info(&dev->dev, 659 + "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq); 660 + 661 + return 0; 662 + 663 + fail_free_irq: 664 + reg_write(lynx, PCI_INT_ENABLE, 0); 665 + free_irq(lynx->pci_device->irq, lynx); 666 + 667 + fail_deallocate: 668 + if (lynx->rcv_start_pcl) 669 + pci_free_consistent(lynx->pci_device, sizeof(struct pcl), 670 + lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); 671 + if (lynx->rcv_pcl) 672 + pci_free_consistent(lynx->pci_device, sizeof(struct pcl), 673 + lynx->rcv_pcl, lynx->rcv_pcl_bus); 674 + if (lynx->rcv_buffer) 675 + pci_free_consistent(lynx->pci_device, PAGE_SIZE, 676 + lynx->rcv_buffer, lynx->rcv_buffer_bus); 677 + iounmap(lynx->registers); 678 + kfree(lynx); 679 + 680 + fail_disable: 681 + pci_disable_device(dev); 682 + 683 + return ret; 684 + } 685 + 686 + static struct pci_device_id pci_table[] __devinitdata = { 687 + { 688 + .vendor = PCI_VENDOR_ID_TI, 689 + .device = PCI_DEVICE_ID_TI_PCILYNX, 690 + .subvendor = PCI_ANY_ID, 691 + .subdevice = PCI_ANY_ID, 692 + }, 693 + { } /* Terminating entry */ 694 + }; 695 + 696 + static struct pci_driver lynx_pci_driver = { 697 + .name = driver_name, 698 + .id_table = pci_table, 699 + .probe = add_card, 700 + .remove = remove_card, 701 + }; 702 + 703 + MODULE_AUTHOR("Kristian Hoegsberg"); 704 + MODULE_DESCRIPTION("Snoop mode 
driver for TI pcilynx 1394 controllers"); 705 + MODULE_LICENSE("GPL"); 706 + MODULE_DEVICE_TABLE(pci, pci_table); 707 + 708 + static int __init nosy_init(void) 709 + { 710 + return pci_register_driver(&lynx_pci_driver); 711 + } 712 + 713 + static void __exit nosy_cleanup(void) 714 + { 715 + pci_unregister_driver(&lynx_pci_driver); 716 + 717 + pr_info("Unloaded %s\n", driver_name); 718 + } 719 + 720 + module_init(nosy_init); 721 + module_exit(nosy_cleanup);
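packet_buffer_put()/packet_buffer_get() above implement a byte ring whose records may straddle the wrap point, with atomic_t size as the only producer/consumer handshake: the interrupt side adds to it last, the reader subtracts last. For readers unfamiliar with the idiom, here is a distilled single-threaded model of just the wraparound copy; it is user-space, with the capacity check, the length header, and the reader side omitted:

        #include <string.h>

        struct ring {
                char *data;
                size_t capacity, tail;
        };

        static void ring_put(struct ring *r, const void *rec, size_t length)
        {
                size_t space = r->capacity - r->tail;   /* bytes to end of buffer */

                if (length <= space) {
                        memcpy(r->data + r->tail, rec, length);
                        r->tail = (r->tail + length) % r->capacity;
                } else {
                        /* record straddles the end: split the copy */
                        memcpy(r->data + r->tail, rec, space);
                        memcpy(r->data, (const char *)rec + space, length - space);
                        r->tail = length - space;
                }
        }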
+237
drivers/firewire/nosy.h
···
··· 1 + /* 2 + * Chip register definitions for PCILynx chipset. Based on pcilynx.h 3 + * from the Linux 1394 drivers, but modified a bit so the names here 4 + * match the specification exactly (even though they have weird names, 5 + * like xxx_OVER_FLOW, or arbitrary abbreviations like SNTRJ for "sent 6 + * reject" etc.) 7 + */ 8 + 9 + #define PCILYNX_MAX_REGISTER 0xfff 10 + #define PCILYNX_MAX_MEMORY 0xffff 11 + 12 + #define PCI_LATENCY_CACHELINE 0x0c 13 + 14 + #define MISC_CONTROL 0x40 15 + #define MISC_CONTROL_SWRESET (1<<0) 16 + 17 + #define SERIAL_EEPROM_CONTROL 0x44 18 + 19 + #define PCI_INT_STATUS 0x48 20 + #define PCI_INT_ENABLE 0x4c 21 + /* status and enable have identical bit numbers */ 22 + #define PCI_INT_INT_PEND (1<<31) 23 + #define PCI_INT_FRC_INT (1<<30) 24 + #define PCI_INT_SLV_ADR_PERR (1<<28) 25 + #define PCI_INT_SLV_DAT_PERR (1<<27) 26 + #define PCI_INT_MST_DAT_PERR (1<<26) 27 + #define PCI_INT_MST_DEV_TO (1<<25) 28 + #define PCI_INT_INT_SLV_TO (1<<23) 29 + #define PCI_INT_AUX_TO (1<<18) 30 + #define PCI_INT_AUX_INT (1<<17) 31 + #define PCI_INT_P1394_INT (1<<16) 32 + #define PCI_INT_DMA4_PCL (1<<9) 33 + #define PCI_INT_DMA4_HLT (1<<8) 34 + #define PCI_INT_DMA3_PCL (1<<7) 35 + #define PCI_INT_DMA3_HLT (1<<6) 36 + #define PCI_INT_DMA2_PCL (1<<5) 37 + #define PCI_INT_DMA2_HLT (1<<4) 38 + #define PCI_INT_DMA1_PCL (1<<3) 39 + #define PCI_INT_DMA1_HLT (1<<2) 40 + #define PCI_INT_DMA0_PCL (1<<1) 41 + #define PCI_INT_DMA0_HLT (1<<0) 42 + /* all DMA interrupts combined: */ 43 + #define PCI_INT_DMA_ALL 0x3ff 44 + 45 + #define PCI_INT_DMA_HLT(chan) (1 << (chan * 2)) 46 + #define PCI_INT_DMA_PCL(chan) (1 << (chan * 2 + 1)) 47 + 48 + #define LBUS_ADDR 0xb4 49 + #define LBUS_ADDR_SEL_RAM (0x0<<16) 50 + #define LBUS_ADDR_SEL_ROM (0x1<<16) 51 + #define LBUS_ADDR_SEL_AUX (0x2<<16) 52 + #define LBUS_ADDR_SEL_ZV (0x3<<16) 53 + 54 + #define GPIO_CTRL_A 0xb8 55 + #define GPIO_CTRL_B 0xbc 56 + #define GPIO_DATA_BASE 0xc0 57 + 58 + #define DMA_BREG(base, chan) (base + chan * 0x20) 59 + #define DMA_SREG(base, chan) (base + chan * 0x10) 60 + 61 + #define PCL_NEXT_INVALID (1<<0) 62 + 63 + /* transfer commands */ 64 + #define PCL_CMD_RCV (0x1<<24) 65 + #define PCL_CMD_RCV_AND_UPDATE (0xa<<24) 66 + #define PCL_CMD_XMT (0x2<<24) 67 + #define PCL_CMD_UNFXMT (0xc<<24) 68 + #define PCL_CMD_PCI_TO_LBUS (0x8<<24) 69 + #define PCL_CMD_LBUS_TO_PCI (0x9<<24) 70 + 71 + /* aux commands */ 72 + #define PCL_CMD_NOP (0x0<<24) 73 + #define PCL_CMD_LOAD (0x3<<24) 74 + #define PCL_CMD_STOREQ (0x4<<24) 75 + #define PCL_CMD_STORED (0xb<<24) 76 + #define PCL_CMD_STORE0 (0x5<<24) 77 + #define PCL_CMD_STORE1 (0x6<<24) 78 + #define PCL_CMD_COMPARE (0xe<<24) 79 + #define PCL_CMD_SWAP_COMPARE (0xf<<24) 80 + #define PCL_CMD_ADD (0xd<<24) 81 + #define PCL_CMD_BRANCH (0x7<<24) 82 + 83 + /* BRANCH condition codes */ 84 + #define PCL_COND_DMARDY_SET (0x1<<20) 85 + #define PCL_COND_DMARDY_CLEAR (0x2<<20) 86 + 87 + #define PCL_GEN_INTR (1<<19) 88 + #define PCL_LAST_BUFF (1<<18) 89 + #define PCL_LAST_CMD (PCL_LAST_BUFF) 90 + #define PCL_WAITSTAT (1<<17) 91 + #define PCL_BIGENDIAN (1<<16) 92 + #define PCL_ISOMODE (1<<12) 93 + 94 + #define DMA0_PREV_PCL 0x100 95 + #define DMA1_PREV_PCL 0x120 96 + #define DMA2_PREV_PCL 0x140 97 + #define DMA3_PREV_PCL 0x160 98 + #define DMA4_PREV_PCL 0x180 99 + #define DMA_PREV_PCL(chan) (DMA_BREG(DMA0_PREV_PCL, chan)) 100 + 101 + #define DMA0_CURRENT_PCL 0x104 102 + #define DMA1_CURRENT_PCL 0x124 103 + #define DMA2_CURRENT_PCL 0x144 104 + #define DMA3_CURRENT_PCL 0x164 105 + #define 
DMA4_CURRENT_PCL 0x184 106 + #define DMA_CURRENT_PCL(chan) (DMA_BREG(DMA0_CURRENT_PCL, chan)) 107 + 108 + #define DMA0_CHAN_STAT 0x10c 109 + #define DMA1_CHAN_STAT 0x12c 110 + #define DMA2_CHAN_STAT 0x14c 111 + #define DMA3_CHAN_STAT 0x16c 112 + #define DMA4_CHAN_STAT 0x18c 113 + #define DMA_CHAN_STAT(chan) (DMA_BREG(DMA0_CHAN_STAT, chan)) 114 + /* CHAN_STATUS registers share bits */ 115 + #define DMA_CHAN_STAT_SELFID (1<<31) 116 + #define DMA_CHAN_STAT_ISOPKT (1<<30) 117 + #define DMA_CHAN_STAT_PCIERR (1<<29) 118 + #define DMA_CHAN_STAT_PKTERR (1<<28) 119 + #define DMA_CHAN_STAT_PKTCMPL (1<<27) 120 + #define DMA_CHAN_STAT_SPECIALACK (1<<14) 121 + 122 + #define DMA0_CHAN_CTRL 0x110 123 + #define DMA1_CHAN_CTRL 0x130 124 + #define DMA2_CHAN_CTRL 0x150 125 + #define DMA3_CHAN_CTRL 0x170 126 + #define DMA4_CHAN_CTRL 0x190 127 + #define DMA_CHAN_CTRL(chan) (DMA_BREG(DMA0_CHAN_CTRL, chan)) 128 + /* CHAN_CTRL registers share bits */ 129 + #define DMA_CHAN_CTRL_ENABLE (1<<31) 130 + #define DMA_CHAN_CTRL_BUSY (1<<30) 131 + #define DMA_CHAN_CTRL_LINK (1<<29) 132 + 133 + #define DMA0_READY 0x114 134 + #define DMA1_READY 0x134 135 + #define DMA2_READY 0x154 136 + #define DMA3_READY 0x174 137 + #define DMA4_READY 0x194 138 + #define DMA_READY(chan) (DMA_BREG(DMA0_READY, chan)) 139 + 140 + #define DMA_GLOBAL_REGISTER 0x908 141 + 142 + #define FIFO_SIZES 0xa00 143 + 144 + #define FIFO_CONTROL 0xa10 145 + #define FIFO_CONTROL_GRF_FLUSH (1<<4) 146 + #define FIFO_CONTROL_ITF_FLUSH (1<<3) 147 + #define FIFO_CONTROL_ATF_FLUSH (1<<2) 148 + 149 + #define FIFO_XMIT_THRESHOLD 0xa14 150 + 151 + #define DMA0_WORD0_CMP_VALUE 0xb00 152 + #define DMA1_WORD0_CMP_VALUE 0xb10 153 + #define DMA2_WORD0_CMP_VALUE 0xb20 154 + #define DMA3_WORD0_CMP_VALUE 0xb30 155 + #define DMA4_WORD0_CMP_VALUE 0xb40 156 + #define DMA_WORD0_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD0_CMP_VALUE, chan)) 157 + 158 + #define DMA0_WORD0_CMP_ENABLE 0xb04 159 + #define DMA1_WORD0_CMP_ENABLE 0xb14 160 + #define DMA2_WORD0_CMP_ENABLE 0xb24 161 + #define DMA3_WORD0_CMP_ENABLE 0xb34 162 + #define DMA4_WORD0_CMP_ENABLE 0xb44 163 + #define DMA_WORD0_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD0_CMP_ENABLE, chan)) 164 + 165 + #define DMA0_WORD1_CMP_VALUE 0xb08 166 + #define DMA1_WORD1_CMP_VALUE 0xb18 167 + #define DMA2_WORD1_CMP_VALUE 0xb28 168 + #define DMA3_WORD1_CMP_VALUE 0xb38 169 + #define DMA4_WORD1_CMP_VALUE 0xb48 170 + #define DMA_WORD1_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD1_CMP_VALUE, chan)) 171 + 172 + #define DMA0_WORD1_CMP_ENABLE 0xb0c 173 + #define DMA1_WORD1_CMP_ENABLE 0xb1c 174 + #define DMA2_WORD1_CMP_ENABLE 0xb2c 175 + #define DMA3_WORD1_CMP_ENABLE 0xb3c 176 + #define DMA4_WORD1_CMP_ENABLE 0xb4c 177 + #define DMA_WORD1_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD1_CMP_ENABLE, chan)) 178 + /* word 1 compare enable flags */ 179 + #define DMA_WORD1_CMP_MATCH_OTHERBUS (1<<15) 180 + #define DMA_WORD1_CMP_MATCH_BROADCAST (1<<14) 181 + #define DMA_WORD1_CMP_MATCH_BUS_BCAST (1<<13) 182 + #define DMA_WORD1_CMP_MATCH_LOCAL_NODE (1<<12) 183 + #define DMA_WORD1_CMP_MATCH_EXACT (1<<11) 184 + #define DMA_WORD1_CMP_ENABLE_SELF_ID (1<<10) 185 + #define DMA_WORD1_CMP_ENABLE_MASTER (1<<8) 186 + 187 + #define LINK_ID 0xf00 188 + #define LINK_ID_BUS(id) (id<<22) 189 + #define LINK_ID_NODE(id) (id<<16) 190 + 191 + #define LINK_CONTROL 0xf04 192 + #define LINK_CONTROL_BUSY (1<<29) 193 + #define LINK_CONTROL_TX_ISO_EN (1<<26) 194 + #define LINK_CONTROL_RX_ISO_EN (1<<25) 195 + #define LINK_CONTROL_TX_ASYNC_EN (1<<24) 196 + #define LINK_CONTROL_RX_ASYNC_EN (1<<23) 197 + #define 
LINK_CONTROL_RESET_TX (1<<21) 198 + #define LINK_CONTROL_RESET_RX (1<<20) 199 + #define LINK_CONTROL_CYCMASTER (1<<11) 200 + #define LINK_CONTROL_CYCSOURCE (1<<10) 201 + #define LINK_CONTROL_CYCTIMEREN (1<<9) 202 + #define LINK_CONTROL_RCV_CMP_VALID (1<<7) 203 + #define LINK_CONTROL_SNOOP_ENABLE (1<<6) 204 + 205 + #define CYCLE_TIMER 0xf08 206 + 207 + #define LINK_PHY 0xf0c 208 + #define LINK_PHY_READ (1<<31) 209 + #define LINK_PHY_WRITE (1<<30) 210 + #define LINK_PHY_ADDR(addr) (addr<<24) 211 + #define LINK_PHY_WDATA(data) (data<<16) 212 + #define LINK_PHY_RADDR(addr) (addr<<8) 213 + 214 + #define LINK_INT_STATUS 0xf14 215 + #define LINK_INT_ENABLE 0xf18 216 + /* status and enable have identical bit numbers */ 217 + #define LINK_INT_LINK_INT (1<<31) 218 + #define LINK_INT_PHY_TIME_OUT (1<<30) 219 + #define LINK_INT_PHY_REG_RCVD (1<<29) 220 + #define LINK_INT_PHY_BUSRESET (1<<28) 221 + #define LINK_INT_TX_RDY (1<<26) 222 + #define LINK_INT_RX_DATA_RDY (1<<25) 223 + #define LINK_INT_IT_STUCK (1<<20) 224 + #define LINK_INT_AT_STUCK (1<<19) 225 + #define LINK_INT_SNTRJ (1<<17) 226 + #define LINK_INT_HDR_ERR (1<<16) 227 + #define LINK_INT_TC_ERR (1<<15) 228 + #define LINK_INT_CYC_SEC (1<<11) 229 + #define LINK_INT_CYC_STRT (1<<10) 230 + #define LINK_INT_CYC_DONE (1<<9) 231 + #define LINK_INT_CYC_PEND (1<<8) 232 + #define LINK_INT_CYC_LOST (1<<7) 233 + #define LINK_INT_CYC_ARB_FAILED (1<<6) 234 + #define LINK_INT_GRF_OVER_FLOW (1<<5) 235 + #define LINK_INT_ITF_UNDER_FLOW (1<<4) 236 + #define LINK_INT_ATF_UNDER_FLOW (1<<3) 237 + #define LINK_INT_IARB_FAILED (1<<0)
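The per-channel registers repeat at a stride of 0x20 (the DMA_BREG() set) or 0x10 (the DMA_SREG() compare-register set), which is what the DMA_*(chan) accessor macros encode. A hypothetical compile-time sanity check, not part of the patch:

        #include <linux/kernel.h>
        #include "nosy.h"

        static inline void check_dma_register_strides(void)
        {
                BUILD_BUG_ON(DMA_CHAN_CTRL(3) != DMA3_CHAN_CTRL);               /* 0x110 + 3 * 0x20 == 0x170 */
                BUILD_BUG_ON(DMA_WORD0_CMP_VALUE(2) != DMA2_WORD0_CMP_VALUE);   /* 0xb00 + 2 * 0x10 == 0xb20 */
        }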
+555 -160
drivers/firewire/ohci.c
··· 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 */ 20 21 #include <linux/compiler.h> 22 #include <linux/delay.h> 23 #include <linux/device.h> ··· 33 #include <linux/mm.h> 34 #include <linux/module.h> 35 #include <linux/moduleparam.h> 36 #include <linux/pci.h> 37 #include <linux/pci_ids.h> 38 #include <linux/slab.h> 39 #include <linux/spinlock.h> 40 #include <linux/string.h> 41 42 #include <asm/byteorder.h> 43 #include <asm/page.h> ··· 173 int generation; 174 int request_generation; /* for timestamping incoming requests */ 175 unsigned quirks; 176 177 /* 178 * Spinlock for accessing fw_ohci data. Never call out of ··· 184 */ 185 spinlock_t lock; 186 187 struct ar_context ar_request_ctx; 188 struct ar_context ar_response_ctx; 189 struct context at_request_ctx; 190 struct context at_response_ctx; 191 192 - u32 it_context_mask; 193 struct iso_context *it_context_list; 194 - u64 ir_context_channels; 195 - u32 ir_context_mask; 196 struct iso_context *ir_context_list; 197 198 __be32 *config_rom; 199 dma_addr_t config_rom_bus; ··· 242 243 static char ohci_driver_name[] = KBUILD_MODNAME; 244 245 #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 246 247 #define QUIRK_CYCLE_TIMER 1 248 #define QUIRK_RESET_PACKET 2 249 #define QUIRK_BE_HEADERS 4 250 #define QUIRK_NO_1394A 8 251 252 /* In case of multiple matches in ohci_quirks[], only the first one is used. */ 253 static const struct { ··· 260 QUIRK_NO_1394A}, 261 {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, 262 {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 263 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 264 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 265 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, ··· 274 ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) 275 ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) 276 ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) 277 ")"); 278 279 #define OHCI_PARAM_DEBUG_AT_AR 1 ··· 303 !(evt & OHCI1394_busReset)) 304 return; 305 306 - fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, 307 evt & OHCI1394_selfIDComplete ? " selfID" : "", 308 evt & OHCI1394_RQPkt ? " AR_req" : "", 309 evt & OHCI1394_RSPkt ? " AR_resp" : "", ··· 313 evt & OHCI1394_isochTx ? " IT" : "", 314 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", 315 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", 316 evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", 317 evt & OHCI1394_regAccessFail ? " regAccessFail" : "", 318 evt & OHCI1394_busReset ? " busReset" : "", ··· 321 OHCI1394_RSPkt | OHCI1394_reqTxComplete | 322 OHCI1394_respTxComplete | OHCI1394_isochRx | 323 OHCI1394_isochTx | OHCI1394_postedWriteErr | 324 - OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent | 325 OHCI1394_regAccessFail | OHCI1394_busReset) 326 ? " ?" 
: ""); 327 } ··· 487 int i; 488 489 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); 490 - for (i = 0; i < 10; i++) { 491 val = reg_read(ohci, OHCI1394_PhyControl); 492 if (val & OHCI1394_PhyControl_ReadDone) 493 return OHCI1394_PhyControl_ReadData(val); 494 495 - msleep(1); 496 } 497 fw_error("failed to read phy reg\n"); 498 ··· 510 511 reg_write(ohci, OHCI1394_PhyControl, 512 OHCI1394_PhyControl_Write(addr, val)); 513 - for (i = 0; i < 100; i++) { 514 val = reg_read(ohci, OHCI1394_PhyControl); 515 if (!(val & OHCI1394_PhyControl_WritePending)) 516 return 0; 517 518 - msleep(1); 519 } 520 fw_error("failed to write phy reg\n"); 521 522 return -EBUSY; 523 } 524 525 - static int ohci_update_phy_reg(struct fw_card *card, int addr, 526 - int clear_bits, int set_bits) 527 { 528 - struct fw_ohci *ohci = fw_ohci(card); 529 - int ret; 530 - 531 - ret = read_phy_reg(ohci, addr); 532 if (ret < 0) 533 return ret; 534 ··· 544 { 545 int ret; 546 547 - ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5); 548 if (ret < 0) 549 return ret; 550 551 return read_phy_reg(ohci, addr); 552 } 553 554 static int ar_context_add_page(struct ar_context *ctx) ··· 598 ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset); 599 ab->descriptor.branch_address = 0; 600 601 ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1); 602 ctx->last_buffer->next = ab; 603 ctx->last_buffer = ab; ··· 986 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); 987 988 desc->used += (z + extra) * sizeof(*d); 989 ctx->prev->branch_address = cpu_to_le32(d_bus | z); 990 ctx->prev = find_branch_descriptor(d, z); 991 ··· 1074 header[1] = cpu_to_le32(packet->header[0]); 1075 header[2] = cpu_to_le32(packet->header[1]); 1076 d[0].req_count = cpu_to_le16(12); 1077 break; 1078 1079 case 4: ··· 1362 1363 } 1364 1365 static void bus_reset_tasklet(unsigned long data) 1366 { 1367 struct fw_ohci *ohci = (struct fw_ohci *)data; ··· 1442 unsigned long flags; 1443 void *free_rom = NULL; 1444 dma_addr_t free_rom_bus = 0; 1445 1446 reg = reg_read(ohci, OHCI1394_NodeID); 1447 if (!(reg & OHCI1394_NodeID_idValid)) { ··· 1455 } 1456 ohci->node_id = reg & (OHCI1394_NodeID_busNumber | 1457 OHCI1394_NodeID_nodeNumber); 1458 1459 reg = reg_read(ohci, OHCI1394_SelfIDCount); 1460 if (reg & OHCI1394_SelfIDCount_selfIDError) { ··· 1569 self_id_count, ohci->self_id_buffer); 1570 1571 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, 1572 - self_id_count, ohci->self_id_buffer); 1573 } 1574 1575 static irqreturn_t irq_handler(int irq, void *data) ··· 1647 fw_notify("isochronous cycle inconsistent\n"); 1648 } 1649 1650 return IRQ_HANDLED; 1651 } 1652 ··· 1715 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; 1716 set = 0; 1717 } 1718 - ret = ohci_update_phy_reg(&ohci->card, 5, clear, set); 1719 if (ret < 0) 1720 return ret; 1721 ··· 1737 { 1738 struct fw_ohci *ohci = fw_ohci(card); 1739 struct pci_dev *dev = to_pci_dev(card->device); 1740 - u32 lps; 1741 int i, ret; 1742 1743 if (software_reset(ohci)) { ··· 1773 OHCI1394_HCControl_noByteSwapData); 1774 1775 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); 1776 - reg_write(ohci, OHCI1394_LinkControlClear, 1777 - OHCI1394_LinkControl_rcvPhyPkt); 1778 reg_write(ohci, OHCI1394_LinkControlSet, 1779 OHCI1394_LinkControl_rcvSelfID | 1780 OHCI1394_LinkControl_cycleTimerEnable | 1781 OHCI1394_LinkControl_cycleMaster); 1782 1783 reg_write(ohci, OHCI1394_ATRetries, 1784 OHCI1394_MAX_AT_REQ_RETRIES | 1785 (OHCI1394_MAX_AT_RESP_RETRIES << 4) | 1786 - 
(OHCI1394_MAX_PHYS_RESP_RETRIES << 8)); 1787 1788 ar_context_run(&ohci->ar_request_ctx); 1789 ar_context_run(&ohci->ar_response_ctx); ··· 1808 reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); 1809 reg_write(ohci, OHCI1394_IntEventClear, ~0); 1810 reg_write(ohci, OHCI1394_IntMaskClear, ~0); 1811 - reg_write(ohci, OHCI1394_IntMaskSet, 1812 - OHCI1394_selfIDComplete | 1813 - OHCI1394_RQPkt | OHCI1394_RSPkt | 1814 - OHCI1394_reqTxComplete | OHCI1394_respTxComplete | 1815 - OHCI1394_isochRx | OHCI1394_isochTx | 1816 - OHCI1394_postedWriteErr | OHCI1394_cycleTooLong | 1817 - OHCI1394_cycleInconsistent | OHCI1394_regAccessFail | 1818 - OHCI1394_masterIntEnable); 1819 - if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) 1820 - reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); 1821 1822 ret = configure_1394a_enhancements(ohci); 1823 if (ret < 0) ··· 1864 1865 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); 1866 1867 if (request_irq(dev->irq, irq_handler, 1868 - IRQF_SHARED, ohci_driver_name, ohci)) { 1869 - fw_error("Failed to allocate shared interrupt %d.\n", 1870 - dev->irq); 1871 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1872 ohci->config_rom, ohci->config_rom_bus); 1873 return -EIO; 1874 } 1875 1876 reg_write(ohci, OHCI1394_HCControlSet, 1877 OHCI1394_HCControl_linkEnable | 1878 OHCI1394_HCControl_BIBimageValid); 1879 flush_writes(ohci); 1880 1881 - /* 1882 - * We are ready to go, initiate bus reset to finish the 1883 - * initialization. 1884 - */ 1885 - 1886 - fw_core_initiate_bus_reset(&ohci->card, 1); 1887 1888 return 0; 1889 } ··· 1970 * takes effect. 1971 */ 1972 if (ret == 0) 1973 - fw_core_initiate_bus_reset(&ohci->card, 1); 1974 else 1975 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1976 next_config_rom, next_config_rom_bus); ··· 2060 #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ 2061 } 2062 2063 - static u32 cycle_timer_ticks(u32 cycle_timer) 2064 - { 2065 - u32 ticks; 2066 - 2067 - ticks = cycle_timer & 0xfff; 2068 - ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); 2069 - ticks += (3072 * 8000) * (cycle_timer >> 25); 2070 - 2071 - return ticks; 2072 - } 2073 - 2074 - /* 2075 - * Some controllers exhibit one or more of the following bugs when updating the 2076 - * iso cycle timer register: 2077 - * - When the lowest six bits are wrapping around to zero, a read that happens 2078 - * at the same time will return garbage in the lowest ten bits. 2079 - * - When the cycleOffset field wraps around to zero, the cycleCount field is 2080 - * not incremented for about 60 ns. 2081 - * - Occasionally, the entire register reads zero. 2082 - * 2083 - * To catch these, we read the register three times and ensure that the 2084 - * difference between each two consecutive reads is approximately the same, i.e. 2085 - * less than twice the other. Furthermore, any negative difference indicates an 2086 - * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to 2087 - * execute, so we have enough precision to compute the ratio of the differences.) 
2088 - */ 2089 - static u32 ohci_get_cycle_time(struct fw_card *card) 2090 { 2091 struct fw_ohci *ohci = fw_ohci(card); 2092 - u32 c0, c1, c2; 2093 - u32 t0, t1, t2; 2094 - s32 diff01, diff12; 2095 - int i; 2096 2097 - c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 2098 2099 - if (ohci->quirks & QUIRK_CYCLE_TIMER) { 2100 - i = 0; 2101 - c1 = c2; 2102 - c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 2103 - do { 2104 - c0 = c1; 2105 - c1 = c2; 2106 - c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 2107 - t0 = cycle_timer_ticks(c0); 2108 - t1 = cycle_timer_ticks(c1); 2109 - t2 = cycle_timer_ticks(c2); 2110 - diff01 = t1 - t0; 2111 - diff12 = t2 - t1; 2112 - } while ((diff01 <= 0 || diff12 <= 0 || 2113 - diff01 / diff12 >= 2 || diff12 / diff01 >= 2) 2114 - && i++ < 20); 2115 } 2116 2117 - return c2; 2118 } 2119 2120 static void copy_iso_headers(struct iso_context *ctx, void *p) ··· 2205 __le32 *ir_header; 2206 void *p; 2207 2208 - for (pd = d; pd <= last; pd++) { 2209 if (pd->transfer_status) 2210 break; 2211 - } 2212 if (pd > last) 2213 /* Descriptor(s) not done yet, stop iteration */ 2214 return 0; ··· 2217 2218 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { 2219 ir_header = (__le32 *) p; 2220 - ctx->base.callback(&ctx->base, 2221 - le32_to_cpu(ir_header[0]) & 0xffff, 2222 - ctx->header_length, ctx->header, 2223 - ctx->base.callback_data); 2224 ctx->header_length = 0; 2225 } 2226 2227 return 1; 2228 } ··· 2274 ctx->header_length += 4; 2275 } 2276 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { 2277 - ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count), 2278 - ctx->header_length, ctx->header, 2279 - ctx->base.callback_data); 2280 ctx->header_length = 0; 2281 } 2282 return 1; 2283 } 2284 2285 static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, 2286 int type, int channel, size_t header_size) 2287 { 2288 struct fw_ohci *ohci = fw_ohci(card); 2289 - struct iso_context *ctx, *list; 2290 - descriptor_callback_t callback; 2291 - u64 *channels, dont_care = ~0ULL; 2292 - u32 *mask, regs; 2293 unsigned long flags; 2294 - int index, ret = -ENOMEM; 2295 - 2296 - if (type == FW_ISO_CONTEXT_TRANSMIT) { 2297 - channels = &dont_care; 2298 - mask = &ohci->it_context_mask; 2299 - list = ohci->it_context_list; 2300 - callback = handle_it_packet; 2301 - } else { 2302 - channels = &ohci->ir_context_channels; 2303 - mask = &ohci->ir_context_mask; 2304 - list = ohci->ir_context_list; 2305 - callback = handle_ir_packet_per_buffer; 2306 - } 2307 2308 spin_lock_irqsave(&ohci->lock, flags); 2309 - index = *channels & 1ULL << channel ? 
ffs(*mask) - 1 : -1; 2310 - if (index >= 0) { 2311 - *channels &= ~(1ULL << channel); 2312 - *mask &= ~(1 << index); 2313 } 2314 spin_unlock_irqrestore(&ohci->lock, flags); 2315 2316 if (index < 0) 2317 - return ERR_PTR(-EBUSY); 2318 2319 - if (type == FW_ISO_CONTEXT_TRANSMIT) 2320 - regs = OHCI1394_IsoXmitContextBase(index); 2321 - else 2322 - regs = OHCI1394_IsoRcvContextBase(index); 2323 - 2324 - ctx = &list[index]; 2325 memset(ctx, 0, sizeof(*ctx)); 2326 ctx->header_length = 0; 2327 ctx->header = (void *) __get_free_page(GFP_KERNEL); 2328 - if (ctx->header == NULL) 2329 goto out; 2330 - 2331 ret = context_init(&ctx->context, ohci, regs, callback); 2332 if (ret < 0) 2333 goto out_with_header; 2334 2335 return &ctx->base; 2336 ··· 2374 free_page((unsigned long)ctx->header); 2375 out: 2376 spin_lock_irqsave(&ohci->lock, flags); 2377 *mask |= 1 << index; 2378 spin_unlock_irqrestore(&ohci->lock, flags); 2379 2380 return ERR_PTR(ret); ··· 2396 { 2397 struct iso_context *ctx = container_of(base, struct iso_context, base); 2398 struct fw_ohci *ohci = ctx->context.ohci; 2399 - u32 control, match; 2400 int index; 2401 2402 - if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { 2403 index = ctx - ohci->it_context_list; 2404 match = 0; 2405 if (cycle >= 0) ··· 2410 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index); 2411 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); 2412 context_run(&ctx->context, match); 2413 - } else { 2414 index = ctx - ohci->ir_context_list; 2415 - control = IR_CONTEXT_ISOCH_HEADER; 2416 match = (tags << 28) | (sync << 8) | ctx->base.channel; 2417 if (cycle >= 0) { 2418 match |= (cycle & 0x07fff) << 12; ··· 2427 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); 2428 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); 2429 context_run(&ctx->context, control); 2430 } 2431 2432 return 0; ··· 2439 struct iso_context *ctx = container_of(base, struct iso_context, base); 2440 int index; 2441 2442 - if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { 2443 index = ctx - ohci->it_context_list; 2444 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); 2445 - } else { 2446 index = ctx - ohci->ir_context_list; 2447 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); 2448 } 2449 flush_writes(ohci); 2450 context_stop(&ctx->context); ··· 2470 2471 spin_lock_irqsave(&ohci->lock, flags); 2472 2473 - if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { 2474 index = ctx - ohci->it_context_list; 2475 ohci->it_context_mask |= 1 << index; 2476 - } else { 2477 index = ctx - ohci->ir_context_list; 2478 ohci->ir_context_mask |= 1 << index; 2479 ohci->ir_context_channels |= 1ULL << base->channel; 2480 } 2481 2482 spin_unlock_irqrestore(&ohci->lock, flags); 2483 } 2484 2485 - static int ohci_queue_iso_transmit(struct fw_iso_context *base, 2486 - struct fw_iso_packet *packet, 2487 - struct fw_iso_buffer *buffer, 2488 - unsigned long payload) 2489 { 2490 - struct iso_context *ctx = container_of(base, struct iso_context, base); 2491 struct descriptor *d, *last, *pd; 2492 struct fw_iso_packet *p; 2493 __le32 *header; ··· 2624 return 0; 2625 } 2626 2627 - static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, 2628 - struct fw_iso_packet *packet, 2629 - struct fw_iso_buffer *buffer, 2630 - unsigned long payload) 2631 { 2632 - struct iso_context *ctx = container_of(base, struct iso_context, base); 2633 struct descriptor *d, *pd; 2634 - struct fw_iso_packet *p = packet; 2635 dma_addr_t d_bus, page_bus; 2636 u32 z, header_z, rest; 2637 int i, j, length; 
··· 2639 * The OHCI controller puts the isochronous header and trailer in the 2640 * buffer, so we need at least 8 bytes. 2641 */ 2642 - packet_count = p->header_length / ctx->base.header_size; 2643 header_size = max(ctx->base.header_size, (size_t)8); 2644 2645 /* Get header size in number of descriptors. */ 2646 header_z = DIV_ROUND_UP(header_size, sizeof(*d)); 2647 page = payload >> PAGE_SHIFT; 2648 offset = payload & ~PAGE_MASK; 2649 - payload_per_buffer = p->payload_length / packet_count; 2650 2651 for (i = 0; i < packet_count; i++) { 2652 /* d points to the header descriptor */ ··· 2658 2659 d->control = cpu_to_le16(DESCRIPTOR_STATUS | 2660 DESCRIPTOR_INPUT_MORE); 2661 - if (p->skip && i == 0) 2662 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); 2663 d->req_count = cpu_to_le16(header_size); 2664 d->res_count = d->req_count; ··· 2691 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | 2692 DESCRIPTOR_INPUT_LAST | 2693 DESCRIPTOR_BRANCH_ALWAYS); 2694 - if (p->interrupt && i == packet_count - 1) 2695 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); 2696 2697 context_append(&ctx->context, d, z, header_z); 2698 } 2699 2700 return 0; ··· 2759 { 2760 struct iso_context *ctx = container_of(base, struct iso_context, base); 2761 unsigned long flags; 2762 - int ret; 2763 2764 spin_lock_irqsave(&ctx->context.ohci->lock, flags); 2765 - if (base->type == FW_ISO_CONTEXT_TRANSMIT) 2766 - ret = ohci_queue_iso_transmit(base, packet, buffer, payload); 2767 - else 2768 - ret = ohci_queue_iso_receive_packet_per_buffer(base, packet, 2769 - buffer, payload); 2770 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); 2771 2772 return ret; ··· 2780 2781 static const struct fw_card_driver ohci_driver = { 2782 .enable = ohci_enable, 2783 .update_phy_reg = ohci_update_phy_reg, 2784 .set_config_rom = ohci_set_config_rom, 2785 .send_request = ohci_send_request, 2786 .send_response = ohci_send_response, 2787 .cancel_packet = ohci_cancel_packet, 2788 .enable_phys_dma = ohci_enable_phys_dma, 2789 - .get_cycle_time = ohci_get_cycle_time, 2790 2791 .allocate_iso_context = ohci_allocate_iso_context, 2792 .free_iso_context = ohci_free_iso_context, 2793 .queue_iso = ohci_queue_iso, 2794 .start_iso = ohci_start_iso, 2795 .stop_iso = ohci_stop_iso, ··· 2857 pci_set_drvdata(dev, ohci); 2858 2859 spin_lock_init(&ohci->lock); 2860 2861 tasklet_init(&ohci->bus_reset_tasklet, 2862 bus_reset_tasklet, (unsigned long)ohci); ··· 3018 context_release(&ohci->at_response_ctx); 3019 kfree(ohci->it_context_list); 3020 kfree(ohci->ir_context_list); 3021 pci_iounmap(dev, ohci->registers); 3022 pci_release_region(dev, 0); 3023 pci_disable_device(dev); ··· 3036 3037 software_reset(ohci); 3038 free_irq(dev->irq, ohci); 3039 err = pci_save_state(dev); 3040 if (err) { 3041 fw_error("pci_save_state failed\n");
··· 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 */ 20 21 + #include <linux/bug.h> 22 #include <linux/compiler.h> 23 #include <linux/delay.h> 24 #include <linux/device.h> ··· 32 #include <linux/mm.h> 33 #include <linux/module.h> 34 #include <linux/moduleparam.h> 35 + #include <linux/mutex.h> 36 #include <linux/pci.h> 37 #include <linux/pci_ids.h> 38 #include <linux/slab.h> 39 #include <linux/spinlock.h> 40 #include <linux/string.h> 41 + #include <linux/time.h> 42 43 #include <asm/byteorder.h> 44 #include <asm/page.h> ··· 170 int generation; 171 int request_generation; /* for timestamping incoming requests */ 172 unsigned quirks; 173 + unsigned int pri_req_max; 174 + u32 bus_time; 175 + bool is_root; 176 + bool csr_state_setclear_abdicate; 177 178 /* 179 * Spinlock for accessing fw_ohci data. Never call out of ··· 177 */ 178 spinlock_t lock; 179 180 + struct mutex phy_reg_mutex; 181 + 182 struct ar_context ar_request_ctx; 183 struct ar_context ar_response_ctx; 184 struct context at_request_ctx; 185 struct context at_response_ctx; 186 187 + u32 it_context_mask; /* unoccupied IT contexts */ 188 struct iso_context *it_context_list; 189 + u64 ir_context_channels; /* unoccupied channels */ 190 + u32 ir_context_mask; /* unoccupied IR contexts */ 191 struct iso_context *ir_context_list; 192 + u64 mc_channels; /* channels in use by the multichannel IR context */ 193 + bool mc_allocated; 194 195 __be32 *config_rom; 196 dma_addr_t config_rom_bus; ··· 231 232 static char ohci_driver_name[] = KBUILD_MODNAME; 233 234 + #define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 235 #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 236 237 #define QUIRK_CYCLE_TIMER 1 238 #define QUIRK_RESET_PACKET 2 239 #define QUIRK_BE_HEADERS 4 240 #define QUIRK_NO_1394A 8 241 + #define QUIRK_NO_MSI 16 242 243 /* In case of multiple matches in ohci_quirks[], only the first one is used. */ 244 static const struct { ··· 247 QUIRK_NO_1394A}, 248 {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, 249 {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 250 + {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI}, 251 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 252 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 253 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, ··· 260 ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) 261 ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) 262 ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) 263 + ", disable MSI = " __stringify(QUIRK_NO_MSI) 264 ")"); 265 266 #define OHCI_PARAM_DEBUG_AT_AR 1 ··· 288 !(evt & OHCI1394_busReset)) 289 return; 290 291 + fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, 292 evt & OHCI1394_selfIDComplete ? " selfID" : "", 293 evt & OHCI1394_RQPkt ? " AR_req" : "", 294 evt & OHCI1394_RSPkt ? " AR_resp" : "", ··· 298 evt & OHCI1394_isochTx ? " IT" : "", 299 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", 300 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", 301 + evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", 302 evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", 303 evt & OHCI1394_regAccessFail ? " regAccessFail" : "", 304 evt & OHCI1394_busReset ? 
" busReset" : "", ··· 305 OHCI1394_RSPkt | OHCI1394_reqTxComplete | 306 OHCI1394_respTxComplete | OHCI1394_isochRx | 307 OHCI1394_isochTx | OHCI1394_postedWriteErr | 308 + OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | 309 + OHCI1394_cycleInconsistent | 310 OHCI1394_regAccessFail | OHCI1394_busReset) 311 ? " ?" : ""); 312 } ··· 470 int i; 471 472 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); 473 + for (i = 0; i < 3 + 100; i++) { 474 val = reg_read(ohci, OHCI1394_PhyControl); 475 if (val & OHCI1394_PhyControl_ReadDone) 476 return OHCI1394_PhyControl_ReadData(val); 477 478 + /* 479 + * Try a few times without waiting. Sleeping is necessary 480 + * only when the link/PHY interface is busy. 481 + */ 482 + if (i >= 3) 483 + msleep(1); 484 } 485 fw_error("failed to read phy reg\n"); 486 ··· 488 489 reg_write(ohci, OHCI1394_PhyControl, 490 OHCI1394_PhyControl_Write(addr, val)); 491 + for (i = 0; i < 3 + 100; i++) { 492 val = reg_read(ohci, OHCI1394_PhyControl); 493 if (!(val & OHCI1394_PhyControl_WritePending)) 494 return 0; 495 496 + if (i >= 3) 497 + msleep(1); 498 } 499 fw_error("failed to write phy reg\n"); 500 501 return -EBUSY; 502 } 503 504 + static int update_phy_reg(struct fw_ohci *ohci, int addr, 505 + int clear_bits, int set_bits) 506 { 507 + int ret = read_phy_reg(ohci, addr); 508 if (ret < 0) 509 return ret; 510 ··· 524 { 525 int ret; 526 527 + ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5); 528 if (ret < 0) 529 return ret; 530 531 return read_phy_reg(ohci, addr); 532 + } 533 + 534 + static int ohci_read_phy_reg(struct fw_card *card, int addr) 535 + { 536 + struct fw_ohci *ohci = fw_ohci(card); 537 + int ret; 538 + 539 + mutex_lock(&ohci->phy_reg_mutex); 540 + ret = read_phy_reg(ohci, addr); 541 + mutex_unlock(&ohci->phy_reg_mutex); 542 + 543 + return ret; 544 + } 545 + 546 + static int ohci_update_phy_reg(struct fw_card *card, int addr, 547 + int clear_bits, int set_bits) 548 + { 549 + struct fw_ohci *ohci = fw_ohci(card); 550 + int ret; 551 + 552 + mutex_lock(&ohci->phy_reg_mutex); 553 + ret = update_phy_reg(ohci, addr, clear_bits, set_bits); 554 + mutex_unlock(&ohci->phy_reg_mutex); 555 + 556 + return ret; 557 } 558 559 static int ar_context_add_page(struct ar_context *ctx) ··· 553 ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset); 554 ab->descriptor.branch_address = 0; 555 556 + wmb(); /* finish init of new descriptors before branch_address update */ 557 ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1); 558 ctx->last_buffer->next = ab; 559 ctx->last_buffer = ab; ··· 940 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); 941 942 desc->used += (z + extra) * sizeof(*d); 943 + 944 + wmb(); /* finish init of new descriptors before branch_address update */ 945 ctx->prev->branch_address = cpu_to_le32(d_bus | z); 946 ctx->prev = find_branch_descriptor(d, z); 947 ··· 1026 header[1] = cpu_to_le32(packet->header[0]); 1027 header[2] = cpu_to_le32(packet->header[1]); 1028 d[0].req_count = cpu_to_le16(12); 1029 + 1030 + if (is_ping_packet(packet->header)) 1031 + d[0].control |= cpu_to_le16(DESCRIPTOR_PING); 1032 break; 1033 1034 case 4: ··· 1311 1312 } 1313 1314 + static u32 cycle_timer_ticks(u32 cycle_timer) 1315 + { 1316 + u32 ticks; 1317 + 1318 + ticks = cycle_timer & 0xfff; 1319 + ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); 1320 + ticks += (3072 * 8000) * (cycle_timer >> 25); 1321 + 1322 + return ticks; 1323 + } 1324 + 1325 + /* 1326 + * Some controllers exhibit one or more of the following bugs when updating 
the 1327 + * iso cycle timer register: 1328 + * - When the lowest six bits are wrapping around to zero, a read that happens 1329 + * at the same time will return garbage in the lowest ten bits. 1330 + * - When the cycleOffset field wraps around to zero, the cycleCount field is 1331 + * not incremented for about 60 ns. 1332 + * - Occasionally, the entire register reads zero. 1333 + * 1334 + * To catch these, we read the register three times and ensure that the 1335 + * difference between each two consecutive reads is approximately the same, i.e. 1336 + * less than twice the other. Furthermore, any negative difference indicates an 1337 + * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to 1338 + * execute, so we have enough precision to compute the ratio of the differences.) 1339 + */ 1340 + static u32 get_cycle_time(struct fw_ohci *ohci) 1341 + { 1342 + u32 c0, c1, c2; 1343 + u32 t0, t1, t2; 1344 + s32 diff01, diff12; 1345 + int i; 1346 + 1347 + c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1348 + 1349 + if (ohci->quirks & QUIRK_CYCLE_TIMER) { 1350 + i = 0; 1351 + c1 = c2; 1352 + c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1353 + do { 1354 + c0 = c1; 1355 + c1 = c2; 1356 + c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1357 + t0 = cycle_timer_ticks(c0); 1358 + t1 = cycle_timer_ticks(c1); 1359 + t2 = cycle_timer_ticks(c2); 1360 + diff01 = t1 - t0; 1361 + diff12 = t2 - t1; 1362 + } while ((diff01 <= 0 || diff12 <= 0 || 1363 + diff01 / diff12 >= 2 || diff12 / diff01 >= 2) 1364 + && i++ < 20); 1365 + } 1366 + 1367 + return c2; 1368 + } 1369 + 1370 + /* 1371 + * This function has to be called at least every 64 seconds. The bus_time 1372 + * field stores not only the upper 25 bits of the BUS_TIME register but also 1373 + * the most significant bit of the cycle timer in bit 6 so that we can detect 1374 + * changes in this bit. 
1375 + */ 1376 + static u32 update_bus_time(struct fw_ohci *ohci) 1377 + { 1378 + u32 cycle_time_seconds = get_cycle_time(ohci) >> 25; 1379 + 1380 + if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) 1381 + ohci->bus_time += 0x40; 1382 + 1383 + return ohci->bus_time | cycle_time_seconds; 1384 + } 1385 + 1386 static void bus_reset_tasklet(unsigned long data) 1387 { 1388 struct fw_ohci *ohci = (struct fw_ohci *)data; ··· 1319 unsigned long flags; 1320 void *free_rom = NULL; 1321 dma_addr_t free_rom_bus = 0; 1322 + bool is_new_root; 1323 1324 reg = reg_read(ohci, OHCI1394_NodeID); 1325 if (!(reg & OHCI1394_NodeID_idValid)) { ··· 1331 } 1332 ohci->node_id = reg & (OHCI1394_NodeID_busNumber | 1333 OHCI1394_NodeID_nodeNumber); 1334 + 1335 + is_new_root = (reg & OHCI1394_NodeID_root) != 0; 1336 + if (!(ohci->is_root && is_new_root)) 1337 + reg_write(ohci, OHCI1394_LinkControlSet, 1338 + OHCI1394_LinkControl_cycleMaster); 1339 + ohci->is_root = is_new_root; 1340 1341 reg = reg_read(ohci, OHCI1394_SelfIDCount); 1342 if (reg & OHCI1394_SelfIDCount_selfIDError) { ··· 1439 self_id_count, ohci->self_id_buffer); 1440 1441 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, 1442 + self_id_count, ohci->self_id_buffer, 1443 + ohci->csr_state_setclear_abdicate); 1444 + ohci->csr_state_setclear_abdicate = false; 1445 } 1446 1447 static irqreturn_t irq_handler(int irq, void *data) ··· 1515 fw_notify("isochronous cycle inconsistent\n"); 1516 } 1517 1518 + if (event & OHCI1394_cycle64Seconds) { 1519 + spin_lock(&ohci->lock); 1520 + update_bus_time(ohci); 1521 + spin_unlock(&ohci->lock); 1522 + } 1523 + 1524 return IRQ_HANDLED; 1525 } 1526 ··· 1577 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; 1578 set = 0; 1579 } 1580 + ret = update_phy_reg(ohci, 5, clear, set); 1581 if (ret < 0) 1582 return ret; 1583 ··· 1599 { 1600 struct fw_ohci *ohci = fw_ohci(card); 1601 struct pci_dev *dev = to_pci_dev(card->device); 1602 + u32 lps, seconds, version, irqs; 1603 int i, ret; 1604 1605 if (software_reset(ohci)) { ··· 1635 OHCI1394_HCControl_noByteSwapData); 1636 1637 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); 1638 reg_write(ohci, OHCI1394_LinkControlSet, 1639 OHCI1394_LinkControl_rcvSelfID | 1640 + OHCI1394_LinkControl_rcvPhyPkt | 1641 OHCI1394_LinkControl_cycleTimerEnable | 1642 OHCI1394_LinkControl_cycleMaster); 1643 1644 reg_write(ohci, OHCI1394_ATRetries, 1645 OHCI1394_MAX_AT_REQ_RETRIES | 1646 (OHCI1394_MAX_AT_RESP_RETRIES << 4) | 1647 + (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) | 1648 + (200 << 16)); 1649 + 1650 + seconds = lower_32_bits(get_seconds()); 1651 + reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25); 1652 + ohci->bus_time = seconds & ~0x3f; 1653 + 1654 + version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 1655 + if (version >= OHCI_VERSION_1_1) { 1656 + reg_write(ohci, OHCI1394_InitialChannelsAvailableHi, 1657 + 0xfffffffe); 1658 + card->broadcast_channel_auto_allocated = true; 1659 + } 1660 + 1661 + /* Get implemented bits of the priority arbitration request counter. 
*/ 1662 + reg_write(ohci, OHCI1394_FairnessControl, 0x3f); 1663 + ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f; 1664 + reg_write(ohci, OHCI1394_FairnessControl, 0); 1665 + card->priority_budget_implemented = ohci->pri_req_max != 0; 1666 1667 ar_context_run(&ohci->ar_request_ctx); 1668 ar_context_run(&ohci->ar_response_ctx); ··· 1653 reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); 1654 reg_write(ohci, OHCI1394_IntEventClear, ~0); 1655 reg_write(ohci, OHCI1394_IntMaskClear, ~0); 1656 1657 ret = configure_1394a_enhancements(ohci); 1658 if (ret < 0) ··· 1719 1720 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); 1721 1722 + if (!(ohci->quirks & QUIRK_NO_MSI)) 1723 + pci_enable_msi(dev); 1724 if (request_irq(dev->irq, irq_handler, 1725 + pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, 1726 + ohci_driver_name, ohci)) { 1727 + fw_error("Failed to allocate interrupt %d.\n", dev->irq); 1728 + pci_disable_msi(dev); 1729 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1730 ohci->config_rom, ohci->config_rom_bus); 1731 return -EIO; 1732 } 1733 + 1734 + irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete | 1735 + OHCI1394_RQPkt | OHCI1394_RSPkt | 1736 + OHCI1394_isochTx | OHCI1394_isochRx | 1737 + OHCI1394_postedWriteErr | 1738 + OHCI1394_selfIDComplete | 1739 + OHCI1394_regAccessFail | 1740 + OHCI1394_cycle64Seconds | 1741 + OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong | 1742 + OHCI1394_masterIntEnable; 1743 + if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) 1744 + irqs |= OHCI1394_busReset; 1745 + reg_write(ohci, OHCI1394_IntMaskSet, irqs); 1746 1747 reg_write(ohci, OHCI1394_HCControlSet, 1748 OHCI1394_HCControl_linkEnable | 1749 OHCI1394_HCControl_BIBimageValid); 1750 flush_writes(ohci); 1751 1752 + /* We are ready to go, reset bus to finish initialization. */ 1753 + fw_schedule_bus_reset(&ohci->card, false, true); 1754 1755 return 0; 1756 } ··· 1813 * takes effect. 1814 */ 1815 if (ret == 0) 1816 + fw_schedule_bus_reset(&ohci->card, true, true); 1817 else 1818 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1819 next_config_rom, next_config_rom_bus); ··· 1903 #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ 1904 } 1905 1906 + static u32 ohci_read_csr(struct fw_card *card, int csr_offset) 1907 { 1908 struct fw_ohci *ohci = fw_ohci(card); 1909 + unsigned long flags; 1910 + u32 value; 1911 1912 + switch (csr_offset) { 1913 + case CSR_STATE_CLEAR: 1914 + case CSR_STATE_SET: 1915 + if (ohci->is_root && 1916 + (reg_read(ohci, OHCI1394_LinkControlSet) & 1917 + OHCI1394_LinkControl_cycleMaster)) 1918 + value = CSR_STATE_BIT_CMSTR; 1919 + else 1920 + value = 0; 1921 + if (ohci->csr_state_setclear_abdicate) 1922 + value |= CSR_STATE_BIT_ABDICATE; 1923 1924 + return value; 1925 + 1926 + case CSR_NODE_IDS: 1927 + return reg_read(ohci, OHCI1394_NodeID) << 16; 1928 + 1929 + case CSR_CYCLE_TIME: 1930 + return get_cycle_time(ohci); 1931 + 1932 + case CSR_BUS_TIME: 1933 + /* 1934 + * We might be called just after the cycle timer has wrapped 1935 + * around but just before the cycle64Seconds handler, so we 1936 + * better check here, too, if the bus time needs to be updated. 
1937 + */ 1938 + spin_lock_irqsave(&ohci->lock, flags); 1939 + value = update_bus_time(ohci); 1940 + spin_unlock_irqrestore(&ohci->lock, flags); 1941 + return value; 1942 + 1943 + case CSR_BUSY_TIMEOUT: 1944 + value = reg_read(ohci, OHCI1394_ATRetries); 1945 + return (value >> 4) & 0x0ffff00f; 1946 + 1947 + case CSR_PRIORITY_BUDGET: 1948 + return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) | 1949 + (ohci->pri_req_max << 8); 1950 + 1951 + default: 1952 + WARN_ON(1); 1953 + return 0; 1954 } 1955 + } 1956 1957 + static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) 1958 + { 1959 + struct fw_ohci *ohci = fw_ohci(card); 1960 + unsigned long flags; 1961 + 1962 + switch (csr_offset) { 1963 + case CSR_STATE_CLEAR: 1964 + if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { 1965 + reg_write(ohci, OHCI1394_LinkControlClear, 1966 + OHCI1394_LinkControl_cycleMaster); 1967 + flush_writes(ohci); 1968 + } 1969 + if (value & CSR_STATE_BIT_ABDICATE) 1970 + ohci->csr_state_setclear_abdicate = false; 1971 + break; 1972 + 1973 + case CSR_STATE_SET: 1974 + if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { 1975 + reg_write(ohci, OHCI1394_LinkControlSet, 1976 + OHCI1394_LinkControl_cycleMaster); 1977 + flush_writes(ohci); 1978 + } 1979 + if (value & CSR_STATE_BIT_ABDICATE) 1980 + ohci->csr_state_setclear_abdicate = true; 1981 + break; 1982 + 1983 + case CSR_NODE_IDS: 1984 + reg_write(ohci, OHCI1394_NodeID, value >> 16); 1985 + flush_writes(ohci); 1986 + break; 1987 + 1988 + case CSR_CYCLE_TIME: 1989 + reg_write(ohci, OHCI1394_IsochronousCycleTimer, value); 1990 + reg_write(ohci, OHCI1394_IntEventSet, 1991 + OHCI1394_cycleInconsistent); 1992 + flush_writes(ohci); 1993 + break; 1994 + 1995 + case CSR_BUS_TIME: 1996 + spin_lock_irqsave(&ohci->lock, flags); 1997 + ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f); 1998 + spin_unlock_irqrestore(&ohci->lock, flags); 1999 + break; 2000 + 2001 + case CSR_BUSY_TIMEOUT: 2002 + value = (value & 0xf) | ((value & 0xf) << 4) | 2003 + ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4); 2004 + reg_write(ohci, OHCI1394_ATRetries, value); 2005 + flush_writes(ohci); 2006 + break; 2007 + 2008 + case CSR_PRIORITY_BUDGET: 2009 + reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f); 2010 + flush_writes(ohci); 2011 + break; 2012 + 2013 + default: 2014 + WARN_ON(1); 2015 + break; 2016 + } 2017 } 2018 2019 static void copy_iso_headers(struct iso_context *ctx, void *p) ··· 1992 __le32 *ir_header; 1993 void *p; 1994 1995 + for (pd = d; pd <= last; pd++) 1996 if (pd->transfer_status) 1997 break; 1998 if (pd > last) 1999 /* Descriptor(s) not done yet, stop iteration */ 2000 return 0; ··· 2005 2006 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { 2007 ir_header = (__le32 *) p; 2008 + ctx->base.callback.sc(&ctx->base, 2009 + le32_to_cpu(ir_header[0]) & 0xffff, 2010 + ctx->header_length, ctx->header, 2011 + ctx->base.callback_data); 2012 ctx->header_length = 0; 2013 } 2014 + 2015 + return 1; 2016 + } 2017 + 2018 + /* d == last because each descriptor block is only a single descriptor. 
*/ 2019 + static int handle_ir_buffer_fill(struct context *context, 2020 + struct descriptor *d, 2021 + struct descriptor *last) 2022 + { 2023 + struct iso_context *ctx = 2024 + container_of(context, struct iso_context, context); 2025 + 2026 + if (!last->transfer_status) 2027 + /* Descriptor(s) not done yet, stop iteration */ 2028 + return 0; 2029 + 2030 + if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) 2031 + ctx->base.callback.mc(&ctx->base, 2032 + le32_to_cpu(last->data_address) + 2033 + le16_to_cpu(last->req_count) - 2034 + le16_to_cpu(last->res_count), 2035 + ctx->base.callback_data); 2036 2037 return 1; 2038 } ··· 2040 ctx->header_length += 4; 2041 } 2042 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { 2043 + ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count), 2044 + ctx->header_length, ctx->header, 2045 + ctx->base.callback_data); 2046 ctx->header_length = 0; 2047 } 2048 return 1; 2049 + } 2050 + 2051 + static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels) 2052 + { 2053 + u32 hi = channels >> 32, lo = channels; 2054 + 2055 + reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi); 2056 + reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo); 2057 + reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi); 2058 + reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo); 2059 + mmiowb(); 2060 + ohci->mc_channels = channels; 2061 } 2062 2063 static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, 2064 int type, int channel, size_t header_size) 2065 { 2066 struct fw_ohci *ohci = fw_ohci(card); 2067 + struct iso_context *uninitialized_var(ctx); 2068 + descriptor_callback_t uninitialized_var(callback); 2069 + u64 *uninitialized_var(channels); 2070 + u32 *uninitialized_var(mask), uninitialized_var(regs); 2071 unsigned long flags; 2072 + int index, ret = -EBUSY; 2073 2074 spin_lock_irqsave(&ohci->lock, flags); 2075 + 2076 + switch (type) { 2077 + case FW_ISO_CONTEXT_TRANSMIT: 2078 + mask = &ohci->it_context_mask; 2079 + callback = handle_it_packet; 2080 + index = ffs(*mask) - 1; 2081 + if (index >= 0) { 2082 + *mask &= ~(1 << index); 2083 + regs = OHCI1394_IsoXmitContextBase(index); 2084 + ctx = &ohci->it_context_list[index]; 2085 + } 2086 + break; 2087 + 2088 + case FW_ISO_CONTEXT_RECEIVE: 2089 + channels = &ohci->ir_context_channels; 2090 + mask = &ohci->ir_context_mask; 2091 + callback = handle_ir_packet_per_buffer; 2092 + index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; 2093 + if (index >= 0) { 2094 + *channels &= ~(1ULL << channel); 2095 + *mask &= ~(1 << index); 2096 + regs = OHCI1394_IsoRcvContextBase(index); 2097 + ctx = &ohci->ir_context_list[index]; 2098 + } 2099 + break; 2100 + 2101 + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 2102 + mask = &ohci->ir_context_mask; 2103 + callback = handle_ir_buffer_fill; 2104 + index = !ohci->mc_allocated ? 
ffs(*mask) - 1 : -1; 2105 + if (index >= 0) { 2106 + ohci->mc_allocated = true; 2107 + *mask &= ~(1 << index); 2108 + regs = OHCI1394_IsoRcvContextBase(index); 2109 + ctx = &ohci->ir_context_list[index]; 2110 + } 2111 + break; 2112 + 2113 + default: 2114 + index = -1; 2115 + ret = -ENOSYS; 2116 } 2117 + 2118 spin_unlock_irqrestore(&ohci->lock, flags); 2119 2120 if (index < 0) 2121 + return ERR_PTR(ret); 2122 2123 memset(ctx, 0, sizeof(*ctx)); 2124 ctx->header_length = 0; 2125 ctx->header = (void *) __get_free_page(GFP_KERNEL); 2126 + if (ctx->header == NULL) { 2127 + ret = -ENOMEM; 2128 goto out; 2129 + } 2130 ret = context_init(&ctx->context, ohci, regs, callback); 2131 if (ret < 0) 2132 goto out_with_header; 2133 + 2134 + if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) 2135 + set_multichannel_mask(ohci, 0); 2136 2137 return &ctx->base; 2138 ··· 2104 free_page((unsigned long)ctx->header); 2105 out: 2106 spin_lock_irqsave(&ohci->lock, flags); 2107 + 2108 + switch (type) { 2109 + case FW_ISO_CONTEXT_RECEIVE: 2110 + *channels |= 1ULL << channel; 2111 + break; 2112 + 2113 + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 2114 + ohci->mc_allocated = false; 2115 + break; 2116 + } 2117 *mask |= 1 << index; 2118 + 2119 spin_unlock_irqrestore(&ohci->lock, flags); 2120 2121 return ERR_PTR(ret); ··· 2115 { 2116 struct iso_context *ctx = container_of(base, struct iso_context, base); 2117 struct fw_ohci *ohci = ctx->context.ohci; 2118 + u32 control = IR_CONTEXT_ISOCH_HEADER, match; 2119 int index; 2120 2121 + switch (ctx->base.type) { 2122 + case FW_ISO_CONTEXT_TRANSMIT: 2123 index = ctx - ohci->it_context_list; 2124 match = 0; 2125 if (cycle >= 0) ··· 2128 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index); 2129 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); 2130 context_run(&ctx->context, match); 2131 + break; 2132 + 2133 + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 2134 + control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE; 2135 + /* fall through */ 2136 + case FW_ISO_CONTEXT_RECEIVE: 2137 index = ctx - ohci->ir_context_list; 2138 match = (tags << 28) | (sync << 8) | ctx->base.channel; 2139 if (cycle >= 0) { 2140 match |= (cycle & 0x07fff) << 12; ··· 2141 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); 2142 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); 2143 context_run(&ctx->context, control); 2144 + break; 2145 } 2146 2147 return 0; ··· 2152 struct iso_context *ctx = container_of(base, struct iso_context, base); 2153 int index; 2154 2155 + switch (ctx->base.type) { 2156 + case FW_ISO_CONTEXT_TRANSMIT: 2157 index = ctx - ohci->it_context_list; 2158 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); 2159 + break; 2160 + 2161 + case FW_ISO_CONTEXT_RECEIVE: 2162 + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 2163 index = ctx - ohci->ir_context_list; 2164 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); 2165 + break; 2166 } 2167 flush_writes(ohci); 2168 context_stop(&ctx->context); ··· 2178 2179 spin_lock_irqsave(&ohci->lock, flags); 2180 2181 + switch (base->type) { 2182 + case FW_ISO_CONTEXT_TRANSMIT: 2183 index = ctx - ohci->it_context_list; 2184 ohci->it_context_mask |= 1 << index; 2185 + break; 2186 + 2187 + case FW_ISO_CONTEXT_RECEIVE: 2188 index = ctx - ohci->ir_context_list; 2189 ohci->ir_context_mask |= 1 << index; 2190 ohci->ir_context_channels |= 1ULL << base->channel; 2191 + break; 2192 + 2193 + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 2194 + index = ctx - ohci->ir_context_list; 2195 + ohci->ir_context_mask |= 1 << index; 2196 + 
ohci->ir_context_channels |= ohci->mc_channels; 2197 + ohci->mc_channels = 0; 2198 + ohci->mc_allocated = false; 2199 + break; 2200 } 2201 2202 spin_unlock_irqrestore(&ohci->lock, flags); 2203 } 2204 2205 + static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels) 2206 { 2207 + struct fw_ohci *ohci = fw_ohci(base->card); 2208 + unsigned long flags; 2209 + int ret; 2210 + 2211 + switch (base->type) { 2212 + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 2213 + 2214 + spin_lock_irqsave(&ohci->lock, flags); 2215 + 2216 + /* Don't allow multichannel to grab other contexts' channels. */ 2217 + if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { 2218 + *channels = ohci->ir_context_channels; 2219 + ret = -EBUSY; 2220 + } else { 2221 + set_multichannel_mask(ohci, *channels); 2222 + ret = 0; 2223 + } 2224 + 2225 + spin_unlock_irqrestore(&ohci->lock, flags); 2226 + 2227 + break; 2228 + default: 2229 + ret = -EINVAL; 2230 + } 2231 + 2232 + return ret; 2233 + } 2234 + 2235 + static int queue_iso_transmit(struct iso_context *ctx, 2236 + struct fw_iso_packet *packet, 2237 + struct fw_iso_buffer *buffer, 2238 + unsigned long payload) 2239 + { 2240 struct descriptor *d, *last, *pd; 2241 struct fw_iso_packet *p; 2242 __le32 *header; ··· 2291 return 0; 2292 } 2293 2294 + static int queue_iso_packet_per_buffer(struct iso_context *ctx, 2295 + struct fw_iso_packet *packet, 2296 + struct fw_iso_buffer *buffer, 2297 + unsigned long payload) 2298 { 2299 struct descriptor *d, *pd; 2300 dma_addr_t d_bus, page_bus; 2301 u32 z, header_z, rest; 2302 int i, j, length; ··· 2308 * The OHCI controller puts the isochronous header and trailer in the 2309 * buffer, so we need at least 8 bytes. 2310 */ 2311 + packet_count = packet->header_length / ctx->base.header_size; 2312 header_size = max(ctx->base.header_size, (size_t)8); 2313 2314 /* Get header size in number of descriptors. */ 2315 header_z = DIV_ROUND_UP(header_size, sizeof(*d)); 2316 page = payload >> PAGE_SHIFT; 2317 offset = payload & ~PAGE_MASK; 2318 + payload_per_buffer = packet->payload_length / packet_count; 2319 2320 for (i = 0; i < packet_count; i++) { 2321 /* d points to the header descriptor */ ··· 2327 2328 d->control = cpu_to_le16(DESCRIPTOR_STATUS | 2329 DESCRIPTOR_INPUT_MORE); 2330 + if (packet->skip && i == 0) 2331 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); 2332 d->req_count = cpu_to_le16(header_size); 2333 d->res_count = d->req_count; ··· 2360 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | 2361 DESCRIPTOR_INPUT_LAST | 2362 DESCRIPTOR_BRANCH_ALWAYS); 2363 + if (packet->interrupt && i == packet_count - 1) 2364 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); 2365 2366 context_append(&ctx->context, d, z, header_z); 2367 + } 2368 + 2369 + return 0; 2370 + } 2371 + 2372 + static int queue_iso_buffer_fill(struct iso_context *ctx, 2373 + struct fw_iso_packet *packet, 2374 + struct fw_iso_buffer *buffer, 2375 + unsigned long payload) 2376 + { 2377 + struct descriptor *d; 2378 + dma_addr_t d_bus, page_bus; 2379 + int page, offset, rest, z, i, length; 2380 + 2381 + page = payload >> PAGE_SHIFT; 2382 + offset = payload & ~PAGE_MASK; 2383 + rest = packet->payload_length; 2384 + 2385 + /* We need one descriptor for each page in the buffer. 
*/ 2386 + z = DIV_ROUND_UP(offset + rest, PAGE_SIZE); 2387 + 2388 + if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count)) 2389 + return -EFAULT; 2390 + 2391 + for (i = 0; i < z; i++) { 2392 + d = context_get_descriptors(&ctx->context, 1, &d_bus); 2393 + if (d == NULL) 2394 + return -ENOMEM; 2395 + 2396 + d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | 2397 + DESCRIPTOR_BRANCH_ALWAYS); 2398 + if (packet->skip && i == 0) 2399 + d->control |= cpu_to_le16(DESCRIPTOR_WAIT); 2400 + if (packet->interrupt && i == z - 1) 2401 + d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); 2402 + 2403 + if (offset + rest < PAGE_SIZE) 2404 + length = rest; 2405 + else 2406 + length = PAGE_SIZE - offset; 2407 + d->req_count = cpu_to_le16(length); 2408 + d->res_count = d->req_count; 2409 + d->transfer_status = 0; 2410 + 2411 + page_bus = page_private(buffer->pages[page]); 2412 + d->data_address = cpu_to_le32(page_bus + offset); 2413 + 2414 + rest -= length; 2415 + offset = 0; 2416 + page++; 2417 + 2418 + context_append(&ctx->context, d, 1, 0); 2419 } 2420 2421 return 0; ··· 2376 { 2377 struct iso_context *ctx = container_of(base, struct iso_context, base); 2378 unsigned long flags; 2379 + int ret = -ENOSYS; 2380 2381 spin_lock_irqsave(&ctx->context.ohci->lock, flags); 2382 + switch (base->type) { 2383 + case FW_ISO_CONTEXT_TRANSMIT: 2384 + ret = queue_iso_transmit(ctx, packet, buffer, payload); 2385 + break; 2386 + case FW_ISO_CONTEXT_RECEIVE: 2387 + ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload); 2388 + break; 2389 + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 2390 + ret = queue_iso_buffer_fill(ctx, packet, buffer, payload); 2391 + break; 2392 + } 2393 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); 2394 2395 return ret; ··· 2391 2392 static const struct fw_card_driver ohci_driver = { 2393 .enable = ohci_enable, 2394 + .read_phy_reg = ohci_read_phy_reg, 2395 .update_phy_reg = ohci_update_phy_reg, 2396 .set_config_rom = ohci_set_config_rom, 2397 .send_request = ohci_send_request, 2398 .send_response = ohci_send_response, 2399 .cancel_packet = ohci_cancel_packet, 2400 .enable_phys_dma = ohci_enable_phys_dma, 2401 + .read_csr = ohci_read_csr, 2402 + .write_csr = ohci_write_csr, 2403 2404 .allocate_iso_context = ohci_allocate_iso_context, 2405 .free_iso_context = ohci_free_iso_context, 2406 + .set_iso_channels = ohci_set_iso_channels, 2407 .queue_iso = ohci_queue_iso, 2408 .start_iso = ohci_start_iso, 2409 .stop_iso = ohci_stop_iso, ··· 2465 pci_set_drvdata(dev, ohci); 2466 2467 spin_lock_init(&ohci->lock); 2468 + mutex_init(&ohci->phy_reg_mutex); 2469 2470 tasklet_init(&ohci->bus_reset_tasklet, 2471 bus_reset_tasklet, (unsigned long)ohci); ··· 2625 context_release(&ohci->at_response_ctx); 2626 kfree(ohci->it_context_list); 2627 kfree(ohci->ir_context_list); 2628 + pci_disable_msi(dev); 2629 pci_iounmap(dev, ohci->registers); 2630 pci_release_region(dev, 0); 2631 pci_disable_device(dev); ··· 2642 2643 software_reset(ohci); 2644 free_irq(dev->irq, ohci); 2645 + pci_disable_msi(dev); 2646 err = pci_save_state(dev); 2647 if (err) { 2648 fw_error("pci_save_state failed\n");
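Annotation: the cycle-timer comments in the ohci.c hunks above imply the CYCLE_TIME register layout — bits 31-25 cycleSeconds, bits 24-12 cycleCount, bits 11-0 cycleOffset, with 8000 cycles per second and 3072 ticks of the 24.576 MHz clock per cycle. Below is a minimal, self-contained user-space sketch of the same arithmetic as cycle_timer_ticks(), handy for sanity-checking raw register values; the sample value is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Same decoding as cycle_timer_ticks() in ohci.c:
     * bits 31-25 cycleSeconds, 24-12 cycleCount, 11-0 cycleOffset. */
    static uint32_t cycle_timer_to_ticks(uint32_t ct)
    {
            uint32_t ticks = ct & 0xfff;              /* cycleOffset */

            ticks += 3072 * ((ct >> 12) & 0x1fff);    /* 3072 ticks per cycle */
            ticks += 3072 * 8000 * (ct >> 25);        /* 8000 cycles per second */
            return ticks;
    }

    int main(void)
    {
            uint32_t ct = (3u << 25) | (1999u << 12) | 500u;  /* hypothetical */

            printf("sec=%u cycle=%u offset=%u -> %u ticks\n", ct >> 25,
                   (ct >> 12) & 0x1fff, ct & 0xfff, cycle_timer_to_ticks(ct));
            return 0;
    }

The QUIRK_CYCLE_TIMER loop depends on this tick count being monotonic: a negative delta, or two consecutive deltas where one is at least twice the other, marks a bad read and triggers a retry.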
+1
drivers/firewire/ohci.h
··· 60 #define OHCI1394_LinkControl_cycleSource (1 << 22) 61 #define OHCI1394_NodeID 0x0E8 62 #define OHCI1394_NodeID_idValid 0x80000000 63 #define OHCI1394_NodeID_nodeNumber 0x0000003f 64 #define OHCI1394_NodeID_busNumber 0x0000ffc0 65 #define OHCI1394_PhyControl 0x0EC
··· 60 #define OHCI1394_LinkControl_cycleSource (1 << 22) 61 #define OHCI1394_NodeID 0x0E8 62 #define OHCI1394_NodeID_idValid 0x80000000 63 + #define OHCI1394_NodeID_root 0x40000000 64 #define OHCI1394_NodeID_nodeNumber 0x0000003f 65 #define OHCI1394_NodeID_busNumber 0x0000ffc0 66 #define OHCI1394_PhyControl 0x0EC
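Annotation: the new OHCI1394_NodeID_root bit completes the set of NodeID fields that bus_reset_tasklet() consumes. A decoding sketch using only the definitions above; the register value is invented:

    u32 reg = 0xc0000041;   /* hypothetical NodeID readout */
    bool id_valid     = reg & OHCI1394_NodeID_idValid;
    bool is_root      = reg & OHCI1394_NodeID_root;
    unsigned int bus  = (reg & OHCI1394_NodeID_busNumber) >> 6;
    unsigned int node = reg & OHCI1394_NodeID_nodeNumber;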
+5 -8
drivers/firewire/sbp2.c
··· 410 411 static void sbp2_status_write(struct fw_card *card, struct fw_request *request, 412 int tcode, int destination, int source, 413 - int generation, int speed, 414 - unsigned long long offset, 415 void *payload, size_t length, void *callback_data) 416 { 417 struct sbp2_logical_unit *lu = callback_data; ··· 507 508 fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST, 509 node_id, generation, device->max_speed, offset, 510 - &orb->pointer, sizeof(orb->pointer), 511 - complete_transaction, orb); 512 } 513 514 static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) ··· 652 fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, 653 lu->tgt->node_id, lu->generation, device->max_speed, 654 lu->command_block_agent_address + SBP2_AGENT_RESET, 655 - &d, sizeof(d)); 656 } 657 658 static void complete_agent_reset_write_no_wait(struct fw_card *card, ··· 674 fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, 675 lu->tgt->node_id, lu->generation, device->max_speed, 676 lu->command_block_agent_address + SBP2_AGENT_RESET, 677 - &d, sizeof(d), complete_agent_reset_write_no_wait, t); 678 } 679 680 static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) ··· 864 865 fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, 866 lu->tgt->node_id, lu->generation, device->max_speed, 867 - CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, 868 - &d, sizeof(d)); 869 } 870 871 static void sbp2_reconnect(struct work_struct *work);
··· 410 411 static void sbp2_status_write(struct fw_card *card, struct fw_request *request, 412 int tcode, int destination, int source, 413 + int generation, unsigned long long offset, 414 void *payload, size_t length, void *callback_data) 415 { 416 struct sbp2_logical_unit *lu = callback_data; ··· 508 509 fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST, 510 node_id, generation, device->max_speed, offset, 511 + &orb->pointer, 8, complete_transaction, orb); 512 } 513 514 static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) ··· 654 fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, 655 lu->tgt->node_id, lu->generation, device->max_speed, 656 lu->command_block_agent_address + SBP2_AGENT_RESET, 657 + &d, 4); 658 } 659 660 static void complete_agent_reset_write_no_wait(struct fw_card *card, ··· 676 fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, 677 lu->tgt->node_id, lu->generation, device->max_speed, 678 lu->command_block_agent_address + SBP2_AGENT_RESET, 679 + &d, 4, complete_agent_reset_write_no_wait, t); 680 } 681 682 static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) ··· 866 867 fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, 868 lu->tgt->node_id, lu->generation, device->max_speed, 869 + CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, &d, 4); 870 } 871 872 static void sbp2_reconnect(struct work_struct *work);
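Annotation: the literal 4 and 8 that replace the sizeof() expressions are dictated by the wire format rather than by the C types — a TCODE_WRITE_QUADLET_REQUEST always carries exactly 4 bytes, and an SBP-2 ORB pointer is always two big-endian quadlets. A sketch of the layout behind the 8, assuming the driver's conventional sbp2_pointer definition:

    /* ORB_POINTER payload: two quadlets, 8 bytes on the wire. */
    struct sbp2_pointer {
            __be32 high;    /* node ID and upper 16 bits of the offset */
            __be32 low;     /* lower 32 bits of the offset */
    };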
+4 -14
drivers/ieee1394/dv1394.c
··· 172 173 static inline struct video_card* file_to_video_card(struct file *file) 174 { 175 - return (struct video_card*) file->private_data; 176 } 177 178 /*** FRAME METHODS *********************************************************/ ··· 610 } else { 611 612 u32 transmit_sec, transmit_cyc; 613 - u32 ts_cyc, ts_off; 614 615 /* DMA is stopped, so this is the very first frame */ 616 video->active_frame = this_frame; ··· 636 transmit_sec += transmit_cyc/8000; 637 transmit_cyc %= 8000; 638 639 - ts_off = ct_off; 640 ts_cyc = transmit_cyc + 3; 641 ts_cyc %= 8000; 642 ··· 1783 struct video_card *video = NULL; 1784 1785 if (file->private_data) { 1786 - video = (struct video_card*) file->private_data; 1787 1788 } else { 1789 /* look up the card by ID */ ··· 2003 2004 int sof=0; /* start-of-frame flag */ 2005 struct frame *f; 2006 - u16 packet_length, packet_time; 2007 int i, dbc=0; 2008 struct DMA_descriptor_block *block = NULL; 2009 u16 xferstatus; ··· 2023 sizeof(struct packet)); 2024 2025 packet_length = le16_to_cpu(p->data_length); 2026 - packet_time = le16_to_cpu(p->timestamp); 2027 - 2028 - irq_printk("received packet %02d, timestamp=%04x, length=%04x, sof=%02x%02x\n", video->current_packet, 2029 - packet_time, packet_length, 2030 - p->data[0], p->data[1]); 2031 2032 /* get the descriptor based on packet_buffer cursor */ 2033 f = video->frames[video->current_packet / MAX_PACKETS]; ··· 2314 2315 static void dv1394_host_reset(struct hpsb_host *host) 2316 { 2317 - struct ti_ohci *ohci; 2318 struct video_card *video = NULL, *tmp_vid; 2319 unsigned long flags; 2320 2321 /* We only work with the OHCI-1394 driver */ 2322 if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME)) 2323 return; 2324 - 2325 - ohci = (struct ti_ohci *)host->hostdata; 2326 - 2327 2328 /* find the corresponding video_cards */ 2329 spin_lock_irqsave(&dv1394_cards_lock, flags);
··· 172 173 static inline struct video_card* file_to_video_card(struct file *file) 174 { 175 + return file->private_data; 176 } 177 178 /*** FRAME METHODS *********************************************************/ ··· 610 } else { 611 612 u32 transmit_sec, transmit_cyc; 613 + u32 ts_cyc; 614 615 /* DMA is stopped, so this is the very first frame */ 616 video->active_frame = this_frame; ··· 636 transmit_sec += transmit_cyc/8000; 637 transmit_cyc %= 8000; 638 639 ts_cyc = transmit_cyc + 3; 640 ts_cyc %= 8000; 641 ··· 1784 struct video_card *video = NULL; 1785 1786 if (file->private_data) { 1787 + video = file->private_data; 1788 1789 } else { 1790 /* look up the card by ID */ ··· 2004 2005 int sof=0; /* start-of-frame flag */ 2006 struct frame *f; 2007 + u16 packet_length; 2008 int i, dbc=0; 2009 struct DMA_descriptor_block *block = NULL; 2010 u16 xferstatus; ··· 2024 sizeof(struct packet)); 2025 2026 packet_length = le16_to_cpu(p->data_length); 2027 2028 /* get the descriptor based on packet_buffer cursor */ 2029 f = video->frames[video->current_packet / MAX_PACKETS]; ··· 2320 2321 static void dv1394_host_reset(struct hpsb_host *host) 2322 { 2323 struct video_card *video = NULL, *tmp_vid; 2324 unsigned long flags; 2325 2326 /* We only work with the OHCI-1394 driver */ 2327 if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME)) 2328 return; 2329 2330 /* find the corresponding video_cards */ 2331 spin_lock_irqsave(&dv1394_cards_lock, flags);
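Annotation: the dropped casts work because C converts void * to any object pointer type implicitly; spelling the cast out is redundant and can mask a future type change in private_data. A minimal illustration, with the struct only as a stand-in:

    struct video_card *video;

    video = file->private_data;     /* implicit void * conversion, no cast */
    /* An explicit (struct video_card *) cast would still compile if
     * private_data ever held an incompatible pointer, hiding the bug. */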
-3
drivers/ieee1394/eth1394.c
··· 1258 char *buf; 1259 struct eth1394_host_info *hi; 1260 struct net_device *dev; 1261 - struct eth1394_priv *priv; 1262 unsigned int len; 1263 u32 specifier_id; 1264 u16 source_id; ··· 1286 specifier_id = (be32_to_cpu(data[0]) & 0xffff) << 8 | 1287 (be32_to_cpu(data[1]) & 0xff000000) >> 24; 1288 source_id = be32_to_cpu(data[0]) >> 16; 1289 - 1290 - priv = netdev_priv(dev); 1291 1292 if (info->channel != (iso->host->csr.broadcast_channel & 0x3f) 1293 || specifier_id != ETHER1394_GASP_SPECIFIER_ID) {
··· 1258 char *buf; 1259 struct eth1394_host_info *hi; 1260 struct net_device *dev; 1261 unsigned int len; 1262 u32 specifier_id; 1263 u16 source_id; ··· 1287 specifier_id = (be32_to_cpu(data[0]) & 0xffff) << 8 | 1288 (be32_to_cpu(data[1]) & 0xff000000) >> 24; 1289 source_id = be32_to_cpu(data[0]) >> 16; 1290 1291 if (info->channel != (iso->host->csr.broadcast_channel & 0x3f) 1292 || specifier_id != ETHER1394_GASP_SPECIFIER_ID) {
+3 -4
drivers/ieee1394/raw1394.c
··· 440 static ssize_t raw1394_read(struct file *file, char __user * buffer, 441 size_t count, loff_t * offset_is_ignored) 442 { 443 - struct file_info *fi = (struct file_info *)file->private_data; 444 struct pending_request *req; 445 ssize_t ret; 446 ··· 1015 struct arm_addr *arm_addr = NULL; 1016 struct arm_request *arm_req = NULL; 1017 struct arm_response *arm_resp = NULL; 1018 - int found = 0, size = 0, rcode = -1, length_conflict = 0; 1019 struct arm_request_response *arm_req_resp = NULL; 1020 1021 DBGMSG("arm_write called by node: %X " ··· 1054 } 1055 if (arm_addr->rec_length < length) { 1056 DBGMSG("arm_write blocklength too big -> rcode_data_error"); 1057 - length_conflict = 1; 1058 rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */ 1059 } 1060 if (rcode == -1) { ··· 2244 static ssize_t raw1394_write(struct file *file, const char __user * buffer, 2245 size_t count, loff_t * offset_is_ignored) 2246 { 2247 - struct file_info *fi = (struct file_info *)file->private_data; 2248 struct pending_request *req; 2249 ssize_t retval = -EBADFD; 2250
··· 440 static ssize_t raw1394_read(struct file *file, char __user * buffer, 441 size_t count, loff_t * offset_is_ignored) 442 { 443 + struct file_info *fi = file->private_data; 444 struct pending_request *req; 445 ssize_t ret; 446 ··· 1015 struct arm_addr *arm_addr = NULL; 1016 struct arm_request *arm_req = NULL; 1017 struct arm_response *arm_resp = NULL; 1018 + int found = 0, size = 0, rcode = -1; 1019 struct arm_request_response *arm_req_resp = NULL; 1020 1021 DBGMSG("arm_write called by node: %X " ··· 1054 } 1055 if (arm_addr->rec_length < length) { 1056 DBGMSG("arm_write blocklength too big -> rcode_data_error"); 1057 rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */ 1058 } 1059 if (rcode == -1) { ··· 2245 static ssize_t raw1394_write(struct file *file, const char __user * buffer, 2246 size_t count, loff_t * offset_is_ignored) 2247 { 2248 + struct file_info *fi = file->private_data; 2249 struct pending_request *req; 2250 ssize_t retval = -EBADFD; 2251
+4 -7
drivers/ieee1394/sbp2.c
··· 1350 struct csr1212_keyval *kv; 1351 struct csr1212_dentry *dentry; 1352 u64 management_agent_addr; 1353 - u32 unit_characteristics, firmware_revision, model; 1354 unsigned workarounds; 1355 int i; 1356 1357 management_agent_addr = 0; 1358 - unit_characteristics = 0; 1359 firmware_revision = SBP2_ROM_VALUE_MISSING; 1360 model = ud->flags & UNIT_DIRECTORY_MODEL_ID ? 1361 ud->model_id : SBP2_ROM_VALUE_MISSING; ··· 1371 lu->lun = ORB_SET_LUN(kv->value.immediate); 1372 break; 1373 1374 - case SBP2_UNIT_CHARACTERISTICS_KEY: 1375 - /* FIXME: This is ignored so far. 1376 - * See SBP-2 clause 7.4.8. */ 1377 - unit_characteristics = kv->value.immediate; 1378 - break; 1379 1380 case SBP2_FIRMWARE_REVISION_KEY: 1381 firmware_revision = kv->value.immediate; 1382 break; 1383 1384 default: 1385 /* FIXME: Check for SBP2_DEVICE_TYPE_AND_LUN_KEY. 1386 * Its "ordered" bit has consequences for command ORB 1387 * list handling. See SBP-2 clauses 4.6, 7.4.11, 10.2 */
··· 1350 struct csr1212_keyval *kv; 1351 struct csr1212_dentry *dentry; 1352 u64 management_agent_addr; 1353 + u32 firmware_revision, model; 1354 unsigned workarounds; 1355 int i; 1356 1357 management_agent_addr = 0; 1358 firmware_revision = SBP2_ROM_VALUE_MISSING; 1359 model = ud->flags & UNIT_DIRECTORY_MODEL_ID ? 1360 ud->model_id : SBP2_ROM_VALUE_MISSING; ··· 1372 lu->lun = ORB_SET_LUN(kv->value.immediate); 1373 break; 1374 1375 1376 case SBP2_FIRMWARE_REVISION_KEY: 1377 firmware_revision = kv->value.immediate; 1378 break; 1379 1380 default: 1381 + /* FIXME: Check for SBP2_UNIT_CHARACTERISTICS_KEY 1382 + * mgt_ORB_timeout and ORB_size, SBP-2 clause 7.4.8. */ 1383 + 1384 /* FIXME: Check for SBP2_DEVICE_TYPE_AND_LUN_KEY. 1385 * Its "ordered" bit has consequences for command ORB 1386 * list handling. See SBP-2 clauses 4.6, 7.4.11, 10.2 */
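Annotation: for reference on the new FIXME — SBP-2 clause 7.4.8 packs mgt_ORB_timeout into bits 8-15 of the unit characteristics immediate (in 500 ms units) and ORB_size into bits 0-7 (in quadlets). A decoding sketch under that assumption, mirroring what the newer drivers/firewire/sbp2.c does:

    unsigned int uc = kv->value.immediate;
    unsigned int mgt_orb_timeout_ms = ((uc >> 8) & 0xff) * 500;
    unsigned int orb_size_quadlets  = uc & 0xff;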
+6 -11
drivers/ieee1394/video1394.c
··· 720 static long video1394_ioctl(struct file *file, 721 unsigned int cmd, unsigned long arg) 722 { 723 - struct file_ctx *ctx = (struct file_ctx *)file->private_data; 724 struct ti_ohci *ohci = ctx->ohci; 725 unsigned long flags; 726 void __user *argp = (void __user *)arg; ··· 1045 if (get_user(qv, &p->packet_sizes)) 1046 return -EFAULT; 1047 1048 - psizes = kmalloc(buf_size, GFP_KERNEL); 1049 - if (!psizes) 1050 - return -ENOMEM; 1051 - 1052 - if (copy_from_user(psizes, qv, buf_size)) { 1053 - kfree(psizes); 1054 - return -EFAULT; 1055 - } 1056 } 1057 1058 spin_lock_irqsave(&d->lock,flags); ··· 1172 1173 static int video1394_mmap(struct file *file, struct vm_area_struct *vma) 1174 { 1175 - struct file_ctx *ctx = (struct file_ctx *)file->private_data; 1176 1177 if (ctx->current_ctx == NULL) { 1178 PRINT(KERN_ERR, ctx->ohci->host->id, ··· 1239 1240 static int video1394_release(struct inode *inode, struct file *file) 1241 { 1242 - struct file_ctx *ctx = (struct file_ctx *)file->private_data; 1243 struct ti_ohci *ohci = ctx->ohci; 1244 struct list_head *lh, *next; 1245 u64 mask;
··· 720 static long video1394_ioctl(struct file *file, 721 unsigned int cmd, unsigned long arg) 722 { 723 + struct file_ctx *ctx = file->private_data; 724 struct ti_ohci *ohci = ctx->ohci; 725 unsigned long flags; 726 void __user *argp = (void __user *)arg; ··· 1045 if (get_user(qv, &p->packet_sizes)) 1046 return -EFAULT; 1047 1048 + psizes = memdup_user(qv, buf_size); 1049 + if (IS_ERR(psizes)) 1050 + return PTR_ERR(psizes); 1051 } 1052 1053 spin_lock_irqsave(&d->lock,flags); ··· 1177 1178 static int video1394_mmap(struct file *file, struct vm_area_struct *vma) 1179 { 1180 + struct file_ctx *ctx = file->private_data; 1181 1182 if (ctx->current_ctx == NULL) { 1183 PRINT(KERN_ERR, ctx->ohci->host->id, ··· 1244 1245 static int video1394_release(struct inode *inode, struct file *file) 1246 { 1247 + struct file_ctx *ctx = file->private_data; 1248 struct ti_ohci *ohci = ctx->ohci; 1249 struct list_head *lh, *next; 1250 u64 mask;
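Annotation: memdup_user() folds the allocate/copy/unwind sequence into one call and reports failure as an ERR_PTR() (-ENOMEM or -EFAULT) instead of NULL, which is why the caller now uses IS_ERR()/PTR_ERR(). The open-coded equivalent it replaces, for comparison:

    psizes = kmalloc(buf_size, GFP_KERNEL);
    if (!psizes)
            return -ENOMEM;
    if (copy_from_user(psizes, qv, buf_size)) {
            kfree(psizes);
            return -EFAULT;
    }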
+2 -2
drivers/media/dvb/firewire/firedtv-fw.c
··· 194 195 static void handle_fcp(struct fw_card *card, struct fw_request *request, 196 int tcode, int destination, int source, int generation, 197 - int speed, unsigned long long offset, 198 - void *payload, size_t length, void *callback_data) 199 { 200 struct firedtv *f, *fdtv = NULL; 201 struct fw_device *device;
··· 194 195 static void handle_fcp(struct fw_card *card, struct fw_request *request, 196 int tcode, int destination, int source, int generation, 197 + unsigned long long offset, void *payload, size_t length, 198 + void *callback_data) 199 { 200 struct firedtv *f, *fdtv = NULL; 201 struct fw_device *device;
+405 -92
include/linux/firewire-cdev.h
··· 30 #include <linux/types.h> 31 #include <linux/firewire-constants.h> 32 33 - #define FW_CDEV_EVENT_BUS_RESET 0x00 34 - #define FW_CDEV_EVENT_RESPONSE 0x01 35 - #define FW_CDEV_EVENT_REQUEST 0x02 36 - #define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 37 - #define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04 38 - #define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05 39 40 /** 41 * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types ··· 74 * This event is sent when the bus the device belongs to goes through a bus 75 * reset. It provides information about the new bus configuration, such as 76 * new node ID for this device, new root ID, and others. 77 */ 78 struct fw_cdev_event_bus_reset { 79 __u64 closure; ··· 92 93 /** 94 * struct fw_cdev_event_response - Sent when a response packet was received 95 - * @closure: See &fw_cdev_event_common; 96 - * set by %FW_CDEV_IOC_SEND_REQUEST ioctl 97 * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_RESPONSE 98 * @rcode: Response code returned by the remote node 99 * @length: Data length, i.e. the response's payload size in bytes ··· 104 * sent by %FW_CDEV_IOC_SEND_REQUEST ioctl. The payload data for responses 105 * carrying data (read and lock responses) follows immediately and can be 106 * accessed through the @data field. 107 */ 108 struct fw_cdev_event_response { 109 __u64 closure; ··· 119 }; 120 121 /** 122 - * struct fw_cdev_event_request - Sent on incoming request to an address region 123 * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl 124 * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST 125 * @tcode: Transaction code of the incoming request 126 * @offset: The offset into the 48-bit per-node address space 127 * @handle: Reference to the kernel-side pending request 128 * @length: Data length, i.e. the request's payload size in bytes 129 * @data: Incoming data, if any ··· 171 * 172 * The payload data for requests carrying data (write and lock requests) 173 * follows immediately and can be accessed through the @data field. 174 */ 175 - struct fw_cdev_event_request { 176 __u64 closure; 177 __u32 type; 178 __u32 tcode; 179 __u64 offset; 180 __u32 handle; 181 __u32 length; 182 __u32 data[0]; ··· 222 * @header: Stripped headers, if any 223 * 224 * This event is sent when the controller has completed an &fw_cdev_iso_packet 225 - * with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers 226 - * stripped of all packets up until and including the interrupt packet are 227 - * returned in the @header field. The amount of header data per packet is as 228 - * specified at iso context creation by &fw_cdev_create_iso_context.header_size. 229 * 230 - * In version 1 of this ABI, header data consisted of the 1394 isochronous 231 - * packet header, followed by quadlets from the packet payload if 232 - * &fw_cdev_create_iso_context.header_size > 4. 233 * 234 - * In version 2 of this ABI, header data consist of the 1394 isochronous 235 - * packet header, followed by a timestamp quadlet if 236 - * &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the 237 - * packet payload if &fw_cdev_create_iso_context.header_size > 8. 238 * 239 * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2. 240 - * 241 - * Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel, 242 - * 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp: 243 - * 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte 244 - * order. 
245 */ 246 struct fw_cdev_event_iso_interrupt { 247 __u64 closure; ··· 266 __u32 cycle; 267 __u32 header_length; 268 __u32 header[0]; 269 }; 270 271 /** ··· 335 }; 336 337 /** 338 * union fw_cdev_event - Convenience union of fw_cdev_event_ types 339 - * @common: Valid for all types 340 - * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET 341 - * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE 342 - * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST 343 - * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT 344 - * @iso_resource: Valid if @common.type == 345 * %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or 346 * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 347 * 348 * Convenience union for userspace use. Events could be read(2) into an 349 * appropriately aligned char buffer and then cast to this union for further ··· 388 struct fw_cdev_event_bus_reset bus_reset; 389 struct fw_cdev_event_response response; 390 struct fw_cdev_event_request request; 391 struct fw_cdev_event_iso_interrupt iso_interrupt; 392 - struct fw_cdev_event_iso_resource iso_resource; 393 }; 394 395 /* available since kernel version 2.6.22 */ ··· 424 /* available since kernel version 2.6.34 */ 425 #define FW_CDEV_IOC_GET_CYCLE_TIMER2 _IOWR('#', 0x14, struct fw_cdev_get_cycle_timer2) 426 427 /* 428 - * FW_CDEV_VERSION History 429 * 1 (2.6.22) - initial version 430 * 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if 431 * &fw_cdev_create_iso_context.header_size is 8 or more 432 * (2.6.32) - added time stamp to xmit &fw_cdev_event_iso_interrupt 433 * (2.6.33) - IR has always packet-per-buffer semantics now, not one of 434 * dual-buffer or packet-per-buffer depending on hardware 435 * 3 (2.6.34) - made &fw_cdev_get_cycle_timer reliable 436 */ 437 - #define FW_CDEV_VERSION 3 438 439 /** 440 * struct fw_cdev_get_info - General purpose information ioctl 441 - * @version: The version field is just a running serial number. 442 - * We never break backwards compatibility, but may add more 443 - * structs and ioctls in later revisions. 444 * @rom_length: If @rom is non-zero, at most rom_length bytes of configuration 445 * ROM will be copied into that user space address. In either 446 * case, @rom_length is updated with the actual length of the ··· 530 }; 531 532 /** 533 - * struct fw_cdev_allocate - Allocate a CSR address range 534 * @offset: Start offset of the address range 535 * @closure: To be passed back to userspace in request events 536 - * @length: Length of the address range, in bytes 537 * @handle: Handle to the allocation, written by the kernel 538 * 539 * Allocate an address range in the 48-bit address space on the local node 540 * (the controller). This allows userspace to listen for requests with an 541 - * offset within that address range. When the kernel receives a request 542 - * within the range, an &fw_cdev_event_request event will be written back. 543 - * The @closure field is passed back to userspace in the response event. 544 * The @handle field is an out parameter, returning a handle to the allocated 545 * range to be used for later deallocation of the range. 546 * 547 * The address range is allocated on all local nodes. The address allocation 548 - * is exclusive except for the FCP command and response registers. 549 */ 550 struct fw_cdev_allocate { 551 __u64 offset; 552 __u64 closure; 553 __u32 length; 554 __u32 handle; 555 }; 556 557 /** ··· 593 * Initiate a bus reset for the bus this device is on. 
The bus reset can be 594 * either the original (long) bus reset or the arbitrated (short) bus reset 595 * introduced in 1394a-2000. 596 */ 597 struct fw_cdev_initiate_bus_reset { 598 - __u32 type; /* FW_CDEV_SHORT_RESET or FW_CDEV_LONG_RESET */ 599 }; 600 601 /** ··· 624 * 625 * @immediate, @key, and @data array elements are CPU-endian quadlets. 626 * 627 - * If successful, the kernel adds the descriptor and writes back a handle to the 628 - * kernel-side object to be used for later removal of the descriptor block and 629 - * immediate key. 630 * 631 * This ioctl affects the configuration ROMs of all local nodes. 632 * The ioctl only succeeds on device files which represent a local node. ··· 646 * descriptor was added 647 * 648 * Remove a descriptor block and accompanying immediate key from the local 649 - * nodes' configuration ROMs. 650 */ 651 struct fw_cdev_remove_descriptor { 652 __u32 handle; 653 }; 654 655 - #define FW_CDEV_ISO_CONTEXT_TRANSMIT 0 656 - #define FW_CDEV_ISO_CONTEXT_RECEIVE 1 657 658 /** 659 - * struct fw_cdev_create_iso_context - Create a context for isochronous IO 660 - * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE 661 - * @header_size: Header size to strip for receive contexts 662 - * @channel: Channel to bind to 663 - * @speed: Speed for transmit contexts 664 - * @closure: To be returned in &fw_cdev_event_iso_interrupt 665 * @handle: Handle to context, written back by kernel 666 * 667 * Prior to sending or receiving isochronous I/O, a context must be created. 668 * The context records information about the transmit or receive configuration 669 * and typically maps to an underlying hardware resource. A context is set up 670 * for either sending or receiving. It is bound to a specific isochronous 671 - * channel. 672 * 673 * If a context was successfully created, the kernel writes back a handle to the 674 * context, which must be passed in for subsequent operations on that context. 675 * 676 - * For receive contexts, @header_size must be at least 4 and must be a multiple 677 - * of 4. 678 - * 679 - * Note that the effect of a @header_size > 4 depends on 680 - * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt. 681 */ 682 struct fw_cdev_create_iso_context { 683 __u32 type; ··· 697 __u32 channel; 698 __u32 speed; 699 __u64 closure; 700 __u32 handle; 701 }; 702 ··· 726 727 /** 728 * struct fw_cdev_iso_packet - Isochronous packet 729 - * @control: Contains the header length (8 uppermost bits), the sy field 730 - * (4 bits), the tag field (2 bits), a sync flag (1 bit), 731 - * a skip flag (1 bit), an interrupt flag (1 bit), and the 732 * payload length (16 lowermost bits) 733 - * @header: Header and payload 734 * 735 * &struct fw_cdev_iso_packet is used to describe isochronous packet queues. 736 - * 737 * Use the FW_CDEV_ISO_ macros to fill in @control. 738 * 739 - * For transmit packets, the header length must be a multiple of 4 and specifies 740 - * the numbers of bytes in @header that will be prepended to the packet's 741 - * payload; these bytes are copied into the kernel and will not be accessed 742 - * after the ioctl has returned. The sy and tag fields are copied to the iso 743 - * packet header (these fields are specified by IEEE 1394a and IEC 61883-1). 744 - * The skip flag specifies that no packet is to be sent in a frame; when using 745 - * this, all other fields except the interrupt flag must be zero. 
746 * 747 - * For receive packets, the header length must be a multiple of the context's 748 - * header size; if the header length is larger than the context's header size, 749 - * multiple packets are queued for this entry. The sy and tag fields are 750 - * ignored. If the sync flag is set, the context drops all packets until 751 - * a packet with a matching sy field is received (the sync value to wait for is 752 - * specified in the &fw_cdev_start_iso structure). The payload length defines 753 - * how many payload bytes can be received for one packet (in addition to payload 754 - * quadlets that have been defined as headers and are stripped and returned in 755 - * the &fw_cdev_event_iso_interrupt structure). If more bytes are received, the 756 - * additional bytes are dropped. If less bytes are received, the remaining 757 - * bytes in this part of the payload buffer will not be written to, not even by 758 - * the next packet, i.e., packets received in consecutive frames will not 759 - * necessarily be consecutive in memory. If an entry has queued multiple 760 - * packets, the payload length is divided equally among them. 761 * 762 - * When a packet with the interrupt flag set has been completed, the 763 * &fw_cdev_event_iso_interrupt event will be sent. An entry that has queued 764 * multiple receive packets is completed when its last packet is completed. 765 */ 766 struct fw_cdev_iso_packet { 767 __u32 control; ··· 800 801 /** 802 * struct fw_cdev_queue_iso - Queue isochronous packets for I/O 803 - * @packets: Userspace pointer to packet data 804 * @data: Pointer into mmap()'ed payload buffer 805 - * @size: Size of packet data in bytes 806 * @handle: Isochronous context handle 807 * 808 * Queue a number of isochronous packets for reception or transmission. ··· 815 * The kernel may or may not queue all packets, but will write back updated 816 * values of the @packets, @data and @size fields, so the ioctl can be 817 * resubmitted easily. 818 */ 819 struct fw_cdev_queue_iso { 820 __u64 packets; ··· 974 __u64 data; 975 __u32 generation; 976 __u32 speed; 977 }; 978 979 #endif /* _LINUX_FIREWIRE_CDEV_H */
··· 30 #include <linux/types.h> 31 #include <linux/firewire-constants.h> 32 33 + #define FW_CDEV_EVENT_BUS_RESET 0x00 34 + #define FW_CDEV_EVENT_RESPONSE 0x01 35 + #define FW_CDEV_EVENT_REQUEST 0x02 36 + #define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 37 + #define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04 38 + #define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05 39 + 40 + /* available since kernel version 2.6.36 */ 41 + #define FW_CDEV_EVENT_REQUEST2 0x06 42 + #define FW_CDEV_EVENT_PHY_PACKET_SENT 0x07 43 + #define FW_CDEV_EVENT_PHY_PACKET_RECEIVED 0x08 44 + #define FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL 0x09 45 46 /** 47 * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types ··· 68 * This event is sent when the bus the device belongs to goes through a bus 69 * reset. It provides information about the new bus configuration, such as 70 * new node ID for this device, new root ID, and others. 71 + * 72 + * If @bm_node_id is 0xffff right after bus reset it can be reread by an 73 + * %FW_CDEV_IOC_GET_INFO ioctl after bus manager selection was finished. 74 + * Kernels with ABI version < 4 do not set @bm_node_id. 75 */ 76 struct fw_cdev_event_bus_reset { 77 __u64 closure; ··· 82 83 /** 84 * struct fw_cdev_event_response - Sent when a response packet was received 85 + * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_REQUEST 86 + * or %FW_CDEV_IOC_SEND_BROADCAST_REQUEST 87 + * or %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl 88 * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_RESPONSE 89 * @rcode: Response code returned by the remote node 90 * @length: Data length, i.e. the response's payload size in bytes ··· 93 * sent by %FW_CDEV_IOC_SEND_REQUEST ioctl. The payload data for responses 94 * carrying data (read and lock responses) follows immediately and can be 95 * accessed through the @data field. 96 + * 97 + * The event is also generated after conclusions of transactions that do not 98 + * involve response packets. This includes unified write transactions, 99 + * broadcast write transactions, and transmission of asynchronous stream 100 + * packets. @rcode indicates success or failure of such transmissions. 101 */ 102 struct fw_cdev_event_response { 103 __u64 closure; ··· 103 }; 104 105 /** 106 + * struct fw_cdev_event_request - Old version of &fw_cdev_event_request2 107 * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl 108 * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST 109 + * @tcode: See &fw_cdev_event_request2 110 + * @offset: See &fw_cdev_event_request2 111 + * @handle: See &fw_cdev_event_request2 112 + * @length: See &fw_cdev_event_request2 113 + * @data: See &fw_cdev_event_request2 114 + * 115 + * This event is sent instead of &fw_cdev_event_request2 if the kernel or 116 + * the client implements ABI version <= 3. 117 + * 118 + * Unlike &fw_cdev_event_request2, the sender identity cannot be established, 119 + * broadcast write requests cannot be distinguished from unicast writes, and 120 + * @tcode of lock requests is %TCODE_LOCK_REQUEST. 121 + * 122 + * Requests to the FCP_REQUEST or FCP_RESPONSE register are responded to as 123 + * with &fw_cdev_event_request2, except in kernel 2.6.32 and older which send 124 + * the response packet of the client's %FW_CDEV_IOC_SEND_RESPONSE ioctl. 
125 + */ 126 + struct fw_cdev_event_request { 127 + __u64 closure; 128 + __u32 type; 129 + __u32 tcode; 130 + __u64 offset; 131 + __u32 handle; 132 + __u32 length; 133 + __u32 data[0]; 134 + }; 135 + 136 + /** 137 + * struct fw_cdev_event_request2 - Sent on incoming request to an address region 138 + * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl 139 + * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST2 140 * @tcode: Transaction code of the incoming request 141 * @offset: The offset into the 48-bit per-node address space 142 + * @source_node_id: Sender node ID 143 + * @destination_node_id: Destination node ID 144 + * @card: The index of the card from which the request came 145 + * @generation: Bus generation in which the request is valid 146 * @handle: Reference to the kernel-side pending request 147 * @length: Data length, i.e. the request's payload size in bytes 148 * @data: Incoming data, if any ··· 120 * 121 * The payload data for requests carrying data (write and lock requests) 122 * follows immediately and can be accessed through the @data field. 123 + * 124 + * Unlike &fw_cdev_event_request, @tcode of lock requests is one of the 125 + * firewire-core specific %TCODE_LOCK_MASK_SWAP...%TCODE_LOCK_VENDOR_DEPENDENT, 126 + * i.e. encodes the extended transaction code. 127 + * 128 + * @card may differ from &fw_cdev_get_info.card because requests are received 129 + * from all cards of the Linux host. @source_node_id, @destination_node_id, and 130 + * @generation pertain to that card. Destination node ID and bus generation may 131 + * therefore differ from the corresponding fields of the last 132 + * &fw_cdev_event_bus_reset. 133 + * 134 + * @destination_node_id may also differ from the current node ID because of a 135 + * non-local bus ID part or in case of a broadcast write request. Note, a 136 + * client must call an %FW_CDEV_IOC_SEND_RESPONSE ioctl even in case of a 137 + * broadcast write request; the kernel will then release the kernel-side pending 138 + * request but will not actually send a response packet. 139 + * 140 + * In case of a write request to FCP_REQUEST or FCP_RESPONSE, the kernel already 141 + * sent a write response immediately after the request was received; in this 142 + * case the client must still call an %FW_CDEV_IOC_SEND_RESPONSE ioctl to 143 + * release the kernel-side pending request, though another response won't be 144 + * sent. 145 + * 146 + * If the client subsequently needs to initiate requests to the sender node of 147 + * an &fw_cdev_event_request2, it needs to use a device file with matching 148 + * card index, node ID, and generation for outbound requests. 149 */ 150 + struct fw_cdev_event_request2 { 151 __u64 closure; 152 __u32 type; 153 __u32 tcode; 154 __u64 offset; 155 + __u32 source_node_id; 156 + __u32 destination_node_id; 157 + __u32 card; 158 + __u32 generation; 159 __u32 handle; 160 __u32 length; 161 __u32 data[0]; ··· 141 * @header: Stripped headers, if any 142 * 143 * This event is sent when the controller has completed an &fw_cdev_iso_packet 144 + * with the %FW_CDEV_ISO_INTERRUPT bit set. 145 * 146 + * Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT): 147 * 148 + * In version 3 and some implementations of version 2 of the ABI, &header_length 149 + * is a multiple of 4 and &header contains timestamps of all packets up until 150 + * the interrupt packet. The format of the timestamps is as described below for 151 + * isochronous reception. 
In version 1 of the ABI, &header_length was 0.
152 *
153 + * Isochronous receive events (context type %FW_CDEV_ISO_CONTEXT_RECEIVE):
154 + *
155 + * The headers stripped of all packets up until and including the interrupt
156 + * packet are returned in the @header field. The amount of header data per
157 + * packet is as specified at iso context creation by
158 + * &fw_cdev_create_iso_context.header_size.
159 + *
160 + * Hence, _interrupt.header_length / _context.header_size is the number of
161 + * packets received in this interrupt event. The client can now iterate
162 + * through the mmap()'ed DMA buffer according to this number of packets and
163 + * to the buffer sizes as the client specified in &fw_cdev_queue_iso.
164 + *
165 + * Since version 2 of this ABI, the portion for each packet in _interrupt.header
166 + * consists of the 1394 isochronous packet header, followed by a timestamp
167 + * quadlet if &fw_cdev_create_iso_context.header_size > 4, followed by quadlets
168 + * from the packet payload if &fw_cdev_create_iso_context.header_size > 8.
169 + *
170 + * Format of 1394 iso packet header: 16 bits data_length, 2 bits tag, 6 bits
171 + * channel, 4 bits tcode, 4 bits sy, in big endian byte order.
172 + * data_length is the actual received size of the packet without the four
173 + * 1394 iso packet header bytes.
174 + *
175 + * Format of timestamp: 16 bits invalid, 3 bits cycleSeconds, 13 bits
176 + * cycleCount, in big endian byte order.
177 + *
178 + * In version 1 of the ABI, no timestamp quadlet was inserted; instead, payload
179 + * data followed directly after the 1394 iso packet header if header_size > 4.
180 + * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2.
181 */
182 struct fw_cdev_event_iso_interrupt {
183 __u32 closure;
··· 168 __u32 cycle;
169 __u32 header_length;
170 __u32 header[0];
171 + };
172 +
173 + /**
174 + * struct fw_cdev_event_iso_interrupt_mc - An iso buffer chunk was completed
175 + * @closure: See &fw_cdev_event_common;
176 + * set by %FW_CDEV_CREATE_ISO_CONTEXT ioctl
177 + * @type: %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL
178 + * @completed: Offset into the receive buffer; data before this offset is valid
179 + *
180 + * This event is sent in multichannel contexts (context type
181 + * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL) for &fw_cdev_iso_packet buffer
182 + * chunks that have the %FW_CDEV_ISO_INTERRUPT bit set. Whether this happens
183 + * when a packet is completed and/or when a buffer chunk is completed depends
184 + * on the hardware implementation.
185 + *
186 + * The buffer is continuously filled with the following data, per packet:
187 + * - the 1394 iso packet header as described at &fw_cdev_event_iso_interrupt,
188 + * but in little endian byte order,
189 + * - packet payload (as many bytes as specified in the data_length field of
190 + * the 1394 iso packet header) in big endian byte order,
191 + * - 0...3 padding bytes as needed to align the following trailer quadlet,
192 + * - trailer quadlet, containing the reception timestamp as described at
193 + * &fw_cdev_event_iso_interrupt, but in little endian byte order.
194 + *
195 + * Hence the per-packet size is data_length (rounded up to a multiple of 4) + 8.
196 + * When processing the data, stop before a packet that would cross the
197 + * @completed offset.
198 + *
199 + * A packet near the end of a buffer chunk will typically spill over into the
200 + * next queued buffer chunk.
It is the responsibility of the client to check 201 + * for this condition, assemble a broken-up packet from its parts, and not to 202 + * re-queue any buffer chunks in which as yet unread packet parts reside. 203 + */ 204 + struct fw_cdev_event_iso_interrupt_mc { 205 + __u64 closure; 206 + __u32 type; 207 + __u32 completed; 208 }; 209 210 /** ··· 200 }; 201 202 /** 203 + * struct fw_cdev_event_phy_packet - A PHY packet was transmitted or received 204 + * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_PHY_PACKET 205 + * or %FW_CDEV_IOC_RECEIVE_PHY_PACKETS ioctl 206 + * @type: %FW_CDEV_EVENT_PHY_PACKET_SENT or %..._RECEIVED 207 + * @rcode: %RCODE_..., indicates success or failure of transmission 208 + * @length: Data length in bytes 209 + * @data: Incoming data 210 + * 211 + * If @type is %FW_CDEV_EVENT_PHY_PACKET_SENT, @length is 0 and @data empty, 212 + * except in case of a ping packet: Then, @length is 4, and @data[0] is the 213 + * ping time in 49.152MHz clocks if @rcode is %RCODE_COMPLETE. 214 + * 215 + * If @type is %FW_CDEV_EVENT_PHY_PACKET_RECEIVED, @length is 8 and @data 216 + * consists of the two PHY packet quadlets, in host byte order. 217 + */ 218 + struct fw_cdev_event_phy_packet { 219 + __u64 closure; 220 + __u32 type; 221 + __u32 rcode; 222 + __u32 length; 223 + __u32 data[0]; 224 + }; 225 + 226 + /** 227 * union fw_cdev_event - Convenience union of fw_cdev_event_ types 228 + * @common: Valid for all types 229 + * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET 230 + * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE 231 + * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST 232 + * @request2: Valid if @common.type == %FW_CDEV_EVENT_REQUEST2 233 + * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT 234 + * @iso_interrupt_mc: Valid if @common.type == 235 + * %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL 236 + * @iso_resource: Valid if @common.type == 237 * %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or 238 * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 239 + * @phy_packet: Valid if @common.type == 240 + * %FW_CDEV_EVENT_PHY_PACKET_SENT or 241 + * %FW_CDEV_EVENT_PHY_PACKET_RECEIVED 242 * 243 * Convenience union for userspace use. 
Events could be read(2) into an
244 * appropriately aligned char buffer and then cast to this union for further
··· 223 struct fw_cdev_event_bus_reset bus_reset;
224 struct fw_cdev_event_response response;
225 struct fw_cdev_event_request request;
226 + struct fw_cdev_event_request2 request2; /* added in 2.6.36 */
227 struct fw_cdev_event_iso_interrupt iso_interrupt;
228 + struct fw_cdev_event_iso_interrupt_mc iso_interrupt_mc; /* added in 2.6.36 */
229 + struct fw_cdev_event_iso_resource iso_resource; /* added in 2.6.30 */
230 + struct fw_cdev_event_phy_packet phy_packet; /* added in 2.6.36 */
231 };
232
233 /* available since kernel version 2.6.22 */
··· 256 /* available since kernel version 2.6.34 */
257 #define FW_CDEV_IOC_GET_CYCLE_TIMER2 _IOWR('#', 0x14, struct fw_cdev_get_cycle_timer2)
258
259 + /* available since kernel version 2.6.36 */
260 + #define FW_CDEV_IOC_SEND_PHY_PACKET _IOWR('#', 0x15, struct fw_cdev_send_phy_packet)
261 + #define FW_CDEV_IOC_RECEIVE_PHY_PACKETS _IOW('#', 0x16, struct fw_cdev_receive_phy_packets)
262 + #define FW_CDEV_IOC_SET_ISO_CHANNELS _IOW('#', 0x17, struct fw_cdev_set_iso_channels)
263 +
264 /*
265 + * ABI version history
266 * 1 (2.6.22) - initial version
267 + * (2.6.24) - added %FW_CDEV_IOC_GET_CYCLE_TIMER
268 * 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if
269 * &fw_cdev_create_iso_context.header_size is 8 or more
270 + * - added %FW_CDEV_IOC_*_ISO_RESOURCE*,
271 + * %FW_CDEV_IOC_GET_SPEED, %FW_CDEV_IOC_SEND_BROADCAST_REQUEST,
272 + * %FW_CDEV_IOC_SEND_STREAM_PACKET
273 * (2.6.32) - added time stamp to xmit &fw_cdev_event_iso_interrupt
274 * (2.6.33) - IR has always packet-per-buffer semantics now, not one of
275 * dual-buffer or packet-per-buffer depending on hardware
276 + * - shared use and auto-response for FCP registers
277 * 3 (2.6.34) - made &fw_cdev_get_cycle_timer reliable
278 + * - added %FW_CDEV_IOC_GET_CYCLE_TIMER2
279 + * 4 (2.6.36) - added %FW_CDEV_EVENT_REQUEST2, %FW_CDEV_EVENT_PHY_PACKET_*,
280 + * and &fw_cdev_allocate.region_end
281 + * - implemented &fw_cdev_event_bus_reset.bm_node_id
282 + * - added %FW_CDEV_IOC_SEND_PHY_PACKET, _RECEIVE_PHY_PACKETS
283 + * - added %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL,
284 + * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL, and
285 + * %FW_CDEV_IOC_SET_ISO_CHANNELS
286 */
287 + #define FW_CDEV_VERSION 3 /* Meaningless; don't use this macro. */
288
289 /**
290 * struct fw_cdev_get_info - General purpose information ioctl
291 + * @version: The version field is just a running serial number. Both an
292 + * input parameter (ABI version implemented by the client) and
293 + * output parameter (ABI version implemented by the kernel).
294 + * A client must fill in the actual ABI version for which it was
295 + * implemented, not an %FW_CDEV_VERSION taken from an included
296 + * kernel header file. This is necessary for forward
297 + * compatibility. We never break backwards compatibility, but
298 + * may add more structs, events, and ioctls in later revisions.
299 * @rom_length: If @rom is non-zero, at most rom_length bytes of configuration
300 * ROM will be copied into that user space address.
In either
301 * case, @rom_length is updated with the actual length of the
··· 339 };
340
341 /**
342 + * struct fw_cdev_allocate - Allocate a CSR in an address range
343 * @offset: Start offset of the address range
344 * @closure: To be passed back to userspace in request events
345 + * @length: Length of the CSR, in bytes
346 * @handle: Handle to the allocation, written by the kernel
347 + * @region_end: First address above the address range (added in ABI v4, 2.6.36)
348 *
349 * Allocate an address range in the 48-bit address space on the local node
350 * (the controller). This allows userspace to listen for requests with an
351 + * offset within that address range. Whenever the kernel receives a
352 + * request within the range, an &fw_cdev_event_request2 event will be emitted.
353 + * (If the kernel or the client implements ABI version <= 3, an
354 + * &fw_cdev_event_request will be generated instead.)
355 + *
356 + * The @closure field is passed back to userspace in these request events.
357 * The @handle field is an out parameter, returning a handle to the allocated
358 * range to be used for later deallocation of the range.
359 *
360 * The address range is allocated on all local nodes. The address allocation
361 + * is exclusive except for the FCP command and response registers. If an
362 + * exclusive address region is already in use, the ioctl fails with errno set
363 + * to %EBUSY.
364 + *
365 + * If the kernel and the client implement ABI version >= 4, the kernel looks up
366 + * a free spot of size @length inside [@offset..@region_end) and, if found,
367 + * writes the start address of the new CSR back in @offset. I.e. @offset is an
368 + * in and out parameter. If this automatic placement of a CSR in a bigger
369 + * address range is not desired, the client simply needs to set @region_end
370 + * = @offset + @length.
371 + *
372 + * If the kernel or the client implements ABI version <= 3, @region_end is
373 + * ignored and effectively assumed to be @offset + @length.
374 + *
375 + * @region_end is only present in a kernel header >= 2.6.36. If necessary,
376 + * this can be tested, for example, by #ifdef FW_CDEV_EVENT_REQUEST2.
377 */
378 struct fw_cdev_allocate {
379 __u64 offset;
380 __u64 closure;
381 __u32 length;
382 __u32 handle;
383 + __u64 region_end; /* available since kernel version 2.6.36 */
384 };
385
386 /**
··· 382 * Initiate a bus reset for the bus this device is on. The bus reset can be
383 * either the original (long) bus reset or the arbitrated (short) bus reset
384 * introduced in 1394a-2000.
385 + *
386 + * The ioctl returns immediately. A subsequent &fw_cdev_event_bus_reset
387 + * indicates when the reset actually happened. Since ABI v4, this may be
388 + * considerably later than the ioctl because the kernel ensures a grace period
389 + * between subsequent bus resets as per the IEEE 1394 bus management specification.
390 */
391 struct fw_cdev_initiate_bus_reset {
392 + __u32 type;
393 };
394
395 /**
··· 408 *
409 * @immediate, @key, and @data array elements are CPU-endian quadlets.
410 *
411 + * If successful, the kernel adds the descriptor and writes back a @handle to
412 + * the kernel-side object to be used for later removal of the descriptor block
413 + * and immediate key. The kernel will also generate a bus reset to signal the
414 + * change of the configuration ROM to other nodes.
415 + *
416 * This ioctl affects the configuration ROMs of all local nodes.
417 * The ioctl only succeeds on device files which represent a local node.
··· 429 * descriptor was added
430 *
431 * Remove a descriptor block and accompanying immediate key from the local
432 + * nodes' configuration ROMs. The kernel will also generate a bus reset to
433 + * signal the change of the configuration ROM to other nodes.
434 */
435 struct fw_cdev_remove_descriptor {
436 __u32 handle;
437 };
438
439 + #define FW_CDEV_ISO_CONTEXT_TRANSMIT 0
440 + #define FW_CDEV_ISO_CONTEXT_RECEIVE 1
441 + #define FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2 /* added in 2.6.36 */
442
443 /**
444 + * struct fw_cdev_create_iso_context - Create a context for isochronous I/O
445 + * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE or
446 + * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL
447 + * @header_size: Header size to strip in single-channel reception
448 + * @channel: Channel to bind to in single-channel reception or transmission
449 + * @speed: Transmission speed
450 + * @closure: To be returned in &fw_cdev_event_iso_interrupt or
451 + * &fw_cdev_event_iso_interrupt_multichannel
452 + * @handle: Handle to context, written back by kernel
453 + *
454 * Prior to sending or receiving isochronous I/O, a context must be created.
455 * The context records information about the transmit or receive configuration
456 * and typically maps to an underlying hardware resource. A context is set up
457 * for either sending or receiving. It is bound to a specific isochronous
458 + * @channel.
459 + *
460 + * In case of multichannel reception, @header_size and @channel are ignored
461 + * and the channels are selected by %FW_CDEV_IOC_SET_ISO_CHANNELS.
462 + *
463 + * For %FW_CDEV_ISO_CONTEXT_RECEIVE contexts, @header_size must be at least 4
464 + * and must be a multiple of 4. It is ignored in other context types.
465 + *
466 + * @speed is ignored in receive context types.
467 + *
468 * If a context was successfully created, the kernel writes back a handle to the
469 * context, which must be passed in for subsequent operations on that context.
470 *
471 + * Limitations:
472 + * No more than one iso context can be created per fd.
473 + * The total number of contexts that all userspace and kernelspace drivers can
474 + * create on a card at a time is a hardware limit, typically 4 or 8 contexts per
475 + * direction, and of them at most one multichannel receive context.
476 */
477 struct fw_cdev_create_iso_context {
478 __u32 type;
··· 468 __u32 channel;
469 __u32 speed;
470 __u64 closure;
471 + __u32 handle;
472 + };
473 +
474 + /**
475 + * struct fw_cdev_set_iso_channels - Select channels in multichannel reception
476 + * @channels: Bitmask of channels to listen to
477 + * @handle: Handle of the multichannel receive context
478 + *
479 + * @channels is the bitwise OR of 1ULL << n for each channel n to listen to.
480 + *
481 + * The ioctl fails with errno %EBUSY if there is already another receive context
482 + * on a channel in @channels. In that case, the bitmask of all unoccupied
483 + * channels is returned in @channels.
484 + */
485 + struct fw_cdev_set_iso_channels {
486 + __u64 channels;
487 __u32 handle;
488 };
489
··· 481
482 /**
483 * struct fw_cdev_iso_packet - Isochronous packet
484 + * @control: Contains the header length (8 uppermost bits),
485 + * the sy field (4 bits), the tag field (2 bits), a sync flag
486 + * or a skip flag (1 bit), an interrupt flag (1 bit), and the
487 + * payload length (16 lowermost bits)
488 + * @header: Header and payload in case of a transmit context.
489 + *
490 * &struct fw_cdev_iso_packet is used to describe isochronous packet queues.
491 * Use the FW_CDEV_ISO_ macros to fill in @control.
492 + * The @header array is empty in case of receive contexts.
493 *
494 + * Context type %FW_CDEV_ISO_CONTEXT_TRANSMIT:
495 *
496 + * @control.HEADER_LENGTH must be a multiple of 4. It specifies the number of
497 + * bytes in @header that will be prepended to the packet's payload. These bytes
498 + * are copied into the kernel and will not be accessed after the ioctl has
499 + * returned.
500 + *
501 + * The @control.SY and TAG fields are copied to the iso packet header. These
502 + * fields are specified by IEEE 1394a and IEC 61883-1.
503 + *
504 + * The @control.SKIP flag specifies that no packet is to be sent in a frame.
505 + * When using this, all other fields except @control.INTERRUPT must be zero.
506 + *
507 + * When a packet with the @control.INTERRUPT flag set has been completed, an
508 + * &fw_cdev_event_iso_interrupt event will be sent.
509 + *
510 + * Context type %FW_CDEV_ISO_CONTEXT_RECEIVE:
511 + *
512 + * @control.HEADER_LENGTH must be a multiple of the context's header_size.
513 + * If the HEADER_LENGTH is larger than the context's header_size, multiple
514 + * packets are queued for this entry.
515 + *
516 + * The @control.SY and TAG fields are ignored.
517 + *
518 + * If the @control.SYNC flag is set, the context drops all packets until a
519 + * packet with a sy field is received which matches &fw_cdev_start_iso.sync.
520 + *
521 + * @control.PAYLOAD_LENGTH defines how many payload bytes can be received for
522 + * one packet (in addition to payload quadlets that have been defined as headers
523 + * and are stripped and returned in the &fw_cdev_event_iso_interrupt structure).
524 + * If more bytes are received, the additional bytes are dropped. If fewer bytes
525 + * are received, the remaining bytes in this part of the payload buffer will not
526 + * be written to, not even by the next packet. I.e., packets received in
527 + * consecutive frames will not necessarily be consecutive in memory. If an
528 + * entry has queued multiple packets, the PAYLOAD_LENGTH is divided equally
529 + * among them.
530 + *
531 + * When a packet with the @control.INTERRUPT flag set has been completed, an
532 + * &fw_cdev_event_iso_interrupt event will be sent. An entry that has queued
533 + * multiple receive packets is completed when its last packet is completed.
534 + *
535 + * Context type %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
536 + *
537 + * Here, &fw_cdev_iso_packet would be more aptly named _iso_buffer_chunk since
538 + * it specifies a chunk of the mmap()'ed buffer, while the number and alignment
539 + * of packets to be placed into the buffer chunk is not known beforehand.
540 + *
541 + * @control.PAYLOAD_LENGTH is the size of the buffer chunk and specifies room
542 + * for header, payload, padding, and trailer bytes of one or more packets.
543 + * It must be a multiple of 4.
544 + *
545 + * @control.HEADER_LENGTH, TAG and SY are ignored. SYNC is treated as described
546 + * for single-channel reception.
547 + *
548 + * When a buffer chunk with the @control.INTERRUPT flag set has been filled
549 + * entirely, an &fw_cdev_event_iso_interrupt_mc event will be sent.
550 */ 551 struct fw_cdev_iso_packet { 552 __u32 control; ··· 525 526 /** 527 * struct fw_cdev_queue_iso - Queue isochronous packets for I/O 528 + * @packets: Userspace pointer to an array of &fw_cdev_iso_packet 529 * @data: Pointer into mmap()'ed payload buffer 530 + * @size: Size of the @packets array, in bytes 531 * @handle: Isochronous context handle 532 * 533 * Queue a number of isochronous packets for reception or transmission. ··· 540 * The kernel may or may not queue all packets, but will write back updated 541 * values of the @packets, @data and @size fields, so the ioctl can be 542 * resubmitted easily. 543 + * 544 + * In case of a multichannel receive context, @data must be quadlet-aligned 545 + * relative to the buffer start. 546 */ 547 struct fw_cdev_queue_iso { 548 __u64 packets; ··· 696 __u64 data; 697 __u32 generation; 698 __u32 speed; 699 + }; 700 + 701 + /** 702 + * struct fw_cdev_send_phy_packet - send a PHY packet 703 + * @closure: Passed back to userspace in the PHY-packet-sent event 704 + * @data: First and second quadlet of the PHY packet 705 + * @generation: The bus generation where packet is valid 706 + * 707 + * The %FW_CDEV_IOC_SEND_PHY_PACKET ioctl sends a PHY packet to all nodes 708 + * on the same card as this device. After transmission, an 709 + * %FW_CDEV_EVENT_PHY_PACKET_SENT event is generated. 710 + * 711 + * The payload @data[] shall be specified in host byte order. Usually, 712 + * @data[1] needs to be the bitwise inverse of @data[0]. VersaPHY packets 713 + * are an exception to this rule. 714 + * 715 + * The ioctl is only permitted on device files which represent a local node. 716 + */ 717 + struct fw_cdev_send_phy_packet { 718 + __u64 closure; 719 + __u32 data[2]; 720 + __u32 generation; 721 + }; 722 + 723 + /** 724 + * struct fw_cdev_receive_phy_packets - start reception of PHY packets 725 + * @closure: Passed back to userspace in phy packet events 726 + * 727 + * This ioctl activates issuing of %FW_CDEV_EVENT_PHY_PACKET_RECEIVED due to 728 + * incoming PHY packets from any node on the same bus as the device. 729 + * 730 + * The ioctl is only permitted on device files which represent a local node. 731 + */ 732 + struct fw_cdev_receive_phy_packets { 733 + __u64 closure; 734 }; 735 736 #endif /* _LINUX_FIREWIRE_CDEV_H */
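To see the new pieces of this ABI working together, here is a minimal userspace sketch of an ABI v4 address-range handler. The device path and CSR offsets are illustrative assumptions; %FW_CDEV_IOC_SEND_RESPONSE and struct fw_cdev_send_response come from the part of this header that predates the patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/firewire-cdev.h>
	#include <linux/firewire-constants.h>

	int main(void)
	{
		union { union fw_cdev_event e; char buf[16 * 1024]; } u;
		struct fw_cdev_get_info info;
		struct fw_cdev_allocate range;
		struct fw_cdev_send_response rsp;
		int fd;

		fd = open("/dev/fw0", O_RDWR);	/* hypothetical device file */
		if (fd < 0)
			return 1;

		memset(&info, 0, sizeof(info));
		info.version = 4;		/* the ABI version we implement */
		if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0)
			return 1;

		memset(&range, 0, sizeof(range));
		range.offset     = 0xfffff0010000ULL;	/* hypothetical placement window */
		range.length     = 0x40;
		range.region_end = 0xfffff0020000ULL;	/* let the kernel pick a free spot */
		if (ioctl(fd, FW_CDEV_IOC_ALLOCATE, &range) < 0)
			return 1;
		printf("CSR allocated at offset 0x%llx\n",
		       (unsigned long long)range.offset);

		while (read(fd, &u, sizeof(u)) > 0) {
			if (u.e.common.type != FW_CDEV_EVENT_REQUEST2)
				continue;
			/* a SEND_RESPONSE ioctl is mandatory even for broadcast
			 * and FCP writes, to release the kernel-side request */
			memset(&rsp, 0, sizeof(rsp));
			rsp.rcode  = RCODE_COMPLETE;
			rsp.handle = u.e.request2.handle;
			ioctl(fd, FW_CDEV_IOC_SEND_RESPONSE, &rsp);
		}
		return 0;
	}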
+44 -18
include/linux/firewire.h
··· 32 #define CSR_CYCLE_TIME 0x200 33 #define CSR_BUS_TIME 0x204 34 #define CSR_BUSY_TIMEOUT 0x210 35 #define CSR_BUS_MANAGER_ID 0x21c 36 #define CSR_BANDWIDTH_AVAILABLE 0x220 37 #define CSR_CHANNELS_AVAILABLE 0x224 38 #define CSR_CHANNELS_AVAILABLE_HI 0x224 39 #define CSR_CHANNELS_AVAILABLE_LO 0x228 40 #define CSR_BROADCAST_CHANNEL 0x234 41 #define CSR_CONFIG_ROM 0x400 42 #define CSR_CONFIG_ROM_END 0x800 ··· 91 struct list_head transaction_list; 92 unsigned long reset_jiffies; 93 94 unsigned long long guid; 95 unsigned max_receive; 96 int link_speed; ··· 111 bool beta_repeaters_present; 112 113 int index; 114 - 115 struct list_head link; 116 117 - /* Work struct for BM duties. */ 118 - struct delayed_work work; 119 int bm_retries; 120 int bm_generation; 121 __be32 bm_transaction_data[2]; 122 123 bool broadcast_channel_allocated; 124 u32 broadcast_channel; 125 __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; 126 }; 127 128 struct fw_attribute_group { ··· 269 typedef void (*fw_address_callback_t)(struct fw_card *card, 270 struct fw_request *request, 271 int tcode, int destination, int source, 272 - int generation, int speed, 273 unsigned long long offset, 274 void *data, size_t length, 275 void *callback_data); ··· 286 u32 timestamp; 287 288 /* 289 - * This callback is called when the packet transmission has 290 - * completed; for successful transmission, the status code is 291 - * the ack received from the destination, otherwise it's a 292 - * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO. 293 * The callback can be called from tasklet context and thus 294 * must never block. 295 */ ··· 372 * scatter-gather streaming (e.g. assembling video frame automatically). 373 */ 374 struct fw_iso_packet { 375 - u16 payload_length; /* Length of indirect payload. */ 376 - u32 interrupt:1; /* Generate interrupt on this packet */ 377 - u32 skip:1; /* Set to not send packet at all. */ 378 - u32 tag:2; 379 - u32 sy:4; 380 - u32 header_length:8; /* Length of immediate header. */ 381 - u32 header[0]; 382 }; 383 384 - #define FW_ISO_CONTEXT_TRANSMIT 0 385 - #define FW_ISO_CONTEXT_RECEIVE 1 386 387 #define FW_ISO_CONTEXT_MATCH_TAG0 1 388 #define FW_ISO_CONTEXT_MATCH_TAG1 2 ··· 408 int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, 409 int page_count, enum dma_data_direction direction); 410 void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); 411 412 struct fw_iso_context; 413 typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, 414 u32 cycle, size_t header_length, 415 void *header, void *data); 416 struct fw_iso_context { 417 struct fw_card *card; 418 int type; 419 int channel; 420 int speed; 421 size_t header_size; 422 - fw_iso_callback_t callback; 423 void *callback_data; 424 }; 425 426 struct fw_iso_context *fw_iso_context_create(struct fw_card *card, 427 int type, int channel, int speed, size_t header_size, 428 fw_iso_callback_t callback, void *callback_data); 429 int fw_iso_context_queue(struct fw_iso_context *ctx, 430 struct fw_iso_packet *packet, 431 struct fw_iso_buffer *buffer,
··· 32 #define CSR_CYCLE_TIME 0x200 33 #define CSR_BUS_TIME 0x204 34 #define CSR_BUSY_TIMEOUT 0x210 35 + #define CSR_PRIORITY_BUDGET 0x218 36 #define CSR_BUS_MANAGER_ID 0x21c 37 #define CSR_BANDWIDTH_AVAILABLE 0x220 38 #define CSR_CHANNELS_AVAILABLE 0x224 39 #define CSR_CHANNELS_AVAILABLE_HI 0x224 40 #define CSR_CHANNELS_AVAILABLE_LO 0x228 41 + #define CSR_MAINT_UTILITY 0x230 42 #define CSR_BROADCAST_CHANNEL 0x234 43 #define CSR_CONFIG_ROM 0x400 44 #define CSR_CONFIG_ROM_END 0x800 ··· 89 struct list_head transaction_list; 90 unsigned long reset_jiffies; 91 92 + u32 split_timeout_hi; 93 + u32 split_timeout_lo; 94 + unsigned int split_timeout_cycles; 95 + unsigned int split_timeout_jiffies; 96 + 97 unsigned long long guid; 98 unsigned max_receive; 99 int link_speed; ··· 104 bool beta_repeaters_present; 105 106 int index; 107 struct list_head link; 108 109 + struct list_head phy_receiver_list; 110 + 111 + struct delayed_work br_work; /* bus reset job */ 112 + bool br_short; 113 + 114 + struct delayed_work bm_work; /* bus manager job */ 115 int bm_retries; 116 int bm_generation; 117 __be32 bm_transaction_data[2]; 118 + int bm_node_id; 119 + bool bm_abdicate; 120 + 121 + bool priority_budget_implemented; /* controller feature */ 122 + bool broadcast_channel_auto_allocated; /* controller feature */ 123 124 bool broadcast_channel_allocated; 125 u32 broadcast_channel; 126 __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; 127 + 128 + __be32 maint_utility_register; 129 }; 130 131 struct fw_attribute_group { ··· 252 typedef void (*fw_address_callback_t)(struct fw_card *card, 253 struct fw_request *request, 254 int tcode, int destination, int source, 255 + int generation, 256 unsigned long long offset, 257 void *data, size_t length, 258 void *callback_data); ··· 269 u32 timestamp; 270 271 /* 272 + * This callback is called when the packet transmission has completed. 273 + * For successful transmission, the status code is the ack received 274 + * from the destination. Otherwise it is one of the juju-specific 275 + * rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK. 276 * The callback can be called from tasklet context and thus 277 * must never block. 278 */ ··· 355 * scatter-gather streaming (e.g. assembling video frame automatically). 356 */ 357 struct fw_iso_packet { 358 + u16 payload_length; /* Length of indirect payload */ 359 + u32 interrupt:1; /* Generate interrupt on this packet */ 360 + u32 skip:1; /* tx: Set to not send packet at all */ 361 + /* rx: Sync bit, wait for matching sy */ 362 + u32 tag:2; /* tx: Tag in packet header */ 363 + u32 sy:4; /* tx: Sy in packet header */ 364 + u32 header_length:8; /* Length of immediate header */ 365 + u32 header[0]; /* tx: Top of 1394 isoch. 
data_block */ 366 }; 367 368 + #define FW_ISO_CONTEXT_TRANSMIT 0 369 + #define FW_ISO_CONTEXT_RECEIVE 1 370 + #define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2 371 372 #define FW_ISO_CONTEXT_MATCH_TAG0 1 373 #define FW_ISO_CONTEXT_MATCH_TAG1 2 ··· 389 int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, 390 int page_count, enum dma_data_direction direction); 391 void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); 392 + size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed); 393 394 struct fw_iso_context; 395 typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, 396 u32 cycle, size_t header_length, 397 void *header, void *data); 398 + typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context, 399 + dma_addr_t completed, void *data); 400 struct fw_iso_context { 401 struct fw_card *card; 402 int type; 403 int channel; 404 int speed; 405 size_t header_size; 406 + union { 407 + fw_iso_callback_t sc; 408 + fw_iso_mc_callback_t mc; 409 + } callback; 410 void *callback_data; 411 }; 412 413 struct fw_iso_context *fw_iso_context_create(struct fw_card *card, 414 int type, int channel, int speed, size_t header_size, 415 fw_iso_callback_t callback, void *callback_data); 416 + int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels); 417 int fw_iso_context_queue(struct fw_iso_context *ctx, 418 struct fw_iso_packet *packet, 419 struct fw_iso_buffer *buffer,
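The split_timeout_* fields added to struct fw_card cache the SPLIT_TIMEOUT CSR in three representations. A sketch consistent with the field names above (an illustration, not a quote of core-transaction.c) of how the core can derive the cycle and jiffies values from the register halves:

	static void update_split_timeout(struct fw_card *card)
	{
		unsigned int cycles;

		/* _hi counts seconds (8000 isochronous cycles each); _lo keeps
		 * the additional cycle count in its 13 most significant bits */
		cycles = card->split_timeout_hi * 8000 +
			 (card->split_timeout_lo >> 19);

		/* clamp to a sane range: 100 ms minimum, 3 s maximum */
		cycles = clamp(cycles, 800u, 3u * 8000u);

		card->split_timeout_cycles = cycles;
		card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
	}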
+19
tools/firewire/Makefile
···
··· 1 + prefix = /usr 2 + nosy-dump-version = 0.4 3 + 4 + CC = gcc 5 + 6 + all : nosy-dump 7 + 8 + nosy-dump : CFLAGS = -Wall -O2 -g 9 + nosy-dump : CPPFLAGS = -DVERSION=\"$(nosy-dump-version)\" -I../../drivers/firewire 10 + nosy-dump : LDFLAGS = -g 11 + nosy-dump : LDLIBS = -lpopt 12 + 13 + nosy-dump : nosy-dump.o decode-fcp.o 14 + 15 + clean : 16 + rm -rf *.o nosy-dump 17 + 18 + install : 19 + install nosy-dump $(prefix)/bin/nosy-dump
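nosy-dump is the full-featured client of the snoop device; stripped to its core, a capture loop looks like the sketch below. The NOSY_IOC_START and NOSY_IOC_STOP names are assumed from drivers/firewire/nosy-user.h, which the -I flag above makes visible:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#include "nosy-user.h"

	int main(void)
	{
		unsigned char buf[4096];
		ssize_t length;
		int fd;

		fd = open("/dev/nosy", O_RDONLY);
		if (fd < 0)
			return 1;

		ioctl(fd, NOSY_IOC_START);	/* start snooping bus traffic */
		while ((length = read(fd, buf, sizeof(buf))) > 0)
			printf("captured a %zd byte packet\n", length);
		ioctl(fd, NOSY_IOC_STOP);

		return 0;
	}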
+213
tools/firewire/decode-fcp.c
···
··· 1 + #include <linux/firewire-constants.h> 2 + #include <stdio.h> 3 + #include <stdlib.h> 4 + 5 + #include "list.h" 6 + #include "nosy-dump.h" 7 + 8 + #define CSR_FCP_COMMAND 0xfffff0000b00ull 9 + #define CSR_FCP_RESPONSE 0xfffff0000d00ull 10 + 11 + static const char * const ctype_names[] = { 12 + [0x0] = "control", [0x8] = "not implemented", 13 + [0x1] = "status", [0x9] = "accepted", 14 + [0x2] = "specific inquiry", [0xa] = "rejected", 15 + [0x3] = "notify", [0xb] = "in transition", 16 + [0x4] = "general inquiry", [0xc] = "stable", 17 + [0x5] = "(reserved 0x05)", [0xd] = "changed", 18 + [0x6] = "(reserved 0x06)", [0xe] = "(reserved 0x0e)", 19 + [0x7] = "(reserved 0x07)", [0xf] = "interim", 20 + }; 21 + 22 + static const char * const subunit_type_names[] = { 23 + [0x00] = "monitor", [0x10] = "(reserved 0x10)", 24 + [0x01] = "audio", [0x11] = "(reserved 0x11)", 25 + [0x02] = "printer", [0x12] = "(reserved 0x12)", 26 + [0x03] = "disc", [0x13] = "(reserved 0x13)", 27 + [0x04] = "tape recorder/player",[0x14] = "(reserved 0x14)", 28 + [0x05] = "tuner", [0x15] = "(reserved 0x15)", 29 + [0x06] = "ca", [0x16] = "(reserved 0x16)", 30 + [0x07] = "camera", [0x17] = "(reserved 0x17)", 31 + [0x08] = "(reserved 0x08)", [0x18] = "(reserved 0x18)", 32 + [0x09] = "panel", [0x19] = "(reserved 0x19)", 33 + [0x0a] = "bulletin board", [0x1a] = "(reserved 0x1a)", 34 + [0x0b] = "camera storage", [0x1b] = "(reserved 0x1b)", 35 + [0x0c] = "(reserved 0x0c)", [0x1c] = "vendor unique", 36 + [0x0d] = "(reserved 0x0d)", [0x1d] = "all subunit types", 37 + [0x0e] = "(reserved 0x0e)", [0x1e] = "subunit_type extended to next byte", 38 + [0x0f] = "(reserved 0x0f)", [0x1f] = "unit", 39 + }; 40 + 41 + struct avc_enum { 42 + int value; 43 + const char *name; 44 + }; 45 + 46 + struct avc_field { 47 + const char *name; /* Short name for field. */ 48 + int offset; /* Location of field, specified in bits; */ 49 + /* negative means from end of packet. */ 50 + int width; /* Width of field, 0 means use data_length. 
*/ 51 + struct avc_enum *names; 52 + }; 53 + 54 + struct avc_opcode_info { 55 + const char *name; 56 + struct avc_field fields[8]; 57 + }; 58 + 59 + struct avc_enum power_field_names[] = { 60 + { 0x70, "on" }, 61 + { 0x60, "off" }, 62 + { } 63 + }; 64 + 65 + static const struct avc_opcode_info opcode_info[256] = { 66 + 67 + /* TA Document 1999026 */ 68 + /* AV/C Digital Interface Command Set General Specification 4.0 */ 69 + [0xb2] = { "power", { 70 + { "state", 0, 8, power_field_names } 71 + } 72 + }, 73 + [0x30] = { "unit info", { 74 + { "foo", 0, 8 }, 75 + { "unit_type", 8, 5 }, 76 + { "unit", 13, 3 }, 77 + { "company id", 16, 24 }, 78 + } 79 + }, 80 + [0x31] = { "subunit info" }, 81 + [0x01] = { "reserve" }, 82 + [0xb0] = { "version" }, 83 + [0x00] = { "vendor dependent" }, 84 + [0x02] = { "plug info" }, 85 + [0x12] = { "channel usage" }, 86 + [0x24] = { "connect" }, 87 + [0x20] = { "connect av" }, 88 + [0x22] = { "connections" }, 89 + [0x11] = { "digital input" }, 90 + [0x10] = { "digital output" }, 91 + [0x25] = { "disconnect" }, 92 + [0x21] = { "disconnect av" }, 93 + [0x19] = { "input plug signal format" }, 94 + [0x18] = { "output plug signal format" }, 95 + [0x1f] = { "general bus setup" }, 96 + 97 + /* TA Document 1999025 */ 98 + /* AV/C Descriptor Mechanism Specification Version 1.0 */ 99 + [0x0c] = { "create descriptor" }, 100 + [0x08] = { "open descriptor" }, 101 + [0x09] = { "read descriptor" }, 102 + [0x0a] = { "write descriptor" }, 103 + [0x05] = { "open info block" }, 104 + [0x06] = { "read info block" }, 105 + [0x07] = { "write info block" }, 106 + [0x0b] = { "search descriptor" }, 107 + [0x0d] = { "object number select" }, 108 + 109 + /* TA Document 1999015 */ 110 + /* AV/C Command Set for Rate Control of Isochronous Data Flow 1.0 */ 111 + [0xb3] = { "rate", { 112 + { "subfunction", 0, 8 }, 113 + { "result", 8, 8 }, 114 + { "plug_type", 16, 8 }, 115 + { "plug_id", 16, 8 }, 116 + } 117 + }, 118 + 119 + /* TA Document 1999008 */ 120 + /* AV/C Audio Subunit Specification 1.0 */ 121 + [0xb8] = { "function block" }, 122 + 123 + /* TA Document 2001001 */ 124 + /* AV/C Panel Subunit Specification 1.1 */ 125 + [0x7d] = { "gui update" }, 126 + [0x7e] = { "push gui data" }, 127 + [0x7f] = { "user action" }, 128 + [0x7c] = { "pass through" }, 129 + 130 + /* */ 131 + [0x26] = { "asynchronous connection" }, 132 + }; 133 + 134 + struct avc_frame { 135 + uint32_t operand0:8; 136 + uint32_t opcode:8; 137 + uint32_t subunit_id:3; 138 + uint32_t subunit_type:5; 139 + uint32_t ctype:4; 140 + uint32_t cts:4; 141 + }; 142 + 143 + static void 144 + decode_avc(struct link_transaction *t) 145 + { 146 + struct avc_frame *frame = 147 + (struct avc_frame *) t->request->packet.write_block.data; 148 + const struct avc_opcode_info *info; 149 + const char *name; 150 + char buffer[32]; 151 + int i; 152 + 153 + info = &opcode_info[frame->opcode]; 154 + if (info->name == NULL) { 155 + snprintf(buffer, sizeof(buffer), 156 + "(unknown opcode 0x%02x)", frame->opcode); 157 + name = buffer; 158 + } else { 159 + name = info->name; 160 + } 161 + 162 + printf("av/c %s, subunit_type=%s, subunit_id=%d, opcode=%s", 163 + ctype_names[frame->ctype], subunit_type_names[frame->subunit_type], 164 + frame->subunit_id, name); 165 + 166 + for (i = 0; info->fields[i].name != NULL; i++) 167 + printf(", %s", info->fields[i].name); 168 + 169 + printf("\n"); 170 + } 171 + 172 + int 173 + decode_fcp(struct link_transaction *t) 174 + { 175 + struct avc_frame *frame = 176 + (struct avc_frame *) t->request->packet.write_block.data; 
177 + unsigned long long offset =
178 + ((unsigned long long) t->request->packet.common.offset_high << 32) |
179 + t->request->packet.common.offset_low;
180 +
181 + if (t->request->packet.common.tcode != TCODE_WRITE_BLOCK_REQUEST)
182 + return 0;
183 +
184 + if (offset == CSR_FCP_COMMAND || offset == CSR_FCP_RESPONSE) {
185 + switch (frame->cts) {
186 + case 0x00:
187 + decode_avc(t);
188 + break;
189 + case 0x01:
190 + printf("cal fcp frame (cts=0x01)\n");
191 + break;
192 + case 0x02:
193 + printf("ehs fcp frame (cts=0x02)\n");
194 + break;
195 + case 0x03:
196 + printf("havi fcp frame (cts=0x03)\n");
197 + break;
198 + case 0x0e:
199 + printf("vendor specific fcp frame (cts=0x0e)\n");
200 + break;
201 + case 0x0f:
202 + printf("extended cts\n");
203 + break;
204 + default:
205 + printf("reserved fcp frame (cts=0x%02x)\n", frame->cts);
206 + break;
207 + }
208 + return 1;
209 + }
210 +
211 + return 0;
212 + }
213 +
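The avc_frame overlay relies on the capture code handing it quadlets already byte-swapped to host order, and on a little-endian host for the bitfield layout. Under those assumptions, an AV/C "power on" control frame (FCP bytes 00 ff b2 70 on the wire) decodes as in this self-contained sketch, which re-declares the struct for illustration:

	#include <stdint.h>
	#include <stdio.h>

	struct avc_frame {
		uint32_t operand0:8;
		uint32_t opcode:8;
		uint32_t subunit_id:3;
		uint32_t subunit_type:5;
		uint32_t ctype:4;
		uint32_t cts:4;
	};

	int main(void)
	{
		/* wire bytes 00 ff b2 70 as a host-order quadlet */
		uint32_t quadlet = 0x00ffb270;
		struct avc_frame *f = (struct avc_frame *)&quadlet;

		/* prints cts=0 ctype=0x0 subunit_type=0x1f opcode=0xb2
		 * operand0=0x70: per the tables above, a "control" "power"
		 * "on" command addressed to the unit */
		printf("cts=%u ctype=0x%x subunit_type=0x%x opcode=0x%02x operand0=0x%02x\n",
		       f->cts, f->ctype, f->subunit_type, f->opcode, f->operand0);
		return 0;
	}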
+62
tools/firewire/list.h
···
··· 1 + struct list { 2 + struct list *next, *prev; 3 + }; 4 + 5 + static inline void 6 + list_init(struct list *list) 7 + { 8 + list->next = list; 9 + list->prev = list; 10 + } 11 + 12 + static inline int 13 + list_empty(struct list *list) 14 + { 15 + return list->next == list; 16 + } 17 + 18 + static inline void 19 + list_insert(struct list *link, struct list *new_link) 20 + { 21 + new_link->prev = link->prev; 22 + new_link->next = link; 23 + new_link->prev->next = new_link; 24 + new_link->next->prev = new_link; 25 + } 26 + 27 + static inline void 28 + list_append(struct list *list, struct list *new_link) 29 + { 30 + list_insert((struct list *)list, new_link); 31 + } 32 + 33 + static inline void 34 + list_prepend(struct list *list, struct list *new_link) 35 + { 36 + list_insert(list->next, new_link); 37 + } 38 + 39 + static inline void 40 + list_remove(struct list *link) 41 + { 42 + link->prev->next = link->next; 43 + link->next->prev = link->prev; 44 + } 45 + 46 + #define list_entry(link, type, member) \ 47 + ((type *)((char *)(link)-(unsigned long)(&((type *)0)->member))) 48 + 49 + #define list_head(list, type, member) \ 50 + list_entry((list)->next, type, member) 51 + 52 + #define list_tail(list, type, member) \ 53 + list_entry((list)->prev, type, member) 54 + 55 + #define list_next(elm, member) \ 56 + list_entry((elm)->member.next, typeof(*elm), member) 57 + 58 + #define list_for_each_entry(pos, list, member) \ 59 + for (pos = list_head(list, typeof(*pos), member); \ 60 + &pos->member != (list); \ 61 + pos = list_next(pos, member)) 62 +
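These are the usual intrusive doubly linked list helpers: the link node is embedded in the client struct, and list_entry() recovers the containing object. A small self-contained usage sketch (hypothetical struct, not taken from nosy-dump):

	#include <stdio.h>

	#include "list.h"

	struct item {
		struct list link;	/* embedded link node */
		int value;
	};

	int main(void)
	{
		struct list queue;
		struct item a = { .value = 1 }, b = { .value = 2 };
		struct item *i;

		list_init(&queue);
		list_append(&queue, &a.link);	/* list_append() inserts at the tail */
		list_append(&queue, &b.link);

		list_for_each_entry(i, &queue, link)
			printf("%d\n", i->value);	/* prints 1, then 2 */

		return 0;
	}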
+1031
tools/firewire/nosy-dump.c
···
···
1 + /*
2 + * nosy-dump - Interface to snoop mode driver for TI PCILynx 1394 controllers
3 + * Copyright (C) 2002-2006 Kristian Høgsberg
4 + *
5 + * This program is free software; you can redistribute it and/or modify
6 + * it under the terms of the GNU General Public License as published by
7 + * the Free Software Foundation; either version 2 of the License, or
8 + * (at your option) any later version.
9 + *
10 + * This program is distributed in the hope that it will be useful,
11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 + * GNU General Public License for more details.
14 + *
15 + * You should have received a copy of the GNU General Public License
16 + * along with this program; if not, write to the Free Software Foundation,
17 + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 + */
19 +
20 + #include <byteswap.h>
21 + #include <endian.h>
22 + #include <fcntl.h>
23 + #include <linux/firewire-constants.h>
24 + #include <poll.h>
25 + #include <popt.h>
26 + #include <signal.h>
27 + #include <stdio.h>
28 + #include <stdlib.h>
29 + #include <string.h>
30 + #include <sys/ioctl.h>
31 + #include <sys/time.h>
32 + #include <termios.h>
33 + #include <unistd.h>
34 +
35 + #include "list.h"
36 + #include "nosy-dump.h"
37 + #include "nosy-user.h"
38 +
39 + enum {
40 + PACKET_FIELD_DETAIL = 0x01,
41 + PACKET_FIELD_DATA_LENGTH = 0x02,
42 + /* Marks the fields we print in transaction view. */
43 + PACKET_FIELD_TRANSACTION = 0x04,
44 + };
45 +
46 + static void print_packet(uint32_t *data, size_t length);
47 + static void decode_link_packet(struct link_packet *packet, size_t length,
48 + int include_flags, int exclude_flags);
49 + static volatile sig_atomic_t run = 1;
50 + sig_t sys_sigint_handler;
51 +
52 + static char *option_nosy_device = "/dev/nosy";
53 + static char *option_view = "packet";
54 + static char *option_output;
55 + static char *option_input;
56 + static int option_hex;
57 + static int option_iso;
58 + static int option_cycle_start;
59 + static int option_version;
60 + static int option_verbose;
61 +
62 + enum {
63 + VIEW_TRANSACTION,
64 + VIEW_PACKET,
65 + VIEW_STATS,
66 + };
67 +
68 + static const struct poptOption options[] = {
69 + {
70 + .longName = "device",
71 + .shortName = 'd',
72 + .argInfo = POPT_ARG_STRING,
73 + .arg = &option_nosy_device,
74 + .descrip = "Path to nosy device.",
75 + .argDescrip = "DEVICE"
76 + },
77 + {
78 + .longName = "view",
79 + .argInfo = POPT_ARG_STRING,
80 + .arg = &option_view,
81 + .descrip = "Specify view of bus traffic: packet, transaction or stats.",
82 + .argDescrip = "VIEW"
83 + },
84 + {
85 + .longName = "hex",
86 + .shortName = 'x',
87 + .argInfo = POPT_ARG_NONE,
88 + .arg = &option_hex,
89 + .descrip = "Print each packet in hex.",
90 + },
91 + {
92 + .longName = "iso",
93 + .argInfo = POPT_ARG_NONE,
94 + .arg = &option_iso,
95 + .descrip = "Print iso packets.",
96 + },
97 + {
98 + .longName = "cycle-start",
99 + .argInfo = POPT_ARG_NONE,
100 + .arg = &option_cycle_start,
101 + .descrip = "Print cycle start packets.",
102 + },
103 + {
104 + .longName = "verbose",
105 + .shortName = 'v',
106 + .argInfo = POPT_ARG_NONE,
107 + .arg = &option_verbose,
108 + .descrip = "Verbose packet view.",
109 + },
110 + {
111 + .longName = "output",
112 + .shortName = 'o',
113 + .argInfo = POPT_ARG_STRING,
114 + .arg = &option_output,
115 + .descrip = "Log to output file.",
116 + .argDescrip = "FILENAME"
117 + },
118 + {
119 + .longName = "input",
120 + .shortName = 'i',
121 + .argInfo = POPT_ARG_STRING,
122 + .arg = &option_input,
123 + .descrip = "Decode log from file.",
124 + .argDescrip = "FILENAME"
125 + },
126 + {
127 + .longName = "version",
128 + .argInfo = POPT_ARG_NONE,
129 + .arg = &option_version,
130 + .descrip = "Print version info.",
131 + },
132 + POPT_AUTOHELP
133 + POPT_TABLEEND
134 + };
135 +
136 + /* Allow all ^C except the first to interrupt the program in the usual way. */
137 + static void
138 + sigint_handler(int signal_num)
139 + {
140 + if (run == 1) {
141 + run = 0;
142 + signal(SIGINT, SIG_DFL);
143 + }
144 + }
145 +
146 + static struct subaction *
147 + subaction_create(uint32_t *data, size_t length)
148 + {
149 + struct subaction *sa;
150 +
151 + /* We put the ack in the subaction struct for easy access. */
152 + sa = malloc(sizeof *sa - sizeof sa->packet + length);
153 + sa->ack = data[length / 4 - 1];
154 + sa->length = length;
155 + memcpy(&sa->packet, data, length);
156 +
157 + return sa;
158 + }
159 +
160 + static void
161 + subaction_destroy(struct subaction *sa)
162 + {
163 + free(sa);
164 + }
165 +
166 + static struct list pending_transaction_list = {
167 + &pending_transaction_list, &pending_transaction_list
168 + };
169 +
170 + static struct link_transaction *
171 + link_transaction_lookup(int request_node, int response_node, int tlabel)
172 + {
173 + struct link_transaction *t;
174 +
175 + list_for_each_entry(t, &pending_transaction_list, link) {
176 + if (t->request_node == request_node &&
177 + t->response_node == response_node &&
178 + t->tlabel == tlabel)
179 + return t;
180 + }
181 +
182 + t = calloc(1, sizeof *t);
183 + t->request_node = request_node;
184 + t->response_node = response_node;
185 + t->tlabel = tlabel;
186 + list_init(&t->request_list);
187 + list_init(&t->response_list);
188 +
189 + list_append(&pending_transaction_list, &t->link);
190 +
191 + return t;
192 + }
193 +
194 + static void
195 + link_transaction_destroy(struct link_transaction *t)
196 + {
197 + struct subaction *sa;
198 +
199 + while (!list_empty(&t->request_list)) {
200 + sa = list_head(&t->request_list, struct subaction, link);
201 + list_remove(&sa->link);
202 + subaction_destroy(sa);
203 + }
204 + while (!list_empty(&t->response_list)) {
205 + sa = list_head(&t->response_list, struct subaction, link);
206 + list_remove(&sa->link);
207 + subaction_destroy(sa);
208 + }
209 + free(t);
210 + }
211 +
212 + struct protocol_decoder {
213 + const char *name;
214 + int (*decode)(struct link_transaction *t);
215 + };
216 +
217 + static const struct protocol_decoder protocol_decoders[] = {
218 + { "FCP", decode_fcp }
219 + };
220 +
221 + static void
222 + handle_transaction(struct link_transaction *t)
223 + {
224 + struct subaction *sa;
225 + int i;
226 +
227 + if (!t->request) {
228 + printf("BUG in handle_transaction\n");
229 + return;
230 + }
231 +
232 + for (i = 0; i < array_length(protocol_decoders); i++)
233 + if (protocol_decoders[i].decode(t))
234 + break;
235 +
236 + /* HACK: decode only fcp right now. */
237 + return;
238 +
239 + decode_link_packet(&t->request->packet, t->request->length,
240 + PACKET_FIELD_TRANSACTION, 0);
241 + if (t->response)
242 + decode_link_packet(&t->response->packet, t->response->length,
243 + PACKET_FIELD_TRANSACTION, 0);
244 + else
245 + printf("[no response]");
246 +
247 + if (option_verbose) {
248 + list_for_each_entry(sa, &t->request_list, link)
249 + print_packet((uint32_t *) &sa->packet, sa->length);
250 + list_for_each_entry(sa, &t->response_list, link)
251 + print_packet((uint32_t *) &sa->packet, sa->length);
252 + }
253 + printf("\r\n");
254 +
255 + link_transaction_destroy(t);
256 + }
257 +
258 + static void
259 + clear_pending_transaction_list(void)
260 + {
261 + struct link_transaction *t;
262 +
263 + while (!list_empty(&pending_transaction_list)) {
264 + t = list_head(&pending_transaction_list,
265 + struct link_transaction, link);
266 + list_remove(&t->link);
267 + link_transaction_destroy(t);
268 + /* print unfinished transactions */
269 + }
270 + }
271 +
272 + static const char * const tcode_names[] = {
273 + [0x0] = "write_quadlet_request", [0x6] = "read_quadlet_response",
274 + [0x1] = "write_block_request", [0x7] = "read_block_response",
275 + [0x2] = "write_response", [0x8] = "cycle_start",
276 + [0x3] = "reserved", [0x9] = "lock_request",
277 + [0x4] = "read_quadlet_request", [0xa] = "iso_data",
278 + [0x5] = "read_block_request", [0xb] = "lock_response",
279 + };
280 +
281 + static const char * const ack_names[] = {
282 + [0x0] = "no ack", [0x8] = "reserved (0x08)",
283 + [0x1] = "ack_complete", [0x9] = "reserved (0x09)",
284 + [0x2] = "ack_pending", [0xa] = "reserved (0x0a)",
285 + [0x3] = "reserved (0x03)", [0xb] = "reserved (0x0b)",
286 + [0x4] = "ack_busy_x", [0xc] = "reserved (0x0c)",
287 + [0x5] = "ack_busy_a", [0xd] = "ack_data_error",
288 + [0x6] = "ack_busy_b", [0xe] = "ack_type_error",
289 + [0x7] = "reserved (0x07)", [0xf] = "reserved (0x0f)",
290 + };
291 +
292 + static const char * const rcode_names[] = {
293 + [0x0] = "complete", [0x4] = "conflict_error",
294 + [0x1] = "reserved (0x01)", [0x5] = "data_error",
295 + [0x2] = "reserved (0x02)", [0x6] = "type_error",
296 + [0x3] = "reserved (0x03)", [0x7] = "address_error",
297 + };
298 +
299 + static const char * const retry_names[] = {
300 + [0x0] = "retry_1",
301 + [0x1] = "retry_x",
302 + [0x2] = "retry_a",
303 + [0x3] = "retry_b",
304 + };
305 +
306 + enum {
307 + PACKET_RESERVED,
308 + PACKET_REQUEST,
309 + PACKET_RESPONSE,
310 + PACKET_OTHER,
311 + };
312 +
313 + struct packet_info {
314 + const char *name;
315 + int type;
316 + int response_tcode;
317 + const struct packet_field *fields;
318 + int field_count;
319 + };
320 +
321 + struct packet_field {
322 + const char *name; /* Short name for field. */
323 + int offset; /* Location of field, specified in bits; */
324 + /* negative means from end of packet. */
325 + int width; /* Width of field, 0 means use data_length. */
326 + int flags; /* Show options.
*/ 327 + const char * const *value_names; 328 + }; 329 + 330 + #define COMMON_REQUEST_FIELDS \ 331 + { "dest", 0, 16, PACKET_FIELD_TRANSACTION }, \ 332 + { "tl", 16, 6 }, \ 333 + { "rt", 22, 2, PACKET_FIELD_DETAIL, retry_names }, \ 334 + { "tcode", 24, 4, PACKET_FIELD_TRANSACTION, tcode_names }, \ 335 + { "pri", 28, 4, PACKET_FIELD_DETAIL }, \ 336 + { "src", 32, 16, PACKET_FIELD_TRANSACTION }, \ 337 + { "offs", 48, 48, PACKET_FIELD_TRANSACTION } 338 + 339 + #define COMMON_RESPONSE_FIELDS \ 340 + { "dest", 0, 16 }, \ 341 + { "tl", 16, 6 }, \ 342 + { "rt", 22, 2, PACKET_FIELD_DETAIL, retry_names }, \ 343 + { "tcode", 24, 4, 0, tcode_names }, \ 344 + { "pri", 28, 4, PACKET_FIELD_DETAIL }, \ 345 + { "src", 32, 16 }, \ 346 + { "rcode", 48, 4, PACKET_FIELD_TRANSACTION, rcode_names } 347 + 348 + static const struct packet_field read_quadlet_request_fields[] = { 349 + COMMON_REQUEST_FIELDS, 350 + { "crc", 96, 32, PACKET_FIELD_DETAIL }, 351 + { "ack", 156, 4, 0, ack_names }, 352 + }; 353 + 354 + static const struct packet_field read_quadlet_response_fields[] = { 355 + COMMON_RESPONSE_FIELDS, 356 + { "data", 96, 32, PACKET_FIELD_TRANSACTION }, 357 + { "crc", 128, 32, PACKET_FIELD_DETAIL }, 358 + { "ack", 188, 4, 0, ack_names }, 359 + }; 360 + 361 + static const struct packet_field read_block_request_fields[] = { 362 + COMMON_REQUEST_FIELDS, 363 + { "data_length", 96, 16, PACKET_FIELD_TRANSACTION }, 364 + { "extended_tcode", 112, 16 }, 365 + { "crc", 128, 32, PACKET_FIELD_DETAIL }, 366 + { "ack", 188, 4, 0, ack_names }, 367 + }; 368 + 369 + static const struct packet_field block_response_fields[] = { 370 + COMMON_RESPONSE_FIELDS, 371 + { "data_length", 96, 16, PACKET_FIELD_DATA_LENGTH }, 372 + { "extended_tcode", 112, 16 }, 373 + { "crc", 128, 32, PACKET_FIELD_DETAIL }, 374 + { "data", 160, 0, PACKET_FIELD_TRANSACTION }, 375 + { "crc", -64, 32, PACKET_FIELD_DETAIL }, 376 + { "ack", -4, 4, 0, ack_names }, 377 + }; 378 + 379 + static const struct packet_field write_quadlet_request_fields[] = { 380 + COMMON_REQUEST_FIELDS, 381 + { "data", 96, 32, PACKET_FIELD_TRANSACTION }, 382 + { "ack", -4, 4, 0, ack_names }, 383 + }; 384 + 385 + static const struct packet_field block_request_fields[] = { 386 + COMMON_REQUEST_FIELDS, 387 + { "data_length", 96, 16, PACKET_FIELD_DATA_LENGTH | PACKET_FIELD_TRANSACTION }, 388 + { "extended_tcode", 112, 16, PACKET_FIELD_TRANSACTION }, 389 + { "crc", 128, 32, PACKET_FIELD_DETAIL }, 390 + { "data", 160, 0, PACKET_FIELD_TRANSACTION }, 391 + { "crc", -64, 32, PACKET_FIELD_DETAIL }, 392 + { "ack", -4, 4, 0, ack_names }, 393 + }; 394 + 395 + static const struct packet_field write_response_fields[] = { 396 + COMMON_RESPONSE_FIELDS, 397 + { "reserved", 64, 32, PACKET_FIELD_DETAIL }, 398 + { "ack", -4, 4, 0, ack_names }, 399 + }; 400 + 401 + static const struct packet_field iso_data_fields[] = { 402 + { "data_length", 0, 16, PACKET_FIELD_DATA_LENGTH }, 403 + { "tag", 16, 2 }, 404 + { "channel", 18, 6 }, 405 + { "tcode", 24, 4, 0, tcode_names }, 406 + { "sy", 28, 4 }, 407 + { "crc", 32, 32, PACKET_FIELD_DETAIL }, 408 + { "data", 64, 0 }, 409 + { "crc", -64, 32, PACKET_FIELD_DETAIL }, 410 + { "ack", -4, 4, 0, ack_names }, 411 + }; 412 + 413 + static const struct packet_info packet_info[] = { 414 + { 415 + .name = "write_quadlet_request", 416 + .type = PACKET_REQUEST, 417 + .response_tcode = TCODE_WRITE_RESPONSE, 418 + .fields = write_quadlet_request_fields, 419 + .field_count = array_length(write_quadlet_request_fields) 420 + }, 421 + { 422 + .name = "write_block_request", 423 + 
.type = PACKET_REQUEST, 424 + .response_tcode = TCODE_WRITE_RESPONSE, 425 + .fields = block_request_fields, 426 + .field_count = array_length(block_request_fields) 427 + }, 428 + { 429 + .name = "write_response", 430 + .type = PACKET_RESPONSE, 431 + .fields = write_response_fields, 432 + .field_count = array_length(write_response_fields) 433 + }, 434 + { 435 + .name = "reserved", 436 + .type = PACKET_RESERVED, 437 + }, 438 + { 439 + .name = "read_quadlet_request", 440 + .type = PACKET_REQUEST, 441 + .response_tcode = TCODE_READ_QUADLET_RESPONSE, 442 + .fields = read_quadlet_request_fields, 443 + .field_count = array_length(read_quadlet_request_fields) 444 + }, 445 + { 446 + .name = "read_block_request", 447 + .type = PACKET_REQUEST, 448 + .response_tcode = TCODE_READ_BLOCK_RESPONSE, 449 + .fields = read_block_request_fields, 450 + .field_count = array_length(read_block_request_fields) 451 + }, 452 + { 453 + .name = "read_quadlet_response", 454 + .type = PACKET_RESPONSE, 455 + .fields = read_quadlet_response_fields, 456 + .field_count = array_length(read_quadlet_response_fields) 457 + }, 458 + { 459 + .name = "read_block_response", 460 + .type = PACKET_RESPONSE, 461 + .fields = block_response_fields, 462 + .field_count = array_length(block_response_fields) 463 + }, 464 + { 465 + .name = "cycle_start", 466 + .type = PACKET_OTHER, 467 + .fields = write_quadlet_request_fields, 468 + .field_count = array_length(write_quadlet_request_fields) 469 + }, 470 + { 471 + .name = "lock_request", 472 + .type = PACKET_REQUEST, 473 + .fields = block_request_fields, 474 + .field_count = array_length(block_request_fields) 475 + }, 476 + { 477 + .name = "iso_data", 478 + .type = PACKET_OTHER, 479 + .fields = iso_data_fields, 480 + .field_count = array_length(iso_data_fields) 481 + }, 482 + { 483 + .name = "lock_response", 484 + .type = PACKET_RESPONSE, 485 + .fields = block_response_fields, 486 + .field_count = array_length(block_response_fields) 487 + }, 488 + }; 489 + 490 + static int 491 + handle_request_packet(uint32_t *data, size_t length) 492 + { 493 + struct link_packet *p = (struct link_packet *) data; 494 + struct subaction *sa, *prev; 495 + struct link_transaction *t; 496 + 497 + t = link_transaction_lookup(p->common.source, p->common.destination, 498 + p->common.tlabel); 499 + sa = subaction_create(data, length); 500 + t->request = sa; 501 + 502 + if (!list_empty(&t->request_list)) { 503 + prev = list_tail(&t->request_list, 504 + struct subaction, link); 505 + 506 + if (!ACK_BUSY(prev->ack)) { 507 + /* 508 + * error, we should only see ack_busy_* before the 509 + * ack_pending/ack_complete -- this is an ack_pending 510 + * instead (ack_complete would have finished the 511 + * transaction). 512 + */ 513 + } 514 + 515 + if (prev->packet.common.tcode != sa->packet.common.tcode || 516 + prev->packet.common.tlabel != sa->packet.common.tlabel) { 517 + /* memcmp() ? */ 518 + /* error, these should match for retries. 
*/ 519 + } 520 + } 521 + 522 + list_append(&t->request_list, &sa->link); 523 + 524 + switch (sa->ack) { 525 + case ACK_COMPLETE: 526 + if (p->common.tcode != TCODE_WRITE_QUADLET_REQUEST && 527 + p->common.tcode != TCODE_WRITE_BLOCK_REQUEST) 528 + /* error, unified transactions only allowed for write */; 529 + list_remove(&t->link); 530 + handle_transaction(t); 531 + break; 532 + 533 + case ACK_NO_ACK: 534 + case ACK_DATA_ERROR: 535 + case ACK_TYPE_ERROR: 536 + list_remove(&t->link); 537 + handle_transaction(t); 538 + break; 539 + 540 + case ACK_PENDING: 541 + /* request subaction phase over, wait for response. */ 542 + break; 543 + 544 + case ACK_BUSY_X: 545 + case ACK_BUSY_A: 546 + case ACK_BUSY_B: 547 + /* ok, wait for retry. */ 548 + /* check that retry protocol is respected. */ 549 + break; 550 + } 551 + 552 + return 1; 553 + } 554 + 555 + static int 556 + handle_response_packet(uint32_t *data, size_t length) 557 + { 558 + struct link_packet *p = (struct link_packet *) data; 559 + struct subaction *sa, *prev; 560 + struct link_transaction *t; 561 + 562 + t = link_transaction_lookup(p->common.destination, p->common.source, 563 + p->common.tlabel); 564 + if (list_empty(&t->request_list)) { 565 + /* unsolicited response */ 566 + } 567 + 568 + sa = subaction_create(data, length); 569 + t->response = sa; 570 + 571 + if (!list_empty(&t->response_list)) { 572 + prev = list_tail(&t->response_list, struct subaction, link); 573 + 574 + if (!ACK_BUSY(prev->ack)) { 575 + /* 576 + * error, we should only see ack_busy_* before the 577 + * ack_pending/ack_complete 578 + */ 579 + } 580 + 581 + if (prev->packet.common.tcode != sa->packet.common.tcode || 582 + prev->packet.common.tlabel != sa->packet.common.tlabel) { 583 + /* use memcmp() instead? */ 584 + /* error, these should match for retries. */ 585 + } 586 + } else { 587 + prev = list_tail(&t->request_list, struct subaction, link); 588 + if (prev->ack != ACK_PENDING) { 589 + /* 590 + * error, should not get response unless last request got 591 + * ack_pending. 592 + */ 593 + } 594 + 595 + if (packet_info[prev->packet.common.tcode].response_tcode != 596 + sa->packet.common.tcode) { 597 + /* error, tcode mismatch */ 598 + } 599 + } 600 + 601 + list_append(&t->response_list, &sa->link); 602 + 603 + switch (sa->ack) { 604 + case ACK_COMPLETE: 605 + case ACK_NO_ACK: 606 + case ACK_DATA_ERROR: 607 + case ACK_TYPE_ERROR: 608 + list_remove(&t->link); 609 + handle_transaction(t); 610 + /* transaction complete, remove t from pending list. */ 611 + break; 612 + 613 + case ACK_PENDING: 614 + /* error for responses. 
*/ 615 + break; 616 + 617 + case ACK_BUSY_X: 618 + case ACK_BUSY_A: 619 + case ACK_BUSY_B: 620 + /* no problem, wait for next retry */ 621 + break; 622 + } 623 + 624 + return 1; 625 + } 626 + 627 + static int 628 + handle_packet(uint32_t *data, size_t length) 629 + { 630 + if (length == 0) { 631 + printf("bus reset\r\n"); 632 + clear_pending_transaction_list(); 633 + } else if (length > sizeof(struct phy_packet)) { 634 + struct link_packet *p = (struct link_packet *) data; 635 + 636 + switch (packet_info[p->common.tcode].type) { 637 + case PACKET_REQUEST: 638 + return handle_request_packet(data, length); 639 + 640 + case PACKET_RESPONSE: 641 + return handle_response_packet(data, length); 642 + 643 + case PACKET_OTHER: 644 + case PACKET_RESERVED: 645 + return 0; 646 + } 647 + } 648 + 649 + return 1; 650 + } 651 + 652 + static unsigned int 653 + get_bits(struct link_packet *packet, int offset, int width) 654 + { 655 + uint32_t *data = (uint32_t *) packet; 656 + uint32_t index, shift, mask; 657 + 658 + index = offset / 32 + 1; 659 + shift = 32 - (offset & 31) - width; 660 + mask = width == 32 ? ~0 : (1 << width) - 1; 661 + 662 + return (data[index] >> shift) & mask; 663 + } 664 + 665 + #if __BYTE_ORDER == __LITTLE_ENDIAN 666 + #define byte_index(i) ((i) ^ 3) 667 + #elif __BYTE_ORDER == __BIG_ENDIAN 668 + #define byte_index(i) (i) 669 + #else 670 + #error unsupported byte order. 671 + #endif 672 + 673 + static void 674 + dump_data(unsigned char *data, int length) 675 + { 676 + int i, print_length; 677 + 678 + if (length > 128) 679 + print_length = 128; 680 + else 681 + print_length = length; 682 + 683 + for (i = 0; i < print_length; i++) 684 + printf("%s%02hhx", 685 + (i % 4 == 0 && i != 0) ? " " : "", 686 + data[byte_index(i)]); 687 + 688 + if (print_length < length) 689 + printf(" (%d more bytes)", length - print_length); 690 + } 691 + 692 + static void 693 + decode_link_packet(struct link_packet *packet, size_t length, 694 + int include_flags, int exclude_flags) 695 + { 696 + const struct packet_info *pi; 697 + int data_length = 0; 698 + int i; 699 + 700 + pi = &packet_info[packet->common.tcode]; 701 + 702 + for (i = 0; i < pi->field_count; i++) { 703 + const struct packet_field *f = &pi->fields[i]; 704 + int offset; 705 + 706 + if (f->flags & exclude_flags) 707 + continue; 708 + if (include_flags && !(f->flags & include_flags)) 709 + continue; 710 + 711 + if (f->offset < 0) 712 + offset = length * 8 + f->offset - 32; 713 + else 714 + offset = f->offset; 715 + 716 + if (f->value_names != NULL) { 717 + uint32_t bits; 718 + 719 + bits = get_bits(packet, offset, f->width); 720 + printf("%s", f->value_names[bits]); 721 + } else if (f->width == 0) { 722 + printf("%s=[", f->name); 723 + dump_data((unsigned char *) packet + (offset / 8 + 4), data_length); 724 + printf("]"); 725 + } else { 726 + unsigned long long bits; 727 + int high_width, low_width; 728 + 729 + if ((offset & ~31) != ((offset + f->width - 1) & ~31)) { 730 + /* Bit field spans quadlet boundary. 
*/
731 + high_width = ((offset + 31) & ~31) - offset;
732 + low_width = f->width - high_width;
733 +
734 + bits = get_bits(packet, offset, high_width);
735 + bits = (bits << low_width) |
736 + get_bits(packet, offset + high_width, low_width);
737 + } else {
738 + bits = get_bits(packet, offset, f->width);
739 + }
740 +
741 + printf("%s=0x%0*llx", f->name, (f->width + 3) / 4, bits);
742 +
743 + if (f->flags & PACKET_FIELD_DATA_LENGTH)
744 + data_length = bits;
745 + }
746 +
747 + if (i < pi->field_count - 1)
748 + printf(", ");
749 + }
750 + }
751 +
752 + static void
753 + print_packet(uint32_t *data, size_t length)
754 + {
755 + int i;
756 +
757 + printf("%6u ", data[0]);
758 +
759 + if (length == 4) {
760 + printf("bus reset");
761 + } else if (length < sizeof(struct phy_packet)) {
762 + printf("short packet: ");
763 + for (i = 1; i < length / 4; i++)
764 + printf("%s%08x", i == 1 ? "[" : " ", data[i]);
765 + printf("]");
766 +
767 + } else if (length == sizeof(struct phy_packet) && data[1] == ~data[2]) {
768 + struct phy_packet *pp = (struct phy_packet *) data;
769 +
770 + /* phy packets are 3 quadlets: the 1 quadlet payload,
771 + * the bitwise inverse of the payload and the snoop
772 + * mode ack */
773 +
774 + switch (pp->common.identifier) {
775 + case PHY_PACKET_CONFIGURATION:
776 + if (!pp->phy_config.set_root && !pp->phy_config.set_gap_count) {
777 + printf("ext phy config: phy_id=%02x", pp->phy_config.root_id);
778 + } else {
779 + printf("phy config:");
780 + if (pp->phy_config.set_root)
781 + printf(" set_root_id=%02x", pp->phy_config.root_id);
782 + if (pp->phy_config.set_gap_count)
783 + printf(" set_gap_count=%d", pp->phy_config.gap_count);
784 + }
785 + break;
786 +
787 + case PHY_PACKET_LINK_ON:
788 + printf("link-on packet, phy_id=%02x", pp->link_on.phy_id);
789 + break;
790 +
791 + case PHY_PACKET_SELF_ID:
792 + if (pp->self_id.extended) {
793 + printf("extended self id: phy_id=%02x, seq=%d",
794 + pp->ext_self_id.phy_id, pp->ext_self_id.sequence);
795 + } else {
796 + static const char * const speed_names[] = {
797 + "S100", "S200", "S400", "BETA"
798 + };
799 + printf("self id: phy_id=%02x, link %s, gap_count=%d, speed=%s%s%s",
800 + pp->self_id.phy_id,
801 + (pp->self_id.link_active ? "active" : "not active"),
802 + pp->self_id.gap_count,
803 + speed_names[pp->self_id.phy_speed],
804 + (pp->self_id.contender ? ", irm contender" : ""),
805 + (pp->self_id.initiated_reset ? ", initiator" : ""));
806 + }
807 + break;
808 + default:
809 + printf("unknown phy packet: ");
810 + for (i = 1; i < length / 4; i++)
811 + printf("%s%08x", i == 1 ? "[" : " ", data[i]);
812 + printf("]");
813 + break;
814 + }
815 + } else {
816 + struct link_packet *packet = (struct link_packet *) data;
817 +
818 + decode_link_packet(packet, length, 0,
819 + option_verbose ?
0 : PACKET_FIELD_DETAIL); 820 + } 821 + 822 + if (option_hex) { 823 + printf(" ["); 824 + dump_data((unsigned char *) data + 4, length - 4); 825 + printf("]"); 826 + } 827 + 828 + printf("\r\n"); 829 + } 830 + 831 + #define HIDE_CURSOR "\033[?25l" 832 + #define SHOW_CURSOR "\033[?25h" 833 + #define CLEAR "\033[H\033[2J" 834 + 835 + static void 836 + print_stats(uint32_t *data, size_t length) 837 + { 838 + static int bus_reset_count, short_packet_count, phy_packet_count; 839 + static int tcode_count[16]; 840 + static struct timeval last_update; 841 + struct timeval now; 842 + int i; 843 + 844 + if (length == 0) 845 + bus_reset_count++; 846 + else if (length < sizeof(struct phy_packet)) 847 + short_packet_count++; 848 + else if (length == sizeof(struct phy_packet) && data[1] == ~data[2]) 849 + phy_packet_count++; 850 + else { 851 + struct link_packet *packet = (struct link_packet *) data; 852 + tcode_count[packet->common.tcode]++; 853 + } 854 + 855 + gettimeofday(&now, NULL); 856 + if (now.tv_sec <= last_update.tv_sec && 857 + now.tv_usec < last_update.tv_usec + 500000) 858 + return; 859 + 860 + last_update = now; 861 + printf(CLEAR HIDE_CURSOR 862 + " bus resets : %8d\n" 863 + " short packets : %8d\n" 864 + " phy packets : %8d\n", 865 + bus_reset_count, short_packet_count, phy_packet_count); 866 + 867 + for (i = 0; i < array_length(packet_info); i++) 868 + if (packet_info[i].type != PACKET_RESERVED) 869 + printf(" %-24s: %8d\n", packet_info[i].name, tcode_count[i]); 870 + printf(SHOW_CURSOR "\n"); 871 + } 872 + 873 + static struct termios saved_attributes; 874 + 875 + static void 876 + reset_input_mode(void) 877 + { 878 + tcsetattr(STDIN_FILENO, TCSANOW, &saved_attributes); 879 + } 880 + 881 + static void 882 + set_input_mode(void) 883 + { 884 + struct termios tattr; 885 + 886 + /* Make sure stdin is a terminal. */ 887 + if (!isatty(STDIN_FILENO)) { 888 + fprintf(stderr, "Not a terminal.\n"); 889 + exit(EXIT_FAILURE); 890 + } 891 + 892 + /* Save the terminal attributes so we can restore them later. */ 893 + tcgetattr(STDIN_FILENO, &saved_attributes); 894 + atexit(reset_input_mode); 895 + 896 + /* Set the funny terminal modes. */ 897 + tcgetattr(STDIN_FILENO, &tattr); 898 + tattr.c_lflag &= ~(ICANON|ECHO); /* Clear ICANON and ECHO. 
*/ 899 + tattr.c_cc[VMIN] = 1; 900 + tattr.c_cc[VTIME] = 0; 901 + tcsetattr(STDIN_FILENO, TCSAFLUSH, &tattr); 902 + } 903 + 904 + int main(int argc, const char *argv[]) 905 + { 906 + uint32_t buf[128 * 1024]; 907 + uint32_t filter; 908 + int length, retval, view; 909 + int fd = -1; 910 + FILE *output = NULL, *input = NULL; 911 + poptContext con; 912 + char c; 913 + struct pollfd pollfds[2]; 914 + 915 + sys_sigint_handler = signal(SIGINT, sigint_handler); 916 + 917 + con = poptGetContext(NULL, argc, argv, options, 0); 918 + retval = poptGetNextOpt(con); 919 + if (retval < -1) { 920 + poptPrintUsage(con, stdout, 0); 921 + return -1; 922 + } 923 + 924 + if (option_version) { 925 + printf("dump tool for nosy sniffer, version %s\n", VERSION); 926 + return 0; 927 + } 928 + 929 + if (__BYTE_ORDER != __LITTLE_ENDIAN) 930 + fprintf(stderr, "warning: nosy has only been tested on little " 931 + "endian machines\n"); 932 + 933 + if (option_input != NULL) { 934 + input = fopen(option_input, "r"); 935 + if (input == NULL) { 936 + fprintf(stderr, "Could not open %s, %m\n", option_input); 937 + return -1; 938 + } 939 + } else { 940 + fd = open(option_nosy_device, O_RDWR); 941 + if (fd < 0) { 942 + fprintf(stderr, "Could not open %s, %m\n", option_nosy_device); 943 + return -1; 944 + } 945 + set_input_mode(); 946 + } 947 + 948 + if (strcmp(option_view, "transaction") == 0) 949 + view = VIEW_TRANSACTION; 950 + else if (strcmp(option_view, "stats") == 0) 951 + view = VIEW_STATS; 952 + else 953 + view = VIEW_PACKET; 954 + 955 + if (option_output) { 956 + output = fopen(option_output, "w"); 957 + if (output == NULL) { 958 + fprintf(stderr, "Could not open %s, %m\n", option_output); 959 + return -1; 960 + } 961 + } 962 + 963 + setvbuf(stdout, NULL, _IOLBF, BUFSIZ); 964 + 965 + filter = ~0; 966 + if (!option_iso) 967 + filter &= ~(1 << TCODE_STREAM_DATA); 968 + if (!option_cycle_start) 969 + filter &= ~(1 << TCODE_CYCLE_START); 970 + if (view == VIEW_STATS) 971 + filter = ~(1 << TCODE_CYCLE_START); 972 + 973 + ioctl(fd, NOSY_IOC_FILTER, filter); 974 + 975 + ioctl(fd, NOSY_IOC_START); 976 + 977 + pollfds[0].fd = fd; 978 + pollfds[0].events = POLLIN; 979 + pollfds[1].fd = STDIN_FILENO; 980 + pollfds[1].events = POLLIN; 981 + 982 + while (run) { 983 + if (input != NULL) { 984 + if (fread(&length, sizeof length, 1, input) != 1) 985 + return 0; 986 + fread(buf, 1, length, input); 987 + } else { 988 + poll(pollfds, 2, -1); 989 + if (pollfds[1].revents) { 990 + read(STDIN_FILENO, &c, sizeof c); 991 + switch (c) { 992 + case 'q': 993 + if (output != NULL) 994 + fclose(output); 995 + return 0; 996 + } 997 + } 998 + 999 + if (pollfds[0].revents) 1000 + length = read(fd, buf, sizeof buf); 1001 + else 1002 + continue; 1003 + } 1004 + 1005 + if (output != NULL) { 1006 + fwrite(&length, sizeof length, 1, output); 1007 + fwrite(buf, 1, length, output); 1008 + } 1009 + 1010 + switch (view) { 1011 + case VIEW_TRANSACTION: 1012 + handle_packet(buf, length); 1013 + break; 1014 + case VIEW_PACKET: 1015 + print_packet(buf, length); 1016 + break; 1017 + case VIEW_STATS: 1018 + print_stats(buf, length); 1019 + break; 1020 + } 1021 + } 1022 + 1023 + if (output != NULL) 1024 + fclose(output); 1025 + 1026 + close(fd); 1027 + 1028 + poptFreeContext(con); 1029 + 1030 + return 0; 1031 + }
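For reference, decode_link_packet() above addresses each field by a bit offset counted from the top bit of the first header quadlet, and get_bits() skips one quadlet for the timestamp that the driver prepends before shifting and masking. A standalone sketch of the same arithmetic, not part of the patch, checked against the dest/tl/tcode offsets of COMMON_REQUEST_FIELDS; the sample header value 0xffc05440 is made up:

  #include <assert.h>
  #include <stdint.h>

  /* Same arithmetic as get_bits() in nosy-dump.c: quadlet 0 holds the
   * timestamp, so bit offset 0 is the top bit of quadlet 1. */
  static unsigned int get_bits(const uint32_t *data, int offset, int width)
  {
          uint32_t index = offset / 32 + 1;
          uint32_t shift = 32 - (offset & 31) - width;
          uint32_t mask = width == 32 ? ~0 : (1 << width) - 1;

          return (data[index] >> shift) & mask;
  }

  int main(void)
  {
          /* Timestamp, then a header quadlet: dest=0xffc0, tl=0x15, tcode=4. */
          uint32_t packet[2] = { 0, 0xffc05440 };

          assert(get_bits(packet, 0, 16) == 0xffc0); /* "dest",  offset 0  */
          assert(get_bits(packet, 16, 6) == 0x15);   /* "tl",    offset 16 */
          assert(get_bits(packet, 24, 4) == 0x4);    /* "tcode", offset 24 */
          return 0;
  }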
+173
tools/firewire/nosy-dump.h
···
···
1 + #ifndef __nosy_dump_h__
2 + #define __nosy_dump_h__
3 +
4 + #define array_length(array) (sizeof(array) / sizeof(array[0]))
5 +
6 + #define ACK_NO_ACK 0x0
7 + #define ACK_DONE(a) (((a) >> 2) == 0)
8 + #define ACK_BUSY(a) (((a) >> 2) == 1)
9 + #define ACK_ERROR(a) (((a) >> 2) == 3)
10 +
11 + #include <stdint.h>
12 +
13 + struct phy_packet {
14 + uint32_t timestamp;
15 + union {
16 + struct {
17 + uint32_t zero:24;
18 + uint32_t phy_id:6;
19 + uint32_t identifier:2;
20 + } common, link_on;
21 +
22 + struct {
23 + uint32_t zero:16;
24 + uint32_t gap_count:6;
25 + uint32_t set_gap_count:1;
26 + uint32_t set_root:1;
27 + uint32_t root_id:6;
28 + uint32_t identifier:2;
29 + } phy_config;
30 +
31 + struct {
32 + uint32_t more_packets:1;
33 + uint32_t initiated_reset:1;
34 + uint32_t port2:2;
35 + uint32_t port1:2;
36 + uint32_t port0:2;
37 + uint32_t power_class:3;
38 + uint32_t contender:1;
39 + uint32_t phy_delay:2;
40 + uint32_t phy_speed:2;
41 + uint32_t gap_count:6;
42 + uint32_t link_active:1;
43 + uint32_t extended:1;
44 + uint32_t phy_id:6;
45 + uint32_t identifier:2;
46 + } self_id;
47 +
48 + struct {
49 + uint32_t more_packets:1;
50 + uint32_t reserved1:1;
51 + uint32_t porth:2;
52 + uint32_t portg:2;
53 + uint32_t portf:2;
54 + uint32_t porte:2;
55 + uint32_t portd:2;
56 + uint32_t portc:2;
57 + uint32_t portb:2;
58 + uint32_t porta:2;
59 + uint32_t reserved0:2;
60 + uint32_t sequence:3;
61 + uint32_t extended:1;
62 + uint32_t phy_id:6;
63 + uint32_t identifier:2;
64 + } ext_self_id;
65 + };
66 + uint32_t inverted;
67 + uint32_t ack;
68 + };
69 +
70 + #define TCODE_PHY_PACKET 0x10
71 +
72 + #define PHY_PACKET_CONFIGURATION 0x00
73 + #define PHY_PACKET_LINK_ON 0x01
74 + #define PHY_PACKET_SELF_ID 0x02
75 +
76 + struct link_packet {
77 + uint32_t timestamp;
78 + union {
79 + struct {
80 + uint32_t priority:4;
81 + uint32_t tcode:4;
82 + uint32_t rt:2;
83 + uint32_t tlabel:6;
84 + uint32_t destination:16;
85 +
86 + uint32_t offset_high:16;
87 + uint32_t source:16;
88 +
89 + uint32_t offset_low;
90 + } common;
91 +
92 + struct {
93 + uint32_t common[3];
94 + uint32_t crc;
95 + } read_quadlet;
96 +
97 + struct {
98 + uint32_t common[3];
99 + uint32_t data;
100 + uint32_t crc;
101 + } read_quadlet_response;
102 +
103 + struct {
104 + uint32_t common[3];
105 + uint32_t extended_tcode:16;
106 + uint32_t data_length:16;
107 + uint32_t crc;
108 + } read_block;
109 +
110 + struct {
111 + uint32_t common[3];
112 + uint32_t extended_tcode:16;
113 + uint32_t data_length:16;
114 + uint32_t crc;
115 + uint32_t data[0];
116 + /* crc and ack follow. */
117 + } read_block_response;
118 +
119 + struct {
120 + uint32_t common[3];
121 + uint32_t data;
122 + uint32_t crc;
123 + } write_quadlet;
124 +
125 + struct {
126 + uint32_t common[3];
127 + uint32_t extended_tcode:16;
128 + uint32_t data_length:16;
129 + uint32_t crc;
130 + uint32_t data[0];
131 + /* crc and ack follow.
*/ 132 + } write_block; 133 + 134 + struct { 135 + uint32_t common[3]; 136 + uint32_t crc; 137 + } write_response; 138 + 139 + struct { 140 + uint32_t common[3]; 141 + uint32_t data; 142 + uint32_t crc; 143 + } cycle_start; 144 + 145 + struct { 146 + uint32_t sy:4; 147 + uint32_t tcode:4; 148 + uint32_t channel:6; 149 + uint32_t tag:2; 150 + uint32_t data_length:16; 151 + 152 + uint32_t crc; 153 + } iso_data; 154 + }; 155 + }; 156 + 157 + struct subaction { 158 + uint32_t ack; 159 + size_t length; 160 + struct list link; 161 + struct link_packet packet; 162 + }; 163 + 164 + struct link_transaction { 165 + int request_node, response_node, tlabel; 166 + struct subaction *request, *response; 167 + struct list request_list, response_list; 168 + struct list link; 169 + }; 170 + 171 + int decode_fcp(struct link_transaction *t); 172 + 173 + #endif /* __nosy_dump_h__ */
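The ACK_DONE()/ACK_BUSY()/ACK_ERROR() predicates above classify the IEEE 1394 ack codes by their upper two bits: ack_complete and ack_pending fall in 0x0-0x3, the three busy codes in 0x4-0x7, and the error acks in 0xc-0xf. A quick sanity-check sketch, not part of the patch, using the ack constants from <linux/firewire-constants.h>; list.h and <stddef.h> are included first so that struct list and size_t resolve inside nosy-dump.h:

  #include <assert.h>
  #include <stddef.h>
  #include <stdint.h>
  #include <linux/firewire-constants.h>

  #include "list.h"
  #include "nosy-dump.h"

  int main(void)
  {
          /* Completion group: top two bits 00. */
          assert(ACK_DONE(ACK_COMPLETE) && ACK_DONE(ACK_PENDING));
          /* Busy group: top two bits 01. */
          assert(ACK_BUSY(ACK_BUSY_X) && ACK_BUSY(ACK_BUSY_A) && ACK_BUSY(ACK_BUSY_B));
          /* Error group: top two bits 11. */
          assert(ACK_ERROR(ACK_DATA_ERROR) && ACK_ERROR(ACK_TYPE_ERROR));
          return 0;
  }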