Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.30-rc1, 2629 lines, 74 kB
/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#include <asm/page.h>
#include <asm/system.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "fw-ohci.h"
#include "fw-transaction.h"

#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));

struct db_descriptor {
	__le16 first_size;
	__le16 control;
	__le16 second_req_count;
	__le16 first_req_count;
	__le32 branch_address;
	__le16 second_res_count;
	__le16 first_res_count;
	__le32 reserved0;
	__le32 first_buffer;
	__le32 second_buffer;
	__le32 reserved1;
} __attribute__((aligned(16)));

#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)

struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};

struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;
	struct ar_buffer *last_buffer;
	void *pointer;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[0];
};

struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor in the DMA program.  It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};

#define IT_HEADER_SY(v)          ((v) << 0)
#define IT_HEADER_TCODE(v)       ((v) << 4)
#define IT_HEADER_CHANNEL(v)     ((v) << 8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)

struct iso_context {
	struct fw_iso_context base;
	struct context context;
	int excess_bytes;
	void *header;
	size_t header_length;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	dma_addr_t self_id_bus;
	__le32 *self_id_cpu;
	struct tasklet_struct bus_reset_tasklet;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	u32 bus_seconds;

	bool use_dualbuffer;
	bool old_uninorth;
	bool bus_reset_packet_quirk;

	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;
	u32 self_id_buffer[512];

	/* Config rom buffers */
	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	u32 next_header;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_mask;
	struct iso_context *it_context_list;
	u64 ir_context_channels;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define FW_OHCI_MAJOR			240
#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010

static char ohci_driver_name[] = KBUILD_MODNAME;

#ifdef CONFIG_FIREWIRE_OHCI_DEBUG

#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4
#define OHCI_PARAM_DEBUG_BUSRESETS	8 /* only effective before chip init */

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
	", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
	", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
	", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
	", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
	", or a combination, or all = -1)");

static void log_irqs(u32 evt)
{
	if (likely(!(param_debug &
			(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
		return;

	if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
	    !(evt & OHCI1394_busReset))
		return;

	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
		  evt & OHCI1394_selfIDComplete ? " selfID" : "",
		  evt & OHCI1394_RQPkt ? " AR_req" : "",
		  evt & OHCI1394_RSPkt ? " AR_resp" : "",
		  evt & OHCI1394_reqTxComplete ? " AT_req" : "",
		  evt & OHCI1394_respTxComplete ? " AT_resp" : "",
		  evt & OHCI1394_isochRx ? " IR" : "",
		  evt & OHCI1394_isochTx ? " IT" : "",
		  evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
		  evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
		  evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
		  evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
		  evt & OHCI1394_busReset ? " busReset" : "",
		  evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
			  OHCI1394_RSPkt | OHCI1394_reqTxComplete |
			  OHCI1394_respTxComplete | OHCI1394_isochRx |
			  OHCI1394_isochTx | OHCI1394_postedWriteErr |
			  OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
			  OHCI1394_regAccessFail | OHCI1394_busReset)
			  ? " ?" : "");
}

static const char *speed[] = {
	[0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
};
static const char *power[] = {
	[0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
	[4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
};
static const char port[] = { '.', '-', 'p', 'c', };

static char _p(u32 *s, int shift)
{
	return port[*s >> shift & 3];
}

static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
{
	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
		return;

	fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
		  self_id_count, generation, node_id);

	for (; self_id_count--; ++s)
		if ((*s & 1 << 23) == 0)
			fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
				  "%s gc=%d %s %s%s%s\n",
				  *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
				  speed[*s >> 14 & 3], *s >> 16 & 63,
				  power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
				  *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
		else
			fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
				  *s, *s >> 24 & 63,
				  _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
				  _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
}

static const char *evts[] = {
	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
	[0x10] = "-reserved-",		[0x11] = "ack_complete",
	[0x12] = "ack_pending ",	[0x13] = "-reserved-",
	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
	[0x18] = "-reserved-",		[0x19] = "-reserved-",
	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
	[0x20] = "pending/cancelled",
};
static const char *tcodes[] = {
	[0x0] = "QW req",		[0x1] = "BW req",
	[0x2] = "W resp",		[0x3] = "-reserved-",
	[0x4] = "QR req",		[0x5] = "BR req",
	[0x6] = "QR resp",		[0x7] = "BR resp",
	[0x8] = "cycle start",		[0x9] = "Lk req",
	[0xa] = "async stream packet",	[0xb] = "Lk resp",
	[0xc] = "-reserved-",		[0xd] = "-reserved-",
	[0xe] = "link internal",	[0xf] = "-reserved-",
};
static const char *phys[] = {
	[0x0] = "phy config packet",	[0x1] = "link-on packet",
	[0x2] = "self-id packet",	[0x3] = "-reserved-",
};

static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
{
	int tcode = header[0] >> 4 & 0xf;
	char specific[12];

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
		return;

	if (unlikely(evt >= ARRAY_SIZE(evts)))
		evt = 0x1f;

	if (evt == OHCI1394_evt_bus_reset) {
		fw_notify("A%c evt_bus_reset, generation %d\n",
			  dir, (header[2] >> 16) & 0xff);
		return;
	}

	if (header[0] == ~header[1]) {
		fw_notify("A%c %s, %s, %08x\n",
			  dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
		return;
	}

	switch (tcode) {
	case 0x0: case 0x6: case 0x8:
		snprintf(specific, sizeof(specific), " = %08x",
			 be32_to_cpu((__force __be32)header[3]));
		break;
	case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
		snprintf(specific, sizeof(specific), " %x,%x",
			 header[3] >> 16, header[3] & 0xffff);
		break;
	default:
		specific[0] = '\0';
	}

	switch (tcode) {
	case 0xe: case 0xa:
		fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
		break;
	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
		fw_notify("A%c spd %x tl %02x, "
			  "%04x -> %04x, %s, "
			  "%s, %04x%08x%s\n",
			  dir, speed, header[0] >> 10 & 0x3f,
			  header[1] >> 16, header[0] >> 16, evts[evt],
			  tcodes[tcode], header[1] & 0xffff, header[2], specific);
		break;
	default:
		fw_notify("A%c spd %x tl %02x, "
			  "%04x -> %04x, %s, "
			  "%s%s\n",
			  dir, speed, header[0] >> 10 & 0x3f,
			  header[1] >> 16, header[0] >> 16, evts[evt],
			  tcodes[tcode], specific);
	}
}

#else

#define log_irqs(evt)
#define log_selfids(node_id, generation, self_id_count, sid)
#define log_ar_at_event(dir, speed, header, evt)

#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 val, old;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	flush_writes(ohci);
	msleep(2);
	val = reg_read(ohci, OHCI1394_PhyControl);
	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
		fw_error("failed to set phy reg bits.\n");
		return -EBUSY;
	}

	old = OHCI1394_PhyControl_ReadData(val);
	old = (old & ~clear_bits) | set_bits;
	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, old));

	return 0;
}

static int ar_context_add_page(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	struct ar_buffer *ab;
	dma_addr_t uninitialized_var(ab_bus);
	size_t offset;

	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
	if (ab == NULL)
		return -ENOMEM;

	ab->next = NULL;
	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
	ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
					     DESCRIPTOR_STATUS |
					     DESCRIPTOR_BRANCH_ALWAYS);
	offset = offsetof(struct ar_buffer, data);
	ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
	ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.branch_address = 0;

	ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
	ctx->last_buffer->next = ab;
	ctx->last_buffer = ab;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);

	return 0;
}

static void ar_context_release(struct ar_context *ctx)
{
	struct ar_buffer *ab, *ab_next;
	size_t offset;
	dma_addr_t ab_bus;

	for (ab = ctx->current_buffer; ab; ab = ab_next) {
		ab_next = ab->next;
		offset = offsetof(struct ar_buffer, data);
		ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
		dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
				  ab, ab_bus);
	}
}

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif

static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		/* FIXME: Stop context, discard everything, and restart? */
		p.header_length = 0;
		p.payload_length = 0;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length]);
	evt = (status >> 16) & 0x1f;

	p.ack = evt - 16;
	p.speed = (status >> 21) & 0x7;
	p.timestamp = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event('R', p.speed, p.header, evt);

	/*
	 * The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation.  We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_tasklet).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!ohci->bus_reset_packet_quirk)
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}

static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct ar_buffer *ab;
	struct descriptor *d;
	void *buffer, *end;

	ab = ctx->current_buffer;
	d = &ab->descriptor;

	if (d->res_count == 0) {
		size_t size, rest, offset;
		dma_addr_t start_bus;
		void *start;

		/*
		 * This descriptor is finished and we may have a
		 * packet split across this and the next buffer.  We
		 * reuse the page for reassembling the split packet.
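		 *
		 * Concretely, "size" below is the fragment left at the
		 * end of this page (everything past ctx->pointer) and
		 * "rest" is how much data has already arrived in the
		 * next page; moving the fragment to the start of this
		 * page and appending those bytes gives
		 * handle_ar_packet() a contiguous view again.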
		 */

		offset = offsetof(struct ar_buffer, data);
		start = buffer = ab;
		start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

		ab = ab->next;
		d = &ab->descriptor;
		size = buffer + PAGE_SIZE - ctx->pointer;
		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
		memmove(buffer, ctx->pointer, size);
		memcpy(buffer + size, ab->data, rest);
		ctx->current_buffer = ab;
		ctx->pointer = (void *) ab->data + rest;
		end = buffer + size + rest;

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);

		dma_free_coherent(ohci->card.device, PAGE_SIZE,
				  start, start_bus);
		ar_context_add_page(ctx);
	} else {
		buffer = ctx->pointer;
		ctx->pointer = end =
			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);
	}
}

static int ar_context_init(struct ar_context *ctx,
			   struct fw_ohci *ohci, u32 regs)
{
	struct ar_buffer ab;

	ctx->regs = regs;
	ctx->ohci = ohci;
	ctx->last_buffer = &ab;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	ar_context_add_page(ctx);
	ar_context_add_page(ctx);
	ctx->current_buffer = ab.next;
	ctx->pointer = ctx->current_buffer->data;

	return 0;
}

static void ar_context_run(struct ar_context *ctx)
{
	struct ar_buffer *ab = ctx->current_buffer;
	dma_addr_t ab_bus;
	size_t offset;

	offset = offsetof(struct ar_buffer, data);
	ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);
}

static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
	int b, key;

	b = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
	key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && (b == 3 || key == 2))
		return d;
	else
		return d + z - 1;
}

static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			  struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
		    address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					  struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			/* If we've advanced to the next buffer, move the
			 * previous buffer to the free list.
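			 * (buffer_list is also walked by
			 * context_get_descriptors(), whose callers hold
			 * ohci->lock, hence the locking here.)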
			 */
			unsigned long flags;
			old_desc->used = 0;
			spin_lock_irqsave(&ctx->ohci->lock, flags);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		}
		ctx->last = last;
	}
}

/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context.  Must be called with ohci->lock held.
 */
static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t uninitialized_var(bus_addr);
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program.  This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
				  &bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;
	desc->buffer_size = PAGE_SIZE - offset;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}

static int context_init(struct context *ctx, struct fw_ohci *ohci,
			u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
				      struct descriptor_buffer, list);

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;

	return 0;
}

static void context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
		dma_free_coherent(card->device, PAGE_SIZE, desc,
				  desc->buffer_bus -
				  ((void *)&desc->buffer - (void *)desc));
}

/* Must be called with ohci->lock held */
static struct descriptor *context_get_descriptors(struct context *ctx,
						  int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				  struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}

static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	flush_writes(ohci);
}

static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);
	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev = find_branch_descriptor(d, z);

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}

static void context_stop(struct context *ctx)
{
	u32 reg;
	int i;

	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	for (i = 0; i < 10; i++) {
		reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			return;

		mdelay(1);
	}
	fw_error("Error: DMA context still active (0x%08x)\n", reg);
}

struct driver_data {
	struct fw_packet *packet;
};

/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int at_context_queue_packet(struct context *ctx,
				   struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, uninitialized_var(payload_bus);
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;
	u32 reg;

	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.  If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet.
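	 *
	 * (For the 12- and 16-byte cases below: IEEE 1394 carries the
	 * destination ID in the top half of the first header quadlet,
	 * but the OHCI AT format wants the speed code there and the
	 * destination ID moved into the top half of the second
	 * quadlet, above offset_high.)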
	 */

	header = (__le32 *) &d[1];
	switch (packet->header_length) {
	case 16:
	case 12:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					(packet->header[0] & 0xffff0000));
		header[2] = cpu_to_le32(packet->header[2]);

		tcode = (packet->header[0] >> 4) & 0x0f;
		if (TCODE_IS_BLOCK_PACKET(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
		break;

	case 8:
		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0]);
		header[2] = cpu_to_le32(packet->header[1]);
		d[0].req_count = cpu_to_le16(12);
		break;

	case 4:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
		d[0].req_count = cpu_to_le16(8);
		break;

	default:
		/* BUG(); */
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		payload_bus =
			dma_map_single(ohci->card.device, packet->payload,
				       packet->payload_length, DMA_TO_DEVICE);
		if (dma_mapping_error(ohci->card.device, payload_bus)) {
			packet->ack = RCODE_SEND_ERROR;
			return -1;
		}
		packet->payload_bus = payload_bus;

		d[2].req_count = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/*
	 * If the controller and packet generations don't match, we need to
	 * bail out and try again.  If IntEvent.busReset is set, the AT context
	 * is halted, so appending to the context and trying to run it is
	 * futile.  Most controllers do the right thing and just flush the AT
	 * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
	 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
	 * up stalling out.  So we just bail out in software and try again
	 * later, and everyone is happy.
	 * FIXME: Document how the locking works.
	 */
	if (ohci->generation != packet->generation ||
	    reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
		if (packet->payload_length > 0)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	/* If the context isn't already running, start it up. */
	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
	if ((reg & CONTEXT_RUN) == 0)
		context_run(ctx, 0);

	return 0;
}

static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	int evt;

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_bus)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	log_ar_at_event('T', packet->speed, packet->header, evt);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * The packet was flushed; this should give the same
		 * error as when we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		/*
		 * Using a valid (current) generation count, but the
		 * node is not on the bus or not sending acks.
		 */
		packet->ack = RCODE_NO_ACK;
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}

#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)

static void handle_local_rom(struct fw_ohci *ohci,
			     struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_lock(struct fw_ohci *ohci,
			      struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	payload = packet->payload;
	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
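	/*
	 * Drive the controller's built-in compare-swap unit: CSRData
	 * takes the new value, CSRCompareData the expected old value,
	 * and writing the selector to CSRControl kicks off the
	 * operation.  Once bit 31 of CSRControl reads back as set, the
	 * swap has been attempted and CSRData holds the prior value.
	 */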
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
		lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
	else
		fw_notify("swap not done yet\n");

	fw_fill_response(&response, packet->header,
			 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
 out:
	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset;
	u32 csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset =
		((unsigned long long)
		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}

static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	ret = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (ret < 0)
		packet->callback(packet, &ctx->ohci->card, packet->ack);

}

static void bus_reset_tasklet(unsigned long data)
{
	struct fw_ohci *ohci = (struct fw_ohci *)data;
	int self_id_count, i, j, reg;
	int generation, new_generation;
	unsigned long flags;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		fw_notify("node ID not valid, new bus reset in progress\n");
		return;
	}
	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		fw_notify("malconfigured bus\n");
		return;
	}
	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
			       OHCI1394_NodeID_nodeNumber);

	reg = reg_read(ohci, OHCI1394_SelfIDCount);
	if (reg & OHCI1394_SelfIDCount_selfIDError) {
		fw_notify("inconsistent self IDs\n");
		return;
	}
	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
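	 *
	 * For example, a buffer with one header quadlet plus four self
	 * IDs and their four inverted copies is 9 quadlets or 36 bytes:
	 * 36 >> 3 = 4 self IDs, the header quadlet being absorbed by
	 * the truncation.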
	 */
	self_id_count = (reg >> 3) & 0x3ff;
	if (self_id_count == 0) {
		fw_notify("inconsistent self IDs\n");
		return;
	}
	generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
	rmb();

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
			fw_notify("inconsistent self IDs\n");
			return;
		}
		ohci->self_id_buffer[j] =
				cond_le32_to_cpu(ohci->self_id_cpu[i]);
	}
	rmb();

	/*
	 * Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs.
	 */

	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		fw_notify("recursive bus reset detected, "
			  "discarding self ids\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irqsave(&ohci->lock, flags);

	ohci->generation = generation;
	context_stop(&ohci->at_request_ctx);
	context_stop(&ohci->at_response_ctx);
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

	if (ohci->bus_reset_packet_quirk)
		ohci->request_generation = generation;

	/*
	 * This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place.
	 */

	if (ohci->next_config_rom != NULL) {
		if (ohci->next_config_rom != ohci->config_rom) {
			free_rom = ohci->config_rom;
			free_rom_bus = ohci->config_rom_bus;
		}
		ohci->config_rom = ohci->next_config_rom;
		ohci->config_rom_bus = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/*
		 * Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last.
		 */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
		reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
	}

#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
	reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
#endif

	spin_unlock_irqrestore(&ohci->lock, flags);

	if (free_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  free_rom, free_rom_bus);

	log_selfids(ohci->node_id, generation,
		    self_id_count, ohci->self_id_buffer);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer);
}

static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event, cycle_time;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event || !~event)
		return IRQ_NONE;

	/* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
	log_irqs(event);

	if (event & OHCI1394_selfIDComplete)
		tasklet_schedule(&ohci->bus_reset_tasklet);

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	if (unlikely(event & OHCI1394_regAccessFail))
		fw_error("Register access failure - "
			 "please notify linux1394-devel@lists.sf.net\n");

	if (unlikely(event & OHCI1394_postedWriteErr))
		fw_error("PCI posted write error\n");

	if (unlikely(event & OHCI1394_cycleTooLong)) {
		if (printk_ratelimit())
			fw_notify("isochronous cycle too long\n");
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	}

	if (event & OHCI1394_cycle64Seconds) {
		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		if ((cycle_time & 0x80000000) == 0)
			ohci->bus_seconds++;
	}

	return IRQ_HANDLED;
}

static int software_reset(struct fw_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if ((reg_read(ohci, OHCI1394_HCControlSet) &
		     OHCI1394_HCControl_softReset) == 0)
			return 0;
		msleep(1);
	}

	return -EBUSY;
}

static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct pci_dev *dev = to_pci_dev(card->device);
	u32 lps;
	int i;

	if (software_reset(ohci)) {
		fw_error("Failed to reset ohci card.\n");
		return -EBUSY;
	}

	/*
	 * Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.  However, with some cards (well, at least
	 * a JMicron PCIe card), we have to try again sometimes.
	 */
	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_LPS |
		  OHCI1394_HCControl_postedWriteEnable);
	flush_writes(ohci);

	for (lps = 0, i = 0; !lps && i < 3; i++) {
		msleep(50);
		lps = reg_read(ohci, OHCI1394_HCControlSet) &
		      OHCI1394_HCControl_LPS;
	}

	if (!lps) {
		fw_error("Failed to set Link Power Status\n");
		return -EIO;
	}

	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_noByteSwapData);

	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
	reg_write(ohci, OHCI1394_LinkControlClear,
		  OHCI1394_LinkControl_rcvPhyPkt);
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_rcvSelfID |
		  OHCI1394_LinkControl_cycleTimerEnable |
		  OHCI1394_LinkControl_cycleMaster);

	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));

	ar_context_run(&ohci->ar_request_ctx);
	ar_context_run(&ohci->ar_response_ctx);

	reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
	reg_write(ohci, OHCI1394_IntEventClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_selfIDComplete |
		  OHCI1394_RQPkt | OHCI1394_RSPkt |
		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
		  OHCI1394_isochRx | OHCI1394_isochTx |
		  OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
		  OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
		  OHCI1394_masterIntEnable);
	if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);

	/* Activate link_on bit and contender bit in our self ID packets.*/
	if (ohci_update_phy_reg(card, 4, 0,
				PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
		return -EIO;

	/*
	 * When the link is not yet enabled, the atomic config rom
	 * update mechanism described below in ohci_set_config_rom()
	 * is not active.  We have to update ConfigRomHeader and
	 * BusOptions manually, and the write to ConfigROMmap takes
	 * effect immediately.  We tie this to the enabling of the
	 * link, so we have a valid config rom before enabling - the
	 * OHCI requires that ConfigROMhdr and BusOptions have valid
	 * values before enabling.
	 *
	 * However, when the ConfigROMmap is written, some controllers
	 * always read back quadlets 0 and 2 from the config rom to
	 * the ConfigRomHeader and BusOptions registers on bus reset.
	 * They shouldn't do that in this initial case where the link
	 * isn't enabled.  This means we have to use the same
	 * workaround here, setting the bus header to 0 and then writing
	 * the right values in the bus reset tasklet.
	 */

	if (config_rom) {
		ohci->next_config_rom =
			dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
					   &ohci->next_config_rom_bus,
					   GFP_KERNEL);
		if (ohci->next_config_rom == NULL)
			return -ENOMEM;

		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
		fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);
	} else {
		/*
		 * In the suspend case, config_rom is NULL, which
		 * means that we just reuse the old config rom.
		 */
		ohci->next_config_rom = ohci->config_rom;
		ohci->next_config_rom_bus = ohci->config_rom_bus;
	}

	ohci->next_header = be32_to_cpu(ohci->next_config_rom[0]);
	ohci->next_config_rom[0] = 0;
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions,
		  be32_to_cpu(ohci->next_config_rom[2]));
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	if (request_irq(dev->irq, irq_handler,
			IRQF_SHARED, ohci_driver_name, ohci)) {
		fw_error("Failed to allocate shared interrupt %d.\n",
			 dev->irq);
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		return -EIO;
	}

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_linkEnable |
		  OHCI1394_HCControl_BIBimageValid);
	flush_writes(ohci);

	/*
	 * We are ready to go, initiate bus reset to finish the
	 * initialization.
	 */

	fw_core_initiate_bus_reset(&ohci->card, 1);

	return 0;
}

static int ohci_set_config_rom(struct fw_card *card,
			       u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	unsigned long flags;
	int ret = -EBUSY;
	__be32 *next_config_rom;
	dma_addr_t uninitialized_var(next_config_rom_bus);

	ohci = fw_ohci(card);

	/*
	 * When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use.  See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place.  When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register.  All automatically and atomically.
	 *
	 * Now, there's a twist to this story.  The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values in to these registers
	 * during the atomic update, even on little endian
	 * architectures.  The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet.  In the bus reset tasklet we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
	 */

	next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;

		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
		fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
				  length * 4);

		ohci->next_header = config_rom[0];
		ohci->next_config_rom[0] = 0;

		reg_write(ohci, OHCI1394_ConfigROMmap,
			  ohci->next_config_rom_bus);
		ret = 0;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	/*
	 * Now initiate a bus reset to have the changes take
	 * effect.  We clean up the old config rom memory and DMA
	 * mappings in the bus reset tasklet, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect.
	 */
	if (ret == 0)
		fw_core_initiate_bus_reset(&ohci->card, 1);
	else
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);

	return ret;
}

static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}

static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct context *ctx = &ohci->at_request_ctx;
	struct driver_data *driver_data = packet->driver_data;
	int ret = -ENOENT;

	tasklet_disable(&ctx->tasklet);

	if (packet->ack != 0)
		goto out;

	if (packet->payload_bus)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	log_ar_at_event('T', packet->speed, packet->header, 0x20);
	driver_data->packet = NULL;
	packet->ack = RCODE_CANCELLED;
	packet->callback(packet, &ohci->card, packet->ack);
	ret = 0;
 out:
	tasklet_enable(&ctx->tasklet);

	return ret;
}

static int ohci_enable_phys_dma(struct fw_card *card,
				int node_id, int generation)
{
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	return 0;
#else
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	int n, ret = 0;

	/*
	 * FIXME:  Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
	 */

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->generation != generation) {
		ret = -ESTALE;
		goto out;
	}

	/*
	 * Note, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses.
	 */

	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);
 out:
	spin_unlock_irqrestore(&ohci->lock, flags);

	return ret;
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}

static u64 ohci_get_bus_time(struct fw_card *card)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 cycle_time;
	u64 bus_time;

	cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
	bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;

	return bus_time;
}

static void copy_iso_headers(struct iso_context *ctx, void *p)
{
	int i = ctx->header_length;

	if (i + ctx->base.header_size > PAGE_SIZE)
		return;

	/*
	 * The iso header is byteswapped to little endian by
	 * the controller, but the remaining header quadlets
	 * are big endian.  We want to present all the headers
	 * as big endian, so we have to swap the first quadlet.
	 */
	if (ctx->base.header_size > 0)
		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
	if (ctx->base.header_size > 4)
		*(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
	if (ctx->base.header_size > 8)
		memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
	ctx->header_length += ctx->base.header_size;
}

static int handle_ir_dualbuffer_packet(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct db_descriptor *db = (struct db_descriptor *) d;
	__le32 *ir_header;
	size_t header_length;
	void *p, *end;

	if (db->first_res_count != 0 && db->second_res_count != 0) {
		if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
			/* This descriptor isn't done yet, stop iteration. */
static u64 ohci_get_bus_time(struct fw_card *card)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 cycle_time;
	u64 bus_time;

	cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
	bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;

	return bus_time;
}

static void copy_iso_headers(struct iso_context *ctx, void *p)
{
	int i = ctx->header_length;

	if (i + ctx->base.header_size > PAGE_SIZE)
		return;

	/*
	 * The iso header is byteswapped to little endian by
	 * the controller, but the remaining header quadlets
	 * are big endian.  We want to present all the headers
	 * as big endian, so we have to swap the first quadlet.
	 */
	if (ctx->base.header_size > 0)
		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
	if (ctx->base.header_size > 4)
		*(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
	if (ctx->base.header_size > 8)
		memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
	ctx->header_length += ctx->base.header_size;
}
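/*
 * Annotation (not in the original file): with header_size == 8 each
 * received packet thus contributes two byte-swapped quadlets, the one
 * at p + 4 first and the one at p second, while any further quadlets
 * are copied through unmodified.  Headers that would overflow the
 * page-sized staging buffer are silently dropped by the early return
 * above.
 */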
static int handle_ir_dualbuffer_packet(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct db_descriptor *db = (struct db_descriptor *) d;
	__le32 *ir_header;
	size_t header_length;
	void *p, *end;

	if (db->first_res_count != 0 && db->second_res_count != 0) {
		if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
			/* This descriptor isn't done yet, stop iteration. */
			return 0;
		}
		ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
	}

	header_length = le16_to_cpu(db->first_req_count) -
			le16_to_cpu(db->first_res_count);

	p = db + 1;
	end = p + header_length;
	while (p < end) {
		copy_iso_headers(ctx, p);
		ctx->excess_bytes +=
			(le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
		p += max(ctx->base.header_size, (size_t)8);
	}

	ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
			le16_to_cpu(db->second_res_count);

	if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ir_header = (__le32 *) (db + 1);
		ctx->base.callback(&ctx->base,
				   le32_to_cpu(ir_header[0]) & 0xffff,
				   ctx->header_length, ctx->header,
				   ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}

static int handle_ir_packet_per_buffer(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct descriptor *pd;
	__le32 *ir_header;
	void *p;

	for (pd = d; pd <= last; pd++) {
		if (pd->transfer_status)
			break;
	}
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	p = last + 1;
	copy_iso_headers(ctx, p);

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ir_header = (__le32 *) p;
		ctx->base.callback(&ctx->base,
				   le32_to_cpu(ir_header[0]) & 0xffff,
				   ctx->header_length, ctx->header,
				   ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}

static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
		ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
				   0, NULL, ctx->base.callback_data);

	return 1;
}

static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
				int type, int channel, size_t header_size)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *ctx, *list;
	descriptor_callback_t callback;
	u64 *channels, dont_care = ~0ULL;
	u32 *mask, regs;
	unsigned long flags;
	int index, ret = -ENOMEM;

	if (type == FW_ISO_CONTEXT_TRANSMIT) {
		channels = &dont_care;
		mask = &ohci->it_context_mask;
		list = ohci->it_context_list;
		callback = handle_it_packet;
	} else {
		channels = &ohci->ir_context_channels;
		mask = &ohci->ir_context_mask;
		list = ohci->ir_context_list;
		if (ohci->use_dualbuffer)
			callback = handle_ir_dualbuffer_packet;
		else
			callback = handle_ir_packet_per_buffer;
	}

	spin_lock_irqsave(&ohci->lock, flags);
	index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
	if (index >= 0) {
		*channels &= ~(1ULL << channel);
		*mask &= ~(1 << index);
	}
	spin_unlock_irqrestore(&ohci->lock, flags);

	if (index < 0)
		return ERR_PTR(-EBUSY);

	if (type == FW_ISO_CONTEXT_TRANSMIT)
		regs = OHCI1394_IsoXmitContextBase(index);
	else
		regs = OHCI1394_IsoRcvContextBase(index);

	ctx = &list[index];
	memset(ctx, 0, sizeof(*ctx));
	ctx->header_length = 0;
	ctx->header = (void *) __get_free_page(GFP_KERNEL);
	if (ctx->header == NULL)
		goto out;

	ret = context_init(&ctx->context, ohci, regs, callback);
	if (ret < 0)
		goto out_with_header;

	return &ctx->base;

 out_with_header:
	free_page((unsigned long)ctx->header);
 out:
	spin_lock_irqsave(&ohci->lock, flags);
	*mask |= 1 << index;
	spin_unlock_irqrestore(&ohci->lock, flags);

	return ERR_PTR(ret);
}

static int ohci_start_iso(struct fw_iso_context *base,
			  s32 cycle, u32 sync, u32 tags)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct fw_ohci *ohci = ctx->context.ohci;
	u32 control, match;
	int index;

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		match = 0;
		if (cycle >= 0)
			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
				(cycle & 0x7fff) << 16;

		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
		context_run(&ctx->context, match);
	} else {
		index = ctx - ohci->ir_context_list;
		control = IR_CONTEXT_ISOCH_HEADER;
		if (ohci->use_dualbuffer)
			control |= IR_CONTEXT_DUAL_BUFFER_MODE;
		match = (tags << 28) | (sync << 8) | ctx->base.channel;
		if (cycle >= 0) {
			match |= (cycle & 0x07fff) << 12;
			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
		}

		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
		context_run(&ctx->context, control);
	}

	return 0;
}
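/*
 * Annotation (not in the original file): the 15-bit start cycle is
 * encoded differently per direction above.  For transmit it lands in
 * bits 30-16 of the value passed to context_run(); for receive it
 * lands in bits 26-12 of the ContextMatch register, next to the tag
 * mask (bits 31-28), the sync field (bits 11-8) and the channel number
 * (bits 5-0).
 */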
static int ohci_stop_iso(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
	} else {
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
	}
	flush_writes(ohci);
	context_stop(&ctx->context);

	return 0;
}

static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int index;

	ohci_stop_iso(base);
	context_release(&ctx->context);
	free_page((unsigned long)ctx->header);

	spin_lock_irqsave(&ohci->lock, flags);

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		ohci->it_context_mask |= 1 << index;
	} else {
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= 1ULL << base->channel;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);
}

static int ohci_queue_iso_transmit(struct fw_iso_context *base,
				   struct fw_iso_packet *packet,
				   struct fw_iso_buffer *buffer,
				   unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	/*
	 * FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate..
	 */

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
		d[0].req_count = cpu_to_le16(8);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
					IT_HEADER_TAG(p->tag) |
					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
					IT_HEADER_CHANNEL(ctx->base.channel) |
					IT_HEADER_SPEED(ctx->base.speed));
		header[1] =
			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page = payload_index >> PAGE_SHIFT;
		offset = payload_index & ~PAGE_MASK;
		next_page_index = (page + 1) << PAGE_SHIFT;
		length =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		payload_index += length;
	}

	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_STATUS |
				     DESCRIPTOR_BRANCH_ALWAYS |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}
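/*
 * Annotation (not in the original file): z counts the descriptors in
 * the program built above.  A normal packet starts at z = 2 because the
 * immediate isochronous header occupies two 16-byte slots (the
 * descriptor itself plus its inline data), a skip packet needs just
 * one, a user-supplied header adds one more, and each payload page
 * crossed adds another.  header_z rounds the user header up to whole
 * descriptor slots so it can live in the same allocation, addressed
 * via d_bus + z * sizeof(*d).
 */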
static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
					     struct fw_iso_packet *packet,
					     struct fw_iso_buffer *buffer,
					     unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct db_descriptor *db = NULL;
	struct descriptor *d;
	struct fw_iso_packet *p;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, length, rest;
	int page, offset, packet_count, header_size;

	/*
	 * FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate..
	 */

	p = packet;
	z = 2;

	/*
	 * The OHCI controller puts the isochronous header and trailer in the
	 * buffer, so we need at least 8 bytes.
	 */
	packet_count = p->header_length / ctx->base.header_size;
	header_size = packet_count * max(ctx->base.header_size, (size_t)8);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	rest = p->payload_length;

	/* FIXME: make packet-per-buffer/dual-buffer a context option */
	while (rest > 0) {
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		db = (struct db_descriptor *) d;
		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_BRANCH_ALWAYS);
		db->first_size =
			cpu_to_le16(max(ctx->base.header_size, (size_t)8));
		if (p->skip && rest == p->payload_length) {
			db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
			db->first_req_count = db->first_size;
		} else {
			db->first_req_count = cpu_to_le16(header_size);
		}
		db->first_res_count = db->first_req_count;
		db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));

		if (p->skip && rest == p->payload_length)
			length = 4;
		else if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;

		db->second_req_count = cpu_to_le16(length);
		db->second_res_count = db->second_req_count;
		page_bus = page_private(buffer->pages[page]);
		db->second_buffer = cpu_to_le32(page_bus + offset);

		if (p->interrupt && length == rest)
			db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
		offset = (offset + length) & ~PAGE_MASK;
		rest -= length;
		if (offset == 0)
			page++;
	}

	return 0;
}
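/*
 * Annotation (not in the original file): each dual-buffer descriptor
 * above splits a packet in two.  The first_* fields point the headers
 * at scratch space allocated directly behind the descriptor
 * (d_bus + sizeof(*db)), while the second_* fields point the payload
 * into the caller's page set, one page slice per loop iteration.  A
 * skip packet is emulated with a WAIT descriptor that accepts only a
 * 4-byte payload slice.
 */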
static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
						    struct fw_iso_packet *packet,
						    struct fw_iso_buffer *buffer,
						    unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d = NULL, *pd = NULL;
	struct fw_iso_packet *p = packet;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, rest;
	int i, j, length;
	int page, offset, packet_count, header_size, payload_per_buffer;

	/*
	 * The OHCI controller puts the isochronous header and trailer in the
	 * buffer, so we need at least 8 bytes.
	 */
	packet_count = p->header_length / ctx->base.header_size;
	header_size = max(ctx->base.header_size, (size_t)8);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	payload_per_buffer = p->payload_length / packet_count;

	for (i = 0; i < packet_count; i++) {
		/* d points to the header descriptor */
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
					 DESCRIPTOR_INPUT_MORE);
		if (p->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		d->req_count = cpu_to_le16(header_size);
		d->res_count = d->req_count;
		d->transfer_status = 0;
		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

		rest = payload_per_buffer;
		for (j = 1; j < z; j++) {
			pd = d + j;
			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
						  DESCRIPTOR_INPUT_MORE);

			if (offset + rest < PAGE_SIZE)
				length = rest;
			else
				length = PAGE_SIZE - offset;
			pd->req_count = cpu_to_le16(length);
			pd->res_count = pd->req_count;
			pd->transfer_status = 0;

			page_bus = page_private(buffer->pages[page]);
			pd->data_address = cpu_to_le32(page_bus + offset);

			offset = (offset + length) & ~PAGE_MASK;
			rest -= length;
			if (offset == 0)
				page++;
		}
		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_INPUT_LAST |
					  DESCRIPTOR_BRANCH_ALWAYS);
		if (p->interrupt && i == packet_count - 1)
			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
	}

	return 0;
}

static int ohci_queue_iso(struct fw_iso_context *base,
			  struct fw_iso_packet *packet,
			  struct fw_iso_buffer *buffer,
			  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
		ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
	else if (ctx->context.ohci->use_dualbuffer)
		ret = ohci_queue_iso_receive_dualbuffer(base, packet,
							buffer, payload);
	else
		ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
							       buffer, payload);
	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

	return ret;
}
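/*
 * Annotation (not in the original file): the dispatch above runs under
 * ohci->lock, so a packet's whole descriptor program is appended
 * atomically.  Which receive flavour is taken never changes at runtime:
 * use_dualbuffer is decided once in pci_probe() from the controller's
 * OHCI version register (with a TI quirk override).
 */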
static const struct fw_card_driver ohci_driver = {
	.enable = ohci_enable,
	.update_phy_reg = ohci_update_phy_reg,
	.set_config_rom = ohci_set_config_rom,
	.send_request = ohci_send_request,
	.send_response = ohci_send_response,
	.cancel_packet = ohci_cancel_packet,
	.enable_phys_dma = ohci_enable_phys_dma,
	.get_bus_time = ohci_get_bus_time,

	.allocate_iso_context = ohci_allocate_iso_context,
	.free_iso_context = ohci_free_iso_context,
	.queue_iso = ohci_queue_iso,
	.start_iso = ohci_start_iso,
	.stop_iso = ohci_stop_iso,
};

#ifdef CONFIG_PPC_PMAC
static void ohci_pmac_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void ohci_pmac_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
#define ohci_pmac_on(dev)
#define ohci_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */

static int __devinit pci_probe(struct pci_dev *dev,
			       const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed, version;
	u64 guid;
	int err;
	size_t size;

	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
	if (ohci == NULL) {
		err = -ENOMEM;
		goto fail;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	ohci_pmac_on(dev);

	err = pci_enable_device(dev);
	if (err) {
		fw_error("Failed to enable OHCI hardware\n");
		goto fail_free;
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

	spin_lock_init(&ohci->lock);

	tasklet_init(&ohci->bus_reset_tasklet,
		     bus_reset_tasklet, (unsigned long)ohci);

	err = pci_request_region(dev, 0, ohci_driver_name);
	if (err) {
		fw_error("MMIO resource unavailable\n");
		goto fail_disable;
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		fw_error("Failed to remap registers\n");
		err = -ENXIO;
		goto fail_iomem;
	}

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;

/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
#if !defined(CONFIG_X86_32)
	/* dual-buffer mode is broken with descriptor addresses above 2G */
	if (dev->vendor == PCI_VENDOR_ID_TI &&
	    dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
		ohci->use_dualbuffer = false;
#endif

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
	ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
			     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
#endif
	ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;

	ar_context_init(&ohci->ar_request_ctx, ohci,
			OHCI1394_AsReqRcvContextControlSet);

	ar_context_init(&ohci->ar_response_ctx, ohci,
			OHCI1394_AsRspRcvContextControlSet);

	context_init(&ohci->at_request_ctx, ohci,
		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);

	context_init(&ohci->at_response_ctx, ohci,
		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);

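	/*
	 * Annotation (not in the original file): the register accesses
	 * below probe how many isochronous contexts the controller
	 * implements.  Writing all ones to an IntMaskSet register and
	 * reading it back leaves bits set only for contexts that exist;
	 * hweight32() of the readback then sizes the context arrays.
	 */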
	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->ir_context_channels = ~0ULL;
	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	/* self-id dma buffer allocation */
	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
					       SELF_ID_BUF_SIZE,
					       &ohci->self_id_bus,
					       GFP_KERNEL);
	if (ohci->self_id_cpu == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (err)
		goto fail_self_id;

	fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
		  dev_name(&dev->dev), version >> 16, version & 0xff);

	return 0;

 fail_self_id:
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
 fail_contexts:
	kfree(ohci->ir_context_list);
	kfree(ohci->it_context_list);
	context_release(&ohci->at_response_ctx);
	context_release(&ohci->at_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	ar_context_release(&ohci->ar_request_ctx);
	pci_iounmap(dev, ohci->registers);
 fail_iomem:
	pci_release_region(dev, 0);
 fail_disable:
	pci_disable_device(dev);
 fail_free:
	kfree(&ohci->card);
	ohci_pmac_off(dev);
 fail:
	if (err == -ENOMEM)
		fw_error("Out of memory\n");

	return err;
}

static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	flush_writes(ohci);
	fw_core_remove_card(&ohci->card);

	/*
	 * FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more.
	 */

	software_reset(ohci);
	free_irq(dev->irq, ohci);

	if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->next_config_rom, ohci->next_config_rom_bus);
	if (ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
	ar_context_release(&ohci->ar_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	context_release(&ohci->at_request_ctx);
	context_release(&ohci->at_response_ctx);
	kfree(ohci->it_context_list);
	kfree(ohci->ir_context_list);
	pci_iounmap(dev, ohci->registers);
	pci_release_region(dev, 0);
	pci_disable_device(dev);
	kfree(&ohci->card);
	ohci_pmac_off(dev);

	fw_notify("Removed fw-ohci device.\n");
}

#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	err = pci_save_state(dev);
	if (err) {
		fw_error("pci_save_state failed\n");
		return err;
	}
	err = pci_set_power_state(dev, pci_choose_state(dev, state));
	if (err)
		fw_error("pci_set_power_state failed with %d\n", err);
	ohci_pmac_off(dev);

	return 0;
}
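/*
 * Annotation (not in the original file): suspend tears the controller
 * down to a software reset and releases the interrupt handler; resume
 * below restores PCI power and state, then reinitializes the controller
 * through ohci_enable(), which also re-requests the IRQ that
 * pci_suspend() freed.
 */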
static int pci_resume(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	ohci_pmac_on(dev);
	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	err = pci_enable_device(dev);
	if (err) {
		fw_error("pci_enable_device failed\n");
		return err;
	}

	return ohci_enable(&ohci->card, NULL, 0);
}
#endif

static struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name = ohci_driver_name,
	.id_table = pci_table,
	.probe = pci_probe,
	.remove = pci_remove,
#ifdef CONFIG_PM
	.resume = pci_resume,
	.suspend = pci_suspend,
#endif
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif

static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);