Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>
#include <asm/page.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "core.h"
#include "ohci.h"
#include "packet-header-definitions.h"
#include "phy-packet-definitions.h"

#include <trace/events/firewire.h>

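// Forward declaration, apparently needed because the tracepoints created below use it.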
static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk);

#define CREATE_TRACE_POINTS
#include <trace/events/firewire_ohci.h>

#define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
#define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args)

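/* Bits of the 16-bit control word in struct descriptor (OHCI 1394 DMA descriptor format). */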
#define DESCRIPTOR_OUTPUT_MORE 0
#define DESCRIPTOR_OUTPUT_LAST (1 << 12)
#define DESCRIPTOR_INPUT_MORE (2 << 12)
#define DESCRIPTOR_INPUT_LAST (3 << 12)
#define DESCRIPTOR_STATUS (1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE (2 << 8)
#define DESCRIPTOR_PING (1 << 7)
#define DESCRIPTOR_YY (1 << 6)
#define DESCRIPTOR_NO_IRQ (0 << 4)
#define DESCRIPTOR_IRQ_ERROR (1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS (3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS (3 << 2)
#define DESCRIPTOR_WAIT (3 << 0)

#define DESCRIPTOR_CMD (0xf << 12)

struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __aligned(16);

#define CONTROL_SET(regs) (regs)
#define CONTROL_CLEAR(regs) ((regs) + 4)
#define COMMAND_PTR(regs) ((regs) + 12)
#define CONTEXT_MATCH(regs) ((regs) + 16)

#define AR_BUFFER_SIZE (32*1024)
#define AR_BUFFERS_MIN DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
/* we need at least two pages for proper list management */
#define AR_BUFFERS (AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)

#define MAX_ASYNC_PAYLOAD 4096
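/* Largest possible AR packet: 16-byte header + maximum payload + trailer quadlet (status/timestamp). */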
#define MAX_AR_PACKET_SIZE (16 + MAX_ASYNC_PAYLOAD + 4)
#define AR_WRAPAROUND_PAGES DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)

struct ar_context {
	struct fw_ohci *ohci;
	struct page *pages[AR_BUFFERS];
	void *buffer;
	struct descriptor *descriptors;
	dma_addr_t descriptors_bus;
	void *pointer;
	unsigned int last_buffer_index;
	u32 regs;
	struct work_struct work;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[];
};

struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;
	u32 current_bus;
	bool running;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor block in the DMA program. It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;
	int prev_z;

	descriptor_callback_t callback;
};

struct at_context {
	struct context context;
	struct work_struct work;
	bool flushing;
};

struct iso_context {
	struct fw_iso_context base;
	struct context context;
	void *header;
	size_t header_length;
	unsigned long flushing_completions;
	u32 mc_buffer_bus;
	u16 mc_completed;
	u16 last_timestamp;
	u8 sync;
	u8 tags;
};

#define CONFIG_ROM_SIZE (CSR_CONFIG_ROM_END - CSR_CONFIG_ROM)

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	int node_id;
	int generation;
	int request_generation; /* for timestamping incoming requests */
	unsigned quirks;
	unsigned int pri_req_max;
	u32 bus_time;
	bool bus_time_running;
	bool is_root;
	bool csr_state_setclear_abdicate;
	int n_ir;
	int n_it;
	/*
	 * Spinlock for accessing fw_ohci data. Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;

	struct mutex phy_reg_mutex;

	void *misc_buffer;
	dma_addr_t misc_buffer_bus;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct at_context at_request_ctx;
	struct at_context at_response_ctx;

	u32 it_context_support;
	u32 it_context_mask; /* unoccupied IT contexts */
	struct iso_context *it_context_list;
	u64 ir_context_channels; /* unoccupied channels */
	u32 ir_context_support;
	u32 ir_context_mask; /* unoccupied IR contexts */
	struct iso_context *ir_context_list;
	u64 mc_channels; /* channels in use by the multichannel IR context */
	bool mc_allocated;

	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32 next_header;

	__le32 *self_id;
	dma_addr_t self_id_bus;

	u32 self_id_buffer[512];
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000
#define IR_CONTEXT_BUFFER_FILL 0x80000000
#define IR_CONTEXT_ISOCH_HEADER 0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000

#define CONTEXT_RUN 0x8000
#define CONTEXT_WAKE 0x1000
#define CONTEXT_DEAD 0x0800
#define CONTEXT_ACTIVE 0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES 0xf
#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8

#define OHCI1394_REGISTER_SIZE 0x800
#define OHCI1394_PCI_HCI_Control 0x40
#define SELF_ID_BUF_SIZE 0x800
#define OHCI_VERSION_1_1 0x010010

static char ohci_driver_name[] = KBUILD_MODNAME;

#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
#define PCI_DEVICE_ID_AGERE_FW643 0x5901
#define PCI_DEVICE_ID_CREATIVE_SB1394 0x4001
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
#define PCI_DEVICE_ID_TI_TSB12LV26 0x8020
#define PCI_DEVICE_ID_TI_TSB82AA2 0x8025
#define PCI_DEVICE_ID_VIA_VT630X 0x3044
#define PCI_REV_ID_VIA_VT6306 0x46
#define PCI_DEVICE_ID_VIA_VT6315 0x3403

#define QUIRK_CYCLE_TIMER 0x1
#define QUIRK_RESET_PACKET 0x2
#define QUIRK_BE_HEADERS 0x4
#define QUIRK_NO_1394A 0x8
#define QUIRK_NO_MSI 0x10
#define QUIRK_TI_SLLZ059 0x20
#define QUIRK_IR_WAKE 0x40

// On AMD Ryzen machines, a VIA VT6306/6307/6308 sitting behind an Asmedia ASM1083/1085
// PCIe-to-PCI bridge has an inconvenient defect: read accesses to the 'Isochronous Cycle Timer'
// register (at offset 0xf0 in PCI I/O space) often cause an unexpected system reboot. The exact
// mechanism is unclear, since reads of the other registers, e.g. 'Node ID', are perfectly safe;
// it is probably triggered by the detection of some type of PCIe error.
#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ 0x80000000

#if IS_ENABLED(CONFIG_X86)

static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
{
	return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
}

#define PCI_DEVICE_ID_ASMEDIA_ASM108X 0x1080

static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev)
{
	const struct pci_dev *pcie_to_pci_bridge;

	// Detect any type of AMD Ryzen machine.
	if (!static_cpu_has(X86_FEATURE_ZEN))
		return false;

	// Detect VIA VT6306/6307/6308.
	if (pdev->vendor != PCI_VENDOR_ID_VIA)
		return false;
	if (pdev->device != PCI_DEVICE_ID_VIA_VT630X)
		return false;

	// Detect Asmedia ASM1083/1085.
	pcie_to_pci_bridge = pdev->bus->self;
	if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA)
		return false;
	if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X)
		return false;

	return true;
}

#else
#define has_reboot_by_cycle_timer_read_quirk(ohci) false
#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev) false
#endif

/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
	unsigned short vendor, device, revision, flags;
} ohci_quirks[] = {
	{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
		QUIRK_BE_HEADERS},

	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306,
		QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
		QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};

/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
	", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
	", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
	", AR/selfID endianness = " __stringify(QUIRK_BE_HEADERS)
	", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
	", disable MSI = " __stringify(QUIRK_NO_MSI)
	", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
	", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
	")");

static bool param_remote_dma;
module_param_named(remote_dma, param_remote_dma, bool, 0444);
MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

/*
 * Beware! read_phy_reg(), write_phy_reg(), update_phy_reg(), and
 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
 * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
 * directly. Exceptions are intrinsically serialized contexts like pci_probe.
 */
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (val & OHCI1394_PhyControl_ReadDone)
			return OHCI1394_PhyControl_ReadData(val);

		/*
		 * Try a few times without waiting. Sleeping is necessary
		 * only when the link/PHY interface is busy.
		 */
		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to read phy reg %d\n", addr);
	dump_stack();

	return -EBUSY;
}

static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
{
	int i;

	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, val));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (!(val & OHCI1394_PhyControl_WritePending))
			return 0;

		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
	dump_stack();

	return -EBUSY;
}

static int update_phy_reg(struct fw_ohci *ohci, int addr,
			  int clear_bits, int set_bits)
{
	int ret = read_phy_reg(ohci, addr);
	if (ret < 0)
		return ret;

	/*
	 * The interrupt status bits are cleared by writing a one bit.
	 * Avoid clearing them unless explicitly requested in set_bits.
	 */
	if (addr == 5)
		clear_bits |= PHY_INT_STATUS_BITS;

	return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
}

static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
	int ret;

	ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
	if (ret < 0)
		return ret;

	return read_phy_reg(ohci, addr);
}

static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
	struct fw_ohci *ohci = fw_ohci(card);

	guard(mutex)(&ohci->phy_reg_mutex);

	return read_phy_reg(ohci, addr);
}

static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);

	guard(mutex)(&ohci->phy_reg_mutex);

	return update_phy_reg(ohci, addr, clear_bits, set_bits);
}

static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
{
	return page_private(ctx->pages[i]);
}

static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
{
	struct descriptor *d;

	d = &ctx->descriptors[index];
	d->branch_address &= cpu_to_le32(~0xf);
	d->res_count = cpu_to_le16(PAGE_SIZE);
	d->transfer_status = 0;

	wmb(); /* finish init of new descriptors before branch_address update */
	d = &ctx->descriptors[ctx->last_buffer_index];
	d->branch_address |= cpu_to_le32(1);

	ctx->last_buffer_index = index;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}

static void ar_context_release(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	unsigned int i;

	if (!ctx->buffer)
		return;

	vunmap(ctx->buffer);

	for (i = 0; i < AR_BUFFERS; i++) {
		if (ctx->pages[i])
			dma_free_pages(dev, PAGE_SIZE, ctx->pages[i],
				       ar_buffer_bus(ctx, i), DMA_FROM_DEVICE);
	}
}

static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
{
	struct fw_ohci *ohci = ctx->ohci;

	if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
		reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
		flush_writes(ohci);

		ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
	}
	/* FIXME: restart? */
}

static inline unsigned int ar_next_buffer_index(unsigned int index)
{
	return (index + 1) % AR_BUFFERS;
}

static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
{
	return ar_next_buffer_index(ctx->last_buffer_index);
}

/*
 * We search for the buffer that contains the last AR packet DMA data written
 * by the controller.
 */
static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
						 unsigned int *buffer_offset)
{
	unsigned int i, next_i, last = ctx->last_buffer_index;
	__le16 res_count, next_res_count;

	i = ar_first_buffer_index(ctx);
	res_count = READ_ONCE(ctx->descriptors[i].res_count);

	/* A buffer that is not yet completely filled must be the last one. */
	while (i != last && res_count == 0) {

		/* Peek at the next descriptor. */
		next_i = ar_next_buffer_index(i);
		rmb(); /* read descriptors in order */
		next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
		/*
		 * If the next descriptor is still empty, we must stop at this
		 * descriptor.
		 */
		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
			/*
			 * The exception is when the DMA data for one packet is
			 * split over three buffers; in this case, the middle
			 * buffer's descriptor might be never updated by the
			 * controller and look still empty, and we have to peek
			 * at the third one.
			 */
			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
				next_i = ar_next_buffer_index(next_i);
				rmb();
				next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
				if (next_res_count != cpu_to_le16(PAGE_SIZE))
					goto next_buffer_is_active;
			}

			break;
		}

next_buffer_is_active:
		i = next_i;
		res_count = next_res_count;
	}

	rmb(); /* read res_count before the DMA data */

	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
	if (*buffer_offset > PAGE_SIZE) {
		*buffer_offset = 0;
		ar_context_abort(ctx, "corrupted descriptor");
	}

	return i;
}

static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
				    unsigned int end_buffer_index,
				    unsigned int end_buffer_offset)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer_index) {
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					PAGE_SIZE, DMA_FROM_DEVICE);
		i = ar_next_buffer_index(i);
	}
	if (end_buffer_offset > 0)
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					end_buffer_offset, DMA_FROM_DEVICE);
}

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
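/*
 * QUIRK_BE_HEADERS is only set for Apple UniNorth controllers (see ohci_quirks[]),
 * which deliver the header quadlets of AR packets and self IDs in big-endian order.
 */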
static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk)
{
	return has_be_header_quirk ? (__force __u32)value : le32_to_cpu(value);
}

static bool has_be_header_quirk(const struct fw_ohci *ohci)
{
	return !!(ohci->quirks & QUIRK_BE_HEADERS);
}
#else
static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk __maybe_unused)
{
	return le32_to_cpu(value);
}

static bool has_be_header_quirk(const struct fw_ohci *ohci)
{
	return false;
}
#endif

static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0], has_be_header_quirk(ohci));
	p.header[1] = cond_le32_to_cpu(buffer[1], has_be_header_quirk(ohci));
	p.header[2] = cond_le32_to_cpu(buffer[2], has_be_header_quirk(ohci));

	tcode = async_header_get_tcode(p.header);
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
		p.header_length = 16;
		p.payload_length = async_header_get_data_length(p.header);
		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
			ar_context_abort(ctx, "invalid packet length");
			return NULL;
		}
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_LINK_INTERNAL:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		ar_context_abort(ctx, "invalid tcode");
		return NULL;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
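	/* Trailer quadlet written by the controller: timestamp in bits 0-15, evt and speed above. */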
	status = cond_le32_to_cpu(buffer[length], has_be_header_quirk(ohci));
	evt = (status >> 16) & 0x1f;

	p.ack = evt - 16;
	p.speed = (status >> 21) & 0x7;
	p.timestamp = status & 0xffff;
	p.generation = ohci->request_generation;

	/*
	 * Several controllers, notably from NEC and VIA, forget to
	 * write ack_complete status at PHY packet reception.
	 */
	if (evt == OHCI1394_evt_no_status && tcode == TCODE_LINK_INTERNAL)
		p.ack = ACK_COMPLETE;

	/*
	 * The OHCI bus reset handler synthesizes a PHY packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3). This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation. We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation. We set the correct generation for these
	 * at a slightly incorrect time (in handle_selfid_complete_event).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!(ohci->quirks & QUIRK_RESET_PACKET))
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}

static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
{
	void *next;

	while (p < end) {
		next = handle_ar_packet(ctx, p);
		if (!next)
			return p;
		p = next;
	}

	return p;
}

static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer) {
		dma_sync_single_for_device(ctx->ohci->card.device,
					   ar_buffer_bus(ctx, i),
					   PAGE_SIZE, DMA_FROM_DEVICE);
		ar_context_link_page(ctx, i);
		i = ar_next_buffer_index(i);
	}
}

static void ohci_ar_context_work(struct work_struct *work)
{
	struct ar_context *ctx = from_work(ctx, work, work);
	unsigned int end_buffer_index, end_buffer_offset;
	void *p, *end;

	p = ctx->pointer;
	if (!p)
		return;

	end_buffer_index = ar_search_last_active_buffer(ctx, &end_buffer_offset);
	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;

	if (end_buffer_index < ar_first_buffer_index(ctx)) {
		// The filled part of the overall buffer wraps around; handle all packets up to the
		// buffer end here. If the last packet wraps around, its tail will be visible after
		// the buffer end because the buffer start pages are mapped there again.
		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
		p = handle_ar_packets(ctx, p, buffer_end);
		if (p < buffer_end)
			goto error;
		// adjust p to point back into the actual buffer
		p -= AR_BUFFERS * PAGE_SIZE;
	}

	p = handle_ar_packets(ctx, p, end);
	if (p != end) {
		if (p > end)
			ar_context_abort(ctx, "inconsistent descriptor");
		goto error;
	}

	ctx->pointer = p;
	ar_recycle_buffers(ctx, end_buffer_index);

	return;
error:
	ctx->pointer = NULL;
}

static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
			   unsigned int descriptors_offset, u32 regs)
{
	struct device *dev = ohci->card.device;
	unsigned int i;
	dma_addr_t dma_addr;
	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
	struct descriptor *d;

	ctx->regs = regs;
	ctx->ohci = ohci;
	INIT_WORK(&ctx->work, ohci_ar_context_work);

	for (i = 0; i < AR_BUFFERS; i++) {
		ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
						DMA_FROM_DEVICE, GFP_KERNEL);
		if (!ctx->pages[i])
			goto out_of_memory;
		set_page_private(ctx->pages[i], dma_addr);
		dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE,
					   DMA_FROM_DEVICE);
	}

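	/*
	 * Map the buffer pages contiguously, and map the first pages a second
	 * time behind the buffer end, so that a packet which wraps around the
	 * ring can still be read as one contiguous block (see
	 * ohci_ar_context_work()).
	 */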
	for (i = 0; i < AR_BUFFERS; i++)
		pages[i] = ctx->pages[i];
	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
		pages[AR_BUFFERS + i] = ctx->pages[i];
	ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
	if (!ctx->buffer)
		goto out_of_memory;

	ctx->descriptors = ohci->misc_buffer + descriptors_offset;
	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;

	for (i = 0; i < AR_BUFFERS; i++) {
		d = &ctx->descriptors[i];
		d->req_count = cpu_to_le16(PAGE_SIZE);
		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
					 DESCRIPTOR_STATUS |
					 DESCRIPTOR_BRANCH_ALWAYS);
		d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i));
		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
			ar_next_buffer_index(i) * sizeof(struct descriptor));
	}

	return 0;

out_of_memory:
	ar_context_release(ctx);

	return -ENOMEM;
}

static void ar_context_run(struct ar_context *ctx)
{
	unsigned int i;

	for (i = 0; i < AR_BUFFERS; i++)
		ar_context_link_page(ctx, i);

	ctx->pointer = ctx->buffer;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
}

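/*
 * A two-slot immediate block whose first descriptor has DESCRIPTOR_BRANCH_ALWAYS
 * set keeps its branch address in that first descriptor; in all other blocks the
 * branch address sits in the last descriptor.
 */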
static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
	__le16 branch;

	branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
		return d;
	else
		return d + z - 1;
}

static void context_retire_descriptors(struct context *ctx)
{
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			  struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;
		ctx->current_bus = address;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
		    address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					  struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			// If we've advanced to the next buffer, move the previous buffer to the
			// free list.
			old_desc->used = 0;
			guard(spinlock_irqsave)(&ctx->ohci->lock);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
		}
		ctx->last = last;
	}
}

static void ohci_at_context_work(struct work_struct *work)
{
	struct at_context *ctx = from_work(ctx, work, work);

	context_retire_descriptors(&ctx->context);
}

static void ohci_isoc_context_work(struct work_struct *work)
{
	struct fw_iso_context *base = from_work(base, work, work);
	struct iso_context *isoc_ctx = container_of(base, struct iso_context, base);

	context_retire_descriptors(&isoc_ctx->context);
}

/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context. Must be called with ohci->lock held.
 */
static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t bus_addr;
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program. This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dmam_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, &bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;
	/*
	 * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
	 * for descriptors, even 0x10-byte ones. This can cause page faults when
	 * an IOMMU is in use and the oversized read crosses a page boundary.
	 * Work around this by always leaving at least 0x10 bytes of padding.
	 */
	desc->buffer_size = PAGE_SIZE - offset - 0x10;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}

static int context_init(struct context *ctx, struct fw_ohci *ohci,
			u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
				      struct descriptor_buffer, list);

	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent. That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;
	ctx->prev_z = 1;

	return 0;
}

static void context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) {
		dmam_free_coherent(card->device, PAGE_SIZE, desc,
				   desc->buffer_bus - ((void *)&desc->buffer - (void *)desc));
	}
}

/* Must be called with ohci->lock held */
static struct descriptor *context_get_descriptors(struct context *ctx,
						  int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				  struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}

static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	ctx->running = true;
	flush_writes(ohci);
}

static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;
	struct descriptor *d_branch;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);

	wmb(); /* finish init of new descriptors before branch_address update */

	d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
	d_branch->branch_address = cpu_to_le32(d_bus | z);

	/*
	 * VT6306 incorrectly checks only the single descriptor at the
	 * CommandPtr when the wake bit is written, so if it's a
	 * multi-descriptor block starting with an INPUT_MORE, put a copy of
	 * the branch address in the first descriptor.
	 *
	 * Not doing this for transmit contexts since not sure how it interacts
	 * with skip addresses.
	 */
	if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
	    d_branch != ctx->prev &&
	    (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) ==
	     cpu_to_le16(DESCRIPTOR_INPUT_MORE)) {
		ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	}

	ctx->prev = d;
	ctx->prev_z = z;
}

static void context_stop(struct context *ctx)
{
	struct fw_ohci *ohci = ctx->ohci;
	u32 reg;
	int i;

	reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	ctx->running = false;

	for (i = 0; i < 1000; i++) {
		reg = reg_read(ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			return;

		if (i)
			udelay(10);
	}
	ohci_err(ohci, "DMA context still active (0x%08x)\n", reg);
}

struct driver_data {
	u8 inline_data[8];
	struct fw_packet *packet;
};

/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int at_context_queue_packet(struct at_context *ctx, struct fw_packet *packet)
{
	struct context *context = &ctx->context;
	struct fw_ohci *ohci = context->ohci;
	dma_addr_t d_bus, payload_bus;
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;

	d = context_get_descriptors(context, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

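	/*
	 * The four reserved slots: d[0] is an immediate descriptor whose data,
	 * the packet header, is stored from d[1] onwards; d[2] serves as the
	 * payload descriptor when there is a payload; d[3] carries the
	 * driver_data, including small inline payloads.
	 */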
	d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	tcode = async_header_get_tcode(packet->header);
	header = (__le32 *) &d[1];
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		ohci1394_at_data_set_src_bus_id(header, false);
		ohci1394_at_data_set_speed(header, packet->speed);
		ohci1394_at_data_set_tlabel(header, async_header_get_tlabel(packet->header));
		ohci1394_at_data_set_retry(header, async_header_get_retry(packet->header));
		ohci1394_at_data_set_tcode(header, tcode);

		ohci1394_at_data_set_destination_id(header,
						    async_header_get_destination(packet->header));

		if (ctx == &ohci->at_response_ctx) {
			ohci1394_at_data_set_rcode(header, async_header_get_rcode(packet->header));
		} else {
			ohci1394_at_data_set_destination_offset(header,
							async_header_get_offset(packet->header));
		}

		if (tcode_is_block_packet(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
		break;
	case TCODE_LINK_INTERNAL:
		ohci1394_at_data_set_speed(header, packet->speed);
		ohci1394_at_data_set_tcode(header, TCODE_LINK_INTERNAL);

		header[1] = cpu_to_le32(packet->header[1]);
		header[2] = cpu_to_le32(packet->header[2]);
		d[0].req_count = cpu_to_le16(12);

		if (is_ping_packet(&packet->header[1]))
			d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
		break;

	case TCODE_STREAM_DATA:
		ohci1394_it_data_set_speed(header, packet->speed);
		ohci1394_it_data_set_tag(header, isoc_header_get_tag(packet->header[0]));
		ohci1394_it_data_set_channel(header, isoc_header_get_channel(packet->header[0]));
		ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA);
		ohci1394_it_data_set_sync(header, isoc_header_get_sy(packet->header[0]));

		ohci1394_it_data_set_data_length(header, isoc_header_get_data_length(packet->header[0]));

		d[0].req_count = cpu_to_le16(8);
		break;

	default:
		/* BUG(); */
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		if (packet->payload_length > sizeof(driver_data->inline_data)) {
			payload_bus = dma_map_single(ohci->card.device,
						     packet->payload,
						     packet->payload_length,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(ohci->card.device, payload_bus)) {
				packet->ack = RCODE_SEND_ERROR;
				return -1;
			}
			packet->payload_bus = payload_bus;
			packet->payload_mapped = true;
		} else {
			memcpy(driver_data->inline_data, packet->payload,
			       packet->payload_length);
			payload_bus = d_bus + 3 * sizeof(*d);
		}

		d[2].req_count = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/* FIXME: Document how the locking works. */
	if (ohci->generation != packet->generation) {
		if (packet->payload_mapped)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

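	/* The block always occupies four slots, so count the unused ones as extra. */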
	context_append(context, d, z, 4 - z);

	if (context->running)
		reg_write(ohci, CONTROL_SET(context->regs), CONTEXT_WAKE);
	else
		context_run(context, 0);

	return 0;
}

static void at_context_flush(struct at_context *ctx)
{
	// Avoid dead lock due to programming mistake.
	if (WARN_ON_ONCE(current_work() == &ctx->work))
		return;

	disable_work_sync(&ctx->work);

	WRITE_ONCE(ctx->flushing, true);
	ohci_at_context_work(&ctx->work);
	WRITE_ONCE(ctx->flushing, false);

	enable_work(&ctx->work);
}

static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct at_context *ctx = container_of(context, struct at_context, context);
	struct fw_ohci *ohci = ctx->context.ohci;
	struct driver_data *driver_data;
	struct fw_packet *packet;
	int evt;

	if (last->transfer_status == 0 && !READ_ONCE(ctx->flushing))
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * A flushed packet should give the same error as one sent
		 * with a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		if (READ_ONCE(ctx->flushing))
			packet->ack = RCODE_GENERATION;
		else {
			/*
			 * Using a valid (current) generation count, but the
			 * node is not on the bus or not sending acks.
			 */
			packet->ack = RCODE_NO_ACK;
		}
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	case OHCI1394_evt_no_status:
		if (READ_ONCE(ctx->flushing)) {
			packet->ack = RCODE_GENERATION;
			break;
		}
		fallthrough;

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}

static u32 get_cycle_time(struct fw_ohci *ohci);

static void handle_local_rom(struct fw_ohci *ohci,
			     struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = async_header_get_tcode(packet->header);
	if (tcode_is_block_packet(tcode))
		length = async_header_get_data_length(packet->header);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!tcode_is_read_request(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	// Timestamping on behalf of the hardware.
	response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_lock(struct fw_ohci *ohci,
			      struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel, try;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = async_header_get_tcode(packet->header);
	length = async_header_get_data_length(packet->header);
	payload = packet->payload;
	ext_tcode = async_header_get_extended_tcode(packet->header);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

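	/* Poll for completion; bit 31 of CSRControl reads back as the "done" flag. */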
	for (try = 0; try < 20; try++)
		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
			lock_old = cpu_to_be32(reg_read(ohci,
							OHCI1394_CSRData));
			fw_fill_response(&response, packet->header,
					 RCODE_COMPLETE,
					 &lock_old, sizeof(lock_old));
			goto out;
		}

	ohci_err(ohci, "swap not done (CSR lock timeout)\n");
	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);

 out:
	// Timestamping on behalf of the hardware.
	response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_request(struct at_context *ctx, struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->context.ohci;
	u64 offset, csr;

	if (ctx == &ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ohci->card, packet->ack);
	}

	offset = async_header_get_offset(packet->header);
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ohci, packet, csr);
		break;
	default:
		if (ctx == &ohci->at_request_ctx)
			fw_core_handle_request(&ohci->card, packet);
		else
			fw_core_handle_response(&ohci->card, packet);
		break;
	}

	if (ctx == &ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ohci->card, packet->ack);
	}
}

static void at_context_transmit(struct at_context *ctx, struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->context.ohci;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ohci->lock, flags);

	if (async_header_get_destination(packet->header) == ohci->node_id &&
	    ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ohci->lock, flags);

		// Timestamping on behalf of the hardware.
		packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));

		handle_local_request(ctx, packet);
		return;
	}

	ret = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ohci->lock, flags);

	if (ret < 0) {
		// Timestamping on behalf of the hardware.
		packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));

		packet->callback(packet, &ohci->card, packet->ack);
	}
}

static void detect_dead_context(struct fw_ohci *ohci,
				const char *name, unsigned int regs)
{
	static const char *const evts[] = {
		[0x00] = "evt_no_status",	[0x01] = "-reserved-",
		[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
		[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
		[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
		[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
		[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
		[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
		[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
		[0x10] = "-reserved-",		[0x11] = "ack_complete",
		[0x12] = "ack_pending ",	[0x13] = "-reserved-",
		[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
		[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
		[0x18] = "-reserved-",		[0x19] = "-reserved-",
		[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
		[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
		[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
		[0x20] = "pending/cancelled",
	};
	u32 ctl;

	ctl = reg_read(ohci, CONTROL_SET(regs));
	if (ctl & CONTEXT_DEAD)
		ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
			 name, evts[ctl & 0x1f]);
}

static void handle_dead_contexts(struct fw_ohci *ohci)
{
	unsigned int i;
	char name[8];

	detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
	detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
	detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
	detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
	for (i = 0; i < 32; ++i) {
		if (!(ohci->it_context_support & (1 << i)))
			continue;
		sprintf(name, "IT%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
	}
	for (i = 0; i < 32; ++i) {
		if (!(ohci->ir_context_support & (1 << i)))
			continue;
		sprintf(name, "IR%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
	}
	/* TODO: maybe try to flush and restart the dead contexts */
}

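/*
 * Convert a cycle timer value (cycleSeconds in bits 31-25, cycleCount in
 * bits 24-12, cycleOffset in bits 11-0) into a flat count of 24.576 MHz
 * ticks: 3072 ticks per 125 us cycle, 8000 cycles per second.
 */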
static u32 cycle_timer_ticks(u32 cycle_timer)
{
	u32 ticks;

	ticks = cycle_timer & 0xfff;
	ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
	ticks += (3072 * 8000) * (cycle_timer >> 25);

	return ticks;
}

/*
 * Some controllers exhibit one or more of the following bugs when updating the
 * iso cycle timer register:
 *  - When the lowest six bits are wrapping around to zero, a read that happens
 *    at the same time will return garbage in the lowest ten bits.
 *  - When the cycleOffset field wraps around to zero, the cycleCount field is
 *    not incremented for about 60 ns.
 *  - Occasionally, the entire register reads zero.
 *
 * To catch these, we read the register three times and ensure that the
 * difference between each two consecutive reads is approximately the same, i.e.
 * less than twice the other. Furthermore, any negative difference indicates an
 * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
 * execute, so we have enough precision to compute the ratio of the differences.)
 */
static u32 get_cycle_time(struct fw_ohci *ohci)
{
	u32 c0, c1, c2;
	u32 t0, t1, t2;
	s32 diff01, diff12;
	int i;

	if (has_reboot_by_cycle_timer_read_quirk(ohci))
		return 0;

	c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);

	if (ohci->quirks & QUIRK_CYCLE_TIMER) {
		i = 0;
		c1 = c2;
		c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		do {
			c0 = c1;
			c1 = c2;
			c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
			t0 = cycle_timer_ticks(c0);
			t1 = cycle_timer_ticks(c1);
			t2 = cycle_timer_ticks(c2);
			diff01 = t1 - t0;
			diff12 = t2 - t1;
		} while ((diff01 <= 0 || diff12 <= 0 ||
			  diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
			 && i++ < 20);
	}

	return c2;
}

/*
 * This function has to be called at least every 64 seconds. The bus_time
 * field stores not only the upper 25 bits of the BUS_TIME register but also
 * the most significant bit of the cycle timer in bit 6 so that we can detect
 * changes in this bit.
 */
static u32 update_bus_time(struct fw_ohci *ohci)
{
	u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;

	if (unlikely(!ohci->bus_time_running)) {
		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
		ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) |
				 (cycle_time_seconds & 0x40);
		ohci->bus_time_running = true;
	}

	if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
		ohci->bus_time += 0x40;

	return ohci->bus_time | cycle_time_seconds;
}

static int get_status_for_port(struct fw_ohci *ohci, int port_index,
			       enum phy_packet_self_id_port_status *status)
{
	int reg;

	scoped_guard(mutex, &ohci->phy_reg_mutex) {
		reg = write_phy_reg(ohci, 7, port_index);
		if (reg < 0)
			return reg;

		reg = read_phy_reg(ohci, 8);
		if (reg < 0)
			return reg;
	}

	switch (reg & 0x0f) {
	case 0x06:
		// is child node (connected to parent node)
		*status = PHY_PACKET_SELF_ID_PORT_STATUS_PARENT;
		break;
	case 0x0e:
		// is parent node (connected to child node)
		*status = PHY_PACKET_SELF_ID_PORT_STATUS_CHILD;
		break;
	default:
		// not connected
		*status = PHY_PACKET_SELF_ID_PORT_STATUS_NCONN;
		break;
	}

	return 0;
}

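/*
 * Find the position at which the given self ID keeps the buffer sorted by
 * phy ID; returns -1 if an entry with the same phy ID is already present.
 */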
static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
			   int self_id_count)
{
	unsigned int left_phy_id = phy_packet_self_id_get_phy_id(self_id);
	int i;

	for (i = 0; i < self_id_count; i++) {
		u32 entry = ohci->self_id_buffer[i];
		unsigned int right_phy_id = phy_packet_self_id_get_phy_id(entry);

		if (left_phy_id == right_phy_id)
			return -1;
		if (left_phy_id < right_phy_id)
			return i;
	}
	return i;
}

static int detect_initiated_reset(struct fw_ohci *ohci, bool *is_initiated_reset)
{
	int reg;

	guard(mutex)(&ohci->phy_reg_mutex);

	// Select page 7
	reg = write_phy_reg(ohci, 7, 0xe0);
	if (reg < 0)
		return reg;

	reg = read_phy_reg(ohci, 8);
	if (reg < 0)
		return reg;

	// set PMODE bit
	reg |= 0x40;
	reg = write_phy_reg(ohci, 8, reg);
	if (reg < 0)
		return reg;

	// read register 12
	reg = read_phy_reg(ohci, 12);
	if (reg < 0)
		return reg;

	// bit 3 indicates "initiated reset"
	*is_initiated_reset = !!((reg & 0x08) == 0x08);

	return 0;
}

/*
 * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
 * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
 * Construct the selfID from phy register contents.
 */
static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
{
	int reg, i, pos, err;
	bool is_initiated_reset;
	u32 self_id = 0;

	// link active 1, speed 3, bridge 0, contender 1, more packets 0.
	phy_packet_set_packet_identifier(&self_id, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID);
	phy_packet_self_id_zero_set_link_active(&self_id, true);
	phy_packet_self_id_zero_set_scode(&self_id, SCODE_800);
	phy_packet_self_id_zero_set_contender(&self_id, true);

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		ohci_notice(ohci,
			    "node ID not valid, new bus reset in progress\n");
		return -EBUSY;
	}
	phy_packet_self_id_set_phy_id(&self_id, reg & 0x3f);

	reg = ohci_read_phy_reg(&ohci->card, 4);
	if (reg < 0)
		return reg;
	phy_packet_self_id_zero_set_power_class(&self_id, reg & 0x07);

	reg = ohci_read_phy_reg(&ohci->card, 1);
	if (reg < 0)
		return reg;
	phy_packet_self_id_zero_set_gap_count(&self_id, reg & 0x3f);

	for (i = 0; i < 3; i++) {
		enum phy_packet_self_id_port_status status;

		err = get_status_for_port(ohci, i, &status);
		if (err < 0)
			return err;

		self_id_sequence_set_port_status(&self_id, 1, i, status);
	}

	err = detect_initiated_reset(ohci, &is_initiated_reset);
	if (err < 0)
		return err;
	phy_packet_self_id_zero_set_initiated_reset(&self_id, is_initiated_reset);

	pos = get_self_id_pos(ohci, self_id, self_id_count);
	if (pos >= 0) {
		memmove(&(ohci->self_id_buffer[pos+1]),
			&(ohci->self_id_buffer[pos]),
			(self_id_count - pos) * sizeof(*ohci->self_id_buffer));
		ohci->self_id_buffer[pos] = self_id;
		self_id_count++;
	}
	return self_id_count;
}

static irqreturn_t handle_selfid_complete_event(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	int self_id_count, generation, new_generation, i, j;
	u32 reg, quadlet;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;
	bool is_new_root;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		ohci_notice(ohci,
			    "node ID not valid, new bus reset in progress\n");
		goto end;
	}
	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		ohci_notice(ohci, "misconfigured bus\n");
		goto end;
1848 }
1849 ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
1850 OHCI1394_NodeID_nodeNumber);
1851
1852 is_new_root = (reg & OHCI1394_NodeID_root) != 0;
1853 if (!(ohci->is_root && is_new_root))
1854 reg_write(ohci, OHCI1394_LinkControlSet,
1855 OHCI1394_LinkControl_cycleMaster);
1856 ohci->is_root = is_new_root;
1857
1858 reg = reg_read(ohci, OHCI1394_SelfIDCount);
1859 if (ohci1394_self_id_count_is_error(reg)) {
1860 ohci_notice(ohci, "self ID receive error\n");
1861 goto end;
1862 }
1863
1864 trace_self_id_complete(ohci->card.index, reg, ohci->self_id, has_be_header_quirk(ohci));
1865
1866 /*
1867 * The count in the SelfIDCount register is the number of
1868 * bytes in the self ID receive buffer. Since we also receive
1869 * the inverted quadlets and a header quadlet, we shift one
1870 * bit extra to get the actual number of self IDs.
1871 */
1872 self_id_count = ohci1394_self_id_count_get_size(reg) >> 1;
1873
1874 if (self_id_count > 252) {
1875 ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
1876 goto end;
1877 }
1878
1879 quadlet = cond_le32_to_cpu(ohci->self_id[0], has_be_header_quirk(ohci));
1880 generation = ohci1394_self_id_receive_q0_get_generation(quadlet);
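	// Order the generation read before the self ID reads below; see the
	// seqlock-like consistency check further down.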
1881 rmb();
1882
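	// self_id[0] is the header quadlet; after it, each self ID quadlet is
	// followed by its bitwise inverse, so i steps through quadlet pairs
	// while j counts validated self IDs.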
1883 for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
1884 u32 id = cond_le32_to_cpu(ohci->self_id[i], has_be_header_quirk(ohci));
1885 u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1], has_be_header_quirk(ohci));
1886
1887 if (id != ~id2) {
1888 /*
1889 * If the invalid data looks like a cycle start packet,
1890 * it's likely to be the result of the cycle master
1891 * having a wrong gap count. In this case, the self IDs
1892 * so far are valid and should be processed so that the
1893 * bus manager can then correct the gap count.
1894 */
1895 if (id == 0xffff008f) {
1896 ohci_notice(ohci, "ignoring spurious self IDs\n");
1897 self_id_count = j;
1898 break;
1899 }
1900
1901 ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
1902 j, self_id_count, id, id2);
1903 goto end;
1904 }
1905 ohci->self_id_buffer[j] = id;
1906 }
1907
1908 if (ohci->quirks & QUIRK_TI_SLLZ059) {
1909 self_id_count = find_and_insert_self_id(ohci, self_id_count);
1910 if (self_id_count < 0) {
1911 ohci_notice(ohci,
1912 "could not construct local self ID\n");
1913 goto end;
1914 }
1915 }
1916
1917 if (self_id_count == 0) {
1918 ohci_notice(ohci, "no self IDs\n");
1919 goto end;
1920 }
1921 rmb();
1922
1923 /*
1924 * Check the consistency of the self IDs we just read. The
1925 * problem we face is that a new bus reset can start while we
1926 * read out the self IDs from the DMA buffer. If this happens,
1927 * the DMA buffer will be overwritten with new self IDs and we
1928 * will read out inconsistent data. The OHCI specification
1929 * (section 11.2) recommends a technique similar to
1930 * linux/seqlock.h, where we remember the generation of the
1931 * self IDs in the buffer before reading them out and compare
1932 * it to the current generation after reading them out. If
1933 * the two generations match we know we have a consistent set
1934 * of self IDs.
1935 */
1936
1937 reg = reg_read(ohci, OHCI1394_SelfIDCount);
1938 new_generation = ohci1394_self_id_count_get_generation(reg);
1939 if (new_generation != generation) {
		ohci_notice(ohci, "new bus reset, discarding self IDs\n");
1941 goto end;
1942 }
1943
1944 // FIXME: Document how the locking works.
1945 scoped_guard(spinlock_irq, &ohci->lock) {
1946 ohci->generation = -1; // prevent AT packet queueing
1947 context_stop(&ohci->at_request_ctx.context);
1948 context_stop(&ohci->at_response_ctx.context);
1949 }
1950
1951 /*
1952 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
1953 * packets in the AT queues and software needs to drain them.
1954 * Some OHCI 1.1 controllers (JMicron) apparently require this too.
1955 */
1956 at_context_flush(&ohci->at_request_ctx);
1957 at_context_flush(&ohci->at_response_ctx);
1958
1959 scoped_guard(spinlock_irq, &ohci->lock) {
1960 ohci->generation = generation;
1961 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
1962 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
1963
1964 if (ohci->quirks & QUIRK_RESET_PACKET)
1965 ohci->request_generation = generation;
1966
1967 // This next bit is unrelated to the AT context stuff but we have to do it under the
1968 // spinlock also. If a new config rom was set up before this reset, the old one is
1969 // now no longer in use and we can free it. Update the config rom pointers to point
1970 // to the current config rom and clear the next_config_rom pointer so a new update
1971 // can take place.
1972 if (ohci->next_config_rom != NULL) {
1973 if (ohci->next_config_rom != ohci->config_rom) {
1974 free_rom = ohci->config_rom;
1975 free_rom_bus = ohci->config_rom_bus;
1976 }
1977 ohci->config_rom = ohci->next_config_rom;
1978 ohci->config_rom_bus = ohci->next_config_rom_bus;
1979 ohci->next_config_rom = NULL;
1980
1981 // Restore config_rom image and manually update config_rom registers.
1982 // Writing the header quadlet will indicate that the config rom is ready,
1983 // so we do that last.
1984 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(ohci->config_rom[2]));
1985 ohci->config_rom[0] = ohci->next_header;
1986 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(ohci->next_header));
1987 }
1988
1989 if (param_remote_dma) {
1990 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
1991 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
1992 }
1993 }
1994
1995 if (free_rom)
1996 dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);
1997
1998 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
1999 self_id_count, ohci->self_id_buffer,
2000 ohci->csr_state_setclear_abdicate);
2001 ohci->csr_state_setclear_abdicate = false;
2002end:
2003 return IRQ_HANDLED;
2004}
2005
2006static irqreturn_t irq_handler(int irq, void *data)
2007{
2008 struct fw_ohci *ohci = data;
2009 u32 event, iso_event;
2010 int i;
2011
2012 event = reg_read(ohci, OHCI1394_IntEventClear);
2013
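	// An event of 0 means the interrupt is not ours; all ones means the
	// card has been ejected.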
2014 if (!event || !~event)
2015 return IRQ_NONE;
2016
2017 /*
2018 * busReset and postedWriteErr events must not be cleared yet
2019 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
2020 */
2021 reg_write(ohci, OHCI1394_IntEventClear,
2022 event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
2023 trace_irqs(ohci->card.index, event);
2024
	// The busReset interrupt stays masked until handle_selfid_complete_event(), the threaded
	// handler woken by the selfIDComplete event, unmasks it again.
2026 if (event & OHCI1394_busReset)
2027 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2028
2029 if (event & OHCI1394_RQPkt)
2030 queue_work(ohci->card.async_wq, &ohci->ar_request_ctx.work);
2031
2032 if (event & OHCI1394_RSPkt)
2033 queue_work(ohci->card.async_wq, &ohci->ar_response_ctx.work);
2034
2035 if (event & OHCI1394_reqTxComplete)
2036 queue_work(ohci->card.async_wq, &ohci->at_request_ctx.work);
2037
2038 if (event & OHCI1394_respTxComplete)
2039 queue_work(ohci->card.async_wq, &ohci->at_response_ctx.work);
2040
2041 if (event & OHCI1394_isochRx) {
2042 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
2043 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
2044
2045 while (iso_event) {
2046 i = ffs(iso_event) - 1;
2047 fw_iso_context_schedule_flush_completions(&ohci->ir_context_list[i].base);
2048 iso_event &= ~(1 << i);
2049 }
2050 }
2051
2052 if (event & OHCI1394_isochTx) {
2053 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
2054 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
2055
2056 while (iso_event) {
2057 i = ffs(iso_event) - 1;
2058 fw_iso_context_schedule_flush_completions(&ohci->it_context_list[i].base);
2059 iso_event &= ~(1 << i);
2060 }
2061 }
2062
2063 if (unlikely(event & OHCI1394_regAccessFail))
2064 ohci_err(ohci, "register access failure\n");
2065
2066 if (unlikely(event & OHCI1394_postedWriteErr)) {
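		// Reading the PostedWriteAddress registers (values intentionally
		// discarded) is required before the error event may be cleared.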
2067 reg_read(ohci, OHCI1394_PostedWriteAddressHi);
2068 reg_read(ohci, OHCI1394_PostedWriteAddressLo);
2069 reg_write(ohci, OHCI1394_IntEventClear,
2070 OHCI1394_postedWriteErr);
2071 dev_err_ratelimited(ohci->card.device, "PCI posted write error\n");
2072 }
2073
2074 if (unlikely(event & OHCI1394_cycleTooLong)) {
2075 dev_notice_ratelimited(ohci->card.device, "isochronous cycle too long\n");
2076 reg_write(ohci, OHCI1394_LinkControlSet,
2077 OHCI1394_LinkControl_cycleMaster);
2078 }
2079
2080 if (unlikely(event & OHCI1394_cycleInconsistent)) {
2081 /*
2082 * We need to clear this event bit in order to make
2083 * cycleMatch isochronous I/O work. In theory we should
2084 * stop active cycleMatch iso contexts now and restart
2085 * them at least two cycles later. (FIXME?)
2086 */
2087 dev_notice_ratelimited(ohci->card.device, "isochronous cycle inconsistent\n");
2088 }
2089
2090 if (unlikely(event & OHCI1394_unrecoverableError))
2091 handle_dead_contexts(ohci);
2092
2093 if (event & OHCI1394_cycle64Seconds) {
2094 guard(spinlock)(&ohci->lock);
2095 update_bus_time(ohci);
2096 } else
2097 flush_writes(ohci);
2098
2099 if (event & OHCI1394_selfIDComplete)
2100 return IRQ_WAKE_THREAD;
2101 else
2102 return IRQ_HANDLED;
2103}
2104
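/*
 * Assert softReset and poll until the controller clears it again, giving up
 * after 500 sleep intervals; a read of all ones means the card was ejected.
 */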
2105static int software_reset(struct fw_ohci *ohci)
2106{
2107 u32 val;
2108 int i;
2109
2110 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
2111 for (i = 0; i < 500; i++) {
2112 val = reg_read(ohci, OHCI1394_HCControlSet);
2113 if (!~val)
2114 return -ENODEV; /* Card was ejected. */
2115
2116 if (!(val & OHCI1394_HCControl_softReset))
2117 return 0;
2118
2119 msleep(1);
2120 }
2121
2122 return -EBUSY;
2123}
2124
2125static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
2126{
2127 size_t size = length * 4;
2128
2129 memcpy(dest, src, size);
2130 if (size < CONFIG_ROM_SIZE)
2131 memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
2132}
2133
2134static int configure_1394a_enhancements(struct fw_ohci *ohci)
2135{
2136 bool enable_1394a;
2137 int ret, clear, set, offset;
2138
2139 /* Check if the driver should configure link and PHY. */
2140 if (!(reg_read(ohci, OHCI1394_HCControlSet) &
2141 OHCI1394_HCControl_programPhyEnable))
2142 return 0;
2143
2144 /* Paranoia: check whether the PHY supports 1394a, too. */
2145 enable_1394a = false;
2146 ret = read_phy_reg(ohci, 2);
2147 if (ret < 0)
2148 return ret;
2149 if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
2150 ret = read_paged_phy_reg(ohci, 1, 8);
2151 if (ret < 0)
2152 return ret;
2153 if (ret >= 1)
2154 enable_1394a = true;
2155 }
2156
2157 if (ohci->quirks & QUIRK_NO_1394A)
2158 enable_1394a = false;
2159
2160 /* Configure PHY and link consistently. */
2161 if (enable_1394a) {
2162 clear = 0;
2163 set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
2164 } else {
2165 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
2166 set = 0;
2167 }
2168 ret = update_phy_reg(ohci, 5, clear, set);
2169 if (ret < 0)
2170 return ret;
2171
2172 if (enable_1394a)
2173 offset = OHCI1394_HCControlSet;
2174 else
2175 offset = OHCI1394_HCControlClear;
2176 reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
2177
2178 /* Clean up: configuration has been taken care of. */
2179 reg_write(ohci, OHCI1394_HCControlClear,
2180 OHCI1394_HCControl_programPhyEnable);
2181
2182 return 0;
2183}
2184
2185static int probe_tsb41ba3d(struct fw_ohci *ohci)
2186{
2187 /* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
2188 static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
2189 int reg, i;
2190
2191 reg = read_phy_reg(ohci, 2);
2192 if (reg < 0)
2193 return reg;
2194 if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
2195 return 0;
2196
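	// Page 1, registers 10..15 hold the PHY's vendor and product ID bytes.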
2197 for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
2198 reg = read_paged_phy_reg(ohci, 1, i + 10);
2199 if (reg < 0)
2200 return reg;
2201 if (reg != id[i])
2202 return 0;
2203 }
2204 return 1;
2205}
2206
2207static int ohci_enable(struct fw_card *card,
2208 const __be32 *config_rom, size_t length)
2209{
2210 struct fw_ohci *ohci = fw_ohci(card);
2211 u32 lps, version, irqs;
2212 int i, ret;
2213
2214 ret = software_reset(ohci);
2215 if (ret < 0) {
2216 ohci_err(ohci, "failed to reset ohci card\n");
2217 return ret;
2218 }
2219
2220 /*
2221 * Now enable LPS, which we need in order to start accessing
2222 * most of the registers. In fact, on some cards (ALI M5251),
2223 * accessing registers in the SClk domain without LPS enabled
2224 * will lock up the machine. Wait 50msec to make sure we have
2225 * full link enabled. However, with some cards (well, at least
2226 * a JMicron PCIe card), we have to try again sometimes.
2227 *
2228 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
	 * cannot actually use the phy at that time. These need a pause of
	 * tens of milliseconds between the LPS write and the first phy access, too.
2231 */
2232
2233 reg_write(ohci, OHCI1394_HCControlSet,
2234 OHCI1394_HCControl_LPS |
2235 OHCI1394_HCControl_postedWriteEnable);
2236 flush_writes(ohci);
2237
2238 for (lps = 0, i = 0; !lps && i < 3; i++) {
2239 msleep(50);
2240 lps = reg_read(ohci, OHCI1394_HCControlSet) &
2241 OHCI1394_HCControl_LPS;
2242 }
2243
2244 if (!lps) {
2245 ohci_err(ohci, "failed to set Link Power Status\n");
2246 return -EIO;
2247 }
2248
2249 if (ohci->quirks & QUIRK_TI_SLLZ059) {
2250 ret = probe_tsb41ba3d(ohci);
2251 if (ret < 0)
2252 return ret;
2253 if (ret)
2254 ohci_notice(ohci, "local TSB41BA3D phy\n");
2255 else
2256 ohci->quirks &= ~QUIRK_TI_SLLZ059;
2257 }
2258
2259 reg_write(ohci, OHCI1394_HCControlClear,
2260 OHCI1394_HCControl_noByteSwapData);
2261
2262 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
2263 reg_write(ohci, OHCI1394_LinkControlSet,
2264 OHCI1394_LinkControl_cycleTimerEnable |
2265 OHCI1394_LinkControl_cycleMaster);
2266
2267 reg_write(ohci, OHCI1394_ATRetries,
2268 OHCI1394_MAX_AT_REQ_RETRIES |
2269 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
2270 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
2271 (200 << 16));
2272
2273 ohci->bus_time_running = false;
2274
2275 for (i = 0; i < 32; i++)
2276 if (ohci->ir_context_support & (1 << i))
2277 reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i),
2278 IR_CONTEXT_MULTI_CHANNEL_MODE);
2279
2280 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2281 if (version >= OHCI_VERSION_1_1) {
2282 reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
2283 0xfffffffe);
2284 card->broadcast_channel_auto_allocated = true;
2285 }
2286
2287 /* Get implemented bits of the priority arbitration request counter. */
2288 reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
2289 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
2290 reg_write(ohci, OHCI1394_FairnessControl, 0);
2291 card->priority_budget_implemented = ohci->pri_req_max != 0;
2292
2293 reg_write(ohci, OHCI1394_PhyUpperBound, FW_MAX_PHYSICAL_RANGE >> 16);
2294 reg_write(ohci, OHCI1394_IntEventClear, ~0);
2295 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
2296
2297 ret = configure_1394a_enhancements(ohci);
2298 if (ret < 0)
2299 return ret;
2300
	/* Activate link_on bit and contender bit in our self ID packets. */
2302 ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
2303 if (ret < 0)
2304 return ret;
2305
2306 /*
2307 * When the link is not yet enabled, the atomic config rom
2308 * update mechanism described below in ohci_set_config_rom()
2309 * is not active. We have to update ConfigRomHeader and
2310 * BusOptions manually, and the write to ConfigROMmap takes
2311 * effect immediately. We tie this to the enabling of the
2312 * link, so we have a valid config rom before enabling - the
2313 * OHCI requires that ConfigROMhdr and BusOptions have valid
2314 * values before enabling.
2315 *
2316 * However, when the ConfigROMmap is written, some controllers
2317 * always read back quadlets 0 and 2 from the config rom to
2318 * the ConfigRomHeader and BusOptions registers on bus reset.
2319 * They shouldn't do that in this initial case where the link
2320 * isn't enabled. This means we have to use the same
	 * workaround here, setting the bus header to 0 and then writing
	 * the right values in the bus reset work item.
2323 */
2324
2325 if (config_rom) {
2326 ohci->next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2327 &ohci->next_config_rom_bus, GFP_KERNEL);
2328 if (ohci->next_config_rom == NULL)
2329 return -ENOMEM;
2330
2331 copy_config_rom(ohci->next_config_rom, config_rom, length);
2332 } else {
2333 /*
2334 * In the suspend case, config_rom is NULL, which
2335 * means that we just reuse the old config rom.
2336 */
2337 ohci->next_config_rom = ohci->config_rom;
2338 ohci->next_config_rom_bus = ohci->config_rom_bus;
2339 }
2340
2341 ohci->next_header = ohci->next_config_rom[0];
2342 ohci->next_config_rom[0] = 0;
2343 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
2344 reg_write(ohci, OHCI1394_BusOptions,
2345 be32_to_cpu(ohci->next_config_rom[2]));
2346 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2347
2348 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
2349
2350 irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
2351 OHCI1394_RQPkt | OHCI1394_RSPkt |
2352 OHCI1394_isochTx | OHCI1394_isochRx |
2353 OHCI1394_postedWriteErr |
2354 OHCI1394_selfIDComplete |
2355 OHCI1394_regAccessFail |
2356 OHCI1394_cycleInconsistent |
2357 OHCI1394_unrecoverableError |
2358 OHCI1394_cycleTooLong |
2359 OHCI1394_masterIntEnable |
2360 OHCI1394_busReset;
2361 reg_write(ohci, OHCI1394_IntMaskSet, irqs);
2362
2363 reg_write(ohci, OHCI1394_HCControlSet,
2364 OHCI1394_HCControl_linkEnable |
2365 OHCI1394_HCControl_BIBimageValid);
2366
2367 reg_write(ohci, OHCI1394_LinkControlSet,
2368 OHCI1394_LinkControl_rcvSelfID |
2369 OHCI1394_LinkControl_rcvPhyPkt);
2370
2371 ar_context_run(&ohci->ar_request_ctx);
2372 ar_context_run(&ohci->ar_response_ctx);
2373
2374 flush_writes(ohci);
2375
2376 /* We are ready to go, reset bus to finish initialization. */
2377 fw_schedule_bus_reset(&ohci->card, false, true);
2378
2379 return 0;
2380}
2381
2382static int ohci_set_config_rom(struct fw_card *card,
2383 const __be32 *config_rom, size_t length)
2384{
2385 struct fw_ohci *ohci;
2386 __be32 *next_config_rom;
2387 dma_addr_t next_config_rom_bus;
2388
2389 ohci = fw_ohci(card);
2390
2391 /*
2392 * When the OHCI controller is enabled, the config rom update
2393 * mechanism is a bit tricky, but easy enough to use. See
2394 * section 5.5.6 in the OHCI specification.
2395 *
2396 * The OHCI controller caches the new config rom address in a
2397 * shadow register (ConfigROMmapNext) and needs a bus reset
2398 * for the changes to take place. When the bus reset is
2399 * detected, the controller loads the new values for the
2400 * ConfigRomHeader and BusOptions registers from the specified
2401 * config rom and loads ConfigROMmap from the ConfigROMmapNext
2402 * shadow register. All automatically and atomically.
2403 *
2404 * Now, there's a twist to this story. The automatic load of
2405 * ConfigRomHeader and BusOptions doesn't honor the
2406 * noByteSwapData bit, so with a be32 config rom, the
2407 * controller will load be32 values in to these registers
2408 * during the atomic update, even on little endian
2409 * architectures. The workaround we use is to put a 0 in the
2410 * header quadlet; 0 is endian agnostic and means that the
2411 * config rom isn't ready yet. In the bus reset work item we
2412 * then set up the real values for the two registers.
2413 *
2414 * We use ohci->lock to avoid racing with the code that sets
2415 * ohci->next_config_rom to NULL (see handle_selfid_complete_event).
2416 */
2417
2418 next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2419 &next_config_rom_bus, GFP_KERNEL);
2420 if (next_config_rom == NULL)
2421 return -ENOMEM;
2422
2423 scoped_guard(spinlock_irq, &ohci->lock) {
2424 // If there is not an already pending config_rom update, push our new allocation
2425 // into the ohci->next_config_rom and then mark the local variable as null so that
2426 // we won't deallocate the new buffer.
2427 //
2428 // OTOH, if there is a pending config_rom update, just use that buffer with the new
2429 // config_rom data, and let this routine free the unused DMA allocation.
2430 if (ohci->next_config_rom == NULL) {
2431 ohci->next_config_rom = next_config_rom;
2432 ohci->next_config_rom_bus = next_config_rom_bus;
2433 next_config_rom = NULL;
2434 }
2435
2436 copy_config_rom(ohci->next_config_rom, config_rom, length);
2437
2438 ohci->next_header = config_rom[0];
2439 ohci->next_config_rom[0] = 0;
2440
2441 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2442 }
2443
2444 /* If we didn't use the DMA allocation, delete it. */
2445 if (next_config_rom != NULL) {
2446 dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom,
2447 next_config_rom_bus);
2448 }
2449
2450 /*
2451 * Now initiate a bus reset to have the changes take
2452 * effect. We clean up the old config rom memory and DMA
2453 * mappings in the bus reset work item, since the OHCI
2454 * controller could need to access it before the bus reset
2455 * takes effect.
2456 */
2457
2458 fw_schedule_bus_reset(&ohci->card, true, true);
2459
2460 return 0;
2461}
2462
2463static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
2464{
2465 struct fw_ohci *ohci = fw_ohci(card);
2466
2467 at_context_transmit(&ohci->at_request_ctx, packet);
2468}
2469
2470static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
2471{
2472 struct fw_ohci *ohci = fw_ohci(card);
2473
2474 at_context_transmit(&ohci->at_response_ctx, packet);
2475}
2476
2477static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
2478{
2479 struct fw_ohci *ohci = fw_ohci(card);
2480 struct at_context *ctx = &ohci->at_request_ctx;
2481 struct driver_data *driver_data = packet->driver_data;
2482 int ret = -ENOENT;
2483
	// Avoid deadlock due to a programming mistake.
2485 if (WARN_ON_ONCE(current_work() == &ctx->work))
2486 return 0;
2487 disable_work_sync(&ctx->work);
2488
2489 if (packet->ack != 0)
2490 goto out;
2491
2492 if (packet->payload_mapped)
2493 dma_unmap_single(ohci->card.device, packet->payload_bus,
2494 packet->payload_length, DMA_TO_DEVICE);
2495
2496 driver_data->packet = NULL;
2497 packet->ack = RCODE_CANCELLED;
2498
2499 // Timestamping on behalf of the hardware.
2500 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
2501
2502 packet->callback(packet, &ohci->card, packet->ack);
2503 ret = 0;
2504 out:
2505 enable_work(&ctx->work);
2506
2507 return ret;
2508}
2509
2510static int ohci_enable_phys_dma(struct fw_card *card,
2511 int node_id, int generation)
2512{
2513 struct fw_ohci *ohci = fw_ohci(card);
2514 int n, ret = 0;
2515
2516 if (param_remote_dma)
2517 return 0;
2518
2519 /*
2520 * FIXME: Make sure this bitmask is cleared when we clear the busReset
2521 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
2522 */
2523
2524 guard(spinlock_irqsave)(&ohci->lock);
2525
2526 if (ohci->generation != generation)
2527 return -ESTALE;
2528
2529 /*
2530 * Note, if the node ID contains a non-local bus ID, physical DMA is
2531 * enabled for _all_ nodes on remote buses.
2532 */
2533
2534 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
2535 if (n < 32)
2536 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
2537 else
2538 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
2539
2540 flush_writes(ohci);
2541
2542 return ret;
2543}
2544
2545static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
2546{
2547 struct fw_ohci *ohci = fw_ohci(card);
2548 u32 value;
2549
2550 switch (csr_offset) {
2551 case CSR_STATE_CLEAR:
2552 case CSR_STATE_SET:
2553 if (ohci->is_root &&
2554 (reg_read(ohci, OHCI1394_LinkControlSet) &
2555 OHCI1394_LinkControl_cycleMaster))
2556 value = CSR_STATE_BIT_CMSTR;
2557 else
2558 value = 0;
2559 if (ohci->csr_state_setclear_abdicate)
2560 value |= CSR_STATE_BIT_ABDICATE;
2561
2562 return value;
2563
2564 case CSR_NODE_IDS:
2565 return reg_read(ohci, OHCI1394_NodeID) << 16;
2566
2567 case CSR_CYCLE_TIME:
2568 return get_cycle_time(ohci);
2569
2570 case CSR_BUS_TIME:
2571 {
2572 // We might be called just after the cycle timer has wrapped around but just before
2573 // the cycle64Seconds handler, so we better check here, too, if the bus time needs
2574 // to be updated.
2575
2576 guard(spinlock_irqsave)(&ohci->lock);
2577 return update_bus_time(ohci);
2578 }
2579 case CSR_BUSY_TIMEOUT:
2580 value = reg_read(ohci, OHCI1394_ATRetries);
2581 return (value >> 4) & 0x0ffff00f;
2582
2583 case CSR_PRIORITY_BUDGET:
2584 return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
2585 (ohci->pri_req_max << 8);
2586
2587 default:
2588 WARN_ON(1);
2589 return 0;
2590 }
2591}
2592
2593static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
2594{
2595 struct fw_ohci *ohci = fw_ohci(card);
2596
2597 switch (csr_offset) {
2598 case CSR_STATE_CLEAR:
2599 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2600 reg_write(ohci, OHCI1394_LinkControlClear,
2601 OHCI1394_LinkControl_cycleMaster);
2602 flush_writes(ohci);
2603 }
2604 if (value & CSR_STATE_BIT_ABDICATE)
2605 ohci->csr_state_setclear_abdicate = false;
2606 break;
2607
2608 case CSR_STATE_SET:
2609 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2610 reg_write(ohci, OHCI1394_LinkControlSet,
2611 OHCI1394_LinkControl_cycleMaster);
2612 flush_writes(ohci);
2613 }
2614 if (value & CSR_STATE_BIT_ABDICATE)
2615 ohci->csr_state_setclear_abdicate = true;
2616 break;
2617
2618 case CSR_NODE_IDS:
2619 reg_write(ohci, OHCI1394_NodeID, value >> 16);
2620 flush_writes(ohci);
2621 break;
2622
2623 case CSR_CYCLE_TIME:
2624 reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
2625 reg_write(ohci, OHCI1394_IntEventSet,
2626 OHCI1394_cycleInconsistent);
2627 flush_writes(ohci);
2628 break;
2629
2630 case CSR_BUS_TIME:
2631 {
2632 guard(spinlock_irqsave)(&ohci->lock);
2633 ohci->bus_time = (update_bus_time(ohci) & 0x40) | (value & ~0x7f);
2634 break;
2635 }
2636 case CSR_BUSY_TIMEOUT:
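		// Mirror of the shift in ohci_read_csr(): replicate the 4-bit
		// retry limit into the three AT retry fields and move the
		// cycle-limit field up into ATRetries bits 16..31.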
2637 value = (value & 0xf) | ((value & 0xf) << 4) |
2638 ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
2639 reg_write(ohci, OHCI1394_ATRetries, value);
2640 flush_writes(ohci);
2641 break;
2642
2643 case CSR_PRIORITY_BUDGET:
2644 reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
2645 flush_writes(ohci);
2646 break;
2647
2648 default:
2649 WARN_ON(1);
2650 break;
2651 }
2652}
2653
2654static void flush_iso_completions(struct iso_context *ctx, enum fw_iso_context_completions_cause cause)
2655{
2656 trace_isoc_inbound_single_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header,
2657 ctx->header_length);
2658 trace_isoc_outbound_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header,
2659 ctx->header_length);
2660
2661 ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
2662 ctx->header_length, ctx->header,
2663 ctx->base.callback_data);
2664 ctx->header_length = 0;
2665}
2666
2667static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
2668{
2669 u32 *ctx_hdr;
2670
2671 if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
2672 if (ctx->base.drop_overflow_headers)
2673 return;
2674 flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
2675 }
2676
2677 ctx_hdr = ctx->header + ctx->header_length;
2678 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
2679
2680 /*
2681 * The two iso header quadlets are byteswapped to little
2682 * endian by the controller, but we want to present them
2683 * as big endian for consistency with the bus endianness.
2684 */
2685 if (ctx->base.header_size > 0)
2686 ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */
2687 if (ctx->base.header_size > 4)
2688 ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */
2689 if (ctx->base.header_size > 8)
2690 memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8);
2691 ctx->header_length += ctx->base.header_size;
2692}
2693
2694static int handle_ir_packet_per_buffer(struct context *context,
2695 struct descriptor *d,
2696 struct descriptor *last)
2697{
2698 struct iso_context *ctx =
2699 container_of(context, struct iso_context, context);
2700 struct descriptor *pd;
2701 u32 buffer_dma;
2702
2703 for (pd = d; pd <= last; pd++)
2704 if (pd->transfer_status)
2705 break;
2706 if (pd > last)
2707 /* Descriptor(s) not done yet, stop iteration */
2708 return 0;
2709
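	// Sync every data buffer of this descriptor block for CPU access
	// before copying the packet headers out below.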
2710 while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) {
2711 d++;
2712 buffer_dma = le32_to_cpu(d->data_address);
2713 dma_sync_single_range_for_cpu(context->ohci->card.device,
2714 buffer_dma & PAGE_MASK,
2715 buffer_dma & ~PAGE_MASK,
2716 le16_to_cpu(d->req_count),
2717 DMA_FROM_DEVICE);
2718 }
2719
2720 copy_iso_headers(ctx, (u32 *) (last + 1));
2721
2722 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
2723 flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);
2724
2725 return 1;
2726}
2727
2728/* d == last because each descriptor block is only a single descriptor. */
2729static int handle_ir_buffer_fill(struct context *context,
2730 struct descriptor *d,
2731 struct descriptor *last)
2732{
2733 struct iso_context *ctx =
2734 container_of(context, struct iso_context, context);
2735 unsigned int req_count, res_count, completed;
2736 u32 buffer_dma;
2737
2738 req_count = le16_to_cpu(last->req_count);
2739 res_count = le16_to_cpu(READ_ONCE(last->res_count));
2740 completed = req_count - res_count;
2741 buffer_dma = le32_to_cpu(last->data_address);
2742
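	// Record progress so that flush_ir_buffer_fill() can report a
	// partially filled buffer when completions are flushed.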
2743 if (completed > 0) {
2744 ctx->mc_buffer_bus = buffer_dma;
2745 ctx->mc_completed = completed;
2746 }
2747
2748 if (res_count != 0)
2749 /* Descriptor(s) not done yet, stop iteration */
2750 return 0;
2751
2752 dma_sync_single_range_for_cpu(context->ohci->card.device,
2753 buffer_dma & PAGE_MASK,
2754 buffer_dma & ~PAGE_MASK,
2755 completed, DMA_FROM_DEVICE);
2756
2757 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
2758 trace_isoc_inbound_multiple_completions(&ctx->base, completed,
2759 FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);
2760
2761 ctx->base.callback.mc(&ctx->base,
2762 buffer_dma + completed,
2763 ctx->base.callback_data);
2764 ctx->mc_completed = 0;
2765 }
2766
2767 return 1;
2768}
2769
2770static void flush_ir_buffer_fill(struct iso_context *ctx)
2771{
2772 dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
2773 ctx->mc_buffer_bus & PAGE_MASK,
2774 ctx->mc_buffer_bus & ~PAGE_MASK,
2775 ctx->mc_completed, DMA_FROM_DEVICE);
2776
2777 trace_isoc_inbound_multiple_completions(&ctx->base, ctx->mc_completed,
2778 FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);
2779
2780 ctx->base.callback.mc(&ctx->base,
2781 ctx->mc_buffer_bus + ctx->mc_completed,
2782 ctx->base.callback_data);
2783 ctx->mc_completed = 0;
2784}
2785
2786static inline void sync_it_packet_for_cpu(struct context *context,
2787 struct descriptor *pd)
2788{
2789 __le16 control;
2790 u32 buffer_dma;
2791
2792 /* only packets beginning with OUTPUT_MORE* have data buffers */
2793 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
2794 return;
2795
2796 /* skip over the OUTPUT_MORE_IMMEDIATE descriptor */
2797 pd += 2;
2798
2799 /*
2800 * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
2801 * data buffer is in the context program's coherent page and must not
2802 * be synced.
2803 */
2804 if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
2805 (context->current_bus & PAGE_MASK)) {
2806 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
2807 return;
2808 pd++;
2809 }
2810
2811 do {
2812 buffer_dma = le32_to_cpu(pd->data_address);
2813 dma_sync_single_range_for_cpu(context->ohci->card.device,
2814 buffer_dma & PAGE_MASK,
2815 buffer_dma & ~PAGE_MASK,
2816 le16_to_cpu(pd->req_count),
2817 DMA_TO_DEVICE);
2818 control = pd->control;
2819 pd++;
2820 } while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
2821}
2822
2823static int handle_it_packet(struct context *context,
2824 struct descriptor *d,
2825 struct descriptor *last)
2826{
2827 struct iso_context *ctx =
2828 container_of(context, struct iso_context, context);
2829 struct descriptor *pd;
2830 __be32 *ctx_hdr;
2831
2832 for (pd = d; pd <= last; pd++)
2833 if (pd->transfer_status)
2834 break;
2835 if (pd > last)
2836 /* Descriptor(s) not done yet, stop iteration */
2837 return 0;
2838
2839 sync_it_packet_for_cpu(context, d);
2840
2841 if (ctx->header_length + 4 > PAGE_SIZE) {
2842 if (ctx->base.drop_overflow_headers)
2843 return 1;
2844 flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
2845 }
2846
2847 ctx_hdr = ctx->header + ctx->header_length;
2848 ctx->last_timestamp = le16_to_cpu(last->res_count);
2849 /* Present this value as big-endian to match the receive code */
2850 *ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) |
2851 le16_to_cpu(pd->res_count));
2852 ctx->header_length += 4;
2853
2854 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
2855 flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);
2856
2857 return 1;
2858}
2859
2860static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
2861{
2862 u32 hi = channels >> 32, lo = channels;
2863
2864 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
2865 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
2866 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
2867 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
2868 ohci->mc_channels = channels;
2869}
2870
2871static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
2872 int type, int channel, size_t header_size)
2873{
2874 struct fw_ohci *ohci = fw_ohci(card);
2875 struct iso_context *ctx;
2876 descriptor_callback_t callback;
2877 u64 *channels;
2878 u32 *mask, regs;
2879 int index, ret = -EBUSY;
2880
2881 scoped_guard(spinlock_irq, &ohci->lock) {
2882 switch (type) {
2883 case FW_ISO_CONTEXT_TRANSMIT:
2884 mask = &ohci->it_context_mask;
2885 callback = handle_it_packet;
2886 index = ffs(*mask) - 1;
2887 if (index >= 0) {
2888 *mask &= ~(1 << index);
2889 regs = OHCI1394_IsoXmitContextBase(index);
2890 ctx = &ohci->it_context_list[index];
2891 }
2892 break;
2893
2894 case FW_ISO_CONTEXT_RECEIVE:
2895 channels = &ohci->ir_context_channels;
2896 mask = &ohci->ir_context_mask;
2897 callback = handle_ir_packet_per_buffer;
2898 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
2899 if (index >= 0) {
2900 *channels &= ~(1ULL << channel);
2901 *mask &= ~(1 << index);
2902 regs = OHCI1394_IsoRcvContextBase(index);
2903 ctx = &ohci->ir_context_list[index];
2904 }
2905 break;
2906
2907 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
2908 mask = &ohci->ir_context_mask;
2909 callback = handle_ir_buffer_fill;
2910 index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
2911 if (index >= 0) {
2912 ohci->mc_allocated = true;
2913 *mask &= ~(1 << index);
2914 regs = OHCI1394_IsoRcvContextBase(index);
2915 ctx = &ohci->ir_context_list[index];
2916 }
2917 break;
2918
2919 default:
2920 index = -1;
2921 ret = -ENOSYS;
2922 }
2923
2924 if (index < 0)
2925 return ERR_PTR(ret);
2926 }
2927
2928 memset(ctx, 0, sizeof(*ctx));
2929 ctx->header_length = 0;
2930 ctx->header = (void *) __get_free_page(GFP_KERNEL);
2931 if (ctx->header == NULL) {
2932 ret = -ENOMEM;
2933 goto out;
2934 }
2935 ret = context_init(&ctx->context, ohci, regs, callback);
2936 if (ret < 0)
2937 goto out_with_header;
2938 fw_iso_context_init_work(&ctx->base, ohci_isoc_context_work);
2939
2940 if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
2941 set_multichannel_mask(ohci, 0);
2942 ctx->mc_completed = 0;
2943 }
2944
2945 return &ctx->base;
2946
2947 out_with_header:
2948 free_page((unsigned long)ctx->header);
2949 out:
2950 scoped_guard(spinlock_irq, &ohci->lock) {
2951 switch (type) {
2952 case FW_ISO_CONTEXT_RECEIVE:
2953 *channels |= 1ULL << channel;
2954 break;
2955
2956 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
2957 ohci->mc_allocated = false;
2958 break;
2959 }
2960 *mask |= 1 << index;
2961 }
2962
2963 return ERR_PTR(ret);
2964}
2965
2966static int ohci_start_iso(struct fw_iso_context *base,
2967 s32 cycle, u32 sync, u32 tags)
2968{
2969 struct iso_context *ctx = container_of(base, struct iso_context, base);
2970 struct fw_ohci *ohci = ctx->context.ohci;
2971 u32 control = IR_CONTEXT_ISOCH_HEADER, match;
2972 int index;
2973
2974 /* the controller cannot start without any queued packets */
2975 if (ctx->context.last->branch_address == 0)
2976 return -ENODATA;
2977
2978 switch (ctx->base.type) {
2979 case FW_ISO_CONTEXT_TRANSMIT:
2980 index = ctx - ohci->it_context_list;
2981 match = 0;
2982 if (cycle >= 0)
2983 match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
2984 (cycle & 0x7fff) << 16;
2985
2986 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
2987 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
2988 context_run(&ctx->context, match);
2989 break;
2990
2991 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		control |= IR_CONTEXT_BUFFER_FILL | IR_CONTEXT_MULTI_CHANNEL_MODE;
2993 fallthrough;
2994 case FW_ISO_CONTEXT_RECEIVE:
2995 index = ctx - ohci->ir_context_list;
2996 match = (tags << 28) | (sync << 8) | ctx->base.channel;
2997 if (cycle >= 0) {
2998 match |= (cycle & 0x07fff) << 12;
2999 control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
3000 }
3001
3002 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
3003 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
3004 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
3005 context_run(&ctx->context, control);
3006
3007 ctx->sync = sync;
3008 ctx->tags = tags;
3009
3010 break;
3011 }
3012
3013 return 0;
3014}
3015
3016static int ohci_stop_iso(struct fw_iso_context *base)
3017{
3018 struct fw_ohci *ohci = fw_ohci(base->card);
3019 struct iso_context *ctx = container_of(base, struct iso_context, base);
3020 int index;
3021
3022 switch (ctx->base.type) {
3023 case FW_ISO_CONTEXT_TRANSMIT:
3024 index = ctx - ohci->it_context_list;
3025 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
3026 break;
3027
3028 case FW_ISO_CONTEXT_RECEIVE:
3029 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3030 index = ctx - ohci->ir_context_list;
3031 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
3032 break;
3033 }
3034 flush_writes(ohci);
3035 context_stop(&ctx->context);
3036
3037 return 0;
3038}
3039
3040static void ohci_free_iso_context(struct fw_iso_context *base)
3041{
3042 struct fw_ohci *ohci = fw_ohci(base->card);
3043 struct iso_context *ctx = container_of(base, struct iso_context, base);
3044 int index;
3045
3046 ohci_stop_iso(base);
3047 context_release(&ctx->context);
3048 free_page((unsigned long)ctx->header);
3049
3050 guard(spinlock_irqsave)(&ohci->lock);
3051
3052 switch (base->type) {
3053 case FW_ISO_CONTEXT_TRANSMIT:
3054 index = ctx - ohci->it_context_list;
3055 ohci->it_context_mask |= 1 << index;
3056 break;
3057
3058 case FW_ISO_CONTEXT_RECEIVE:
3059 index = ctx - ohci->ir_context_list;
3060 ohci->ir_context_mask |= 1 << index;
3061 ohci->ir_context_channels |= 1ULL << base->channel;
3062 break;
3063
3064 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3065 index = ctx - ohci->ir_context_list;
3066 ohci->ir_context_mask |= 1 << index;
3067 ohci->ir_context_channels |= ohci->mc_channels;
3068 ohci->mc_channels = 0;
3069 ohci->mc_allocated = false;
3070 break;
3071 }
3072}
3073
3074static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
3075{
3076 struct fw_ohci *ohci = fw_ohci(base->card);
3077
3078 switch (base->type) {
3079 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3080 {
3081 guard(spinlock_irqsave)(&ohci->lock);
3082
3083 // Don't allow multichannel to grab other contexts' channels.
3084 if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
3085 *channels = ohci->ir_context_channels;
3086 return -EBUSY;
3087 } else {
3088 set_multichannel_mask(ohci, *channels);
3089 return 0;
3090 }
3091 }
3092 default:
3093 return -EINVAL;
3094 }
3095}
3096
3097static void __maybe_unused ohci_resume_iso_dma(struct fw_ohci *ohci)
3098{
3099 int i;
3100 struct iso_context *ctx;
3101
	for (i = 0; i < ohci->n_ir; i++) {
3103 ctx = &ohci->ir_context_list[i];
3104 if (ctx->context.running)
3105 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
3106 }
3107
	for (i = 0; i < ohci->n_it; i++) {
3109 ctx = &ohci->it_context_list[i];
3110 if (ctx->context.running)
3111 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
3112 }
3113}
3114
3115static int queue_iso_transmit(struct iso_context *ctx,
3116 struct fw_iso_packet *packet,
3117 struct fw_iso_buffer *buffer,
3118 unsigned long payload)
3119{
3120 struct descriptor *d, *last, *pd;
3121 struct fw_iso_packet *p;
3122 __le32 *header;
3123 dma_addr_t d_bus, page_bus;
3124 u32 z, header_z, payload_z, irq;
3125 u32 payload_index, payload_end_index, next_page_index;
3126 int page, end_page, i, length, offset;
3127
3128 p = packet;
3129 payload_index = payload;
3130
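	// z counts the descriptor slots in this block: one for a skip-only
	// packet, two for the OUTPUT_MORE_IMMEDIATE descriptor otherwise, plus
	// one if the packet has a header and one per payload page (payload_z,
	// added below).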
3131 if (p->skip)
3132 z = 1;
3133 else
3134 z = 2;
3135 if (p->header_length > 0)
3136 z++;
3137
	/* Determine the index of the first page past the end of the payload. */
3139 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
3140 if (p->payload_length > 0)
3141 payload_z = end_page - (payload_index >> PAGE_SHIFT);
3142 else
3143 payload_z = 0;
3144
3145 z += payload_z;
3146
3147 /* Get header size in number of descriptors. */
3148 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
3149
3150 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
3151 if (d == NULL)
3152 return -ENOMEM;
3153
3154 if (!p->skip) {
3155 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
3156 d[0].req_count = cpu_to_le16(8);
3157 /*
3158 * Link the skip address to this descriptor itself. This causes
3159 * a context to skip a cycle whenever lost cycles or FIFO
3160 * overruns occur, without dropping the data. The application
3161 * should then decide whether this is an error condition or not.
3162 * FIXME: Make the context's cycle-lost behaviour configurable?
3163 */
3164 d[0].branch_address = cpu_to_le32(d_bus | z);
3165
3166 header = (__le32 *) &d[1];
3167
3168 ohci1394_it_data_set_speed(header, ctx->base.speed);
3169 ohci1394_it_data_set_tag(header, p->tag);
3170 ohci1394_it_data_set_channel(header, ctx->base.channel);
3171 ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA);
3172 ohci1394_it_data_set_sync(header, p->sy);
3173
3174 ohci1394_it_data_set_data_length(header, p->header_length + p->payload_length);
3175 }
3176
3177 if (p->header_length > 0) {
3178 d[2].req_count = cpu_to_le16(p->header_length);
3179 d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
3180 memcpy(&d[z], p->header, p->header_length);
3181 }
3182
3183 pd = d + z - payload_z;
3184 payload_end_index = payload_index + p->payload_length;
3185 for (i = 0; i < payload_z; i++) {
3186 page = payload_index >> PAGE_SHIFT;
3187 offset = payload_index & ~PAGE_MASK;
3188 next_page_index = (page + 1) << PAGE_SHIFT;
3189 length =
3190 min(next_page_index, payload_end_index) - payload_index;
3191 pd[i].req_count = cpu_to_le16(length);
3192
3193 page_bus = page_private(buffer->pages[page]);
3194 pd[i].data_address = cpu_to_le32(page_bus + offset);
3195
3196 dma_sync_single_range_for_device(ctx->context.ohci->card.device,
3197 page_bus, offset, length,
3198 DMA_TO_DEVICE);
3199
3200 payload_index += length;
3201 }
3202
3203 if (p->interrupt)
3204 irq = DESCRIPTOR_IRQ_ALWAYS;
3205 else
3206 irq = DESCRIPTOR_NO_IRQ;
3207
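	// z == 2 means the block is just the OUTPUT_MORE_IMMEDIATE descriptor
	// (two slots), so it also serves as the last descriptor.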
3208 last = z == 2 ? d : d + z - 1;
3209 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
3210 DESCRIPTOR_STATUS |
3211 DESCRIPTOR_BRANCH_ALWAYS |
3212 irq);
3213
3214 context_append(&ctx->context, d, z, header_z);
3215
3216 return 0;
3217}
3218
3219static int queue_iso_packet_per_buffer(struct iso_context *ctx,
3220 struct fw_iso_packet *packet,
3221 struct fw_iso_buffer *buffer,
3222 unsigned long payload)
3223{
3224 struct device *device = ctx->context.ohci->card.device;
3225 struct descriptor *d, *pd;
3226 dma_addr_t d_bus, page_bus;
3227 u32 z, header_z, rest;
3228 int i, j, length;
3229 int page, offset, packet_count, header_size, payload_per_buffer;
3230
3231 /*
3232 * The OHCI controller puts the isochronous header and trailer in the
3233 * buffer, so we need at least 8 bytes.
3234 */
3235 packet_count = packet->header_length / ctx->base.header_size;
3236 header_size = max(ctx->base.header_size, (size_t)8);
3237
3238 /* Get header size in number of descriptors. */
3239 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
3240 page = payload >> PAGE_SHIFT;
3241 offset = payload & ~PAGE_MASK;
3242 payload_per_buffer = packet->payload_length / packet_count;
3243
3244 for (i = 0; i < packet_count; i++) {
3245 /* d points to the header descriptor */
3246 z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
3247 d = context_get_descriptors(&ctx->context,
3248 z + header_z, &d_bus);
3249 if (d == NULL)
3250 return -ENOMEM;
3251
3252 d->control = cpu_to_le16(DESCRIPTOR_STATUS |
3253 DESCRIPTOR_INPUT_MORE);
3254 if (packet->skip && i == 0)
3255 d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
3256 d->req_count = cpu_to_le16(header_size);
3257 d->res_count = d->req_count;
3258 d->transfer_status = 0;
3259 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
3260
3261 rest = payload_per_buffer;
3262 pd = d;
3263 for (j = 1; j < z; j++) {
3264 pd++;
3265 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
3266 DESCRIPTOR_INPUT_MORE);
3267
3268 if (offset + rest < PAGE_SIZE)
3269 length = rest;
3270 else
3271 length = PAGE_SIZE - offset;
3272 pd->req_count = cpu_to_le16(length);
3273 pd->res_count = pd->req_count;
3274 pd->transfer_status = 0;
3275
3276 page_bus = page_private(buffer->pages[page]);
3277 pd->data_address = cpu_to_le32(page_bus + offset);
3278
3279 dma_sync_single_range_for_device(device, page_bus,
3280 offset, length,
3281 DMA_FROM_DEVICE);
3282
3283 offset = (offset + length) & ~PAGE_MASK;
3284 rest -= length;
3285 if (offset == 0)
3286 page++;
3287 }
3288 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
3289 DESCRIPTOR_INPUT_LAST |
3290 DESCRIPTOR_BRANCH_ALWAYS);
3291 if (packet->interrupt && i == packet_count - 1)
3292 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
3293
3294 context_append(&ctx->context, d, z, header_z);
3295 }
3296
3297 return 0;
3298}
3299
3300static int queue_iso_buffer_fill(struct iso_context *ctx,
3301 struct fw_iso_packet *packet,
3302 struct fw_iso_buffer *buffer,
3303 unsigned long payload)
3304{
3305 struct descriptor *d;
3306 dma_addr_t d_bus, page_bus;
3307 int page, offset, rest, z, i, length;
3308
3309 page = payload >> PAGE_SHIFT;
3310 offset = payload & ~PAGE_MASK;
3311 rest = packet->payload_length;
3312
3313 /* We need one descriptor for each page in the buffer. */
3314 z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);
3315
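	// Buffer-fill mode requires quadlet-aligned payload offset and length,
	// and the block must not run past the end of the mapped buffer.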
3316 if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
3317 return -EFAULT;
3318
3319 for (i = 0; i < z; i++) {
3320 d = context_get_descriptors(&ctx->context, 1, &d_bus);
3321 if (d == NULL)
3322 return -ENOMEM;
3323
3324 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
3325 DESCRIPTOR_BRANCH_ALWAYS);
3326 if (packet->skip && i == 0)
3327 d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
3328 if (packet->interrupt && i == z - 1)
3329 d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
3330
3331 if (offset + rest < PAGE_SIZE)
3332 length = rest;
3333 else
3334 length = PAGE_SIZE - offset;
3335 d->req_count = cpu_to_le16(length);
3336 d->res_count = d->req_count;
3337 d->transfer_status = 0;
3338
3339 page_bus = page_private(buffer->pages[page]);
3340 d->data_address = cpu_to_le32(page_bus + offset);
3341
3342 dma_sync_single_range_for_device(ctx->context.ohci->card.device,
3343 page_bus, offset, length,
3344 DMA_FROM_DEVICE);
3345
3346 rest -= length;
3347 offset = 0;
3348 page++;
3349
3350 context_append(&ctx->context, d, 1, 0);
3351 }
3352
3353 return 0;
3354}
3355
3356static int ohci_queue_iso(struct fw_iso_context *base,
3357 struct fw_iso_packet *packet,
3358 struct fw_iso_buffer *buffer,
3359 unsigned long payload)
3360{
3361 struct iso_context *ctx = container_of(base, struct iso_context, base);
3362
3363 guard(spinlock_irqsave)(&ctx->context.ohci->lock);
3364
3365 switch (base->type) {
3366 case FW_ISO_CONTEXT_TRANSMIT:
3367 return queue_iso_transmit(ctx, packet, buffer, payload);
3368 case FW_ISO_CONTEXT_RECEIVE:
3369 return queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
3370 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3371 return queue_iso_buffer_fill(ctx, packet, buffer, payload);
3372 default:
3373 return -ENOSYS;
3374 }
3375}
3376
3377static void ohci_flush_queue_iso(struct fw_iso_context *base)
3378{
3379 struct context *ctx =
3380 &container_of(base, struct iso_context, base)->context;
3381
3382 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
3383}
3384
3385static int ohci_flush_iso_completions(struct fw_iso_context *base)
3386{
3387 struct iso_context *ctx = container_of(base, struct iso_context, base);
3388 int ret = 0;
3389
3390 if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
3391 ohci_isoc_context_work(&base->work);
3392
3393 switch (base->type) {
3394 case FW_ISO_CONTEXT_TRANSMIT:
3395 case FW_ISO_CONTEXT_RECEIVE:
3396 if (ctx->header_length != 0)
3397 flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);
3398 break;
3399 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3400 if (ctx->mc_completed != 0)
3401 flush_ir_buffer_fill(ctx);
3402 break;
3403 default:
3404 ret = -ENOSYS;
3405 }
3406
3407 clear_bit_unlock(0, &ctx->flushing_completions);
3408 smp_mb__after_atomic();
3409 }
3410
3411 return ret;
3412}
3413
3414static const struct fw_card_driver ohci_driver = {
3415 .enable = ohci_enable,
3416 .read_phy_reg = ohci_read_phy_reg,
3417 .update_phy_reg = ohci_update_phy_reg,
3418 .set_config_rom = ohci_set_config_rom,
3419 .send_request = ohci_send_request,
3420 .send_response = ohci_send_response,
3421 .cancel_packet = ohci_cancel_packet,
3422 .enable_phys_dma = ohci_enable_phys_dma,
3423 .read_csr = ohci_read_csr,
3424 .write_csr = ohci_write_csr,
3425
3426 .allocate_iso_context = ohci_allocate_iso_context,
3427 .free_iso_context = ohci_free_iso_context,
3428 .set_iso_channels = ohci_set_iso_channels,
3429 .queue_iso = ohci_queue_iso,
3430 .flush_queue_iso = ohci_flush_queue_iso,
3431 .flush_iso_completions = ohci_flush_iso_completions,
3432 .start_iso = ohci_start_iso,
3433 .stop_iso = ohci_stop_iso,
3434};
3435
3436#ifdef CONFIG_PPC_PMAC
3437static void pmac_ohci_on(struct pci_dev *dev)
3438{
3439 if (machine_is(powermac)) {
3440 struct device_node *ofn = pci_device_to_OF_node(dev);
3441
3442 if (ofn) {
3443 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
3444 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3445 }
3446 }
3447}
3448
3449static void pmac_ohci_off(struct pci_dev *dev)
3450{
3451 if (machine_is(powermac)) {
3452 struct device_node *ofn = pci_device_to_OF_node(dev);
3453
3454 if (ofn) {
3455 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3456 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3457 }
3458 }
3459}
3460#else
3461static inline void pmac_ohci_on(struct pci_dev *dev) {}
3462static inline void pmac_ohci_off(struct pci_dev *dev) {}
3463#endif /* CONFIG_PPC_PMAC */
3464
3465static void release_ohci(struct device *dev, void *data)
3466{
3467 struct pci_dev *pdev = to_pci_dev(dev);
3468 struct fw_ohci *ohci = pci_get_drvdata(pdev);
3469
3470 pmac_ohci_off(pdev);
3471
3472 ar_context_release(&ohci->ar_response_ctx);
3473 ar_context_release(&ohci->ar_request_ctx);
3474
3475 dev_notice(dev, "removed fw-ohci device\n");
3476}
3477
3478static int pci_probe(struct pci_dev *dev,
3479 const struct pci_device_id *ent)
3480{
3481 struct fw_ohci *ohci;
3482 u32 bus_options, max_receive, link_speed, version;
3483 u64 guid;
3484 int i, flags, irq, err;
3485
3486 if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
3487 dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
3488 return -ENOSYS;
3489 }
3490
3491 ohci = devres_alloc(release_ohci, sizeof(*ohci), GFP_KERNEL);
3492 if (ohci == NULL)
3493 return -ENOMEM;
3494 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
3495 pci_set_drvdata(dev, ohci);
3496 pmac_ohci_on(dev);
3497 devres_add(&dev->dev, ohci);
3498
3499 err = pcim_enable_device(dev);
3500 if (err) {
3501 dev_err(&dev->dev, "failed to enable OHCI hardware\n");
3502 return err;
3503 }
3504
3505 pci_set_master(dev);
3506 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3507
3508 spin_lock_init(&ohci->lock);
3509 mutex_init(&ohci->phy_reg_mutex);
3510
3511 if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
3512 pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
3513 ohci_err(ohci, "invalid MMIO resource\n");
3514 return -ENXIO;
3515 }
3516
3517 ohci->registers = pcim_iomap_region(dev, 0, ohci_driver_name);
3518 if (IS_ERR(ohci->registers)) {
		ohci_err(ohci, "failed to request and map MMIO resource\n");
3520 return -ENXIO;
3521 }
3522
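	// Match this controller against the quirks table; PCI_ANY_ID wildcards
	// the device and revision fields, and a listed revision acts as an
	// upper bound.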
3523 for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
3524 if ((ohci_quirks[i].vendor == dev->vendor) &&
3525 (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
3526 ohci_quirks[i].device == dev->device) &&
3527 (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
3528 ohci_quirks[i].revision >= dev->revision)) {
3529 ohci->quirks = ohci_quirks[i].flags;
3530 break;
3531 }
3532 if (param_quirks)
3533 ohci->quirks = param_quirks;
3534
3535 if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev))
3536 ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ;
3537
3538 /*
3539 * Because dma_alloc_coherent() allocates at least one page,
3540 * we save space by using a common buffer for the AR request/
3541 * response descriptors and the self IDs buffer.
3542 */
3543 BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
3544 BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
3545 ohci->misc_buffer = dmam_alloc_coherent(&dev->dev, PAGE_SIZE, &ohci->misc_buffer_bus,
3546 GFP_KERNEL);
3547 if (!ohci->misc_buffer)
3548 return -ENOMEM;
3549
3550 err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
3551 OHCI1394_AsReqRcvContextControlSet);
3552 if (err < 0)
3553 return err;
3554
3555 err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
3556 OHCI1394_AsRspRcvContextControlSet);
3557 if (err < 0)
3558 return err;
3559
3560 err = context_init(&ohci->at_request_ctx.context, ohci,
3561 OHCI1394_AsReqTrContextControlSet, handle_at_packet);
3562 if (err < 0)
3563 return err;
3564 INIT_WORK(&ohci->at_request_ctx.work, ohci_at_context_work);
3565
3566 err = context_init(&ohci->at_response_ctx.context, ohci,
3567 OHCI1394_AsRspTrContextControlSet, handle_at_packet);
3568 if (err < 0)
3569 return err;
3570 INIT_WORK(&ohci->at_response_ctx.work, ohci_at_context_work);
3571
3572 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
3573 ohci->ir_context_channels = ~0ULL;
3574 ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
3575 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
3576 ohci->ir_context_mask = ohci->ir_context_support;
3577 ohci->n_ir = hweight32(ohci->ir_context_mask);
3578 ohci->ir_context_list = devm_kcalloc(&dev->dev, ohci->n_ir, sizeof(struct iso_context), GFP_KERNEL);
3579 if (!ohci->ir_context_list)
3580 return -ENOMEM;
3581
3582 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
3583 ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
3584 /* JMicron JMB38x often shows 0 at first read, just ignore it */
3585 if (!ohci->it_context_support) {
3586 ohci_notice(ohci, "overriding IsoXmitIntMask\n");
3587 ohci->it_context_support = 0xf;
3588 }
3589 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
3590 ohci->it_context_mask = ohci->it_context_support;
3591 ohci->n_it = hweight32(ohci->it_context_mask);
3592 ohci->it_context_list = devm_kcalloc(&dev->dev, ohci->n_it, sizeof(struct iso_context), GFP_KERNEL);
3593 if (!ohci->it_context_list)
3594 return -ENOMEM;
3595
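	// Layout of the shared misc buffer: AR request descriptors at offset 0,
	// AR response descriptors at PAGE_SIZE/4, self ID receive buffer at
	// PAGE_SIZE/2.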
3596 ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2;
3597 ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
3598
3599 bus_options = reg_read(ohci, OHCI1394_BusOptions);
3600 max_receive = (bus_options >> 12) & 0xf;
3601 link_speed = bus_options & 0x7;
3602 guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
3603 reg_read(ohci, OHCI1394_GUIDLo);
3604
3605 flags = PCI_IRQ_INTX;
3606 if (!(ohci->quirks & QUIRK_NO_MSI))
3607 flags |= PCI_IRQ_MSI;
3608 err = pci_alloc_irq_vectors(dev, 1, 1, flags);
3609 if (err < 0)
3610 return err;
3611 irq = pci_irq_vector(dev, 0);
3612 if (irq < 0) {
3613 err = irq;
3614 goto fail_msi;
3615 }
3616
	// IRQF_ONESHOT is not applied so that other events can still be handled by the hardIRQ
	// handler while the threaded IRQ handler runs for the selfIDComplete event.
3619 err = request_threaded_irq(irq, irq_handler, handle_selfid_complete_event,
3620 pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name,
3621 ohci);
3622 if (err < 0) {
3623 ohci_err(ohci, "failed to allocate interrupt %d\n", irq);
3624 goto fail_msi;
3625 }
3626
3627 err = fw_card_add(&ohci->card, max_receive, link_speed, guid, ohci->n_it + ohci->n_ir);
3628 if (err)
3629 goto fail_irq;
3630
3631 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
3632 ohci_notice(ohci,
3633 "added OHCI v%x.%x device as card %d, "
3634 "%d IR + %d IT contexts, quirks 0x%x%s\n",
3635 version >> 16, version & 0xff, ohci->card.index,
3636 ohci->n_ir, ohci->n_it, ohci->quirks,
3637 reg_read(ohci, OHCI1394_PhyUpperBound) ?
3638 ", physUB" : "");
3639
3640 return 0;
3641
3642 fail_irq:
3643 free_irq(irq, ohci);
3644 fail_msi:
3645 pci_free_irq_vectors(dev);
3646
3647 return err;
3648}
3649
3650static void pci_remove(struct pci_dev *dev)
3651{
3652 struct fw_ohci *ohci = pci_get_drvdata(dev);
3653 int irq;
3654
3655 /*
3656 * If the removal is happening from the suspend state, LPS won't be
	 * enabled and host registers (e.g., IntMaskClear) won't be accessible.
3658 */
3659 if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) {
3660 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
3661 flush_writes(ohci);
3662 }
3663 fw_core_remove_card(&ohci->card);
3664
3665 /*
3666 * FIXME: Fail all pending packets here, now that the upper
3667 * layers can't queue any more.
3668 */
3669
3670 software_reset(ohci);
3671
3672 irq = pci_irq_vector(dev, 0);
3673 if (irq >= 0)
3674 free_irq(irq, ohci);
3675 pci_free_irq_vectors(dev);
3676
3677 dev_notice(&dev->dev, "removing fw-ohci device\n");
3678}
3679
3680static int __maybe_unused pci_suspend(struct device *dev)
3681{
3682 struct pci_dev *pdev = to_pci_dev(dev);
3683 struct fw_ohci *ohci = pci_get_drvdata(pdev);
3684
3685 software_reset(ohci);
3686 pmac_ohci_off(pdev);
3687
3688 return 0;
3689}
3690
3691
3692static int __maybe_unused pci_resume(struct device *dev)
3693{
3694 struct pci_dev *pdev = to_pci_dev(dev);
3695 struct fw_ohci *ohci = pci_get_drvdata(pdev);
3696 int err;
3697
3698 pmac_ohci_on(pdev);
3699
	/* Some systems don't set up the GUID register on resume from RAM */
3701 if (!reg_read(ohci, OHCI1394_GUIDLo) &&
3702 !reg_read(ohci, OHCI1394_GUIDHi)) {
3703 reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
3704 reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
3705 }
3706
3707 err = ohci_enable(&ohci->card, NULL, 0);
3708 if (err)
3709 return err;
3710
3711 ohci_resume_iso_dma(ohci);
3712
3713 return 0;
3714}
3715
3716static const struct pci_device_id pci_table[] = {
3717 { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
3718 { }
3719};
3720
3721MODULE_DEVICE_TABLE(pci, pci_table);
3722
3723static SIMPLE_DEV_PM_OPS(pci_pm_ops, pci_suspend, pci_resume);
3724
3725static struct pci_driver fw_ohci_pci_driver = {
3726 .name = ohci_driver_name,
3727 .id_table = pci_table,
3728 .probe = pci_probe,
3729 .remove = pci_remove,
3730 .driver.pm = &pci_pm_ops,
3731};
3732
3733static int __init fw_ohci_init(void)
3734{
3735 return pci_register_driver(&fw_ohci_pci_driver);
3736}
3737
3738static void __exit fw_ohci_cleanup(void)
3739{
3740 pci_unregister_driver(&fw_ohci_pci_driver);
3741}
3742
3743module_init(fw_ohci_init);
3744module_exit(fw_ohci_cleanup);
3745
3746MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
3747MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
3748MODULE_LICENSE("GPL");
3749
3750/* Provide a module alias so root-on-sbp2 initrds don't break. */
3751MODULE_ALIAS("ohci1394");