Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "core.h"
#include <trace/events/firewire.h>

#include "packet-header-definitions.h"

/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION 6
#define FW_CDEV_VERSION_EVENT_REQUEST2 4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
#define FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP 6

static DEFINE_SPINLOCK(phy_receiver_list_lock);
static LIST_HEAD(phy_receiver_list);

struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct xarray resource_xa;
	struct list_head event_list;
	wait_queue_head_t wait;
	wait_queue_head_t tx_flush_wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;
	bool buffer_is_mapped;

	struct list_head phy_receiver_link;
	u64 phy_receiver_closure;

	struct list_head link;
	struct kref kref;
};

static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}
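
/*
 * A note on lifetimes: each resource stored in client->resource_xa pins the
 * client with client_get(), and the matching client_put() happens when the
 * resource is released or when its completion handler erases it from the
 * xarray. The open file holds the initial reference from kref_init(), which
 * is dropped at the end of fw_device_op_release().
 */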

struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_card *card;
	struct fw_request *request;
	bool is_fcp;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[];
};

struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	struct iso_resource_event *e_alloc, *e_dealloc;
};

static struct address_handler_resource *to_address_handler_resource(struct client_resource *resource)
{
	return container_of(resource, struct address_handler_resource, resource);
}

static struct inbound_transaction_resource *to_inbound_transaction_resource(struct client_resource *resource)
{
	return container_of(resource, struct inbound_transaction_resource, resource);
}

static struct descriptor_resource *to_descriptor_resource(struct client_resource *resource)
{
	return container_of(resource, struct descriptor_resource, resource);
}

static struct iso_resource *to_iso_resource(struct client_resource *resource)
{
	return container_of(resource, struct iso_resource, resource);
}

static void release_iso_resource(struct client *, struct client_resource *);

static int is_iso_resource(const struct client_resource *resource)
{
	return resource->release == release_iso_resource;
}

static void release_transaction(struct client *client,
				struct client_resource *resource);

static int is_outbound_transaction_resource(const struct client_resource *resource)
{
	return resource->release == release_transaction;
}

static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
	client_get(r->client);
	if (!queue_delayed_work(fw_workqueue, &r->work, delay))
		client_put(r->client);
}
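
/*
 * The scheduled work runs with a client reference of its own: take one up
 * front and drop it again immediately if queue_delayed_work() reports that
 * the work was already pending, so exactly one reference exists per queued
 * instance. iso_resource_work() drops that reference when it finishes.
 */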

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	union {
		struct fw_cdev_event_response without_tstamp;
		struct fw_cdev_event_response2 with_tstamp;
	} rsp;
};

struct inbound_transaction_event {
	struct event event;
	union {
		struct fw_cdev_event_request request;
		struct fw_cdev_event_request2 request2;
		struct fw_cdev_event_request3 with_tstamp;
	} req;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
	struct event event;
	struct client *client;
	struct fw_packet p;
	union {
		struct fw_cdev_event_phy_packet without_tstamp;
		struct fw_cdev_event_phy_packet2 with_tstamp;
	} phy_packet;
};

struct inbound_phy_packet_event {
	struct event event;
	union {
		struct fw_cdev_event_phy_packet without_tstamp;
		struct fw_cdev_event_phy_packet2 with_tstamp;
	} phy_packet;
};

#ifdef CONFIG_COMPAT
static void __user *u64_to_uptr(u64 value)
{
	if (in_compat_syscall())
		return compat_ptr(value);
	else
		return (void __user *)(unsigned long)value;
}

static u64 uptr_to_u64(void __user *ptr)
{
	if (in_compat_syscall())
		return ptr_to_compat(ptr);
	else
		return (u64)(unsigned long)ptr;
}
#else
static inline void __user *u64_to_uptr(u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline u64 uptr_to_u64(void __user *ptr)
{
	return (u64)(unsigned long)ptr;
}
#endif /* CONFIG_COMPAT */
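
/*
 * User pointers travel through the ABI as u64 fields. For 32-bit processes
 * on a 64-bit kernel, compat_ptr()/ptr_to_compat() handle the upper bits of
 * the address correctly; native callers get a plain integer/pointer cast.
 */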

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	xa_init_flags(&client->resource_xa, XA_FLAGS_ALLOC1 | XA_FLAGS_LOCK_BH);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	init_waitqueue_head(&client->tx_flush_wait);
	INIT_LIST_HEAD(&client->phy_receiver_link);
	INIT_LIST_HEAD(&client->link);
	kref_init(&client->kref);

	file->private_data = client;

	return nonseekable_open(inode, file);
}

static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	scoped_guard(spinlock_irqsave, &client->lock) {
		if (client->in_shutdown)
			kfree(event);
		else
			list_add_tail(&event->link, &client->event_list);
	}

	wake_up_interruptible(&client->wait);
}
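
/*
 * Each event is delivered in up to two chunks: a fixed-size event structure
 * in v[0] and an optional variable-length payload in v[1]. dequeue_event()
 * below copies as much of both as fits into the read() buffer, so a short
 * read silently truncates the payload rather than failing.
 */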

static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
				       !list_empty(&client->event_list) ||
				       fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
	    fw_device_is_shutdown(client->device))
		return -ENODEV;

	scoped_guard(spinlock_irq, &client->lock) {
		event = list_first_entry(&client->event_list, struct event, link);
		list_del(&event->link);
	}

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	guard(spinlock_irq)(&card->lock);

	event->closure = client->bus_reset_closure;
	event->type = FW_CDEV_EVENT_BUS_RESET;
	event->generation = client->device->generation;
	event->node_id = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id = card->bm_node_id;
	event->irm_node_id = card->irm_node->node_id;
	event->root_node_id = card->root_node->node_id;
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	guard(mutex)(&device->client_list_mutex);

	list_for_each_entry(c, &device->client_list, link)
		callback(c);
}

static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;
	struct client_resource *resource;
	unsigned long index;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return;

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	guard(spinlock_irq)(&client->lock);

	xa_for_each(&client->resource_xa, index, resource) {
		if (is_iso_resource(resource))
			schedule_iso_resource(to_iso_resource(resource), 0);
	}
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

union ioctl_arg {
	struct fw_cdev_get_info get_info;
	struct fw_cdev_send_request send_request;
	struct fw_cdev_allocate allocate;
	struct fw_cdev_deallocate deallocate;
	struct fw_cdev_send_response send_response;
	struct fw_cdev_initiate_bus_reset initiate_bus_reset;
	struct fw_cdev_add_descriptor add_descriptor;
	struct fw_cdev_remove_descriptor remove_descriptor;
	struct fw_cdev_create_iso_context create_iso_context;
	struct fw_cdev_queue_iso queue_iso;
	struct fw_cdev_start_iso start_iso;
	struct fw_cdev_stop_iso stop_iso;
	struct fw_cdev_get_cycle_timer get_cycle_timer;
	struct fw_cdev_allocate_iso_resource allocate_iso_resource;
	struct fw_cdev_send_stream_packet send_stream_packet;
	struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
	struct fw_cdev_send_phy_packet send_phy_packet;
	struct fw_cdev_receive_phy_packets receive_phy_packets;
	struct fw_cdev_set_iso_channels set_iso_channels;
	struct fw_cdev_flush_iso flush_iso;
};

static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_info *a = &arg->get_info;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = a->version;
	a->version = FW_CDEV_KERNEL_VERSION;
	a->card = client->device->card->index;

	scoped_guard(rwsem_read, &fw_device_rwsem) {
		if (a->rom != 0) {
			size_t want = a->rom_length;
			size_t have = client->device->config_rom_length * 4;

			ret = copy_to_user(u64_to_uptr(a->rom), client->device->config_rom,
					   min(want, have));
			if (ret != 0)
				return -EFAULT;
		}
		a->rom_length = client->device->config_rom_length * 4;
	}

	guard(mutex)(&client->device->client_list_mutex);

	client->bus_reset_closure = a->bus_reset_closure;
	if (a->bus_reset != 0) {
		fill_bus_reset_event(&bus_reset, client);
		/* unaligned size of bus_reset is 36 bytes */
		ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
	}
	if (ret == 0 && list_empty(&client->link))
		list_add_tail(&client->link, &client->device->client_list);

	return ret ? -EFAULT : 0;
}

static int add_client_resource(struct client *client, struct client_resource *resource,
			       gfp_t gfp_mask)
{
	int ret;

	scoped_guard(spinlock_irqsave, &client->lock) {
		u32 index;

		if (client->in_shutdown) {
			ret = -ECANCELED;
		} else {
			if (gfpflags_allow_blocking(gfp_mask)) {
				ret = xa_alloc(&client->resource_xa, &index, resource, xa_limit_32b,
					       GFP_NOWAIT);
			} else {
				ret = xa_alloc_bh(&client->resource_xa, &index, resource,
						  xa_limit_32b, GFP_NOWAIT);
			}
		}
		if (ret >= 0) {
			resource->handle = index;
			client_get(client);
			if (is_iso_resource(resource))
				schedule_iso_resource(to_iso_resource(resource), 0);
		}
	}

	return ret < 0 ? ret : 0;
}
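
/*
 * The xarray allocation always uses GFP_NOWAIT here because it happens under
 * client->lock; the caller's gfp_mask only selects between the plain and the
 * _bh xarray entry points, according to the context the caller runs in.
 */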

static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **return_resource)
{
	unsigned long index = handle;
	struct client_resource *resource;

	scoped_guard(spinlock_irq, &client->lock) {
		if (client->in_shutdown)
			return -EINVAL;

		resource = xa_load(&client->resource_xa, index);
		if (!resource || resource->release != release)
			return -EINVAL;

		xa_erase(&client->resource_xa, handle);
	}

	if (return_resource)
		*return_resource = resource;
	else
		resource->release(client, resource);

	client_put(client);

	return 0;
}

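/*
 * Outbound transactions are completed by complete_transaction(); this release
 * callback therefore has nothing to do. Its address still matters: it tags
 * the resource type for is_outbound_transaction_resource().
 */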
static void release_transaction(struct client *client,
				struct client_resource *resource)
{
}

static void complete_transaction(struct fw_card *card, int rcode, u32 request_tstamp,
				 u32 response_tstamp, void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct client *client = e->client;
	unsigned long index = e->r.resource.handle;

	scoped_guard(spinlock_irqsave, &client->lock) {
		xa_erase(&client->resource_xa, index);
		if (client->in_shutdown)
			wake_up(&client->tx_flush_wait);
	}

	switch (e->rsp.without_tstamp.type) {
	case FW_CDEV_EVENT_RESPONSE:
	{
		struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp;

		if (length < rsp->length)
			rsp->length = length;
		if (rcode == RCODE_COMPLETE)
			memcpy(rsp->data, payload, rsp->length);

		rsp->rcode = rcode;

		// In the case that sizeof(*rsp) doesn't align with the position of the
		// data, and the read is short, preserve an extra copy of the data
		// to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
		// for short reads and some apps depended on it, this is both safe
		// and prudent for compatibility.
		if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
			queue_event(client, &e->event, rsp, sizeof(*rsp), rsp->data, rsp->length);
		else
			queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);

		break;
	}
	case FW_CDEV_EVENT_RESPONSE2:
	{
		struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp;

		if (length < rsp->length)
			rsp->length = length;
		if (rcode == RCODE_COMPLETE)
			memcpy(rsp->data, payload, rsp->length);

		rsp->rcode = rcode;
		rsp->request_tstamp = request_tstamp;
		rsp->response_tstamp = response_tstamp;

		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);

		break;
	}
	default:
		WARN_ON(1);
		break;
	}

	// Drop the xarray's reference.
	client_put(client);
}

static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	void *payload;
	int ret;

	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
	    request->length < 4)
		return -EINVAL;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;
	e->client = client;

	if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
		struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp;

		rsp->type = FW_CDEV_EVENT_RESPONSE;
		rsp->length = request->length;
		rsp->closure = request->closure;
		payload = rsp->data;
	} else {
		struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp;

		rsp->type = FW_CDEV_EVENT_RESPONSE2;
		rsp->length = request->length;
		rsp->closure = request->closure;
		payload = rsp->data;
	}

	if (request->data && copy_from_user(payload, u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	fw_send_request_with_tstamp(client->device->card, &e->r.transaction, request->tcode,
				    destination_id, request->generation, speed, request->offset,
				    payload, request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}

static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
	switch (arg->send_request.tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, &arg->send_request, client->device->node_id,
			    client->device->max_speed);
}

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = to_inbound_transaction_resource(resource);

	if (r->is_fcp)
		fw_request_put(r->request);
	else
		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

	fw_card_put(r->card);
	kfree(r);
}

static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	bool is_fcp = is_in_fcp_region(offset, length);
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	size_t event_size0;
	int ret;

	/* card may be different from handler->client->device->card */
	fw_card_get(card);

	// Extend the lifetime of data for request so that its payload is safely accessible in
	// the process context for the client.
	if (is_fcp)
		fw_request_get(request);

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL)
		goto failed;

	r->card = card;
	r->request = request;
	r->is_fcp = is_fcp;
	r->data = payload;
	r->length = length;

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
		struct fw_cdev_event_request *req = &e->req.request;

		if (tcode & 0x10)
			tcode = TCODE_LOCK_REQUEST;

		req->type = FW_CDEV_EVENT_REQUEST;
		req->tcode = tcode;
		req->offset = offset;
		req->length = length;
		req->handle = r->resource.handle;
		req->closure = handler->closure;
		event_size0 = sizeof(*req);
	} else if (handler->client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
		struct fw_cdev_event_request2 *req = &e->req.request2;

		req->type = FW_CDEV_EVENT_REQUEST2;
		req->tcode = tcode;
		req->offset = offset;
		req->source_node_id = source;
		req->destination_node_id = destination;
		req->card = card->index;
		req->generation = generation;
		req->length = length;
		req->handle = r->resource.handle;
		req->closure = handler->closure;
		event_size0 = sizeof(*req);
	} else {
		struct fw_cdev_event_request3 *req = &e->req.with_tstamp;

		req->type = FW_CDEV_EVENT_REQUEST3;
		req->tcode = tcode;
		req->offset = offset;
		req->source_node_id = source;
		req->destination_node_id = destination;
		req->card = card->index;
		req->generation = generation;
		req->length = length;
		req->handle = r->resource.handle;
		req->closure = handler->closure;
		req->tstamp = fw_request_get_timestamp(request);
		event_size0 = sizeof(*req);
	}

	queue_event(handler->client, &e->event,
		    &e->req, event_size0, r->data, length);
	return;

 failed:
	kfree(r);
	kfree(e);

	if (!is_fcp)
		fw_send_response(card, request, RCODE_CONFLICT_ERROR);
	else
		fw_request_put(request);

	fw_card_put(card);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r = to_address_handler_resource(resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}

static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_allocate *a = &arg->allocate;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = a->offset;
	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
		region.end = a->offset + a->length;
	else
		region.end = a->region_end;

	r->handler.length = a->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data = r;
	r->closure = a->closure;
	r->client = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}
	a->offset = r->handler.offset;

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	a->handle = r->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->deallocate.handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_response *a = &arg->send_response;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;
	int ret = 0;

	if (release_client_resource(client, a->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = to_inbound_transaction_resource(resource);
	if (r->is_fcp) {
		fw_request_put(r->request);
		goto out;
	}

	if (a->length != fw_get_response_length(r->request)) {
		ret = -EINVAL;
		fw_request_put(r->request);
		goto out;
	}
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
		ret = -EFAULT;
		fw_request_put(r->request);
		goto out;
	}
	fw_send_response(r->card, r->request, a->rcode);
 out:
	fw_card_put(r->card);
	kfree(r);

	return ret;
}

static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
	fw_schedule_bus_reset(client->device->card, true,
			      arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
	return 0;
}

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r = to_descriptor_resource(resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}

static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (a->length > 256)
		return -EINVAL;

	r = kmalloc(struct_size(r, data, a->length), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data, u64_to_uptr(a->data),
			   flex_array_size(r, data, a->length))) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length = a->length;
	r->descriptor.immediate = a->immediate;
	r->descriptor.key = a->key;
	r->descriptor.data = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	a->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->remove_descriptor.handle,
				       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kmalloc(sizeof(*e) + header_length, GFP_KERNEL);
	if (e == NULL)
		return;

	e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure = client->iso_closure;
	e->interrupt.cycle = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}

static void iso_mc_callback(struct fw_iso_context *context,
			    dma_addr_t completed, void *data)
{
	struct client *client = data;
	struct iso_interrupt_mc_event *e;

	e = kmalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return;

	e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
	e->interrupt.closure = client->iso_closure;
	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
						      completed);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt), NULL, 0);
}

static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
{
	if (context->type == FW_ISO_CONTEXT_TRANSMIT)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static struct fw_iso_context *fw_iso_mc_context_create(struct fw_card *card,
						       fw_iso_mc_callback_t callback,
						       void *callback_data)
{
	struct fw_iso_context *ctx;

	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
				    0, 0, 0, NULL, callback_data);
	if (!IS_ERR(ctx))
		ctx->callback.mc = callback;

	return ctx;
}

static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
	struct fw_iso_context *context;
	union fw_iso_callback cb;
	int ret;

	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
		     FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

	switch (a->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		if (a->speed > SCODE_3200 || a->channel > 63)
			return -EINVAL;

		cb.sc = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		if (a->header_size < 4 || (a->header_size & 3) ||
		    a->channel > 63)
			return -EINVAL;

		cb.sc = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		cb.mc = iso_mc_callback;
		break;

	default:
		return -EINVAL;
	}

	if (a->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
		context = fw_iso_mc_context_create(client->device->card, cb.mc,
						   client);
	else
		context = fw_iso_context_create(client->device->card, a->type,
						a->channel, a->speed,
						a->header_size, cb.sc, client);
	if (IS_ERR(context))
		return PTR_ERR(context);
	if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
		context->drop_overflow_headers = true;

	// We only support one context at this time.
	guard(spinlock_irq)(&client->lock);

	if (client->iso_context != NULL) {
		fw_iso_context_destroy(context);

		return -EBUSY;
	}
	if (!client->buffer_is_mapped) {
		ret = fw_iso_buffer_map_dma(&client->buffer,
					    client->device->card,
					    iso_dma_direction(context));
		if (ret < 0) {
			fw_iso_context_destroy(context);

			return ret;
		}
		client->buffer_is_mapped = true;
	}
	client->iso_closure = a->closure;
	client->iso_context = context;

	a->handle = 0;

	return 0;
}

static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
	struct fw_iso_context *ctx = client->iso_context;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_set_channels(ctx, &a->channels);
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff)
#define GET_INTERRUPT(v) (((v) >> 16) & 0x01)
#define GET_SKIP(v) (((v) >> 17) & 0x01)
#define GET_TAG(v) (((v) >> 18) & 0x03)
#define GET_SY(v) (((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff)
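
/*
 * Layout of the 32-bit control word decoded by the GET_* macros above:
 *   bits  0-15: payload_length
 *   bit     16: interrupt
 *   bit     17: skip
 *   bits 18-19: tag
 *   bits 20-23: sy
 *   bits 24-31: header_length
 */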

static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_queue_iso *a = &arg->queue_iso;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, transmit_header_bytes = 0;
	u32 control;
	int count;
	DEFINE_RAW_FLEX(struct fw_iso_packet, u, header, 64);

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly. Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through. In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the a->data pointer is ignored.
	 */
	payload = (unsigned long)a->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (a->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
		return -EINVAL;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);

	end = (void __user *)p + a->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u->payload_length = GET_PAYLOAD_LENGTH(control);
		u->interrupt = GET_INTERRUPT(control);
		u->skip = GET_SKIP(control);
		u->tag = GET_TAG(control);
		u->sy = GET_SY(control);
		u->header_length = GET_HEADER_LENGTH(control);

		switch (ctx->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
			if (u->header_length & 3)
				return -EINVAL;
			transmit_header_bytes = u->header_length;
			break;

		case FW_ISO_CONTEXT_RECEIVE:
			if (u->header_length == 0 ||
			    u->header_length % ctx->header_size != 0)
				return -EINVAL;
			break;

		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (u->payload_length == 0 ||
			    u->payload_length & 3)
				return -EINVAL;
			break;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[transmit_header_bytes / 4];
		if (next > end)
			return -EINVAL;
		if (copy_from_user(u->header, p->header, transmit_header_bytes))
			return -EFAULT;
		if (u->skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u->header_length + u->payload_length > 0)
			return -EINVAL;
		if (payload + u->payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, u, &client->buffer, payload))
			break;

		p = next;
		payload += u->payload_length;
		count++;
	}
	fw_iso_context_queue_flush(ctx);

	a->size -= uptr_to_u64(p) - a->packets;
	a->packets = uptr_to_u64(p);
	a->data = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_start_iso *a = &arg->start_iso;

	BUILD_BUG_ON(
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
		return -EINVAL;

	return fw_iso_context_start(client->iso_context,
				    a->cycle, a->sync, a->tags);
}

static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_stop_iso *a = &arg->stop_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_flush_iso *a = &arg->flush_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_flush_completions(client->iso_context);
}

static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
	struct fw_card *card = client->device->card;
	struct timespec64 ts = {0, 0};
	u32 cycle_time = 0;
	int ret;

	guard(irq)();

	ret = fw_card_read_cycle_time(card, &cycle_time);
	if (ret < 0)
		return ret;

	switch (a->clk_id) {
	case CLOCK_REALTIME:      ktime_get_real_ts64(&ts); break;
	case CLOCK_MONOTONIC:     ktime_get_ts64(&ts); break;
	case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts); break;
	default:
		return -EINVAL;
	}

	a->tv_sec = ts.tv_sec;
	a->tv_nsec = ts.tv_nsec;
	a->cycle_timer = cycle_time;

	return 0;
}

static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
	struct fw_cdev_get_cycle_timer2 ct2;

	ct2.clk_id = CLOCK_REALTIME;
	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
	a->cycle_timer = ct2.cycle_timer;

	return 0;
}

static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r = from_work(r, work, work.work);
	struct client *client = r->client;
	unsigned long index = r->resource.handle;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	scoped_guard(spinlock_irq, &client->lock) {
		generation = client->device->generation;
		todo = r->todo;
		// Allow 1000ms grace period for other reallocations.
		if (todo == ISO_RES_ALLOC &&
		    time_is_after_jiffies64(client->device->card->reset_jiffies + secs_to_jiffies(1))) {
			schedule_iso_resource(r, msecs_to_jiffies(333));
			skip = true;
		} else {
			// We could be called twice within the same generation.
			skip = todo == ISO_RES_REALLOC &&
			       r->generation == generation;
		}
		free = todo == ISO_RES_DEALLOC ||
		       todo == ISO_RES_ALLOC_ONCE ||
		       todo == ISO_RES_DEALLOC_ONCE;
		r->generation = generation;
	}

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			       r->channels, &channel, &bandwidth,
			       todo == ISO_RES_ALLOC ||
			       todo == ISO_RES_REALLOC ||
			       todo == ISO_RES_ALLOC_ONCE);
	/*
	 * Is this generation outdated already? As long as this resource sticks
	 * in the xarray, it will be scheduled again for a newer generation or at
	 * shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	scoped_guard(spinlock_irq, &client->lock) {
		// Transit from allocation to reallocation, except if the client
		// requested deallocation in the meantime.
		if (r->todo == ISO_RES_ALLOC)
			r->todo = ISO_RES_REALLOC;
		// Allocation or reallocation failure? Pull this resource out of the
		// xarray and prepare for deletion, unless the client is shutting down.
		if (r->todo == ISO_RES_REALLOC && !success &&
		    !client->in_shutdown &&
		    xa_erase(&client->resource_xa, index)) {
			client_put(client);
			free = true;
		}
	}

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->iso_resource.handle = r->resource.handle;
	e->iso_resource.channel = channel;
	e->iso_resource.bandwidth = bandwidth;

	queue_event(client, &e->event,
		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}

static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r = to_iso_resource(resource);

	guard(spinlock_irq)(&client->lock);

	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r, 0);
}

static int init_iso_resource(struct client *client,
			     struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
		return -EINVAL;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client = client;
	r->todo = todo;
	r->generation = -1;
	r->channels = request->channels;
	r->bandwidth = request->bandwidth;
	r->e_alloc = e1;
	r->e_dealloc = e2;

	e1->iso_resource.closure = request->closure;
	e1->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->iso_resource.closure = request->closure;
	e2->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r, 0);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}

static int ioctl_allocate_iso_resource(struct client *client,
				       union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
					 union ioctl_arg *arg)
{
	return release_client_resource(client,
			arg->deallocate.handle, release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client,
					    union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client,
					      union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}

/*
 * Returns a speed code: Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
	return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client,
					union ioctl_arg *arg)
{
	struct fw_cdev_send_request *a = &arg->send_request;

	switch (a->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}

static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
	struct fw_cdev_send_request request;
	int dest;

	if (a->speed > client->device->card->link_speed ||
	    a->length > 1024 << a->speed)
		return -EIO;

	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
	request.tcode = TCODE_STREAM_DATA;
	request.length = a->length;
	request.closure = a->closure;
	request.data = a->data;
	request.generation = a->generation;

	return init_request(client, &request, dest, a->speed);
}

static void outbound_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	struct outbound_phy_packet_event *e =
		container_of(packet, struct outbound_phy_packet_event, p);
	struct client *e_client = e->client;
	u32 rcode;

	trace_async_phy_outbound_complete((uintptr_t)packet, card->index, status, packet->generation,
					  packet->timestamp);

	switch (status) {
	// expected:
	case ACK_COMPLETE:
		rcode = RCODE_COMPLETE;
		break;
	// should never happen with PHY packets:
	case ACK_PENDING:
		rcode = RCODE_COMPLETE;
		break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:
		rcode = RCODE_BUSY;
		break;
	case ACK_DATA_ERROR:
		rcode = RCODE_DATA_ERROR;
		break;
	case ACK_TYPE_ERROR:
		rcode = RCODE_TYPE_ERROR;
		break;
	// stale generation; cancelled; on certain controllers: no ack
	default:
		rcode = status;
		break;
	}

	switch (e->phy_packet.without_tstamp.type) {
	case FW_CDEV_EVENT_PHY_PACKET_SENT:
	{
		struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;

		pp->rcode = rcode;
		pp->data[0] = packet->timestamp;
		queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length,
			    NULL, 0);
		break;
	}
	case FW_CDEV_EVENT_PHY_PACKET_SENT2:
	{
		struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;

		pp->rcode = rcode;
		pp->tstamp = packet->timestamp;
		queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length,
			    NULL, 0);
		break;
	}
	default:
		WARN_ON(1);
		break;
	}

	client_put(e_client);
}

static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
	struct fw_card *card = client->device->card;
	struct outbound_phy_packet_event *e;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	e = kzalloc(sizeof(*e) + sizeof(a->data), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	client_get(client);
	e->client = client;
	e->p.speed = SCODE_100;
	e->p.generation = a->generation;
	async_header_set_tcode(e->p.header, TCODE_LINK_INTERNAL);
	e->p.header[1] = a->data[0];
	e->p.header[2] = a->data[1];
	e->p.header_length = 12;
	e->p.callback = outbound_phy_packet_callback;

	if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
		struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;

		pp->closure = a->closure;
		pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT;
		if (is_ping_packet(a->data))
			pp->length = 4;
	} else {
		struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;

		pp->closure = a->closure;
		pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT2;
		// Keep the data field so that application can match the response event to the
		// request.
		pp->length = sizeof(a->data);
		memcpy(pp->data, a->data, sizeof(a->data));
	}

	trace_async_phy_outbound_initiate((uintptr_t)&e->p, card->index, e->p.generation,
					  e->p.header[1], e->p.header[2]);

	card->driver->send_request(card, &e->p);

	return 0;
}

static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	// NOTE: This can be without irq when we can guarantee that __fw_send_request() for local
	// destination never runs in any type of IRQ context.
	scoped_guard(spinlock_irq, &phy_receiver_list_lock)
		list_move_tail(&client->phy_receiver_link, &phy_receiver_list);

	client->phy_receiver_closure = a->closure;

	return 0;
}

void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
	struct client *client;

	// NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for local
	// destination never runs in any type of IRQ context.
	guard(spinlock_irqsave)(&phy_receiver_list_lock);

	list_for_each_entry(client, &phy_receiver_list, phy_receiver_link) {
		struct inbound_phy_packet_event *e;

		if (client->device->card != card)
			continue;

		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
		if (e == NULL)
			break;

		if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
			struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;

			pp->closure = client->phy_receiver_closure;
			pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
			pp->rcode = RCODE_COMPLETE;
			pp->length = 8;
			pp->data[0] = p->header[1];
			pp->data[1] = p->header[2];
			queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
		} else {
			struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;

			pp->closure = client->phy_receiver_closure;
			pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED2;
			pp->rcode = RCODE_COMPLETE;
			pp->length = 8;
			pp->tstamp = p->timestamp;
			pp->data[0] = p->header[1];
			pp->data[1] = p->header[2];
			queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
		}
	}
}

static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
	[0x00] = ioctl_get_info,
	[0x01] = ioctl_send_request,
	[0x02] = ioctl_allocate,
	[0x03] = ioctl_deallocate,
	[0x04] = ioctl_send_response,
	[0x05] = ioctl_initiate_bus_reset,
	[0x06] = ioctl_add_descriptor,
	[0x07] = ioctl_remove_descriptor,
	[0x08] = ioctl_create_iso_context,
	[0x09] = ioctl_queue_iso,
	[0x0a] = ioctl_start_iso,
	[0x0b] = ioctl_stop_iso,
	[0x0c] = ioctl_get_cycle_timer,
	[0x0d] = ioctl_allocate_iso_resource,
	[0x0e] = ioctl_deallocate_iso_resource,
	[0x0f] = ioctl_allocate_iso_resource_once,
	[0x10] = ioctl_deallocate_iso_resource_once,
	[0x11] = ioctl_get_speed,
	[0x12] = ioctl_send_broadcast_request,
	[0x13] = ioctl_send_stream_packet,
	[0x14] = ioctl_get_cycle_timer2,
	[0x15] = ioctl_send_phy_packet,
	[0x16] = ioctl_receive_phy_packets,
	[0x17] = ioctl_set_iso_channels,
	[0x18] = ioctl_flush_iso,
};
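
/*
 * The table above is indexed with _IOC_NR(cmd) in dispatch_ioctl() below, so
 * the slot numbers are part of the character device ABI and must not be
 * reordered.
 */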

static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	union ioctl_arg buffer;
	int ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buffer))
		return -ENOTTY;

	memset(&buffer, 0, sizeof(buffer));

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	ret = fw_iso_buffer_alloc(&client->buffer, page_count);
	if (ret < 0)
		return ret;

	scoped_guard(spinlock_irq, &client->lock) {
		if (client->iso_context) {
			ret = fw_iso_buffer_map_dma(&client->buffer, client->device->card,
						    iso_dma_direction(client->iso_context));
			if (ret < 0)
				goto fail;
			client->buffer_is_mapped = true;
		}
	}

	ret = vm_map_pages_zero(vma, client->buffer.pages,
				client->buffer.page_count);
	if (ret < 0)
		goto fail;

	return 0;
 fail:
	fw_iso_buffer_destroy(&client->buffer, client->device->card);
	return ret;
}

static bool has_outbound_transactions(struct client *client)
{
	struct client_resource *resource;
	unsigned long index;

	guard(spinlock_irq)(&client->lock);

	xa_for_each(&client->resource_xa, index, resource) {
		if (is_outbound_transaction_resource(resource))
			return true;
	}

	return false;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *event, *next_event;
	struct client_resource *resource;
	unsigned long index;

	// NOTE: This can be without irq when we can guarantee that __fw_send_request() for local
	// destination never runs in any type of IRQ context.
	scoped_guard(spinlock_irq, &phy_receiver_list_lock)
		list_del(&client->phy_receiver_link);

	scoped_guard(mutex, &client->device->client_list_mutex)
		list_del(&client->link);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	// Freeze client->resource_xa and client->event_list.
	scoped_guard(spinlock_irq, &client->lock)
		client->in_shutdown = true;

	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));

	xa_for_each(&client->resource_xa, index, resource) {
		resource->release(client, resource);
		client_put(client);
	}
	xa_destroy(&client->resource_xa);

	list_for_each_entry_safe(event, next_event, &client->event_list, link)
		kfree(event);

	client_put(client);

	return 0;
}

static __poll_t fw_device_op_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= EPOLLHUP | EPOLLERR;
	if (!list_empty(&client->event_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner = THIS_MODULE,
	.open = fw_device_op_open,
	.read = fw_device_op_read,
	.unlocked_ioctl = fw_device_op_ioctl,
	.mmap = fw_device_op_mmap,
	.release = fw_device_op_release,
	.poll = fw_device_op_poll,
	.compat_ioctl = compat_ptr_ioctl,
};