/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/compat.h>
#include <linux/firewire-cdev.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"

struct client;
struct client_resource {
	struct list_head link;
	void (*release)(struct client *client, struct client_resource *r);
	u32 handle;
};

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct.
 */

struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct response {
	struct event event;
	struct fw_transaction transaction;
	struct client *client;
	struct client_resource resource;
	struct fw_cdev_event_response response;
};

struct iso_interrupt {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct client {
	u32 version;
	struct fw_device *device;
	spinlock_t lock;
	u32 resource_handle;
	struct list_head resource_list;
	struct list_head event_list;
	wait_queue_head_t wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head link;
};

static inline void __user *
u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64
uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;
	unsigned long flags;

	device = fw_device_from_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL)
		return -ENOMEM;

	client->device = fw_device_get(device);
	INIT_LIST_HEAD(&client->event_list);
	INIT_LIST_HEAD(&client->resource_list);
	spin_lock_init(&client->lock);
	init_waitqueue_head(&client->wait);

	file->private_data = client;

	spin_lock_irqsave(&device->card->lock, flags);
	list_add_tail(&client->link, &device->client_list);
	spin_unlock_irqrestore(&device->card->lock, flags);

	return 0;
}
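/*
 * Illustrative userspace sketch (not part of this driver): opening the
 * character device served by fw_device_op_open().  The "/dev/fw0" path
 * is an assumption; the actual node name depends on how userspace
 * (e.g. udev) names the device.
 *
 *	int fd = open("/dev/fw0", O_RDWR);
 *	if (fd < 0)
 *		perror("open");
 */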
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

static int
dequeue_event(struct client *client, char __user *buffer, size_t count)
{
	unsigned long flags;
	struct event *event;
	size_t size, total;
	int i, retval;

	retval = wait_event_interruptible(client->wait,
					  !list_empty(&client->event_list) ||
					  fw_device_is_shutdown(client->device));
	if (retval < 0)
		return retval;

	if (list_empty(&client->event_list) &&
	    fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irqsave(&client->lock, flags);
	event = container_of(client->event_list.next, struct event, link);
	list_del(&event->link);
	spin_unlock_irqrestore(&client->lock, flags);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			retval = -EFAULT;
			goto out;
		}
		total += size;
	}
	retval = total;

 out:
	kfree(event);

	return retval;
}

static ssize_t
fw_device_op_read(struct file *file,
		  char __user *buffer, size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}
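/*
 * Illustrative userspace sketch (not part of this driver): waiting for
 * and reading one event, as served by dequeue_event().  The event
 * structs come from <linux/firewire-cdev.h>; the buffer size here is
 * an arbitrary choice, large enough for any fixed-size event plus its
 * trailing data.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char buf[4096];
 *
 *	if (poll(&pfd, 1, -1) > 0 && read(fd, buf, sizeof(buf)) > 0) {
 *		struct fw_cdev_event_common *e = (void *)buf;
 *		// dispatch on e->type, e.g. FW_CDEV_EVENT_BUS_RESET
 *	}
 */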
static void
fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
		     struct client *client)
{
	struct fw_card *card = client->device->card;

	event->closure = client->bus_reset_closure;
	event->type = FW_CDEV_EVENT_BUS_RESET;
	event->generation = client->device->generation;
	smp_rmb(); /* node_id must not be older than generation */
	event->node_id = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id = 0; /* FIXME: We don't track the BM. */
	event->irm_node_id = card->irm_node->node_id;
	event->root_node_id = card->root_node->node_id;
}

static void
for_each_client(struct fw_device *device,
		void (*callback)(struct client *client))
{
	struct fw_card *card = device->card;
	struct client *c;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(c, &device->client_list, link)
		callback(c);

	spin_unlock_irqrestore(&card->lock, flags);
}

static void
queue_bus_reset_event(struct client *client)
{
	struct bus_reset *bus_reset;

	bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
	if (bus_reset == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

	fill_bus_reset_event(&bus_reset->reset, client);

	queue_event(client, &bus_reset->event,
		    &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

static int ioctl_get_info(struct client *client, void *buffer)
{
	struct fw_cdev_get_info *get_info = buffer;
	struct fw_cdev_event_bus_reset bus_reset;

	client->version = get_info->version;
	get_info->version = FW_CDEV_VERSION;

	if (get_info->rom != 0) {
		void __user *uptr = u64_to_uptr(get_info->rom);
		size_t want = get_info->rom_length;
		size_t have = client->device->config_rom_length * 4;

		if (copy_to_user(uptr, client->device->config_rom,
				 min(want, have)))
			return -EFAULT;
	}
	get_info->rom_length = client->device->config_rom_length * 4;

	client->bus_reset_closure = get_info->bus_reset_closure;
	if (get_info->bus_reset != 0) {
		void __user *uptr = u64_to_uptr(get_info->bus_reset);

		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	get_info->card = client->device->card->index;

	return 0;
}
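/*
 * Illustrative userspace sketch (not part of this driver): the
 * FW_CDEV_IOC_GET_INFO handshake that ioctl_get_info() serves.
 * Passing rom == 0 skips the config ROM copy; bus_reset points at a
 * buffer that receives a snapshot of the current bus state.
 *
 *	struct fw_cdev_event_bus_reset reset;
 *	struct fw_cdev_get_info info = {
 *		.version   = FW_CDEV_VERSION,
 *		.bus_reset = (__u64)(unsigned long)&reset,
 *	};
 *
 *	if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) == 0)
 *		printf("node %u on card %u\n", reset.node_id, info.card);
 */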
static void
add_client_resource(struct client *client, struct client_resource *resource)
{
	unsigned long flags;

	spin_lock_irqsave(&client->lock, flags);
	list_add_tail(&resource->link, &client->resource_list);
	resource->handle = client->resource_handle++;
	spin_unlock_irqrestore(&client->lock, flags);
}

static int
release_client_resource(struct client *client, u32 handle,
			struct client_resource **resource)
{
	struct client_resource *r;
	unsigned long flags;

	spin_lock_irqsave(&client->lock, flags);
	list_for_each_entry(r, &client->resource_list, link) {
		if (r->handle == handle) {
			list_del(&r->link);
			break;
		}
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (&r->link == &client->resource_list)
		return -EINVAL;

	if (resource)
		*resource = r;
	else
		r->release(client, r);

	return 0;
}

static void
release_transaction(struct client *client, struct client_resource *resource)
{
	struct response *response =
		container_of(resource, struct response, resource);

	fw_cancel_transaction(client->device->card, &response->transaction);
}

static void
complete_transaction(struct fw_card *card, int rcode,
		     void *payload, size_t length, void *data)
{
	struct response *response = data;
	struct client *client = response->client;
	unsigned long flags;

	if (length < response->response.length)
		response->response.length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(response->response.data, payload,
		       response->response.length);

	spin_lock_irqsave(&client->lock, flags);
	list_del(&response->resource.link);
	spin_unlock_irqrestore(&client->lock, flags);

	response->response.type = FW_CDEV_EVENT_RESPONSE;
	response->response.rcode = rcode;
	queue_event(client, &response->event,
		    &response->response, sizeof(response->response),
		    response->response.data, response->response.length);
}

static int ioctl_send_request(struct client *client, void *buffer)
{
	struct fw_device *device = client->device;
	struct fw_cdev_send_request *request = buffer;
	struct response *response;

	/* What is the biggest size we'll accept, really? */
	if (request->length > 4096)
		return -EINVAL;

	response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
	if (response == NULL)
		return -ENOMEM;

	response->client = client;
	response->response.length = request->length;
	response->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(response->response.data,
			   u64_to_uptr(request->data), request->length)) {
		kfree(response);
		return -EFAULT;
	}

	response->resource.release = release_transaction;
	add_client_resource(client, &response->resource);

	fw_send_request(device->card, &response->transaction,
			request->tcode & 0x1f,
			device->node->node_id,
			request->generation,
			device->max_speed,
			request->offset,
			response->response.data, request->length,
			complete_transaction, response);

	if (request->data)
		return sizeof(*request) + request->length;
	else
		return sizeof(*request);
}
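/*
 * Illustrative userspace sketch (not part of this driver): a quadlet
 * read request, as handled by ioctl_send_request() above.  The offset
 * (CSR_REGISTER_BASE + CSR_CYCLE_TIME) and the raw tcode value 0x4
 * are example choices; the matching FW_CDEV_EVENT_RESPONSE arrives
 * later via read().
 *
 *	struct fw_cdev_send_request rq = {
 *		.tcode      = 0x4,	// TCODE_READ_QUADLET_REQUEST
 *		.length     = 4,
 *		.offset     = 0xfffff0000200ULL,
 *		.closure    = 0,
 *		.generation = reset.generation,	// from a bus reset event
 *	};
 *
 *	ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &rq);
 */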
struct address_handler {
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
	struct client_resource resource;
};

struct request {
	struct fw_request *request;
	void *data;
	size_t length;
	struct client_resource resource;
};

struct request_event {
	struct event event;
	struct fw_cdev_event_request request;
};

static void
release_request(struct client *client, struct client_resource *resource)
{
	struct request *request =
		container_of(resource, struct request, resource);

	fw_send_response(client->device->card, request->request,
			 RCODE_CONFLICT_ERROR);
	kfree(request);
}

static void
handle_request(struct fw_card *card, struct fw_request *r,
	       int tcode, int destination, int source,
	       int generation, int speed,
	       unsigned long long offset,
	       void *payload, size_t length, void *callback_data)
{
	struct address_handler *handler = callback_data;
	struct request *request;
	struct request_event *e;
	struct client *client = handler->client;

	request = kmalloc(sizeof(*request), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (request == NULL || e == NULL) {
		kfree(request);
		kfree(e);
		fw_send_response(card, r, RCODE_CONFLICT_ERROR);
		return;
	}

	request->request = r;
	request->data = payload;
	request->length = length;

	request->resource.release = release_request;
	add_client_resource(client, &request->resource);

	e->request.type = FW_CDEV_EVENT_REQUEST;
	e->request.tcode = tcode;
	e->request.offset = offset;
	e->request.length = length;
	e->request.handle = request->resource.handle;
	e->request.closure = handler->closure;

	queue_event(client, &e->event,
		    &e->request, sizeof(e->request), payload, length);
}

static void
release_address_handler(struct client *client,
			struct client_resource *resource)
{
	struct address_handler *handler =
		container_of(resource, struct address_handler, resource);

	fw_core_remove_address_handler(&handler->handler);
	kfree(handler);
}

static int ioctl_allocate(struct client *client, void *buffer)
{
	struct fw_cdev_allocate *request = buffer;
	struct address_handler *handler;
	struct fw_address_region region;

	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
	if (handler == NULL)
		return -ENOMEM;

	region.start = request->offset;
	region.end = request->offset + request->length;
	handler->handler.length = request->length;
	handler->handler.address_callback = handle_request;
	handler->handler.callback_data = handler;
	handler->closure = request->closure;
	handler->client = client;

	if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
		kfree(handler);
		return -EBUSY;
	}

	handler->resource.release = release_address_handler;
	add_client_resource(client, &handler->resource);
	request->handle = handler->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, void *buffer)
{
	struct fw_cdev_deallocate *request = buffer;

	return release_client_resource(client, request->handle, NULL);
}

static int ioctl_send_response(struct client *client, void *buffer)
{
	struct fw_cdev_send_response *request = buffer;
	struct client_resource *resource;
	struct request *r;

	if (release_client_resource(client, request->handle, &resource) < 0)
		return -EINVAL;
	r = container_of(resource, struct request, resource);
	if (request->length < r->length)
		r->length = request->length;
	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
		return -EFAULT;

	fw_send_response(client->device->card, r->request, request->rcode);
	kfree(r);

	return 0;
}
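/*
 * Illustrative userspace sketch (not part of this driver): serving an
 * address range via ioctl_allocate()/ioctl_send_response().  The
 * offset, length, and the raw rcode value 0 (RCODE_COMPLETE) are
 * example choices.
 *
 *	struct fw_cdev_allocate alloc = {
 *		.offset = 0xffffe0000000ULL,
 *		.length = 0x1000,
 *	};
 *	ioctl(fd, FW_CDEV_IOC_ALLOCATE, &alloc);
 *
 *	// ...later, after reading an FW_CDEV_EVENT_REQUEST event 'req'...
 *	struct fw_cdev_send_response resp = {
 *		.rcode  = 0,		// RCODE_COMPLETE
 *		.handle = req.handle,
 *	};
 *	ioctl(fd, FW_CDEV_IOC_SEND_RESPONSE, &resp);
 */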
static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
{
	struct fw_cdev_initiate_bus_reset *request = buffer;
	int short_reset;

	short_reset = (request->type == FW_CDEV_SHORT_RESET);

	return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

struct descriptor {
	struct fw_descriptor d;
	struct client_resource resource;
	u32 data[0];
};

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor *descriptor =
		container_of(resource, struct descriptor, resource);

	fw_core_remove_descriptor(&descriptor->d);
	kfree(descriptor);
}

static int ioctl_add_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_add_descriptor *request = buffer;
	struct descriptor *descriptor;
	int retval;

	if (request->length > 256)
		return -EINVAL;

	descriptor =
		kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
	if (descriptor == NULL)
		return -ENOMEM;

	if (copy_from_user(descriptor->data,
			   u64_to_uptr(request->data), request->length * 4)) {
		kfree(descriptor);
		return -EFAULT;
	}

	descriptor->d.length = request->length;
	descriptor->d.immediate = request->immediate;
	descriptor->d.key = request->key;
	descriptor->d.data = descriptor->data;

	retval = fw_core_add_descriptor(&descriptor->d);
	if (retval < 0) {
		kfree(descriptor);
		return retval;
	}

	descriptor->resource.release = release_descriptor;
	add_client_resource(client, &descriptor->resource);
	request->handle = descriptor->resource.handle;

	return 0;
}

static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_remove_descriptor *request = buffer;

	return release_client_resource(client, request->handle, NULL);
}
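/*
 * Illustrative userspace sketch (not part of this driver): adding a
 * config ROM descriptor via ioctl_add_descriptor().  The quadlets
 * below are placeholder example values, not a meaningful ROM entry;
 * length is counted in quadlets, as the * 4 above implies.
 *
 *	__u32 quads[2] = { 0, 0 };
 *	struct fw_cdev_add_descriptor desc = {
 *		.length = 2,
 *		.data   = (__u64)(unsigned long)quads,
 *	};
 *
 *	ioctl(fd, FW_CDEV_IOC_ADD_DESCRIPTOR, &desc);
 *	// desc.handle can later go to FW_CDEV_IOC_REMOVE_DESCRIPTOR
 */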
static void
iso_callback(struct fw_iso_context *context, u32 cycle,
	     size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt *irq;

	irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
	if (irq == NULL)
		return;

	irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
	irq->interrupt.closure = client->iso_closure;
	irq->interrupt.cycle = cycle;
	irq->interrupt.header_length = header_length;
	memcpy(irq->interrupt.header, header, header_length);
	queue_event(client, &irq->event, &irq->interrupt,
		    sizeof(irq->interrupt) + header_length, NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, void *buffer)
{
	struct fw_cdev_create_iso_context *request = buffer;
	struct fw_iso_context *context;

	if (request->channel > 63)
		return -EINVAL;

	switch (request->type) {
	case FW_ISO_CONTEXT_RECEIVE:
		if (request->header_size < 4 || (request->header_size & 3))
			return -EINVAL;

		break;

	case FW_ISO_CONTEXT_TRANSMIT:
		if (request->speed > SCODE_3200)
			return -EINVAL;

		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card,
					request->type,
					request->channel,
					request->speed,
					request->header_size,
					iso_callback, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	client->iso_closure = request->closure;
	client->iso_context = context;

	/* We only support one context at this time. */
	request->handle = 0;

	return 0;
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
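/*
 * Illustrative counterpart to the GET_* macros above (not part of this
 * driver): how userspace would pack a control word before queueing a
 * packet.  The shifts mirror the decoding; tag is a 2-bit field and sy
 * a 4-bit field.  <linux/firewire-cdev.h> provides FW_CDEV_ISO_*
 * macros for the same purpose.
 *
 *	__u32 control = payload_length          // bits 0-15
 *		      | (interrupt << 16)       // 1 bit
 *		      | (skip << 17)            // 1 bit
 *		      | (tag << 18)             // 2 bits
 *		      | (sy << 20)              // 4 bits
 *		      | (header_length << 24);  // bits 24-31, in bytes
 */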
static int ioctl_queue_iso(struct client *client, void *buffer)
{
	struct fw_cdev_queue_iso *request = buffer;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, header_length;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || request->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we set up the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request->data pointer is ignored.
	 */

	payload = (unsigned long)request->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (request->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

	if (!access_ok(VERIFY_READ, p, request->size))
		return -EFAULT;

	end = (void __user *)p + request->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			header_length = u.packet.header_length;
		} else {
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
				return -EINVAL;
			}
			header_length = 0;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user(u.packet.header, p->header, header_length))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	request->size -= uptr_to_u64(p) - request->packets;
	request->packets = uptr_to_u64(p);
	request->data = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, void *buffer)
{
	struct fw_cdev_start_iso *request = buffer;

	if (request->handle != 0)
		return -EINVAL;
	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
		if (request->tags == 0 || request->tags > 15)
			return -EINVAL;

		if (request->sync > 15)
			return -EINVAL;
	}

	return fw_iso_context_start(client->iso_context, request->cycle,
				    request->sync, request->tags);
}

static int ioctl_stop_iso(struct client *client, void *buffer)
{
	struct fw_cdev_stop_iso *request = buffer;

	if (request->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
	struct fw_cdev_get_cycle_timer *request = buffer;
	struct fw_card *card = client->device->card;
	unsigned long long bus_time;
	struct timeval tv;
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);

	bus_time = card->driver->get_bus_time(card);
	do_gettimeofday(&tv);

	local_irq_restore(flags);
	preempt_enable();

	request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
	request->cycle_timer = bus_time & 0xffffffff;
	return 0;
}

static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
	ioctl_get_cycle_timer,
};

static int
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
	char buffer[256];
	int retval;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
		return -EINVAL;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
	if (retval < 0)
		return retval;

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return 0;
}

static long
fw_device_op_ioctl(struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	return dispatch_ioctl(client, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long
fw_device_op_compat_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, retval;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	retval = fw_iso_buffer_init(&client->buffer, client->device->card,
				    page_count, direction);
	if (retval < 0)
		return retval;

	retval = fw_iso_buffer_map(&client->buffer, vma);
	if (retval < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return retval;
}
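/*
 * Illustrative userspace sketch (not part of this driver): mapping the
 * iso buffer that fw_device_op_mmap() backs.  The 16-page size is an
 * arbitrary example; requesting PROT_WRITE selects DMA_TO_DEVICE
 * (transmit) above, read-only mappings select DMA_FROM_DEVICE.
 *
 *	size_t size = 16 * getpagesize();
 *	void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	if (buf == MAP_FAILED)
 *		perror("mmap");
 */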
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *e, *next_e;
	struct client_resource *r, *next_r;
	unsigned long flags;

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	list_for_each_entry_safe(r, next_r, &client->resource_list, link)
		r->release(client, r);

	/*
	 * FIXME: We should wait for the async tasklets to stop
	 * running before freeing the memory.
	 */

	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);

	spin_lock_irqsave(&client->device->card->lock, flags);
	list_del(&client->link);
	spin_unlock_irqrestore(&client->device->card->lock, flags);

	fw_device_put(client->device);
	kfree(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,

#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};