/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/compat.h>
#include <linux/firewire-cdev.h>
#include <asm/uaccess.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"

struct client;
struct client_resource {
        struct list_head link;
        void (*release)(struct client *client, struct client_resource *r);
        u32 handle;
};

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct.
 */

struct event {
        struct { void *data; size_t size; } v[2];
        struct list_head link;
};

struct bus_reset {
        struct event event;
        struct fw_cdev_event_bus_reset reset;
};

struct response {
        struct event event;
        struct fw_transaction transaction;
        struct client *client;
        struct client_resource resource;
        struct fw_cdev_event_response response;
};

struct iso_interrupt {
        struct event event;
        struct fw_cdev_event_iso_interrupt interrupt;
};

struct client {
        u32 version;
        struct fw_device *device;
        spinlock_t lock;
        u32 resource_handle;
        struct list_head resource_list;
        struct list_head event_list;
        wait_queue_head_t wait;
        u64 bus_reset_closure;

        struct fw_iso_context *iso_context;
        u64 iso_closure;
        struct fw_iso_buffer buffer;
        unsigned long vm_start;

        struct list_head link;
};

static inline void __user *
u64_to_uptr(__u64 value)
{
        return (void __user *)(unsigned long)value;
}

static inline __u64
uptr_to_u64(void __user *ptr)
{
        return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
        struct fw_device *device;
        struct client *client;
        unsigned long flags;

        device = fw_device_from_devt(inode->i_rdev);
        if (device == NULL)
                return -ENODEV;

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (client == NULL)
                return -ENOMEM;

        client->device = fw_device_get(device);
        INIT_LIST_HEAD(&client->event_list);
        INIT_LIST_HEAD(&client->resource_list);
        spin_lock_init(&client->lock);
        init_waitqueue_head(&client->wait);

        file->private_data = client;

        spin_lock_irqsave(&device->card->lock, flags);
        list_add_tail(&client->link, &device->client_list);
        spin_unlock_irqrestore(&device->card->lock, flags);

        return 0;
}

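/*
 * Events are queued in up to two segments: v[0] carries the fixed-size
 * event struct and v[1] an optional payload.  dequeue_event() copies
 * the segments back-to-back into the caller's read() buffer and then
 * frees the event.
 */
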
static void queue_event(struct client *client, struct event *event,
                        void *data0, size_t size0, void *data1, size_t size1)
{
        unsigned long flags;

        event->v[0].data = data0;
        event->v[0].size = size0;
        event->v[1].data = data1;
        event->v[1].size = size1;

        spin_lock_irqsave(&client->lock, flags);

        list_add_tail(&event->link, &client->event_list);
        wake_up_interruptible(&client->wait);

        spin_unlock_irqrestore(&client->lock, flags);
}

static int
dequeue_event(struct client *client, char __user *buffer, size_t count)
{
        unsigned long flags;
        struct event *event;
        size_t size, total;
        int i, retval;

        retval = wait_event_interruptible(client->wait,
                                !list_empty(&client->event_list) ||
                                fw_device_is_shutdown(client->device));
        if (retval < 0)
                return retval;

        if (list_empty(&client->event_list) &&
            fw_device_is_shutdown(client->device))
                return -ENODEV;

        spin_lock_irqsave(&client->lock, flags);
        event = container_of(client->event_list.next, struct event, link);
        list_del(&event->link);
        spin_unlock_irqrestore(&client->lock, flags);

        total = 0;
        for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
                size = min(event->v[i].size, count - total);
                if (copy_to_user(buffer + total, event->v[i].data, size)) {
                        retval = -EFAULT;
                        goto out;
                }
                total += size;
        }
        retval = total;

 out:
        kfree(event);

        return retval;
}

static ssize_t
fw_device_op_read(struct file *file,
                  char __user *buffer, size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return dequeue_event(client, buffer, count);
}

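/*
 * A minimal userspace read loop, for illustration only (a sketch;
 * buffer size and per-type handling are up to the client):
 *
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *      char buf[4096];
 *
 *      while (poll(&pfd, 1, -1) > 0) {
 *              ssize_t len = read(fd, buf, sizeof(buf));
 *              if (len <= 0)
 *                      break;
 *              switch (((struct fw_cdev_event_common *)buf)->type) {
 *              case FW_CDEV_EVENT_BUS_RESET:
 *              case FW_CDEV_EVENT_RESPONSE:
 *              case FW_CDEV_EVENT_REQUEST:
 *              case FW_CDEV_EVENT_ISO_INTERRUPT:
 *                      ...
 *              }
 *      }
 *
 * Each read() dequeues exactly one event.
 */
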
static void
fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
                     struct client *client)
{
        struct fw_card *card = client->device->card;

        event->closure = client->bus_reset_closure;
        event->type = FW_CDEV_EVENT_BUS_RESET;
        event->node_id = client->device->node_id;
        event->local_node_id = card->local_node->node_id;
        event->bm_node_id = 0; /* FIXME: We don't track the BM. */
        event->irm_node_id = card->irm_node->node_id;
        event->root_node_id = card->root_node->node_id;
        event->generation = card->generation;
}

static void
for_each_client(struct fw_device *device,
                void (*callback)(struct client *client))
{
        struct fw_card *card = device->card;
        struct client *c;
        unsigned long flags;

        spin_lock_irqsave(&card->lock, flags);

        list_for_each_entry(c, &device->client_list, link)
                callback(c);

        spin_unlock_irqrestore(&card->lock, flags);
}

static void
queue_bus_reset_event(struct client *client)
{
        struct bus_reset *bus_reset;

        bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
        if (bus_reset == NULL) {
                fw_notify("Out of memory when allocating bus reset event\n");
                return;
        }

        fill_bus_reset_event(&bus_reset->reset, client);

        queue_event(client, &bus_reset->event,
                    &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
}

void fw_device_cdev_update(struct fw_device *device)
{
        for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
        wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
        for_each_client(device, wake_up_client);
}

static int ioctl_get_info(struct client *client, void *buffer)
{
        struct fw_cdev_get_info *get_info = buffer;
        struct fw_cdev_event_bus_reset bus_reset;

        client->version = get_info->version;
        get_info->version = FW_CDEV_VERSION;

        if (get_info->rom != 0) {
                void __user *uptr = u64_to_uptr(get_info->rom);
                size_t want = get_info->rom_length;
                size_t have = client->device->config_rom_length * 4;

                if (copy_to_user(uptr, client->device->config_rom,
                                 min(want, have)))
                        return -EFAULT;
        }
        get_info->rom_length = client->device->config_rom_length * 4;

        client->bus_reset_closure = get_info->bus_reset_closure;
        if (get_info->bus_reset != 0) {
                void __user *uptr = u64_to_uptr(get_info->bus_reset);

                fill_bus_reset_event(&bus_reset, client);
                if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
                        return -EFAULT;
        }

        get_info->card = client->device->card->index;

        return 0;
}

static void
add_client_resource(struct client *client, struct client_resource *resource)
{
        unsigned long flags;

        spin_lock_irqsave(&client->lock, flags);
        list_add_tail(&resource->link, &client->resource_list);
        resource->handle = client->resource_handle++;
        spin_unlock_irqrestore(&client->lock, flags);
}

static int
release_client_resource(struct client *client, u32 handle,
                        struct client_resource **resource)
{
        struct client_resource *r;
        unsigned long flags;

        spin_lock_irqsave(&client->lock, flags);
        list_for_each_entry(r, &client->resource_list, link) {
                if (r->handle == handle) {
                        list_del(&r->link);
                        break;
                }
        }
        spin_unlock_irqrestore(&client->lock, flags);

        if (&r->link == &client->resource_list)
                return -EINVAL;

        if (resource)
                *resource = r;
        else
                r->release(client, r);

        return 0;
}

static void
release_transaction(struct client *client, struct client_resource *resource)
{
        struct response *response =
                container_of(resource, struct response, resource);

        fw_cancel_transaction(client->device->card, &response->transaction);
}

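/*
 * Outbound transactions: ioctl_send_request() below registers the
 * in-flight transaction as a client resource, so that client teardown
 * can fw_cancel_transaction() it.  On completion, complete_transaction()
 * unlinks the resource and queues an FW_CDEV_EVENT_RESPONSE with the
 * response payload attached as the event's second segment.
 */
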
static void
complete_transaction(struct fw_card *card, int rcode,
                     void *payload, size_t length, void *data)
{
        struct response *response = data;
        struct client *client = response->client;
        unsigned long flags;

        if (length < response->response.length)
                response->response.length = length;
        if (rcode == RCODE_COMPLETE)
                memcpy(response->response.data, payload,
                       response->response.length);

        spin_lock_irqsave(&client->lock, flags);
        list_del(&response->resource.link);
        spin_unlock_irqrestore(&client->lock, flags);

        response->response.type = FW_CDEV_EVENT_RESPONSE;
        response->response.rcode = rcode;
        queue_event(client, &response->event,
                    &response->response, sizeof(response->response),
                    response->response.data, response->response.length);
}

static ssize_t ioctl_send_request(struct client *client, void *buffer)
{
        struct fw_device *device = client->device;
        struct fw_cdev_send_request *request = buffer;
        struct response *response;

        /* What is the biggest size we'll accept, really? */
        if (request->length > 4096)
                return -EINVAL;

        response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
        if (response == NULL)
                return -ENOMEM;

        response->client = client;
        response->response.length = request->length;
        response->response.closure = request->closure;

        if (request->data &&
            copy_from_user(response->response.data,
                           u64_to_uptr(request->data), request->length)) {
                kfree(response);
                return -EFAULT;
        }

        response->resource.release = release_transaction;
        add_client_resource(client, &response->resource);

        fw_send_request(device->card, &response->transaction,
                        request->tcode & 0x1f,
                        device->node->node_id,
                        request->generation,
                        device->node->max_speed,
                        request->offset,
                        response->response.data, request->length,
                        complete_transaction, response);

        /*
         * Return the number of bytes consumed: sizeof(*request), not
         * sizeof(request), which is just the size of a pointer.  (Note
         * that dispatch_ioctl() discards positive return values.)
         */
        if (request->data)
                return sizeof(*request) + request->length;
        else
                return sizeof(*request);
}

struct address_handler {
        struct fw_address_handler handler;
        __u64 closure;
        struct client *client;
        struct client_resource resource;
};

struct request {
        struct fw_request *request;
        void *data;
        size_t length;
        struct client_resource resource;
};

struct request_event {
        struct event event;
        struct fw_cdev_event_request request;
};

static void
release_request(struct client *client, struct client_resource *resource)
{
        struct request *request =
                container_of(resource, struct request, resource);

        fw_send_response(client->device->card, request->request,
                         RCODE_CONFLICT_ERROR);
        kfree(request);
}

static void
handle_request(struct fw_card *card, struct fw_request *r,
               int tcode, int destination, int source,
               int generation, int speed,
               unsigned long long offset,
               void *payload, size_t length, void *callback_data)
{
        struct address_handler *handler = callback_data;
        struct request *request;
        struct request_event *e;
        struct client *client = handler->client;

        request = kmalloc(sizeof(*request), GFP_ATOMIC);
        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (request == NULL || e == NULL) {
                kfree(request);
                kfree(e);
                fw_send_response(card, r, RCODE_CONFLICT_ERROR);
                return;
        }

        request->request = r;
        request->data = payload;
        request->length = length;

        request->resource.release = release_request;
        add_client_resource(client, &request->resource);

        e->request.type = FW_CDEV_EVENT_REQUEST;
        e->request.tcode = tcode;
        e->request.offset = offset;
        e->request.length = length;
        e->request.handle = request->resource.handle;
        e->request.closure = handler->closure;

        queue_event(client, &e->event,
                    &e->request, sizeof(e->request), payload, length);
}

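/*
 * Inbound requests: handle_request() above parks the fw_request as a
 * client resource and queues an FW_CDEV_EVENT_REQUEST.  The client
 * answers via the FW_CDEV_IOC_SEND_RESPONSE ioctl, quoting the handle
 * from the event; a request still unanswered when its resource is
 * released is answered with RCODE_CONFLICT_ERROR (release_request()).
 */
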
static void
release_address_handler(struct client *client,
                        struct client_resource *resource)
{
        struct address_handler *handler =
                container_of(resource, struct address_handler, resource);

        fw_core_remove_address_handler(&handler->handler);
        kfree(handler);
}

static int ioctl_allocate(struct client *client, void *buffer)
{
        struct fw_cdev_allocate *request = buffer;
        struct address_handler *handler;
        struct fw_address_region region;

        handler = kmalloc(sizeof(*handler), GFP_KERNEL);
        if (handler == NULL)
                return -ENOMEM;

        region.start = request->offset;
        region.end = request->offset + request->length;
        handler->handler.length = request->length;
        handler->handler.address_callback = handle_request;
        handler->handler.callback_data = handler;
        handler->closure = request->closure;
        handler->client = client;

        if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
                kfree(handler);
                return -EBUSY;
        }

        handler->resource.release = release_address_handler;
        add_client_resource(client, &handler->resource);
        request->handle = handler->resource.handle;

        return 0;
}

static int ioctl_deallocate(struct client *client, void *buffer)
{
        struct fw_cdev_deallocate *request = buffer;

        return release_client_resource(client, request->handle, NULL);
}

static int ioctl_send_response(struct client *client, void *buffer)
{
        struct fw_cdev_send_response *request = buffer;
        struct client_resource *resource;
        struct request *r;

        if (release_client_resource(client, request->handle, &resource) < 0)
                return -EINVAL;
        r = container_of(resource, struct request, resource);
        if (request->length < r->length)
                r->length = request->length;
        if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
                return -EFAULT;

        fw_send_response(client->device->card, r->request, request->rcode);
        kfree(r);

        return 0;
}

static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
{
        struct fw_cdev_initiate_bus_reset *request = buffer;
        int short_reset;

        short_reset = (request->type == FW_CDEV_SHORT_RESET);

        return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

struct descriptor {
        struct fw_descriptor d;
        struct client_resource resource;
        u32 data[0];
};

static void release_descriptor(struct client *client,
                               struct client_resource *resource)
{
        struct descriptor *descriptor =
                container_of(resource, struct descriptor, resource);

        fw_core_remove_descriptor(&descriptor->d);
        kfree(descriptor);
}

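/*
 * Descriptor lengths are in quadlets (hence the "* 4" conversions
 * below), capped at 256 quadlets per descriptor.
 */
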
static int ioctl_add_descriptor(struct client *client, void *buffer)
{
        struct fw_cdev_add_descriptor *request = buffer;
        struct descriptor *descriptor;
        int retval;

        if (request->length > 256)
                return -EINVAL;

        descriptor =
                kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
        if (descriptor == NULL)
                return -ENOMEM;

        if (copy_from_user(descriptor->data,
                           u64_to_uptr(request->data), request->length * 4)) {
                kfree(descriptor);
                return -EFAULT;
        }

        descriptor->d.length = request->length;
        descriptor->d.immediate = request->immediate;
        descriptor->d.key = request->key;
        descriptor->d.data = descriptor->data;

        retval = fw_core_add_descriptor(&descriptor->d);
        if (retval < 0) {
                kfree(descriptor);
                return retval;
        }

        descriptor->resource.release = release_descriptor;
        add_client_resource(client, &descriptor->resource);
        request->handle = descriptor->resource.handle;

        return 0;
}

static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
        struct fw_cdev_remove_descriptor *request = buffer;

        return release_client_resource(client, request->handle, NULL);
}

static void
iso_callback(struct fw_iso_context *context, u32 cycle,
             size_t header_length, void *header, void *data)
{
        struct client *client = data;
        struct iso_interrupt *interrupt;

        interrupt = kzalloc(sizeof(*interrupt) + header_length, GFP_ATOMIC);
        if (interrupt == NULL)
                return;

        interrupt->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
        interrupt->interrupt.closure = client->iso_closure;
        interrupt->interrupt.cycle = cycle;
        interrupt->interrupt.header_length = header_length;
        memcpy(interrupt->interrupt.header, header, header_length);
        queue_event(client, &interrupt->event,
                    &interrupt->interrupt,
                    sizeof(interrupt->interrupt) + header_length, NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, void *buffer)
{
        struct fw_cdev_create_iso_context *request = buffer;

        if (request->channel > 63)
                return -EINVAL;

        switch (request->type) {
        case FW_ISO_CONTEXT_RECEIVE:
                if (request->header_size < 4 || (request->header_size & 3))
                        return -EINVAL;

                break;

        case FW_ISO_CONTEXT_TRANSMIT:
                if (request->speed > SCODE_3200)
                        return -EINVAL;

                break;

        default:
                return -EINVAL;
        }

        client->iso_closure = request->closure;
        client->iso_context = fw_iso_context_create(client->device->card,
                                                    request->type,
                                                    request->channel,
                                                    request->speed,
                                                    request->header_size,
                                                    iso_callback, client);
        if (IS_ERR(client->iso_context))
                return PTR_ERR(client->iso_context);

        /* We only support one context at this time. */
        request->handle = 0;

        return 0;
}

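/*
 * FW_CDEV_IOC_QUEUE_ISO points at a packed, variable-length array of
 * struct fw_cdev_iso_packet.  For transmit contexts each struct is
 * immediately followed by its quadlet-aligned header, which is why
 * the parser below derives the next packet from header_length.
 * Payloads are never copied here; they live in the mmap()'ed iso
 * buffer and are addressed by the running offset computed from
 * request->data.
 */
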
static int ioctl_queue_iso(struct client *client, void *buffer)
{
        struct fw_cdev_queue_iso *request = buffer;
        struct fw_cdev_iso_packet __user *p, *end, *next;
        struct fw_iso_context *ctx = client->iso_context;
        unsigned long payload, buffer_end, header_length;
        int count;
        struct {
                struct fw_iso_packet packet;
                u8 header[256];
        } u;

        if (ctx == NULL || request->handle != 0)
                return -EINVAL;

        /*
         * If the user passes a non-NULL data pointer, has mmap()'ed
         * the iso buffer, and the pointer points inside the buffer,
         * we setup the payload pointers accordingly.  Otherwise we
         * set them both to 0, which will still let packets with
         * payload_length == 0 through.  In other words, if no packets
         * use the indirect payload, the iso buffer need not be mapped
         * and the request->data pointer is ignored.
         */

        payload = (unsigned long)request->data - client->vm_start;
        buffer_end = client->buffer.page_count << PAGE_SHIFT;
        if (request->data == 0 || client->buffer.pages == NULL ||
            payload >= buffer_end) {
                payload = 0;
                buffer_end = 0;
        }

        if (!access_ok(VERIFY_READ, request->packets, request->size))
                return -EFAULT;

        p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
        end = (void __user *)p + request->size;
        count = 0;
        while (p < end) {
                if (__copy_from_user(&u.packet, p, sizeof(*p)))
                        return -EFAULT;

                if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
                        header_length = u.packet.header_length;
                } else {
                        /*
                         * We require that header_length is a multiple of
                         * the fixed header size, ctx->header_size.
                         */
                        if (ctx->header_size == 0) {
                                if (u.packet.header_length > 0)
                                        return -EINVAL;
                        } else if (u.packet.header_length % ctx->header_size != 0) {
                                return -EINVAL;
                        }
                        header_length = 0;
                }

                next = (struct fw_cdev_iso_packet __user *)
                        &p->header[header_length / 4];
                if (next > end)
                        return -EINVAL;
                if (__copy_from_user
                    (u.packet.header, p->header, header_length))
                        return -EFAULT;
                if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
                    u.packet.header_length + u.packet.payload_length > 0)
                        return -EINVAL;
                if (payload + u.packet.payload_length > buffer_end)
                        return -EINVAL;

                if (fw_iso_context_queue(ctx, &u.packet,
                                         &client->buffer, payload))
                        break;

                p = next;
                payload += u.packet.payload_length;
                count++;
        }

        request->size -= uptr_to_u64(p) - request->packets;
        request->packets = uptr_to_u64(p);
        request->data = client->vm_start + payload;

        return count;
}

static int ioctl_start_iso(struct client *client, void *buffer)
{
        struct fw_cdev_start_iso *request = buffer;

        if (request->handle != 0)
                return -EINVAL;
        if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
                if (request->tags == 0 || request->tags > 15)
                        return -EINVAL;

                if (request->sync > 15)
                        return -EINVAL;
        }

        return fw_iso_context_start(client->iso_context, request->cycle,
                                    request->sync, request->tags);
}

static int ioctl_stop_iso(struct client *client, void *buffer)
{
        struct fw_cdev_stop_iso *request = buffer;

        if (request->handle != 0)
                return -EINVAL;

        return fw_iso_context_stop(client->iso_context);
}

static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
        ioctl_get_info,
        ioctl_send_request,
        ioctl_allocate,
        ioctl_deallocate,
        ioctl_send_response,
        ioctl_initiate_bus_reset,
        ioctl_add_descriptor,
        ioctl_remove_descriptor,
        ioctl_create_iso_context,
        ioctl_queue_iso,
        ioctl_start_iso,
        ioctl_stop_iso,
};

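/*
 * _IOC_NR(cmd) indexes the table above directly, so the entries must
 * stay in the same order as the FW_CDEV_IOC_* command numbers defined
 * in <linux/firewire-cdev.h>, all of which use ioctl type '#'.
 */
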
static int
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
        char buffer[256];
        int retval;

        if (_IOC_TYPE(cmd) != '#' ||
            _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
                return -EINVAL;

        if (_IOC_DIR(cmd) & _IOC_WRITE) {
                if (_IOC_SIZE(cmd) > sizeof(buffer) ||
                    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
        if (retval < 0)
                return retval;

        if (_IOC_DIR(cmd) & _IOC_READ) {
                if (_IOC_SIZE(cmd) > sizeof(buffer) ||
                    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        return 0;
}

static long
fw_device_op_ioctl(struct file *file,
                   unsigned int cmd, unsigned long arg)
{
        struct client *client = file->private_data;

        return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
static long
fw_device_op_compat_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct client *client = file->private_data;

        return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct client *client = file->private_data;
        enum dma_data_direction direction;
        unsigned long size;
        int page_count, retval;

        /* FIXME: We could support multiple buffers, but we don't. */
        if (client->buffer.pages != NULL)
                return -EBUSY;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        if (vma->vm_start & ~PAGE_MASK)
                return -EINVAL;

        client->vm_start = vma->vm_start;
        size = vma->vm_end - vma->vm_start;
        page_count = size >> PAGE_SHIFT;
        if (size & ~PAGE_MASK)
                return -EINVAL;

        if (vma->vm_flags & VM_WRITE)
                direction = DMA_TO_DEVICE;
        else
                direction = DMA_FROM_DEVICE;

        retval = fw_iso_buffer_init(&client->buffer, client->device->card,
                                    page_count, direction);
        if (retval < 0)
                return retval;

        retval = fw_iso_buffer_map(&client->buffer, vma);
        if (retval < 0)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        return retval;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct event *e, *next_e;
        struct client_resource *r, *next_r;
        unsigned long flags;

        if (client->buffer.pages)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        if (client->iso_context)
                fw_iso_context_destroy(client->iso_context);

        list_for_each_entry_safe(r, next_r, &client->resource_list, link)
                r->release(client, r);

        /*
         * FIXME: We should wait for the async tasklets to stop
         * running before freeing the memory.
         */

        list_for_each_entry_safe(e, next_e, &client->event_list, link)
                kfree(e);

        spin_lock_irqsave(&client->device->card->lock, flags);
        list_del(&client->link);
        spin_unlock_irqrestore(&client->device->card->lock, flags);

        fw_device_put(client->device);
        kfree(client);

        return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
{
        struct client *client = file->private_data;
        unsigned int mask = 0;

        poll_wait(file, &client->wait, pt);

        if (fw_device_is_shutdown(client->device))
                mask |= POLLHUP | POLLERR;
        if (!list_empty(&client->event_list))
                mask |= POLLIN | POLLRDNORM;

        return mask;
}

const struct file_operations fw_device_ops = {
        .owner          = THIS_MODULE,
        .open           = fw_device_op_open,
        .read           = fw_device_op_read,
        .unlocked_ioctl = fw_device_op_ioctl,
        .poll           = fw_device_op_poll,
        .release        = fw_device_op_release,
        .mmap           = fw_device_op_mmap,

#ifdef CONFIG_COMPAT
        .compat_ioctl   = fw_device_op_compat_ioctl,
#endif
};
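
/*
 * Minimal userspace usage sketch (the device path is illustrative and
 * error handling is omitted; see <linux/firewire-cdev.h> for the ABI):
 *
 *      #include <fcntl.h>
 *      #include <string.h>
 *      #include <sys/ioctl.h>
 *      #include <linux/firewire-cdev.h>
 *
 *      int fd = open("/dev/fw0", O_RDWR);
 *      struct fw_cdev_get_info info;
 *
 *      memset(&info, 0, sizeof(info));
 *      info.version = FW_CDEV_VERSION;
 *      ioctl(fd, FW_CDEV_IOC_GET_INFO, &info);
 *
 * After the call, info.version holds the kernel's FW_CDEV_VERSION and
 * info.rom_length the size of the device's config ROM in bytes.
 */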