/* drivers/firewire/fw-cdev.c, at v2.6.24-rc2 */
/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/compat.h>
#include <linux/firewire-cdev.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"

struct client;
struct client_resource {
	struct list_head link;
	void (*release)(struct client *client, struct client_resource *r);
	u32 handle;
};

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct.
 */

struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct response {
	struct event event;
	struct fw_transaction transaction;
	struct client *client;
	struct client_resource resource;
	struct fw_cdev_event_response response;
};

struct iso_interrupt {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct client {
	u32 version;
	struct fw_device *device;
	spinlock_t lock;
	u32 resource_handle;
	struct list_head resource_list;
	struct list_head event_list;
	wait_queue_head_t wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head link;
};

static inline void __user *
u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64
uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;
	unsigned long flags;

	device = fw_device_from_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL)
		return -ENOMEM;

	client->device = fw_device_get(device);
	INIT_LIST_HEAD(&client->event_list);
	INIT_LIST_HEAD(&client->resource_list);
	spin_lock_init(&client->lock);
	init_waitqueue_head(&client->wait);

	file->private_data = client;

	spin_lock_irqsave(&device->card->lock, flags);
	list_add_tail(&client->link, &device->client_list);
	spin_unlock_irqrestore(&device->card->lock, flags);

	return 0;
}

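/*
 * For orientation, the userspace counterpart of fw_device_op_open()
 * plus the initial FW_CDEV_IOC_GET_INFO handshake might look like the
 * sketch below (device path and error handling are illustrative only):
 *
 *	int fd = open("/dev/fw0", O_RDWR);
 *	struct fw_cdev_get_info info = {
 *		.version           = FW_CDEV_VERSION,
 *		.rom               = 0,	// don't fetch the config ROM
 *		.bus_reset         = 0,	// no initial bus reset event
 *		.bus_reset_closure = 0,
 *	};
 *	ioctl(fd, FW_CDEV_IOC_GET_INFO, &info);
 *	// info.version now holds the kernel's ABI version,
 *	// info.card the index of the card this device sits on.
 */
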
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

static int
dequeue_event(struct client *client, char __user *buffer, size_t count)
{
	unsigned long flags;
	struct event *event;
	size_t size, total;
	int i, retval;

	retval = wait_event_interruptible(client->wait,
					  !list_empty(&client->event_list) ||
					  fw_device_is_shutdown(client->device));
	if (retval < 0)
		return retval;

	if (list_empty(&client->event_list) &&
	    fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irqsave(&client->lock, flags);
	event = container_of(client->event_list.next, struct event, link);
	list_del(&event->link);
	spin_unlock_irqrestore(&client->lock, flags);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			retval = -EFAULT;
			goto out;
		}
		total += size;
	}
	retval = total;

 out:
	kfree(event);

	return retval;
}

static ssize_t
fw_device_op_read(struct file *file,
		  char __user *buffer, size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

static void
fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
		     struct client *client)
{
	struct fw_card *card = client->device->card;

	event->closure       = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;
	event->generation    = card->generation;
}

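/*
 * Each read() on the file returns at most one event from the queue
 * maintained by queue_event()/dequeue_event() above: a fixed-size
 * struct, optionally followed by a variable-length payload.  A sketch
 * of a client-side event loop (buffer size illustrative only):
 *
 *	char buf[8192];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	union fw_cdev_event *e = (union fw_cdev_event *)buf;
 *	switch (e->common.type) {
 *	case FW_CDEV_EVENT_BUS_RESET:
 *		// e->bus_reset, as filled in by fill_bus_reset_event()
 *		break;
 *	case FW_CDEV_EVENT_RESPONSE:
 *		// e->response.data holds e->response.length payload bytes
 *		break;
 *	}
 */
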
static void
for_each_client(struct fw_device *device,
		void (*callback)(struct client *client))
{
	struct fw_card *card = device->card;
	struct client *c;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(c, &device->client_list, link)
		callback(c);

	spin_unlock_irqrestore(&card->lock, flags);
}

static void
queue_bus_reset_event(struct client *client)
{
	struct bus_reset *bus_reset;

	bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
	if (bus_reset == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

	fill_bus_reset_event(&bus_reset->reset, client);

	queue_event(client, &bus_reset->event,
		    &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

static int ioctl_get_info(struct client *client, void *buffer)
{
	struct fw_cdev_get_info *get_info = buffer;
	struct fw_cdev_event_bus_reset bus_reset;

	client->version = get_info->version;
	get_info->version = FW_CDEV_VERSION;

	if (get_info->rom != 0) {
		void __user *uptr = u64_to_uptr(get_info->rom);
		size_t want = get_info->rom_length;
		size_t have = client->device->config_rom_length * 4;

		if (copy_to_user(uptr, client->device->config_rom,
				 min(want, have)))
			return -EFAULT;
	}
	get_info->rom_length = client->device->config_rom_length * 4;

	client->bus_reset_closure = get_info->bus_reset_closure;
	if (get_info->bus_reset != 0) {
		void __user *uptr = u64_to_uptr(get_info->bus_reset);

		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	get_info->card = client->device->card->index;

	return 0;
}

static void
add_client_resource(struct client *client, struct client_resource *resource)
{
	unsigned long flags;

	spin_lock_irqsave(&client->lock, flags);
	list_add_tail(&resource->link, &client->resource_list);
	resource->handle = client->resource_handle++;
	spin_unlock_irqrestore(&client->lock, flags);
}

static int
release_client_resource(struct client *client, u32 handle,
			struct client_resource **resource)
{
	struct client_resource *r;
	unsigned long flags;

	spin_lock_irqsave(&client->lock, flags);
	list_for_each_entry(r, &client->resource_list, link) {
		if (r->handle == handle) {
			list_del(&r->link);
			break;
		}
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (&r->link == &client->resource_list)
		return -EINVAL;

	if (resource)
		*resource = r;
	else
		r->release(client, r);

	return 0;
}

static void
release_transaction(struct client *client, struct client_resource *resource)
{
	struct response *response =
		container_of(resource, struct response, resource);

	fw_cancel_transaction(client->device->card, &response->transaction);
}

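/*
 * Note on the bookkeeping above: every kernel-side object created on a
 * client's behalf (pending transaction, address range, descriptor,
 * inbound request) is wrapped in a struct client_resource and named to
 * userspace by the u32 handle assigned in add_client_resource().
 * Userspace releases such an object by quoting the handle back, e.g.:
 *
 *	struct fw_cdev_deallocate d = { .handle = handle };
 *	ioctl(fd, FW_CDEV_IOC_DEALLOCATE, &d);
 *
 * ("handle" as previously returned by FW_CDEV_IOC_ALLOCATE.)  Anything
 * still on resource_list is released in fw_device_op_release().
 */
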
static void
complete_transaction(struct fw_card *card, int rcode,
		     void *payload, size_t length, void *data)
{
	struct response *response = data;
	struct client *client = response->client;
	unsigned long flags;

	if (length < response->response.length)
		response->response.length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(response->response.data, payload,
		       response->response.length);

	spin_lock_irqsave(&client->lock, flags);
	list_del(&response->resource.link);
	spin_unlock_irqrestore(&client->lock, flags);

	response->response.type = FW_CDEV_EVENT_RESPONSE;
	response->response.rcode = rcode;
	queue_event(client, &response->event,
		    &response->response, sizeof(response->response),
		    response->response.data, response->response.length);
}

static int ioctl_send_request(struct client *client, void *buffer)
{
	struct fw_device *device = client->device;
	struct fw_cdev_send_request *request = buffer;
	struct response *response;

	/* What is the biggest size we'll accept, really? */
	if (request->length > 4096)
		return -EINVAL;

	response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
	if (response == NULL)
		return -ENOMEM;

	response->client = client;
	response->response.length = request->length;
	response->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(response->response.data,
			   u64_to_uptr(request->data), request->length)) {
		kfree(response);
		return -EFAULT;
	}

	response->resource.release = release_transaction;
	add_client_resource(client, &response->resource);

	fw_send_request(device->card, &response->transaction,
			request->tcode & 0x1f,
			device->node->node_id,
			request->generation,
			device->max_speed,
			request->offset,
			response->response.data, request->length,
			complete_transaction, response);

	/* Note: sizeof(*request), not sizeof(request); the positive
	 * return value is discarded by dispatch_ioctl() anyway. */
	if (request->data)
		return sizeof(*request) + request->length;
	else
		return sizeof(*request);
}

struct address_handler {
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
	struct client_resource resource;
};

struct request {
	struct fw_request *request;
	void *data;
	size_t length;
	struct client_resource resource;
};

struct request_event {
	struct event event;
	struct fw_cdev_event_request request;
};

static void
release_request(struct client *client, struct client_resource *resource)
{
	struct request *request =
		container_of(resource, struct request, resource);

	fw_send_response(client->device->card, request->request,
			 RCODE_CONFLICT_ERROR);
	kfree(request);
}

static void
handle_request(struct fw_card *card, struct fw_request *r,
	       int tcode, int destination, int source,
	       int generation, int speed,
	       unsigned long long offset,
	       void *payload, size_t length, void *callback_data)
{
	struct address_handler *handler = callback_data;
	struct request *request;
	struct request_event *e;
	struct client *client = handler->client;

	request = kmalloc(sizeof(*request), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (request == NULL || e == NULL) {
		kfree(request);
		kfree(e);
		fw_send_response(card, r, RCODE_CONFLICT_ERROR);
		return;
	}

	request->request = r;
	request->data    = payload;
	request->length  = length;

	request->resource.release = release_request;
	add_client_resource(client, &request->resource);

	e->request.type    = FW_CDEV_EVENT_REQUEST;
	e->request.tcode   = tcode;
	e->request.offset  = offset;
	e->request.length  = length;
	e->request.handle  = request->resource.handle;
	e->request.closure = handler->closure;

	queue_event(client, &e->event,
		    &e->request, sizeof(e->request), payload, length);
}

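/*
 * For reference, driving ioctl_send_request() above from userspace to
 * read the first config ROM quadlet of the device could look like this
 * sketch ("generation" would come from a prior bus reset event or
 * FW_CDEV_IOC_GET_INFO; error handling elided):
 *
 *	struct fw_cdev_send_request rq = {
 *		.tcode      = 4,	// READ_QUADLET_REQUEST per IEEE 1394
 *		.length     = 4,
 *		.offset     = 0xfffff0000400ULL,   // CSR config ROM base
 *		.closure    = 0,	// echoed back in the response event
 *		.data       = 0,	// reads carry no outbound payload
 *		.generation = generation,
 *	};
 *	ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &rq);
 *	// the result arrives later as an FW_CDEV_EVENT_RESPONSE via read()
 */
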
static void
release_address_handler(struct client *client,
			struct client_resource *resource)
{
	struct address_handler *handler =
		container_of(resource, struct address_handler, resource);

	fw_core_remove_address_handler(&handler->handler);
	kfree(handler);
}

static int ioctl_allocate(struct client *client, void *buffer)
{
	struct fw_cdev_allocate *request = buffer;
	struct address_handler *handler;
	struct fw_address_region region;

	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
	if (handler == NULL)
		return -ENOMEM;

	region.start = request->offset;
	region.end = request->offset + request->length;
	handler->handler.length = request->length;
	handler->handler.address_callback = handle_request;
	handler->handler.callback_data = handler;
	handler->closure = request->closure;
	handler->client = client;

	if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
		kfree(handler);
		return -EBUSY;
	}

	handler->resource.release = release_address_handler;
	add_client_resource(client, &handler->resource);
	request->handle = handler->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, void *buffer)
{
	struct fw_cdev_deallocate *request = buffer;

	return release_client_resource(client, request->handle, NULL);
}

static int ioctl_send_response(struct client *client, void *buffer)
{
	struct fw_cdev_send_response *request = buffer;
	struct client_resource *resource;
	struct request *r;

	if (release_client_resource(client, request->handle, &resource) < 0)
		return -EINVAL;
	r = container_of(resource, struct request, resource);
	if (request->length < r->length)
		r->length = request->length;
	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
		return -EFAULT;

	fw_send_response(client->device->card, r->request, request->rcode);
	kfree(r);

	return 0;
}

static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
{
	struct fw_cdev_initiate_bus_reset *request = buffer;
	int short_reset;

	short_reset = (request->type == FW_CDEV_SHORT_RESET);

	return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

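/*
 * Taken together, ioctl_allocate(), handle_request() and
 * ioctl_send_response() let a client serve a 1394 address range.  A
 * sketch (addresses illustrative, error handling elided):
 *
 *	struct fw_cdev_allocate a = {
 *		.offset  = 0xffff00000000ULL,
 *		.length  = 0x100,
 *		.closure = 0,
 *	};
 *	ioctl(fd, FW_CDEV_IOC_ALLOCATE, &a);	// a.handle is set on return
 *
 *	// ...once read() delivers an FW_CDEV_EVENT_REQUEST event e:
 *	struct fw_cdev_send_response resp = {
 *		.rcode  = 0,		// RCODE_COMPLETE
 *		.length = 0,		// no response payload
 *		.data   = 0,
 *		.handle = e.request.handle,
 *	};
 *	ioctl(fd, FW_CDEV_IOC_SEND_RESPONSE, &resp);
 */
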
struct descriptor {
	struct fw_descriptor d;
	struct client_resource resource;
	u32 data[0];
};

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor *descriptor =
		container_of(resource, struct descriptor, resource);

	fw_core_remove_descriptor(&descriptor->d);
	kfree(descriptor);
}

static int ioctl_add_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_add_descriptor *request = buffer;
	struct descriptor *descriptor;
	int retval;

	if (request->length > 256)
		return -EINVAL;

	descriptor =
		kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
	if (descriptor == NULL)
		return -ENOMEM;

	if (copy_from_user(descriptor->data,
			   u64_to_uptr(request->data), request->length * 4)) {
		kfree(descriptor);
		return -EFAULT;
	}

	descriptor->d.length = request->length;
	descriptor->d.immediate = request->immediate;
	descriptor->d.key = request->key;
	descriptor->d.data = descriptor->data;

	retval = fw_core_add_descriptor(&descriptor->d);
	if (retval < 0) {
		kfree(descriptor);
		return retval;
	}

	descriptor->resource.release = release_descriptor;
	add_client_resource(client, &descriptor->resource);
	request->handle = descriptor->resource.handle;

	return 0;
}

static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_remove_descriptor *request = buffer;

	return release_client_resource(client, request->handle, NULL);
}

static void
iso_callback(struct fw_iso_context *context, u32 cycle,
	     size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt *irq;

	irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
	if (irq == NULL)
		return;

	irq->interrupt.type          = FW_CDEV_EVENT_ISO_INTERRUPT;
	irq->interrupt.closure       = client->iso_closure;
	irq->interrupt.cycle         = cycle;
	irq->interrupt.header_length = header_length;
	memcpy(irq->interrupt.header, header, header_length);
	queue_event(client, &irq->event, &irq->interrupt,
		    sizeof(irq->interrupt) + header_length, NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, void *buffer)
{
	struct fw_cdev_create_iso_context *request = buffer;
	struct fw_iso_context *context;

	if (request->channel > 63)
		return -EINVAL;

	switch (request->type) {
	case FW_ISO_CONTEXT_RECEIVE:
		if (request->header_size < 4 || (request->header_size & 3))
			return -EINVAL;

		break;

	case FW_ISO_CONTEXT_TRANSMIT:
		if (request->speed > SCODE_3200)
			return -EINVAL;

		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card,
					request->type,
					request->channel,
					request->speed,
					request->header_size,
					iso_callback, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	client->iso_closure = request->closure;
	client->iso_context = context;

	/* We only support one context at this time. */
	request->handle = 0;

	return 0;
}

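/*
 * A matching userspace call for ioctl_create_iso_context(), setting up
 * reception on channel 1 with one header quadlet per packet (values
 * illustrative only):
 *
 *	struct fw_cdev_create_iso_context c = {
 *		.type        = FW_CDEV_ISO_CONTEXT_RECEIVE,
 *		.header_size = 4,	// nonzero multiple of 4, as checked above
 *		.channel     = 1,
 *		.speed       = 0,	// only checked for transmit contexts
 *		.closure     = 0,	// echoed in iso interrupt events
 *	};
 *	ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &c);	// c.handle becomes 0
 */
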
/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)	/* tag is a 2-bit field */
#define GET_SY(v)		(((v) >> 20) & 0x0f)	/* sy is a 4-bit field */
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)

static int ioctl_queue_iso(struct client *client, void *buffer)
{
	struct fw_cdev_queue_iso *request = buffer;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, header_length;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || request->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request->data pointer is ignored.
	 */

	payload = (unsigned long)request->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (request->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

	if (!access_ok(VERIFY_READ, p, request->size))
		return -EFAULT;

	end = (void __user *)p + request->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			header_length = u.packet.header_length;
		} else {
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
				return -EINVAL;
			}
			header_length = 0;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user(u.packet.header, p->header,
				     header_length))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	request->size    -= uptr_to_u64(p) - request->packets;
	request->packets  = uptr_to_u64(p);
	request->data     = client->vm_start + payload;

	return count;
}

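/*
 * Userspace builds the control word decoded by the GET_* macros above
 * with the FW_CDEV_ISO_* helpers from linux/firewire-cdev.h.  Queueing
 * a single 64-byte packet with a completion interrupt might look like
 * this ("payload" is assumed to point into the mmap()'ed iso buffer):
 *
 *	struct fw_cdev_iso_packet p = {
 *		.control = FW_CDEV_ISO_PAYLOAD_LENGTH(64) |
 *			   FW_CDEV_ISO_INTERRUPT |
 *			   FW_CDEV_ISO_TAG(1) |
 *			   FW_CDEV_ISO_SY(0) |
 *			   FW_CDEV_ISO_HEADER_LENGTH(0),
 *	};
 *	struct fw_cdev_queue_iso q = {
 *		.packets = (unsigned long)&p,
 *		.size    = sizeof(p),
 *		.data    = (unsigned long)payload,
 *		.handle  = 0,
 *	};
 *	ioctl(fd, FW_CDEV_IOC_QUEUE_ISO, &q);
 */
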
static int ioctl_start_iso(struct client *client, void *buffer)
{
	struct fw_cdev_start_iso *request = buffer;

	/* Reject this early; without a context we must not dereference
	 * client->iso_context below. */
	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
		if (request->tags == 0 || request->tags > 15)
			return -EINVAL;

		if (request->sync > 15)
			return -EINVAL;
	}

	return fw_iso_context_start(client->iso_context, request->cycle,
				    request->sync, request->tags);
}

static int ioctl_stop_iso(struct client *client, void *buffer)
{
	struct fw_cdev_stop_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
	struct fw_cdev_get_cycle_timer *request = buffer;
	struct fw_card *card = client->device->card;
	unsigned long long bus_time;
	struct timeval tv;
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);

	bus_time = card->driver->get_bus_time(card);
	do_gettimeofday(&tv);

	local_irq_restore(flags);
	preempt_enable();

	request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
	request->cycle_timer = bus_time & 0xffffffff;
	return 0;
}

static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
	ioctl_get_cycle_timer,
};

static int
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
	char buffer[256];
	int retval;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
		return -EINVAL;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
	if (retval < 0)
		return retval;

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return 0;
}

static long
fw_device_op_ioctl(struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
static long
fw_device_op_compat_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, retval;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	retval = fw_iso_buffer_init(&client->buffer, client->device->card,
				    page_count, direction);
	if (retval < 0)
		return retval;

	retval = fw_iso_buffer_map(&client->buffer, vma);
	if (retval < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return retval;
}

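/*
 * The userspace counterpart of fw_device_op_mmap() is a plain shared
 * mapping; e.g. a 16-page iso buffer (size illustrative only):
 *
 *	size_t size = 16 * getpagesize();
 *	void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 * A writable mapping selects DMA_TO_DEVICE (iso transmit), a read-only
 * one DMA_FROM_DEVICE (iso receive), as per the VM_WRITE test above.
 */
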
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *e, *next_e;
	struct client_resource *r, *next_r;
	unsigned long flags;

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	list_for_each_entry_safe(r, next_r, &client->resource_list, link)
		r->release(client, r);

	/*
	 * FIXME: We should wait for the async tasklets to stop
	 * running before freeing the memory.
	 */

	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);

	spin_lock_irqsave(&client->device->card->lock, flags);
	list_del(&client->link);
	spin_unlock_irqrestore(&client->device->card->lock, flags);

	fw_device_put(client->device);
	kfree(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,

#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};