Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6: (43 commits)
staging: slicoss: update README
otus/zdusb.c: additional USB identifier
Staging: go7007: fix build issues
Staging: sxg: Fix leaks and checksum errors in transmit code path
Staging: sxg: Fix sleep in atomic context warning while loading driver
Staging: sxg: Use correct queue_id for transmitting non-TCP packets
Staging: sxg: Fire watchdog timer at end of open routine to change the link
Staging: Pohmelfs: Add load balancing between network states with the same priority.
Staging: Pohmelfs: Added IO permissions and priorities.
Staging: Pohmelfs: Added ->show_stats() callback.
Staging: Pohmelfs: Drop ftrans debugging code.
Staging: Pohmelfs: Use wait_on_page_timeout when waiting for remote directory sync instead of hardcoded 25 seconds.
Staging: Pohmelfs: Reduce debugging noise about non-existing objects.
Staging: Pohmelfs: Sync fs before killing it, since dentry cache is shrunk before writeback is invoked via generic_shutdown_super()
Staging: Pohmelfs: Extend remount option.
Staging: Pohmelfs: Set NETFS_INODE_REMOTE_SYNCED and clear NETFS_INODE_OWNED bits in the root inode.
Staging: Pohmelfs: Added 'need_lock' variable into debug print.
Staging: Pohmelfs: Disable read lock in pohmelfs_getattr().
Staging: Pohmelfs: Move parent lock to the place where we really have to send a lookup request to the server.
Staging: pohmelfs: Populate dentry cache when receiving the new readdir entry.
...

+664 -372
+3 -2
Documentation/filesystems/pohmelfs/design_notes.txt
··· 56 data transfers. 57 58 POHMELFS clients operate with a working set of servers and are capable of balancing read-only 59 - operations (like lookups or directory listings) between them. 60 Administrators can add or remove servers from the set at run-time via special commands (described 61 - in Documentation/pohmelfs/info.txt file). Writes are replicated to all servers. 62 63 POHMELFS is capable of full data channel encryption and/or strong crypto hashing. 64 One can select any kernel supported cipher, encryption mode, hash type and operation mode
··· 56 data transfers. 57 58 POHMELFS clients operate with a working set of servers and are capable of balancing read-only 59 + operations (like lookups or directory listings) between them according to IO priorities. 60 Administrators can add or remove servers from the set at run-time via special commands (described 61 + in Documentation/pohmelfs/info.txt file). Writes are replicated to all connected servers 62 + that have write permission enabled. IO priority and permissions can be changed at run-time. 63 64 POHMELFS is capable of full data channel encryption and/or strong crypto hashing. 65 One can select any kernel supported cipher, encryption mode, hash type and operation mode
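
Purely as an illustration of the dispatch policy described above (this is not POHMELFS code; all names below are hypothetical): reads go to the highest-priority active server that has read permission, while writes fan out to every active server that has write permission. The sample data mirrors the mountstats example further down.

	/* Hypothetical userspace sketch of the read/write dispatch policy. */
	#include <stdio.h>

	enum { PERM_READ = 1, PERM_WRITE = 2 };	/* assumed permission bits */

	struct server {
		const char *addr;
		int prio;		/* higher priority wins for reads */
		int perm;		/* PERM_* bits set by the administrator */
		int active;
	};

	/* Reads: pick the highest-priority active server with read permission. */
	static struct server *pick_read_server(struct server *s, int n)
	{
		struct server *best = NULL;
		int i;

		for (i = 0; i < n; i++)
			if (s[i].active && (s[i].perm & PERM_READ) &&
			    (!best || s[i].prio > best->prio))
				best = &s[i];
		return best;
	}

	/* Writes: replicate to every active server with write permission. */
	static void replicate_write(struct server *s, int n, const char *data)
	{
		int i;

		for (i = 0; i < n; i++)
			if (s[i].active && (s[i].perm & PERM_WRITE))
				printf("write '%s' -> %s\n", data, s[i].addr);
	}

	int main(void)
	{
		struct server set[] = {
			{ "server1.net:1026", 250, PERM_READ, 1 },
			{ "server2.net:1025", 123, PERM_READ | PERM_WRITE, 1 },
		};
		struct server *r = pick_read_server(set, 2);

		if (r)
			printf("read <- %s\n", r->addr);
		replicate_write(set, 2, "block");
		return 0;
	}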
+17 -4
Documentation/filesystems/pohmelfs/info.txt
··· 1 POHMELFS usage information. 2 3 - Mount options: 4 idx=%u 5 Each mountpoint is associated with a special index via this option. 6 Administrator can add or remove servers from the given index, so all mounts, ··· 54 55 Usage examples. 56 57 - Add (or remove if it already exists) server server1.net:1025 into the working set with index $idx 58 with appropriate hash algorithm and key file and cipher algorithm, mode and key file: 59 - $cfg -a server1.net -p 1025 -i $idx -K $hash_key -k $cipher_key 60 61 Mount filesystem with given index $idx to /mnt mountpoint. 62 Client will connect to all servers specified in the working set via previous command: 63 mount -t pohmel -o idx=$idx q /mnt 64 65 - One can add or remove servers from working set after mounting too. 66 67 68 Server installation. 69
··· 1 POHMELFS usage information. 2 3 + Mount options. 4 + All but index, number of crypto threads and maximum IO size can be changed via remount. 5 + 6 idx=%u 7 Each mountpoint is associated with a special index via this option. 8 Administrator can add or remove servers from the given index, so all mounts, ··· 52 53 Usage examples. 54 55 + Add server server1.net:1025 into the working set with index $idx 56 with appropriate hash algorithm and key file and cipher algorithm, mode and key file: 57 + $cfg A add -a server1.net -p 1025 -i $idx -K $hash_key -k $cipher_key 58 59 Mount filesystem with given index $idx to /mnt mountpoint. 60 Client will connect to all servers specified in the working set via previous command: 61 mount -t pohmel -o idx=$idx q /mnt 62 63 + Change permissions to read-only ('-I 1' option; '-I 2' means write-only, '-I 3' read-write): 64 + $cfg A modify -a server1.net -p 1025 -i $idx -I 1 65 66 + Change IO priority to 123 (the node with the highest priority gets read requests): 67 + $cfg A modify -a server1.net -p 1025 -i $idx -P 123 68 + 69 + One can check the current status of all connections in the mountstats file: 70 + # cat /proc/$PID/mountstats 71 + ... 72 + device none mounted on /mnt with fstype pohmel 73 + idx addr(:port) socket_type protocol active priority permissions 74 + 0 server1.net:1026 1 6 1 250 1 75 + 0 server2.net:1025 1 6 1 123 3 76 77 Server installation. 78
+152 -49
drivers/staging/android/binder.c
··· 41 static struct proc_dir_entry *binder_proc_dir_entry_root; 42 static struct proc_dir_entry *binder_proc_dir_entry_proc; 43 static struct hlist_head binder_dead_nodes; 44 45 static int binder_read_proc_proc( 46 char *page, char **start, off_t off, int count, int *eof, void *data); ··· 56 #define SZ_4M 0x400000 57 #endif 58 59 - #ifndef __i386__ 60 - #define FORBIDDEN_MMAP_FLAGS (VM_WRITE | VM_EXEC) 61 - #else 62 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) 63 - #endif 64 65 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) 66 ··· 234 uint8_t data[0]; 235 }; 236 237 struct binder_proc { 238 struct hlist_node proc_node; 239 struct rb_root threads; ··· 249 int pid; 250 struct vm_area_struct *vma; 251 struct task_struct *tsk; 252 void *buffer; 253 - size_t user_buffer_offset; 254 255 struct list_head buffers; 256 struct rb_root free_buffers; ··· 317 uid_t sender_euid; 318 }; 319 320 /* 321 * copied from get_unused_fd_flags 322 */ 323 - int task_get_unused_fd_flags(struct task_struct *tsk, int flags) 324 { 325 - struct files_struct *files = get_files_struct(tsk); 326 int fd, error; 327 struct fdtable *fdt; 328 unsigned long rlim_cur; ··· 346 * will limit the total number of files that can be opened. 347 */ 348 rlim_cur = 0; 349 - if (lock_task_sighand(tsk, &irqs)) { 350 - rlim_cur = tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur; 351 - unlock_task_sighand(tsk, &irqs); 352 } 353 if (fd >= rlim_cur) 354 goto out; ··· 384 385 out: 386 spin_unlock(&files->file_lock); 387 - put_files_struct(files); 388 return error; 389 } 390 ··· 391 * copied from fd_install 392 */ 393 static void task_fd_install( 394 - struct task_struct *tsk, unsigned int fd, struct file *file) 395 { 396 - struct files_struct *files = get_files_struct(tsk); 397 struct fdtable *fdt; 398 399 if (files == NULL) ··· 404 BUG_ON(fdt->fd[fd] != NULL); 405 rcu_assign_pointer(fdt->fd[fd], file); 406 spin_unlock(&files->file_lock); 407 - put_files_struct(files); 408 } 409 410 /* ··· 420 /* 421 * copied from sys_close 422 */ 423 - static long task_close_fd(struct task_struct *tsk, unsigned int fd) 424 { 425 struct file *filp; 426 - struct files_struct *files = get_files_struct(tsk); 427 struct fdtable *fdt; 428 int retval; 429 ··· 450 retval == -ERESTART_RESTARTBLOCK)) 451 retval = -EINTR; 452 453 - put_files_struct(files); 454 return retval; 455 456 out_unlock: 457 spin_unlock(&files->file_lock); 458 - put_files_struct(files); 459 return -EBADF; 460 } 461 ··· 623 proc->pid, page_addr); 624 goto err_map_kernel_failed; 625 } 626 - user_page_addr = (size_t)page_addr + proc->user_buffer_offset; 627 ret = vm_insert_page(vma, user_page_addr, page[0]); 628 if (ret) { 629 printk(KERN_ERR "binder: %d: binder_alloc_buf failed " ··· 645 page_addr -= PAGE_SIZE) { 646 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; 647 if (vma) 648 - zap_page_range(vma, (size_t)page_addr + 649 proc->user_buffer_offset, PAGE_SIZE, NULL); 650 err_vm_insert_page_failed: 651 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); ··· 726 "er %p size %zd\n", proc->pid, size, buffer, buffer_size); 727 728 has_page_addr = 729 - (void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK); 730 if (n == NULL) { 731 if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) 732 buffer_size = size; /* no room for other buffers */ 733 else 734 buffer_size = size + sizeof(struct binder_buffer); 735 } 736 - end_page_addr = (void *)PAGE_ALIGN((size_t)buffer->data + buffer_size); 737 if (end_page_addr > has_page_addr) 738 end_page_addr = has_page_addr; 739 if 
(binder_update_page_range(proc, 1, 740 - (void *)PAGE_ALIGN((size_t)buffer->data), end_page_addr, NULL)) 741 return NULL; 742 743 rb_erase(best_fit, &proc->free_buffers); ··· 769 770 static void *buffer_start_page(struct binder_buffer *buffer) 771 { 772 - return (void *)((size_t)buffer & PAGE_MASK); 773 } 774 775 static void *buffer_end_page(struct binder_buffer *buffer) 776 { 777 - return (void *)(((size_t)(buffer + 1) - 1) & PAGE_MASK); 778 } 779 780 static void binder_delete_free_buffer( ··· 852 } 853 854 binder_update_page_range(proc, 0, 855 - (void *)PAGE_ALIGN((size_t)buffer->data), 856 - (void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK), 857 NULL); 858 rb_erase(&buffer->rb_node, &proc->allocated_buffers); 859 buffer->free = 1; ··· 1352 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 1353 struct binder_transaction *tmp; 1354 tmp = thread->transaction_stack; 1355 while (tmp) { 1356 if (tmp->from && tmp->from->proc == target_proc) 1357 target_thread = tmp->from; ··· 1452 return_error = BR_FAILED_REPLY; 1453 goto err_copy_data_failed; 1454 } 1455 off_end = (void *)offp + tr->offsets_size; 1456 for (; offp < off_end; offp++) { 1457 struct flat_binder_object *fp; 1458 - if (*offp > t->buffer->data_size - sizeof(*fp)) { 1459 binder_user_error("binder: %d:%d got transaction with " 1460 "invalid offset, %zd\n", 1461 proc->pid, thread->pid, *offp); ··· 1571 return_error = BR_FAILED_REPLY; 1572 goto err_fget_failed; 1573 } 1574 - target_fd = task_get_unused_fd_flags(target_proc->tsk, O_CLOEXEC); 1575 if (target_fd < 0) { 1576 fput(file); 1577 return_error = BR_FAILED_REPLY; 1578 goto err_get_unused_fd_failed; 1579 } 1580 - task_fd_install(target_proc->tsk, target_fd, file); 1581 if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) 1582 printk(KERN_INFO " fd %ld -> %d\n", fp->handle, target_fd); 1583 /* TODO: fput? 
*/ ··· 1682 off_end = (void *)offp + buffer->offsets_size; 1683 for (; offp < off_end; offp++) { 1684 struct flat_binder_object *fp; 1685 - if (*offp > buffer->data_size - sizeof(*fp)) { 1686 printk(KERN_ERR "binder: transaction release %d bad" 1687 "offset %zd, size %zd\n", debug_id, *offp, buffer->data_size); 1688 continue; ··· 1720 if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) 1721 printk(KERN_INFO " fd %ld\n", fp->handle); 1722 if (failed_at) 1723 - task_close_fd(proc->tsk, fp->handle); 1724 break; 1725 1726 default: ··· 2369 2370 tr.data_size = t->buffer->data_size; 2371 tr.offsets_size = t->buffer->offsets_size; 2372 - tr.data.ptr.buffer = (void *)((void *)t->buffer->data + proc->user_buffer_offset); 2373 tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); 2374 2375 if (put_user(cmd, (uint32_t __user *)ptr)) ··· 2685 (unsigned long)pgprot_val(vma->vm_page_prot)); 2686 dump_stack(); 2687 } 2688 static void binder_vma_close(struct vm_area_struct *vma) 2689 { 2690 struct binder_proc *proc = vma->vm_private_data; ··· 2696 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 2697 (unsigned long)pgprot_val(vma->vm_page_prot)); 2698 proc->vma = NULL; 2699 } 2700 2701 static struct vm_operations_struct binder_vm_ops = { ··· 2729 } 2730 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; 2731 2732 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); 2733 if (area == NULL) { 2734 ret = -ENOMEM; ··· 2742 goto err_get_vm_area_failed; 2743 } 2744 proc->buffer = area->addr; 2745 - proc->user_buffer_offset = vma->vm_start - (size_t)proc->buffer; 2746 2747 #ifdef CONFIG_CPU_CACHE_VIPT 2748 if (cache_is_vipt_aliasing()) { ··· 2775 binder_insert_free_buffer(proc, buffer); 2776 proc->free_async_space = proc->buffer_size / 2; 2777 barrier(); 2778 proc->vma = vma; 2779 2780 /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ ··· 2783 2784 err_alloc_small_buf_failed: 2785 kfree(proc->pages); 2786 err_alloc_pages_failed: 2787 vfree(proc->buffer); 2788 err_get_vm_area_failed: 2789 - mutex_unlock(&binder_lock); 2790 err_bad_arg: 2791 printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 2792 return ret; ··· 2820 if (binder_proc_dir_entry_proc) { 2821 char strbuf[11]; 2822 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 2823 create_proc_read_entry(strbuf, S_IRUGO, binder_proc_dir_entry_proc, binder_read_proc_proc, proc); 2824 } 2825 ··· 2829 2830 static int binder_flush(struct file *filp, fl_owner_t id) 2831 { 2832 - struct rb_node *n; 2833 struct binder_proc *proc = filp->private_data; 2834 - int wake_count = 0; 2835 2836 - mutex_lock(&binder_lock); 2837 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 2838 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 2839 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; ··· 2849 } 2850 } 2851 wake_up_interruptible_all(&proc->wait); 2852 - mutex_unlock(&binder_lock); 2853 2854 if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) 2855 printk(KERN_INFO "binder_flush: %d woke %d threads\n", proc->pid, wake_count); 2856 - 2857 - return 0; 2858 } 2859 2860 static int binder_release(struct inode *nodp, struct file *filp) 2861 { 2862 - struct hlist_node *pos; 2863 - struct binder_transaction *t; 2864 - struct rb_node *n; 2865 struct binder_proc *proc = filp->private_data; 2866 - int threads, nodes, incoming_refs, outgoing_refs, buffers, 
active_transactions, page_count; 2867 - 2868 if (binder_proc_dir_entry_proc) { 2869 char strbuf[11]; 2870 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 2871 remove_proc_entry(strbuf, binder_proc_dir_entry_proc); 2872 } 2873 - mutex_lock(&binder_lock); 2874 hlist_del(&proc->proc_node); 2875 if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { 2876 if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER) ··· 2951 } 2952 2953 binder_stats.obj_deleted[BINDER_STAT_PROC]++; 2954 - mutex_unlock(&binder_lock); 2955 2956 page_count = 0; 2957 if (proc->pages) { ··· 2974 proc->pid, threads, nodes, incoming_refs, outgoing_refs, active_transactions, buffers, page_count); 2975 2976 kfree(proc); 2977 - return 0; 2978 } 2979 2980 static char *print_binder_transaction(char *buf, char *end, const char *prefix, struct binder_transaction *t)
··· 41 static struct proc_dir_entry *binder_proc_dir_entry_root; 42 static struct proc_dir_entry *binder_proc_dir_entry_proc; 43 static struct hlist_head binder_dead_nodes; 44 + static HLIST_HEAD(binder_deferred_list); 45 + static DEFINE_MUTEX(binder_deferred_lock); 46 47 static int binder_read_proc_proc( 48 char *page, char **start, off_t off, int count, int *eof, void *data); ··· 54 #define SZ_4M 0x400000 55 #endif 56 57 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) 58 59 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) 60 ··· 236 uint8_t data[0]; 237 }; 238 239 + enum { 240 + BINDER_DEFERRED_PUT_FILES = 0x01, 241 + BINDER_DEFERRED_FLUSH = 0x02, 242 + BINDER_DEFERRED_RELEASE = 0x04, 243 + }; 244 + 245 struct binder_proc { 246 struct hlist_node proc_node; 247 struct rb_root threads; ··· 245 int pid; 246 struct vm_area_struct *vma; 247 struct task_struct *tsk; 248 + struct files_struct *files; 249 + struct hlist_node deferred_work_node; 250 + int deferred_work; 251 void *buffer; 252 + ptrdiff_t user_buffer_offset; 253 254 struct list_head buffers; 255 struct rb_root free_buffers; ··· 310 uid_t sender_euid; 311 }; 312 313 + static void binder_defer_work(struct binder_proc *proc, int defer); 314 + 315 /* 316 * copied from get_unused_fd_flags 317 */ 318 + int task_get_unused_fd_flags(struct binder_proc *proc, int flags) 319 { 320 + struct files_struct *files = proc->files; 321 int fd, error; 322 struct fdtable *fdt; 323 unsigned long rlim_cur; ··· 337 * will limit the total number of files that can be opened. 338 */ 339 rlim_cur = 0; 340 + if (lock_task_sighand(proc->tsk, &irqs)) { 341 + rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur; 342 + unlock_task_sighand(proc->tsk, &irqs); 343 } 344 if (fd >= rlim_cur) 345 goto out; ··· 375 376 out: 377 spin_unlock(&files->file_lock); 378 return error; 379 } 380 ··· 383 * copied from fd_install 384 */ 385 static void task_fd_install( 386 + struct binder_proc *proc, unsigned int fd, struct file *file) 387 { 388 + struct files_struct *files = proc->files; 389 struct fdtable *fdt; 390 391 if (files == NULL) ··· 396 BUG_ON(fdt->fd[fd] != NULL); 397 rcu_assign_pointer(fdt->fd[fd], file); 398 spin_unlock(&files->file_lock); 399 } 400 401 /* ··· 413 /* 414 * copied from sys_close 415 */ 416 + static long task_close_fd(struct binder_proc *proc, unsigned int fd) 417 { 418 struct file *filp; 419 + struct files_struct *files = proc->files; 420 struct fdtable *fdt; 421 int retval; 422 ··· 443 retval == -ERESTART_RESTARTBLOCK)) 444 retval = -EINTR; 445 446 return retval; 447 448 out_unlock: 449 spin_unlock(&files->file_lock); 450 return -EBADF; 451 } 452 ··· 618 proc->pid, page_addr); 619 goto err_map_kernel_failed; 620 } 621 + user_page_addr = 622 + (uintptr_t)page_addr + proc->user_buffer_offset; 623 ret = vm_insert_page(vma, user_page_addr, page[0]); 624 if (ret) { 625 printk(KERN_ERR "binder: %d: binder_alloc_buf failed " ··· 639 page_addr -= PAGE_SIZE) { 640 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; 641 if (vma) 642 + zap_page_range(vma, (uintptr_t)page_addr + 643 proc->user_buffer_offset, PAGE_SIZE, NULL); 644 err_vm_insert_page_failed: 645 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); ··· 720 "er %p size %zd\n", proc->pid, size, buffer, buffer_size); 721 722 has_page_addr = 723 + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); 724 if (n == NULL) { 725 if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) 726 buffer_size = size; /* no room for other buffers */ 727 else 728 buffer_size = size + 
sizeof(struct binder_buffer); 729 } 730 + end_page_addr = 731 + (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); 732 if (end_page_addr > has_page_addr) 733 end_page_addr = has_page_addr; 734 if (binder_update_page_range(proc, 1, 735 + (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) 736 return NULL; 737 738 rb_erase(best_fit, &proc->free_buffers); ··· 762 763 static void *buffer_start_page(struct binder_buffer *buffer) 764 { 765 + return (void *)((uintptr_t)buffer & PAGE_MASK); 766 } 767 768 static void *buffer_end_page(struct binder_buffer *buffer) 769 { 770 + return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); 771 } 772 773 static void binder_delete_free_buffer( ··· 845 } 846 847 binder_update_page_range(proc, 0, 848 + (void *)PAGE_ALIGN((uintptr_t)buffer->data), 849 + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), 850 NULL); 851 rb_erase(&buffer->rb_node, &proc->allocated_buffers); 852 buffer->free = 1; ··· 1345 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 1346 struct binder_transaction *tmp; 1347 tmp = thread->transaction_stack; 1348 + if (tmp->to_thread != thread) { 1349 + binder_user_error("binder: %d:%d got new " 1350 + "transaction with bad transaction stack" 1351 + ", transaction %d has target %d:%d\n", 1352 + proc->pid, thread->pid, tmp->debug_id, 1353 + tmp->to_proc ? tmp->to_proc->pid : 0, 1354 + tmp->to_thread ? 1355 + tmp->to_thread->pid : 0); 1356 + return_error = BR_FAILED_REPLY; 1357 + goto err_bad_call_stack; 1358 + } 1359 while (tmp) { 1360 if (tmp->from && tmp->from->proc == target_proc) 1361 target_thread = tmp->from; ··· 1434 return_error = BR_FAILED_REPLY; 1435 goto err_copy_data_failed; 1436 } 1437 + if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) { 1438 + binder_user_error("binder: %d:%d got transaction with " 1439 + "invalid offsets size, %zd\n", 1440 + proc->pid, thread->pid, tr->offsets_size); 1441 + return_error = BR_FAILED_REPLY; 1442 + goto err_bad_offset; 1443 + } 1444 off_end = (void *)offp + tr->offsets_size; 1445 for (; offp < off_end; offp++) { 1446 struct flat_binder_object *fp; 1447 + if (*offp > t->buffer->data_size - sizeof(*fp) || 1448 + t->buffer->data_size < sizeof(*fp) || 1449 + !IS_ALIGNED(*offp, sizeof(void *))) { 1450 binder_user_error("binder: %d:%d got transaction with " 1451 "invalid offset, %zd\n", 1452 proc->pid, thread->pid, *offp); ··· 1544 return_error = BR_FAILED_REPLY; 1545 goto err_fget_failed; 1546 } 1547 + target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); 1548 if (target_fd < 0) { 1549 fput(file); 1550 return_error = BR_FAILED_REPLY; 1551 goto err_get_unused_fd_failed; 1552 } 1553 + task_fd_install(target_proc, target_fd, file); 1554 if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) 1555 printk(KERN_INFO " fd %ld -> %d\n", fp->handle, target_fd); 1556 /* TODO: fput? 
*/ ··· 1655 off_end = (void *)offp + buffer->offsets_size; 1656 for (; offp < off_end; offp++) { 1657 struct flat_binder_object *fp; 1658 + if (*offp > buffer->data_size - sizeof(*fp) || 1659 + buffer->data_size < sizeof(*fp) || 1660 + !IS_ALIGNED(*offp, sizeof(void *))) { 1661 printk(KERN_ERR "binder: transaction release %d bad" 1662 "offset %zd, size %zd\n", debug_id, *offp, buffer->data_size); 1663 continue; ··· 1691 if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) 1692 printk(KERN_INFO " fd %ld\n", fp->handle); 1693 if (failed_at) 1694 + task_close_fd(proc, fp->handle); 1695 break; 1696 1697 default: ··· 2340 2341 tr.data_size = t->buffer->data_size; 2342 tr.offsets_size = t->buffer->offsets_size; 2343 + tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset; 2344 tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); 2345 2346 if (put_user(cmd, (uint32_t __user *)ptr)) ··· 2656 (unsigned long)pgprot_val(vma->vm_page_prot)); 2657 dump_stack(); 2658 } 2659 + 2660 static void binder_vma_close(struct vm_area_struct *vma) 2661 { 2662 struct binder_proc *proc = vma->vm_private_data; ··· 2666 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 2667 (unsigned long)pgprot_val(vma->vm_page_prot)); 2668 proc->vma = NULL; 2669 + binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); 2670 } 2671 2672 static struct vm_operations_struct binder_vm_ops = { ··· 2698 } 2699 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; 2700 2701 + if (proc->buffer) { 2702 + ret = -EBUSY; 2703 + failure_string = "already mapped"; 2704 + goto err_already_mapped; 2705 + } 2706 + 2707 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); 2708 if (area == NULL) { 2709 ret = -ENOMEM; ··· 2705 goto err_get_vm_area_failed; 2706 } 2707 proc->buffer = area->addr; 2708 + proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; 2709 2710 #ifdef CONFIG_CPU_CACHE_VIPT 2711 if (cache_is_vipt_aliasing()) { ··· 2738 binder_insert_free_buffer(proc, buffer); 2739 proc->free_async_space = proc->buffer_size / 2; 2740 barrier(); 2741 + proc->files = get_files_struct(current); 2742 proc->vma = vma; 2743 2744 /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ ··· 2745 2746 err_alloc_small_buf_failed: 2747 kfree(proc->pages); 2748 + proc->pages = NULL; 2749 err_alloc_pages_failed: 2750 vfree(proc->buffer); 2751 + proc->buffer = NULL; 2752 err_get_vm_area_failed: 2753 + err_already_mapped: 2754 err_bad_arg: 2755 printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 2756 return ret; ··· 2780 if (binder_proc_dir_entry_proc) { 2781 char strbuf[11]; 2782 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 2783 + remove_proc_entry(strbuf, binder_proc_dir_entry_proc); 2784 create_proc_read_entry(strbuf, S_IRUGO, binder_proc_dir_entry_proc, binder_read_proc_proc, proc); 2785 } 2786 ··· 2788 2789 static int binder_flush(struct file *filp, fl_owner_t id) 2790 { 2791 struct binder_proc *proc = filp->private_data; 2792 2793 + binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 2794 + 2795 + return 0; 2796 + } 2797 + 2798 + static void binder_deferred_flush(struct binder_proc *proc) 2799 + { 2800 + struct rb_node *n; 2801 + int wake_count = 0; 2802 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 2803 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 2804 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; ··· 2802 } 
2803 } 2804 wake_up_interruptible_all(&proc->wait); 2805 2806 if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) 2807 printk(KERN_INFO "binder_flush: %d woke %d threads\n", proc->pid, wake_count); 2808 } 2809 2810 static int binder_release(struct inode *nodp, struct file *filp) 2811 { 2812 struct binder_proc *proc = filp->private_data; 2813 if (binder_proc_dir_entry_proc) { 2814 char strbuf[11]; 2815 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 2816 remove_proc_entry(strbuf, binder_proc_dir_entry_proc); 2817 } 2818 + 2819 + binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 2820 + 2821 + return 0; 2822 + } 2823 + 2824 + static void binder_deferred_release(struct binder_proc *proc) 2825 + { 2826 + struct hlist_node *pos; 2827 + struct binder_transaction *t; 2828 + struct rb_node *n; 2829 + int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count; 2830 + 2831 + BUG_ON(proc->vma); 2832 + BUG_ON(proc->files); 2833 + 2834 hlist_del(&proc->proc_node); 2835 if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { 2836 if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER) ··· 2897 } 2898 2899 binder_stats.obj_deleted[BINDER_STAT_PROC]++; 2900 2901 page_count = 0; 2902 if (proc->pages) { ··· 2921 proc->pid, threads, nodes, incoming_refs, outgoing_refs, active_transactions, buffers, page_count); 2922 2923 kfree(proc); 2924 + } 2925 + 2926 + static void binder_deferred_func(struct work_struct *work) 2927 + { 2928 + struct binder_proc *proc; 2929 + struct files_struct *files; 2930 + 2931 + int defer; 2932 + do { 2933 + mutex_lock(&binder_lock); 2934 + mutex_lock(&binder_deferred_lock); 2935 + if (!hlist_empty(&binder_deferred_list)) { 2936 + proc = hlist_entry(binder_deferred_list.first, 2937 + struct binder_proc, deferred_work_node); 2938 + hlist_del_init(&proc->deferred_work_node); 2939 + defer = proc->deferred_work; 2940 + proc->deferred_work = 0; 2941 + } else { 2942 + proc = NULL; 2943 + defer = 0; 2944 + } 2945 + mutex_unlock(&binder_deferred_lock); 2946 + 2947 + files = NULL; 2948 + if (defer & BINDER_DEFERRED_PUT_FILES) 2949 + if ((files = proc->files)) 2950 + proc->files = NULL; 2951 + 2952 + if (defer & BINDER_DEFERRED_FLUSH) 2953 + binder_deferred_flush(proc); 2954 + 2955 + if (defer & BINDER_DEFERRED_RELEASE) 2956 + binder_deferred_release(proc); /* frees proc */ 2957 + 2958 + mutex_unlock(&binder_lock); 2959 + if (files) 2960 + put_files_struct(files); 2961 + } while (proc); 2962 + } 2963 + static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 2964 + 2965 + static void binder_defer_work(struct binder_proc *proc, int defer) 2966 + { 2967 + mutex_lock(&binder_deferred_lock); 2968 + proc->deferred_work |= defer; 2969 + if (hlist_unhashed(&proc->deferred_work_node)) { 2970 + hlist_add_head(&proc->deferred_work_node, 2971 + &binder_deferred_list); 2972 + schedule_work(&binder_deferred_work); 2973 + } 2974 + mutex_unlock(&binder_deferred_lock); 2975 } 2976 2977 static char *print_binder_transaction(char *buf, char *end, const char *prefix, struct binder_transaction *t)
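
The structural pattern of the binder change above, reduced to a minimal sketch (the binder_* names in the diff are real; the deferred_* names here are hypothetical): callers OR flag bits into a per-object field under a dedicated mutex, hook the object onto a global list at most once, and a single work item drains the list and acts on the coalesced bits, so the flush/release paths never do the heavy work inline.

	/* Minimal sketch of the coalesced deferred-work pattern (hypothetical
	 * names). Objects are assumed zero-initialized (e.g. kzalloc) so
	 * hlist_unhashed() is true before the first queueing.
	 */
	#include <linux/workqueue.h>
	#include <linux/mutex.h>
	#include <linux/list.h>
	#include <linux/kernel.h>

	struct deferred_obj {
		struct hlist_node node;
		int pending;			/* OR-ed request bits */
	};

	static HLIST_HEAD(deferred_list);
	static DEFINE_MUTEX(deferred_lock);
	static void deferred_fn(struct work_struct *work);
	static DECLARE_WORK(deferred_work, deferred_fn);

	static void defer_work(struct deferred_obj *obj, int bits)
	{
		mutex_lock(&deferred_lock);
		obj->pending |= bits;		/* coalesce repeated requests */
		if (hlist_unhashed(&obj->node)) {
			hlist_add_head(&obj->node, &deferred_list);
			schedule_work(&deferred_work);	/* queued at most once */
		}
		mutex_unlock(&deferred_lock);
	}

	static void deferred_fn(struct work_struct *work)
	{
		struct deferred_obj *obj;
		int bits = 0;

		do {
			mutex_lock(&deferred_lock);
			obj = NULL;
			if (!hlist_empty(&deferred_list)) {
				obj = hlist_entry(deferred_list.first,
						  struct deferred_obj, node);
				hlist_del_init(&obj->node);
				bits = obj->pending;
				obj->pending = 0;
			}
			mutex_unlock(&deferred_lock);

			if (obj)	/* act on the bits outside the lock */
				pr_info("deferred flags %x\n", bits);
		} while (obj);
	}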
+13 -7
drivers/staging/at76_usb/at76_usb.c
··· 5259 return 0; 5260 } 5261 5262 /* Register network device and initialize the hardware */ 5263 static int at76_init_new_device(struct at76_priv *priv, 5264 struct usb_interface *interface) ··· 5315 priv->scan_mode = SCAN_TYPE_ACTIVE; 5316 5317 netdev->flags &= ~IFF_MULTICAST; /* not yet or never */ 5318 - netdev->open = at76_open; 5319 - netdev->stop = at76_stop; 5320 - netdev->get_stats = at76_get_stats; 5321 netdev->ethtool_ops = &at76_ethtool_ops; 5322 5323 /* Add pointers to enable iwspy support. */ 5324 priv->wireless_data.spy_data = &priv->spy_data; 5325 netdev->wireless_data = &priv->wireless_data; 5326 5327 - netdev->hard_start_xmit = at76_tx; 5328 - netdev->tx_timeout = at76_tx_timeout; 5329 netdev->watchdog_timeo = 2 * HZ; 5330 netdev->wireless_handlers = &at76_handler_def; 5331 - netdev->set_multicast_list = at76_set_multicast; 5332 - netdev->set_mac_address = at76_set_mac_address; 5333 dev_alloc_name(netdev, "wlan%d"); 5334 5335 ret = register_netdev(priv->netdev);
··· 5259 return 0; 5260 } 5261 5262 + static const struct net_device_ops at76_netdev_ops = { 5263 + .ndo_open = at76_open, 5264 + .ndo_stop = at76_stop, 5265 + .ndo_get_stats = at76_get_stats, 5266 + .ndo_start_xmit = at76_tx, 5267 + .ndo_tx_timeout = at76_tx_timeout, 5268 + .ndo_set_multicast_list = at76_set_multicast, 5269 + .ndo_set_mac_address = at76_set_mac_address, 5270 + .ndo_validate_addr = eth_validate_addr, 5271 + .ndo_change_mtu = eth_change_mtu, 5272 + }; 5273 + 5274 /* Register network device and initialize the hardware */ 5275 static int at76_init_new_device(struct at76_priv *priv, 5276 struct usb_interface *interface) ··· 5303 priv->scan_mode = SCAN_TYPE_ACTIVE; 5304 5305 netdev->flags &= ~IFF_MULTICAST; /* not yet or never */ 5306 + netdev->netdev_ops = &at76_netdev_ops; 5307 netdev->ethtool_ops = &at76_ethtool_ops; 5308 5309 /* Add pointers to enable iwspy support. */ 5310 priv->wireless_data.spy_data = &priv->spy_data; 5311 netdev->wireless_data = &priv->wireless_data; 5312 5313 netdev->watchdog_timeo = 2 * HZ; 5314 netdev->wireless_handlers = &at76_handler_def; 5315 dev_alloc_name(netdev, "wlan%d"); 5316 5317 ret = register_netdev(priv->netdev);
+12 -5
drivers/staging/epl/VirtualEthernetLinux.c
··· 284 return Ret; 285 } 286 287 tEplKernel VEthAddInstance(tEplDllkInitParam *pInitParam_p) 288 { 289 tEplKernel Ret = kEplSuccessful; ··· 310 goto Exit; 311 } 312 313 - pVEthNetDevice_g->open = VEthOpen; 314 - pVEthNetDevice_g->stop = VEthClose; 315 - pVEthNetDevice_g->get_stats = VEthGetStats; 316 - pVEthNetDevice_g->hard_start_xmit = VEthXmit; 317 - pVEthNetDevice_g->tx_timeout = VEthTimeout; 318 pVEthNetDevice_g->watchdog_timeo = EPL_VETH_TX_TIMEOUT; 319 pVEthNetDevice_g->destructor = free_netdev; 320
··· 284 return Ret; 285 } 286 287 + static const struct net_device_ops epl_netdev_ops = { 288 + .ndo_open = VEthOpen, 289 + .ndo_stop = VEthClose, 290 + .ndo_get_stats = VEthGetStats, 291 + .ndo_start_xmit = VEthXmit, 292 + .ndo_tx_timeout = VEthTimeout, 293 + .ndo_change_mtu = eth_change_mtu, 294 + .ndo_set_mac_address = eth_mac_addr, 295 + .ndo_validate_addr = eth_validate_addr, 296 + }; 297 + 298 tEplKernel VEthAddInstance(tEplDllkInitParam *pInitParam_p) 299 { 300 tEplKernel Ret = kEplSuccessful; ··· 299 goto Exit; 300 } 301 302 + pVEthNetDevice_g->netdev_ops = &epl_netdev_ops; 303 pVEthNetDevice_g->watchdog_timeo = EPL_VETH_TX_TIMEOUT; 304 pVEthNetDevice_g->destructor = free_netdev; 305
+14 -9
drivers/staging/et131x/et131x_netdev.c
··· 112 void et131x_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); 113 void et131x_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 114 115 /** 116 * et131x_device_alloc 117 * ··· 155 */ 156 //netdev->init = &et131x_init; 157 //netdev->set_config = &et131x_config; 158 - netdev->get_stats = &et131x_stats; 159 - netdev->open = &et131x_open; 160 - netdev->stop = &et131x_close; 161 - netdev->do_ioctl = &et131x_ioctl; 162 - netdev->set_multicast_list = &et131x_multicast; 163 - netdev->hard_start_xmit = &et131x_tx; 164 - netdev->tx_timeout = &et131x_tx_timeout; 165 netdev->watchdog_timeo = ET131X_TX_TIMEOUT; 166 - netdev->change_mtu = &et131x_change_mtu; 167 - netdev->set_mac_address = &et131x_set_mac_addr; 168 169 //netdev->ethtool_ops = &et131x_ethtool_ops; 170
··· 112 void et131x_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); 113 void et131x_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 114 115 + static const struct net_device_ops et131x_netdev_ops = { 116 + .ndo_open = et131x_open, 117 + .ndo_stop = et131x_close, 118 + .ndo_start_xmit = et131x_tx, 119 + .ndo_set_multicast_list = et131x_multicast, 120 + .ndo_tx_timeout = et131x_tx_timeout, 121 + .ndo_change_mtu = et131x_change_mtu, 122 + .ndo_set_mac_address = et131x_set_mac_addr, 123 + .ndo_validate_addr = eth_validate_addr, 124 + .ndo_get_stats = et131x_stats, 125 + .ndo_do_ioctl = et131x_ioctl, 126 + }; 127 + 128 /** 129 * et131x_device_alloc 130 * ··· 142 */ 143 //netdev->init = &et131x_init; 144 //netdev->set_config = &et131x_config; 145 netdev->watchdog_timeo = ET131X_TX_TIMEOUT; 146 + netdev->netdev_ops = &et131x_netdev_ops; 147 148 //netdev->ethtool_ops = &et131x_ethtool_ops; 149
-15
drivers/staging/go7007/go7007-driver.c
··· 268 init_i2c_module(&go->i2c_adapter, 269 go->board_info->i2c_devs[i].id, 270 go->board_info->i2c_devs[i].addr); 271 - #ifdef TUNER_SET_TYPE_ADDR 272 - if (go->tuner_type >= 0) { 273 - struct tuner_setup tun_setup = { 274 - .mode_mask = T_ANALOG_TV, 275 - .addr = ADDR_UNSET, 276 - .type = go->tuner_type 277 - }; 278 - i2c_clients_command(&go->i2c_adapter, 279 - TUNER_SET_TYPE_ADDR, &tun_setup); 280 - } 281 - #else 282 - if (go->tuner_type >= 0) 283 - i2c_clients_command(&go->i2c_adapter, 284 - TUNER_SET_TYPE, &go->tuner_type); 285 - #endif 286 if (go->board_id == GO7007_BOARDID_ADLINK_MPG24) 287 i2c_clients_command(&go->i2c_adapter, 288 DECODER_SET_CHANNEL, &go->channel_number);
··· 268 init_i2c_module(&go->i2c_adapter, 269 go->board_info->i2c_devs[i].id, 270 go->board_info->i2c_devs[i].addr); 271 if (go->board_id == GO7007_BOARDID_ADLINK_MPG24) 272 i2c_clients_command(&go->i2c_adapter, 273 DECODER_SET_CHANNEL, &go->channel_number);
+2
drivers/staging/go7007/wis-sony-tuner.c
··· 386 struct wis_sony_tuner *t = i2c_get_clientdata(client); 387 388 switch (cmd) { 389 #ifdef TUNER_SET_TYPE_ADDR 390 case TUNER_SET_TYPE_ADDR: 391 { ··· 464 t->type, sony_tuners[t->type - 200].name); 465 break; 466 } 467 case VIDIOC_G_FREQUENCY: 468 { 469 struct v4l2_frequency *f = arg;
··· 386 struct wis_sony_tuner *t = i2c_get_clientdata(client); 387 388 switch (cmd) { 389 + #if 0 390 #ifdef TUNER_SET_TYPE_ADDR 391 case TUNER_SET_TYPE_ADDR: 392 { ··· 463 t->type, sony_tuners[t->type - 200].name); 464 break; 465 } 466 + #endif 467 case VIDIOC_G_FREQUENCY: 468 { 469 struct v4l2_frequency *f = arg;
+5 -4
drivers/staging/line6/audio.c
··· 27 { 28 static int dev; 29 struct snd_card *card; 30 31 - card = snd_card_new(line6_index[dev], line6_id[dev], THIS_MODULE, 0); 32 - 33 - if (card == NULL) 34 - return -ENOMEM; 35 36 line6->card = card; 37
··· 27 { 28 static int dev; 29 struct snd_card *card; 30 + int err; 31 32 + err = snd_card_create(line6_index[dev], line6_id[dev], THIS_MODULE, 0, 33 + &card); 34 + if (err < 0) 35 + return err; 36 37 line6->card = card; 38
+29 -18
drivers/staging/otus/usbdrv.c
··· 822 return 0; 823 } 824 825 int zfLnxRegisterVapDev(struct net_device* parentDev, u16_t vapId) 826 { 827 /* Allocate net device structure */ ··· 861 vap[vapId].dev->ml_priv = parentDev->ml_priv; 862 863 //dev->hard_start_xmit = &zd1212_wds_xmit_frame; 864 - vap[vapId].dev->hard_start_xmit = &zfLnxVapXmitFrame; 865 - vap[vapId].dev->open = &zfLnxVapOpen; 866 - vap[vapId].dev->stop = &zfLnxVapClose; 867 - vap[vapId].dev->get_stats = &usbdrv_get_stats; 868 - vap[vapId].dev->change_mtu = &usbdrv_change_mtu; 869 - #ifdef ZM_HOSTAPD_SUPPORT 870 - vap[vapId].dev->do_ioctl = usbdrv_ioctl; 871 - #else 872 - vap[vapId].dev->do_ioctl = NULL; 873 - #endif 874 vap[vapId].dev->destructor = free_netdev; 875 876 vap[vapId].dev->tx_queue_len = 0; ··· 1074 usb_unlink_urb(macp->RegInUrb); 1075 } 1076 1077 u8_t zfLnxInitSetup(struct net_device *dev, struct usbdrv_private *macp) 1078 { 1079 //unsigned char addr[6]; ··· 1110 dev->wireless_handlers = (struct iw_handler_def *)&p80211wext_handler_def; 1111 #endif 1112 1113 - dev->open = usbdrv_open; 1114 - dev->hard_start_xmit = usbdrv_xmit_frame; 1115 - dev->stop = usbdrv_close; 1116 - dev->change_mtu = &usbdrv_change_mtu; 1117 - dev->get_stats = usbdrv_get_stats; 1118 - dev->set_multicast_list = usbdrv_set_multi; 1119 - dev->set_mac_address = usbdrv_set_mac; 1120 - dev->do_ioctl = usbdrv_ioctl; 1121 1122 dev->flags |= IFF_MULTICAST; 1123
··· 822 return 0; 823 } 824 825 + static const struct net_device_ops vap_netdev_ops = { 826 + .ndo_open = zfLnxVapOpen, 827 + .ndo_stop = zfLnxVapClose, 828 + .ndo_start_xmit = zfLnxVapXmitFrame, 829 + .ndo_get_stats = usbdrv_get_stats, 830 + .ndo_change_mtu = usbdrv_change_mtu, 831 + .ndo_validate_addr = eth_validate_addr, 832 + .ndo_set_mac_address = eth_mac_addr, 833 + #ifdef ZM_HOSTAPD_SUPPORT 834 + .ndo_do_ioctl = usbdrv_ioctl, 835 + #else 836 + .ndo_do_ioctl = NULL, 837 + #endif 838 + }; 839 + 840 int zfLnxRegisterVapDev(struct net_device* parentDev, u16_t vapId) 841 { 842 /* Allocate net device structure */ ··· 846 vap[vapId].dev->ml_priv = parentDev->ml_priv; 847 848 //dev->hard_start_xmit = &zd1212_wds_xmit_frame; 849 + vap[vapId].dev->netdev_ops = &vap_netdev_ops; 850 vap[vapId].dev->destructor = free_netdev; 851 852 vap[vapId].dev->tx_queue_len = 0; ··· 1068 usb_unlink_urb(macp->RegInUrb); 1069 } 1070 1071 + static const struct net_device_ops otus_netdev_ops = { 1072 + .ndo_open = usbdrv_open, 1073 + .ndo_stop = usbdrv_close, 1074 + .ndo_start_xmit = usbdrv_xmit_frame, 1075 + .ndo_change_mtu = usbdrv_change_mtu, 1076 + .ndo_get_stats = usbdrv_get_stats, 1077 + .ndo_set_multicast_list = usbdrv_set_multi, 1078 + .ndo_set_mac_address = usbdrv_set_mac, 1079 + .ndo_do_ioctl = usbdrv_ioctl, 1080 + .ndo_validate_addr = eth_validate_addr, 1081 + }; 1082 + 1083 u8_t zfLnxInitSetup(struct net_device *dev, struct usbdrv_private *macp) 1084 { 1085 //unsigned char addr[6]; ··· 1092 dev->wireless_handlers = (struct iw_handler_def *)&p80211wext_handler_def; 1093 #endif 1094 1095 + dev->netdev_ops = &otus_netdev_ops; 1096 1097 dev->flags |= IFF_MULTICAST; 1098
+2 -1
drivers/staging/otus/zdusb.c
··· 48 static struct usb_device_id zd1221_ids [] = { 49 { USB_DEVICE(VENDOR_ATHR, PRODUCT_AR9170) }, 50 { USB_DEVICE(VENDOR_DLINK, PRODUCT_DWA160A) }, 51 - { USB_DEVICE(0x0846, 0x9010) }, 52 { } /* Terminating entry */ 53 }; 54
··· 48 static struct usb_device_id zd1221_ids [] = { 49 { USB_DEVICE(VENDOR_ATHR, PRODUCT_AR9170) }, 50 { USB_DEVICE(VENDOR_DLINK, PRODUCT_DWA160A) }, 51 + { USB_DEVICE(VENDOR_NETGEAR, PRODUCT_WNDA3100) }, 52 + { USB_DEVICE(VENDOR_NETGEAR, PRODUCT_WN111v2) }, 53 { } /* Terminating entry */ 54 }; 55
+4
drivers/staging/otus/zdusb.h
··· 40 #define VENDOR_DLINK 0x07D1 //Dlink 41 #define PRODUCT_DWA160A 0x3C10 42 43 #endif
··· 40 #define VENDOR_DLINK 0x07D1 //Dlink 41 #define PRODUCT_DWA160A 0x3C10 42 43 + #define VENDOR_NETGEAR 0x0846 /* NetGear */ 44 + #define PRODUCT_WNDA3100 0x9010 45 + #define PRODUCT_WN111v2 0x9001 46 + 47 #endif
+56 -5
drivers/staging/pohmelfs/config.c
··· 81 return g; 82 } 83 84 int pohmelfs_copy_config(struct pohmelfs_sb *psb) 85 { 86 struct pohmelfs_config_group *g; ··· 142 err = 0; 143 list_for_each_entry(dst, &psb->state_list, config_entry) { 144 if (pohmelfs_config_eql(&dst->state.ctl, &c->state.ctl)) { 145 - err = -EEXIST; 146 break; 147 } 148 } ··· 160 161 memcpy(&dst->state.ctl, &c->state.ctl, sizeof(struct pohmelfs_ctl)); 162 163 - list_add_tail(&dst->config_entry, &psb->state_list); 164 165 err = pohmelfs_state_init_one(psb, dst); 166 if (err) { ··· 289 return err; 290 } 291 292 static int pohmelfs_cn_ctl(struct cn_msg *msg, int action) 293 { 294 struct pohmelfs_config_group *g; ··· 326 g->num_entry--; 327 kfree(c); 328 goto out_unlock; 329 } else { 330 err = -EEXIST; 331 goto out_unlock; ··· 347 } 348 memcpy(&c->state.ctl, ctl, sizeof(struct pohmelfs_ctl)); 349 g->num_entry++; 350 list_add_tail(&c->config_entry, &g->config_list); 351 352 out_unlock: ··· 453 454 switch (msg->flags) { 455 case POHMELFS_FLAGS_ADD: 456 - err = pohmelfs_cn_ctl(msg, POHMELFS_FLAGS_ADD); 457 - break; 458 case POHMELFS_FLAGS_DEL: 459 - err = pohmelfs_cn_ctl(msg, POHMELFS_FLAGS_DEL); 460 break; 461 case POHMELFS_FLAGS_SHOW: 462 err = pohmelfs_cn_disp(msg);
··· 81 return g; 82 } 83 84 + static inline void pohmelfs_insert_config_entry(struct pohmelfs_sb *psb, struct pohmelfs_config *dst) 85 + { 86 + struct pohmelfs_config *tmp; 87 + 88 + INIT_LIST_HEAD(&dst->config_entry); 89 + 90 + list_for_each_entry(tmp, &psb->state_list, config_entry) { 91 + if (dst->state.ctl.prio > tmp->state.ctl.prio) 92 + list_add_tail(&dst->config_entry, &tmp->config_entry); 93 + } 94 + if (list_empty(&dst->config_entry)) 95 + list_add_tail(&dst->config_entry, &psb->state_list); 96 + } 97 + 98 + static int pohmelfs_move_config_entry(struct pohmelfs_sb *psb, 99 + struct pohmelfs_config *dst, struct pohmelfs_config *new) 100 + { 101 + if ((dst->state.ctl.prio == new->state.ctl.prio) && 102 + (dst->state.ctl.perm == new->state.ctl.perm)) 103 + return 0; 104 + 105 + dprintk("%s: dst: prio: %d, perm: %x, new: prio: %d, perm: %d.\n", 106 + __func__, dst->state.ctl.prio, dst->state.ctl.perm, 107 + new->state.ctl.prio, new->state.ctl.perm); 108 + dst->state.ctl.prio = new->state.ctl.prio; 109 + dst->state.ctl.perm = new->state.ctl.perm; 110 + 111 + list_del_init(&dst->config_entry); 112 + pohmelfs_insert_config_entry(psb, dst); 113 + return 0; 114 + } 115 + 116 + /* 117 + * pohmelfs_copy_config() is used to copy new state configs from the 118 + * config group (controlled by the netlink messages) into the superblock. 119 + * This happens either at startup time where no transactions can access 120 + * the list of the configs (and thus list of the network states), or at 121 + * run-time, where it is protected by the psb->state_lock. 122 + */ 123 int pohmelfs_copy_config(struct pohmelfs_sb *psb) 124 { 125 struct pohmelfs_config_group *g; ··· 103 err = 0; 104 list_for_each_entry(dst, &psb->state_list, config_entry) { 105 if (pohmelfs_config_eql(&dst->state.ctl, &c->state.ctl)) { 106 + err = pohmelfs_move_config_entry(psb, dst, c); 107 + if (!err) 108 + err = -EEXIST; 109 break; 110 } 111 } ··· 119 120 memcpy(&dst->state.ctl, &c->state.ctl, sizeof(struct pohmelfs_ctl)); 121 122 + pohmelfs_insert_config_entry(psb, dst); 123 124 err = pohmelfs_state_init_one(psb, dst); 125 if (err) { ··· 248 return err; 249 } 250 251 + static int pohmelfs_modify_config(struct pohmelfs_ctl *old, struct pohmelfs_ctl *new) 252 + { 253 + old->perm = new->perm; 254 + old->prio = new->prio; 255 + return 0; 256 + } 257 + 258 static int pohmelfs_cn_ctl(struct cn_msg *msg, int action) 259 { 260 struct pohmelfs_config_group *g; ··· 278 g->num_entry--; 279 kfree(c); 280 goto out_unlock; 281 + } else if (action == POHMELFS_FLAGS_MODIFY) { 282 + err = pohmelfs_modify_config(sc, ctl); 283 + goto out_unlock; 284 } else { 285 err = -EEXIST; 286 goto out_unlock; ··· 296 } 297 memcpy(&c->state.ctl, ctl, sizeof(struct pohmelfs_ctl)); 298 g->num_entry++; 299 + 300 list_add_tail(&c->config_entry, &g->config_list); 301 302 out_unlock: ··· 401 402 switch (msg->flags) { 403 case POHMELFS_FLAGS_ADD: 404 case POHMELFS_FLAGS_DEL: 405 + case POHMELFS_FLAGS_MODIFY: 406 + err = pohmelfs_cn_ctl(msg, msg->flags); 407 break; 408 case POHMELFS_FLAGS_SHOW: 409 err = pohmelfs_cn_disp(msg);
+12 -12
drivers/staging/pohmelfs/dir.c
··· 328 { 329 struct inode *inode = &pi->vfs_inode; 330 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb); 331 - long ret = msecs_to_jiffies(25000); 332 int err; 333 334 dprintk("%s: dir: %llu, state: %lx: remote_synced: %d.\n", ··· 389 dprintk("%s: parent: %llu, fpos: %llu, hash: %08lx.\n", 390 __func__, pi->ino, (u64)file->f_pos, 391 (unsigned long)file->private_data); 392 - 393 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK); 394 if (err) 395 return err; 396 - 397 err = pohmelfs_sync_remote_dir(pi); 398 if (err) 399 return err; ··· 513 514 need_lock = pohmelfs_need_lock(parent, lock_type); 515 516 - err = pohmelfs_data_lock(parent, 0, ~0, lock_type); 517 - if (err) 518 - goto out; 519 - 520 str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0); 521 522 mutex_lock(&parent->offset_lock); ··· 521 ino = n->ino; 522 mutex_unlock(&parent->offset_lock); 523 524 - dprintk("%s: 1 ino: %lu, inode: %p, name: '%s', hash: %x, parent_state: %lx.\n", 525 - __func__, ino, inode, str.name, str.hash, parent->state); 526 527 if (ino) { 528 inode = ilookup(dir->i_sb, ino); ··· 530 goto out; 531 } 532 533 - dprintk("%s: dir: %p, dir_ino: %llu, name: '%s', len: %u, dir_state: %lx, ino: %lu.\n", 534 __func__, dir, parent->ino, 535 str.name, str.len, parent->state, ino); 536 ··· 538 if (!need_lock) 539 goto out; 540 } 541 542 err = pohmelfs_lookup_single(parent, &str, ino); 543 if (err) ··· 557 558 if (ino) { 559 inode = ilookup(dir->i_sb, ino); 560 - printk("%s: second lookup ino: %lu, inode: %p, name: '%s', hash: %x.\n", 561 __func__, ino, inode, str.name, str.hash); 562 if (!inode) { 563 - printk("%s: No inode for ino: %lu, name: '%s', hash: %x.\n", 564 __func__, ino, str.name, str.hash); 565 //return NULL; 566 return ERR_PTR(-EACCES);
··· 328 { 329 struct inode *inode = &pi->vfs_inode; 330 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb); 331 + long ret = psb->wait_on_page_timeout; 332 int err; 333 334 dprintk("%s: dir: %llu, state: %lx: remote_synced: %d.\n", ··· 389 dprintk("%s: parent: %llu, fpos: %llu, hash: %08lx.\n", 390 __func__, pi->ino, (u64)file->f_pos, 391 (unsigned long)file->private_data); 392 + #if 0 393 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK); 394 if (err) 395 return err; 396 + #endif 397 err = pohmelfs_sync_remote_dir(pi); 398 if (err) 399 return err; ··· 513 514 need_lock = pohmelfs_need_lock(parent, lock_type); 515 516 str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0); 517 518 mutex_lock(&parent->offset_lock); ··· 525 ino = n->ino; 526 mutex_unlock(&parent->offset_lock); 527 528 + dprintk("%s: start ino: %lu, inode: %p, name: '%s', hash: %x, parent_state: %lx, need_lock: %d.\n", 529 + __func__, ino, inode, str.name, str.hash, parent->state, need_lock); 530 531 if (ino) { 532 inode = ilookup(dir->i_sb, ino); ··· 534 goto out; 535 } 536 537 + dprintk("%s: no inode dir: %p, dir_ino: %llu, name: '%s', len: %u, dir_state: %lx, ino: %lu.\n", 538 __func__, dir, parent->ino, 539 str.name, str.len, parent->state, ino); 540 ··· 542 if (!need_lock) 543 goto out; 544 } 545 + 546 + err = pohmelfs_data_lock(parent, 0, ~0, lock_type); 547 + if (err) 548 + goto out; 549 550 err = pohmelfs_lookup_single(parent, &str, ino); 551 if (err) ··· 557 558 if (ino) { 559 inode = ilookup(dir->i_sb, ino); 560 + dprintk("%s: second lookup ino: %lu, inode: %p, name: '%s', hash: %x.\n", 561 __func__, ino, inode, str.name, str.hash); 562 if (!inode) { 563 + dprintk("%s: No inode for ino: %lu, name: '%s', hash: %x.\n", 564 __func__, ino, str.name, str.hash); 565 //return NULL; 566 return ERR_PTR(-EACCES);
+106 -31
drivers/staging/pohmelfs/inode.c
··· 1169 static int pohmelfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) 1170 { 1171 struct inode *inode = dentry->d_inode; 1172 struct pohmelfs_inode *pi = POHMELFS_I(inode); 1173 int err; 1174 1175 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK); 1176 if (err) 1177 return err; 1178 - 1179 dprintk("%s: ino: %llu, mode: %o, uid: %u, gid: %u, size: %llu.\n", 1180 __func__, pi->ino, inode->i_mode, inode->i_uid, 1181 inode->i_gid, inode->i_size); 1182 1183 generic_fillattr(inode, stat); 1184 return 0; ··· 1343 1344 kfree(psb); 1345 sb->s_fs_info = NULL; 1346 - 1347 - pohmelfs_ftrans_exit(); 1348 - } 1349 - 1350 - static int pohmelfs_remount(struct super_block *sb, int *flags, char *data) 1351 - { 1352 - *flags |= MS_RDONLY; 1353 - return 0; 1354 } 1355 1356 static int pohmelfs_statfs(struct dentry *dentry, struct kstatfs *buf) ··· 1387 return 0; 1388 } 1389 1390 - static const struct super_operations pohmelfs_sb_ops = { 1391 - .alloc_inode = pohmelfs_alloc_inode, 1392 - .destroy_inode = pohmelfs_destroy_inode, 1393 - .drop_inode = pohmelfs_drop_inode, 1394 - .write_inode = pohmelfs_write_inode, 1395 - .put_super = pohmelfs_put_super, 1396 - .remount_fs = pohmelfs_remount, 1397 - .statfs = pohmelfs_statfs, 1398 - .show_options = pohmelfs_show_options, 1399 - }; 1400 - 1401 enum { 1402 pohmelfs_opt_idx, 1403 pohmelfs_opt_trans_scan_timeout, 1404 pohmelfs_opt_drop_scan_timeout, 1405 pohmelfs_opt_wait_on_page_timeout, 1406 pohmelfs_opt_trans_retries, 1407 - pohmelfs_opt_crypto_thread_num, 1408 - pohmelfs_opt_trans_max_pages, 1409 - pohmelfs_opt_crypto_fail_unsupported, 1410 pohmelfs_opt_mcache_timeout, 1411 }; 1412 1413 static struct match_token pohmelfs_tokens[] = { 1414 {pohmelfs_opt_idx, "idx=%u"}, 1415 {pohmelfs_opt_trans_scan_timeout, "trans_scan_timeout=%u"}, 1416 {pohmelfs_opt_drop_scan_timeout, "drop_scan_timeout=%u"}, 1417 {pohmelfs_opt_wait_on_page_timeout, "wait_on_page_timeout=%u"}, 1418 {pohmelfs_opt_trans_retries, "trans_retries=%u"}, 1419 - {pohmelfs_opt_crypto_thread_num, "crypto_thread_num=%u"}, 1420 - {pohmelfs_opt_trans_max_pages, "trans_max_pages=%u"}, 1421 - {pohmelfs_opt_crypto_fail_unsupported, "crypto_fail_unsupported"}, 1422 {pohmelfs_opt_mcache_timeout, "mcache_timeout=%u"}, 1423 }; 1424 1425 - static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb) 1426 { 1427 char *p; 1428 substring_t args[MAX_OPT_ARGS]; ··· 1432 err = match_int(&args[0], &option); 1433 if (err) 1434 return err; 1435 1436 switch (token) { 1437 case pohmelfs_opt_idx: ··· 1470 } 1471 1472 return 0; 1473 } 1474 1475 static void pohmelfs_flush_inode(struct pohmelfs_inode *pi, unsigned int count) ··· 1759 return err; 1760 } 1761 1762 /* 1763 * Allocate private superblock and create root dir. 1764 */ ··· 1820 struct inode *root; 1821 struct pohmelfs_inode *npi; 1822 struct qstr str; 1823 - 1824 - pohmelfs_ftrans_init(); 1825 1826 psb = kzalloc(sizeof(struct pohmelfs_sb), GFP_KERNEL); 1827 if (!psb) ··· 1871 mutex_init(&psb->state_lock); 1872 INIT_LIST_HEAD(&psb->state_list); 1873 1874 - err = pohmelfs_parse_options((char *) data, psb); 1875 if (err) 1876 goto err_out_free_sb; 1877 ··· 1900 err = PTR_ERR(npi); 1901 goto err_out_crypto_exit; 1902 } 1903 1904 root = &npi->vfs_inode; 1905 ··· 1944 mnt); 1945 } 1946 1947 static struct file_system_type pohmel_fs_type = { 1948 .owner = THIS_MODULE, 1949 .name = "pohmel", 1950 .get_sb = pohmelfs_get_sb, 1951 - .kill_sb = kill_anon_super, 1952 }; 1953 1954 /*
··· 1169 static int pohmelfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) 1170 { 1171 struct inode *inode = dentry->d_inode; 1172 + #if 0 1173 struct pohmelfs_inode *pi = POHMELFS_I(inode); 1174 int err; 1175 1176 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK); 1177 if (err) 1178 return err; 1179 dprintk("%s: ino: %llu, mode: %o, uid: %u, gid: %u, size: %llu.\n", 1180 __func__, pi->ino, inode->i_mode, inode->i_uid, 1181 inode->i_gid, inode->i_size); 1182 + #endif 1183 1184 generic_fillattr(inode, stat); 1185 return 0; ··· 1342 1343 kfree(psb); 1344 sb->s_fs_info = NULL; 1345 } 1346 1347 static int pohmelfs_statfs(struct dentry *dentry, struct kstatfs *buf) ··· 1394 return 0; 1395 } 1396 1397 enum { 1398 pohmelfs_opt_idx, 1399 + pohmelfs_opt_crypto_thread_num, 1400 + pohmelfs_opt_trans_max_pages, 1401 + pohmelfs_opt_crypto_fail_unsupported, 1402 + 1403 + /* Remountable options */ 1404 pohmelfs_opt_trans_scan_timeout, 1405 pohmelfs_opt_drop_scan_timeout, 1406 pohmelfs_opt_wait_on_page_timeout, 1407 pohmelfs_opt_trans_retries, 1408 pohmelfs_opt_mcache_timeout, 1409 }; 1410 1411 static struct match_token pohmelfs_tokens[] = { 1412 {pohmelfs_opt_idx, "idx=%u"}, 1413 + {pohmelfs_opt_crypto_thread_num, "crypto_thread_num=%u"}, 1414 + {pohmelfs_opt_trans_max_pages, "trans_max_pages=%u"}, 1415 + {pohmelfs_opt_crypto_fail_unsupported, "crypto_fail_unsupported"}, 1416 {pohmelfs_opt_trans_scan_timeout, "trans_scan_timeout=%u"}, 1417 {pohmelfs_opt_drop_scan_timeout, "drop_scan_timeout=%u"}, 1418 {pohmelfs_opt_wait_on_page_timeout, "wait_on_page_timeout=%u"}, 1419 {pohmelfs_opt_trans_retries, "trans_retries=%u"}, 1420 {pohmelfs_opt_mcache_timeout, "mcache_timeout=%u"}, 1421 }; 1422 1423 + static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb, int remount) 1424 { 1425 char *p; 1426 substring_t args[MAX_OPT_ARGS]; ··· 1448 err = match_int(&args[0], &option); 1449 if (err) 1450 return err; 1451 + 1452 + if (remount && token <= pohmelfs_opt_crypto_fail_unsupported) 1453 + continue; 1454 1455 switch (token) { 1456 case pohmelfs_opt_idx: ··· 1483 } 1484 1485 return 0; 1486 + } 1487 + 1488 + static int pohmelfs_remount(struct super_block *sb, int *flags, char *data) 1489 + { 1490 + int err; 1491 + struct pohmelfs_sb *psb = POHMELFS_SB(sb); 1492 + unsigned long old_sb_flags = sb->s_flags; 1493 + 1494 + err = pohmelfs_parse_options(data, psb, 1); 1495 + if (err) 1496 + goto err_out_restore; 1497 + 1498 + if (!(*flags & MS_RDONLY)) 1499 + sb->s_flags &= ~MS_RDONLY; 1500 + return 0; 1501 + 1502 + err_out_restore: 1503 + sb->s_flags = old_sb_flags; 1504 + return err; 1505 } 1506 1507 static void pohmelfs_flush_inode(struct pohmelfs_inode *pi, unsigned int count) ··· 1753 return err; 1754 } 1755 1756 + static int pohmelfs_show_stats(struct seq_file *m, struct vfsmount *mnt) 1757 + { 1758 + struct netfs_state *st; 1759 + struct pohmelfs_ctl *ctl; 1760 + struct pohmelfs_sb *psb = POHMELFS_SB(mnt->mnt_sb); 1761 + struct pohmelfs_config *c; 1762 + 1763 + mutex_lock(&psb->state_lock); 1764 + 1765 + seq_printf(m, "\nidx addr(:port) socket_type protocol active priority permissions\n"); 1766 + 1767 + list_for_each_entry(c, &psb->state_list, config_entry) { 1768 + st = &c->state; 1769 + ctl = &st->ctl; 1770 + 1771 + seq_printf(m, "%u ", ctl->idx); 1772 + if (ctl->addr.sa_family == AF_INET) { 1773 + struct sockaddr_in *sin = (struct sockaddr_in *)&st->ctl.addr; 1774 + //seq_printf(m, "%pi4:%u", &sin->sin_addr.s_addr, ntohs(sin->sin_port)); 1775 + seq_printf(m, 
"%u.%u.%u.%u:%u", NIPQUAD(sin->sin_addr.s_addr), ntohs(sin->sin_port)); 1776 + } else if (ctl->addr.sa_family == AF_INET6) { 1777 + struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&st->ctl.addr; 1778 + seq_printf(m, "%pi6:%u", &sin->sin6_addr, ntohs(sin->sin6_port)); 1779 + } else { 1780 + unsigned int i; 1781 + for (i=0; i<ctl->addrlen; ++i) 1782 + seq_printf(m, "%02x.", ctl->addr.addr[i]); 1783 + } 1784 + 1785 + seq_printf(m, " %u %u %d %u %x\n", 1786 + ctl->type, ctl->proto, 1787 + st->socket != NULL, 1788 + ctl->prio, ctl->perm); 1789 + } 1790 + mutex_unlock(&psb->state_lock); 1791 + 1792 + return 0; 1793 + } 1794 + 1795 + static const struct super_operations pohmelfs_sb_ops = { 1796 + .alloc_inode = pohmelfs_alloc_inode, 1797 + .destroy_inode = pohmelfs_destroy_inode, 1798 + .drop_inode = pohmelfs_drop_inode, 1799 + .write_inode = pohmelfs_write_inode, 1800 + .put_super = pohmelfs_put_super, 1801 + .remount_fs = pohmelfs_remount, 1802 + .statfs = pohmelfs_statfs, 1803 + .show_options = pohmelfs_show_options, 1804 + .show_stats = pohmelfs_show_stats, 1805 + }; 1806 + 1807 /* 1808 * Allocate private superblock and create root dir. 1809 */ ··· 1763 struct inode *root; 1764 struct pohmelfs_inode *npi; 1765 struct qstr str; 1766 1767 psb = kzalloc(sizeof(struct pohmelfs_sb), GFP_KERNEL); 1768 if (!psb) ··· 1816 mutex_init(&psb->state_lock); 1817 INIT_LIST_HEAD(&psb->state_list); 1818 1819 + err = pohmelfs_parse_options((char *) data, psb, 0); 1820 if (err) 1821 goto err_out_free_sb; 1822 ··· 1845 err = PTR_ERR(npi); 1846 goto err_out_crypto_exit; 1847 } 1848 + set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state); 1849 + clear_bit(NETFS_INODE_OWNED, &npi->state); 1850 1851 root = &npi->vfs_inode; 1852 ··· 1887 mnt); 1888 } 1889 1890 + /* 1891 + * We need this to sync all inodes earlier, since when writeback 1892 + * is invoked from the umount/mntput path dcache is already shrunk, 1893 + * see generic_shutdown_super(), and no inodes can access the path. 1894 + */ 1895 + static void pohmelfs_kill_super(struct super_block *sb) 1896 + { 1897 + struct writeback_control wbc = { 1898 + .sync_mode = WB_SYNC_ALL, 1899 + .range_start = 0, 1900 + .range_end = LLONG_MAX, 1901 + .nr_to_write = LONG_MAX, 1902 + }; 1903 + generic_sync_sb_inodes(sb, &wbc); 1904 + 1905 + kill_anon_super(sb); 1906 + } 1907 + 1908 static struct file_system_type pohmel_fs_type = { 1909 .owner = THIS_MODULE, 1910 .name = "pohmel", 1911 .get_sb = pohmelfs_get_sb, 1912 + .kill_sb = pohmelfs_kill_super, 1913 }; 1914 1915 /*
+2 -1
drivers/staging/pohmelfs/lock.c
··· 41 path_len = err; 42 43 err = -ENOMEM; 44 - t = netfs_trans_alloc(psb, path_len + sizeof(struct netfs_lock) + isize, 0, 0); 45 if (!t) 46 goto err_out_exit; 47
··· 41 path_len = err; 42 43 err = -ENOMEM; 44 + t = netfs_trans_alloc(psb, path_len + sizeof(struct netfs_lock) + isize, 45 + NETFS_TRANS_SINGLE_DST, 0); 46 if (!t) 47 goto err_out_exit; 48
+18 -54
drivers/staging/pohmelfs/net.c
··· 26 27 #include "netfs.h" 28 29 - static int pohmelfs_ftrans_size = 10240; 30 - static u32 *pohmelfs_ftrans; 31 - 32 - int pohmelfs_ftrans_init(void) 33 - { 34 - pohmelfs_ftrans = vmalloc(pohmelfs_ftrans_size * 4); 35 - if (!pohmelfs_ftrans) 36 - return -ENOMEM; 37 - 38 - return 0; 39 - } 40 - 41 - void pohmelfs_ftrans_exit(void) 42 - { 43 - vfree(pohmelfs_ftrans); 44 - } 45 - 46 - void pohmelfs_ftrans_clean(u64 id) 47 - { 48 - if (pohmelfs_ftrans) { 49 - u32 i = id & 0xffffffff; 50 - int idx = i % pohmelfs_ftrans_size; 51 - 52 - pohmelfs_ftrans[idx] = 0; 53 - } 54 - } 55 - 56 - void pohmelfs_ftrans_update(u64 id) 57 - { 58 - if (pohmelfs_ftrans) { 59 - u32 i = id & 0xffffffff; 60 - int idx = i % pohmelfs_ftrans_size; 61 - 62 - pohmelfs_ftrans[idx] = i; 63 - } 64 - } 65 - 66 - int pohmelfs_ftrans_check(u64 id) 67 - { 68 - if (pohmelfs_ftrans) { 69 - u32 i = id & 0xffffffff; 70 - int idx = i % pohmelfs_ftrans_size; 71 - 72 - return (pohmelfs_ftrans[idx] == i); 73 - } 74 - 75 - return -1; 76 - } 77 - 78 /* 79 * Async machinery lives here. 80 * All commands being sent to server do _not_ require sync reply, ··· 401 if (err != -EEXIST) 402 goto err_out_put; 403 } else { 404 set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state); 405 clear_bit(NETFS_INODE_OWNED, &npi->state); 406 } 407 } 408 out: ··· 605 if (dst) { 606 netfs_trans_remove_nolock(dst, st); 607 t = dst->trans; 608 - 609 - pohmelfs_ftrans_update(cmd->start); 610 } 611 mutex_unlock(&st->trans_lock); 612 613 if (!t) { 614 - int check = pohmelfs_ftrans_check(cmd->start); 615 - printk("%s: failed to find transaction: start: %llu: id: %llu, size: %u, ext: %u, double: %d.\n", 616 - __func__, cmd->start, cmd->id, cmd->size, cmd->ext, check); 617 err = -EINVAL; 618 goto out; 619 }
··· 26 27 #include "netfs.h" 28 29 /*
 30 * Async machinery lives here.
 31 * All commands being sent to server do _not_ require sync reply, ··· 450 if (err != -EEXIST)
 451 goto err_out_put;
 452 } else {
 453 + struct dentry *dentry, *alias, *pd;
 454 + 
 455 set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state);
 456 clear_bit(NETFS_INODE_OWNED, &npi->state);
 457 + 
 458 + pd = d_find_alias(&parent->vfs_inode);
 459 + if (pd) {
 460 + str.hash = full_name_hash(str.name, str.len);
 461 + dentry = d_alloc(pd, &str);
 462 + if (dentry) {
 463 + alias = d_materialise_unique(dentry, &npi->vfs_inode);
 464 + if (alias && !IS_ERR(alias))
 465 + dput(alias);
 466 + }
 467 + 
 468 + dput(dentry);
 469 + dput(pd);
 470 + }
 471 }
 472 }
 473 out: ··· 638 if (dst) {
 639 netfs_trans_remove_nolock(dst, st);
 640 t = dst->trans;
 641 }
 642 mutex_unlock(&st->trans_lock);
 643 
 644 if (!t) {
 645 + printk("%s: failed to find transaction: start: %llu: id: %llu, size: %u, ext: %u.\n",
 646 + __func__, cmd->start, cmd->id, cmd->size, cmd->ext);
 647 err = -EINVAL;
 648 goto out;
 649 }
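The readdir path above primes the dentry cache with the d_alloc()/d_materialise_unique() sequence other network filesystems use for the same job. The reference rules are the subtle part; condensed, with a hypothetical helper name:

    static void prime_dcache(struct dentry *parent, struct qstr *str,
                             struct inode *inode)
    {
        struct dentry *dentry, *alias;

        str->hash = full_name_hash(str->name, str->len);
        dentry = d_alloc(parent, str);
        if (!dentry)
            return;

        /*
         * d_materialise_unique() either instantiates 'dentry' and
         * returns NULL, or hands back a referenced dentry that already
         * pointed at this inode (or an ERR_PTR for an unresolvable
         * directory alias).
         */
        alias = d_materialise_unique(dentry, inode);
        if (alias && !IS_ERR(alias))
            dput(alias);        /* drop the alias reference */
        dput(dentry);           /* drop the d_alloc() reference */
    }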
+10 -11
drivers/staging/pohmelfs/netfs.h
··· 87 POHMELFS_FLAGS_DEL, /* Network state control message for DEL */ 88 POHMELFS_FLAGS_SHOW, /* Network state control message for SHOW */ 89 POHMELFS_FLAGS_CRYPTO, /* Crypto data control message */ 90 }; 91 92 /* ··· 117 unsigned char data[0]; /* Algorithm string, key and IV */ 118 }; 119 120 /* 121 * Configuration command used to create table of different remote servers. 122 */ 123 struct pohmelfs_ctl 124 { 125 - unsigned int idx; /* Config index */ 126 - unsigned int type; /* Socket type */ 127 - unsigned int proto; /* Socket protocol */ 128 - unsigned int addrlen; /* Size of the address */ 129 - unsigned short unused; /* Align structure by 4 bytes */ 130 struct saddr addr; /* Remote server address */ 131 }; 132 ··· 925 if (atomic_dec_and_test(&m->refcnt)) 926 pohmelfs_mcache_free(psb, m); 927 } 928 - 929 - int pohmelfs_ftrans_init(void); 930 - void pohmelfs_ftrans_exit(void); 931 - void pohmelfs_ftrans_update(u64 id); 932 - int pohmelfs_ftrans_check(u64 id); 933 - void pohmelfs_ftrans_clean(u64 id); 934 935 #endif /* __KERNEL__*/ 936
··· 87 POHMELFS_FLAGS_DEL, /* Network state control message for DEL */ 88 POHMELFS_FLAGS_SHOW, /* Network state control message for SHOW */ 89 POHMELFS_FLAGS_CRYPTO, /* Crypto data control message */ 90 + POHMELFS_FLAGS_MODIFY, /* Network state modification message */ 91 }; 92 93 /* ··· 116 unsigned char data[0]; /* Algorithm string, key and IV */ 117 }; 118 119 + #define POHMELFS_IO_PERM_READ (1<<0) 120 + #define POHMELFS_IO_PERM_WRITE (1<<1) 121 + 122 /* 123 * Configuration command used to create table of different remote servers. 124 */ 125 struct pohmelfs_ctl 126 { 127 + __u32 idx; /* Config index */ 128 + __u32 type; /* Socket type */ 129 + __u32 proto; /* Socket protocol */ 130 + __u16 addrlen; /* Size of the address */ 131 + __u16 perm; /* IO permission */ 132 + __u16 prio; /* IO priority */ 133 struct saddr addr; /* Remote server address */ 134 }; 135 ··· 920 if (atomic_dec_and_test(&m->refcnt)) 921 pohmelfs_mcache_free(psb, m); 922 } 923 924 #endif /* __KERNEL__*/ 925
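With perm and prio carried in struct pohmelfs_ctl itself, the configuration utility sets both when adding a server entry. A rough illustration (the values are examples, and the transport that delivers the message to the kernel is omitted):

    struct pohmelfs_ctl ctl = {
        .idx   = 1,                       /* working-set index */
        .type  = SOCK_STREAM,
        .proto = IPPROTO_TCP,
        .perm  = POHMELFS_IO_PERM_READ,   /* read-only mirror */
        .prio  = 10,
    };

    /* later, promote the mirror to a full replica at run-time,
     * delivered with a POHMELFS_FLAGS_MODIFY message: */
    ctl.perm |= POHMELFS_IO_PERM_WRITE;

The layout change is size-neutral: the old unsigned short unused padding is reused for perm and prio, and the plain unsigned ints become fixed-width __u32 since the structure crosses the user/kernel boundary.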
+12 -23
drivers/staging/pohmelfs/trans.c
··· 456 __func__, t, t->gen, t->iovec.iov_len, t->page_num, psb->active_state); 457 #endif 458 mutex_lock(&psb->state_lock); 459 - 460 - if ((t->flags & NETFS_TRANS_SINGLE_DST) && psb->active_state) { 461 - st = &psb->active_state->state; 462 - 463 - err = -EPIPE; 464 - if (netfs_state_poll(st) & POLLOUT) { 465 - err = netfs_trans_push_dst(t, st); 466 - if (!err) { 467 - err = netfs_trans_send(t, st); 468 - if (err) { 469 - netfs_trans_drop_last(t, st); 470 - } else { 471 - pohmelfs_switch_active(psb); 472 - goto out; 473 - } 474 - } 475 - } 476 - pohmelfs_switch_active(psb); 477 - } 478 - 479 list_for_each_entry(c, &psb->state_list, config_entry) { 480 st = &c->state; 481 482 err = netfs_trans_push(t, st); 483 if (!err && (t->flags & NETFS_TRANS_SINGLE_DST)) 484 break; 485 } 486 - out: 487 mutex_unlock(&psb->state_lock); 488 #if 0 489 dprintk("%s: fully sent t: %p, gen: %u, size: %u, page_num: %u, err: %d.\n", ··· 491 struct netfs_cmd *cmd = t->iovec.iov_base; 492 493 t->gen = atomic_inc_return(&psb->trans_gen); 494 - 495 - pohmelfs_ftrans_clean(t->gen); 496 497 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) + 498 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
··· 456 __func__, t, t->gen, t->iovec.iov_len, t->page_num, psb->active_state); 457 #endif 458 mutex_lock(&psb->state_lock); 459 list_for_each_entry(c, &psb->state_list, config_entry) { 460 st = &c->state; 461 + 462 + if (t->flags & NETFS_TRANS_SINGLE_DST) { 463 + if (!(st->ctl.perm & POHMELFS_IO_PERM_READ)) 464 + continue; 465 + } else { 466 + if (!(st->ctl.perm & POHMELFS_IO_PERM_WRITE)) 467 + continue; 468 + } 469 + 470 + if (psb->active_state && (psb->active_state->state.ctl.prio >= st->ctl.prio)) 471 + st = &psb->active_state->state; 472 473 err = netfs_trans_push(t, st); 474 if (!err && (t->flags & NETFS_TRANS_SINGLE_DST)) 475 break; 476 } 477 + 478 mutex_unlock(&psb->state_lock); 479 #if 0 480 dprintk("%s: fully sent t: %p, gen: %u, size: %u, page_num: %u, err: %d.\n", ··· 500 struct netfs_cmd *cmd = t->iovec.iov_base; 501 502 t->gen = atomic_inc_return(&psb->trans_gen); 503 504 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) + 505 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
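The rewritten loop gives a read transaction (NETFS_TRANS_SINGLE_DST, as the lock path above now sets) exactly one READ-capable destination, preferring the active state while its priority is at least as high, and replicates everything else to every WRITE-capable state. A simplified, lock-free model of that policy (the harness types are hypothetical):

    struct dst {
        unsigned short perm, prio;
        int pushed;
    };

    static void route(struct dst *dsts, int n, struct dst *active,
                      int single_dst)
    {
        int i;

        for (i = 0; i < n; i++) {
            struct dst *d = &dsts[i];

            /* reads need READ permission, writes need WRITE */
            if (!(d->perm & (single_dst ? POHMELFS_IO_PERM_READ
                                        : POHMELFS_IO_PERM_WRITE)))
                continue;

            /* the active state wins while its priority is not lower */
            if (active && active->prio >= d->prio)
                d = active;

            d->pushed++;
            if (single_dst)
                break;          /* reads stop at the first destination */
        }
    }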
+15 -10
drivers/staging/rt2860/rt_main_dev.c
··· 722 return (-1); 723 } /* End of rt28xx_open */ 724 725 726 /* Must not be called for mdev and apdev */ 727 static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd) ··· 747 748 749 //ether_setup(dev); 750 - dev->hard_start_xmit = rt28xx_send_packets; 751 - 752 - #ifdef IKANOS_VX_1X0 753 - dev->hard_start_xmit = IKANOS_DataFramesTx; 754 - #endif // IKANOS_VX_1X0 // 755 756 #ifdef CONFIG_STA_SUPPORT 757 #if WIRELESS_EXT >= 12 ··· 769 #if WIRELESS_EXT < 21 770 dev->get_wireless_stats = rt28xx_get_wireless_stats; 771 #endif 772 - dev->get_stats = RT28xx_get_ether_stats; 773 - dev->open = MainVirtualIF_open; //rt28xx_open; 774 - dev->stop = MainVirtualIF_close; //rt28xx_close; 775 dev->priv_flags = INT_MAIN; 776 - dev->do_ioctl = rt28xx_ioctl; 777 - dev->validate_addr = NULL; 778 // find available device name 779 for (i = 0; i < 8; i++) 780 {
··· 722 return (-1); 723 } /* End of rt28xx_open */ 724 725 + static const struct net_device_ops rt2860_netdev_ops = { 726 + .ndo_open = MainVirtualIF_open, 727 + .ndo_stop = MainVirtualIF_close, 728 + .ndo_do_ioctl = rt28xx_ioctl, 729 + .ndo_get_stats = RT28xx_get_ether_stats, 730 + .ndo_validate_addr = NULL, 731 + .ndo_set_mac_address = eth_mac_addr, 732 + .ndo_change_mtu = eth_change_mtu, 733 + #ifdef IKANOS_VX_1X0 734 + .ndo_start_xmit = IKANOS_DataFramesTx, 735 + #else 736 + .ndo_start_xmit = rt28xx_send_packets, 737 + #endif 738 + }; 739 740 /* Must not be called for mdev and apdev */ 741 static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd) ··· 733 734 735 //ether_setup(dev); 736 737 #ifdef CONFIG_STA_SUPPORT 738 #if WIRELESS_EXT >= 12 ··· 760 #if WIRELESS_EXT < 21 761 dev->get_wireless_stats = rt28xx_get_wireless_stats; 762 #endif 763 dev->priv_flags = INT_MAIN; 764 + dev->netdev_ops = &rt2860_netdev_ops; 765 // find available device name 766 for (i = 0; i < 8; i++) 767 {
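This is the stock net_device_ops conversion, and the same mechanical change repeats for rt2870, rt3070, slicoss, sxg and wlan-ng below: the per-field function pointers scattered over struct net_device move into one const ops table that is assigned once. In outline (the foo_* names are placeholders):

    static const struct net_device_ops foo_netdev_ops = {
        .ndo_open       = foo_open,     /* was dev->open */
        .ndo_stop       = foo_stop,     /* was dev->stop */
        .ndo_start_xmit = foo_xmit,     /* was dev->hard_start_xmit */
        .ndo_do_ioctl   = foo_ioctl,    /* was dev->do_ioctl */
    };

    /* in the setup routine: */
    dev->netdev_ops = &foo_netdev_ops;

Making the table const keeps the pointers in read-only memory, and the IKANOS_VX_1X0 case, which used to overwrite hard_start_xmit after the fact, becomes an explicit compile-time choice of .ndo_start_xmit.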
+1
drivers/staging/rt2870/rt2870.h
··· 96 {USB_DEVICE(0x0DF6,0x002B)}, /* Sitecom */ \ 97 {USB_DEVICE(0x0DF6,0x002C)}, /* Sitecom */ \ 98 {USB_DEVICE(0x0DF6,0x002D)}, /* Sitecom */ \ 99 {USB_DEVICE(0x14B2,0x3C06)}, /* Conceptronic */ \ 100 {USB_DEVICE(0x14B2,0x3C28)}, /* Conceptronic */ \ 101 {USB_DEVICE(0x2019,0xED06)}, /* Planex Communications, Inc. */ \
··· 96 {USB_DEVICE(0x0DF6,0x002B)}, /* Sitecom */ \ 97 {USB_DEVICE(0x0DF6,0x002C)}, /* Sitecom */ \ 98 {USB_DEVICE(0x0DF6,0x002D)}, /* Sitecom */ \ 99 + {USB_DEVICE(0x0DF6,0x0039)}, /* Sitecom */ \ 100 {USB_DEVICE(0x14B2,0x3C06)}, /* Conceptronic */ \ 101 {USB_DEVICE(0x14B2,0x3C28)}, /* Conceptronic */ \ 102 {USB_DEVICE(0x2019,0xED06)}, /* Planex Communications, Inc. */ \
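For reference, each USB_DEVICE(vendor, product) entry in this table expands to a struct usb_device_id matched on the ID pair alone, roughly:

    {
        .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
        .idVendor    = 0x0DF6,          /* the new Sitecom entry */
        .idProduct   = 0x0039,
    },

so supporting a rebadged dongle is a one-line table edit with no code change.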
+15 -13
drivers/staging/rt2870/rt_main_dev.c
··· 855 return (-1); 856 } /* End of rt28xx_open */ 857 858 859 /* Must not be called for mdev and apdev */ 860 static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd) ··· 880 881 882 //ether_setup(dev); 883 - dev->hard_start_xmit = rt28xx_send_packets; 884 - 885 - #ifdef IKANOS_VX_1X0 886 - dev->hard_start_xmit = IKANOS_DataFramesTx; 887 - #endif // IKANOS_VX_1X0 // 888 - 889 // dev->set_multicast_list = ieee80211_set_multicast_list; 890 // dev->change_mtu = ieee80211_change_mtu; 891 #ifdef CONFIG_STA_SUPPORT ··· 903 #if WIRELESS_EXT < 21 904 dev->get_wireless_stats = rt28xx_get_wireless_stats; 905 #endif 906 - dev->get_stats = RT28xx_get_ether_stats; 907 - dev->open = MainVirtualIF_open; //rt28xx_open; 908 - dev->stop = MainVirtualIF_close; //rt28xx_close; 909 // dev->uninit = ieee80211_if_reinit; 910 // dev->destructor = ieee80211_if_free; 911 dev->priv_flags = INT_MAIN; 912 - dev->do_ioctl = rt28xx_ioctl; 913 - #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) 914 - dev->validate_addr = NULL; 915 - #endif 916 // find available device name 917 for (i = 0; i < 8; i++) 918 {
··· 855 return (-1); 856 } /* End of rt28xx_open */ 857 858 + static const struct net_device_ops rt2870_netdev_ops = { 859 + .ndo_open = MainVirtualIF_open, 860 + .ndo_stop = MainVirtualIF_close, 861 + .ndo_do_ioctl = rt28xx_ioctl, 862 + .ndo_get_stats = RT28xx_get_ether_stats, 863 + .ndo_validate_addr = NULL, 864 + .ndo_set_mac_address = eth_mac_addr, 865 + .ndo_change_mtu = eth_change_mtu, 866 + #ifdef IKANOS_VX_1X0 867 + .ndo_start_xmit = IKANOS_DataFramesTx, 868 + #else 869 + .ndo_start_xmit = rt28xx_send_packets, 870 + #endif 871 + }; 872 873 /* Must not be called for mdev and apdev */ 874 static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd) ··· 866 867 868 //ether_setup(dev); 869 // dev->set_multicast_list = ieee80211_set_multicast_list; 870 // dev->change_mtu = ieee80211_change_mtu; 871 #ifdef CONFIG_STA_SUPPORT ··· 895 #if WIRELESS_EXT < 21 896 dev->get_wireless_stats = rt28xx_get_wireless_stats; 897 #endif 898 // dev->uninit = ieee80211_if_reinit; 899 // dev->destructor = ieee80211_if_free; 900 dev->priv_flags = INT_MAIN; 901 + dev->netdev_ops = &rt2870_netdev_ops; 902 // find available device name 903 for (i = 0; i < 8; i++) 904 {
+15 -14
drivers/staging/rt3070/rt_main_dev.c
··· 436 // OID_SET_HT_PHYMODE SetHT; 437 // WPDMA_GLO_CFG_STRUC GloCfg; 438 UINT32 MacCsr0 = 0; 439 - UINT32 MacValue = 0; 440 441 #ifdef RT2870 442 #ifdef INF_AMAZON_SE ··· 848 return (-1); 849 } /* End of rt28xx_open */ 850 851 852 /* Must not be called for mdev and apdev */ 853 static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd) ··· 873 874 875 //ether_setup(dev); 876 - dev->hard_start_xmit = rt28xx_send_packets; 877 - 878 - #ifdef IKANOS_VX_1X0 879 - dev->hard_start_xmit = IKANOS_DataFramesTx; 880 - #endif // IKANOS_VX_1X0 // 881 - 882 // dev->set_multicast_list = ieee80211_set_multicast_list; 883 // dev->change_mtu = ieee80211_change_mtu; 884 #ifdef CONFIG_STA_SUPPORT ··· 896 #if WIRELESS_EXT < 21 897 dev->get_wireless_stats = rt28xx_get_wireless_stats; 898 #endif 899 - dev->get_stats = RT28xx_get_ether_stats; 900 - dev->open = MainVirtualIF_open; //rt28xx_open; 901 - dev->stop = MainVirtualIF_close; //rt28xx_close; 902 // dev->uninit = ieee80211_if_reinit; 903 // dev->destructor = ieee80211_if_free; 904 dev->priv_flags = INT_MAIN; 905 - dev->do_ioctl = rt28xx_ioctl; 906 - #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) 907 - dev->validate_addr = NULL; 908 - #endif 909 // find available device name 910 for (i = 0; i < 8; i++) 911 {
··· 436 // OID_SET_HT_PHYMODE SetHT; 437 // WPDMA_GLO_CFG_STRUC GloCfg; 438 UINT32 MacCsr0 = 0; 439 440 #ifdef RT2870 441 #ifdef INF_AMAZON_SE ··· 849 return (-1); 850 } /* End of rt28xx_open */ 851 852 + static const struct net_device_ops rt3070_netdev_ops = { 853 + .ndo_open = MainVirtualIF_open, 854 + .ndo_stop = MainVirtualIF_close, 855 + .ndo_do_ioctl = rt28xx_ioctl, 856 + .ndo_get_stats = RT28xx_get_ether_stats, 857 + .ndo_validate_addr = NULL, 858 + .ndo_set_mac_address = eth_mac_addr, 859 + .ndo_change_mtu = eth_change_mtu, 860 + #ifdef IKANOS_VX_1X0 861 + .ndo_start_xmit = IKANOS_DataFramesTx, 862 + #else 863 + .ndo_start_xmit = rt28xx_send_packets, 864 + #endif 865 + }; 866 867 /* Must not be called for mdev and apdev */ 868 static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd) ··· 860 861 862 //ether_setup(dev); 863 // dev->set_multicast_list = ieee80211_set_multicast_list; 864 // dev->change_mtu = ieee80211_change_mtu; 865 #ifdef CONFIG_STA_SUPPORT ··· 889 #if WIRELESS_EXT < 21 890 dev->get_wireless_stats = rt28xx_get_wireless_stats; 891 #endif 892 // dev->uninit = ieee80211_if_reinit; 893 // dev->destructor = ieee80211_if_free; 894 dev->priv_flags = INT_MAIN; 895 + dev->netdev_ops = &rt3070_netdev_ops; 896 // find available device name 897 for (i = 0; i < 8; i++) 898 {
+30 -1
drivers/staging/slicoss/README
··· 10 - move firmware loading to request_firmware() 11 - remove direct memory access of structures 12 - any remaining sparse and checkpatch.pl warnings 13 - - any netdev recommended changes 14 15 Please send patches to: 16 Greg Kroah-Hartman <gregkh@suse.de>
··· 10 - move firmware loading to request_firmware()
 11 - remove direct memory access of structures
 12 - any remaining sparse and checkpatch.pl warnings
 13 + 
 14 + - use net_device_ops
 15 + - use dev->stats rather than adapter->stats
 16 + - don't cast netdev_priv, it is already void
 17 + - use compare_ether_addr
 18 + - GET RID OF MACROS
 19 + - work on all architectures
 20 + - without CONFIG_X86_64 confusion
 21 + - do 64 bit correctly
 22 + - don't depend on order of union
 23 + - get rid of ASSERT(), use BUG() instead, but only where necessary;
 24 + looks like most aren't really useful
 25 + - no new SIOCDEVPRIVATE ioctl allowed
 26 + - don't use module_param for configuring interrupt mitigation;
 27 + use ethtool instead (see the sketch after this diff)
 28 + - reorder code to eliminate use of forward declarations
 29 + - don't keep a private linked list of drivers
 30 + - remove all the gratuitous debug infrastructure
 31 + - use PCI_DEVICE()
 32 + - do ethtool correctly using ethtool_ops
 33 + - NAPI?
 34 + - wasted overhead of extra stats
 35 + - state variables for things that are
 36 + easily available and shouldn't be kept in the card structure: cardnum, ...
 37 + slotnumber, events, ...
 38 + - get rid of the slic_spinlock wrapper
 39 + - volatile == bad design => bad code
 40 + - locking too fine grained, not designed; just throws more locks
 41 + at the problem
 42 + 
 43 44 Please send patches to:
 45 Greg Kroah-Hartman <gregkh@suse.de>
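As an example of the interrupt-mitigation item above, the module parameter would become an ethtool coalescing knob, roughly like this (the handler names and the intagg_delay field are assumptions about the driver):

    static int slic_get_coalesce(struct net_device *dev,
                                 struct ethtool_coalesce *coal)
    {
        struct adapter *adapter = netdev_priv(dev);   /* no cast needed */

        coal->rx_coalesce_usecs = adapter->intagg_delay;
        return 0;
    }

    static int slic_set_coalesce(struct net_device *dev,
                                 struct ethtool_coalesce *coal)
    {
        struct adapter *adapter = netdev_priv(dev);

        adapter->intagg_delay = coal->rx_coalesce_usecs;
        return 0;
    }

    static const struct ethtool_ops slic_ethtool_ops = {
        .get_coalesce = slic_get_coalesce,
        .set_coalesce = slic_set_coalesce,
    };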
+16 -9
drivers/staging/slicoss/slicoss.c
··· 345 return; 346 } 347 348 static int __devinit slic_entry_probe(struct pci_dev *pcidev, 349 const struct pci_device_id *pci_tbl_entry) 350 { ··· 455 456 netdev->base_addr = (unsigned long)adapter->memorybase; 457 netdev->irq = adapter->irq; 458 - netdev->open = slic_entry_open; 459 - netdev->stop = slic_entry_halt; 460 - netdev->hard_start_xmit = slic_xmit_start; 461 - netdev->do_ioctl = slic_ioctl; 462 - netdev->set_mac_address = slic_mac_set_address; 463 - netdev->get_stats = slic_get_stats; 464 - netdev->set_multicast_list = slic_mcast_set_list; 465 466 slic_debug_adapter_create(adapter); 467 ··· 1267 } 1268 1269 /* Doesn't already exist. Allocate a structure to hold it */ 1270 - mcaddr = kmalloc(sizeof(struct mcast_address), GFP_KERNEL); 1271 if (mcaddr == NULL) 1272 return 1; 1273 ··· 2291 } 2292 if (!physcard) { 2293 /* no structure allocated for this physical card yet */ 2294 - physcard = kzalloc(sizeof(struct physcard), GFP_KERNEL); 2295 ASSERT(physcard); 2296 2297 physcard->next = slic_global.phys_card;
··· 345 return; 346 }
 347 
 348 + static const struct net_device_ops slic_netdev_ops = {
 349 + .ndo_open = slic_entry_open,
 350 + .ndo_stop = slic_entry_halt,
 351 + .ndo_start_xmit = slic_xmit_start,
 352 + .ndo_do_ioctl = slic_ioctl,
 353 + .ndo_set_mac_address = slic_mac_set_address,
 354 + .ndo_get_stats = slic_get_stats,
 355 + .ndo_set_multicast_list = slic_mcast_set_list,
 356 + .ndo_validate_addr = eth_validate_addr,
 357 + /* no eth_mac_addr here: slic_mac_set_address above also programs the NIC */
 358 + .ndo_change_mtu = eth_change_mtu,
 359 + };
 360 + 
 361 static int __devinit slic_entry_probe(struct pci_dev *pcidev,
 362 const struct pci_device_id *pci_tbl_entry)
 363 {
··· 442 
 443 netdev->base_addr = (unsigned long)adapter->memorybase;
 444 netdev->irq = adapter->irq;
 445 + netdev->netdev_ops = &slic_netdev_ops;
 446 
 447 slic_debug_adapter_create(adapter);
 448 
··· 1260 }
 1261 
 1262 /* Doesn't already exist. Allocate a structure to hold it */
 1263 + mcaddr = kmalloc(sizeof(struct mcast_address), GFP_ATOMIC);
 1264 if (mcaddr == NULL)
 1265 return 1;
 1266 
··· 2284 }
 2285 if (!physcard) {
 2286 /* no structure allocated for this physical card yet */
 2287 + physcard = kzalloc(sizeof(struct physcard), GFP_ATOMIC);
 2288 ASSERT(physcard);
 2289 
 2290 physcard->next = slic_global.phys_card;
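The two GFP_KERNEL to GFP_ATOMIC switches are correctness fixes rather than tuning: the multicast-list path runs in atomic context (the core holds a lock around set_multicast_list), where an allocation that can sleep is illegal. The rule in miniature, with an assumed lock:

    spin_lock_irqsave(&adapter->lock, flags);

    /*
     * GFP_KERNEL may sleep for memory reclaim, which is forbidden
     * under a spinlock; GFP_ATOMIC never sleeps but may fail, so the
     * NULL check after the allocation stays essential.
     */
    mcaddr = kmalloc(sizeof(*mcaddr), GFP_ATOMIC);

    spin_unlock_irqrestore(&adapter->lock, flags);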
+1 -1
drivers/staging/stlc45xx/Kconfig
··· 1 config STLC45XX 2 tristate "stlc4550/4560 support" 3 - depends on MAC80211 && WLAN_80211 && SPI_MASTER 4 ---help--- 5 This is a driver for stlc4550 and stlc4560 chipsets. 6
··· 1 config STLC45XX 2 tristate "stlc4550/4560 support" 3 + depends on MAC80211 && WLAN_80211 && SPI_MASTER && GENERIC_HARDIRQS 4 ---help--- 5 This is a driver for stlc4550 and stlc4560 chipsets. 6
+84 -43
drivers/staging/sxg/sxg.c
··· 322 int ret,i; 323 324 if (!adapter->intrregistered) { 325 for (i=0; i<adapter->nr_msix_entries; i++) { 326 ret = request_irq (adapter->msi_entries[i].vector, 327 sxg_isr, ··· 331 adapter->netdev->name, 332 adapter->netdev); 333 if (ret) { 334 DBG_ERROR("sxg: MSI-X request_irq (%s) " 335 "FAILED [%x]\n", adapter->netdev->name, 336 ret); ··· 340 } 341 } 342 } 343 adapter->msi_enabled = TRUE; 344 adapter->intrregistered = 1; 345 adapter->IntRegistered = TRUE; ··· 901 return status; 902 } 903 904 static int sxg_entry_probe(struct pci_dev *pcidev, 905 const struct pci_device_id *pci_tbl_entry) 906 { ··· 1116 1117 netdev->base_addr = (unsigned long)adapter->base_addr; 1118 netdev->irq = adapter->irq; 1119 - netdev->open = sxg_entry_open; 1120 - netdev->stop = sxg_entry_halt; 1121 - netdev->hard_start_xmit = sxg_send_packets; 1122 - netdev->do_ioctl = sxg_ioctl; 1123 - netdev->change_mtu = sxg_change_mtu; 1124 - #if XXXTODO 1125 - netdev->set_mac_address = sxg_mac_set_address; 1126 - #endif 1127 - netdev->get_stats = sxg_get_stats; 1128 - netdev->set_multicast_list = sxg_mcast_set_list; 1129 SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops); 1130 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1131 err = sxg_set_interrupt_capability(adapter); ··· 2259 DBG_ERROR("sxg: %s EXIT\n", __func__); 2260 2261 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags); 2262 return STATUS_SUCCESS; 2263 } 2264 ··· 2582 u64 phys_addr; 2583 unsigned long flags; 2584 unsigned long queue_id=0; 2585 2586 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl", 2587 pSgl, SxgSgl, 0, 0); ··· 2621 struct iphdr *ip; 2622 2623 ip = ip_hdr(skb); 2624 - if ((ip->protocol == IPPROTO_TCP)&&(DataLength >= sizeof( 2625 struct tcphdr))){ 2626 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ? 2627 (ntohs (tcp_hdr(skb)->source) & ··· 2634 SXG_LARGE_SEND_QUEUE_MASK)); 2635 } 2636 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2637 - if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) && (DataLength >= 2638 - sizeof(struct tcphdr)) ) { 2639 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ? 
2640 (ntohs (tcp_hdr(skb)->source) & 2641 SXG_LARGE_SEND_QUEUE_MASK): ··· 2667 } 2668 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd", 2669 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0); 2670 - /* Update stats */ 2671 - adapter->stats.tx_packets++; 2672 - adapter->stats.tx_bytes += DataLength; 2673 - #if XXXTODO /* Stats stuff */ 2674 - if (SXG_MULTICAST_PACKET(EtherHdr)) { 2675 - if (SXG_BROADCAST_PACKET(EtherHdr)) { 2676 - adapter->Stats.DumbXmtBcastPkts++; 2677 - adapter->Stats.DumbXmtBcastBytes += DataLength; 2678 } else { 2679 - adapter->Stats.DumbXmtMcastPkts++; 2680 - adapter->Stats.DumbXmtMcastBytes += DataLength; 2681 } 2682 - } else { 2683 - adapter->Stats.DumbXmtUcastPkts++; 2684 - adapter->Stats.DumbXmtUcastBytes += DataLength; 2685 } 2686 - #endif 2687 /* 2688 * Fill in the command 2689 * Copy out the first SGE to the command and adjust for offset ··· 2716 (SXG_INVALID_SGL(phys_addr,skb->data_len))) 2717 { 2718 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); 2719 /* Silently drop this packet */ 2720 printk(KERN_EMERG"Dropped a packet for 64k boundary problem\n"); 2721 return STATUS_SUCCESS; 2722 } 2723 - memset(XmtCmd, '\0', sizeof(*XmtCmd)); 2724 XmtCmd->Buffer.FirstSgeAddress = phys_addr; 2725 XmtCmd->Buffer.FirstSgeLength = DataLength; 2726 XmtCmd->Buffer.SgeOffset = 0; 2727 XmtCmd->Buffer.TotalLength = DataLength; 2728 - XmtCmd->SgEntries = 1; 2729 - XmtCmd->Flags = 0; 2730 2731 - if (skb->ip_summed == CHECKSUM_PARTIAL) { 2732 - /* 2733 - * We need to set the Checkum in IP header to 0. This is 2734 - * required by hardware. 2735 - */ 2736 - ip_hdr(skb)->check = 0x0; 2737 - XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_IP; 2738 - XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_TCP; 2739 - /* Dont know if length will require a change in case of VLAN */ 2740 - XmtCmd->CsumFlags.MacLen = ETH_HLEN; 2741 - XmtCmd->CsumFlags.IpHl = skb_network_header_len(skb) >> 2742 - SXG_NW_HDR_LEN_SHIFT; 2743 - } 2744 /* 2745 * Advance transmit cmd descripter by 1. 2746 * NOTE - See comments in SxgTcpOutput where we write ··· 2738 ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0); 2739 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE); 2740 adapter->Stats.XmtQLen++; /* Stats within lock */ 2741 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); 2742 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2", 2743 XmtCmd, pSgl, SxgSgl, 0);
··· 322 int ret,i; 323 324 if (!adapter->intrregistered) { 325 + spin_unlock_irqrestore(&sxg_global.driver_lock, 326 + sxg_global.flags); 327 for (i=0; i<adapter->nr_msix_entries; i++) { 328 ret = request_irq (adapter->msi_entries[i].vector, 329 sxg_isr, ··· 329 adapter->netdev->name, 330 adapter->netdev); 331 if (ret) { 332 + spin_lock_irqsave(&sxg_global.driver_lock, 333 + sxg_global.flags); 334 DBG_ERROR("sxg: MSI-X request_irq (%s) " 335 "FAILED [%x]\n", adapter->netdev->name, 336 ret); ··· 336 } 337 } 338 } 339 + spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags); 340 adapter->msi_enabled = TRUE; 341 adapter->intrregistered = 1; 342 adapter->IntRegistered = TRUE; ··· 896 return status; 897 } 898 899 + static const struct net_device_ops sxg_netdev_ops = { 900 + .ndo_open = sxg_entry_open, 901 + .ndo_stop = sxg_entry_halt, 902 + .ndo_start_xmit = sxg_send_packets, 903 + .ndo_do_ioctl = sxg_ioctl, 904 + .ndo_change_mtu = sxg_change_mtu, 905 + .ndo_get_stats = sxg_get_stats, 906 + .ndo_set_multicast_list = sxg_mcast_set_list, 907 + .ndo_validate_addr = eth_validate_addr, 908 + #if XXXTODO 909 + .ndo_set_mac_address = sxg_mac_set_address, 910 + #else 911 + .ndo_set_mac_address = eth_mac_addr, 912 + #endif 913 + }; 914 + 915 static int sxg_entry_probe(struct pci_dev *pcidev, 916 const struct pci_device_id *pci_tbl_entry) 917 { ··· 1095 1096 netdev->base_addr = (unsigned long)adapter->base_addr; 1097 netdev->irq = adapter->irq; 1098 + netdev->netdev_ops = &sxg_netdev_ops; 1099 SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops); 1100 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1101 err = sxg_set_interrupt_capability(adapter); ··· 2247 DBG_ERROR("sxg: %s EXIT\n", __func__); 2248 2249 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags); 2250 + mod_timer(&adapter->watchdog_timer, jiffies); 2251 + 2252 return STATUS_SUCCESS; 2253 } 2254 ··· 2568 u64 phys_addr; 2569 unsigned long flags; 2570 unsigned long queue_id=0; 2571 + int offload_cksum = 0; 2572 2573 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl", 2574 pSgl, SxgSgl, 0, 0); ··· 2606 struct iphdr *ip; 2607 2608 ip = ip_hdr(skb); 2609 + if (ip->protocol == IPPROTO_TCP) 2610 + offload_cksum = 1; 2611 + if (!offload_cksum || !tcp_hdr(skb)) 2612 + queue_id = 0; 2613 + else if (offload_cksum && (DataLength >= sizeof( 2614 struct tcphdr))){ 2615 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ? 2616 (ntohs (tcp_hdr(skb)->source) & ··· 2615 SXG_LARGE_SEND_QUEUE_MASK)); 2616 } 2617 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2618 + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2619 + offload_cksum = 1; 2620 + if (!offload_cksum || !tcp_hdr(skb)) 2621 + queue_id = 0; 2622 + else if (offload_cksum && (DataLength>=sizeof(struct tcphdr))){ 2623 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ? 2624 (ntohs (tcp_hdr(skb)->source) & 2625 SXG_LARGE_SEND_QUEUE_MASK): ··· 2645 } 2646 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd", 2647 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0); 2648 + memset(XmtCmd, '\0', sizeof(*XmtCmd)); 2649 + XmtCmd->SgEntries = 1; 2650 + XmtCmd->Flags = 0; 2651 + if (skb->ip_summed == CHECKSUM_PARTIAL) { 2652 + /* 2653 + * We need to set the Checkum in IP header to 0. This is 2654 + * required by hardware. 
2655 + */ 2656 + if (offload_cksum) { 2657 + ip_hdr(skb)->check = 0x0; 2658 + XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_IP; 2659 + XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_TCP; 2660 + /* 2661 + * Dont know if length will require a change in 2662 + * case of VLAN 2663 + */ 2664 + XmtCmd->CsumFlags.MacLen = ETH_HLEN; 2665 + XmtCmd->CsumFlags.IpHl = skb_network_header_len(skb) >> 2666 + SXG_NW_HDR_LEN_SHIFT; 2667 } else { 2668 + if (skb_checksum_help(skb)){ 2669 + printk(KERN_EMERG "Dropped UDP packet for" 2670 + " incorrect checksum calculation\n"); 2671 + if (XmtCmd) 2672 + SXG_ABORT_CMD(XmtRingInfo); 2673 + spin_unlock_irqrestore(&adapter->XmtZeroLock, 2674 + flags); 2675 + return STATUS_SUCCESS; 2676 + } 2677 } 2678 } 2679 + 2680 /* 2681 * Fill in the command 2682 * Copy out the first SGE to the command and adjust for offset ··· 2679 (SXG_INVALID_SGL(phys_addr,skb->data_len))) 2680 { 2681 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); 2682 + if (XmtCmd) 2683 + SXG_ABORT_CMD(XmtRingInfo); 2684 /* Silently drop this packet */ 2685 printk(KERN_EMERG"Dropped a packet for 64k boundary problem\n"); 2686 return STATUS_SUCCESS; 2687 } 2688 XmtCmd->Buffer.FirstSgeAddress = phys_addr; 2689 XmtCmd->Buffer.FirstSgeLength = DataLength; 2690 XmtCmd->Buffer.SgeOffset = 0; 2691 XmtCmd->Buffer.TotalLength = DataLength; 2692 2693 /* 2694 * Advance transmit cmd descripter by 1. 2695 * NOTE - See comments in SxgTcpOutput where we write ··· 2715 ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0); 2716 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE); 2717 adapter->Stats.XmtQLen++; /* Stats within lock */ 2718 + /* Update stats */ 2719 + adapter->stats.tx_packets++; 2720 + adapter->stats.tx_bytes += DataLength; 2721 + #if XXXTODO /* Stats stuff */ 2722 + if (SXG_MULTICAST_PACKET(EtherHdr)) { 2723 + if (SXG_BROADCAST_PACKET(EtherHdr)) { 2724 + adapter->Stats.DumbXmtBcastPkts++; 2725 + adapter->Stats.DumbXmtBcastBytes += DataLength; 2726 + } else { 2727 + adapter->Stats.DumbXmtMcastPkts++; 2728 + adapter->Stats.DumbXmtMcastBytes += DataLength; 2729 + } 2730 + } else { 2731 + adapter->Stats.DumbXmtUcastPkts++; 2732 + adapter->Stats.DumbXmtUcastBytes += DataLength; 2733 + } 2734 + #endif 2735 + 2736 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); 2737 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2", 2738 XmtCmd, pSgl, SxgSgl, 0);
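The request_irq() hunk at the top of this diff is the standard cure for a sleep-in-atomic warning: instead of trying to make the sleeping call atomic, the lock is dropped around it and retaken afterwards. Schematically, condensed from the change above:

    spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
    /* non-sleeping setup */
    spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);

    ret = request_irq(vector, sxg_isr, 0, netdev->name, netdev); /* may sleep */

    spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
    /* continue, re-validating anything that may have changed meanwhile */

The transmit rework applies a complementary rule for checksums: packets the hardware cannot offload (non-TCP here) are resolved in software via skb_checksum_help() before DMA, and the command slot is released with SXG_ABORT_CMD() when that fails, instead of sending a frame with a bad checksum.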
+3 -25
drivers/staging/uc2322/aten2011.c
··· 603 604 tty = tty_port_tty_get(&ATEN2011_port->port->port); 605 606 - if (tty && ATEN2011_port->open) { 607 /* tell the tty driver that something has changed */ 608 - wake_up_interruptible(&tty->write_wait); 609 - } 610 611 /* schedule_work(&ATEN2011_port->port->work); */ 612 tty_kref_put(tty); ··· 824 status = 0; 825 status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data); 826 827 - /* force low_latency on so that our tty_push actually forces * 828 - * the data through,otherwise it is scheduled, and with * 829 - * high data rates (like with OHCI) data can get lost. */ 830 - 831 - if (tty) 832 - tty->low_latency = 1; 833 /* 834 * Check to see if we've set up our endpoint info yet 835 * (can't set it up in ATEN2011_startup as the structures ··· 1466 1467 cflag = tty->termios->c_cflag; 1468 1469 - if (!cflag) { 1470 - dbg("%s %s", __func__, "cflag is NULL"); 1471 - return; 1472 - } 1473 - 1474 - /* check that they really want us to change something */ 1475 - if (old_termios) { 1476 - if ((cflag == old_termios->c_cflag) && 1477 - (RELEVANT_IFLAG(tty->termios->c_iflag) == 1478 - RELEVANT_IFLAG(old_termios->c_iflag))) { 1479 - dbg("%s", "Nothing to change"); 1480 - return; 1481 - } 1482 - } 1483 - 1484 - dbg("%s - clfag %08x iflag %08x", __func__, 1485 tty->termios->c_cflag, RELEVANT_IFLAG(tty->termios->c_iflag)); 1486 1487 if (old_termios) {
··· 603 604 tty = tty_port_tty_get(&ATEN2011_port->port->port); 605 606 + if (tty && ATEN2011_port->open) 607 /* tell the tty driver that something has changed */ 608 + tty_wakeup(tty); 609 610 /* schedule_work(&ATEN2011_port->port->work); */ 611 tty_kref_put(tty); ··· 825 status = 0; 826 status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data); 827 828 /* 829 * Check to see if we've set up our endpoint info yet 830 * (can't set it up in ATEN2011_startup as the structures ··· 1473 1474 cflag = tty->termios->c_cflag; 1475 1476 + dbg("%s - cflag %08x iflag %08x", __func__, 1477 tty->termios->c_cflag, RELEVANT_IFLAG(tty->termios->c_iflag)); 1478 1479 if (old_termios) {
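tty_wakeup() is more than a shorthand for the open-coded wake_up_interruptible(): it also runs the line discipline's write_wakeup() callback when one has been requested. Its shape in the tty core is approximately this (simplified, not verbatim):

    void tty_wakeup(struct tty_struct *tty)
    {
        struct tty_ldisc *ld;

        if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) {
            ld = tty_ldisc_ref(tty);
            if (ld) {
                if (ld->ops->write_wakeup)
                    ld->ops->write_wakeup(tty);
                tty_ldisc_deref(ld);
            }
        }
        wake_up_interruptible(&tty->write_wait);
    }

The other two deletions follow the same theme of letting the core own the policy: forcing tty->low_latency from a USB driver was never safe, and the hand-rolled "nothing to change" short-circuit in set_termios could skip updates the device still needed.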
+15 -5
drivers/staging/wlan-ng/p80211netdev.c
··· 711 return 0; 712 } 713 714 /*---------------------------------------------------------------- 715 * wlan_setup 716 * ··· 770 } else { 771 wlandev->netdev = dev; 772 dev->ml_priv = wlandev; 773 - dev->hard_start_xmit = p80211knetdev_hard_start_xmit; 774 - dev->get_stats = p80211knetdev_get_stats; 775 - dev->init = p80211knetdev_init; 776 - dev->open = p80211knetdev_open; 777 - dev->stop = p80211knetdev_stop; 778 779 mutex_init(&wlandev->ioctl_lock); 780 /* block ioctls until fully initialised. Don't forget to call
··· 711 return 0; 712 } 713 714 + static const struct net_device_ops p80211_netdev_ops = { 715 + .ndo_init = p80211knetdev_init, 716 + .ndo_open = p80211knetdev_open, 717 + .ndo_stop = p80211knetdev_stop, 718 + .ndo_get_stats = p80211knetdev_get_stats, 719 + .ndo_start_xmit = p80211knetdev_hard_start_xmit, 720 + .ndo_set_multicast_list = p80211knetdev_set_multicast_list, 721 + .ndo_do_ioctl = p80211knetdev_do_ioctl, 722 + .ndo_set_mac_address = p80211knetdev_set_mac_address, 723 + .ndo_tx_timeout = p80211knetdev_tx_timeout, 724 + .ndo_change_mtu = wlan_change_mtu, 725 + .ndo_validate_addr = eth_validate_addr, 726 + }; 727 + 728 /*---------------------------------------------------------------- 729 * wlan_setup 730 * ··· 756 } else { 757 wlandev->netdev = dev; 758 dev->ml_priv = wlandev; 759 + dev->netdev_ops = &p80211_netdev_ops; 760 761 mutex_init(&wlandev->ioctl_lock); 762 /* block ioctls until fully initialised. Don't forget to call