Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6: (43 commits)
staging: slicoss: update README
otus/zdusb.c: additional USB identifier
Staging: go7007: fix build issues
Staging: sxg: Fix leaks and checksum errors in transmit code path
Staging: sxg: Fix sleep in atomic context warning while loading driver
Staging: sxg: Use correct queue_id for transmitting non-TCP packets
Staging: sxg: Fire watchdog timer at end of open routine to change the link
Staging: Pohmelfs: Add load balancing between network states with the same priority.
Staging: Pohmelfs: Added IO permissions and priorities.
Staging: Pohmelfs: Added ->show_stats() callback.
Staging: Pohmelfs: Drop ftrans debugging code.
Staging: Pohmelfs: Use wait_on_page_timeout when waiting for remote directory sync instead of hardcoded 25 seconds.
Staging: Pohmelfs: Reduce debugging noise about non-existing objects.
Staging: Pohmelfs: Sync fs before killing it, since dentry cache is shrunk before writeback is invoked via generic_shutdown_super()
Staging: Pohmelfs: Extend remount option.
Staging: Pohmelfs: Set NETFS_INODE_REMOTE_SYNCED and clear NETFS_INODE_OWNED bits in the root inode.
Staging: Pohmelfs: Added 'need_lock' variable into debug print.
Staging: Pohmelfs: Disable read lock in pohmelfs_getattr().
Staging: Pohmelfs: Move parent lock to the place where we really have to send a lookup request to the server.
Staging: pohmelfs: Populate dentry cache when receiving the new readdir entry.
...

+664 -372
+3 -2
Documentation/filesystems/pohmelfs/design_notes.txt
··· 56 56 data transfers. 57 57 58 58 POHMELFS clients operate with a working set of servers and are capable of balancing read-only 59 - operations (like lookups or directory listings) between them. 59 + operations (like lookups or directory listings) between them according to IO priorities. 60 60 Administrators can add or remove servers from the set at run-time via special commands (described 61 - in Documentation/pohmelfs/info.txt file). Writes are replicated to all servers. 61 + in Documentation/pohmelfs/info.txt file). Writes are replicated to all servers, which are connected 62 + with write permission turned on. IO priority and permissions can be changed in run-time. 62 63 63 64 POHMELFS is capable of full data channel encryption and/or strong crypto hashing. 64 65 One can select any kernel supported cipher, encryption mode, hash type and operation mode
+17 -4
Documentation/filesystems/pohmelfs/info.txt
··· 1 1 POHMELFS usage information. 2 2 3 - Mount options: 3 + Mount options. 4 + All but index, number of crypto threads and maximum IO size can be changed via remount. 5 + 4 6 idx=%u 5 7 Each mountpoint is associated with a special index via this option. 6 8 Administrator can add or remove servers from the given index, so all mounts, ··· 54 52 55 53 Usage examples. 56 54 57 - Add (or remove if it already exists) server server1.net:1025 into the working set with index $idx 55 + Add server server1.net:1025 into the working set with index $idx 58 56 with appropriate hash algorithm and key file and cipher algorithm, mode and key file: 59 - $cfg -a server1.net -p 1025 -i $idx -K $hash_key -k $cipher_key 57 + $cfg A add -a server1.net -p 1025 -i $idx -K $hash_key -k $cipher_key 60 58 61 59 Mount filesystem with given index $idx to /mnt mountpoint. 62 60 Client will connect to all servers specified in the working set via previous command: 63 61 mount -t pohmel -o idx=$idx q /mnt 64 62 65 - One can add or remove servers from working set after mounting too. 63 + Change permissions to read-only (-I 1 option, '-I 2' - write-only, 3 - rw): 64 + $cfg A modify -a server1.net -p 1025 -i $idx -I 1 66 65 66 + Change IO priority to 123 (node with the highest priority gets read requests). 67 + $cfg A modify -a server1.net -p 1025 -i $idx -P 123 68 + 69 + One can check current status of all connections in the mountstats file: 70 + # cat /proc/$PID/mountstats 71 + ... 72 + device none mounted on /mnt with fstype pohmel 73 + idx addr(:port) socket_type protocol active priority permissions 74 + 0 server1.net:1026 1 6 1 250 1 75 + 0 server2.net:1025 1 6 1 123 3 67 76 68 77 Server installation. 69 78
+152 -49
drivers/staging/android/binder.c
··· 41 41 static struct proc_dir_entry *binder_proc_dir_entry_root; 42 42 static struct proc_dir_entry *binder_proc_dir_entry_proc; 43 43 static struct hlist_head binder_dead_nodes; 44 + static HLIST_HEAD(binder_deferred_list); 45 + static DEFINE_MUTEX(binder_deferred_lock); 44 46 45 47 static int binder_read_proc_proc( 46 48 char *page, char **start, off_t off, int count, int *eof, void *data); ··· 56 54 #define SZ_4M 0x400000 57 55 #endif 58 56 59 - #ifndef __i386__ 60 - #define FORBIDDEN_MMAP_FLAGS (VM_WRITE | VM_EXEC) 61 - #else 62 57 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) 63 - #endif 64 58 65 59 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) 66 60 ··· 234 236 uint8_t data[0]; 235 237 }; 236 238 239 + enum { 240 + BINDER_DEFERRED_PUT_FILES = 0x01, 241 + BINDER_DEFERRED_FLUSH = 0x02, 242 + BINDER_DEFERRED_RELEASE = 0x04, 243 + }; 244 + 237 245 struct binder_proc { 238 246 struct hlist_node proc_node; 239 247 struct rb_root threads; ··· 249 245 int pid; 250 246 struct vm_area_struct *vma; 251 247 struct task_struct *tsk; 248 + struct files_struct *files; 249 + struct hlist_node deferred_work_node; 250 + int deferred_work; 252 251 void *buffer; 253 - size_t user_buffer_offset; 252 + ptrdiff_t user_buffer_offset; 254 253 255 254 struct list_head buffers; 256 255 struct rb_root free_buffers; ··· 317 310 uid_t sender_euid; 318 311 }; 319 312 313 + static void binder_defer_work(struct binder_proc *proc, int defer); 314 + 320 315 /* 321 316 * copied from get_unused_fd_flags 322 317 */ 323 - int task_get_unused_fd_flags(struct task_struct *tsk, int flags) 318 + int task_get_unused_fd_flags(struct binder_proc *proc, int flags) 324 319 { 325 - struct files_struct *files = get_files_struct(tsk); 320 + struct files_struct *files = proc->files; 326 321 int fd, error; 327 322 struct fdtable *fdt; 328 323 unsigned long rlim_cur; ··· 346 337 * will limit the total number of files that can be opened. 
347 338 */ 348 339 rlim_cur = 0; 349 - if (lock_task_sighand(tsk, &irqs)) { 350 - rlim_cur = tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur; 351 - unlock_task_sighand(tsk, &irqs); 340 + if (lock_task_sighand(proc->tsk, &irqs)) { 341 + rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur; 342 + unlock_task_sighand(proc->tsk, &irqs); 352 343 } 353 344 if (fd >= rlim_cur) 354 345 goto out; ··· 384 375 385 376 out: 386 377 spin_unlock(&files->file_lock); 387 - put_files_struct(files); 388 378 return error; 389 379 } 390 380 ··· 391 383 * copied from fd_install 392 384 */ 393 385 static void task_fd_install( 394 - struct task_struct *tsk, unsigned int fd, struct file *file) 386 + struct binder_proc *proc, unsigned int fd, struct file *file) 395 387 { 396 - struct files_struct *files = get_files_struct(tsk); 388 + struct files_struct *files = proc->files; 397 389 struct fdtable *fdt; 398 390 399 391 if (files == NULL) ··· 404 396 BUG_ON(fdt->fd[fd] != NULL); 405 397 rcu_assign_pointer(fdt->fd[fd], file); 406 398 spin_unlock(&files->file_lock); 407 - put_files_struct(files); 408 399 } 409 400 410 401 /* ··· 420 413 /* 421 414 * copied from sys_close 422 415 */ 423 - static long task_close_fd(struct task_struct *tsk, unsigned int fd) 416 + static long task_close_fd(struct binder_proc *proc, unsigned int fd) 424 417 { 425 418 struct file *filp; 426 - struct files_struct *files = get_files_struct(tsk); 419 + struct files_struct *files = proc->files; 427 420 struct fdtable *fdt; 428 421 int retval; 429 422 ··· 450 443 retval == -ERESTART_RESTARTBLOCK)) 451 444 retval = -EINTR; 452 445 453 - put_files_struct(files); 454 446 return retval; 455 447 456 448 out_unlock: 457 449 spin_unlock(&files->file_lock); 458 - put_files_struct(files); 459 450 return -EBADF; 460 451 } 461 452 ··· 623 618 proc->pid, page_addr); 624 619 goto err_map_kernel_failed; 625 620 } 626 - user_page_addr = (size_t)page_addr + proc->user_buffer_offset; 621 + user_page_addr = 622 + (uintptr_t)page_addr + 
proc->user_buffer_offset; 627 623 ret = vm_insert_page(vma, user_page_addr, page[0]); 628 624 if (ret) { 629 625 printk(KERN_ERR "binder: %d: binder_alloc_buf failed " ··· 645 639 page_addr -= PAGE_SIZE) { 646 640 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; 647 641 if (vma) 648 - zap_page_range(vma, (size_t)page_addr + 642 + zap_page_range(vma, (uintptr_t)page_addr + 649 643 proc->user_buffer_offset, PAGE_SIZE, NULL); 650 644 err_vm_insert_page_failed: 651 645 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); ··· 726 720 "er %p size %zd\n", proc->pid, size, buffer, buffer_size); 727 721 728 722 has_page_addr = 729 - (void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK); 723 + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); 730 724 if (n == NULL) { 731 725 if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) 732 726 buffer_size = size; /* no room for other buffers */ 733 727 else 734 728 buffer_size = size + sizeof(struct binder_buffer); 735 729 } 736 - end_page_addr = (void *)PAGE_ALIGN((size_t)buffer->data + buffer_size); 730 + end_page_addr = 731 + (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); 737 732 if (end_page_addr > has_page_addr) 738 733 end_page_addr = has_page_addr; 739 734 if (binder_update_page_range(proc, 1, 740 - (void *)PAGE_ALIGN((size_t)buffer->data), end_page_addr, NULL)) 735 + (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) 741 736 return NULL; 742 737 743 738 rb_erase(best_fit, &proc->free_buffers); ··· 769 762 770 763 static void *buffer_start_page(struct binder_buffer *buffer) 771 764 { 772 - return (void *)((size_t)buffer & PAGE_MASK); 765 + return (void *)((uintptr_t)buffer & PAGE_MASK); 773 766 } 774 767 775 768 static void *buffer_end_page(struct binder_buffer *buffer) 776 769 { 777 - return (void *)(((size_t)(buffer + 1) - 1) & PAGE_MASK); 770 + return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); 778 771 } 779 772 780 773 static void 
binder_delete_free_buffer( ··· 852 845 } 853 846 854 847 binder_update_page_range(proc, 0, 855 - (void *)PAGE_ALIGN((size_t)buffer->data), 856 - (void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK), 848 + (void *)PAGE_ALIGN((uintptr_t)buffer->data), 849 + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), 857 850 NULL); 858 851 rb_erase(&buffer->rb_node, &proc->allocated_buffers); 859 852 buffer->free = 1; ··· 1352 1345 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 1353 1346 struct binder_transaction *tmp; 1354 1347 tmp = thread->transaction_stack; 1348 + if (tmp->to_thread != thread) { 1349 + binder_user_error("binder: %d:%d got new " 1350 + "transaction with bad transaction stack" 1351 + ", transaction %d has target %d:%d\n", 1352 + proc->pid, thread->pid, tmp->debug_id, 1353 + tmp->to_proc ? tmp->to_proc->pid : 0, 1354 + tmp->to_thread ? 1355 + tmp->to_thread->pid : 0); 1356 + return_error = BR_FAILED_REPLY; 1357 + goto err_bad_call_stack; 1358 + } 1355 1359 while (tmp) { 1356 1360 if (tmp->from && tmp->from->proc == target_proc) 1357 1361 target_thread = tmp->from; ··· 1452 1434 return_error = BR_FAILED_REPLY; 1453 1435 goto err_copy_data_failed; 1454 1436 } 1437 + if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) { 1438 + binder_user_error("binder: %d:%d got transaction with " 1439 + "invalid offsets size, %zd\n", 1440 + proc->pid, thread->pid, tr->offsets_size); 1441 + return_error = BR_FAILED_REPLY; 1442 + goto err_bad_offset; 1443 + } 1455 1444 off_end = (void *)offp + tr->offsets_size; 1456 1445 for (; offp < off_end; offp++) { 1457 1446 struct flat_binder_object *fp; 1458 - if (*offp > t->buffer->data_size - sizeof(*fp)) { 1447 + if (*offp > t->buffer->data_size - sizeof(*fp) || 1448 + t->buffer->data_size < sizeof(*fp) || 1449 + !IS_ALIGNED(*offp, sizeof(void *))) { 1459 1450 binder_user_error("binder: %d:%d got transaction with " 1460 1451 "invalid offset, %zd\n", 1461 1452 proc->pid, thread->pid, *offp); ··· 1571 1544 
return_error = BR_FAILED_REPLY; 1572 1545 goto err_fget_failed; 1573 1546 } 1574 - target_fd = task_get_unused_fd_flags(target_proc->tsk, O_CLOEXEC); 1547 + target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); 1575 1548 if (target_fd < 0) { 1576 1549 fput(file); 1577 1550 return_error = BR_FAILED_REPLY; 1578 1551 goto err_get_unused_fd_failed; 1579 1552 } 1580 - task_fd_install(target_proc->tsk, target_fd, file); 1553 + task_fd_install(target_proc, target_fd, file); 1581 1554 if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) 1582 1555 printk(KERN_INFO " fd %ld -> %d\n", fp->handle, target_fd); 1583 1556 /* TODO: fput? */ ··· 1682 1655 off_end = (void *)offp + buffer->offsets_size; 1683 1656 for (; offp < off_end; offp++) { 1684 1657 struct flat_binder_object *fp; 1685 - if (*offp > buffer->data_size - sizeof(*fp)) { 1658 + if (*offp > buffer->data_size - sizeof(*fp) || 1659 + buffer->data_size < sizeof(*fp) || 1660 + !IS_ALIGNED(*offp, sizeof(void *))) { 1686 1661 printk(KERN_ERR "binder: transaction release %d bad" 1687 1662 "offset %zd, size %zd\n", debug_id, *offp, buffer->data_size); 1688 1663 continue; ··· 1720 1691 if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) 1721 1692 printk(KERN_INFO " fd %ld\n", fp->handle); 1722 1693 if (failed_at) 1723 - task_close_fd(proc->tsk, fp->handle); 1694 + task_close_fd(proc, fp->handle); 1724 1695 break; 1725 1696 1726 1697 default: ··· 2369 2340 2370 2341 tr.data_size = t->buffer->data_size; 2371 2342 tr.offsets_size = t->buffer->offsets_size; 2372 - tr.data.ptr.buffer = (void *)((void *)t->buffer->data + proc->user_buffer_offset); 2343 + tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset; 2373 2344 tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); 2374 2345 2375 2346 if (put_user(cmd, (uint32_t __user *)ptr)) ··· 2685 2656 (unsigned long)pgprot_val(vma->vm_page_prot)); 2686 2657 dump_stack(); 2687 2658 } 2659 + 2688 2660 static void 
binder_vma_close(struct vm_area_struct *vma) 2689 2661 { 2690 2662 struct binder_proc *proc = vma->vm_private_data; ··· 2696 2666 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 2697 2667 (unsigned long)pgprot_val(vma->vm_page_prot)); 2698 2668 proc->vma = NULL; 2669 + binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); 2699 2670 } 2700 2671 2701 2672 static struct vm_operations_struct binder_vm_ops = { ··· 2729 2698 } 2730 2699 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; 2731 2700 2701 + if (proc->buffer) { 2702 + ret = -EBUSY; 2703 + failure_string = "already mapped"; 2704 + goto err_already_mapped; 2705 + } 2706 + 2732 2707 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); 2733 2708 if (area == NULL) { 2734 2709 ret = -ENOMEM; ··· 2742 2705 goto err_get_vm_area_failed; 2743 2706 } 2744 2707 proc->buffer = area->addr; 2745 - proc->user_buffer_offset = vma->vm_start - (size_t)proc->buffer; 2708 + proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; 2746 2709 2747 2710 #ifdef CONFIG_CPU_CACHE_VIPT 2748 2711 if (cache_is_vipt_aliasing()) { ··· 2775 2738 binder_insert_free_buffer(proc, buffer); 2776 2739 proc->free_async_space = proc->buffer_size / 2; 2777 2740 barrier(); 2741 + proc->files = get_files_struct(current); 2778 2742 proc->vma = vma; 2779 2743 2780 2744 /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ ··· 2783 2745 2784 2746 err_alloc_small_buf_failed: 2785 2747 kfree(proc->pages); 2748 + proc->pages = NULL; 2786 2749 err_alloc_pages_failed: 2787 2750 vfree(proc->buffer); 2751 + proc->buffer = NULL; 2788 2752 err_get_vm_area_failed: 2789 - mutex_unlock(&binder_lock); 2753 + err_already_mapped: 2790 2754 err_bad_arg: 2791 2755 printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 2792 2756 return ret; ··· 2820 2780 if (binder_proc_dir_entry_proc) { 2821 2781 char strbuf[11]; 2822 
2782 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 2783 + remove_proc_entry(strbuf, binder_proc_dir_entry_proc); 2823 2784 create_proc_read_entry(strbuf, S_IRUGO, binder_proc_dir_entry_proc, binder_read_proc_proc, proc); 2824 2785 } 2825 2786 ··· 2829 2788 2830 2789 static int binder_flush(struct file *filp, fl_owner_t id) 2831 2790 { 2832 - struct rb_node *n; 2833 2791 struct binder_proc *proc = filp->private_data; 2834 - int wake_count = 0; 2835 2792 2836 - mutex_lock(&binder_lock); 2793 + binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 2794 + 2795 + return 0; 2796 + } 2797 + 2798 + static void binder_deferred_flush(struct binder_proc *proc) 2799 + { 2800 + struct rb_node *n; 2801 + int wake_count = 0; 2837 2802 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 2838 2803 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 2839 2804 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; ··· 2849 2802 } 2850 2803 } 2851 2804 wake_up_interruptible_all(&proc->wait); 2852 - mutex_unlock(&binder_lock); 2853 2805 2854 2806 if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) 2855 2807 printk(KERN_INFO "binder_flush: %d woke %d threads\n", proc->pid, wake_count); 2856 - 2857 - return 0; 2858 2808 } 2859 2809 2860 2810 static int binder_release(struct inode *nodp, struct file *filp) 2861 2811 { 2862 - struct hlist_node *pos; 2863 - struct binder_transaction *t; 2864 - struct rb_node *n; 2865 2812 struct binder_proc *proc = filp->private_data; 2866 - int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count; 2867 - 2868 2813 if (binder_proc_dir_entry_proc) { 2869 2814 char strbuf[11]; 2870 2815 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 2871 2816 remove_proc_entry(strbuf, binder_proc_dir_entry_proc); 2872 2817 } 2873 - mutex_lock(&binder_lock); 2818 + 2819 + binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 2820 + 2821 + return 0; 2822 + } 2823 + 2824 + static void binder_deferred_release(struct 
binder_proc *proc) 2825 + { 2826 + struct hlist_node *pos; 2827 + struct binder_transaction *t; 2828 + struct rb_node *n; 2829 + int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count; 2830 + 2831 + BUG_ON(proc->vma); 2832 + BUG_ON(proc->files); 2833 + 2874 2834 hlist_del(&proc->proc_node); 2875 2835 if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { 2876 2836 if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER) ··· 2951 2897 } 2952 2898 2953 2899 binder_stats.obj_deleted[BINDER_STAT_PROC]++; 2954 - mutex_unlock(&binder_lock); 2955 2900 2956 2901 page_count = 0; 2957 2902 if (proc->pages) { ··· 2974 2921 proc->pid, threads, nodes, incoming_refs, outgoing_refs, active_transactions, buffers, page_count); 2975 2922 2976 2923 kfree(proc); 2977 - return 0; 2924 + } 2925 + 2926 + static void binder_deferred_func(struct work_struct *work) 2927 + { 2928 + struct binder_proc *proc; 2929 + struct files_struct *files; 2930 + 2931 + int defer; 2932 + do { 2933 + mutex_lock(&binder_lock); 2934 + mutex_lock(&binder_deferred_lock); 2935 + if (!hlist_empty(&binder_deferred_list)) { 2936 + proc = hlist_entry(binder_deferred_list.first, 2937 + struct binder_proc, deferred_work_node); 2938 + hlist_del_init(&proc->deferred_work_node); 2939 + defer = proc->deferred_work; 2940 + proc->deferred_work = 0; 2941 + } else { 2942 + proc = NULL; 2943 + defer = 0; 2944 + } 2945 + mutex_unlock(&binder_deferred_lock); 2946 + 2947 + files = NULL; 2948 + if (defer & BINDER_DEFERRED_PUT_FILES) 2949 + if ((files = proc->files)) 2950 + proc->files = NULL; 2951 + 2952 + if (defer & BINDER_DEFERRED_FLUSH) 2953 + binder_deferred_flush(proc); 2954 + 2955 + if (defer & BINDER_DEFERRED_RELEASE) 2956 + binder_deferred_release(proc); /* frees proc */ 2957 + 2958 + mutex_unlock(&binder_lock); 2959 + if (files) 2960 + put_files_struct(files); 2961 + } while (proc); 2962 + } 2963 + static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 2964 + 2965 + 
static void binder_defer_work(struct binder_proc *proc, int defer) 2966 + { 2967 + mutex_lock(&binder_deferred_lock); 2968 + proc->deferred_work |= defer; 2969 + if (hlist_unhashed(&proc->deferred_work_node)) { 2970 + hlist_add_head(&proc->deferred_work_node, 2971 + &binder_deferred_list); 2972 + schedule_work(&binder_deferred_work); 2973 + } 2974 + mutex_unlock(&binder_deferred_lock); 2978 2975 } 2979 2976 2980 2977 static char *print_binder_transaction(char *buf, char *end, const char *prefix, struct binder_transaction *t)
+13 -7
drivers/staging/at76_usb/at76_usb.c
··· 5259 5259 return 0; 5260 5260 } 5261 5261 5262 + static const struct net_device_ops at76_netdev_ops = { 5263 + .ndo_open = at76_open, 5264 + .ndo_stop = at76_stop, 5265 + .ndo_get_stats = at76_get_stats, 5266 + .ndo_start_xmit = at76_tx, 5267 + .ndo_tx_timeout = at76_tx_timeout, 5268 + .ndo_set_multicast_list = at76_set_multicast, 5269 + .ndo_set_mac_address = at76_set_mac_address, 5270 + .ndo_validate_addr = eth_validate_addr, 5271 + .ndo_change_mtu = eth_change_mtu, 5272 + }; 5273 + 5262 5274 /* Register network device and initialize the hardware */ 5263 5275 static int at76_init_new_device(struct at76_priv *priv, 5264 5276 struct usb_interface *interface) ··· 5315 5303 priv->scan_mode = SCAN_TYPE_ACTIVE; 5316 5304 5317 5305 netdev->flags &= ~IFF_MULTICAST; /* not yet or never */ 5318 - netdev->open = at76_open; 5319 - netdev->stop = at76_stop; 5320 - netdev->get_stats = at76_get_stats; 5306 + netdev->netdev_ops = &at76_netdev_ops; 5321 5307 netdev->ethtool_ops = &at76_ethtool_ops; 5322 5308 5323 5309 /* Add pointers to enable iwspy support. */ 5324 5310 priv->wireless_data.spy_data = &priv->spy_data; 5325 5311 netdev->wireless_data = &priv->wireless_data; 5326 5312 5327 - netdev->hard_start_xmit = at76_tx; 5328 - netdev->tx_timeout = at76_tx_timeout; 5329 5313 netdev->watchdog_timeo = 2 * HZ; 5330 5314 netdev->wireless_handlers = &at76_handler_def; 5331 - netdev->set_multicast_list = at76_set_multicast; 5332 - netdev->set_mac_address = at76_set_mac_address; 5333 5315 dev_alloc_name(netdev, "wlan%d"); 5334 5316 5335 5317 ret = register_netdev(priv->netdev);
+12 -5
drivers/staging/epl/VirtualEthernetLinux.c
··· 284 284 return Ret; 285 285 } 286 286 287 + static const struct net_device_ops epl_netdev_ops = { 288 + .ndo_open = VEthOpen, 289 + .ndo_stop = VEthClose, 290 + .ndo_get_stats = VEthGetStats, 291 + .ndo_start_xmit = VEthXmit, 292 + .ndo_tx_timeout = VEthTimeout, 293 + .ndo_change_mtu = eth_change_mtu, 294 + .ndo_set_mac_address = eth_mac_addr, 295 + .ndo_validate_addr = eth_validate_addr, 296 + }; 297 + 287 298 tEplKernel VEthAddInstance(tEplDllkInitParam *pInitParam_p) 288 299 { 289 300 tEplKernel Ret = kEplSuccessful; ··· 310 299 goto Exit; 311 300 } 312 301 313 - pVEthNetDevice_g->open = VEthOpen; 314 - pVEthNetDevice_g->stop = VEthClose; 315 - pVEthNetDevice_g->get_stats = VEthGetStats; 316 - pVEthNetDevice_g->hard_start_xmit = VEthXmit; 317 - pVEthNetDevice_g->tx_timeout = VEthTimeout; 302 + pVEthNetDevice_g->netdev_ops = &epl_netdev_ops; 318 303 pVEthNetDevice_g->watchdog_timeo = EPL_VETH_TX_TIMEOUT; 319 304 pVEthNetDevice_g->destructor = free_netdev; 320 305
+14 -9
drivers/staging/et131x/et131x_netdev.c
··· 112 112 void et131x_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); 113 113 void et131x_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 114 114 115 + static const struct net_device_ops et131x_netdev_ops = { 116 + .ndo_open = et131x_open, 117 + .ndo_stop = et131x_close, 118 + .ndo_start_xmit = et131x_tx, 119 + .ndo_set_multicast_list = et131x_multicast, 120 + .ndo_tx_timeout = et131x_tx_timeout, 121 + .ndo_change_mtu = et131x_change_mtu, 122 + .ndo_set_mac_address = et131x_set_mac_addr, 123 + .ndo_validate_addr = eth_validate_addr, 124 + .ndo_get_stats = et131x_stats, 125 + .ndo_do_ioctl = et131x_ioctl, 126 + }; 127 + 115 128 /** 116 129 * et131x_device_alloc 117 130 * ··· 155 142 */ 156 143 //netdev->init = &et131x_init; 157 144 //netdev->set_config = &et131x_config; 158 - netdev->get_stats = &et131x_stats; 159 - netdev->open = &et131x_open; 160 - netdev->stop = &et131x_close; 161 - netdev->do_ioctl = &et131x_ioctl; 162 - netdev->set_multicast_list = &et131x_multicast; 163 - netdev->hard_start_xmit = &et131x_tx; 164 - netdev->tx_timeout = &et131x_tx_timeout; 165 145 netdev->watchdog_timeo = ET131X_TX_TIMEOUT; 166 - netdev->change_mtu = &et131x_change_mtu; 167 - netdev->set_mac_address = &et131x_set_mac_addr; 146 + netdev->netdev_ops = &et131x_netdev_ops; 168 147 169 148 //netdev->ethtool_ops = &et131x_ethtool_ops; 170 149
-15
drivers/staging/go7007/go7007-driver.c
··· 268 268 init_i2c_module(&go->i2c_adapter, 269 269 go->board_info->i2c_devs[i].id, 270 270 go->board_info->i2c_devs[i].addr); 271 - #ifdef TUNER_SET_TYPE_ADDR 272 - if (go->tuner_type >= 0) { 273 - struct tuner_setup tun_setup = { 274 - .mode_mask = T_ANALOG_TV, 275 - .addr = ADDR_UNSET, 276 - .type = go->tuner_type 277 - }; 278 - i2c_clients_command(&go->i2c_adapter, 279 - TUNER_SET_TYPE_ADDR, &tun_setup); 280 - } 281 - #else 282 - if (go->tuner_type >= 0) 283 - i2c_clients_command(&go->i2c_adapter, 284 - TUNER_SET_TYPE, &go->tuner_type); 285 - #endif 286 271 if (go->board_id == GO7007_BOARDID_ADLINK_MPG24) 287 272 i2c_clients_command(&go->i2c_adapter, 288 273 DECODER_SET_CHANNEL, &go->channel_number);
+2
drivers/staging/go7007/wis-sony-tuner.c
··· 386 386 struct wis_sony_tuner *t = i2c_get_clientdata(client); 387 387 388 388 switch (cmd) { 389 + #if 0 389 390 #ifdef TUNER_SET_TYPE_ADDR 390 391 case TUNER_SET_TYPE_ADDR: 391 392 { ··· 464 463 t->type, sony_tuners[t->type - 200].name); 465 464 break; 466 465 } 466 + #endif 467 467 case VIDIOC_G_FREQUENCY: 468 468 { 469 469 struct v4l2_frequency *f = arg;
+5 -4
drivers/staging/line6/audio.c
··· 27 27 { 28 28 static int dev; 29 29 struct snd_card *card; 30 + int err; 30 31 31 - card = snd_card_new(line6_index[dev], line6_id[dev], THIS_MODULE, 0); 32 - 33 - if (card == NULL) 34 - return -ENOMEM; 32 + err = snd_card_create(line6_index[dev], line6_id[dev], THIS_MODULE, 0, 33 + &card); 34 + if (err < 0) 35 + return err; 35 36 36 37 line6->card = card; 37 38
+29 -18
drivers/staging/otus/usbdrv.c
··· 822 822 return 0; 823 823 } 824 824 825 + static const struct net_device_ops vap_netdev_ops = { 826 + .ndo_open = zfLnxVapOpen, 827 + .ndo_stop = zfLnxVapClose, 828 + .ndo_start_xmit = zfLnxVapXmitFrame, 829 + .ndo_get_stats = usbdrv_get_stats, 830 + .ndo_change_mtu = usbdrv_change_mtu, 831 + .ndo_validate_addr = eth_validate_addr, 832 + .ndo_set_mac_address = eth_mac_addr, 833 + #ifdef ZM_HOSTAPD_SUPPORT 834 + .ndo_do_ioctl = usbdrv_ioctl, 835 + #else 836 + .ndo_do_ioctl = NULL, 837 + #endif 838 + }; 839 + 825 840 int zfLnxRegisterVapDev(struct net_device* parentDev, u16_t vapId) 826 841 { 827 842 /* Allocate net device structure */ ··· 861 846 vap[vapId].dev->ml_priv = parentDev->ml_priv; 862 847 863 848 //dev->hard_start_xmit = &zd1212_wds_xmit_frame; 864 - vap[vapId].dev->hard_start_xmit = &zfLnxVapXmitFrame; 865 - vap[vapId].dev->open = &zfLnxVapOpen; 866 - vap[vapId].dev->stop = &zfLnxVapClose; 867 - vap[vapId].dev->get_stats = &usbdrv_get_stats; 868 - vap[vapId].dev->change_mtu = &usbdrv_change_mtu; 869 - #ifdef ZM_HOSTAPD_SUPPORT 870 - vap[vapId].dev->do_ioctl = usbdrv_ioctl; 871 - #else 872 - vap[vapId].dev->do_ioctl = NULL; 873 - #endif 849 + vap[vapId].dev->netdev_ops = &vap_netdev_ops; 874 850 vap[vapId].dev->destructor = free_netdev; 875 851 876 852 vap[vapId].dev->tx_queue_len = 0; ··· 1074 1068 usb_unlink_urb(macp->RegInUrb); 1075 1069 } 1076 1070 1071 + static const struct net_device_ops otus_netdev_ops = { 1072 + .ndo_open = usbdrv_open, 1073 + .ndo_stop = usbdrv_close, 1074 + .ndo_start_xmit = usbdrv_xmit_frame, 1075 + .ndo_change_mtu = usbdrv_change_mtu, 1076 + .ndo_get_stats = usbdrv_get_stats, 1077 + .ndo_set_multicast_list = usbdrv_set_multi, 1078 + .ndo_set_mac_address = usbdrv_set_mac, 1079 + .ndo_do_ioctl = usbdrv_ioctl, 1080 + .ndo_validate_addr = eth_validate_addr, 1081 + }; 1082 + 1077 1083 u8_t zfLnxInitSetup(struct net_device *dev, struct usbdrv_private *macp) 1078 1084 { 1079 1085 //unsigned char addr[6]; ··· 1110 1092 
dev->wireless_handlers = (struct iw_handler_def *)&p80211wext_handler_def; 1111 1093 #endif 1112 1094 1113 - dev->open = usbdrv_open; 1114 - dev->hard_start_xmit = usbdrv_xmit_frame; 1115 - dev->stop = usbdrv_close; 1116 - dev->change_mtu = &usbdrv_change_mtu; 1117 - dev->get_stats = usbdrv_get_stats; 1118 - dev->set_multicast_list = usbdrv_set_multi; 1119 - dev->set_mac_address = usbdrv_set_mac; 1120 - dev->do_ioctl = usbdrv_ioctl; 1095 + dev->netdev_ops = &otus_netdev_ops; 1121 1096 1122 1097 dev->flags |= IFF_MULTICAST; 1123 1098
+2 -1
drivers/staging/otus/zdusb.c
··· 48 48 static struct usb_device_id zd1221_ids [] = { 49 49 { USB_DEVICE(VENDOR_ATHR, PRODUCT_AR9170) }, 50 50 { USB_DEVICE(VENDOR_DLINK, PRODUCT_DWA160A) }, 51 - { USB_DEVICE(0x0846, 0x9010) }, 51 + { USB_DEVICE(VENDOR_NETGEAR, PRODUCT_WNDA3100) }, 52 + { USB_DEVICE(VENDOR_NETGEAR, PRODUCT_WN111v2) }, 52 53 { } /* Terminating entry */ 53 54 }; 54 55
+4
drivers/staging/otus/zdusb.h
··· 40 40 #define VENDOR_DLINK 0x07D1 //Dlink 41 41 #define PRODUCT_DWA160A 0x3C10 42 42 43 + #define VENDOR_NETGEAR 0x0846 /* NetGear */ 44 + #define PRODUCT_WNDA3100 0x9010 45 + #define PRODUCT_WN111v2 0x9001 46 + 43 47 #endif
+56 -5
drivers/staging/pohmelfs/config.c
··· 81 81 return g; 82 82 } 83 83 84 + static inline void pohmelfs_insert_config_entry(struct pohmelfs_sb *psb, struct pohmelfs_config *dst) 85 + { 86 + struct pohmelfs_config *tmp; 87 + 88 + INIT_LIST_HEAD(&dst->config_entry); 89 + 90 + list_for_each_entry(tmp, &psb->state_list, config_entry) { 91 + if (dst->state.ctl.prio > tmp->state.ctl.prio) 92 + list_add_tail(&dst->config_entry, &tmp->config_entry); 93 + } 94 + if (list_empty(&dst->config_entry)) 95 + list_add_tail(&dst->config_entry, &psb->state_list); 96 + } 97 + 98 + static int pohmelfs_move_config_entry(struct pohmelfs_sb *psb, 99 + struct pohmelfs_config *dst, struct pohmelfs_config *new) 100 + { 101 + if ((dst->state.ctl.prio == new->state.ctl.prio) && 102 + (dst->state.ctl.perm == new->state.ctl.perm)) 103 + return 0; 104 + 105 + dprintk("%s: dst: prio: %d, perm: %x, new: prio: %d, perm: %d.\n", 106 + __func__, dst->state.ctl.prio, dst->state.ctl.perm, 107 + new->state.ctl.prio, new->state.ctl.perm); 108 + dst->state.ctl.prio = new->state.ctl.prio; 109 + dst->state.ctl.perm = new->state.ctl.perm; 110 + 111 + list_del_init(&dst->config_entry); 112 + pohmelfs_insert_config_entry(psb, dst); 113 + return 0; 114 + } 115 + 116 + /* 117 + * pohmelfs_copy_config() is used to copy new state configs from the 118 + * config group (controlled by the netlink messages) into the superblock. 119 + * This happens either at startup time where no transactions can access 120 + * the list of the configs (and thus list of the network states), or at 121 + * run-time, where it is protected by the psb->state_lock. 
122 + */ 84 123 int pohmelfs_copy_config(struct pohmelfs_sb *psb) 85 124 { 86 125 struct pohmelfs_config_group *g; ··· 142 103 err = 0; 143 104 list_for_each_entry(dst, &psb->state_list, config_entry) { 144 105 if (pohmelfs_config_eql(&dst->state.ctl, &c->state.ctl)) { 145 - err = -EEXIST; 106 + err = pohmelfs_move_config_entry(psb, dst, c); 107 + if (!err) 108 + err = -EEXIST; 146 109 break; 147 110 } 148 111 } ··· 160 119 161 120 memcpy(&dst->state.ctl, &c->state.ctl, sizeof(struct pohmelfs_ctl)); 162 121 163 - list_add_tail(&dst->config_entry, &psb->state_list); 122 + pohmelfs_insert_config_entry(psb, dst); 164 123 165 124 err = pohmelfs_state_init_one(psb, dst); 166 125 if (err) { ··· 289 248 return err; 290 249 } 291 250 251 + static int pohmelfs_modify_config(struct pohmelfs_ctl *old, struct pohmelfs_ctl *new) 252 + { 253 + old->perm = new->perm; 254 + old->prio = new->prio; 255 + return 0; 256 + } 257 + 292 258 static int pohmelfs_cn_ctl(struct cn_msg *msg, int action) 293 259 { 294 260 struct pohmelfs_config_group *g; ··· 326 278 g->num_entry--; 327 279 kfree(c); 328 280 goto out_unlock; 281 + } else if (action == POHMELFS_FLAGS_MODIFY) { 282 + err = pohmelfs_modify_config(sc, ctl); 283 + goto out_unlock; 329 284 } else { 330 285 err = -EEXIST; 331 286 goto out_unlock; ··· 347 296 } 348 297 memcpy(&c->state.ctl, ctl, sizeof(struct pohmelfs_ctl)); 349 298 g->num_entry++; 299 + 350 300 list_add_tail(&c->config_entry, &g->config_list); 351 301 352 302 out_unlock: ··· 453 401 454 402 switch (msg->flags) { 455 403 case POHMELFS_FLAGS_ADD: 456 - err = pohmelfs_cn_ctl(msg, POHMELFS_FLAGS_ADD); 457 - break; 458 404 case POHMELFS_FLAGS_DEL: 459 - err = pohmelfs_cn_ctl(msg, POHMELFS_FLAGS_DEL); 405 + case POHMELFS_FLAGS_MODIFY: 406 + err = pohmelfs_cn_ctl(msg, msg->flags); 460 407 break; 461 408 case POHMELFS_FLAGS_SHOW: 462 409 err = pohmelfs_cn_disp(msg);
+12 -12
drivers/staging/pohmelfs/dir.c
··· 328 328 { 329 329 struct inode *inode = &pi->vfs_inode; 330 330 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb); 331 - long ret = msecs_to_jiffies(25000); 331 + long ret = psb->wait_on_page_timeout; 332 332 int err; 333 333 334 334 dprintk("%s: dir: %llu, state: %lx: remote_synced: %d.\n", ··· 389 389 dprintk("%s: parent: %llu, fpos: %llu, hash: %08lx.\n", 390 390 __func__, pi->ino, (u64)file->f_pos, 391 391 (unsigned long)file->private_data); 392 - 392 + #if 0 393 393 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK); 394 394 if (err) 395 395 return err; 396 - 396 + #endif 397 397 err = pohmelfs_sync_remote_dir(pi); 398 398 if (err) 399 399 return err; ··· 513 513 514 514 need_lock = pohmelfs_need_lock(parent, lock_type); 515 515 516 - err = pohmelfs_data_lock(parent, 0, ~0, lock_type); 517 - if (err) 518 - goto out; 519 - 520 516 str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0); 521 517 522 518 mutex_lock(&parent->offset_lock); ··· 521 525 ino = n->ino; 522 526 mutex_unlock(&parent->offset_lock); 523 527 524 - dprintk("%s: 1 ino: %lu, inode: %p, name: '%s', hash: %x, parent_state: %lx.\n", 525 - __func__, ino, inode, str.name, str.hash, parent->state); 528 + dprintk("%s: start ino: %lu, inode: %p, name: '%s', hash: %x, parent_state: %lx, need_lock: %d.\n", 529 + __func__, ino, inode, str.name, str.hash, parent->state, need_lock); 526 530 527 531 if (ino) { 528 532 inode = ilookup(dir->i_sb, ino); ··· 530 534 goto out; 531 535 } 532 536 533 - dprintk("%s: dir: %p, dir_ino: %llu, name: '%s', len: %u, dir_state: %lx, ino: %lu.\n", 537 + dprintk("%s: no inode dir: %p, dir_ino: %llu, name: '%s', len: %u, dir_state: %lx, ino: %lu.\n", 534 538 __func__, dir, parent->ino, 535 539 str.name, str.len, parent->state, ino); 536 540 ··· 538 542 if (!need_lock) 539 543 goto out; 540 544 } 545 + 546 + err = pohmelfs_data_lock(parent, 0, ~0, lock_type); 547 + if (err) 548 + goto out; 541 549 542 550 err = pohmelfs_lookup_single(parent, &str, ino); 543 551 
if (err) ··· 557 557 558 558 if (ino) { 559 559 inode = ilookup(dir->i_sb, ino); 560 - printk("%s: second lookup ino: %lu, inode: %p, name: '%s', hash: %x.\n", 560 + dprintk("%s: second lookup ino: %lu, inode: %p, name: '%s', hash: %x.\n", 561 561 __func__, ino, inode, str.name, str.hash); 562 562 if (!inode) { 563 - printk("%s: No inode for ino: %lu, name: '%s', hash: %x.\n", 563 + dprintk("%s: No inode for ino: %lu, name: '%s', hash: %x.\n", 564 564 __func__, ino, str.name, str.hash); 565 565 //return NULL; 566 566 return ERR_PTR(-EACCES);
+106 -31
drivers/staging/pohmelfs/inode.c
··· 1169 1169 static int pohmelfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) 1170 1170 { 1171 1171 struct inode *inode = dentry->d_inode; 1172 + #if 0 1172 1173 struct pohmelfs_inode *pi = POHMELFS_I(inode); 1173 1174 int err; 1174 1175 1175 1176 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK); 1176 1177 if (err) 1177 1178 return err; 1178 - 1179 1179 dprintk("%s: ino: %llu, mode: %o, uid: %u, gid: %u, size: %llu.\n", 1180 1180 __func__, pi->ino, inode->i_mode, inode->i_uid, 1181 1181 inode->i_gid, inode->i_size); 1182 + #endif 1182 1183 1183 1184 generic_fillattr(inode, stat); 1184 1185 return 0; ··· 1343 1342 1344 1343 kfree(psb); 1345 1344 sb->s_fs_info = NULL; 1346 - 1347 - pohmelfs_ftrans_exit(); 1348 - } 1349 - 1350 - static int pohmelfs_remount(struct super_block *sb, int *flags, char *data) 1351 - { 1352 - *flags |= MS_RDONLY; 1353 - return 0; 1354 1345 } 1355 1346 1356 1347 static int pohmelfs_statfs(struct dentry *dentry, struct kstatfs *buf) ··· 1387 1394 return 0; 1388 1395 } 1389 1396 1390 - static const struct super_operations pohmelfs_sb_ops = { 1391 - .alloc_inode = pohmelfs_alloc_inode, 1392 - .destroy_inode = pohmelfs_destroy_inode, 1393 - .drop_inode = pohmelfs_drop_inode, 1394 - .write_inode = pohmelfs_write_inode, 1395 - .put_super = pohmelfs_put_super, 1396 - .remount_fs = pohmelfs_remount, 1397 - .statfs = pohmelfs_statfs, 1398 - .show_options = pohmelfs_show_options, 1399 - }; 1400 - 1401 1397 enum { 1402 1398 pohmelfs_opt_idx, 1399 + pohmelfs_opt_crypto_thread_num, 1400 + pohmelfs_opt_trans_max_pages, 1401 + pohmelfs_opt_crypto_fail_unsupported, 1402 + 1403 + /* Remountable options */ 1403 1404 pohmelfs_opt_trans_scan_timeout, 1404 1405 pohmelfs_opt_drop_scan_timeout, 1405 1406 pohmelfs_opt_wait_on_page_timeout, 1406 1407 pohmelfs_opt_trans_retries, 1407 - pohmelfs_opt_crypto_thread_num, 1408 - pohmelfs_opt_trans_max_pages, 1409 - pohmelfs_opt_crypto_fail_unsupported, 1410 1408 
pohmelfs_opt_mcache_timeout, 1411 1409 }; 1412 1410 1413 1411 static struct match_token pohmelfs_tokens[] = { 1414 1412 {pohmelfs_opt_idx, "idx=%u"}, 1413 + {pohmelfs_opt_crypto_thread_num, "crypto_thread_num=%u"}, 1414 + {pohmelfs_opt_trans_max_pages, "trans_max_pages=%u"}, 1415 + {pohmelfs_opt_crypto_fail_unsupported, "crypto_fail_unsupported"}, 1415 1416 {pohmelfs_opt_trans_scan_timeout, "trans_scan_timeout=%u"}, 1416 1417 {pohmelfs_opt_drop_scan_timeout, "drop_scan_timeout=%u"}, 1417 1418 {pohmelfs_opt_wait_on_page_timeout, "wait_on_page_timeout=%u"}, 1418 1419 {pohmelfs_opt_trans_retries, "trans_retries=%u"}, 1419 - {pohmelfs_opt_crypto_thread_num, "crypto_thread_num=%u"}, 1420 - {pohmelfs_opt_trans_max_pages, "trans_max_pages=%u"}, 1421 - {pohmelfs_opt_crypto_fail_unsupported, "crypto_fail_unsupported"}, 1422 1420 {pohmelfs_opt_mcache_timeout, "mcache_timeout=%u"}, 1423 1421 }; 1424 1422 1425 - static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb) 1423 + static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb, int remount) 1426 1424 { 1427 1425 char *p; 1428 1426 substring_t args[MAX_OPT_ARGS]; ··· 1432 1448 err = match_int(&args[0], &option); 1433 1449 if (err) 1434 1450 return err; 1451 + 1452 + if (remount && token <= pohmelfs_opt_crypto_fail_unsupported) 1453 + continue; 1435 1454 1436 1455 switch (token) { 1437 1456 case pohmelfs_opt_idx: ··· 1470 1483 } 1471 1484 1472 1485 return 0; 1486 + } 1487 + 1488 + static int pohmelfs_remount(struct super_block *sb, int *flags, char *data) 1489 + { 1490 + int err; 1491 + struct pohmelfs_sb *psb = POHMELFS_SB(sb); 1492 + unsigned long old_sb_flags = sb->s_flags; 1493 + 1494 + err = pohmelfs_parse_options(data, psb, 1); 1495 + if (err) 1496 + goto err_out_restore; 1497 + 1498 + if (!(*flags & MS_RDONLY)) 1499 + sb->s_flags &= ~MS_RDONLY; 1500 + return 0; 1501 + 1502 + err_out_restore: 1503 + sb->s_flags = old_sb_flags; 1504 + return err; 1473 1505 } 1474 1506 1475 1507 static 
void pohmelfs_flush_inode(struct pohmelfs_inode *pi, unsigned int count) ··· 1759 1753 return err; 1760 1754 } 1761 1755 1756 + static int pohmelfs_show_stats(struct seq_file *m, struct vfsmount *mnt) 1757 + { 1758 + struct netfs_state *st; 1759 + struct pohmelfs_ctl *ctl; 1760 + struct pohmelfs_sb *psb = POHMELFS_SB(mnt->mnt_sb); 1761 + struct pohmelfs_config *c; 1762 + 1763 + mutex_lock(&psb->state_lock); 1764 + 1765 + seq_printf(m, "\nidx addr(:port) socket_type protocol active priority permissions\n"); 1766 + 1767 + list_for_each_entry(c, &psb->state_list, config_entry) { 1768 + st = &c->state; 1769 + ctl = &st->ctl; 1770 + 1771 + seq_printf(m, "%u ", ctl->idx); 1772 + if (ctl->addr.sa_family == AF_INET) { 1773 + struct sockaddr_in *sin = (struct sockaddr_in *)&st->ctl.addr; 1774 + //seq_printf(m, "%pi4:%u", &sin->sin_addr.s_addr, ntohs(sin->sin_port)); 1775 + seq_printf(m, "%u.%u.%u.%u:%u", NIPQUAD(sin->sin_addr.s_addr), ntohs(sin->sin_port)); 1776 + } else if (ctl->addr.sa_family == AF_INET6) { 1777 + struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&st->ctl.addr; 1778 + seq_printf(m, "%pi6:%u", &sin->sin6_addr, ntohs(sin->sin6_port)); 1779 + } else { 1780 + unsigned int i; 1781 + for (i=0; i<ctl->addrlen; ++i) 1782 + seq_printf(m, "%02x.", ctl->addr.addr[i]); 1783 + } 1784 + 1785 + seq_printf(m, " %u %u %d %u %x\n", 1786 + ctl->type, ctl->proto, 1787 + st->socket != NULL, 1788 + ctl->prio, ctl->perm); 1789 + } 1790 + mutex_unlock(&psb->state_lock); 1791 + 1792 + return 0; 1793 + } 1794 + 1795 + static const struct super_operations pohmelfs_sb_ops = { 1796 + .alloc_inode = pohmelfs_alloc_inode, 1797 + .destroy_inode = pohmelfs_destroy_inode, 1798 + .drop_inode = pohmelfs_drop_inode, 1799 + .write_inode = pohmelfs_write_inode, 1800 + .put_super = pohmelfs_put_super, 1801 + .remount_fs = pohmelfs_remount, 1802 + .statfs = pohmelfs_statfs, 1803 + .show_options = pohmelfs_show_options, 1804 + .show_stats = pohmelfs_show_stats, 1805 + }; 1806 + 1762 1807 /* 1763 
1808 * Allocate private superblock and create root dir. 1764 1809 */ ··· 1820 1763 struct inode *root; 1821 1764 struct pohmelfs_inode *npi; 1822 1765 struct qstr str; 1823 - 1824 - pohmelfs_ftrans_init(); 1825 1766 1826 1767 psb = kzalloc(sizeof(struct pohmelfs_sb), GFP_KERNEL); 1827 1768 if (!psb) ··· 1871 1816 mutex_init(&psb->state_lock); 1872 1817 INIT_LIST_HEAD(&psb->state_list); 1873 1818 1874 - err = pohmelfs_parse_options((char *) data, psb); 1819 + err = pohmelfs_parse_options((char *) data, psb, 0); 1875 1820 if (err) 1876 1821 goto err_out_free_sb; 1877 1822 ··· 1900 1845 err = PTR_ERR(npi); 1901 1846 goto err_out_crypto_exit; 1902 1847 } 1848 + set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state); 1849 + clear_bit(NETFS_INODE_OWNED, &npi->state); 1903 1850 1904 1851 root = &npi->vfs_inode; 1905 1852 ··· 1944 1887 mnt); 1945 1888 } 1946 1889 1890 + /* 1891 + * We need this to sync all inodes earlier, since when writeback 1892 + * is invoked from the umount/mntput path dcache is already shrunk, 1893 + * see generic_shutdown_super(), and no inodes can access the path. 1894 + */ 1895 + static void pohmelfs_kill_super(struct super_block *sb) 1896 + { 1897 + struct writeback_control wbc = { 1898 + .sync_mode = WB_SYNC_ALL, 1899 + .range_start = 0, 1900 + .range_end = LLONG_MAX, 1901 + .nr_to_write = LONG_MAX, 1902 + }; 1903 + generic_sync_sb_inodes(sb, &wbc); 1904 + 1905 + kill_anon_super(sb); 1906 + } 1907 + 1947 1908 static struct file_system_type pohmel_fs_type = { 1948 1909 .owner = THIS_MODULE, 1949 1910 .name = "pohmel", 1950 1911 .get_sb = pohmelfs_get_sb, 1951 - .kill_sb = kill_anon_super, 1912 + .kill_sb = pohmelfs_kill_super, 1952 1913 }; 1953 1914 1954 1915 /*
+2 -1
drivers/staging/pohmelfs/lock.c
··· 41 41 path_len = err; 42 42 43 43 err = -ENOMEM; 44 - t = netfs_trans_alloc(psb, path_len + sizeof(struct netfs_lock) + isize, 0, 0); 44 + t = netfs_trans_alloc(psb, path_len + sizeof(struct netfs_lock) + isize, 45 + NETFS_TRANS_SINGLE_DST, 0); 45 46 if (!t) 46 47 goto err_out_exit; 47 48
+18 -54
drivers/staging/pohmelfs/net.c
··· 26 26 27 27 #include "netfs.h" 28 28 29 - static int pohmelfs_ftrans_size = 10240; 30 - static u32 *pohmelfs_ftrans; 31 - 32 - int pohmelfs_ftrans_init(void) 33 - { 34 - pohmelfs_ftrans = vmalloc(pohmelfs_ftrans_size * 4); 35 - if (!pohmelfs_ftrans) 36 - return -ENOMEM; 37 - 38 - return 0; 39 - } 40 - 41 - void pohmelfs_ftrans_exit(void) 42 - { 43 - vfree(pohmelfs_ftrans); 44 - } 45 - 46 - void pohmelfs_ftrans_clean(u64 id) 47 - { 48 - if (pohmelfs_ftrans) { 49 - u32 i = id & 0xffffffff; 50 - int idx = i % pohmelfs_ftrans_size; 51 - 52 - pohmelfs_ftrans[idx] = 0; 53 - } 54 - } 55 - 56 - void pohmelfs_ftrans_update(u64 id) 57 - { 58 - if (pohmelfs_ftrans) { 59 - u32 i = id & 0xffffffff; 60 - int idx = i % pohmelfs_ftrans_size; 61 - 62 - pohmelfs_ftrans[idx] = i; 63 - } 64 - } 65 - 66 - int pohmelfs_ftrans_check(u64 id) 67 - { 68 - if (pohmelfs_ftrans) { 69 - u32 i = id & 0xffffffff; 70 - int idx = i % pohmelfs_ftrans_size; 71 - 72 - return (pohmelfs_ftrans[idx] == i); 73 - } 74 - 75 - return -1; 76 - } 77 - 78 29 /* 79 30 * Async machinery lives here. 
80 31 * All commands being sent to server do _not_ require sync reply, ··· 401 450 if (err != -EEXIST) 402 451 goto err_out_put; 403 452 } else { 453 + struct dentry *dentry, *alias, *pd; 454 + 404 455 set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state); 405 456 clear_bit(NETFS_INODE_OWNED, &npi->state); 457 + 458 + pd = d_find_alias(&parent->vfs_inode); 459 + if (pd) { 460 + str.hash = full_name_hash(str.name, str.len); 461 + dentry = d_alloc(pd, &str); 462 + if (dentry) { 463 + alias = d_materialise_unique(dentry, &npi->vfs_inode); 464 + if (alias) 465 + dput(dentry); 466 + } 467 + 468 + dput(dentry); 469 + dput(pd); 470 + } 406 471 } 407 472 } 408 473 out: ··· 605 638 if (dst) { 606 639 netfs_trans_remove_nolock(dst, st); 607 640 t = dst->trans; 608 - 609 - pohmelfs_ftrans_update(cmd->start); 610 641 } 611 642 mutex_unlock(&st->trans_lock); 612 643 613 644 if (!t) { 614 - int check = pohmelfs_ftrans_check(cmd->start); 615 - printk("%s: failed to find transaction: start: %llu: id: %llu, size: %u, ext: %u, double: %d.\n", 616 - __func__, cmd->start, cmd->id, cmd->size, cmd->ext, check); 645 + printk("%s: failed to find transaction: start: %llu: id: %llu, size: %u, ext: %u.\n", 646 + __func__, cmd->start, cmd->id, cmd->size, cmd->ext); 617 647 err = -EINVAL; 618 648 goto out; 619 649 }
+10 -11
drivers/staging/pohmelfs/netfs.h
··· 87 87 POHMELFS_FLAGS_DEL, /* Network state control message for DEL */ 88 88 POHMELFS_FLAGS_SHOW, /* Network state control message for SHOW */ 89 89 POHMELFS_FLAGS_CRYPTO, /* Crypto data control message */ 90 + POHMELFS_FLAGS_MODIFY, /* Network state modification message */ 90 91 }; 91 92 92 93 /* ··· 117 116 unsigned char data[0]; /* Algorithm string, key and IV */ 118 117 }; 119 118 119 + #define POHMELFS_IO_PERM_READ (1<<0) 120 + #define POHMELFS_IO_PERM_WRITE (1<<1) 121 + 120 122 /* 121 123 * Configuration command used to create table of different remote servers. 122 124 */ 123 125 struct pohmelfs_ctl 124 126 { 125 - unsigned int idx; /* Config index */ 126 - unsigned int type; /* Socket type */ 127 - unsigned int proto; /* Socket protocol */ 128 - unsigned int addrlen; /* Size of the address */ 129 - unsigned short unused; /* Align structure by 4 bytes */ 127 + __u32 idx; /* Config index */ 128 + __u32 type; /* Socket type */ 129 + __u32 proto; /* Socket protocol */ 130 + __u16 addrlen; /* Size of the address */ 131 + __u16 perm; /* IO permission */ 132 + __u16 prio; /* IO priority */ 130 133 struct saddr addr; /* Remote server address */ 131 134 }; 132 135 ··· 925 920 if (atomic_dec_and_test(&m->refcnt)) 926 921 pohmelfs_mcache_free(psb, m); 927 922 } 928 - 929 - int pohmelfs_ftrans_init(void); 930 - void pohmelfs_ftrans_exit(void); 931 - void pohmelfs_ftrans_update(u64 id); 932 - int pohmelfs_ftrans_check(u64 id); 933 - void pohmelfs_ftrans_clean(u64 id); 934 923 935 924 #endif /* __KERNEL__*/ 936 925
+12 -23
drivers/staging/pohmelfs/trans.c
··· 456 456 __func__, t, t->gen, t->iovec.iov_len, t->page_num, psb->active_state); 457 457 #endif 458 458 mutex_lock(&psb->state_lock); 459 - 460 - if ((t->flags & NETFS_TRANS_SINGLE_DST) && psb->active_state) { 461 - st = &psb->active_state->state; 462 - 463 - err = -EPIPE; 464 - if (netfs_state_poll(st) & POLLOUT) { 465 - err = netfs_trans_push_dst(t, st); 466 - if (!err) { 467 - err = netfs_trans_send(t, st); 468 - if (err) { 469 - netfs_trans_drop_last(t, st); 470 - } else { 471 - pohmelfs_switch_active(psb); 472 - goto out; 473 - } 474 - } 475 - } 476 - pohmelfs_switch_active(psb); 477 - } 478 - 479 459 list_for_each_entry(c, &psb->state_list, config_entry) { 480 460 st = &c->state; 461 + 462 + if (t->flags & NETFS_TRANS_SINGLE_DST) { 463 + if (!(st->ctl.perm & POHMELFS_IO_PERM_READ)) 464 + continue; 465 + } else { 466 + if (!(st->ctl.perm & POHMELFS_IO_PERM_WRITE)) 467 + continue; 468 + } 469 + 470 + if (psb->active_state && (psb->active_state->state.ctl.prio >= st->ctl.prio)) 471 + st = &psb->active_state->state; 481 472 482 473 err = netfs_trans_push(t, st); 483 474 if (!err && (t->flags & NETFS_TRANS_SINGLE_DST)) 484 475 break; 485 476 } 486 - out: 477 + 487 478 mutex_unlock(&psb->state_lock); 488 479 #if 0 489 480 dprintk("%s: fully sent t: %p, gen: %u, size: %u, page_num: %u, err: %d.\n", ··· 491 500 struct netfs_cmd *cmd = t->iovec.iov_base; 492 501 493 502 t->gen = atomic_inc_return(&psb->trans_gen); 494 - 495 - pohmelfs_ftrans_clean(t->gen); 496 503 497 504 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) + 498 505 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
+15 -10
drivers/staging/rt2860/rt_main_dev.c
··· 722 722 return (-1); 723 723 } /* End of rt28xx_open */ 724 724 725 + static const struct net_device_ops rt2860_netdev_ops = { 726 + .ndo_open = MainVirtualIF_open, 727 + .ndo_stop = MainVirtualIF_close, 728 + .ndo_do_ioctl = rt28xx_ioctl, 729 + .ndo_get_stats = RT28xx_get_ether_stats, 730 + .ndo_validate_addr = NULL, 731 + .ndo_set_mac_address = eth_mac_addr, 732 + .ndo_change_mtu = eth_change_mtu, 733 + #ifdef IKANOS_VX_1X0 734 + .ndo_start_xmit = IKANOS_DataFramesTx, 735 + #else 736 + .ndo_start_xmit = rt28xx_send_packets, 737 + #endif 738 + }; 725 739 726 740 /* Must not be called for mdev and apdev */ 727 741 static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd) ··· 747 733 748 734 749 735 //ether_setup(dev); 750 - dev->hard_start_xmit = rt28xx_send_packets; 751 - 752 - #ifdef IKANOS_VX_1X0 753 - dev->hard_start_xmit = IKANOS_DataFramesTx; 754 - #endif // IKANOS_VX_1X0 // 755 736 756 737 #ifdef CONFIG_STA_SUPPORT 757 738 #if WIRELESS_EXT >= 12 ··· 769 760 #if WIRELESS_EXT < 21 770 761 dev->get_wireless_stats = rt28xx_get_wireless_stats; 771 762 #endif 772 - dev->get_stats = RT28xx_get_ether_stats; 773 - dev->open = MainVirtualIF_open; //rt28xx_open; 774 - dev->stop = MainVirtualIF_close; //rt28xx_close; 775 763 dev->priv_flags = INT_MAIN; 776 - dev->do_ioctl = rt28xx_ioctl; 777 - dev->validate_addr = NULL; 764 + dev->netdev_ops = &rt2860_netdev_ops; 778 765 // find available device name 779 766 for (i = 0; i < 8; i++) 780 767 {
+1
drivers/staging/rt2870/rt2870.h
··· 96 96 {USB_DEVICE(0x0DF6,0x002B)}, /* Sitecom */ \ 97 97 {USB_DEVICE(0x0DF6,0x002C)}, /* Sitecom */ \ 98 98 {USB_DEVICE(0x0DF6,0x002D)}, /* Sitecom */ \ 99 + {USB_DEVICE(0x0DF6,0x0039)}, /* Sitecom */ \ 99 100 {USB_DEVICE(0x14B2,0x3C06)}, /* Conceptronic */ \ 100 101 {USB_DEVICE(0x14B2,0x3C28)}, /* Conceptronic */ \ 101 102 {USB_DEVICE(0x2019,0xED06)}, /* Planex Communications, Inc. */ \
+15 -13
drivers/staging/rt2870/rt_main_dev.c
··· 855 855 return (-1); 856 856 } /* End of rt28xx_open */ 857 857 858 + static const struct net_device_ops rt2870_netdev_ops = { 859 + .ndo_open = MainVirtualIF_open, 860 + .ndo_stop = MainVirtualIF_close, 861 + .ndo_do_ioctl = rt28xx_ioctl, 862 + .ndo_get_stats = RT28xx_get_ether_stats, 863 + .ndo_validate_addr = NULL, 864 + .ndo_set_mac_address = eth_mac_addr, 865 + .ndo_change_mtu = eth_change_mtu, 866 + #ifdef IKANOS_VX_1X0 867 + .ndo_start_xmit = IKANOS_DataFramesTx, 868 + #else 869 + .ndo_start_xmit = rt28xx_send_packets, 870 + #endif 871 + }; 858 872 859 873 /* Must not be called for mdev and apdev */ 860 874 static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd) ··· 880 866 881 867 882 868 //ether_setup(dev); 883 - dev->hard_start_xmit = rt28xx_send_packets; 884 - 885 - #ifdef IKANOS_VX_1X0 886 - dev->hard_start_xmit = IKANOS_DataFramesTx; 887 - #endif // IKANOS_VX_1X0 // 888 - 889 869 // dev->set_multicast_list = ieee80211_set_multicast_list; 890 870 // dev->change_mtu = ieee80211_change_mtu; 891 871 #ifdef CONFIG_STA_SUPPORT ··· 903 895 #if WIRELESS_EXT < 21 904 896 dev->get_wireless_stats = rt28xx_get_wireless_stats; 905 897 #endif 906 - dev->get_stats = RT28xx_get_ether_stats; 907 - dev->open = MainVirtualIF_open; //rt28xx_open; 908 - dev->stop = MainVirtualIF_close; //rt28xx_close; 909 898 // dev->uninit = ieee80211_if_reinit; 910 899 // dev->destructor = ieee80211_if_free; 911 900 dev->priv_flags = INT_MAIN; 912 - dev->do_ioctl = rt28xx_ioctl; 913 - #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) 914 - dev->validate_addr = NULL; 915 - #endif 901 + dev->netdev_ops = &rt2870_netdev_ops; 916 902 // find available device name 917 903 for (i = 0; i < 8; i++) 918 904 {
+15 -14
drivers/staging/rt3070/rt_main_dev.c
··· 436 436 // OID_SET_HT_PHYMODE SetHT; 437 437 // WPDMA_GLO_CFG_STRUC GloCfg; 438 438 UINT32 MacCsr0 = 0; 439 - UINT32 MacValue = 0; 440 439 441 440 #ifdef RT2870 442 441 #ifdef INF_AMAZON_SE ··· 848 849 return (-1); 849 850 } /* End of rt28xx_open */ 850 851 852 + static const struct net_device_ops rt3070_netdev_ops = { 853 + .ndo_open = MainVirtualIF_open, 854 + .ndo_stop = MainVirtualIF_close, 855 + .ndo_do_ioctl = rt28xx_ioctl, 856 + .ndo_get_stats = RT28xx_get_ether_stats, 857 + .ndo_validate_addr = NULL, 858 + .ndo_set_mac_address = eth_mac_addr, 859 + .ndo_change_mtu = eth_change_mtu, 860 + #ifdef IKANOS_VX_1X0 861 + .ndo_start_xmit = IKANOS_DataFramesTx, 862 + #else 863 + .ndo_start_xmit = rt28xx_send_packets, 864 + #endif 865 + }; 851 866 852 867 /* Must not be called for mdev and apdev */ 853 868 static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd) ··· 873 860 874 861 875 862 //ether_setup(dev); 876 - dev->hard_start_xmit = rt28xx_send_packets; 877 - 878 - #ifdef IKANOS_VX_1X0 879 - dev->hard_start_xmit = IKANOS_DataFramesTx; 880 - #endif // IKANOS_VX_1X0 // 881 - 882 863 // dev->set_multicast_list = ieee80211_set_multicast_list; 883 864 // dev->change_mtu = ieee80211_change_mtu; 884 865 #ifdef CONFIG_STA_SUPPORT ··· 896 889 #if WIRELESS_EXT < 21 897 890 dev->get_wireless_stats = rt28xx_get_wireless_stats; 898 891 #endif 899 - dev->get_stats = RT28xx_get_ether_stats; 900 - dev->open = MainVirtualIF_open; //rt28xx_open; 901 - dev->stop = MainVirtualIF_close; //rt28xx_close; 902 892 // dev->uninit = ieee80211_if_reinit; 903 893 // dev->destructor = ieee80211_if_free; 904 894 dev->priv_flags = INT_MAIN; 905 - dev->do_ioctl = rt28xx_ioctl; 906 - #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) 907 - dev->validate_addr = NULL; 908 - #endif 895 + dev->netdev_ops = &rt3070_netdev_ops; 909 896 // find available device name 910 897 for (i = 0; i < 8; i++) 911 898 {
+30 -1
drivers/staging/slicoss/README
··· 10 10 - move firmware loading to request_firmware() 11 11 - remove direct memory access of structures 12 12 - any remaining sparse and checkpatch.pl warnings 13 - - any netdev recommended changes 13 + 14 + - use net_device_ops 15 + - use dev->stats rather than adapter->stats 16 + - don't cast netdev_priv it is already void 17 + - use compare_ether_addr 18 + - GET RID OF MACROS 19 + - work on all architectures 20 + - without CONFIG_X86_64 confusion 21 + - do 64 bit correctly 22 + - don't depend on order of union 23 + - get rid of ASSERT(), use BUG() instead but only where necessary 24 + looks like most aren't really useful 25 + - no new SIOCDEVPRIVATE ioctl allowed 26 + - don't use module_param for configuring interrupt mitigation 27 + use ethtool instead 28 + - reorder code to elminate use of forward declarations 29 + - don't keep private linked list of drivers. 30 + - remove all the gratiutous debug infrastructure 31 + - use PCI_DEVICE() 32 + - do ethtool correctly using ethtool_ops 33 + - NAPI? 34 + - wasted overhead of extra stats 35 + - state variables for things that are 36 + easily availble and shouldn't be kept in card structure, cardnum, ... 37 + slotnumber, events, ... 38 + - get rid of slic_spinlock wrapper 39 + - volatile == bad design => bad code 40 + - locking too fine grained, not designed just throw more locks 41 + at problem 42 + 14 43 15 44 Please send patches to: 16 45 Greg Kroah-Hartman <gregkh@suse.de>
+16 -9
drivers/staging/slicoss/slicoss.c
··· 345 345 return; 346 346 } 347 347 348 + static const struct net_device_ops slic_netdev_ops = { 349 + .ndo_open = slic_entry_open, 350 + .ndo_stop = slic_entry_halt, 351 + .ndo_start_xmit = slic_xmit_start, 352 + .ndo_do_ioctl = slic_ioctl, 353 + .ndo_set_mac_address = slic_mac_set_address, 354 + .ndo_get_stats = slic_get_stats, 355 + .ndo_set_multicast_list = slic_mcast_set_list, 356 + .ndo_validate_addr = eth_validate_addr, 357 + .ndo_set_mac_address = eth_mac_addr, 358 + .ndo_change_mtu = eth_change_mtu, 359 + }; 360 + 348 361 static int __devinit slic_entry_probe(struct pci_dev *pcidev, 349 362 const struct pci_device_id *pci_tbl_entry) 350 363 { ··· 455 442 456 443 netdev->base_addr = (unsigned long)adapter->memorybase; 457 444 netdev->irq = adapter->irq; 458 - netdev->open = slic_entry_open; 459 - netdev->stop = slic_entry_halt; 460 - netdev->hard_start_xmit = slic_xmit_start; 461 - netdev->do_ioctl = slic_ioctl; 462 - netdev->set_mac_address = slic_mac_set_address; 463 - netdev->get_stats = slic_get_stats; 464 - netdev->set_multicast_list = slic_mcast_set_list; 445 + netdev->netdev_ops = &slic_netdev_ops; 465 446 466 447 slic_debug_adapter_create(adapter); 467 448 ··· 1267 1260 } 1268 1261 1269 1262 /* Doesn't already exist. Allocate a structure to hold it */ 1270 - mcaddr = kmalloc(sizeof(struct mcast_address), GFP_KERNEL); 1263 + mcaddr = kmalloc(sizeof(struct mcast_address), GFP_ATOMIC); 1271 1264 if (mcaddr == NULL) 1272 1265 return 1; 1273 1266 ··· 2291 2284 } 2292 2285 if (!physcard) { 2293 2286 /* no structure allocated for this physical card yet */ 2294 - physcard = kzalloc(sizeof(struct physcard), GFP_KERNEL); 2287 + physcard = kzalloc(sizeof(struct physcard), GFP_ATOMIC); 2295 2288 ASSERT(physcard); 2296 2289 2297 2290 physcard->next = slic_global.phys_card;
+1 -1
drivers/staging/stlc45xx/Kconfig
··· 1 1 config STLC45XX 2 2 tristate "stlc4550/4560 support" 3 - depends on MAC80211 && WLAN_80211 && SPI_MASTER 3 + depends on MAC80211 && WLAN_80211 && SPI_MASTER && GENERIC_HARDIRQS 4 4 ---help--- 5 5 This is a driver for stlc4550 and stlc4560 chipsets. 6 6
+84 -43
drivers/staging/sxg/sxg.c
··· 322 322 int ret,i; 323 323 324 324 if (!adapter->intrregistered) { 325 + spin_unlock_irqrestore(&sxg_global.driver_lock, 326 + sxg_global.flags); 325 327 for (i=0; i<adapter->nr_msix_entries; i++) { 326 328 ret = request_irq (adapter->msi_entries[i].vector, 327 329 sxg_isr, ··· 331 329 adapter->netdev->name, 332 330 adapter->netdev); 333 331 if (ret) { 332 + spin_lock_irqsave(&sxg_global.driver_lock, 333 + sxg_global.flags); 334 334 DBG_ERROR("sxg: MSI-X request_irq (%s) " 335 335 "FAILED [%x]\n", adapter->netdev->name, 336 336 ret); ··· 340 336 } 341 337 } 342 338 } 339 + spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags); 343 340 adapter->msi_enabled = TRUE; 344 341 adapter->intrregistered = 1; 345 342 adapter->IntRegistered = TRUE; ··· 901 896 return status; 902 897 } 903 898 899 + static const struct net_device_ops sxg_netdev_ops = { 900 + .ndo_open = sxg_entry_open, 901 + .ndo_stop = sxg_entry_halt, 902 + .ndo_start_xmit = sxg_send_packets, 903 + .ndo_do_ioctl = sxg_ioctl, 904 + .ndo_change_mtu = sxg_change_mtu, 905 + .ndo_get_stats = sxg_get_stats, 906 + .ndo_set_multicast_list = sxg_mcast_set_list, 907 + .ndo_validate_addr = eth_validate_addr, 908 + #if XXXTODO 909 + .ndo_set_mac_address = sxg_mac_set_address, 910 + #else 911 + .ndo_set_mac_address = eth_mac_addr, 912 + #endif 913 + }; 914 + 904 915 static int sxg_entry_probe(struct pci_dev *pcidev, 905 916 const struct pci_device_id *pci_tbl_entry) 906 917 { ··· 1116 1095 1117 1096 netdev->base_addr = (unsigned long)adapter->base_addr; 1118 1097 netdev->irq = adapter->irq; 1119 - netdev->open = sxg_entry_open; 1120 - netdev->stop = sxg_entry_halt; 1121 - netdev->hard_start_xmit = sxg_send_packets; 1122 - netdev->do_ioctl = sxg_ioctl; 1123 - netdev->change_mtu = sxg_change_mtu; 1124 - #if XXXTODO 1125 - netdev->set_mac_address = sxg_mac_set_address; 1126 - #endif 1127 - netdev->get_stats = sxg_get_stats; 1128 - netdev->set_multicast_list = sxg_mcast_set_list; 1098 + netdev->netdev_ops = 
&sxg_netdev_ops; 1129 1099 SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops); 1130 1100 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1131 1101 err = sxg_set_interrupt_capability(adapter); ··· 2259 2247 DBG_ERROR("sxg: %s EXIT\n", __func__); 2260 2248 2261 2249 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags); 2250 + mod_timer(&adapter->watchdog_timer, jiffies); 2251 + 2262 2252 return STATUS_SUCCESS; 2263 2253 } 2264 2254 ··· 2582 2568 u64 phys_addr; 2583 2569 unsigned long flags; 2584 2570 unsigned long queue_id=0; 2571 + int offload_cksum = 0; 2585 2572 2586 2573 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl", 2587 2574 pSgl, SxgSgl, 0, 0); ··· 2621 2606 struct iphdr *ip; 2622 2607 2623 2608 ip = ip_hdr(skb); 2624 - if ((ip->protocol == IPPROTO_TCP)&&(DataLength >= sizeof( 2609 + if (ip->protocol == IPPROTO_TCP) 2610 + offload_cksum = 1; 2611 + if (!offload_cksum || !tcp_hdr(skb)) 2612 + queue_id = 0; 2613 + else if (offload_cksum && (DataLength >= sizeof( 2625 2614 struct tcphdr))){ 2626 2615 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ? 2627 2616 (ntohs (tcp_hdr(skb)->source) & ··· 2634 2615 SXG_LARGE_SEND_QUEUE_MASK)); 2635 2616 } 2636 2617 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2637 - if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) && (DataLength >= 2638 - sizeof(struct tcphdr)) ) { 2618 + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2619 + offload_cksum = 1; 2620 + if (!offload_cksum || !tcp_hdr(skb)) 2621 + queue_id = 0; 2622 + else if (offload_cksum && (DataLength>=sizeof(struct tcphdr))){ 2639 2623 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ? 
2640 2624 (ntohs (tcp_hdr(skb)->source) & 2641 2625 SXG_LARGE_SEND_QUEUE_MASK): ··· 2667 2645 } 2668 2646 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd", 2669 2647 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0); 2670 - /* Update stats */ 2671 - adapter->stats.tx_packets++; 2672 - adapter->stats.tx_bytes += DataLength; 2673 - #if XXXTODO /* Stats stuff */ 2674 - if (SXG_MULTICAST_PACKET(EtherHdr)) { 2675 - if (SXG_BROADCAST_PACKET(EtherHdr)) { 2676 - adapter->Stats.DumbXmtBcastPkts++; 2677 - adapter->Stats.DumbXmtBcastBytes += DataLength; 2648 + memset(XmtCmd, '\0', sizeof(*XmtCmd)); 2649 + XmtCmd->SgEntries = 1; 2650 + XmtCmd->Flags = 0; 2651 + if (skb->ip_summed == CHECKSUM_PARTIAL) { 2652 + /* 2653 + * We need to set the Checkum in IP header to 0. This is 2654 + * required by hardware. 2655 + */ 2656 + if (offload_cksum) { 2657 + ip_hdr(skb)->check = 0x0; 2658 + XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_IP; 2659 + XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_TCP; 2660 + /* 2661 + * Dont know if length will require a change in 2662 + * case of VLAN 2663 + */ 2664 + XmtCmd->CsumFlags.MacLen = ETH_HLEN; 2665 + XmtCmd->CsumFlags.IpHl = skb_network_header_len(skb) >> 2666 + SXG_NW_HDR_LEN_SHIFT; 2678 2667 } else { 2679 - adapter->Stats.DumbXmtMcastPkts++; 2680 - adapter->Stats.DumbXmtMcastBytes += DataLength; 2668 + if (skb_checksum_help(skb)){ 2669 + printk(KERN_EMERG "Dropped UDP packet for" 2670 + " incorrect checksum calculation\n"); 2671 + if (XmtCmd) 2672 + SXG_ABORT_CMD(XmtRingInfo); 2673 + spin_unlock_irqrestore(&adapter->XmtZeroLock, 2674 + flags); 2675 + return STATUS_SUCCESS; 2676 + } 2681 2677 } 2682 - } else { 2683 - adapter->Stats.DumbXmtUcastPkts++; 2684 - adapter->Stats.DumbXmtUcastBytes += DataLength; 2685 2678 } 2686 - #endif 2679 + 2687 2680 /* 2688 2681 * Fill in the command 2689 2682 * Copy out the first SGE to the command and adjust for offset ··· 2716 2679 (SXG_INVALID_SGL(phys_addr,skb->data_len))) 2717 2680 { 2718 2681 
spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); 2682 + if (XmtCmd) 2683 + SXG_ABORT_CMD(XmtRingInfo); 2719 2684 /* Silently drop this packet */ 2720 2685 printk(KERN_EMERG"Dropped a packet for 64k boundary problem\n"); 2721 2686 return STATUS_SUCCESS; 2722 2687 } 2723 - memset(XmtCmd, '\0', sizeof(*XmtCmd)); 2724 2688 XmtCmd->Buffer.FirstSgeAddress = phys_addr; 2725 2689 XmtCmd->Buffer.FirstSgeLength = DataLength; 2726 2690 XmtCmd->Buffer.SgeOffset = 0; 2727 2691 XmtCmd->Buffer.TotalLength = DataLength; 2728 - XmtCmd->SgEntries = 1; 2729 - XmtCmd->Flags = 0; 2730 2692 2731 - if (skb->ip_summed == CHECKSUM_PARTIAL) { 2732 - /* 2733 - * We need to set the Checkum in IP header to 0. This is 2734 - * required by hardware. 2735 - */ 2736 - ip_hdr(skb)->check = 0x0; 2737 - XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_IP; 2738 - XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_TCP; 2739 - /* Dont know if length will require a change in case of VLAN */ 2740 - XmtCmd->CsumFlags.MacLen = ETH_HLEN; 2741 - XmtCmd->CsumFlags.IpHl = skb_network_header_len(skb) >> 2742 - SXG_NW_HDR_LEN_SHIFT; 2743 - } 2744 2693 /* 2745 2694 * Advance transmit cmd descripter by 1. 
2746 2695 * NOTE - See comments in SxgTcpOutput where we write ··· 2738 2715 ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0); 2739 2716 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE); 2740 2717 adapter->Stats.XmtQLen++; /* Stats within lock */ 2718 + /* Update stats */ 2719 + adapter->stats.tx_packets++; 2720 + adapter->stats.tx_bytes += DataLength; 2721 + #if XXXTODO /* Stats stuff */ 2722 + if (SXG_MULTICAST_PACKET(EtherHdr)) { 2723 + if (SXG_BROADCAST_PACKET(EtherHdr)) { 2724 + adapter->Stats.DumbXmtBcastPkts++; 2725 + adapter->Stats.DumbXmtBcastBytes += DataLength; 2726 + } else { 2727 + adapter->Stats.DumbXmtMcastPkts++; 2728 + adapter->Stats.DumbXmtMcastBytes += DataLength; 2729 + } 2730 + } else { 2731 + adapter->Stats.DumbXmtUcastPkts++; 2732 + adapter->Stats.DumbXmtUcastBytes += DataLength; 2733 + } 2734 + #endif 2735 + 2741 2736 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); 2742 2737 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2", 2743 2738 XmtCmd, pSgl, SxgSgl, 0);
+3 -25
drivers/staging/uc2322/aten2011.c
··· 603 603 604 604 tty = tty_port_tty_get(&ATEN2011_port->port->port); 605 605 606 - if (tty && ATEN2011_port->open) { 606 + if (tty && ATEN2011_port->open) 607 607 /* tell the tty driver that something has changed */ 608 - wake_up_interruptible(&tty->write_wait); 609 - } 608 + tty_wakeup(tty); 610 609 611 610 /* schedule_work(&ATEN2011_port->port->work); */ 612 611 tty_kref_put(tty); ··· 824 825 status = 0; 825 826 status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data); 826 827 827 - /* force low_latency on so that our tty_push actually forces * 828 - * the data through,otherwise it is scheduled, and with * 829 - * high data rates (like with OHCI) data can get lost. */ 830 - 831 - if (tty) 832 - tty->low_latency = 1; 833 828 /* 834 829 * Check to see if we've set up our endpoint info yet 835 830 * (can't set it up in ATEN2011_startup as the structures ··· 1466 1473 1467 1474 cflag = tty->termios->c_cflag; 1468 1475 1469 - if (!cflag) { 1470 - dbg("%s %s", __func__, "cflag is NULL"); 1471 - return; 1472 - } 1473 - 1474 - /* check that they really want us to change something */ 1475 - if (old_termios) { 1476 - if ((cflag == old_termios->c_cflag) && 1477 - (RELEVANT_IFLAG(tty->termios->c_iflag) == 1478 - RELEVANT_IFLAG(old_termios->c_iflag))) { 1479 - dbg("%s", "Nothing to change"); 1480 - return; 1481 - } 1482 - } 1483 - 1484 - dbg("%s - clfag %08x iflag %08x", __func__, 1476 + dbg("%s - cflag %08x iflag %08x", __func__, 1485 1477 tty->termios->c_cflag, RELEVANT_IFLAG(tty->termios->c_iflag)); 1486 1478 1487 1479 if (old_termios) {
+15 -5
drivers/staging/wlan-ng/p80211netdev.c
··· 711 711 return 0; 712 712 } 713 713 714 + static const struct net_device_ops p80211_netdev_ops = { 715 + .ndo_init = p80211knetdev_init, 716 + .ndo_open = p80211knetdev_open, 717 + .ndo_stop = p80211knetdev_stop, 718 + .ndo_get_stats = p80211knetdev_get_stats, 719 + .ndo_start_xmit = p80211knetdev_hard_start_xmit, 720 + .ndo_set_multicast_list = p80211knetdev_set_multicast_list, 721 + .ndo_do_ioctl = p80211knetdev_do_ioctl, 722 + .ndo_set_mac_address = p80211knetdev_set_mac_address, 723 + .ndo_tx_timeout = p80211knetdev_tx_timeout, 724 + .ndo_change_mtu = wlan_change_mtu, 725 + .ndo_validate_addr = eth_validate_addr, 726 + }; 727 + 714 728 /*---------------------------------------------------------------- 715 729 * wlan_setup 716 730 * ··· 770 756 } else { 771 757 wlandev->netdev = dev; 772 758 dev->ml_priv = wlandev; 773 - dev->hard_start_xmit = p80211knetdev_hard_start_xmit; 774 - dev->get_stats = p80211knetdev_get_stats; 775 - dev->init = p80211knetdev_init; 776 - dev->open = p80211knetdev_open; 777 - dev->stop = p80211knetdev_stop; 759 + dev->netdev_ops = &p80211_netdev_ops; 778 760 779 761 mutex_init(&wlandev->ioctl_lock); 780 762 /* block ioctls until fully initialised. Don't forget to call