Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6

Conflicts:
drivers/video/sh_mobile_lcdcfb.c

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

+2243 -1510
+1
Documentation/accounting/getdelays.c
··· 516 516 default: 517 517 fprintf(stderr, "Unknown nla_type %d\n", 518 518 na->nla_type); 519 + case TASKSTATS_TYPE_NULL: 519 520 break; 520 521 } 521 522 na = (struct nlattr *) (GENLMSG_DATA(&msg) + len);
+99 -111
Documentation/filesystems/Locking
··· 18 18 char *(*d_dname)((struct dentry *dentry, char *buffer, int buflen); 19 19 20 20 locking rules: 21 - none have BKL 22 21 dcache_lock rename_lock ->d_lock may block 23 22 d_revalidate: no no no yes 24 23 d_hash no no no yes ··· 41 42 int (*rename) (struct inode *, struct dentry *, 42 43 struct inode *, struct dentry *); 43 44 int (*readlink) (struct dentry *, char __user *,int); 44 - int (*follow_link) (struct dentry *, struct nameidata *); 45 + void * (*follow_link) (struct dentry *, struct nameidata *); 46 + void (*put_link) (struct dentry *, struct nameidata *, void *); 45 47 void (*truncate) (struct inode *); 46 48 int (*permission) (struct inode *, int, struct nameidata *); 49 + int (*check_acl)(struct inode *, int); 47 50 int (*setattr) (struct dentry *, struct iattr *); 48 51 int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *); 49 52 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); 50 53 ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); 51 54 ssize_t (*listxattr) (struct dentry *, char *, size_t); 52 55 int (*removexattr) (struct dentry *, const char *); 56 + void (*truncate_range)(struct inode *, loff_t, loff_t); 57 + long (*fallocate)(struct inode *inode, int mode, loff_t offset, loff_t len); 58 + int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); 53 59 54 60 locking rules: 55 - all may block, none have BKL 61 + all may block 56 62 i_mutex(inode) 57 63 lookup: yes 58 64 create: yes ··· 70 66 rename: yes (all) (see below) 71 67 readlink: no 72 68 follow_link: no 69 + put_link: no 73 70 truncate: yes (see below) 74 71 setattr: yes 75 72 permission: no 73 + check_acl: no 76 74 getattr: no 77 75 setxattr: yes 78 76 getxattr: no 79 77 listxattr: no 80 78 removexattr: yes 79 + truncate_range: yes 80 + fallocate: no 81 + fiemap: no 81 82 Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on 82 83 victim. 
83 84 cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem. 84 85 ->truncate() is never called directly - it's a callback, not a 85 - method. It's called by vmtruncate() - library function normally used by 86 + method. It's called by vmtruncate() - deprecated library function used by 86 87 ->setattr(). Locking information above applies to that call (i.e. is 87 88 inherited from ->setattr() - vmtruncate() is used when ATTR_SIZE had been 88 89 passed). ··· 100 91 struct inode *(*alloc_inode)(struct super_block *sb); 101 92 void (*destroy_inode)(struct inode *); 102 93 void (*dirty_inode) (struct inode *); 103 - int (*write_inode) (struct inode *, int); 94 + int (*write_inode) (struct inode *, struct writeback_control *wbc); 104 95 int (*drop_inode) (struct inode *); 105 96 void (*evict_inode) (struct inode *); 106 97 void (*put_super) (struct super_block *); ··· 114 105 int (*show_options)(struct seq_file *, struct vfsmount *); 115 106 ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); 116 107 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); 108 + int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); 117 109 118 110 locking rules: 119 111 All may block [not true, see below] 120 - None have BKL 121 112 s_umount 122 113 alloc_inode: 123 114 destroy_inode: ··· 136 127 show_options: no (namespace_sem) 137 128 quota_read: no (see below) 138 129 quota_write: no (see below) 130 + bdev_try_to_free_page: no (see below) 139 131 140 132 ->statfs() has s_umount (shared) when called by ustat(2) (native or 141 133 compat), but that's an accident of bad API; s_umount is used to pin ··· 149 139 dqio_sem) (unless an admin really wants to screw up something and 150 140 writes to quota files with quotas on). For other details about locking 151 141 see also dquot_operations section. 142 + ->bdev_try_to_free_page is called from the ->releasepage handler of 143 + the block device inode. 
See there for more details. 152 144 153 145 --------------------------- file_system_type --------------------------- 154 146 prototypes: 155 147 int (*get_sb) (struct file_system_type *, int, 156 148 const char *, void *, struct vfsmount *); 149 + struct dentry *(*mount) (struct file_system_type *, int, 150 + const char *, void *); 157 151 void (*kill_sb) (struct super_block *); 158 152 locking rules: 159 - may block BKL 160 - get_sb yes no 161 - kill_sb yes no 153 + may block 154 + get_sb yes 155 + mount yes 156 + kill_sb yes 162 157 163 158 ->get_sb() returns error or 0 with locked superblock attached to the vfsmount 164 159 (exclusive on ->s_umount). 160 + ->mount() returns ERR_PTR or the root dentry. 165 161 ->kill_sb() takes a write-locked superblock, does all shutdown work on it, 166 162 unlocks and drops the reference. 167 163 ··· 192 176 void (*freepage)(struct page *); 193 177 int (*direct_IO)(int, struct kiocb *, const struct iovec *iov, 194 178 loff_t offset, unsigned long nr_segs); 195 - int (*launder_page) (struct page *); 179 + int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **, 180 + unsigned long *); 181 + int (*migratepage)(struct address_space *, struct page *, struct page *); 182 + int (*launder_page)(struct page *); 183 + int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long); 184 + int (*error_remove_page)(struct address_space *, struct page *); 196 185 197 186 locking rules: 198 187 All except set_page_dirty and freepage may block 199 188 200 - BKL PageLocked(page) i_mutex 201 - writepage: no yes, unlocks (see below) 202 - readpage: no yes, unlocks 203 - sync_page: no maybe 204 - writepages: no 205 - set_page_dirty no no 206 - readpages: no 207 - write_begin: no locks the page yes 208 - write_end: no yes, unlocks yes 209 - perform_write: no n/a yes 210 - bmap: no 211 - invalidatepage: no yes 212 - releasepage: no yes 213 - freepage: no yes 214 - direct_IO: no 215 - launder_page: no yes 189 + 
PageLocked(page) i_mutex 190 + writepage: yes, unlocks (see below) 191 + readpage: yes, unlocks 192 + sync_page: maybe 193 + writepages: 194 + set_page_dirty no 195 + readpages: 196 + write_begin: locks the page yes 197 + write_end: yes, unlocks yes 198 + bmap: 199 + invalidatepage: yes 200 + releasepage: yes 201 + freepage: yes 202 + direct_IO: 203 + get_xip_mem: maybe 204 + migratepage: yes (both) 205 + launder_page: yes 206 + is_partially_uptodate: yes 207 + error_remove_page: yes 216 208 217 209 ->write_begin(), ->write_end(), ->sync_page() and ->readpage() 218 210 may be called from the request handler (/dev/loop). ··· 300 276 not locked. 301 277 302 278 ->bmap() is currently used by legacy ioctl() (FIBMAP) provided by some 303 - filesystems and by the swapper. The latter will eventually go away. All 304 - instances do not actually need the BKL. Please, keep it that way and don't 305 - breed new callers. 279 + filesystems and by the swapper. The latter will eventually go away. Please, 280 + keep it that way and don't breed new callers. 306 281 307 282 ->invalidatepage() is called when the filesystem must attempt to drop 308 283 some or all of the buffers from the page when it is being truncated. It ··· 322 299 getting mapped back in and redirtied, it needs to be kept locked 323 300 across the entire operation. 324 301 325 - Note: currently almost all instances of address_space methods are 326 - using BKL for internal serialization and that's one of the worst sources 327 - of contention. Normally they are calling library functions (in fs/buffer.c) 328 - and pass foo_get_block() as a callback (on local block-based filesystems, 329 - indeed). BKL is not needed for library stuff and is usually taken by 330 - foo_get_block(). It's an overkill, since block bitmaps can be protected by 331 - internal fs locking and real critical areas are much smaller than the areas 332 - filesystems protect now. 
333 - 334 302 ----------------------- file_lock_operations ------------------------------ 335 303 prototypes: 336 - void (*fl_insert)(struct file_lock *); /* lock insertion callback */ 337 - void (*fl_remove)(struct file_lock *); /* lock removal callback */ 338 304 void (*fl_copy_lock)(struct file_lock *, struct file_lock *); 339 305 void (*fl_release_private)(struct file_lock *); 340 306 341 307 342 308 locking rules: 343 - BKL may block 344 - fl_insert: yes no 345 - fl_remove: yes no 346 - fl_copy_lock: yes no 347 - fl_release_private: yes yes 309 + file_lock_lock may block 310 + fl_copy_lock: yes no 311 + fl_release_private: maybe no 348 312 349 313 ----------------------- lock_manager_operations --------------------------- 350 314 prototypes: 351 315 int (*fl_compare_owner)(struct file_lock *, struct file_lock *); 352 316 void (*fl_notify)(struct file_lock *); /* unblock callback */ 317 + int (*fl_grant)(struct file_lock *, struct file_lock *, int); 353 318 void (*fl_release_private)(struct file_lock *); 354 319 void (*fl_break)(struct file_lock *); /* break_lease callback */ 320 + int (*fl_mylease)(struct file_lock *, struct file_lock *); 321 + int (*fl_change)(struct file_lock **, int); 355 322 356 323 locking rules: 357 - BKL may block 358 - fl_compare_owner: yes no 359 - fl_notify: yes no 360 - fl_release_private: yes yes 361 - fl_break: yes no 324 + file_lock_lock may block 325 + fl_compare_owner: yes no 326 + fl_notify: yes no 327 + fl_grant: no no 328 + fl_release_private: maybe no 329 + fl_break: yes no 330 + fl_mylease: yes no 331 + fl_change yes no 362 332 363 - Currently only NFSD and NLM provide instances of this class. None of the 364 - them block. If you have out-of-tree instances - please, show up. Locking 365 - in that area will change. 
366 333 --------------------------- buffer_head ----------------------------------- 367 334 prototypes: 368 335 void (*b_end_io)(struct buffer_head *bh, int uptodate); ··· 377 364 void (*swap_slot_free_notify) (struct block_device *, unsigned long); 378 365 379 366 locking rules: 380 - BKL bd_mutex 381 - open: no yes 382 - release: no yes 383 - ioctl: no no 384 - compat_ioctl: no no 385 - direct_access: no no 386 - media_changed: no no 387 - unlock_native_capacity: no no 388 - revalidate_disk: no no 389 - getgeo: no no 390 - swap_slot_free_notify: no no (see below) 367 + bd_mutex 368 + open: yes 369 + release: yes 370 + ioctl: no 371 + compat_ioctl: no 372 + direct_access: no 373 + media_changed: no 374 + unlock_native_capacity: no 375 + revalidate_disk: no 376 + getgeo: no 377 + swap_slot_free_notify: no (see below) 391 378 392 379 media_changed, unlock_native_capacity and revalidate_disk are called only from 393 380 check_disk_change(). ··· 426 413 unsigned long (*get_unmapped_area)(struct file *, unsigned long, 427 414 unsigned long, unsigned long, unsigned long); 428 415 int (*check_flags)(int); 416 + int (*flock) (struct file *, int, struct file_lock *); 417 + ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, 418 + size_t, unsigned int); 419 + ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, 420 + size_t, unsigned int); 421 + int (*setlease)(struct file *, long, struct file_lock **); 429 422 }; 430 423 431 424 locking rules: 432 - All may block. 
433 - BKL 434 - llseek: no (see below) 435 - read: no 436 - aio_read: no 437 - write: no 438 - aio_write: no 439 - readdir: no 440 - poll: no 441 - unlocked_ioctl: no 442 - compat_ioctl: no 443 - mmap: no 444 - open: no 445 - flush: no 446 - release: no 447 - fsync: no (see below) 448 - aio_fsync: no 449 - fasync: no 450 - lock: yes 451 - readv: no 452 - writev: no 453 - sendfile: no 454 - sendpage: no 455 - get_unmapped_area: no 456 - check_flags: no 425 + All may block except for ->setlease. 426 + No VFS locks held on entry except for ->fsync and ->setlease. 427 + 428 + ->fsync() has i_mutex on inode. 429 + 430 + ->setlease has the file_list_lock held and must not sleep. 457 431 458 432 ->llseek() locking has moved from llseek to the individual llseek 459 433 implementations. If your fs is not using generic_file_llseek, you ··· 450 450 Note: this does not protect the file->f_pos against concurrent modifications 451 451 since this is something the userspace has to take care about. 452 452 453 - Note: ext2_release() was *the* source of contention on fs-intensive 454 - loads and dropping BKL on ->release() helps to get rid of that (we still 455 - grab BKL for cases when we close a file that had been opened r/w, but that 456 - can and should be done using the internal locking with smaller critical areas). 457 - Current worst offender is ext2_get_block()... 458 - 459 - ->fasync() is called without BKL protection, and is responsible for 460 - maintaining the FASYNC bit in filp->f_flags. Most instances call 461 - fasync_helper(), which does that maintenance, so it's not normally 462 - something one needs to worry about. Return values > 0 will be mapped to 463 - zero in the VFS layer. 453 + ->fasync() is responsible for maintaining the FASYNC bit in filp->f_flags. 454 + Most instances call fasync_helper(), which does that maintenance, so it's 455 + not normally something one needs to worry about. Return values > 0 will be 456 + mapped to zero in the VFS layer. 
464 457 465 458 ->readdir() and ->ioctl() on directories must be changed. Ideally we would 466 459 move ->readdir() to inode_operations and use a separate method for directory ··· 463 470 464 471 ->read on directories probably must go away - we should just enforce -EISDIR 465 472 in sys_read() and friends. 466 - 467 - ->fsync() has i_mutex on inode. 468 473 469 474 --------------------------- dquot_operations ------------------------------- 470 475 prototypes: ··· 498 507 int (*access)(struct vm_area_struct *, unsigned long, void*, int, int); 499 508 500 509 locking rules: 501 - BKL mmap_sem PageLocked(page) 502 - open: no yes 503 - close: no yes 504 - fault: no yes can return with page locked 505 - page_mkwrite: no yes can return with page locked 506 - access: no yes 510 + mmap_sem PageLocked(page) 511 + open: yes 512 + close: yes 513 + fault: yes can return with page locked 514 + page_mkwrite: yes can return with page locked 515 + access: yes 507 516 508 517 ->fault() is called when a previously not present pte is about 509 518 to be faulted in. The filesystem must find and return the page associated ··· 530 539 531 540 (if you break something or notice that it is broken and do not fix it yourself 532 541 - at least put it here) 533 - 534 - ipc/shm.c::shm_delete() - may need BKL. 535 - ->read() and ->write() in many drivers are (probably) missing BKL.
+1 -1
Documentation/kernel-parameters.txt
··· 1759 1759 1760 1760 nousb [USB] Disable the USB subsystem 1761 1761 1762 - nowatchdog [KNL] Disable the lockup detector. 1762 + nowatchdog [KNL] Disable the lockup detector (NMI watchdog). 1763 1763 1764 1764 nowb [ARM] 1765 1765
+30 -27
Documentation/scsi/scsi_mid_low_api.txt
··· 1044 1044 1045 1045 1046 1046 /** 1047 - * queuecommand - queue scsi command, invoke 'done' on completion 1047 + * queuecommand - queue scsi command, invoke scp->scsi_done on completion 1048 + * @shost: pointer to the scsi host object 1048 1049 * @scp: pointer to scsi command object 1049 - * @done: function pointer to be invoked on completion 1050 1050 * 1051 1051 * Returns 0 on success. 1052 1052 * ··· 1074 1074 * 1075 1075 * Other types of errors that are detected immediately may be 1076 1076 * flagged by setting scp->result to an appropriate value, 1077 - * invoking the 'done' callback, and then returning 0 from this 1078 - * function. If the command is not performed immediately (and the 1079 - * LLD is starting (or will start) the given command) then this 1080 - * function should place 0 in scp->result and return 0. 1077 + * invoking the scp->scsi_done callback, and then returning 0 1078 + * from this function. If the command is not performed 1079 + * immediately (and the LLD is starting (or will start) the given 1080 + * command) then this function should place 0 in scp->result and 1081 + * return 0. 1081 1082 * 1082 1083 * Command ownership. If the driver returns zero, it owns the 1083 - * command and must take responsibility for ensuring the 'done' 1084 - * callback is executed. Note: the driver may call done before 1085 - * returning zero, but after it has called done, it may not 1086 - * return any value other than zero. If the driver makes a 1087 - * non-zero return, it must not execute the command's done 1088 - * callback at any time. 1084 + * command and must take responsibility for ensuring the 1085 + * scp->scsi_done callback is executed. Note: the driver may 1086 + * call scp->scsi_done before returning zero, but after it has 1087 + * called scp->scsi_done, it may not return any value other than 1088 + * zero. If the driver makes a non-zero return, it must not 1089 + * execute the command's scsi_done callback at any time. 
1089 1090 * 1090 - * Locks: struct Scsi_Host::host_lock held on entry (with "irqsave") 1091 - * and is expected to be held on return. 1091 + * Locks: up to and including 2.6.36, struct Scsi_Host::host_lock 1092 + * held on entry (with "irqsave") and is expected to be 1093 + * held on return. From 2.6.37 onwards, queuecommand is 1094 + * called without any locks held. 1092 1095 * 1093 1096 * Calling context: in interrupt (soft irq) or process context 1094 1097 * 1095 - * Notes: This function should be relatively fast. Normally it will 1096 - * not wait for IO to complete. Hence the 'done' callback is invoked 1097 - * (often directly from an interrupt service routine) some time after 1098 - * this function has returned. In some cases (e.g. pseudo adapter 1099 - * drivers that manufacture the response to a SCSI INQUIRY) 1100 - * the 'done' callback may be invoked before this function returns. 1101 - * If the 'done' callback is not invoked within a certain period 1102 - * the SCSI mid level will commence error processing. 1103 - * If a status of CHECK CONDITION is placed in "result" when the 1104 - * 'done' callback is invoked, then the LLD driver should 1105 - * perform autosense and fill in the struct scsi_cmnd::sense_buffer 1098 + * Notes: This function should be relatively fast. Normally it 1099 + * will not wait for IO to complete. Hence the scp->scsi_done 1100 + * callback is invoked (often directly from an interrupt service 1101 + * routine) some time after this function has returned. In some 1102 + * cases (e.g. pseudo adapter drivers that manufacture the 1103 + * response to a SCSI INQUIRY) the scp->scsi_done callback may be 1104 + * invoked before this function returns. If the scp->scsi_done 1105 + * callback is not invoked within a certain period the SCSI mid 1106 + * level will commence error processing. 
If a status of CHECK 1107 + * CONDITION is placed in "result" when the scp->scsi_done 1108 + * callback is invoked, then the LLD driver should perform 1109 + * autosense and fill in the struct scsi_cmnd::sense_buffer 1106 1110 * array. The scsi_cmnd::sense_buffer array is zeroed prior to 1107 1111 * the mid level queuing a command to an LLD. 1108 1112 * 1109 1113 * Defined in: LLD 1110 1114 **/ 1111 - int queuecommand(struct scsi_cmnd * scp, 1112 - void (*done)(struct scsi_cmnd *)) 1115 + int queuecommand(struct Scsi_Host *shost, struct scsi_cmnd * scp) 1113 1116 1114 1117 1115 1118 /**
+10 -1
Documentation/trace/postprocess/trace-vmscan-postprocess.pl
··· 373 373 print " $regex_lru_isolate/o\n"; 374 374 next; 375 375 } 376 + my $isolate_mode = $1; 376 377 my $nr_scanned = $4; 377 378 my $nr_contig_dirty = $7; 378 - $perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned; 379 + 380 + # To closer match vmstat scanning statistics, only count isolate_both 381 + # and isolate_inactive as scanning. isolate_active is rotation 382 + # isolate_inactive == 0 383 + # isolate_active == 1 384 + # isolate_both == 2 385 + if ($isolate_mode != 1) { 386 + $perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned; 387 + } 379 388 $perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty; 380 389 } elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") { 381 390 $details = $5;
+18 -3
MAINTAINERS
··· 405 405 F: drivers/usb/gadget/amd5536udc.* 406 406 407 407 AMD GEODE PROCESSOR/CHIPSET SUPPORT 408 - P: Jordan Crouse 408 + P: Andres Salomon <dilinger@queued.net> 409 409 L: linux-geode@lists.infradead.org (moderated for non-subscribers) 410 410 W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html 411 411 S: Supported ··· 792 792 793 793 ARM/NOMADIK ARCHITECTURE 794 794 M: Alessandro Rubini <rubini@unipv.it> 795 + M: Linus Walleij <linus.walleij@stericsson.com> 795 796 M: STEricsson <STEricsson_nomadik_linux@list.st.com> 796 797 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 797 798 S: Maintained 798 799 F: arch/arm/mach-nomadik/ 799 800 F: arch/arm/plat-nomadik/ 801 + F: drivers/i2c/busses/i2c-nomadik.c 802 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git 800 803 801 804 ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT 802 805 M: Nelson Castillo <arhuaco@freaks-unidos.net> ··· 1001 998 F: drivers/rtc/rtc-coh901331.c 1002 999 F: drivers/watchdog/coh901327_wdt.c 1003 1000 F: drivers/dma/coh901318* 1001 + F: drivers/mfd/ab3100* 1002 + F: drivers/rtc/rtc-ab3100.c 1003 + F: drivers/rtc/rtc-coh901331.c 1004 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git 1004 1005 1005 - ARM/U8500 ARM ARCHITECTURE 1006 + ARM/Ux500 ARM ARCHITECTURE 1006 1007 M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com> 1008 + M: Linus Walleij <linus.walleij@stericsson.com> 1007 1009 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1008 1010 S: Maintained 1009 1011 F: arch/arm/mach-ux500/ 1012 + F: drivers/dma/ste_dma40* 1013 + F: drivers/mfd/ab3550* 1014 + F: drivers/mfd/abx500* 1015 + F: drivers/mfd/ab8500* 1016 + F: drivers/mfd/stmpe* 1017 + F: drivers/rtc/rtc-ab8500.c 1018 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git 1010 1019 1011 1020 ARM/VFP SUPPORT 1012 1021 M: Russell King 
<linux@arm.linux.org.uk> ··· 4605 4590 F: include/pcmcia/ 4606 4591 4607 4592 PCNET32 NETWORK DRIVER 4608 - M: Don Fry <pcnet32@verizon.net> 4593 + M: Don Fry <pcnet32@frontier.com> 4609 4594 L: netdev@vger.kernel.org 4610 4595 S: Maintained 4611 4596 F: drivers/net/pcnet32.c
+1 -1
Makefile
··· 1 1 VERSION = 2 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 37 4 - EXTRAVERSION = -rc7 4 + EXTRAVERSION = 5 5 NAME = Flesh-Eating Bats with Fangs 6 6 7 7 # *DOCUMENTATION*
+1
arch/arm/common/it8152.c
··· 352 352 return pci_scan_bus(nr, &it8152_ops, sys); 353 353 } 354 354 355 + EXPORT_SYMBOL(dma_set_coherent_mask);
+1
arch/arm/include/asm/hardware/it8152.h
··· 76 76 IT8152_PD_IRQ(0) Audio controller (ACR) 77 77 */ 78 78 #define IT8152_IRQ(x) (IRQ_BOARD_START + (x)) 79 + #define IT8152_LAST_IRQ (IRQ_BOARD_START + 40) 79 80 80 81 /* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */ 81 82 #define IT8152_LD_IRQ_COUNT 9
-3
arch/arm/include/asm/highmem.h
··· 25 25 extern void *kmap_high_get(struct page *page); 26 26 extern void kunmap_high(struct page *page); 27 27 28 - extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte); 29 - extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte); 30 - 31 28 /* 32 29 * The following functions are already defined by <linux/highmem.h> 33 30 * when CONFIG_HIGHMEM is not set.
+3 -3
arch/arm/include/asm/sizes.h
··· 13 13 * along with this program; if not, write to the Free Software 14 14 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 15 15 */ 16 - /* DO NOT EDIT!! - this file automatically generated 17 - * from .s file by awk -f s2h.awk 18 - */ 19 16 /* Size definitions 20 17 * Copyright (C) ARM Limited 1998. All rights reserved. 21 18 */ ··· 22 25 23 26 /* handy sizes */ 24 27 #define SZ_16 0x00000010 28 + #define SZ_32 0x00000020 29 + #define SZ_64 0x00000040 30 + #define SZ_128 0x00000080 25 31 #define SZ_256 0x00000100 26 32 #define SZ_512 0x00000200 27 33
+1
arch/arm/include/asm/system.h
··· 150 150 #define rmb() dmb() 151 151 #define wmb() mb() 152 152 #else 153 + #include <asm/memory.h> 153 154 #define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) 154 155 #define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) 155 156 #define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+6
arch/arm/kernel/entry-common.S
··· 29 29 ldr r1, [tsk, #TI_FLAGS] 30 30 tst r1, #_TIF_WORK_MASK 31 31 bne fast_work_pending 32 + #if defined(CONFIG_IRQSOFF_TRACER) 33 + asm_trace_hardirqs_on 34 + #endif 32 35 33 36 /* perform architecture specific actions before user return */ 34 37 arch_ret_to_user r1, lr ··· 68 65 tst r1, #_TIF_WORK_MASK 69 66 bne work_pending 70 67 no_work_pending: 68 + #if defined(CONFIG_IRQSOFF_TRACER) 69 + asm_trace_hardirqs_on 70 + #endif 71 71 /* perform architecture specific actions before user return */ 72 72 arch_ret_to_user r1, lr 73 73
-1
arch/arm/kernel/smp.c
··· 310 310 * All kernel threads share the same mm context; grab a 311 311 * reference and switch to it. 312 312 */ 313 - atomic_inc(&mm->mm_users); 314 313 atomic_inc(&mm->mm_count); 315 314 current->active_mm = mm; 316 315 cpumask_set_cpu(cpu, mm_cpumask(mm));
+2
arch/arm/mach-at91/include/mach/at91_mci.h
··· 74 74 #define AT91_MCI_TRTYP_BLOCK (0 << 19) 75 75 #define AT91_MCI_TRTYP_MULTIPLE (1 << 19) 76 76 #define AT91_MCI_TRTYP_STREAM (2 << 19) 77 + #define AT91_MCI_TRTYP_SDIO_BYTE (4 << 19) 78 + #define AT91_MCI_TRTYP_SDIO_BLOCK (5 << 19) 77 79 78 80 #define AT91_MCI_BLKR 0x18 /* Block Register */ 79 81 #define AT91_MCI_BLKR_BCNT(n) ((0xffff & (n)) << 0) /* Block count */
+1 -1
arch/arm/mach-ixp4xx/common-pci.c
··· 513 513 514 514 EXPORT_SYMBOL(ixp4xx_pci_read); 515 515 EXPORT_SYMBOL(ixp4xx_pci_write); 516 - 516 + EXPORT_SYMBOL(dma_set_coherent_mask);
+1
arch/arm/mach-pxa/Kconfig
··· 540 540 config ARCH_PXA_ESERIES 541 541 bool "PXA based Toshiba e-series PDAs" 542 542 select PXA25x 543 + select FB_W100 543 544 544 545 config MACH_E330 545 546 bool "Toshiba e330"
+2 -2
arch/arm/mach-pxa/sleep.S
··· 353 353 354 354 @ Let us ensure we jump to resume_after_mmu only when the mcr above 355 355 @ actually took effect. They call it the "cpwait" operation. 356 - mrc p15, 0, r1, c2, c0, 0 @ queue a dependency on CP15 357 - sub pc, r2, r1, lsr #32 @ jump to virtual addr 356 + mrc p15, 0, r0, c2, c0, 0 @ queue a dependency on CP15 357 + sub pc, r2, r0, lsr #32 @ jump to virtual addr 358 358 nop 359 359 nop 360 360 nop
+19 -18
arch/arm/mm/cache-feroceon-l2.c
··· 13 13 */ 14 14 15 15 #include <linux/init.h> 16 + #include <linux/highmem.h> 16 17 #include <asm/cacheflush.h> 17 - #include <asm/kmap_types.h> 18 - #include <asm/fixmap.h> 19 - #include <asm/pgtable.h> 20 - #include <asm/tlbflush.h> 21 18 #include <plat/cache-feroceon-l2.h> 22 - #include "mm.h" 23 19 24 20 /* 25 21 * Low-level cache maintenance operations. ··· 35 39 * between which we don't want to be preempted. 36 40 */ 37 41 38 - static inline unsigned long l2_start_va(unsigned long paddr) 42 + static inline unsigned long l2_get_va(unsigned long paddr) 39 43 { 40 44 #ifdef CONFIG_HIGHMEM 41 45 /* 42 - * Let's do our own fixmap stuff in a minimal way here. 43 46 * Because range ops can't be done on physical addresses, 44 47 * we simply install a virtual mapping for it only for the 45 48 * TLB lookup to occur, hence no need to flush the untouched 46 - * memory mapping. This is protected with the disabling of 47 - * interrupts by the caller. 49 + * memory mapping afterwards (note: a cache flush may happen 50 + * in some circumstances depending on the path taken in kunmap_atomic). 
48 51 */ 49 - unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id(); 50 - unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 51 - set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0); 52 - local_flush_tlb_kernel_page(vaddr); 53 - return vaddr + (paddr & ~PAGE_MASK); 52 + void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT); 53 + return (unsigned long)vaddr + (paddr & ~PAGE_MASK); 54 54 #else 55 55 return __phys_to_virt(paddr); 56 + #endif 57 + } 58 + 59 + static inline void l2_put_va(unsigned long vaddr) 60 + { 61 + #ifdef CONFIG_HIGHMEM 62 + kunmap_atomic((void *)vaddr); 56 63 #endif 57 64 } 58 65 ··· 75 76 */ 76 77 BUG_ON((start ^ end) >> PAGE_SHIFT); 77 78 78 - raw_local_irq_save(flags); 79 - va_start = l2_start_va(start); 79 + va_start = l2_get_va(start); 80 80 va_end = va_start + (end - start); 81 + raw_local_irq_save(flags); 81 82 __asm__("mcr p15, 1, %0, c15, c9, 4\n\t" 82 83 "mcr p15, 1, %1, c15, c9, 5" 83 84 : : "r" (va_start), "r" (va_end)); 84 85 raw_local_irq_restore(flags); 86 + l2_put_va(va_start); 85 87 } 86 88 87 89 static inline void l2_clean_inv_pa(unsigned long addr) ··· 106 106 */ 107 107 BUG_ON((start ^ end) >> PAGE_SHIFT); 108 108 109 - raw_local_irq_save(flags); 110 - va_start = l2_start_va(start); 109 + va_start = l2_get_va(start); 111 110 va_end = va_start + (end - start); 111 + raw_local_irq_save(flags); 112 112 __asm__("mcr p15, 1, %0, c15, c11, 4\n\t" 113 113 "mcr p15, 1, %1, c15, c11, 5" 114 114 : : "r" (va_start), "r" (va_end)); 115 115 raw_local_irq_restore(flags); 116 + l2_put_va(va_start); 116 117 } 117 118 118 119 static inline void l2_inv_all(void)
+21 -36
arch/arm/mm/cache-xsc3l2.c
··· 17 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 18 */ 19 19 #include <linux/init.h> 20 + #include <linux/highmem.h> 20 21 #include <asm/system.h> 21 22 #include <asm/cputype.h> 22 23 #include <asm/cacheflush.h> 23 - #include <asm/kmap_types.h> 24 - #include <asm/fixmap.h> 25 - #include <asm/pgtable.h> 26 - #include <asm/tlbflush.h> 27 - #include "mm.h" 28 24 29 25 #define CR_L2 (1 << 26) 30 26 ··· 67 71 dsb(); 68 72 } 69 73 74 + static inline void l2_unmap_va(unsigned long va) 75 + { 70 76 #ifdef CONFIG_HIGHMEM 71 - #define l2_map_save_flags(x) raw_local_save_flags(x) 72 - #define l2_map_restore_flags(x) raw_local_irq_restore(x) 73 - #else 74 - #define l2_map_save_flags(x) ((x) = 0) 75 - #define l2_map_restore_flags(x) ((void)(x)) 77 + if (va != -1) 78 + kunmap_atomic((void *)va); 76 79 #endif 80 + } 77 81 78 - static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va, 79 - unsigned long flags) 82 + static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va) 80 83 { 81 84 #ifdef CONFIG_HIGHMEM 82 85 unsigned long va = prev_va & PAGE_MASK; ··· 84 89 /* 85 90 * Switching to a new page. Because cache ops are 86 91 * using virtual addresses only, we must put a mapping 87 - * in place for it. We also enable interrupts for a 88 - * short while and disable them again to protect this 89 - * mapping. 92 + * in place for it. 
90 93 */ 91 - unsigned long idx; 92 - raw_local_irq_restore(flags); 93 - idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id(); 94 - va = __fix_to_virt(FIX_KMAP_BEGIN + idx); 95 - raw_local_irq_restore(flags | PSR_I_BIT); 96 - set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0); 97 - local_flush_tlb_kernel_page(va); 94 + l2_unmap_va(prev_va); 95 + va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT); 98 96 } 99 97 return va + (pa_offset >> (32 - PAGE_SHIFT)); 100 98 #else ··· 97 109 98 110 static void xsc3_l2_inv_range(unsigned long start, unsigned long end) 99 111 { 100 - unsigned long vaddr, flags; 112 + unsigned long vaddr; 101 113 102 114 if (start == 0 && end == -1ul) { 103 115 xsc3_l2_inv_all(); ··· 105 117 } 106 118 107 119 vaddr = -1; /* to force the first mapping */ 108 - l2_map_save_flags(flags); 109 120 110 121 /* 111 122 * Clean and invalidate partial first cache line. 112 123 */ 113 124 if (start & (CACHE_LINE_SIZE - 1)) { 114 - vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags); 125 + vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr); 115 126 xsc3_l2_clean_mva(vaddr); 116 127 xsc3_l2_inv_mva(vaddr); 117 128 start = (start | (CACHE_LINE_SIZE - 1)) + 1; ··· 120 133 * Invalidate all full cache lines between 'start' and 'end'. 121 134 */ 122 135 while (start < (end & ~(CACHE_LINE_SIZE - 1))) { 123 - vaddr = l2_map_va(start, vaddr, flags); 136 + vaddr = l2_map_va(start, vaddr); 124 137 xsc3_l2_inv_mva(vaddr); 125 138 start += CACHE_LINE_SIZE; 126 139 } ··· 129 142 * Clean and invalidate partial last cache line. 
130 143 */ 131 144 if (start < end) { 132 - vaddr = l2_map_va(start, vaddr, flags); 145 + vaddr = l2_map_va(start, vaddr); 133 146 xsc3_l2_clean_mva(vaddr); 134 147 xsc3_l2_inv_mva(vaddr); 135 148 } 136 149 137 - l2_map_restore_flags(flags); 150 + l2_unmap_va(vaddr); 138 151 139 152 dsb(); 140 153 } 141 154 142 155 static void xsc3_l2_clean_range(unsigned long start, unsigned long end) 143 156 { 144 - unsigned long vaddr, flags; 157 + unsigned long vaddr; 145 158 146 159 vaddr = -1; /* to force the first mapping */ 147 - l2_map_save_flags(flags); 148 160 149 161 start &= ~(CACHE_LINE_SIZE - 1); 150 162 while (start < end) { 151 - vaddr = l2_map_va(start, vaddr, flags); 163 + vaddr = l2_map_va(start, vaddr); 152 164 xsc3_l2_clean_mva(vaddr); 153 165 start += CACHE_LINE_SIZE; 154 166 } 155 167 156 - l2_map_restore_flags(flags); 168 + l2_unmap_va(vaddr); 157 169 158 170 dsb(); 159 171 } ··· 179 193 180 194 static void xsc3_l2_flush_range(unsigned long start, unsigned long end) 181 195 { 182 - unsigned long vaddr, flags; 196 + unsigned long vaddr; 183 197 184 198 if (start == 0 && end == -1ul) { 185 199 xsc3_l2_flush_all(); ··· 187 201 } 188 202 189 203 vaddr = -1; /* to force the first mapping */ 190 - l2_map_save_flags(flags); 191 204 192 205 start &= ~(CACHE_LINE_SIZE - 1); 193 206 while (start < end) { 194 - vaddr = l2_map_va(start, vaddr, flags); 207 + vaddr = l2_map_va(start, vaddr); 195 208 xsc3_l2_clean_mva(vaddr); 196 209 xsc3_l2_inv_mva(vaddr); 197 210 start += CACHE_LINE_SIZE; 198 211 } 199 212 200 - l2_map_restore_flags(flags); 213 + l2_unmap_va(vaddr); 201 214 202 215 dsb(); 203 216 }
+4 -3
arch/arm/mm/dma-mapping.c
··· 17 17 #include <linux/init.h> 18 18 #include <linux/device.h> 19 19 #include <linux/dma-mapping.h> 20 + #include <linux/highmem.h> 20 21 21 22 #include <asm/memory.h> 22 23 #include <asm/highmem.h> ··· 481 480 op(vaddr, len, dir); 482 481 kunmap_high(page); 483 482 } else if (cache_is_vipt()) { 484 - pte_t saved_pte; 485 - vaddr = kmap_high_l1_vipt(page, &saved_pte); 483 + /* unmapped pages might still be cached */ 484 + vaddr = kmap_atomic(page); 486 485 op(vaddr + offset, len, dir); 487 - kunmap_high_l1_vipt(page, saved_pte); 486 + kunmap_atomic(vaddr); 488 487 } 489 488 } else { 490 489 vaddr = page_address(page) + offset;
+4 -3
arch/arm/mm/flush.c
··· 10 10 #include <linux/module.h> 11 11 #include <linux/mm.h> 12 12 #include <linux/pagemap.h> 13 + #include <linux/highmem.h> 13 14 14 15 #include <asm/cacheflush.h> 15 16 #include <asm/cachetype.h> ··· 181 180 __cpuc_flush_dcache_area(addr, PAGE_SIZE); 182 181 kunmap_high(page); 183 182 } else if (cache_is_vipt()) { 184 - pte_t saved_pte; 185 - addr = kmap_high_l1_vipt(page, &saved_pte); 183 + /* unmapped pages might still be cached */ 184 + addr = kmap_atomic(page); 186 185 __cpuc_flush_dcache_area(addr, PAGE_SIZE); 187 - kunmap_high_l1_vipt(page, saved_pte); 186 + kunmap_atomic(addr); 188 187 } 189 188 } 190 189
-87
arch/arm/mm/highmem.c
··· 140 140 pte = TOP_PTE(vaddr); 141 141 return pte_page(*pte); 142 142 } 143 - 144 - #ifdef CONFIG_CPU_CACHE_VIPT 145 - 146 - #include <linux/percpu.h> 147 - 148 - /* 149 - * The VIVT cache of a highmem page is always flushed before the page 150 - * is unmapped. Hence unmapped highmem pages need no cache maintenance 151 - * in that case. 152 - * 153 - * However unmapped pages may still be cached with a VIPT cache, and 154 - * it is not possible to perform cache maintenance on them using physical 155 - * addresses unfortunately. So we have no choice but to set up a temporary 156 - * virtual mapping for that purpose. 157 - * 158 - * Yet this VIPT cache maintenance may be triggered from DMA support 159 - * functions which are possibly called from interrupt context. As we don't 160 - * want to keep interrupt disabled all the time when such maintenance is 161 - * taking place, we therefore allow for some reentrancy by preserving and 162 - * restoring the previous fixmap entry before the interrupted context is 163 - * resumed. If the reentrancy depth is 0 then there is no need to restore 164 - * the previous fixmap, and leaving the current one in place allow it to 165 - * be reused the next time without a TLB flush (common with DMA). 
166 - */ 167 - 168 - static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth); 169 - 170 - void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte) 171 - { 172 - unsigned int idx, cpu; 173 - int *depth; 174 - unsigned long vaddr, flags; 175 - pte_t pte, *ptep; 176 - 177 - if (!in_interrupt()) 178 - preempt_disable(); 179 - 180 - cpu = smp_processor_id(); 181 - depth = &per_cpu(kmap_high_l1_vipt_depth, cpu); 182 - 183 - idx = KM_L1_CACHE + KM_TYPE_NR * cpu; 184 - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 185 - ptep = TOP_PTE(vaddr); 186 - pte = mk_pte(page, kmap_prot); 187 - 188 - raw_local_irq_save(flags); 189 - (*depth)++; 190 - if (pte_val(*ptep) == pte_val(pte)) { 191 - *saved_pte = pte; 192 - } else { 193 - *saved_pte = *ptep; 194 - set_pte_ext(ptep, pte, 0); 195 - local_flush_tlb_kernel_page(vaddr); 196 - } 197 - raw_local_irq_restore(flags); 198 - 199 - return (void *)vaddr; 200 - } 201 - 202 - void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte) 203 - { 204 - unsigned int idx, cpu = smp_processor_id(); 205 - int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu); 206 - unsigned long vaddr, flags; 207 - pte_t pte, *ptep; 208 - 209 - idx = KM_L1_CACHE + KM_TYPE_NR * cpu; 210 - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 211 - ptep = TOP_PTE(vaddr); 212 - pte = mk_pte(page, kmap_prot); 213 - 214 - BUG_ON(pte_val(*ptep) != pte_val(pte)); 215 - BUG_ON(*depth <= 0); 216 - 217 - raw_local_irq_save(flags); 218 - (*depth)--; 219 - if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) { 220 - set_pte_ext(ptep, saved_pte, 0); 221 - local_flush_tlb_kernel_page(vaddr); 222 - } 223 - raw_local_irq_restore(flags); 224 - 225 - if (!in_interrupt()) 226 - preempt_enable(); 227 - } 228 - 229 - #endif /* CONFIG_CPU_CACHE_VIPT */
+1 -1
arch/mn10300/kernel/irq.c
··· 459 459 tmp = CROSS_GxICR(irq, new); 460 460 461 461 x &= GxICR_LEVEL | GxICR_ENABLE; 462 - if (GxICR(irq) & GxICR_REQUEST) { 462 + if (GxICR(irq) & GxICR_REQUEST) 463 463 x |= GxICR_REQUEST | GxICR_DETECT; 464 464 CROSS_GxICR(irq, new) = x; 465 465 tmp = CROSS_GxICR(irq, new);
+1
arch/powerpc/platforms/52xx/mpc52xx_gpt.c
··· 63 63 #include <linux/of_gpio.h> 64 64 #include <linux/kernel.h> 65 65 #include <linux/slab.h> 66 + #include <linux/fs.h> 66 67 #include <linux/watchdog.h> 67 68 #include <linux/miscdevice.h> 68 69 #include <linux/uaccess.h>
+1 -1
arch/sh/boards/mach-se/7206/irq.c
··· 140 140 make_se7206_irq(IRQ1_IRQ); /* ATA */ 141 141 make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */ 142 142 143 - __raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR); /* ICR1 */ 143 + __raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR1); /* ICR1 */ 144 144 145 145 /* FPGA System register setup*/ 146 146 __raw_writew(0x0000,INTSTS0); /* Clear INTSTS0 */
+1 -1
arch/sh/kernel/cpu/sh2a/clock-sh7201.c
··· 34 34 35 35 static void master_clk_init(struct clk *clk) 36 36 { 37 - return 10000000 * PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007]; 37 + clk->rate = 10000000 * PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007]; 38 38 } 39 39 40 40 static struct clk_ops sh7201_master_clk_ops = {
+1 -2
arch/sh/kernel/cpu/sh4/clock-sh4-202.c
··· 81 81 for (i = 0; i < ARRAY_SIZE(frqcr3_divisors); i++) { 82 82 int divisor = frqcr3_divisors[i]; 83 83 84 - if (clk->ops->set_rate(clk, clk->parent->rate / 85 - divisor, 0) == 0) 84 + if (clk->ops->set_rate(clk, clk->parent->rate / divisor) == 0) 86 85 break; 87 86 } 88 87
+5 -11
arch/x86/kernel/microcode_intel.c
··· 364 364 365 365 /* For performance reasons, reuse mc area when possible */ 366 366 if (!mc || mc_size > curr_mc_size) { 367 - if (mc) 368 - vfree(mc); 367 + vfree(mc); 369 368 mc = vmalloc(mc_size); 370 369 if (!mc) 371 370 break; ··· 373 374 374 375 if (get_ucode_data(mc, ucode_ptr, mc_size) || 375 376 microcode_sanity_check(mc) < 0) { 376 - vfree(mc); 377 377 break; 378 378 } 379 379 380 380 if (get_matching_microcode(&uci->cpu_sig, mc, new_rev)) { 381 - if (new_mc) 382 - vfree(new_mc); 381 + vfree(new_mc); 383 382 new_rev = mc_header.rev; 384 383 new_mc = mc; 385 384 mc = NULL; /* trigger new vmalloc */ ··· 387 390 leftover -= mc_size; 388 391 } 389 392 390 - if (mc) 391 - vfree(mc); 393 + vfree(mc); 392 394 393 395 if (leftover) { 394 - if (new_mc) 395 - vfree(new_mc); 396 + vfree(new_mc); 396 397 state = UCODE_ERROR; 397 398 goto out; 398 399 } ··· 400 405 goto out; 401 406 } 402 407 403 - if (uci->mc) 404 - vfree(uci->mc); 408 + vfree(uci->mc); 405 409 uci->mc = (struct microcode_intel *)new_mc; 406 410 407 411 pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
+14 -3
arch/x86/kernel/setup.c
··· 501 501 return total << PAGE_SHIFT; 502 502 } 503 503 504 - #define DEFAULT_BZIMAGE_ADDR_MAX 0x37FFFFFF 504 + /* 505 + * Keep the crash kernel below this limit. On 32 bits earlier kernels 506 + * would limit the kernel to the low 512 MiB due to mapping restrictions. 507 + * On 64 bits, kexec-tools currently limits us to 896 MiB; increase this 508 + * limit once kexec-tools are fixed. 509 + */ 510 + #ifdef CONFIG_X86_32 511 + # define CRASH_KERNEL_ADDR_MAX (512 << 20) 512 + #else 513 + # define CRASH_KERNEL_ADDR_MAX (896 << 20) 514 + #endif 515 + 505 516 static void __init reserve_crashkernel(void) 506 517 { 507 518 unsigned long long total_mem; ··· 531 520 const unsigned long long alignment = 16<<20; /* 16M */ 532 521 533 522 /* 534 - * kexec want bzImage is below DEFAULT_BZIMAGE_ADDR_MAX 523 + * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX 535 524 */ 536 525 crash_base = memblock_find_in_range(alignment, 537 - DEFAULT_BZIMAGE_ADDR_MAX, crash_size, alignment); 526 + CRASH_KERNEL_ADDR_MAX, crash_size, alignment); 538 527 539 528 if (crash_base == MEMBLOCK_ERROR) { 540 529 pr_info("crashkernel reservation failed - No suitable area found.\n");
+2
arch/x86/kvm/i8259.c
··· 575 575 s->pics[1].elcr_mask = 0xde; 576 576 s->pics[0].pics_state = s; 577 577 s->pics[1].pics_state = s; 578 + s->pics[0].isr_ack = 0xff; 579 + s->pics[1].isr_ack = 0xff; 578 580 579 581 /* 580 582 * Initialize PIO device
+2 -1
arch/x86/kvm/mmu.c
··· 2394 2394 ASSERT(!VALID_PAGE(root)); 2395 2395 spin_lock(&vcpu->kvm->mmu_lock); 2396 2396 kvm_mmu_free_some_pages(vcpu); 2397 - sp = kvm_mmu_get_page(vcpu, i << 30, i << 30, 2397 + sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), 2398 + i << 30, 2398 2399 PT32_ROOT_LEVEL, 1, ACC_ALL, 2399 2400 NULL); 2400 2401 root = __pa(sp->spt);
+16 -8
arch/x86/oprofile/op_model_amd.c
··· 630 630 return 0; 631 631 } 632 632 633 - /* initialize the APIC for the IBS interrupts if available */ 633 + /* 634 + * check and reserve APIC extended interrupt LVT offset for IBS if 635 + * available 636 + * 637 + * init_ibs() preforms implicitly cpu-local operations, so pin this 638 + * thread to its current CPU 639 + */ 640 + 634 641 static void init_ibs(void) 635 642 { 643 + preempt_disable(); 644 + 636 645 ibs_caps = get_ibs_caps(); 637 - 638 646 if (!ibs_caps) 639 - return; 647 + goto out; 640 648 641 - if (__init_ibs_nmi()) { 649 + if (__init_ibs_nmi() < 0) 642 650 ibs_caps = 0; 643 - return; 644 - } 651 + else 652 + printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps); 645 653 646 - printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", 647 - (unsigned)ibs_caps); 654 + out: 655 + preempt_enable(); 648 656 } 649 657 650 658 static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
+3
drivers/acpi/acpica/evgpeinit.c
··· 408 408 return_ACPI_STATUS(AE_OK); 409 409 } 410 410 411 + /* Disable the GPE in case it's been enabled already. */ 412 + (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); 413 + 411 414 /* 412 415 * Add the GPE information from above to the gpe_event_info block for 413 416 * use during dispatch of this GPE.
-5
drivers/acpi/battery.c
··· 130 130 unsigned long flags; 131 131 }; 132 132 133 - static int acpi_battery_update(struct acpi_battery *battery); 134 - 135 133 #define to_acpi_battery(x) container_of(x, struct acpi_battery, bat); 136 134 137 135 inline int acpi_battery_present(struct acpi_battery *battery) ··· 183 185 { 184 186 int ret = 0; 185 187 struct acpi_battery *battery = to_acpi_battery(psy); 186 - 187 - if (acpi_battery_update(battery)) 188 - return -ENODEV; 189 188 190 189 if (acpi_battery_present(battery)) { 191 190 /* run battery update only if it is present */
+60 -37
drivers/acpi/scan.c
··· 705 705 } 706 706 707 707 static acpi_status 708 - acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device, 709 - union acpi_object *package) 708 + acpi_bus_extract_wakeup_device_power_package(acpi_handle handle, 709 + struct acpi_device_wakeup *wakeup) 710 710 { 711 - int i = 0; 711 + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 712 + union acpi_object *package = NULL; 712 713 union acpi_object *element = NULL; 714 + acpi_status status; 715 + int i = 0; 713 716 714 - if (!device || !package || (package->package.count < 2)) 717 + if (!wakeup) 715 718 return AE_BAD_PARAMETER; 719 + 720 + /* _PRW */ 721 + status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer); 722 + if (ACPI_FAILURE(status)) { 723 + ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW")); 724 + return status; 725 + } 726 + 727 + package = (union acpi_object *)buffer.pointer; 728 + 729 + if (!package || (package->package.count < 2)) { 730 + status = AE_BAD_DATA; 731 + goto out; 732 + } 716 733 717 734 element = &(package->package.elements[0]); 718 - if (!element) 719 - return AE_BAD_PARAMETER; 735 + if (!element) { 736 + status = AE_BAD_DATA; 737 + goto out; 738 + } 720 739 if (element->type == ACPI_TYPE_PACKAGE) { 721 740 if ((element->package.count < 2) || 722 741 (element->package.elements[0].type != 723 742 ACPI_TYPE_LOCAL_REFERENCE) 724 - || (element->package.elements[1].type != ACPI_TYPE_INTEGER)) 725 - return AE_BAD_DATA; 726 - device->wakeup.gpe_device = 743 + || (element->package.elements[1].type != ACPI_TYPE_INTEGER)) { 744 + status = AE_BAD_DATA; 745 + goto out; 746 + } 747 + wakeup->gpe_device = 727 748 element->package.elements[0].reference.handle; 728 - device->wakeup.gpe_number = 749 + wakeup->gpe_number = 729 750 (u32) element->package.elements[1].integer.value; 730 751 } else if (element->type == ACPI_TYPE_INTEGER) { 731 - device->wakeup.gpe_number = element->integer.value; 732 - } else 733 - return AE_BAD_DATA; 752 + wakeup->gpe_device = NULL; 
753 + wakeup->gpe_number = element->integer.value; 754 + } else { 755 + status = AE_BAD_DATA; 756 + goto out; 757 + } 734 758 735 759 element = &(package->package.elements[1]); 736 760 if (element->type != ACPI_TYPE_INTEGER) { 737 - return AE_BAD_DATA; 761 + status = AE_BAD_DATA; 762 + goto out; 738 763 } 739 - device->wakeup.sleep_state = element->integer.value; 764 + wakeup->sleep_state = element->integer.value; 740 765 741 766 if ((package->package.count - 2) > ACPI_MAX_HANDLES) { 742 - return AE_NO_MEMORY; 767 + status = AE_NO_MEMORY; 768 + goto out; 743 769 } 744 - device->wakeup.resources.count = package->package.count - 2; 745 - for (i = 0; i < device->wakeup.resources.count; i++) { 770 + wakeup->resources.count = package->package.count - 2; 771 + for (i = 0; i < wakeup->resources.count; i++) { 746 772 element = &(package->package.elements[i + 2]); 747 - if (element->type != ACPI_TYPE_LOCAL_REFERENCE) 748 - return AE_BAD_DATA; 773 + if (element->type != ACPI_TYPE_LOCAL_REFERENCE) { 774 + status = AE_BAD_DATA; 775 + goto out; 776 + } 749 777 750 - device->wakeup.resources.handles[i] = element->reference.handle; 778 + wakeup->resources.handles[i] = element->reference.handle; 751 779 } 752 780 753 - acpi_gpe_can_wake(device->wakeup.gpe_device, device->wakeup.gpe_number); 781 + acpi_gpe_can_wake(wakeup->gpe_device, wakeup->gpe_number); 754 782 755 - return AE_OK; 783 + out: 784 + kfree(buffer.pointer); 785 + 786 + return status; 756 787 } 757 788 758 789 static void acpi_bus_set_run_wake_flags(struct acpi_device *device) ··· 818 787 static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) 819 788 { 820 789 acpi_status status = 0; 821 - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 822 - union acpi_object *package = NULL; 823 790 int psw_error; 824 791 825 - /* _PRW */ 826 - status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer); 827 - if (ACPI_FAILURE(status)) { 828 - ACPI_EXCEPTION((AE_INFO, status, "Evaluating 
_PRW")); 829 - goto end; 830 - } 831 - 832 - package = (union acpi_object *)buffer.pointer; 833 - status = acpi_bus_extract_wakeup_device_power_package(device, package); 792 + status = acpi_bus_extract_wakeup_device_power_package(device->handle, 793 + &device->wakeup); 834 794 if (ACPI_FAILURE(status)) { 835 795 ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package")); 836 796 goto end; 837 797 } 838 - 839 - kfree(buffer.pointer); 840 798 841 799 device->wakeup.flags.valid = 1; 842 800 device->wakeup.prepare_count = 0; ··· 1371 1351 struct acpi_bus_ops *ops = context; 1372 1352 int type; 1373 1353 unsigned long long sta; 1354 + struct acpi_device_wakeup wakeup; 1374 1355 struct acpi_device *device; 1375 1356 acpi_status status; 1376 1357 int result; ··· 1381 1360 return AE_OK; 1382 1361 1383 1362 if (!(sta & ACPI_STA_DEVICE_PRESENT) && 1384 - !(sta & ACPI_STA_DEVICE_FUNCTIONING)) 1363 + !(sta & ACPI_STA_DEVICE_FUNCTIONING)) { 1364 + acpi_bus_extract_wakeup_device_power_package(handle, &wakeup); 1385 1365 return AE_CTRL_DEPTH; 1366 + } 1386 1367 1387 1368 /* 1388 1369 * We may already have an acpi_device from a previous enumeration. If
+11 -11
drivers/ata/Kconfig
··· 128 128 129 129 If unsure, say N. 130 130 131 - config PATA_MPC52xx 132 - tristate "Freescale MPC52xx SoC internal IDE" 133 - depends on PPC_MPC52xx && PPC_BESTCOMM 134 - select PPC_BESTCOMM_ATA 135 - help 136 - This option enables support for integrated IDE controller 137 - of the Freescale MPC52xx SoC. 138 - 139 - If unsure, say N. 140 - 141 131 config PATA_OCTEON_CF 142 132 tristate "OCTEON Boot Bus Compact Flash support" 143 133 depends on CPU_CAVIUM_OCTEON ··· 356 366 357 367 config PATA_CS5536 358 368 tristate "CS5536 PATA support" 359 - depends on PCI && X86 && !X86_64 369 + depends on PCI 360 370 help 361 371 This option enables support for the AMD CS5536 362 372 companion chip used with the Geode LX processor family. ··· 478 488 controllers. If you wish to use only the SATA ports then select 479 489 the AHCI driver alone. If you wish to the use the PATA port or 480 490 both SATA and PATA include this driver. 491 + 492 + If unsure, say N. 493 + 494 + config PATA_MPC52xx 495 + tristate "Freescale MPC52xx SoC internal IDE" 496 + depends on PPC_MPC52xx && PPC_BESTCOMM 497 + select PPC_BESTCOMM_ATA 498 + help 499 + This option enables support for integrated IDE controller 500 + of the Freescale MPC52xx SoC. 481 501 482 502 If unsure, say N. 483 503
+1 -1
drivers/ata/Makefile
··· 11 11 12 12 # SFF w/ custom DMA 13 13 obj-$(CONFIG_PDC_ADMA) += pdc_adma.o 14 - obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o 15 14 obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o 16 15 obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o 17 16 obj-$(CONFIG_SATA_SX4) += sata_sx4.o ··· 51 52 obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o 52 53 obj-$(CONFIG_PATA_MACIO) += pata_macio.o 53 54 obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o 55 + obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o 54 56 obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o 55 57 obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o 56 58 obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o
+15 -9
drivers/ata/libata-core.c
··· 4807 4807 { 4808 4808 struct ata_device *dev = qc->dev; 4809 4809 4810 - if (ata_tag_internal(qc->tag)) 4811 - return; 4812 - 4813 4810 if (ata_is_nodata(qc->tf.protocol)) 4814 4811 return; 4815 4812 ··· 4855 4858 if (unlikely(qc->err_mask)) 4856 4859 qc->flags |= ATA_QCFLAG_FAILED; 4857 4860 4858 - if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 4859 - /* always fill result TF for failed qc */ 4861 + /* 4862 + * Finish internal commands without any further processing 4863 + * and always with the result TF filled. 4864 + */ 4865 + if (unlikely(ata_tag_internal(qc->tag))) { 4860 4866 fill_result_tf(qc); 4867 + __ata_qc_complete(qc); 4868 + return; 4869 + } 4861 4870 4862 - if (!ata_tag_internal(qc->tag)) 4863 - ata_qc_schedule_eh(qc); 4864 - else 4865 - __ata_qc_complete(qc); 4871 + /* 4872 + * Non-internal qc has failed. Fill the result TF and 4873 + * summon EH. 4874 + */ 4875 + if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 4876 + fill_result_tf(qc); 4877 + ata_qc_schedule_eh(qc); 4866 4878 return; 4867 4879 } 4868 4880
+14 -3
drivers/ata/libata-eh.c
··· 3275 3275 struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; 3276 3276 struct ata_eh_context *ehc = &link->eh_context; 3277 3277 struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; 3278 + enum ata_lpm_policy old_policy = link->lpm_policy; 3278 3279 unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; 3279 3280 unsigned int err_mask; 3280 3281 int rc; ··· 3339 3338 goto fail; 3340 3339 } 3341 3340 3341 + /* 3342 + * Low level driver acked the transition. Issue DIPM command 3343 + * with the new policy set. 3344 + */ 3345 + link->lpm_policy = policy; 3346 + if (ap && ap->slave_link) 3347 + ap->slave_link->lpm_policy = policy; 3348 + 3342 3349 /* host config updated, enable DIPM if transitioning to MIN_POWER */ 3343 3350 ata_for_each_dev(dev, link, ENABLED) { 3344 3351 if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) { ··· 3362 3353 } 3363 3354 } 3364 3355 3365 - link->lpm_policy = policy; 3366 - if (ap && ap->slave_link) 3367 - ap->slave_link->lpm_policy = policy; 3368 3356 return 0; 3369 3357 3370 3358 fail: 3359 + /* restore the old policy */ 3360 + link->lpm_policy = old_policy; 3361 + if (ap && ap->slave_link) 3362 + ap->slave_link->lpm_policy = old_policy; 3363 + 3371 3364 /* if no device or only one more chance is left, disable LPM */ 3372 3365 if (!dev || ehc->tries[dev->devno] <= 2) { 3373 3366 ata_link_printk(link, KERN_WARNING,
+3 -4
drivers/ata/libata-sff.c
··· 1532 1532 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 1533 1533 return ata_sff_idle_irq(ap); 1534 1534 break; 1535 - case HSM_ST: 1536 - case HSM_ST_LAST: 1537 - break; 1538 - default: 1535 + case HSM_ST_IDLE: 1539 1536 return ata_sff_idle_irq(ap); 1537 + default: 1538 + break; 1540 1539 } 1541 1540 1542 1541 /* check main status, clearing INTRQ if needed */
+14 -6
drivers/ata/pata_cs5536.c
··· 37 37 #include <linux/delay.h> 38 38 #include <linux/libata.h> 39 39 #include <scsi/scsi_host.h> 40 + 41 + #ifdef CONFIG_X86_32 40 42 #include <asm/msr.h> 43 + static int use_msr; 44 + module_param_named(msr, use_msr, int, 0644); 45 + MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)"); 46 + #else 47 + #undef rdmsr /* avoid accidental MSR usage on, e.g. x86-64 */ 48 + #undef wrmsr 49 + #define rdmsr(x, y, z) do { } while (0) 50 + #define wrmsr(x, y, z) do { } while (0) 51 + #define use_msr 0 52 + #endif 41 53 42 54 #define DRV_NAME "pata_cs5536" 43 - #define DRV_VERSION "0.0.7" 55 + #define DRV_VERSION "0.0.8" 44 56 45 57 enum { 46 58 CFG = 0, ··· 87 75 IDE_ETC_NODMA = 0x03, 88 76 }; 89 77 90 - static int use_msr; 91 - 92 78 static const u32 msr_reg[4] = { 93 79 MSR_IDE_CFG, MSR_IDE_DTC, MSR_IDE_CAST, MSR_IDE_ETC, 94 80 }; ··· 98 88 static inline int cs5536_read(struct pci_dev *pdev, int reg, u32 *val) 99 89 { 100 90 if (unlikely(use_msr)) { 101 - u32 dummy; 91 + u32 dummy __maybe_unused; 102 92 103 93 rdmsr(msr_reg[reg], *val, dummy); 104 94 return 0; ··· 304 294 MODULE_LICENSE("GPL"); 305 295 MODULE_DEVICE_TABLE(pci, cs5536); 306 296 MODULE_VERSION(DRV_VERSION); 307 - module_param_named(msr, use_msr, int, 0644); 308 - MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)"); 309 297 310 298 module_init(cs5536_init); 311 299 module_exit(cs5536_exit);
+4 -1
drivers/atm/atmtcp.c
··· 392 392 atm_dev_put(dev); 393 393 return -EMEDIUMTYPE; 394 394 } 395 - if (PRIV(dev)->vcc) return -EBUSY; 395 + if (PRIV(dev)->vcc) { 396 + atm_dev_put(dev); 397 + return -EBUSY; 398 + } 396 399 } 397 400 else { 398 401 int error;
+4 -2
drivers/bluetooth/hci_ldisc.c
··· 311 311 312 312 if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) { 313 313 hu->proto->close(hu); 314 - hci_unregister_dev(hdev); 315 - hci_free_dev(hdev); 314 + if (hdev) { 315 + hci_unregister_dev(hdev); 316 + hci_free_dev(hdev); 317 + } 316 318 } 317 319 } 318 320 }
+9 -2
drivers/char/agp/intel-gtt.c
··· 1192 1192 writel(1, intel_private.i9xx_flush_page); 1193 1193 } 1194 1194 1195 - static void i965_write_entry(dma_addr_t addr, unsigned int entry, 1195 + static void i965_write_entry(dma_addr_t addr, 1196 + unsigned int entry, 1196 1197 unsigned int flags) 1197 1198 { 1199 + u32 pte_flags; 1200 + 1201 + pte_flags = I810_PTE_VALID; 1202 + if (flags == AGP_USER_CACHED_MEMORY) 1203 + pte_flags |= I830_PTE_SYSTEM_CACHED; 1204 + 1198 1205 /* Shift high bits down */ 1199 1206 addr |= (addr >> 28) & 0xf0; 1200 - writel(addr | I810_PTE_VALID, intel_private.gtt + entry); 1207 + writel(addr | pte_flags, intel_private.gtt + entry); 1201 1208 } 1202 1209 1203 1210 static bool gen6_check_flags(unsigned int flags)
+7 -5
drivers/char/ramoops.c
··· 29 29 #include <linux/ramoops.h> 30 30 31 31 #define RAMOOPS_KERNMSG_HDR "====" 32 - #define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval)) 33 32 34 33 #define RECORD_SIZE 4096 35 34 ··· 64 65 struct ramoops_context, dump); 65 66 unsigned long s1_start, s2_start; 66 67 unsigned long l1_cpy, l2_cpy; 67 - int res; 68 - char *buf; 68 + int res, hdr_size; 69 + char *buf, *buf_orig; 69 70 struct timeval timestamp; 70 71 71 72 /* Only dump oopses if dump_oops is set */ ··· 73 74 return; 74 75 75 76 buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE)); 77 + buf_orig = buf; 78 + 76 79 memset(buf, '\0', RECORD_SIZE); 77 80 res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR); 78 81 buf += res; ··· 82 81 res = sprintf(buf, "%lu.%lu\n", (long)timestamp.tv_sec, (long)timestamp.tv_usec); 83 82 buf += res; 84 83 85 - l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE)); 86 - l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy); 84 + hdr_size = buf - buf_orig; 85 + l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - hdr_size)); 86 + l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - hdr_size) - l2_cpy); 87 87 88 88 s2_start = l2 - l2_cpy; 89 89 s1_start = l1 - l1_cpy;
+1 -1
drivers/dma/mv_xor.c
··· 449 449 static void mv_xor_tasklet(unsigned long data) 450 450 { 451 451 struct mv_xor_chan *chan = (struct mv_xor_chan *) data; 452 - __mv_xor_slot_cleanup(chan); 452 + mv_xor_slot_cleanup(chan); 453 453 } 454 454 455 455 static struct mv_xor_desc_slot *
+15 -4
drivers/gpio/cs5535-gpio.c
··· 56 56 * registers, see include/linux/cs5535.h. 57 57 */ 58 58 59 - static void errata_outl(u32 val, unsigned long addr) 59 + static void errata_outl(struct cs5535_gpio_chip *chip, u32 val, 60 + unsigned int reg) 60 61 { 62 + unsigned long addr = chip->base + 0x80 + reg; 63 + 61 64 /* 62 65 * According to the CS5536 errata (#36), after suspend 63 66 * a write to the high bank GPIO register will clear all 64 67 * non-selected bits; the recommended workaround is a 65 68 * read-modify-write operation. 69 + * 70 + * Don't apply this errata to the edge status GPIOs, as writing 71 + * to their lower bits will clear them. 66 72 */ 67 - val |= inl(addr); 73 + if (reg != GPIO_POSITIVE_EDGE_STS && reg != GPIO_NEGATIVE_EDGE_STS) { 74 + if (val & 0xffff) 75 + val |= (inl(addr) & 0xffff); /* ignore the high bits */ 76 + else 77 + val |= (inl(addr) ^ (val >> 16)); 78 + } 68 79 outl(val, addr); 69 80 } 70 81 ··· 87 76 outl(1 << offset, chip->base + reg); 88 77 else 89 78 /* high bank register */ 90 - errata_outl(1 << (offset - 16), chip->base + 0x80 + reg); 79 + errata_outl(chip, 1 << (offset - 16), reg); 91 80 } 92 81 93 82 void cs5535_gpio_set(unsigned offset, unsigned int reg) ··· 109 98 outl(1 << (offset + 16), chip->base + reg); 110 99 else 111 100 /* high bank register */ 112 - errata_outl(1 << offset, chip->base + 0x80 + reg); 101 + errata_outl(chip, 1 << offset, reg); 113 102 } 114 103 115 104 void cs5535_gpio_clear(unsigned offset, unsigned int reg)
+3
drivers/gpio/gpiolib.c
··· 1281 1281 err = gpio_direction_output(gpio, 1282 1282 (flags & GPIOF_INIT_HIGH) ? 1 : 0); 1283 1283 1284 + if (err) 1285 + gpio_free(gpio); 1286 + 1284 1287 return err; 1285 1288 } 1286 1289 EXPORT_SYMBOL_GPL(gpio_request_one);
+1 -1
drivers/gpio/rdc321x-gpio.c
··· 135 135 struct rdc321x_gpio *rdc321x_gpio_dev; 136 136 struct rdc321x_gpio_pdata *pdata; 137 137 138 - pdata = pdev->dev.platform_data; 138 + pdata = platform_get_drvdata(pdev); 139 139 if (!pdata) { 140 140 dev_err(&pdev->dev, "no platform data supplied\n"); 141 141 return -ENODEV;
+5 -2
drivers/gpu/drm/drm_crtc_helper.c
··· 241 241 } 242 242 243 243 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 244 - if (encoder->crtc && !drm_helper_encoder_in_use(encoder)) { 244 + if (!drm_helper_encoder_in_use(encoder)) { 245 245 drm_encoder_disable(encoder); 246 246 /* disconnector encoder from any connector */ 247 247 encoder->crtc = NULL; ··· 874 874 continue; 875 875 876 876 connector->status = connector->funcs->detect(connector, false); 877 - DRM_DEBUG_KMS("connector status updated to %d\n", connector->status); 877 + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", 878 + connector->base.id, 879 + drm_get_connector_name(connector), 880 + old_status, connector->status); 878 881 if (old_status != connector->status) 879 882 changed = true; 880 883 }
+1 -1
drivers/gpu/drm/i915/dvo_ch7017.c
··· 242 242 243 243 static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo) 244 244 { 245 - return connector_status_unknown; 245 + return connector_status_connected; 246 246 } 247 247 248 248 static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
+23
drivers/gpu/drm/i915/i915_dma.c
··· 34 34 #include "i915_drm.h" 35 35 #include "i915_drv.h" 36 36 #include "i915_trace.h" 37 + #include "../../../platform/x86/intel_ips.h" 37 38 #include <linux/pci.h> 38 39 #include <linux/vgaarb.h> 39 40 #include <linux/acpi.h> ··· 1872 1871 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); 1873 1872 1874 1873 /** 1874 + * Tells the intel_ips driver that the i915 driver is now loaded, if 1875 + * IPS got loaded first. 1876 + * 1877 + * This awkward dance is so that neither module has to depend on the 1878 + * other in order for IPS to do the appropriate communication of 1879 + * GPU turbo limits to i915. 1880 + */ 1881 + static void 1882 + ips_ping_for_i915_load(void) 1883 + { 1884 + void (*link)(void); 1885 + 1886 + link = symbol_get(ips_link_to_i915_driver); 1887 + if (link) { 1888 + link(); 1889 + symbol_put(ips_link_to_i915_driver); 1890 + } 1891 + } 1892 + 1893 + /** 1875 1894 * i915_driver_load - setup chip and create an initial config 1876 1895 * @dev: DRM device 1877 1896 * @flags: startup flags ··· 2095 2074 i915_mch_dev = dev_priv; 2096 2075 dev_priv->mchdev_lock = &mchdev_lock; 2097 2076 spin_unlock(&mchdev_lock); 2077 + 2078 + ips_ping_for_i915_load(); 2098 2079 2099 2080 return 0; 2100 2081
+10
drivers/gpu/drm/i915/i915_reg.h
··· 2471 2471 # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) 2472 2472 # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) 2473 2473 2474 + #define PCH_3DCGDIS1 0x46024 2475 + # define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11) 2476 + 2474 2477 #define FDI_PLL_FREQ_CTL 0x46030 2475 2478 #define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) 2476 2479 #define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 ··· 2591 2588 #define ILK_DISPLAY_CHICKEN2 0x42004 2592 2589 #define ILK_DPARB_GATE (1<<22) 2593 2590 #define ILK_VSDPFD_FULL (1<<21) 2591 + #define ILK_DISPLAY_CHICKEN_FUSES 0x42014 2592 + #define ILK_INTERNAL_GRAPHICS_DISABLE (1<<31) 2593 + #define ILK_INTERNAL_DISPLAY_DISABLE (1<<30) 2594 + #define ILK_DISPLAY_DEBUG_DISABLE (1<<29) 2595 + #define ILK_HDCP_DISABLE (1<<25) 2596 + #define ILK_eDP_A_DISABLE (1<<24) 2597 + #define ILK_DESKTOP (1<<23) 2594 2598 #define ILK_DSPCLK_GATE 0x42020 2595 2599 #define ILK_DPARB_CLK_GATE (1<<5) 2596 2600 /* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
+20 -1
drivers/gpu/drm/i915/intel_display.c
··· 5379 5379 return index_mask; 5380 5380 } 5381 5381 5382 + static bool has_edp_a(struct drm_device *dev) 5383 + { 5384 + struct drm_i915_private *dev_priv = dev->dev_private; 5385 + 5386 + if (!IS_MOBILE(dev)) 5387 + return false; 5388 + 5389 + if ((I915_READ(DP_A) & DP_DETECTED) == 0) 5390 + return false; 5391 + 5392 + if (IS_GEN5(dev) && 5393 + (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) 5394 + return false; 5395 + 5396 + return true; 5397 + } 5398 + 5382 5399 static void intel_setup_outputs(struct drm_device *dev) 5383 5400 { 5384 5401 struct drm_i915_private *dev_priv = dev->dev_private; ··· 5413 5396 if (HAS_PCH_SPLIT(dev)) { 5414 5397 dpd_is_edp = intel_dpd_is_edp(dev); 5415 5398 5416 - if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) 5399 + if (has_edp_a(dev)) 5417 5400 intel_dp_init(dev, DP_A); 5418 5401 5419 5402 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) ··· 5842 5825 I915_WRITE(PCH_3DCGDIS0, 5843 5826 MARIUNIT_CLOCK_GATE_DISABLE | 5844 5827 SVSMUNIT_CLOCK_GATE_DISABLE); 5828 + I915_WRITE(PCH_3DCGDIS1, 5829 + VFMUNIT_CLOCK_GATE_DISABLE); 5845 5830 } 5846 5831 5847 5832 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+30 -7
drivers/gpu/drm/i915/intel_dp.c
··· 479 479 uint16_t address = algo_data->address; 480 480 uint8_t msg[5]; 481 481 uint8_t reply[2]; 482 + unsigned retry; 482 483 int msg_bytes; 483 484 int reply_bytes; 484 485 int ret; ··· 514 513 break; 515 514 } 516 515 517 - for (;;) { 518 - ret = intel_dp_aux_ch(intel_dp, 519 - msg, msg_bytes, 520 - reply, reply_bytes); 516 + for (retry = 0; retry < 5; retry++) { 517 + ret = intel_dp_aux_ch(intel_dp, 518 + msg, msg_bytes, 519 + reply, reply_bytes); 521 520 if (ret < 0) { 522 521 DRM_DEBUG_KMS("aux_ch failed %d\n", ret); 523 522 return ret; 524 523 } 524 + 525 + switch (reply[0] & AUX_NATIVE_REPLY_MASK) { 526 + case AUX_NATIVE_REPLY_ACK: 527 + /* I2C-over-AUX Reply field is only valid 528 + * when paired with AUX ACK. 529 + */ 530 + break; 531 + case AUX_NATIVE_REPLY_NACK: 532 + DRM_DEBUG_KMS("aux_ch native nack\n"); 533 + return -EREMOTEIO; 534 + case AUX_NATIVE_REPLY_DEFER: 535 + udelay(100); 536 + continue; 537 + default: 538 + DRM_ERROR("aux_ch invalid native reply 0x%02x\n", 539 + reply[0]); 540 + return -EREMOTEIO; 541 + } 542 + 525 543 switch (reply[0] & AUX_I2C_REPLY_MASK) { 526 544 case AUX_I2C_REPLY_ACK: 527 545 if (mode == MODE_I2C_READ) { ··· 548 528 } 549 529 return reply_bytes - 1; 550 530 case AUX_I2C_REPLY_NACK: 551 - DRM_DEBUG_KMS("aux_ch nack\n"); 531 + DRM_DEBUG_KMS("aux_i2c nack\n"); 552 532 return -EREMOTEIO; 553 533 case AUX_I2C_REPLY_DEFER: 554 - DRM_DEBUG_KMS("aux_ch defer\n"); 534 + DRM_DEBUG_KMS("aux_i2c defer\n"); 555 535 udelay(100); 556 536 break; 557 537 default: 558 - DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); 538 + DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); 559 539 return -EREMOTEIO; 560 540 } 561 541 } 542 + 543 + DRM_ERROR("too many retries, giving up\n"); 544 + return -EREMOTEIO; 562 545 } 563 546 564 547 static int
+8 -11
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 696 696 drm_i915_private_t *dev_priv = dev->dev_private; 697 697 u32 head; 698 698 699 - head = intel_read_status_page(ring, 4); 700 - if (head) { 701 - ring->head = head & HEAD_ADDR; 702 - ring->space = ring->head - (ring->tail + 8); 703 - if (ring->space < 0) 704 - ring->space += ring->size; 705 - if (ring->space >= n) 706 - return 0; 707 - } 708 - 709 699 trace_i915_ring_wait_begin (dev); 710 700 end = jiffies + 3 * HZ; 711 701 do { 712 - ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; 702 + /* If the reported head position has wrapped or hasn't advanced, 703 + * fallback to the slow and accurate path. 704 + */ 705 + head = intel_read_status_page(ring, 4); 706 + if (head < ring->actual_head) 707 + head = I915_READ_HEAD(ring); 708 + ring->actual_head = head; 709 + ring->head = head & HEAD_ADDR; 713 710 ring->space = ring->head - (ring->tail + 8); 714 711 if (ring->space < 0) 715 712 ring->space += ring->size;
+3 -2
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 30 30 struct drm_device *dev; 31 31 struct drm_gem_object *gem_object; 32 32 33 - unsigned int head; 34 - unsigned int tail; 33 + u32 actual_head; 34 + u32 head; 35 + u32 tail; 35 36 int space; 36 37 struct intel_hw_status_page status_page; 37 38
+8 -4
drivers/gpu/drm/i915/intel_sdvo.c
··· 1908 1908 speed = mapping->i2c_speed; 1909 1909 } 1910 1910 1911 - sdvo->i2c = &dev_priv->gmbus[pin].adapter; 1912 - intel_gmbus_set_speed(sdvo->i2c, speed); 1913 - intel_gmbus_force_bit(sdvo->i2c, true); 1911 + if (pin < GMBUS_NUM_PORTS) { 1912 + sdvo->i2c = &dev_priv->gmbus[pin].adapter; 1913 + intel_gmbus_set_speed(sdvo->i2c, speed); 1914 + intel_gmbus_force_bit(sdvo->i2c, true); 1915 + } else 1916 + sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter; 1914 1917 } 1915 1918 1916 1919 static bool ··· 2040 2037 SDVO_COLORIMETRY_RGB256); 2041 2038 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 2042 2039 2043 - intel_sdvo_add_hdmi_properties(intel_sdvo_connector); 2044 2040 intel_sdvo->is_hdmi = true; 2045 2041 } 2046 2042 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2047 2043 (1 << INTEL_ANALOG_CLONE_BIT)); 2048 2044 2049 2045 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2046 + if (intel_sdvo->is_hdmi) 2047 + intel_sdvo_add_hdmi_properties(intel_sdvo_connector); 2050 2048 2051 2049 return true; 2052 2050 }
+4 -3
drivers/gpu/drm/radeon/atombios_crtc.c
··· 253 253 case DRM_MODE_DPMS_SUSPEND: 254 254 case DRM_MODE_DPMS_OFF: 255 255 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); 256 - atombios_blank_crtc(crtc, ATOM_ENABLE); 256 + if (radeon_crtc->enabled) 257 + atombios_blank_crtc(crtc, ATOM_ENABLE); 257 258 if (ASIC_IS_DCE3(rdev)) 258 259 atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); 259 260 atombios_enable_crtc(crtc, ATOM_DISABLE); ··· 531 530 dp_clock = dig_connector->dp_clock; 532 531 } 533 532 } 534 - 533 + #if 0 /* doesn't work properly on some laptops */ 535 534 /* use recommended ref_div for ss */ 536 535 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 537 536 if (ss_enabled) { ··· 541 540 } 542 541 } 543 542 } 544 - 543 + #endif 545 544 if (ASIC_IS_AVIVO(rdev)) { 546 545 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 547 546 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
+12 -15
drivers/gpu/drm/radeon/evergreen.c
··· 748 748 unsigned i; 749 749 u32 tmp; 750 750 751 + WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 752 + 751 753 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); 752 754 for (i = 0; i < rdev->usec_timeout; i++) { 753 755 /* read MC_STATUS */ ··· 1924 1922 static int evergreen_gpu_soft_reset(struct radeon_device *rdev) 1925 1923 { 1926 1924 struct evergreen_mc_save save; 1927 - u32 srbm_reset = 0; 1928 1925 u32 grbm_reset = 0; 1929 1926 1930 1927 dev_info(rdev->dev, "GPU softreset \n"); ··· 1962 1961 udelay(50); 1963 1962 WREG32(GRBM_SOFT_RESET, 0); 1964 1963 (void)RREG32(GRBM_SOFT_RESET); 1965 - 1966 - /* reset all the system blocks */ 1967 - srbm_reset = SRBM_SOFT_RESET_ALL_MASK; 1968 - 1969 - dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset); 1970 - WREG32(SRBM_SOFT_RESET, srbm_reset); 1971 - (void)RREG32(SRBM_SOFT_RESET); 1972 - udelay(50); 1973 - WREG32(SRBM_SOFT_RESET, 0); 1974 - (void)RREG32(SRBM_SOFT_RESET); 1975 1964 /* Wait a little for things to settle down */ 1976 1965 udelay(50); 1977 1966 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", ··· 1972 1981 RREG32(GRBM_STATUS_SE1)); 1973 1982 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 1974 1983 RREG32(SRBM_STATUS)); 1975 - /* After reset we need to reinit the asic as GPU often endup in an 1976 - * incoherent state. 1977 - */ 1978 - atom_asic_init(rdev->mode_info.atom_context); 1979 1984 evergreen_mc_resume(rdev, &save); 1980 1985 return 0; 1981 1986 } ··· 2583 2596 { 2584 2597 int r; 2585 2598 2599 + /* reset the asic, the gfx blocks are often in a bad state 2600 + * after the driver is unloaded or after a resume 2601 + */ 2602 + if (radeon_asic_reset(rdev)) 2603 + dev_warn(rdev->dev, "GPU reset failed !\n"); 2586 2604 /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, 2587 2605 * posting will perform necessary task to bring back GPU into good 2588 2606 * shape. 
··· 2704 2712 r = radeon_atombios_init(rdev); 2705 2713 if (r) 2706 2714 return r; 2715 + /* reset the asic, the gfx blocks are often in a bad state 2716 + * after the driver is unloaded or after a resume 2717 + */ 2718 + if (radeon_asic_reset(rdev)) 2719 + dev_warn(rdev->dev, "GPU reset failed !\n"); 2707 2720 /* Post card if necessary */ 2708 2721 if (!evergreen_card_posted(rdev)) { 2709 2722 if (!rdev->bios) {
+1
drivers/gpu/drm/radeon/evergreend.h
··· 174 174 #define HDP_NONSURFACE_BASE 0x2C04 175 175 #define HDP_NONSURFACE_INFO 0x2C08 176 176 #define HDP_NONSURFACE_SIZE 0x2C0C 177 + #define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 177 178 #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 178 179 #define HDP_TILING_CONFIG 0x2F3C 179 180
+8 -2
drivers/gpu/drm/radeon/r600.c
··· 1342 1342 u32 srbm_status; 1343 1343 u32 grbm_status; 1344 1344 u32 grbm_status2; 1345 + struct r100_gpu_lockup *lockup; 1345 1346 int r; 1347 + 1348 + if (rdev->family >= CHIP_RV770) 1349 + lockup = &rdev->config.rv770.lockup; 1350 + else 1351 + lockup = &rdev->config.r600.lockup; 1346 1352 1347 1353 srbm_status = RREG32(R_000E50_SRBM_STATUS); 1348 1354 grbm_status = RREG32(R_008010_GRBM_STATUS); 1349 1355 grbm_status2 = RREG32(R_008014_GRBM_STATUS2); 1350 1356 if (!G_008010_GUI_ACTIVE(grbm_status)) { 1351 - r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp); 1357 + r100_gpu_lockup_update(lockup, &rdev->cp); 1352 1358 return false; 1353 1359 } 1354 1360 /* force CP activities */ ··· 1366 1360 radeon_ring_unlock_commit(rdev); 1367 1361 } 1368 1362 rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); 1369 - return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp); 1363 + return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp); 1370 1364 } 1371 1365 1372 1366 int r600_asic_reset(struct radeon_device *rdev)
+4 -5
drivers/gpu/drm/radeon/r600_cs.c
··· 315 315 if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { 316 316 /* the initial DDX does bad things with the CB size occasionally */ 317 317 /* it rounds up height too far for slice tile max but the BO is smaller */ 318 - tmp = (height - 7) * 8 * bpe; 319 - if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { 320 - dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); 321 - return -EINVAL; 322 - } 318 + /* r600c,g also seem to flush at bad times in some apps resulting in 319 + * bogus values here. So for linear just allow anything to avoid breaking 320 + * broken userspace. 321 + */ 323 322 } else { 324 323 dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); 325 324 return -EINVAL;
+4 -5
drivers/gpu/drm/radeon/radeon_device.c
··· 910 910 radeon_pm_resume(rdev); 911 911 radeon_restore_bios_scratch_regs(rdev); 912 912 913 - /* turn on display hw */ 914 - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 915 - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 916 - } 917 - 918 913 radeon_fbdev_set_suspend(rdev, 0); 919 914 release_console_sem(); 920 915 ··· 917 922 radeon_hpd_init(rdev); 918 923 /* blat the mode back in */ 919 924 drm_helper_resume_force_mode(dev); 925 + /* turn on display hw */ 926 + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 927 + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 928 + } 920 929 return 0; 921 930 } 922 931
+19
drivers/gpu/drm/radeon/radeon_drv.c
··· 232 232 233 233 static struct drm_driver kms_driver; 234 234 235 + static void radeon_kick_out_firmware_fb(struct pci_dev *pdev) 236 + { 237 + struct apertures_struct *ap; 238 + bool primary = false; 239 + 240 + ap = alloc_apertures(1); 241 + ap->ranges[0].base = pci_resource_start(pdev, 0); 242 + ap->ranges[0].size = pci_resource_len(pdev, 0); 243 + 244 + #ifdef CONFIG_X86 245 + primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; 246 + #endif 247 + remove_conflicting_framebuffers(ap, "radeondrmfb", primary); 248 + kfree(ap); 249 + } 250 + 235 251 static int __devinit 236 252 radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 237 253 { 254 + /* Get rid of things like offb */ 255 + radeon_kick_out_firmware_fb(pdev); 256 + 238 257 return drm_get_pci_dev(pdev, ent, &kms_driver); 239 258 } 240 259
+1 -1
drivers/gpu/drm/radeon/radeon_fb.c
··· 245 245 goto out_unref; 246 246 } 247 247 info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; 248 - info->apertures->ranges[0].size = rdev->mc.real_vram_size; 248 + info->apertures->ranges[0].size = rdev->mc.aper_size; 249 249 250 250 info->fix.mmio_start = 0; 251 251 info->fix.mmio_len = 0;
-2
drivers/hwmon/s3c-hwmon.c
··· 234 234 attr->index = channel; 235 235 attr->dev_attr.attr.name = attrs->in_name; 236 236 attr->dev_attr.attr.mode = S_IRUGO; 237 - attr->dev_attr.attr.owner = THIS_MODULE; 238 237 attr->dev_attr.show = s3c_hwmon_ch_show; 239 238 240 239 ret = device_create_file(dev, &attr->dev_attr); ··· 251 252 attr->index = channel; 252 253 attr->dev_attr.attr.name = attrs->label_name; 253 254 attr->dev_attr.attr.mode = S_IRUGO; 254 - attr->dev_attr.attr.owner = THIS_MODULE; 255 255 attr->dev_attr.show = s3c_hwmon_label_show; 256 256 257 257 ret = device_create_file(dev, &attr->dev_attr);
+1
drivers/isdn/gigaset/capi.c
··· 1900 1900 if (b3skb == NULL) { 1901 1901 dev_err(cs->dev, "%s: out of memory\n", __func__); 1902 1902 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR); 1903 + kfree(b3cmsg); 1903 1904 return; 1904 1905 } 1905 1906 capi_cmsg2message(b3cmsg,
+1 -1
drivers/leds/led-class.c
··· 267 267 unsigned long *delay_off) 268 268 { 269 269 if (led_cdev->blink_set && 270 - led_cdev->blink_set(led_cdev, delay_on, delay_off)) 270 + !led_cdev->blink_set(led_cdev, delay_on, delay_off)) 271 271 return; 272 272 273 273 /* blink with 1 Hz as default if nothing specified */
+11 -10
drivers/media/IR/keymaps/rc-rc6-mce.c
··· 26 26 27 27 { 0x800f040a, KEY_DELETE }, 28 28 { 0x800f040b, KEY_ENTER }, 29 - { 0x800f040c, KEY_POWER }, 30 - { 0x800f040d, KEY_PROG1 }, /* Windows MCE button */ 29 + { 0x800f040c, KEY_POWER }, /* PC Power */ 30 + { 0x800f040d, KEY_PROG1 }, /* Windows MCE button */ 31 31 { 0x800f040e, KEY_MUTE }, 32 32 { 0x800f040f, KEY_INFO }, 33 33 ··· 56 56 { 0x800f0422, KEY_OK }, 57 57 { 0x800f0423, KEY_EXIT }, 58 58 { 0x800f0424, KEY_DVD }, 59 - { 0x800f0425, KEY_TUNER }, /* LiveTV */ 60 - { 0x800f0426, KEY_EPG }, /* Guide */ 61 - { 0x800f0427, KEY_ZOOM }, /* Aspect */ 59 + { 0x800f0425, KEY_TUNER }, /* LiveTV */ 60 + { 0x800f0426, KEY_EPG }, /* Guide */ 61 + { 0x800f0427, KEY_ZOOM }, /* Aspect */ 62 62 63 63 { 0x800f043a, KEY_BRIGHTNESSUP }, 64 64 65 65 { 0x800f0446, KEY_TV }, 66 - { 0x800f0447, KEY_AUDIO }, /* My Music */ 67 - { 0x800f0448, KEY_PVR }, /* RecordedTV */ 66 + { 0x800f0447, KEY_AUDIO }, /* My Music */ 67 + { 0x800f0448, KEY_PVR }, /* RecordedTV */ 68 68 { 0x800f0449, KEY_CAMERA }, 69 69 { 0x800f044a, KEY_VIDEO }, 70 70 { 0x800f044c, KEY_LANGUAGE }, 71 71 { 0x800f044d, KEY_TITLE }, 72 - { 0x800f044e, KEY_PRINT }, /* Print - HP OEM version of remote */ 72 + { 0x800f044e, KEY_PRINT }, /* Print - HP OEM version of remote */ 73 73 74 74 { 0x800f0450, KEY_RADIO }, 75 75 76 - { 0x800f045a, KEY_SUBTITLE }, /* Caption/Teletext */ 76 + { 0x800f045a, KEY_SUBTITLE }, /* Caption/Teletext */ 77 77 { 0x800f045b, KEY_RED }, 78 78 { 0x800f045c, KEY_GREEN }, 79 79 { 0x800f045d, KEY_YELLOW }, 80 80 { 0x800f045e, KEY_BLUE }, 81 81 82 + { 0x800f0465, KEY_POWER2 }, /* TV Power */ 82 83 { 0x800f046e, KEY_PLAYPAUSE }, 83 - { 0x800f046f, KEY_MEDIA }, /* Start media application (NEW) */ 84 + { 0x800f046f, KEY_MEDIA }, /* Start media application (NEW) */ 84 85 85 86 { 0x800f0480, KEY_BRIGHTNESSDOWN }, 86 87 { 0x800f0481, KEY_PLAYPAUSE },
+16 -13
drivers/media/IR/lirc_dev.c
··· 522 522 523 523 dev_dbg(ir->d.dev, LOGHEAD "poll called\n", ir->d.name, ir->d.minor); 524 524 525 - if (!ir->attached) { 526 - mutex_unlock(&ir->irctl_lock); 525 + if (!ir->attached) 527 526 return POLLERR; 528 - } 529 527 530 528 poll_wait(file, &ir->buf->wait_poll, wait); 531 529 ··· 647 649 if (!buf) 648 650 return -ENOMEM; 649 651 650 - if (mutex_lock_interruptible(&ir->irctl_lock)) 651 - return -ERESTARTSYS; 652 + if (mutex_lock_interruptible(&ir->irctl_lock)) { 653 + ret = -ERESTARTSYS; 654 + goto out_unlocked; 655 + } 652 656 if (!ir->attached) { 653 - mutex_unlock(&ir->irctl_lock); 654 - return -ENODEV; 657 + ret = -ENODEV; 658 + goto out_locked; 655 659 } 656 660 657 661 if (length % ir->chunk_size) { 658 - dev_dbg(ir->d.dev, LOGHEAD "read result = -EINVAL\n", 659 - ir->d.name, ir->d.minor); 660 - mutex_unlock(&ir->irctl_lock); 661 - return -EINVAL; 662 + ret = -EINVAL; 663 + goto out_locked; 662 664 } 663 665 664 666 /* ··· 709 711 lirc_buffer_read(ir->buf, buf); 710 712 ret = copy_to_user((void *)buffer+written, buf, 711 713 ir->buf->chunk_size); 712 - written += ir->buf->chunk_size; 714 + if (!ret) 715 + written += ir->buf->chunk_size; 716 + else 717 + ret = -EFAULT; 713 718 } 714 719 } 715 720 716 721 remove_wait_queue(&ir->buf->wait_poll, &wait); 717 722 set_current_state(TASK_RUNNING); 723 + 724 + out_locked: 718 725 mutex_unlock(&ir->irctl_lock); 719 726 720 727 out_unlocked: 721 728 kfree(buf); 722 729 dev_dbg(ir->d.dev, LOGHEAD "read result = %s (%d)\n", 723 - ir->d.name, ir->d.minor, ret ? "-EFAULT" : "OK", ret); 730 + ir->d.name, ir->d.minor, ret ? "<fail>" : "<ok>", ret); 724 731 725 732 return ret ? ret : written; 726 733 }
+109 -65
drivers/media/IR/mceusb.c
··· 35 35 #include <linux/device.h> 36 36 #include <linux/module.h> 37 37 #include <linux/slab.h> 38 - #include <linux/usb.h> 39 38 #include <linux/input.h> 39 + #include <linux/usb.h> 40 + #include <linux/usb/input.h> 40 41 #include <media/ir-core.h> 41 - #include <media/ir-common.h> 42 42 43 43 #define DRIVER_VERSION "1.91" 44 44 #define DRIVER_AUTHOR "Jarod Wilson <jarod@wilsonet.com>" ··· 49 49 #define USB_BUFLEN 32 /* USB reception buffer length */ 50 50 #define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */ 51 51 #define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */ 52 + #define MS_TO_NS(msec) ((msec) * 1000) 52 53 53 54 /* MCE constants */ 54 55 #define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */ ··· 75 74 #define MCE_PACKET_LENGTH_MASK 0x1f /* Packet length mask */ 76 75 77 76 /* Sub-commands, which follow MCE_COMMAND_HEADER or MCE_HW_CMD_HEADER */ 77 + #define MCE_CMD_SIG_END 0x01 /* End of signal */ 78 78 #define MCE_CMD_PING 0x03 /* Ping device */ 79 79 #define MCE_CMD_UNKNOWN 0x04 /* Unknown */ 80 80 #define MCE_CMD_UNKNOWN2 0x05 /* Unknown */ ··· 93 91 #define MCE_CMD_G_TXMASK 0x13 /* Set TX port bitmask */ 94 92 #define MCE_CMD_S_RXSENSOR 0x14 /* Set RX sensor (std/learning) */ 95 93 #define MCE_CMD_G_RXSENSOR 0x15 /* Get RX sensor (std/learning) */ 94 + #define MCE_RSP_PULSE_COUNT 0x15 /* RX pulse count (only if learning) */ 96 95 #define MCE_CMD_TX_PORTS 0x16 /* Get number of TX ports */ 97 96 #define MCE_CMD_G_WAKESRC 0x17 /* Get wake source */ 98 97 #define MCE_CMD_UNKNOWN7 0x18 /* Unknown */ ··· 149 146 MCE_GEN3, 150 147 MCE_GEN2_TX_INV, 151 148 POLARIS_EVK, 149 + CX_HYBRID_TV, 152 150 }; 153 151 154 152 struct mceusb_model { 155 153 u32 mce_gen1:1; 156 154 u32 mce_gen2:1; 157 155 u32 mce_gen3:1; 158 - u32 tx_mask_inverted:1; 156 + u32 tx_mask_normal:1; 159 157 u32 is_polaris:1; 158 + u32 no_tx:1; 160 159 161 160 const char *rc_map; /* Allow specify a per-board map */ 162 161 const char *name; /* per-board name 
*/ ··· 167 162 static const struct mceusb_model mceusb_model[] = { 168 163 [MCE_GEN1] = { 169 164 .mce_gen1 = 1, 170 - .tx_mask_inverted = 1, 165 + .tx_mask_normal = 1, 171 166 }, 172 167 [MCE_GEN2] = { 173 168 .mce_gen2 = 1, 174 169 }, 175 170 [MCE_GEN2_TX_INV] = { 176 171 .mce_gen2 = 1, 177 - .tx_mask_inverted = 1, 172 + .tx_mask_normal = 1, 178 173 }, 179 174 [MCE_GEN3] = { 180 175 .mce_gen3 = 1, 181 - .tx_mask_inverted = 1, 176 + .tx_mask_normal = 1, 182 177 }, 183 178 [POLARIS_EVK] = { 184 179 .is_polaris = 1, ··· 188 183 * to allow testing it 189 184 */ 190 185 .rc_map = RC_MAP_RC5_HAUPPAUGE_NEW, 191 - .name = "cx231xx MCE IR", 186 + .name = "Conexant Hybrid TV (cx231xx) MCE IR", 187 + }, 188 + [CX_HYBRID_TV] = { 189 + .is_polaris = 1, 190 + .no_tx = 1, /* tx isn't wired up at all */ 191 + .name = "Conexant Hybrid TV (cx231xx) MCE IR", 192 192 }, 193 193 }; 194 194 ··· 283 273 { USB_DEVICE(VENDOR_FORMOSA, 0xe03c) }, 284 274 /* Formosa Industrial Computing */ 285 275 { USB_DEVICE(VENDOR_FORMOSA, 0xe03e) }, 276 + /* Fintek eHome Infrared Transceiver (HP branded) */ 277 + { USB_DEVICE(VENDOR_FINTEK, 0x5168) }, 286 278 /* Fintek eHome Infrared Transceiver */ 287 279 { USB_DEVICE(VENDOR_FINTEK, 0x0602) }, 288 280 /* Fintek eHome Infrared Transceiver (in the AOpen MP45) */ ··· 304 292 { USB_DEVICE(VENDOR_NORTHSTAR, 0xe004) }, 305 293 /* TiVo PC IR Receiver */ 306 294 { USB_DEVICE(VENDOR_TIVO, 0x2000) }, 307 - /* Conexant SDK */ 295 + /* Conexant Hybrid TV "Shelby" Polaris SDK */ 308 296 { USB_DEVICE(VENDOR_CONEXANT, 0x58a1), 309 297 .driver_info = POLARIS_EVK }, 298 + /* Conexant Hybrid TV RDU253S Polaris */ 299 + { USB_DEVICE(VENDOR_CONEXANT, 0x58a5), 300 + .driver_info = CX_HYBRID_TV }, 310 301 /* Terminating entry */ 311 302 { } 312 303 }; ··· 318 303 struct mceusb_dev { 319 304 /* ir-core bits */ 320 305 struct ir_dev_props *props; 321 - struct ir_raw_event rawir; 306 + 307 + /* optional features we can enable */ 308 + bool carrier_report_enabled; 309 + bool 
learning_enabled; 322 310 323 311 /* core device bits */ 324 312 struct device *dev; ··· 336 318 /* buffers and dma */ 337 319 unsigned char *buf_in; 338 320 unsigned int len_in; 321 + dma_addr_t dma_in; 322 + dma_addr_t dma_out; 339 323 340 324 enum { 341 325 CMD_HEADER = 0, ··· 345 325 CMD_DATA, 346 326 PARSE_IRDATA, 347 327 } parser_state; 348 - u8 cmd, rem; /* Remaining IR data bytes in packet */ 349 328 350 - dma_addr_t dma_in; 351 - dma_addr_t dma_out; 329 + u8 cmd, rem; /* Remaining IR data bytes in packet */ 352 330 353 331 struct { 354 332 u32 connected:1; 355 - u32 tx_mask_inverted:1; 333 + u32 tx_mask_normal:1; 356 334 u32 microsoft_gen1:1; 335 + u32 no_tx:1; 357 336 } flags; 358 337 359 338 /* transmit support */ ··· 427 408 case MCE_CMD_UNKNOWN: 428 409 case MCE_CMD_S_CARRIER: 429 410 case MCE_CMD_S_TIMEOUT: 430 - case MCE_CMD_G_RXSENSOR: 411 + case MCE_RSP_PULSE_COUNT: 431 412 datasize = 2; 432 413 break; 414 + case MCE_CMD_SIG_END: 433 415 case MCE_CMD_S_TXMASK: 434 416 case MCE_CMD_S_RXSENSOR: 435 417 datasize = 1; ··· 453 433 return; 454 434 455 435 /* skip meaningless 0xb1 0x60 header bytes on orig receiver */ 456 - if (ir->flags.microsoft_gen1 && !out) 436 + if (ir->flags.microsoft_gen1 && !out && !offset) 457 437 skip = 2; 458 438 459 439 if (len <= skip) ··· 511 491 break; 512 492 case MCE_COMMAND_HEADER: 513 493 switch (subcmd) { 494 + case MCE_CMD_SIG_END: 495 + dev_info(dev, "End of signal\n"); 496 + break; 514 497 case MCE_CMD_PING: 515 498 dev_info(dev, "Ping\n"); 516 499 break; ··· 548 525 inout, data1 == 0x02 ? 
"short" : "long"); 549 526 break; 550 527 case MCE_CMD_G_RXSENSOR: 551 - if (len == 2) 528 + /* aka MCE_RSP_PULSE_COUNT */ 529 + if (out) 552 530 dev_info(dev, "Get receive sensor\n"); 553 - else 554 - dev_info(dev, "Received pulse count is %d\n", 531 + else if (ir->learning_enabled) 532 + dev_info(dev, "RX pulse count: %d\n", 555 533 ((data1 << 8) | data2)); 556 534 break; 557 535 case MCE_RSP_CMD_INVALID: ··· 748 724 return ret ? ret : n; 749 725 } 750 726 751 - /* Sets active IR outputs -- mce devices typically (all?) have two */ 727 + /* Sets active IR outputs -- mce devices typically have two */ 752 728 static int mceusb_set_tx_mask(void *priv, u32 mask) 753 729 { 754 730 struct mceusb_dev *ir = priv; 755 731 756 - if (ir->flags.tx_mask_inverted) 732 + if (ir->flags.tx_mask_normal) 733 + ir->tx_mask = mask; 734 + else 757 735 ir->tx_mask = (mask != MCE_DEFAULT_TX_MASK ? 758 736 mask ^ MCE_DEFAULT_TX_MASK : mask) << 1; 759 - else 760 - ir->tx_mask = mask; 761 737 762 738 return 0; 763 739 } ··· 776 752 777 753 if (carrier == 0) { 778 754 ir->carrier = carrier; 779 - cmdbuf[2] = 0x01; 755 + cmdbuf[2] = MCE_CMD_SIG_END; 780 756 cmdbuf[3] = MCE_IRDATA_TRAILER; 781 757 dev_dbg(ir->dev, "%s: disabling carrier " 782 758 "modulation\n", __func__); ··· 806 782 return carrier; 807 783 } 808 784 785 + /* 786 + * We don't do anything but print debug spew for many of the command bits 787 + * we receive from the hardware, but some of them are useful information 788 + * we want to store so that we can use them. 
789 + */ 790 + static void mceusb_handle_command(struct mceusb_dev *ir, int index) 791 + { 792 + u8 hi = ir->buf_in[index + 1] & 0xff; 793 + u8 lo = ir->buf_in[index + 2] & 0xff; 794 + 795 + switch (ir->buf_in[index]) { 796 + /* 2-byte return value commands */ 797 + case MCE_CMD_S_TIMEOUT: 798 + ir->props->timeout = MS_TO_NS((hi << 8 | lo) / 2); 799 + break; 800 + 801 + /* 1-byte return value commands */ 802 + case MCE_CMD_S_TXMASK: 803 + ir->tx_mask = hi; 804 + break; 805 + case MCE_CMD_S_RXSENSOR: 806 + ir->learning_enabled = (hi == 0x02); 807 + break; 808 + default: 809 + break; 810 + } 811 + } 812 + 809 813 static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len) 810 814 { 811 815 DEFINE_IR_RAW_EVENT(rawir); ··· 843 791 if (ir->flags.microsoft_gen1) 844 792 i = 2; 845 793 794 + /* if there's no data, just return now */ 795 + if (buf_len <= i) 796 + return; 797 + 846 798 for (; i < buf_len; i++) { 847 799 switch (ir->parser_state) { 848 800 case SUBCMD: 849 801 ir->rem = mceusb_cmdsize(ir->cmd, ir->buf_in[i]); 850 802 mceusb_dev_printdata(ir, ir->buf_in, i - 1, 851 803 ir->rem + 2, false); 804 + mceusb_handle_command(ir, i); 852 805 ir->parser_state = CMD_DATA; 853 806 break; 854 807 case PARSE_IRDATA: 855 808 ir->rem--; 856 809 rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0); 857 810 rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK) 858 - * MCE_TIME_UNIT * 1000; 859 - 860 - if ((ir->buf_in[i] & MCE_PULSE_MASK) == 0x7f) { 861 - if (ir->rawir.pulse == rawir.pulse) { 862 - ir->rawir.duration += rawir.duration; 863 - } else { 864 - ir->rawir.duration = rawir.duration; 865 - ir->rawir.pulse = rawir.pulse; 866 - } 867 - if (ir->rem) 868 - break; 869 - } 870 - rawir.duration += ir->rawir.duration; 871 - ir->rawir.duration = 0; 872 - ir->rawir.pulse = rawir.pulse; 811 + * MS_TO_NS(MCE_TIME_UNIT); 873 812 874 813 dev_dbg(ir->dev, "Storing %s with duration %d\n", 875 814 rawir.pulse ? 
"pulse" : "space", 876 815 rawir.duration); 877 816 878 - ir_raw_event_store(ir->idev, &rawir); 817 + ir_raw_event_store_with_filter(ir->idev, &rawir); 879 818 break; 880 819 case CMD_DATA: 881 820 ir->rem--; ··· 882 839 continue; 883 840 } 884 841 ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK); 885 - mceusb_dev_printdata(ir, ir->buf_in, i, ir->rem + 1, false); 886 - if (ir->rem) { 842 + mceusb_dev_printdata(ir, ir->buf_in, 843 + i, ir->rem + 1, false); 844 + if (ir->rem) 887 845 ir->parser_state = PARSE_IRDATA; 888 - break; 889 - } 890 - /* 891 - * a package with len=0 (e. g. 0x80) means end of 892 - * data. We could use it to do the call to 893 - * ir_raw_event_handle(). For now, we don't need to 894 - * use it. 895 - */ 896 846 break; 897 847 } 898 848 ··· 1020 984 mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ)); 1021 985 mce_sync_in(ir, NULL, maxp); 1022 986 1023 - /* get the transmitter bitmask */ 1024 - mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK)); 1025 - mce_sync_in(ir, NULL, maxp); 987 + if (!ir->flags.no_tx) { 988 + /* get the transmitter bitmask */ 989 + mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK)); 990 + mce_sync_in(ir, NULL, maxp); 991 + } 1026 992 1027 993 /* get receiver timeout value */ 1028 994 mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT)); ··· 1073 1035 props->priv = ir; 1074 1036 props->driver_type = RC_DRIVER_IR_RAW; 1075 1037 props->allowed_protos = IR_TYPE_ALL; 1076 - props->s_tx_mask = mceusb_set_tx_mask; 1077 - props->s_tx_carrier = mceusb_set_tx_carrier; 1078 - props->tx_ir = mceusb_tx_ir; 1038 + props->timeout = MS_TO_NS(1000); 1039 + if (!ir->flags.no_tx) { 1040 + props->s_tx_mask = mceusb_set_tx_mask; 1041 + props->s_tx_carrier = mceusb_set_tx_carrier; 1042 + props->tx_ir = mceusb_tx_ir; 1043 + } 1079 1044 1080 1045 ir->props = props; 1046 + 1047 + usb_to_input_id(ir->usbdev, &idev->id); 1048 + idev->dev.parent = ir->dev; 1081 1049 1082 1050 if (mceusb_model[ir->model].rc_map) 1083 1051 
rc_map = mceusb_model[ir->model].rc_map; ··· 1118 1074 enum mceusb_model_type model = id->driver_info; 1119 1075 bool is_gen3; 1120 1076 bool is_microsoft_gen1; 1121 - bool tx_mask_inverted; 1077 + bool tx_mask_normal; 1122 1078 bool is_polaris; 1123 1079 1124 - dev_dbg(&intf->dev, ": %s called\n", __func__); 1080 + dev_dbg(&intf->dev, "%s called\n", __func__); 1125 1081 1126 1082 idesc = intf->cur_altsetting; 1127 1083 1128 1084 is_gen3 = mceusb_model[model].mce_gen3; 1129 1085 is_microsoft_gen1 = mceusb_model[model].mce_gen1; 1130 - tx_mask_inverted = mceusb_model[model].tx_mask_inverted; 1086 + tx_mask_normal = mceusb_model[model].tx_mask_normal; 1131 1087 is_polaris = mceusb_model[model].is_polaris; 1132 1088 1133 1089 if (is_polaris) { ··· 1151 1107 ep_in = ep; 1152 1108 ep_in->bmAttributes = USB_ENDPOINT_XFER_INT; 1153 1109 ep_in->bInterval = 1; 1154 - dev_dbg(&intf->dev, ": acceptable inbound endpoint " 1110 + dev_dbg(&intf->dev, "acceptable inbound endpoint " 1155 1111 "found\n"); 1156 1112 } 1157 1113 ··· 1166 1122 ep_out = ep; 1167 1123 ep_out->bmAttributes = USB_ENDPOINT_XFER_INT; 1168 1124 ep_out->bInterval = 1; 1169 - dev_dbg(&intf->dev, ": acceptable outbound endpoint " 1125 + dev_dbg(&intf->dev, "acceptable outbound endpoint " 1170 1126 "found\n"); 1171 1127 } 1172 1128 } 1173 1129 if (ep_in == NULL) { 1174 - dev_dbg(&intf->dev, ": inbound and/or endpoint not found\n"); 1130 + dev_dbg(&intf->dev, "inbound and/or endpoint not found\n"); 1175 1131 return -ENODEV; 1176 1132 } 1177 1133 ··· 1194 1150 ir->dev = &intf->dev; 1195 1151 ir->len_in = maxp; 1196 1152 ir->flags.microsoft_gen1 = is_microsoft_gen1; 1197 - ir->flags.tx_mask_inverted = tx_mask_inverted; 1153 + ir->flags.tx_mask_normal = tx_mask_normal; 1154 + ir->flags.no_tx = mceusb_model[model].no_tx; 1198 1155 ir->model = model; 1199 - 1200 - init_ir_raw_event(&ir->rawir); 1201 1156 1202 1157 /* Saving usb interface data for use by the transmitter routine */ 1203 1158 ir->usb_ep_in = ep_in; ··· 
1234 1191 1235 1192 mceusb_get_parameters(ir); 1236 1193 1237 - mceusb_set_tx_mask(ir, MCE_DEFAULT_TX_MASK); 1194 + if (!ir->flags.no_tx) 1195 + mceusb_set_tx_mask(ir, MCE_DEFAULT_TX_MASK); 1238 1196 1239 1197 usb_set_intfdata(intf, ir); 1240 1198
+8 -2
drivers/media/IR/nuvoton-cir.c
··· 603 603 count = nvt->pkts; 604 604 nvt_dbg_verbose("Processing buffer of len %d", count); 605 605 606 + init_ir_raw_event(&rawir); 607 + 606 608 for (i = 0; i < count; i++) { 607 609 nvt->pkts--; 608 610 sample = nvt->buf[i]; ··· 645 643 * indicates end of IR signal, but new data incoming. In both 646 644 * cases, it means we're ready to call ir_raw_event_handle 647 645 */ 648 - if (sample == BUF_PULSE_BIT || ((sample != BUF_LEN_MASK) && 649 - (sample & BUF_REPEAT_MASK) == BUF_REPEAT_BYTE)) 646 + if ((sample == BUF_PULSE_BIT) && nvt->pkts) { 647 + nvt_dbg("Calling ir_raw_event_handle (signal end)\n"); 650 648 ir_raw_event_handle(nvt->rdev); 649 + } 651 650 } 651 + 652 + nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n"); 653 + ir_raw_event_handle(nvt->rdev); 652 654 653 655 if (nvt->pkts) { 654 656 nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts);
+12 -9
drivers/media/IR/streamzap.c
··· 34 34 #include <linux/device.h> 35 35 #include <linux/module.h> 36 36 #include <linux/slab.h> 37 - #include <linux/usb.h> 38 37 #include <linux/input.h> 38 + #include <linux/usb.h> 39 + #include <linux/usb/input.h> 39 40 #include <media/ir-core.h> 40 41 41 42 #define DRIVER_VERSION "1.61" ··· 141 140 142 141 static void sz_push(struct streamzap_ir *sz, struct ir_raw_event rawir) 143 142 { 144 - ir_raw_event_store(sz->idev, &rawir); 143 + dev_dbg(sz->dev, "Storing %s with duration %u us\n", 144 + (rawir.pulse ? "pulse" : "space"), rawir.duration); 145 + ir_raw_event_store_with_filter(sz->idev, &rawir); 145 146 } 146 147 147 148 static void sz_push_full_pulse(struct streamzap_ir *sz, ··· 170 167 rawir.duration *= 1000; 171 168 rawir.duration &= IR_MAX_DURATION; 172 169 } 173 - dev_dbg(sz->dev, "ls %u\n", rawir.duration); 174 170 sz_push(sz, rawir); 175 171 176 172 sz->idle = false; ··· 182 180 sz->sum += rawir.duration; 183 181 rawir.duration *= 1000; 184 182 rawir.duration &= IR_MAX_DURATION; 185 - dev_dbg(sz->dev, "p %u\n", rawir.duration); 186 183 sz_push(sz, rawir); 187 184 } 188 185 ··· 201 200 rawir.duration += SZ_RESOLUTION / 2; 202 201 sz->sum += rawir.duration; 203 202 rawir.duration *= 1000; 204 - dev_dbg(sz->dev, "s %u\n", rawir.duration); 205 203 sz_push(sz, rawir); 206 204 } 207 205 ··· 221 221 struct streamzap_ir *sz; 222 222 unsigned int i; 223 223 int len; 224 - static int timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) & 225 - IR_MAX_DURATION) | 0x03000000); 226 224 227 225 if (!urb) 228 226 return; ··· 244 246 245 247 dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len); 246 248 for (i = 0; i < len; i++) { 247 - dev_dbg(sz->dev, "sz idx %d: %x\n", 249 + dev_dbg(sz->dev, "sz->buf_in[%d]: %x\n", 248 250 i, (unsigned char)sz->buf_in[i]); 249 251 switch (sz->decoder_state) { 250 252 case PulseSpace: ··· 271 273 DEFINE_IR_RAW_EVENT(rawir); 272 274 273 275 rawir.pulse = false; 274 - rawir.duration = timeout; 276 + rawir.duration = 
sz->props->timeout; 275 277 sz->idle = true; 276 278 if (sz->timeout_enabled) 277 279 sz_push(sz, rawir); ··· 332 334 props->allowed_protos = IR_TYPE_ALL; 333 335 334 336 sz->props = props; 337 + 338 + usb_to_input_id(sz->usbdev, &idev->id); 339 + idev->dev.parent = sz->dev; 335 340 336 341 ret = ir_input_register(idev, RC_MAP_STREAMZAP, props, DRIVER_NAME); 337 342 if (ret < 0) { ··· 445 444 sz->decoder_state = PulseSpace; 446 445 /* FIXME: don't yet have a way to set this */ 447 446 sz->timeout_enabled = true; 447 + sz->props->timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) & 448 + IR_MAX_DURATION) | 0x03000000); 448 449 #if 0 449 450 /* not yet supported, depends on patches from maxim */ 450 451 /* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */
+17 -2
drivers/media/video/cx25840/cx25840-core.c
··· 1989 1989 v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops, 1990 1990 V4L2_CID_HUE, -128, 127, 1, 0); 1991 1991 if (!is_cx2583x(state)) { 1992 - default_volume = 228 - cx25840_read(client, 0x8d4); 1993 - default_volume = ((default_volume / 2) + 23) << 9; 1992 + default_volume = cx25840_read(client, 0x8d4); 1993 + /* 1994 + * Enforce the legacy PVR-350/MSP3400 to PVR-150/CX25843 volume 1995 + * scale mapping limits to avoid -ERANGE errors when 1996 + * initializing the volume control 1997 + */ 1998 + if (default_volume > 228) { 1999 + /* Bottom out at -96 dB, v4l2 vol range 0x2e00-0x2fff */ 2000 + default_volume = 228; 2001 + cx25840_write(client, 0x8d4, 228); 2002 + } 2003 + else if (default_volume < 20) { 2004 + /* Top out at + 8 dB, v4l2 vol range 0xfe00-0xffff */ 2005 + default_volume = 20; 2006 + cx25840_write(client, 0x8d4, 20); 2007 + } 2008 + default_volume = (((228 - default_volume) >> 1) + 23) << 9; 1994 2009 1995 2010 state->volume = v4l2_ctrl_new_std(&state->hdl, 1996 2011 &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_VOLUME,
+12 -87
drivers/media/video/cx88/cx88-alsa.c
··· 40 40 #include <sound/control.h> 41 41 #include <sound/initval.h> 42 42 #include <sound/tlv.h> 43 - #include <media/wm8775.h> 44 43 45 44 #include "cx88.h" 46 45 #include "cx88-reg.h" ··· 586 587 int left, right, v, b; 587 588 int changed = 0; 588 589 u32 old; 589 - struct v4l2_control client_ctl; 590 - 591 - /* Pass volume & balance onto any WM8775 */ 592 - if (value->value.integer.value[0] >= value->value.integer.value[1]) { 593 - v = value->value.integer.value[0] << 10; 594 - b = value->value.integer.value[0] ? 595 - (0x8000 * value->value.integer.value[1]) / value->value.integer.value[0] : 596 - 0x8000; 597 - } else { 598 - v = value->value.integer.value[1] << 10; 599 - b = value->value.integer.value[1] ? 600 - 0xffff - (0x8000 * value->value.integer.value[0]) / value->value.integer.value[1] : 601 - 0x8000; 602 - } 603 - client_ctl.value = v; 604 - client_ctl.id = V4L2_CID_AUDIO_VOLUME; 605 - call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl); 606 - 607 - client_ctl.value = b; 608 - client_ctl.id = V4L2_CID_AUDIO_BALANCE; 609 - call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl); 610 590 611 591 left = value->value.integer.value[0] & 0x3f; 612 592 right = value->value.integer.value[1] & 0x3f; 613 593 b = right - left; 614 594 if (b < 0) { 615 - v = 0x3f - left; 616 - b = (-b) | 0x40; 595 + v = 0x3f - left; 596 + b = (-b) | 0x40; 617 597 } else { 618 - v = 0x3f - right; 598 + v = 0x3f - right; 619 599 } 620 600 /* Do we really know this will always be called with IRQs on? 
*/ 621 601 spin_lock_irq(&chip->reg_lock); 622 602 old = cx_read(AUD_VOL_CTL); 623 603 if (v != (old & 0x3f)) { 624 - cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, (old & ~0x3f) | v); 625 - changed = 1; 604 + cx_write(AUD_VOL_CTL, (old & ~0x3f) | v); 605 + changed = 1; 626 606 } 627 - if ((cx_read(AUD_BAL_CTL) & 0x7f) != b) { 628 - cx_write(AUD_BAL_CTL, b); 629 - changed = 1; 607 + if (cx_read(AUD_BAL_CTL) != b) { 608 + cx_write(AUD_BAL_CTL, b); 609 + changed = 1; 630 610 } 631 611 spin_unlock_irq(&chip->reg_lock); 632 612 ··· 618 640 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 619 641 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | 620 642 SNDRV_CTL_ELEM_ACCESS_TLV_READ, 621 - .name = "Analog-TV Volume", 643 + .name = "Playback Volume", 622 644 .info = snd_cx88_volume_info, 623 645 .get = snd_cx88_volume_get, 624 646 .put = snd_cx88_volume_put, ··· 649 671 vol = cx_read(AUD_VOL_CTL); 650 672 if (value->value.integer.value[0] != !(vol & bit)) { 651 673 vol ^= bit; 652 - cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol); 653 - /* Pass mute onto any WM8775 */ 654 - if ((1<<6) == bit) { 655 - struct v4l2_control client_ctl; 656 - client_ctl.value = 0 != (vol & bit); 657 - client_ctl.id = V4L2_CID_AUDIO_MUTE; 658 - call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl); 659 - } 674 + cx_write(AUD_VOL_CTL, vol); 660 675 ret = 1; 661 676 } 662 677 spin_unlock_irq(&chip->reg_lock); ··· 658 687 659 688 static const struct snd_kcontrol_new snd_cx88_dac_switch = { 660 689 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 661 - .name = "Audio-Out Switch", 690 + .name = "Playback Switch", 662 691 .info = snd_ctl_boolean_mono_info, 663 692 .get = snd_cx88_switch_get, 664 693 .put = snd_cx88_switch_put, ··· 667 696 668 697 static const struct snd_kcontrol_new snd_cx88_source_switch = { 669 698 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 670 - .name = "Analog-TV Switch", 699 + .name = "Capture Switch", 671 700 .info = snd_ctl_boolean_mono_info, 672 701 .get = snd_cx88_switch_get, 673 702 .put = snd_cx88_switch_put, 
674 703 .private_value = (1<<6), 675 - }; 676 - 677 - static int snd_cx88_alc_get(struct snd_kcontrol *kcontrol, 678 - struct snd_ctl_elem_value *value) 679 - { 680 - snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); 681 - struct cx88_core *core = chip->core; 682 - struct v4l2_control client_ctl; 683 - 684 - client_ctl.id = V4L2_CID_AUDIO_LOUDNESS; 685 - call_hw(core, WM8775_GID, core, g_ctrl, &client_ctl); 686 - value->value.integer.value[0] = client_ctl.value ? 1 : 0; 687 - 688 - return 0; 689 - } 690 - 691 - static int snd_cx88_alc_put(struct snd_kcontrol *kcontrol, 692 - struct snd_ctl_elem_value *value) 693 - { 694 - snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); 695 - struct cx88_core *core = chip->core; 696 - struct v4l2_control client_ctl; 697 - 698 - client_ctl.value = 0 != value->value.integer.value[0]; 699 - client_ctl.id = V4L2_CID_AUDIO_LOUDNESS; 700 - call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl); 701 - 702 - return 0; 703 - } 704 - 705 - static struct snd_kcontrol_new snd_cx88_alc_switch = { 706 - .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 707 - .name = "Line-In ALC Switch", 708 - .info = snd_ctl_boolean_mono_info, 709 - .get = snd_cx88_alc_get, 710 - .put = snd_cx88_alc_put, 711 704 }; 712 705 713 706 /**************************************************************************** ··· 795 860 { 796 861 struct snd_card *card; 797 862 snd_cx88_card_t *chip; 798 - struct v4l2_subdev *sd; 799 863 int err; 800 864 801 865 if (devno >= SNDRV_CARDS) ··· 829 895 err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_source_switch, chip)); 830 896 if (err < 0) 831 897 goto error; 832 - 833 - /* If there's a wm8775 then add a Line-In ALC switch */ 834 - list_for_each_entry(sd, &chip->core->v4l2_dev.subdevs, list) { 835 - if (WM8775_GID == sd->grp_id) { 836 - snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, 837 - chip)); 838 - break; 839 - } 840 - } 841 898 842 899 strcpy (card->driver, "CX88x"); 843 900 sprintf(card->shortname, "Conexant CX%x", 
pci->device);
+7
drivers/media/video/cx88/cx88-cards.c
··· 1007 1007 .radio_type = UNSET, 1008 1008 .tuner_addr = ADDR_UNSET, 1009 1009 .radio_addr = ADDR_UNSET, 1010 + .audio_chip = V4L2_IDENT_WM8775, 1010 1011 .input = {{ 1011 1012 .type = CX88_VMUX_DVB, 1012 1013 .vmux = 0, 1014 + /* 2: Line-In */ 1015 + .audioroute = 2, 1013 1016 },{ 1014 1017 .type = CX88_VMUX_COMPOSITE1, 1015 1018 .vmux = 1, 1019 + /* 2: Line-In */ 1020 + .audioroute = 2, 1016 1021 },{ 1017 1022 .type = CX88_VMUX_SVIDEO, 1018 1023 .vmux = 2, 1024 + /* 2: Line-In */ 1025 + .audioroute = 2, 1019 1026 }}, 1020 1027 .mpeg = CX88_MPEG_DVB, 1021 1028 },
+1 -26
drivers/media/video/cx88/cx88-video.c
··· 40 40 #include "cx88.h" 41 41 #include <media/v4l2-common.h> 42 42 #include <media/v4l2-ioctl.h> 43 - #include <media/wm8775.h> 44 43 45 44 MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards"); 46 45 MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); ··· 976 977 const struct cx88_ctrl *c = NULL; 977 978 u32 value,mask; 978 979 int i; 979 - struct v4l2_control client_ctl; 980 980 981 981 for (i = 0; i < CX8800_CTLS; i++) { 982 982 if (cx8800_ctls[i].v.id == ctl->id) { ··· 989 991 ctl->value = c->v.minimum; 990 992 if (ctl->value > c->v.maximum) 991 993 ctl->value = c->v.maximum; 992 - 993 - /* Pass changes onto any WM8775 */ 994 - client_ctl.id = ctl->id; 995 - switch (ctl->id) { 996 - case V4L2_CID_AUDIO_MUTE: 997 - client_ctl.value = ctl->value; 998 - break; 999 - case V4L2_CID_AUDIO_VOLUME: 1000 - client_ctl.value = (ctl->value) ? 1001 - (0x90 + ctl->value) << 8 : 0; 1002 - break; 1003 - case V4L2_CID_AUDIO_BALANCE: 1004 - client_ctl.value = ctl->value << 9; 1005 - break; 1006 - default: 1007 - client_ctl.id = 0; 1008 - break; 1009 - } 1010 - if (client_ctl.id) 1011 - call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl); 1012 - 1013 994 mask=c->mask; 1014 995 switch (ctl->id) { 1015 996 case V4L2_CID_AUDIO_BALANCE: ··· 1535 1558 if (c->id < V4L2_CID_BASE || 1536 1559 c->id >= V4L2_CID_LASTP1) 1537 1560 return -EINVAL; 1538 - if (c->id == V4L2_CID_AUDIO_MUTE || 1539 - c->id == V4L2_CID_AUDIO_VOLUME || 1540 - c->id == V4L2_CID_AUDIO_BALANCE) { 1561 + if (c->id == V4L2_CID_AUDIO_MUTE) { 1541 1562 for (i = 0; i < CX8800_CTLS; i++) { 1542 1563 if (cx8800_ctls[i].v.id == c->id) 1543 1564 break;
+2 -4
drivers/media/video/cx88/cx88.h
··· 398 398 return container_of(v4l2_dev, struct cx88_core, v4l2_dev); 399 399 } 400 400 401 - #define call_hw(core, grpid, o, f, args...) \ 401 + #define call_all(core, o, f, args...) \ 402 402 do { \ 403 403 if (!core->i2c_rc) { \ 404 404 if (core->gate_ctrl) \ 405 405 core->gate_ctrl(core, 1); \ 406 - v4l2_device_call_all(&core->v4l2_dev, grpid, o, f, ##args); \ 406 + v4l2_device_call_all(&core->v4l2_dev, 0, o, f, ##args); \ 407 407 if (core->gate_ctrl) \ 408 408 core->gate_ctrl(core, 0); \ 409 409 } \ 410 410 } while (0) 411 - 412 - #define call_all(core, o, f, args...) call_hw(core, 0, o, f, ##args) 413 411 414 412 struct cx8800_dev; 415 413 struct cx8802_dev;
+1 -1
drivers/media/video/em28xx/em28xx-video.c
··· 2377 2377 .owner = THIS_MODULE, 2378 2378 .open = em28xx_v4l2_open, 2379 2379 .release = em28xx_v4l2_close, 2380 - .ioctl = video_ioctl2, 2380 + .unlocked_ioctl = video_ioctl2, 2381 2381 }; 2382 2382 2383 2383 static const struct v4l2_ioctl_ops radio_ioctl_ops = {
-2
drivers/media/video/mx2_camera.c
··· 807 807 808 808 if (common_flags & SOCAM_PCLK_SAMPLE_RISING) 809 809 csicr1 |= CSICR1_REDGE; 810 - if (common_flags & SOCAM_PCLK_SAMPLE_FALLING) 811 - csicr1 |= CSICR1_INV_PCLK; 812 810 if (common_flags & SOCAM_VSYNC_ACTIVE_HIGH) 813 811 csicr1 |= CSICR1_SOF_POL; 814 812 if (common_flags & SOCAM_HSYNC_ACTIVE_HIGH)
+48 -3
drivers/media/video/s5p-fimc/fimc-capture.c
··· 522 522 INIT_LIST_HEAD(&fimc->vid_cap.active_buf_q); 523 523 fimc->vid_cap.active_buf_cnt = 0; 524 524 fimc->vid_cap.frame_count = 0; 525 + fimc->vid_cap.buf_index = fimc_hw_get_frame_index(fimc); 525 526 526 527 set_bit(ST_CAPT_PEND, &fimc->state); 527 528 ret = videobuf_streamon(&fimc->vid_cap.vbq); ··· 653 652 return ret; 654 653 } 655 654 655 + static int fimc_cap_cropcap(struct file *file, void *fh, 656 + struct v4l2_cropcap *cr) 657 + { 658 + struct fimc_frame *f; 659 + struct fimc_ctx *ctx = fh; 660 + struct fimc_dev *fimc = ctx->fimc_dev; 661 + 662 + if (cr->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 663 + return -EINVAL; 664 + 665 + if (mutex_lock_interruptible(&fimc->lock)) 666 + return -ERESTARTSYS; 667 + 668 + f = &ctx->s_frame; 669 + cr->bounds.left = 0; 670 + cr->bounds.top = 0; 671 + cr->bounds.width = f->o_width; 672 + cr->bounds.height = f->o_height; 673 + cr->defrect = cr->bounds; 674 + 675 + mutex_unlock(&fimc->lock); 676 + return 0; 677 + } 678 + 679 + static int fimc_cap_g_crop(struct file *file, void *fh, struct v4l2_crop *cr) 680 + { 681 + struct fimc_frame *f; 682 + struct fimc_ctx *ctx = file->private_data; 683 + struct fimc_dev *fimc = ctx->fimc_dev; 684 + 685 + 686 + if (mutex_lock_interruptible(&fimc->lock)) 687 + return -ERESTARTSYS; 688 + 689 + f = &ctx->s_frame; 690 + cr->c.left = f->offs_h; 691 + cr->c.top = f->offs_v; 692 + cr->c.width = f->width; 693 + cr->c.height = f->height; 694 + 695 + mutex_unlock(&fimc->lock); 696 + return 0; 697 + } 698 + 656 699 static int fimc_cap_s_crop(struct file *file, void *fh, 657 700 struct v4l2_crop *cr) 658 701 { ··· 761 716 .vidioc_g_ctrl = fimc_vidioc_g_ctrl, 762 717 .vidioc_s_ctrl = fimc_cap_s_ctrl, 763 718 764 - .vidioc_g_crop = fimc_vidioc_g_crop, 719 + .vidioc_g_crop = fimc_cap_g_crop, 765 720 .vidioc_s_crop = fimc_cap_s_crop, 766 - .vidioc_cropcap = fimc_vidioc_cropcap, 721 + .vidioc_cropcap = fimc_cap_cropcap, 767 722 768 723 .vidioc_enum_input = fimc_cap_enum_input, 769 724 .vidioc_s_input 
= fimc_cap_s_input, ··· 830 785 videobuf_queue_dma_contig_init(&vid_cap->vbq, &fimc_qops, 831 786 vid_cap->v4l2_dev.dev, &fimc->irqlock, 832 787 V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, 833 - sizeof(struct fimc_vid_buffer), (void *)ctx); 788 + sizeof(struct fimc_vid_buffer), (void *)ctx, NULL); 834 789 835 790 ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1); 836 791 if (ret) {
+33 -21
drivers/media/video/s5p-fimc/fimc-core.c
··· 50 50 .planes_cnt = 1, 51 51 .flags = FMT_FLAGS_M2M, 52 52 }, { 53 - .name = "XRGB-8-8-8-8, 24 bpp", 54 - .fourcc = V4L2_PIX_FMT_RGB24, 53 + .name = "XRGB-8-8-8-8, 32 bpp", 54 + .fourcc = V4L2_PIX_FMT_RGB32, 55 55 .depth = 32, 56 56 .color = S5P_FIMC_RGB888, 57 57 .buff_cnt = 1, ··· 983 983 { 984 984 struct fimc_ctx *ctx = priv; 985 985 struct v4l2_queryctrl *c; 986 + int ret = -EINVAL; 986 987 987 988 c = get_ctrl(qc->id); 988 989 if (c) { ··· 991 990 return 0; 992 991 } 993 992 994 - if (ctx->state & FIMC_CTX_CAP) 995 - return v4l2_subdev_call(ctx->fimc_dev->vid_cap.sd, 993 + if (ctx->state & FIMC_CTX_CAP) { 994 + if (mutex_lock_interruptible(&ctx->fimc_dev->lock)) 995 + return -ERESTARTSYS; 996 + ret = v4l2_subdev_call(ctx->fimc_dev->vid_cap.sd, 996 997 core, queryctrl, qc); 997 - return -EINVAL; 998 + mutex_unlock(&ctx->fimc_dev->lock); 999 + } 1000 + return ret; 998 1001 } 999 1002 1000 1003 int fimc_vidioc_g_ctrl(struct file *file, void *priv, ··· 1120 1115 return 0; 1121 1116 } 1122 1117 1123 - int fimc_vidioc_cropcap(struct file *file, void *fh, 1118 + static int fimc_m2m_cropcap(struct file *file, void *fh, 1124 1119 struct v4l2_cropcap *cr) 1125 1120 { 1126 1121 struct fimc_frame *frame; ··· 1144 1139 return 0; 1145 1140 } 1146 1141 1147 - int fimc_vidioc_g_crop(struct file *file, void *fh, struct v4l2_crop *cr) 1142 + static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr) 1148 1143 { 1149 1144 struct fimc_frame *frame; 1150 1145 struct fimc_ctx *ctx = file->private_data; ··· 1172 1167 struct fimc_frame *f; 1173 1168 u32 min_size, halign; 1174 1169 1175 - f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ? 
1176 - &ctx->s_frame : &ctx->d_frame; 1177 - 1178 1170 if (cr->c.top < 0 || cr->c.left < 0) { 1179 1171 v4l2_err(&fimc->m2m.v4l2_dev, 1180 1172 "doesn't support negative values for top & left\n"); 1181 1173 return -EINVAL; 1182 1174 } 1183 1175 1184 - f = ctx_get_frame(ctx, cr->type); 1185 - if (IS_ERR(f)) 1186 - return PTR_ERR(f); 1176 + if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 1177 + f = (ctx->state & FIMC_CTX_CAP) ? &ctx->s_frame : &ctx->d_frame; 1178 + else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && 1179 + ctx->state & FIMC_CTX_M2M) 1180 + f = &ctx->s_frame; 1181 + else 1182 + return -EINVAL; 1187 1183 1188 - min_size = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) 1189 - ? fimc->variant->min_inp_pixsize 1190 - : fimc->variant->min_out_pixsize; 1184 + min_size = (f == &ctx->s_frame) ? 1185 + fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize; 1191 1186 1192 1187 if (ctx->state & FIMC_CTX_M2M) { 1193 1188 if (fimc->id == 1 && fimc->variant->pix_hoff) ··· 1238 1233 f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ? 
1239 1234 &ctx->s_frame : &ctx->d_frame; 1240 1235 1236 + if (mutex_lock_interruptible(&fimc->lock)) 1237 + return -ERESTARTSYS; 1238 + 1241 1239 spin_lock_irqsave(&ctx->slock, flags); 1242 1240 if (~ctx->state & (FIMC_SRC_FMT | FIMC_DST_FMT)) { 1243 1241 /* Check to see if scaling ratio is within supported range */ ··· 1249 1241 else 1250 1242 ret = fimc_check_scaler_ratio(&cr->c, &ctx->s_frame); 1251 1243 if (ret) { 1252 - spin_unlock_irqrestore(&ctx->slock, flags); 1253 1244 v4l2_err(&fimc->m2m.v4l2_dev, "Out of scaler range"); 1254 - return -EINVAL; 1245 + ret = -EINVAL; 1246 + goto scr_unlock; 1255 1247 } 1256 1248 } 1257 1249 ctx->state |= FIMC_PARAMS; ··· 1261 1253 f->width = cr->c.width; 1262 1254 f->height = cr->c.height; 1263 1255 1256 + scr_unlock: 1264 1257 spin_unlock_irqrestore(&ctx->slock, flags); 1258 + mutex_unlock(&fimc->lock); 1265 1259 return 0; 1266 1260 } 1267 1261 ··· 1295 1285 .vidioc_g_ctrl = fimc_vidioc_g_ctrl, 1296 1286 .vidioc_s_ctrl = fimc_m2m_s_ctrl, 1297 1287 1298 - .vidioc_g_crop = fimc_vidioc_g_crop, 1288 + .vidioc_g_crop = fimc_m2m_g_crop, 1299 1289 .vidioc_s_crop = fimc_m2m_s_crop, 1300 - .vidioc_cropcap = fimc_vidioc_cropcap 1290 + .vidioc_cropcap = fimc_m2m_cropcap 1301 1291 1302 1292 }; 1303 1293 ··· 1406 1396 .open = fimc_m2m_open, 1407 1397 .release = fimc_m2m_release, 1408 1398 .poll = fimc_m2m_poll, 1409 - .ioctl = video_ioctl2, 1399 + .unlocked_ioctl = video_ioctl2, 1410 1400 .mmap = fimc_m2m_mmap, 1411 1401 }; 1412 1402 ··· 1746 1736 .pix_hoff = 1, 1747 1737 .has_inp_rot = 1, 1748 1738 .has_out_rot = 1, 1739 + .has_cistatus2 = 1, 1749 1740 .min_inp_pixsize = 16, 1750 1741 .min_out_pixsize = 16, 1751 1742 .hor_offs_align = 1, ··· 1756 1745 1757 1746 static struct samsung_fimc_variant fimc2_variant_s5pv310 = { 1758 1747 .pix_hoff = 1, 1748 + .has_cistatus2 = 1, 1759 1749 .min_inp_pixsize = 16, 1760 1750 .min_out_pixsize = 16, 1761 1751 .hor_offs_align = 1,
+16 -8
drivers/media/video/s5p-fimc/fimc-core.h
··· 13 13 14 14 /*#define DEBUG*/ 15 15 16 + #include <linux/sched.h> 16 17 #include <linux/types.h> 18 + #include <linux/videodev2.h> 17 19 #include <media/videobuf-core.h> 18 20 #include <media/v4l2-device.h> 19 21 #include <media/v4l2-mem2mem.h> 20 22 #include <media/v4l2-mediabus.h> 21 23 #include <media/s3c_fimc.h> 22 - #include <linux/videodev2.h> 24 + 23 25 #include "regs-fimc.h" 24 26 25 27 #define err(fmt, args...) \ ··· 371 369 * @pix_hoff: indicate whether horizontal offset is in pixels or in bytes 372 370 * @has_inp_rot: set if has input rotator 373 371 * @has_out_rot: set if has output rotator 372 + * @has_cistatus2: 1 if CISTATUS2 register is present in this IP revision 374 373 * @pix_limit: pixel size constraints for the scaler 375 374 * @min_inp_pixsize: minimum input pixel size 376 375 * @min_out_pixsize: minimum output pixel size ··· 382 379 unsigned int pix_hoff:1; 383 380 unsigned int has_inp_rot:1; 384 381 unsigned int has_out_rot:1; 382 + unsigned int has_cistatus2:1; 385 383 struct fimc_pix_limit *pix_limit; 386 384 u16 min_inp_pixsize; 387 385 u16 min_out_pixsize; ··· 558 554 return frame; 559 555 } 560 556 557 + /* Return an index to the buffer actually being written. */ 561 558 static inline u32 fimc_hw_get_frame_index(struct fimc_dev *dev) 562 559 { 563 - u32 reg = readl(dev->regs + S5P_CISTATUS); 564 - return (reg & S5P_CISTATUS_FRAMECNT_MASK) >> 565 - S5P_CISTATUS_FRAMECNT_SHIFT; 560 + u32 reg; 561 + 562 + if (dev->variant->has_cistatus2) { 563 + reg = readl(dev->regs + S5P_CISTATUS2) & 0x3F; 564 + return reg > 0 ? 
--reg : reg; 565 + } else { 566 + reg = readl(dev->regs + S5P_CISTATUS); 567 + return (reg & S5P_CISTATUS_FRAMECNT_MASK) >> 568 + S5P_CISTATUS_FRAMECNT_SHIFT; 569 + } 566 570 } 567 571 568 572 /* -----------------------------------------------------*/ ··· 606 594 struct v4l2_format *f); 607 595 int fimc_vidioc_try_fmt(struct file *file, void *priv, 608 596 struct v4l2_format *f); 609 - int fimc_vidioc_g_crop(struct file *file, void *fh, 610 - struct v4l2_crop *cr); 611 - int fimc_vidioc_cropcap(struct file *file, void *fh, 612 - struct v4l2_cropcap *cr); 613 597 int fimc_vidioc_queryctrl(struct file *file, void *priv, 614 598 struct v4l2_queryctrl *qc); 615 599 int fimc_vidioc_g_ctrl(struct file *file, void *priv,
+3
drivers/media/video/s5p-fimc/regs-fimc.h
··· 165 165 #define S5P_CISTATUS_VVALID_A (1 << 15) 166 166 #define S5P_CISTATUS_VVALID_B (1 << 14) 167 167 168 + /* Indexes to the last and the currently processed buffer. */ 169 + #define S5P_CISTATUS2 0x68 170 + 168 171 /* Image capture control */ 169 172 #define S5P_CIIMGCPT 0xc0 170 173 #define S5P_CIIMGCPT_IMGCPTEN (1 << 31)
+1 -1
drivers/media/video/sh_mobile_ceu_camera.c
··· 1980 1980 * we complete the completion. 1981 1981 */ 1982 1982 1983 - if (!csi2->driver || !csi2->driver->owner) { 1983 + if (!csi2->driver) { 1984 1984 complete(&wait.completion); 1985 1985 /* Either too late, or probing failed */ 1986 1986 bus_unregister_notifier(&platform_bus_type, &wait.notifier);
+2 -2
drivers/media/video/soc_camera.c
··· 405 405 ret = soc_camera_set_fmt(icd, &f); 406 406 if (ret < 0) 407 407 goto esfmt; 408 + 409 + ici->ops->init_videobuf(&icd->vb_vidq, icd); 408 410 } 409 411 410 412 file->private_data = icd; 411 413 dev_dbg(&icd->dev, "camera device open\n"); 412 - 413 - ici->ops->init_videobuf(&icd->vb_vidq, icd); 414 414 415 415 mutex_unlock(&icd->video_lock); 416 416
+39 -65
drivers/media/video/wm8775.c
··· 35 35 #include <media/v4l2-device.h> 36 36 #include <media/v4l2-chip-ident.h> 37 37 #include <media/v4l2-ctrls.h> 38 - #include <media/wm8775.h> 39 38 40 39 MODULE_DESCRIPTION("wm8775 driver"); 41 40 MODULE_AUTHOR("Ulf Eklund, Hans Verkuil"); ··· 50 51 TOT_REGS 51 52 }; 52 53 53 - #define ALC_HOLD 0x85 /* R17: use zero cross detection, ALC hold time 42.6 ms */ 54 - #define ALC_EN 0x100 /* R17: ALC enable */ 55 - 56 54 struct wm8775_state { 57 55 struct v4l2_subdev sd; 58 56 struct v4l2_ctrl_handler hdl; 59 57 struct v4l2_ctrl *mute; 60 - struct v4l2_ctrl *vol; 61 - struct v4l2_ctrl *bal; 62 - struct v4l2_ctrl *loud; 63 58 u8 input; /* Last selected input (0-0xf) */ 64 59 }; 65 60 ··· 85 92 return -1; 86 93 } 87 94 88 - static void wm8775_set_audio(struct v4l2_subdev *sd, int quietly) 89 - { 90 - struct wm8775_state *state = to_state(sd); 91 - u8 vol_l, vol_r; 92 - int muted = 0 != state->mute->val; 93 - u16 volume = (u16)state->vol->val; 94 - u16 balance = (u16)state->bal->val; 95 - 96 - /* normalize ( 65535 to 0 -> 255 to 0 (+24dB to -103dB) ) */ 97 - vol_l = (min(65536 - balance, 32768) * volume) >> 23; 98 - vol_r = (min(balance, (u16)32768) * volume) >> 23; 99 - 100 - /* Mute */ 101 - if (muted || quietly) 102 - wm8775_write(sd, R21, 0x0c0 | state->input); 103 - 104 - wm8775_write(sd, R14, vol_l | 0x100); /* 0x100= Left channel ADC zero cross enable */ 105 - wm8775_write(sd, R15, vol_r | 0x100); /* 0x100= Right channel ADC zero cross enable */ 106 - 107 - /* Un-mute */ 108 - if (!muted) 109 - wm8775_write(sd, R21, state->input); 110 - } 111 - 112 95 static int wm8775_s_routing(struct v4l2_subdev *sd, 113 96 u32 input, u32 output, u32 config) 114 97 { ··· 102 133 state->input = input; 103 134 if (!v4l2_ctrl_g_ctrl(state->mute)) 104 135 return 0; 105 - if (!v4l2_ctrl_g_ctrl(state->vol)) 106 - return 0; 107 - if (!v4l2_ctrl_g_ctrl(state->bal)) 108 - return 0; 109 - wm8775_set_audio(sd, 1); 136 + wm8775_write(sd, R21, 0x0c0); 137 + wm8775_write(sd, R14, 0x1d4); 
138 + wm8775_write(sd, R15, 0x1d4); 139 + wm8775_write(sd, R21, 0x100 + state->input); 110 140 return 0; 111 141 } 112 142 113 143 static int wm8775_s_ctrl(struct v4l2_ctrl *ctrl) 114 144 { 115 145 struct v4l2_subdev *sd = to_sd(ctrl); 146 + struct wm8775_state *state = to_state(sd); 116 147 117 148 switch (ctrl->id) { 118 149 case V4L2_CID_AUDIO_MUTE: 119 - case V4L2_CID_AUDIO_VOLUME: 120 - case V4L2_CID_AUDIO_BALANCE: 121 - wm8775_set_audio(sd, 0); 122 - return 0; 123 - case V4L2_CID_AUDIO_LOUDNESS: 124 - wm8775_write(sd, R17, (ctrl->val ? ALC_EN : 0) | ALC_HOLD); 150 + wm8775_write(sd, R21, 0x0c0); 151 + wm8775_write(sd, R14, 0x1d4); 152 + wm8775_write(sd, R15, 0x1d4); 153 + if (!ctrl->val) 154 + wm8775_write(sd, R21, 0x100 + state->input); 125 155 return 0; 126 156 } 127 157 return -EINVAL; ··· 144 176 145 177 static int wm8775_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq) 146 178 { 147 - wm8775_set_audio(sd, 0); 179 + struct wm8775_state *state = to_state(sd); 180 + 181 + /* If I remove this, then it can happen that I have no 182 + sound the first time I tune from static to a valid channel. 183 + It's difficult to reproduce and is almost certainly related 184 + to the zero cross detect circuit. 
*/ 185 + wm8775_write(sd, R21, 0x0c0); 186 + wm8775_write(sd, R14, 0x1d4); 187 + wm8775_write(sd, R15, 0x1d4); 188 + wm8775_write(sd, R21, 0x100 + state->input); 148 189 return 0; 149 190 } 150 191 ··· 203 226 { 204 227 struct wm8775_state *state; 205 228 struct v4l2_subdev *sd; 206 - int err; 207 229 208 230 /* Check if the adapter supports the needed features */ 209 231 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) ··· 216 240 return -ENOMEM; 217 241 sd = &state->sd; 218 242 v4l2_i2c_subdev_init(sd, client, &wm8775_ops); 219 - sd->grp_id = WM8775_GID; /* subdev group id */ 220 243 state->input = 2; 221 244 222 - v4l2_ctrl_handler_init(&state->hdl, 4); 245 + v4l2_ctrl_handler_init(&state->hdl, 1); 223 246 state->mute = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops, 224 247 V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0); 225 - state->vol = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops, 226 - V4L2_CID_AUDIO_VOLUME, 0, 65535, (65535+99)/100, 0xCF00); /* 0dB*/ 227 - state->bal = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops, 228 - V4L2_CID_AUDIO_BALANCE, 0, 65535, (65535+99)/100, 32768); 229 - state->loud = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops, 230 - V4L2_CID_AUDIO_LOUDNESS, 0, 1, 1, 1); 231 248 sd->ctrl_handler = &state->hdl; 232 - err = state->hdl.error; 233 - if (err) { 249 + if (state->hdl.error) { 250 + int err = state->hdl.error; 251 + 234 252 v4l2_ctrl_handler_free(&state->hdl); 235 253 kfree(state); 236 254 return err; ··· 236 266 wm8775_write(sd, R23, 0x000); 237 267 /* Disable zero cross detect timeout */ 238 268 wm8775_write(sd, R7, 0x000); 239 - /* HPF enable, I2S mode, 24-bit */ 240 - wm8775_write(sd, R11, 0x022); 269 + /* Left justified, 24-bit mode */ 270 + wm8775_write(sd, R11, 0x021); 241 271 /* Master mode, clock ratio 256fs */ 242 272 wm8775_write(sd, R12, 0x102); 243 273 /* Powered up */ 244 274 wm8775_write(sd, R13, 0x000); 245 - /* ALC stereo, ALC target level -5dB FS, ALC max gain +8dB */ 246 - wm8775_write(sd, R16, 
0x1bb); 247 - /* Set ALC mode and hold time */ 248 - wm8775_write(sd, R17, (state->loud->val ? ALC_EN : 0) | ALC_HOLD); 275 + /* ADC gain +2.5dB, enable zero cross */ 276 + wm8775_write(sd, R14, 0x1d4); 277 + /* ADC gain +2.5dB, enable zero cross */ 278 + wm8775_write(sd, R15, 0x1d4); 279 + /* ALC Stereo, ALC target level -1dB FS max gain +8dB */ 280 + wm8775_write(sd, R16, 0x1bf); 281 + /* Enable gain control, use zero cross detection, 282 + ALC hold time 42.6 ms */ 283 + wm8775_write(sd, R17, 0x185); 249 284 /* ALC gain ramp up delay 34 s, ALC gain ramp down delay 33 ms */ 250 285 wm8775_write(sd, R18, 0x0a2); 251 286 /* Enable noise gate, threshold -72dBfs */ 252 287 wm8775_write(sd, R19, 0x005); 253 - /* Transient window 4ms, ALC min gain -5dB */ 254 - wm8775_write(sd, R20, 0x0fb); 255 - 256 - wm8775_set_audio(sd, 1); /* set volume/mute/mux */ 257 - 288 + /* Transient window 4ms, lower PGA gain limit -1dB */ 289 + wm8775_write(sd, R20, 0x07a); 290 + /* LRBOTH = 1, use input 2. */ 291 + wm8775_write(sd, R21, 0x102); 258 292 return 0; 259 293 } 260 294
+1 -1
drivers/mfd/ab8500-core.c
··· 303 303 continue; 304 304 305 305 do { 306 - int bit = __ffs(status); 306 + int bit = __ffs(value); 307 307 int line = i * 8 + bit; 308 308 309 309 handle_nested_irq(ab8500->irq_base + line);
+6 -2
drivers/mfd/wm831x-core.c
··· 1455 1455 dev_err(wm831x->dev, "Failed to read parent ID: %d\n", ret); 1456 1456 goto err; 1457 1457 } 1458 - if (ret != 0x6204) { 1458 + switch (ret) { 1459 + case 0x6204: 1460 + case 0x6246: 1461 + break; 1462 + default: 1459 1463 dev_err(wm831x->dev, "Device is not a WM831x: ID %x\n", ret); 1460 1464 ret = -EINVAL; 1461 1465 goto err; ··· 1624 1620 case WM8325: 1625 1621 ret = mfd_add_devices(wm831x->dev, -1, 1626 1622 wm8320_devs, ARRAY_SIZE(wm8320_devs), 1627 - NULL, 0); 1623 + NULL, wm831x->irq_base); 1628 1624 break; 1629 1625 1630 1626 default:
+1
drivers/mmc/core/core.c
··· 1773 1773 1774 1774 case PM_POST_SUSPEND: 1775 1775 case PM_POST_HIBERNATION: 1776 + case PM_POST_RESTORE: 1776 1777 1777 1778 spin_lock_irqsave(&host->lock, flags); 1778 1779 host->rescan_disable = 0;
+9 -4
drivers/mmc/host/at91_mci.c
··· 69 69 #include <linux/highmem.h> 70 70 71 71 #include <linux/mmc/host.h> 72 + #include <linux/mmc/sdio.h> 72 73 73 74 #include <asm/io.h> 74 75 #include <asm/irq.h> ··· 494 493 else if (data->flags & MMC_DATA_WRITE) 495 494 cmdr |= AT91_MCI_TRCMD_START; 496 495 497 - if (data->flags & MMC_DATA_STREAM) 498 - cmdr |= AT91_MCI_TRTYP_STREAM; 499 - if (data->blocks > 1) 500 - cmdr |= AT91_MCI_TRTYP_MULTIPLE; 496 + if (cmd->opcode == SD_IO_RW_EXTENDED) { 497 + cmdr |= AT91_MCI_TRTYP_SDIO_BLOCK; 498 + } else { 499 + if (data->flags & MMC_DATA_STREAM) 500 + cmdr |= AT91_MCI_TRTYP_STREAM; 501 + if (data->blocks > 1) 502 + cmdr |= AT91_MCI_TRTYP_MULTIPLE; 503 + } 501 504 } 502 505 else { 503 506 block_length = 0;
+12 -6
drivers/mmc/host/atmel-mci.c
··· 26 26 #include <linux/stat.h> 27 27 28 28 #include <linux/mmc/host.h> 29 + #include <linux/mmc/sdio.h> 29 30 30 31 #include <mach/atmel-mci.h> 31 32 #include <linux/atmel-mci.h> ··· 533 532 data = cmd->data; 534 533 if (data) { 535 534 cmdr |= MCI_CMDR_START_XFER; 536 - if (data->flags & MMC_DATA_STREAM) 537 - cmdr |= MCI_CMDR_STREAM; 538 - else if (data->blocks > 1) 539 - cmdr |= MCI_CMDR_MULTI_BLOCK; 540 - else 541 - cmdr |= MCI_CMDR_BLOCK; 535 + 536 + if (cmd->opcode == SD_IO_RW_EXTENDED) { 537 + cmdr |= MCI_CMDR_SDIO_BLOCK; 538 + } else { 539 + if (data->flags & MMC_DATA_STREAM) 540 + cmdr |= MCI_CMDR_STREAM; 541 + else if (data->blocks > 1) 542 + cmdr |= MCI_CMDR_MULTI_BLOCK; 543 + else 544 + cmdr |= MCI_CMDR_BLOCK; 545 + } 542 546 543 547 if (data->flags & MMC_DATA_READ) 544 548 cmdr |= MCI_CMDR_TRDIR_READ;
+15 -24
drivers/net/atl1c/atl1c_main.c
··· 702 702 703 703 704 704 adapter->wol = 0; 705 + device_set_wakeup_enable(&pdev->dev, false); 705 706 adapter->link_speed = SPEED_0; 706 707 adapter->link_duplex = FULL_DUPLEX; 707 708 adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE; ··· 2445 2444 return 0; 2446 2445 } 2447 2446 2448 - static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state) 2447 + static int atl1c_suspend(struct device *dev) 2449 2448 { 2449 + struct pci_dev *pdev = to_pci_dev(dev); 2450 2450 struct net_device *netdev = pci_get_drvdata(pdev); 2451 2451 struct atl1c_adapter *adapter = netdev_priv(netdev); 2452 2452 struct atl1c_hw *hw = &adapter->hw; ··· 2456 2454 u32 wol_ctrl_data = 0; 2457 2455 u16 mii_intr_status_data = 0; 2458 2456 u32 wufc = adapter->wol; 2459 - int retval = 0; 2460 2457 2461 2458 atl1c_disable_l0s_l1(hw); 2462 2459 if (netif_running(netdev)) { ··· 2463 2462 atl1c_down(adapter); 2464 2463 } 2465 2464 netif_device_detach(netdev); 2466 - retval = pci_save_state(pdev); 2467 - if (retval) 2468 - return retval; 2469 2465 2470 2466 if (wufc) 2471 2467 if (atl1c_phy_power_saving(hw) != 0) ··· 2523 2525 AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data); 2524 2526 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); 2525 2527 2526 - /* pcie patch */ 2527 - device_set_wakeup_enable(&pdev->dev, 1); 2528 - 2529 2528 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT | 2530 2529 GPHY_CTRL_EXT_RESET); 2531 - pci_prepare_to_sleep(pdev); 2532 2530 } else { 2533 2531 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING); 2534 2532 master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS; ··· 2534 2540 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); 2535 2541 AT_WRITE_REG(hw, REG_WOL_CTRL, 0); 2536 2542 hw->phy_configured = false; /* re-init PHY when resume */ 2537 - pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); 2538 2543 } 2539 - 2540 - pci_disable_device(pdev); 2541 - pci_set_power_state(pdev, pci_choose_state(pdev, state)); 2542 2544 2543 2545 return 0; 2544 2546 } 2545 2547 2546 - static int 
atl1c_resume(struct pci_dev *pdev) 2548 + static int atl1c_resume(struct device *dev) 2547 2549 { 2550 + struct pci_dev *pdev = to_pci_dev(dev); 2548 2551 struct net_device *netdev = pci_get_drvdata(pdev); 2549 2552 struct atl1c_adapter *adapter = netdev_priv(netdev); 2550 - 2551 - pci_set_power_state(pdev, PCI_D0); 2552 - pci_restore_state(pdev); 2553 - pci_enable_wake(pdev, PCI_D3hot, 0); 2554 - pci_enable_wake(pdev, PCI_D3cold, 0); 2555 2553 2556 2554 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); 2557 2555 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE | ··· 2568 2582 2569 2583 static void atl1c_shutdown(struct pci_dev *pdev) 2570 2584 { 2571 - atl1c_suspend(pdev, PMSG_SUSPEND); 2585 + struct net_device *netdev = pci_get_drvdata(pdev); 2586 + struct atl1c_adapter *adapter = netdev_priv(netdev); 2587 + 2588 + atl1c_suspend(&pdev->dev); 2589 + pci_wake_from_d3(pdev, adapter->wol); 2590 + pci_set_power_state(pdev, PCI_D3hot); 2572 2591 } 2573 2592 2574 2593 static const struct net_device_ops atl1c_netdev_ops = { ··· 2877 2886 .resume = atl1c_io_resume, 2878 2887 }; 2879 2888 2889 + static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume); 2890 + 2880 2891 static struct pci_driver atl1c_driver = { 2881 2892 .name = atl1c_driver_name, 2882 2893 .id_table = atl1c_pci_tbl, 2883 2894 .probe = atl1c_probe, 2884 2895 .remove = __devexit_p(atl1c_remove), 2885 - /* Power Managment Hooks */ 2886 - .suspend = atl1c_suspend, 2887 - .resume = atl1c_resume, 2888 2896 .shutdown = atl1c_shutdown, 2889 - .err_handler = &atl1c_err_handler 2897 + .err_handler = &atl1c_err_handler, 2898 + .driver.pm = &atl1c_pm_ops, 2890 2899 }; 2891 2900 2892 2901 /*
+10
drivers/net/atlx/atl1.c
··· 3504 3504 struct atl1_rfd_ring rfd_old, rfd_new; 3505 3505 struct atl1_rrd_ring rrd_old, rrd_new; 3506 3506 struct atl1_ring_header rhdr_old, rhdr_new; 3507 + struct atl1_smb smb; 3508 + struct atl1_cmb cmb; 3507 3509 int err; 3508 3510 3509 3511 tpd_old = adapter->tpd_ring; ··· 3546 3544 adapter->rrd_ring = rrd_old; 3547 3545 adapter->tpd_ring = tpd_old; 3548 3546 adapter->ring_header = rhdr_old; 3547 + /* 3548 + * Save SMB and CMB, since atl1_free_ring_resources 3549 + * will clear them. 3550 + */ 3551 + smb = adapter->smb; 3552 + cmb = adapter->cmb; 3549 3553 atl1_free_ring_resources(adapter); 3550 3554 adapter->rfd_ring = rfd_new; 3551 3555 adapter->rrd_ring = rrd_new; 3552 3556 adapter->tpd_ring = tpd_new; 3553 3557 adapter->ring_header = rhdr_new; 3558 + adapter->smb = smb; 3559 + adapter->cmb = cmb; 3554 3560 3555 3561 err = atl1_up(adapter); 3556 3562 if (err)
+1 -1
drivers/net/benet/be.h
··· 234 234 u8 __iomem *db; /* Door Bell */ 235 235 u8 __iomem *pcicfg; /* PCI config space */ 236 236 237 - spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */ 237 + struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ 238 238 struct be_dma_mem mbox_mem; 239 239 /* Mbox mem is adjusted to align to 16 bytes. The allocated addr 240 240 * is stored for freeing purpose */
+45 -30
drivers/net/benet/be_cmds.c
··· 462 462 u8 *wrb; 463 463 int status; 464 464 465 - spin_lock(&adapter->mbox_lock); 465 + if (mutex_lock_interruptible(&adapter->mbox_lock)) 466 + return -1; 466 467 467 468 wrb = (u8 *)wrb_from_mbox(adapter); 468 469 *wrb++ = 0xFF; ··· 477 476 478 477 status = be_mbox_notify_wait(adapter); 479 478 480 - spin_unlock(&adapter->mbox_lock); 479 + mutex_unlock(&adapter->mbox_lock); 481 480 return status; 482 481 } 483 482 ··· 492 491 if (adapter->eeh_err) 493 492 return -EIO; 494 493 495 - spin_lock(&adapter->mbox_lock); 494 + if (mutex_lock_interruptible(&adapter->mbox_lock)) 495 + return -1; 496 496 497 497 wrb = (u8 *)wrb_from_mbox(adapter); 498 498 *wrb++ = 0xFF; ··· 507 505 508 506 status = be_mbox_notify_wait(adapter); 509 507 510 - spin_unlock(&adapter->mbox_lock); 508 + mutex_unlock(&adapter->mbox_lock); 511 509 return status; 512 510 } 513 511 int be_cmd_eq_create(struct be_adapter *adapter, ··· 518 516 struct be_dma_mem *q_mem = &eq->dma_mem; 519 517 int status; 520 518 521 - spin_lock(&adapter->mbox_lock); 519 + if (mutex_lock_interruptible(&adapter->mbox_lock)) 520 + return -1; 522 521 523 522 wrb = wrb_from_mbox(adapter); 524 523 req = embedded_payload(wrb); ··· 549 546 eq->created = true; 550 547 } 551 548 552 - spin_unlock(&adapter->mbox_lock); 549 + mutex_unlock(&adapter->mbox_lock); 553 550 return status; 554 551 } 555 552 ··· 561 558 struct be_cmd_req_mac_query *req; 562 559 int status; 563 560 564 - spin_lock(&adapter->mbox_lock); 561 + if (mutex_lock_interruptible(&adapter->mbox_lock)) 562 + return -1; 565 563 566 564 wrb = wrb_from_mbox(adapter); 567 565 req = embedded_payload(wrb); ··· 587 583 memcpy(mac_addr, resp->mac.addr, ETH_ALEN); 588 584 } 589 585 590 - spin_unlock(&adapter->mbox_lock); 586 + mutex_unlock(&adapter->mbox_lock); 591 587 return status; 592 588 } 593 589 ··· 671 667 void *ctxt; 672 668 int status; 673 669 674 - spin_lock(&adapter->mbox_lock); 670 + if (mutex_lock_interruptible(&adapter->mbox_lock)) 671 + return -1; 675 672 
676 673 wrb = wrb_from_mbox(adapter); 677 674 req = embedded_payload(wrb); ··· 706 701 cq->created = true; 707 702 } 708 703 709 - spin_unlock(&adapter->mbox_lock); 704 + mutex_unlock(&adapter->mbox_lock); 710 705 711 706 return status; 712 707 } ··· 729 724 void *ctxt; 730 725 int status; 731 726 732 - spin_lock(&adapter->mbox_lock); 727 + if (mutex_lock_interruptible(&adapter->mbox_lock)) 728 + return -1; 733 729 734 730 wrb = wrb_from_mbox(adapter); 735 731 req = embedded_payload(wrb); ··· 760 754 mccq->id = le16_to_cpu(resp->id); 761 755 mccq->created = true; 762 756 } 763 - spin_unlock(&adapter->mbox_lock); 757 + mutex_unlock(&adapter->mbox_lock); 764 758 765 759 return status; 766 760 } ··· 775 769 void *ctxt; 776 770 int status; 777 771 778 - spin_lock(&adapter->mbox_lock); 772 + if (mutex_lock_interruptible(&adapter->mbox_lock)) 773 + return -1; 779 774 780 775 wrb = wrb_from_mbox(adapter); 781 776 req = embedded_payload(wrb); ··· 808 801 txq->created = true; 809 802 } 810 803 811 - spin_unlock(&adapter->mbox_lock); 804 + mutex_unlock(&adapter->mbox_lock); 812 805 813 806 return status; 814 807 } ··· 823 816 struct be_dma_mem *q_mem = &rxq->dma_mem; 824 817 int status; 825 818 826 - spin_lock(&adapter->mbox_lock); 819 + if (mutex_lock_interruptible(&adapter->mbox_lock)) 820 + return -1; 827 821 828 822 wrb = wrb_from_mbox(adapter); 829 823 req = embedded_payload(wrb); ··· 851 843 *rss_id = resp->rss_id; 852 844 } 853 845 854 - spin_unlock(&adapter->mbox_lock); 846 + mutex_unlock(&adapter->mbox_lock); 855 847 856 848 return status; 857 849 } ··· 870 862 if (adapter->eeh_err) 871 863 return -EIO; 872 864 873 - spin_lock(&adapter->mbox_lock); 865 + if (mutex_lock_interruptible(&adapter->mbox_lock)) 866 + return -1; 874 867 875 868 wrb = wrb_from_mbox(adapter); 876 869 req = embedded_payload(wrb); ··· 908 899 909 900 status = be_mbox_notify_wait(adapter); 910 901 911 - spin_unlock(&adapter->mbox_lock); 902 + mutex_unlock(&adapter->mbox_lock); 912 903 913 904 
return status; 914 905 } ··· 924 915 struct be_cmd_req_if_create *req; 925 916 int status; 926 917 927 - spin_lock(&adapter->mbox_lock); 918 + if (mutex_lock_interruptible(&adapter->mbox_lock)) 919 + return -1; 928 920 929 921 wrb = wrb_from_mbox(adapter); 930 922 req = embedded_payload(wrb); ··· 951 941 *pmac_id = le32_to_cpu(resp->pmac_id); 952 942 } 953 943 954 - spin_unlock(&adapter->mbox_lock); 944 + mutex_unlock(&adapter->mbox_lock); 955 945 return status; 956 946 } 957 947 ··· 965 955 if (adapter->eeh_err) 966 956 return -EIO; 967 957 968 - spin_lock(&adapter->mbox_lock); 958 + if (mutex_lock_interruptible(&adapter->mbox_lock)) 959 + return -1; 969 960 970 961 wrb = wrb_from_mbox(adapter); 971 962 req = embedded_payload(wrb); ··· 981 970 982 971 status = be_mbox_notify_wait(adapter); 983 972 984 - spin_unlock(&adapter->mbox_lock); 973 + mutex_unlock(&adapter->mbox_lock); 985 974 986 975 return status; 987 976 } ··· 1071 1060 struct be_cmd_req_get_fw_version *req; 1072 1061 int status; 1073 1062 1074 - spin_lock(&adapter->mbox_lock); 1063 + if (mutex_lock_interruptible(&adapter->mbox_lock)) 1064 + return -1; 1075 1065 1076 1066 wrb = wrb_from_mbox(adapter); 1077 1067 req = embedded_payload(wrb); ··· 1089 1077 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN); 1090 1078 } 1091 1079 1092 - spin_unlock(&adapter->mbox_lock); 1080 + mutex_unlock(&adapter->mbox_lock); 1093 1081 return status; 1094 1082 } 1095 1083 ··· 1334 1322 struct be_cmd_req_query_fw_cfg *req; 1335 1323 int status; 1336 1324 1337 - spin_lock(&adapter->mbox_lock); 1325 + if (mutex_lock_interruptible(&adapter->mbox_lock)) 1326 + return -1; 1338 1327 1339 1328 wrb = wrb_from_mbox(adapter); 1340 1329 req = embedded_payload(wrb); ··· 1354 1341 *caps = le32_to_cpu(resp->function_caps); 1355 1342 } 1356 1343 1357 - spin_unlock(&adapter->mbox_lock); 1344 + mutex_unlock(&adapter->mbox_lock); 1358 1345 return status; 1359 1346 } 1360 1347 ··· 1365 1352 struct be_cmd_req_hdr *req; 1366 1353 int 
status; 1367 1354 1368 - spin_lock(&adapter->mbox_lock); 1355 + if (mutex_lock_interruptible(&adapter->mbox_lock)) 1356 + return -1; 1369 1357 1370 1358 wrb = wrb_from_mbox(adapter); 1371 1359 req = embedded_payload(wrb); ··· 1379 1365 1380 1366 status = be_mbox_notify_wait(adapter); 1381 1367 1382 - spin_unlock(&adapter->mbox_lock); 1368 + mutex_unlock(&adapter->mbox_lock); 1383 1369 return status; 1384 1370 } 1385 1371 ··· 1390 1376 u32 myhash[10]; 1391 1377 int status; 1392 1378 1393 - spin_lock(&adapter->mbox_lock); 1379 + if (mutex_lock_interruptible(&adapter->mbox_lock)) 1380 + return -1; 1394 1381 1395 1382 wrb = wrb_from_mbox(adapter); 1396 1383 req = embedded_payload(wrb); ··· 1411 1396 1412 1397 status = be_mbox_notify_wait(adapter); 1413 1398 1414 - spin_unlock(&adapter->mbox_lock); 1399 + mutex_unlock(&adapter->mbox_lock); 1415 1400 return status; 1416 1401 } 1417 1402
+1 -1
drivers/net/benet/be_main.c
··· 2677 2677 } 2678 2678 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size); 2679 2679 2680 - spin_lock_init(&adapter->mbox_lock); 2680 + mutex_init(&adapter->mbox_lock); 2681 2681 spin_lock_init(&adapter->mcc_lock); 2682 2682 spin_lock_init(&adapter->mcc_cq_lock); 2683 2683
+6 -1
drivers/net/bonding/bond_ipv6.c
··· 88 88 } 89 89 90 90 if (vlan_id) { 91 - skb = vlan_put_tag(skb, vlan_id); 91 + /* The Ethernet header is not present yet, so it is 92 + * too early to insert a VLAN tag. Force use of an 93 + * out-of-line tag here and let dev_hard_start_xmit() 94 + * insert it if the slave hardware can't. 95 + */ 96 + skb = __vlan_hwaccel_put_tag(skb, vlan_id); 92 97 if (!skb) { 93 98 pr_err("failed to insert VLAN tag\n"); 94 99 return;
+10 -32
drivers/net/bonding/bond_main.c
··· 418 418 * @bond: bond device that got this skb for tx. 419 419 * @skb: hw accel VLAN tagged skb to transmit 420 420 * @slave_dev: slave that is supposed to xmit this skbuff 421 - * 422 - * When the bond gets an skb to transmit that is 423 - * already hardware accelerated VLAN tagged, and it 424 - * needs to relay this skb to a slave that is not 425 - * hw accel capable, the skb needs to be "unaccelerated", 426 - * i.e. strip the hwaccel tag and re-insert it as part 427 - * of the payload. 428 421 */ 429 422 int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, 430 423 struct net_device *slave_dev) 431 424 { 432 - unsigned short uninitialized_var(vlan_id); 433 - 434 - /* Test vlan_list not vlgrp to catch and handle 802.1p tags */ 435 - if (!list_empty(&bond->vlan_list) && 436 - !(slave_dev->features & NETIF_F_HW_VLAN_TX) && 437 - vlan_get_tag(skb, &vlan_id) == 0) { 438 - skb->dev = slave_dev; 439 - skb = vlan_put_tag(skb, vlan_id); 440 - if (!skb) { 441 - /* vlan_put_tag() frees the skb in case of error, 442 - * so return success here so the calling functions 443 - * won't attempt to free is again. 
444 - */ 445 - return 0; 446 - } 447 - } else { 448 - skb->dev = slave_dev; 449 - } 450 - 425 + skb->dev = slave_dev; 451 426 skb->priority = 1; 452 427 #ifdef CONFIG_NET_POLL_CONTROLLER 453 428 if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) { ··· 1178 1203 bond_do_fail_over_mac(bond, new_active, 1179 1204 old_active); 1180 1205 1181 - bond->send_grat_arp = bond->params.num_grat_arp; 1182 - bond_send_gratuitous_arp(bond); 1206 + if (netif_running(bond->dev)) { 1207 + bond->send_grat_arp = bond->params.num_grat_arp; 1208 + bond_send_gratuitous_arp(bond); 1183 1209 1184 - bond->send_unsol_na = bond->params.num_unsol_na; 1185 - bond_send_unsolicited_na(bond); 1210 + bond->send_unsol_na = bond->params.num_unsol_na; 1211 + bond_send_unsolicited_na(bond); 1212 + } 1186 1213 1187 1214 write_unlock_bh(&bond->curr_slave_lock); 1188 1215 read_unlock(&bond->lock); ··· 1198 1221 1199 1222 /* resend IGMP joins since active slave has changed or 1200 1223 * all were sent on curr_active_slave */ 1201 - if ((USES_PRIMARY(bond->params.mode) && new_active) || 1202 - bond->params.mode == BOND_MODE_ROUNDROBIN) { 1224 + if (((USES_PRIMARY(bond->params.mode) && new_active) || 1225 + bond->params.mode == BOND_MODE_ROUNDROBIN) && 1226 + netif_running(bond->dev)) { 1203 1227 bond->igmp_retrans = bond->params.resend_igmp; 1204 1228 queue_delayed_work(bond->wq, &bond->mcast_work, 0); 1205 1229 }
+2 -2
drivers/net/bonding/bonding.h
··· 269 269 270 270 bond_for_each_slave(bond, slave, i) { 271 271 if (slave->dev == slave_dev) { 272 - break; 272 + return slave; 273 273 } 274 274 } 275 275 276 - return slave; 276 + return 0; 277 277 } 278 278 279 279 static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
+8 -2
drivers/net/cnic.c
··· 940 940 &udev->l2_ring_map, 941 941 GFP_KERNEL | __GFP_COMP); 942 942 if (!udev->l2_ring) 943 - return -ENOMEM; 943 + goto err_udev; 944 944 945 945 udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; 946 946 udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); ··· 948 948 &udev->l2_buf_map, 949 949 GFP_KERNEL | __GFP_COMP); 950 950 if (!udev->l2_buf) 951 - return -ENOMEM; 951 + goto err_dma; 952 952 953 953 write_lock(&cnic_dev_lock); 954 954 list_add(&udev->list, &cnic_udev_list); ··· 959 959 cp->udev = udev; 960 960 961 961 return 0; 962 + err_dma: 963 + dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size, 964 + udev->l2_ring, udev->l2_ring_map); 965 + err_udev: 966 + kfree(udev); 967 + return -ENOMEM; 962 968 } 963 969 964 970 static int cnic_init_uio(struct cnic_dev *dev)
+7
drivers/net/ehea/ehea_ethtool.c
··· 263 263 264 264 static int ehea_set_flags(struct net_device *dev, u32 data) 265 265 { 266 + /* Avoid changing the VLAN flags */ 267 + if ((data & (ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN)) != 268 + (ethtool_op_get_flags(dev) & (ETH_FLAG_RXVLAN | 269 + ETH_FLAG_TXVLAN))){ 270 + return -EINVAL; 271 + } 272 + 266 273 return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO 267 274 | ETH_FLAG_TXVLAN 268 275 | ETH_FLAG_RXVLAN);
+2 -2
drivers/net/epic100.c
··· 935 935 936 936 /* Fill in the Rx buffers. Handle allocation failure gracefully. */ 937 937 for (i = 0; i < RX_RING_SIZE; i++) { 938 - struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz); 938 + struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz + 2); 939 939 ep->rx_skbuff[i] = skb; 940 940 if (skb == NULL) 941 941 break; ··· 1233 1233 entry = ep->dirty_rx % RX_RING_SIZE; 1234 1234 if (ep->rx_skbuff[entry] == NULL) { 1235 1235 struct sk_buff *skb; 1236 - skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz); 1236 + skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz + 2); 1237 1237 if (skb == NULL) 1238 1238 break; 1239 1239 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+2 -2
drivers/net/hamachi.c
··· 1202 1202 } 1203 1203 /* Fill in the Rx buffers. Handle allocation failure gracefully. */ 1204 1204 for (i = 0; i < RX_RING_SIZE; i++) { 1205 - struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz); 1205 + struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2); 1206 1206 hmp->rx_skbuff[i] = skb; 1207 1207 if (skb == NULL) 1208 1208 break; ··· 1669 1669 entry = hmp->dirty_rx % RX_RING_SIZE; 1670 1670 desc = &(hmp->rx_ring[entry]); 1671 1671 if (hmp->rx_skbuff[entry] == NULL) { 1672 - struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz); 1672 + struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2); 1673 1673 1674 1674 hmp->rx_skbuff[entry] = skb; 1675 1675 if (skb == NULL)
+1
drivers/net/pcmcia/axnet_cs.c
··· 690 690 static struct pcmcia_device_id axnet_ids[] = { 691 691 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x016c, 0x0081), 692 692 PCMCIA_DEVICE_MANF_CARD(0x018a, 0x0301), 693 + PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328), 693 694 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0301), 694 695 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0303), 695 696 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309),
-1
drivers/net/pcmcia/pcnet_cs.c
··· 1493 1493 PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530), 1494 1494 PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab), 1495 1495 PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110), 1496 - PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328), 1497 1496 PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041), 1498 1497 PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452), 1499 1498 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300),
+7 -2
drivers/net/ppp_generic.c
··· 1285 1285 } 1286 1286 1287 1287 #ifdef CONFIG_PPP_MULTILINK 1288 + static bool mp_protocol_compress __read_mostly = true; 1289 + module_param(mp_protocol_compress, bool, S_IRUGO | S_IWUSR); 1290 + MODULE_PARM_DESC(mp_protocol_compress, 1291 + "compress protocol id in multilink fragments"); 1292 + 1288 1293 /* 1289 1294 * Divide a packet to be transmitted into fragments and 1290 1295 * send them out the individual links. ··· 1352 1347 if (nfree == 0 || nfree < navail / 2) 1353 1348 return 0; /* can't take now, leave it in xmit_pending */ 1354 1349 1355 - /* Do protocol field compression (XXX this should be optional) */ 1350 + /* Do protocol field compression */ 1356 1351 p = skb->data; 1357 1352 len = skb->len; 1358 - if (*p == 0) { 1353 + if (*p == 0 && mp_protocol_compress) { 1359 1354 ++p; 1360 1355 --len; 1361 1356 }
+1 -1
drivers/net/skfp/skfddi.c
··· 412 412 bp->SharedMemAddr = pci_alloc_consistent(&bp->pdev, 413 413 bp->SharedMemSize, 414 414 &bp->SharedMemDMA); 415 - if (!bp->SharedMemSize) { 415 + if (!bp->SharedMemAddr) { 416 416 printk("could not allocate mem for "); 417 417 printk("hardware module: %ld byte\n", 418 418 bp->SharedMemSize);
+1 -1
drivers/net/starfire.c
··· 148 148 * This SUCKS. 149 149 * We need a much better method to determine if dma_addr_t is 64-bit. 150 150 */ 151 - #if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) || (defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT)) 151 + #if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || (defined(CONFIG_MIPS) && ((defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) || defined(CONFIG_64BIT))) || (defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT)) 152 152 /* 64-bit dma_addr_t */ 153 153 #define ADDR_64BITS /* This chip uses 64 bit addresses. */ 154 154 #define netdrv_addr_t __le64
+2 -2
drivers/net/sundance.c
··· 1016 1016 1017 1017 /* Fill in the Rx buffers. Handle allocation failure gracefully. */ 1018 1018 for (i = 0; i < RX_RING_SIZE; i++) { 1019 - struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz); 1019 + struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + 2); 1020 1020 np->rx_skbuff[i] = skb; 1021 1021 if (skb == NULL) 1022 1022 break; ··· 1407 1407 struct sk_buff *skb; 1408 1408 entry = np->dirty_rx % RX_RING_SIZE; 1409 1409 if (np->rx_skbuff[entry] == NULL) { 1410 - skb = dev_alloc_skb(np->rx_buf_sz); 1410 + skb = dev_alloc_skb(np->rx_buf_sz + 2); 1411 1411 np->rx_skbuff[entry] = skb; 1412 1412 if (skb == NULL) 1413 1413 break; /* Better luck next round. */
+2 -2
drivers/net/tehuti.c
··· 324 324 ENTER; 325 325 master = READ_REG(priv, regINIT_SEMAPHORE); 326 326 if (!READ_REG(priv, regINIT_STATUS) && master) { 327 - rc = request_firmware(&fw, "tehuti/firmware.bin", &priv->pdev->dev); 327 + rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev); 328 328 if (rc) 329 329 goto out; 330 330 bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size); ··· 2510 2510 MODULE_LICENSE("GPL"); 2511 2511 MODULE_AUTHOR(DRIVER_AUTHOR); 2512 2512 MODULE_DESCRIPTION(BDX_DRV_DESC); 2513 - MODULE_FIRMWARE("tehuti/firmware.bin"); 2513 + MODULE_FIRMWARE("tehuti/bdx.bin");
+1 -1
drivers/net/tg3.c
··· 12658 12658 cnt = pci_read_vpd(tp->pdev, pos, 12659 12659 TG3_NVM_VPD_LEN - pos, 12660 12660 &vpd_data[pos]); 12661 - if (cnt == -ETIMEDOUT || -EINTR) 12661 + if (cnt == -ETIMEDOUT || cnt == -EINTR) 12662 12662 cnt = 0; 12663 12663 else if (cnt < 0) 12664 12664 goto out_not_found;
-1
drivers/net/typhoon.c
··· 1004 1004 } 1005 1005 1006 1006 strcpy(info->driver, KBUILD_MODNAME); 1007 - strcpy(info->version, UTS_RELEASE); 1008 1007 strcpy(info->bus_info, pci_name(pci_dev)); 1009 1008 } 1010 1009
+4
drivers/net/usb/asix.c
··· 1508 1508 USB_DEVICE (0x0b95, 0x1780), 1509 1509 .driver_info = (unsigned long) &ax88178_info, 1510 1510 }, { 1511 + // Logitec LAN-GTJ/U2A 1512 + USB_DEVICE (0x0789, 0x0160), 1513 + .driver_info = (unsigned long) &ax88178_info, 1514 + }, { 1511 1515 // Linksys USB200M Rev 2 1512 1516 USB_DEVICE (0x13b1, 0x0018), 1513 1517 .driver_info = (unsigned long) &ax88772_info,
+11 -3
drivers/net/usb/mcs7830.c
··· 1 1 /* 2 - * MOSCHIP MCS7830 based USB 2.0 Ethernet Devices 2 + * MOSCHIP MCS7830 based (7730/7830/7832) USB 2.0 Ethernet Devices 3 3 * 4 4 * based on usbnet.c, asix.c and the vendor provided mcs7830 driver 5 5 * ··· 10 10 * Copyright (c) 2002-2003 TiVo Inc. 11 11 * 12 12 * Definitions gathered from MOSCHIP, Data Sheet_7830DA.pdf (thanks!). 13 + * 14 + * 2010-12-19: add 7832 USB PID ("functionality same as MCS7830"), 15 + * per active notification by manufacturer 13 16 * 14 17 * TODO: 15 18 * - support HIF_REG_CONFIG_SLEEPMODE/HIF_REG_CONFIG_TXENABLE (via autopm?) ··· 63 60 #define MCS7830_MAX_MCAST 64 64 61 65 62 #define MCS7830_VENDOR_ID 0x9710 63 + #define MCS7832_PRODUCT_ID 0x7832 66 64 #define MCS7830_PRODUCT_ID 0x7830 67 65 #define MCS7730_PRODUCT_ID 0x7730 68 66 ··· 355 351 if (!ret) 356 352 ret = mcs7830_write_phy(dev, MII_BMCR, 357 353 BMCR_ANENABLE | BMCR_ANRESTART ); 358 - return ret < 0 ? : 0; 354 + return ret; 359 355 } 360 356 361 357 ··· 630 626 } 631 627 632 628 static const struct driver_info moschip_info = { 633 - .description = "MOSCHIP 7830/7730 usb-NET adapter", 629 + .description = "MOSCHIP 7830/7832/7730 usb-NET adapter", 634 630 .bind = mcs7830_bind, 635 631 .rx_fixup = mcs7830_rx_fixup, 636 632 .flags = FLAG_ETHER, ··· 648 644 }; 649 645 650 646 static const struct usb_device_id products[] = { 647 + { 648 + USB_DEVICE(MCS7830_VENDOR_ID, MCS7832_PRODUCT_ID), 649 + .driver_info = (unsigned long) &moschip_info, 650 + }, 651 651 { 652 652 USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID), 653 653 .driver_info = (unsigned long) &moschip_info,
+3 -1
drivers/net/veth.c
··· 166 166 if (!(rcv->flags & IFF_UP)) 167 167 goto tx_drop; 168 168 169 - if (dev->features & NETIF_F_NO_CSUM) 169 + /* don't change ip_summed == CHECKSUM_PARTIAL, as that 170 + will cause bad checksum on forwarded packets */ 171 + if (skb->ip_summed == CHECKSUM_NONE) 170 172 skb->ip_summed = rcv_priv->ip_summed; 171 173 172 174 length = skb->len + ETH_HLEN;
-1
drivers/net/wireless/hostap/hostap_main.c
··· 891 891 892 892 SET_ETHTOOL_OPS(dev, &prism2_ethtool_ops); 893 893 894 - netif_stop_queue(dev); 895 894 } 896 895 897 896 static int hostap_enable_hostapd(local_info_t *local, int rtnl_locked)
+2
drivers/net/wireless/iwlwifi/iwl-1000.c
··· 315 315 .mod_params = &iwlagn_mod_params, 316 316 .base_params = &iwl1000_base_params, 317 317 .ht_params = &iwl1000_ht_params, 318 + .use_new_eeprom_reading = true, 318 319 }; 319 320 320 321 struct iwl_cfg iwl100_bg_cfg = { ··· 331 330 .ops = &iwl1000_ops, 332 331 .mod_params = &iwlagn_mod_params, 333 332 .base_params = &iwl1000_base_params, 333 + .use_new_eeprom_reading = true, 334 334 }; 335 335 336 336 MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
+12
drivers/net/wireless/iwlwifi/iwl-6000.c
··· 561 561 .ht_params = &iwl6000_ht_params, 562 562 .need_dc_calib = true, 563 563 .need_temp_offset_calib = true, 564 + .use_new_eeprom_reading = true, 564 565 }; 565 566 566 567 struct iwl_cfg iwl6000g2a_2abg_cfg = { ··· 579 578 .base_params = &iwl6000_base_params, 580 579 .need_dc_calib = true, 581 580 .need_temp_offset_calib = true, 581 + .use_new_eeprom_reading = true, 582 582 }; 583 583 584 584 struct iwl_cfg iwl6000g2a_2bg_cfg = { ··· 597 595 .base_params = &iwl6000_base_params, 598 596 .need_dc_calib = true, 599 597 .need_temp_offset_calib = true, 598 + .use_new_eeprom_reading = true, 600 599 }; 601 600 602 601 struct iwl_cfg iwl6000g2b_2agn_cfg = { ··· 619 616 .need_temp_offset_calib = true, 620 617 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 621 618 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 619 + .use_new_eeprom_reading = true, 622 620 }; 623 621 624 622 struct iwl_cfg iwl6000g2b_2abg_cfg = { ··· 640 636 .need_temp_offset_calib = true, 641 637 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 642 638 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 639 + .use_new_eeprom_reading = true, 643 640 }; 644 641 645 642 struct iwl_cfg iwl6000g2b_2bgn_cfg = { ··· 662 657 .need_temp_offset_calib = true, 663 658 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 664 659 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 660 + .use_new_eeprom_reading = true, 665 661 }; 666 662 667 663 struct iwl_cfg iwl6000g2b_2bg_cfg = { ··· 683 677 .need_temp_offset_calib = true, 684 678 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 685 679 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 680 + .use_new_eeprom_reading = true, 686 681 }; 687 682 688 683 struct iwl_cfg iwl6000g2b_bgn_cfg = { ··· 705 698 .need_temp_offset_calib = true, 706 699 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 707 700 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 701 + .use_new_eeprom_reading = 
true, 708 702 }; 709 703 710 704 struct iwl_cfg iwl6000g2b_bg_cfg = { ··· 726 718 .need_temp_offset_calib = true, 727 719 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 728 720 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 721 + .use_new_eeprom_reading = true, 729 722 }; 730 723 731 724 /* ··· 813 804 .base_params = &iwl6050_base_params, 814 805 .ht_params = &iwl6000_ht_params, 815 806 .need_dc_calib = true, 807 + .use_new_eeprom_reading = true, 816 808 }; 817 809 818 810 struct iwl_cfg iwl6050_2abg_cfg = { ··· 867 857 .need_dc_calib = true, 868 858 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 869 859 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 860 + .use_new_eeprom_reading = true, 870 861 }; 871 862 872 863 struct iwl_cfg iwl130_bg_cfg = { ··· 887 876 .need_dc_calib = true, 888 877 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 889 878 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 879 + .use_new_eeprom_reading = true, 890 880 }; 891 881 892 882 MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
+86 -2
drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
··· 392 392 /** 393 393 * iwlcore_eeprom_enhanced_txpower: process enhanced tx power info 394 394 */ 395 - void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv) 395 + static void iwlcore_eeprom_enhanced_txpower_old(struct iwl_priv *priv) 396 396 { 397 397 int eeprom_section_count = 0; 398 398 int section, element; ··· 419 419 * always check for valid entry before process 420 420 * the information 421 421 */ 422 - if (!enhanced_txpower->common || enhanced_txpower->reserved) 422 + if (!(enhanced_txpower->flags || enhanced_txpower->channel) || 423 + enhanced_txpower->delta_20_in_40) 423 424 continue; 424 425 425 426 for (element = 0; element < eeprom_section_count; element++) { ··· 452 451 max_txpower_in_half_dbm; 453 452 } 454 453 } 454 + } 455 + 456 + static void 457 + iwlcore_eeprom_enh_txp_read_element(struct iwl_priv *priv, 458 + struct iwl_eeprom_enhanced_txpwr *txp, 459 + s8 max_txpower_avg) 460 + { 461 + int ch_idx; 462 + bool is_ht40 = txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ; 463 + enum ieee80211_band band; 464 + 465 + band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ? 
466 + IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ; 467 + 468 + for (ch_idx = 0; ch_idx < priv->channel_count; ch_idx++) { 469 + struct iwl_channel_info *ch_info = &priv->channel_info[ch_idx]; 470 + 471 + /* update matching channel or from common data only */ 472 + if (txp->channel != 0 && ch_info->channel != txp->channel) 473 + continue; 474 + 475 + /* update matching band only */ 476 + if (band != ch_info->band) 477 + continue; 478 + 479 + if (ch_info->max_power_avg < max_txpower_avg && !is_ht40) { 480 + ch_info->max_power_avg = max_txpower_avg; 481 + ch_info->curr_txpow = max_txpower_avg; 482 + ch_info->scan_power = max_txpower_avg; 483 + } 484 + 485 + if (is_ht40 && ch_info->ht40_max_power_avg < max_txpower_avg) 486 + ch_info->ht40_max_power_avg = max_txpower_avg; 487 + } 488 + } 489 + 490 + #define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT) 491 + #define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr) 492 + #define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE) 493 + 494 + static void iwlcore_eeprom_enhanced_txpower_new(struct iwl_priv *priv) 495 + { 496 + struct iwl_eeprom_enhanced_txpwr *txp_array, *txp; 497 + int idx, entries; 498 + __le16 *txp_len; 499 + s8 max_txp_avg, max_txp_avg_halfdbm; 500 + 501 + BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8); 502 + 503 + /* the length is in 16-bit words, but we want entries */ 504 + txp_len = (__le16 *) iwlagn_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS); 505 + entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN; 506 + 507 + txp_array = (void *) iwlagn_eeprom_query_addr(priv, EEPROM_TXP_OFFS); 508 + for (idx = 0; idx < entries; idx++) { 509 + txp = &txp_array[idx]; 510 + 511 + /* skip invalid entries */ 512 + if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID)) 513 + continue; 514 + 515 + max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx, 516 + &max_txp_avg_halfdbm); 517 + 518 + /* 519 + * Update the user limit values values to the 
highest 520 + * power supported by any channel 521 + */ 522 + if (max_txp_avg > priv->tx_power_user_lmt) 523 + priv->tx_power_user_lmt = max_txp_avg; 524 + if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm) 525 + priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm; 526 + 527 + iwlcore_eeprom_enh_txp_read_element(priv, txp, max_txp_avg); 528 + } 529 + } 530 + 531 + void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv) 532 + { 533 + if (priv->cfg->use_new_eeprom_reading) 534 + iwlcore_eeprom_enhanced_txpower_new(priv); 535 + else 536 + iwlcore_eeprom_enhanced_txpower_old(priv); 455 537 }
+6
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
··· 569 569 case INDIRECT_REGULATORY: 570 570 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY); 571 571 break; 572 + case INDIRECT_TXP_LIMIT: 573 + offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT); 574 + break; 575 + case INDIRECT_TXP_LIMIT_SIZE: 576 + offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE); 577 + break; 572 578 case INDIRECT_CALIBRATION: 573 579 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION); 574 580 break;
+1
drivers/net/wireless/iwlwifi/iwl-core.h
··· 390 390 const bool need_temp_offset_calib; /* if used set to true */ 391 391 u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; 392 392 u8 scan_tx_antennas[IEEE80211_NUM_BANDS]; 393 + const bool use_new_eeprom_reading; /* temporary, remove later */ 393 394 }; 394 395 395 396 /***************************
+21 -4
drivers/net/wireless/iwlwifi/iwl-eeprom.h
··· 120 120 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ 121 121 } __packed; 122 122 123 + enum iwl_eeprom_enhanced_txpwr_flags { 124 + IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0), 125 + IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1), 126 + IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2), 127 + IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3), 128 + IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4), 129 + IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5), 130 + IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6), 131 + IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7), 132 + }; 133 + 123 134 /** 124 135 * iwl_eeprom_enhanced_txpwr structure 125 136 * This structure presents the enhanced regulatory tx power limit layout ··· 138 127 * Enhanced regulatory tx power portion of eeprom image can be broken down 139 128 * into individual structures; each one is 8 bytes in size and contain the 140 129 * following information 141 - * @common: (desc + channel) not used by driver, should _NOT_ be "zero" 130 + * @flags: entry flags 131 + * @channel: channel number 142 132 * @chain_a_max_pwr: chain a max power in 1/2 dBm 143 133 * @chain_b_max_pwr: chain b max power in 1/2 dBm 144 134 * @chain_c_max_pwr: chain c max power in 1/2 dBm 145 - * @reserved: not used, should be "zero" 135 + * @delta_20_in_40: 20-in-40 deltas (hi/lo) 146 136 * @mimo2_max_pwr: mimo2 max power in 1/2 dBm 147 137 * @mimo3_max_pwr: mimo3 max power in 1/2 dBm 148 138 * 149 139 */ 150 140 struct iwl_eeprom_enhanced_txpwr { 151 - __le16 common; 141 + u8 flags; 142 + u8 channel; 152 143 s8 chain_a_max; 153 144 s8 chain_b_max; 154 145 s8 chain_c_max; 155 - s8 reserved; 146 + u8 delta_20_in_40; 156 147 s8 mimo2_max; 157 148 s8 mimo3_max; 158 149 } __packed; ··· 199 186 #define EEPROM_LINK_CALIBRATION (2*0x67) 200 187 #define EEPROM_LINK_PROCESS_ADJST (2*0x68) 201 188 #define EEPROM_LINK_OTHERS (2*0x69) 189 + #define EEPROM_LINK_TXP_LIMIT (2*0x6a) 190 + #define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b) 202 191 203 192 /* agn regulatory - indirect access */ 204 193 #define 
EEPROM_REG_BAND_1_CHANNELS ((0x08)\ ··· 404 389 #define INDIRECT_CALIBRATION 0x00040000 405 390 #define INDIRECT_PROCESS_ADJST 0x00050000 406 391 #define INDIRECT_OTHERS 0x00060000 392 + #define INDIRECT_TXP_LIMIT 0x00070000 393 + #define INDIRECT_TXP_LIMIT_SIZE 0x00080000 407 394 #define INDIRECT_ADDRESS 0x00100000 408 395 409 396 /* General */
+1 -1
drivers/net/wireless/libertas/cfg.c
··· 619 619 print_ssid(ssid_buf, ssid, ssid_len), 620 620 LBS_SCAN_RSSI_TO_MBM(rssi)/100); 621 621 622 - if (channel || 622 + if (channel && 623 623 !(channel->flags & IEEE80211_CHAN_DISABLED)) 624 624 cfg80211_inform_bss(wiphy, channel, 625 625 bssid, le64_to_cpu(*(__le64 *)tsfdesc),
+6
drivers/net/wireless/p54/p54usb.c
··· 43 43 44 44 static struct usb_device_id p54u_table[] __devinitdata = { 45 45 /* Version 1 devices (pci chip + net2280) */ 46 + {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */ 46 47 {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */ 47 48 {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ 48 49 {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */ ··· 57 56 {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */ 58 57 {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */ 59 58 {USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */ 59 + {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */ 60 60 {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */ 61 61 {USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */ 62 + {USB_DEVICE(0x1435, 0x0210)}, /* Inventel UR054G */ 63 + {USB_DEVICE(0x15a9, 0x0002)}, /* Gemtek WUBI-100GW 802.11g */ 62 64 {USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */ 65 + {USB_DEVICE(0x182d, 0x096b)}, /* Sitecom WL-107 */ 63 66 {USB_DEVICE(0x1915, 0x2234)}, /* Linksys WUSB54G OEM */ 64 67 {USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */ 65 68 {USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */ ··· 99 94 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ 100 95 {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ 101 96 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ 97 + {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ 102 98 {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ 103 99 {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */ 104 100 {USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */
+1
drivers/net/wireless/rt2x00/rt2800pci.c
··· 912 912 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 913 913 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags); 914 914 __set_bit(DRIVER_REQUIRE_TXSTATUS_FIFO, &rt2x00dev->flags); 915 + __set_bit(DRIVER_REQUIRE_TASKLET_CONTEXT, &rt2x00dev->flags); 915 916 if (!modparam_nohwcrypt) 916 917 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 917 918 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
+1
drivers/net/wireless/rt2x00/rt2x00.h
··· 664 664 DRIVER_REQUIRE_COPY_IV, 665 665 DRIVER_REQUIRE_L2PAD, 666 666 DRIVER_REQUIRE_TXSTATUS_FIFO, 667 + DRIVER_REQUIRE_TASKLET_CONTEXT, 667 668 668 669 /* 669 670 * Driver features
+6 -3
drivers/net/wireless/rt2x00/rt2x00dev.c
··· 390 390 * through a mac80211 library call (RTS/CTS) then we should not 391 391 * send the status report back. 392 392 */ 393 - if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) 394 - ieee80211_tx_status(rt2x00dev->hw, entry->skb); 395 - else 393 + if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) { 394 + if (test_bit(DRIVER_REQUIRE_TASKLET_CONTEXT, &rt2x00dev->flags)) 395 + ieee80211_tx_status(rt2x00dev->hw, entry->skb); 396 + else 397 + ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb); 398 + } else 396 399 dev_kfree_skb_any(entry->skb); 397 400 398 401 /*
+2 -2
drivers/net/yellowfin.c
··· 744 744 } 745 745 746 746 for (i = 0; i < RX_RING_SIZE; i++) { 747 - struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz); 747 + struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2); 748 748 yp->rx_skbuff[i] = skb; 749 749 if (skb == NULL) 750 750 break; ··· 1157 1157 for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) { 1158 1158 entry = yp->dirty_rx % RX_RING_SIZE; 1159 1159 if (yp->rx_skbuff[entry] == NULL) { 1160 - struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz); 1160 + struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2); 1161 1161 if (skb == NULL) 1162 1162 break; /* Better luck next round. */ 1163 1163 yp->rx_skbuff[entry] = skb;
+1 -1
drivers/of/of_i2c.c
··· 61 61 info.of_node = of_node_get(node); 62 62 info.archdata = &dev_ad; 63 63 64 - request_module("%s", info.type); 64 + request_module("%s%s", I2C_MODULE_PREFIX, info.type); 65 65 66 66 result = i2c_new_device(adap, &info); 67 67 if (result == NULL) {
+2 -1
drivers/pci/hotplug/pciehp_acpi.c
··· 115 115 static int __init select_detection_mode(void) 116 116 { 117 117 struct dummy_slot *slot, *tmp; 118 - pcie_port_service_register(&dummy_driver); 118 + if (pcie_port_service_register(&dummy_driver)) 119 + return PCIEHP_DETECT_ACPI; 119 120 pcie_port_service_unregister(&dummy_driver); 120 121 list_for_each_entry_safe(slot, tmp, &dummy_slots, list) { 121 122 list_del(&slot->list);
+33 -3
drivers/platform/x86/intel_ips.c
··· 75 75 #include <drm/i915_drm.h> 76 76 #include <asm/msr.h> 77 77 #include <asm/processor.h> 78 + #include "intel_ips.h" 78 79 79 80 #define PCI_DEVICE_ID_INTEL_THERMAL_SENSOR 0x3b32 80 81 ··· 246 245 #define thm_writel(off, val) writel((val), ips->regmap + (off)) 247 246 248 247 static const int IPS_ADJUST_PERIOD = 5000; /* ms */ 248 + static bool late_i915_load = false; 249 249 250 250 /* For initial average collection */ 251 251 static const int IPS_SAMPLE_PERIOD = 200; /* ms */ ··· 340 338 u64 orig_turbo_limit; 341 339 u64 orig_turbo_ratios; 342 340 }; 341 + 342 + static bool 343 + ips_gpu_turbo_enabled(struct ips_driver *ips); 343 344 344 345 /** 345 346 * ips_cpu_busy - is CPU busy? ··· 522 517 */ 523 518 static bool ips_gpu_busy(struct ips_driver *ips) 524 519 { 525 - if (!ips->gpu_turbo_enabled) 520 + if (!ips_gpu_turbo_enabled(ips)) 526 521 return false; 527 522 528 523 return ips->gpu_busy(); ··· 537 532 */ 538 533 static void ips_gpu_raise(struct ips_driver *ips) 539 534 { 540 - if (!ips->gpu_turbo_enabled) 535 + if (!ips_gpu_turbo_enabled(ips)) 541 536 return; 542 537 543 538 if (!ips->gpu_raise()) ··· 554 549 */ 555 550 static void ips_gpu_lower(struct ips_driver *ips) 556 551 { 557 - if (!ips->gpu_turbo_enabled) 552 + if (!ips_gpu_turbo_enabled(ips)) 558 553 return; 559 554 560 555 if (!ips->gpu_lower()) ··· 1458 1453 out_err: 1459 1454 return false; 1460 1455 } 1456 + 1457 + static bool 1458 + ips_gpu_turbo_enabled(struct ips_driver *ips) 1459 + { 1460 + if (!ips->gpu_busy && late_i915_load) { 1461 + if (ips_get_i915_syms(ips)) { 1462 + dev_info(&ips->dev->dev, 1463 + "i915 driver attached, reenabling gpu turbo\n"); 1464 + ips->gpu_turbo_enabled = !(thm_readl(THM_HTS) & HTS_GTD_DIS); 1465 + } 1466 + } 1467 + 1468 + return ips->gpu_turbo_enabled; 1469 + } 1470 + 1471 + void 1472 + ips_link_to_i915_driver() 1473 + { 1474 + /* We can't cleanly get at the various ips_driver structs from 1475 + * this caller (the i915 driver), so just set a flag saying 
1476 + * that it's time to try getting the symbols again. 1477 + */ 1478 + late_i915_load = true; 1479 + } 1480 + EXPORT_SYMBOL_GPL(ips_link_to_i915_driver); 1461 1481 1462 1482 static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = { 1463 1483 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
+21
drivers/platform/x86/intel_ips.h
··· 1 + /* 2 + * Copyright (c) 2010 Intel Corporation 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms and conditions of the GNU General Public License, 6 + * version 2, as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope it will be useful, but WITHOUT 9 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 + * more details. 12 + * 13 + * You should have received a copy of the GNU General Public License along with 14 + * this program; if not, write to the Free Software Foundation, Inc., 15 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 + * 17 + * The full GNU General Public License is included in this distribution in 18 + * the file called "COPYING". 19 + */ 20 + 21 + void ips_link_to_i915_driver(void);
+1 -1
drivers/rtc/rtc-rs5c372.c
··· 207 207 static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm) 208 208 { 209 209 struct rs5c372 *rs5c = i2c_get_clientdata(client); 210 - unsigned char buf[8]; 210 + unsigned char buf[7]; 211 211 int addr; 212 212 213 213 dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d "
+2 -2
drivers/scsi/bfa/bfa_fcs.c
··· 677 677 bfa_trc(fabric->fcs, event); 678 678 wwn2str(pwwn_ptr, fabric->bport.port_cfg.pwwn); 679 679 680 - BFA_LOG(KERN_INFO, bfad, log_level, 680 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 681 681 "Port is isolated due to VF_ID mismatch. " 682 682 "PWWN: %s Port VF_ID: %04x switch port VF_ID: %04x.", 683 683 pwwn_ptr, fabric->fcs->port_vfid, ··· 1411 1411 wwn2str(pwwn_ptr, bfa_fcs_lport_get_pwwn(&fabric->bport)); 1412 1412 wwn2str(fwwn_ptr, 1413 1413 bfa_fcs_lport_get_fabric_name(&fabric->bport)); 1414 - BFA_LOG(KERN_WARNING, bfad, log_level, 1414 + BFA_LOG(KERN_WARNING, bfad, bfa_log_level, 1415 1415 "Base port WWN = %s Fabric WWN = %s\n", 1416 1416 pwwn_ptr, fwwn_ptr); 1417 1417 }
+3 -3
drivers/scsi/bfa/bfa_fcs_fcpim.c
··· 261 261 bfa_fcb_itnim_online(itnim->itnim_drv); 262 262 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port)); 263 263 wwn2str(rpwwn_buf, itnim->rport->pwwn); 264 - BFA_LOG(KERN_INFO, bfad, log_level, 264 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 265 265 "Target (WWN = %s) is online for initiator (WWN = %s)\n", 266 266 rpwwn_buf, lpwwn_buf); 267 267 break; ··· 301 301 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port)); 302 302 wwn2str(rpwwn_buf, itnim->rport->pwwn); 303 303 if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) 304 - BFA_LOG(KERN_ERR, bfad, log_level, 304 + BFA_LOG(KERN_ERR, bfad, bfa_log_level, 305 305 "Target (WWN = %s) connectivity lost for " 306 306 "initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf); 307 307 else 308 - BFA_LOG(KERN_INFO, bfad, log_level, 308 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 309 309 "Target (WWN = %s) offlined by initiator (WWN = %s)\n", 310 310 rpwwn_buf, lpwwn_buf); 311 311 break;
+5 -5
drivers/scsi/bfa/bfa_fcs_lport.c
··· 491 491 __port_action[port->fabric->fab_type].online(port); 492 492 493 493 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); 494 - BFA_LOG(KERN_INFO, bfad, log_level, 494 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 495 495 "Logical port online: WWN = %s Role = %s\n", 496 496 lpwwn_buf, "Initiator"); 497 497 ··· 512 512 513 513 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); 514 514 if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE) 515 - BFA_LOG(KERN_ERR, bfad, log_level, 515 + BFA_LOG(KERN_ERR, bfad, bfa_log_level, 516 516 "Logical port lost fabric connectivity: WWN = %s Role = %s\n", 517 517 lpwwn_buf, "Initiator"); 518 518 else 519 - BFA_LOG(KERN_INFO, bfad, log_level, 519 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 520 520 "Logical port taken offline: WWN = %s Role = %s\n", 521 521 lpwwn_buf, "Initiator"); 522 522 ··· 573 573 char lpwwn_buf[BFA_STRING_32]; 574 574 575 575 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); 576 - BFA_LOG(KERN_INFO, bfad, log_level, 576 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 577 577 "Logical port deleted: WWN = %s Role = %s\n", 578 578 lpwwn_buf, "Initiator"); 579 579 ··· 878 878 vport ? vport->vport_drv : NULL); 879 879 880 880 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(lport)); 881 - BFA_LOG(KERN_INFO, bfad, log_level, 881 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 882 882 "New logical port created: WWN = %s Role = %s\n", 883 883 lpwwn_buf, "Initiator"); 884 884
+3 -3
drivers/scsi/bfa/bfa_fcs_rport.c
··· 2056 2056 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); 2057 2057 wwn2str(rpwwn_buf, rport->pwwn); 2058 2058 if (!BFA_FCS_PID_IS_WKA(rport->pid)) 2059 - BFA_LOG(KERN_INFO, bfad, log_level, 2059 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2060 2060 "Remote port (WWN = %s) online for logical port (WWN = %s)\n", 2061 2061 rpwwn_buf, lpwwn_buf); 2062 2062 } ··· 2075 2075 wwn2str(rpwwn_buf, rport->pwwn); 2076 2076 if (!BFA_FCS_PID_IS_WKA(rport->pid)) { 2077 2077 if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) 2078 - BFA_LOG(KERN_ERR, bfad, log_level, 2078 + BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2079 2079 "Remote port (WWN = %s) connectivity lost for " 2080 2080 "logical port (WWN = %s)\n", 2081 2081 rpwwn_buf, lpwwn_buf); 2082 2082 else 2083 - BFA_LOG(KERN_INFO, bfad, log_level, 2083 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2084 2084 "Remote port (WWN = %s) offlined by " 2085 2085 "logical port (WWN = %s)\n", 2086 2086 rpwwn_buf, lpwwn_buf);
+4 -4
drivers/scsi/bfa/bfa_ioc.c
··· 402 402 403 403 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); 404 404 bfa_ioc_hb_monitor(ioc); 405 - BFA_LOG(KERN_INFO, bfad, log_level, "IOC enabled\n"); 405 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n"); 406 406 } 407 407 408 408 static void ··· 444 444 { 445 445 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 446 446 bfa_iocpf_disable(ioc); 447 - BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n"); 447 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n"); 448 448 } 449 449 450 450 /* ··· 565 565 notify->cbfn(notify->cbarg); 566 566 } 567 567 568 - BFA_LOG(KERN_CRIT, bfad, log_level, 568 + BFA_LOG(KERN_CRIT, bfad, bfa_log_level, 569 569 "Heart Beat of IOC has failed\n"); 570 570 } 571 571 ··· 1812 1812 * Provide enable completion callback. 1813 1813 */ 1814 1814 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 1815 - BFA_LOG(KERN_WARNING, bfad, log_level, 1815 + BFA_LOG(KERN_WARNING, bfad, bfa_log_level, 1816 1816 "Running firmware version is incompatible " 1817 1817 "with the driver version\n"); 1818 1818 }
+14 -14
drivers/scsi/bfa/bfa_svc.c
··· 2138 2138 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2139 2139 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 2140 2140 wwn2str(pwwn_buf, fcport->pwwn); 2141 - BFA_LOG(KERN_INFO, bfad, log_level, 2141 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2142 2142 "Base port disabled: WWN = %s\n", pwwn_buf); 2143 2143 break; 2144 2144 ··· 2198 2198 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2199 2199 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 2200 2200 wwn2str(pwwn_buf, fcport->pwwn); 2201 - BFA_LOG(KERN_INFO, bfad, log_level, 2201 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2202 2202 "Base port disabled: WWN = %s\n", pwwn_buf); 2203 2203 break; 2204 2204 ··· 2251 2251 2252 2252 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE); 2253 2253 wwn2str(pwwn_buf, fcport->pwwn); 2254 - BFA_LOG(KERN_INFO, bfad, log_level, 2254 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2255 2255 "Base port online: WWN = %s\n", pwwn_buf); 2256 2256 break; 2257 2257 ··· 2277 2277 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2278 2278 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 2279 2279 wwn2str(pwwn_buf, fcport->pwwn); 2280 - BFA_LOG(KERN_INFO, bfad, log_level, 2280 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2281 2281 "Base port disabled: WWN = %s\n", pwwn_buf); 2282 2282 break; 2283 2283 ··· 2322 2322 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2323 2323 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 2324 2324 wwn2str(pwwn_buf, fcport->pwwn); 2325 - BFA_LOG(KERN_INFO, bfad, log_level, 2325 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2326 2326 "Base port offline: WWN = %s\n", pwwn_buf); 2327 - BFA_LOG(KERN_INFO, bfad, log_level, 2327 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2328 2328 "Base port disabled: WWN = %s\n", pwwn_buf); 2329 2329 break; 2330 2330 ··· 2336 2336 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); 2337 2337 wwn2str(pwwn_buf, fcport->pwwn); 2338 2338 if (BFA_PORT_IS_DISABLED(fcport->bfa)) 2339 - BFA_LOG(KERN_INFO, bfad, log_level, 2339 + BFA_LOG(KERN_INFO, bfad, 
bfa_log_level, 2340 2340 "Base port offline: WWN = %s\n", pwwn_buf); 2341 2341 else 2342 - BFA_LOG(KERN_ERR, bfad, log_level, 2342 + BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2343 2343 "Base port (WWN = %s) " 2344 2344 "lost fabric connectivity\n", pwwn_buf); 2345 2345 break; ··· 2349 2349 bfa_fcport_reset_linkinfo(fcport); 2350 2350 wwn2str(pwwn_buf, fcport->pwwn); 2351 2351 if (BFA_PORT_IS_DISABLED(fcport->bfa)) 2352 - BFA_LOG(KERN_INFO, bfad, log_level, 2352 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2353 2353 "Base port offline: WWN = %s\n", pwwn_buf); 2354 2354 else 2355 - BFA_LOG(KERN_ERR, bfad, log_level, 2355 + BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2356 2356 "Base port (WWN = %s) " 2357 2357 "lost fabric connectivity\n", pwwn_buf); 2358 2358 break; ··· 2363 2363 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); 2364 2364 wwn2str(pwwn_buf, fcport->pwwn); 2365 2365 if (BFA_PORT_IS_DISABLED(fcport->bfa)) 2366 - BFA_LOG(KERN_INFO, bfad, log_level, 2366 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2367 2367 "Base port offline: WWN = %s\n", pwwn_buf); 2368 2368 else 2369 - BFA_LOG(KERN_ERR, bfad, log_level, 2369 + BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2370 2370 "Base port (WWN = %s) " 2371 2371 "lost fabric connectivity\n", pwwn_buf); 2372 2372 break; ··· 2497 2497 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2498 2498 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); 2499 2499 wwn2str(pwwn_buf, fcport->pwwn); 2500 - BFA_LOG(KERN_INFO, bfad, log_level, 2500 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2501 2501 "Base port enabled: WWN = %s\n", pwwn_buf); 2502 2502 break; 2503 2503 ··· 2551 2551 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2552 2552 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); 2553 2553 wwn2str(pwwn_buf, fcport->pwwn); 2554 - BFA_LOG(KERN_INFO, bfad, log_level, 2554 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2555 2555 "Base port enabled: WWN = %s\n", pwwn_buf); 2556 2556 break; 2557 2557
+4 -4
drivers/scsi/bfa/bfad.c
··· 50 50 int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT; 51 51 int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH; 52 52 int bfa_io_max_sge = BFAD_IO_MAX_SGE; 53 - int log_level = 3; /* WARNING log level */ 53 + int bfa_log_level = 3; /* WARNING log level */ 54 54 int ioc_auto_recover = BFA_TRUE; 55 55 int bfa_linkup_delay = -1; 56 56 int fdmi_enable = BFA_TRUE; ··· 108 108 MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]"); 109 109 module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR); 110 110 MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255"); 111 - module_param(log_level, int, S_IRUGO | S_IWUSR); 112 - MODULE_PARM_DESC(log_level, "Driver log level, default=3, " 111 + module_param(bfa_log_level, int, S_IRUGO | S_IWUSR); 112 + MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, " 113 113 "Range[Critical:1|Error:2|Warning:3|Info:4]"); 114 114 module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR); 115 115 MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, " ··· 1112 1112 } else 1113 1113 bfad_os_rport_online_wait(bfad); 1114 1114 1115 - BFA_LOG(KERN_INFO, bfad, log_level, "bfa device claimed\n"); 1115 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n"); 1116 1116 1117 1117 return BFA_STATUS_OK; 1118 1118 }
+1 -1
drivers/scsi/bfa/bfad_drv.h
··· 337 337 extern int rport_del_timeout; 338 338 extern int bfa_lun_queue_depth; 339 339 extern int bfa_io_max_sge; 340 - extern int log_level; 340 + extern int bfa_log_level; 341 341 extern int ioc_auto_recover; 342 342 extern int bfa_linkup_delay; 343 343 extern int msix_disable_cb;
+11 -10
drivers/scsi/bfa/bfad_im.c
··· 225 225 } 226 226 227 227 bfa_trc(bfad, hal_io->iotag); 228 - BFA_LOG(KERN_INFO, bfad, log_level, "scsi%d: abort cmnd %p iotag %x\n", 228 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 229 + "scsi%d: abort cmnd %p iotag %x\n", 229 230 im_port->shost->host_no, cmnd, hal_io->iotag); 230 231 (void) bfa_ioim_abort(hal_io); 231 232 spin_unlock_irqrestore(&bfad->bfad_lock, flags); ··· 242 241 243 242 cmnd->scsi_done(cmnd); 244 243 bfa_trc(bfad, hal_io->iotag); 245 - BFA_LOG(KERN_INFO, bfad, log_level, 244 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 246 245 "scsi%d: complete abort 0x%p iotag 0x%x\n", 247 246 im_port->shost->host_no, cmnd, hal_io->iotag); 248 247 return SUCCESS; ··· 261 260 262 261 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); 263 262 if (!tskim) { 264 - BFA_LOG(KERN_ERR, bfad, log_level, 263 + BFA_LOG(KERN_ERR, bfad, bfa_log_level, 265 264 "target reset, fail to allocate tskim\n"); 266 265 rc = BFA_STATUS_FAILED; 267 266 goto out; ··· 312 311 313 312 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); 314 313 if (!tskim) { 315 - BFA_LOG(KERN_ERR, bfad, log_level, 314 + BFA_LOG(KERN_ERR, bfad, bfa_log_level, 316 315 "LUN reset, fail to allocate tskim"); 317 316 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 318 317 rc = FAILED; ··· 337 336 338 337 task_status = cmnd->SCp.Status >> 1; 339 338 if (task_status != BFI_TSKIM_STS_OK) { 340 - BFA_LOG(KERN_ERR, bfad, log_level, 339 + BFA_LOG(KERN_ERR, bfad, bfa_log_level, 341 340 "LUN reset failure, status: %d\n", task_status); 342 341 rc = FAILED; 343 342 } ··· 381 380 382 381 task_status = cmnd->SCp.Status >> 1; 383 382 if (task_status != BFI_TSKIM_STS_OK) { 384 - BFA_LOG(KERN_ERR, bfad, log_level, 383 + BFA_LOG(KERN_ERR, bfad, bfa_log_level, 385 384 "target reset failure," 386 385 " status: %d\n", task_status); 387 386 err_cnt++; ··· 461 460 fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim); 462 461 wwn2str(wwpn_str, wwpn); 463 462 fcid2str(fcid_str, fcid); 464 - 
BFA_LOG(KERN_INFO, bfad, log_level, 463 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 465 464 "ITNIM FREE scsi%d: FCID: %s WWPN: %s\n", 466 465 port->im_port->shost->host_no, 467 466 fcid_str, wwpn_str); ··· 590 589 bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) 591 590 { 592 591 bfa_trc(bfad, bfad->inst_no); 593 - BFA_LOG(KERN_INFO, bfad, log_level, "Free scsi%d\n", 592 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Free scsi%d\n", 594 593 im_port->shost->host_no); 595 594 596 595 fc_remove_host(im_port->shost); ··· 1049 1048 fcid2str(fcid_str, fcid); 1050 1049 list_add_tail(&itnim->list_entry, 1051 1050 &im_port->itnim_mapped_list); 1052 - BFA_LOG(KERN_INFO, bfad, log_level, 1051 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 1053 1052 "ITNIM ONLINE Target: %d:0:%d " 1054 1053 "FCID: %s WWPN: %s\n", 1055 1054 im_port->shost->host_no, ··· 1082 1081 wwn2str(wwpn_str, wwpn); 1083 1082 fcid2str(fcid_str, fcid); 1084 1083 list_del(&itnim->list_entry); 1085 - BFA_LOG(KERN_INFO, bfad, log_level, 1084 + BFA_LOG(KERN_INFO, bfad, bfa_log_level, 1086 1085 "ITNIM OFFLINE Target: %d:0:%d " 1087 1086 "FCID: %s WWPN: %s\n", 1088 1087 im_port->shost->host_no,
+1
drivers/sh/intc/core.c
··· 198 198 list_add_tail(&d->list, &intc_list); 199 199 200 200 raw_spin_lock_init(&d->lock); 201 + INIT_RADIX_TREE(&d->tree, GFP_ATOMIC); 201 202 202 203 d->index = nr_intc_controllers; 203 204
+1 -1
drivers/spi/coldfire_qspi.c
··· 317 317 msg = container_of(mcfqspi->msgq.next, struct spi_message, 318 318 queue); 319 319 320 - list_del_init(&mcfqspi->msgq); 320 + list_del_init(&msg->queue); 321 321 spin_unlock_irqrestore(&mcfqspi->lock, flags); 322 322 323 323 spi = msg->spi;
+1 -1
drivers/spi/mpc52xx_spi.c
··· 563 563 .of_match_table = mpc52xx_spi_match, 564 564 }, 565 565 .probe = mpc52xx_spi_probe, 566 - .remove = __exit_p(mpc52xx_spi_remove), 566 + .remove = __devexit_p(mpc52xx_spi_remove), 567 567 }; 568 568 569 569 static int __init mpc52xx_spi_init(void)
+39
drivers/spi/omap2_mcspi.c
··· 1305 1305 /* work with hotplug and coldplug */ 1306 1306 MODULE_ALIAS("platform:omap2_mcspi"); 1307 1307 1308 + #ifdef CONFIG_SUSPEND 1309 + /* 1310 + * When SPI wake up from off-mode, CS is in activate state. If it was in 1311 + * unactive state when driver was suspend, then force it to unactive state at 1312 + * wake up. 1313 + */ 1314 + static int omap2_mcspi_resume(struct device *dev) 1315 + { 1316 + struct spi_master *master = dev_get_drvdata(dev); 1317 + struct omap2_mcspi *mcspi = spi_master_get_devdata(master); 1318 + struct omap2_mcspi_cs *cs; 1319 + 1320 + omap2_mcspi_enable_clocks(mcspi); 1321 + list_for_each_entry(cs, &omap2_mcspi_ctx[master->bus_num - 1].cs, 1322 + node) { 1323 + if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) { 1324 + 1325 + /* 1326 + * We need to toggle CS state for OMAP take this 1327 + * change in account. 1328 + */ 1329 + MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 1); 1330 + __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); 1331 + MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 0); 1332 + __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); 1333 + } 1334 + } 1335 + omap2_mcspi_disable_clocks(mcspi); 1336 + return 0; 1337 + } 1338 + #else 1339 + #define omap2_mcspi_resume NULL 1340 + #endif 1341 + 1342 + static const struct dev_pm_ops omap2_mcspi_pm_ops = { 1343 + .resume = omap2_mcspi_resume, 1344 + }; 1345 + 1308 1346 static struct platform_driver omap2_mcspi_driver = { 1309 1347 .driver = { 1310 1348 .name = "omap2_mcspi", 1311 1349 .owner = THIS_MODULE, 1350 + .pm = &omap2_mcspi_pm_ops 1312 1351 }, 1313 1352 .remove = __exit_p(omap2_mcspi_remove), 1314 1353 };
+1 -2
drivers/spi/spi.c
··· 584 584 list_del(&master->list); 585 585 mutex_unlock(&board_lock); 586 586 587 - dummy = device_for_each_child(master->dev.parent, &master->dev, 588 - __unregister); 587 + dummy = device_for_each_child(&master->dev, NULL, __unregister); 589 588 device_unregister(&master->dev); 590 589 } 591 590 EXPORT_SYMBOL_GPL(spi_unregister_master);
+25 -10
drivers/spi/spi_fsl_espi.c
··· 258 258 return mpc8xxx_spi->count; 259 259 } 260 260 261 - static void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd) 261 + static inline void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd) 262 262 { 263 - if (cmd[1] && cmd[2] && cmd[3]) { 263 + if (cmd) { 264 264 cmd[1] = (u8)(addr >> 16); 265 265 cmd[2] = (u8)(addr >> 8); 266 266 cmd[3] = (u8)(addr >> 0); 267 267 } 268 268 } 269 269 270 - static unsigned int fsl_espi_cmd2addr(u8 *cmd) 270 + static inline unsigned int fsl_espi_cmd2addr(u8 *cmd) 271 271 { 272 - if (cmd[1] && cmd[2] && cmd[3]) 272 + if (cmd) 273 273 return cmd[1] << 16 | cmd[2] << 8 | cmd[3] << 0; 274 274 275 275 return 0; ··· 395 395 } 396 396 } 397 397 398 - addr = fsl_espi_cmd2addr(local_buf); 399 - addr += pos; 400 - fsl_espi_addr2cmd(addr, local_buf); 398 + if (pos > 0) { 399 + addr = fsl_espi_cmd2addr(local_buf); 400 + addr += pos; 401 + fsl_espi_addr2cmd(addr, local_buf); 402 + } 401 403 402 404 espi_trans->n_tx = n_tx; 403 405 espi_trans->n_rx = trans_len; ··· 509 507 510 508 /* We need handle RX first */ 511 509 if (events & SPIE_NE) { 512 - u32 rx_data; 510 + u32 rx_data, tmp; 511 + u8 rx_data_8; 513 512 514 513 /* Spin until RX is done */ 515 514 while (SPIE_RXCNT(events) < min(4, mspi->len)) { 516 515 cpu_relax(); 517 516 events = mpc8xxx_spi_read_reg(&reg_base->event); 518 517 } 519 - mspi->len -= 4; 520 518 521 - rx_data = mpc8xxx_spi_read_reg(&reg_base->receive); 519 + if (mspi->len >= 4) { 520 + rx_data = mpc8xxx_spi_read_reg(&reg_base->receive); 521 + } else { 522 + tmp = mspi->len; 523 + rx_data = 0; 524 + while (tmp--) { 525 + rx_data_8 = in_8((u8 *)&reg_base->receive); 526 + rx_data |= (rx_data_8 << (tmp * 8)); 527 + } 528 + 529 + rx_data <<= (4 - mspi->len) * 8; 530 + } 531 + 532 + mspi->len -= 4; 522 533 523 534 if (mspi->rx) 524 535 mspi->get_rx(rx_data, mspi);
-6
drivers/staging/zram/zram_drv.c
··· 435 435 int ret = 0; 436 436 struct zram *zram = queue->queuedata; 437 437 438 - if (unlikely(!zram->init_done)) { 439 - set_bit(BIO_UPTODATE, &bio->bi_flags); 440 - bio_endio(bio, 0); 441 - return 0; 442 - } 443 - 444 438 if (!valid_io_request(zram, bio)) { 445 439 zram_stat64_inc(zram, &zram->stats.invalid_io); 446 440 bio_io_error(bio);
+19 -3
drivers/usb/atm/ueagle-atm.c
··· 2206 2206 goto err1; 2207 2207 } 2208 2208 2209 - sc->kthread = kthread_run(uea_kthread, sc, "ueagle-atm"); 2210 - if (sc->kthread == ERR_PTR(-ENOMEM)) { 2209 + /* Create worker thread, but don't start it here. Start it after 2210 + * all usbatm generic initialization is done. 2211 + */ 2212 + sc->kthread = kthread_create(uea_kthread, sc, "ueagle-atm"); 2213 + if (IS_ERR(sc->kthread)) { 2211 2214 uea_err(INS_TO_USBDEV(sc), "failed to create thread\n"); 2212 2215 goto err2; 2213 2216 } ··· 2627 2624 static int uea_probe(struct usb_interface *intf, const struct usb_device_id *id) 2628 2625 { 2629 2626 struct usb_device *usb = interface_to_usbdev(intf); 2627 + int ret; 2630 2628 2631 2629 uea_enters(usb); 2632 2630 uea_info(usb, "ADSL device founded vid (%#X) pid (%#X) Rev (%#X): %s\n", ··· 2641 2637 if (UEA_IS_PREFIRM(id)) 2642 2638 return uea_load_firmware(usb, UEA_CHIP_VERSION(id)); 2643 2639 2644 - return usbatm_usb_probe(intf, id, &uea_usbatm_driver); 2640 + ret = usbatm_usb_probe(intf, id, &uea_usbatm_driver); 2641 + if (ret == 0) { 2642 + struct usbatm_data *usbatm = usb_get_intfdata(intf); 2643 + struct uea_softc *sc = usbatm->driver_data; 2644 + 2645 + /* Ensure carrier is initialized to off as early as possible */ 2646 + UPDATE_ATM_SIGNAL(ATM_PHY_SIG_LOST); 2647 + 2648 + /* Only start the worker thread when all init is done */ 2649 + wake_up_process(sc->kthread); 2650 + } 2651 + 2652 + return ret; 2645 2653 } 2646 2654 2647 2655 static void uea_disconnect(struct usb_interface *intf)
+1
drivers/video/backlight/cr_bllcd.c
··· 242 242 backlight_device_unregister(crp->cr_backlight_device); 243 243 lcd_device_unregister(crp->cr_lcd_device); 244 244 pci_dev_put(lpc_dev); 245 + kfree(crp); 245 246 246 247 return 0; 247 248 }
+1 -1
drivers/video/fbmem.c
··· 1458 1458 if (gen->base == hw->base) 1459 1459 return true; 1460 1460 /* is the generic aperture base inside the hw base->hw base+size */ 1461 - if (gen->base > hw->base && gen->base <= hw->base + hw->size) 1461 + if (gen->base > hw->base && gen->base < hw->base + hw->size) 1462 1462 return true; 1463 1463 return false; 1464 1464 }
+13 -3
drivers/video/sh_mobile_hdmi.c
··· 845 845 found_rate_error = rate_error; 846 846 } 847 847 848 + hdmi->var.width = hdmi->monspec.max_x * 10; 849 + hdmi->var.height = hdmi->monspec.max_y * 10; 850 + 848 851 /* 849 852 * TODO 1: if no ->info is present, postpone running the config until 850 853 * after ->info first gets registered. ··· 1034 1031 dev_dbg(info->dev, "Old %ux%u, new %ux%u\n", 1035 1032 mode1.xres, mode1.yres, mode2.xres, mode2.yres); 1036 1033 1037 - if (fb_mode_is_equal(&mode1, &mode2)) 1034 + if (fb_mode_is_equal(&mode1, &mode2)) { 1035 + /* It can be a different monitor with an equal video-mode */ 1036 + old_var->width = new_var->width; 1037 + old_var->height = new_var->height; 1038 1038 return false; 1039 + } 1039 1040 1040 1041 dev_dbg(info->dev, "Switching %u -> %u lines\n", 1041 1042 mode1.yres, mode2.yres); ··· 1136 1129 * on, if we run a resume here, the logo disappears 1137 1130 */ 1138 1131 if (lock_fb_info(hdmi->info)) { 1139 - sh_hdmi_display_on(hdmi, hdmi->info); 1140 - unlock_fb_info(hdmi->info); 1132 + struct fb_info *info = hdmi->info; 1133 + info->var.width = hdmi->var.width; 1134 + info->var.height = hdmi->var.height; 1135 + sh_hdmi_display_on(hdmi, info); 1136 + unlock_fb_info(info); 1141 1137 } 1142 1138 } else { 1143 1139 /* New monitor or have to wake up */
+3 -1
drivers/video/sh_mobile_lcdcfb.c
··· 1280 1280 mode = &default_720p; 1281 1281 num_cfg = 1; 1282 1282 } else { 1283 - num_cfg = ch->cfg.num_cfg; 1283 + num_cfg = cfg->num_cfg; 1284 1284 } 1285 1285 1286 1286 fb_videomode_to_modelist(mode, num_cfg, &info->modelist); 1287 1287 1288 1288 fb_videomode_to_var(var, mode); 1289 + var->width = cfg->lcd_size_cfg.width; 1290 + var->height = cfg->lcd_size_cfg.height; 1289 1291 /* Default Y virtual resolution is 2x panel size */ 1290 1292 var->yres_virtual = var->yres * 2; 1291 1293 var->activate = FB_ACTIVATE_NOW;
+1 -1
drivers/watchdog/rdc321x_wdt.c
··· 231 231 struct resource *r; 232 232 struct rdc321x_wdt_pdata *pdata; 233 233 234 - pdata = pdev->dev.platform_data; 234 + pdata = platform_get_drvdata(pdev); 235 235 if (!pdata) { 236 236 dev_err(&pdev->dev, "no platform data supplied\n"); 237 237 return -ENODEV;
+5
fs/ext4/resize.c
··· 232 232 GFP_NOFS); 233 233 if (err) 234 234 goto exit_bh; 235 + for (i = 0, bit = gdblocks + 1; i < reserved_gdb; i++, bit++) 236 + ext4_set_bit(bit, bh->b_data); 235 237 236 238 ext4_debug("mark block bitmap %#04llx (+%llu)\n", input->block_bitmap, 237 239 input->block_bitmap - start); ··· 249 247 err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS); 250 248 if (err) 251 249 goto exit_bh; 250 + for (i = 0, bit = input->inode_table - start; 251 + i < sbi->s_itb_per_group; i++, bit++) 252 + ext4_set_bit(bit, bh->b_data); 252 253 253 254 if ((err = extend_or_restart_transaction(handle, 2, bh))) 254 255 goto exit_bh;
+1 -1
fs/logfs/journal.c
··· 828 828 super->s_journal_seg[i] = segno; 829 829 super->s_journal_ec[i] = ec; 830 830 logfs_set_segment_reserved(sb, segno); 831 - err = btree_insert32(head, segno, (void *)1, GFP_KERNEL); 831 + err = btree_insert32(head, segno, (void *)1, GFP_NOFS); 832 832 BUG_ON(err); /* mempool should prevent this */ 833 833 err = logfs_erase_segment(sb, segno, 1); 834 834 BUG_ON(err); /* FIXME: remount-ro would be nicer */
+3
fs/logfs/readwrite.c
··· 1994 1994 1995 1995 /* FIXME: transaction is part of logfs_block now. Is that enough? */ 1996 1996 err = logfs_write_buf(master_inode, page, 0); 1997 + if (err) 1998 + move_page_to_inode(inode, page); 1999 + 1997 2000 logfs_put_write_page(page); 1998 2001 return err; 1999 2002 }
+5 -2
fs/ocfs2/aops.c
··· 573 573 /* this io's submitter should not have unlocked this before we could */ 574 574 BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); 575 575 576 + if (ocfs2_iocb_is_sem_locked(iocb)) { 577 + up_read(&inode->i_alloc_sem); 578 + ocfs2_iocb_clear_sem_locked(iocb); 579 + } 580 + 576 581 ocfs2_iocb_clear_rw_locked(iocb); 577 582 578 583 level = ocfs2_iocb_rw_locked_level(iocb); 579 - if (!level) 580 - up_read(&inode->i_alloc_sem); 581 584 ocfs2_rw_unlock(inode, level); 582 585 583 586 if (is_async)
+21 -2
fs/ocfs2/aops.h
··· 68 68 else 69 69 clear_bit(1, (unsigned long *)&iocb->private); 70 70 } 71 + 72 + /* 73 + * Using a named enum representing lock types in terms of #N bit stored in 74 + * iocb->private, which is going to be used for communication between 75 + * ocfs2_dio_end_io() and ocfs2_file_aio_write/read(). 76 + */ 77 + enum ocfs2_iocb_lock_bits { 78 + OCFS2_IOCB_RW_LOCK = 0, 79 + OCFS2_IOCB_RW_LOCK_LEVEL, 80 + OCFS2_IOCB_SEM, 81 + OCFS2_IOCB_NUM_LOCKS 82 + }; 83 + 71 84 #define ocfs2_iocb_clear_rw_locked(iocb) \ 72 85 clear_bit(OCFS2_IOCB_RW_LOCK, (unsigned long *)&iocb->private) 73 86 #define ocfs2_iocb_rw_locked_level(iocb) \ 74 87 test_bit(OCFS2_IOCB_RW_LOCK_LEVEL, (unsigned long *)&iocb->private) 88 + #define ocfs2_iocb_set_sem_locked(iocb) \ 89 + set_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private) 90 + #define ocfs2_iocb_clear_sem_locked(iocb) \ 91 + clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private) 92 + #define ocfs2_iocb_is_sem_locked(iocb) \ 93 + test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private) 75 94 #endif /* OCFS2_FILE_H */
+2 -1
fs/ocfs2/cluster/masklog.c
··· 113 113 define_mask(QUOTA), 114 114 define_mask(REFCOUNT), 115 115 define_mask(BASTS), 116 + define_mask(RESERVATIONS), 117 + define_mask(CLUSTER), 116 118 define_mask(ERROR), 117 119 define_mask(NOTICE), 118 120 define_mask(KTHREAD), 119 - define_mask(RESERVATIONS), 120 121 }; 121 122 122 123 static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };
+8 -7
fs/ocfs2/cluster/masklog.h
··· 81 81 #include <linux/sched.h> 82 82 83 83 /* bits that are frequently given and infrequently matched in the low word */ 84 - /* NOTE: If you add a flag, you need to also update mlog.c! */ 84 + /* NOTE: If you add a flag, you need to also update masklog.c! */ 85 85 #define ML_ENTRY 0x0000000000000001ULL /* func call entry */ 86 86 #define ML_EXIT 0x0000000000000002ULL /* func call exit */ 87 87 #define ML_TCP 0x0000000000000004ULL /* net cluster/tcp.c */ ··· 114 114 #define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */ 115 115 #define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */ 116 116 #define ML_REFCOUNT 0x0000000080000000ULL /* refcount tree operations */ 117 - #define ML_BASTS 0x0000001000000000ULL /* dlmglue asts and basts */ 117 + #define ML_BASTS 0x0000000100000000ULL /* dlmglue asts and basts */ 118 + #define ML_RESERVATIONS 0x0000000200000000ULL /* ocfs2 alloc reservations */ 119 + #define ML_CLUSTER 0x0000000400000000ULL /* cluster stack */ 120 + 118 121 /* bits that are infrequently given and frequently matched in the high word */ 119 - #define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */ 120 - #define ML_NOTICE 0x0000000200000000ULL /* setn to KERN_NOTICE */ 121 - #define ML_KTHREAD 0x0000000400000000ULL /* kernel thread activity */ 122 - #define ML_RESERVATIONS 0x0000000800000000ULL /* ocfs2 alloc reservations */ 123 - #define ML_CLUSTER 0x0000001000000000ULL /* cluster stack */ 122 + #define ML_ERROR 0x1000000000000000ULL /* sent to KERN_ERR */ 123 + #define ML_NOTICE 0x2000000000000000ULL /* setn to KERN_NOTICE */ 124 + #define ML_KTHREAD 0x4000000000000000ULL /* kernel thread activity */ 124 125 125 126 #define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE) 126 127 #define MLOG_INITIAL_NOT_MASK (ML_ENTRY|ML_EXIT)
+4
fs/ocfs2/dir.c
··· 2461 2461 2462 2462 di->i_dx_root = cpu_to_le64(dr_blkno); 2463 2463 2464 + spin_lock(&OCFS2_I(dir)->ip_lock); 2464 2465 OCFS2_I(dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL; 2465 2466 di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features); 2467 + spin_unlock(&OCFS2_I(dir)->ip_lock); 2466 2468 2467 2469 ocfs2_journal_dirty(handle, di_bh); 2468 2470 ··· 4468 4466 goto out_commit; 4469 4467 } 4470 4468 4469 + spin_lock(&OCFS2_I(dir)->ip_lock); 4471 4470 OCFS2_I(dir)->ip_dyn_features &= ~OCFS2_INDEXED_DIR_FL; 4472 4471 di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features); 4472 + spin_unlock(&OCFS2_I(dir)->ip_lock); 4473 4473 di->i_dx_root = cpu_to_le64(0ULL); 4474 4474 4475 4475 ocfs2_journal_dirty(handle, di_bh);
+27 -13
fs/ocfs2/dlm/dlmmaster.c
··· 2346 2346 */ 2347 2347 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm, 2348 2348 struct dlm_lock_resource *res, 2349 - int *numlocks) 2349 + int *numlocks, 2350 + int *hasrefs) 2350 2351 { 2351 2352 int ret; 2352 2353 int i; ··· 2356 2355 struct dlm_lock *lock; 2357 2356 2358 2357 assert_spin_locked(&res->spinlock); 2358 + 2359 + *numlocks = 0; 2360 + *hasrefs = 0; 2359 2361 2360 2362 ret = -EINVAL; 2361 2363 if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { ··· 2390 2386 } 2391 2387 2392 2388 *numlocks = count; 2393 - mlog(0, "migrateable lockres having %d locks\n", *numlocks); 2389 + 2390 + count = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); 2391 + if (count < O2NM_MAX_NODES) 2392 + *hasrefs = 1; 2393 + 2394 + mlog(0, "%s: res %.*s, Migrateable, locks %d, refs %d\n", dlm->name, 2395 + res->lockname.len, res->lockname.name, *numlocks, *hasrefs); 2394 2396 2395 2397 leave: 2396 2398 return ret; ··· 2418 2408 const char *name; 2419 2409 unsigned int namelen; 2420 2410 int mle_added = 0; 2421 - int numlocks; 2411 + int numlocks, hasrefs; 2422 2412 int wake = 0; 2423 2413 2424 2414 if (!dlm_grab(dlm)) ··· 2427 2417 name = res->lockname.name; 2428 2418 namelen = res->lockname.len; 2429 2419 2430 - mlog(0, "migrating %.*s to %u\n", namelen, name, target); 2420 + mlog(0, "%s: Migrating %.*s to %u\n", dlm->name, namelen, name, target); 2431 2421 2432 2422 /* 2433 2423 * ensure this lockres is a proper candidate for migration 2434 2424 */ 2435 2425 spin_lock(&res->spinlock); 2436 - ret = dlm_is_lockres_migrateable(dlm, res, &numlocks); 2426 + ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs); 2437 2427 if (ret < 0) { 2438 2428 spin_unlock(&res->spinlock); 2439 2429 goto leave; ··· 2441 2431 spin_unlock(&res->spinlock); 2442 2432 2443 2433 /* no work to do */ 2444 - if (numlocks == 0) { 2445 - mlog(0, "no locks were found on this lockres! 
done!\n"); 2434 + if (numlocks == 0 && !hasrefs) 2446 2435 goto leave; 2447 - } 2448 2436 2449 2437 /* 2450 2438 * preallocate up front ··· 2467 2459 * find a node to migrate the lockres to 2468 2460 */ 2469 2461 2470 - mlog(0, "picking a migration node\n"); 2471 2462 spin_lock(&dlm->spinlock); 2472 2463 /* pick a new node */ 2473 2464 if (!test_bit(target, dlm->domain_map) || 2474 2465 target >= O2NM_MAX_NODES) { 2475 2466 target = dlm_pick_migration_target(dlm, res); 2476 2467 } 2477 - mlog(0, "node %u chosen for migration\n", target); 2468 + mlog(0, "%s: res %.*s, Node %u chosen for migration\n", dlm->name, 2469 + namelen, name, target); 2478 2470 2479 2471 if (target >= O2NM_MAX_NODES || 2480 2472 !test_bit(target, dlm->domain_map)) { ··· 2675 2667 { 2676 2668 int ret; 2677 2669 int lock_dropped = 0; 2678 - int numlocks; 2670 + int numlocks, hasrefs; 2679 2671 2680 2672 spin_lock(&res->spinlock); 2681 2673 if (res->owner != dlm->node_num) { ··· 2689 2681 } 2690 2682 2691 2683 /* No need to migrate a lockres having no locks */ 2692 - ret = dlm_is_lockres_migrateable(dlm, res, &numlocks); 2693 - if (ret >= 0 && numlocks == 0) { 2684 + ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs); 2685 + if (ret >= 0 && numlocks == 0 && !hasrefs) { 2694 2686 spin_unlock(&res->spinlock); 2695 2687 goto leave; 2696 2688 } ··· 2922 2914 } 2923 2915 } 2924 2916 queue++; 2917 + } 2918 + 2919 + nodenum = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); 2920 + if (nodenum < O2NM_MAX_NODES) { 2921 + spin_unlock(&res->spinlock); 2922 + return nodenum; 2925 2923 } 2926 2924 spin_unlock(&res->spinlock); 2927 2925 mlog(0, "have not found a suitable target yet! checking domain map\n");
+13 -2
fs/ocfs2/file.c
··· 2241 2241 2242 2242 mutex_lock(&inode->i_mutex); 2243 2243 2244 + ocfs2_iocb_clear_sem_locked(iocb); 2245 + 2244 2246 relock: 2245 2247 /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */ 2246 2248 if (direct_io) { 2247 2249 down_read(&inode->i_alloc_sem); 2248 2250 have_alloc_sem = 1; 2251 + /* communicate with ocfs2_dio_end_io */ 2252 + ocfs2_iocb_set_sem_locked(iocb); 2249 2253 } 2250 2254 2251 2255 /* ··· 2386 2382 ocfs2_rw_unlock(inode, rw_level); 2387 2383 2388 2384 out_sems: 2389 - if (have_alloc_sem) 2385 + if (have_alloc_sem) { 2390 2386 up_read(&inode->i_alloc_sem); 2387 + ocfs2_iocb_clear_sem_locked(iocb); 2388 + } 2391 2389 2392 2390 mutex_unlock(&inode->i_mutex); 2393 2391 ··· 2533 2527 goto bail; 2534 2528 } 2535 2529 2530 + ocfs2_iocb_clear_sem_locked(iocb); 2531 + 2536 2532 /* 2537 2533 * buffered reads protect themselves in ->readpage(). O_DIRECT reads 2538 2534 * need locks to protect pending reads from racing with truncate. ··· 2542 2534 if (filp->f_flags & O_DIRECT) { 2543 2535 down_read(&inode->i_alloc_sem); 2544 2536 have_alloc_sem = 1; 2537 + ocfs2_iocb_set_sem_locked(iocb); 2545 2538 2546 2539 ret = ocfs2_rw_lock(inode, 0); 2547 2540 if (ret < 0) { ··· 2584 2575 } 2585 2576 2586 2577 bail: 2587 - if (have_alloc_sem) 2578 + if (have_alloc_sem) { 2588 2579 up_read(&inode->i_alloc_sem); 2580 + ocfs2_iocb_clear_sem_locked(iocb); 2581 + } 2589 2582 if (rw_level != -1) 2590 2583 ocfs2_rw_unlock(inode, rw_level); 2591 2584 mlog_exit(ret);
+1 -1
fs/ocfs2/ocfs2_fs.h
··· 350 350 #define OCFS2_LAST_LOCAL_SYSTEM_INODE LOCAL_GROUP_QUOTA_SYSTEM_INODE 351 351 NUM_SYSTEM_INODES 352 352 }; 353 - #define NUM_GLOBAL_SYSTEM_INODES OCFS2_LAST_GLOBAL_SYSTEM_INODE 353 + #define NUM_GLOBAL_SYSTEM_INODES OCFS2_FIRST_LOCAL_SYSTEM_INODE 354 354 #define NUM_LOCAL_SYSTEM_INODES \ 355 355 (NUM_SYSTEM_INODES - OCFS2_FIRST_LOCAL_SYSTEM_INODE) 356 356
+10 -3
include/linux/dmaengine.h
··· 824 824 #ifdef CONFIG_DMA_ENGINE 825 825 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); 826 826 void dma_issue_pending_all(void); 827 + struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); 828 + void dma_release_channel(struct dma_chan *chan); 827 829 #else 828 830 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) 829 831 { ··· 833 831 } 834 832 static inline void dma_issue_pending_all(void) 835 833 { 836 - do { } while (0); 834 + } 835 + static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, 836 + dma_filter_fn fn, void *fn_param) 837 + { 838 + return NULL; 839 + } 840 + static inline void dma_release_channel(struct dma_chan *chan) 841 + { 837 842 } 838 843 #endif 839 844 ··· 851 842 void dma_run_dependencies(struct dma_async_tx_descriptor *tx); 852 843 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); 853 844 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) 854 - struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); 855 - void dma_release_channel(struct dma_chan *chan); 856 845 857 846 /* --- Helper iov-locking functions --- */ 858 847
+34 -9
include/linux/kthread.h
··· 81 81 #define DEFINE_KTHREAD_WORK(work, fn) \ 82 82 struct kthread_work work = KTHREAD_WORK_INIT(work, fn) 83 83 84 - static inline void init_kthread_worker(struct kthread_worker *worker) 85 - { 86 - *worker = (struct kthread_worker)KTHREAD_WORKER_INIT(*worker); 87 - } 84 + /* 85 + * kthread_worker.lock and kthread_work.done need their own lockdep class 86 + * keys if they are defined on stack with lockdep enabled. Use the 87 + * following macros when defining them on stack. 88 + */ 89 + #ifdef CONFIG_LOCKDEP 90 + # define KTHREAD_WORKER_INIT_ONSTACK(worker) \ 91 + ({ init_kthread_worker(&worker); worker; }) 92 + # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \ 93 + struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker) 94 + # define KTHREAD_WORK_INIT_ONSTACK(work, fn) \ 95 + ({ init_kthread_work((&work), fn); work; }) 96 + # define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) \ 97 + struct kthread_work work = KTHREAD_WORK_INIT_ONSTACK(work, fn) 98 + #else 99 + # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker) 100 + # define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) DEFINE_KTHREAD_WORK(work, fn) 101 + #endif 88 102 89 - static inline void init_kthread_work(struct kthread_work *work, 90 - kthread_work_func_t fn) 91 - { 92 - *work = (struct kthread_work)KTHREAD_WORK_INIT(*work, fn); 93 - } 103 + extern void __init_kthread_worker(struct kthread_worker *worker, 104 + const char *name, struct lock_class_key *key); 105 + 106 + #define init_kthread_worker(worker) \ 107 + do { \ 108 + static struct lock_class_key __key; \ 109 + __init_kthread_worker((worker), "("#worker")->lock", &__key); \ 110 + } while (0) 111 + 112 + #define init_kthread_work(work, fn) \ 113 + do { \ 114 + memset((work), 0, sizeof(struct kthread_work)); \ 115 + INIT_LIST_HEAD(&(work)->node); \ 116 + (work)->func = (fn); \ 117 + init_waitqueue_head(&(work)->done); \ 118 + } while (0) 94 119 95 120 int kthread_worker_fn(void *worker_ptr); 96 121
+1 -1
include/linux/netlink.h
··· 70 70 Check NLM_F_EXCL 71 71 */ 72 72 73 - #define NLMSG_ALIGNTO 4 73 + #define NLMSG_ALIGNTO 4U 74 74 #define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) ) 75 75 #define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr))) 76 76 #define NLMSG_LENGTH(len) ((len)+NLMSG_ALIGN(NLMSG_HDRLEN))
+2 -1
include/linux/taskstats.h
··· 33 33 */ 34 34 35 35 36 - #define TASKSTATS_VERSION 7 36 + #define TASKSTATS_VERSION 8 37 37 #define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN 38 38 * in linux/sched.h */ 39 39 ··· 188 188 TASKSTATS_TYPE_STATS, /* taskstats structure */ 189 189 TASKSTATS_TYPE_AGGR_PID, /* contains pid + stats */ 190 190 TASKSTATS_TYPE_AGGR_TGID, /* contains tgid + stats */ 191 + TASKSTATS_TYPE_NULL, /* contains nothing */ 191 192 __TASKSTATS_TYPE_MAX, 192 193 }; 193 194
+3 -3
include/linux/unaligned/packed_struct.h
··· 3 3 4 4 #include <linux/kernel.h> 5 5 6 - struct __una_u16 { u16 x __attribute__((packed)); }; 7 - struct __una_u32 { u32 x __attribute__((packed)); }; 8 - struct __una_u64 { u64 x __attribute__((packed)); }; 6 + struct __una_u16 { u16 x; } __attribute__((packed)); 7 + struct __una_u32 { u32 x; } __attribute__((packed)); 8 + struct __una_u64 { u64 x; } __attribute__((packed)); 9 9 10 10 static inline u16 __get_unaligned_cpu16(const void *p) 11 11 {
-3
include/media/wm8775.h
··· 32 32 #define WM8775_AIN3 4 33 33 #define WM8775_AIN4 8 34 34 35 - /* subdev group ID */ 36 - #define WM8775_GID (1 << 0) 37 - 38 35 #endif
-1
include/net/flow.h
··· 49 49 __u8 proto; 50 50 __u8 flags; 51 51 #define FLOWI_FLAG_ANYSRC 0x01 52 - #define FLOWI_FLAG_MATCH_ANY_IIF 0x02 53 52 union { 54 53 struct { 55 54 __be16 sport;
+10
include/net/ip6_route.h
··· 164 164 return rt->rt6i_flags & RTF_LOCAL; 165 165 } 166 166 167 + int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); 168 + 169 + static inline int ip6_skb_dst_mtu(struct sk_buff *skb) 170 + { 171 + struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; 172 + 173 + return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ? 174 + skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); 175 + } 176 + 167 177 #endif 168 178 #endif
+24 -4
include/net/mac80211.h
··· 2024 2024 * 2025 2025 * This function may not be called in IRQ context. Calls to this function 2026 2026 * for a single hardware must be synchronized against each other. Calls 2027 - * to this function and ieee80211_tx_status_irqsafe() may not be mixed 2028 - * for a single hardware. 2027 + * to this function, ieee80211_tx_status_ni() and ieee80211_tx_status_irqsafe() 2028 + * may not be mixed for a single hardware. 2029 2029 * 2030 2030 * @hw: the hardware the frame was transmitted by 2031 2031 * @skb: the frame that was transmitted, owned by mac80211 after this call ··· 2034 2034 struct sk_buff *skb); 2035 2035 2036 2036 /** 2037 + * ieee80211_tx_status_ni - transmit status callback (in process context) 2038 + * 2039 + * Like ieee80211_tx_status() but can be called in process context. 2040 + * 2041 + * Calls to this function, ieee80211_tx_status() and 2042 + * ieee80211_tx_status_irqsafe() may not be mixed 2043 + * for a single hardware. 2044 + * 2045 + * @hw: the hardware the frame was transmitted by 2046 + * @skb: the frame that was transmitted, owned by mac80211 after this call 2047 + */ 2048 + static inline void ieee80211_tx_status_ni(struct ieee80211_hw *hw, 2049 + struct sk_buff *skb) 2050 + { 2051 + local_bh_disable(); 2052 + ieee80211_tx_status(hw, skb); 2053 + local_bh_enable(); 2054 + } 2055 + 2056 + /** 2037 2057 * ieee80211_tx_status_irqsafe - IRQ-safe transmit status callback 2038 2058 * 2039 2059 * Like ieee80211_tx_status() but can be called in IRQ context 2040 2060 * (internally defers to a tasklet.) 2041 2061 * 2042 - * Calls to this function and ieee80211_tx_status() may not be mixed for a 2043 - * single hardware. 2062 + * Calls to this function, ieee80211_tx_status() and 2063 + * ieee80211_tx_status_ni() may not be mixed for a single hardware. 2044 2064 * 2045 2065 * @hw: the hardware the frame was transmitted by 2046 2066 * @skb: the frame that was transmitted, owned by mac80211 after this call
+3 -1
include/net/pkt_cls.h
··· 323 323 static inline int tcf_valid_offset(const struct sk_buff *skb, 324 324 const unsigned char *ptr, const int len) 325 325 { 326 - return unlikely((ptr + len) < skb_tail_pointer(skb) && ptr > skb->head); 326 + return likely((ptr + len) <= skb_tail_pointer(skb) && 327 + ptr >= skb->head && 328 + (ptr <= (ptr + len))); 327 329 } 328 330 329 331 #ifdef CONFIG_NET_CLS_IND
+1 -5
include/net/sch_generic.h
··· 610 610 { 611 611 struct sk_buff *n; 612 612 613 - if ((action == TC_ACT_STOLEN || action == TC_ACT_QUEUED) && 614 - !skb_shared(skb)) 615 - n = skb_get(skb); 616 - else 617 - n = skb_clone(skb, gfp_mask); 613 + n = skb_clone(skb, gfp_mask); 618 614 619 615 if (n) { 620 616 n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
+3
include/net/sock.h
··· 754 754 void (*unhash)(struct sock *sk); 755 755 void (*rehash)(struct sock *sk); 756 756 int (*get_port)(struct sock *sk, unsigned short snum); 757 + void (*clear_sk)(struct sock *sk, int size); 757 758 758 759 /* Keeping track of sockets in use */ 759 760 #ifdef CONFIG_PROC_FS ··· 852 851 sk->sk_prot->unhash(sk); 853 852 sk->sk_prot->hash(sk); 854 853 } 854 + 855 + void sk_prot_clear_portaddr_nulls(struct sock *sk, int size); 855 856 856 857 /* About 10 seconds */ 857 858 #define SOCK_DESTROY_TIME (10*HZ)
+1 -1
init/do_mounts.c
··· 93 93 * 94 94 * Returns the matching dev_t on success or 0 on failure. 95 95 */ 96 - static dev_t __init devt_from_partuuid(char *uuid_str) 96 + static dev_t devt_from_partuuid(char *uuid_str) 97 97 { 98 98 dev_t res = 0; 99 99 struct device *dev = NULL;
+11
kernel/kthread.c
··· 265 265 return 0; 266 266 } 267 267 268 + void __init_kthread_worker(struct kthread_worker *worker, 269 + const char *name, 270 + struct lock_class_key *key) 271 + { 272 + spin_lock_init(&worker->lock); 273 + lockdep_set_class_and_name(&worker->lock, key, name); 274 + INIT_LIST_HEAD(&worker->work_list); 275 + worker->task = NULL; 276 + } 277 + EXPORT_SYMBOL_GPL(__init_kthread_worker); 278 + 268 279 /** 269 280 * kthread_worker_fn - kthread function to process kthread_worker 270 281 * @worker_ptr: pointer to initialized kthread_worker
+44 -13
kernel/taskstats.c
··· 349 349 return ret; 350 350 } 351 351 352 + #ifdef CONFIG_IA64 353 + #define TASKSTATS_NEEDS_PADDING 1 354 + #endif 355 + 352 356 static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid) 353 357 { 354 358 struct nlattr *na, *ret; 355 359 int aggr; 356 360 357 - /* If we don't pad, we end up with alignment on a 4 byte boundary. 358 - * This causes lots of runtime warnings on systems requiring 8 byte 359 - * alignment */ 360 - u32 pids[2] = { pid, 0 }; 361 - int pid_size = ALIGN(sizeof(pid), sizeof(long)); 362 - 363 361 aggr = (type == TASKSTATS_TYPE_PID) 364 362 ? TASKSTATS_TYPE_AGGR_PID 365 363 : TASKSTATS_TYPE_AGGR_TGID; 366 364 365 + /* 366 + * The taskstats structure is internally aligned on 8 byte 367 + * boundaries but the layout of the aggregate reply, with 368 + * two NLA headers and the pid (each 4 bytes), actually 369 + * forces the entire structure to be unaligned. This causes 370 + * the kernel to issue unaligned access warnings on some 371 + * architectures like ia64. Unfortunately, some software out there 372 + * doesn't properly unroll the NLA packet and assumes that the start 373 + * of the taskstats structure will always be 20 bytes from the start 374 + * of the netlink payload. Aligning the start of the taskstats 375 + * structure breaks this software, which we don't want. So, for now 376 + * the alignment only happens on architectures that require it 377 + * and those users will have to update to fixed versions of those 378 + * packages. Space is reserved in the packet only when needed. 379 + * This ifdef should be removed in several years e.g. 2012 once 380 + * we can be confident that fixed versions are installed on most 381 + * systems. We add the padding before the aggregate since the 382 + * aggregate is already a defined type. 
383 + */ 384 + #ifdef TASKSTATS_NEEDS_PADDING 385 + if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0) 386 + goto err; 387 + #endif 367 388 na = nla_nest_start(skb, aggr); 368 389 if (!na) 369 390 goto err; 370 - if (nla_put(skb, type, pid_size, pids) < 0) 391 + 392 + if (nla_put(skb, type, sizeof(pid), &pid) < 0) 371 393 goto err; 372 394 ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats)); 373 395 if (!ret) ··· 478 456 return rc; 479 457 } 480 458 459 + static size_t taskstats_packet_size(void) 460 + { 461 + size_t size; 462 + 463 + size = nla_total_size(sizeof(u32)) + 464 + nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); 465 + #ifdef TASKSTATS_NEEDS_PADDING 466 + size += nla_total_size(0); /* Padding for alignment */ 467 + #endif 468 + return size; 469 + } 470 + 481 471 static int cmd_attr_pid(struct genl_info *info) 482 472 { 483 473 struct taskstats *stats; ··· 498 464 u32 pid; 499 465 int rc; 500 466 501 - size = nla_total_size(sizeof(u32)) + 502 - nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); 467 + size = taskstats_packet_size(); 503 468 504 469 rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size); 505 470 if (rc < 0) ··· 527 494 u32 tgid; 528 495 int rc; 529 496 530 - size = nla_total_size(sizeof(u32)) + 531 - nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); 497 + size = taskstats_packet_size(); 532 498 533 499 rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size); 534 500 if (rc < 0) ··· 602 570 /* 603 571 * Size includes space for nested attributes 604 572 */ 605 - size = nla_total_size(sizeof(u32)) + 606 - nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); 573 + size = taskstats_packet_size(); 607 574 608 575 is_thread_group = !!taskstats_tgid_alloc(tsk); 609 576 if (is_thread_group) {
+8 -1
kernel/trace/ring_buffer.c
··· 3853 3853 3854 3854 /* Need to copy one event at a time */ 3855 3855 do { 3856 + /* We need the size of one event, because 3857 + * rb_advance_reader only advances by one event, 3858 + * whereas rb_event_ts_length may include the size of 3859 + * one or two events. 3860 + * We have already ensured there's enough space if this 3861 + * is a time extend. */ 3862 + size = rb_event_length(event); 3856 3863 memcpy(bpage->data + pos, rpage->data + rpos, size); 3857 3864 3858 3865 len -= size; ··· 3874 3867 event = rb_reader_event(cpu_buffer); 3875 3868 /* Always keep the time extend and data together */ 3876 3869 size = rb_event_ts_length(event); 3877 - } while (len > size); 3870 + } while (len >= size); 3878 3871 3879 3872 /* update bpage */ 3880 3873 local_set(&bpage->commit, pos);
+1
kernel/user.c
··· 158 158 spin_lock_irq(&uidhash_lock); 159 159 up = uid_hash_find(uid, hashent); 160 160 if (up) { 161 + put_user_ns(ns); 161 162 key_put(new->uid_keyring); 162 163 key_put(new->session_keyring); 163 164 kmem_cache_free(uid_cachep, new);
+2 -1
kernel/watchdog.c
··· 364 364 goto out_save; 365 365 } 366 366 367 - printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event); 367 + printk(KERN_ERR "NMI watchdog disabled for cpu%i: unable to create perf event: %ld\n", 368 + cpu, PTR_ERR(event)); 368 369 return PTR_ERR(event); 369 370 370 371 /* success path */
-1
mm/compaction.c
··· 279 279 /* Successfully isolated */ 280 280 del_page_from_lru_list(zone, page, page_lru(page)); 281 281 list_add(&page->lru, migratelist); 282 - mem_cgroup_del_lru(page); 283 282 cc->nr_migratepages++; 284 283 285 284 /* Avoid isolating too much */
+9 -10
mm/memcontrol.c
··· 1925 1925 1926 1926 rcu_read_lock(); 1927 1927 p = rcu_dereference(mm->owner); 1928 - VM_BUG_ON(!p); 1929 1928 /* 1930 - * because we don't have task_lock(), "p" can exit while 1931 - * we're here. In that case, "mem" can point to root 1932 - * cgroup but never be NULL. (and task_struct itself is freed 1933 - * by RCU, cgroup itself is RCU safe.) Then, we have small 1934 - * risk here to get wrong cgroup. But such kind of mis-account 1935 - * by race always happens because we don't have cgroup_mutex(). 1936 - * It's overkill and we allow that small race, here. 1929 + * Because we don't have task_lock(), "p" can exit. 1930 + * In that case, "mem" can point to root or p can be NULL with 1931 + * race with swapoff. Then, we have small risk of mis-accounting. 1932 + * But such kind of mis-account by race always happens because 1933 + * we don't have cgroup_mutex(). It's overkill and we allow that 1934 + * small race, here. 1935 + * (*) swapoff et al. will charge against mm-struct not against 1936 + * task-struct. So, mm->owner can be NULL. 1937 1937 */ 1938 1938 mem = mem_cgroup_from_task(p); 1939 - VM_BUG_ON(!mem); 1940 - if (mem_cgroup_is_root(mem)) { 1939 + if (!mem || mem_cgroup_is_root(mem)) { 1941 1940 rcu_read_unlock(); 1942 1941 goto done; 1943 1942 }
+2
mm/migrate.c
··· 35 35 #include <linux/hugetlb.h> 36 36 #include <linux/gfp.h> 37 37 38 + #include <asm/tlbflush.h> 39 + 38 40 #include "internal.h" 39 41 40 42 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
+27 -1
mm/nommu.c
··· 10 10 * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com> 11 11 * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org> 12 12 * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com> 13 - * Copyright (c) 2007-2009 Paul Mundt <lethal@linux-sh.org> 13 + * Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org> 14 14 */ 15 15 16 16 #include <linux/module.h> ··· 328 328 { 329 329 return vmalloc(size); 330 330 } 331 + EXPORT_SYMBOL(vmalloc_node); 331 332 332 333 /** 333 334 * vzalloc_node - allocate memory on a specific node with zero fill ··· 440 439 void __attribute__((weak)) vmalloc_sync_all(void) 441 440 { 442 441 } 442 + 443 + /** 444 + * alloc_vm_area - allocate a range of kernel address space 445 + * @size: size of the area 446 + * 447 + * Returns: NULL on failure, vm_struct on success 448 + * 449 + * This function reserves a range of kernel address space, and 450 + * allocates pagetables to map that range. No actual mappings 451 + * are created. If the kernel address space is not shared 452 + * between processes, it syncs the pagetable across all 453 + * processes. 454 + */ 455 + struct vm_struct *alloc_vm_area(size_t size) 456 + { 457 + BUG(); 458 + return NULL; 459 + } 460 + EXPORT_SYMBOL_GPL(alloc_vm_area); 461 + 462 + void free_vm_area(struct vm_struct *area) 463 + { 464 + BUG(); 465 + } 466 + EXPORT_SYMBOL_GPL(free_vm_area); 443 467 444 468 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, 445 469 struct page *page)
+1 -1
mm/page-writeback.c
··· 563 563 break; /* We've done our duty */ 564 564 } 565 565 trace_wbc_balance_dirty_wait(&wbc, bdi); 566 - __set_current_state(TASK_INTERRUPTIBLE); 566 + __set_current_state(TASK_UNINTERRUPTIBLE); 567 567 io_schedule_timeout(pause); 568 568 569 569 /*
+1 -1
mm/percpu.c
··· 1268 1268 1269 1269 /* we're done parsing the input, undefine BUG macro and dump config */ 1270 1270 #undef PCPU_SETUP_BUG_ON 1271 - pcpu_dump_alloc_info(KERN_INFO, ai); 1271 + pcpu_dump_alloc_info(KERN_DEBUG, ai); 1272 1272 1273 1273 pcpu_nr_groups = ai->nr_groups; 1274 1274 pcpu_group_offsets = group_offsets;
+1
net/bluetooth/rfcomm/core.c
··· 311 311 d->state = BT_OPEN; 312 312 d->flags = 0; 313 313 d->mscex = 0; 314 + d->sec_level = BT_SECURITY_LOW; 314 315 d->mtu = RFCOMM_DEFAULT_MTU; 315 316 d->v24_sig = RFCOMM_V24_RTC | RFCOMM_V24_RTR | RFCOMM_V24_DV; 316 317
+19 -11
net/bridge/br_multicast.c
··· 437 437 ip6h = ipv6_hdr(skb); 438 438 439 439 *(__force __be32 *)ip6h = htonl(0x60000000); 440 - ip6h->payload_len = 8 + sizeof(*mldq); 440 + ip6h->payload_len = htons(8 + sizeof(*mldq)); 441 441 ip6h->nexthdr = IPPROTO_HOPOPTS; 442 442 ip6h->hop_limit = 1; 443 443 ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0); ··· 1430 1430 struct net_bridge_port *port, 1431 1431 struct sk_buff *skb) 1432 1432 { 1433 - struct sk_buff *skb2 = skb; 1433 + struct sk_buff *skb2; 1434 1434 struct ipv6hdr *ip6h; 1435 1435 struct icmp6hdr *icmp6h; 1436 1436 u8 nexthdr; ··· 1469 1469 if (!skb2) 1470 1470 return -ENOMEM; 1471 1471 1472 + err = -EINVAL; 1473 + if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr))) 1474 + goto out; 1475 + 1472 1476 len -= offset - skb_network_offset(skb2); 1473 1477 1474 1478 __skb_pull(skb2, offset); 1475 1479 skb_reset_transport_header(skb2); 1476 - 1477 - err = -EINVAL; 1478 - if (!pskb_may_pull(skb2, sizeof(*icmp6h))) 1479 - goto out; 1480 1480 1481 1481 icmp6h = icmp6_hdr(skb2); 1482 1482 ··· 1516 1516 switch (icmp6h->icmp6_type) { 1517 1517 case ICMPV6_MGM_REPORT: 1518 1518 { 1519 - struct mld_msg *mld = (struct mld_msg *)icmp6h; 1519 + struct mld_msg *mld; 1520 + if (!pskb_may_pull(skb2, sizeof(*mld))) { 1521 + err = -EINVAL; 1522 + goto out; 1523 + } 1524 + mld = (struct mld_msg *)skb_transport_header(skb2); 1520 1525 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; 1521 1526 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca); 1522 1527 break; ··· 1534 1529 break; 1535 1530 case ICMPV6_MGM_REDUCTION: 1536 1531 { 1537 - struct mld_msg *mld = (struct mld_msg *)icmp6h; 1532 + struct mld_msg *mld; 1533 + if (!pskb_may_pull(skb2, sizeof(*mld))) { 1534 + err = -EINVAL; 1535 + goto out; 1536 + } 1537 + mld = (struct mld_msg *)skb_transport_header(skb2); 1538 1538 br_ip6_multicast_leave_group(br, port, &mld->mld_mca); 1539 1539 } 1540 1540 } 1541 1541 1542 1542 out: 1543 - __skb_push(skb2, offset); 1544 - if (skb2 != skb) 1545 - kfree_skb(skb2); 1543 + 
kfree_skb(skb2); 1546 1544 return err; 1547 1545 } 1548 1546 #endif
+2
net/bridge/br_stp_bpdu.c
··· 50 50 51 51 llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr); 52 52 53 + skb_reset_mac_header(skb); 54 + 53 55 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, 54 56 dev_queue_xmit); 55 57 }
+2 -2
net/can/bcm.c
··· 125 125 struct list_head tx_ops; 126 126 unsigned long dropped_usr_msgs; 127 127 struct proc_dir_entry *bcm_proc_read; 128 - char procname [20]; /* pointer printed in ASCII with \0 */ 128 + char procname [32]; /* inode number in decimal with \0 */ 129 129 }; 130 130 131 131 static inline struct bcm_sock *bcm_sk(const struct sock *sk) ··· 1521 1521 1522 1522 if (proc_dir) { 1523 1523 /* unique socket address as filename */ 1524 - sprintf(bo->procname, "%p", sock); 1524 + sprintf(bo->procname, "%lu", sock_i_ino(sk)); 1525 1525 bo->bcm_proc_read = proc_create_data(bo->procname, 0644, 1526 1526 proc_dir, 1527 1527 &bcm_proc_fops, sk);
+1 -2
net/core/fib_rules.c
··· 181 181 { 182 182 int ret = 0; 183 183 184 - if (rule->iifindex && (rule->iifindex != fl->iif) && 185 - !(fl->flags & FLOWI_FLAG_MATCH_ANY_IIF)) 184 + if (rule->iifindex && (rule->iifindex != fl->iif)) 186 185 goto out; 187 186 188 187 if (rule->oifindex && (rule->oifindex != fl->oif))
+35 -12
net/core/sock.c
··· 1009 1009 #endif 1010 1010 } 1011 1011 1012 + /* 1013 + * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes 1014 + * un-modified. Special care is taken when initializing object to zero. 1015 + */ 1016 + static inline void sk_prot_clear_nulls(struct sock *sk, int size) 1017 + { 1018 + if (offsetof(struct sock, sk_node.next) != 0) 1019 + memset(sk, 0, offsetof(struct sock, sk_node.next)); 1020 + memset(&sk->sk_node.pprev, 0, 1021 + size - offsetof(struct sock, sk_node.pprev)); 1022 + } 1023 + 1024 + void sk_prot_clear_portaddr_nulls(struct sock *sk, int size) 1025 + { 1026 + unsigned long nulls1, nulls2; 1027 + 1028 + nulls1 = offsetof(struct sock, __sk_common.skc_node.next); 1029 + nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next); 1030 + if (nulls1 > nulls2) 1031 + swap(nulls1, nulls2); 1032 + 1033 + if (nulls1 != 0) 1034 + memset((char *)sk, 0, nulls1); 1035 + memset((char *)sk + nulls1 + sizeof(void *), 0, 1036 + nulls2 - nulls1 - sizeof(void *)); 1037 + memset((char *)sk + nulls2 + sizeof(void *), 0, 1038 + size - nulls2 - sizeof(void *)); 1039 + } 1040 + EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls); 1041 + 1012 1042 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, 1013 1043 int family) 1014 1044 { ··· 1051 1021 if (!sk) 1052 1022 return sk; 1053 1023 if (priority & __GFP_ZERO) { 1054 - /* 1055 - * caches using SLAB_DESTROY_BY_RCU should let 1056 - * sk_node.next un-modified. Special care is taken 1057 - * when initializing object to zero. 
1058 - */ 1059 - if (offsetof(struct sock, sk_node.next) != 0) 1060 - memset(sk, 0, offsetof(struct sock, sk_node.next)); 1061 - memset(&sk->sk_node.pprev, 0, 1062 - prot->obj_size - offsetof(struct sock, 1063 - sk_node.pprev)); 1024 + if (prot->clear_sk) 1025 + prot->clear_sk(sk, prot->obj_size); 1026 + else 1027 + sk_prot_clear_nulls(sk, prot->obj_size); 1064 1028 } 1065 - } 1066 - else 1029 + } else 1067 1030 sk = kmalloc(prot->obj_size, priority); 1068 1031 1069 1032 if (sk != NULL) {
+8 -2
net/ipv4/fib_frontend.c
··· 163 163 .daddr = addr 164 164 } 165 165 }, 166 - .flags = FLOWI_FLAG_MATCH_ANY_IIF 167 166 }; 168 167 struct fib_result res = { 0 }; 169 168 struct net_device *dev = NULL; 169 + struct fib_table *local_table; 170 + 171 + #ifdef CONFIG_IP_MULTIPLE_TABLES 172 + res.r = NULL; 173 + #endif 170 174 171 175 rcu_read_lock(); 172 - if (fib_lookup(net, &fl, &res)) { 176 + local_table = fib_get_table(net, RT_TABLE_LOCAL); 177 + if (!local_table || 178 + fib_table_lookup(local_table, &fl, &res, FIB_LOOKUP_NOREF)) { 173 179 rcu_read_unlock(); 174 180 return NULL; 175 181 }
+10 -5
net/ipv4/route.c
··· 2585 2585 goto out; 2586 2586 2587 2587 /* RACE: Check return value of inet_select_addr instead. */ 2588 - if (rcu_dereference(dev_out->ip_ptr) == NULL) 2589 - goto out; /* Wrong error code */ 2590 - 2588 + if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) { 2589 + err = -ENETUNREACH; 2590 + goto out; 2591 + } 2591 2592 if (ipv4_is_local_multicast(oldflp->fl4_dst) || 2592 2593 ipv4_is_lbcast(oldflp->fl4_dst)) { 2593 2594 if (!fl.fl4_src) ··· 2649 2648 } 2650 2649 2651 2650 if (res.type == RTN_LOCAL) { 2652 - if (!fl.fl4_src) 2653 - fl.fl4_src = fl.fl4_dst; 2651 + if (!fl.fl4_src) { 2652 + if (res.fi->fib_prefsrc) 2653 + fl.fl4_src = res.fi->fib_prefsrc; 2654 + else 2655 + fl.fl4_src = fl.fl4_dst; 2656 + } 2654 2657 dev_out = net->loopback_dev; 2655 2658 fl.oif = dev_out->ifindex; 2656 2659 res.fi = NULL;
+2 -2
net/ipv4/tcp_ipv4.c
··· 2030 2030 get_req: 2031 2031 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket]; 2032 2032 } 2033 - sk = sk_next(st->syn_wait_sk); 2033 + sk = sk_nulls_next(st->syn_wait_sk); 2034 2034 st->state = TCP_SEQ_STATE_LISTENING; 2035 2035 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); 2036 2036 } else { ··· 2039 2039 if (reqsk_queue_len(&icsk->icsk_accept_queue)) 2040 2040 goto start_req; 2041 2041 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); 2042 - sk = sk_next(sk); 2042 + sk = sk_nulls_next(sk); 2043 2043 } 2044 2044 get_sk: 2045 2045 sk_nulls_for_each_from(sk, node) {
+1
net/ipv4/udp.c
··· 1899 1899 .compat_setsockopt = compat_udp_setsockopt, 1900 1900 .compat_getsockopt = compat_udp_getsockopt, 1901 1901 #endif 1902 + .clear_sk = sk_prot_clear_portaddr_nulls, 1902 1903 }; 1903 1904 EXPORT_SYMBOL(udp_prot); 1904 1905
+1
net/ipv4/udplite.c
··· 57 57 .compat_setsockopt = compat_udp_setsockopt, 58 58 .compat_getsockopt = compat_udp_getsockopt, 59 59 #endif 60 + .clear_sk = sk_prot_clear_portaddr_nulls, 60 61 }; 61 62 EXPORT_SYMBOL(udplite_prot); 62 63
+3 -1
net/ipv6/addrconf.c
··· 2669 2669 2670 2670 ASSERT_RTNL(); 2671 2671 2672 - rt6_ifdown(net, dev); 2672 + /* Flush routes if device is being removed or it is not loopback */ 2673 + if (how || !(dev->flags & IFF_LOOPBACK)) 2674 + rt6_ifdown(net, dev); 2673 2675 neigh_ifdown(&nd_tbl, dev); 2674 2676 2675 2677 idev = __in6_dev_get(dev);
+2 -10
net/ipv6/ip6_output.c
··· 56 56 #include <net/checksum.h> 57 57 #include <linux/mroute6.h> 58 58 59 - static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); 59 + int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); 60 60 61 61 int __ip6_local_out(struct sk_buff *skb) 62 62 { ··· 143 143 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); 144 144 kfree_skb(skb); 145 145 return -EINVAL; 146 - } 147 - 148 - static inline int ip6_skb_dst_mtu(struct sk_buff *skb) 149 - { 150 - struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; 151 - 152 - return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ? 153 - skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); 154 146 } 155 147 156 148 static int ip6_finish_output(struct sk_buff *skb) ··· 593 601 return offset; 594 602 } 595 603 596 - static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) 604 + int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) 597 605 { 598 606 struct sk_buff *frag; 599 607 struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
+6 -1
net/ipv6/route.c
··· 1565 1565 { 1566 1566 struct rt6_info *rt, *nrt; 1567 1567 int allfrag = 0; 1568 - 1568 + again: 1569 1569 rt = rt6_lookup(net, daddr, saddr, ifindex, 0); 1570 1570 if (rt == NULL) 1571 1571 return; 1572 + 1573 + if (rt6_check_expired(rt)) { 1574 + ip6_del_rt(rt); 1575 + goto again; 1576 + } 1572 1577 1573 1578 if (pmtu >= dst_mtu(&rt->dst)) 1574 1579 goto out;
+1
net/ipv6/udp.c
··· 1477 1477 .compat_setsockopt = compat_udpv6_setsockopt, 1478 1478 .compat_getsockopt = compat_udpv6_getsockopt, 1479 1479 #endif 1480 + .clear_sk = sk_prot_clear_portaddr_nulls, 1480 1481 }; 1481 1482 1482 1483 static struct inet_protosw udpv6_protosw = {
+1
net/ipv6/udplite.c
··· 55 55 .compat_setsockopt = compat_udpv6_setsockopt, 56 56 .compat_getsockopt = compat_udpv6_getsockopt, 57 57 #endif 58 + .clear_sk = sk_prot_clear_portaddr_nulls, 58 59 }; 59 60 60 61 static struct inet_protosw udplite6_protosw = {
+15 -1
net/ipv6/xfrm6_output.c
··· 17 17 #include <linux/netfilter_ipv6.h> 18 18 #include <net/dst.h> 19 19 #include <net/ipv6.h> 20 + #include <net/ip6_route.h> 20 21 #include <net/xfrm.h> 21 22 22 23 int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, ··· 89 88 return xfrm_output(skb); 90 89 } 91 90 91 + static int __xfrm6_output(struct sk_buff *skb) 92 + { 93 + struct dst_entry *dst = skb_dst(skb); 94 + struct xfrm_state *x = dst->xfrm; 95 + 96 + if ((x && x->props.mode == XFRM_MODE_TUNNEL) && 97 + ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || 98 + dst_allfrag(skb_dst(skb)))) { 99 + return ip6_fragment(skb, xfrm6_output_finish); 100 + } 101 + return xfrm6_output_finish(skb); 102 + } 103 + 92 104 int xfrm6_output(struct sk_buff *skb) 93 105 { 94 106 return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, 95 - skb_dst(skb)->dev, xfrm6_output_finish); 107 + skb_dst(skb)->dev, __xfrm6_output); 96 108 }
+11 -7
net/irda/af_irda.c
··· 2280 2280 2281 2281 switch (optname) { 2282 2282 case IRLMP_ENUMDEVICES: 2283 + 2284 + /* Offset to first device entry */ 2285 + offset = sizeof(struct irda_device_list) - 2286 + sizeof(struct irda_device_info); 2287 + 2288 + if (len < offset) { 2289 + err = -EINVAL; 2290 + goto out; 2291 + } 2292 + 2283 2293 /* Ask lmp for the current discovery log */ 2284 2294 discoveries = irlmp_get_discoveries(&list.len, self->mask.word, 2285 2295 self->nslots); ··· 2300 2290 } 2301 2291 2302 2292 /* Write total list length back to client */ 2303 - if (copy_to_user(optval, &list, 2304 - sizeof(struct irda_device_list) - 2305 - sizeof(struct irda_device_info))) 2293 + if (copy_to_user(optval, &list, offset)) 2306 2294 err = -EFAULT; 2307 - 2308 - /* Offset to first device entry */ 2309 - offset = sizeof(struct irda_device_list) - 2310 - sizeof(struct irda_device_info); 2311 2295 2312 2296 /* Copy the list itself - watch for overflow */ 2313 2297 if (list.len > 2048) {
+4
net/mac80211/ibss.c
··· 780 780 781 781 mutex_lock(&sdata->u.ibss.mtx); 782 782 783 + if (!sdata->u.ibss.ssid_len) 784 + goto mgmt_out; /* not ready to merge yet */ 785 + 783 786 switch (fc & IEEE80211_FCTL_STYPE) { 784 787 case IEEE80211_STYPE_PROBE_REQ: 785 788 ieee80211_rx_mgmt_probe_req(sdata, mgmt, skb->len); ··· 800 797 break; 801 798 } 802 799 800 + mgmt_out: 803 801 mutex_unlock(&sdata->u.ibss.mtx); 804 802 } 805 803
+4 -1
net/mac80211/rx.c
··· 1788 1788 1789 1789 fwd_skb = skb_copy(skb, GFP_ATOMIC); 1790 1790 1791 - if (!fwd_skb && net_ratelimit()) 1791 + if (!fwd_skb && net_ratelimit()) { 1792 1792 printk(KERN_DEBUG "%s: failed to clone mesh frame\n", 1793 1793 sdata->name); 1794 + goto out; 1795 + } 1794 1796 1795 1797 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 1796 1798 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); ··· 1830 1828 } 1831 1829 } 1832 1830 1831 + out: 1833 1832 if (is_multicast_ether_addr(hdr->addr1) || 1834 1833 sdata->dev->flags & IFF_PROMISC) 1835 1834 return RX_CONTINUE;
+4 -1
net/mac80211/work.c
··· 1051 1051 { 1052 1052 struct ieee80211_local *local = sdata->local; 1053 1053 struct ieee80211_work *wk; 1054 + bool cleanup = false; 1054 1055 1055 1056 mutex_lock(&local->mtx); 1056 1057 list_for_each_entry(wk, &local->work_list, list) { 1057 1058 if (wk->sdata != sdata) 1058 1059 continue; 1060 + cleanup = true; 1059 1061 wk->type = IEEE80211_WORK_ABORT; 1060 1062 wk->started = true; 1061 1063 wk->timeout = jiffies; ··· 1065 1063 mutex_unlock(&local->mtx); 1066 1064 1067 1065 /* run cleanups etc. */ 1068 - ieee80211_work_work(&local->work_work); 1066 + if (cleanup) 1067 + ieee80211_work_work(&local->work_work); 1069 1068 1070 1069 mutex_lock(&local->mtx); 1071 1070 list_for_each_entry(wk, &local->work_list, list) {
+8 -12
net/sched/sch_sfq.c
··· 270 270 /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */ 271 271 d = q->next[q->tail]; 272 272 q->next[q->tail] = q->next[d]; 273 - q->allot[q->next[d]] += q->quantum; 274 273 skb = q->qs[d].prev; 275 274 len = qdisc_pkt_len(skb); 276 275 __skb_unlink(skb, &q->qs[d]); ··· 320 321 sfq_inc(q, x); 321 322 if (q->qs[x].qlen == 1) { /* The flow is new */ 322 323 if (q->tail == SFQ_DEPTH) { /* It is the first flow */ 323 - q->tail = x; 324 324 q->next[x] = x; 325 - q->allot[x] = q->quantum; 326 325 } else { 327 326 q->next[x] = q->next[q->tail]; 328 327 q->next[q->tail] = x; 329 - q->tail = x; 330 328 } 329 + q->tail = x; 330 + q->allot[x] = q->quantum; 331 331 } 332 332 if (++sch->q.qlen <= q->limit) { 333 333 sch->bstats.bytes += qdisc_pkt_len(skb); ··· 357 359 { 358 360 struct sfq_sched_data *q = qdisc_priv(sch); 359 361 struct sk_buff *skb; 360 - sfq_index a, old_a; 362 + sfq_index a, next_a; 361 363 362 364 /* No active slots */ 363 365 if (q->tail == SFQ_DEPTH) 364 366 return NULL; 365 367 366 - a = old_a = q->next[q->tail]; 368 + a = q->next[q->tail]; 367 369 368 370 /* Grab packet */ 369 371 skb = __skb_dequeue(&q->qs[a]); ··· 374 376 /* Is the slot empty? */ 375 377 if (q->qs[a].qlen == 0) { 376 378 q->ht[q->hash[a]] = SFQ_DEPTH; 377 - a = q->next[a]; 378 - if (a == old_a) { 379 + next_a = q->next[a]; 380 + if (a == next_a) { 379 381 q->tail = SFQ_DEPTH; 380 382 return skb; 381 383 } 382 - q->next[q->tail] = a; 383 - q->allot[a] += q->quantum; 384 + q->next[q->tail] = next_a; 384 385 } else if ((q->allot[a] -= qdisc_pkt_len(skb)) <= 0) { 385 - q->tail = a; 386 - a = q->next[a]; 387 386 q->allot[a] += q->quantum; 387 + q->tail = a; 388 388 } 389 389 return skb; 390 390 }
+1 -1
net/sctp/socket.c
··· 5053 5053 if (copy_to_user(optval, &val, len)) 5054 5054 return -EFAULT; 5055 5055 5056 - return -ENOTSUPP; 5056 + return 0; 5057 5057 } 5058 5058 5059 5059 /*
+14
scripts/kconfig/menu.c
··· 140 140 } 141 141 if (current_entry->prompt && current_entry != &rootmenu) 142 142 prop_warn(prop, "prompt redefined"); 143 + 144 + /* Apply all upper menus' visibilities to actual prompts. */ 145 + if(type == P_PROMPT) { 146 + struct menu *menu = current_entry; 147 + 148 + while ((menu = menu->parent) != NULL) { 149 + if (!menu->visibility) 150 + continue; 151 + prop->visible.expr 152 + = expr_alloc_and(prop->visible.expr, 153 + menu->visibility); 154 + } 155 + } 156 + 143 157 current_entry->prompt = prop; 144 158 } 145 159 prop->text = prompt;
+2
security/integrity/ima/ima_policy.c
··· 253 253 result = security_filter_rule_init(entry->lsm[lsm_rule].type, 254 254 Audit_equal, args, 255 255 &entry->lsm[lsm_rule].rule); 256 + if (!entry->lsm[lsm_rule].rule) 257 + return -EINVAL; 256 258 return result; 257 259 } 258 260
-1
security/keys/request_key.c
··· 403 403 return ret; 404 404 405 405 link_prealloc_failed: 406 - up_write(&dest_keyring->sem); 407 406 mutex_unlock(&user->cons_lock); 408 407 kleave(" = %d [prelink]", ret); 409 408 return ret;
+7 -3
sound/core/pcm_lib.c
··· 1070 1070 struct snd_pcm_hw_rule *new; 1071 1071 unsigned int new_rules = constrs->rules_all + 16; 1072 1072 new = kcalloc(new_rules, sizeof(*c), GFP_KERNEL); 1073 - if (!new) 1073 + if (!new) { 1074 + va_end(args); 1074 1075 return -ENOMEM; 1076 + } 1075 1077 if (constrs->rules) { 1076 1078 memcpy(new, constrs->rules, 1077 1079 constrs->rules_num * sizeof(*c)); ··· 1089 1087 c->private = private; 1090 1088 k = 0; 1091 1089 while (1) { 1092 - if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) 1090 + if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) { 1091 + va_end(args); 1093 1092 return -EINVAL; 1093 + } 1094 1094 c->deps[k++] = dep; 1095 1095 if (dep < 0) 1096 1096 break; ··· 1101 1097 constrs->rules_num++; 1102 1098 va_end(args); 1103 1099 return 0; 1104 - } 1100 + } 1105 1101 1106 1102 EXPORT_SYMBOL(snd_pcm_hw_rule_add); 1107 1103
+2 -2
sound/oss/soundcard.c
··· 87 87 int i, n; 88 88 89 89 for (i = 0; i < num_mixer_volumes; i++) { 90 - if (strcmp(name, mixer_vols[i].name) == 0) { 90 + if (strncmp(name, mixer_vols[i].name, 32) == 0) { 91 91 if (present) 92 92 mixer_vols[i].num = i; 93 93 return mixer_vols[i].levels; ··· 99 99 } 100 100 n = num_mixer_volumes++; 101 101 102 - strcpy(mixer_vols[n].name, name); 102 + strncpy(mixer_vols[n].name, name, 32); 103 103 104 104 if (present) 105 105 mixer_vols[n].num = n;
+34 -23
sound/pci/hda/hda_codec.c
··· 1919 1919 } 1920 1920 EXPORT_SYMBOL_HDA(snd_hda_find_mixer_ctl); 1921 1921 1922 + static int find_empty_mixer_ctl_idx(struct hda_codec *codec, const char *name) 1923 + { 1924 + int idx; 1925 + for (idx = 0; idx < 16; idx++) { /* 16 ctlrs should be large enough */ 1926 + if (!_snd_hda_find_mixer_ctl(codec, name, idx)) 1927 + return idx; 1928 + } 1929 + return -EBUSY; 1930 + } 1931 + 1922 1932 /** 1923 1933 * snd_hda_ctl_add - Add a control element and assign to the codec 1924 1934 * @codec: HD-audio codec ··· 2664 2654 { } /* end */ 2665 2655 }; 2666 2656 2667 - #define SPDIF_MAX_IDX 4 /* 4 instances should be enough to probe */ 2668 - 2669 2657 /** 2670 2658 * snd_hda_create_spdif_out_ctls - create Output SPDIF-related controls 2671 2659 * @codec: the HDA codec ··· 2681 2673 struct snd_kcontrol_new *dig_mix; 2682 2674 int idx; 2683 2675 2684 - for (idx = 0; idx < SPDIF_MAX_IDX; idx++) { 2685 - if (!_snd_hda_find_mixer_ctl(codec, "IEC958 Playback Switch", 2686 - idx)) 2687 - break; 2688 - } 2689 - if (idx >= SPDIF_MAX_IDX) { 2676 + idx = find_empty_mixer_ctl_idx(codec, "IEC958 Playback Switch"); 2677 + if (idx < 0) { 2690 2678 printk(KERN_ERR "hda_codec: too many IEC958 outputs\n"); 2691 2679 return -EBUSY; 2692 2680 } ··· 2833 2829 struct snd_kcontrol_new *dig_mix; 2834 2830 int idx; 2835 2831 2836 - for (idx = 0; idx < SPDIF_MAX_IDX; idx++) { 2837 - if (!_snd_hda_find_mixer_ctl(codec, "IEC958 Capture Switch", 2838 - idx)) 2839 - break; 2840 - } 2841 - if (idx >= SPDIF_MAX_IDX) { 2832 + idx = find_empty_mixer_ctl_idx(codec, "IEC958 Capture Switch"); 2833 + if (idx < 0) { 2842 2834 printk(KERN_ERR "hda_codec: too many IEC958 inputs\n"); 2843 2835 return -EBUSY; 2844 2836 } ··· 3808 3808 3809 3809 for (; knew->name; knew++) { 3810 3810 struct snd_kcontrol *kctl; 3811 + int addr = 0, idx = 0; 3811 3812 if (knew->iface == -1) /* skip this codec private value */ 3812 3813 continue; 3813 - kctl = snd_ctl_new1(knew, codec); 3814 - if (!kctl) 3815 - return -ENOMEM; 
3816 - err = snd_hda_ctl_add(codec, 0, kctl); 3817 - if (err < 0) { 3818 - if (!codec->addr) 3819 - return err; 3814 + for (;;) { 3820 3815 kctl = snd_ctl_new1(knew, codec); 3821 3816 if (!kctl) 3822 3817 return -ENOMEM; 3823 - kctl->id.device = codec->addr; 3818 + if (addr > 0) 3819 + kctl->id.device = addr; 3820 + if (idx > 0) 3821 + kctl->id.index = idx; 3824 3822 err = snd_hda_ctl_add(codec, 0, kctl); 3825 - if (err < 0) 3823 + if (!err) 3824 + break; 3825 + /* try first with another device index corresponding to 3826 + * the codec addr; if it still fails (or it's the 3827 + * primary codec), then try another control index 3828 + */ 3829 + if (!addr && codec->addr) 3830 + addr = codec->addr; 3831 + else if (!idx && !knew->index) { 3832 + idx = find_empty_mixer_ctl_idx(codec, 3833 + knew->name); 3834 + if (idx <= 0) 3835 + return err; 3836 + } else 3826 3837 return err; 3827 3838 } 3828 3839 }
+1
sound/pci/hda/hda_intel.c
··· 2300 2300 SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB), 2301 2301 SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB), 2302 2302 SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB), 2303 + SND_PCI_QUIRK(0x1028, 0x0470, "Dell Inspiron 1120", POS_FIX_LPIB), 2303 2304 SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB), 2304 2305 SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), 2305 2306 SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
+39 -25
sound/pci/hda/patch_realtek.c
··· 14806 14806 14807 14807 enum { 14808 14808 ALC269_FIXUP_SONY_VAIO, 14809 + ALC275_FIX_SONY_VAIO_GPIO2, 14809 14810 ALC269_FIXUP_DELL_M101Z, 14810 - ALC269_FIXUP_LENOVO_EDGE14, 14811 + ALC269_FIXUP_SKU_IGNORE, 14811 14812 ALC269_FIXUP_ASUS_G73JW, 14812 14813 }; 14813 14814 ··· 14819 14818 {} 14820 14819 } 14821 14820 }, 14821 + [ALC275_FIX_SONY_VAIO_GPIO2] = { 14822 + .verbs = (const struct hda_verb[]) { 14823 + {0x01, AC_VERB_SET_GPIO_MASK, 0x04}, 14824 + {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04}, 14825 + {0x01, AC_VERB_SET_GPIO_DATA, 0x00}, 14826 + { } 14827 + } 14828 + }, 14822 14829 [ALC269_FIXUP_DELL_M101Z] = { 14823 14830 .verbs = (const struct hda_verb[]) { 14824 14831 /* Enables internal speaker */ ··· 14835 14826 {} 14836 14827 } 14837 14828 }, 14838 - [ALC269_FIXUP_LENOVO_EDGE14] = { 14829 + [ALC269_FIXUP_SKU_IGNORE] = { 14839 14830 .sku = ALC_FIXUP_SKU_IGNORE, 14840 14831 }, 14841 14832 [ALC269_FIXUP_ASUS_G73JW] = { ··· 14847 14838 }; 14848 14839 14849 14840 static struct snd_pci_quirk alc269_fixup_tbl[] = { 14841 + SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2), 14842 + SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2), 14843 + SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2), 14850 14844 SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), 14851 14845 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), 14852 - SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_LENOVO_EDGE14), 14846 + SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE), 14847 + SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), 14853 14848 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), 14854 14849 {} 14855 14850 }; ··· 15104 15091 15105 15092 alc_auto_parse_customize_define(codec); 15106 15093 15107 - coef = alc_read_coef_idx(codec, 0); 15108 - if ((coef & 0x00f0) == 0x0010) { 15109 - if 
(codec->bus->pci->subsystem_vendor == 0x1025 && 15110 - spec->cdefine.platform_type == 1) { 15111 - alc_codec_rename(codec, "ALC271X"); 15112 - spec->codec_variant = ALC269_TYPE_ALC271X; 15113 - } else if ((coef & 0xf000) == 0x1000) { 15114 - spec->codec_variant = ALC269_TYPE_ALC270; 15115 - } else if ((coef & 0xf000) == 0x2000) { 15116 - alc_codec_rename(codec, "ALC259"); 15117 - spec->codec_variant = ALC269_TYPE_ALC259; 15118 - } else if ((coef & 0xf000) == 0x3000) { 15119 - alc_codec_rename(codec, "ALC258"); 15120 - spec->codec_variant = ALC269_TYPE_ALC258; 15121 - } else { 15122 - alc_codec_rename(codec, "ALC269VB"); 15123 - spec->codec_variant = ALC269_TYPE_ALC269VB; 15124 - } 15125 - } else 15126 - alc_fix_pll_init(codec, 0x20, 0x04, 15); 15127 - 15128 - alc269_fill_coef(codec); 15094 + if (codec->vendor_id == 0x10ec0269) { 15095 + coef = alc_read_coef_idx(codec, 0); 15096 + if ((coef & 0x00f0) == 0x0010) { 15097 + if (codec->bus->pci->subsystem_vendor == 0x1025 && 15098 + spec->cdefine.platform_type == 1) { 15099 + alc_codec_rename(codec, "ALC271X"); 15100 + spec->codec_variant = ALC269_TYPE_ALC271X; 15101 + } else if ((coef & 0xf000) == 0x1000) { 15102 + spec->codec_variant = ALC269_TYPE_ALC270; 15103 + } else if ((coef & 0xf000) == 0x2000) { 15104 + alc_codec_rename(codec, "ALC259"); 15105 + spec->codec_variant = ALC269_TYPE_ALC259; 15106 + } else if ((coef & 0xf000) == 0x3000) { 15107 + alc_codec_rename(codec, "ALC258"); 15108 + spec->codec_variant = ALC269_TYPE_ALC258; 15109 + } else { 15110 + alc_codec_rename(codec, "ALC269VB"); 15111 + spec->codec_variant = ALC269_TYPE_ALC269VB; 15112 + } 15113 + } else 15114 + alc_fix_pll_init(codec, 0x20, 0x04, 15); 15115 + alc269_fill_coef(codec); 15116 + } 15129 15117 15130 15118 board_config = snd_hda_check_board_config(codec, ALC269_MODEL_LAST, 15131 15119 alc269_models,
+2 -3
sound/pci/hda/patch_sigmatel.c
··· 3481 3481 3482 3482 label = hda_get_input_pin_label(codec, nid, 1); 3483 3483 snd_hda_add_imux_item(dimux, label, index, &type_idx); 3484 + if (snd_hda_get_bool_hint(codec, "separate_dmux") != 1) 3485 + snd_hda_add_imux_item(imux, label, index, &type_idx); 3484 3486 3485 3487 err = create_elem_capture_vol(codec, nid, label, type_idx, 3486 3488 HDA_INPUT); ··· 3494 3492 if (err < 0) 3495 3493 return err; 3496 3494 } 3497 - 3498 - if (snd_hda_get_bool_hint(codec, "separate_dmux") != 1) 3499 - snd_hda_add_imux_item(imux, label, index, NULL); 3500 3495 } 3501 3496 3502 3497 return 0;
+4 -6
sound/soc/codecs/max98088.c
··· 40 40 }; 41 41 42 42 struct max98088_priv { 43 - u8 reg_cache[M98088_REG_CNT]; 44 43 enum max98088_type devtype; 45 44 void *control_data; 46 45 struct max98088_pdata *pdata; ··· 1587 1588 1588 1589 static void max98088_sync_cache(struct snd_soc_codec *codec) 1589 1590 { 1590 - struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); 1591 + u16 *reg_cache = codec->reg_cache; 1591 1592 int i; 1592 1593 1593 1594 if (!codec->cache_sync) ··· 1598 1599 /* write back cached values if they're writeable and 1599 1600 * different from the hardware default. 1600 1601 */ 1601 - for (i = 1; i < ARRAY_SIZE(max98088->reg_cache); i++) { 1602 + for (i = 1; i < codec->driver->reg_cache_size; i++) { 1602 1603 if (!max98088_access[i].writable) 1603 1604 continue; 1604 1605 1605 - if (max98088->reg_cache[i] == max98088_reg[i]) 1606 + if (reg_cache[i] == max98088_reg[i]) 1606 1607 continue; 1607 1608 1608 - snd_soc_write(codec, i, max98088->reg_cache[i]); 1609 + snd_soc_write(codec, i, reg_cache[i]); 1609 1610 } 1610 1611 1611 1612 codec->cache_sync = 0; ··· 1950 1951 int ret = 0; 1951 1952 1952 1953 codec->cache_sync = 1; 1953 - memcpy(codec->reg_cache, max98088_reg, sizeof(max98088_reg)); 1954 1954 1955 1955 ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C); 1956 1956 if (ret != 0) {
+5 -4
sound/soc/codecs/wm8523.c
··· 41 41 /* codec private data */ 42 42 struct wm8523_priv { 43 43 enum snd_soc_control_type control_type; 44 - u16 reg_cache[WM8523_REGISTER_COUNT]; 45 44 struct regulator_bulk_data supplies[WM8523_NUM_SUPPLIES]; 46 45 unsigned int sysclk; 47 46 unsigned int rate_constraint_list[WM8523_NUM_RATES]; ··· 313 314 enum snd_soc_bias_level level) 314 315 { 315 316 struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec); 317 + u16 *reg_cache = codec->reg_cache; 316 318 int ret, i; 317 319 318 320 switch (level) { ··· 344 344 /* Sync back default/cached values */ 345 345 for (i = WM8523_AIF_CTRL1; 346 346 i < WM8523_MAX_REGISTER; i++) 347 - snd_soc_write(codec, i, wm8523->reg_cache[i]); 347 + snd_soc_write(codec, i, reg_cache[i]); 348 348 349 349 350 350 msleep(100); ··· 414 414 static int wm8523_probe(struct snd_soc_codec *codec) 415 415 { 416 416 struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec); 417 + u16 *reg_cache = codec->reg_cache; 417 418 int ret, i; 418 419 419 420 codec->hw_write = (hw_write_t)i2c_master_send; ··· 471 470 } 472 471 473 472 /* Change some default settings - latch VU and enable ZC */ 474 - wm8523->reg_cache[WM8523_DAC_GAINR] |= WM8523_DACR_VU; 475 - wm8523->reg_cache[WM8523_DAC_CTRL3] |= WM8523_ZC; 473 + reg_cache[WM8523_DAC_GAINR] |= WM8523_DACR_VU; 474 + reg_cache[WM8523_DAC_CTRL3] |= WM8523_ZC; 476 475 477 476 wm8523_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 478 477
+5 -5
sound/soc/codecs/wm8741.c
··· 41 41 /* codec private data */ 42 42 struct wm8741_priv { 43 43 enum snd_soc_control_type control_type; 44 - u16 reg_cache[WM8741_REGISTER_COUNT]; 45 44 struct regulator_bulk_data supplies[WM8741_NUM_SUPPLIES]; 46 45 unsigned int sysclk; 47 46 struct snd_pcm_hw_constraint_list *sysclk_constraints; ··· 421 422 static int wm8741_probe(struct snd_soc_codec *codec) 422 423 { 423 424 struct wm8741_priv *wm8741 = snd_soc_codec_get_drvdata(codec); 425 + u16 *reg_cache = codec->reg_cache; 424 426 int ret = 0; 425 427 426 428 ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8741->control_type); ··· 437 437 } 438 438 439 439 /* Change some default settings - latch VU */ 440 - wm8741->reg_cache[WM8741_DACLLSB_ATTENUATION] |= WM8741_UPDATELL; 441 - wm8741->reg_cache[WM8741_DACLMSB_ATTENUATION] |= WM8741_UPDATELM; 442 - wm8741->reg_cache[WM8741_DACRLSB_ATTENUATION] |= WM8741_UPDATERL; 443 - wm8741->reg_cache[WM8741_DACRLSB_ATTENUATION] |= WM8741_UPDATERM; 440 + reg_cache[WM8741_DACLLSB_ATTENUATION] |= WM8741_UPDATELL; 441 + reg_cache[WM8741_DACLMSB_ATTENUATION] |= WM8741_UPDATELM; 442 + reg_cache[WM8741_DACRLSB_ATTENUATION] |= WM8741_UPDATERL; 443 + reg_cache[WM8741_DACRLSB_ATTENUATION] |= WM8741_UPDATERM; 444 444 445 445 snd_soc_add_controls(codec, wm8741_snd_controls, 446 446 ARRAY_SIZE(wm8741_snd_controls));
+83 -143
sound/soc/codecs/wm8753.c
··· 65 65 * are using 2 wire for device control, so we cache them instead. 66 66 */ 67 67 static const u16 wm8753_reg[] = { 68 - 0x0008, 0x0000, 0x000a, 0x000a, 69 - 0x0033, 0x0000, 0x0007, 0x00ff, 70 - 0x00ff, 0x000f, 0x000f, 0x007b, 71 - 0x0000, 0x0032, 0x0000, 0x00c3, 72 - 0x00c3, 0x00c0, 0x0000, 0x0000, 68 + 0x0000, 0x0008, 0x0000, 0x000a, 69 + 0x000a, 0x0033, 0x0000, 0x0007, 70 + 0x00ff, 0x00ff, 0x000f, 0x000f, 71 + 0x007b, 0x0000, 0x0032, 0x0000, 72 + 0x00c3, 0x00c3, 0x00c0, 0x0000, 73 73 0x0000, 0x0000, 0x0000, 0x0000, 74 74 0x0000, 0x0000, 0x0000, 0x0000, 75 - 0x0000, 0x0000, 0x0000, 0x0055, 76 - 0x0005, 0x0050, 0x0055, 0x0050, 77 - 0x0055, 0x0050, 0x0055, 0x0079, 75 + 0x0000, 0x0000, 0x0000, 0x0000, 76 + 0x0055, 0x0005, 0x0050, 0x0055, 77 + 0x0050, 0x0055, 0x0050, 0x0055, 78 78 0x0079, 0x0079, 0x0079, 0x0079, 79 - 0x0000, 0x0000, 0x0000, 0x0000, 80 - 0x0097, 0x0097, 0x0000, 0x0004, 81 - 0x0000, 0x0083, 0x0024, 0x01ba, 82 - 0x0000, 0x0083, 0x0024, 0x01ba, 83 - 0x0000, 0x0000, 0x0000 79 + 0x0079, 0x0000, 0x0000, 0x0000, 80 + 0x0000, 0x0097, 0x0097, 0x0000, 81 + 0x0004, 0x0000, 0x0083, 0x0024, 82 + 0x01ba, 0x0000, 0x0083, 0x0024, 83 + 0x01ba, 0x0000, 0x0000, 0x0000 84 84 }; 85 85 86 86 /* codec private data */ ··· 88 88 enum snd_soc_control_type control_type; 89 89 unsigned int sysclk; 90 90 unsigned int pcmclk; 91 - u16 reg_cache[ARRAY_SIZE(wm8753_reg)]; 92 91 int dai_func; 93 92 }; 94 93 95 - /* 96 - * read wm8753 register cache 97 - */ 98 - static inline unsigned int wm8753_read_reg_cache(struct snd_soc_codec *codec, 99 - unsigned int reg) 100 - { 101 - u16 *cache = codec->reg_cache; 102 - if (reg < 1 || reg >= (ARRAY_SIZE(wm8753_reg) + 1)) 103 - return -1; 104 - return cache[reg - 1]; 105 - } 106 - 107 - /* 108 - * write wm8753 register cache 109 - */ 110 - static inline void wm8753_write_reg_cache(struct snd_soc_codec *codec, 111 - unsigned int reg, unsigned int value) 112 - { 113 - u16 *cache = codec->reg_cache; 114 - if (reg < 1 || reg >= 
(ARRAY_SIZE(wm8753_reg) + 1)) 115 - return; 116 - cache[reg - 1] = value; 117 - } 118 - 119 - /* 120 - * write to the WM8753 register space 121 - */ 122 - static int wm8753_write(struct snd_soc_codec *codec, unsigned int reg, 123 - unsigned int value) 124 - { 125 - u8 data[2]; 126 - 127 - /* data is 128 - * D15..D9 WM8753 register offset 129 - * D8...D0 register data 130 - */ 131 - data[0] = (reg << 1) | ((value >> 8) & 0x0001); 132 - data[1] = value & 0x00ff; 133 - 134 - wm8753_write_reg_cache(codec, reg, value); 135 - if (codec->hw_write(codec->control_data, data, 2) == 2) 136 - return 0; 137 - else 138 - return -EIO; 139 - } 140 - 141 - #define wm8753_reset(c) wm8753_write(c, WM8753_RESET, 0) 94 + #define wm8753_reset(c) snd_soc_write(c, WM8753_RESET, 0) 142 95 143 96 /* 144 97 * WM8753 Controls ··· 171 218 struct snd_ctl_elem_value *ucontrol) 172 219 { 173 220 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 174 - int mode = wm8753_read_reg_cache(codec, WM8753_IOCTL); 221 + int mode = snd_soc_read(codec, WM8753_IOCTL); 175 222 176 223 ucontrol->value.integer.value[0] = (mode & 0xc) >> 2; 177 224 return 0; ··· 181 228 struct snd_ctl_elem_value *ucontrol) 182 229 { 183 230 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 184 - int mode = wm8753_read_reg_cache(codec, WM8753_IOCTL); 231 + int mode = snd_soc_read(codec, WM8753_IOCTL); 185 232 struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec); 186 233 187 234 if (((mode & 0xc) >> 2) == ucontrol->value.integer.value[0]) ··· 691 738 if (pll_id == WM8753_PLL1) { 692 739 offset = 0; 693 740 enable = 0x10; 694 - reg = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0xffef; 741 + reg = snd_soc_read(codec, WM8753_CLOCK) & 0xffef; 695 742 } else { 696 743 offset = 4; 697 744 enable = 0x8; 698 - reg = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0xfff7; 745 + reg = snd_soc_read(codec, WM8753_CLOCK) & 0xfff7; 699 746 } 700 747 701 748 if (!freq_in || !freq_out) { 702 749 /* disable PLL */ 703 - 
wm8753_write(codec, WM8753_PLL1CTL1 + offset, 0x0026); 704 - wm8753_write(codec, WM8753_CLOCK, reg); 750 + snd_soc_write(codec, WM8753_PLL1CTL1 + offset, 0x0026); 751 + snd_soc_write(codec, WM8753_CLOCK, reg); 705 752 return 0; 706 753 } else { 707 754 u16 value = 0; ··· 712 759 /* set up N and K PLL divisor ratios */ 713 760 /* bits 8:5 = PLL_N, bits 3:0 = PLL_K[21:18] */ 714 761 value = (pll_div.n << 5) + ((pll_div.k & 0x3c0000) >> 18); 715 - wm8753_write(codec, WM8753_PLL1CTL2 + offset, value); 762 + snd_soc_write(codec, WM8753_PLL1CTL2 + offset, value); 716 763 717 764 /* bits 8:0 = PLL_K[17:9] */ 718 765 value = (pll_div.k & 0x03fe00) >> 9; 719 - wm8753_write(codec, WM8753_PLL1CTL3 + offset, value); 766 + snd_soc_write(codec, WM8753_PLL1CTL3 + offset, value); 720 767 721 768 /* bits 8:0 = PLL_K[8:0] */ 722 769 value = pll_div.k & 0x0001ff; 723 - wm8753_write(codec, WM8753_PLL1CTL4 + offset, value); 770 + snd_soc_write(codec, WM8753_PLL1CTL4 + offset, value); 724 771 725 772 /* set PLL as input and enable */ 726 - wm8753_write(codec, WM8753_PLL1CTL1 + offset, 0x0027 | 773 + snd_soc_write(codec, WM8753_PLL1CTL1 + offset, 0x0027 | 727 774 (pll_div.div2 << 3)); 728 - wm8753_write(codec, WM8753_CLOCK, reg | enable); 775 + snd_soc_write(codec, WM8753_CLOCK, reg | enable); 729 776 } 730 777 return 0; 731 778 } ··· 832 879 unsigned int fmt) 833 880 { 834 881 struct snd_soc_codec *codec = codec_dai->codec; 835 - u16 voice = wm8753_read_reg_cache(codec, WM8753_PCM) & 0x01ec; 882 + u16 voice = snd_soc_read(codec, WM8753_PCM) & 0x01ec; 836 883 837 884 /* interface format */ 838 885 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { ··· 854 901 return -EINVAL; 855 902 } 856 903 857 - wm8753_write(codec, WM8753_PCM, voice); 904 + snd_soc_write(codec, WM8753_PCM, voice); 858 905 return 0; 859 906 } 860 907 ··· 875 922 struct snd_soc_pcm_runtime *rtd = substream->private_data; 876 923 struct snd_soc_codec *codec = rtd->codec; 877 924 struct wm8753_priv *wm8753 = 
snd_soc_codec_get_drvdata(codec); 878 - u16 voice = wm8753_read_reg_cache(codec, WM8753_PCM) & 0x01f3; 879 - u16 srate = wm8753_read_reg_cache(codec, WM8753_SRATE1) & 0x017f; 925 + u16 voice = snd_soc_read(codec, WM8753_PCM) & 0x01f3; 926 + u16 srate = snd_soc_read(codec, WM8753_SRATE1) & 0x017f; 880 927 881 928 /* bit size */ 882 929 switch (params_format(params)) { ··· 896 943 /* sample rate */ 897 944 if (params_rate(params) * 384 == wm8753->pcmclk) 898 945 srate |= 0x80; 899 - wm8753_write(codec, WM8753_SRATE1, srate); 946 + snd_soc_write(codec, WM8753_SRATE1, srate); 900 947 901 - wm8753_write(codec, WM8753_PCM, voice); 948 + snd_soc_write(codec, WM8753_PCM, voice); 902 949 return 0; 903 950 } 904 951 ··· 911 958 struct snd_soc_codec *codec = codec_dai->codec; 912 959 u16 voice, ioctl; 913 960 914 - voice = wm8753_read_reg_cache(codec, WM8753_PCM) & 0x011f; 915 - ioctl = wm8753_read_reg_cache(codec, WM8753_IOCTL) & 0x015d; 961 + voice = snd_soc_read(codec, WM8753_PCM) & 0x011f; 962 + ioctl = snd_soc_read(codec, WM8753_IOCTL) & 0x015d; 916 963 917 964 /* set master/slave audio interface */ 918 965 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { ··· 966 1013 return -EINVAL; 967 1014 } 968 1015 969 - wm8753_write(codec, WM8753_PCM, voice); 970 - wm8753_write(codec, WM8753_IOCTL, ioctl); 1016 + snd_soc_write(codec, WM8753_PCM, voice); 1017 + snd_soc_write(codec, WM8753_IOCTL, ioctl); 971 1018 return 0; 972 1019 } 973 1020 ··· 979 1026 980 1027 switch (div_id) { 981 1028 case WM8753_PCMDIV: 982 - reg = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0x003f; 983 - wm8753_write(codec, WM8753_CLOCK, reg | div); 1029 + reg = snd_soc_read(codec, WM8753_CLOCK) & 0x003f; 1030 + snd_soc_write(codec, WM8753_CLOCK, reg | div); 984 1031 break; 985 1032 case WM8753_BCLKDIV: 986 - reg = wm8753_read_reg_cache(codec, WM8753_SRATE2) & 0x01c7; 987 - wm8753_write(codec, WM8753_SRATE2, reg | div); 1033 + reg = snd_soc_read(codec, WM8753_SRATE2) & 0x01c7; 1034 + snd_soc_write(codec, 
WM8753_SRATE2, reg | div); 988 1035 break; 989 1036 case WM8753_VXCLKDIV: 990 - reg = wm8753_read_reg_cache(codec, WM8753_SRATE2) & 0x003f; 991 - wm8753_write(codec, WM8753_SRATE2, reg | div); 1037 + reg = snd_soc_read(codec, WM8753_SRATE2) & 0x003f; 1038 + snd_soc_write(codec, WM8753_SRATE2, reg | div); 992 1039 break; 993 1040 default: 994 1041 return -EINVAL; ··· 1003 1050 unsigned int fmt) 1004 1051 { 1005 1052 struct snd_soc_codec *codec = codec_dai->codec; 1006 - u16 hifi = wm8753_read_reg_cache(codec, WM8753_HIFI) & 0x01e0; 1053 + u16 hifi = snd_soc_read(codec, WM8753_HIFI) & 0x01e0; 1007 1054 1008 1055 /* interface format */ 1009 1056 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { ··· 1025 1072 return -EINVAL; 1026 1073 } 1027 1074 1028 - wm8753_write(codec, WM8753_HIFI, hifi); 1075 + snd_soc_write(codec, WM8753_HIFI, hifi); 1029 1076 return 0; 1030 1077 } 1031 1078 ··· 1038 1085 struct snd_soc_codec *codec = codec_dai->codec; 1039 1086 u16 ioctl, hifi; 1040 1087 1041 - hifi = wm8753_read_reg_cache(codec, WM8753_HIFI) & 0x011f; 1042 - ioctl = wm8753_read_reg_cache(codec, WM8753_IOCTL) & 0x00ae; 1088 + hifi = snd_soc_read(codec, WM8753_HIFI) & 0x011f; 1089 + ioctl = snd_soc_read(codec, WM8753_IOCTL) & 0x00ae; 1043 1090 1044 1091 /* set master/slave audio interface */ 1045 1092 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { ··· 1093 1140 return -EINVAL; 1094 1141 } 1095 1142 1096 - wm8753_write(codec, WM8753_HIFI, hifi); 1097 - wm8753_write(codec, WM8753_IOCTL, ioctl); 1143 + snd_soc_write(codec, WM8753_HIFI, hifi); 1144 + snd_soc_write(codec, WM8753_IOCTL, ioctl); 1098 1145 return 0; 1099 1146 } 1100 1147 ··· 1115 1162 struct snd_soc_pcm_runtime *rtd = substream->private_data; 1116 1163 struct snd_soc_codec *codec = rtd->codec; 1117 1164 struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec); 1118 - u16 srate = wm8753_read_reg_cache(codec, WM8753_SRATE1) & 0x01c0; 1119 - u16 hifi = wm8753_read_reg_cache(codec, WM8753_HIFI) & 0x01f3; 1165 + u16 srate = 
snd_soc_read(codec, WM8753_SRATE1) & 0x01c0; 1166 + u16 hifi = snd_soc_read(codec, WM8753_HIFI) & 0x01f3; 1120 1167 int coeff; 1121 1168 1122 1169 /* is digital filter coefficient valid ? */ ··· 1125 1172 printk(KERN_ERR "wm8753 invalid MCLK or rate\n"); 1126 1173 return coeff; 1127 1174 } 1128 - wm8753_write(codec, WM8753_SRATE1, srate | (coeff_div[coeff].sr << 1) | 1175 + snd_soc_write(codec, WM8753_SRATE1, srate | (coeff_div[coeff].sr << 1) | 1129 1176 coeff_div[coeff].usb); 1130 1177 1131 1178 /* bit size */ ··· 1143 1190 break; 1144 1191 } 1145 1192 1146 - wm8753_write(codec, WM8753_HIFI, hifi); 1193 + snd_soc_write(codec, WM8753_HIFI, hifi); 1147 1194 return 0; 1148 1195 } 1149 1196 ··· 1154 1201 u16 clock; 1155 1202 1156 1203 /* set clk source as pcmclk */ 1157 - clock = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0xfffb; 1158 - wm8753_write(codec, WM8753_CLOCK, clock); 1204 + clock = snd_soc_read(codec, WM8753_CLOCK) & 0xfffb; 1205 + snd_soc_write(codec, WM8753_CLOCK, clock); 1159 1206 1160 1207 if (wm8753_vdac_adc_set_dai_fmt(codec_dai, fmt) < 0) 1161 1208 return -EINVAL; ··· 1177 1224 u16 clock; 1178 1225 1179 1226 /* set clk source as pcmclk */ 1180 - clock = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0xfffb; 1181 - wm8753_write(codec, WM8753_CLOCK, clock); 1227 + clock = snd_soc_read(codec, WM8753_CLOCK) & 0xfffb; 1228 + snd_soc_write(codec, WM8753_CLOCK, clock); 1182 1229 1183 1230 if (wm8753_vdac_adc_set_dai_fmt(codec_dai, fmt) < 0) 1184 1231 return -EINVAL; ··· 1192 1239 u16 clock; 1193 1240 1194 1241 /* set clk source as mclk */ 1195 - clock = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0xfffb; 1196 - wm8753_write(codec, WM8753_CLOCK, clock | 0x4); 1242 + clock = snd_soc_read(codec, WM8753_CLOCK) & 0xfffb; 1243 + snd_soc_write(codec, WM8753_CLOCK, clock | 0x4); 1197 1244 1198 1245 if (wm8753_hdac_set_dai_fmt(codec_dai, fmt) < 0) 1199 1246 return -EINVAL; ··· 1205 1252 static int wm8753_mute(struct snd_soc_dai *dai, int mute) 1206 1253 { 1207 
1254 struct snd_soc_codec *codec = dai->codec; 1208 - u16 mute_reg = wm8753_read_reg_cache(codec, WM8753_DAC) & 0xfff7; 1255 + u16 mute_reg = snd_soc_read(codec, WM8753_DAC) & 0xfff7; 1209 1256 struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec); 1210 1257 1211 1258 /* the digital mute covers the HiFi and Voice DAC's on the WM8753. 1212 1259 * make sure we check if they are not both active when we mute */ 1213 1260 if (mute && wm8753->dai_func == 1) { 1214 1261 if (!codec->active) 1215 - wm8753_write(codec, WM8753_DAC, mute_reg | 0x8); 1262 + snd_soc_write(codec, WM8753_DAC, mute_reg | 0x8); 1216 1263 } else { 1217 1264 if (mute) 1218 - wm8753_write(codec, WM8753_DAC, mute_reg | 0x8); 1265 + snd_soc_write(codec, WM8753_DAC, mute_reg | 0x8); 1219 1266 else 1220 - wm8753_write(codec, WM8753_DAC, mute_reg); 1267 + snd_soc_write(codec, WM8753_DAC, mute_reg); 1221 1268 } 1222 1269 1223 1270 return 0; ··· 1226 1273 static int wm8753_set_bias_level(struct snd_soc_codec *codec, 1227 1274 enum snd_soc_bias_level level) 1228 1275 { 1229 - u16 pwr_reg = wm8753_read_reg_cache(codec, WM8753_PWR1) & 0xfe3e; 1276 + u16 pwr_reg = snd_soc_read(codec, WM8753_PWR1) & 0xfe3e; 1230 1277 1231 1278 switch (level) { 1232 1279 case SND_SOC_BIAS_ON: 1233 1280 /* set vmid to 50k and unmute dac */ 1234 - wm8753_write(codec, WM8753_PWR1, pwr_reg | 0x00c0); 1281 + snd_soc_write(codec, WM8753_PWR1, pwr_reg | 0x00c0); 1235 1282 break; 1236 1283 case SND_SOC_BIAS_PREPARE: 1237 1284 /* set vmid to 5k for quick power up */ 1238 - wm8753_write(codec, WM8753_PWR1, pwr_reg | 0x01c1); 1285 + snd_soc_write(codec, WM8753_PWR1, pwr_reg | 0x01c1); 1239 1286 break; 1240 1287 case SND_SOC_BIAS_STANDBY: 1241 1288 /* mute dac and set vmid to 500k, enable VREF */ 1242 - wm8753_write(codec, WM8753_PWR1, pwr_reg | 0x0141); 1289 + snd_soc_write(codec, WM8753_PWR1, pwr_reg | 0x0141); 1243 1290 break; 1244 1291 case SND_SOC_BIAS_OFF: 1245 - wm8753_write(codec, WM8753_PWR1, 0x0001); 1292 + 
snd_soc_write(codec, WM8753_PWR1, 0x0001); 1246 1293 break; 1247 1294 } 1248 1295 codec->bias_level = level; ··· 1430 1477 else 1431 1478 dai->driver = &wm8753_all_dai[(wm8753->dai_func << 1) + 1]; 1432 1479 } 1433 - wm8753_write(codec, WM8753_IOCTL, wm8753->dai_func); 1480 + snd_soc_write(codec, WM8753_IOCTL, wm8753->dai_func); 1434 1481 } 1435 1482 1436 1483 static void wm8753_work(struct work_struct *work) ··· 1448 1495 1449 1496 static int wm8753_resume(struct snd_soc_codec *codec) 1450 1497 { 1498 + u16 *reg_cache = codec->reg_cache; 1451 1499 int i; 1452 - u8 data[2]; 1453 - u16 *cache = codec->reg_cache; 1454 1500 1455 1501 /* Sync reg_cache with the hardware */ 1456 - for (i = 0; i < ARRAY_SIZE(wm8753_reg); i++) { 1457 - if (i + 1 == WM8753_RESET) 1502 + for (i = 1; i < ARRAY_SIZE(wm8753_reg); i++) { 1503 + if (i == WM8753_RESET) 1458 1504 continue; 1459 1505 1460 1506 /* No point in writing hardware default values back */ 1461 - if (cache[i] == wm8753_reg[i]) 1507 + if (reg_cache[i] == wm8753_reg[i]) 1462 1508 continue; 1463 1509 1464 - data[0] = ((i + 1) << 1) | ((cache[i] >> 8) & 0x0001); 1465 - data[1] = cache[i] & 0x00ff; 1466 - codec->hw_write(codec->control_data, data, 2); 1510 + snd_soc_write(codec, i, reg_cache[i]); 1467 1511 } 1468 1512 1469 1513 wm8753_set_bias_level(codec, SND_SOC_BIAS_STANDBY); ··· 1498 1548 static int wm8753_probe(struct snd_soc_codec *codec) 1499 1549 { 1500 1550 struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec); 1501 - int ret = 0, reg; 1551 + int ret; 1502 1552 1503 1553 INIT_DELAYED_WORK(&codec->delayed_work, wm8753_work); 1504 1554 ··· 1523 1573 msecs_to_jiffies(caps_charge)); 1524 1574 1525 1575 /* set the update bits */ 1526 - reg = wm8753_read_reg_cache(codec, WM8753_LDAC); 1527 - wm8753_write(codec, WM8753_LDAC, reg | 0x0100); 1528 - reg = wm8753_read_reg_cache(codec, WM8753_RDAC); 1529 - wm8753_write(codec, WM8753_RDAC, reg | 0x0100); 1530 - reg = wm8753_read_reg_cache(codec, WM8753_LADC); 1531 - 
wm8753_write(codec, WM8753_LADC, reg | 0x0100); 1532 - reg = wm8753_read_reg_cache(codec, WM8753_RADC); 1533 - wm8753_write(codec, WM8753_RADC, reg | 0x0100); 1534 - reg = wm8753_read_reg_cache(codec, WM8753_LOUT1V); 1535 - wm8753_write(codec, WM8753_LOUT1V, reg | 0x0100); 1536 - reg = wm8753_read_reg_cache(codec, WM8753_ROUT1V); 1537 - wm8753_write(codec, WM8753_ROUT1V, reg | 0x0100); 1538 - reg = wm8753_read_reg_cache(codec, WM8753_LOUT2V); 1539 - wm8753_write(codec, WM8753_LOUT2V, reg | 0x0100); 1540 - reg = wm8753_read_reg_cache(codec, WM8753_ROUT2V); 1541 - wm8753_write(codec, WM8753_ROUT2V, reg | 0x0100); 1542 - reg = wm8753_read_reg_cache(codec, WM8753_LINVOL); 1543 - wm8753_write(codec, WM8753_LINVOL, reg | 0x0100); 1544 - reg = wm8753_read_reg_cache(codec, WM8753_RINVOL); 1545 - wm8753_write(codec, WM8753_RINVOL, reg | 0x0100); 1576 + snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100); 1577 + snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100); 1578 + snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100); 1579 + snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100); 1580 + snd_soc_update_bits(codec, WM8753_LOUT1V, 0x0100, 0x0100); 1581 + snd_soc_update_bits(codec, WM8753_ROUT1V, 0x0100, 0x0100); 1582 + snd_soc_update_bits(codec, WM8753_LOUT2V, 0x0100, 0x0100); 1583 + snd_soc_update_bits(codec, WM8753_ROUT2V, 0x0100, 0x0100); 1584 + snd_soc_update_bits(codec, WM8753_LINVOL, 0x0100, 0x0100); 1585 + snd_soc_update_bits(codec, WM8753_RINVOL, 0x0100, 0x0100); 1546 1586 1547 1587 snd_soc_add_controls(codec, wm8753_snd_controls, 1548 1588 ARRAY_SIZE(wm8753_snd_controls));
+18 -19
sound/soc/codecs/wm8904.c
··· 50 50 /* codec private data */ 51 51 struct wm8904_priv { 52 52 53 - u16 reg_cache[WM8904_MAX_REGISTER + 1]; 54 - 55 53 enum wm8904_type devtype; 56 54 void *control_data; 57 55 ··· 2092 2094 2093 2095 static void wm8904_sync_cache(struct snd_soc_codec *codec) 2094 2096 { 2095 - struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); 2097 + u16 *reg_cache = codec->reg_cache; 2096 2098 int i; 2097 2099 2098 2100 if (!codec->cache_sync) ··· 2103 2105 /* Sync back cached values if they're different from the 2104 2106 * hardware default. 2105 2107 */ 2106 - for (i = 1; i < ARRAY_SIZE(wm8904->reg_cache); i++) { 2108 + for (i = 1; i < codec->driver->reg_cache_size; i++) { 2107 2109 if (!wm8904_access[i].writable) 2108 2110 continue; 2109 2111 2110 - if (wm8904->reg_cache[i] == wm8904_reg[i]) 2112 + if (reg_cache[i] == wm8904_reg[i]) 2111 2113 continue; 2112 2114 2113 - snd_soc_write(codec, i, wm8904->reg_cache[i]); 2115 + snd_soc_write(codec, i, reg_cache[i]); 2114 2116 } 2115 2117 2116 2118 codec->cache_sync = 0; ··· 2369 2371 { 2370 2372 struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); 2371 2373 struct wm8904_pdata *pdata = wm8904->pdata; 2374 + u16 *reg_cache = codec->reg_cache; 2372 2375 int ret, i; 2373 2376 2374 2377 codec->cache_sync = 1; ··· 2436 2437 } 2437 2438 2438 2439 /* Change some default settings - latch VU and enable ZC */ 2439 - wm8904->reg_cache[WM8904_ADC_DIGITAL_VOLUME_LEFT] |= WM8904_ADC_VU; 2440 - wm8904->reg_cache[WM8904_ADC_DIGITAL_VOLUME_RIGHT] |= WM8904_ADC_VU; 2441 - wm8904->reg_cache[WM8904_DAC_DIGITAL_VOLUME_LEFT] |= WM8904_DAC_VU; 2442 - wm8904->reg_cache[WM8904_DAC_DIGITAL_VOLUME_RIGHT] |= WM8904_DAC_VU; 2443 - wm8904->reg_cache[WM8904_ANALOGUE_OUT1_LEFT] |= WM8904_HPOUT_VU | 2440 + reg_cache[WM8904_ADC_DIGITAL_VOLUME_LEFT] |= WM8904_ADC_VU; 2441 + reg_cache[WM8904_ADC_DIGITAL_VOLUME_RIGHT] |= WM8904_ADC_VU; 2442 + reg_cache[WM8904_DAC_DIGITAL_VOLUME_LEFT] |= WM8904_DAC_VU; 2443 + 
reg_cache[WM8904_DAC_DIGITAL_VOLUME_RIGHT] |= WM8904_DAC_VU; 2444 + reg_cache[WM8904_ANALOGUE_OUT1_LEFT] |= WM8904_HPOUT_VU | 2444 2445 WM8904_HPOUTLZC; 2445 - wm8904->reg_cache[WM8904_ANALOGUE_OUT1_RIGHT] |= WM8904_HPOUT_VU | 2446 + reg_cache[WM8904_ANALOGUE_OUT1_RIGHT] |= WM8904_HPOUT_VU | 2446 2447 WM8904_HPOUTRZC; 2447 - wm8904->reg_cache[WM8904_ANALOGUE_OUT2_LEFT] |= WM8904_LINEOUT_VU | 2448 + reg_cache[WM8904_ANALOGUE_OUT2_LEFT] |= WM8904_LINEOUT_VU | 2448 2449 WM8904_LINEOUTLZC; 2449 - wm8904->reg_cache[WM8904_ANALOGUE_OUT2_RIGHT] |= WM8904_LINEOUT_VU | 2450 + reg_cache[WM8904_ANALOGUE_OUT2_RIGHT] |= WM8904_LINEOUT_VU | 2450 2451 WM8904_LINEOUTRZC; 2451 - wm8904->reg_cache[WM8904_CLOCK_RATES_0] &= ~WM8904_SR_MODE; 2452 + reg_cache[WM8904_CLOCK_RATES_0] &= ~WM8904_SR_MODE; 2452 2453 2453 2454 /* Apply configuration from the platform data. */ 2454 2455 if (wm8904->pdata) { ··· 2456 2457 if (!pdata->gpio_cfg[i]) 2457 2458 continue; 2458 2459 2459 - wm8904->reg_cache[WM8904_GPIO_CONTROL_1 + i] 2460 + reg_cache[WM8904_GPIO_CONTROL_1 + i] 2460 2461 = pdata->gpio_cfg[i] & 0xffff; 2461 2462 } 2462 2463 2463 2464 /* Zero is the default value for these anyway */ 2464 2465 for (i = 0; i < WM8904_MIC_REGS; i++) 2465 - wm8904->reg_cache[WM8904_MIC_BIAS_CONTROL_0 + i] 2466 + reg_cache[WM8904_MIC_BIAS_CONTROL_0 + i] 2466 2467 = pdata->mic_cfg[i]; 2467 2468 } 2468 2469 2469 2470 /* Set Class W by default - this will be managed by the Class 2470 2471 * G widget at runtime where bypass paths are available. 2471 2472 */ 2472 - wm8904->reg_cache[WM8904_CLASS_W_0] |= WM8904_CP_DYN_PWR; 2473 + reg_cache[WM8904_CLASS_W_0] |= WM8904_CP_DYN_PWR; 2473 2474 2474 2475 /* Use normal bias source */ 2475 - wm8904->reg_cache[WM8904_BIAS_CONTROL_0] &= ~WM8904_POBCTRL; 2476 + reg_cache[WM8904_BIAS_CONTROL_0] &= ~WM8904_POBCTRL; 2476 2477 2477 2478 wm8904_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 2478 2479
+1
sound/soc/codecs/wm8940.c
··· 768 768 769 769 i2c_set_clientdata(i2c, wm8940); 770 770 wm8940->control_data = i2c; 771 + wm8940->control_type = SND_SOC_I2C; 771 772 772 773 ret = snd_soc_register_codec(&i2c->dev, 773 774 &soc_codec_dev_wm8940, &wm8940_dai, 1);
+16 -15
sound/soc/codecs/wm8955.c
··· 42 42 struct wm8955_priv { 43 43 enum snd_soc_control_type control_type; 44 44 45 - u16 reg_cache[WM8955_MAX_REGISTER + 1]; 46 - 47 45 unsigned int mclk_rate; 48 46 49 47 int deemph; ··· 766 768 enum snd_soc_bias_level level) 767 769 { 768 770 struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); 771 + u16 *reg_cache = codec->reg_cache; 769 772 int ret, i; 770 773 771 774 switch (level) { ··· 799 800 /* Sync back cached values if they're 800 801 * different from the hardware default. 801 802 */ 802 - for (i = 0; i < ARRAY_SIZE(wm8955->reg_cache); i++) { 803 + for (i = 0; i < codec->driver->reg_cache_size; i++) { 803 804 if (i == WM8955_RESET) 804 805 continue; 805 806 806 - if (wm8955->reg_cache[i] == wm8955_reg[i]) 807 + if (reg_cache[i] == wm8955_reg[i]) 807 808 continue; 808 809 809 - snd_soc_write(codec, i, wm8955->reg_cache[i]); 810 + snd_soc_write(codec, i, reg_cache[i]); 810 811 } 811 812 812 813 /* Enable VREF and VMID */ ··· 901 902 { 902 903 struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); 903 904 struct wm8955_pdata *pdata = dev_get_platdata(codec->dev); 905 + u16 *reg_cache = codec->reg_cache; 904 906 int ret, i; 905 907 906 908 ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8955->control_type); ··· 934 934 } 935 935 936 936 /* Change some default settings - latch VU and enable ZC */ 937 - wm8955->reg_cache[WM8955_LEFT_DAC_VOLUME] |= WM8955_LDVU; 938 - wm8955->reg_cache[WM8955_RIGHT_DAC_VOLUME] |= WM8955_RDVU; 939 - wm8955->reg_cache[WM8955_LOUT1_VOLUME] |= WM8955_LO1VU | WM8955_LO1ZC; 940 - wm8955->reg_cache[WM8955_ROUT1_VOLUME] |= WM8955_RO1VU | WM8955_RO1ZC; 941 - wm8955->reg_cache[WM8955_LOUT2_VOLUME] |= WM8955_LO2VU | WM8955_LO2ZC; 942 - wm8955->reg_cache[WM8955_ROUT2_VOLUME] |= WM8955_RO2VU | WM8955_RO2ZC; 943 - wm8955->reg_cache[WM8955_MONOOUT_VOLUME] |= WM8955_MOZC; 937 + reg_cache[WM8955_LEFT_DAC_VOLUME] |= WM8955_LDVU; 938 + reg_cache[WM8955_RIGHT_DAC_VOLUME] |= WM8955_RDVU; 939 + reg_cache[WM8955_LOUT1_VOLUME] |= 
WM8955_LO1VU | WM8955_LO1ZC; 940 + reg_cache[WM8955_ROUT1_VOLUME] |= WM8955_RO1VU | WM8955_RO1ZC; 941 + reg_cache[WM8955_LOUT2_VOLUME] |= WM8955_LO2VU | WM8955_LO2ZC; 942 + reg_cache[WM8955_ROUT2_VOLUME] |= WM8955_RO2VU | WM8955_RO2ZC; 943 + reg_cache[WM8955_MONOOUT_VOLUME] |= WM8955_MOZC; 944 944 945 945 /* Also enable adaptive bass boost by default */ 946 - wm8955->reg_cache[WM8955_BASS_CONTROL] |= WM8955_BB; 946 + reg_cache[WM8955_BASS_CONTROL] |= WM8955_BB; 947 947 948 948 /* Set platform data values */ 949 949 if (pdata) { 950 950 if (pdata->out2_speaker) 951 - wm8955->reg_cache[WM8955_ADDITIONAL_CONTROL_2] 951 + reg_cache[WM8955_ADDITIONAL_CONTROL_2] 952 952 |= WM8955_ROUT2INV; 953 953 954 954 if (pdata->monoin_diff) 955 - wm8955->reg_cache[WM8955_MONO_OUT_MIX_1] 955 + reg_cache[WM8955_MONO_OUT_MIX_1] 956 956 |= WM8955_DMEN; 957 957 } 958 958 ··· 1003 1003 return -ENOMEM; 1004 1004 1005 1005 i2c_set_clientdata(i2c, wm8955); 1006 + wm8955->control_type = SND_SOC_I2C; 1006 1007 1007 1008 ret = snd_soc_register_codec(&i2c->dev, 1008 1009 &soc_codec_dev_wm8955, &wm8955_dai, 1);
+1
sound/soc/codecs/wm8960.c
··· 1013 1013 return -ENOMEM; 1014 1014 1015 1015 i2c_set_clientdata(i2c, wm8960); 1016 + wm8960->control_type = SND_SOC_I2C; 1016 1017 wm8960->control_data = i2c; 1017 1018 1018 1019 ret = snd_soc_register_codec(&i2c->dev,
+20 -25
sound/soc/codecs/wm8962.c
··· 52 52 struct wm8962_priv { 53 53 struct snd_soc_codec *codec; 54 54 55 - u16 reg_cache[WM8962_MAX_REGISTER + 1]; 56 - 57 55 int sysclk; 58 56 int sysclk_rate; 59 57 ··· 1989 1991 struct snd_ctl_elem_value *ucontrol) 1990 1992 { 1991 1993 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 1992 - struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec); 1993 - u16 *reg_cache = wm8962->reg_cache; 1994 + u16 *reg_cache = codec->reg_cache; 1994 1995 int ret; 1995 1996 1996 1997 /* Apply the update (if any) */ ··· 2017 2020 struct snd_ctl_elem_value *ucontrol) 2018 2021 { 2019 2022 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 2020 - struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec); 2021 - u16 *reg_cache = wm8962->reg_cache; 2023 + u16 *reg_cache = codec->reg_cache; 2022 2024 int ret; 2023 2025 2024 2026 /* Apply the update (if any) */ ··· 2325 2329 struct snd_kcontrol *kcontrol, int event) 2326 2330 { 2327 2331 struct snd_soc_codec *codec = w->codec; 2328 - struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec); 2329 - u16 *reg_cache = wm8962->reg_cache; 2332 + u16 *reg_cache = codec->reg_cache; 2330 2333 int reg; 2331 2334 2332 2335 switch (w->shift) { ··· 2714 2719 2715 2720 static void wm8962_sync_cache(struct snd_soc_codec *codec) 2716 2721 { 2717 - struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec); 2722 + u16 *reg_cache = codec->reg_cache; 2718 2723 int i; 2719 2724 2720 2725 if (!codec->cache_sync) ··· 2727 2732 /* Sync back cached values if they're different from the 2728 2733 * hardware default. 
2729 2734 */ 2730 - for (i = 1; i < ARRAY_SIZE(wm8962->reg_cache); i++) { 2735 + for (i = 1; i < codec->driver->reg_cache_size; i++) { 2731 2736 if (i == WM8962_SOFTWARE_RESET) 2732 2737 continue; 2733 - if (wm8962->reg_cache[i] == wm8962_reg[i]) 2738 + if (reg_cache[i] == wm8962_reg[i]) 2734 2739 continue; 2735 2740 2736 - snd_soc_write(codec, i, wm8962->reg_cache[i]); 2741 + snd_soc_write(codec, i, reg_cache[i]); 2737 2742 } 2738 2743 2739 2744 codec->cache_sync = 0; ··· 3401 3406 #ifdef CONFIG_PM 3402 3407 static int wm8962_resume(struct snd_soc_codec *codec) 3403 3408 { 3404 - struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec); 3405 3409 u16 *reg_cache = codec->reg_cache; 3406 3410 int i; 3407 3411 3408 3412 /* Restore the registers */ 3409 - for (i = 1; i < ARRAY_SIZE(wm8962->reg_cache); i++) { 3413 + for (i = 1; i < codec->driver->reg_cache_size; i++) { 3410 3414 switch (i) { 3411 3415 case WM8962_SOFTWARE_RESET: 3412 3416 continue; ··· 3699 3705 struct wm8962_pdata *pdata = dev_get_platdata(codec->dev); 3700 3706 struct i2c_client *i2c = container_of(codec->dev, struct i2c_client, 3701 3707 dev); 3708 + u16 *reg_cache = codec->reg_cache; 3702 3709 int i, trigger, irq_pol; 3703 3710 3704 3711 wm8962->codec = codec; ··· 3799 3804 3800 3805 /* Put the speakers into mono mode? 
*/ 3801 3806 if (pdata->spk_mono) 3802 - wm8962->reg_cache[WM8962_CLASS_D_CONTROL_2] 3807 + reg_cache[WM8962_CLASS_D_CONTROL_2] 3803 3808 |= WM8962_SPK_MONO; 3804 3809 3805 3810 /* Micbias setup, detection enable and detection ··· 3814 3819 } 3815 3820 3816 3821 /* Latch volume update bits */ 3817 - wm8962->reg_cache[WM8962_LEFT_INPUT_VOLUME] |= WM8962_IN_VU; 3818 - wm8962->reg_cache[WM8962_RIGHT_INPUT_VOLUME] |= WM8962_IN_VU; 3819 - wm8962->reg_cache[WM8962_LEFT_ADC_VOLUME] |= WM8962_ADC_VU; 3820 - wm8962->reg_cache[WM8962_RIGHT_ADC_VOLUME] |= WM8962_ADC_VU; 3821 - wm8962->reg_cache[WM8962_LEFT_DAC_VOLUME] |= WM8962_DAC_VU; 3822 - wm8962->reg_cache[WM8962_RIGHT_DAC_VOLUME] |= WM8962_DAC_VU; 3823 - wm8962->reg_cache[WM8962_SPKOUTL_VOLUME] |= WM8962_SPKOUT_VU; 3824 - wm8962->reg_cache[WM8962_SPKOUTR_VOLUME] |= WM8962_SPKOUT_VU; 3825 - wm8962->reg_cache[WM8962_HPOUTL_VOLUME] |= WM8962_HPOUT_VU; 3826 - wm8962->reg_cache[WM8962_HPOUTR_VOLUME] |= WM8962_HPOUT_VU; 3822 + reg_cache[WM8962_LEFT_INPUT_VOLUME] |= WM8962_IN_VU; 3823 + reg_cache[WM8962_RIGHT_INPUT_VOLUME] |= WM8962_IN_VU; 3824 + reg_cache[WM8962_LEFT_ADC_VOLUME] |= WM8962_ADC_VU; 3825 + reg_cache[WM8962_RIGHT_ADC_VOLUME] |= WM8962_ADC_VU; 3826 + reg_cache[WM8962_LEFT_DAC_VOLUME] |= WM8962_DAC_VU; 3827 + reg_cache[WM8962_RIGHT_DAC_VOLUME] |= WM8962_DAC_VU; 3828 + reg_cache[WM8962_SPKOUTL_VOLUME] |= WM8962_SPKOUT_VU; 3829 + reg_cache[WM8962_SPKOUTR_VOLUME] |= WM8962_SPKOUT_VU; 3830 + reg_cache[WM8962_HPOUTL_VOLUME] |= WM8962_HPOUT_VU; 3831 + reg_cache[WM8962_HPOUTR_VOLUME] |= WM8962_HPOUT_VU; 3827 3832 3828 3833 wm8962_add_widgets(codec); 3829 3834
+1
sound/soc/codecs/wm8971.c
··· 718 718 if (wm8971 == NULL) 719 719 return -ENOMEM; 720 720 721 + wm8971->control_type = SND_SOC_I2C; 721 722 i2c_set_clientdata(i2c, wm8971); 722 723 723 724 ret = snd_soc_register_codec(&i2c->dev,
+1
sound/soc/codecs/wm9081.c
··· 1335 1335 return -ENOMEM; 1336 1336 1337 1337 i2c_set_clientdata(i2c, wm9081); 1338 + wm9081->control_type = SND_SOC_I2C; 1338 1339 wm9081->control_data = i2c; 1339 1340 1340 1341 ret = snd_soc_register_codec(&i2c->dev,
+9 -9
sound/soc/codecs/wm9090.c
··· 141 141 /* This struct is used to save the context */ 142 142 struct wm9090_priv { 143 143 struct mutex mutex; 144 - u16 reg_cache[WM9090_MAX_REGISTER + 1]; 145 144 struct wm9090_platform_data pdata; 146 145 void *control_data; 147 146 }; ··· 551 552 static int wm9090_probe(struct snd_soc_codec *codec) 552 553 { 553 554 struct wm9090_priv *wm9090 = snd_soc_codec_get_drvdata(codec); 555 + u16 *reg_cache = codec->reg_cache; 554 556 int ret; 555 557 556 558 codec->control_data = wm9090->control_data; ··· 576 576 /* Configure some defaults; they will be written out when we 577 577 * bring the bias up. 578 578 */ 579 - wm9090->reg_cache[WM9090_IN1_LINE_INPUT_A_VOLUME] |= WM9090_IN1_VU 579 + reg_cache[WM9090_IN1_LINE_INPUT_A_VOLUME] |= WM9090_IN1_VU 580 580 | WM9090_IN1A_ZC; 581 - wm9090->reg_cache[WM9090_IN1_LINE_INPUT_B_VOLUME] |= WM9090_IN1_VU 581 + reg_cache[WM9090_IN1_LINE_INPUT_B_VOLUME] |= WM9090_IN1_VU 582 582 | WM9090_IN1B_ZC; 583 - wm9090->reg_cache[WM9090_IN2_LINE_INPUT_A_VOLUME] |= WM9090_IN2_VU 583 + reg_cache[WM9090_IN2_LINE_INPUT_A_VOLUME] |= WM9090_IN2_VU 584 584 | WM9090_IN2A_ZC; 585 - wm9090->reg_cache[WM9090_IN2_LINE_INPUT_B_VOLUME] |= WM9090_IN2_VU 585 + reg_cache[WM9090_IN2_LINE_INPUT_B_VOLUME] |= WM9090_IN2_VU 586 586 | WM9090_IN2B_ZC; 587 - wm9090->reg_cache[WM9090_SPEAKER_VOLUME_LEFT] |= 587 + reg_cache[WM9090_SPEAKER_VOLUME_LEFT] |= 588 588 WM9090_SPKOUT_VU | WM9090_SPKOUTL_ZC; 589 - wm9090->reg_cache[WM9090_LEFT_OUTPUT_VOLUME] |= 589 + reg_cache[WM9090_LEFT_OUTPUT_VOLUME] |= 590 590 WM9090_HPOUT1_VU | WM9090_HPOUT1L_ZC; 591 - wm9090->reg_cache[WM9090_RIGHT_OUTPUT_VOLUME] |= 591 + reg_cache[WM9090_RIGHT_OUTPUT_VOLUME] |= 592 592 WM9090_HPOUT1_VU | WM9090_HPOUT1R_ZC; 593 593 594 - wm9090->reg_cache[WM9090_CLOCKING_1] |= WM9090_TOCLK_ENA; 594 + reg_cache[WM9090_CLOCKING_1] |= WM9090_TOCLK_ENA; 595 595 596 596 wm9090_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 597 597
+1 -2
tools/perf/builtin-buildid-list.c
··· 36 36 37 37 static int __cmd_buildid_list(void) 38 38 { 39 - int err = -1; 40 39 struct perf_session *session; 41 40 42 41 session = perf_session__new(input_name, O_RDONLY, force, false); ··· 48 49 perf_session__fprintf_dsos_buildid(session, stdout, with_hits); 49 50 50 51 perf_session__delete(session); 51 - return err; 52 + return 0; 52 53 } 53 54 54 55 int cmd_buildid_list(int argc, const char **argv, const char *prefix __used)
+5
tools/perf/builtin-probe.c
··· 249 249 !params.show_lines)) 250 250 usage_with_options(probe_usage, options); 251 251 252 + /* 253 + * Only consider the user's kernel image path if given. 254 + */ 255 + symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL); 256 + 252 257 if (params.list_events) { 253 258 if (params.mod_events) { 254 259 pr_err(" Error: Don't use --list with --add/--del.\n");
+6 -4
tools/perf/util/header.c
··· 265 265 const char *name, bool is_kallsyms) 266 266 { 267 267 const size_t size = PATH_MAX; 268 - char *filename = malloc(size), 268 + char *realname = realpath(name, NULL), 269 + *filename = malloc(size), 269 270 *linkname = malloc(size), *targetname; 270 271 int len, err = -1; 271 272 272 - if (filename == NULL || linkname == NULL) 273 + if (realname == NULL || filename == NULL || linkname == NULL) 273 274 goto out_free; 274 275 275 276 len = snprintf(filename, size, "%s%s%s", 276 - debugdir, is_kallsyms ? "/" : "", name); 277 + debugdir, is_kallsyms ? "/" : "", realname); 277 278 if (mkdir_p(filename, 0755)) 278 279 goto out_free; 279 280 ··· 284 283 if (is_kallsyms) { 285 284 if (copyfile("/proc/kallsyms", filename)) 286 285 goto out_free; 287 - } else if (link(name, filename) && copyfile(name, filename)) 286 + } else if (link(realname, filename) && copyfile(name, filename)) 288 287 goto out_free; 289 288 } 290 289 ··· 301 300 if (symlink(targetname, linkname) == 0) 302 301 err = 0; 303 302 out_free: 303 + free(realname); 304 304 free(filename); 305 305 free(linkname); 306 306 return err;
+1 -1
tools/perf/util/hist.c
··· 356 356 357 357 static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, 358 358 int depth, int depth_mask, int period, 359 - u64 total_samples, int hits, 359 + u64 total_samples, u64 hits, 360 360 int left_margin) 361 361 { 362 362 int i;
+12 -3
tools/perf/util/probe-event.c
··· 114 114 const char *kernel_get_module_path(const char *module) 115 115 { 116 116 struct dso *dso; 117 + struct map *map; 118 + const char *vmlinux_name; 117 119 118 120 if (module) { 119 121 list_for_each_entry(dso, &machine.kernel_dsos, node) { ··· 125 123 } 126 124 pr_debug("Failed to find module %s.\n", module); 127 125 return NULL; 126 + } 127 + 128 + map = machine.vmlinux_maps[MAP__FUNCTION]; 129 + dso = map->dso; 130 + 131 + vmlinux_name = symbol_conf.vmlinux_name; 132 + if (vmlinux_name) { 133 + if (dso__load_vmlinux(dso, map, vmlinux_name, NULL) <= 0) 134 + return NULL; 128 135 } else { 129 - dso = machine.vmlinux_maps[MAP__FUNCTION]->dso; 130 - if (dso__load_vmlinux_path(dso, 131 - machine.vmlinux_maps[MAP__FUNCTION], NULL) < 0) { 136 + if (dso__load_vmlinux_path(dso, map, NULL) <= 0) { 132 137 pr_debug("Failed to load kernel map.\n"); 133 138 return NULL; 134 139 }
+55 -30
tools/perf/util/probe-finder.c
··· 117 117 } 118 118 119 119 /* Dwarf FL wrappers */ 120 - 121 - static int __linux_kernel_find_elf(Dwfl_Module *mod, 122 - void **userdata, 123 - const char *module_name, 124 - Dwarf_Addr base, 125 - char **file_name, Elf **elfp) 126 - { 127 - int fd; 128 - const char *path = kernel_get_module_path(module_name); 129 - 130 - if (path) { 131 - fd = open(path, O_RDONLY); 132 - if (fd >= 0) { 133 - *file_name = strdup(path); 134 - return fd; 135 - } 136 - } 137 - /* If failed, try to call standard method */ 138 - return dwfl_linux_kernel_find_elf(mod, userdata, module_name, base, 139 - file_name, elfp); 140 - } 141 - 142 120 static char *debuginfo_path; /* Currently dummy */ 143 121 144 122 static const Dwfl_Callbacks offline_callbacks = { ··· 127 149 128 150 /* We use this table for core files too. */ 129 151 .find_elf = dwfl_build_id_find_elf, 130 - }; 131 - 132 - static const Dwfl_Callbacks kernel_callbacks = { 133 - .find_debuginfo = dwfl_standard_find_debuginfo, 134 - .debuginfo_path = &debuginfo_path, 135 - 136 - .find_elf = __linux_kernel_find_elf, 137 - .section_address = dwfl_linux_kernel_module_section_address, 138 152 }; 139 153 140 154 /* Get a Dwarf from offline image */ ··· 155 185 return dbg; 156 186 } 157 187 188 + #if _ELFUTILS_PREREQ(0, 148) 189 + /* This method is buggy if elfutils is older than 0.148 */ 190 + static int __linux_kernel_find_elf(Dwfl_Module *mod, 191 + void **userdata, 192 + const char *module_name, 193 + Dwarf_Addr base, 194 + char **file_name, Elf **elfp) 195 + { 196 + int fd; 197 + const char *path = kernel_get_module_path(module_name); 198 + 199 + pr_debug2("Use file %s for %s\n", path, module_name); 200 + if (path) { 201 + fd = open(path, O_RDONLY); 202 + if (fd >= 0) { 203 + *file_name = strdup(path); 204 + return fd; 205 + } 206 + } 207 + /* If failed, try to call standard method */ 208 + return dwfl_linux_kernel_find_elf(mod, userdata, module_name, base, 209 + file_name, elfp); 210 + } 211 + 212 + static const Dwfl_Callbacks 
kernel_callbacks = { 213 + .find_debuginfo = dwfl_standard_find_debuginfo, 214 + .debuginfo_path = &debuginfo_path, 215 + 216 + .find_elf = __linux_kernel_find_elf, 217 + .section_address = dwfl_linux_kernel_module_section_address, 218 + }; 219 + 158 220 /* Get a Dwarf from live kernel image */ 159 221 static Dwarf *dwfl_init_live_kernel_dwarf(Dwarf_Addr addr, Dwfl **dwflp, 160 222 Dwarf_Addr *bias) ··· 207 205 dbg = dwfl_addrdwarf(*dwflp, addr, bias); 208 206 /* Here, check whether we could get a real dwarf */ 209 207 if (!dbg) { 208 + pr_debug("Failed to find kernel dwarf at %lx\n", 209 + (unsigned long)addr); 210 210 dwfl_end(*dwflp); 211 211 *dwflp = NULL; 212 212 } 213 213 return dbg; 214 214 } 215 + #else 216 + /* With older elfutils, this just support kernel module... */ 217 + static Dwarf *dwfl_init_live_kernel_dwarf(Dwarf_Addr addr __used, Dwfl **dwflp, 218 + Dwarf_Addr *bias) 219 + { 220 + int fd; 221 + const char *path = kernel_get_module_path("kernel"); 222 + 223 + if (!path) { 224 + pr_err("Failed to find vmlinux path\n"); 225 + return NULL; 226 + } 227 + 228 + pr_debug2("Use file %s for debuginfo\n", path); 229 + fd = open(path, O_RDONLY); 230 + if (fd < 0) 231 + return NULL; 232 + 233 + return dwfl_init_offline_dwarf(fd, dwflp, bias); 234 + } 235 + #endif 215 236 216 237 /* Dwarf wrappers */ 217 238
+1 -1
tools/perf/util/string.c
··· 259 259 if (!*pat) /* Tail wild card matches all */ 260 260 return true; 261 261 while (*str) 262 - if (strglobmatch(str++, pat)) 262 + if (__match_glob(str++, pat, ignore_space)) 263 263 return true; 264 264 } 265 265 return !*str && !*pat;
+2 -2
tools/perf/util/symbol.c
··· 1780 1780 return -1; 1781 1781 } 1782 1782 1783 - static int dso__load_vmlinux(struct dso *self, struct map *map, 1784 - const char *vmlinux, symbol_filter_t filter) 1783 + int dso__load_vmlinux(struct dso *self, struct map *map, 1784 + const char *vmlinux, symbol_filter_t filter) 1785 1785 { 1786 1786 int err = -1, fd; 1787 1787
+2
tools/perf/util/symbol.h
··· 166 166 struct dso *__dsos__findnew(struct list_head *head, const char *name); 167 167 168 168 int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); 169 + int dso__load_vmlinux(struct dso *self, struct map *map, 170 + const char *vmlinux, symbol_filter_t filter); 169 171 int dso__load_vmlinux_path(struct dso *self, struct map *map, 170 172 symbol_filter_t filter); 171 173 int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map,