Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge commit 'nfs-for-3.3-4' into nfs-for-next

Conflicts:
fs/nfs/nfs4proc.c

Back-merge of the upstream kernel in order to fix a conflict with the
slotid type conversion and implementation id patches...

+818 -800
+10 -2
Documentation/DocBook/device-drivers.tmpl
··· 102 102 !Iinclude/linux/device.h 103 103 </sect1> 104 104 <sect1><title>Device Drivers Base</title> 105 + !Idrivers/base/init.c 105 106 !Edrivers/base/driver.c 106 107 !Edrivers/base/core.c 108 + !Edrivers/base/syscore.c 107 109 !Edrivers/base/class.c 110 + !Idrivers/base/node.c 108 111 !Edrivers/base/firmware_class.c 109 112 !Edrivers/base/transport_class.c 110 113 <!-- Cannot be included, because ··· 116 113 exceed allowed 44 characters maximum 117 114 X!Edrivers/base/attribute_container.c 118 115 --> 119 - !Edrivers/base/sys.c 116 + !Edrivers/base/dd.c 120 117 <!-- 121 118 X!Edrivers/base/interface.c 122 119 --> 123 120 !Iinclude/linux/platform_device.h 124 121 !Edrivers/base/platform.c 125 122 !Edrivers/base/bus.c 123 + </sect1> 124 + <sect1><title>Device Drivers DMA Management</title> 125 + !Edrivers/base/dma-buf.c 126 + !Edrivers/base/dma-coherent.c 127 + !Edrivers/base/dma-mapping.c 126 128 </sect1> 127 129 <sect1><title>Device Drivers Power Management</title> 128 130 !Edrivers/base/power/main.c ··· 227 219 <chapter id="uart16x50"> 228 220 <title>16x50 UART Driver</title> 229 221 !Edrivers/tty/serial/serial_core.c 230 - !Edrivers/tty/serial/8250.c 222 + !Edrivers/tty/serial/8250/8250.c 231 223 </chapter> 232 224 233 225 <chapter id="fbdev">
+14 -15
MAINTAINERS
··· 159 159 F: drivers/net/ethernet/realtek/r8169.c 160 160 161 161 8250/16?50 (AND CLONE UARTS) SERIAL DRIVER 162 - M: Greg Kroah-Hartman <gregkh@suse.de> 162 + M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 163 163 L: linux-serial@vger.kernel.org 164 164 W: http://serial.sourceforge.net 165 165 S: Maintained ··· 1783 1783 1784 1784 CHAR and MISC DRIVERS 1785 1785 M: Arnd Bergmann <arnd@arndb.de> 1786 - M: Greg Kroah-Hartman <greg@kroah.com> 1786 + M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 1787 1787 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git 1788 - S: Maintained 1788 + S: Supported 1789 1789 F: drivers/char/* 1790 1790 F: drivers/misc/* 1791 1791 ··· 2320 2320 F: Documentation/blockdev/drbd/ 2321 2321 2322 2322 DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS 2323 - M: Greg Kroah-Hartman <gregkh@suse.de> 2323 + M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2324 2324 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6.git 2325 2325 S: Supported 2326 2326 F: Documentation/kobject.txt ··· 6276 6276 F: arch/alpha/kernel/srm_env.c 6277 6277 6278 6278 STABLE BRANCH 6279 - M: Greg Kroah-Hartman <greg@kroah.com> 6279 + M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 6280 6280 L: stable@vger.kernel.org 6281 - S: Maintained 6281 + S: Supported 6282 6282 6283 6283 STAGING SUBSYSTEM 6284 - M: Greg Kroah-Hartman <gregkh@suse.de> 6284 + M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 6285 6285 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git 6286 6286 L: devel@driverdev.osuosl.org 6287 - S: Maintained 6287 + S: Supported 6288 6288 F: drivers/staging/ 6289 6289 6290 6290 STAGING - AGERE HERMES II and II.5 WIRELESS DRIVERS ··· 6669 6669 K: ^Subject:.*(?i)trivial 6670 6670 6671 6671 TTY LAYER 6672 - M: Greg Kroah-Hartman <gregkh@suse.de> 6673 - S: Maintained 6672 + M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 6673 + S: Supported 6674 6674 T: git 
git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git 6675 6675 F: drivers/tty/ 6676 6676 F: drivers/tty/serial/serial_core.c ··· 6958 6958 F: drivers/usb/serial/digi_acceleport.c 6959 6959 6960 6960 USB SERIAL DRIVER 6961 - M: Greg Kroah-Hartman <gregkh@suse.de> 6961 + M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 6962 6962 L: linux-usb@vger.kernel.org 6963 6963 S: Supported 6964 6964 F: Documentation/usb/usb-serial.txt ··· 6973 6973 F: drivers/usb/serial/empeg.c 6974 6974 6975 6975 USB SERIAL KEYSPAN DRIVER 6976 - M: Greg Kroah-Hartman <greg@kroah.com> 6976 + M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 6977 6977 L: linux-usb@vger.kernel.org 6978 - W: http://www.kroah.com/linux/ 6979 6978 S: Maintained 6980 6979 F: drivers/usb/serial/*keyspan* 6981 6980 ··· 7002 7003 F: drivers/media/video/sn9c102/ 7003 7004 7004 7005 USB SUBSYSTEM 7005 - M: Greg Kroah-Hartman <gregkh@suse.de> 7006 + M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 7006 7007 L: linux-usb@vger.kernel.org 7007 7008 W: http://www.linux-usb.org 7008 7009 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git ··· 7089 7090 7090 7091 USERSPACE I/O (UIO) 7091 7092 M: "Hans J. Koch" <hjk@hansjkoch.de> 7092 - M: Greg Kroah-Hartman <gregkh@suse.de> 7093 + M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 7093 7094 S: Maintained 7094 7095 F: Documentation/DocBook/uio-howto.tmpl 7095 7096 F: drivers/uio/
+1 -20
arch/microblaze/kernel/setup.c
··· 26 26 #include <linux/cache.h> 27 27 #include <linux/of_platform.h> 28 28 #include <linux/dma-mapping.h> 29 - #include <linux/cpu.h> 30 29 #include <asm/cacheflush.h> 31 30 #include <asm/entry.h> 32 31 #include <asm/cpuinfo.h> ··· 226 227 227 228 return 0; 228 229 } 230 + 229 231 arch_initcall(setup_bus_notifier); 230 - 231 - static DEFINE_PER_CPU(struct cpu, cpu_devices); 232 - 233 - static int __init topology_init(void) 234 - { 235 - int i, ret; 236 - 237 - for_each_present_cpu(i) { 238 - struct cpu *c = &per_cpu(cpu_devices, i); 239 - 240 - ret = register_cpu(c, i); 241 - if (ret) 242 - printk(KERN_WARNING "topology_init: register_cpu %d " 243 - "failed (%d)\n", i, ret); 244 - } 245 - 246 - return 0; 247 - } 248 - subsys_initcall(topology_init);
+1
arch/sparc/Kconfig
··· 33 33 config SPARC32 34 34 def_bool !64BIT 35 35 select GENERIC_ATOMIC64 36 + select CLZ_TAB 36 37 37 38 config SPARC64 38 39 def_bool 64BIT
+1 -15
arch/sparc/lib/divdi3.S
··· 17 17 the Free Software Foundation, 59 Temple Place - Suite 330, 18 18 Boston, MA 02111-1307, USA. */ 19 19 20 - .data 21 - .align 8 22 - .globl __clz_tab 23 - __clz_tab: 24 - .byte 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 25 - .byte 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6 26 - .byte 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7 27 - .byte 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7 28 - .byte 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8 29 - .byte 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8 30 - .byte 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8 31 - .byte 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8 32 - .size __clz_tab,256 33 - .global .udiv 34 - 35 20 .text 36 21 .align 4 22 + .global .udiv 37 23 .globl __divdi3 38 24 __divdi3: 39 25 save %sp,-104,%sp
+3 -3
arch/x86/include/asm/cmpxchg.h
··· 145 145 146 146 #ifdef __HAVE_ARCH_CMPXCHG 147 147 #define cmpxchg(ptr, old, new) \ 148 - __cmpxchg((ptr), (old), (new), sizeof(*ptr)) 148 + __cmpxchg(ptr, old, new, sizeof(*(ptr))) 149 149 150 150 #define sync_cmpxchg(ptr, old, new) \ 151 - __sync_cmpxchg((ptr), (old), (new), sizeof(*ptr)) 151 + __sync_cmpxchg(ptr, old, new, sizeof(*(ptr))) 152 152 153 153 #define cmpxchg_local(ptr, old, new) \ 154 - __cmpxchg_local((ptr), (old), (new), sizeof(*ptr)) 154 + __cmpxchg_local(ptr, old, new, sizeof(*(ptr))) 155 155 #endif 156 156 157 157 /*
+2 -1
arch/x86/kernel/dumpstack.c
··· 252 252 unsigned short ss; 253 253 unsigned long sp; 254 254 #endif 255 - printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); 255 + printk(KERN_DEFAULT 256 + "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); 256 257 #ifdef CONFIG_PREEMPT 257 258 printk("PREEMPT "); 258 259 #endif
+4 -4
arch/x86/kernel/dumpstack_64.c
··· 129 129 if (!stack) { 130 130 if (regs) 131 131 stack = (unsigned long *)regs->sp; 132 - else if (task && task != current) 132 + else if (task != current) 133 133 stack = (unsigned long *)task->thread.sp; 134 134 else 135 135 stack = &dummy; ··· 269 269 unsigned char c; 270 270 u8 *ip; 271 271 272 - printk(KERN_EMERG "Stack:\n"); 272 + printk(KERN_DEFAULT "Stack:\n"); 273 273 show_stack_log_lvl(NULL, regs, (unsigned long *)sp, 274 - 0, KERN_EMERG); 274 + 0, KERN_DEFAULT); 275 275 276 - printk(KERN_EMERG "Code: "); 276 + printk(KERN_DEFAULT "Code: "); 277 277 278 278 ip = (u8 *)regs->ip - code_prologue; 279 279 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
+26 -10
arch/x86/kernel/reboot.c
··· 39 39 enum reboot_type reboot_type = BOOT_ACPI; 40 40 int reboot_force; 41 41 42 + /* This variable is used privately to keep track of whether or not 43 + * reboot_type is still set to its default value (i.e., reboot= hasn't 44 + * been set on the command line). This is needed so that we can 45 + * suppress DMI scanning for reboot quirks. Without it, it's 46 + * impossible to override a faulty reboot quirk without recompiling. 47 + */ 48 + static int reboot_default = 1; 49 + 42 50 #if defined(CONFIG_X86_32) && defined(CONFIG_SMP) 43 51 static int reboot_cpu = -1; 44 52 #endif ··· 75 67 static int __init reboot_setup(char *str) 76 68 { 77 69 for (;;) { 70 + /* Having anything passed on the command line via 71 + * reboot= will cause us to disable DMI checking 72 + * below. 73 + */ 74 + reboot_default = 0; 75 + 78 76 switch (*str) { 79 77 case 'w': 80 78 reboot_mode = 0x1234; ··· 309 295 DMI_MATCH(DMI_BOARD_NAME, "P4S800"), 310 296 }, 311 297 }, 312 - { /* Handle problems with rebooting on VersaLogic Menlow boards */ 313 - .callback = set_bios_reboot, 314 - .ident = "VersaLogic Menlow based board", 315 - .matches = { 316 - DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"), 317 - DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"), 318 - }, 319 - }, 320 298 { /* Handle reboot issue on Acer Aspire one */ 321 299 .callback = set_kbd_reboot, 322 300 .ident = "Acer Aspire One A110", ··· 322 316 323 317 static int __init reboot_init(void) 324 318 { 325 - dmi_check_system(reboot_dmi_table); 319 + /* Only do the DMI check if reboot_type hasn't been overridden 320 + * on the command line 321 + */ 322 + if (reboot_default) { 323 + dmi_check_system(reboot_dmi_table); 324 + } 326 325 return 0; 327 326 } 328 327 core_initcall(reboot_init); ··· 476 465 477 466 static int __init pci_reboot_init(void) 478 467 { 479 - dmi_check_system(pci_reboot_dmi_table); 468 + /* Only do the DMI check if reboot_type hasn't been overridden 469 + * on the command line 470 + */ 471 + if 
(reboot_default) { 472 + dmi_check_system(pci_reboot_dmi_table); 473 + } 480 474 return 0; 481 475 } 482 476 core_initcall(pci_reboot_init);
+2 -2
arch/x86/mm/fault.c
··· 673 673 674 674 stackend = end_of_stack(tsk); 675 675 if (tsk != &init_task && *stackend != STACK_END_MAGIC) 676 - printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); 676 + printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 677 677 678 678 tsk->thread.cr2 = address; 679 679 tsk->thread.trap_no = 14; ··· 684 684 sig = 0; 685 685 686 686 /* Executive summary in case the body of the oops scrolled away */ 687 - printk(KERN_EMERG "CR2: %016lx\n", address); 687 + printk(KERN_DEFAULT "CR2: %016lx\n", address); 688 688 689 689 oops_end(flags, regs, sig); 690 690 }
+5 -2
drivers/block/rbd.c
··· 380 380 rbdc = __rbd_client_find(opt); 381 381 if (rbdc) { 382 382 ceph_destroy_options(opt); 383 + kfree(rbd_opts); 383 384 384 385 /* using an existing client */ 385 386 kref_get(&rbdc->kref); ··· 407 406 408 407 /* 409 408 * Destroy ceph client 409 + * 410 + * Caller must hold node_lock. 410 411 */ 411 412 static void rbd_client_release(struct kref *kref) 412 413 { 413 414 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref); 414 415 415 416 dout("rbd_release_client %p\n", rbdc); 416 - spin_lock(&node_lock); 417 417 list_del(&rbdc->node); 418 - spin_unlock(&node_lock); 419 418 420 419 ceph_destroy_client(rbdc->client); 421 420 kfree(rbdc->rbd_opts); ··· 428 427 */ 429 428 static void rbd_put_client(struct rbd_device *rbd_dev) 430 429 { 430 + spin_lock(&node_lock); 431 431 kref_put(&rbd_dev->rbd_client->kref, rbd_client_release); 432 + spin_unlock(&node_lock); 432 433 rbd_dev->rbd_client = NULL; 433 434 rbd_dev->client = NULL; 434 435 }
+5 -1
drivers/firewire/ohci.c
··· 263 263 static char ohci_driver_name[] = KBUILD_MODNAME; 264 264 265 265 #define PCI_DEVICE_ID_AGERE_FW643 0x5901 266 + #define PCI_DEVICE_ID_CREATIVE_SB1394 0x4001 266 267 #define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 267 268 #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 268 269 #define PCI_DEVICE_ID_TI_TSB12LV26 0x8020 ··· 290 289 {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, 291 290 QUIRK_NO_MSI}, 292 291 292 + {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID, 293 + QUIRK_RESET_PACKET}, 294 + 293 295 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID, 294 296 QUIRK_NO_MSI}, 295 297 ··· 303 299 QUIRK_NO_MSI}, 304 300 305 301 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, 306 - QUIRK_CYCLE_TIMER}, 302 + QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, 307 303 308 304 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID, 309 305 QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
+3 -2
drivers/gpu/drm/nouveau/nouveau_bios.h
··· 54 54 int bit_table(struct drm_device *, u8 id, struct bit_entry *); 55 55 56 56 enum dcb_gpio_tag { 57 - DCB_GPIO_TVDAC0 = 0xc, 57 + DCB_GPIO_PANEL_POWER = 0x01, 58 + DCB_GPIO_TVDAC0 = 0x0c, 58 59 DCB_GPIO_TVDAC1 = 0x2d, 59 - DCB_GPIO_PWM_FAN = 0x9, 60 + DCB_GPIO_PWM_FAN = 0x09, 60 61 DCB_GPIO_FAN_SENSE = 0x3d, 61 62 DCB_GPIO_UNUSED = 0xff 62 63 };
+10
drivers/gpu/drm/nouveau/nouveau_display.c
··· 219 219 if (ret) 220 220 return ret; 221 221 222 + /* power on internal panel if it's not already. the init tables of 223 + * some vbios default this to off for some reason, causing the 224 + * panel to not work after resume 225 + */ 226 + if (nouveau_gpio_func_get(dev, DCB_GPIO_PANEL_POWER) == 0) { 227 + nouveau_gpio_func_set(dev, DCB_GPIO_PANEL_POWER, true); 228 + msleep(300); 229 + } 230 + 231 + /* enable polling for external displays */ 222 232 drm_kms_helper_poll_enable(dev); 223 233 224 234 /* enable hotplug interrupts */
+1 -1
drivers/gpu/drm/nouveau/nouveau_drv.c
··· 124 124 int nouveau_ctxfw; 125 125 module_param_named(ctxfw, nouveau_ctxfw, int, 0400); 126 126 127 - MODULE_PARM_DESC(ctxfw, "Santise DCB table according to MXM-SIS\n"); 127 + MODULE_PARM_DESC(mxmdcb, "Santise DCB table according to MXM-SIS\n"); 128 128 int nouveau_mxmdcb = 1; 129 129 module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400); 130 130
+21 -2
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 380 380 } 381 381 382 382 static int 383 + validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo) 384 + { 385 + struct nouveau_fence *fence = NULL; 386 + int ret = 0; 387 + 388 + spin_lock(&nvbo->bo.bdev->fence_lock); 389 + if (nvbo->bo.sync_obj) 390 + fence = nouveau_fence_ref(nvbo->bo.sync_obj); 391 + spin_unlock(&nvbo->bo.bdev->fence_lock); 392 + 393 + if (fence) { 394 + ret = nouveau_fence_sync(fence, chan); 395 + nouveau_fence_unref(&fence); 396 + } 397 + 398 + return ret; 399 + } 400 + 401 + static int 383 402 validate_list(struct nouveau_channel *chan, struct list_head *list, 384 403 struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr) 385 404 { ··· 412 393 list_for_each_entry(nvbo, list, entry) { 413 394 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; 414 395 415 - ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan); 396 + ret = validate_sync(chan, nvbo); 416 397 if (unlikely(ret)) { 417 398 NV_ERROR(dev, "fail pre-validate sync\n"); 418 399 return ret; ··· 435 416 return ret; 436 417 } 437 418 438 - ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan); 419 + ret = validate_sync(chan, nvbo); 439 420 if (unlikely(ret)) { 440 421 NV_ERROR(dev, "fail post-validate sync\n"); 441 422 return ret;
+9
drivers/gpu/drm/nouveau/nouveau_mxm.c
··· 656 656 657 657 if (mxm_shadow(dev, mxm[0])) { 658 658 MXM_MSG(dev, "failed to locate valid SIS\n"); 659 + #if 0 660 + /* we should, perhaps, fall back to some kind of limited 661 + * mode here if the x86 vbios hasn't already done the 662 + * work for us (so we prevent loading with completely 663 + * whacked vbios tables). 664 + */ 659 665 return -EINVAL; 666 + #else 667 + return 0; 668 + #endif 660 669 } 661 670 662 671 MXM_MSG(dev, "MXMS Version %d.%d\n",
+2 -2
drivers/gpu/drm/nouveau/nv50_pm.c
··· 495 495 struct drm_nouveau_private *dev_priv = dev->dev_private; 496 496 struct nv50_pm_state *info; 497 497 struct pll_lims pll; 498 - int ret = -EINVAL; 498 + int clk, ret = -EINVAL; 499 499 int N, M, P1, P2; 500 - u32 clk, out; 500 + u32 out; 501 501 502 502 if (dev_priv->chipset == 0xaa || 503 503 dev_priv->chipset == 0xac)
+2 -2
drivers/gpu/drm/radeon/atombios_crtc.c
··· 1184 1184 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); 1185 1185 1186 1186 WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, 1187 - crtc->mode.vdisplay); 1187 + target_fb->height); 1188 1188 x &= ~3; 1189 1189 y &= ~1; 1190 1190 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset, ··· 1353 1353 WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1); 1354 1354 1355 1355 WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, 1356 - crtc->mode.vdisplay); 1356 + target_fb->height); 1357 1357 x &= ~3; 1358 1358 y &= ~1; 1359 1359 WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
+15 -3
drivers/gpu/drm/radeon/atombios_dp.c
··· 564 564 ENCODER_OBJECT_ID_NUTMEG) 565 565 panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; 566 566 else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == 567 - ENCODER_OBJECT_ID_TRAVIS) 568 - panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; 569 - else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 567 + ENCODER_OBJECT_ID_TRAVIS) { 568 + u8 id[6]; 569 + int i; 570 + for (i = 0; i < 6; i++) 571 + id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i); 572 + if (id[0] == 0x73 && 573 + id[1] == 0x69 && 574 + id[2] == 0x76 && 575 + id[3] == 0x61 && 576 + id[4] == 0x72 && 577 + id[5] == 0x54) 578 + panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; 579 + else 580 + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; 581 + } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 570 582 u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); 571 583 if (tmp & 1) 572 584 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+25 -10
drivers/gpu/drm/radeon/r600_blit_kms.c
··· 468 468 radeon_ring_write(ring, sq_stack_resource_mgmt_2); 469 469 } 470 470 471 + #define I2F_MAX_BITS 15 472 + #define I2F_MAX_INPUT ((1 << I2F_MAX_BITS) - 1) 473 + #define I2F_SHIFT (24 - I2F_MAX_BITS) 474 + 475 + /* 476 + * Converts unsigned integer into 32-bit IEEE floating point representation. 477 + * Conversion is not universal and only works for the range from 0 478 + * to 2^I2F_MAX_BITS-1. Currently we only use it with inputs between 479 + * 0 and 16384 (inclusive), so I2F_MAX_BITS=15 is enough. If necessary, 480 + * I2F_MAX_BITS can be increased, but that will add to the loop iterations 481 + * and slow us down. Conversion is done by shifting the input and counting 482 + * down until the first 1 reaches bit position 23. The resulting counter 483 + * and the shifted input are, respectively, the exponent and the fraction. 484 + * The sign is always zero. 485 + */ 471 486 static uint32_t i2f(uint32_t input) 472 487 { 473 488 u32 result, i, exponent, fraction; 474 489 475 - if ((input & 0x3fff) == 0) 476 - result = 0; /* 0 is a special case */ 490 + WARN_ON_ONCE(input > I2F_MAX_INPUT); 491 + 492 + if ((input & I2F_MAX_INPUT) == 0) 493 + result = 0; 477 494 else { 478 - exponent = 140; /* exponent biased by 127; */ 479 - fraction = (input & 0x3fff) << 10; /* cheat and only 480 - handle numbers below 2^^15 */ 481 - for (i = 0; i < 14; i++) { 495 + exponent = 126 + I2F_MAX_BITS; 496 + fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT; 497 + 498 + for (i = 0; i < I2F_MAX_BITS; i++) { 482 499 if (fraction & 0x800000) 483 500 break; 484 501 else { 485 - fraction = fraction << 1; /* keep 486 - shifting left until top bit = 1 */ 502 + fraction = fraction << 1; 487 503 exponent = exponent - 1; 488 504 } 489 505 } 490 - result = exponent << 23 | (fraction & 0x7fffff); /* mask 491 - off top bit; assumed 1 */ 506 + result = exponent << 23 | (fraction & 0x7fffff); 492 507 } 493 508 return result; 494 509 }
+2 -1
drivers/gpu/drm/radeon/radeon_atpx_handler.c
··· 59 59 60 60 obj = (union acpi_object *)buffer.pointer; 61 61 memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length); 62 + len = obj->buffer.length; 62 63 kfree(buffer.pointer); 63 - return obj->buffer.length; 64 + return len; 64 65 } 65 66 66 67 bool radeon_atrm_supported(struct pci_dev *pdev)
+4
drivers/gpu/drm/radeon/radeon_device.c
··· 883 883 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 884 884 return 0; 885 885 886 + drm_kms_helper_poll_disable(dev); 887 + 886 888 /* turn off display hw */ 887 889 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 888 890 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); ··· 974 972 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 975 973 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 976 974 } 975 + 976 + drm_kms_helper_poll_enable(dev); 977 977 return 0; 978 978 } 979 979
+1
drivers/gpu/drm/radeon/radeon_i2c.c
··· 958 958 i2c->rec = *rec; 959 959 i2c->adapter.owner = THIS_MODULE; 960 960 i2c->adapter.class = I2C_CLASS_DDC; 961 + i2c->adapter.dev.parent = &dev->pdev->dev; 961 962 i2c->dev = dev; 962 963 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), 963 964 "Radeon aux bus %s", name);
+4 -1
drivers/infiniband/core/ucma.c
··· 808 808 return PTR_ERR(ctx); 809 809 810 810 if (cmd.conn_param.valid) { 811 - ctx->uid = cmd.uid; 812 811 ucma_copy_conn_param(&conn_param, &cmd.conn_param); 812 + mutex_lock(&file->mut); 813 813 ret = rdma_accept(ctx->cm_id, &conn_param); 814 + if (!ret) 815 + ctx->uid = cmd.uid; 816 + mutex_unlock(&file->mut); 814 817 } else 815 818 ret = rdma_accept(ctx->cm_id, NULL); 816 819
+1
drivers/infiniband/core/uverbs_cmd.c
··· 1485 1485 qp->event_handler = attr.event_handler; 1486 1486 qp->qp_context = attr.qp_context; 1487 1487 qp->qp_type = attr.qp_type; 1488 + atomic_set(&qp->usecnt, 0); 1488 1489 atomic_inc(&pd->usecnt); 1489 1490 atomic_inc(&attr.send_cq->usecnt); 1490 1491 if (attr.recv_cq)
+1 -1
drivers/infiniband/core/verbs.c
··· 421 421 qp->uobject = NULL; 422 422 qp->qp_type = qp_init_attr->qp_type; 423 423 424 + atomic_set(&qp->usecnt, 0); 424 425 if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) { 425 426 qp->event_handler = __ib_shared_qp_event_handler; 426 427 qp->qp_context = qp; ··· 431 430 qp->xrcd = qp_init_attr->xrcd; 432 431 atomic_inc(&qp_init_attr->xrcd->usecnt); 433 432 INIT_LIST_HEAD(&qp->open_list); 434 - atomic_set(&qp->usecnt, 0); 435 433 436 434 real_qp = qp; 437 435 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
+1 -1
drivers/infiniband/hw/ipath/ipath_fs.c
··· 89 89 error = ipathfs_mknod(parent->d_inode, *dentry, 90 90 mode, fops, data); 91 91 else 92 - error = PTR_ERR(dentry); 92 + error = PTR_ERR(*dentry); 93 93 mutex_unlock(&parent->d_inode->i_mutex); 94 94 95 95 return error;
+2 -5
drivers/infiniband/hw/mlx4/mad.c
··· 257 257 return IB_MAD_RESULT_SUCCESS; 258 258 259 259 /* 260 - * Don't process SMInfo queries or vendor-specific 261 - * MADs -- the SMA can't handle them. 260 + * Don't process SMInfo queries -- the SMA can't handle them. 262 261 */ 263 - if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO || 264 - ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) == 265 - IB_SMP_ATTR_VENDOR_MASK)) 262 + if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO) 266 263 return IB_MAD_RESULT_SUCCESS; 267 264 } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || 268 265 in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
+1 -1
drivers/infiniband/hw/nes/nes.c
··· 1 1 /* 2 - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. 3 3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 4 4 * 5 5 * This software is available to you under a choice of one of two
+1 -1
drivers/infiniband/hw/nes/nes.h
··· 1 1 /* 2 - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. 3 3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 4 4 * 5 5 * This software is available to you under a choice of one of two
+7 -3
drivers/infiniband/hw/nes/nes_cm.c
··· 1 1 /* 2 - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. 3 3 * 4 4 * This software is available to you under a choice of one of two 5 5 * licenses. You may choose to be licensed under the terms of the GNU ··· 233 233 u8 *start_ptr = &start_addr; 234 234 u8 **start_buff = &start_ptr; 235 235 u16 buff_len = 0; 236 + struct ietf_mpa_v1 *mpa_frame; 236 237 237 238 skb = dev_alloc_skb(MAX_CM_BUFFER); 238 239 if (!skb) { ··· 243 242 244 243 /* send an MPA reject frame */ 245 244 cm_build_mpa_frame(cm_node, start_buff, &buff_len, NULL, MPA_KEY_REPLY); 245 + mpa_frame = (struct ietf_mpa_v1 *)*start_buff; 246 + mpa_frame->flags |= IETF_MPA_FLAGS_REJECT; 246 247 form_cm_frame(skb, cm_node, NULL, 0, *start_buff, buff_len, SET_ACK | SET_FIN); 247 248 248 249 cm_node->state = NES_CM_STATE_FIN_WAIT1; ··· 1363 1360 if (!memcmp(nesadapter->arp_table[arpindex].mac_addr, 1364 1361 neigh->ha, ETH_ALEN)) { 1365 1362 /* Mac address same as in nes_arp_table */ 1366 - ip_rt_put(rt); 1367 - return rc; 1363 + goto out; 1368 1364 } 1369 1365 1370 1366 nes_manage_arp_cache(nesvnic->netdev, ··· 1379 1377 neigh_event_send(neigh, NULL); 1380 1378 } 1381 1379 } 1380 + 1381 + out: 1382 1382 rcu_read_unlock(); 1383 1383 ip_rt_put(rt); 1384 1384 return rc;
+1 -1
drivers/infiniband/hw/nes/nes_cm.h
··· 1 1 /* 2 - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. 3 3 * 4 4 * This software is available to you under a choice of one of two 5 5 * licenses. You may choose to be licensed under the terms of the GNU
+1 -1
drivers/infiniband/hw/nes/nes_context.h
··· 1 1 /* 2 - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. 3 3 * 4 4 * This software is available to you under a choice of one of two 5 5 * licenses. You may choose to be licensed under the terms of the GNU
+1 -1
drivers/infiniband/hw/nes/nes_hw.c
··· 1 1 /* 2 - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. 3 3 * 4 4 * This software is available to you under a choice of one of two 5 5 * licenses. You may choose to be licensed under the terms of the GNU
+1 -1
drivers/infiniband/hw/nes/nes_hw.h
··· 1 1 /* 2 - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. 3 3 * 4 4 * This software is available to you under a choice of one of two 5 5 * licenses. You may choose to be licensed under the terms of the GNU
+1 -1
drivers/infiniband/hw/nes/nes_mgt.c
··· 1 1 /* 2 - * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. 2 + * Copyright (c) 2006 - 2011 Intel-NE, Inc. All rights reserved. 3 3 * 4 4 * This software is available to you under a choice of one of two 5 5 * licenses. You may choose to be licensed under the terms of the GNU
+1 -1
drivers/infiniband/hw/nes/nes_mgt.h
··· 1 1 /* 2 - * Copyright (c) 2010 Intel-NE, Inc. All rights reserved. 2 + * Copyright (c) 2006 - 2011 Intel-NE, Inc. All rights reserved. 3 3 * 4 4 * This software is available to you under a choice of one of two 5 5 * licenses. You may choose to be licensed under the terms of the GNU
+1 -1
drivers/infiniband/hw/nes/nes_nic.c
··· 1 1 /* 2 - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. 3 3 * 4 4 * This software is available to you under a choice of one of two 5 5 * licenses. You may choose to be licensed under the terms of the GNU
+1 -1
drivers/infiniband/hw/nes/nes_user.h
··· 1 1 /* 2 - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. 3 3 * Copyright (c) 2005 Topspin Communications. All rights reserved. 4 4 * Copyright (c) 2005 Cisco Systems. All rights reserved. 5 5 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+1 -1
drivers/infiniband/hw/nes/nes_utils.c
··· 1 1 /* 2 - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. 3 3 * 4 4 * This software is available to you under a choice of one of two 5 5 * licenses. You may choose to be licensed under the terms of the GNU
+4 -2
drivers/infiniband/hw/nes/nes_verbs.c
··· 1 1 /* 2 - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. 3 3 * 4 4 * This software is available to you under a choice of one of two 5 5 * licenses. You may choose to be licensed under the terms of the GNU ··· 3428 3428 NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX, 3429 3429 ib_wr->wr.fast_reg.length); 3430 3430 set_wqe_32bit_value(wqe->wqe_words, 3431 + NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0); 3432 + set_wqe_32bit_value(wqe->wqe_words, 3431 3433 NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX, 3432 3434 ib_wr->wr.fast_reg.rkey); 3433 3435 /* Set page size: */ ··· 3726 3724 entry->opcode = IB_WC_SEND; 3727 3725 break; 3728 3726 case NES_IWARP_SQ_OP_LOCINV: 3729 - entry->opcode = IB_WR_LOCAL_INV; 3727 + entry->opcode = IB_WC_LOCAL_INV; 3730 3728 break; 3731 3729 case NES_IWARP_SQ_OP_FAST_REG: 3732 3730 entry->opcode = IB_WC_FAST_REG_MR;
+1 -1
drivers/infiniband/hw/nes/nes_verbs.h
··· 1 1 /* 2 - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. 2 + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. 3 3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 4 4 * 5 5 * This software is available to you under a choice of one of two
+1 -1
drivers/infiniband/hw/qib/qib_iba6120.c
··· 2105 2105 dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev, 2106 2106 dd->rcd[0]->rcvhdrq_size, 2107 2107 &dd->cspec->dummy_hdrq_phys, 2108 - GFP_KERNEL | __GFP_COMP); 2108 + GFP_ATOMIC | __GFP_COMP); 2109 2109 if (!dd->cspec->dummy_hdrq) { 2110 2110 qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n"); 2111 2111 /* fallback to just 0'ing */
+1 -1
drivers/infiniband/hw/qib/qib_pcie.c
··· 560 560 * BIOS may not set PCIe bus-utilization parameters for best performance. 561 561 * Check and optionally adjust them to maximize our throughput. 562 562 */ 563 - static int qib_pcie_caps = 0x51; 563 + static int qib_pcie_caps; 564 564 module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO); 565 565 MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)"); 566 566
+1
drivers/staging/media/go7007/go7007-usb.c
··· 1279 1279 }; 1280 1280 1281 1281 module_usb_driver(go7007_usb_driver); 1282 + MODULE_LICENSE("GPL v2");
+2 -2
fs/ceph/caps.c
··· 641 641 unsigned long ttl; 642 642 u32 gen; 643 643 644 - spin_lock(&cap->session->s_cap_lock); 644 + spin_lock(&cap->session->s_gen_ttl_lock); 645 645 gen = cap->session->s_cap_gen; 646 646 ttl = cap->session->s_cap_ttl; 647 - spin_unlock(&cap->session->s_cap_lock); 647 + spin_unlock(&cap->session->s_gen_ttl_lock); 648 648 649 649 if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) { 650 650 dout("__cap_is_valid %p cap %p issued %s "
+2 -2
fs/ceph/dir.c
··· 975 975 di = ceph_dentry(dentry); 976 976 if (di->lease_session) { 977 977 s = di->lease_session; 978 - spin_lock(&s->s_cap_lock); 978 + spin_lock(&s->s_gen_ttl_lock); 979 979 gen = s->s_cap_gen; 980 980 ttl = s->s_cap_ttl; 981 - spin_unlock(&s->s_cap_lock); 981 + spin_unlock(&s->s_gen_ttl_lock); 982 982 983 983 if (di->lease_gen == gen && 984 984 time_before(jiffies, dentry->d_time) &&
+7 -3
fs/ceph/mds_client.c
··· 262 262 /* trace */ 263 263 ceph_decode_32_safe(&p, end, len, bad); 264 264 if (len > 0) { 265 + ceph_decode_need(&p, end, len, bad); 265 266 err = parse_reply_info_trace(&p, p+len, info, features); 266 267 if (err < 0) 267 268 goto out_bad; ··· 271 270 /* extra */ 272 271 ceph_decode_32_safe(&p, end, len, bad); 273 272 if (len > 0) { 273 + ceph_decode_need(&p, end, len, bad); 274 274 err = parse_reply_info_extra(&p, p+len, info, features); 275 275 if (err < 0) 276 276 goto out_bad; ··· 400 398 s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS; 401 399 s->s_con.peer_name.num = cpu_to_le64(mds); 402 400 403 - spin_lock_init(&s->s_cap_lock); 401 + spin_lock_init(&s->s_gen_ttl_lock); 404 402 s->s_cap_gen = 0; 405 403 s->s_cap_ttl = 0; 404 + 405 + spin_lock_init(&s->s_cap_lock); 406 406 s->s_renew_requested = 0; 407 407 s->s_renew_seq = 0; 408 408 INIT_LIST_HEAD(&s->s_caps); ··· 2330 2326 case CEPH_SESSION_STALE: 2331 2327 pr_info("mds%d caps went stale, renewing\n", 2332 2328 session->s_mds); 2333 - spin_lock(&session->s_cap_lock); 2329 + spin_lock(&session->s_gen_ttl_lock); 2334 2330 session->s_cap_gen++; 2335 2331 session->s_cap_ttl = 0; 2336 - spin_unlock(&session->s_cap_lock); 2332 + spin_unlock(&session->s_gen_ttl_lock); 2337 2333 send_renew_caps(mdsc, session); 2338 2334 break; 2339 2335
+5 -2
fs/ceph/mds_client.h
··· 117 117 void *s_authorizer_buf, *s_authorizer_reply_buf; 118 118 size_t s_authorizer_buf_len, s_authorizer_reply_buf_len; 119 119 120 - /* protected by s_cap_lock */ 121 - spinlock_t s_cap_lock; 120 + /* protected by s_gen_ttl_lock */ 121 + spinlock_t s_gen_ttl_lock; 122 122 u32 s_cap_gen; /* inc each time we get mds stale msg */ 123 123 unsigned long s_cap_ttl; /* when session caps expire */ 124 + 125 + /* protected by s_cap_lock */ 126 + spinlock_t s_cap_lock; 124 127 struct list_head s_caps; /* all caps issued by this session */ 125 128 int s_nr_caps, s_trim_caps; 126 129 int s_num_cap_releases;
+3 -1
fs/ceph/xattr.c
··· 111 111 } 112 112 113 113 static struct ceph_vxattr_cb ceph_file_vxattrs[] = { 114 + { true, "ceph.file.layout", ceph_vxattrcb_layout}, 115 + /* The following extended attribute name is deprecated */ 114 116 { true, "ceph.layout", ceph_vxattrcb_layout}, 115 - { NULL, NULL } 117 + { true, NULL, NULL } 116 118 }; 117 119 118 120 static struct ceph_vxattr_cb *ceph_inode_vxattrs(struct inode *inode)
-6
fs/logfs/dev_mtd.c
··· 152 152 filler_t *filler = logfs_mtd_readpage; 153 153 struct mtd_info *mtd = super->s_mtd; 154 154 155 - if (!mtd_can_have_bb(mtd)) 156 - return NULL; 157 - 158 155 *ofs = 0; 159 156 while (mtd_block_isbad(mtd, *ofs)) { 160 157 *ofs += mtd->erasesize; ··· 168 171 struct address_space *mapping = super->s_mapping_inode->i_mapping; 169 172 filler_t *filler = logfs_mtd_readpage; 170 173 struct mtd_info *mtd = super->s_mtd; 171 - 172 - if (!mtd_can_have_bb(mtd)) 173 - return NULL; 174 174 175 175 *ofs = mtd->size - mtd->erasesize; 176 176 while (mtd_block_isbad(mtd, *ofs)) {
+46 -69
fs/nfs/nfs4proc.c
··· 3621 3621 } 3622 3622 if (npages > 1) { 3623 3623 /* for decoding across pages */ 3624 - args.acl_scratch = alloc_page(GFP_KERNEL); 3625 - if (!args.acl_scratch) 3624 + res.acl_scratch = alloc_page(GFP_KERNEL); 3625 + if (!res.acl_scratch) 3626 3626 goto out_free; 3627 3627 } 3628 3628 args.acl_len = npages * PAGE_SIZE; ··· 3658 3658 for (i = 0; i < npages; i++) 3659 3659 if (pages[i]) 3660 3660 __free_page(pages[i]); 3661 - if (args.acl_scratch) 3662 - __free_page(args.acl_scratch); 3661 + if (res.acl_scratch) 3662 + __free_page(res.acl_scratch); 3663 3663 return ret; 3664 3664 } 3665 3665 ··· 5104 5104 return status; 5105 5105 } 5106 5106 5107 + static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags) 5108 + { 5109 + return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags); 5110 + } 5111 + 5112 + static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl, 5113 + struct nfs4_slot *new, 5114 + u32 max_slots, 5115 + u32 ivalue) 5116 + { 5117 + struct nfs4_slot *old = NULL; 5118 + u32 i; 5119 + 5120 + spin_lock(&tbl->slot_tbl_lock); 5121 + if (new) { 5122 + old = tbl->slots; 5123 + tbl->slots = new; 5124 + tbl->max_slots = max_slots; 5125 + } 5126 + tbl->highest_used_slotid = -1; /* no slot is currently used */ 5127 + for (i = 0; i < tbl->max_slots; i++) 5128 + tbl->slots[i].seq_nr = ivalue; 5129 + spin_unlock(&tbl->slot_tbl_lock); 5130 + kfree(old); 5131 + } 5132 + 5107 5133 /* 5108 - * Reset a slot table 5134 + * (re)Initialise a slot table 5109 5135 */ 5110 - static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, 5111 - int ivalue) 5136 + static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, 5137 + u32 ivalue) 5112 5138 { 5113 5139 struct nfs4_slot *new = NULL; 5114 - int i; 5115 - int ret = 0; 5140 + int ret = -ENOMEM; 5116 5141 5117 5142 dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__, 5118 5143 max_reqs, tbl->max_slots); 5119 5144 5120 5145 /* Does the newly negotiated 
max_reqs match the existing slot table? */ 5121 5146 if (max_reqs != tbl->max_slots) { 5122 - ret = -ENOMEM; 5123 - new = kmalloc(max_reqs * sizeof(struct nfs4_slot), 5124 - GFP_NOFS); 5147 + new = nfs4_alloc_slots(max_reqs, GFP_NOFS); 5125 5148 if (!new) 5126 5149 goto out; 5127 - ret = 0; 5128 - kfree(tbl->slots); 5129 5150 } 5130 - spin_lock(&tbl->slot_tbl_lock); 5131 - if (new) { 5132 - tbl->slots = new; 5133 - tbl->max_slots = max_reqs; 5134 - } 5135 - for (i = 0; i < tbl->max_slots; ++i) 5136 - tbl->slots[i].seq_nr = ivalue; 5137 - spin_unlock(&tbl->slot_tbl_lock); 5151 + ret = 0; 5152 + 5153 + nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue); 5138 5154 dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, 5139 5155 tbl, tbl->slots, tbl->max_slots); 5140 5156 out: ··· 5173 5157 } 5174 5158 5175 5159 /* 5176 - * Initialize slot table 5177 - */ 5178 - static int nfs4_init_slot_table(struct nfs4_slot_table *tbl, 5179 - int max_slots, int ivalue) 5180 - { 5181 - struct nfs4_slot *slot; 5182 - int ret = -ENOMEM; 5183 - 5184 - BUG_ON(max_slots > NFS4_MAX_SLOT_TABLE); 5185 - 5186 - dprintk("--> %s: max_reqs=%u\n", __func__, max_slots); 5187 - 5188 - slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_NOFS); 5189 - if (!slot) 5190 - goto out; 5191 - ret = 0; 5192 - 5193 - spin_lock(&tbl->slot_tbl_lock); 5194 - tbl->max_slots = max_slots; 5195 - tbl->slots = slot; 5196 - tbl->highest_used_slotid = NFS4_NO_SLOT; /* no slot is currently used */ 5197 - spin_unlock(&tbl->slot_tbl_lock); 5198 - dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, 5199 - tbl, tbl->slots, tbl->max_slots); 5200 - out: 5201 - dprintk("<-- %s: return %d\n", __func__, ret); 5202 - return ret; 5203 - } 5204 - 5205 - /* 5206 5160 * Initialize or reset the forechannel and backchannel tables 5207 5161 */ 5208 5162 static int nfs4_setup_session_slot_tables(struct nfs4_session *ses) ··· 5183 5197 dprintk("--> %s\n", __func__); 5184 5198 /* Fore channel */ 5185 5199 tbl = 
&ses->fc_slot_table; 5186 - if (tbl->slots == NULL) { 5187 - status = nfs4_init_slot_table(tbl, ses->fc_attrs.max_reqs, 1); 5188 - if (status) /* -ENOMEM */ 5189 - return status; 5190 - } else { 5191 - status = nfs4_reset_slot_table(tbl, ses->fc_attrs.max_reqs, 1); 5192 - if (status) 5193 - return status; 5194 - } 5200 + status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1); 5201 + if (status) /* -ENOMEM */ 5202 + return status; 5195 5203 /* Back channel */ 5196 5204 tbl = &ses->bc_slot_table; 5197 - if (tbl->slots == NULL) { 5198 - status = nfs4_init_slot_table(tbl, ses->bc_attrs.max_reqs, 0); 5199 - if (status) 5200 - /* Fore and back channel share a connection so get 5201 - * both slot tables or neither */ 5202 - nfs4_destroy_slot_tables(ses); 5203 - } else 5204 - status = nfs4_reset_slot_table(tbl, ses->bc_attrs.max_reqs, 0); 5205 + status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0); 5206 + if (status && tbl->slots == NULL) 5207 + /* Fore and back channel share a connection so get 5208 + * both slot tables or neither */ 5209 + nfs4_destroy_slot_tables(ses); 5205 5210 return status; 5206 5211 } 5207 5212
+2
fs/nfs/nfs4state.c
··· 1106 1106 { 1107 1107 struct nfs_client *clp = server->nfs_client; 1108 1108 1109 + if (test_and_clear_bit(NFS_DELEGATED_STATE, &state->flags)) 1110 + nfs_async_inode_return_delegation(state->inode, &state->stateid); 1109 1111 nfs4_state_mark_reclaim_nograce(clp, state); 1110 1112 nfs4_schedule_state_manager(clp); 1111 1113 }
+4 -1
fs/nfs/nfs4xdr.c
··· 2563 2563 2564 2564 xdr_inline_pages(&req->rq_rcv_buf, replen << 2, 2565 2565 args->acl_pages, args->acl_pgbase, args->acl_len); 2566 - xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE); 2567 2566 2568 2567 encode_nops(&hdr); 2569 2568 } ··· 6131 6132 struct compound_hdr hdr; 6132 6133 int status; 6133 6134 6135 + if (res->acl_scratch != NULL) { 6136 + void *p = page_address(res->acl_scratch); 6137 + xdr_set_scratch_buffer(xdr, p, PAGE_SIZE); 6138 + } 6134 6139 status = decode_compound_hdr(xdr, &hdr); 6135 6140 if (status) 6136 6141 goto out;
+48 -82
fs/proc/base.c
··· 198 198 return result; 199 199 } 200 200 201 - static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) 202 - { 203 - struct mm_struct *mm; 204 - int err; 205 - 206 - err = mutex_lock_killable(&task->signal->cred_guard_mutex); 207 - if (err) 208 - return ERR_PTR(err); 209 - 210 - mm = get_task_mm(task); 211 - if (mm && mm != current->mm && 212 - !ptrace_may_access(task, mode)) { 213 - mmput(mm); 214 - mm = ERR_PTR(-EACCES); 215 - } 216 - mutex_unlock(&task->signal->cred_guard_mutex); 217 - 218 - return mm; 219 - } 220 - 221 201 struct mm_struct *mm_for_maps(struct task_struct *task) 222 202 { 223 203 return mm_access(task, PTRACE_MODE_READ); ··· 691 711 if (IS_ERR(mm)) 692 712 return PTR_ERR(mm); 693 713 714 + if (mm) { 715 + /* ensure this mm_struct can't be freed */ 716 + atomic_inc(&mm->mm_count); 717 + /* but do not pin its memory */ 718 + mmput(mm); 719 + } 720 + 694 721 /* OK to pass negative loff_t, we can catch out-of-range */ 695 722 file->f_mode |= FMODE_UNSIGNED_OFFSET; 696 723 file->private_data = mm; ··· 705 718 return 0; 706 719 } 707 720 708 - static ssize_t mem_read(struct file * file, char __user * buf, 709 - size_t count, loff_t *ppos) 721 + static ssize_t mem_rw(struct file *file, char __user *buf, 722 + size_t count, loff_t *ppos, int write) 710 723 { 711 - int ret; 712 - char *page; 713 - unsigned long src = *ppos; 714 724 struct mm_struct *mm = file->private_data; 715 - 716 - if (!mm) 717 - return 0; 718 - 719 - page = (char *)__get_free_page(GFP_TEMPORARY); 720 - if (!page) 721 - return -ENOMEM; 722 - 723 - ret = 0; 724 - 725 - while (count > 0) { 726 - int this_len, retval; 727 - 728 - this_len = (count > PAGE_SIZE) ? 
PAGE_SIZE : count; 729 - retval = access_remote_vm(mm, src, page, this_len, 0); 730 - if (!retval) { 731 - if (!ret) 732 - ret = -EIO; 733 - break; 734 - } 735 - 736 - if (copy_to_user(buf, page, retval)) { 737 - ret = -EFAULT; 738 - break; 739 - } 740 - 741 - ret += retval; 742 - src += retval; 743 - buf += retval; 744 - count -= retval; 745 - } 746 - *ppos = src; 747 - 748 - free_page((unsigned long) page); 749 - return ret; 750 - } 751 - 752 - static ssize_t mem_write(struct file * file, const char __user *buf, 753 - size_t count, loff_t *ppos) 754 - { 755 - int copied; 725 + unsigned long addr = *ppos; 726 + ssize_t copied; 756 727 char *page; 757 - unsigned long dst = *ppos; 758 - struct mm_struct *mm = file->private_data; 759 728 760 729 if (!mm) 761 730 return 0; ··· 721 778 return -ENOMEM; 722 779 723 780 copied = 0; 724 - while (count > 0) { 725 - int this_len, retval; 781 + if (!atomic_inc_not_zero(&mm->mm_users)) 782 + goto free; 726 783 727 - this_len = (count > PAGE_SIZE) ? 
PAGE_SIZE : count; 728 - if (copy_from_user(page, buf, this_len)) { 784 + while (count > 0) { 785 + int this_len = min_t(int, count, PAGE_SIZE); 786 + 787 + if (write && copy_from_user(page, buf, this_len)) { 729 788 copied = -EFAULT; 730 789 break; 731 790 } 732 - retval = access_remote_vm(mm, dst, page, this_len, 1); 733 - if (!retval) { 791 + 792 + this_len = access_remote_vm(mm, addr, page, this_len, write); 793 + if (!this_len) { 734 794 if (!copied) 735 795 copied = -EIO; 736 796 break; 737 797 } 738 - copied += retval; 739 - buf += retval; 740 - dst += retval; 741 - count -= retval; 742 - } 743 - *ppos = dst; 744 798 799 + if (!write && copy_to_user(buf, page, this_len)) { 800 + copied = -EFAULT; 801 + break; 802 + } 803 + 804 + buf += this_len; 805 + addr += this_len; 806 + copied += this_len; 807 + count -= this_len; 808 + } 809 + *ppos = addr; 810 + 811 + mmput(mm); 812 + free: 745 813 free_page((unsigned long) page); 746 814 return copied; 815 + } 816 + 817 + static ssize_t mem_read(struct file *file, char __user *buf, 818 + size_t count, loff_t *ppos) 819 + { 820 + return mem_rw(file, buf, count, ppos, 0); 821 + } 822 + 823 + static ssize_t mem_write(struct file *file, const char __user *buf, 824 + size_t count, loff_t *ppos) 825 + { 826 + return mem_rw(file, (char __user*)buf, count, ppos, 1); 747 827 } 748 828 749 829 loff_t mem_lseek(struct file *file, loff_t offset, int orig) ··· 788 822 static int mem_release(struct inode *inode, struct file *file) 789 823 { 790 824 struct mm_struct *mm = file->private_data; 791 - 792 - mmput(mm); 825 + if (mm) 826 + mmdrop(mm); 793 827 return 0; 794 828 } 795 829
include/linux/lp8727.h
-2
include/linux/mpi.h
··· 57 57 58 58 typedef struct gcry_mpi *MPI; 59 59 60 - #define MPI_NULL NULL 61 - 62 60 #define mpi_get_nlimbs(a) ((a)->nlimbs) 63 61 #define mpi_is_neg(a) ((a)->sign) 64 62
+1 -1
include/linux/mtd/mtd.h
··· 489 489 490 490 static inline int mtd_can_have_bb(const struct mtd_info *mtd) 491 491 { 492 - return 0; 492 + return !!mtd->block_isbad; 493 493 } 494 494 495 495 /* Kernel-side ioctl definitions */
+1 -1
include/linux/nfs_xdr.h
··· 614 614 size_t acl_len; 615 615 unsigned int acl_pgbase; 616 616 struct page ** acl_pages; 617 - struct page * acl_scratch; 618 617 struct nfs4_sequence_args seq_args; 619 618 }; 620 619 ··· 623 624 size_t acl_len; 624 625 size_t acl_data_offset; 625 626 int acl_flags; 627 + struct page * acl_scratch; 626 628 struct nfs4_sequence_res seq_res; 627 629 }; 628 630
+1
include/linux/perf_event.h
··· 587 587 u64 sample_period; 588 588 u64 last_period; 589 589 local64_t period_left; 590 + u64 interrupts_seq; 590 591 u64 interrupts; 591 592 592 593 u64 freq_time_stamp;
+6
include/linux/sched.h
··· 2259 2259 extern void mmput(struct mm_struct *); 2260 2260 /* Grab a reference to a task's mm, if it is not already going away */ 2261 2261 extern struct mm_struct *get_task_mm(struct task_struct *task); 2262 + /* 2263 + * Grab a reference to a task's mm, if it is not already going away 2264 + * and ptrace_may_access with the mode parameter passed to it 2265 + * succeeds. 2266 + */ 2267 + extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); 2262 2268 /* Remove the current tasks stale references to the old mm_struct */ 2263 2269 extern void mm_release(struct task_struct *, struct mm_struct *); 2264 2270 /* Allocate a new mm structure and copy contents from tsk->mm */
+2
include/sound/core.h
··· 417 417 #define gameport_get_port_data(gp) (gp)->port_data 418 418 #endif 419 419 420 + #ifdef CONFIG_PCI 420 421 /* PCI quirk list helper */ 421 422 struct snd_pci_quirk { 422 423 unsigned short subvendor; /* PCI subvendor ID */ ··· 457 456 const struct snd_pci_quirk * 458 457 snd_pci_quirk_lookup_id(u16 vendor, u16 device, 459 458 const struct snd_pci_quirk *list); 459 + #endif 460 460 461 461 #endif /* __SOUND_CORE_H */
+66 -38
kernel/events/core.c
··· 2300 2300 return div64_u64(dividend, divisor); 2301 2301 } 2302 2302 2303 + static DEFINE_PER_CPU(int, perf_throttled_count); 2304 + static DEFINE_PER_CPU(u64, perf_throttled_seq); 2305 + 2303 2306 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) 2304 2307 { 2305 2308 struct hw_perf_event *hwc = &event->hw; ··· 2328 2325 } 2329 2326 } 2330 2327 2331 - static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period) 2328 + /* 2329 + * combine freq adjustment with unthrottling to avoid two passes over the 2330 + * events. At the same time, make sure, having freq events does not change 2331 + * the rate of unthrottling as that would introduce bias. 2332 + */ 2333 + static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, 2334 + int needs_unthr) 2332 2335 { 2333 2336 struct perf_event *event; 2334 2337 struct hw_perf_event *hwc; 2335 - u64 interrupts, now; 2338 + u64 now, period = TICK_NSEC; 2336 2339 s64 delta; 2337 2340 2338 - if (!ctx->nr_freq) 2341 + /* 2342 + * only need to iterate over all events iff: 2343 + * - context have events in frequency mode (needs freq adjust) 2344 + * - there are events to unthrottle on this cpu 2345 + */ 2346 + if (!(ctx->nr_freq || needs_unthr)) 2339 2347 return; 2348 + 2349 + raw_spin_lock(&ctx->lock); 2340 2350 2341 2351 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 2342 2352 if (event->state != PERF_EVENT_STATE_ACTIVE) ··· 2360 2344 2361 2345 hwc = &event->hw; 2362 2346 2363 - interrupts = hwc->interrupts; 2364 - hwc->interrupts = 0; 2365 - 2366 - /* 2367 - * unthrottle events on the tick 2368 - */ 2369 - if (interrupts == MAX_INTERRUPTS) { 2347 + if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) { 2348 + hwc->interrupts = 0; 2370 2349 perf_log_throttle(event, 1); 2371 2350 event->pmu->start(event, 0); 2372 2351 } ··· 2369 2358 if (!event->attr.freq || !event->attr.sample_freq) 2370 2359 continue; 2371 2360 2372 - event->pmu->read(event); 2361 + /* 
2362 + * stop the event and update event->count 2363 + */ 2364 + event->pmu->stop(event, PERF_EF_UPDATE); 2365 + 2373 2366 now = local64_read(&event->count); 2374 2367 delta = now - hwc->freq_count_stamp; 2375 2368 hwc->freq_count_stamp = now; 2376 2369 2370 + /* 2371 + * restart the event 2372 + * reload only if value has changed 2373 + */ 2377 2374 if (delta > 0) 2378 2375 perf_adjust_period(event, period, delta); 2376 + 2377 + event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); 2379 2378 } 2379 + 2380 + raw_spin_unlock(&ctx->lock); 2380 2381 } 2381 2382 2382 2383 /* ··· 2411 2388 */ 2412 2389 static void perf_rotate_context(struct perf_cpu_context *cpuctx) 2413 2390 { 2414 - u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC; 2415 2391 struct perf_event_context *ctx = NULL; 2416 - int rotate = 0, remove = 1, freq = 0; 2392 + int rotate = 0, remove = 1; 2417 2393 2418 2394 if (cpuctx->ctx.nr_events) { 2419 2395 remove = 0; 2420 2396 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) 2421 2397 rotate = 1; 2422 - if (cpuctx->ctx.nr_freq) 2423 - freq = 1; 2424 2398 } 2425 2399 2426 2400 ctx = cpuctx->task_ctx; ··· 2425 2405 remove = 0; 2426 2406 if (ctx->nr_events != ctx->nr_active) 2427 2407 rotate = 1; 2428 - if (ctx->nr_freq) 2429 - freq = 1; 2430 2408 } 2431 2409 2432 - if (!rotate && !freq) 2410 + if (!rotate) 2433 2411 goto done; 2434 2412 2435 2413 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 2436 2414 perf_pmu_disable(cpuctx->ctx.pmu); 2437 2415 2438 - if (freq) { 2439 - perf_ctx_adjust_freq(&cpuctx->ctx, interval); 2440 - if (ctx) 2441 - perf_ctx_adjust_freq(ctx, interval); 2442 - } 2416 + cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2417 + if (ctx) 2418 + ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); 2443 2419 2444 - if (rotate) { 2445 - cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2446 - if (ctx) 2447 - ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); 2420 + rotate_ctx(&cpuctx->ctx); 2421 + if (ctx) 2422 + rotate_ctx(ctx); 2448 2423 2449 - 
rotate_ctx(&cpuctx->ctx); 2450 - if (ctx) 2451 - rotate_ctx(ctx); 2452 - 2453 - perf_event_sched_in(cpuctx, ctx, current); 2454 - } 2424 + perf_event_sched_in(cpuctx, ctx, current); 2455 2425 2456 2426 perf_pmu_enable(cpuctx->ctx.pmu); 2457 2427 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 2458 - 2459 2428 done: 2460 2429 if (remove) 2461 2430 list_del_init(&cpuctx->rotation_list); ··· 2454 2445 { 2455 2446 struct list_head *head = &__get_cpu_var(rotation_list); 2456 2447 struct perf_cpu_context *cpuctx, *tmp; 2448 + struct perf_event_context *ctx; 2449 + int throttled; 2457 2450 2458 2451 WARN_ON(!irqs_disabled()); 2459 2452 2453 + __this_cpu_inc(perf_throttled_seq); 2454 + throttled = __this_cpu_xchg(perf_throttled_count, 0); 2455 + 2460 2456 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) { 2457 + ctx = &cpuctx->ctx; 2458 + perf_adjust_freq_unthr_context(ctx, throttled); 2459 + 2460 + ctx = cpuctx->task_ctx; 2461 + if (ctx) 2462 + perf_adjust_freq_unthr_context(ctx, throttled); 2463 + 2461 2464 if (cpuctx->jiffies_interval == 1 || 2462 2465 !(jiffies % cpuctx->jiffies_interval)) 2463 2466 perf_rotate_context(cpuctx); ··· 4530 4509 { 4531 4510 int events = atomic_read(&event->event_limit); 4532 4511 struct hw_perf_event *hwc = &event->hw; 4512 + u64 seq; 4533 4513 int ret = 0; 4534 4514 4535 4515 /* ··· 4540 4518 if (unlikely(!is_sampling_event(event))) 4541 4519 return 0; 4542 4520 4543 - if (unlikely(hwc->interrupts >= max_samples_per_tick)) { 4544 - if (throttle) { 4521 + seq = __this_cpu_read(perf_throttled_seq); 4522 + if (seq != hwc->interrupts_seq) { 4523 + hwc->interrupts_seq = seq; 4524 + hwc->interrupts = 1; 4525 + } else { 4526 + hwc->interrupts++; 4527 + if (unlikely(throttle 4528 + && hwc->interrupts >= max_samples_per_tick)) { 4529 + __this_cpu_inc(perf_throttled_count); 4545 4530 hwc->interrupts = MAX_INTERRUPTS; 4546 4531 perf_log_throttle(event, 0); 4547 4532 ret = 1; 4548 4533 } 4549 - } else 4550 - hwc->interrupts++; 4534 + } 4551 4535 
4552 4536 if (event->attr.freq) { 4553 4537 u64 now = perf_clock();
+16
kernel/exit.c
··· 1038 1038 if (tsk->nr_dirtied) 1039 1039 __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied); 1040 1040 exit_rcu(); 1041 + 1042 + /* 1043 + * The setting of TASK_RUNNING by try_to_wake_up() may be delayed 1044 + * when the following two conditions become true. 1045 + * - There is a race condition on mmap_sem (it is acquired by 1046 + * exit_mm()), and 1047 + * - An SMI occurs before setting TASK_RUNNING. 1048 + * (or the hypervisor of a virtual machine switches to another guest) 1049 + * As a result, we may become TASK_RUNNING after becoming TASK_DEAD 1050 + * 1051 + * To avoid it, we have to wait for releasing tsk->pi_lock which 1052 + * is held by try_to_wake_up() 1053 + */ 1054 + smp_mb(); 1055 + raw_spin_unlock_wait(&tsk->pi_lock); 1056 + 1041 1057 /* causes final put_task_struct in finish_task_switch(). */ 1042 1058 tsk->state = TASK_DEAD; 1043 1059 tsk->flags |= PF_NOFREEZE; /* tell freezer to ignore us */
+20
kernel/fork.c
··· 647 647 } 648 648 EXPORT_SYMBOL_GPL(get_task_mm); 649 649 650 + struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) 651 + { 652 + struct mm_struct *mm; 653 + int err; 654 + 655 + err = mutex_lock_killable(&task->signal->cred_guard_mutex); 656 + if (err) 657 + return ERR_PTR(err); 658 + 659 + mm = get_task_mm(task); 660 + if (mm && mm != current->mm && 661 + !ptrace_may_access(task, mode)) { 662 + mmput(mm); 663 + mm = ERR_PTR(-EACCES); 664 + } 665 + mutex_unlock(&task->signal->cred_guard_mutex); 666 + 667 + return mm; 668 + } 669 + 650 670 /* Please note the differences between mmput and mm_release. 651 671 * mmput is called whenever we stop holding onto a mm_struct, 652 672 * error success whatever.
+7 -12
kernel/sched/core.c
··· 74 74 75 75 #include <asm/tlb.h> 76 76 #include <asm/irq_regs.h> 77 + #include <asm/mutex.h> 77 78 #ifdef CONFIG_PARAVIRT 78 79 #include <asm/paravirt.h> 79 80 #endif ··· 724 723 p->sched_class->dequeue_task(rq, p, flags); 725 724 } 726 725 727 - /* 728 - * activate_task - move a task to the runqueue. 729 - */ 730 726 void activate_task(struct rq *rq, struct task_struct *p, int flags) 731 727 { 732 728 if (task_contributes_to_load(p)) ··· 732 734 enqueue_task(rq, p, flags); 733 735 } 734 736 735 - /* 736 - * deactivate_task - remove a task from the runqueue. 737 - */ 738 737 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 739 738 { 740 739 if (task_contributes_to_load(p)) ··· 4129 4134 on_rq = p->on_rq; 4130 4135 running = task_current(rq, p); 4131 4136 if (on_rq) 4132 - deactivate_task(rq, p, 0); 4137 + dequeue_task(rq, p, 0); 4133 4138 if (running) 4134 4139 p->sched_class->put_prev_task(rq, p); 4135 4140 ··· 4142 4147 if (running) 4143 4148 p->sched_class->set_curr_task(rq); 4144 4149 if (on_rq) 4145 - activate_task(rq, p, 0); 4150 + enqueue_task(rq, p, 0); 4146 4151 4147 4152 check_class_changed(rq, p, prev_class, oldprio); 4148 4153 task_rq_unlock(rq, p, &flags); ··· 4993 4998 * placed properly. 4994 4999 */ 4995 5000 if (p->on_rq) { 4996 - deactivate_task(rq_src, p, 0); 5001 + dequeue_task(rq_src, p, 0); 4997 5002 set_task_cpu(p, dest_cpu); 4998 - activate_task(rq_dest, p, 0); 5003 + enqueue_task(rq_dest, p, 0); 4999 5004 check_preempt_curr(rq_dest, p, 0); 5000 5005 } 5001 5006 done: ··· 7027 7032 7028 7033 on_rq = p->on_rq; 7029 7034 if (on_rq) 7030 - deactivate_task(rq, p, 0); 7035 + dequeue_task(rq, p, 0); 7031 7036 __setscheduler(rq, p, SCHED_NORMAL, 0); 7032 7037 if (on_rq) { 7033 - activate_task(rq, p, 0); 7038 + enqueue_task(rq, p, 0); 7034 7039 resched_task(rq->curr); 7035 7040 } 7036 7041
+29 -5
kernel/sched/fair.c
··· 4866 4866 return; 4867 4867 } 4868 4868 4869 + static inline void clear_nohz_tick_stopped(int cpu) 4870 + { 4871 + if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { 4872 + cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); 4873 + atomic_dec(&nohz.nr_cpus); 4874 + clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); 4875 + } 4876 + } 4877 + 4869 4878 static inline void set_cpu_sd_state_busy(void) 4870 4879 { 4871 4880 struct sched_domain *sd; ··· 4913 4904 { 4914 4905 int cpu = smp_processor_id(); 4915 4906 4907 + /* 4908 + * If this cpu is going down, then nothing needs to be done. 4909 + */ 4910 + if (!cpu_active(cpu)) 4911 + return; 4912 + 4916 4913 if (stop_tick) { 4917 4914 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) 4918 4915 return; ··· 4928 4913 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); 4929 4914 } 4930 4915 return; 4916 + } 4917 + 4918 + static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb, 4919 + unsigned long action, void *hcpu) 4920 + { 4921 + switch (action & ~CPU_TASKS_FROZEN) { 4922 + case CPU_DYING: 4923 + clear_nohz_tick_stopped(smp_processor_id()); 4924 + return NOTIFY_OK; 4925 + default: 4926 + return NOTIFY_DONE; 4927 + } 4931 4928 } 4932 4929 #endif 4933 4930 ··· 5097 5070 * busy tick after returning from idle, we will update the busy stats. 5098 5071 */ 5099 5072 set_cpu_sd_state_busy(); 5100 - if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { 5101 - clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); 5102 - cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); 5103 - atomic_dec(&nohz.nr_cpus); 5104 - } 5073 + clear_nohz_tick_stopped(cpu); 5105 5074 5106 5075 /* 5107 5076 * None are in tickless mode and hence no need for NOHZ idle load ··· 5613 5590 5614 5591 #ifdef CONFIG_NO_HZ 5615 5592 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); 5593 + cpu_notifier(sched_ilb_notifier, 0); 5616 5594 #endif 5617 5595 #endif /* SMP */ 5618 5596
+5
kernel/sched/rt.c
··· 1587 1587 if (!next_task) 1588 1588 return 0; 1589 1589 1590 + #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 1591 + if (unlikely(task_running(rq, next_task))) 1592 + return 0; 1593 + #endif 1594 + 1590 1595 retry: 1591 1596 if (unlikely(next_task == rq->curr)) { 1592 1597 WARN_ON(1);
+1 -1
kernel/watchdog.c
··· 296 296 if (__this_cpu_read(soft_watchdog_warn) == true) 297 297 return HRTIMER_RESTART; 298 298 299 - printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", 299 + printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", 300 300 smp_processor_id(), duration, 301 301 current->comm, task_pid_nr(current)); 302 302 print_modules();
+4
lib/Kconfig
··· 279 279 280 280 If unsure, say N. 281 281 282 + config CLZ_TAB 283 + bool 284 + 282 285 config CORDIC 283 286 tristate "CORDIC algorithm" 284 287 help ··· 290 287 291 288 config MPILIB 292 289 tristate 290 + select CLZ_TAB 293 291 help 294 292 Multiprecision maths library from GnuPG. 295 293 It is used to implement RSA digital signature verification,
+2
lib/Makefile
··· 121 121 obj-$(CONFIG_MPILIB) += mpi/ 122 122 obj-$(CONFIG_SIGNATURE) += digsig.o 123 123 124 + obj-$(CONFIG_CLZ_TAB) += clz_tab.o 125 + 124 126 hostprogs-y := gen_crc32table 125 127 clean-files := crc32table.h 126 128
+1 -1
lib/bug.c
··· 169 169 return BUG_TRAP_TYPE_WARN; 170 170 } 171 171 172 - printk(KERN_EMERG "------------[ cut here ]------------\n"); 172 + printk(KERN_DEFAULT "------------[ cut here ]------------\n"); 173 173 174 174 if (file) 175 175 printk(KERN_CRIT "kernel BUG at %s:%u!\n",
+18
lib/clz_tab.c
··· 1 + const unsigned char __clz_tab[] = { 2 + 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 3 + 5, 5, 5, 5, 5, 5, 5, 5, 4 + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5 + 6, 6, 6, 6, 6, 6, 6, 6, 6 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 + 7, 7, 7, 7, 7, 7, 7, 7, 8 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 9 + 7, 7, 7, 7, 7, 7, 7, 7, 10 + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 11 + 8, 8, 8, 8, 8, 8, 8, 8, 12 + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 13 + 8, 8, 8, 8, 8, 8, 8, 8, 14 + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 15 + 8, 8, 8, 8, 8, 8, 8, 8, 16 + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 17 + 8, 8, 8, 8, 8, 8, 8, 8, 18 + };
+23 -29
lib/digsig.c
··· 34 34 unsigned long msglen, 35 35 unsigned long modulus_bitlen, 36 36 unsigned char *out, 37 - unsigned long *outlen, 38 - int *is_valid) 37 + unsigned long *outlen) 39 38 { 40 39 unsigned long modulus_len, ps_len, i; 41 - int result; 42 - 43 - /* default to invalid packet */ 44 - *is_valid = 0; 45 40 46 41 modulus_len = (modulus_bitlen >> 3) + (modulus_bitlen & 7 ? 1 : 0); 47 42 ··· 45 50 return -EINVAL; 46 51 47 52 /* separate encoded message */ 48 - if ((msg[0] != 0x00) || (msg[1] != (unsigned char)1)) { 49 - result = -EINVAL; 50 - goto bail; 51 - } 53 + if ((msg[0] != 0x00) || (msg[1] != (unsigned char)1)) 54 + return -EINVAL; 52 55 53 56 for (i = 2; i < modulus_len - 1; i++) 54 57 if (msg[i] != 0xFF) 55 58 break; 56 59 57 60 /* separator check */ 58 - if (msg[i] != 0) { 61 + if (msg[i] != 0) 59 62 /* There was no octet with hexadecimal value 0x00 60 63 to separate ps from m. */ 61 - result = -EINVAL; 62 - goto bail; 63 - } 64 + return -EINVAL; 64 65 65 66 ps_len = i - 2; 66 67 67 68 if (*outlen < (msglen - (2 + ps_len + 1))) { 68 69 *outlen = msglen - (2 + ps_len + 1); 69 - result = -EOVERFLOW; 70 - goto bail; 70 + return -EOVERFLOW; 71 71 } 72 72 73 73 *outlen = (msglen - (2 + ps_len + 1)); 74 74 memcpy(out, &msg[2 + ps_len + 1], *outlen); 75 75 76 - /* valid packet */ 77 - *is_valid = 1; 78 - result = 0; 79 - bail: 80 - return result; 76 + return 0; 81 77 } 82 78 83 79 /* ··· 82 96 unsigned long len; 83 97 unsigned long mlen, mblen; 84 98 unsigned nret, l; 85 - int valid, head, i; 99 + int head, i; 86 100 unsigned char *out1 = NULL, *out2 = NULL; 87 101 MPI in = NULL, res = NULL, pkey[2]; 88 102 uint8_t *p, *datap, *endp; ··· 91 105 92 106 down_read(&key->sem); 93 107 ukp = key->payload.data; 108 + 109 + if (ukp->datalen < sizeof(*pkh)) 110 + goto err1; 111 + 94 112 pkh = (struct pubkey_hdr *)ukp->data; 95 113 96 114 if (pkh->version != 1) ··· 107 117 goto err1; 108 118 109 119 datap = pkh->mpi; 110 - endp = datap + ukp->datalen; 120 + endp = ukp->data + 
ukp->datalen; 121 + 122 + err = -ENOMEM; 111 123 112 124 for (i = 0; i < pkh->nmpi; i++) { 113 125 unsigned int remaining = endp - datap; 114 126 pkey[i] = mpi_read_from_buffer(datap, &remaining); 127 + if (!pkey[i]) 128 + goto err; 115 129 datap += remaining; 116 130 } 117 131 118 132 mblen = mpi_get_nbits(pkey[0]); 119 133 mlen = (mblen + 7)/8; 120 134 121 - err = -ENOMEM; 135 + if (mlen == 0) 136 + goto err; 122 137 123 138 out1 = kzalloc(mlen, GFP_KERNEL); 124 139 if (!out1) ··· 162 167 memset(out1, 0, head); 163 168 memcpy(out1 + head, p, l); 164 169 165 - err = -EINVAL; 166 - pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len, &valid); 170 + err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len); 167 171 168 - if (valid && len == hlen) 172 + if (!err && len == hlen) 169 173 err = memcmp(out2, h, hlen); 170 174 171 175 err: ··· 172 178 mpi_free(res); 173 179 kfree(out1); 174 180 kfree(out2); 175 - mpi_free(pkey[0]); 176 - mpi_free(pkey[1]); 181 + while (--i >= 0) 182 + mpi_free(pkey[i]); 177 183 err1: 178 184 up_read(&key->sem); 179 185
+33 -11
lib/mpi/longlong.h
··· 1200 1200 "r" ((USItype)(v)) \ 1201 1201 : "%g1", "%g2" __AND_CLOBBER_CC) 1202 1202 #define UMUL_TIME 39 /* 39 instructions */ 1203 - #endif 1204 - #ifndef udiv_qrnnd 1205 - #ifndef LONGLONG_STANDALONE 1203 + /* It's quite necessary to add this much assembler for the sparc. 1204 + The default udiv_qrnnd (in C) is more than 10 times slower! */ 1206 1205 #define udiv_qrnnd(q, r, n1, n0, d) \ 1207 - do { USItype __r; \ 1208 - (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \ 1209 - (r) = __r; \ 1210 - } while (0) 1211 - extern USItype __udiv_qrnnd(); 1212 - #define UDIV_TIME 140 1213 - #endif /* LONGLONG_STANDALONE */ 1214 - #endif /* udiv_qrnnd */ 1206 + __asm__ ("! Inlined udiv_qrnnd\n\t" \ 1207 + "mov 32,%%g1\n\t" \ 1208 + "subcc %1,%2,%%g0\n\t" \ 1209 + "1: bcs 5f\n\t" \ 1210 + "addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n\t" \ 1211 + "sub %1,%2,%1 ! this kills msb of n\n\t" \ 1212 + "addx %1,%1,%1 ! so this can't give carry\n\t" \ 1213 + "subcc %%g1,1,%%g1\n\t" \ 1214 + "2: bne 1b\n\t" \ 1215 + "subcc %1,%2,%%g0\n\t" \ 1216 + "bcs 3f\n\t" \ 1217 + "addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n\t" \ 1218 + "b 3f\n\t" \ 1219 + "sub %1,%2,%1 ! this kills msb of n\n\t" \ 1220 + "4: sub %1,%2,%1\n\t" \ 1221 + "5: addxcc %1,%1,%1\n\t" \ 1222 + "bcc 2b\n\t" \ 1223 + "subcc %%g1,1,%%g1\n\t" \ 1224 + "! Got carry from n. Subtract next step to cancel this carry.\n\t" \ 1225 + "bne 4b\n\t" \ 1226 + "addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb\n\t" \ 1227 + "sub %1,%2,%1\n\t" \ 1228 + "3: xnor %0,0,%0\n\t" \ 1229 + "! End of inline udiv_qrnnd\n" \ 1230 + : "=&r" ((USItype)(q)), \ 1231 + "=&r" ((USItype)(r)) \ 1232 + : "r" ((USItype)(d)), \ 1233 + "1" ((USItype)(n1)), \ 1234 + "0" ((USItype)(n0)) : "%g1", "cc") 1235 + #define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */ 1236 + #endif 1215 1237 #endif /* __sparc__ */ 1216 1238 1217 1239 /***************************************
-19
lib/mpi/mpi-bit.c
··· 21 21 #include "mpi-internal.h" 22 22 #include "longlong.h" 23 23 24 - const unsigned char __clz_tab[] = { 25 - 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 26 - 5, 5, 5, 5, 5, 5, 5, 5, 27 - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 28 - 6, 6, 6, 6, 6, 6, 6, 6, 29 - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 30 - 7, 7, 7, 7, 7, 7, 7, 7, 31 - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 32 - 7, 7, 7, 7, 7, 7, 7, 7, 33 - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 34 - 8, 8, 8, 8, 8, 8, 8, 8, 35 - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 36 - 8, 8, 8, 8, 8, 8, 8, 8, 37 - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 38 - 8, 8, 8, 8, 8, 8, 8, 8, 39 - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 40 - 8, 8, 8, 8, 8, 8, 8, 8, 41 - }; 42 - 43 24 #define A_LIMB_1 ((mpi_limb_t) 1) 44 25 45 26 /****************
+5
lib/mpi/mpi-div.c
··· 149 149 mpi_ptr_t marker[5]; 150 150 int markidx = 0; 151 151 152 + if (!dsize) 153 + return -EINVAL; 154 + 152 155 memset(marker, 0, sizeof(marker)); 153 156 154 157 /* Ensure space is enough for quotient and remainder. ··· 210 207 * numerator would be gradually overwritten by the quotient limbs. */ 211 208 if (qp == np) { /* Copy NP object to temporary space. */ 212 209 np = marker[markidx++] = mpi_alloc_limb_space(nsize); 210 + if (!np) 211 + goto nomem; 213 212 MPN_COPY(np, qp, nsize); 214 213 } 215 214 } else /* Put quotient at top of remainder. */
+1 -1
lib/mpi/mpi-pow.c
··· 59 59 ep = exp->d; 60 60 61 61 if (!msize) 62 - msize = 1 / msize; /* provoke a signal */ 62 + return -EINVAL; 63 63 64 64 if (!esize) { 65 65 /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
+2 -89
lib/mpi/mpicoder.c
··· 20 20 21 21 #include "mpi-internal.h" 22 22 23 - #define DIM(v) (sizeof(v)/sizeof((v)[0])) 24 23 #define MAX_EXTERN_MPI_BITS 16384 25 - 26 - static uint8_t asn[15] = /* Object ID is 1.3.14.3.2.26 */ 27 - { 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03, 28 - 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14 29 - }; 30 - 31 - MPI do_encode_md(const void *sha_buffer, unsigned nbits) 32 - { 33 - int nframe = (nbits + 7) / 8; 34 - uint8_t *frame, *fr_pt; 35 - int i = 0, n; 36 - size_t asnlen = DIM(asn); 37 - MPI a = MPI_NULL; 38 - 39 - if (SHA1_DIGEST_LENGTH + asnlen + 4 > nframe) 40 - pr_info("MPI: can't encode a %d bit MD into a %d bits frame\n", 41 - (int)(SHA1_DIGEST_LENGTH * 8), (int)nbits); 42 - 43 - /* We encode the MD in this way: 44 - * 45 - * 0 A PAD(n bytes) 0 ASN(asnlen bytes) MD(len bytes) 46 - * 47 - * PAD consists of FF bytes. 48 - */ 49 - frame = kmalloc(nframe, GFP_KERNEL); 50 - if (!frame) 51 - return MPI_NULL; 52 - n = 0; 53 - frame[n++] = 0; 54 - frame[n++] = 1; /* block type */ 55 - i = nframe - SHA1_DIGEST_LENGTH - asnlen - 3; 56 - 57 - if (i <= 1) { 58 - pr_info("MPI: message digest encoding failed\n"); 59 - kfree(frame); 60 - return a; 61 - } 62 - 63 - memset(frame + n, 0xff, i); 64 - n += i; 65 - frame[n++] = 0; 66 - memcpy(frame + n, &asn, asnlen); 67 - n += asnlen; 68 - memcpy(frame + n, sha_buffer, SHA1_DIGEST_LENGTH); 69 - n += SHA1_DIGEST_LENGTH; 70 - 71 - i = nframe; 72 - fr_pt = frame; 73 - 74 - if (n != nframe) { 75 - printk 76 - ("MPI: message digest encoding failed, frame length is wrong\n"); 77 - kfree(frame); 78 - return a; 79 - } 80 - 81 - a = mpi_alloc((nframe + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB); 82 - mpi_set_buffer(a, frame, nframe, 0); 83 - kfree(frame); 84 - 85 - return a; 86 - } 87 24 88 25 MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread) 89 26 { ··· 28 91 int i, j; 29 92 unsigned nbits, nbytes, nlimbs, nread = 0; 30 93 mpi_limb_t a; 31 - MPI val = MPI_NULL; 94 + MPI val = NULL; 32 95 33 96 if 
(*ret_nread < 2) 34 97 goto leave; ··· 45 108 nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB; 46 109 val = mpi_alloc(nlimbs); 47 110 if (!val) 48 - return MPI_NULL; 111 + return NULL; 49 112 i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB; 50 113 i %= BYTES_PER_MPI_LIMB; 51 114 val->nbits = nbits; ··· 147 210 return 0; 148 211 } 149 212 EXPORT_SYMBOL_GPL(mpi_fromstr); 150 - 151 - /**************** 152 - * Special function to get the low 8 bytes from an mpi. 153 - * This can be used as a keyid; KEYID is an 2 element array. 154 - * Return the low 4 bytes. 155 - */ 156 - u32 mpi_get_keyid(const MPI a, u32 *keyid) 157 - { 158 - #if BYTES_PER_MPI_LIMB == 4 159 - if (keyid) { 160 - keyid[0] = a->nlimbs >= 2 ? a->d[1] : 0; 161 - keyid[1] = a->nlimbs >= 1 ? a->d[0] : 0; 162 - } 163 - return a->nlimbs >= 1 ? a->d[0] : 0; 164 - #elif BYTES_PER_MPI_LIMB == 8 165 - if (keyid) { 166 - keyid[0] = a->nlimbs ? (u32) (a->d[0] >> 32) : 0; 167 - keyid[1] = a->nlimbs ? (u32) (a->d[0] & 0xffffffff) : 0; 168 - } 169 - return a->nlimbs ? (u32) (a->d[0] & 0xffffffff) : 0; 170 - #else 171 - #error Make this function work with other LIMB sizes 172 - #endif 173 - } 174 213 175 214 /**************** 176 215 * Return an allocated buffer with the MPI (msb first).
+4
lib/mpi/mpih-div.c
··· 217 217 case 0: 218 218 /* We are asked to divide by zero, so go ahead and do it! (To make 219 219 the compiler not remove this statement, return the value.) */ 220 + /* 221 + * existing clients of this function have been modified 222 + * not to call it with dsize == 0, so this should not happen 223 + */ 220 224 return 1 / dsize; 221 225 222 226 case 1:
+4 -1
lib/mpi/mpiutil.c
··· 58 58 { 59 59 size_t len = nlimbs * sizeof(mpi_limb_t); 60 60 61 + if (!len) 62 + return NULL; 63 + 61 64 return kmalloc(len, GFP_KERNEL); 62 65 } 63 66 ··· 138 135 size_t i; 139 136 MPI b; 140 137 141 - *copied = MPI_NULL; 138 + *copied = NULL; 142 139 143 140 if (a) { 144 141 b = mpi_alloc(a->nlimbs);
+2 -1
mm/kmemleak.c
··· 1036 1036 { 1037 1037 pr_debug("%s(0x%p)\n", __func__, ptr); 1038 1038 1039 - if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) 1039 + if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr)) 1040 1040 add_scan_area((unsigned long)ptr, size, gfp); 1041 1041 else if (atomic_read(&kmemleak_early_log)) 1042 1042 log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0); ··· 1757 1757 1758 1758 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF 1759 1759 if (!kmemleak_skip_disable) { 1760 + atomic_set(&kmemleak_early_log, 0); 1760 1761 kmemleak_disable(); 1761 1762 return; 1762 1763 }
+9 -14
mm/process_vm_access.c
··· 298 298 goto free_proc_pages; 299 299 } 300 300 301 - task_lock(task); 302 - if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) { 303 - task_unlock(task); 304 - rc = -EPERM; 301 + mm = mm_access(task, PTRACE_MODE_ATTACH); 302 + if (!mm || IS_ERR(mm)) { 303 + rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; 304 + /* 305 + * Explicitly map EACCES to EPERM as EPERM is a more a 306 + * appropriate error code for process_vw_readv/writev 307 + */ 308 + if (rc == -EACCES) 309 + rc = -EPERM; 305 310 goto put_task_struct; 306 311 } 307 - mm = task->mm; 308 - 309 - if (!mm || (task->flags & PF_KTHREAD)) { 310 - task_unlock(task); 311 - rc = -EINVAL; 312 - goto put_task_struct; 313 - } 314 - 315 - atomic_inc(&mm->mm_users); 316 - task_unlock(task); 317 312 318 313 for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) { 319 314 rc = process_vm_rw_single_vec(
-2
net/ceph/ceph_common.c
··· 85 85 } else { 86 86 pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid); 87 87 memcpy(&client->fsid, fsid, sizeof(*fsid)); 88 - ceph_debugfs_client_init(client); 89 - client->have_fsid = true; 90 88 } 91 89 return 0; 92 90 }
+12 -1
net/ceph/mon_client.c
··· 8 8 9 9 #include <linux/ceph/mon_client.h> 10 10 #include <linux/ceph/libceph.h> 11 + #include <linux/ceph/debugfs.h> 11 12 #include <linux/ceph/decode.h> 12 - 13 13 #include <linux/ceph/auth.h> 14 14 15 15 /* ··· 340 340 client->monc.monmap = monmap; 341 341 kfree(old); 342 342 343 + if (!client->have_fsid) { 344 + client->have_fsid = true; 345 + mutex_unlock(&monc->mutex); 346 + /* 347 + * do debugfs initialization without mutex to avoid 348 + * creating a locking dependency 349 + */ 350 + ceph_debugfs_client_init(client); 351 + goto out_unlocked; 352 + } 343 353 out: 344 354 mutex_unlock(&monc->mutex); 355 + out_unlocked: 345 356 wake_up_all(&client->auth_wq); 346 357 } 347 358
+1 -1
sound/pci/hda/hda_codec.c
··· 1447 1447 for (i = 0; i < c->cvt_setups.used; i++) { 1448 1448 p = snd_array_elem(&c->cvt_setups, i); 1449 1449 if (!p->active && p->stream_tag == stream_tag && 1450 - get_wcaps_type(get_wcaps(codec, p->nid)) == type) 1450 + get_wcaps_type(get_wcaps(c, p->nid)) == type) 1451 1451 p->dirty = 1; 1452 1452 } 1453 1453 }
+15 -9
sound/pci/hda/hda_jack.c
··· 282 282 EXPORT_SYMBOL_HDA(snd_hda_jack_add_kctl); 283 283 284 284 static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid, 285 - const struct auto_pin_cfg *cfg) 285 + const struct auto_pin_cfg *cfg, 286 + char *lastname, int *lastidx) 286 287 { 287 288 unsigned int def_conf, conn; 288 289 char name[44]; ··· 299 298 return 0; 300 299 301 300 snd_hda_get_pin_label(codec, nid, cfg, name, sizeof(name), &idx); 301 + if (!strcmp(name, lastname) && idx == *lastidx) 302 + idx++; 303 + strncpy(lastname, name, 44); 304 + *lastidx = idx; 302 305 err = snd_hda_jack_add_kctl(codec, nid, name, idx); 303 306 if (err < 0) 304 307 return err; ··· 316 311 const struct auto_pin_cfg *cfg) 317 312 { 318 313 const hda_nid_t *p; 319 - int i, err; 314 + int i, err, lastidx = 0; 315 + char lastname[44] = ""; 320 316 321 317 for (i = 0, p = cfg->line_out_pins; i < cfg->line_outs; i++, p++) { 322 - err = add_jack_kctl(codec, *p, cfg); 318 + err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx); 323 319 if (err < 0) 324 320 return err; 325 321 } 326 322 for (i = 0, p = cfg->hp_pins; i < cfg->hp_outs; i++, p++) { 327 323 if (*p == *cfg->line_out_pins) /* might be duplicated */ 328 324 break; 329 - err = add_jack_kctl(codec, *p, cfg); 325 + err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx); 330 326 if (err < 0) 331 327 return err; 332 328 } 333 329 for (i = 0, p = cfg->speaker_pins; i < cfg->speaker_outs; i++, p++) { 334 330 if (*p == *cfg->line_out_pins) /* might be duplicated */ 335 331 break; 336 - err = add_jack_kctl(codec, *p, cfg); 332 + err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx); 337 333 if (err < 0) 338 334 return err; 339 335 } 340 336 for (i = 0; i < cfg->num_inputs; i++) { 341 - err = add_jack_kctl(codec, cfg->inputs[i].pin, cfg); 337 + err = add_jack_kctl(codec, cfg->inputs[i].pin, cfg, lastname, &lastidx); 342 338 if (err < 0) 343 339 return err; 344 340 } 345 341 for (i = 0, p = cfg->dig_out_pins; i < cfg->dig_outs; i++, p++) { 346 - err = 
add_jack_kctl(codec, *p, cfg); 342 + err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx); 347 343 if (err < 0) 348 344 return err; 349 345 } 350 - err = add_jack_kctl(codec, cfg->dig_in_pin, cfg); 346 + err = add_jack_kctl(codec, cfg->dig_in_pin, cfg, lastname, &lastidx); 351 347 if (err < 0) 352 348 return err; 353 - err = add_jack_kctl(codec, cfg->mono_out_pin, cfg); 349 + err = add_jack_kctl(codec, cfg->mono_out_pin, cfg, lastname, &lastidx); 354 350 if (err < 0) 355 351 return err; 356 352 return 0;
+4 -2
sound/pci/hda/patch_cirrus.c
··· 988 988 change_cur_input(codec, !spec->automic_idx, 0); 989 989 } else { 990 990 if (present) { 991 - spec->last_input = spec->cur_input; 992 - spec->cur_input = spec->automic_idx; 991 + if (spec->cur_input != spec->automic_idx) { 992 + spec->last_input = spec->cur_input; 993 + spec->cur_input = spec->automic_idx; 994 + } 993 995 } else { 994 996 spec->cur_input = spec->last_input; 995 997 }
+35 -22
sound/pci/hda/patch_realtek.c
··· 177 177 unsigned int detect_lo:1; /* Line-out detection enabled */ 178 178 unsigned int automute_speaker_possible:1; /* there are speakers and either LO or HP */ 179 179 unsigned int automute_lo_possible:1; /* there are line outs and HP */ 180 + unsigned int keep_vref_in_automute:1; /* Don't clear VREF in automute */ 180 181 181 182 /* other flags */ 182 183 unsigned int no_analog :1; /* digital I/O only */ ··· 496 495 497 496 for (i = 0; i < num_pins; i++) { 498 497 hda_nid_t nid = pins[i]; 498 + unsigned int val; 499 499 if (!nid) 500 500 break; 501 501 switch (spec->automute_mode) { 502 502 case ALC_AUTOMUTE_PIN: 503 + /* don't reset VREF value in case it's controlling 504 + * the amp (see alc861_fixup_asus_amp_vref_0f()) 505 + */ 506 + if (spec->keep_vref_in_automute) { 507 + val = snd_hda_codec_read(codec, nid, 0, 508 + AC_VERB_GET_PIN_WIDGET_CONTROL, 0); 509 + val &= ~PIN_HP; 510 + } else 511 + val = 0; 512 + val |= pin_bits; 503 513 snd_hda_codec_write(codec, nid, 0, 504 514 AC_VERB_SET_PIN_WIDGET_CONTROL, 505 - pin_bits); 515 + val); 506 516 break; 507 517 case ALC_AUTOMUTE_AMP: 508 518 snd_hda_codec_amp_stereo(codec, nid, HDA_OUTPUT, 0, ··· 4747 4735 ALC262_FIXUP_FSC_H270, 4748 4736 ALC262_FIXUP_HP_Z200, 4749 4737 ALC262_FIXUP_TYAN, 4750 - ALC262_FIXUP_TOSHIBA_RX1, 4751 4738 ALC262_FIXUP_LENOVO_3000, 4752 4739 ALC262_FIXUP_BENQ, 4753 4740 ALC262_FIXUP_BENQ_T31, ··· 4774 4763 .v.pins = (const struct alc_pincfg[]) { 4775 4764 { 0x14, 0x1993e1f0 }, /* int AUX */ 4776 4765 { } 4777 - } 4778 - }, 4779 - [ALC262_FIXUP_TOSHIBA_RX1] = { 4780 - .type = ALC_FIXUP_PINS, 4781 - .v.pins = (const struct alc_pincfg[]) { 4782 - { 0x14, 0x90170110 }, /* speaker */ 4783 - { 0x15, 0x0421101f }, /* HP */ 4784 - { 0x1a, 0x40f000f0 }, /* N/A */ 4785 - { 0x1b, 0x40f000f0 }, /* N/A */ 4786 - { 0x1e, 0x40f000f0 }, /* N/A */ 4787 4766 } 4788 4767 }, 4789 4768 [ALC262_FIXUP_LENOVO_3000] = { ··· 4808 4807 SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu", ALC262_FIXUP_BENQ), 4809 4808 
SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ), 4810 4809 SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN), 4811 - SND_PCI_QUIRK(0x1179, 0x0001, "Toshiba dynabook SS RX1", 4812 - ALC262_FIXUP_TOSHIBA_RX1), 4813 4810 SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270), 4814 4811 SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000), 4815 4812 SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ), ··· 5376 5377 SND_PCI_QUIRK(0x1043, 0x8330, "ASUS Eeepc P703 P900A", 5377 5378 ALC269_FIXUP_AMIC), 5378 5379 SND_PCI_QUIRK(0x1043, 0x1013, "ASUS N61Da", ALC269_FIXUP_AMIC), 5379 - SND_PCI_QUIRK(0x1043, 0x1113, "ASUS N63Jn", ALC269_FIXUP_AMIC), 5380 5380 SND_PCI_QUIRK(0x1043, 0x1143, "ASUS B53f", ALC269_FIXUP_AMIC), 5381 5381 SND_PCI_QUIRK(0x1043, 0x1133, "ASUS UJ20ft", ALC269_FIXUP_AMIC), 5382 5382 SND_PCI_QUIRK(0x1043, 0x1183, "ASUS K72DR", ALC269_FIXUP_AMIC), ··· 5587 5589 PINFIX_ASUS_A6RP, 5588 5590 }; 5589 5591 5592 + /* On some laptops, VREF of pin 0x0f is abused for controlling the main amp */ 5593 + static void alc861_fixup_asus_amp_vref_0f(struct hda_codec *codec, 5594 + const struct alc_fixup *fix, int action) 5595 + { 5596 + struct alc_spec *spec = codec->spec; 5597 + unsigned int val; 5598 + 5599 + if (action != ALC_FIXUP_ACT_INIT) 5600 + return; 5601 + val = snd_hda_codec_read(codec, 0x0f, 0, 5602 + AC_VERB_GET_PIN_WIDGET_CONTROL, 0); 5603 + if (!(val & (AC_PINCTL_IN_EN | AC_PINCTL_OUT_EN))) 5604 + val |= AC_PINCTL_IN_EN; 5605 + val |= AC_PINCTL_VREF_50; 5606 + snd_hda_codec_write(codec, 0x0f, 0, 5607 + AC_VERB_SET_PIN_WIDGET_CONTROL, val); 5608 + spec->keep_vref_in_automute = 1; 5609 + } 5610 + 5590 5611 static const struct alc_fixup alc861_fixups[] = { 5591 5612 [PINFIX_FSC_AMILO_PI1505] = { 5592 5613 .type = ALC_FIXUP_PINS, ··· 5616 5599 } 5617 5600 }, 5618 5601 [PINFIX_ASUS_A6RP] = { 5619 - .type = ALC_FIXUP_VERBS, 5620 - .v.verbs = (const struct hda_verb[]) { 
5621 - /* node 0x0f VREF seems controlling the master output */ 5622 - { 0x0f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF50 }, 5623 - { } 5624 - }, 5602 + .type = ALC_FIXUP_FUNC, 5603 + .v.func = alc861_fixup_asus_amp_vref_0f, 5625 5604 }, 5626 5605 }; 5627 5606 5628 5607 static const struct snd_pci_quirk alc861_fixup_tbl[] = { 5629 - SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", PINFIX_ASUS_A6RP), 5608 + SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", PINFIX_ASUS_A6RP), 5630 5609 SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", PINFIX_ASUS_A6RP), 5631 5610 SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", PINFIX_FSC_AMILO_PI1505), 5632 5611 {}
+129 -155
sound/pci/hda/patch_via.c
··· 199 199 unsigned int no_pin_power_ctl; 200 200 enum VIA_HDA_CODEC codec_type; 201 201 202 + /* analog low-power control */ 203 + bool alc_mode; 204 + 202 205 /* smart51 setup */ 203 206 unsigned int smart51_nums; 204 207 hda_nid_t smart51_pins[2]; ··· 690 687 } 691 688 } 692 689 690 + static void update_power_state(struct hda_codec *codec, hda_nid_t nid, 691 + unsigned int parm) 692 + { 693 + if (snd_hda_codec_read(codec, nid, 0, 694 + AC_VERB_GET_POWER_STATE, 0) == parm) 695 + return; 696 + snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_POWER_STATE, parm); 697 + } 698 + 693 699 static void set_pin_power_state(struct hda_codec *codec, hda_nid_t nid, 694 700 unsigned int *affected_parm) 695 701 { ··· 721 709 } else 722 710 parm = AC_PWRST_D3; 723 711 724 - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_POWER_STATE, parm); 712 + update_power_state(codec, nid, parm); 725 713 } 726 714 727 715 static int via_pin_power_ctl_info(struct snd_kcontrol *kcontrol, ··· 761 749 return 0; 762 750 spec->no_pin_power_ctl = val; 763 751 set_widgets_power_state(codec); 752 + analog_low_current_mode(codec); 764 753 return 1; 765 754 } 766 755 ··· 1049 1036 } 1050 1037 1051 1038 /* enter/exit analog low-current mode */ 1052 - static void analog_low_current_mode(struct hda_codec *codec) 1039 + static void __analog_low_current_mode(struct hda_codec *codec, bool force) 1053 1040 { 1054 1041 struct via_spec *spec = codec->spec; 1055 1042 bool enable; 1056 1043 unsigned int verb, parm; 1057 1044 1058 - enable = is_aa_path_mute(codec) && (spec->opened_streams != 0); 1045 + if (spec->no_pin_power_ctl) 1046 + enable = false; 1047 + else 1048 + enable = is_aa_path_mute(codec) && !spec->opened_streams; 1049 + if (enable == spec->alc_mode && !force) 1050 + return; 1051 + spec->alc_mode = enable; 1059 1052 1060 1053 /* decide low current mode's verb & parameter */ 1061 1054 switch (spec->codec_type) { ··· 1091 1072 } 1092 1073 /* send verb */ 1093 1074 snd_hda_codec_write(codec, codec->afg, 
0, verb, parm); 1075 + } 1076 + 1077 + static void analog_low_current_mode(struct hda_codec *codec) 1078 + { 1079 + return __analog_low_current_mode(codec, false); 1094 1080 } 1095 1081 1096 1082 /* ··· 1470 1446 struct snd_kcontrol *kctl; 1471 1447 int err, i; 1472 1448 1449 + spec->no_pin_power_ctl = 1; 1473 1450 if (spec->set_widgets_power_state) 1474 1451 if (!via_clone_control(spec, &via_pin_power_ctl_enum)) 1475 1452 return -ENOMEM; ··· 1523 1498 if (err < 0) 1524 1499 return err; 1525 1500 } 1526 - 1527 - /* init power states */ 1528 - set_widgets_power_state(codec); 1529 - analog_low_current_mode(codec); 1530 1501 1531 1502 via_free_kctls(codec); /* no longer needed */ 1532 1503 ··· 2316 2295 2317 2296 if (mux) { 2318 2297 /* switch to D0 beofre change index */ 2319 - if (snd_hda_codec_read(codec, mux, 0, 2320 - AC_VERB_GET_POWER_STATE, 0x00) != AC_PWRST_D0) 2321 - snd_hda_codec_write(codec, mux, 0, 2322 - AC_VERB_SET_POWER_STATE, AC_PWRST_D0); 2298 + update_power_state(codec, mux, AC_PWRST_D0); 2323 2299 snd_hda_codec_write(codec, mux, 0, 2324 2300 AC_VERB_SET_CONNECT_SEL, 2325 2301 spec->inputs[cur].mux_idx); ··· 2794 2776 for (i = 0; i < spec->num_iverbs; i++) 2795 2777 snd_hda_sequence_write(codec, spec->init_verbs[i]); 2796 2778 2779 + /* init power states */ 2780 + set_widgets_power_state(codec); 2781 + __analog_low_current_mode(codec, true); 2782 + 2797 2783 via_auto_init_multi_out(codec); 2798 2784 via_auto_init_hp_out(codec); 2799 2785 via_auto_init_speaker_out(codec); ··· 2944 2922 if (imux_is_smixer) 2945 2923 parm = AC_PWRST_D0; 2946 2924 /* SW0 (17h), AIW 0/1 (13h/14h) */ 2947 - snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_POWER_STATE, parm); 2948 - snd_hda_codec_write(codec, 0x13, 0, AC_VERB_SET_POWER_STATE, parm); 2949 - snd_hda_codec_write(codec, 0x14, 0, AC_VERB_SET_POWER_STATE, parm); 2925 + update_power_state(codec, 0x17, parm); 2926 + update_power_state(codec, 0x13, parm); 2927 + update_power_state(codec, 0x14, parm); 2950 2928 2951 
2929 /* outputs */ 2952 2930 /* PW0 (19h), SW1 (18h), AOW1 (11h) */ ··· 2954 2932 set_pin_power_state(codec, 0x19, &parm); 2955 2933 if (spec->smart51_enabled) 2956 2934 set_pin_power_state(codec, 0x1b, &parm); 2957 - snd_hda_codec_write(codec, 0x18, 0, AC_VERB_SET_POWER_STATE, parm); 2958 - snd_hda_codec_write(codec, 0x11, 0, AC_VERB_SET_POWER_STATE, parm); 2935 + update_power_state(codec, 0x18, parm); 2936 + update_power_state(codec, 0x11, parm); 2959 2937 2960 2938 /* PW6 (22h), SW2 (26h), AOW2 (24h) */ 2961 2939 if (is_8ch) { ··· 2963 2941 set_pin_power_state(codec, 0x22, &parm); 2964 2942 if (spec->smart51_enabled) 2965 2943 set_pin_power_state(codec, 0x1a, &parm); 2966 - snd_hda_codec_write(codec, 0x26, 0, 2967 - AC_VERB_SET_POWER_STATE, parm); 2968 - snd_hda_codec_write(codec, 0x24, 0, 2969 - AC_VERB_SET_POWER_STATE, parm); 2944 + update_power_state(codec, 0x26, parm); 2945 + update_power_state(codec, 0x24, parm); 2970 2946 } else if (codec->vendor_id == 0x11064397) { 2971 2947 /* PW7(23h), SW2(27h), AOW2(25h) */ 2972 2948 parm = AC_PWRST_D3; 2973 2949 set_pin_power_state(codec, 0x23, &parm); 2974 2950 if (spec->smart51_enabled) 2975 2951 set_pin_power_state(codec, 0x1a, &parm); 2976 - snd_hda_codec_write(codec, 0x27, 0, 2977 - AC_VERB_SET_POWER_STATE, parm); 2978 - snd_hda_codec_write(codec, 0x25, 0, 2979 - AC_VERB_SET_POWER_STATE, parm); 2952 + update_power_state(codec, 0x27, parm); 2953 + update_power_state(codec, 0x25, parm); 2980 2954 } 2981 2955 2982 2956 /* PW 3/4/7 (1ch/1dh/23h) */ ··· 2984 2966 set_pin_power_state(codec, 0x23, &parm); 2985 2967 2986 2968 /* MW0 (16h), Sw3 (27h), AOW 0/3 (10h/25h) */ 2987 - snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_POWER_STATE, 2988 - imux_is_smixer ? AC_PWRST_D0 : parm); 2989 - snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE, parm); 2969 + update_power_state(codec, 0x16, imux_is_smixer ? 
AC_PWRST_D0 : parm); 2970 + update_power_state(codec, 0x10, parm); 2990 2971 if (is_8ch) { 2991 - snd_hda_codec_write(codec, 0x25, 0, 2992 - AC_VERB_SET_POWER_STATE, parm); 2993 - snd_hda_codec_write(codec, 0x27, 0, 2994 - AC_VERB_SET_POWER_STATE, parm); 2972 + update_power_state(codec, 0x25, parm); 2973 + update_power_state(codec, 0x27, parm); 2995 2974 } else if (codec->vendor_id == 0x11064397 && spec->hp_independent_mode) 2996 - snd_hda_codec_write(codec, 0x25, 0, 2997 - AC_VERB_SET_POWER_STATE, parm); 2975 + update_power_state(codec, 0x25, parm); 2998 2976 } 2999 2977 3000 2978 static int patch_vt1708S(struct hda_codec *codec); ··· 3163 3149 if (imux_is_smixer) 3164 3150 parm = AC_PWRST_D0; /* SW0 (13h) = stereo mixer (idx 3) */ 3165 3151 /* SW0 (13h), AIW 0/1/2 (12h/1fh/20h) */ 3166 - snd_hda_codec_write(codec, 0x13, 0, AC_VERB_SET_POWER_STATE, parm); 3167 - snd_hda_codec_write(codec, 0x12, 0, AC_VERB_SET_POWER_STATE, parm); 3168 - snd_hda_codec_write(codec, 0x1f, 0, AC_VERB_SET_POWER_STATE, parm); 3169 - snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_POWER_STATE, parm); 3152 + update_power_state(codec, 0x13, parm); 3153 + update_power_state(codec, 0x12, parm); 3154 + update_power_state(codec, 0x1f, parm); 3155 + update_power_state(codec, 0x20, parm); 3170 3156 3171 3157 /* outputs */ 3172 3158 /* PW 3/4 (16h/17h) */ ··· 3174 3160 set_pin_power_state(codec, 0x17, &parm); 3175 3161 set_pin_power_state(codec, 0x16, &parm); 3176 3162 /* MW0 (1ah), AOW 0/1 (10h/1dh) */ 3177 - snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_POWER_STATE, 3178 - imux_is_smixer ? AC_PWRST_D0 : parm); 3179 - snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE, parm); 3180 - snd_hda_codec_write(codec, 0x1d, 0, AC_VERB_SET_POWER_STATE, parm); 3163 + update_power_state(codec, 0x1a, imux_is_smixer ? 
AC_PWRST_D0 : parm); 3164 + update_power_state(codec, 0x10, parm); 3165 + update_power_state(codec, 0x1d, parm); 3181 3166 } 3182 3167 3183 3168 static int patch_vt1702(struct hda_codec *codec) ··· 3241 3228 if (imux_is_smixer) 3242 3229 parm = AC_PWRST_D0; 3243 3230 /* MUX6/7 (1eh/1fh), AIW 0/1 (10h/11h) */ 3244 - snd_hda_codec_write(codec, 0x1e, 0, AC_VERB_SET_POWER_STATE, parm); 3245 - snd_hda_codec_write(codec, 0x1f, 0, AC_VERB_SET_POWER_STATE, parm); 3246 - snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE, parm); 3247 - snd_hda_codec_write(codec, 0x11, 0, AC_VERB_SET_POWER_STATE, parm); 3231 + update_power_state(codec, 0x1e, parm); 3232 + update_power_state(codec, 0x1f, parm); 3233 + update_power_state(codec, 0x10, parm); 3234 + update_power_state(codec, 0x11, parm); 3248 3235 3249 3236 /* outputs */ 3250 3237 /* PW3 (27h), MW2 (1ah), AOW3 (bh) */ 3251 3238 parm = AC_PWRST_D3; 3252 3239 set_pin_power_state(codec, 0x27, &parm); 3253 - snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_POWER_STATE, parm); 3254 - snd_hda_codec_write(codec, 0xb, 0, AC_VERB_SET_POWER_STATE, parm); 3240 + update_power_state(codec, 0x1a, parm); 3241 + update_power_state(codec, 0xb, parm); 3255 3242 3256 3243 /* PW2 (26h), AOW2 (ah) */ 3257 3244 parm = AC_PWRST_D3; 3258 3245 set_pin_power_state(codec, 0x26, &parm); 3259 3246 if (spec->smart51_enabled) 3260 3247 set_pin_power_state(codec, 0x2b, &parm); 3261 - snd_hda_codec_write(codec, 0xa, 0, AC_VERB_SET_POWER_STATE, parm); 3248 + update_power_state(codec, 0xa, parm); 3262 3249 3263 3250 /* PW0 (24h), AOW0 (8h) */ 3264 3251 parm = AC_PWRST_D3; 3265 3252 set_pin_power_state(codec, 0x24, &parm); 3266 3253 if (!spec->hp_independent_mode) /* check for redirected HP */ 3267 3254 set_pin_power_state(codec, 0x28, &parm); 3268 - snd_hda_codec_write(codec, 0x8, 0, AC_VERB_SET_POWER_STATE, parm); 3255 + update_power_state(codec, 0x8, parm); 3269 3256 /* MW9 (21h), Mw2 (1ah), AOW0 (8h) */ 3270 - snd_hda_codec_write(codec, 0x21, 0, 
AC_VERB_SET_POWER_STATE, 3271 - imux_is_smixer ? AC_PWRST_D0 : parm); 3257 + update_power_state(codec, 0x21, imux_is_smixer ? AC_PWRST_D0 : parm); 3272 3258 3273 3259 /* PW1 (25h), AOW1 (9h) */ 3274 3260 parm = AC_PWRST_D3; 3275 3261 set_pin_power_state(codec, 0x25, &parm); 3276 3262 if (spec->smart51_enabled) 3277 3263 set_pin_power_state(codec, 0x2a, &parm); 3278 - snd_hda_codec_write(codec, 0x9, 0, AC_VERB_SET_POWER_STATE, parm); 3264 + update_power_state(codec, 0x9, parm); 3279 3265 3280 3266 if (spec->hp_independent_mode) { 3281 3267 /* PW4 (28h), MW3 (1bh), MUX1(34h), AOW4 (ch) */ 3282 3268 parm = AC_PWRST_D3; 3283 3269 set_pin_power_state(codec, 0x28, &parm); 3284 - snd_hda_codec_write(codec, 0x1b, 0, 3285 - AC_VERB_SET_POWER_STATE, parm); 3286 - snd_hda_codec_write(codec, 0x34, 0, 3287 - AC_VERB_SET_POWER_STATE, parm); 3288 - snd_hda_codec_write(codec, 0xc, 0, 3289 - AC_VERB_SET_POWER_STATE, parm); 3270 + update_power_state(codec, 0x1b, parm); 3271 + update_power_state(codec, 0x34, parm); 3272 + update_power_state(codec, 0xc, parm); 3290 3273 } 3291 3274 } 3292 3275 ··· 3442 3433 if (imux_is_smixer) 3443 3434 parm = AC_PWRST_D0; 3444 3435 /* SW0 (17h), AIW0(13h) */ 3445 - snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_POWER_STATE, parm); 3446 - snd_hda_codec_write(codec, 0x13, 0, AC_VERB_SET_POWER_STATE, parm); 3436 + update_power_state(codec, 0x17, parm); 3437 + update_power_state(codec, 0x13, parm); 3447 3438 3448 3439 parm = AC_PWRST_D3; 3449 3440 set_pin_power_state(codec, 0x1e, &parm); ··· 3451 3442 if (spec->dmic_enabled) 3452 3443 set_pin_power_state(codec, 0x22, &parm); 3453 3444 else 3454 - snd_hda_codec_write(codec, 0x22, 0, 3455 - AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 3445 + update_power_state(codec, 0x22, AC_PWRST_D3); 3456 3446 3457 3447 /* SW2(26h), AIW1(14h) */ 3458 - snd_hda_codec_write(codec, 0x26, 0, AC_VERB_SET_POWER_STATE, parm); 3459 - snd_hda_codec_write(codec, 0x14, 0, AC_VERB_SET_POWER_STATE, parm); 3448 + update_power_state(codec, 
0x26, parm); 3449 + update_power_state(codec, 0x14, parm); 3460 3450 3461 3451 /* outputs */ 3462 3452 /* PW0 (19h), SW1 (18h), AOW1 (11h) */ ··· 3464 3456 /* Smart 5.1 PW2(1bh) */ 3465 3457 if (spec->smart51_enabled) 3466 3458 set_pin_power_state(codec, 0x1b, &parm); 3467 - snd_hda_codec_write(codec, 0x18, 0, AC_VERB_SET_POWER_STATE, parm); 3468 - snd_hda_codec_write(codec, 0x11, 0, AC_VERB_SET_POWER_STATE, parm); 3459 + update_power_state(codec, 0x18, parm); 3460 + update_power_state(codec, 0x11, parm); 3469 3461 3470 3462 /* PW7 (23h), SW3 (27h), AOW3 (25h) */ 3471 3463 parm = AC_PWRST_D3; ··· 3473 3465 /* Smart 5.1 PW1(1ah) */ 3474 3466 if (spec->smart51_enabled) 3475 3467 set_pin_power_state(codec, 0x1a, &parm); 3476 - snd_hda_codec_write(codec, 0x27, 0, AC_VERB_SET_POWER_STATE, parm); 3468 + update_power_state(codec, 0x27, parm); 3477 3469 3478 3470 /* Smart 5.1 PW5(1eh) */ 3479 3471 if (spec->smart51_enabled) 3480 3472 set_pin_power_state(codec, 0x1e, &parm); 3481 - snd_hda_codec_write(codec, 0x25, 0, AC_VERB_SET_POWER_STATE, parm); 3473 + update_power_state(codec, 0x25, parm); 3482 3474 3483 3475 /* Mono out */ 3484 3476 /* SW4(28h)->MW1(29h)-> PW12 (2ah)*/ ··· 3494 3486 mono_out = 1; 3495 3487 } 3496 3488 parm = mono_out ? 
AC_PWRST_D0 : AC_PWRST_D3; 3497 - snd_hda_codec_write(codec, 0x28, 0, AC_VERB_SET_POWER_STATE, parm); 3498 - snd_hda_codec_write(codec, 0x29, 0, AC_VERB_SET_POWER_STATE, parm); 3499 - snd_hda_codec_write(codec, 0x2a, 0, AC_VERB_SET_POWER_STATE, parm); 3489 + update_power_state(codec, 0x28, parm); 3490 + update_power_state(codec, 0x29, parm); 3491 + update_power_state(codec, 0x2a, parm); 3500 3492 3501 3493 /* PW 3/4 (1ch/1dh) */ 3502 3494 parm = AC_PWRST_D3; ··· 3504 3496 set_pin_power_state(codec, 0x1d, &parm); 3505 3497 /* HP Independent Mode, power on AOW3 */ 3506 3498 if (spec->hp_independent_mode) 3507 - snd_hda_codec_write(codec, 0x25, 0, 3508 - AC_VERB_SET_POWER_STATE, parm); 3499 + update_power_state(codec, 0x25, parm); 3509 3500 3510 3501 /* force to D0 for internal Speaker */ 3511 3502 /* MW0 (16h), AOW0 (10h) */ 3512 - snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_POWER_STATE, 3513 - imux_is_smixer ? AC_PWRST_D0 : parm); 3514 - snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE, 3515 - mono_out ? AC_PWRST_D0 : parm); 3503 + update_power_state(codec, 0x16, imux_is_smixer ? AC_PWRST_D0 : parm); 3504 + update_power_state(codec, 0x10, mono_out ? 
AC_PWRST_D0 : parm); 3516 3505 } 3517 3506 3518 3507 static int patch_vt1716S(struct hda_codec *codec) ··· 3585 3580 set_pin_power_state(codec, 0x2b, &parm); 3586 3581 parm = AC_PWRST_D0; 3587 3582 /* MUX9/10 (1eh/1fh), AIW 0/1 (10h/11h) */ 3588 - snd_hda_codec_write(codec, 0x1e, 0, AC_VERB_SET_POWER_STATE, parm); 3589 - snd_hda_codec_write(codec, 0x1f, 0, AC_VERB_SET_POWER_STATE, parm); 3590 - snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE, parm); 3591 - snd_hda_codec_write(codec, 0x11, 0, AC_VERB_SET_POWER_STATE, parm); 3583 + update_power_state(codec, 0x1e, parm); 3584 + update_power_state(codec, 0x1f, parm); 3585 + update_power_state(codec, 0x10, parm); 3586 + update_power_state(codec, 0x11, parm); 3592 3587 3593 3588 /* outputs */ 3594 3589 /* AOW0 (8h)*/ 3595 - snd_hda_codec_write(codec, 0x8, 0, AC_VERB_SET_POWER_STATE, parm); 3590 + update_power_state(codec, 0x8, parm); 3596 3591 3597 3592 if (spec->codec_type == VT1802) { 3598 3593 /* PW4 (28h), MW4 (18h), MUX4(38h) */ 3599 3594 parm = AC_PWRST_D3; 3600 3595 set_pin_power_state(codec, 0x28, &parm); 3601 - snd_hda_codec_write(codec, 0x18, 0, 3602 - AC_VERB_SET_POWER_STATE, parm); 3603 - snd_hda_codec_write(codec, 0x38, 0, 3604 - AC_VERB_SET_POWER_STATE, parm); 3596 + update_power_state(codec, 0x18, parm); 3597 + update_power_state(codec, 0x38, parm); 3605 3598 } else { 3606 3599 /* PW4 (26h), MW4 (1ch), MUX4(37h) */ 3607 3600 parm = AC_PWRST_D3; 3608 3601 set_pin_power_state(codec, 0x26, &parm); 3609 - snd_hda_codec_write(codec, 0x1c, 0, 3610 - AC_VERB_SET_POWER_STATE, parm); 3611 - snd_hda_codec_write(codec, 0x37, 0, 3612 - AC_VERB_SET_POWER_STATE, parm); 3602 + update_power_state(codec, 0x1c, parm); 3603 + update_power_state(codec, 0x37, parm); 3613 3604 } 3614 3605 3615 3606 if (spec->codec_type == VT1802) { 3616 3607 /* PW1 (25h), MW1 (15h), MUX1(35h), AOW1 (9h) */ 3617 3608 parm = AC_PWRST_D3; 3618 3609 set_pin_power_state(codec, 0x25, &parm); 3619 - snd_hda_codec_write(codec, 0x15, 0, 3620 
- AC_VERB_SET_POWER_STATE, parm); 3621 - snd_hda_codec_write(codec, 0x35, 0, 3622 - AC_VERB_SET_POWER_STATE, parm); 3610 + update_power_state(codec, 0x15, parm); 3611 + update_power_state(codec, 0x35, parm); 3623 3612 } else { 3624 3613 /* PW1 (25h), MW1 (19h), MUX1(35h), AOW1 (9h) */ 3625 3614 parm = AC_PWRST_D3; 3626 3615 set_pin_power_state(codec, 0x25, &parm); 3627 - snd_hda_codec_write(codec, 0x19, 0, 3628 - AC_VERB_SET_POWER_STATE, parm); 3629 - snd_hda_codec_write(codec, 0x35, 0, 3630 - AC_VERB_SET_POWER_STATE, parm); 3616 + update_power_state(codec, 0x19, parm); 3617 + update_power_state(codec, 0x35, parm); 3631 3618 } 3632 3619 3633 3620 if (spec->hp_independent_mode) 3634 - snd_hda_codec_write(codec, 0x9, 0, 3635 - AC_VERB_SET_POWER_STATE, AC_PWRST_D0); 3621 + update_power_state(codec, 0x9, AC_PWRST_D0); 3636 3622 3637 3623 /* Class-D */ 3638 3624 /* PW0 (24h), MW0(18h/14h), MUX0(34h) */ ··· 3633 3637 set_pin_power_state(codec, 0x24, &parm); 3634 3638 parm = present ? AC_PWRST_D3 : AC_PWRST_D0; 3635 3639 if (spec->codec_type == VT1802) 3636 - snd_hda_codec_write(codec, 0x14, 0, 3637 - AC_VERB_SET_POWER_STATE, parm); 3640 + update_power_state(codec, 0x14, parm); 3638 3641 else 3639 - snd_hda_codec_write(codec, 0x18, 0, 3640 - AC_VERB_SET_POWER_STATE, parm); 3641 - snd_hda_codec_write(codec, 0x34, 0, AC_VERB_SET_POWER_STATE, parm); 3642 + update_power_state(codec, 0x18, parm); 3643 + update_power_state(codec, 0x34, parm); 3642 3644 3643 3645 /* Mono Out */ 3644 3646 present = snd_hda_jack_detect(codec, 0x26); ··· 3644 3650 parm = present ? 
AC_PWRST_D3 : AC_PWRST_D0; 3645 3651 if (spec->codec_type == VT1802) { 3646 3652 /* PW15 (33h), MW8(1ch), MUX8(3ch) */ 3647 - snd_hda_codec_write(codec, 0x33, 0, 3648 - AC_VERB_SET_POWER_STATE, parm); 3649 - snd_hda_codec_write(codec, 0x1c, 0, 3650 - AC_VERB_SET_POWER_STATE, parm); 3651 - snd_hda_codec_write(codec, 0x3c, 0, 3652 - AC_VERB_SET_POWER_STATE, parm); 3653 + update_power_state(codec, 0x33, parm); 3654 + update_power_state(codec, 0x1c, parm); 3655 + update_power_state(codec, 0x3c, parm); 3653 3656 } else { 3654 3657 /* PW15 (31h), MW8(17h), MUX8(3bh) */ 3655 - snd_hda_codec_write(codec, 0x31, 0, 3656 - AC_VERB_SET_POWER_STATE, parm); 3657 - snd_hda_codec_write(codec, 0x17, 0, 3658 - AC_VERB_SET_POWER_STATE, parm); 3659 - snd_hda_codec_write(codec, 0x3b, 0, 3660 - AC_VERB_SET_POWER_STATE, parm); 3658 + update_power_state(codec, 0x31, parm); 3659 + update_power_state(codec, 0x17, parm); 3660 + update_power_state(codec, 0x3b, parm); 3661 3661 } 3662 3662 /* MW9 (21h) */ 3663 3663 if (imux_is_smixer || !is_aa_path_mute(codec)) 3664 - snd_hda_codec_write(codec, 0x21, 0, 3665 - AC_VERB_SET_POWER_STATE, AC_PWRST_D0); 3664 + update_power_state(codec, 0x21, AC_PWRST_D0); 3666 3665 else 3667 - snd_hda_codec_write(codec, 0x21, 0, 3668 - AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 3666 + update_power_state(codec, 0x21, AC_PWRST_D3); 3669 3667 } 3670 3668 3671 3669 /* patch for vt2002P */ ··· 3717 3731 set_pin_power_state(codec, 0x2b, &parm); 3718 3732 parm = AC_PWRST_D0; 3719 3733 /* MUX10/11 (1eh/1fh), AIW 0/1 (10h/11h) */ 3720 - snd_hda_codec_write(codec, 0x1e, 0, AC_VERB_SET_POWER_STATE, parm); 3721 - snd_hda_codec_write(codec, 0x1f, 0, AC_VERB_SET_POWER_STATE, parm); 3722 - snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE, parm); 3723 - snd_hda_codec_write(codec, 0x11, 0, AC_VERB_SET_POWER_STATE, parm); 3734 + update_power_state(codec, 0x1e, parm); 3735 + update_power_state(codec, 0x1f, parm); 3736 + update_power_state(codec, 0x10, parm); 3737 + 
update_power_state(codec, 0x11, parm); 3724 3738 3725 3739 /* outputs */ 3726 3740 /* AOW0 (8h)*/ 3727 - snd_hda_codec_write(codec, 0x8, 0, 3728 - AC_VERB_SET_POWER_STATE, AC_PWRST_D0); 3741 + update_power_state(codec, 0x8, AC_PWRST_D0); 3729 3742 3730 3743 /* PW4 (28h), MW4 (18h), MUX4(38h) */ 3731 3744 parm = AC_PWRST_D3; 3732 3745 set_pin_power_state(codec, 0x28, &parm); 3733 - snd_hda_codec_write(codec, 0x18, 0, AC_VERB_SET_POWER_STATE, parm); 3734 - snd_hda_codec_write(codec, 0x38, 0, AC_VERB_SET_POWER_STATE, parm); 3746 + update_power_state(codec, 0x18, parm); 3747 + update_power_state(codec, 0x38, parm); 3735 3748 3736 3749 /* PW1 (25h), MW1 (15h), MUX1(35h), AOW1 (9h) */ 3737 3750 parm = AC_PWRST_D3; 3738 3751 set_pin_power_state(codec, 0x25, &parm); 3739 - snd_hda_codec_write(codec, 0x15, 0, AC_VERB_SET_POWER_STATE, parm); 3740 - snd_hda_codec_write(codec, 0x35, 0, AC_VERB_SET_POWER_STATE, parm); 3752 + update_power_state(codec, 0x15, parm); 3753 + update_power_state(codec, 0x35, parm); 3741 3754 if (spec->hp_independent_mode) 3742 - snd_hda_codec_write(codec, 0x9, 0, 3743 - AC_VERB_SET_POWER_STATE, AC_PWRST_D0); 3755 + update_power_state(codec, 0x9, AC_PWRST_D0); 3744 3756 3745 3757 /* Internal Speaker */ 3746 3758 /* PW0 (24h), MW0(14h), MUX0(34h) */ ··· 3747 3763 parm = AC_PWRST_D3; 3748 3764 set_pin_power_state(codec, 0x24, &parm); 3749 3765 if (present) { 3750 - snd_hda_codec_write(codec, 0x14, 0, 3751 - AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 3752 - snd_hda_codec_write(codec, 0x34, 0, 3753 - AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 3766 + update_power_state(codec, 0x14, AC_PWRST_D3); 3767 + update_power_state(codec, 0x34, AC_PWRST_D3); 3754 3768 } else { 3755 - snd_hda_codec_write(codec, 0x14, 0, 3756 - AC_VERB_SET_POWER_STATE, AC_PWRST_D0); 3757 - snd_hda_codec_write(codec, 0x34, 0, 3758 - AC_VERB_SET_POWER_STATE, AC_PWRST_D0); 3769 + update_power_state(codec, 0x14, AC_PWRST_D0); 3770 + update_power_state(codec, 0x34, AC_PWRST_D0); 3759 3771 } 3760 
3772 3761 3773 ··· 3762 3782 parm = AC_PWRST_D3; 3763 3783 set_pin_power_state(codec, 0x31, &parm); 3764 3784 if (present) { 3765 - snd_hda_codec_write(codec, 0x1c, 0, 3766 - AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 3767 - snd_hda_codec_write(codec, 0x3c, 0, 3768 - AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 3769 - snd_hda_codec_write(codec, 0x3e, 0, 3770 - AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 3785 + update_power_state(codec, 0x1c, AC_PWRST_D3); 3786 + update_power_state(codec, 0x3c, AC_PWRST_D3); 3787 + update_power_state(codec, 0x3e, AC_PWRST_D3); 3771 3788 } else { 3772 - snd_hda_codec_write(codec, 0x1c, 0, 3773 - AC_VERB_SET_POWER_STATE, AC_PWRST_D0); 3774 - snd_hda_codec_write(codec, 0x3c, 0, 3775 - AC_VERB_SET_POWER_STATE, AC_PWRST_D0); 3776 - snd_hda_codec_write(codec, 0x3e, 0, 3777 - AC_VERB_SET_POWER_STATE, AC_PWRST_D0); 3789 + update_power_state(codec, 0x1c, AC_PWRST_D0); 3790 + update_power_state(codec, 0x3c, AC_PWRST_D0); 3791 + update_power_state(codec, 0x3e, AC_PWRST_D0); 3778 3792 } 3779 3793 3780 3794 /* PW15 (33h), MW15 (1dh), MUX15(3dh) */ 3781 3795 parm = AC_PWRST_D3; 3782 3796 set_pin_power_state(codec, 0x33, &parm); 3783 - snd_hda_codec_write(codec, 0x1d, 0, AC_VERB_SET_POWER_STATE, parm); 3784 - snd_hda_codec_write(codec, 0x3d, 0, AC_VERB_SET_POWER_STATE, parm); 3797 + update_power_state(codec, 0x1d, parm); 3798 + update_power_state(codec, 0x3d, parm); 3785 3799 3786 3800 } 3787 3801
+11 -1
sound/soc/codecs/wm5100.c
··· 1405 1405 1406 1406 case SND_SOC_BIAS_OFF: 1407 1407 regcache_cache_only(wm5100->regmap, true); 1408 + regcache_mark_dirty(wm5100->regmap); 1408 1409 if (wm5100->pdata.ldo_ena) 1409 1410 gpio_set_value_cansleep(wm5100->pdata.ldo_ena, 0); 1410 1411 regulator_bulk_disable(ARRAY_SIZE(wm5100->core_supplies), ··· 2184 2183 if (wm5100->jack_detecting) { 2185 2184 dev_dbg(codec->dev, "Microphone detected\n"); 2186 2185 wm5100->jack_mic = true; 2186 + wm5100->jack_detecting = false; 2187 2187 snd_soc_jack_report(wm5100->jack, 2188 2188 SND_JACK_HEADSET, 2189 2189 SND_JACK_HEADSET | SND_JACK_BTN_0); ··· 2223 2221 SND_JACK_BTN_0); 2224 2222 } else if (wm5100->jack_detecting) { 2225 2223 dev_dbg(codec->dev, "Headphone detected\n"); 2224 + wm5100->jack_detecting = false; 2226 2225 snd_soc_jack_report(wm5100->jack, SND_JACK_HEADPHONE, 2227 2226 SND_JACK_HEADPHONE); 2228 2227 ··· 2613 2610 .cache_type = REGCACHE_RBTREE, 2614 2611 }; 2615 2612 2613 + static const unsigned int wm5100_mic_ctrl_reg[] = { 2614 + WM5100_IN1L_CONTROL, 2615 + WM5100_IN2L_CONTROL, 2616 + WM5100_IN3L_CONTROL, 2617 + WM5100_IN4L_CONTROL, 2618 + }; 2619 + 2616 2620 static __devinit int wm5100_i2c_probe(struct i2c_client *i2c, 2617 2621 const struct i2c_device_id *id) 2618 2622 { ··· 2752 2742 } 2753 2743 2754 2744 for (i = 0; i < ARRAY_SIZE(wm5100->pdata.in_mode); i++) { 2755 - regmap_update_bits(wm5100->regmap, WM5100_IN1L_CONTROL, 2745 + regmap_update_bits(wm5100->regmap, wm5100_mic_ctrl_reg[i], 2756 2746 WM5100_IN1_MODE_MASK | 2757 2747 WM5100_IN1_DMIC_SUP_MASK, 2758 2748 (wm5100->pdata.in_mode[i] <<
+1 -1
sound/soc/codecs/wm8962.c
··· 96 96 struct wm8962_priv *wm8962 = container_of(nb, struct wm8962_priv, \ 97 97 disable_nb[n]); \ 98 98 if (event & REGULATOR_EVENT_DISABLE) { \ 99 - regcache_cache_only(wm8962->regmap, true); \ 99 + regcache_mark_dirty(wm8962->regmap); \ 100 100 } \ 101 101 return 0; \ 102 102 }
+1 -1
sound/soc/codecs/wm8996.c
··· 108 108 struct wm8996_priv *wm8996 = container_of(nb, struct wm8996_priv, \ 109 109 disable_nb[n]); \ 110 110 if (event & REGULATOR_EVENT_DISABLE) { \ 111 - regcache_cache_only(wm8996->regmap, true); \ 111 + regcache_mark_dirty(wm8996->regmap); \ 112 112 } \ 113 113 return 0; \ 114 114 }
+8 -2
sound/soc/codecs/wm_hubs.c
··· 592 592 }; 593 593 594 594 static const struct snd_kcontrol_new line2n_mix[] = { 595 - SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER2, 6, 1, 0), 596 - SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER2, 5, 1, 0), 595 + SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER2, 5, 1, 0), 596 + SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER2, 6, 1, 0), 597 597 }; 598 598 599 599 static const struct snd_kcontrol_new line2p_mix[] = { ··· 612 612 613 613 SND_SOC_DAPM_SUPPLY("MICBIAS2", WM8993_POWER_MANAGEMENT_1, 5, 0, NULL, 0), 614 614 SND_SOC_DAPM_SUPPLY("MICBIAS1", WM8993_POWER_MANAGEMENT_1, 4, 0, NULL, 0), 615 + 616 + SND_SOC_DAPM_SUPPLY("LINEOUT_VMID_BUF", WM8993_ANTIPOP1, 7, 0, NULL, 0), 615 617 616 618 SND_SOC_DAPM_MIXER("IN1L PGA", WM8993_POWER_MANAGEMENT_2, 6, 0, 617 619 in1l_pga, ARRAY_SIZE(in1l_pga)), ··· 836 834 }; 837 835 838 836 static const struct snd_soc_dapm_route lineout1_se_routes[] = { 837 + { "LINEOUT1N Mixer", NULL, "LINEOUT_VMID_BUF" }, 839 838 { "LINEOUT1N Mixer", "Left Output Switch", "Left Output PGA" }, 840 839 { "LINEOUT1N Mixer", "Right Output Switch", "Right Output PGA" }, 841 840 841 + { "LINEOUT1P Mixer", NULL, "LINEOUT_VMID_BUF" }, 842 842 { "LINEOUT1P Mixer", "Left Output Switch", "Left Output PGA" }, 843 843 844 844 { "LINEOUT1N Driver", NULL, "LINEOUT1N Mixer" }, ··· 857 853 }; 858 854 859 855 static const struct snd_soc_dapm_route lineout2_se_routes[] = { 856 + { "LINEOUT2N Mixer", NULL, "LINEOUT_VMID_BUF" }, 860 857 { "LINEOUT2N Mixer", "Left Output Switch", "Left Output PGA" }, 861 858 { "LINEOUT2N Mixer", "Right Output Switch", "Right Output PGA" }, 862 859 860 + { "LINEOUT2P Mixer", NULL, "LINEOUT_VMID_BUF" }, 863 861 { "LINEOUT2P Mixer", "Right Output Switch", "Right Output PGA" }, 864 862 865 863 { "LINEOUT2N Driver", NULL, "LINEOUT2N Mixer" },
+1 -64
sound/soc/samsung/neo1973_wm8753.c
··· 230 230 231 231 /* GTA02 specific routes and controls */ 232 232 233 - #ifdef CONFIG_MACH_NEO1973_GTA02 234 - 235 233 static int gta02_speaker_enabled; 236 234 237 235 static int lm4853_set_spk(struct snd_kcontrol *kcontrol, ··· 309 311 return 0; 310 312 } 311 313 312 - #else 313 - static int neo1973_gta02_wm8753_init(struct snd_soc_code *codec) { return 0; } 314 - #endif 315 - 316 314 static int neo1973_wm8753_init(struct snd_soc_pcm_runtime *rtd) 317 315 { 318 316 struct snd_soc_codec *codec = rtd->codec; ··· 316 322 int ret; 317 323 318 324 /* set up NC codec pins */ 319 - if (machine_is_neo1973_gta01()) { 320 - snd_soc_dapm_nc_pin(dapm, "LOUT2"); 321 - snd_soc_dapm_nc_pin(dapm, "ROUT2"); 322 - } 323 325 snd_soc_dapm_nc_pin(dapm, "OUT3"); 324 326 snd_soc_dapm_nc_pin(dapm, "OUT4"); 325 327 snd_soc_dapm_nc_pin(dapm, "LINE1"); ··· 360 370 return 0; 361 371 } 362 372 363 - /* GTA01 specific controls */ 364 - 365 - #ifdef CONFIG_MACH_NEO1973_GTA01 366 - 367 - static const struct snd_soc_dapm_route neo1973_lm4857_routes[] = { 368 - {"Amp IN", NULL, "ROUT1"}, 369 - {"Amp IN", NULL, "LOUT1"}, 370 - 371 - {"Handset Spk", NULL, "Amp EP"}, 372 - {"Stereo Out", NULL, "Amp LS"}, 373 - {"Headphone", NULL, "Amp HP"}, 374 - }; 375 - 376 - static const struct snd_soc_dapm_widget neo1973_lm4857_dapm_widgets[] = { 377 - SND_SOC_DAPM_SPK("Handset Spk", NULL), 378 - SND_SOC_DAPM_SPK("Stereo Out", NULL), 379 - SND_SOC_DAPM_HP("Headphone", NULL), 380 - }; 381 - 382 - static int neo1973_lm4857_init(struct snd_soc_dapm_context *dapm) 383 - { 384 - int ret; 385 - 386 - ret = snd_soc_dapm_new_controls(dapm, neo1973_lm4857_dapm_widgets, 387 - ARRAY_SIZE(neo1973_lm4857_dapm_widgets)); 388 - if (ret) 389 - return ret; 390 - 391 - ret = snd_soc_dapm_add_routes(dapm, neo1973_lm4857_routes, 392 - ARRAY_SIZE(neo1973_lm4857_routes)); 393 - if (ret) 394 - return ret; 395 - 396 - snd_soc_dapm_ignore_suspend(dapm, "Stereo Out"); 397 - snd_soc_dapm_ignore_suspend(dapm, "Handset Spk"); 398 - 
snd_soc_dapm_ignore_suspend(dapm, "Headphone"); 399 - 400 - return 0; 401 - } 402 - 403 - #else 404 - static int neo1973_lm4857_init(struct snd_soc_dapm_context *dapm) { return 0; }; 405 - #endif 406 - 407 373 static struct snd_soc_dai_link neo1973_dai[] = { 408 374 { /* Hifi Playback - for similatious use with voice below */ 409 375 .name = "WM8753", ··· 386 440 .name = "dfbmcs320", 387 441 .codec_name = "dfbmcs320.0", 388 442 }, 389 - { 390 - .name = "lm4857", 391 - .codec_name = "lm4857.0-007c", 392 - .init = neo1973_lm4857_init, 393 - }, 394 443 }; 395 444 396 445 static struct snd_soc_codec_conf neo1973_codec_conf[] = { ··· 395 454 }, 396 455 }; 397 456 398 - #ifdef CONFIG_MACH_NEO1973_GTA02 399 457 static const struct gpio neo1973_gta02_gpios[] = { 400 458 { GTA02_GPIO_HP_IN, GPIOF_OUT_INIT_HIGH, "GTA02_HP_IN" }, 401 459 { GTA02_GPIO_AMP_SHUT, GPIOF_OUT_INIT_HIGH, "GTA02_AMP_SHUT" }, 402 460 }; 403 - #else 404 - static const struct gpio neo1973_gta02_gpios[] = {}; 405 - #endif 406 461 407 462 static struct snd_soc_card neo1973 = { 408 463 .name = "neo1973", ··· 417 480 { 418 481 int ret; 419 482 420 - if (!machine_is_neo1973_gta01() && !machine_is_neo1973_gta02()) 483 + if (!machine_is_neo1973_gta02()) 421 484 return -ENODEV; 422 485 423 486 if (machine_is_neo1973_gta02()) {
+2 -5
tools/perf/Makefile
··· 104 104 105 105 CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) 106 106 EXTLIBS = -lpthread -lrt -lelf -lm 107 - ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 107 + ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE 108 108 ALL_LDFLAGS = $(LDFLAGS) 109 109 STRIP ?= strip 110 110 ··· 168 168 169 169 ### --- END CONFIGURATION SECTION --- 170 170 171 - # Those must not be GNU-specific; they are shared with perl/ which may 172 - # be built by a different compiler. (Note that this is an artifact now 173 - # but it still might be nice to keep that distinction.) 174 - BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include 171 + BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE 175 172 BASIC_LDFLAGS = 176 173 177 174 # Guard against environment variables
-2
tools/perf/builtin-probe.c
··· 20 20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 21 21 * 22 22 */ 23 - #define _GNU_SOURCE 24 23 #include <sys/utsname.h> 25 24 #include <sys/types.h> 26 25 #include <sys/stat.h> ··· 30 31 #include <stdlib.h> 31 32 #include <string.h> 32 33 33 - #undef _GNU_SOURCE 34 34 #include "perf.h" 35 35 #include "builtin.h" 36 36 #include "util/util.h"
+10 -3
tools/perf/builtin-top.c
··· 89 89 90 90 static void perf_top__update_print_entries(struct perf_top *top) 91 91 { 92 - top->print_entries = top->winsize.ws_row; 93 - 94 92 if (top->print_entries > 9) 95 93 top->print_entries -= 9; 96 94 } ··· 98 100 struct perf_top *top = arg; 99 101 100 102 get_term_dimensions(&top->winsize); 103 + if (!top->print_entries 104 + || (top->print_entries+4) > top->winsize.ws_row) { 105 + top->print_entries = top->winsize.ws_row; 106 + } else { 107 + top->print_entries += 4; 108 + top->winsize.ws_row = top->print_entries; 109 + } 101 110 perf_top__update_print_entries(top); 102 111 } 103 112 ··· 458 453 }; 459 454 perf_top__sig_winch(SIGWINCH, NULL, top); 460 455 sigaction(SIGWINCH, &act, NULL); 461 - } else 456 + } else { 457 + perf_top__sig_winch(SIGWINCH, NULL, top); 462 458 signal(SIGWINCH, SIG_DFL); 459 + } 463 460 break; 464 461 case 'E': 465 462 if (top->evlist->nr_entries > 1) {
+1 -1
tools/perf/util/header.c
··· 2105 2105 strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1); 2106 2106 2107 2107 ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE; 2108 - size = strlen(name); 2108 + size = strlen(ev.event_type.event_type.name); 2109 2109 size = ALIGN(size, sizeof(u64)); 2110 2110 ev.event_type.header.size = sizeof(ev.event_type) - 2111 2111 (sizeof(ev.event_type.event_type.name) - size);
-2
tools/perf/util/probe-event.c
··· 19 19 * 20 20 */ 21 21 22 - #define _GNU_SOURCE 23 22 #include <sys/utsname.h> 24 23 #include <sys/types.h> 25 24 #include <sys/stat.h> ··· 32 33 #include <limits.h> 33 34 #include <elf.h> 34 35 35 - #undef _GNU_SOURCE 36 36 #include "util.h" 37 37 #include "event.h" 38 38 #include "string.h"
-1
tools/perf/util/symbol.c
··· 1 - #define _GNU_SOURCE 2 1 #include <ctype.h> 3 2 #include <dirent.h> 4 3 #include <errno.h>
+1 -2
tools/perf/util/trace-event-parse.c
··· 21 21 * The parts for function graph printing was taken and modified from the 22 22 * Linux Kernel that were written by Frederic Weisbecker. 23 23 */ 24 - #define _GNU_SOURCE 24 + 25 25 #include <stdio.h> 26 26 #include <stdlib.h> 27 27 #include <string.h> 28 28 #include <ctype.h> 29 29 #include <errno.h> 30 30 31 - #undef _GNU_SOURCE 32 31 #include "../perf.h" 33 32 #include "util.h" 34 33 #include "trace-event.h"
-2
tools/perf/util/ui/browsers/hists.c
··· 1 - #define _GNU_SOURCE 2 1 #include <stdio.h> 3 - #undef _GNU_SOURCE 4 2 #include "../libslang.h" 5 3 #include <stdlib.h> 6 4 #include <string.h>
-1
tools/perf/util/ui/helpline.c
··· 1 - #define _GNU_SOURCE 2 1 #include <stdio.h> 3 2 #include <stdlib.h> 4 3 #include <string.h>
-1
tools/perf/util/util.h
··· 40 40 #define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1) 41 41 42 42 #define _ALL_SOURCE 1 43 - #define _GNU_SOURCE 1 44 43 #define _BSD_SOURCE 1 45 44 #define HAS_BOOL 46 45