Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of ../netdev/

+1881 -1131
+1 -2
Documentation/power/runtime_pm.txt
··· 431 431 432 432 void pm_runtime_irq_safe(struct device *dev); 433 433 - set the power.irq_safe flag for the device, causing the runtime-PM 434 - suspend and resume callbacks (but not the idle callback) to be invoked 435 - with interrupts disabled 434 + callbacks to be invoked with interrupts off 436 435 437 436 void pm_runtime_mark_last_busy(struct device *dev); 438 437 - set the power.last_busy field to the current time
+8 -9
MAINTAINERS
··· 1278 1278 ATLX ETHERNET DRIVERS 1279 1279 M: Jay Cliburn <jcliburn@gmail.com> 1280 1280 M: Chris Snook <chris.snook@gmail.com> 1281 - M: Jie Yang <jie.yang@atheros.com> 1282 1281 L: netdev@vger.kernel.org 1283 1282 W: http://sourceforge.net/projects/atl1 1284 1283 W: http://atl1.sourceforge.net ··· 1573 1574 1574 1575 BROCADE BNA 10 GIGABIT ETHERNET DRIVER 1575 1576 M: Rasesh Mody <rmody@brocade.com> 1576 - M: Debashis Dutt <ddutt@brocade.com> 1577 1577 L: netdev@vger.kernel.org 1578 1578 S: Supported 1579 1579 F: drivers/net/bna/ ··· 1756 1758 1757 1759 CISCO VIC ETHERNET NIC DRIVER 1758 1760 M: Christian Benvenuti <benve@cisco.com> 1759 - M: Vasanthy Kolluri <vkolluri@cisco.com> 1760 1761 M: Roopa Prabhu <roprabhu@cisco.com> 1761 1762 M: David Wang <dwang2@cisco.com> 1762 1763 S: Supported ··· 1880 1883 F: drivers/connector/ 1881 1884 1882 1885 CONTROL GROUPS (CGROUPS) 1883 - M: Paul Menage <menage@google.com> 1886 + M: Paul Menage <paul@paulmenage.org> 1884 1887 M: Li Zefan <lizf@cn.fujitsu.com> 1885 1888 L: containers@lists.linux-foundation.org 1886 1889 S: Maintained ··· 1929 1932 F: tools/power/cpupower 1930 1933 1931 1934 CPUSETS 1932 - M: Paul Menage <menage@google.com> 1935 + M: Paul Menage <paul@paulmenage.org> 1933 1936 W: http://www.bullopensource.org/cpuset/ 1934 1937 W: http://oss.sgi.com/projects/cpusets/ 1935 1938 S: Supported ··· 2646 2649 F: drivers/net/wan/sdla.c 2647 2650 2648 2651 FRAMEBUFFER LAYER 2649 - M: Paul Mundt <lethal@linux-sh.org> 2652 + M: Florian Tobias Schandinat <FlorianSchandinat@gmx.de> 2650 2653 L: linux-fbdev@vger.kernel.org 2651 2654 W: http://linux-fbdev.sourceforge.net/ 2652 2655 Q: http://patchwork.kernel.org/project/linux-fbdev/list/ 2653 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/fbdev-2.6.git 2656 + T: git git://github.com/schandinat/linux-2.6.git fbdev-next 2654 2657 S: Maintained 2655 2658 F: Documentation/fb/ 2656 2659 F: Documentation/devicetree/bindings/fb/ ··· 4401 4404 L: 
coreteam@netfilter.org 4402 4405 W: http://www.netfilter.org/ 4403 4406 W: http://www.iptables.org/ 4404 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.git 4407 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git 4408 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git 4405 4409 S: Supported 4406 4410 F: include/linux/netfilter* 4407 4411 F: include/linux/netfilter/ ··· 5530 5532 5531 5533 SAMSUNG AUDIO (ASoC) DRIVERS 5532 5534 M: Jassi Brar <jassisinghbrar@gmail.com> 5535 + M: Sangbeom Kim <sbkim73@samsung.com> 5533 5536 L: alsa-devel@alsa-project.org (moderated for non-subscribers) 5534 5537 S: Supported 5535 5538 F: sound/soc/samsung ··· 7086 7087 F: drivers/mmc/host/vub300.c 7087 7088 7088 7089 W1 DALLAS'S 1-WIRE BUS 7089 - M: Evgeniy Polyakov <johnpol@2ka.mipt.ru> 7090 + M: Evgeniy Polyakov <zbr@ioremap.net> 7090 7091 S: Maintained 7091 7092 F: Documentation/w1/ 7092 7093 F: drivers/w1/
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 1 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc3 4 + EXTRAVERSION = -rc4 5 5 NAME = "Divemaster Edition" 6 6 7 7 # *DOCUMENTATION*
-9
arch/alpha/include/asm/sysinfo.h
··· 27 27 #define UAC_NOFIX 2 28 28 #define UAC_SIGBUS 4 29 29 30 - 31 - #ifdef __KERNEL__ 32 - 33 - /* This is the shift that is applied to the UAC bits as stored in the 34 - per-thread flags. See thread_info.h. */ 35 - #define UAC_SHIFT 6 36 - 37 - #endif 38 - 39 30 #endif /* __ASM_ALPHA_SYSINFO_H */
+4 -4
arch/alpha/include/asm/thread_info.h
··· 74 74 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 75 75 #define TIF_POLLING_NRFLAG 8 /* poll_idle is polling NEED_RESCHED */ 76 76 #define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */ 77 - #define TIF_UAC_NOPRINT 10 /* see sysinfo.h */ 78 - #define TIF_UAC_NOFIX 11 79 - #define TIF_UAC_SIGBUS 12 77 + #define TIF_UAC_NOPRINT 10 /* ! Preserve sequence of following */ 78 + #define TIF_UAC_NOFIX 11 /* ! flags as they match */ 79 + #define TIF_UAC_SIGBUS 12 /* ! userspace part of 'osf_sysinfo' */ 80 80 #define TIF_MEMDIE 13 /* is terminating due to OOM killer */ 81 81 #define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */ 82 82 #define TIF_FREEZE 16 /* is freezing for suspend */ ··· 97 97 #define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \ 98 98 | _TIF_SYSCALL_TRACE) 99 99 100 - #define ALPHA_UAC_SHIFT 10 100 + #define ALPHA_UAC_SHIFT TIF_UAC_NOPRINT 101 101 #define ALPHA_UAC_MASK (1 << TIF_UAC_NOPRINT | 1 << TIF_UAC_NOFIX | \ 102 102 1 << TIF_UAC_SIGBUS) 103 103
+7 -5
arch/alpha/kernel/osf_sys.c
··· 42 42 #include <asm/uaccess.h> 43 43 #include <asm/system.h> 44 44 #include <asm/sysinfo.h> 45 + #include <asm/thread_info.h> 45 46 #include <asm/hwrpb.h> 46 47 #include <asm/processor.h> 47 48 ··· 634 633 case GSI_UACPROC: 635 634 if (nbytes < sizeof(unsigned int)) 636 635 return -EINVAL; 637 - w = (current_thread_info()->flags >> UAC_SHIFT) & UAC_BITMASK; 638 - if (put_user(w, (unsigned int __user *)buffer)) 639 - return -EFAULT; 636 + w = (current_thread_info()->flags >> ALPHA_UAC_SHIFT) & 637 + UAC_BITMASK; 638 + if (put_user(w, (unsigned int __user *)buffer)) 639 + return -EFAULT; 640 640 return 1; 641 641 642 642 case GSI_PROC_TYPE: ··· 758 756 case SSIN_UACPROC: 759 757 again: 760 758 old = current_thread_info()->flags; 761 - new = old & ~(UAC_BITMASK << UAC_SHIFT); 762 - new = new | (w & UAC_BITMASK) << UAC_SHIFT; 759 + new = old & ~(UAC_BITMASK << ALPHA_UAC_SHIFT); 760 + new = new | (w & UAC_BITMASK) << ALPHA_UAC_SHIFT; 763 761 if (cmpxchg(&current_thread_info()->flags, 764 762 old, new) != old) 765 763 goto again;
+1 -1
arch/alpha/kernel/systbls.S
··· 360 360 .quad sys_newuname 361 361 .quad sys_nanosleep /* 340 */ 362 362 .quad sys_mremap 363 - .quad sys_nfsservctl 363 + .quad sys_ni_syscall /* old nfsservctl */ 364 364 .quad sys_setresuid 365 365 .quad sys_getresuid 366 366 .quad sys_pciconfig_read /* 345 */
+1 -1
arch/arm/boot/compressed/mmcif-sh7372.c
··· 82 82 83 83 84 84 /* Disable clock to MMC hardware block */ 85 - __raw_writel(__raw_readl(SMSTPCR3) & (1 << 12), SMSTPCR3); 85 + __raw_writel(__raw_readl(SMSTPCR3) | (1 << 12), SMSTPCR3); 86 86 87 87 mmc_update_progress(MMC_PROGRESS_DONE); 88 88 }
+1 -1
arch/arm/boot/compressed/sdhi-sh7372.c
··· 85 85 goto err; 86 86 87 87 /* Disable clock to SDHI1 hardware block */ 88 - __raw_writel(__raw_readl(SMSTPCR3) & (1 << 13), SMSTPCR3); 88 + __raw_writel(__raw_readl(SMSTPCR3) | (1 << 13), SMSTPCR3); 89 89 90 90 mmc_update_progress(MMC_PROGRESS_DONE); 91 91
+1 -1
arch/arm/kernel/calls.S
··· 178 178 CALL(sys_ni_syscall) /* vm86 */ 179 179 CALL(sys_ni_syscall) /* was sys_query_module */ 180 180 CALL(sys_poll) 181 - CALL(sys_nfsservctl) 181 + CALL(sys_ni_syscall) /* was nfsservctl */ 182 182 /* 170 */ CALL(sys_setresgid16) 183 183 CALL(sys_getresgid16) 184 184 CALL(sys_prctl)
+2 -1
arch/arm/mach-shmobile/board-ag5evm.c
··· 341 341 static struct sh_mobile_sdhi_info sdhi0_info = { 342 342 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, 343 343 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, 344 + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT, 344 345 .tmio_caps = MMC_CAP_SD_HIGHSPEED, 345 346 .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, 346 347 }; ··· 383 382 } 384 383 385 384 static struct sh_mobile_sdhi_info sh_sdhi1_info = { 386 - .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE, 385 + .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT, 387 386 .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ, 388 387 .tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, 389 388 .set_pwr = ag5evm_sdhi1_set_pwr,
+1
arch/arm/mach-shmobile/board-ap4evb.c
··· 1412 1412 fsi_init_pm_clock(); 1413 1413 sh7372_pm_init(); 1414 1414 pm_clk_add(&fsi_device.dev, "spu2"); 1415 + pm_clk_add(&lcdc1_device.dev, "hdmi"); 1415 1416 } 1416 1417 1417 1418 static void __init ap4evb_timer_init(void)
+5
arch/arm/mach-shmobile/board-mackerel.c
··· 641 641 }, 642 642 .driver_param = { 643 643 .buswait_bwait = 4, 644 + .d0_tx_id = SHDMA_SLAVE_USB0_TX, 645 + .d1_rx_id = SHDMA_SLAVE_USB0_RX, 644 646 }, 645 647 }, 646 648 }; ··· 812 810 .buswait_bwait = 4, 813 811 .pipe_type = usbhs1_pipe_cfg, 814 812 .pipe_size = ARRAY_SIZE(usbhs1_pipe_cfg), 813 + .d0_tx_id = SHDMA_SLAVE_USB1_TX, 814 + .d1_rx_id = SHDMA_SLAVE_USB1_RX, 815 815 }, 816 816 }, 817 817 }; ··· 1592 1588 hdmi_init_pm_clock(); 1593 1589 sh7372_pm_init(); 1594 1590 pm_clk_add(&fsi_device.dev, "spu2"); 1591 + pm_clk_add(&hdmi_lcdc_device.dev, "hdmi"); 1595 1592 } 1596 1593 1597 1594 static void __init mackerel_timer_init(void)
+24 -7
arch/arm/mach-shmobile/clock-sh7372.c
··· 503 503 &sh7372_fsidivb_clk, 504 504 }; 505 505 506 - enum { MSTP001, 506 + enum { MSTP001, MSTP000, 507 507 MSTP131, MSTP130, 508 508 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, 509 509 MSTP118, MSTP117, MSTP116, MSTP113, 510 510 MSTP106, MSTP101, MSTP100, 511 511 MSTP223, 512 - MSTP218, MSTP217, MSTP216, 513 - MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 514 - MSTP329, MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312, 515 - MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP406, MSTP403, 512 + MSTP218, MSTP217, MSTP216, MSTP214, MSTP208, MSTP207, 513 + MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 514 + MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312, 515 + MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP407, MSTP406, 516 + MSTP405, MSTP404, MSTP403, MSTP400, 516 517 MSTP_NR }; 517 518 518 519 #define MSTP(_parent, _reg, _bit, _flags) \ ··· 521 520 522 521 static struct clk mstp_clks[MSTP_NR] = { 523 522 [MSTP001] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR0, 1, 0), /* IIC2 */ 523 + [MSTP000] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR0, 0, 0), /* MSIOF0 */ 524 524 [MSTP131] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 31, 0), /* VEU3 */ 525 525 [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */ 526 526 [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */ ··· 540 538 [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */ 541 539 [MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */ 542 540 [MSTP216] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */ 541 + [MSTP214] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 14, 0), /* USBDMAC */ 542 + [MSTP208] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 8, 0), /* MSIOF1 */ 543 543 [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */ 544 544 [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */ 545 + [MSTP205] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 5, 0), /* MSIOF2 */ 545 546 [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 
4, 0), /* SCIFA0 */ 546 547 [MSTP203] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 3, 0), /* SCIFA1 */ 547 548 [MSTP202] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 2, 0), /* SCIFA2 */ 548 549 [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */ 549 550 [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */ 550 - [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */ 551 551 [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSI2 */ 552 552 [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */ 553 553 [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */ ··· 561 557 [MSTP413] = MSTP(&pllc1_div2_clk, SMSTPCR4, 13, 0), /* HDMI */ 562 558 [MSTP411] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 11, 0), /* IIC3 */ 563 559 [MSTP410] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 10, 0), /* IIC4 */ 560 + [MSTP407] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 7, 0), /* USB-DMAC1 */ 564 561 [MSTP406] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 6, 0), /* USB1 */ 562 + [MSTP405] = MSTP(&r_clk, SMSTPCR4, 5, 0), /* CMT4 */ 563 + [MSTP404] = MSTP(&r_clk, SMSTPCR4, 4, 0), /* CMT3 */ 565 564 [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */ 565 + [MSTP400] = MSTP(&r_clk, SMSTPCR4, 0, 0), /* CMT2 */ 566 566 }; 567 567 568 568 static struct clk_lookup lookups[] = { ··· 617 609 618 610 /* MSTP32 clocks */ 619 611 CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* IIC2 */ 612 + CLKDEV_DEV_ID("spi_sh_msiof.0", &mstp_clks[MSTP000]), /* MSIOF0 */ 620 613 CLKDEV_DEV_ID("uio_pdrv_genirq.4", &mstp_clks[MSTP131]), /* VEU3 */ 621 614 CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */ 622 615 CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */ ··· 638 629 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* DMAC1 */ 639 630 CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* DMAC2 */ 640 631 CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), /* DMAC3 */ 632 + CLKDEV_DEV_ID("sh-dma-engine.3", 
&mstp_clks[MSTP214]), /* USB-DMAC0 */ 633 + CLKDEV_DEV_ID("spi_sh_msiof.1", &mstp_clks[MSTP208]), /* MSIOF1 */ 641 634 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ 642 635 CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */ 636 + CLKDEV_DEV_ID("spi_sh_msiof.2", &mstp_clks[MSTP205]), /* MSIOF2 */ 643 637 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */ 644 638 CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */ 645 639 CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), /* SCIFA2 */ 646 640 CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */ 647 641 CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */ 648 - CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */ 649 642 CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI2 */ 650 643 CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */ 651 644 CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */ ··· 661 650 CLKDEV_DEV_ID("sh-mobile-hdmi", &mstp_clks[MSTP413]), /* HDMI */ 662 651 CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* IIC3 */ 663 652 CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* IIC4 */ 653 + CLKDEV_DEV_ID("sh-dma-engine.4", &mstp_clks[MSTP407]), /* USB-DMAC1 */ 664 654 CLKDEV_DEV_ID("r8a66597_hcd.1", &mstp_clks[MSTP406]), /* USB1 */ 665 655 CLKDEV_DEV_ID("r8a66597_udc.1", &mstp_clks[MSTP406]), /* USB1 */ 666 656 CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[MSTP406]), /* USB1 */ 657 + CLKDEV_DEV_ID("sh_cmt.4", &mstp_clks[MSTP405]), /* CMT4 */ 658 + CLKDEV_DEV_ID("sh_cmt.3", &mstp_clks[MSTP404]), /* CMT3 */ 667 659 CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */ 660 + CLKDEV_DEV_ID("sh_cmt.2", &mstp_clks[MSTP400]), /* CMT2 */ 668 661 662 + CLKDEV_ICK_ID("hdmi", "sh_mobile_lcdc_fb.1", 663 + &div6_reparent_clks[DIV6_HDMI]), 669 664 CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]), 670 665 CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]), 671 666 
CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]),
+1 -1
arch/arm/mach-shmobile/clock-sh73a0.c
··· 365 365 __raw_writel(0x108, SD2CKCR); 366 366 367 367 /* detect main clock parent */ 368 - switch ((__raw_readl(CKSCR) >> 24) & 0x03) { 368 + switch ((__raw_readl(CKSCR) >> 28) & 0x03) { 369 369 case 0: 370 370 main_clk.parent = &sh73a0_extal1_clk; 371 371 break;
+4
arch/arm/mach-shmobile/include/mach/sh7372.h
··· 459 459 SHDMA_SLAVE_SDHI2_TX, 460 460 SHDMA_SLAVE_MMCIF_RX, 461 461 SHDMA_SLAVE_MMCIF_TX, 462 + SHDMA_SLAVE_USB0_TX, 463 + SHDMA_SLAVE_USB0_RX, 464 + SHDMA_SLAVE_USB1_TX, 465 + SHDMA_SLAVE_USB1_RX, 462 466 }; 463 467 464 468 extern struct clk sh7372_extal1_clk;
+3 -4
arch/arm/mach-shmobile/intc-sh7372.c
··· 379 379 /* BBIF2 */ 380 380 VPU, 381 381 TSIF1, 382 - _3DG_SGX530, 382 + /* 3DG */ 383 383 _2DDMAC, 384 384 IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2, 385 385 IPMMU_IPMMUR, IPMMU_IPMMUR2, ··· 436 436 /* BBIF2 */ 437 437 INTCS_VECT(VPU, 0x980), 438 438 INTCS_VECT(TSIF1, 0x9a0), 439 - INTCS_VECT(_3DG_SGX530, 0x9e0), 439 + /* 3DG */ 440 440 INTCS_VECT(_2DDMAC, 0xa00), 441 441 INTCS_VECT(IIC2_ALI2, 0xa80), INTCS_VECT(IIC2_TACKI2, 0xaa0), 442 442 INTCS_VECT(IIC2_WAITI2, 0xac0), INTCS_VECT(IIC2_DTEI2, 0xae0), ··· 521 521 RTDMAC_1_DEI3, RTDMAC_1_DEI2, RTDMAC_1_DEI1, RTDMAC_1_DEI0 } }, 522 522 { 0xffd20198, 0xffd201d8, 8, /* IMR6SA / IMCR6SA */ 523 523 { 0, 0, MSIOF, 0, 524 - _3DG_SGX530, 0, 0, 0 } }, 524 + 0, 0, 0, 0 } }, 525 525 { 0xffd2019c, 0xffd201dc, 8, /* IMR7SA / IMCR7SA */ 526 526 { 0, TMU_TUNI2, TMU_TUNI1, TMU_TUNI0, 527 527 0, 0, 0, 0 } }, ··· 561 561 TMU_TUNI2, TSIF1 } }, 562 562 { 0xffd2001c, 0, 16, 4, /* IPRHS */ { 0, 0, VEU, BEU } }, 563 563 { 0xffd20020, 0, 16, 4, /* IPRIS */ { 0, MSIOF, TSIF0, IIC0 } }, 564 - { 0xffd20024, 0, 16, 4, /* IPRJS */ { 0, _3DG_SGX530, 0, 0 } }, 565 564 { 0xffd20028, 0, 16, 4, /* IPRKS */ { 0, 0, LMB, 0 } }, 566 565 { 0xffd2002c, 0, 16, 4, /* IPRLS */ { IPMMU, 0, 0, 0 } }, 567 566 { 0xffd20030, 0, 16, 4, /* IPRMS */ { IIC2, 0, 0, 0 } },
+161 -15
arch/arm/mach-shmobile/setup-sh7372.c
··· 169 169 }; 170 170 171 171 /* CMT */ 172 - static struct sh_timer_config cmt10_platform_data = { 173 - .name = "CMT10", 174 - .channel_offset = 0x10, 175 - .timer_bit = 0, 172 + static struct sh_timer_config cmt2_platform_data = { 173 + .name = "CMT2", 174 + .channel_offset = 0x40, 175 + .timer_bit = 5, 176 176 .clockevent_rating = 125, 177 177 .clocksource_rating = 125, 178 178 }; 179 179 180 - static struct resource cmt10_resources[] = { 180 + static struct resource cmt2_resources[] = { 181 181 [0] = { 182 - .name = "CMT10", 183 - .start = 0xe6138010, 184 - .end = 0xe613801b, 182 + .name = "CMT2", 183 + .start = 0xe6130040, 184 + .end = 0xe613004b, 185 185 .flags = IORESOURCE_MEM, 186 186 }, 187 187 [1] = { 188 - .start = evt2irq(0x0b00), /* CMT1_CMT10 */ 188 + .start = evt2irq(0x0b80), /* CMT2 */ 189 189 .flags = IORESOURCE_IRQ, 190 190 }, 191 191 }; 192 192 193 - static struct platform_device cmt10_device = { 193 + static struct platform_device cmt2_device = { 194 194 .name = "sh_cmt", 195 - .id = 10, 195 + .id = 2, 196 196 .dev = { 197 - .platform_data = &cmt10_platform_data, 197 + .platform_data = &cmt2_platform_data, 198 198 }, 199 - .resource = cmt10_resources, 200 - .num_resources = ARRAY_SIZE(cmt10_resources), 199 + .resource = cmt2_resources, 200 + .num_resources = ARRAY_SIZE(cmt2_resources), 201 201 }; 202 202 203 203 /* TMU */ ··· 602 602 }, 603 603 }; 604 604 605 + /* 606 + * USB-DMAC 607 + */ 608 + 609 + unsigned int usbts_shift[] = {3, 4, 5}; 610 + 611 + enum { 612 + XMIT_SZ_8BYTE = 0, 613 + XMIT_SZ_16BYTE = 1, 614 + XMIT_SZ_32BYTE = 2, 615 + }; 616 + 617 + #define USBTS_INDEX2VAL(i) (((i) & 3) << 6) 618 + 619 + static const struct sh_dmae_channel sh7372_usb_dmae_channels[] = { 620 + { 621 + .offset = 0, 622 + }, { 623 + .offset = 0x20, 624 + }, 625 + }; 626 + 627 + /* USB DMAC0 */ 628 + static const struct sh_dmae_slave_config sh7372_usb_dmae0_slaves[] = { 629 + { 630 + .slave_id = SHDMA_SLAVE_USB0_TX, 631 + .chcr = 
USBTS_INDEX2VAL(XMIT_SZ_8BYTE), 632 + }, { 633 + .slave_id = SHDMA_SLAVE_USB0_RX, 634 + .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE), 635 + }, 636 + }; 637 + 638 + static struct sh_dmae_pdata usb_dma0_platform_data = { 639 + .slave = sh7372_usb_dmae0_slaves, 640 + .slave_num = ARRAY_SIZE(sh7372_usb_dmae0_slaves), 641 + .channel = sh7372_usb_dmae_channels, 642 + .channel_num = ARRAY_SIZE(sh7372_usb_dmae_channels), 643 + .ts_low_shift = 6, 644 + .ts_low_mask = 0xc0, 645 + .ts_high_shift = 0, 646 + .ts_high_mask = 0, 647 + .ts_shift = usbts_shift, 648 + .ts_shift_num = ARRAY_SIZE(usbts_shift), 649 + .dmaor_init = DMAOR_DME, 650 + .chcr_offset = 0x14, 651 + .chcr_ie_bit = 1 << 5, 652 + .dmaor_is_32bit = 1, 653 + .needs_tend_set = 1, 654 + .no_dmars = 1, 655 + }; 656 + 657 + static struct resource sh7372_usb_dmae0_resources[] = { 658 + { 659 + /* Channel registers and DMAOR */ 660 + .start = 0xe68a0020, 661 + .end = 0xe68a0064 - 1, 662 + .flags = IORESOURCE_MEM, 663 + }, 664 + { 665 + /* VCR/SWR/DMICR */ 666 + .start = 0xe68a0000, 667 + .end = 0xe68a0014 - 1, 668 + .flags = IORESOURCE_MEM, 669 + }, 670 + { 671 + /* IRQ for channels */ 672 + .start = evt2irq(0x0a00), 673 + .end = evt2irq(0x0a00), 674 + .flags = IORESOURCE_IRQ, 675 + }, 676 + }; 677 + 678 + static struct platform_device usb_dma0_device = { 679 + .name = "sh-dma-engine", 680 + .id = 3, 681 + .resource = sh7372_usb_dmae0_resources, 682 + .num_resources = ARRAY_SIZE(sh7372_usb_dmae0_resources), 683 + .dev = { 684 + .platform_data = &usb_dma0_platform_data, 685 + }, 686 + }; 687 + 688 + /* USB DMAC1 */ 689 + static const struct sh_dmae_slave_config sh7372_usb_dmae1_slaves[] = { 690 + { 691 + .slave_id = SHDMA_SLAVE_USB1_TX, 692 + .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE), 693 + }, { 694 + .slave_id = SHDMA_SLAVE_USB1_RX, 695 + .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE), 696 + }, 697 + }; 698 + 699 + static struct sh_dmae_pdata usb_dma1_platform_data = { 700 + .slave = sh7372_usb_dmae1_slaves, 701 + .slave_num = 
ARRAY_SIZE(sh7372_usb_dmae1_slaves), 702 + .channel = sh7372_usb_dmae_channels, 703 + .channel_num = ARRAY_SIZE(sh7372_usb_dmae_channels), 704 + .ts_low_shift = 6, 705 + .ts_low_mask = 0xc0, 706 + .ts_high_shift = 0, 707 + .ts_high_mask = 0, 708 + .ts_shift = usbts_shift, 709 + .ts_shift_num = ARRAY_SIZE(usbts_shift), 710 + .dmaor_init = DMAOR_DME, 711 + .chcr_offset = 0x14, 712 + .chcr_ie_bit = 1 << 5, 713 + .dmaor_is_32bit = 1, 714 + .needs_tend_set = 1, 715 + .no_dmars = 1, 716 + }; 717 + 718 + static struct resource sh7372_usb_dmae1_resources[] = { 719 + { 720 + /* Channel registers and DMAOR */ 721 + .start = 0xe68c0020, 722 + .end = 0xe68c0064 - 1, 723 + .flags = IORESOURCE_MEM, 724 + }, 725 + { 726 + /* VCR/SWR/DMICR */ 727 + .start = 0xe68c0000, 728 + .end = 0xe68c0014 - 1, 729 + .flags = IORESOURCE_MEM, 730 + }, 731 + { 732 + /* IRQ for channels */ 733 + .start = evt2irq(0x1d00), 734 + .end = evt2irq(0x1d00), 735 + .flags = IORESOURCE_IRQ, 736 + }, 737 + }; 738 + 739 + static struct platform_device usb_dma1_device = { 740 + .name = "sh-dma-engine", 741 + .id = 4, 742 + .resource = sh7372_usb_dmae1_resources, 743 + .num_resources = ARRAY_SIZE(sh7372_usb_dmae1_resources), 744 + .dev = { 745 + .platform_data = &usb_dma1_platform_data, 746 + }, 747 + }; 748 + 605 749 /* VPU */ 606 750 static struct uio_info vpu_platform_data = { 607 751 .name = "VPU5HG", ··· 962 818 &scif4_device, 963 819 &scif5_device, 964 820 &scif6_device, 965 - &cmt10_device, 821 + &cmt2_device, 966 822 &tmu00_device, 967 823 &tmu01_device, 968 824 }; ··· 973 829 &dma0_device, 974 830 &dma1_device, 975 831 &dma2_device, 832 + &usb_dma0_device, 833 + &usb_dma1_device, 976 834 &vpu_device, 977 835 &veu0_device, 978 836 &veu1_device,
+2 -1
arch/arm/plat-omap/omap_device.c
··· 622 622 SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, 623 623 _od_runtime_idle) 624 624 USE_PLATFORM_PM_SLEEP_OPS 625 - SET_SYSTEM_SLEEP_PM_OPS(_od_suspend_noirq, _od_resume_noirq) 625 + .suspend_noirq = _od_suspend_noirq, 626 + .resume_noirq = _od_resume_noirq, 626 627 } 627 628 }; 628 629
+1 -1
arch/avr32/kernel/syscall_table.S
··· 158 158 .long sys_sched_rr_get_interval 159 159 .long sys_nanosleep 160 160 .long sys_poll 161 - .long sys_nfsservctl /* 145 */ 161 + .long sys_ni_syscall /* 145 was nfsservctl */ 162 162 .long sys_setresgid 163 163 .long sys_getresgid 164 164 .long sys_prctl
+1 -1
arch/blackfin/mach-common/entry.S
··· 1543 1543 .long _sys_ni_syscall /* for vm86 */ 1544 1544 .long _sys_ni_syscall /* old "query_module" */ 1545 1545 .long _sys_ni_syscall /* sys_poll */ 1546 - .long _sys_nfsservctl 1546 + .long _sys_ni_syscall /* old nfsservctl */ 1547 1547 .long _sys_setresgid /* setresgid16 */ /* 170 */ 1548 1548 .long _sys_getresgid /* getresgid16 */ 1549 1549 .long _sys_prctl
+1 -1
arch/cris/arch-v10/kernel/entry.S
··· 771 771 .long sys_ni_syscall /* sys_vm86 */ 772 772 .long sys_ni_syscall /* Old sys_query_module */ 773 773 .long sys_poll 774 - .long sys_nfsservctl 774 + .long sys_ni_syscall /* old nfsservctl */ 775 775 .long sys_setresgid16 /* 170 */ 776 776 .long sys_getresgid16 777 777 .long sys_prctl
+1 -1
arch/cris/arch-v32/kernel/entry.S
··· 714 714 .long sys_ni_syscall /* sys_vm86 */ 715 715 .long sys_ni_syscall /* Old sys_query_module */ 716 716 .long sys_poll 717 - .long sys_nfsservctl 717 + .long sys_ni_syscall /* Old nfsservctl */ 718 718 .long sys_setresgid16 /* 170 */ 719 719 .long sys_getresgid16 720 720 .long sys_prctl
+9
arch/cris/include/asm/serial.h
··· 1 + #ifndef _ASM_SERIAL_H 2 + #define _ASM_SERIAL_H 3 + 4 + /* 5 + * This assumes you have a 1.8432 MHz clock for your UART. 6 + */ 7 + #define BASE_BAUD (1843200 / 16) 8 + 9 + #endif /* _ASM_SERIAL_H */
+1 -1
arch/frv/kernel/entry.S
··· 1358 1358 .long sys_ni_syscall /* for vm86 */ 1359 1359 .long sys_ni_syscall /* Old sys_query_module */ 1360 1360 .long sys_poll 1361 - .long sys_nfsservctl 1361 + .long sys_ni_syscall /* Old nfsservctl */ 1362 1362 .long sys_setresgid16 /* 170 */ 1363 1363 .long sys_getresgid16 1364 1364 .long sys_prctl
+1 -1
arch/h8300/kernel/syscalls.S
··· 183 183 .long SYMBOL_NAME(sys_ni_syscall) /* for vm86 */ 184 184 .long SYMBOL_NAME(sys_ni_syscall) /* sys_query_module */ 185 185 .long SYMBOL_NAME(sys_poll) 186 - .long SYMBOL_NAME(sys_nfsservctl) 186 + .long SYMBOL_NAME(sys_ni_syscall) /* old nfsservctl */ 187 187 .long SYMBOL_NAME(sys_setresgid16) /* 170 */ 188 188 .long SYMBOL_NAME(sys_getresgid16) 189 189 .long SYMBOL_NAME(sys_prctl)
+1 -1
arch/ia64/kernel/entry.S
··· 1614 1614 data8 sys_sched_get_priority_min 1615 1615 data8 sys_sched_rr_get_interval 1616 1616 data8 sys_nanosleep 1617 - data8 sys_nfsservctl 1617 + data8 sys_ni_syscall // old nfsservctl 1618 1618 data8 sys_prctl // 1170 1619 1619 data8 sys_getpagesize 1620 1620 data8 sys_mmap2
+1 -1
arch/m32r/kernel/syscall_table.S
··· 168 168 .long sys_tas /* vm86 syscall holder */ 169 169 .long sys_ni_syscall /* query_module syscall holder */ 170 170 .long sys_poll 171 - .long sys_nfsservctl 171 + .long sys_ni_syscall /* was nfsservctl */ 172 172 .long sys_setresgid /* 170 */ 173 173 .long sys_getresgid 174 174 .long sys_prctl
+1 -1
arch/m68k/kernel/syscalltable.S
··· 189 189 .long sys_getpagesize 190 190 .long sys_ni_syscall /* old "query_module" */ 191 191 .long sys_poll 192 - .long sys_nfsservctl 192 + .long sys_ni_syscall /* old nfsservctl */ 193 193 .long sys_setresgid16 /* 170 */ 194 194 .long sys_getresgid16 195 195 .long sys_prctl
+1 -1
arch/microblaze/kernel/syscall_table.S
··· 173 173 .long sys_ni_syscall /* sys_vm86 */ 174 174 .long sys_ni_syscall /* Old sys_query_module */ 175 175 .long sys_poll 176 - .long sys_nfsservctl 176 + .long sys_ni_syscall /* old nfsservctl */ 177 177 .long sys_setresgid /* 170 */ 178 178 .long sys_getresgid 179 179 .long sys_prctl
+1 -1
arch/mips/kernel/scall32-o32.S
··· 424 424 sys sys_getresuid 3 425 425 sys sys_ni_syscall 0 /* was sys_query_module */ 426 426 sys sys_poll 3 427 - sys sys_nfsservctl 3 427 + sys sys_ni_syscall 0 /* was nfsservctl */ 428 428 sys sys_setresgid 3 /* 4190 */ 429 429 sys sys_getresgid 3 430 430 sys sys_prctl 5
+1 -1
arch/mips/kernel/scall64-64.S
··· 299 299 PTR sys_ni_syscall /* 5170, was get_kernel_syms */ 300 300 PTR sys_ni_syscall /* was query_module */ 301 301 PTR sys_quotactl 302 - PTR sys_nfsservctl 302 + PTR sys_ni_syscall /* was nfsservctl */ 303 303 PTR sys_ni_syscall /* res. for getpmsg */ 304 304 PTR sys_ni_syscall /* 5175 for putpmsg */ 305 305 PTR sys_ni_syscall /* res. for afs_syscall */
+1 -1
arch/mips/kernel/scall64-n32.S
··· 294 294 PTR sys_ni_syscall /* 6170, was get_kernel_syms */ 295 295 PTR sys_ni_syscall /* was query_module */ 296 296 PTR sys_quotactl 297 - PTR compat_sys_nfsservctl 297 + PTR sys_ni_syscall /* was nfsservctl */ 298 298 PTR sys_ni_syscall /* res. for getpmsg */ 299 299 PTR sys_ni_syscall /* 6175 for putpmsg */ 300 300 PTR sys_ni_syscall /* res. for afs_syscall */
+1 -1
arch/mips/kernel/scall64-o32.S
··· 392 392 PTR sys_getresuid 393 393 PTR sys_ni_syscall /* was query_module */ 394 394 PTR sys_poll 395 - PTR compat_sys_nfsservctl 395 + PTR sys_ni_syscall /* was nfsservctl */ 396 396 PTR sys_setresgid /* 4190 */ 397 397 PTR sys_getresgid 398 398 PTR sys_prctl
+1 -1
arch/mn10300/kernel/entry.S
··· 589 589 .long sys_ni_syscall /* vm86 */ 590 590 .long sys_ni_syscall /* Old sys_query_module */ 591 591 .long sys_poll 592 - .long sys_nfsservctl 592 + .long sys_ni_syscall /* was nfsservctl */ 593 593 .long sys_setresgid16 /* 170 */ 594 594 .long sys_getresgid16 595 595 .long sys_prctl
+3 -2
arch/powerpc/sysdev/fsl_rio.c
··· 54 54 #define ODSR_CLEAR 0x1c00 55 55 #define LTLEECSR_ENABLE_ALL 0xFFC000FC 56 56 #define ESCSR_CLEAR 0x07120204 57 + #define IECSR_CLEAR 0x80000000 57 58 58 59 #define RIO_PORT1_EDCSR 0x0640 59 60 #define RIO_PORT2_EDCSR 0x0680 ··· 1090 1089 1091 1090 if (offset == 0) { 1092 1091 out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0); 1093 - out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), 0); 1092 + out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), IECSR_CLEAR); 1094 1093 out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR); 1095 1094 } else { 1096 1095 out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0); 1097 - out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), 0); 1096 + out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), IECSR_CLEAR); 1098 1097 out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR); 1099 1098 } 1100 1099 }
-6
arch/s390/kernel/compat_wrapper.S
··· 665 665 lgfr %r4,%r4 # long 666 666 jg sys_poll # branch to system call 667 667 668 - ENTRY(compat_sys_nfsservctl_wrapper) 669 - lgfr %r2,%r2 # int 670 - llgtr %r3,%r3 # struct compat_nfsctl_arg* 671 - llgtr %r4,%r4 # union compat_nfsctl_res* 672 - jg compat_sys_nfsservctl # branch to system call 673 - 674 668 ENTRY(sys32_setresgid16_wrapper) 675 669 llgfr %r2,%r2 # __kernel_old_gid_emu31_t 676 670 llgfr %r3,%r3 # __kernel_old_gid_emu31_t
+8 -6
arch/s390/kernel/early.c
··· 396 396 static __init void rescue_initrd(void) 397 397 { 398 398 #ifdef CONFIG_BLK_DEV_INITRD 399 + unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20); 399 400 /* 400 - * Move the initrd right behind the bss section in case it starts 401 - * within the bss section. So we don't overwrite it when the bss 402 - * section gets cleared. 401 + * Just like in case of IPL from VM reader we make sure there is a 402 + * gap of 4MB between end of kernel and start of initrd. 403 + * That way we can also be sure that saving an NSS will succeed, 404 + * which however only requires different segments. 403 405 */ 404 406 if (!INITRD_START || !INITRD_SIZE) 405 407 return; 406 - if (INITRD_START >= (unsigned long) __bss_stop) 408 + if (INITRD_START >= min_initrd_addr) 407 409 return; 408 - memmove(__bss_stop, (void *) INITRD_START, INITRD_SIZE); 409 - INITRD_START = (unsigned long) __bss_stop; 410 + memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE); 411 + INITRD_START = min_initrd_addr; 410 412 #endif 411 413 } 412 414
+4 -3
arch/s390/kernel/ipl.c
··· 1220 1220 /* sysfs: create fcp kset for mixing attr group and bin attrs */ 1221 1221 reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL, 1222 1222 &reipl_kset->kobj); 1223 - if (!reipl_kset) { 1223 + if (!reipl_fcp_kset) { 1224 1224 free_page((unsigned long) reipl_block_fcp); 1225 1225 return -ENOMEM; 1226 1226 } ··· 1618 1618 1619 1619 static void stop_run(struct shutdown_trigger *trigger) 1620 1620 { 1621 - if (strcmp(trigger->name, ON_PANIC_STR) == 0) 1621 + if (strcmp(trigger->name, ON_PANIC_STR) == 0 || 1622 + strcmp(trigger->name, ON_RESTART_STR) == 0) 1622 1623 disabled_wait((unsigned long) __builtin_return_address(0)); 1623 1624 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) 1624 1625 cpu_relax(); ··· 1718 1717 /* on restart */ 1719 1718 1720 1719 static struct shutdown_trigger on_restart_trigger = {ON_RESTART_STR, 1721 - &reipl_action}; 1720 + &stop_action}; 1722 1721 1723 1722 static ssize_t on_restart_show(struct kobject *kobj, 1724 1723 struct kobj_attribute *attr, char *page)
+1 -1
arch/s390/kernel/syscalls.S
··· 177 177 NI_SYSCALL /* for vm86 */ 178 178 NI_SYSCALL /* old sys_query_module */ 179 179 SYSCALL(sys_poll,sys_poll,sys32_poll_wrapper) 180 - SYSCALL(sys_nfsservctl,sys_nfsservctl,compat_sys_nfsservctl_wrapper) 180 + NI_SYSCALL /* old nfsservctl */ 181 181 SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */ 182 182 SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */ 183 183 SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper)
+1 -1
arch/sh/include/asm/ptrace.h
··· 123 123 struct perf_event; 124 124 struct perf_sample_data; 125 125 126 - extern void ptrace_triggered(struct perf_event *bp, int nmi, 126 + extern void ptrace_triggered(struct perf_event *bp, 127 127 struct perf_sample_data *data, struct pt_regs *regs); 128 128 129 129 #define task_pt_regs(task) \
+1
arch/sh/kernel/cpu/sh4a/setup-sh7757.c
··· 15 15 #include <linux/serial_sci.h> 16 16 #include <linux/io.h> 17 17 #include <linux/mm.h> 18 + #include <linux/dma-mapping.h> 18 19 #include <linux/sh_timer.h> 19 20 #include <linux/sh_dma.h> 20 21
+1 -1
arch/sh/kernel/idle.c
··· 22 22 #include <linux/atomic.h> 23 23 #include <asm/smp.h> 24 24 25 - static void (*pm_idle)(void); 25 + void (*pm_idle)(void); 26 26 27 27 static int hlt_counter; 28 28
+1 -1
arch/sh/kernel/syscalls_32.S
··· 185 185 .long sys_ni_syscall /* vm86 */ 186 186 .long sys_ni_syscall /* old "query_module" */ 187 187 .long sys_poll 188 - .long sys_nfsservctl 188 + .long sys_ni_syscall /* was nfsservctl */ 189 189 .long sys_setresgid16 /* 170 */ 190 190 .long sys_getresgid16 191 191 .long sys_prctl
+1 -1
arch/sh/kernel/syscalls_64.S
··· 189 189 .long sys_ni_syscall /* vm86 */ 190 190 .long sys_ni_syscall /* old "query_module" */ 191 191 .long sys_poll 192 - .long sys_nfsservctl 192 + .long sys_ni_syscall /* was nfsservctl */ 193 193 .long sys_setresgid16 /* 170 */ 194 194 .long sys_getresgid16 195 195 .long sys_prctl
+37
arch/sh/kernel/traps_32.c
··· 316 316 break; 317 317 } 318 318 break; 319 + 320 + case 9: /* mov.w @(disp,PC),Rn */ 321 + srcu = (unsigned char __user *)regs->pc; 322 + srcu += 4; 323 + srcu += (instruction & 0x00FF) << 1; 324 + dst = (unsigned char *)rn; 325 + *(unsigned long *)dst = 0; 326 + 327 + #if !defined(__LITTLE_ENDIAN__) 328 + dst += 2; 329 + #endif 330 + 331 + if (ma->from(dst, srcu, 2)) 332 + goto fetch_fault; 333 + sign_extend(2, dst); 334 + ret = 0; 335 + break; 336 + 337 + case 0xd: /* mov.l @(disp,PC),Rn */ 338 + srcu = (unsigned char __user *)(regs->pc & ~0x3); 339 + srcu += 4; 340 + srcu += (instruction & 0x00FF) << 2; 341 + dst = (unsigned char *)rn; 342 + *(unsigned long *)dst = 0; 343 + 344 + if (ma->from(dst, srcu, 4)) 345 + goto fetch_fault; 346 + ret = 0; 347 + break; 319 348 } 320 349 return ret; 321 350 ··· 495 466 case 0x0500: /* mov.w @(disp,Rm),R0 */ 496 467 goto simple; 497 468 case 0x0B00: /* bf lab - no delayslot*/ 469 + ret = 0; 498 470 break; 499 471 case 0x0F00: /* bf/s lab */ 500 472 ret = handle_delayslot(regs, instruction, ma); ··· 509 479 } 510 480 break; 511 481 case 0x0900: /* bt lab - no delayslot */ 482 + ret = 0; 512 483 break; 513 484 case 0x0D00: /* bt/s lab */ 514 485 ret = handle_delayslot(regs, instruction, ma); ··· 525 494 } 526 495 break; 527 496 497 + case 0x9000: /* mov.w @(disp,Rm),Rn */ 498 + goto simple; 499 + 528 500 case 0xA000: /* bra label */ 529 501 ret = handle_delayslot(regs, instruction, ma); 530 502 if (ret==0) ··· 541 507 regs->pc += SH_PC_12BIT_OFFSET(instruction); 542 508 } 543 509 break; 510 + 511 + case 0xD000: /* mov.l @(disp,Rm),Rn */ 512 + goto simple; 544 513 } 545 514 return ret; 546 515
+1 -1
arch/sparc/kernel/irq.h
··· 88 88 #define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu) 89 89 90 90 /* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */ 91 - #define SUN4D_IPI_IRQ 14 91 + #define SUN4D_IPI_IRQ 13 92 92 93 93 extern void sun4d_ipi_interrupt(void); 94 94
-1
arch/sparc/kernel/sys32.S
··· 81 81 SIGN2(sys32_fadvise64_64, compat_sys_fadvise64_64, %o0, %o5) 82 82 SIGN2(sys32_bdflush, sys_bdflush, %o0, %o1) 83 83 SIGN1(sys32_mlockall, sys_mlockall, %o0) 84 - SIGN1(sys32_nfsservctl, compat_sys_nfsservctl, %o0) 85 84 SIGN1(sys32_clock_nanosleep, compat_sys_clock_nanosleep, %o1) 86 85 SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1) 87 86 SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
+1 -1
arch/sparc/kernel/systbls_32.S
··· 67 67 /*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall 68 68 /*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler 69 69 /*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep 70 - /*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl 70 + /*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_ni_syscall 71 71 /*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep 72 72 /*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun 73 73 /*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
+2 -2
arch/sparc/kernel/systbls_64.S
··· 68 68 .word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall 69 69 /*240*/ .word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler 70 70 .word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep 71 - /*250*/ .word sys_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl 71 + /*250*/ .word sys_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys_nis_syscall 72 72 .word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep 73 73 /*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun 74 74 .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy ··· 145 145 .word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall 146 146 /*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler 147 147 .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep 148 - /*250*/ .word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl 148 + /*250*/ .word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall 149 149 .word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep 150 150 /*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun 151 151 .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
+1 -1
arch/x86/ia32/ia32entry.S
··· 672 672 .quad sys32_vm86_warning /* vm86 */ 673 673 .quad quiet_ni_syscall /* query_module */ 674 674 .quad sys_poll 675 - .quad compat_sys_nfsservctl 675 + .quad quiet_ni_syscall /* old nfsservctl */ 676 676 .quad sys_setresgid16 /* 170 */ 677 677 .quad sys_getresgid16 678 678 .quad sys_prctl
+1 -1
arch/x86/include/asm/unistd_64.h
··· 414 414 __SYSCALL(__NR_quotactl, sys_quotactl) 415 415 416 416 #define __NR_nfsservctl 180 417 - __SYSCALL(__NR_nfsservctl, sys_nfsservctl) 417 + __SYSCALL(__NR_nfsservctl, sys_ni_syscall) 418 418 419 419 /* reserved for LiS/STREAMS */ 420 420 #define __NR_getpmsg 181
-2
arch/x86/kernel/cpu/mtrr/main.c
··· 149 149 */ 150 150 static int mtrr_rendezvous_handler(void *info) 151 151 { 152 - #ifdef CONFIG_SMP 153 152 struct set_mtrr_data *data = info; 154 153 155 154 /* ··· 170 171 } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) { 171 172 mtrr_if->set_all(); 172 173 } 173 - #endif 174 174 return 0; 175 175 } 176 176
+2 -6
arch/x86/kernel/entry_32.S
··· 54 54 #include <asm/ftrace.h> 55 55 #include <asm/irq_vectors.h> 56 56 #include <asm/cpufeature.h> 57 + #include <asm/alternative-asm.h> 57 58 58 59 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ 59 60 #include <linux/elf-em.h> ··· 874 873 661: pushl_cfi $do_general_protection 875 874 662: 876 875 .section .altinstructions,"a" 877 - .balign 4 878 - .long 661b 879 - .long 663f 880 - .word X86_FEATURE_XMM 881 - .byte 662b-661b 882 - .byte 664f-663f 876 + altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f 883 877 .previous 884 878 .section .altinstr_replacement,"ax" 885 879 663: pushl $do_simd_coprocessor_error
+1 -1
arch/x86/kernel/syscall_table_32.S
··· 168 168 .long ptregs_vm86 169 169 .long sys_ni_syscall /* Old sys_query_module */ 170 170 .long sys_poll 171 - .long sys_nfsservctl 171 + .long sys_ni_syscall /* Old nfsservctl */ 172 172 .long sys_setresgid16 /* 170 */ 173 173 .long sys_getresgid16 174 174 .long sys_prctl
+3 -1
arch/x86/platform/mrst/mrst.c
··· 689 689 irq_attr.trigger = 1; 690 690 irq_attr.polarity = 1; 691 691 io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr); 692 - } 692 + } else 693 + pentry->irq = 0; /* No irq */ 694 + 693 695 switch (pentry->type) { 694 696 case SFI_DEV_TYPE_IPC: 695 697 /* ID as IRQ is a hack that will go away */
+1 -1
arch/xtensa/include/asm/unistd.h
··· 455 455 #define __NR_quotactl 204 456 456 __SYSCALL(204, sys_quotactl, 4) 457 457 #define __NR_nfsservctl 205 458 - __SYSCALL(205, sys_nfsservctl, 3) 458 + __SYSCALL(205, sys_ni_syscall, 0) 459 459 #define __NR__sysctl 206 460 460 __SYSCALL(206, sys_sysctl, 1) 461 461 #define __NR_bdflush 207
+1
drivers/base/devres.c
··· 397 397 398 398 static int release_nodes(struct device *dev, struct list_head *first, 399 399 struct list_head *end, unsigned long flags) 400 + __releases(&dev->devres_lock) 400 401 { 401 402 LIST_HEAD(todo); 402 403 int cnt;
+1 -1
drivers/base/devtmpfs.c
··· 376 376 return err; 377 377 } 378 378 379 - static __initdata DECLARE_COMPLETION(setup_done); 379 + static DECLARE_COMPLETION(setup_done); 380 380 381 381 static int handle(const char *name, mode_t mode, struct device *dev) 382 382 {
+6 -5
drivers/base/firmware_class.c
··· 521 521 if (!firmware_p) 522 522 return -EINVAL; 523 523 524 - if (WARN_ON(usermodehelper_is_disabled())) { 525 - dev_err(device, "firmware: %s will not be loaded\n", name); 526 - return -EBUSY; 527 - } 528 - 529 524 *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); 530 525 if (!firmware) { 531 526 dev_err(device, "%s: kmalloc(struct firmware) failed\n", ··· 532 537 if (fw_get_builtin_firmware(firmware, name)) { 533 538 dev_dbg(device, "firmware: using built-in firmware %s\n", name); 534 539 return 0; 540 + } 541 + 542 + if (WARN_ON(usermodehelper_is_disabled())) { 543 + dev_err(device, "firmware: %s will not be loaded\n", name); 544 + retval = -EBUSY; 545 + goto out; 535 546 } 536 547 537 548 if (uevent)
+1 -1
drivers/base/platform.c
··· 33 33 34 34 /** 35 35 * arch_setup_pdev_archdata - Allow manipulation of archdata before its used 36 - * @dev: platform device 36 + * @pdev: platform device 37 37 * 38 38 * This is called before platform_device_add() such that any pdev_archdata may 39 39 * be setup before the platform_notifier is called. So if a user needs to
+22 -18
drivers/base/power/clock_ops.c
··· 19 19 20 20 struct pm_clk_data { 21 21 struct list_head clock_list; 22 - struct mutex lock; 22 + spinlock_t lock; 23 23 }; 24 24 25 25 enum pce_status { ··· 73 73 } 74 74 } 75 75 76 - mutex_lock(&pcd->lock); 76 + spin_lock_irq(&pcd->lock); 77 77 list_add_tail(&ce->node, &pcd->clock_list); 78 - mutex_unlock(&pcd->lock); 78 + spin_unlock_irq(&pcd->lock); 79 79 return 0; 80 80 } 81 81 ··· 83 83 * __pm_clk_remove - Destroy PM clock entry. 84 84 * @ce: PM clock entry to destroy. 85 85 * 86 - * This routine must be called under the mutex protecting the PM list of clocks 87 - * corresponding the the @ce's device. 86 + * This routine must be called under the spinlock protecting the PM list of 87 + * clocks corresponding the the @ce's device. 88 88 */ 89 89 static void __pm_clk_remove(struct pm_clock_entry *ce) 90 90 { ··· 123 123 if (!pcd) 124 124 return; 125 125 126 - mutex_lock(&pcd->lock); 126 + spin_lock_irq(&pcd->lock); 127 127 128 128 list_for_each_entry(ce, &pcd->clock_list, node) { 129 129 if (!con_id && !ce->con_id) { ··· 137 137 } 138 138 } 139 139 140 - mutex_unlock(&pcd->lock); 140 + spin_unlock_irq(&pcd->lock); 141 141 } 142 142 143 143 /** ··· 158 158 } 159 159 160 160 INIT_LIST_HEAD(&pcd->clock_list); 161 - mutex_init(&pcd->lock); 161 + spin_lock_init(&pcd->lock); 162 162 dev->power.subsys_data = pcd; 163 163 return 0; 164 164 } ··· 181 181 182 182 dev->power.subsys_data = NULL; 183 183 184 - mutex_lock(&pcd->lock); 184 + spin_lock_irq(&pcd->lock); 185 185 186 186 list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) 187 187 __pm_clk_remove(ce); 188 188 189 - mutex_unlock(&pcd->lock); 189 + spin_unlock_irq(&pcd->lock); 190 190 191 191 kfree(pcd); 192 192 } ··· 220 220 { 221 221 struct pm_clk_data *pcd = __to_pcd(dev); 222 222 struct pm_clock_entry *ce; 223 + unsigned long flags; 223 224 224 225 dev_dbg(dev, "%s()\n", __func__); 225 226 226 227 if (!pcd) 227 228 return 0; 228 229 229 - mutex_lock(&pcd->lock); 230 + spin_lock_irqsave(&pcd->lock, 
flags); 230 231 231 232 list_for_each_entry_reverse(ce, &pcd->clock_list, node) { 232 233 if (ce->status == PCE_STATUS_NONE) ··· 239 238 } 240 239 } 241 240 242 - mutex_unlock(&pcd->lock); 241 + spin_unlock_irqrestore(&pcd->lock, flags); 243 242 244 243 return 0; 245 244 } ··· 252 251 { 253 252 struct pm_clk_data *pcd = __to_pcd(dev); 254 253 struct pm_clock_entry *ce; 254 + unsigned long flags; 255 255 256 256 dev_dbg(dev, "%s()\n", __func__); 257 257 258 258 if (!pcd) 259 259 return 0; 260 260 261 - mutex_lock(&pcd->lock); 261 + spin_lock_irqsave(&pcd->lock, flags); 262 262 263 263 list_for_each_entry(ce, &pcd->clock_list, node) { 264 264 if (ce->status == PCE_STATUS_NONE) ··· 271 269 } 272 270 } 273 271 274 - mutex_unlock(&pcd->lock); 272 + spin_unlock_irqrestore(&pcd->lock, flags); 275 273 276 274 return 0; 277 275 } ··· 346 344 { 347 345 struct pm_clk_data *pcd = __to_pcd(dev); 348 346 struct pm_clock_entry *ce; 347 + unsigned long flags; 349 348 350 349 dev_dbg(dev, "%s()\n", __func__); 351 350 ··· 354 351 if (!pcd || !dev->driver) 355 352 return 0; 356 353 357 - mutex_lock(&pcd->lock); 354 + spin_lock_irqsave(&pcd->lock, flags); 358 355 359 356 list_for_each_entry_reverse(ce, &pcd->clock_list, node) 360 357 clk_disable(ce->clk); 361 358 362 - mutex_unlock(&pcd->lock); 359 + spin_unlock_irqrestore(&pcd->lock, flags); 363 360 364 361 return 0; 365 362 } ··· 372 369 { 373 370 struct pm_clk_data *pcd = __to_pcd(dev); 374 371 struct pm_clock_entry *ce; 372 + unsigned long flags; 375 373 376 374 dev_dbg(dev, "%s()\n", __func__); 377 375 ··· 380 376 if (!pcd || !dev->driver) 381 377 return 0; 382 378 383 - mutex_lock(&pcd->lock); 379 + spin_lock_irqsave(&pcd->lock, flags); 384 380 385 381 list_for_each_entry(ce, &pcd->clock_list, node) 386 382 clk_enable(ce->clk); 387 383 388 - mutex_unlock(&pcd->lock); 384 + spin_unlock_irqrestore(&pcd->lock, flags); 389 385 390 386 return 0; 391 387 }
+2 -3
drivers/char/msm_smd_pkt.c
··· 379 379 for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) { 380 380 smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev), 381 381 GFP_KERNEL); 382 - if (IS_ERR(smd_pkt_devp[i])) { 383 - r = PTR_ERR(smd_pkt_devp[i]); 384 - pr_err("kmalloc() failed %d\n", r); 382 + if (!smd_pkt_devp[i]) { 383 + pr_err("kmalloc() failed\n"); 385 384 goto clean_cdevs; 386 385 } 387 386
+32 -2
drivers/clocksource/sh_cmt.c
··· 26 26 #include <linux/clk.h> 27 27 #include <linux/irq.h> 28 28 #include <linux/err.h> 29 + #include <linux/delay.h> 29 30 #include <linux/clocksource.h> 30 31 #include <linux/clockchips.h> 31 32 #include <linux/sh_timer.h> ··· 151 150 152 151 static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) 153 152 { 154 - int ret; 153 + int k, ret; 155 154 156 155 /* enable clock */ 157 156 ret = clk_enable(p->clk); 158 157 if (ret) { 159 158 dev_err(&p->pdev->dev, "cannot enable clock\n"); 160 - return ret; 159 + goto err0; 161 160 } 162 161 163 162 /* make sure channel is disabled */ ··· 175 174 sh_cmt_write(p, CMCOR, 0xffffffff); 176 175 sh_cmt_write(p, CMCNT, 0); 177 176 177 + /* 178 + * According to the sh73a0 user's manual, as CMCNT can be operated 179 + * only by the RCLK (Pseudo 32 KHz), there's one restriction on 180 + * modifying CMCNT register; two RCLK cycles are necessary before 181 + * this register is either read or any modification of the value 182 + * it holds is reflected in the LSI's actual operation. 183 + * 184 + * While at it, we're supposed to clear out the CMCNT as of this 185 + * moment, so make sure it's processed properly here. This will 186 + * take RCLKx2 at maximum. 187 + */ 188 + for (k = 0; k < 100; k++) { 189 + if (!sh_cmt_read(p, CMCNT)) 190 + break; 191 + udelay(1); 192 + } 193 + 194 + if (sh_cmt_read(p, CMCNT)) { 195 + dev_err(&p->pdev->dev, "cannot clear CMCNT\n"); 196 + ret = -ETIMEDOUT; 197 + goto err1; 198 + } 199 + 178 200 /* enable channel */ 179 201 sh_cmt_start_stop_ch(p, 1); 180 202 return 0; 203 + err1: 204 + /* stop clock */ 205 + clk_disable(p->clk); 206 + 207 + err0: 208 + return ret; 181 209 } 182 210 183 211 static void sh_cmt_disable(struct sh_cmt_priv *p)
+4
drivers/firewire/sbp2.c
··· 1198 1198 { 1199 1199 struct fw_unit *unit = fw_unit(dev); 1200 1200 struct sbp2_target *tgt = dev_get_drvdata(&unit->device); 1201 + struct sbp2_logical_unit *lu; 1202 + 1203 + list_for_each_entry(lu, &tgt->lu_list, link) 1204 + cancel_delayed_work_sync(&lu->work); 1201 1205 1202 1206 sbp2_target_put(tgt); 1203 1207 return 0;
+1 -1
drivers/firmware/google/gsmi.c
··· 420 420 421 421 static efi_status_t gsmi_set_variable(efi_char16_t *name, 422 422 efi_guid_t *vendor, 423 - unsigned long attr, 423 + u32 attr, 424 424 unsigned long data_size, 425 425 void *data) 426 426 {
+1 -3
drivers/gpu/drm/i915/intel_display.c
··· 878 878 int pp_reg, lvds_reg; 879 879 u32 val; 880 880 enum pipe panel_pipe = PIPE_A; 881 - bool locked = locked; 881 + bool locked = true; 882 882 883 883 if (HAS_PCH_SPLIT(dev_priv->dev)) { 884 884 pp_reg = PCH_PP_CONTROL; ··· 7237 7237 encoder->base.possible_clones = 7238 7238 intel_encoder_clones(dev, encoder->clone_mask); 7239 7239 } 7240 - 7241 - intel_panel_setup_backlight(dev); 7242 7240 7243 7241 /* disable all the possible outputs/crtcs before entering KMS mode */ 7244 7242 drm_helper_disable_unused_functions(dev);
+14 -28
drivers/hwmon/i5k_amb.c
··· 114 114 void __iomem *amb_mmio; 115 115 struct i5k_device_attribute *attrs; 116 116 unsigned int num_attrs; 117 - unsigned long chipset_id; 118 117 }; 119 118 120 119 static ssize_t show_name(struct device *dev, struct device_attribute *devattr, ··· 443 444 goto out; 444 445 } 445 446 446 - data->chipset_id = devid; 447 - 448 447 res = 0; 449 448 out: 450 449 pci_dev_put(pcidev); ··· 475 478 return res; 476 479 } 477 480 478 - static unsigned long i5k_channel_pci_id(struct i5k_amb_data *data, 479 - unsigned long channel) 480 - { 481 - switch (data->chipset_id) { 482 - case PCI_DEVICE_ID_INTEL_5000_ERR: 483 - return PCI_DEVICE_ID_INTEL_5000_FBD0 + channel; 484 - case PCI_DEVICE_ID_INTEL_5400_ERR: 485 - return PCI_DEVICE_ID_INTEL_5400_FBD0 + channel; 486 - default: 487 - BUG(); 488 - } 489 - } 490 - 491 - static unsigned long chipset_ids[] = { 492 - PCI_DEVICE_ID_INTEL_5000_ERR, 493 - PCI_DEVICE_ID_INTEL_5400_ERR, 494 - 0 481 + static struct { 482 + unsigned long err; 483 + unsigned long fbd0; 484 + } chipset_ids[] __devinitdata = { 485 + { PCI_DEVICE_ID_INTEL_5000_ERR, PCI_DEVICE_ID_INTEL_5000_FBD0 }, 486 + { PCI_DEVICE_ID_INTEL_5400_ERR, PCI_DEVICE_ID_INTEL_5400_FBD0 }, 487 + { 0, 0 } 495 488 }; 496 489 497 490 #ifdef MODULE ··· 497 510 { 498 511 struct i5k_amb_data *data; 499 512 struct resource *reso; 500 - int i; 501 - int res = -ENODEV; 513 + int i, res; 502 514 503 515 data = kzalloc(sizeof(*data), GFP_KERNEL); 504 516 if (!data) ··· 506 520 /* Figure out where the AMB registers live */ 507 521 i = 0; 508 522 do { 509 - res = i5k_find_amb_registers(data, chipset_ids[i]); 523 + res = i5k_find_amb_registers(data, chipset_ids[i].err); 524 + if (res == 0) 525 + break; 510 526 i++; 511 - } while (res && chipset_ids[i]); 527 + } while (chipset_ids[i].err); 512 528 513 529 if (res) 514 530 goto err; 515 531 516 532 /* Copy the DIMM presence map for the first two channels */ 517 - res = i5k_channel_probe(&data->amb_present[0], 518 - i5k_channel_pci_id(data, 0)); 
533 + res = i5k_channel_probe(&data->amb_present[0], chipset_ids[i].fbd0); 519 534 if (res) 520 535 goto err; 521 536 522 537 /* Copy the DIMM presence map for the optional second two channels */ 523 - i5k_channel_probe(&data->amb_present[2], 524 - i5k_channel_pci_id(data, 1)); 538 + i5k_channel_probe(&data->amb_present[2], chipset_ids[i].fbd0 + 1); 525 539 526 540 /* Set up resource regions */ 527 541 reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME);
+1 -2
drivers/hwmon/ntc_thermistor.c
··· 211 211 if (data->comp[mid].ohm <= ohm) { 212 212 *i_low = mid; 213 213 *i_high = mid - 1; 214 - } 215 - if (data->comp[mid].ohm > ohm) { 214 + } else { 216 215 *i_low = mid + 1; 217 216 *i_high = mid; 218 217 }
+5 -4
drivers/i2c/busses/i2c-nomadik.c
··· 146 146 * @stop: stop condition 147 147 * @xfer_complete: acknowledge completion for a I2C message 148 148 * @result: controller propogated result 149 + * @regulator: pointer to i2c regulator 149 150 * @busy: Busy doing transfer 150 151 */ 151 152 struct nmk_i2c_dev { ··· 418 417 writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, 419 418 dev->virtbase + I2C_IMSCR); 420 419 421 - timeout = wait_for_completion_interruptible_timeout( 420 + timeout = wait_for_completion_timeout( 422 421 &dev->xfer_complete, dev->adap.timeout); 423 422 424 423 if (timeout < 0) { 425 424 dev_err(&dev->pdev->dev, 426 - "wait_for_completion_interruptible_timeout" 425 + "wait_for_completion_timeout" 427 426 "returned %d waiting for event\n", timeout); 428 427 status = timeout; 429 428 } ··· 505 504 writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, 506 505 dev->virtbase + I2C_IMSCR); 507 506 508 - timeout = wait_for_completion_interruptible_timeout( 507 + timeout = wait_for_completion_timeout( 509 508 &dev->xfer_complete, dev->adap.timeout); 510 509 511 510 if (timeout < 0) { 512 511 dev_err(&dev->pdev->dev, 513 - "wait_for_completion_interruptible_timeout" 512 + "wait_for_completion_timeout " 514 513 "returned %d waiting for event\n", timeout); 515 514 status = timeout; 516 515 }
-29
drivers/i2c/busses/i2c-omap.c
··· 1139 1139 return 0; 1140 1140 } 1141 1141 1142 - #ifdef CONFIG_SUSPEND 1143 - static int omap_i2c_suspend(struct device *dev) 1144 - { 1145 - if (!pm_runtime_suspended(dev)) 1146 - if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) 1147 - dev->bus->pm->runtime_suspend(dev); 1148 - 1149 - return 0; 1150 - } 1151 - 1152 - static int omap_i2c_resume(struct device *dev) 1153 - { 1154 - if (!pm_runtime_suspended(dev)) 1155 - if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) 1156 - dev->bus->pm->runtime_resume(dev); 1157 - 1158 - return 0; 1159 - } 1160 - 1161 - static struct dev_pm_ops omap_i2c_pm_ops = { 1162 - .suspend = omap_i2c_suspend, 1163 - .resume = omap_i2c_resume, 1164 - }; 1165 - #define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops) 1166 - #else 1167 - #define OMAP_I2C_PM_OPS NULL 1168 - #endif 1169 - 1170 1142 static struct platform_driver omap_i2c_driver = { 1171 1143 .probe = omap_i2c_probe, 1172 1144 .remove = omap_i2c_remove, 1173 1145 .driver = { 1174 1146 .name = "omap_i2c", 1175 1147 .owner = THIS_MODULE, 1176 - .pm = OMAP_I2C_PM_OPS, 1177 1148 }, 1178 1149 }; 1179 1150
+1
drivers/leds/leds-ams-delta.c
··· 8 8 * published by the Free Software Foundation. 9 9 */ 10 10 11 + #include <linux/module.h> 11 12 #include <linux/kernel.h> 12 13 #include <linux/init.h> 13 14 #include <linux/platform_device.h>
+5
drivers/leds/leds-bd2802.c
··· 662 662 static void bd2802_unregister_led_classdev(struct bd2802_led *led) 663 663 { 664 664 cancel_work_sync(&led->work); 665 + led_classdev_unregister(&led->cdev_led2b); 666 + led_classdev_unregister(&led->cdev_led2g); 667 + led_classdev_unregister(&led->cdev_led2r); 668 + led_classdev_unregister(&led->cdev_led1b); 669 + led_classdev_unregister(&led->cdev_led1g); 665 670 led_classdev_unregister(&led->cdev_led1r); 666 671 } 667 672
+1
drivers/leds/leds-hp6xx.c
··· 10 10 * published by the Free Software Foundation. 11 11 */ 12 12 13 + #include <linux/module.h> 13 14 #include <linux/kernel.h> 14 15 #include <linux/init.h> 15 16 #include <linux/platform_device.h>
+1
drivers/misc/Kconfig
··· 146 146 147 147 config INTEL_MID_PTI 148 148 tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard" 149 + depends on PCI 149 150 default n 150 151 help 151 152 The PTI (Parallel Trace Interface) driver directs
+1 -1
drivers/misc/ab8500-pwm.c
··· 164 164 module_exit(ab8500_pwm_exit); 165 165 MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>"); 166 166 MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver"); 167 - MODULE_ALIAS("AB8500 PWM driver"); 167 + MODULE_ALIAS("platform:ab8500-pwm"); 168 168 MODULE_LICENSE("GPL v2");
+2 -2
drivers/misc/fsa9480.c
··· 455 455 456 456 fail2: 457 457 if (client->irq) 458 - free_irq(client->irq, NULL); 458 + free_irq(client->irq, usbsw); 459 459 fail1: 460 460 i2c_set_clientdata(client, NULL); 461 461 kfree(usbsw); ··· 466 466 { 467 467 struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); 468 468 if (client->irq) 469 - free_irq(client->irq, NULL); 469 + free_irq(client->irq, usbsw); 470 470 i2c_set_clientdata(client, NULL); 471 471 472 472 sysfs_remove_group(&client->dev.kobj, &fsa9480_group);
+2
drivers/misc/pti.c
··· 33 33 #include <linux/mutex.h> 34 34 #include <linux/miscdevice.h> 35 35 #include <linux/pti.h> 36 + #include <linux/slab.h> 37 + #include <linux/uaccess.h> 36 38 37 39 #define DRIVERNAME "pti" 38 40 #define PCINAME "pciPTI"
+9 -1
drivers/misc/ti-st/st_core.c
··· 338 338 /* Unknow packet? */ 339 339 default: 340 340 type = *ptr; 341 + if (st_gdata->list[type] == NULL) { 342 + pr_err("chip/interface misbehavior dropping" 343 + " frame starting with 0x%02x", type); 344 + goto done; 345 + 346 + } 341 347 st_gdata->rx_skb = alloc_skb( 342 348 st_gdata->list[type]->max_frame_size, 343 349 GFP_ATOMIC); ··· 360 354 ptr++; 361 355 count--; 362 356 } 357 + done: 363 358 spin_unlock_irqrestore(&st_gdata->lock, flags); 364 359 pr_debug("done %s", __func__); 365 360 return; ··· 724 717 */ 725 718 spin_lock_irqsave(&st_gdata->lock, flags); 726 719 for (i = ST_BT; i < ST_MAX_CHANNELS; i++) { 727 - if (st_gdata->list[i] != NULL) 720 + if (st_gdata->is_registered[i] == true) 728 721 pr_err("%d not un-registered", i); 729 722 st_gdata->list[i] = NULL; 723 + st_gdata->is_registered[i] = false; 730 724 } 731 725 st_gdata->protos_registered = 0; 732 726 spin_unlock_irqrestore(&st_gdata->lock, flags);
+33
drivers/misc/ti-st/st_kim.c
··· 68 68 if (unlikely(skb->data[5] != 0)) { 69 69 pr_err("no proper response during fw download"); 70 70 pr_err("data6 %x", skb->data[5]); 71 + kfree_skb(skb); 71 72 return; /* keep waiting for the proper response */ 72 73 } 73 74 /* becos of all the script being downloaded */ ··· 211 210 pr_err(" waiting for ver info- timed out "); 212 211 return -ETIMEDOUT; 213 212 } 213 + INIT_COMPLETION(kim_gdata->kim_rcvd); 214 214 215 215 version = 216 216 MAKEWORD(kim_gdata->resp_buffer[13], ··· 300 298 301 299 switch (((struct bts_action *)ptr)->type) { 302 300 case ACTION_SEND_COMMAND: /* action send */ 301 + pr_debug("S"); 303 302 action_ptr = &(((struct bts_action *)ptr)->data[0]); 304 303 if (unlikely 305 304 (((struct hci_command *)action_ptr)->opcode == ··· 338 335 release_firmware(kim_gdata->fw_entry); 339 336 return -ETIMEDOUT; 340 337 } 338 + /* reinit completion before sending for the 339 + * relevant wait 340 + */ 341 + INIT_COMPLETION(kim_gdata->kim_rcvd); 341 342 342 343 /* 343 344 * Free space found in uart buffer, call st_int_write ··· 368 361 } 369 362 break; 370 363 case ACTION_WAIT_EVENT: /* wait */ 364 + pr_debug("W"); 371 365 if (!wait_for_completion_timeout 372 366 (&kim_gdata->kim_rcvd, 373 367 msecs_to_jiffies(CMD_RESP_TIME))) { ··· 442 434 { 443 435 long err = 0; 444 436 long retry = POR_RETRY_COUNT; 437 + struct ti_st_plat_data *pdata; 445 438 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; 446 439 447 440 pr_info(" %s", __func__); 441 + pdata = kim_gdata->kim_pdev->dev.platform_data; 448 442 449 443 do { 444 + /* platform specific enabling code here */ 445 + if (pdata->chip_enable) 446 + pdata->chip_enable(kim_gdata); 447 + 450 448 /* Configure BT nShutdown to HIGH state */ 451 449 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); 452 450 mdelay(5); /* FIXME: a proper toggle */ ··· 474 460 pr_info("ldisc_install = 0"); 475 461 sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, 476 462 NULL, "install"); 463 + /* the following wait is never 
going to be completed, 464 + * since the ldisc was never installed, hence serving 465 + * as a mdelay of LDISC_TIME msecs */ 466 + err = wait_for_completion_timeout 467 + (&kim_gdata->ldisc_installed, 468 + msecs_to_jiffies(LDISC_TIME)); 477 469 err = -ETIMEDOUT; 478 470 continue; 479 471 } else { ··· 492 472 pr_info("ldisc_install = 0"); 493 473 sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, 494 474 NULL, "install"); 475 + /* this wait might be completed, though in the 476 + * tty_close() since the ldisc is already 477 + * installed */ 478 + err = wait_for_completion_timeout 479 + (&kim_gdata->ldisc_installed, 480 + msecs_to_jiffies(LDISC_TIME)); 481 + err = -EINVAL; 495 482 continue; 496 483 } else { /* on success don't retry */ 497 484 break; ··· 516 489 { 517 490 long err = 0; 518 491 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; 492 + struct ti_st_plat_data *pdata = 493 + kim_gdata->kim_pdev->dev.platform_data; 519 494 520 495 INIT_COMPLETION(kim_gdata->ldisc_installed); 521 496 ··· 544 515 gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); 545 516 mdelay(1); 546 517 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); 518 + 519 + /* platform specific disable */ 520 + if (pdata->chip_disable) 521 + pdata->chip_disable(kim_gdata); 547 522 return err; 548 523 } 549 524
+19
drivers/misc/ti-st/st_ll.c
··· 22 22 #define pr_fmt(fmt) "(stll) :" fmt 23 23 #include <linux/skbuff.h> 24 24 #include <linux/module.h> 25 + #include <linux/platform_device.h> 25 26 #include <linux/ti_wilink_st.h> 26 27 27 28 /**********************************************************************/ ··· 38 37 39 38 static void ll_device_want_to_sleep(struct st_data_s *st_data) 40 39 { 40 + struct kim_data_s *kim_data; 41 + struct ti_st_plat_data *pdata; 42 + 41 43 pr_debug("%s", __func__); 42 44 /* sanity check */ 43 45 if (st_data->ll_state != ST_LL_AWAKE) ··· 50 46 send_ll_cmd(st_data, LL_SLEEP_ACK); 51 47 /* update state */ 52 48 st_data->ll_state = ST_LL_ASLEEP; 49 + 50 + /* communicate to platform about chip asleep */ 51 + kim_data = st_data->kim_data; 52 + pdata = kim_data->kim_pdev->dev.platform_data; 53 + if (pdata->chip_asleep) 54 + pdata->chip_asleep(NULL); 53 55 } 54 56 55 57 static void ll_device_want_to_wakeup(struct st_data_s *st_data) 56 58 { 59 + struct kim_data_s *kim_data; 60 + struct ti_st_plat_data *pdata; 61 + 57 62 /* diff actions in diff states */ 58 63 switch (st_data->ll_state) { 59 64 case ST_LL_ASLEEP: ··· 83 70 } 84 71 /* update state */ 85 72 st_data->ll_state = ST_LL_AWAKE; 73 + 74 + /* communicate to platform about chip wakeup */ 75 + kim_data = st_data->kim_data; 76 + pdata = kim_data->kim_pdev->dev.platform_data; 77 + if (pdata->chip_asleep) 78 + pdata->chip_awake(NULL); 86 79 } 87 80 88 81 /**********************************************************************/
+6
drivers/net/e1000/e1000_hw.c
··· 4026 4026 checksum += eeprom_data; 4027 4027 } 4028 4028 4029 + #ifdef CONFIG_PARISC 4030 + /* This is a signature and not a checksum on HP c8000 */ 4031 + if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) 4032 + return E1000_SUCCESS; 4033 + 4034 + #endif 4029 4035 if (checksum == (u16) EEPROM_SUM) 4030 4036 return E1000_SUCCESS; 4031 4037 else {
+8 -15
drivers/net/rionet.c
··· 80 80 */ 81 81 static struct rio_dev **rionet_active; 82 82 83 - #define is_rionet_capable(pef, src_ops, dst_ops) \ 84 - ((pef & RIO_PEF_INB_MBOX) && \ 85 - (pef & RIO_PEF_INB_DOORBELL) && \ 83 + #define is_rionet_capable(src_ops, dst_ops) \ 84 + ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 85 + (dst_ops & RIO_DST_OPS_DATA_MSG) && \ 86 86 (src_ops & RIO_SRC_OPS_DOORBELL) && \ 87 87 (dst_ops & RIO_DST_OPS_DOORBELL)) 88 88 #define dev_rionet_capable(dev) \ 89 - is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops) 89 + is_rionet_capable(dev->src_ops, dev->dst_ops) 90 90 91 91 #define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) 92 92 #define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) ··· 282 282 { 283 283 int i, rc = 0; 284 284 struct rionet_peer *peer, *tmp; 285 - u32 pwdcsr; 286 285 struct rionet_private *rnet = netdev_priv(ndev); 287 286 288 287 if (netif_msg_ifup(rnet)) ··· 331 332 continue; 332 333 } 333 334 334 - /* 335 - * If device has initialized inbound doorbells, 336 - * send a join message 337 - */ 338 - rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr); 339 - if (pwdcsr & RIO_DOORBELL_AVAIL) 340 - rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); 335 + /* Send a join message */ 336 + rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); 341 337 } 342 338 343 339 out: ··· 486 492 static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) 487 493 { 488 494 int rc = -ENODEV; 489 - u32 lpef, lsrc_ops, ldst_ops; 495 + u32 lsrc_ops, ldst_ops; 490 496 struct rionet_peer *peer; 491 497 struct net_device *ndev = NULL; 492 498 ··· 509 515 * on later probes 510 516 */ 511 517 if (!rionet_check) { 512 - rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef); 513 518 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, 514 519 &lsrc_ops); 515 520 rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, 516 521 &ldst_ops); 517 - if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) { 522 + if (!is_rionet_capable(lsrc_ops, 
ldst_ops)) { 518 523 printk(KERN_ERR 519 524 "%s: local device is not network capable\n", 520 525 DRV_NAME);
+1 -2
drivers/rapidio/rio-scan.c
··· 505 505 rdev->dev.dma_mask = &rdev->dma_mask; 506 506 rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 507 507 508 - if ((rdev->pef & RIO_PEF_INB_DOORBELL) && 509 - (rdev->dst_ops & RIO_DST_OPS_DOORBELL)) 508 + if (rdev->dst_ops & RIO_DST_OPS_DOORBELL) 510 509 rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], 511 510 0, 0xffff); 512 511
+31 -48
drivers/rtc/rtc-s3c.c
··· 152 152 goto retry_get_time; 153 153 } 154 154 155 - pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n", 156 - 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday, 157 - rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec); 158 - 159 155 rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); 160 156 rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); 161 157 rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); ··· 160 164 rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); 161 165 162 166 rtc_tm->tm_year += 100; 167 + 168 + pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n", 169 + 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday, 170 + rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec); 171 + 163 172 rtc_tm->tm_mon -= 1; 164 173 165 174 clk_disable(rtc_clk); ··· 270 269 clk_enable(rtc_clk); 271 270 pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n", 272 271 alrm->enabled, 273 - 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, 272 + 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, 274 273 tm->tm_hour, tm->tm_min, tm->tm_sec); 275 - 276 274 277 275 alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; 278 276 writeb(0x00, base + S3C2410_RTCALM); ··· 319 319 return 0; 320 320 } 321 321 322 - static int s3c_rtc_open(struct device *dev) 323 - { 324 - struct platform_device *pdev = to_platform_device(dev); 325 - struct rtc_device *rtc_dev = platform_get_drvdata(pdev); 326 - int ret; 327 - 328 - ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq, 329 - IRQF_DISABLED, "s3c2410-rtc alarm", rtc_dev); 330 - 331 - if (ret) { 332 - dev_err(dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret); 333 - return ret; 334 - } 335 - 336 - ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq, 337 - IRQF_DISABLED, "s3c2410-rtc tick", rtc_dev); 338 - 339 - if (ret) { 340 - dev_err(dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret); 341 - goto tick_err; 342 - } 343 - 344 - return ret; 345 - 346 - tick_err: 347 - free_irq(s3c_rtc_alarmno, rtc_dev); 348 - return ret; 349 - } 350 - 351 - static void 
s3c_rtc_release(struct device *dev) 352 - { 353 - struct platform_device *pdev = to_platform_device(dev); 354 - struct rtc_device *rtc_dev = platform_get_drvdata(pdev); 355 - 356 - /* do not clear AIE here, it may be needed for wake */ 357 - 358 - free_irq(s3c_rtc_alarmno, rtc_dev); 359 - free_irq(s3c_rtc_tickno, rtc_dev); 360 - } 361 - 362 322 static const struct rtc_class_ops s3c_rtcops = { 363 - .open = s3c_rtc_open, 364 - .release = s3c_rtc_release, 365 323 .read_time = s3c_rtc_gettime, 366 324 .set_time = s3c_rtc_settime, 367 325 .read_alarm = s3c_rtc_getalarm, ··· 382 424 static int __devexit s3c_rtc_remove(struct platform_device *dev) 383 425 { 384 426 struct rtc_device *rtc = platform_get_drvdata(dev); 427 + 428 + free_irq(s3c_rtc_alarmno, rtc); 429 + free_irq(s3c_rtc_tickno, rtc); 385 430 386 431 platform_set_drvdata(dev, NULL); 387 432 rtc_device_unregister(rtc); ··· 509 548 510 549 s3c_rtc_setfreq(&pdev->dev, 1); 511 550 551 + ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq, 552 + IRQF_DISABLED, "s3c2410-rtc alarm", rtc); 553 + if (ret) { 554 + dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret); 555 + goto err_alarm_irq; 556 + } 557 + 558 + ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq, 559 + IRQF_DISABLED, "s3c2410-rtc tick", rtc); 560 + if (ret) { 561 + dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret); 562 + free_irq(s3c_rtc_alarmno, rtc); 563 + goto err_tick_irq; 564 + } 565 + 512 566 clk_disable(rtc_clk); 513 567 514 568 return 0; 569 + 570 + err_tick_irq: 571 + free_irq(s3c_rtc_alarmno, rtc); 572 + 573 + err_alarm_irq: 574 + platform_set_drvdata(pdev, NULL); 575 + rtc_device_unregister(rtc); 515 576 516 577 err_nortc: 517 578 s3c_rtc_enable(pdev, 0);
+7 -3
drivers/s390/block/dasd_ioctl.c
··· 249 249 static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp) 250 250 { 251 251 struct dasd_profile_info_t *data; 252 + int rc = 0; 252 253 253 254 data = kmalloc(sizeof(*data), GFP_KERNEL); 254 255 if (!data) ··· 280 279 spin_unlock_bh(&block->profile.lock); 281 280 } else { 282 281 spin_unlock_bh(&block->profile.lock); 283 - return -EIO; 282 + rc = -EIO; 283 + goto out; 284 284 } 285 285 if (copy_to_user(argp, data, sizeof(*data))) 286 - return -EFAULT; 287 - return 0; 286 + rc = -EFAULT; 287 + out: 288 + kfree(data); 289 + return rc; 288 290 } 289 291 #else 290 292 static int dasd_ioctl_reset_profile(struct dasd_block *block)
+4 -2
drivers/s390/char/sclp_cmd.c
··· 383 383 switch (sccb->header.response_code) { 384 384 case 0x0020: 385 385 set_bit(id, sclp_storage_ids); 386 - for (i = 0; i < sccb->assigned; i++) 387 - sclp_unassign_storage(sccb->entries[i] >> 16); 386 + for (i = 0; i < sccb->assigned; i++) { 387 + if (sccb->entries[i]) 388 + sclp_unassign_storage(sccb->entries[i] >> 16); 389 + } 388 390 break; 389 391 default: 390 392 rc = -EIO;
+3
drivers/sh/intc/chip.c
··· 186 186 !defined(CONFIG_CPU_SUBTYPE_SH7709) 187 187 [IRQ_TYPE_LEVEL_HIGH] = VALID(3), 188 188 #endif 189 + #if defined(CONFIG_ARCH_SH7372) 190 + [IRQ_TYPE_EDGE_BOTH] = VALID(4), 191 + #endif 189 192 }; 190 193 191 194 static int intc_set_type(struct irq_data *data, unsigned int type)
+1
drivers/staging/brcm80211/brcmsmac/otp.c
··· 16 16 17 17 #include <linux/io.h> 18 18 #include <linux/errno.h> 19 + #include <linux/string.h> 19 20 20 21 #include <brcm_hw_ids.h> 21 22 #include <chipcommon.h>
+1
drivers/staging/brcm80211/brcmsmac/types.h
··· 18 18 #define _BRCM_TYPES_H_ 19 19 20 20 #include <linux/types.h> 21 + #include <linux/io.h> 21 22 22 23 /* Bus types */ 23 24 #define SI_BUS 0 /* SOC Interconnect */
+1
drivers/staging/octeon/ethernet-rgmii.c
··· 26 26 **********************************************************************/ 27 27 #include <linux/kernel.h> 28 28 #include <linux/netdevice.h> 29 + #include <linux/interrupt.h> 29 30 #include <linux/phy.h> 30 31 #include <linux/ratelimit.h> 31 32 #include <net/dst.h>
+1
drivers/staging/octeon/ethernet-spi.c
··· 26 26 **********************************************************************/ 27 27 #include <linux/kernel.h> 28 28 #include <linux/netdevice.h> 29 + #include <linux/interrupt.h> 29 30 #include <net/dst.h> 30 31 31 32 #include <asm/octeon/octeon.h>
-1
drivers/staging/tidspbridge/core/dsp-clock.c
··· 209 209 break; 210 210 #ifdef CONFIG_OMAP_MCBSP 211 211 case MCBSP_CLK: 212 - omap_mcbsp_set_io_type(MCBSP_ID(clk_id), OMAP_MCBSP_POLL_IO); 213 212 omap_mcbsp_request(MCBSP_ID(clk_id)); 214 213 omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC); 215 214 break;
+1 -1
drivers/staging/zcache/tmem.c
··· 604 604 struct tmem_obj *obj; 605 605 void *pampd; 606 606 bool ephemeral = is_ephemeral(pool); 607 - uint32_t ret = -1; 607 + int ret = -1; 608 608 struct tmem_hashbucket *hb; 609 609 bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral); 610 610 bool lock_held = false;
+4 -4
drivers/staging/zcache/zcache-main.c
··· 1158 1158 size_t clen; 1159 1159 int ret; 1160 1160 unsigned long count; 1161 - struct page *page = virt_to_page(data); 1161 + struct page *page = (struct page *)(data); 1162 1162 struct zcache_client *cli = pool->client; 1163 1163 uint16_t client_id = get_client_id_from_client(cli); 1164 1164 unsigned long zv_mean_zsize; ··· 1227 1227 int ret = 0; 1228 1228 1229 1229 BUG_ON(is_ephemeral(pool)); 1230 - zv_decompress(virt_to_page(data), pampd); 1230 + zv_decompress((struct page *)(data), pampd); 1231 1231 return ret; 1232 1232 } 1233 1233 ··· 1539 1539 goto out; 1540 1540 if (!zcache_freeze && zcache_do_preload(pool) == 0) { 1541 1541 /* preload does preempt_disable on success */ 1542 - ret = tmem_put(pool, oidp, index, page_address(page), 1542 + ret = tmem_put(pool, oidp, index, (char *)(page), 1543 1543 PAGE_SIZE, 0, is_ephemeral(pool)); 1544 1544 if (ret < 0) { 1545 1545 if (is_ephemeral(pool)) ··· 1572 1572 pool = zcache_get_pool_by_id(cli_id, pool_id); 1573 1573 if (likely(pool != NULL)) { 1574 1574 if (atomic_read(&pool->obj_count) > 0) 1575 - ret = tmem_get(pool, oidp, index, page_address(page), 1575 + ret = tmem_get(pool, oidp, index, (char *)(page), 1576 1576 &size, 0, is_ephemeral(pool)); 1577 1577 zcache_put_pool(pool); 1578 1578 }
-1
drivers/target/iscsi/iscsi_target.c
··· 2243 2243 case 0: 2244 2244 return iscsit_handle_recovery_datain_or_r2t(conn, buf, 2245 2245 hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); 2246 - return 0; 2247 2246 case ISCSI_FLAG_SNACK_TYPE_STATUS: 2248 2247 return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt, 2249 2248 hdr->begrun, hdr->runlength);
+2 -2
drivers/target/iscsi/iscsi_target_configfs.c
··· 268 268 ISCSI_TCP); 269 269 if (IS_ERR(tpg_np)) { 270 270 iscsit_put_tpg(tpg); 271 - return ERR_PTR(PTR_ERR(tpg_np)); 271 + return ERR_CAST(tpg_np); 272 272 } 273 273 pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n"); 274 274 ··· 1285 1285 1286 1286 tiqn = iscsit_add_tiqn((unsigned char *)name); 1287 1287 if (IS_ERR(tiqn)) 1288 - return ERR_PTR(PTR_ERR(tiqn)); 1288 + return ERR_CAST(tiqn); 1289 1289 /* 1290 1290 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group. 1291 1291 */
+1 -1
drivers/target/iscsi/iscsi_target_erl1.c
··· 834 834 */ 835 835 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, 836 836 ooo_list) { 837 - while (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) 837 + if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) 838 838 continue; 839 839 840 840 list_add(&ooo_cmdsn->ooo_list,
+3 -13
drivers/target/iscsi/iscsi_target_login.c
··· 1013 1013 ISCSI_LOGIN_STATUS_TARGET_ERROR); 1014 1014 goto new_sess_out; 1015 1015 } 1016 - #if 0 1017 - if (!iscsi_ntop6((const unsigned char *) 1018 - &sock_in6.sin6_addr.in6_u, 1019 - (char *)&conn->ipv6_login_ip[0], 1020 - IPV6_ADDRESS_SPACE)) { 1021 - pr_err("iscsi_ntop6() failed\n"); 1022 - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 1023 - ISCSI_LOGIN_STATUS_TARGET_ERROR); 1024 - goto new_sess_out; 1025 - } 1026 - #else 1027 - pr_debug("Skipping iscsi_ntop6()\n"); 1028 - #endif 1016 + snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", 1017 + &sock_in6.sin6_addr.in6_u); 1018 + conn->login_port = ntohs(sock_in6.sin6_port); 1029 1019 } else { 1030 1020 memset(&sock_in, 0, sizeof(struct sockaddr_in)); 1031 1021
+16 -27
drivers/target/iscsi/iscsi_target_parameters.c
··· 545 545 struct iscsi_param_list *src_param_list, 546 546 int leading) 547 547 { 548 - struct iscsi_param *new_param = NULL, *param = NULL; 548 + struct iscsi_param *param = NULL; 549 + struct iscsi_param *new_param = NULL; 549 550 struct iscsi_param_list *param_list = NULL; 550 551 551 552 param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); 552 553 if (!param_list) { 553 - pr_err("Unable to allocate memory for" 554 - " struct iscsi_param_list.\n"); 554 + pr_err("Unable to allocate memory for struct iscsi_param_list.\n"); 555 555 goto err_out; 556 556 } 557 557 INIT_LIST_HEAD(&param_list->param_list); ··· 567 567 568 568 new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL); 569 569 if (!new_param) { 570 - pr_err("Unable to allocate memory for" 571 - " struct iscsi_param.\n"); 570 + pr_err("Unable to allocate memory for struct iscsi_param.\n"); 571 + goto err_out; 572 + } 573 + 574 + new_param->name = kstrdup(param->name, GFP_KERNEL); 575 + new_param->value = kstrdup(param->value, GFP_KERNEL); 576 + if (!new_param->value || !new_param->name) { 577 + kfree(new_param->value); 578 + kfree(new_param->name); 579 + kfree(new_param); 580 + pr_err("Unable to allocate memory for parameter name/value.\n"); 572 581 goto err_out; 573 582 } 574 583 ··· 589 580 new_param->use = param->use; 590 581 new_param->type_range = param->type_range; 591 582 592 - new_param->name = kzalloc(strlen(param->name) + 1, GFP_KERNEL); 593 - if (!new_param->name) { 594 - pr_err("Unable to allocate memory for" 595 - " parameter name.\n"); 596 - goto err_out; 597 - } 598 - 599 - new_param->value = kzalloc(strlen(param->value) + 1, 600 - GFP_KERNEL); 601 - if (!new_param->value) { 602 - pr_err("Unable to allocate memory for" 603 - " parameter value.\n"); 604 - goto err_out; 605 - } 606 - 607 - memcpy(new_param->name, param->name, strlen(param->name)); 608 - new_param->name[strlen(param->name)] = '\0'; 609 - memcpy(new_param->value, param->value, strlen(param->value)); 610 - 
new_param->value[strlen(param->value)] = '\0'; 611 - 612 583 list_add_tail(&new_param->p_list, &param_list->param_list); 613 584 } 614 585 615 - if (!list_empty(&param_list->param_list)) 586 + if (!list_empty(&param_list->param_list)) { 616 587 *dst_param_list = param_list; 617 - else { 588 + } else { 618 589 pr_err("No parameters allocated.\n"); 619 590 goto err_out; 620 591 }
+1 -3
drivers/target/iscsi/iscsi_target_util.c
··· 243 243 if (!cmd->tmr_req) { 244 244 pr_err("Unable to allocate memory for" 245 245 " Task Management command!\n"); 246 - return NULL; 246 + goto out; 247 247 } 248 248 /* 249 249 * TASK_REASSIGN for ERL=2 / connection stays inside of ··· 298 298 return cmd; 299 299 out: 300 300 iscsit_release_cmd(cmd); 301 - if (se_cmd) 302 - transport_free_se_cmd(se_cmd); 303 301 return NULL; 304 302 } 305 303
+34 -23
drivers/target/target_core_cdb.c
··· 67 67 { 68 68 struct se_lun *lun = cmd->se_lun; 69 69 struct se_device *dev = cmd->se_dev; 70 + struct se_portal_group *tpg = lun->lun_sep->sep_tpg; 70 71 unsigned char *buf; 71 72 72 73 /* ··· 82 81 83 82 buf = transport_kmap_first_data_page(cmd); 84 83 85 - buf[0] = dev->transport->get_device_type(dev); 86 - if (buf[0] == TYPE_TAPE) 87 - buf[1] = 0x80; 84 + if (dev == tpg->tpg_virt_lun0.lun_se_dev) { 85 + buf[0] = 0x3f; /* Not connected */ 86 + } else { 87 + buf[0] = dev->transport->get_device_type(dev); 88 + if (buf[0] == TYPE_TAPE) 89 + buf[1] = 0x80; 90 + } 88 91 buf[2] = dev->transport->get_device_rev(dev); 89 92 90 93 /* ··· 920 915 length += target_modesense_control(dev, &buf[offset+length]); 921 916 break; 922 917 default: 923 - pr_err("Got Unknown Mode Page: 0x%02x\n", 924 - cdb[2] & 0x3f); 918 + pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", 919 + cdb[2] & 0x3f, cdb[3]); 925 920 return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; 926 921 } 927 922 offset += length; ··· 1077 1072 size -= 16; 1078 1073 } 1079 1074 1080 - task->task_scsi_status = GOOD; 1081 - transport_complete_task(task, 1); 1082 1075 err: 1083 1076 transport_kunmap_first_data_page(cmd); 1084 1077 ··· 1088 1085 * Note this is not used for TCM/pSCSI passthrough 1089 1086 */ 1090 1087 static int 1091 - target_emulate_write_same(struct se_task *task, int write_same32) 1088 + target_emulate_write_same(struct se_task *task, u32 num_blocks) 1092 1089 { 1093 1090 struct se_cmd *cmd = task->task_se_cmd; 1094 1091 struct se_device *dev = cmd->se_dev; 1095 1092 sector_t range; 1096 1093 sector_t lba = cmd->t_task_lba; 1097 - unsigned int num_blocks; 1098 1094 int ret; 1099 1095 /* 1100 - * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explict 1101 - * range when non zero is supplied, otherwise calculate the remaining 1102 - * range based on ->get_blocks() - starting LBA. 
1096 + * Use the explicit range when non zero is supplied, otherwise calculate 1097 + * the remaining range based on ->get_blocks() - starting LBA. 1103 1098 */ 1104 - if (write_same32) 1105 - num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); 1106 - else 1107 - num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); 1108 - 1109 1099 if (num_blocks != 0) 1110 1100 range = num_blocks; 1111 1101 else ··· 1113 1117 return ret; 1114 1118 } 1115 1119 1116 - task->task_scsi_status = GOOD; 1117 - transport_complete_task(task, 1); 1118 1120 return 0; 1119 1121 } 1120 1122 ··· 1159 1165 } 1160 1166 ret = target_emulate_unmap(task); 1161 1167 break; 1168 + case WRITE_SAME: 1169 + if (!dev->transport->do_discard) { 1170 + pr_err("WRITE_SAME emulation not supported" 1171 + " for: %s\n", dev->transport->name); 1172 + return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1173 + } 1174 + ret = target_emulate_write_same(task, 1175 + get_unaligned_be16(&cmd->t_task_cdb[7])); 1176 + break; 1162 1177 case WRITE_SAME_16: 1163 1178 if (!dev->transport->do_discard) { 1164 1179 pr_err("WRITE_SAME_16 emulation not supported" 1165 1180 " for: %s\n", dev->transport->name); 1166 1181 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1167 1182 } 1168 - ret = target_emulate_write_same(task, 0); 1183 + ret = target_emulate_write_same(task, 1184 + get_unaligned_be32(&cmd->t_task_cdb[10])); 1169 1185 break; 1170 1186 case VARIABLE_LENGTH_CMD: 1171 1187 service_action = ··· 1188 1184 dev->transport->name); 1189 1185 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1190 1186 } 1191 - ret = target_emulate_write_same(task, 1); 1187 + ret = target_emulate_write_same(task, 1188 + get_unaligned_be32(&cmd->t_task_cdb[28])); 1192 1189 break; 1193 1190 default: 1194 1191 pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" ··· 1224 1219 1225 1220 if (ret < 0) 1226 1221 return ret; 1227 - task->task_scsi_status = GOOD; 1228 - transport_complete_task(task, 1); 1222 + /* 1223 + * Handle the successful completion here unless a caller 1224 + * 
has explictly requested an asychronous completion. 1225 + */ 1226 + if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { 1227 + task->task_scsi_status = GOOD; 1228 + transport_complete_task(task, 1); 1229 + } 1229 1230 1230 1231 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 1231 1232 }
+39 -9
drivers/target/target_core_device.c
··· 472 472 struct se_dev_entry *deve; 473 473 u32 i; 474 474 475 - spin_lock_bh(&tpg->acl_node_lock); 475 + spin_lock_irq(&tpg->acl_node_lock); 476 476 list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { 477 - spin_unlock_bh(&tpg->acl_node_lock); 477 + spin_unlock_irq(&tpg->acl_node_lock); 478 478 479 479 spin_lock_irq(&nacl->device_list_lock); 480 480 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { ··· 491 491 } 492 492 spin_unlock_irq(&nacl->device_list_lock); 493 493 494 - spin_lock_bh(&tpg->acl_node_lock); 494 + spin_lock_irq(&tpg->acl_node_lock); 495 495 } 496 - spin_unlock_bh(&tpg->acl_node_lock); 496 + spin_unlock_irq(&tpg->acl_node_lock); 497 497 } 498 498 499 499 static struct se_port *core_alloc_port(struct se_device *dev) ··· 839 839 return ret; 840 840 } 841 841 842 + u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) 843 + { 844 + u32 tmp, aligned_max_sectors; 845 + /* 846 + * Limit max_sectors to a PAGE_SIZE aligned value for modern 847 + * transport_allocate_data_tasks() operation. 848 + */ 849 + tmp = rounddown((max_sectors * block_size), PAGE_SIZE); 850 + aligned_max_sectors = (tmp / block_size); 851 + if (max_sectors != aligned_max_sectors) { 852 + printk(KERN_INFO "Rounding down aligned max_sectors from %u" 853 + " to %u\n", max_sectors, aligned_max_sectors); 854 + return aligned_max_sectors; 855 + } 856 + 857 + return max_sectors; 858 + } 859 + 842 860 void se_dev_set_default_attribs( 843 861 struct se_device *dev, 844 862 struct se_dev_limits *dev_limits) ··· 896 878 * max_sectors is based on subsystem plugin dependent requirements. 
897 879 */ 898 880 dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; 881 + /* 882 + * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 883 + */ 884 + limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors, 885 + limits->logical_block_size); 899 886 dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; 900 887 /* 901 888 * Set optimal_sectors from max_sectors, which can be lowered via ··· 1265 1242 return -EINVAL; 1266 1243 } 1267 1244 } 1245 + /* 1246 + * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 1247 + */ 1248 + max_sectors = se_dev_align_max_sectors(max_sectors, 1249 + dev->se_sub_dev->se_dev_attrib.block_size); 1268 1250 1269 1251 dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; 1270 1252 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", ··· 1372 1344 */ 1373 1345 if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { 1374 1346 struct se_node_acl *acl; 1375 - spin_lock_bh(&tpg->acl_node_lock); 1347 + spin_lock_irq(&tpg->acl_node_lock); 1376 1348 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 1377 - if (acl->dynamic_node_acl) { 1378 - spin_unlock_bh(&tpg->acl_node_lock); 1349 + if (acl->dynamic_node_acl && 1350 + (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only || 1351 + !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) { 1352 + spin_unlock_irq(&tpg->acl_node_lock); 1379 1353 core_tpg_add_node_to_devs(acl, tpg); 1380 - spin_lock_bh(&tpg->acl_node_lock); 1354 + spin_lock_irq(&tpg->acl_node_lock); 1381 1355 } 1382 1356 } 1383 - spin_unlock_bh(&tpg->acl_node_lock); 1357 + spin_unlock_irq(&tpg->acl_node_lock); 1384 1358 } 1385 1359 1386 1360 return lun_p;
+1 -1
drivers/target/target_core_fabric_configfs.c
··· 481 481 482 482 se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name); 483 483 if (IS_ERR(se_nacl)) 484 - return ERR_PTR(PTR_ERR(se_nacl)); 484 + return ERR_CAST(se_nacl); 485 485 486 486 nacl_cg = &se_nacl->acl_group; 487 487 nacl_cg->default_groups = se_nacl->acl_default_groups;
+4 -4
drivers/target/target_core_pr.c
··· 1598 1598 * from the decoded fabric module specific TransportID 1599 1599 * at *i_str. 1600 1600 */ 1601 - spin_lock_bh(&tmp_tpg->acl_node_lock); 1601 + spin_lock_irq(&tmp_tpg->acl_node_lock); 1602 1602 dest_node_acl = __core_tpg_get_initiator_node_acl( 1603 1603 tmp_tpg, i_str); 1604 1604 if (dest_node_acl) { 1605 1605 atomic_inc(&dest_node_acl->acl_pr_ref_count); 1606 1606 smp_mb__after_atomic_inc(); 1607 1607 } 1608 - spin_unlock_bh(&tmp_tpg->acl_node_lock); 1608 + spin_unlock_irq(&tmp_tpg->acl_node_lock); 1609 1609 1610 1610 if (!dest_node_acl) { 1611 1611 core_scsi3_tpg_undepend_item(tmp_tpg); ··· 3496 3496 /* 3497 3497 * Locate the destination struct se_node_acl from the received Transport ID 3498 3498 */ 3499 - spin_lock_bh(&dest_se_tpg->acl_node_lock); 3499 + spin_lock_irq(&dest_se_tpg->acl_node_lock); 3500 3500 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, 3501 3501 initiator_str); 3502 3502 if (dest_node_acl) { 3503 3503 atomic_inc(&dest_node_acl->acl_pr_ref_count); 3504 3504 smp_mb__after_atomic_inc(); 3505 3505 } 3506 - spin_unlock_bh(&dest_se_tpg->acl_node_lock); 3506 + spin_unlock_irq(&dest_se_tpg->acl_node_lock); 3507 3507 3508 3508 if (!dest_node_acl) { 3509 3509 pr_err("Unable to locate %s dest_node_acl for"
+8 -16
drivers/target/target_core_rd.c
··· 390 390 length = req->rd_size; 391 391 392 392 dst = sg_virt(&sg_d[i++]) + dst_offset; 393 - if (!dst) 394 - BUG(); 393 + BUG_ON(!dst); 395 394 396 395 src = sg_virt(&sg_s[j]) + src_offset; 397 - if (!src) 398 - BUG(); 396 + BUG_ON(!src); 399 397 400 398 dst_offset = 0; 401 399 src_offset = length; ··· 413 415 length = req->rd_size; 414 416 415 417 dst = sg_virt(&sg_d[i]) + dst_offset; 416 - if (!dst) 417 - BUG(); 418 + BUG_ON(!dst); 418 419 419 420 if (sg_d[i].length == length) { 420 421 i++; ··· 422 425 dst_offset = length; 423 426 424 427 src = sg_virt(&sg_s[j++]) + src_offset; 425 - if (!src) 426 - BUG(); 428 + BUG_ON(!src); 427 429 428 430 src_offset = 0; 429 431 page_end = 1; ··· 506 510 length = req->rd_size; 507 511 508 512 src = sg_virt(&sg_s[i++]) + src_offset; 509 - if (!src) 510 - BUG(); 513 + BUG_ON(!src); 511 514 512 515 dst = sg_virt(&sg_d[j]) + dst_offset; 513 - if (!dst) 514 - BUG(); 516 + BUG_ON(!dst); 515 517 516 518 src_offset = 0; 517 519 dst_offset = length; ··· 529 535 length = req->rd_size; 530 536 531 537 src = sg_virt(&sg_s[i]) + src_offset; 532 - if (!src) 533 - BUG(); 538 + BUG_ON(!src); 534 539 535 540 if (sg_s[i].length == length) { 536 541 i++; ··· 538 545 src_offset = length; 539 546 540 547 dst = sg_virt(&sg_d[j++]) + dst_offset; 541 - if (!dst) 542 - BUG(); 548 + BUG_ON(!dst); 543 549 544 550 dst_offset = 0; 545 551 page_end = 1;
+36 -28
drivers/target/target_core_tpg.c
··· 137 137 { 138 138 struct se_node_acl *acl; 139 139 140 - spin_lock_bh(&tpg->acl_node_lock); 140 + spin_lock_irq(&tpg->acl_node_lock); 141 141 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 142 142 if (!strcmp(acl->initiatorname, initiatorname) && 143 143 !acl->dynamic_node_acl) { 144 - spin_unlock_bh(&tpg->acl_node_lock); 144 + spin_unlock_irq(&tpg->acl_node_lock); 145 145 return acl; 146 146 } 147 147 } 148 - spin_unlock_bh(&tpg->acl_node_lock); 148 + spin_unlock_irq(&tpg->acl_node_lock); 149 149 150 150 return NULL; 151 151 } ··· 298 298 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); 299 299 return NULL; 300 300 } 301 + /* 302 + * Here we only create demo-mode MappedLUNs from the active 303 + * TPG LUNs if the fabric is not explictly asking for 304 + * tpg_check_demo_mode_login_only() == 1. 305 + */ 306 + if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) && 307 + (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1)) 308 + do { ; } while (0); 309 + else 310 + core_tpg_add_node_to_devs(acl, tpg); 301 311 302 - core_tpg_add_node_to_devs(acl, tpg); 303 - 304 - spin_lock_bh(&tpg->acl_node_lock); 312 + spin_lock_irq(&tpg->acl_node_lock); 305 313 list_add_tail(&acl->acl_list, &tpg->acl_node_list); 306 314 tpg->num_node_acls++; 307 - spin_unlock_bh(&tpg->acl_node_lock); 315 + spin_unlock_irq(&tpg->acl_node_lock); 308 316 309 317 pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" 310 318 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), ··· 362 354 { 363 355 struct se_node_acl *acl = NULL; 364 356 365 - spin_lock_bh(&tpg->acl_node_lock); 357 + spin_lock_irq(&tpg->acl_node_lock); 366 358 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 367 359 if (acl) { 368 360 if (acl->dynamic_node_acl) { ··· 370 362 pr_debug("%s_TPG[%u] - Replacing dynamic ACL" 371 363 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 372 364 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); 373 - 
spin_unlock_bh(&tpg->acl_node_lock); 365 + spin_unlock_irq(&tpg->acl_node_lock); 374 366 /* 375 367 * Release the locally allocated struct se_node_acl 376 368 * because * core_tpg_add_initiator_node_acl() returned ··· 386 378 " Node %s already exists for TPG %u, ignoring" 387 379 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 388 380 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 389 - spin_unlock_bh(&tpg->acl_node_lock); 381 + spin_unlock_irq(&tpg->acl_node_lock); 390 382 return ERR_PTR(-EEXIST); 391 383 } 392 - spin_unlock_bh(&tpg->acl_node_lock); 384 + spin_unlock_irq(&tpg->acl_node_lock); 393 385 394 386 if (!se_nacl) { 395 387 pr_err("struct se_node_acl pointer is NULL\n"); ··· 426 418 return ERR_PTR(-EINVAL); 427 419 } 428 420 429 - spin_lock_bh(&tpg->acl_node_lock); 421 + spin_lock_irq(&tpg->acl_node_lock); 430 422 list_add_tail(&acl->acl_list, &tpg->acl_node_list); 431 423 tpg->num_node_acls++; 432 - spin_unlock_bh(&tpg->acl_node_lock); 424 + spin_unlock_irq(&tpg->acl_node_lock); 433 425 434 426 done: 435 427 pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" ··· 453 445 struct se_session *sess, *sess_tmp; 454 446 int dynamic_acl = 0; 455 447 456 - spin_lock_bh(&tpg->acl_node_lock); 448 + spin_lock_irq(&tpg->acl_node_lock); 457 449 if (acl->dynamic_node_acl) { 458 450 acl->dynamic_node_acl = 0; 459 451 dynamic_acl = 1; 460 452 } 461 453 list_del(&acl->acl_list); 462 454 tpg->num_node_acls--; 463 - spin_unlock_bh(&tpg->acl_node_lock); 455 + spin_unlock_irq(&tpg->acl_node_lock); 464 456 465 457 spin_lock_bh(&tpg->session_lock); 466 458 list_for_each_entry_safe(sess, sess_tmp, ··· 511 503 struct se_node_acl *acl; 512 504 int dynamic_acl = 0; 513 505 514 - spin_lock_bh(&tpg->acl_node_lock); 506 + spin_lock_irq(&tpg->acl_node_lock); 515 507 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 516 508 if (!acl) { 517 509 pr_err("Access Control List entry for %s Initiator" 518 510 " Node %s does not exists for TPG %hu, ignoring" 519 511 " 
request.\n", tpg->se_tpg_tfo->get_fabric_name(), 520 512 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 521 - spin_unlock_bh(&tpg->acl_node_lock); 513 + spin_unlock_irq(&tpg->acl_node_lock); 522 514 return -ENODEV; 523 515 } 524 516 if (acl->dynamic_node_acl) { 525 517 acl->dynamic_node_acl = 0; 526 518 dynamic_acl = 1; 527 519 } 528 - spin_unlock_bh(&tpg->acl_node_lock); 520 + spin_unlock_irq(&tpg->acl_node_lock); 529 521 530 522 spin_lock_bh(&tpg->session_lock); 531 523 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { ··· 541 533 tpg->se_tpg_tfo->get_fabric_name(), initiatorname); 542 534 spin_unlock_bh(&tpg->session_lock); 543 535 544 - spin_lock_bh(&tpg->acl_node_lock); 536 + spin_lock_irq(&tpg->acl_node_lock); 545 537 if (dynamic_acl) 546 538 acl->dynamic_node_acl = 1; 547 - spin_unlock_bh(&tpg->acl_node_lock); 539 + spin_unlock_irq(&tpg->acl_node_lock); 548 540 return -EEXIST; 549 541 } 550 542 /* ··· 579 571 if (init_sess) 580 572 tpg->se_tpg_tfo->close_session(init_sess); 581 573 582 - spin_lock_bh(&tpg->acl_node_lock); 574 + spin_lock_irq(&tpg->acl_node_lock); 583 575 if (dynamic_acl) 584 576 acl->dynamic_node_acl = 1; 585 - spin_unlock_bh(&tpg->acl_node_lock); 577 + spin_unlock_irq(&tpg->acl_node_lock); 586 578 return -EINVAL; 587 579 } 588 580 spin_unlock_bh(&tpg->session_lock); ··· 598 590 initiatorname, tpg->se_tpg_tfo->get_fabric_name(), 599 591 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 600 592 601 - spin_lock_bh(&tpg->acl_node_lock); 593 + spin_lock_irq(&tpg->acl_node_lock); 602 594 if (dynamic_acl) 603 595 acl->dynamic_node_acl = 1; 604 - spin_unlock_bh(&tpg->acl_node_lock); 596 + spin_unlock_irq(&tpg->acl_node_lock); 605 597 606 598 return 0; 607 599 } ··· 725 717 * not been released because of TFO->tpg_check_demo_mode_cache() == 1 726 718 * in transport_deregister_session(). 
727 719 */ 728 - spin_lock_bh(&se_tpg->acl_node_lock); 720 + spin_lock_irq(&se_tpg->acl_node_lock); 729 721 list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, 730 722 acl_list) { 731 723 list_del(&nacl->acl_list); 732 724 se_tpg->num_node_acls--; 733 - spin_unlock_bh(&se_tpg->acl_node_lock); 725 + spin_unlock_irq(&se_tpg->acl_node_lock); 734 726 735 727 core_tpg_wait_for_nacl_pr_ref(nacl); 736 728 core_free_device_list_for_node(nacl, se_tpg); 737 729 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); 738 730 739 - spin_lock_bh(&se_tpg->acl_node_lock); 731 + spin_lock_irq(&se_tpg->acl_node_lock); 740 732 } 741 - spin_unlock_bh(&se_tpg->acl_node_lock); 733 + spin_unlock_irq(&se_tpg->acl_node_lock); 742 734 743 735 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) 744 736 core_tpg_release_virtual_lun0(se_tpg);
+123 -85
drivers/target/target_core_transport.c
··· 389 389 { 390 390 struct se_portal_group *se_tpg = se_sess->se_tpg; 391 391 struct se_node_acl *se_nacl; 392 + unsigned long flags; 392 393 393 394 if (!se_tpg) { 394 395 transport_free_session(se_sess); 395 396 return; 396 397 } 397 398 398 - spin_lock_bh(&se_tpg->session_lock); 399 + spin_lock_irqsave(&se_tpg->session_lock, flags); 399 400 list_del(&se_sess->sess_list); 400 401 se_sess->se_tpg = NULL; 401 402 se_sess->fabric_sess_ptr = NULL; 402 - spin_unlock_bh(&se_tpg->session_lock); 403 + spin_unlock_irqrestore(&se_tpg->session_lock, flags); 403 404 404 405 /* 405 406 * Determine if we need to do extra work for this initiator node's ··· 408 407 */ 409 408 se_nacl = se_sess->se_node_acl; 410 409 if (se_nacl) { 411 - spin_lock_bh(&se_tpg->acl_node_lock); 410 + spin_lock_irqsave(&se_tpg->acl_node_lock, flags); 412 411 if (se_nacl->dynamic_node_acl) { 413 412 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( 414 413 se_tpg)) { 415 414 list_del(&se_nacl->acl_list); 416 415 se_tpg->num_node_acls--; 417 - spin_unlock_bh(&se_tpg->acl_node_lock); 416 + spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); 418 417 419 418 core_tpg_wait_for_nacl_pr_ref(se_nacl); 420 419 core_free_device_list_for_node(se_nacl, se_tpg); 421 420 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, 422 421 se_nacl); 423 - spin_lock_bh(&se_tpg->acl_node_lock); 422 + spin_lock_irqsave(&se_tpg->acl_node_lock, flags); 424 423 } 425 424 } 426 - spin_unlock_bh(&se_tpg->acl_node_lock); 425 + spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); 427 426 } 428 427 429 428 transport_free_session(se_sess); ··· 2054 2053 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 2055 2054 break; 2056 2055 } 2057 - 2058 - if (!sc) 2056 + /* 2057 + * If a fabric does not define a cmd->se_tfo->new_cmd_map caller, 2058 + * make the call to transport_send_check_condition_and_sense() 2059 + * directly. 
Otherwise expect the fabric to make the call to 2060 + * transport_send_check_condition_and_sense() after handling 2061 + * possible unsoliticied write data payloads. 2062 + */ 2063 + if (!sc && !cmd->se_tfo->new_cmd_map) 2059 2064 transport_new_cmd_failure(cmd); 2060 2065 else { 2061 2066 ret = transport_send_check_condition_and_sense(cmd, ··· 2854 2847 " transport_dev_end_lba(): %llu\n", 2855 2848 cmd->t_task_lba, sectors, 2856 2849 transport_dev_end_lba(dev)); 2857 - pr_err(" We should return CHECK_CONDITION" 2858 - " but we don't yet\n"); 2859 - return 0; 2850 + return -EINVAL; 2860 2851 } 2861 2852 2862 - return sectors; 2853 + return 0; 2854 + } 2855 + 2856 + static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) 2857 + { 2858 + /* 2859 + * Determine if the received WRITE_SAME is used to for direct 2860 + * passthrough into Linux/SCSI with struct request via TCM/pSCSI 2861 + * or we are signaling the use of internal WRITE_SAME + UNMAP=1 2862 + * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK code. 2863 + */ 2864 + int passthrough = (dev->transport->transport_type == 2865 + TRANSPORT_PLUGIN_PHBA_PDEV); 2866 + 2867 + if (!passthrough) { 2868 + if ((flags[0] & 0x04) || (flags[0] & 0x02)) { 2869 + pr_err("WRITE_SAME PBDATA and LBDATA" 2870 + " bits not supported for Block Discard" 2871 + " Emulation\n"); 2872 + return -ENOSYS; 2873 + } 2874 + /* 2875 + * Currently for the emulated case we only accept 2876 + * tpws with the UNMAP=1 bit set. 
2877 + */ 2878 + if (!(flags[0] & 0x08)) { 2879 + pr_err("WRITE_SAME w/o UNMAP bit not" 2880 + " supported for Block Discard Emulation\n"); 2881 + return -ENOSYS; 2882 + } 2883 + } 2884 + 2885 + return 0; 2863 2886 } 2864 2887 2865 2888 /* transport_generic_cmd_sequencer(): ··· 3102 3065 goto out_unsupported_cdb; 3103 3066 3104 3067 if (sectors) 3105 - size = transport_get_size(sectors, cdb, cmd); 3068 + size = transport_get_size(1, cdb, cmd); 3106 3069 else { 3107 3070 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" 3108 3071 " supported\n"); ··· 3112 3075 cmd->t_task_lba = get_unaligned_be64(&cdb[12]); 3113 3076 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3114 3077 3115 - /* 3116 - * Skip the remaining assignments for TCM/PSCSI passthrough 3117 - */ 3118 - if (passthrough) 3119 - break; 3078 + if (target_check_write_same_discard(&cdb[10], dev) < 0) 3079 + goto out_invalid_cdb_field; 3120 3080 3121 - if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { 3122 - pr_err("WRITE_SAME PBDATA and LBDATA" 3123 - " bits not supported for Block Discard" 3124 - " Emulation\n"); 3125 - goto out_invalid_cdb_field; 3126 - } 3127 - /* 3128 - * Currently for the emulated case we only accept 3129 - * tpws with the UNMAP=1 bit set. 3130 - */ 3131 - if (!(cdb[10] & 0x08)) { 3132 - pr_err("WRITE_SAME w/o UNMAP bit not" 3133 - " supported for Block Discard Emulation\n"); 3134 - goto out_invalid_cdb_field; 3135 - } 3136 3081 break; 3137 3082 default: 3138 3083 pr_err("VARIABLE_LENGTH_CMD service action" ··· 3349 3330 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; 3350 3331 /* 3351 3332 * Check to ensure that LBA + Range does not exceed past end of 3352 - * device. 
3333 + * device for IBLOCK and FILEIO ->do_sync_cache() backend calls 3353 3334 */ 3354 - if (!transport_cmd_get_valid_sectors(cmd)) 3355 - goto out_invalid_cdb_field; 3335 + if ((cmd->t_task_lba != 0) || (sectors != 0)) { 3336 + if (transport_cmd_get_valid_sectors(cmd) < 0) 3337 + goto out_invalid_cdb_field; 3338 + } 3356 3339 break; 3357 3340 case UNMAP: 3358 3341 size = get_unaligned_be16(&cdb[7]); ··· 3366 3345 goto out_unsupported_cdb; 3367 3346 3368 3347 if (sectors) 3369 - size = transport_get_size(sectors, cdb, cmd); 3348 + size = transport_get_size(1, cdb, cmd); 3370 3349 else { 3371 3350 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 3372 3351 goto out_invalid_cdb_field; 3373 3352 } 3374 3353 3375 3354 cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 3376 - passthrough = (dev->transport->transport_type == 3377 - TRANSPORT_PLUGIN_PHBA_PDEV); 3378 - /* 3379 - * Determine if the received WRITE_SAME_16 is used to for direct 3380 - * passthrough into Linux/SCSI with struct request via TCM/pSCSI 3381 - * or we are signaling the use of internal WRITE_SAME + UNMAP=1 3382 - * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and 3383 - * TCM/FILEIO subsystem plugin backstores. 3384 - */ 3385 - if (!passthrough) { 3386 - if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { 3387 - pr_err("WRITE_SAME PBDATA and LBDATA" 3388 - " bits not supported for Block Discard" 3389 - " Emulation\n"); 3390 - goto out_invalid_cdb_field; 3391 - } 3392 - /* 3393 - * Currently for the emulated case we only accept 3394 - * tpws with the UNMAP=1 bit set. 
3395 - */ 3396 - if (!(cdb[1] & 0x08)) { 3397 - pr_err("WRITE_SAME w/o UNMAP bit not " 3398 - " supported for Block Discard Emulation\n"); 3399 - goto out_invalid_cdb_field; 3400 - } 3401 - } 3402 3355 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3356 + 3357 + if (target_check_write_same_discard(&cdb[1], dev) < 0) 3358 + goto out_invalid_cdb_field; 3359 + break; 3360 + case WRITE_SAME: 3361 + sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 3362 + if (sector_ret) 3363 + goto out_unsupported_cdb; 3364 + 3365 + if (sectors) 3366 + size = transport_get_size(1, cdb, cmd); 3367 + else { 3368 + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 3369 + goto out_invalid_cdb_field; 3370 + } 3371 + 3372 + cmd->t_task_lba = get_unaligned_be32(&cdb[2]); 3373 + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3374 + /* 3375 + * Follow sbcr26 with WRITE_SAME (10) and check for the existence 3376 + * of byte 1 bit 3 UNMAP instead of original reserved field 3377 + */ 3378 + if (target_check_write_same_discard(&cdb[1], dev) < 0) 3379 + goto out_invalid_cdb_field; 3403 3380 break; 3404 3381 case ALLOW_MEDIUM_REMOVAL: 3405 3382 case GPCMD_CLOSE_TRACK: ··· 3892 3873 static int transport_new_cmd_obj(struct se_cmd *cmd) 3893 3874 { 3894 3875 struct se_device *dev = cmd->se_dev; 3895 - u32 task_cdbs; 3896 - u32 rc; 3897 - int set_counts = 1; 3876 + int set_counts = 1, rc, task_cdbs; 3898 3877 3899 3878 /* 3900 3879 * Setup any BIDI READ tasks and memory from ··· 3910 3893 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3911 3894 cmd->scsi_sense_reason = 3912 3895 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3913 - return PYX_TRANSPORT_LU_COMM_FAILURE; 3896 + return -EINVAL; 3914 3897 } 3915 3898 atomic_inc(&cmd->t_fe_count); 3916 3899 atomic_inc(&cmd->t_se_count); ··· 3929 3912 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3930 3913 cmd->scsi_sense_reason = 3931 3914 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3932 - return PYX_TRANSPORT_LU_COMM_FAILURE; 3915 + return -EINVAL; 
3933 3916 } 3934 3917 3935 3918 if (set_counts) { ··· 4045 4028 if (!task->task_sg) 4046 4029 continue; 4047 4030 4048 - BUG_ON(!task->task_padded_sg); 4049 - 4050 4031 if (!sg_first) { 4051 4032 sg_first = task->task_sg; 4052 4033 chained_nents = task->task_sg_nents; ··· 4052 4037 sg_chain(sg_prev, sg_prev_nents, task->task_sg); 4053 4038 chained_nents += task->task_sg_nents; 4054 4039 } 4040 + /* 4041 + * For the padded tasks, use the extra SGL vector allocated 4042 + * in transport_allocate_data_tasks() for the sg_prev_nents 4043 + * offset into sg_chain() above.. The last task of a 4044 + * multi-task list, or a single task will not have 4045 + * task->task_sg_padded set.. 4046 + */ 4047 + if (task->task_padded_sg) 4048 + sg_prev_nents = (task->task_sg_nents + 1); 4049 + else 4050 + sg_prev_nents = task->task_sg_nents; 4055 4051 4056 4052 sg_prev = task->task_sg; 4057 - sg_prev_nents = task->task_sg_nents; 4058 4053 } 4059 4054 /* 4060 4055 * Setup the starting pointer and total t_tasks_sg_linked_no including ··· 4116 4091 4117 4092 cmd_sg = sgl; 4118 4093 for (i = 0; i < task_count; i++) { 4119 - unsigned int task_size; 4094 + unsigned int task_size, task_sg_nents_padded; 4120 4095 int count; 4121 4096 4122 4097 task = transport_generic_get_task(cmd, data_direction); ··· 4135 4110 4136 4111 /* Update new cdb with updated lba/sectors */ 4137 4112 cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); 4138 - 4113 + /* 4114 + * This now assumes that passed sg_ents are in PAGE_SIZE chunks 4115 + * in order to calculate the number per task SGL entries 4116 + */ 4117 + task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE); 4139 4118 /* 4140 4119 * Check if the fabric module driver is requesting that all 4141 4120 * struct se_task->task_sg[] be chained together.. If so, 4142 4121 * then allocate an extra padding SG entry for linking and 4143 - * marking the end of the chained SGL. 
4144 - * Possibly over-allocate task sgl size by using cmd sgl size. 4145 - * It's so much easier and only a waste when task_count > 1. 4146 - * That is extremely rare. 4122 + * marking the end of the chained SGL for every task except 4123 + * the last one for (task_count > 1) operation, or skipping 4124 + * the extra padding for the (task_count == 1) case. 4147 4125 */ 4148 - task->task_sg_nents = sgl_nents; 4149 - if (cmd->se_tfo->task_sg_chaining) { 4150 - task->task_sg_nents++; 4126 + if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { 4127 + task_sg_nents_padded = (task->task_sg_nents + 1); 4151 4128 task->task_padded_sg = 1; 4152 - } 4129 + } else 4130 + task_sg_nents_padded = task->task_sg_nents; 4153 4131 4154 4132 task->task_sg = kmalloc(sizeof(struct scatterlist) * 4155 - task->task_sg_nents, GFP_KERNEL); 4133 + task_sg_nents_padded, GFP_KERNEL); 4156 4134 if (!task->task_sg) { 4157 4135 cmd->se_dev->transport->free_task(task); 4158 4136 return -ENOMEM; 4159 4137 } 4160 4138 4161 - sg_init_table(task->task_sg, task->task_sg_nents); 4139 + sg_init_table(task->task_sg, task_sg_nents_padded); 4162 4140 4163 4141 task_size = task->task_size; 4164 4142 ··· 4258 4230 struct scatterlist *sgl, 4259 4231 unsigned int sgl_nents) 4260 4232 { 4261 - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) 4233 + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { 4234 + if (transport_cmd_get_valid_sectors(cmd) < 0) 4235 + return -EINVAL; 4236 + 4262 4237 return transport_allocate_data_tasks(cmd, lba, data_direction, 4263 4238 sgl, sgl_nents); 4264 - else 4239 + } else 4265 4240 return transport_allocate_control_task(cmd); 4266 4241 4267 4242 } ··· 4757 4726 */ 4758 4727 switch (reason) { 4759 4728 case TCM_NON_EXISTENT_LUN: 4729 + /* CURRENT ERROR */ 4730 + buffer[offset] = 0x70; 4731 + /* ILLEGAL REQUEST */ 4732 + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4733 + /* LOGICAL UNIT NOT SUPPORTED */ 4734 + buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; 4735 + 
break; 4760 4736 case TCM_UNSUPPORTED_SCSI_OPCODE: 4761 4737 case TCM_SECTOR_COUNT_TOO_MANY: 4762 4738 /* CURRENT ERROR */
+2 -4
drivers/target/tcm_fc/tfc_conf.c
··· 256 256 struct se_portal_group *se_tpg = &tpg->se_tpg; 257 257 struct se_node_acl *se_acl; 258 258 259 - spin_lock_bh(&se_tpg->acl_node_lock); 259 + spin_lock_irq(&se_tpg->acl_node_lock); 260 260 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { 261 261 acl = container_of(se_acl, struct ft_node_acl, se_node_acl); 262 262 pr_debug("acl %p port_name %llx\n", ··· 270 270 break; 271 271 } 272 272 } 273 - spin_unlock_bh(&se_tpg->acl_node_lock); 273 + spin_unlock_irq(&se_tpg->acl_node_lock); 274 274 return found; 275 275 } 276 276 ··· 655 655 synchronize_rcu(); 656 656 } 657 657 658 - #ifdef MODULE 659 658 MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION); 660 659 MODULE_LICENSE("GPL"); 661 660 module_init(ft_init); 662 661 module_exit(ft_exit); 663 - #endif /* MODULE */
+15 -2
drivers/tty/pty.c
··· 446 446 int pty_limit = NR_UNIX98_PTY_DEFAULT; 447 447 static int pty_limit_min; 448 448 static int pty_limit_max = NR_UNIX98_PTY_MAX; 449 + static int tty_count; 449 450 static int pty_count; 451 + 452 + static inline void pty_inc_count(void) 453 + { 454 + pty_count = (++tty_count) / 2; 455 + } 456 + 457 + static inline void pty_dec_count(void) 458 + { 459 + pty_count = (--tty_count) / 2; 460 + } 450 461 451 462 static struct cdev ptmx_cdev; 452 463 ··· 553 542 554 543 static void pty_unix98_shutdown(struct tty_struct *tty) 555 544 { 545 + tty_driver_remove_tty(tty->driver, tty); 556 546 /* We have our own method as we don't use the tty index */ 557 547 kfree(tty->termios); 558 548 } ··· 600 588 */ 601 589 tty_driver_kref_get(driver); 602 590 tty->count++; 603 - pty_count++; 591 + pty_inc_count(); /* tty */ 592 + pty_inc_count(); /* tty->link */ 604 593 return 0; 605 594 err_free_mem: 606 595 deinitialize_tty_struct(o_tty); ··· 615 602 616 603 static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) 617 604 { 618 - pty_count--; 605 + pty_dec_count(); 619 606 } 620 607 621 608 static const struct tty_operations ptm_unix98_ops = {
+5 -3
drivers/tty/serial/8250.c
··· 1819 1819 unsigned int iir, ier = 0, lsr; 1820 1820 unsigned long flags; 1821 1821 1822 + spin_lock_irqsave(&up->port.lock, flags); 1823 + 1822 1824 /* 1823 1825 * Must disable interrupts or else we risk racing with the interrupt 1824 1826 * based handler. ··· 1838 1836 * the "Diva" UART used on the management processor on many HP 1839 1837 * ia64 and parisc boxes. 1840 1838 */ 1841 - spin_lock_irqsave(&up->port.lock, flags); 1842 1839 lsr = serial_in(up, UART_LSR); 1843 1840 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; 1844 - spin_unlock_irqrestore(&up->port.lock, flags); 1845 1841 if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) && 1846 1842 (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) && 1847 1843 (lsr & UART_LSR_THRE)) { ··· 1848 1848 } 1849 1849 1850 1850 if (!(iir & UART_IIR_NO_INT)) 1851 - serial8250_handle_port(up); 1851 + transmit_chars(up); 1852 1852 1853 1853 if (is_real_interrupt(up->port.irq)) 1854 1854 serial_out(up, UART_IER, ier); 1855 + 1856 + spin_unlock_irqrestore(&up->port.lock, flags); 1855 1857 1856 1858 /* Standard timer interval plus 0.2s to keep the port running */ 1857 1859 mod_timer(&up->timer,
+5 -6
drivers/tty/serial/8250_pci.c
··· 1599 1599 .device = 0x800D, 1600 1600 .init = pci_eg20t_init, 1601 1601 }, 1602 - { 1603 - .vendor = 0x10DB, 1604 - .device = 0x800D, 1605 - .init = pci_eg20t_init, 1606 - }, 1607 1602 /* 1608 1603 * Cronyx Omega PCI (PLX-chip based) 1609 1604 */ ··· 4016 4021 0, 0, pbn_NETMOS9900_2s_115200 }, 4017 4022 4018 4023 /* 4019 - * Best Connectivity PCI Multi I/O cards 4024 + * Best Connectivity and Rosewill PCI Multi I/O cards 4020 4025 */ 4021 4026 4022 4027 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, 4023 4028 0xA000, 0x1000, 4024 4029 0, 0, pbn_b0_1_115200 }, 4030 + 4031 + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, 4032 + 0xA000, 0x3002, 4033 + 0, 0, pbn_b0_bt_2_115200 }, 4025 4034 4026 4035 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, 4027 4036 0xA000, 0x3004,
+3
drivers/tty/serial/8250_pnp.c
··· 109 109 /* IBM */ 110 110 /* IBM Thinkpad 701 Internal Modem Voice */ 111 111 { "IBM0033", 0 }, 112 + /* Intermec */ 113 + /* Intermec CV60 touchscreen port */ 114 + { "PNP4972", 0 }, 112 115 /* Intertex */ 113 116 /* Intertex 28k8 33k6 Voice EXT PnP */ 114 117 { "IXDC801", 0 },
+5 -3
drivers/tty/serial/atmel_serial.c
··· 1609 1609 static int __init atmel_console_init(void) 1610 1610 { 1611 1611 if (atmel_default_console_device) { 1612 - add_preferred_console(ATMEL_DEVICENAME, 1613 - atmel_default_console_device->id, NULL); 1614 - atmel_init_port(&atmel_ports[atmel_default_console_device->id], 1612 + struct atmel_uart_data *pdata = 1613 + atmel_default_console_device->dev.platform_data; 1614 + 1615 + add_preferred_console(ATMEL_DEVICENAME, pdata->num, NULL); 1616 + atmel_init_port(&atmel_ports[pdata->num], 1615 1617 atmel_default_console_device); 1616 1618 register_console(&atmel_console); 1617 1619 }
+1 -1
drivers/tty/serial/max3107-aava.c
··· 340 340 341 341 MODULE_DESCRIPTION("MAX3107 driver"); 342 342 MODULE_AUTHOR("Aavamobile"); 343 - MODULE_ALIAS("aava-max3107-spi"); 343 + MODULE_ALIAS("spi:aava-max3107"); 344 344 MODULE_LICENSE("GPL v2");
+1 -1
drivers/tty/serial/max3107.c
··· 1209 1209 1210 1210 MODULE_DESCRIPTION("MAX3107 driver"); 1211 1211 MODULE_AUTHOR("Aavamobile"); 1212 - MODULE_ALIAS("max3107-spi"); 1212 + MODULE_ALIAS("spi:max3107"); 1213 1213 MODULE_LICENSE("GPL v2");
+1 -1
drivers/tty/serial/mrst_max3110.c
··· 917 917 module_exit(serial_m3110_exit); 918 918 919 919 MODULE_LICENSE("GPL v2"); 920 - MODULE_ALIAS("max3110-uart"); 920 + MODULE_ALIAS("spi:max3110-uart");
+1 -2
drivers/tty/serial/omap-serial.c
··· 806 806 807 807 serial_omap_set_mctrl(&up->port, up->port.mctrl); 808 808 /* Software Flow Control Configuration */ 809 - if (termios->c_iflag & (IXON | IXOFF)) 810 - serial_omap_configure_xonxoff(up, termios); 809 + serial_omap_configure_xonxoff(up, termios); 811 810 812 811 spin_unlock_irqrestore(&up->port.lock, flags); 813 812 dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
+2 -1
drivers/tty/serial/pch_uart.c
··· 598 598 dma_cap_zero(mask); 599 599 dma_cap_set(DMA_SLAVE, mask); 600 600 601 - dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(0xa, 0)); /* Get DMA's dev 601 + dma_dev = pci_get_bus_and_slot(priv->pdev->bus->number, 602 + PCI_DEVFN(0xa, 0)); /* Get DMA's dev 602 603 information */ 603 604 /* Set Tx DMA */ 604 605 param = &priv->param_tx;
+6 -2
drivers/tty/serial/samsung.c
··· 1225 1225 .suspend = s3c24xx_serial_suspend, 1226 1226 .resume = s3c24xx_serial_resume, 1227 1227 }; 1228 + #define SERIAL_SAMSUNG_PM_OPS (&s3c24xx_serial_pm_ops) 1229 + 1228 1230 #else /* !CONFIG_PM_SLEEP */ 1229 - #define s3c24xx_serial_pm_ops NULL 1231 + 1232 + #define SERIAL_SAMSUNG_PM_OPS NULL 1230 1233 #endif /* CONFIG_PM_SLEEP */ 1231 1234 1232 1235 int s3c24xx_serial_init(struct platform_driver *drv, 1233 1236 struct s3c24xx_uart_info *info) 1234 1237 { 1235 1238 dbg("s3c24xx_serial_init(%p,%p)\n", drv, info); 1236 - drv->driver.pm = &s3c24xx_serial_pm_ops; 1239 + 1240 + drv->driver.pm = SERIAL_SAMSUNG_PM_OPS; 1237 1241 1238 1242 return platform_driver_register(drv); 1239 1243 }
+5
drivers/tty/serial/serial_core.c
··· 200 200 clear_bit(TTY_IO_ERROR, &tty->flags); 201 201 } 202 202 203 + /* 204 + * This is to allow setserial on this port. People may want to set 205 + * port/irq/type and then reconfigure the port properly if it failed 206 + * now. 207 + */ 203 208 if (retval && capable(CAP_SYS_ADMIN)) 204 209 retval = 0; 205 210
+61 -11
drivers/tty/serial/sh-sci.c
··· 47 47 #include <linux/ctype.h> 48 48 #include <linux/err.h> 49 49 #include <linux/dmaengine.h> 50 + #include <linux/dma-mapping.h> 50 51 #include <linux/scatterlist.h> 51 52 #include <linux/slab.h> 52 53 ··· 96 95 #endif 97 96 98 97 struct notifier_block freq_transition; 98 + 99 + #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE 100 + unsigned short saved_smr; 101 + unsigned short saved_fcr; 102 + unsigned char saved_brr; 103 + #endif 99 104 }; 100 105 101 106 /* Function prototypes */ ··· 1083 1076 /* This routine is used for getting signals of: DTR, DCD, DSR, RI, 1084 1077 and CTS/RTS */ 1085 1078 1086 - return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR; 1079 + return TIOCM_DTR | TIOCM_RTS | TIOCM_CTS | TIOCM_DSR; 1087 1080 } 1088 1081 1089 1082 #ifdef CONFIG_SERIAL_SH_SCI_DMA ··· 1640 1633 return ((freq + 16 * bps) / (32 * bps) - 1); 1641 1634 } 1642 1635 1636 + static void sci_reset(struct uart_port *port) 1637 + { 1638 + unsigned int status; 1639 + 1640 + do { 1641 + status = sci_in(port, SCxSR); 1642 + } while (!(status & SCxSR_TEND(port))); 1643 + 1644 + sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */ 1645 + 1646 + if (port->type != PORT_SCI) 1647 + sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST); 1648 + } 1649 + 1643 1650 static void sci_set_termios(struct uart_port *port, struct ktermios *termios, 1644 1651 struct ktermios *old) 1645 1652 { 1646 1653 struct sci_port *s = to_sci_port(port); 1647 - unsigned int status, baud, smr_val, max_baud; 1654 + unsigned int baud, smr_val, max_baud; 1648 1655 int t = -1; 1649 1656 u16 scfcr = 0; 1650 1657 ··· 1678 1657 1679 1658 sci_port_enable(s); 1680 1659 1681 - do { 1682 - status = sci_in(port, SCxSR); 1683 - } while (!(status & SCxSR_TEND(port))); 1684 - 1685 - sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */ 1686 - 1687 - if (port->type != PORT_SCI) 1688 - sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST); 1660 + sci_reset(port); 1689 1661 1690 1662 smr_val = sci_in(port, SCSMR) & 3; 1691 1663 ··· 1927 1913 1928 1914 
port->dev = &dev->dev; 1929 1915 1916 + pm_runtime_irq_safe(&dev->dev); 1930 1917 pm_runtime_enable(&dev->dev); 1931 1918 } 1932 1919 ··· 2051 2036 if (options) 2052 2037 uart_parse_options(options, &baud, &parity, &bits, &flow); 2053 2038 2054 - /* TODO: disable clock */ 2039 + sci_port_disable(sci_port); 2040 + 2055 2041 return uart_set_options(port, co, baud, parity, bits, flow); 2056 2042 } 2057 2043 ··· 2095 2079 return 0; 2096 2080 } 2097 2081 2082 + #define uart_console(port) ((port)->cons->index == (port)->line) 2083 + 2084 + static int sci_runtime_suspend(struct device *dev) 2085 + { 2086 + struct sci_port *sci_port = dev_get_drvdata(dev); 2087 + struct uart_port *port = &sci_port->port; 2088 + 2089 + if (uart_console(port)) { 2090 + sci_port->saved_smr = sci_in(port, SCSMR); 2091 + sci_port->saved_brr = sci_in(port, SCBRR); 2092 + sci_port->saved_fcr = sci_in(port, SCFCR); 2093 + } 2094 + return 0; 2095 + } 2096 + 2097 + static int sci_runtime_resume(struct device *dev) 2098 + { 2099 + struct sci_port *sci_port = dev_get_drvdata(dev); 2100 + struct uart_port *port = &sci_port->port; 2101 + 2102 + if (uart_console(port)) { 2103 + sci_reset(port); 2104 + sci_out(port, SCSMR, sci_port->saved_smr); 2105 + sci_out(port, SCBRR, sci_port->saved_brr); 2106 + sci_out(port, SCFCR, sci_port->saved_fcr); 2107 + sci_out(port, SCSCR, sci_port->cfg->scscr); 2108 + } 2109 + return 0; 2110 + } 2111 + 2098 2112 #define SCI_CONSOLE (&serial_console) 2099 2113 2100 2114 #else ··· 2134 2088 } 2135 2089 2136 2090 #define SCI_CONSOLE NULL 2091 + #define sci_runtime_suspend NULL 2092 + #define sci_runtime_resume NULL 2137 2093 2138 2094 #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ 2139 2095 ··· 2251 2203 } 2252 2204 2253 2205 static const struct dev_pm_ops sci_dev_pm_ops = { 2206 + .runtime_suspend = sci_runtime_suspend, 2207 + .runtime_resume = sci_runtime_resume, 2254 2208 .suspend = sci_suspend, 2255 2209 .resume = sci_resume, 2256 2210 };
+1 -1
drivers/tty/serial/ucc_uart.c
··· 235 235 return qe_port->bd_virt + (addr - qe_port->bd_dma_addr); 236 236 237 237 /* something nasty happened */ 238 - printk(KERN_ERR "%s: addr=%x\n", __func__, addr); 238 + printk(KERN_ERR "%s: addr=%llx\n", __func__, (u64)addr); 239 239 BUG(); 240 240 return NULL; 241 241 }
+1 -2
drivers/tty/tty_io.c
··· 1295 1295 * 1296 1296 * Locking: tty_mutex for now 1297 1297 */ 1298 - static void tty_driver_remove_tty(struct tty_driver *driver, 1299 - struct tty_struct *tty) 1298 + void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty) 1300 1299 { 1301 1300 if (driver->ops->remove) 1302 1301 driver->ops->remove(driver, tty);
+2
drivers/usb/core/hcd.c
··· 1775 1775 struct usb_interface *iface = usb_ifnum_to_if(udev, 1776 1776 cur_alt->desc.bInterfaceNumber); 1777 1777 1778 + if (!iface) 1779 + return -EINVAL; 1778 1780 if (iface->resetting_device) { 1779 1781 /* 1780 1782 * The USB core just reset the device, so the xHCI host
+1
drivers/usb/gadget/f_phonet.c
··· 434 434 config_ep_by_speed(gadget, f, fp->out_ep)) { 435 435 fp->in_ep->desc = NULL; 436 436 fp->out_ep->desc = NULL; 437 + spin_unlock(&port->lock); 437 438 return -EINVAL; 438 439 } 439 440 usb_ep_enable(fp->out_ep);
+3 -4
drivers/usb/host/ehci-hub.c
··· 343 343 u32 temp; 344 344 u32 power_okay; 345 345 int i; 346 - u8 resume_needed = 0; 346 + unsigned long resume_needed = 0; 347 347 348 348 if (time_before (jiffies, ehci->next_statechange)) 349 349 msleep(5); ··· 416 416 if (test_bit(i, &ehci->bus_suspended) && 417 417 (temp & PORT_SUSPEND)) { 418 418 temp |= PORT_RESUME; 419 - resume_needed = 1; 419 + set_bit(i, &resume_needed); 420 420 } 421 421 ehci_writel(ehci, temp, &ehci->regs->port_status [i]); 422 422 } ··· 431 431 i = HCS_N_PORTS (ehci->hcs_params); 432 432 while (i--) { 433 433 temp = ehci_readl(ehci, &ehci->regs->port_status [i]); 434 - if (test_bit(i, &ehci->bus_suspended) && 435 - (temp & PORT_SUSPEND)) { 434 + if (test_bit(i, &resume_needed)) { 436 435 temp &= ~(PORT_RWC_BITS | PORT_RESUME); 437 436 ehci_writel(ehci, temp, &ehci->regs->port_status [i]); 438 437 ehci_vdbg (ehci, "resumed port %d\n", i + 1);
+1
drivers/usb/host/ehci-s5p.c
··· 86 86 goto fail_hcd; 87 87 } 88 88 89 + s5p_ehci->hcd = hcd; 89 90 s5p_ehci->clk = clk_get(&pdev->dev, "usbhost"); 90 91 91 92 if (IS_ERR(s5p_ehci->clk)) {
+13 -4
drivers/usb/host/xhci-hub.c
··· 463 463 && (temp & PORT_POWER)) 464 464 status |= USB_PORT_STAT_SUSPEND; 465 465 } 466 - if ((temp & PORT_PLS_MASK) == XDEV_RESUME) { 466 + if ((temp & PORT_PLS_MASK) == XDEV_RESUME && 467 + !DEV_SUPERSPEED(temp)) { 467 468 if ((temp & PORT_RESET) || !(temp & PORT_PE)) 468 469 goto error; 469 - if (!DEV_SUPERSPEED(temp) && time_after_eq(jiffies, 470 - bus_state->resume_done[wIndex])) { 470 + if (time_after_eq(jiffies, 471 + bus_state->resume_done[wIndex])) { 471 472 xhci_dbg(xhci, "Resume USB2 port %d\n", 472 473 wIndex + 1); 473 474 bus_state->resume_done[wIndex] = 0; ··· 488 487 xhci_ring_device(xhci, slot_id); 489 488 bus_state->port_c_suspend |= 1 << wIndex; 490 489 bus_state->suspended_ports &= ~(1 << wIndex); 490 + } else { 491 + /* 492 + * The resume has been signaling for less than 493 + * 20ms. Report the port status as SUSPEND, 494 + * let the usbcore check port status again 495 + * and clear resume signaling later. 496 + */ 497 + status |= USB_PORT_STAT_SUSPEND; 491 498 } 492 499 } 493 500 if ((temp & PORT_PLS_MASK) == XDEV_U0 ··· 673 664 xhci_dbg(xhci, "PORTSC %04x\n", temp); 674 665 if (temp & PORT_RESET) 675 666 goto error; 676 - if (temp & XDEV_U3) { 667 + if ((temp & PORT_PLS_MASK) == XDEV_U3) { 677 668 if ((temp & PORT_PE) == 0) 678 669 goto error; 679 670
+63 -27
drivers/usb/host/xhci-ring.c
··· 514 514 (unsigned long long) addr); 515 515 } 516 516 517 + /* flip_cycle means flip the cycle bit of all but the first and last TRB. 518 + * (The last TRB actually points to the ring enqueue pointer, which is not part 519 + * of this TD.) This is used to remove partially enqueued isoc TDs from a ring. 520 + */ 517 521 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, 518 - struct xhci_td *cur_td) 522 + struct xhci_td *cur_td, bool flip_cycle) 519 523 { 520 524 struct xhci_segment *cur_seg; 521 525 union xhci_trb *cur_trb; ··· 532 528 * leave the pointers intact. 533 529 */ 534 530 cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN); 531 + /* Flip the cycle bit (link TRBs can't be the first 532 + * or last TRB). 533 + */ 534 + if (flip_cycle) 535 + cur_trb->generic.field[3] ^= 536 + cpu_to_le32(TRB_CYCLE); 535 537 xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); 536 538 xhci_dbg(xhci, "Address = %p (0x%llx dma); " 537 539 "in seg %p (0x%llx dma)\n", ··· 551 541 cur_trb->generic.field[2] = 0; 552 542 /* Preserve only the cycle bit of this TRB */ 553 543 cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); 544 + /* Flip the cycle bit except on the first or last TRB */ 545 + if (flip_cycle && cur_trb != cur_td->first_trb && 546 + cur_trb != cur_td->last_trb) 547 + cur_trb->generic.field[3] ^= 548 + cpu_to_le32(TRB_CYCLE); 554 549 cur_trb->generic.field[3] |= cpu_to_le32( 555 550 TRB_TYPE(TRB_TR_NOOP)); 556 551 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) " ··· 734 719 cur_td->urb->stream_id, 735 720 cur_td, &deq_state); 736 721 else 737 - td_to_noop(xhci, ep_ring, cur_td); 722 + td_to_noop(xhci, ep_ring, cur_td, false); 738 723 remove_finished_td: 739 724 /* 740 725 * The event handler won't see a completion for this TD anymore, 741 726 * so remove it from the endpoint ring's TD list. Keep it in 742 727 * the cancelled TD list for URB completion later. 
743 728 */ 744 - list_del(&cur_td->td_list); 729 + list_del_init(&cur_td->td_list); 745 730 } 746 731 last_unlinked_td = cur_td; 747 732 xhci_stop_watchdog_timer_in_irq(xhci, ep); ··· 769 754 do { 770 755 cur_td = list_entry(ep->cancelled_td_list.next, 771 756 struct xhci_td, cancelled_td_list); 772 - list_del(&cur_td->cancelled_td_list); 757 + list_del_init(&cur_td->cancelled_td_list); 773 758 774 759 /* Clean up the cancelled URB */ 775 760 /* Doesn't matter what we pass for status, since the core will ··· 877 862 cur_td = list_first_entry(&ring->td_list, 878 863 struct xhci_td, 879 864 td_list); 880 - list_del(&cur_td->td_list); 865 + list_del_init(&cur_td->td_list); 881 866 if (!list_empty(&cur_td->cancelled_td_list)) 882 - list_del(&cur_td->cancelled_td_list); 867 + list_del_init(&cur_td->cancelled_td_list); 883 868 xhci_giveback_urb_in_irq(xhci, cur_td, 884 869 -ESHUTDOWN, "killed"); 885 870 } ··· 888 873 &temp_ep->cancelled_td_list, 889 874 struct xhci_td, 890 875 cancelled_td_list); 891 - list_del(&cur_td->cancelled_td_list); 876 + list_del_init(&cur_td->cancelled_td_list); 892 877 xhci_giveback_urb_in_irq(xhci, cur_td, 893 878 -ESHUTDOWN, "killed"); 894 879 } ··· 1580 1565 else 1581 1566 *status = 0; 1582 1567 } 1583 - list_del(&td->td_list); 1568 + list_del_init(&td->td_list); 1584 1569 /* Was this TD slated to be cancelled but completed anyway? 
*/ 1585 1570 if (!list_empty(&td->cancelled_td_list)) 1586 - list_del(&td->cancelled_td_list); 1571 + list_del_init(&td->cancelled_td_list); 1587 1572 1588 1573 urb_priv->td_cnt++; 1589 1574 /* Giveback the urb when all the tds are completed */ ··· 2515 2500 2516 2501 if (td_index == 0) { 2517 2502 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); 2518 - if (unlikely(ret)) { 2519 - xhci_urb_free_priv(xhci, urb_priv); 2520 - urb->hcpriv = NULL; 2503 + if (unlikely(ret)) 2521 2504 return ret; 2522 - } 2523 2505 } 2524 2506 2525 2507 td->urb = urb; ··· 2683 2671 unsigned int total_packet_count, struct urb *urb) 2684 2672 { 2685 2673 int packets_transferred; 2674 + 2675 + /* One TRB with a zero-length data packet. */ 2676 + if (running_total == 0 && trb_buff_len == 0) 2677 + return 0; 2686 2678 2687 2679 /* All the TRB queueing functions don't count the current TRB in 2688 2680 * running_total. ··· 3129 3113 struct urb *urb, int i) 3130 3114 { 3131 3115 int num_trbs = 0; 3132 - u64 addr, td_len, running_total; 3116 + u64 addr, td_len; 3133 3117 3134 3118 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); 3135 3119 td_len = urb->iso_frame_desc[i].length; 3136 3120 3137 - running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); 3138 - running_total &= TRB_MAX_BUFF_SIZE - 1; 3139 - if (running_total != 0) 3121 + num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)), 3122 + TRB_MAX_BUFF_SIZE); 3123 + if (num_trbs == 0) 3140 3124 num_trbs++; 3141 - 3142 - while (running_total < td_len) { 3143 - num_trbs++; 3144 - running_total += TRB_MAX_BUFF_SIZE; 3145 - } 3146 3125 3147 3126 return num_trbs; 3148 3127 } ··· 3237 3226 start_trb = &ep_ring->enqueue->generic; 3238 3227 start_cycle = ep_ring->cycle_state; 3239 3228 3229 + urb_priv = urb->hcpriv; 3240 3230 /* Queue the first TRB, even if it's zero-length */ 3241 3231 for (i = 0; i < num_tds; i++) { 3242 3232 unsigned int total_packet_count; ··· 3249 3237 addr = start_addr + 
urb->iso_frame_desc[i].offset; 3250 3238 td_len = urb->iso_frame_desc[i].length; 3251 3239 td_remain_len = td_len; 3252 - /* FIXME: Ignoring zero-length packets, can those happen? */ 3253 3240 total_packet_count = roundup(td_len, 3254 3241 le16_to_cpu(urb->ep->desc.wMaxPacketSize)); 3242 + /* A zero-length transfer still involves at least one packet. */ 3243 + if (total_packet_count == 0) 3244 + total_packet_count++; 3255 3245 burst_count = xhci_get_burst_count(xhci, urb->dev, urb, 3256 3246 total_packet_count); 3257 3247 residue = xhci_get_last_burst_packet_count(xhci, ··· 3263 3249 3264 3250 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, 3265 3251 urb->stream_id, trbs_per_td, urb, i, mem_flags); 3266 - if (ret < 0) 3267 - return ret; 3252 + if (ret < 0) { 3253 + if (i == 0) 3254 + return ret; 3255 + goto cleanup; 3256 + } 3268 3257 3269 - urb_priv = urb->hcpriv; 3270 3258 td = urb_priv->td[i]; 3271 - 3272 3259 for (j = 0; j < trbs_per_td; j++) { 3273 3260 u32 remainder = 0; 3274 3261 field = TRB_TBC(burst_count) | TRB_TLBPC(residue); ··· 3359 3344 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3360 3345 start_cycle, start_trb); 3361 3346 return 0; 3347 + cleanup: 3348 + /* Clean up a partially enqueued isoc transfer. */ 3349 + 3350 + for (i--; i >= 0; i--) 3351 + list_del_init(&urb_priv->td[i]->td_list); 3352 + 3353 + /* Use the first TD as a temporary variable to turn the TDs we've queued 3354 + * into No-ops with a software-owned cycle bit. That way the hardware 3355 + * won't accidentally start executing bogus TDs when we partially 3356 + * overwrite them. td->first_trb and td->start_seg are already set. 3357 + */ 3358 + urb_priv->td[0]->last_trb = ep_ring->enqueue; 3359 + /* Every TRB except the first & last will have its cycle bit flipped. */ 3360 + td_to_noop(xhci, ep_ring, urb_priv->td[0], true); 3361 + 3362 + /* Reset the ring enqueue back to the first TRB and its cycle bit. 
*/ 3363 + ep_ring->enqueue = urb_priv->td[0]->first_trb; 3364 + ep_ring->enq_seg = urb_priv->td[0]->start_seg; 3365 + ep_ring->cycle_state = start_cycle; 3366 + usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); 3367 + return ret; 3362 3368 } 3363 3369 3364 3370 /*
+24 -4
drivers/usb/host/xhci.c
··· 1085 1085 if (urb->dev->speed == USB_SPEED_FULL) { 1086 1086 ret = xhci_check_maxpacket(xhci, slot_id, 1087 1087 ep_index, urb); 1088 - if (ret < 0) 1088 + if (ret < 0) { 1089 + xhci_urb_free_priv(xhci, urb_priv); 1090 + urb->hcpriv = NULL; 1089 1091 return ret; 1092 + } 1090 1093 } 1091 1094 1092 1095 /* We have a spinlock and interrupts disabled, so we must pass ··· 1100 1097 goto dying; 1101 1098 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, 1102 1099 slot_id, ep_index); 1100 + if (ret) 1101 + goto free_priv; 1103 1102 spin_unlock_irqrestore(&xhci->lock, flags); 1104 1103 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { 1105 1104 spin_lock_irqsave(&xhci->lock, flags); ··· 1122 1117 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, 1123 1118 slot_id, ep_index); 1124 1119 } 1120 + if (ret) 1121 + goto free_priv; 1125 1122 spin_unlock_irqrestore(&xhci->lock, flags); 1126 1123 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { 1127 1124 spin_lock_irqsave(&xhci->lock, flags); ··· 1131 1124 goto dying; 1132 1125 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, 1133 1126 slot_id, ep_index); 1127 + if (ret) 1128 + goto free_priv; 1134 1129 spin_unlock_irqrestore(&xhci->lock, flags); 1135 1130 } else { 1136 1131 spin_lock_irqsave(&xhci->lock, flags); ··· 1140 1131 goto dying; 1141 1132 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, 1142 1133 slot_id, ep_index); 1134 + if (ret) 1135 + goto free_priv; 1143 1136 spin_unlock_irqrestore(&xhci->lock, flags); 1144 1137 } 1145 1138 exit: 1146 1139 return ret; 1147 1140 dying: 1148 - xhci_urb_free_priv(xhci, urb_priv); 1149 - urb->hcpriv = NULL; 1150 1141 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " 1151 1142 "non-responsive xHCI host.\n", 1152 1143 urb->ep->desc.bEndpointAddress, urb); 1144 + ret = -ESHUTDOWN; 1145 + free_priv: 1146 + xhci_urb_free_priv(xhci, urb_priv); 1147 + urb->hcpriv = NULL; 1153 1148 spin_unlock_irqrestore(&xhci->lock, flags); 1154 - return -ESHUTDOWN; 1149 + return ret; 1155 1150 } 
1156 1151 1157 1152 /* Get the right ring for the given URB. ··· 1252 1239 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { 1253 1240 xhci_dbg(xhci, "HW died, freeing TD.\n"); 1254 1241 urb_priv = urb->hcpriv; 1242 + for (i = urb_priv->td_cnt; i < urb_priv->length; i++) { 1243 + td = urb_priv->td[i]; 1244 + if (!list_empty(&td->td_list)) 1245 + list_del_init(&td->td_list); 1246 + if (!list_empty(&td->cancelled_td_list)) 1247 + list_del_init(&td->cancelled_td_list); 1248 + } 1255 1249 1256 1250 usb_hcd_unlink_urb_from_ep(hcd, urb); 1257 1251 spin_unlock_irqrestore(&xhci->lock, flags);
+1
drivers/usb/musb/blackfin.c
··· 17 17 #include <linux/io.h> 18 18 #include <linux/platform_device.h> 19 19 #include <linux/dma-mapping.h> 20 + #include <linux/prefetch.h> 20 21 21 22 #include <asm/cacheflush.h> 22 23
+17 -9
drivers/usb/musb/cppi_dma.c
··· 226 226 struct cppi *controller; 227 227 void __iomem *tibase; 228 228 int i; 229 + struct musb *musb; 229 230 230 231 controller = container_of(c, struct cppi, controller); 232 + musb = controller->musb; 231 233 232 234 tibase = controller->tibase; 233 235 /* DISABLE INDIVIDUAL CHANNEL Interrupts */ ··· 291 289 u8 index; 292 290 struct cppi_channel *cppi_ch; 293 291 void __iomem *tibase; 292 + struct musb *musb; 294 293 295 294 controller = container_of(c, struct cppi, controller); 296 295 tibase = controller->tibase; 296 + musb = controller->musb; 297 297 298 298 /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */ 299 299 index = ep->epnum - 1; ··· 343 339 c = container_of(channel, struct cppi_channel, channel); 344 340 tibase = c->controller->tibase; 345 341 if (!c->hw_ep) 346 - dev_dbg(musb->controller, "releasing idle DMA channel %p\n", c); 342 + dev_dbg(c->controller->musb->controller, 343 + "releasing idle DMA channel %p\n", c); 347 344 else if (!c->transmit) 348 345 core_rxirq_enable(tibase, c->index + 1); 349 346 ··· 362 357 363 358 musb_ep_select(base, c->index + 1); 364 359 365 - DBG(level, "RX DMA%d%s: %d left, csr %04x, " 366 - "%08x H%08x S%08x C%08x, " 367 - "B%08x L%08x %08x .. %08x" 368 - "\n", 360 + dev_dbg(c->controller->musb->controller, 361 + "RX DMA%d%s: %d left, csr %04x, " 362 + "%08x H%08x S%08x C%08x, " 363 + "B%08x L%08x %08x .. %08x" 364 + "\n", 369 365 c->index, tag, 370 366 musb_readl(c->controller->tibase, 371 367 DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index), ··· 393 387 394 388 musb_ep_select(base, c->index + 1); 395 389 396 - DBG(level, "TX DMA%d%s: csr %04x, " 397 - "H%08x S%08x C%08x %08x, " 398 - "F%08x L%08x .. %08x" 399 - "\n", 390 + dev_dbg(c->controller->musb->controller, 391 + "TX DMA%d%s: csr %04x, " 392 + "H%08x S%08x C%08x %08x, " 393 + "F%08x L%08x .. 
%08x" 394 + "\n", 400 395 c->index, tag, 401 396 musb_readw(c->hw_ep->regs, MUSB_TXCSR), 402 397 ··· 1029 1022 int i; 1030 1023 dma_addr_t safe2ack; 1031 1024 void __iomem *regs = rx->hw_ep->regs; 1025 + struct musb *musb = cppi->musb; 1032 1026 1033 1027 cppi_dump_rx(6, rx, "/K"); 1034 1028
+8 -4
drivers/usb/musb/musb_core.h
··· 172 172 #endif 173 173 174 174 /* TUSB mapping: "flat" plus ep0 special cases */ 175 - #if defined(CONFIG_USB_MUSB_TUSB6010) 175 + #if defined(CONFIG_USB_MUSB_TUSB6010) || \ 176 + defined(CONFIG_USB_MUSB_TUSB6010_MODULE) 176 177 #define musb_ep_select(_mbase, _epnum) \ 177 178 musb_writeb((_mbase), MUSB_INDEX, (_epnum)) 178 179 #define MUSB_EP_OFFSET MUSB_TUSB_OFFSET ··· 242 241 void __iomem *fifo; 243 242 void __iomem *regs; 244 243 245 - #ifdef CONFIG_USB_MUSB_TUSB6010 244 + #if defined(CONFIG_USB_MUSB_TUSB6010) || \ 245 + defined(CONFIG_USB_MUSB_TUSB6010_MODULE) 246 246 void __iomem *conf; 247 247 #endif 248 248 ··· 260 258 struct dma_channel *tx_channel; 261 259 struct dma_channel *rx_channel; 262 260 263 - #ifdef CONFIG_USB_MUSB_TUSB6010 261 + #if defined(CONFIG_USB_MUSB_TUSB6010) || \ 262 + defined(CONFIG_USB_MUSB_TUSB6010_MODULE) 264 263 /* TUSB has "asynchronous" and "synchronous" dma modes */ 265 264 dma_addr_t fifo_async; 266 265 dma_addr_t fifo_sync; ··· 359 356 void __iomem *ctrl_base; 360 357 void __iomem *mregs; 361 358 362 - #ifdef CONFIG_USB_MUSB_TUSB6010 359 + #if defined(CONFIG_USB_MUSB_TUSB6010) || \ 360 + defined(CONFIG_USB_MUSB_TUSB6010_MODULE) 363 361 dma_addr_t async; 364 362 dma_addr_t sync; 365 363 void __iomem *sync_va;
+3 -1
drivers/usb/musb/musb_gadget.c
··· 1856 1856 1857 1857 return 0; 1858 1858 err: 1859 + musb->g.dev.parent = NULL; 1859 1860 device_unregister(&musb->g.dev); 1860 1861 return status; 1861 1862 } ··· 1864 1863 void musb_gadget_cleanup(struct musb *musb) 1865 1864 { 1866 1865 usb_del_gadget_udc(&musb->g); 1867 - device_unregister(&musb->g.dev); 1866 + if (musb->g.dev.parent) 1867 + device_unregister(&musb->g.dev); 1868 1868 } 1869 1869 1870 1870 /*
+4 -2
drivers/usb/musb/musb_regs.h
··· 234 234 #define MUSB_TESTMODE 0x0F /* 8 bit */ 235 235 236 236 /* Get offset for a given FIFO from musb->mregs */ 237 - #ifdef CONFIG_USB_MUSB_TUSB6010 237 + #if defined(CONFIG_USB_MUSB_TUSB6010) || \ 238 + defined(CONFIG_USB_MUSB_TUSB6010_MODULE) 238 239 #define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20)) 239 240 #else 240 241 #define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4)) ··· 296 295 #define MUSB_FLAT_OFFSET(_epnum, _offset) \ 297 296 (0x100 + (0x10*(_epnum)) + (_offset)) 298 297 299 - #ifdef CONFIG_USB_MUSB_TUSB6010 298 + #if defined(CONFIG_USB_MUSB_TUSB6010) || \ 299 + defined(CONFIG_USB_MUSB_TUSB6010_MODULE) 300 300 /* TUSB6010 EP0 configuration register is special */ 301 301 #define MUSB_TUSB_OFFSET(_epnum, _offset) \ 302 302 (0x10 + _offset)
+1
drivers/usb/musb/tusb6010.c
··· 18 18 #include <linux/kernel.h> 19 19 #include <linux/errno.h> 20 20 #include <linux/init.h> 21 + #include <linux/prefetch.h> 21 22 #include <linux/usb.h> 22 23 #include <linux/irq.h> 23 24 #include <linux/platform_device.h>
+1
drivers/usb/musb/tusb6010_omap.c
··· 20 20 #include <plat/mux.h> 21 21 22 22 #include "musb_core.h" 23 + #include "tusb6010.h" 23 24 24 25 #define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data) 25 26
+21 -17
drivers/usb/musb/ux500_dma.c
··· 65 65 struct musb *musb = hw_ep->musb; 66 66 unsigned long flags; 67 67 68 - DBG(4, "DMA tx transfer done on hw_ep=%d\n", hw_ep->epnum); 68 + dev_dbg(musb->controller, "DMA tx transfer done on hw_ep=%d\n", 69 + hw_ep->epnum); 69 70 70 71 spin_lock_irqsave(&musb->lock, flags); 71 72 ux500_channel->channel.actual_len = ux500_channel->cur_len; ··· 85 84 struct musb *musb = hw_ep->musb; 86 85 unsigned long flags; 87 86 88 - DBG(4, "DMA rx transfer done on hw_ep=%d\n", hw_ep->epnum); 87 + dev_dbg(musb->controller, "DMA rx transfer done on hw_ep=%d\n", 88 + hw_ep->epnum); 89 89 90 90 spin_lock_irqsave(&musb->lock, flags); 91 91 ux500_channel->channel.actual_len = ux500_channel->cur_len; ··· 118 116 enum dma_slave_buswidth addr_width; 119 117 dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) + 120 118 ux500_channel->controller->phy_base); 119 + struct musb *musb = ux500_channel->controller->private_data; 121 120 122 - DBG(4, "packet_sz=%d, mode=%d, dma_addr=0x%x, len=%d is_tx=%d\n", 123 - packet_sz, mode, dma_addr, len, ux500_channel->is_tx); 121 + dev_dbg(musb->controller, 122 + "packet_sz=%d, mode=%d, dma_addr=0x%x, len=%d is_tx=%d\n", 123 + packet_sz, mode, dma_addr, len, ux500_channel->is_tx); 124 124 125 125 ux500_channel->cur_len = len; 126 126 ··· 137 133 DMA_SLAVE_BUSWIDTH_4_BYTES; 138 134 139 135 slave_conf.direction = direction; 140 - if (direction == DMA_FROM_DEVICE) { 141 - slave_conf.src_addr = usb_fifo_addr; 142 - slave_conf.src_addr_width = addr_width; 143 - slave_conf.src_maxburst = 16; 144 - } else { 145 - slave_conf.dst_addr = usb_fifo_addr; 146 - slave_conf.dst_addr_width = addr_width; 147 - slave_conf.dst_maxburst = 16; 148 - } 136 + slave_conf.src_addr = usb_fifo_addr; 137 + slave_conf.src_addr_width = addr_width; 138 + slave_conf.src_maxburst = 16; 139 + slave_conf.dst_addr = usb_fifo_addr; 140 + slave_conf.dst_addr_width = addr_width; 141 + slave_conf.dst_maxburst = 16; 142 + 149 143 dma_chan->device->device_control(dma_chan, 
DMA_SLAVE_CONFIG, 150 144 (unsigned long) &slave_conf); 151 145 ··· 168 166 struct ux500_dma_controller *controller = container_of(c, 169 167 struct ux500_dma_controller, controller); 170 168 struct ux500_dma_channel *ux500_channel = NULL; 169 + struct musb *musb = controller->private_data; 171 170 u8 ch_num = hw_ep->epnum - 1; 172 171 u32 max_ch; 173 172 ··· 195 192 ux500_channel->hw_ep = hw_ep; 196 193 ux500_channel->is_allocated = 1; 197 194 198 - DBG(7, "hw_ep=%d, is_tx=0x%x, channel=%d\n", 195 + dev_dbg(musb->controller, "hw_ep=%d, is_tx=0x%x, channel=%d\n", 199 196 hw_ep->epnum, is_tx, ch_num); 200 197 201 198 return &(ux500_channel->channel); ··· 204 201 static void ux500_dma_channel_release(struct dma_channel *channel) 205 202 { 206 203 struct ux500_dma_channel *ux500_channel = channel->private_data; 204 + struct musb *musb = ux500_channel->controller->private_data; 207 205 208 - DBG(7, "channel=%d\n", ux500_channel->ch_num); 206 + dev_dbg(musb->controller, "channel=%d\n", ux500_channel->ch_num); 209 207 210 208 if (ux500_channel->is_allocated) { 211 209 ux500_channel->is_allocated = 0; ··· 256 252 void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs; 257 253 u16 csr; 258 254 259 - DBG(4, "channel=%d, is_tx=%d\n", ux500_channel->ch_num, 260 - ux500_channel->is_tx); 255 + dev_dbg(musb->controller, "channel=%d, is_tx=%d\n", 256 + ux500_channel->ch_num, ux500_channel->is_tx); 261 257 262 258 if (channel->status == MUSB_DMA_STATUS_BUSY) { 263 259 if (ux500_channel->is_tx) {
+19 -1
drivers/usb/serial/ftdi_sio.c
··· 101 101 static int ftdi_mtxorb_hack_setup(struct usb_serial *serial); 102 102 static int ftdi_NDI_device_setup(struct usb_serial *serial); 103 103 static int ftdi_stmclite_probe(struct usb_serial *serial); 104 + static int ftdi_8u2232c_probe(struct usb_serial *serial); 104 105 static void ftdi_USB_UIRT_setup(struct ftdi_private *priv); 105 106 static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv); 106 107 ··· 127 126 128 127 static struct ftdi_sio_quirk ftdi_stmclite_quirk = { 129 128 .probe = ftdi_stmclite_probe, 129 + }; 130 + 131 + static struct ftdi_sio_quirk ftdi_8u2232c_quirk = { 132 + .probe = ftdi_8u2232c_probe, 130 133 }; 131 134 132 135 /* ··· 183 178 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) }, 184 179 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) }, 185 180 { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) }, 186 - { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) }, 181 + { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) , 182 + .driver_info = (kernel_ulong_t)&ftdi_8u2232c_quirk }, 187 183 { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) }, 188 184 { USB_DEVICE(FTDI_VID, FTDI_232H_PID) }, 189 185 { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) }, ··· 1739 1733 "Ignoring serial port reserved for JTAG\n"); 1740 1734 return -ENODEV; 1741 1735 } 1736 + 1737 + return 0; 1738 + } 1739 + 1740 + static int ftdi_8u2232c_probe(struct usb_serial *serial) 1741 + { 1742 + struct usb_device *udev = serial->dev; 1743 + 1744 + dbg("%s", __func__); 1745 + 1746 + if (strcmp(udev->manufacturer, "CALAO Systems") == 0) 1747 + return ftdi_jtag_probe(serial); 1742 1748 1743 1749 return 0; 1744 1750 }
+101 -3
drivers/usb/serial/option.c
··· 148 148 #define HUAWEI_PRODUCT_K4505 0x1464 149 149 #define HUAWEI_PRODUCT_K3765 0x1465 150 150 #define HUAWEI_PRODUCT_E14AC 0x14AC 151 + #define HUAWEI_PRODUCT_K3806 0x14AE 152 + #define HUAWEI_PRODUCT_K4605 0x14C6 151 153 #define HUAWEI_PRODUCT_K3770 0x14C9 152 154 #define HUAWEI_PRODUCT_K3771 0x14CA 153 155 #define HUAWEI_PRODUCT_K4510 0x14CB ··· 418 416 #define SAMSUNG_VENDOR_ID 0x04e8 419 417 #define SAMSUNG_PRODUCT_GT_B3730 0x6889 420 418 419 + /* YUGA products www.yuga-info.com*/ 420 + #define YUGA_VENDOR_ID 0x257A 421 + #define YUGA_PRODUCT_CEM600 0x1601 422 + #define YUGA_PRODUCT_CEM610 0x1602 423 + #define YUGA_PRODUCT_CEM500 0x1603 424 + #define YUGA_PRODUCT_CEM510 0x1604 425 + #define YUGA_PRODUCT_CEM800 0x1605 426 + #define YUGA_PRODUCT_CEM900 0x1606 427 + 428 + #define YUGA_PRODUCT_CEU818 0x1607 429 + #define YUGA_PRODUCT_CEU816 0x1608 430 + #define YUGA_PRODUCT_CEU828 0x1609 431 + #define YUGA_PRODUCT_CEU826 0x160A 432 + #define YUGA_PRODUCT_CEU518 0x160B 433 + #define YUGA_PRODUCT_CEU516 0x160C 434 + #define YUGA_PRODUCT_CEU528 0x160D 435 + #define YUGA_PRODUCT_CEU526 0x160F 436 + 437 + #define YUGA_PRODUCT_CWM600 0x2601 438 + #define YUGA_PRODUCT_CWM610 0x2602 439 + #define YUGA_PRODUCT_CWM500 0x2603 440 + #define YUGA_PRODUCT_CWM510 0x2604 441 + #define YUGA_PRODUCT_CWM800 0x2605 442 + #define YUGA_PRODUCT_CWM900 0x2606 443 + 444 + #define YUGA_PRODUCT_CWU718 0x2607 445 + #define YUGA_PRODUCT_CWU716 0x2608 446 + #define YUGA_PRODUCT_CWU728 0x2609 447 + #define YUGA_PRODUCT_CWU726 0x260A 448 + #define YUGA_PRODUCT_CWU518 0x260B 449 + #define YUGA_PRODUCT_CWU516 0x260C 450 + #define YUGA_PRODUCT_CWU528 0x260D 451 + #define YUGA_PRODUCT_CWU526 0x260F 452 + 453 + #define YUGA_PRODUCT_CLM600 0x2601 454 + #define YUGA_PRODUCT_CLM610 0x2602 455 + #define YUGA_PRODUCT_CLM500 0x2603 456 + #define YUGA_PRODUCT_CLM510 0x2604 457 + #define YUGA_PRODUCT_CLM800 0x2605 458 + #define YUGA_PRODUCT_CLM900 0x2606 459 + 460 + #define YUGA_PRODUCT_CLU718 0x2607 
461 + #define YUGA_PRODUCT_CLU716 0x2608 462 + #define YUGA_PRODUCT_CLU728 0x2609 463 + #define YUGA_PRODUCT_CLU726 0x260A 464 + #define YUGA_PRODUCT_CLU518 0x260B 465 + #define YUGA_PRODUCT_CLU516 0x260C 466 + #define YUGA_PRODUCT_CLU528 0x260D 467 + #define YUGA_PRODUCT_CLU526 0x260F 468 + 421 469 /* some devices interfaces need special handling due to a number of reasons */ 422 470 enum option_blacklist_reason { 423 471 OPTION_BLACKLIST_NONE = 0, ··· 603 551 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, 604 552 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, 605 553 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, 554 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) }, 555 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff) }, 606 556 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, 607 557 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, 608 558 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, ··· 1059 1005 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ 1060 1006 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ 1061 1007 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ 1008 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, 1009 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) }, 1010 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM500) }, 1011 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM510) }, 1012 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM800) }, 1013 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM900) }, 1014 + { 
USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU818) }, 1015 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU816) }, 1016 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU828) }, 1017 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU826) }, 1018 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU518) }, 1019 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU516) }, 1020 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU528) }, 1021 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU526) }, 1022 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM600) }, 1023 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM610) }, 1024 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM500) }, 1025 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM510) }, 1026 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM800) }, 1027 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM900) }, 1028 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU718) }, 1029 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU716) }, 1030 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU728) }, 1031 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU726) }, 1032 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU518) }, 1033 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU516) }, 1034 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU528) }, 1035 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU526) }, 1036 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM600) }, 1037 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM610) }, 1038 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM500) }, 1039 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM510) }, 1040 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM800) }, 1041 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM900) }, 1042 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU718) }, 1043 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU716) }, 1044 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU728) }, 1045 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU726) }, 1046 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU518) }, 1047 + { 
USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, 1048 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, 1049 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, 1062 1050 { } /* Terminating entry */ 1063 1051 }; 1064 1052 MODULE_DEVICE_TABLE(usb, option_ids); ··· 1230 1134 serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff) 1231 1135 return -ENODEV; 1232 1136 1233 - /* Don't bind network interfaces on Huawei K3765 & K4505 */ 1137 + /* Don't bind network interfaces on Huawei K3765, K4505 & K4605 */ 1234 1138 if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID && 1235 1139 (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 || 1236 - serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505) && 1237 - serial->interface->cur_altsetting->desc.bInterfaceNumber == 1) 1140 + serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505 || 1141 + serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4605) && 1142 + (serial->interface->cur_altsetting->desc.bInterfaceNumber == 1 || 1143 + serial->interface->cur_altsetting->desc.bInterfaceNumber == 2)) 1238 1144 return -ENODEV; 1239 1145 1240 1146 /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */
+1 -1
drivers/video/backlight/adp8870_bl.c
··· 1009 1009 MODULE_LICENSE("GPL v2"); 1010 1010 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); 1011 1011 MODULE_DESCRIPTION("ADP8870 Backlight driver"); 1012 - MODULE_ALIAS("platform:adp8870-backlight"); 1012 + MODULE_ALIAS("i2c:adp8870-backlight");
+1 -1
drivers/video/backlight/ep93xx_bl.c
··· 11 11 * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors. 12 12 */ 13 13 14 - 14 + #include <linux/module.h> 15 15 #include <linux/platform_device.h> 16 16 #include <linux/io.h> 17 17 #include <linux/fb.h>
+9
drivers/video/backlight/pwm_bl.c
··· 28 28 unsigned int lth_brightness; 29 29 int (*notify)(struct device *, 30 30 int brightness); 31 + void (*notify_after)(struct device *, 32 + int brightness); 31 33 int (*check_fb)(struct device *, struct fb_info *); 32 34 }; 33 35 ··· 57 55 pwm_config(pb->pwm, brightness, pb->period); 58 56 pwm_enable(pb->pwm); 59 57 } 58 + 59 + if (pb->notify_after) 60 + pb->notify_after(pb->dev, brightness); 61 + 60 62 return 0; 61 63 } 62 64 ··· 111 105 112 106 pb->period = data->pwm_period_ns; 113 107 pb->notify = data->notify; 108 + pb->notify_after = data->notify_after; 114 109 pb->check_fb = data->check_fb; 115 110 pb->lth_brightness = data->lth_brightness * 116 111 (data->pwm_period_ns / data->max_brightness); ··· 179 172 pb->notify(pb->dev, 0); 180 173 pwm_config(pb->pwm, 0, pb->period); 181 174 pwm_disable(pb->pwm); 175 + if (pb->notify_after) 176 + pb->notify_after(pb->dev, 0); 182 177 return 0; 183 178 } 184 179
+2 -2
drivers/w1/masters/ds2490.c
··· 1 1 /* 2 2 * dscore.c 3 3 * 4 - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 5 5 * 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify ··· 1024 1024 module_exit(ds_fini); 1025 1025 1026 1026 MODULE_LICENSE("GPL"); 1027 - MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); 1027 + MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); 1028 1028 MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)");
+2 -2
drivers/w1/masters/matrox_w1.c
··· 1 1 /* 2 2 * matrox_w1.c 3 3 * 4 - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 5 5 * 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify ··· 39 39 #include "../w1_log.h" 40 40 41 41 MODULE_LICENSE("GPL"); 42 - MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); 42 + MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); 43 43 MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio)."); 44 44 45 45 static struct pci_device_id matrox_w1_tbl[] = {
+1 -1
drivers/w1/slaves/w1_ds2408.c
··· 373 373 static void w1_f29_remove_slave(struct w1_slave *sl) 374 374 { 375 375 int i; 376 - for (i = NB_SYSFS_BIN_FILES; i <= 0; --i) 376 + for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i) 377 377 sysfs_remove_bin_file(&sl->dev.kobj, 378 378 &(w1_f29_sysfs_bin_files[i])); 379 379 }
+2 -2
drivers/w1/slaves/w1_smem.c
··· 1 1 /* 2 2 * w1_smem.c 3 3 * 4 - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 5 5 * 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify ··· 32 32 #include "../w1_family.h" 33 33 34 34 MODULE_LICENSE("GPL"); 35 - MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); 35 + MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); 36 36 MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family."); 37 37 38 38 static struct w1_family w1_smem_family_01 = {
+2 -2
drivers/w1/slaves/w1_therm.c
··· 1 1 /* 2 2 * w1_therm.c 3 3 * 4 - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 5 5 * 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify ··· 34 34 #include "../w1_family.h" 35 35 36 36 MODULE_LICENSE("GPL"); 37 - MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); 37 + MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); 38 38 MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family."); 39 39 40 40 /* Allow the strong pullup to be disabled, but default to enabled.
+2 -2
drivers/w1/w1.c
··· 1 1 /* 2 2 * w1.c 3 3 * 4 - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 5 5 * 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify ··· 42 42 #include "w1_netlink.h" 43 43 44 44 MODULE_LICENSE("GPL"); 45 - MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); 45 + MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); 46 46 MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol."); 47 47 48 48 static int w1_timeout = 10;
+1 -1
drivers/w1/w1.h
··· 1 1 /* 2 2 * w1.h 3 3 * 4 - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 5 5 * 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify
+1 -1
drivers/w1/w1_family.c
··· 1 1 /* 2 2 * w1_family.c 3 3 * 4 - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 5 5 * 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify
+1 -1
drivers/w1/w1_family.h
··· 1 1 /* 2 2 * w1_family.h 3 3 * 4 - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 5 5 * 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify
+1 -1
drivers/w1/w1_int.c
··· 1 1 /* 2 2 * w1_int.c 3 3 * 4 - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 5 5 * 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify
+1 -1
drivers/w1/w1_int.h
··· 1 1 /* 2 2 * w1_int.h 3 3 * 4 - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 5 5 * 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify
+1 -1
drivers/w1/w1_io.c
··· 1 1 /* 2 2 * w1_io.c 3 3 * 4 - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 5 5 * 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify
+1 -1
drivers/w1/w1_log.h
··· 1 1 /* 2 2 * w1_log.h 3 3 * 4 - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 5 5 * 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify
+1 -1
drivers/w1/w1_netlink.c
··· 1 1 /* 2 2 * w1_netlink.c 3 3 * 4 - * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 + * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net> 5 5 * 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify
+1 -1
drivers/w1/w1_netlink.h
··· 1 1 /* 2 2 * w1_netlink.h 3 3 * 4 - * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 + * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net> 5 5 * 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify
-5
fs/compat.c
··· 1675 1675 } 1676 1676 #endif /* HAVE_SET_RESTORE_SIGMASK */ 1677 1677 1678 - long asmlinkage compat_sys_nfsservctl(int cmd, void *notused, void *notused2) 1679 - { 1680 - return sys_ni_syscall(); 1681 - } 1682 - 1683 1678 #ifdef CONFIG_EPOLL 1684 1679 1685 1680 #ifdef HAVE_SET_RESTORE_SIGMASK
+1
fs/hugetlbfs/inode.c
··· 491 491 inode->i_op = &page_symlink_inode_operations; 492 492 break; 493 493 } 494 + lockdep_annotate_inode_mutex_key(inode); 494 495 } 495 496 return inode; 496 497 }
+15 -9
fs/inode.c
··· 848 848 } 849 849 EXPORT_SYMBOL(new_inode); 850 850 851 - /** 852 - * unlock_new_inode - clear the I_NEW state and wake up any waiters 853 - * @inode: new inode to unlock 854 - * 855 - * Called when the inode is fully initialised to clear the new state of the 856 - * inode and wake up anyone waiting for the inode to finish initialisation. 857 - */ 858 - void unlock_new_inode(struct inode *inode) 859 - { 860 851 #ifdef CONFIG_DEBUG_LOCK_ALLOC 852 + void lockdep_annotate_inode_mutex_key(struct inode *inode) 853 + { 861 854 if (S_ISDIR(inode->i_mode)) { 862 855 struct file_system_type *type = inode->i_sb->s_type; 863 856 ··· 866 873 &type->i_mutex_dir_key); 867 874 } 868 875 } 876 + } 877 + EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key); 869 878 #endif 879 + 880 + /** 881 + * unlock_new_inode - clear the I_NEW state and wake up any waiters 882 + * @inode: new inode to unlock 883 + * 884 + * Called when the inode is fully initialised to clear the new state of the 885 + * inode and wake up anyone waiting for the inode to finish initialisation. 886 + */ 887 + void unlock_new_inode(struct inode *inode) 888 + { 889 + lockdep_annotate_inode_mutex_key(inode); 870 890 spin_lock(&inode->i_lock); 871 891 WARN_ON(!(inode->i_state & I_NEW)); 872 892 inode->i_state &= ~I_NEW;
+1 -1
include/asm-generic/unistd.h
··· 143 143 144 144 /* fs/nfsctl.c */ 145 145 #define __NR_nfsservctl 42 146 - __SC_COMP(__NR_nfsservctl, sys_nfsservctl, compat_sys_nfsservctl) 146 + __SYSCALL(__NR_nfsservctl, sys_ni_syscall) 147 147 148 148 /* fs/open.c */ 149 149 #define __NR3264_statfs 43
-1
include/linux/compat.h
··· 438 438 struct compat_timespec __user *tsp, 439 439 const compat_sigset_t __user *sigmask, 440 440 compat_size_t sigsetsize); 441 - asmlinkage long compat_sys_nfsservctl(int cmd, void *notused, void *notused2); 442 441 asmlinkage long compat_sys_signalfd4(int ufd, 443 442 const compat_sigset_t __user *sigmask, 444 443 compat_size_t sigsetsize, int flags);
+1 -1
include/linux/connector.h
··· 1 1 /* 2 2 * connector.h 3 3 * 4 - * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 + * 2004-2005 Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> 5 5 * All rights reserved. 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify
+5
include/linux/fs.h
··· 2318 2318 extern struct inode * iget_locked(struct super_block *, unsigned long); 2319 2319 extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); 2320 2320 extern int insert_inode_locked(struct inode *); 2321 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 2322 + extern void lockdep_annotate_inode_mutex_key(struct inode *inode); 2323 + #else 2324 + static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { }; 2325 + #endif 2321 2326 extern void unlock_new_inode(struct inode *); 2322 2327 extern unsigned int get_next_ino(void); 2323 2328
+1
include/linux/personality.h
··· 22 22 * These occupy the top three bytes. 23 23 */ 24 24 enum { 25 + UNAME26 = 0x0020000, 25 26 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */ 26 27 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors 27 28 * (signal handling)
+1
include/linux/pwm_backlight.h
··· 14 14 unsigned int pwm_period_ns; 15 15 int (*init)(struct device *dev); 16 16 int (*notify)(struct device *dev, int brightness); 17 + void (*notify_after)(struct device *dev, int brightness); 17 18 void (*exit)(struct device *dev); 18 19 int (*check_fb)(struct device *dev, struct fb_info *info); 19 20 };
+9 -9
include/linux/rio_regs.h
··· 36 36 #define RIO_PEF_PROCESSOR 0x20000000 /* [I] Processor */ 37 37 #define RIO_PEF_SWITCH 0x10000000 /* [I] Switch */ 38 38 #define RIO_PEF_MULTIPORT 0x08000000 /* [VI, 2.1] Multiport */ 39 - #define RIO_PEF_INB_MBOX 0x00f00000 /* [II] Mailboxes */ 40 - #define RIO_PEF_INB_MBOX0 0x00800000 /* [II] Mailbox 0 */ 41 - #define RIO_PEF_INB_MBOX1 0x00400000 /* [II] Mailbox 1 */ 42 - #define RIO_PEF_INB_MBOX2 0x00200000 /* [II] Mailbox 2 */ 43 - #define RIO_PEF_INB_MBOX3 0x00100000 /* [II] Mailbox 3 */ 44 - #define RIO_PEF_INB_DOORBELL 0x00080000 /* [II] Doorbells */ 39 + #define RIO_PEF_INB_MBOX 0x00f00000 /* [II, <= 1.2] Mailboxes */ 40 + #define RIO_PEF_INB_MBOX0 0x00800000 /* [II, <= 1.2] Mailbox 0 */ 41 + #define RIO_PEF_INB_MBOX1 0x00400000 /* [II, <= 1.2] Mailbox 1 */ 42 + #define RIO_PEF_INB_MBOX2 0x00200000 /* [II, <= 1.2] Mailbox 2 */ 43 + #define RIO_PEF_INB_MBOX3 0x00100000 /* [II, <= 1.2] Mailbox 3 */ 44 + #define RIO_PEF_INB_DOORBELL 0x00080000 /* [II, <= 1.2] Doorbells */ 45 45 #define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */ 46 46 #define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */ 47 47 #define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */ ··· 102 102 #define RIO_SWITCH_RT_LIMIT 0x34 /* [III, 1.3] Switch Route Table Destination ID Limit CAR */ 103 103 #define RIO_RT_MAX_DESTID 0x0000ffff 104 104 105 - #define RIO_MBOX_CSR 0x40 /* [II] Mailbox CSR */ 105 + #define RIO_MBOX_CSR 0x40 /* [II, <= 1.2] Mailbox CSR */ 106 106 #define RIO_MBOX0_AVAIL 0x80000000 /* [II] Mbox 0 avail */ 107 107 #define RIO_MBOX0_FULL 0x40000000 /* [II] Mbox 0 full */ 108 108 #define RIO_MBOX0_EMPTY 0x20000000 /* [II] Mbox 0 empty */ ··· 128 128 #define RIO_MBOX3_FAIL 0x00000008 /* [II] Mbox 3 fail */ 129 129 #define RIO_MBOX3_ERROR 0x00000004 /* [II] Mbox 3 error */ 130 130 131 - #define RIO_WRITE_PORT_CSR 0x44 /* [I] Write Port CSR */ 132 - #define RIO_DOORBELL_CSR 0x44 /* [II] Doorbell CSR */ 131 + #define 
RIO_WRITE_PORT_CSR 0x44 /* [I, <= 1.2] Write Port CSR */ 132 + #define RIO_DOORBELL_CSR 0x44 /* [II, <= 1.2] Doorbell CSR */ 133 133 #define RIO_DOORBELL_AVAIL 0x80000000 /* [II] Doorbell avail */ 134 134 #define RIO_DOORBELL_FULL 0x40000000 /* [II] Doorbell full */ 135 135 #define RIO_DOORBELL_EMPTY 0x20000000 /* [II] Doorbell empty */
-3
include/linux/syscalls.h
··· 702 702 asmlinkage long sys_sysinfo(struct sysinfo __user *info); 703 703 asmlinkage long sys_sysfs(int option, 704 704 unsigned long arg1, unsigned long arg2); 705 - asmlinkage long sys_nfsservctl(int cmd, 706 - struct nfsctl_arg __user *arg, 707 - void __user *res); 708 705 asmlinkage long sys_syslog(int type, char __user *buf, int len); 709 706 asmlinkage long sys_uselib(const char __user *library); 710 707 asmlinkage long sys_ni_syscall(void);
+2
include/linux/tty.h
··· 421 421 extern void tty_throttle(struct tty_struct *tty); 422 422 extern void tty_unthrottle(struct tty_struct *tty); 423 423 extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws); 424 + extern void tty_driver_remove_tty(struct tty_driver *driver, 425 + struct tty_struct *tty); 424 426 extern void tty_shutdown(struct tty_struct *tty); 425 427 extern void tty_free_termios(struct tty_struct *tty); 426 428 extern int is_current_pgrp_orphaned(void);
+3
include/linux/tty_driver.h
··· 47 47 * 48 48 * This routine is called synchronously when a particular tty device 49 49 * is closed for the last time freeing up the resources. 50 + * Note that tty_shutdown() is not called if ops->shutdown is defined. 51 + * This means one is responsible to take care of calling ops->remove (e.g. 52 + * via tty_driver_remove_tty) and releasing tty->termios. 50 53 * 51 54 * 52 55 * void (*cleanup)(struct tty_struct * tty);
-11
include/linux/writeback.h
··· 12 12 * 13 13 * (thresh - thresh/DIRTY_FULL_SCOPE, thresh) 14 14 * 15 - * The 1/16 region above the global dirty limit will be put to maximum pauses: 16 - * 17 - * (limit, limit + limit/DIRTY_MAXPAUSE_AREA) 18 - * 19 - * The 1/16 region above the max-pause region, dirty exceeded bdi's will be put 20 - * to loops: 21 - * 22 - * (limit + limit/DIRTY_MAXPAUSE_AREA, limit + limit/DIRTY_PASSGOOD_AREA) 23 - * 24 15 * Further beyond, all dirtier tasks will enter a loop waiting (possibly long 25 16 * time) for the dirty pages to drop, unless written enough pages. 26 17 * ··· 22 31 */ 23 32 #define DIRTY_SCOPE 8 24 33 #define DIRTY_FULL_SCOPE (DIRTY_SCOPE / 2) 25 - #define DIRTY_MAXPAUSE_AREA 16 26 - #define DIRTY_PASSGOOD_AREA 8 27 34 28 35 /* 29 36 * 4MB minimal write chunk size
+1
include/net/transp_v6.h
··· 39 39 struct sk_buff *skb); 40 40 41 41 extern int datagram_send_ctl(struct net *net, 42 + struct sock *sk, 42 43 struct msghdr *msg, 43 44 struct flowi6 *fl6, 44 45 struct ipv6_txoptions *opt,
+6
include/target/target_core_fabric_ops.h
··· 27 27 int (*tpg_check_demo_mode_cache)(struct se_portal_group *); 28 28 int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *); 29 29 int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *); 30 + /* 31 + * Optionally used by fabrics to allow demo-mode login, but not 32 + * expose any TPG LUNs, and return 'not connected' in standard 33 + * inquiry response 34 + */ 35 + int (*tpg_check_demo_mode_login_only)(struct se_portal_group *); 30 36 struct se_node_acl *(*tpg_alloc_fabric_acl)( 31 37 struct se_portal_group *); 32 38 void (*tpg_release_fabric_acl)(struct se_portal_group *,
+1 -1
kernel/printk.c
··· 1604 1604 struct console *con; 1605 1605 1606 1606 for_each_console(con) { 1607 - if (con->flags & CON_BOOT) { 1607 + if (!keep_bootcon && con->flags & CON_BOOT) { 1608 1608 printk(KERN_INFO "turn off boot console %s%d\n", 1609 1609 con->name, con->index); 1610 1610 unregister_console(con);
+38
kernel/sys.c
··· 37 37 #include <linux/fs_struct.h> 38 38 #include <linux/gfp.h> 39 39 #include <linux/syscore_ops.h> 40 + #include <linux/version.h> 41 + #include <linux/ctype.h> 40 42 41 43 #include <linux/compat.h> 42 44 #include <linux/syscalls.h> ··· 46 44 #include <linux/user_namespace.h> 47 45 48 46 #include <linux/kmsg_dump.h> 47 + /* Move somewhere else to avoid recompiling? */ 48 + #include <generated/utsrelease.h> 49 49 50 50 #include <asm/uaccess.h> 51 51 #include <asm/io.h> ··· 1165 1161 #define override_architecture(name) 0 1166 1162 #endif 1167 1163 1164 + /* 1165 + * Work around broken programs that cannot handle "Linux 3.0". 1166 + * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 1167 + */ 1168 + static int override_release(char __user *release, int len) 1169 + { 1170 + int ret = 0; 1171 + char buf[len]; 1172 + 1173 + if (current->personality & UNAME26) { 1174 + char *rest = UTS_RELEASE; 1175 + int ndots = 0; 1176 + unsigned v; 1177 + 1178 + while (*rest) { 1179 + if (*rest == '.' 
&& ++ndots >= 3) 1180 + break; 1181 + if (!isdigit(*rest) && *rest != '.') 1182 + break; 1183 + rest++; 1184 + } 1185 + v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40; 1186 + snprintf(buf, len, "2.6.%u%s", v, rest); 1187 + ret = copy_to_user(release, buf, len); 1188 + } 1189 + return ret; 1190 + } 1191 + 1168 1192 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) 1169 1193 { 1170 1194 int errno = 0; ··· 1202 1170 errno = -EFAULT; 1203 1171 up_read(&uts_sem); 1204 1172 1173 + if (!errno && override_release(name->release, sizeof(name->release))) 1174 + errno = -EFAULT; 1205 1175 if (!errno && override_architecture(name)) 1206 1176 errno = -EFAULT; 1207 1177 return errno; ··· 1225 1191 error = -EFAULT; 1226 1192 up_read(&uts_sem); 1227 1193 1194 + if (!error && override_release(name->release, sizeof(name->release))) 1195 + error = -EFAULT; 1228 1196 if (!error && override_architecture(name)) 1229 1197 error = -EFAULT; 1230 1198 return error; ··· 1260 1224 up_read(&uts_sem); 1261 1225 1262 1226 if (!error && override_architecture(name)) 1227 + error = -EFAULT; 1228 + if (!error && override_release(name->release, sizeof(name->release))) 1263 1229 error = -EFAULT; 1264 1230 return error ? -EFAULT : 0; 1265 1231 }
-1
kernel/sys_ni.c
··· 16 16 return -ENOSYS; 17 17 } 18 18 19 - cond_syscall(sys_nfsservctl); 20 19 cond_syscall(sys_quotactl); 21 20 cond_syscall(sys32_quotactl); 22 21 cond_syscall(sys_acct);
+7 -19
mm/memcontrol.c
··· 1841 1841 */ 1842 1842 static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) 1843 1843 { 1844 - int lock_count = -1; 1845 1844 struct mem_cgroup *iter, *failed = NULL; 1846 1845 bool cond = true; 1847 1846 1848 1847 for_each_mem_cgroup_tree_cond(iter, mem, cond) { 1849 - bool locked = iter->oom_lock; 1850 - 1851 - iter->oom_lock = true; 1852 - if (lock_count == -1) 1853 - lock_count = iter->oom_lock; 1854 - else if (lock_count != locked) { 1848 + if (iter->oom_lock) { 1855 1849 /* 1856 1850 * this subtree of our hierarchy is already locked 1857 1851 * so we cannot give a lock. 1858 1852 */ 1859 - lock_count = 0; 1860 1853 failed = iter; 1861 1854 cond = false; 1862 - } 1855 + } else 1856 + iter->oom_lock = true; 1863 1857 } 1864 1858 1865 1859 if (!failed) 1866 - goto done; 1860 + return true; 1867 1861 1868 1862 /* 1869 1863 * OK, we failed to lock the whole subtree so we have to clean up ··· 1871 1877 } 1872 1878 iter->oom_lock = false; 1873 1879 } 1874 - done: 1875 - return lock_count; 1880 + return false; 1876 1881 } 1877 1882 1878 1883 /* ··· 2162 2169 2163 2170 /* Notify other cpus that system-wide "drain" is running */ 2164 2171 get_online_cpus(); 2165 - /* 2166 - * Get a hint for avoiding draining charges on the current cpu, 2167 - * which must be exhausted by our charging. It is not required that 2168 - * this be a precise check, so we use raw_smp_processor_id() instead of 2169 - * getcpu()/putcpu(). 2170 - */ 2171 - curcpu = raw_smp_processor_id(); 2172 + curcpu = get_cpu(); 2172 2173 for_each_online_cpu(cpu) { 2173 2174 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2174 2175 struct mem_cgroup *mem; ··· 2179 2192 schedule_work_on(cpu, &stock->work); 2180 2193 } 2181 2194 } 2195 + put_cpu(); 2182 2196 2183 2197 if (!sync) 2184 2198 goto out;
+2 -13
mm/page-writeback.c
··· 754 754 * 200ms is typically more than enough to curb heavy dirtiers; 755 755 * (b) the pause time limit makes the dirtiers more responsive. 756 756 */ 757 - if (nr_dirty < dirty_thresh + 758 - dirty_thresh / DIRTY_MAXPAUSE_AREA && 757 + if (nr_dirty < dirty_thresh && 758 + bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 && 759 759 time_after(jiffies, start_time + MAX_PAUSE)) 760 - break; 761 - /* 762 - * pass-good area. When some bdi gets blocked (eg. NFS server 763 - * not responding), or write bandwidth dropped dramatically due 764 - * to concurrent reads, or dirty threshold suddenly dropped and 765 - * the dirty pages cannot be brought down anytime soon (eg. on 766 - * slow USB stick), at least let go of the good bdi's. 767 - */ 768 - if (nr_dirty < dirty_thresh + 769 - dirty_thresh / DIRTY_PASSGOOD_AREA && 770 - bdi_dirty < bdi_thresh) 771 760 break; 772 761 773 762 /*
+11 -8
mm/vmscan.c
··· 2283 2283 .mem_cgroup = mem, 2284 2284 .memcg_record = rec, 2285 2285 }; 2286 - unsigned long start, end; 2286 + ktime_t start, end; 2287 2287 2288 2288 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2289 2289 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); ··· 2292 2292 sc.may_writepage, 2293 2293 sc.gfp_mask); 2294 2294 2295 - start = sched_clock(); 2295 + start = ktime_get(); 2296 2296 /* 2297 2297 * NOTE: Although we can get the priority field, using it 2298 2298 * here is not a good idea, since it limits the pages we can scan. ··· 2301 2301 * the priority and make it zero. 2302 2302 */ 2303 2303 shrink_zone(0, zone, &sc); 2304 - end = sched_clock(); 2304 + end = ktime_get(); 2305 2305 2306 2306 if (rec) 2307 - rec->elapsed += end - start; 2307 + rec->elapsed += ktime_to_ns(ktime_sub(end, start)); 2308 2308 *scanned = sc.nr_scanned; 2309 2309 2310 2310 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); ··· 2319 2319 { 2320 2320 struct zonelist *zonelist; 2321 2321 unsigned long nr_reclaimed; 2322 - unsigned long start, end; 2322 + ktime_t start, end; 2323 2323 int nid; 2324 2324 struct scan_control sc = { 2325 2325 .may_writepage = !laptop_mode, ··· 2337 2337 .gfp_mask = sc.gfp_mask, 2338 2338 }; 2339 2339 2340 - start = sched_clock(); 2340 + start = ktime_get(); 2341 2341 /* 2342 2342 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't 2343 2343 * take care of from where we get pages. 
So the node where we start the ··· 2352 2352 sc.gfp_mask); 2353 2353 2354 2354 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 2355 - end = sched_clock(); 2355 + end = ktime_get(); 2356 2356 if (rec) 2357 - rec->elapsed += end - start; 2357 + rec->elapsed += ktime_to_ns(ktime_sub(end, start)); 2358 2358 2359 2359 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 2360 2360 ··· 2529 2529 high_wmark_pages(zone), 0, 0)) { 2530 2530 end_zone = i; 2531 2531 break; 2532 + } else { 2533 + /* If balanced, clear the congested flag */ 2534 + zone_clear_flag(zone, ZONE_CONGESTED); 2532 2535 } 2533 2536 } 2534 2537 if (i < 0)
+1 -1
net/bridge/netfilter/Kconfig
··· 4 4 5 5 menuconfig BRIDGE_NF_EBTABLES 6 6 tristate "Ethernet Bridge tables (ebtables) support" 7 - depends on BRIDGE && BRIDGE_NETFILTER 7 + depends on BRIDGE && NETFILTER 8 8 select NETFILTER_XTABLES 9 9 help 10 10 ebtables is a general, extensible frame/packet identification
+6 -1
net/ipv4/af_inet.c
··· 466 466 goto out; 467 467 468 468 if (addr->sin_family != AF_INET) { 469 + /* Compatibility games : accept AF_UNSPEC (mapped to AF_INET) 470 + * only if s_addr is INADDR_ANY. 471 + */ 469 472 err = -EAFNOSUPPORT; 470 - goto out; 473 + if (addr->sin_family != AF_UNSPEC || 474 + addr->sin_addr.s_addr != htonl(INADDR_ANY)) 475 + goto out; 471 476 } 472 477 473 478 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
+5 -7
net/ipv4/netfilter/ip_queue.c
··· 218 218 return skb; 219 219 220 220 nlmsg_failure: 221 + kfree_skb(skb); 221 222 *errp = -EINVAL; 222 223 printk(KERN_ERR "ip_queue: error creating packet message\n"); 223 224 return NULL; ··· 314 313 { 315 314 struct nf_queue_entry *entry; 316 315 317 - if (vmsg->value > NF_MAX_VERDICT) 316 + if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN) 318 317 return -EINVAL; 319 318 320 319 entry = ipq_find_dequeue_entry(vmsg->id); ··· 359 358 break; 360 359 361 360 case IPQM_VERDICT: 362 - if (pmsg->msg.verdict.value > NF_MAX_VERDICT) 363 - status = -EINVAL; 364 - else 365 - status = ipq_set_verdict(&pmsg->msg.verdict, 366 - len - sizeof(*pmsg)); 367 - break; 361 + status = ipq_set_verdict(&pmsg->msg.verdict, 362 + len - sizeof(*pmsg)); 363 + break; 368 364 default: 369 365 status = -EINVAL; 370 366 }
+3 -2
net/ipv6/datagram.c
··· 599 599 return 0; 600 600 } 601 601 602 - int datagram_send_ctl(struct net *net, 602 + int datagram_send_ctl(struct net *net, struct sock *sk, 603 603 struct msghdr *msg, struct flowi6 *fl6, 604 604 struct ipv6_txoptions *opt, 605 605 int *hlimit, int *tclass, int *dontfrag) ··· 658 658 659 659 if (addr_type != IPV6_ADDR_ANY) { 660 660 int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; 661 - if (!ipv6_chk_addr(net, &src_info->ipi6_addr, 661 + if (!inet_sk(sk)->transparent && 662 + !ipv6_chk_addr(net, &src_info->ipi6_addr, 662 663 strict ? dev : NULL, 0)) 663 664 err = -EINVAL; 664 665 else
+4 -4
net/ipv6/ip6_flowlabel.c
··· 322 322 } 323 323 324 324 static struct ip6_flowlabel * 325 - fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval, 326 - int optlen, int *err_p) 325 + fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, 326 + char __user *optval, int optlen, int *err_p) 327 327 { 328 328 struct ip6_flowlabel *fl = NULL; 329 329 int olen; ··· 360 360 msg.msg_control = (void*)(fl->opt+1); 361 361 memset(&flowi6, 0, sizeof(flowi6)); 362 362 363 - err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk, 363 + err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk, 364 364 &junk, &junk); 365 365 if (err) 366 366 goto done; ··· 528 528 if (freq.flr_label & ~IPV6_FLOWLABEL_MASK) 529 529 return -EINVAL; 530 530 531 - fl = fl_create(net, &freq, optval, optlen, &err); 531 + fl = fl_create(net, sk, &freq, optval, optlen, &err); 532 532 if (fl == NULL) 533 533 return err; 534 534 sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
+1 -1
net/ipv6/ipv6_sockglue.c
··· 475 475 msg.msg_controllen = optlen; 476 476 msg.msg_control = (void*)(opt+1); 477 477 478 - retv = datagram_send_ctl(net, &msg, &fl6, opt, &junk, &junk, 478 + retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, 479 479 &junk); 480 480 if (retv) 481 481 goto done;
+5 -7
net/ipv6/netfilter/ip6_queue.c
··· 218 218 return skb; 219 219 220 220 nlmsg_failure: 221 + kfree_skb(skb); 221 222 *errp = -EINVAL; 222 223 printk(KERN_ERR "ip6_queue: error creating packet message\n"); 223 224 return NULL; ··· 314 313 { 315 314 struct nf_queue_entry *entry; 316 315 317 - if (vmsg->value > NF_MAX_VERDICT) 316 + if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN) 318 317 return -EINVAL; 319 318 320 319 entry = ipq_find_dequeue_entry(vmsg->id); ··· 359 358 break; 360 359 361 360 case IPQM_VERDICT: 362 - if (pmsg->msg.verdict.value > NF_MAX_VERDICT) 363 - status = -EINVAL; 364 - else 365 - status = ipq_set_verdict(&pmsg->msg.verdict, 366 - len - sizeof(*pmsg)); 367 - break; 361 + status = ipq_set_verdict(&pmsg->msg.verdict, 362 + len - sizeof(*pmsg)); 363 + break; 368 364 default: 369 365 status = -EINVAL; 370 366 }
+2 -2
net/ipv6/raw.c
··· 817 817 memset(opt, 0, sizeof(struct ipv6_txoptions)); 818 818 opt->tot_len = sizeof(struct ipv6_txoptions); 819 819 820 - err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, 821 - &tclass, &dontfrag); 820 + err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 821 + &hlimit, &tclass, &dontfrag); 822 822 if (err < 0) { 823 823 fl6_sock_release(flowlabel); 824 824 return err;
+2 -2
net/ipv6/udp.c
··· 1090 1090 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1091 1091 opt->tot_len = sizeof(*opt); 1092 1092 1093 - err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, 1094 - &tclass, &dontfrag); 1093 + err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 1094 + &hlimit, &tclass, &dontfrag); 1095 1095 if (err < 0) { 1096 1096 fl6_sock_release(flowlabel); 1097 1097 return err;
+1
net/netfilter/nf_conntrack_pptp.c
··· 364 364 break; 365 365 366 366 case PPTP_WAN_ERROR_NOTIFY: 367 + case PPTP_SET_LINK_INFO: 367 368 case PPTP_ECHO_REQUEST: 368 369 case PPTP_ECHO_REPLY: 369 370 /* I don't have to explain these ;) */
+3 -3
net/netfilter/nf_conntrack_proto_tcp.c
··· 409 409 if (opsize < 2) /* "silly options" */ 410 410 return; 411 411 if (opsize > length) 412 - break; /* don't parse partial options */ 412 + return; /* don't parse partial options */ 413 413 414 414 if (opcode == TCPOPT_SACK_PERM 415 415 && opsize == TCPOLEN_SACK_PERM) ··· 447 447 BUG_ON(ptr == NULL); 448 448 449 449 /* Fast path for timestamp-only option */ 450 - if (length == TCPOLEN_TSTAMP_ALIGNED*4 450 + if (length == TCPOLEN_TSTAMP_ALIGNED 451 451 && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24) 452 452 | (TCPOPT_NOP << 16) 453 453 | (TCPOPT_TIMESTAMP << 8) ··· 469 469 if (opsize < 2) /* "silly options" */ 470 470 return; 471 471 if (opsize > length) 472 - break; /* don't parse partial options */ 472 + return; /* don't parse partial options */ 473 473 474 474 if (opcode == TCPOPT_SACK 475 475 && opsize >= (TCPOLEN_SACK_BASE
+4 -5
net/netfilter/xt_rateest.c
··· 78 78 { 79 79 struct xt_rateest_match_info *info = par->matchinfo; 80 80 struct xt_rateest *est1, *est2; 81 - int ret = false; 81 + int ret = -EINVAL; 82 82 83 83 if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS | 84 84 XT_RATEEST_MATCH_REL)) != 1) ··· 101 101 if (!est1) 102 102 goto err1; 103 103 104 + est2 = NULL; 104 105 if (info->flags & XT_RATEEST_MATCH_REL) { 105 106 est2 = xt_rateest_lookup(info->name2); 106 107 if (!est2) 107 108 goto err2; 108 - } else 109 - est2 = NULL; 110 - 109 + } 111 110 112 111 info->est1 = est1; 113 112 info->est2 = est2; ··· 115 116 err2: 116 117 xt_rateest_put(est1); 117 118 err1: 118 - return -EINVAL; 119 + return ret; 119 120 } 120 121 121 122 static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par)
+2 -1
scripts/checkpatch.pl
··· 2574 2574 } else { 2575 2575 $cast = $cast2; 2576 2576 } 2577 - WARN("$call() should probably be ${call}_t($cast, $arg1, $arg2)\n" . $herecurr); 2577 + WARN("MINMAX", 2578 + "$call() should probably be ${call}_t($cast, $arg1, $arg2)\n" . $herecurr); 2578 2579 } 2579 2580 } 2580 2581
+1 -1
scripts/get_maintainer.pl
··· 1389 1389 warn("$P: No supported VCS found. Add --nogit to options?\n"); 1390 1390 warn("Using a git repository produces better results.\n"); 1391 1391 warn("Try Linus Torvalds' latest git repository using:\n"); 1392 - warn("git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git\n"); 1392 + warn("git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git\n"); 1393 1393 $printed_novcs = 1; 1394 1394 } 1395 1395 return 0;
+36 -21
sound/pci/hda/patch_conexant.c
··· 3348 3348 3349 3349 #define MAX_AUTO_DACS 5 3350 3350 3351 + #define DAC_SLAVE_FLAG 0x8000 /* filled dac is a slave */ 3352 + 3351 3353 /* fill analog DAC list from the widget tree */ 3352 3354 static int fill_cx_auto_dacs(struct hda_codec *codec, hda_nid_t *dacs) 3353 3355 { ··· 3372 3370 /* fill pin_dac_pair list from the pin and dac list */ 3373 3371 static int fill_dacs_for_pins(struct hda_codec *codec, hda_nid_t *pins, 3374 3372 int num_pins, hda_nid_t *dacs, int *rest, 3375 - struct pin_dac_pair *filled, int type) 3373 + struct pin_dac_pair *filled, int nums, 3374 + int type) 3376 3375 { 3377 - int i, nums; 3376 + int i, start = nums; 3378 3377 3379 - nums = 0; 3380 - for (i = 0; i < num_pins; i++) { 3378 + for (i = 0; i < num_pins; i++, nums++) { 3381 3379 filled[nums].pin = pins[i]; 3382 3380 filled[nums].type = type; 3383 3381 filled[nums].dac = get_unassigned_dac(codec, pins[i], dacs, rest); 3384 - nums++; 3382 + if (filled[nums].dac) 3383 + continue; 3384 + if (filled[start].dac && get_connection_index(codec, pins[i], filled[start].dac) >= 0) { 3385 + filled[nums].dac = filled[start].dac | DAC_SLAVE_FLAG; 3386 + continue; 3387 + } 3388 + if (filled[0].dac && get_connection_index(codec, pins[i], filled[0].dac) >= 0) { 3389 + filled[nums].dac = filled[0].dac | DAC_SLAVE_FLAG; 3390 + continue; 3391 + } 3392 + snd_printdd("Failed to find a DAC for pin 0x%x", pins[i]); 3385 3393 } 3386 3394 return nums; 3387 3395 } ··· 3407 3395 rest = fill_cx_auto_dacs(codec, dacs); 3408 3396 /* parse all analog output pins */ 3409 3397 nums = fill_dacs_for_pins(codec, cfg->line_out_pins, cfg->line_outs, 3410 - dacs, &rest, spec->dac_info, 3411 - AUTO_PIN_LINE_OUT); 3412 - nums += fill_dacs_for_pins(codec, cfg->hp_pins, cfg->hp_outs, 3413 - dacs, &rest, spec->dac_info + nums, 3414 - AUTO_PIN_HP_OUT); 3415 - nums += fill_dacs_for_pins(codec, cfg->speaker_pins, cfg->speaker_outs, 3416 - dacs, &rest, spec->dac_info + nums, 3417 - AUTO_PIN_SPEAKER_OUT); 3398 + dacs, &rest, 
spec->dac_info, 0, 3399 + AUTO_PIN_LINE_OUT); 3400 + nums = fill_dacs_for_pins(codec, cfg->hp_pins, cfg->hp_outs, 3401 + dacs, &rest, spec->dac_info, nums, 3402 + AUTO_PIN_HP_OUT); 3403 + nums = fill_dacs_for_pins(codec, cfg->speaker_pins, cfg->speaker_outs, 3404 + dacs, &rest, spec->dac_info, nums, 3405 + AUTO_PIN_SPEAKER_OUT); 3418 3406 spec->dac_info_filled = nums; 3419 3407 /* fill multiout struct */ 3420 3408 for (i = 0; i < nums; i++) { 3421 3409 hda_nid_t dac = spec->dac_info[i].dac; 3422 - if (!dac) 3410 + if (!dac || (dac & DAC_SLAVE_FLAG)) 3423 3411 continue; 3424 3412 switch (spec->dac_info[i].type) { 3425 3413 case AUTO_PIN_LINE_OUT: ··· 3874 3862 } 3875 3863 if (imux->num_items >= 2 && cfg->num_inputs == imux->num_items) 3876 3864 cx_auto_check_auto_mic(codec); 3877 - if (imux->num_items > 1 && !spec->auto_mic) { 3865 + if (imux->num_items > 1) { 3878 3866 for (i = 1; i < imux->num_items; i++) { 3879 3867 if (spec->imux_info[i].adc != spec->imux_info[0].adc) { 3880 3868 spec->adc_switching = 1; ··· 4047 4035 nid = spec->dac_info[i].dac; 4048 4036 if (!nid) 4049 4037 nid = spec->multiout.dac_nids[0]; 4038 + else if (nid & DAC_SLAVE_FLAG) 4039 + nid &= ~DAC_SLAVE_FLAG; 4050 4040 select_connection(codec, spec->dac_info[i].pin, nid); 4051 4041 } 4052 4042 if (spec->auto_mute) { ··· 4181 4167 hda_nid_t pin, const char *name, int idx) 4182 4168 { 4183 4169 unsigned int caps; 4184 - caps = query_amp_caps(codec, dac, HDA_OUTPUT); 4185 - if (caps & AC_AMPCAP_NUM_STEPS) 4186 - return cx_auto_add_pb_volume(codec, dac, name, idx); 4170 + if (dac && !(dac & DAC_SLAVE_FLAG)) { 4171 + caps = query_amp_caps(codec, dac, HDA_OUTPUT); 4172 + if (caps & AC_AMPCAP_NUM_STEPS) 4173 + return cx_auto_add_pb_volume(codec, dac, name, idx); 4174 + } 4187 4175 caps = query_amp_caps(codec, pin, HDA_OUTPUT); 4188 4176 if (caps & AC_AMPCAP_NUM_STEPS) 4189 4177 return cx_auto_add_pb_volume(codec, pin, name, idx); ··· 4207 4191 for (i = 0; i < spec->dac_info_filled; i++) { 4208 4192 
const char *label; 4209 4193 int idx, type; 4210 - if (!spec->dac_info[i].dac) 4211 - continue; 4194 + hda_nid_t dac = spec->dac_info[i].dac; 4212 4195 type = spec->dac_info[i].type; 4213 4196 if (type == AUTO_PIN_LINE_OUT) 4214 4197 type = spec->autocfg.line_out_type; ··· 4226 4211 idx = num_spk++; 4227 4212 break; 4228 4213 } 4229 - err = try_add_pb_volume(codec, spec->dac_info[i].dac, 4214 + err = try_add_pb_volume(codec, dac, 4230 4215 spec->dac_info[i].pin, 4231 4216 label, idx); 4232 4217 if (err < 0)
+17 -11
sound/pci/hda/patch_realtek.c
··· 565 565 { 566 566 struct alc_spec *spec = codec->spec; 567 567 568 - if (!spec->automute) 569 - return; 570 568 spec->jack_present = 571 569 detect_jacks(codec, ARRAY_SIZE(spec->autocfg.hp_pins), 572 570 spec->autocfg.hp_pins); 571 + if (!spec->automute) 572 + return; 573 573 update_speakers(codec); 574 574 } 575 575 ··· 578 578 { 579 579 struct alc_spec *spec = codec->spec; 580 580 581 - if (!spec->automute || !spec->detect_line) 582 - return; 583 581 spec->line_jack_present = 584 582 detect_jacks(codec, ARRAY_SIZE(spec->autocfg.line_out_pins), 585 583 spec->autocfg.line_out_pins); 584 + if (!spec->automute || !spec->detect_line) 585 + return; 586 586 update_speakers(codec); 587 587 } 588 588 ··· 3083 3083 static void alc_auto_init_extra_out(struct hda_codec *codec) 3084 3084 { 3085 3085 struct alc_spec *spec = codec->spec; 3086 - hda_nid_t pin; 3086 + hda_nid_t pin, dac; 3087 3087 3088 3088 pin = spec->autocfg.hp_pins[0]; 3089 - if (pin) 3090 - alc_auto_set_output_and_unmute(codec, pin, PIN_HP, 3091 - spec->multiout.hp_nid); 3089 + if (pin) { 3090 + dac = spec->multiout.hp_nid; 3091 + if (!dac) 3092 + dac = spec->multiout.dac_nids[0]; 3093 + alc_auto_set_output_and_unmute(codec, pin, PIN_HP, dac); 3094 + } 3092 3095 pin = spec->autocfg.speaker_pins[0]; 3093 - if (pin) 3094 - alc_auto_set_output_and_unmute(codec, pin, PIN_OUT, 3095 - spec->multiout.extra_out_nid[0]); 3096 + if (pin) { 3097 + dac = spec->multiout.extra_out_nid[0]; 3098 + if (!dac) 3099 + dac = spec->multiout.dac_nids[0]; 3100 + alc_auto_set_output_and_unmute(codec, pin, PIN_OUT, dac); 3101 + } 3096 3102 } 3097 3103 3098 3104 /*
+1 -1
sound/soc/blackfin/bf5xx-ad193x.c
··· 56 56 57 57 switch (params_rate(params)) { 58 58 case 48000: 59 - clk = 12288000; 59 + clk = 24576000; 60 60 break; 61 61 } 62 62
+2 -9
sound/soc/codecs/ad193x.c
··· 27 27 int sysclk; 28 28 }; 29 29 30 - /* ad193x register cache & default register settings */ 31 - static const u8 ad193x_reg[AD193X_NUM_REGS] = { 32 - 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 33 - }; 34 - 35 30 /* 36 31 * AD193X volume/mute/de-emphasis etc. controls 37 32 */ ··· 302 307 snd_soc_write(codec, AD193X_PLL_CLK_CTRL0, reg); 303 308 304 309 reg = snd_soc_read(codec, AD193X_DAC_CTRL2); 305 - reg = (reg & (~AD193X_DAC_WORD_LEN_MASK)) | word_len; 310 + reg = (reg & (~AD193X_DAC_WORD_LEN_MASK)) 311 + | (word_len << AD193X_DAC_WORD_LEN_SHFT); 306 312 snd_soc_write(codec, AD193X_DAC_CTRL2, reg); 307 313 308 314 reg = snd_soc_read(codec, AD193X_ADC_CTRL1); ··· 385 389 386 390 static struct snd_soc_codec_driver soc_codec_dev_ad193x = { 387 391 .probe = ad193x_probe, 388 - .reg_cache_default = ad193x_reg, 389 - .reg_cache_size = AD193X_NUM_REGS, 390 - .reg_word_size = sizeof(u16), 391 392 }; 392 393 393 394 #if defined(CONFIG_SPI_MASTER)
+3 -2
sound/soc/codecs/ad193x.h
··· 34 34 #define AD193X_DAC_LEFT_HIGH (1 << 3) 35 35 #define AD193X_DAC_BCLK_INV (1 << 7) 36 36 #define AD193X_DAC_CTRL2 0x804 37 - #define AD193X_DAC_WORD_LEN_MASK 0xC 37 + #define AD193X_DAC_WORD_LEN_SHFT 3 38 + #define AD193X_DAC_WORD_LEN_MASK 0x18 38 39 #define AD193X_DAC_MASTER_MUTE 1 39 40 #define AD193X_DAC_CHNL_MUTE 0x805 40 41 #define AD193X_DACL1_MUTE 0 ··· 64 63 #define AD193X_ADC_CTRL1 0x80f 65 64 #define AD193X_ADC_SERFMT_MASK 0x60 66 65 #define AD193X_ADC_SERFMT_STEREO (0 << 5) 67 - #define AD193X_ADC_SERFMT_TDM (1 << 2) 66 + #define AD193X_ADC_SERFMT_TDM (1 << 5) 68 67 #define AD193X_ADC_SERFMT_AUX (2 << 5) 69 68 #define AD193X_ADC_WORD_LEN_MASK 0x3 70 69 #define AD193X_ADC_CTRL2 0x810
+1
sound/soc/codecs/sta32x.c
··· 857 857 ret = snd_soc_register_codec(&i2c->dev, &sta32x_codec, &sta32x_dai, 1); 858 858 if (ret != 0) { 859 859 dev_err(&i2c->dev, "Failed to register codec (%d)\n", ret); 860 + kfree(sta32x); 860 861 return ret; 861 862 } 862 863
+8 -4
sound/soc/codecs/wm8962.c
··· 2221 2221 switch (event) { 2222 2222 case SND_SOC_DAPM_PRE_PMU: 2223 2223 if (fll) { 2224 + try_wait_for_completion(&wm8962->fll_lock); 2225 + 2224 2226 snd_soc_update_bits(codec, WM8962_FLL_CONTROL_1, 2225 2227 WM8962_FLL_ENA, WM8962_FLL_ENA); 2226 2228 if (wm8962->irq) { ··· 2929 2927 WM8962_BIAS_ENA | 0x180); 2930 2928 2931 2929 msleep(5); 2932 - 2933 - snd_soc_update_bits(codec, WM8962_CLOCKING2, 2934 - WM8962_CLKREG_OVD, 2935 - WM8962_CLKREG_OVD); 2936 2930 } 2937 2931 2938 2932 /* VMID 2*250k */ ··· 3285 3287 snd_soc_write(codec, WM8962_FLL_CONTROL_6, fll_div.theta); 3286 3288 snd_soc_write(codec, WM8962_FLL_CONTROL_7, fll_div.lambda); 3287 3289 snd_soc_write(codec, WM8962_FLL_CONTROL_8, fll_div.n); 3290 + 3291 + try_wait_for_completion(&wm8962->fll_lock); 3288 3292 3289 3293 snd_soc_update_bits(codec, WM8962_FLL_CONTROL_1, 3290 3294 WM8962_FLL_FRAC | WM8962_FLL_REFCLK_SRC_MASK | ··· 3867 3867 * write to registers if the device is declocked. 3868 3868 */ 3869 3869 snd_soc_update_bits(codec, WM8962_CLOCKING2, WM8962_SYSCLK_ENA, 0); 3870 + 3871 + /* Ensure we have soft control over all registers */ 3872 + snd_soc_update_bits(codec, WM8962_CLOCKING2, 3873 + WM8962_CLKREG_OVD, WM8962_CLKREG_OVD); 3870 3874 3871 3875 regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies); 3872 3876
+18 -10
sound/soc/codecs/wm8996.c
··· 420 420 }; 421 421 422 422 static const struct soc_enum sidetone_hpf = 423 - SOC_ENUM_SINGLE(WM8996_SIDETONE, 7, 6, sidetone_hpf_text); 423 + SOC_ENUM_SINGLE(WM8996_SIDETONE, 7, 7, sidetone_hpf_text); 424 424 425 425 static const char *hpf_mode_text[] = { 426 426 "HiFi", "Custom", "Voice" ··· 988 988 SND_SOC_DAPM_PGA("IN1L PGA", WM8996_POWER_MANAGEMENT_2, 5, 0, NULL, 0), 989 989 SND_SOC_DAPM_PGA("IN1R PGA", WM8996_POWER_MANAGEMENT_2, 4, 0, NULL, 0), 990 990 991 - SND_SOC_DAPM_MUX("IN1L Mux", SND_SOC_NOPM, 0, 0, &in1_mux), 992 - SND_SOC_DAPM_MUX("IN1R Mux", SND_SOC_NOPM, 0, 0, &in1_mux), 993 - SND_SOC_DAPM_MUX("IN2L Mux", SND_SOC_NOPM, 0, 0, &in2_mux), 994 - SND_SOC_DAPM_MUX("IN2R Mux", SND_SOC_NOPM, 0, 0, &in2_mux), 995 - 996 - SND_SOC_DAPM_PGA("IN1L", WM8996_POWER_MANAGEMENT_7, 2, 0, NULL, 0), 997 - SND_SOC_DAPM_PGA("IN1R", WM8996_POWER_MANAGEMENT_7, 3, 0, NULL, 0), 998 - SND_SOC_DAPM_PGA("IN2L", WM8996_POWER_MANAGEMENT_7, 6, 0, NULL, 0), 999 - SND_SOC_DAPM_PGA("IN2R", WM8996_POWER_MANAGEMENT_7, 7, 0, NULL, 0), 991 + SND_SOC_DAPM_MUX("IN1L Mux", WM8996_POWER_MANAGEMENT_7, 2, 0, &in1_mux), 992 + SND_SOC_DAPM_MUX("IN1R Mux", WM8996_POWER_MANAGEMENT_7, 3, 0, &in1_mux), 993 + SND_SOC_DAPM_MUX("IN2L Mux", WM8996_POWER_MANAGEMENT_7, 6, 0, &in2_mux), 994 + SND_SOC_DAPM_MUX("IN2R Mux", WM8996_POWER_MANAGEMENT_7, 7, 0, &in2_mux), 1000 995 1001 996 SND_SOC_DAPM_SUPPLY("DMIC2", WM8996_POWER_MANAGEMENT_7, 9, 0, NULL, 0), 1002 997 SND_SOC_DAPM_SUPPLY("DMIC1", WM8996_POWER_MANAGEMENT_7, 8, 0, NULL, 0), ··· 1207 1212 1208 1213 { "AIF2RX0", NULL, "AIFCLK" }, 1209 1214 { "AIF2RX1", NULL, "AIFCLK" }, 1215 + 1216 + { "AIF1TX0", NULL, "AIFCLK" }, 1217 + { "AIF1TX1", NULL, "AIFCLK" }, 1218 + { "AIF1TX2", NULL, "AIFCLK" }, 1219 + { "AIF1TX3", NULL, "AIFCLK" }, 1220 + { "AIF1TX4", NULL, "AIFCLK" }, 1221 + { "AIF1TX5", NULL, "AIFCLK" }, 1222 + 1223 + { "AIF2TX0", NULL, "AIFCLK" }, 1224 + { "AIF2TX1", NULL, "AIFCLK" }, 1210 1225 1211 1226 { "DSP1RXL", NULL, "SYSDSPCLK" }, 1212 1227 { 
"DSP1RXR", NULL, "SYSDSPCLK" }, ··· 2110 2105 fll_div.fll_loop_gain); 2111 2106 2112 2107 snd_soc_write(codec, WM8996_FLL_EFS_1, fll_div.lambda); 2108 + 2109 + /* Clear any pending completions (eg, from failed startups) */ 2110 + try_wait_for_completion(&wm8996->fll_lock); 2113 2111 2114 2112 snd_soc_update_bits(codec, WM8996_FLL_CONTROL_1, 2115 2113 WM8996_FLL_ENA, WM8996_FLL_ENA);
+3 -2
sound/soc/ep93xx/ep93xx-i2s.c
··· 385 385 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 386 386 if (!res) { 387 387 err = -ENODEV; 388 - goto fail; 388 + goto fail_free_info; 389 389 } 390 390 391 391 info->mem = request_mem_region(res->start, resource_size(res), 392 392 pdev->name); 393 393 if (!info->mem) { 394 394 err = -EBUSY; 395 - goto fail; 395 + goto fail_free_info; 396 396 } 397 397 398 398 info->regs = ioremap(info->mem->start, resource_size(info->mem)); ··· 435 435 iounmap(info->regs); 436 436 fail_release_mem: 437 437 release_mem_region(info->mem->start, resource_size(info->mem)); 438 + fail_free_info: 438 439 kfree(info); 439 440 fail: 440 441 return err;
+2
sound/soc/fsl/fsl_dma.c
··· 879 879 * assume that device_node pointers are a valid comparison. 880 880 */ 881 881 np = of_parse_phandle(ssi_np, "fsl,playback-dma", 0); 882 + of_node_put(np); 882 883 if (np == dma_channel_np) 883 884 return ssi_np; 884 885 885 886 np = of_parse_phandle(ssi_np, "fsl,capture-dma", 0); 887 + of_node_put(np); 886 888 if (np == dma_channel_np) 887 889 return ssi_np; 888 890 }
+9 -9
sound/soc/fsl/mpc8610_hpcd.c
··· 345 345 } 346 346 347 347 machine_data = kzalloc(sizeof(struct mpc8610_hpcd_data), GFP_KERNEL); 348 - if (!machine_data) 349 - return -ENOMEM; 348 + if (!machine_data) { 349 + ret = -ENOMEM; 350 + goto error_alloc; 351 + } 350 352 351 353 machine_data->dai[0].cpu_dai_name = dev_name(&ssi_pdev->dev); 352 354 machine_data->dai[0].ops = &mpc8610_hpcd_ops; ··· 496 494 ret = platform_device_add(sound_device); 497 495 if (ret) { 498 496 dev_err(&pdev->dev, "platform device add failed\n"); 499 - goto error; 497 + goto error_sound; 500 498 } 501 499 dev_set_drvdata(&pdev->dev, sound_device); 502 500 ··· 504 502 505 503 return 0; 506 504 505 + error_sound: 506 + platform_device_unregister(sound_device); 507 507 error: 508 - of_node_put(codec_np); 509 - 510 - if (sound_device) 511 - platform_device_unregister(sound_device); 512 - 513 508 kfree(machine_data); 514 - 509 + error_alloc: 510 + of_node_put(codec_np); 515 511 return ret; 516 512 } 517 513
+3 -1
sound/soc/fsl/p1022_ds.c
··· 297 297 * dai->platform name should already point to an allocated buffer. 298 298 */ 299 299 ret = of_address_to_resource(dma_channel_np, 0, &res); 300 - if (ret) 300 + if (ret) { 301 + of_node_put(dma_channel_np); 301 302 return ret; 303 + } 302 304 snprintf((char *)dai->platform_name, DAI_NAME_SIZE, "%llx.%s", 303 305 (unsigned long long) res.start, dma_channel_np->name); 304 306
+1 -1
sound/soc/kirkwood/kirkwood-i2s.c
··· 424 424 if (!priv->mem) { 425 425 dev_err(&pdev->dev, "request_mem_region failed\n"); 426 426 err = -EBUSY; 427 - goto error; 427 + goto error_alloc; 428 428 } 429 429 430 430 priv->io = ioremap(priv->mem->start, SZ_16K);
+4 -2
sound/soc/omap/ams-delta.c
··· 514 514 } 515 515 516 516 /* Set codec bias level */ 517 - ams_delta_set_bias_level(card, SND_SOC_BIAS_STANDBY); 517 + ams_delta_set_bias_level(card, dapm, SND_SOC_BIAS_STANDBY); 518 518 519 519 /* Add hook switch - can be used to control the codec from userspace 520 520 * even if line discipline fails */ ··· 649 649 ams_delta_hook_switch_gpios); 650 650 651 651 /* Keep modem power on */ 652 - ams_delta_set_bias_level(&ams_delta_audio_card, SND_SOC_BIAS_STANDBY); 652 + ams_delta_set_bias_level(&ams_delta_audio_card, 653 + &ams_delta_audio_card.rtd[0].codec->dapm, 654 + SND_SOC_BIAS_STANDBY); 653 655 654 656 platform_device_unregister(cx20442_platform_device); 655 657 platform_device_unregister(ams_delta_audio_platform_device);
+1
sound/soc/samsung/Kconfig
··· 185 185 select SND_SAMSUNG_I2S 186 186 select SND_SOC_WM8996 187 187 select SND_SOC_WM9081 188 + select SND_SOC_WM1250_EV1 188 189 189 190 config SND_SOC_SPEYSIDE_WM8962 190 191 tristate "Audio support for Wolfson Speyside with WM8962"
+1
sound/soc/samsung/h1940_uda1380.c
··· 13 13 * 14 14 */ 15 15 16 + #include <linux/types.h> 16 17 #include <linux/gpio.h> 17 18 18 19 #include <sound/soc.h>
+1
sound/soc/samsung/rx1950_uda1380.c
··· 17 17 * 18 18 */ 19 19 20 + #include <linux/types.h> 20 21 #include <linux/gpio.h> 21 22 22 23 #include <sound/soc.h>
+6
sound/soc/samsung/speyside_wm8962.c
··· 23 23 struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai; 24 24 int ret; 25 25 26 + if (dapm->dev != codec_dai->dev) 27 + return 0; 28 + 26 29 switch (level) { 27 30 case SND_SOC_BIAS_PREPARE: 28 31 if (dapm->bias_level == SND_SOC_BIAS_STANDBY) { ··· 59 56 { 60 57 struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai; 61 58 int ret; 59 + 60 + if (dapm->dev != codec_dai->dev) 61 + return 0; 62 62 63 63 switch (level) { 64 64 case SND_SOC_BIAS_STANDBY:
+1 -1
sound/soc/soc-core.c
··· 1913 1913 1914 1914 if (prefix) { 1915 1915 name_len = strlen(long_name) + strlen(prefix) + 2; 1916 - name = kmalloc(name_len, GFP_ATOMIC); 1916 + name = kmalloc(name_len, GFP_KERNEL); 1917 1917 if (!name) 1918 1918 return NULL; 1919 1919
+23
sound/soc/soc-io.c
··· 205 205 #define snd_soc_16_8_read_i2c NULL 206 206 #endif 207 207 208 + #if defined(CONFIG_SPI_MASTER) 209 + static unsigned int snd_soc_16_8_read_spi(struct snd_soc_codec *codec, 210 + unsigned int r) 211 + { 212 + struct spi_device *spi = codec->control_data; 213 + 214 + const u16 reg = cpu_to_be16(r | 0x100); 215 + u8 data; 216 + int ret; 217 + 218 + ret = spi_write_then_read(spi, &reg, 2, &data, 1); 219 + if (ret < 0) 220 + return 0; 221 + return data; 222 + } 223 + #else 224 + #define snd_soc_16_8_read_spi NULL 225 + #endif 226 + 208 227 static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg, 209 228 unsigned int value) 210 229 { ··· 314 295 int (*write)(struct snd_soc_codec *codec, unsigned int, unsigned int); 315 296 unsigned int (*read)(struct snd_soc_codec *, unsigned int); 316 297 unsigned int (*i2c_read)(struct snd_soc_codec *, unsigned int); 298 + unsigned int (*spi_read)(struct snd_soc_codec *, unsigned int); 317 299 } io_types[] = { 318 300 { 319 301 .addr_bits = 4, .data_bits = 12, ··· 338 318 .addr_bits = 16, .data_bits = 8, 339 319 .write = snd_soc_16_8_write, 340 320 .i2c_read = snd_soc_16_8_read_i2c, 321 + .spi_read = snd_soc_16_8_read_spi, 341 322 }, 342 323 { 343 324 .addr_bits = 16, .data_bits = 16, ··· 404 383 #ifdef CONFIG_SPI_MASTER 405 384 codec->hw_write = do_spi_write; 406 385 #endif 386 + if (io_types[i].spi_read) 387 + codec->hw_read = io_types[i].spi_read; 407 388 408 389 codec->control_data = container_of(codec->dev, 409 390 struct spi_device,
+1 -1
sound/soc/soc-jack.c
··· 327 327 IRQF_TRIGGER_FALLING, 328 328 gpios[i].name, 329 329 &gpios[i]); 330 - if (ret) 330 + if (ret < 0) 331 331 goto err; 332 332 333 333 if (gpios[i].wake) {
+3
sound/soc/soc-pcm.c
··· 290 290 codec_dai->active--; 291 291 codec->active--; 292 292 293 + if (!cpu_dai->active && !codec_dai->active) 294 + rtd->rate = 0; 295 + 293 296 /* Muting the DAC suppresses artifacts caused during digital 294 297 * shutdown, for example from stopping clocks. 295 298 */
+2 -2
sound/soc/tegra/tegra_wm8903.c
··· 319 319 snd_soc_dapm_force_enable_pin(dapm, "Mic Bias"); 320 320 321 321 /* FIXME: Calculate automatically based on DAPM routes? */ 322 - if (!machine_is_harmony() && !machine_is_ventana()) 322 + if (!machine_is_harmony()) 323 323 snd_soc_dapm_nc_pin(dapm, "IN1L"); 324 324 if (!machine_is_seaboard() && !machine_is_aebl()) 325 325 snd_soc_dapm_nc_pin(dapm, "IN1R"); ··· 395 395 platform_set_drvdata(pdev, card); 396 396 snd_soc_card_set_drvdata(card, machine); 397 397 398 - if (machine_is_harmony() || machine_is_ventana()) { 398 + if (machine_is_harmony()) { 399 399 card->dapm_routes = harmony_audio_map; 400 400 card->num_dapm_routes = ARRAY_SIZE(harmony_audio_map); 401 401 } else if (machine_is_seaboard()) {
+4 -3
tools/power/cpupower/Makefile
··· 24 24 25 25 # Set the following to `true' to make a unstripped, unoptimized 26 26 # binary. Leave this set to `false' for production use. 27 - DEBUG ?= false 27 + DEBUG ?= true 28 28 29 29 # make the build silent. Set this to something else to make it noisy again. 30 30 V ?= false ··· 35 35 36 36 # Set the following to 'true' to build/install the 37 37 # cpufreq-bench benchmarking tool 38 - CPUFRQ_BENCH ?= true 38 + CPUFREQ_BENCH ?= true 39 39 40 40 # Prefix to the directories we're installing to 41 41 DESTDIR ?= ··· 137 137 ifeq ($(strip $(NLS)),true) 138 138 INSTALL_NLS += install-gmo 139 139 COMPILE_NLS += create-gmo 140 + CFLAGS += -DNLS 140 141 endif 141 142 142 - ifeq ($(strip $(CPUFRQ_BENCH)),true) 143 + ifeq ($(strip $(CPUFREQ_BENCH)),true) 143 144 INSTALL_BENCH += install-bench 144 145 COMPILE_BENCH += compile-bench 145 146 endif
+4 -4
tools/power/cpupower/debug/x86_64/Makefile
··· 1 1 default: all 2 2 3 - centrino-decode: centrino-decode.c 4 - $(CC) $(CFLAGS) -o centrino-decode centrino-decode.c 3 + centrino-decode: ../i386/centrino-decode.c 4 + $(CC) $(CFLAGS) -o $@ $< 5 5 6 - powernow-k8-decode: powernow-k8-decode.c 7 - $(CC) $(CFLAGS) -o powernow-k8-decode powernow-k8-decode.c 6 + powernow-k8-decode: ../i386/powernow-k8-decode.c 7 + $(CC) $(CFLAGS) -o $@ $< 8 8 9 9 all: centrino-decode powernow-k8-decode 10 10
+3 -3
tools/power/cpupower/man/cpupower-frequency-info.1
··· 1 - .TH "cpufreq-info" "1" "0.1" "Mattia Dongili" "" 1 + .TH "cpupower-frequency-info" "1" "0.1" "Mattia Dongili" "" 2 2 .SH "NAME" 3 3 .LP 4 - cpufreq\-info \- Utility to retrieve cpufreq kernel information 4 + cpupower frequency\-info \- Utility to retrieve cpufreq kernel information 5 5 .SH "SYNTAX" 6 6 .LP 7 - cpufreq\-info [\fIoptions\fP] 7 + cpupower [ \-c cpulist ] frequency\-info [\fIoptions\fP] 8 8 .SH "DESCRIPTION" 9 9 .LP 10 10 A small tool which prints out cpufreq information helpful to developers and interested users.
+4 -4
tools/power/cpupower/man/cpupower-frequency-set.1
··· 1 - .TH "cpufreq-set" "1" "0.1" "Mattia Dongili" "" 1 + .TH "cpupower-frequency-set" "1" "0.1" "Mattia Dongili" "" 2 2 .SH "NAME" 3 3 .LP 4 - cpufreq\-set \- A small tool which allows to modify cpufreq settings. 4 + cpupower frequency\-set \- A small tool which allows to modify cpufreq settings. 5 5 .SH "SYNTAX" 6 6 .LP 7 - cpufreq\-set [\fIoptions\fP] 7 + cpupower [ \-c cpu ] frequency\-set [\fIoptions\fP] 8 8 .SH "DESCRIPTION" 9 9 .LP 10 - cpufreq\-set allows you to modify cpufreq settings without having to type e.g. "/sys/devices/system/cpu/cpu0/cpufreq/scaling_set_speed" all the time. 10 + cpupower frequency\-set allows you to modify cpufreq settings without having to type e.g. "/sys/devices/system/cpu/cpu0/cpufreq/scaling_set_speed" all the time. 11 11 .SH "OPTIONS" 12 12 .LP 13 13 .TP
+7 -7
tools/power/cpupower/man/cpupower.1
··· 3 3 cpupower \- Shows and sets processor power related values 4 4 .SH SYNOPSIS 5 5 .ft B 6 - .B cpupower [ \-c cpulist ] subcommand [ARGS] 6 + .B cpupower [ \-c cpulist ] <command> [ARGS] 7 7 8 8 .B cpupower \-v|\-\-version 9 9 ··· 13 13 \fBcpupower \fP is a collection of tools to examine and tune power saving 14 14 related features of your processor. 15 15 16 - The manpages of the subcommands (cpupower\-<subcommand>(1)) provide detailed 16 + The manpages of the commands (cpupower\-<command>(1)) provide detailed 17 17 descriptions of supported features. Run \fBcpupower help\fP to get an overview 18 - of supported subcommands. 18 + of supported commands. 19 19 20 20 .SH Options 21 21 .PP 22 22 \-\-help, \-h 23 23 .RS 4 24 - Shows supported subcommands and general usage. 24 + Shows supported commands and general usage. 25 25 .RE 26 26 .PP 27 27 \-\-cpu cpulist, \-c cpulist 28 28 .RS 4 29 29 Only show or set values for specific cores. 30 - This option is not supported by all subcommands, details can be found in the 31 - manpages of the subcommands. 30 + This option is not supported by all commands, details can be found in the 31 + manpages of the commands. 32 32 33 - Some subcommands access all cores (typically the *\-set commands), some only 33 + Some commands access all cores (typically the *\-set commands), some only 34 34 the first core (typically the *\-info commands) by default. 35 35 36 36 The syntax for <cpulist> is based on how the kernel exports CPU bitmasks via
-7
tools/power/cpupower/utils/builtin.h
··· 8 8 extern int cmd_idle_info(int argc, const char **argv); 9 9 extern int cmd_monitor(int argc, const char **argv); 10 10 11 - extern void set_help(void); 12 - extern void info_help(void); 13 - extern void freq_set_help(void); 14 - extern void freq_info_help(void); 15 - extern void idle_info_help(void); 16 - extern void monitor_help(void); 17 - 18 11 #endif
+1 -41
tools/power/cpupower/utils/cpufreq-info.c
··· 510 510 return 0; 511 511 } 512 512 513 - void freq_info_help(void) 514 - { 515 - printf(_("Usage: cpupower freqinfo [options]\n")); 516 - printf(_("Options:\n")); 517 - printf(_(" -e, --debug Prints out debug information [default]\n")); 518 - printf(_(" -f, --freq Get frequency the CPU currently runs at, according\n" 519 - " to the cpufreq core *\n")); 520 - printf(_(" -w, --hwfreq Get frequency the CPU currently runs at, by reading\n" 521 - " it from hardware (only available to root) *\n")); 522 - printf(_(" -l, --hwlimits Determine the minimum and maximum CPU frequency allowed *\n")); 523 - printf(_(" -d, --driver Determines the used cpufreq kernel driver *\n")); 524 - printf(_(" -p, --policy Gets the currently used cpufreq policy *\n")); 525 - printf(_(" -g, --governors Determines available cpufreq governors *\n")); 526 - printf(_(" -r, --related-cpus Determines which CPUs run at the same hardware frequency *\n")); 527 - printf(_(" -a, --affected-cpus Determines which CPUs need to have their frequency\n" 528 - " coordinated by software *\n")); 529 - printf(_(" -s, --stats Shows cpufreq statistics if available\n")); 530 - printf(_(" -y, --latency Determines the maximum latency on CPU frequency changes *\n")); 531 - printf(_(" -b, --boost Checks for turbo or boost modes *\n")); 532 - printf(_(" -o, --proc Prints out information like provided by the /proc/cpufreq\n" 533 - " interface in 2.4. and early 2.6. kernels\n")); 534 - printf(_(" -m, --human human-readable output for the -f, -w, -s and -y parameters\n")); 535 - printf(_(" -h, --help Prints out this screen\n")); 536 - 537 - printf("\n"); 538 - printf(_("If no argument is given, full output about\n" 539 - "cpufreq is printed which is useful e.g. 
for reporting bugs.\n\n")); 540 - printf(_("By default info of CPU 0 is shown which can be overridden\n" 541 - "with the cpupower --cpu main command option.\n")); 542 - } 543 - 544 513 static struct option info_opts[] = { 545 514 { .name = "debug", .has_arg = no_argument, .flag = NULL, .val = 'e'}, 546 515 { .name = "boost", .has_arg = no_argument, .flag = NULL, .val = 'b'}, ··· 525 556 { .name = "latency", .has_arg = no_argument, .flag = NULL, .val = 'y'}, 526 557 { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'}, 527 558 { .name = "human", .has_arg = no_argument, .flag = NULL, .val = 'm'}, 528 - { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'}, 529 559 { }, 530 560 }; 531 561 ··· 538 570 int output_param = 0; 539 571 540 572 do { 541 - ret = getopt_long(argc, argv, "hoefwldpgrasmyb", info_opts, NULL); 573 + ret = getopt_long(argc, argv, "oefwldpgrasmyb", info_opts, NULL); 542 574 switch (ret) { 543 575 case '?': 544 576 output_param = '?'; 545 - cont = 0; 546 - break; 547 - case 'h': 548 - output_param = 'h'; 549 577 cont = 0; 550 578 break; 551 579 case -1: ··· 606 642 return -EINVAL; 607 643 case '?': 608 644 printf(_("invalid or unknown argument\n")); 609 - freq_info_help(); 610 645 return -EINVAL; 611 - case 'h': 612 - freq_info_help(); 613 - return EXIT_SUCCESS; 614 646 case 'o': 615 647 proc_cpufreq_output(); 616 648 return EXIT_SUCCESS;
+1 -28
tools/power/cpupower/utils/cpufreq-set.c
··· 20 20 21 21 #define NORM_FREQ_LEN 32 22 22 23 - void freq_set_help(void) 24 - { 25 - printf(_("Usage: cpupower frequency-set [options]\n")); 26 - printf(_("Options:\n")); 27 - printf(_(" -d FREQ, --min FREQ new minimum CPU frequency the governor may select\n")); 28 - printf(_(" -u FREQ, --max FREQ new maximum CPU frequency the governor may select\n")); 29 - printf(_(" -g GOV, --governor GOV new cpufreq governor\n")); 30 - printf(_(" -f FREQ, --freq FREQ specific frequency to be set. Requires userspace\n" 31 - " governor to be available and loaded\n")); 32 - printf(_(" -r, --related Switches all hardware-related CPUs\n")); 33 - printf(_(" -h, --help Prints out this screen\n")); 34 - printf("\n"); 35 - printf(_("Notes:\n" 36 - "1. Omitting the -c or --cpu argument is equivalent to setting it to \"all\"\n")); 37 - printf(_("2. The -f FREQ, --freq FREQ parameter cannot be combined with any other parameter\n" 38 - " except the -c CPU, --cpu CPU parameter\n" 39 - "3. FREQuencies can be passed in Hz, kHz (default), MHz, GHz, or THz\n" 40 - " by postfixing the value with the wanted unit name, without any space\n" 41 - " (FREQuency in kHz =^ Hz * 0.001 =^ MHz * 1000 =^ GHz * 1000000).\n")); 42 - 43 - } 44 - 45 23 static struct option set_opts[] = { 46 24 { .name = "min", .has_arg = required_argument, .flag = NULL, .val = 'd'}, 47 25 { .name = "max", .has_arg = required_argument, .flag = NULL, .val = 'u'}, 48 26 { .name = "governor", .has_arg = required_argument, .flag = NULL, .val = 'g'}, 49 27 { .name = "freq", .has_arg = required_argument, .flag = NULL, .val = 'f'}, 50 - { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'}, 51 28 { .name = "related", .has_arg = no_argument, .flag = NULL, .val='r'}, 52 29 { }, 53 30 }; ··· 57 80 static void print_unknown_arg(void) 58 81 { 59 82 printf(_("invalid or unknown argument\n")); 60 - freq_set_help(); 61 83 } 62 84 63 85 static unsigned long string_to_frequency(const char *str) ··· 207 231 208 232 /* parameter 
parsing */ 209 233 do { 210 - ret = getopt_long(argc, argv, "d:u:g:f:hr", set_opts, NULL); 234 + ret = getopt_long(argc, argv, "d:u:g:f:r", set_opts, NULL); 211 235 switch (ret) { 212 236 case '?': 213 237 print_unknown_arg(); 214 238 return -EINVAL; 215 - case 'h': 216 - freq_set_help(); 217 - return 0; 218 239 case -1: 219 240 cont = 0; 220 241 break;
+1 -23
tools/power/cpupower/utils/cpuidle-info.c
··· 139 139 } 140 140 } 141 141 142 - /* --freq / -f */ 143 - 144 - void idle_info_help(void) 145 - { 146 - printf(_ ("Usage: cpupower idleinfo [options]\n")); 147 - printf(_ ("Options:\n")); 148 - printf(_ (" -s, --silent Only show general C-state information\n")); 149 - printf(_ (" -o, --proc Prints out information like provided by the /proc/acpi/processor/*/power\n" 150 - " interface in older kernels\n")); 151 - printf(_ (" -h, --help Prints out this screen\n")); 152 - 153 - printf("\n"); 154 - } 155 - 156 142 static struct option info_opts[] = { 157 143 { .name = "silent", .has_arg = no_argument, .flag = NULL, .val = 's'}, 158 144 { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'}, 159 - { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'}, 160 145 { }, 161 146 }; 162 147 163 148 static inline void cpuidle_exit(int fail) 164 149 { 165 - idle_info_help(); 166 150 exit(EXIT_FAILURE); 167 151 } 168 152 ··· 158 174 unsigned int cpu = 0; 159 175 160 176 do { 161 - ret = getopt_long(argc, argv, "hos", info_opts, NULL); 177 + ret = getopt_long(argc, argv, "os", info_opts, NULL); 162 178 if (ret == -1) 163 179 break; 164 180 switch (ret) { 165 181 case '?': 166 182 output_param = '?'; 167 - cont = 0; 168 - break; 169 - case 'h': 170 - output_param = 'h'; 171 183 cont = 0; 172 184 break; 173 185 case 's': ··· 191 211 case '?': 192 212 printf(_("invalid or unknown argument\n")); 193 213 cpuidle_exit(EXIT_FAILURE); 194 - case 'h': 195 - cpuidle_exit(EXIT_SUCCESS); 196 214 } 197 215 198 216 /* Default is: show output of CPU 0 only */
+1 -19
tools/power/cpupower/utils/cpupower-info.c
··· 16 16 #include "helpers/helpers.h" 17 17 #include "helpers/sysfs.h" 18 18 19 - void info_help(void) 20 - { 21 - printf(_("Usage: cpupower info [ -b ] [ -m ] [ -s ]\n")); 22 - printf(_("Options:\n")); 23 - printf(_(" -b, --perf-bias Gets CPU's power vs performance policy on some\n" 24 - " Intel models [0-15], see manpage for details\n")); 25 - printf(_(" -m, --sched-mc Gets the kernel's multi core scheduler policy.\n")); 26 - printf(_(" -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n")); 27 - printf(_(" -h, --help Prints out this screen\n")); 28 - printf(_("\nPassing no option will show all info, by default only on core 0\n")); 29 - printf("\n"); 30 - } 31 - 32 19 static struct option set_opts[] = { 33 20 { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, 34 21 { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, 35 22 { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, 36 - { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'}, 37 23 { }, 38 24 }; 39 25 40 26 static void print_wrong_arg_exit(void) 41 27 { 42 28 printf(_("invalid or unknown argument\n")); 43 - info_help(); 44 29 exit(EXIT_FAILURE); 45 30 } 46 31 ··· 49 64 textdomain(PACKAGE); 50 65 51 66 /* parameter parsing */ 52 - while ((ret = getopt_long(argc, argv, "msbh", set_opts, NULL)) != -1) { 67 + while ((ret = getopt_long(argc, argv, "msb", set_opts, NULL)) != -1) { 53 68 switch (ret) { 54 - case 'h': 55 - info_help(); 56 - return 0; 57 69 case 'b': 58 70 if (params.perf_bias) 59 71 print_wrong_arg_exit();
+3 -22
tools/power/cpupower/utils/cpupower-set.c
··· 17 17 #include "helpers/sysfs.h" 18 18 #include "helpers/bitmask.h" 19 19 20 - void set_help(void) 21 - { 22 - printf(_("Usage: cpupower set [ -b val ] [ -m val ] [ -s val ]\n")); 23 - printf(_("Options:\n")); 24 - printf(_(" -b, --perf-bias [VAL] Sets CPU's power vs performance policy on some\n" 25 - " Intel models [0-15], see manpage for details\n")); 26 - printf(_(" -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n")); 27 - printf(_(" -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler policy.\n")); 28 - printf(_(" -h, --help Prints out this screen\n")); 29 - printf("\n"); 30 - } 31 - 32 20 static struct option set_opts[] = { 33 21 { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, 34 22 { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, 35 23 { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, 36 - { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'}, 37 24 { }, 38 25 }; 39 26 40 27 static void print_wrong_arg_exit(void) 41 28 { 42 29 printf(_("invalid or unknown argument\n")); 43 - set_help(); 44 30 exit(EXIT_FAILURE); 45 31 } 46 32 ··· 52 66 53 67 params.params = 0; 54 68 /* parameter parsing */ 55 - while ((ret = getopt_long(argc, argv, "m:s:b:h", 69 + while ((ret = getopt_long(argc, argv, "m:s:b:", 56 70 set_opts, NULL)) != -1) { 57 71 switch (ret) { 58 - case 'h': 59 - set_help(); 60 - return 0; 61 72 case 'b': 62 73 if (params.perf_bias) 63 74 print_wrong_arg_exit(); ··· 93 110 } 94 111 }; 95 112 96 - if (!params.params) { 97 - set_help(); 98 - return -EINVAL; 99 - } 113 + if (!params.params) 114 + print_wrong_arg_exit(); 100 115 101 116 if (params.sched_mc) { 102 117 ret = sysfs_set_sched("mc", sched_mc);
+51 -40
tools/power/cpupower/utils/cpupower.c
··· 11 11 #include <stdlib.h> 12 12 #include <string.h> 13 13 #include <unistd.h> 14 + #include <errno.h> 14 15 15 16 #include "builtin.h" 16 17 #include "helpers/helpers.h" ··· 20 19 struct cmd_struct { 21 20 const char *cmd; 22 21 int (*main)(int, const char **); 23 - void (*usage)(void); 24 22 int needs_root; 25 23 }; 26 24 27 25 #define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) 28 26 29 - int cmd_help(int argc, const char **argv); 27 + static int cmd_help(int argc, const char **argv); 30 28 31 29 /* Global cpu_info object available for all binaries 32 30 * Info only retrieved from CPU 0 ··· 44 44 static void print_help(void); 45 45 46 46 static struct cmd_struct commands[] = { 47 - { "frequency-info", cmd_freq_info, freq_info_help, 0 }, 48 - { "frequency-set", cmd_freq_set, freq_set_help, 1 }, 49 - { "idle-info", cmd_idle_info, idle_info_help, 0 }, 50 - { "set", cmd_set, set_help, 1 }, 51 - { "info", cmd_info, info_help, 0 }, 52 - { "monitor", cmd_monitor, monitor_help, 0 }, 53 - { "help", cmd_help, print_help, 0 }, 54 - /* { "bench", cmd_bench, NULL, 1 }, */ 47 + { "frequency-info", cmd_freq_info, 0 }, 48 + { "frequency-set", cmd_freq_set, 1 }, 49 + { "idle-info", cmd_idle_info, 0 }, 50 + { "set", cmd_set, 1 }, 51 + { "info", cmd_info, 0 }, 52 + { "monitor", cmd_monitor, 0 }, 53 + { "help", cmd_help, 0 }, 54 + /* { "bench", cmd_bench, 1 }, */ 55 55 }; 56 - 57 - int cmd_help(int argc, const char **argv) 58 - { 59 - unsigned int i; 60 - 61 - if (argc > 1) { 62 - for (i = 0; i < ARRAY_SIZE(commands); i++) { 63 - struct cmd_struct *p = commands + i; 64 - if (strcmp(p->cmd, argv[1])) 65 - continue; 66 - if (p->usage) { 67 - p->usage(); 68 - return EXIT_SUCCESS; 69 - } 70 - } 71 - } 72 - print_help(); 73 - if (argc == 1) 74 - return EXIT_SUCCESS; /* cpupower help */ 75 - return EXIT_FAILURE; 76 - } 77 56 78 57 static void print_help(void) 79 58 { 80 59 unsigned int i; 81 60 82 61 #ifdef DEBUG 83 - printf(_("cpupower [ -d ][ -c cpulist ] subcommand [ARGS]\n")); 84 - 
/*
 * Show the man page for "cpupower" or "cpupower-<subpage>".
 *
 * Replaces the current process image via execlp(3), so on success this
 * never returns.  Returns a negative errno value only if the allocation
 * or the exec itself fails.
 */
static int print_man_page(const char *subpage)
{
	int len;
	char *page;

	len = 10; /* enough for "cpupower-" (9 chars) plus the NUL */
	if (subpage != NULL)
		len += strlen(subpage);

	page = malloc(len);
	if (!page)
		return -ENOMEM;

	sprintf(page, "cpupower");
	/* "cpupower help help" maps to the plain "cpupower" man page */
	if ((subpage != NULL) && strcmp(subpage, "help")) {
		strcat(page, "-");
		strcat(page, subpage);
	}

	/* POSIX requires the variadic sentinel to be a (char *) null pointer */
	execlp("man", "man", page, (char *)NULL);

	/* should not be reached - execlp() only returns on error */
	free(page);	/* fix: don't leak the page name on exec failure */
	return -EINVAL;
}

/*
 * "cpupower help [<command>]" entry point.
 *
 * With an argument, hands off to man(1) via print_man_page() (which
 * normally does not return); without one, prints the built-in usage
 * summary.
 */
static int cmd_help(int argc, const char **argv)
{
	if (argc > 1) {
		print_man_page(argv[1]); /* exits within execlp() */
		return EXIT_FAILURE;
	}

	print_help();
	return EXIT_SUCCESS;
}
+12
tools/power/cpupower/utils/helpers/helpers.h
··· 16 16 #include "helpers/bitmask.h" 17 17 18 18 /* Internationalization ****************************/ 19 + #ifdef NLS 20 + 19 21 #define _(String) gettext(String) 20 22 #ifndef gettext_noop 21 23 #define gettext_noop(String) String 22 24 #endif 23 25 #define N_(String) gettext_noop(String) 26 + 27 + #else /* !NLS */ 28 + 29 + #define _(String) String 30 + #define N_(String) String 31 + 32 + #endif 24 33 /* Internationalization ****************************/ 25 34 26 35 extern int run_as_root; ··· 105 96 int pkg; 106 97 int core; 107 98 int cpu; 99 + 100 + /* flags */ 101 + unsigned int is_online:1; 108 102 } *core_info; 109 103 }; 110 104
+50
tools/power/cpupower/utils/helpers/sysfs.c
··· 56 56 return (unsigned int) numwrite; 57 57 } 58 58 59 + /* 60 + * Detect whether a CPU is online 61 + * 62 + * Returns: 63 + * 1 -> if CPU is online 64 + * 0 -> if CPU is offline 65 + * negative errno values in error case 66 + */ 67 + int sysfs_is_cpu_online(unsigned int cpu) 68 + { 69 + char path[SYSFS_PATH_MAX]; 70 + int fd; 71 + ssize_t numread; 72 + unsigned long long value; 73 + char linebuf[MAX_LINE_LEN]; 74 + char *endp; 75 + struct stat statbuf; 76 + 77 + snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu); 78 + 79 + if (stat(path, &statbuf) != 0) 80 + return 0; 81 + 82 + /* 83 + * kernel without CONFIG_HOTPLUG_CPU 84 + * -> cpuX directory exists, but not cpuX/online file 85 + */ 86 + snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu); 87 + if (stat(path, &statbuf) != 0) 88 + return 1; 89 + 90 + fd = open(path, O_RDONLY); 91 + if (fd == -1) 92 + return -errno; 93 + 94 + numread = read(fd, linebuf, MAX_LINE_LEN - 1); 95 + if (numread < 1) { 96 + close(fd); 97 + return -EIO; 98 + } 99 + linebuf[numread] = '\0'; 100 + close(fd); 101 + 102 + value = strtoull(linebuf, &endp, 0); 103 + if (value > 1 || value < 0) 104 + return -EINVAL; 105 + 106 + return value; 107 + } 108 + 59 109 /* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */ 60 110 61 111 /*
+2
tools/power/cpupower/utils/helpers/sysfs.h
··· 7 7 8 8 extern unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen); 9 9 10 + extern int sysfs_is_cpu_online(unsigned int cpu); 11 + 10 12 extern unsigned long sysfs_get_idlestate_latency(unsigned int cpu, 11 13 unsigned int idlestate); 12 14 extern unsigned long sysfs_get_idlestate_usage(unsigned int cpu,
+4 -1
tools/power/cpupower/utils/helpers/topology.c
··· 41 41 unsigned int pkg; 42 42 unsigned int thread; 43 43 unsigned int cpu; 44 + /* flags */ 45 + unsigned int is_online:1; 44 46 }; 45 47 46 48 static int __compare(const void *t1, const void *t2) ··· 80 78 return -ENOMEM; 81 79 cpu_top->pkgs = cpu_top->cores = 0; 82 80 for (cpu = 0; cpu < cpus; cpu++) { 81 + cpu_top->core_info[cpu].cpu = cpu; 82 + cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu); 83 83 cpu_top->core_info[cpu].pkg = 84 84 sysfs_topology_read_file(cpu, "physical_package_id"); 85 85 if ((int)cpu_top->core_info[cpu].pkg != -1 && ··· 89 85 cpu_top->pkgs = cpu_top->core_info[cpu].pkg; 90 86 cpu_top->core_info[cpu].core = 91 87 sysfs_topology_read_file(cpu, "core_id"); 92 - cpu_top->core_info[cpu].cpu = cpu; 93 88 } 94 89 cpu_top->pkgs++; 95 90
+1 -1
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
··· 134 134 /* Assume idle state count is the same for all CPUs */ 135 135 cpuidle_sysfs_monitor.hw_states_num = sysfs_get_idlestate_count(0); 136 136 137 - if (cpuidle_sysfs_monitor.hw_states_num == 0) 137 + if (cpuidle_sysfs_monitor.hw_states_num <= 0) 138 138 return NULL; 139 139 140 140 for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) {
+29 -37
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
··· 43 43 /* ToDo: Document this in the manpage */ 44 44 static char range_abbr[RANGE_MAX] = { 'T', 'C', 'P', 'M', }; 45 45 46 + static void print_wrong_arg_exit(void) 47 + { 48 + printf(_("invalid or unknown argument\n")); 49 + exit(EXIT_FAILURE); 50 + } 51 + 46 52 long long timespec_diff_us(struct timespec start, struct timespec end) 47 53 { 48 54 struct timespec temp; ··· 60 54 temp.tv_nsec = end.tv_nsec - start.tv_nsec; 61 55 } 62 56 return (temp.tv_sec * 1000000) + (temp.tv_nsec / 1000); 63 - } 64 - 65 - void monitor_help(void) 66 - { 67 - printf(_("cpupower monitor: [-m <mon1>,[<mon2>],.. ] command\n")); 68 - printf(_("cpupower monitor: [-m <mon1>,[<mon2>],.. ] [ -i interval_sec ]\n")); 69 - printf(_("cpupower monitor: -l\n")); 70 - printf(_("\t command: pass an arbitrary command to measure specific workload\n")); 71 - printf(_("\t -i: time intervall to measure for in seconds (default 1)\n")); 72 - printf(_("\t -l: list available CPU sleep monitors (for use with -m)\n")); 73 - printf(_("\t -m: show specific CPU sleep monitors only (in same order)\n")); 74 - printf(_("\t -h: print this help\n")); 75 - printf("\n"); 76 - printf(_("only one of: -l, -m are allowed\nIf none of them is passed,")); 77 - printf(_(" all supported monitors are shown\n")); 78 57 } 79 58 80 59 void print_n_spaces(int n) ··· 140 149 unsigned long long result; 141 150 cstate_t s; 142 151 152 + /* Be careful CPUs may got resorted for pkg value do not just use cpu */ 153 + if (!bitmask_isbitset(cpus_chosen, cpu_top.core_info[cpu].cpu)) 154 + return; 155 + 143 156 if (topology_depth > 2) 144 157 printf("%4d|", cpu_top.core_info[cpu].pkg); 145 158 if (topology_depth > 1) ··· 185 190 } 186 191 } 187 192 } 188 - /* cpu offline */ 189 - if (cpu_top.core_info[cpu].pkg == -1 || 190 - cpu_top.core_info[cpu].core == -1) { 193 + /* 194 + * The monitor could still provide useful data, for example 195 + * AMD HW counters partly sit in PCI config space. 
196 + * It's up to the monitor plug-in to check .is_online, this one 197 + * is just for additional info. 198 + */ 199 + if (!cpu_top.core_info[cpu].is_online) { 191 200 printf(_(" *is offline\n")); 192 201 return; 193 202 } else ··· 237 238 if (hits == 0) { 238 239 printf(_("No matching monitor found in %s, " 239 240 "try -l option\n"), param); 240 - monitor_help(); 241 241 exit(EXIT_FAILURE); 242 242 } 243 243 /* Override detected/registerd monitors array with requested one */ ··· 333 335 int opt; 334 336 progname = basename(argv[0]); 335 337 336 - while ((opt = getopt(argc, argv, "+hli:m:")) != -1) { 338 + while ((opt = getopt(argc, argv, "+li:m:")) != -1) { 337 339 switch (opt) { 338 - case 'h': 339 - monitor_help(); 340 - exit(EXIT_SUCCESS); 341 340 case 'l': 342 - if (mode) { 343 - monitor_help(); 344 - exit(EXIT_FAILURE); 345 - } 341 + if (mode) 342 + print_wrong_arg_exit(); 346 343 mode = list; 347 344 break; 348 345 case 'i': 349 346 /* only allow -i with -m or no option */ 350 - if (mode && mode != show) { 351 - monitor_help(); 352 - exit(EXIT_FAILURE); 353 - } 347 + if (mode && mode != show) 348 + print_wrong_arg_exit(); 354 349 interval = atoi(optarg); 355 350 break; 356 351 case 'm': 357 - if (mode) { 358 - monitor_help(); 359 - exit(EXIT_FAILURE); 360 - } 352 + if (mode) 353 + print_wrong_arg_exit(); 361 354 mode = show; 362 355 show_monitors_param = optarg; 363 356 break; 364 357 default: 365 - monitor_help(); 366 - exit(EXIT_FAILURE); 358 + print_wrong_arg_exit(); 367 359 } 368 360 } 369 361 if (!mode) ··· 372 384 printf(_("Cannot read number of available processors\n")); 373 385 return EXIT_FAILURE; 374 386 } 387 + 388 + /* Default is: monitor all CPUs */ 389 + if (bitmask_isallclear(cpus_chosen)) 390 + bitmask_setall(cpus_chosen); 375 391 376 392 dprint("System has up to %d CPU cores\n", cpu_count); 377 393
+129 -46
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
··· 22 22 23 23 #define MSR_TSC 0x10 24 24 25 + #define MSR_AMD_HWCR 0xc0010015 26 + 25 27 enum mperf_id { C0 = 0, Cx, AVG_FREQ, MPERF_CSTATE_COUNT }; 26 28 27 29 static int mperf_get_count_percent(unsigned int self_id, double *percent, 28 30 unsigned int cpu); 29 31 static int mperf_get_count_freq(unsigned int id, unsigned long long *count, 30 32 unsigned int cpu); 33 + static struct timespec time_start, time_end; 31 34 32 35 static cstate_t mperf_cstates[MPERF_CSTATE_COUNT] = { 33 36 { ··· 57 54 }, 58 55 }; 59 56 57 + enum MAX_FREQ_MODE { MAX_FREQ_SYSFS, MAX_FREQ_TSC_REF }; 58 + static int max_freq_mode; 59 + /* 60 + * The max frequency mperf is ticking at (in C0), either retrieved via: 61 + * 1) calculated after measurements if we know TSC ticks at mperf/P0 frequency 62 + * 2) cpufreq /sys/devices/.../cpu0/cpufreq/cpuinfo_max_freq at init time 63 + * 1. Is preferred as it also works without cpufreq subsystem (e.g. on Xen) 64 + */ 65 + static unsigned long max_frequency; 66 + 60 67 static unsigned long long tsc_at_measure_start; 61 68 static unsigned long long tsc_at_measure_end; 62 - static unsigned long max_frequency; 63 69 static unsigned long long *mperf_previous_count; 64 70 static unsigned long long *aperf_previous_count; 65 71 static unsigned long long *mperf_current_count; 66 72 static unsigned long long *aperf_current_count; 73 + 67 74 /* valid flag for all CPUs. 
If a MSR read failed it will be zero */ 68 75 static int *is_valid; 69 76 70 77 static int mperf_get_tsc(unsigned long long *tsc) 71 78 { 72 - return read_msr(0, MSR_TSC, tsc); 79 + int ret; 80 + ret = read_msr(0, MSR_TSC, tsc); 81 + if (ret) 82 + dprint("Reading TSC MSR failed, returning %llu\n", *tsc); 83 + return ret; 73 84 } 74 85 75 86 static int mperf_init_stats(unsigned int cpu) ··· 114 97 return 0; 115 98 } 116 99 117 - /* 118 - * get_average_perf() 119 - * 120 - * Returns the average performance (also considers boosted frequencies) 121 - * 122 - * Input: 123 - * aperf_diff: Difference of the aperf register over a time period 124 - * mperf_diff: Difference of the mperf register over the same time period 125 - * max_freq: Maximum frequency (P0) 126 - * 127 - * Returns: 128 - * Average performance over the time period 129 - */ 130 - static unsigned long get_average_perf(unsigned long long aperf_diff, 131 - unsigned long long mperf_diff) 132 - { 133 - unsigned int perf_percent = 0; 134 - if (((unsigned long)(-1) / 100) < aperf_diff) { 135 - int shift_count = 7; 136 - aperf_diff >>= shift_count; 137 - mperf_diff >>= shift_count; 138 - } 139 - perf_percent = (aperf_diff * 100) / mperf_diff; 140 - return (max_frequency * perf_percent) / 100; 141 - } 142 - 143 100 static int mperf_get_count_percent(unsigned int id, double *percent, 144 101 unsigned int cpu) 145 102 { 146 103 unsigned long long aperf_diff, mperf_diff, tsc_diff; 104 + unsigned long long timediff; 147 105 148 106 if (!is_valid[cpu]) 149 107 return -1; ··· 128 136 129 137 mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu]; 130 138 aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu]; 131 - tsc_diff = tsc_at_measure_end - tsc_at_measure_start; 132 139 133 - *percent = 100.0 * mperf_diff / tsc_diff; 134 - dprint("%s: mperf_diff: %llu, tsc_diff: %llu\n", 135 - mperf_cstates[id].name, mperf_diff, tsc_diff); 140 + if (max_freq_mode == MAX_FREQ_TSC_REF) { 141 + tsc_diff = 
tsc_at_measure_end - tsc_at_measure_start; 142 + *percent = 100.0 * mperf_diff / tsc_diff; 143 + dprint("%s: TSC Ref - mperf_diff: %llu, tsc_diff: %llu\n", 144 + mperf_cstates[id].name, mperf_diff, tsc_diff); 145 + } else if (max_freq_mode == MAX_FREQ_SYSFS) { 146 + timediff = timespec_diff_us(time_start, time_end); 147 + *percent = 100.0 * mperf_diff / timediff; 148 + dprint("%s: MAXFREQ - mperf_diff: %llu, time_diff: %llu\n", 149 + mperf_cstates[id].name, mperf_diff, timediff); 150 + } else 151 + return -1; 136 152 137 153 if (id == Cx) 138 154 *percent = 100.0 - *percent; ··· 154 154 static int mperf_get_count_freq(unsigned int id, unsigned long long *count, 155 155 unsigned int cpu) 156 156 { 157 - unsigned long long aperf_diff, mperf_diff; 157 + unsigned long long aperf_diff, mperf_diff, time_diff, tsc_diff; 158 158 159 159 if (id != AVG_FREQ) 160 160 return 1; ··· 165 165 mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu]; 166 166 aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu]; 167 167 168 - /* Return MHz for now, might want to return KHz if column width is more 169 - generic */ 170 - *count = get_average_perf(aperf_diff, mperf_diff) / 1000; 171 - dprint("%s: %llu\n", mperf_cstates[id].name, *count); 168 + if (max_freq_mode == MAX_FREQ_TSC_REF) { 169 + /* Calculate max_freq from TSC count */ 170 + tsc_diff = tsc_at_measure_end - tsc_at_measure_start; 171 + time_diff = timespec_diff_us(time_start, time_end); 172 + max_frequency = tsc_diff / time_diff; 173 + } 172 174 175 + *count = max_frequency * ((double)aperf_diff / mperf_diff); 176 + dprint("%s: Average freq based on %s maximum frequency:\n", 177 + mperf_cstates[id].name, 178 + (max_freq_mode == MAX_FREQ_TSC_REF) ? 
"TSC calculated" : "sysfs read"); 179 + dprint("%max_frequency: %lu", max_frequency); 180 + dprint("aperf_diff: %llu\n", aperf_diff); 181 + dprint("mperf_diff: %llu\n", mperf_diff); 182 + dprint("avg freq: %llu\n", *count); 173 183 return 0; 174 184 } 175 185 ··· 188 178 int cpu; 189 179 unsigned long long dbg; 190 180 181 + clock_gettime(CLOCK_REALTIME, &time_start); 191 182 mperf_get_tsc(&tsc_at_measure_start); 192 183 193 184 for (cpu = 0; cpu < cpu_count; cpu++) ··· 204 193 unsigned long long dbg; 205 194 int cpu; 206 195 207 - mperf_get_tsc(&tsc_at_measure_end); 208 - 209 196 for (cpu = 0; cpu < cpu_count; cpu++) 210 197 mperf_measure_stats(cpu); 198 + 199 + mperf_get_tsc(&tsc_at_measure_end); 200 + clock_gettime(CLOCK_REALTIME, &time_end); 211 201 212 202 mperf_get_tsc(&dbg); 213 203 dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end); ··· 216 204 return 0; 217 205 } 218 206 219 - struct cpuidle_monitor mperf_monitor; 220 - 221 - struct cpuidle_monitor *mperf_register(void) 207 + /* 208 + * Mperf register is defined to tick at P0 (maximum) frequency 209 + * 210 + * Instead of reading out P0 which can be tricky to read out from HW, 211 + * we use TSC counter if it reliably ticks at P0/mperf frequency. 212 + * 213 + * Still try to fall back to: 214 + * /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq 215 + * on older Intel HW without invariant TSC feature. 216 + * Or on AMD machines where TSC does not tick at P0 (do not exist yet, but 217 + * it's still double checked (MSR_AMD_HWCR)). 218 + * 219 + * On these machines the user would still get useful mperf 220 + * stats when acpi-cpufreq driver is loaded. 
221 + */ 222 + static int init_maxfreq_mode(void) 222 223 { 224 + int ret; 225 + unsigned long long hwcr; 223 226 unsigned long min; 224 227 225 - if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_APERF)) 226 - return NULL; 228 + if (!cpupower_cpu_info.caps & CPUPOWER_CAP_INV_TSC) 229 + goto use_sysfs; 227 230 228 - /* Assume min/max all the same on all cores */ 231 + if (cpupower_cpu_info.vendor == X86_VENDOR_AMD) { 232 + /* MSR_AMD_HWCR tells us whether TSC runs at P0/mperf 233 + * freq. 234 + * A test whether hwcr is accessable/available would be: 235 + * (cpupower_cpu_info.family > 0x10 || 236 + * cpupower_cpu_info.family == 0x10 && 237 + * cpupower_cpu_info.model >= 0x2)) 238 + * This should be the case for all aperf/mperf 239 + * capable AMD machines and is therefore safe to test here. 240 + * Compare with Linus kernel git commit: acf01734b1747b1ec4 241 + */ 242 + ret = read_msr(0, MSR_AMD_HWCR, &hwcr); 243 + /* 244 + * If the MSR read failed, assume a Xen system that did 245 + * not explicitly provide access to it and assume TSC works 246 + */ 247 + if (ret != 0) { 248 + dprint("TSC read 0x%x failed - assume TSC working\n", 249 + MSR_AMD_HWCR); 250 + return 0; 251 + } else if (1 & (hwcr >> 24)) { 252 + max_freq_mode = MAX_FREQ_TSC_REF; 253 + return 0; 254 + } else { /* Use sysfs max frequency if available */ } 255 + } else if (cpupower_cpu_info.vendor == X86_VENDOR_INTEL) { 256 + /* 257 + * On Intel we assume mperf (in C0) is ticking at same 258 + * rate than TSC 259 + */ 260 + max_freq_mode = MAX_FREQ_TSC_REF; 261 + return 0; 262 + } 263 + use_sysfs: 229 264 if (cpufreq_get_hardware_limits(0, &min, &max_frequency)) { 230 265 dprint("Cannot retrieve max freq from cpufreq kernel " 231 266 "subsystem\n"); 232 - return NULL; 267 + return -1; 233 268 } 269 + max_freq_mode = MAX_FREQ_SYSFS; 270 + return 0; 271 + } 272 + 273 + /* 274 + * This monitor provides: 275 + * 276 + * 1) Average frequency a CPU resided in 277 + * This always works if the CPU has aperf/mperf 
capabilities 278 + * 279 + * 2) C0 and Cx (any sleep state) time a CPU resided in 280 + * Works if mperf timer stops ticking in sleep states which 281 + * seem to be the case on all current HW. 282 + * Both is directly retrieved from HW registers and is independent 283 + * from kernel statistics. 284 + */ 285 + struct cpuidle_monitor mperf_monitor; 286 + struct cpuidle_monitor *mperf_register(void) 287 + { 288 + if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_APERF)) 289 + return NULL; 290 + 291 + if (init_maxfreq_mode()) 292 + return NULL; 234 293 235 294 /* Free this at program termination */ 236 295 is_valid = calloc(cpu_count, sizeof(int));