Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'imx-cleanup' of git://git.pengutronix.de/git/imx/linux-2.6 into next/cleanup

From: Sascha Hauer <s.hauer@pengutronix.de>

ARM: i.MX cleanups for 3.5

* tag 'imx-cleanup' of git://git.pengutronix.de/git/imx/linux-2.6: (5 commits)
ARM: mx53: fix pad definitions for MX53_PAD_EIM_D28__I2C1_SDA and MX53_PAD_GPIO_8__CAN1_RXCAN
ARM: imx/eukrea_mbimx27-baseboard: fix typo in error message
ARM: i.MX51 iomux: add missed definitions for SION-bit and mode for some pads
arm: imx: add missing select IMX_HAVE_PLATFORM for MACH_MX35_3DS in Kconfig
arm: imx: make various struct sys_timer static

Includes an update to 3.4-rc4

Signed-off-by: Arnd Bergmann <arnd@arndb.de>

+1787 -1309
+1 -1
Documentation/DocBook/media/v4l/pixfmt-nv12m.xml
··· 1 1 <refentry id="V4L2-PIX-FMT-NV12M"> 2 2 <refmeta> 3 - <refentrytitle>V4L2_PIX_FMT_NV12M ('NV12M')</refentrytitle> 3 + <refentrytitle>V4L2_PIX_FMT_NV12M ('NM12')</refentrytitle> 4 4 &manvol; 5 5 </refmeta> 6 6 <refnamediv>
+1 -1
Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
··· 1 1 <refentry id="V4L2-PIX-FMT-YUV420M"> 2 2 <refmeta> 3 - <refentrytitle>V4L2_PIX_FMT_YUV420M ('YU12M')</refentrytitle> 3 + <refentrytitle>V4L2_PIX_FMT_YUV420M ('YM12')</refentrytitle> 4 4 &manvol; 5 5 </refmeta> 6 6 <refnamediv>
+2 -2
MAINTAINERS
··· 2304 2304 F: drivers/acpi/dock.c 2305 2305 2306 2306 DOCUMENTATION 2307 - M: Randy Dunlap <rdunlap@xenotime.net> 2307 + M: Rob Landley <rob@landley.net> 2308 2308 L: linux-doc@vger.kernel.org 2309 - T: quilt http://xenotime.net/kernel-doc-patches/current/ 2309 + T: TBD 2310 2310 S: Maintained 2311 2311 F: Documentation/ 2312 2312
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 4 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc3 4 + EXTRAVERSION = -rc4 5 5 NAME = Saber-toothed Squirrel 6 6 7 7 # *DOCUMENTATION*
+1
arch/arm/configs/imx_v4_v5_defconfig
··· 112 112 CONFIG_IMX2_WDT=y 113 113 CONFIG_MFD_MC13XXX=y 114 114 CONFIG_REGULATOR=y 115 + CONFIG_REGULATOR_FIXED_VOLTAGE=y 115 116 CONFIG_REGULATOR_MC13783=y 116 117 CONFIG_REGULATOR_MC13892=y 117 118 CONFIG_FB=y
+4 -5
arch/arm/configs/u8500_defconfig
··· 8 8 # CONFIG_LBDAF is not set 9 9 # CONFIG_BLK_DEV_BSG is not set 10 10 CONFIG_ARCH_U8500=y 11 - CONFIG_UX500_SOC_DB5500=y 12 - CONFIG_UX500_SOC_DB8500=y 13 11 CONFIG_MACH_HREFV60=y 14 12 CONFIG_MACH_SNOWBALL=y 15 13 CONFIG_MACH_U5500=y ··· 37 39 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 38 40 CONFIG_BLK_DEV_RAM=y 39 41 CONFIG_BLK_DEV_RAM_SIZE=65536 40 - CONFIG_MISC_DEVICES=y 41 42 CONFIG_AB8500_PWM=y 42 43 CONFIG_SENSORS_BH1780=y 43 44 CONFIG_NETDEVICES=y ··· 62 65 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y 63 66 CONFIG_HW_RANDOM=y 64 67 CONFIG_HW_RANDOM_NOMADIK=y 65 - CONFIG_I2C=y 66 - CONFIG_I2C_NOMADIK=y 67 68 CONFIG_SPI=y 68 69 CONFIG_SPI_PL022=y 69 70 CONFIG_GPIO_STMPE=y 70 71 CONFIG_GPIO_TC3589X=y 72 + CONFIG_POWER_SUPPLY=y 73 + CONFIG_AB8500_BM=y 74 + CONFIG_AB8500_BATTERY_THERM_ON_BATCTRL=y 71 75 CONFIG_MFD_STMPE=y 72 76 CONFIG_MFD_TC3589X=y 73 77 CONFIG_AB5500_CORE=y 74 78 CONFIG_AB8500_CORE=y 79 + CONFIG_REGULATOR=y 75 80 CONFIG_REGULATOR_AB8500=y 76 81 # CONFIG_HID_SUPPORT is not set 77 82 CONFIG_USB_GADGET=y
-1
arch/arm/mach-at91/at91rm9200_devices.c
··· 1162 1162 } 1163 1163 } 1164 1164 #else 1165 - void __init __deprecated at91_init_serial(struct at91_uart_config *config) {} 1166 1165 void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {} 1167 1166 void __init at91_add_device_serial(void) {} 1168 1167 #endif
+2
arch/arm/mach-at91/at91rm9200_time.c
··· 23 23 #include <linux/interrupt.h> 24 24 #include <linux/irq.h> 25 25 #include <linux/clockchips.h> 26 + #include <linux/export.h> 26 27 27 28 #include <asm/mach/time.h> 28 29 ··· 177 176 }; 178 177 179 178 void __iomem *at91_st_base; 179 + EXPORT_SYMBOL_GPL(at91_st_base); 180 180 181 181 void __init at91rm9200_ioremap_st(u32 addr) 182 182 {
+1 -1
arch/arm/mach-at91/board-rm9200ek.c
··· 103 103 }; 104 104 105 105 #define EK_FLASH_BASE AT91_CHIPSELECT_0 106 - #define EK_FLASH_SIZE SZ_2M 106 + #define EK_FLASH_SIZE SZ_8M 107 107 108 108 static struct physmap_flash_data ek_flash_data = { 109 109 .width = 2,
+3 -2
arch/arm/mach-at91/board-sam9261ek.c
··· 76 76 .flags = IORESOURCE_MEM 77 77 }, 78 78 [2] = { 79 - .start = AT91_PIN_PC11, 80 - .end = AT91_PIN_PC11, 81 79 .flags = IORESOURCE_IRQ 82 80 | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE, 83 81 } ··· 119 121 120 122 static void __init ek_add_device_dm9000(void) 121 123 { 124 + struct resource *r = &dm9000_resource[2]; 125 + 122 126 /* Configure chip-select 2 (DM9000) */ 123 127 sam9_smc_configure(0, 2, &dm9000_smc_config); 124 128 ··· 130 130 /* Configure Interrupt pin as input, no pull-up */ 131 131 at91_set_gpio_input(AT91_PIN_PC11, 0); 132 132 133 + r->start = r->end = gpio_to_irq(AT91_PIN_PC11); 133 134 platform_device_register(&dm9000_device); 134 135 } 135 136 #else
+1
arch/arm/mach-at91/clock.c
··· 35 35 #include "generic.h" 36 36 37 37 void __iomem *at91_pmc_base; 38 + EXPORT_SYMBOL_GPL(at91_pmc_base); 38 39 39 40 /* 40 41 * There's a lot more which can be done with clocks, including cpufreq
+1 -1
arch/arm/mach-at91/include/mach/at91_pmc.h
··· 25 25 #define at91_pmc_write(field, value) \ 26 26 __raw_writel(value, at91_pmc_base + field) 27 27 #else 28 - .extern at91_aic_base 28 + .extern at91_pmc_base 29 29 #endif 30 30 31 31 #define AT91_PMC_SCER 0x00 /* System Clock Enable Register */
+2
arch/arm/mach-at91/setup.c
··· 54 54 } 55 55 56 56 void __iomem *at91_ramc_base[2]; 57 + EXPORT_SYMBOL_GPL(at91_ramc_base); 57 58 58 59 void __init at91_ioremap_ramc(int id, u32 addr, u32 size) 59 60 { ··· 293 292 } 294 293 295 294 void __iomem *at91_matrix_base; 295 + EXPORT_SYMBOL_GPL(at91_matrix_base); 296 296 297 297 void __init at91_ioremap_matrix(u32 base_addr) 298 298 {
+2 -2
arch/arm/mach-bcmring/core.c
··· 52 52 #include <mach/csp/chipcHw_inline.h> 53 53 #include <mach/csp/tmrHw_reg.h> 54 54 55 - static AMBA_APB_DEVICE(uartA, "uarta", MM_ADDR_IO_UARTA, { IRQ_UARTA }, NULL); 56 - static AMBA_APB_DEVICE(uartB, "uartb", MM_ADDR_IO_UARTB, { IRQ_UARTB }, NULL); 55 + static AMBA_APB_DEVICE(uartA, "uartA", 0, MM_ADDR_IO_UARTA, {IRQ_UARTA}, NULL); 56 + static AMBA_APB_DEVICE(uartB, "uartB", 0, MM_ADDR_IO_UARTB, {IRQ_UARTB}, NULL); 57 57 58 58 static struct clk pll1_clk = { 59 59 .name = "PLL1",
+2
arch/arm/mach-imx/Kconfig
··· 571 571 select MXC_DEBUG_BOARD 572 572 select IMX_HAVE_PLATFORM_FSL_USB2_UDC 573 573 select IMX_HAVE_PLATFORM_IMX2_WDT 574 + select IMX_HAVE_PLATFORM_IMX_FB 574 575 select IMX_HAVE_PLATFORM_IMX_I2C 575 576 select IMX_HAVE_PLATFORM_IMX_UART 577 + select IMX_HAVE_PLATFORM_IPU_CORE 576 578 select IMX_HAVE_PLATFORM_MXC_EHCI 577 579 select IMX_HAVE_PLATFORM_MXC_NAND 578 580 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+1 -1
arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
··· 243 243 static void __maybe_unused ads7846_dev_init(void) 244 244 { 245 245 if (gpio_request(ADS7846_PENDOWN, "ADS7846 pendown") < 0) { 246 - printk(KERN_ERR "can't get ads746 pen down GPIO\n"); 246 + printk(KERN_ERR "can't get ads7846 pen down GPIO\n"); 247 247 return; 248 248 } 249 249 gpio_direction_input(ADS7846_PENDOWN);
+4 -2
arch/arm/mach-imx/imx27-dt.c
··· 35 35 static int __init imx27_avic_add_irq_domain(struct device_node *np, 36 36 struct device_node *interrupt_parent) 37 37 { 38 - irq_domain_add_simple(np, 0); 38 + irq_domain_add_legacy(np, 64, 0, 0, &irq_domain_simple_ops, NULL); 39 39 return 0; 40 40 } 41 41 ··· 44 44 { 45 45 static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; 46 46 47 - irq_domain_add_simple(np, gpio_irq_base); 47 + gpio_irq_base -= 32; 48 + irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, 49 + NULL); 48 50 49 51 return 0; 50 52 }
+1 -1
arch/arm/mach-imx/mach-cpuimx35.c
··· 194 194 mx35_clocks_init(); 195 195 } 196 196 197 - struct sys_timer eukrea_cpuimx35_timer = { 197 + static struct sys_timer eukrea_cpuimx35_timer = { 198 198 .init = eukrea_cpuimx35_timer_init, 199 199 }; 200 200
+1 -1
arch/arm/mach-imx/mach-mx1ads.c
··· 134 134 mx1_clocks_init(32000); 135 135 } 136 136 137 - struct sys_timer mx1ads_timer = { 137 + static struct sys_timer mx1ads_timer = { 138 138 .init = mx1ads_timer_init, 139 139 }; 140 140
+1 -1
arch/arm/mach-imx/mach-mx31lite.c
··· 283 283 mx31_clocks_init(26000000); 284 284 } 285 285 286 - struct sys_timer mx31lite_timer = { 286 + static struct sys_timer mx31lite_timer = { 287 287 .init = mx31lite_timer_init, 288 288 }; 289 289
+1 -1
arch/arm/mach-imx/mach-mx31moboard.c
··· 580 580 mx31_clocks_init(26000000); 581 581 } 582 582 583 - struct sys_timer mx31moboard_timer = { 583 + static struct sys_timer mx31moboard_timer = { 584 584 .init = mx31moboard_timer_init, 585 585 }; 586 586
+1 -1
arch/arm/mach-imx/mach-mx35_3ds.c
··· 419 419 mx35_clocks_init(); 420 420 } 421 421 422 - struct sys_timer mx35pdk_timer = { 422 + static struct sys_timer mx35pdk_timer = { 423 423 .init = mx35pdk_timer_init, 424 424 }; 425 425
+1 -1
arch/arm/mach-imx/mach-pcm037.c
··· 683 683 mx31_clocks_init(26000000); 684 684 } 685 685 686 - struct sys_timer pcm037_timer = { 686 + static struct sys_timer pcm037_timer = { 687 687 .init = pcm037_timer_init, 688 688 }; 689 689
+1 -1
arch/arm/mach-imx/mach-pcm043.c
··· 399 399 mx35_clocks_init(); 400 400 } 401 401 402 - struct sys_timer pcm043_timer = { 402 + static struct sys_timer pcm043_timer = { 403 403 .init = pcm043_timer_init, 404 404 }; 405 405
+1 -1
arch/arm/mach-imx/mach-vpr200.c
··· 310 310 mx35_clocks_init(); 311 311 } 312 312 313 - struct sys_timer vpr200_timer = { 313 + static struct sys_timer vpr200_timer = { 314 314 .init = vpr200_timer_init, 315 315 }; 316 316
+1 -1
arch/arm/mach-imx/mm-imx5.c
··· 35 35 } 36 36 clk_enable(gpc_dvfs_clk); 37 37 mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF); 38 - if (tzic_enable_wake() != 0) 38 + if (!tzic_enable_wake()) 39 39 cpu_do_idle(); 40 40 clk_disable(gpc_dvfs_clk); 41 41 }
+1
arch/arm/mach-omap1/mux.c
··· 27 27 #include <linux/io.h> 28 28 #include <linux/spinlock.h> 29 29 30 + #include <mach/hardware.h> 30 31 31 32 #include <plat/mux.h> 32 33
+2 -2
arch/arm/mach-omap1/timer.c
··· 47 47 int n = (pdev->id - 1) << 1; 48 48 u32 l; 49 49 50 - l = __raw_readl(MOD_CONF_CTRL_1) & ~(0x03 << n); 50 + l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n); 51 51 l |= source << n; 52 - __raw_writel(l, MOD_CONF_CTRL_1); 52 + omap_writel(l, MOD_CONF_CTRL_1); 53 53 54 54 return 0; 55 55 }
+6 -6
arch/arm/mach-omap2/board-4430sdp.c
··· 20 20 #include <linux/usb/otg.h> 21 21 #include <linux/spi/spi.h> 22 22 #include <linux/i2c/twl.h> 23 + #include <linux/mfd/twl6040.h> 23 24 #include <linux/gpio_keys.h> 24 25 #include <linux/regulator/machine.h> 25 26 #include <linux/regulator/fixed.h> ··· 561 560 }, 562 561 }; 563 562 564 - static struct twl4030_codec_data twl6040_codec = { 563 + static struct twl6040_codec_data twl6040_codec = { 565 564 /* single-step ramp for headset and handsfree */ 566 565 .hs_left_step = 0x0f, 567 566 .hs_right_step = 0x0f, ··· 569 568 .hf_right_step = 0x1d, 570 569 }; 571 570 572 - static struct twl4030_vibra_data twl6040_vibra = { 571 + static struct twl6040_vibra_data twl6040_vibra = { 573 572 .vibldrv_res = 8, 574 573 .vibrdrv_res = 3, 575 574 .viblmotor_res = 10, ··· 578 577 .vddvibr_uV = 0, /* fixed volt supply - VBAT */ 579 578 }; 580 579 581 - static struct twl4030_audio_data twl6040_audio = { 580 + static struct twl6040_platform_data twl6040_data = { 582 581 .codec = &twl6040_codec, 583 582 .vibra = &twl6040_vibra, 584 583 .audpwron_gpio = 127, 585 - .naudint_irq = OMAP44XX_IRQ_SYS_2N, 586 584 .irq_base = TWL6040_CODEC_IRQ_BASE, 587 585 }; 588 586 589 587 static struct twl4030_platform_data sdp4430_twldata = { 590 - .audio = &twl6040_audio, 591 588 /* Regulators */ 592 589 .vusim = &sdp4430_vusim, 593 590 .vaux1 = &sdp4430_vaux1, ··· 616 617 TWL_COMMON_REGULATOR_VCXIO | 617 618 TWL_COMMON_REGULATOR_VUSB | 618 619 TWL_COMMON_REGULATOR_CLK32KG); 619 - omap4_pmic_init("twl6030", &sdp4430_twldata); 620 + omap4_pmic_init("twl6030", &sdp4430_twldata, 621 + &twl6040_data, OMAP44XX_IRQ_SYS_2N); 620 622 omap_register_i2c_bus(2, 400, NULL, 0); 621 623 omap_register_i2c_bus(3, 400, sdp4430_i2c_3_boardinfo, 622 624 ARRAY_SIZE(sdp4430_i2c_3_boardinfo));
+1 -1
arch/arm/mach-omap2/board-generic.c
··· 137 137 138 138 static void __init omap4_i2c_init(void) 139 139 { 140 - omap4_pmic_init("twl6030", &sdp4430_twldata); 140 + omap4_pmic_init("twl6030", &sdp4430_twldata, NULL, 0); 141 141 } 142 142 143 143 static void __init omap4_init(void)
+6 -7
arch/arm/mach-omap2/board-omap4panda.c
··· 25 25 #include <linux/gpio.h> 26 26 #include <linux/usb/otg.h> 27 27 #include <linux/i2c/twl.h> 28 + #include <linux/mfd/twl6040.h> 28 29 #include <linux/regulator/machine.h> 29 30 #include <linux/regulator/fixed.h> 30 31 #include <linux/wl12xx.h> ··· 285 284 return 0; 286 285 } 287 286 288 - static struct twl4030_codec_data twl6040_codec = { 287 + static struct twl6040_codec_data twl6040_codec = { 289 288 /* single-step ramp for headset and handsfree */ 290 289 .hs_left_step = 0x0f, 291 290 .hs_right_step = 0x0f, ··· 293 292 .hf_right_step = 0x1d, 294 293 }; 295 294 296 - static struct twl4030_audio_data twl6040_audio = { 295 + static struct twl6040_platform_data twl6040_data = { 297 296 .codec = &twl6040_codec, 298 297 .audpwron_gpio = 127, 299 - .naudint_irq = OMAP44XX_IRQ_SYS_2N, 300 298 .irq_base = TWL6040_CODEC_IRQ_BASE, 301 299 }; 302 300 303 301 /* Panda board uses the common PMIC configuration */ 304 - static struct twl4030_platform_data omap4_panda_twldata = { 305 - .audio = &twl6040_audio, 306 - }; 302 + static struct twl4030_platform_data omap4_panda_twldata; 307 303 308 304 /* 309 305 * Display monitor features are burnt in their EEPROM as EDID data. The EEPROM ··· 324 326 TWL_COMMON_REGULATOR_VCXIO | 325 327 TWL_COMMON_REGULATOR_VUSB | 326 328 TWL_COMMON_REGULATOR_CLK32KG); 327 - omap4_pmic_init("twl6030", &omap4_panda_twldata); 329 + omap4_pmic_init("twl6030", &omap4_panda_twldata, 330 + &twl6040_data, OMAP44XX_IRQ_SYS_2N); 328 331 omap_register_i2c_bus(2, 400, NULL, 0); 329 332 /* 330 333 * Bus 3 is attached to the DVI port where devices like the pico DLP
+15 -2
arch/arm/mach-omap2/omap_hwmod.c
··· 1422 1422 goto dis_opt_clks; 1423 1423 _write_sysconfig(v, oh); 1424 1424 1425 + if (oh->class->sysc->srst_udelay) 1426 + udelay(oh->class->sysc->srst_udelay); 1427 + 1425 1428 if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS) 1426 1429 omap_test_timeout((omap_hwmod_read(oh, 1427 1430 oh->class->sysc->syss_offs) ··· 1906 1903 */ 1907 1904 int omap_hwmod_softreset(struct omap_hwmod *oh) 1908 1905 { 1909 - if (!oh) 1906 + u32 v; 1907 + int ret; 1908 + 1909 + if (!oh || !(oh->_sysc_cache)) 1910 1910 return -EINVAL; 1911 1911 1912 - return _ocp_softreset(oh); 1912 + v = oh->_sysc_cache; 1913 + ret = _set_softreset(oh, &v); 1914 + if (ret) 1915 + goto error; 1916 + _write_sysconfig(v, oh); 1917 + 1918 + error: 1919 + return ret; 1913 1920 } 1914 1921 1915 1922 /**
-1
arch/arm/mach-omap2/omap_hwmod_2420_data.c
··· 1000 1000 .flags = OMAP_FIREWALL_L4, 1001 1001 } 1002 1002 }, 1003 - .flags = OCPIF_SWSUP_IDLE, 1004 1003 .user = OCP_USER_MPU | OCP_USER_SDMA, 1005 1004 }; 1006 1005
-1
arch/arm/mach-omap2/omap_hwmod_2430_data.c
··· 1049 1049 .slave = &omap2430_dss_venc_hwmod, 1050 1050 .clk = "dss_ick", 1051 1051 .addr = omap2_dss_venc_addrs, 1052 - .flags = OCPIF_SWSUP_IDLE, 1053 1052 .user = OCP_USER_MPU | OCP_USER_SDMA, 1054 1053 }; 1055 1054
-1
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
··· 1676 1676 .flags = OMAP_FIREWALL_L4, 1677 1677 } 1678 1678 }, 1679 - .flags = OCPIF_SWSUP_IDLE, 1680 1679 .user = OCP_USER_MPU | OCP_USER_SDMA, 1681 1680 }; 1682 1681
+9
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
··· 2594 2594 static struct omap_hwmod_class_sysconfig omap44xx_iss_sysc = { 2595 2595 .rev_offs = 0x0000, 2596 2596 .sysc_offs = 0x0010, 2597 + /* 2598 + * ISS needs 100 OCP clk cycles delay after a softreset before 2599 + * accessing sysconfig again. 2600 + * The lowest frequency at the moment for L3 bus is 100 MHz, so 2601 + * 1usec delay is needed. Add an x2 margin to be safe (2 usecs). 2602 + * 2603 + * TODO: Indicate errata when available. 2604 + */ 2605 + .srst_udelay = 2, 2597 2606 .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS | 2598 2607 SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), 2599 2608 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+7 -117
arch/arm/mach-omap2/serial.c
··· 108 108 static void omap_uart_set_smartidle(struct platform_device *pdev) 109 109 { 110 110 struct omap_device *od = to_omap_device(pdev); 111 + u8 idlemode; 111 112 112 - omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_SMART); 113 + if (od->hwmods[0]->class->sysc->idlemodes & SIDLE_SMART_WKUP) 114 + idlemode = HWMOD_IDLEMODE_SMART_WKUP; 115 + else 116 + idlemode = HWMOD_IDLEMODE_SMART; 117 + 118 + omap_hwmod_set_slave_idlemode(od->hwmods[0], idlemode); 113 119 } 114 120 115 121 #else ··· 126 120 #endif /* CONFIG_PM */ 127 121 128 122 #ifdef CONFIG_OMAP_MUX 129 - static struct omap_device_pad default_uart1_pads[] __initdata = { 130 - { 131 - .name = "uart1_cts.uart1_cts", 132 - .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, 133 - }, 134 - { 135 - .name = "uart1_rts.uart1_rts", 136 - .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, 137 - }, 138 - { 139 - .name = "uart1_tx.uart1_tx", 140 - .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, 141 - }, 142 - { 143 - .name = "uart1_rx.uart1_rx", 144 - .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP, 145 - .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, 146 - .idle = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, 147 - }, 148 - }; 149 - 150 - static struct omap_device_pad default_uart2_pads[] __initdata = { 151 - { 152 - .name = "uart2_cts.uart2_cts", 153 - .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, 154 - }, 155 - { 156 - .name = "uart2_rts.uart2_rts", 157 - .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, 158 - }, 159 - { 160 - .name = "uart2_tx.uart2_tx", 161 - .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, 162 - }, 163 - { 164 - .name = "uart2_rx.uart2_rx", 165 - .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP, 166 - .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, 167 - .idle = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, 168 - }, 169 - }; 170 - 171 - static struct omap_device_pad default_uart3_pads[] __initdata = { 172 - { 173 - .name = "uart3_cts_rctx.uart3_cts_rctx", 174 - .enable = 
OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, 175 - }, 176 - { 177 - .name = "uart3_rts_sd.uart3_rts_sd", 178 - .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, 179 - }, 180 - { 181 - .name = "uart3_tx_irtx.uart3_tx_irtx", 182 - .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, 183 - }, 184 - { 185 - .name = "uart3_rx_irrx.uart3_rx_irrx", 186 - .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP, 187 - .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE0, 188 - .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0, 189 - }, 190 - }; 191 - 192 - static struct omap_device_pad default_omap36xx_uart4_pads[] __initdata = { 193 - { 194 - .name = "gpmc_wait2.uart4_tx", 195 - .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, 196 - }, 197 - { 198 - .name = "gpmc_wait3.uart4_rx", 199 - .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP, 200 - .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE2, 201 - .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE2, 202 - }, 203 - }; 204 - 205 - static struct omap_device_pad default_omap4_uart4_pads[] __initdata = { 206 - { 207 - .name = "uart4_tx.uart4_tx", 208 - .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, 209 - }, 210 - { 211 - .name = "uart4_rx.uart4_rx", 212 - .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP, 213 - .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE0, 214 - .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0, 215 - }, 216 - }; 217 - 218 123 static void omap_serial_fill_default_pads(struct omap_board_data *bdata) 219 124 { 220 - switch (bdata->id) { 221 - case 0: 222 - bdata->pads = default_uart1_pads; 223 - bdata->pads_cnt = ARRAY_SIZE(default_uart1_pads); 224 - break; 225 - case 1: 226 - bdata->pads = default_uart2_pads; 227 - bdata->pads_cnt = ARRAY_SIZE(default_uart2_pads); 228 - break; 229 - case 2: 230 - bdata->pads = default_uart3_pads; 231 - bdata->pads_cnt = ARRAY_SIZE(default_uart3_pads); 232 - break; 233 - case 3: 234 - if (cpu_is_omap44xx()) { 235 - bdata->pads = default_omap4_uart4_pads; 236 - bdata->pads_cnt = 237 - ARRAY_SIZE(default_omap4_uart4_pads); 238 - } else if 
(cpu_is_omap3630()) { 239 - bdata->pads = default_omap36xx_uart4_pads; 240 - bdata->pads_cnt = 241 - ARRAY_SIZE(default_omap36xx_uart4_pads); 242 - } 243 - break; 244 - default: 245 - break; 246 - } 247 125 } 248 126 #else 249 127 static void omap_serial_fill_default_pads(struct omap_board_data *bdata) {}
+32 -5
arch/arm/mach-omap2/twl-common.c
··· 37 37 .flags = I2C_CLIENT_WAKE, 38 38 }; 39 39 40 + static struct i2c_board_info __initdata omap4_i2c1_board_info[] = { 41 + { 42 + .addr = 0x48, 43 + .flags = I2C_CLIENT_WAKE, 44 + }, 45 + { 46 + I2C_BOARD_INFO("twl6040", 0x4b), 47 + }, 48 + }; 49 + 40 50 void __init omap_pmic_init(int bus, u32 clkrate, 41 51 const char *pmic_type, int pmic_irq, 42 52 struct twl4030_platform_data *pmic_data) ··· 59 49 omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1); 60 50 } 61 51 52 + void __init omap4_pmic_init(const char *pmic_type, 53 + struct twl4030_platform_data *pmic_data, 54 + struct twl6040_platform_data *twl6040_data, int twl6040_irq) 55 + { 56 + /* PMIC part*/ 57 + strncpy(omap4_i2c1_board_info[0].type, pmic_type, 58 + sizeof(omap4_i2c1_board_info[0].type)); 59 + omap4_i2c1_board_info[0].irq = OMAP44XX_IRQ_SYS_1N; 60 + omap4_i2c1_board_info[0].platform_data = pmic_data; 61 + 62 + /* TWL6040 audio IC part */ 63 + omap4_i2c1_board_info[1].irq = twl6040_irq; 64 + omap4_i2c1_board_info[1].platform_data = twl6040_data; 65 + 66 + omap_register_i2c_bus(1, 400, omap4_i2c1_board_info, 2); 67 + 68 + } 69 + 62 70 void __init omap_pmic_late_init(void) 63 71 { 64 72 /* Init the OMAP TWL parameters (if PMIC has been registerd) */ 65 - if (!pmic_i2c_board_info.irq) 66 - return; 67 - 68 - omap3_twl_init(); 69 - omap4_twl_init(); 73 + if (pmic_i2c_board_info.irq) 74 + omap3_twl_init(); 75 + if (omap4_i2c1_board_info[0].irq) 76 + omap4_twl_init(); 70 77 } 71 78 72 79 #if defined(CONFIG_ARCH_OMAP3)
+4 -6
arch/arm/mach-omap2/twl-common.h
··· 29 29 30 30 31 31 struct twl4030_platform_data; 32 + struct twl6040_platform_data; 32 33 33 34 void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq, 34 35 struct twl4030_platform_data *pmic_data); ··· 47 46 omap_pmic_init(1, 2600, pmic_type, INT_34XX_SYS_NIRQ, pmic_data); 48 47 } 49 48 50 - static inline void omap4_pmic_init(const char *pmic_type, 51 - struct twl4030_platform_data *pmic_data) 52 - { 53 - /* Phoenix Audio IC needs I2C1 to start with 400 KHz or less */ 54 - omap_pmic_init(1, 400, pmic_type, OMAP44XX_IRQ_SYS_1N, pmic_data); 55 - } 49 + void omap4_pmic_init(const char *pmic_type, 50 + struct twl4030_platform_data *pmic_data, 51 + struct twl6040_platform_data *audio_data, int twl6040_irq); 56 52 57 53 void omap3_pmic_get_config(struct twl4030_platform_data *pmic_data, 58 54 u32 pdata_flags, u32 regulators_flags);
+1
arch/arm/mach-ux500/Kconfig
··· 17 17 config UX500_SOC_DB8500 18 18 bool 19 19 select MFD_DB8500_PRCMU 20 + select REGULATOR 20 21 select REGULATOR_DB8500_PRCMU 21 22 select CPU_FREQ_TABLE if CPU_FREQ 22 23
+1 -1
arch/arm/mach-ux500/platsmp.c
··· 99 99 */ 100 100 write_pen_release(cpu_logical_map(cpu)); 101 101 102 - gic_raise_softirq(cpumask_of(cpu), 1); 102 + smp_send_reschedule(cpu); 103 103 104 104 timeout = jiffies + (1 * HZ); 105 105 while (time_before(jiffies, timeout)) {
+24 -24
arch/arm/plat-mxc/include/mach/iomux-mx51.h
··· 256 256 #define MX51_PAD_NANDF_RB1__GPIO3_9 IOMUX_PAD(0x4fc, 0x120, 3, __NA_, 0, MX51_GPIO_PAD_CTRL) 257 257 #define MX51_PAD_NANDF_RB1__NANDF_RB1 IOMUX_PAD(0x4fc, 0x120, 0, __NA_, 0, NO_PAD_CTRL) 258 258 #define MX51_PAD_NANDF_RB1__PATA_IORDY IOMUX_PAD(0x4fc, 0x120, 1, __NA_, 0, NO_PAD_CTRL) 259 - #define MX51_PAD_NANDF_RB1__SD4_CMD IOMUX_PAD(0x4fc, 0x120, 5, __NA_, 0, MX51_SDHCI_PAD_CTRL) 259 + #define MX51_PAD_NANDF_RB1__SD4_CMD IOMUX_PAD(0x4fc, 0x120, 0x15, __NA_, 0, MX51_SDHCI_PAD_CTRL) 260 260 #define MX51_PAD_NANDF_RB2__DISP2_WAIT IOMUX_PAD(0x500, 0x124, 5, 0x9a8, 0, NO_PAD_CTRL) 261 261 #define MX51_PAD_NANDF_RB2__ECSPI2_SCLK IOMUX_PAD(0x500, 0x124, 2, __NA_, 0, MX51_ECSPI_PAD_CTRL) 262 262 #define MX51_PAD_NANDF_RB2__FEC_COL IOMUX_PAD(0x500, 0x124, 1, 0x94c, 0, MX51_PAD_CTRL_2) 263 263 #define MX51_PAD_NANDF_RB2__GPIO3_10 IOMUX_PAD(0x500, 0x124, 3, __NA_, 0, MX51_GPIO_PAD_CTRL) 264 264 #define MX51_PAD_NANDF_RB2__NANDF_RB2 IOMUX_PAD(0x500, 0x124, 0, __NA_, 0, NO_PAD_CTRL) 265 - #define MX51_PAD_NANDF_RB2__USBH3_H3_DP IOMUX_PAD(0x500, 0x124, 7, __NA_, 0, NO_PAD_CTRL) 265 + #define MX51_PAD_NANDF_RB2__USBH3_H3_DP IOMUX_PAD(0x500, 0x124, 0x17, __NA_, 0, NO_PAD_CTRL) 266 266 #define MX51_PAD_NANDF_RB2__USBH3_NXT IOMUX_PAD(0x500, 0x124, 6, 0xa20, 0, NO_PAD_CTRL) 267 267 #define MX51_PAD_NANDF_RB3__DISP1_WAIT IOMUX_PAD(0x504, 0x128, 5, __NA_, 0, NO_PAD_CTRL) 268 268 #define MX51_PAD_NANDF_RB3__ECSPI2_MISO IOMUX_PAD(0x504, 0x128, 2, __NA_, 0, MX51_ECSPI_PAD_CTRL) ··· 270 270 #define MX51_PAD_NANDF_RB3__GPIO3_11 IOMUX_PAD(0x504, 0x128, 3, __NA_, 0, MX51_GPIO_PAD_CTRL) 271 271 #define MX51_PAD_NANDF_RB3__NANDF_RB3 IOMUX_PAD(0x504, 0x128, 0, __NA_, 0, NO_PAD_CTRL) 272 272 #define MX51_PAD_NANDF_RB3__USBH3_CLK IOMUX_PAD(0x504, 0x128, 6, 0x9f8, 0, NO_PAD_CTRL) 273 - #define MX51_PAD_NANDF_RB3__USBH3_H3_DM IOMUX_PAD(0x504, 0x128, 7, __NA_, 0, NO_PAD_CTRL) 273 + #define MX51_PAD_NANDF_RB3__USBH3_H3_DM IOMUX_PAD(0x504, 0x128, 0x17, __NA_, 0, NO_PAD_CTRL) 274 274 
#define MX51_PAD_GPIO_NAND__GPIO_NAND IOMUX_PAD(0x514, 0x12c, 0, 0x998, 0, MX51_GPIO_PAD_CTRL) 275 275 #define MX51_PAD_GPIO_NAND__PATA_INTRQ IOMUX_PAD(0x514, 0x12c, 1, __NA_, 0, NO_PAD_CTRL) 276 276 #define MX51_PAD_NANDF_CS0__GPIO3_16 IOMUX_PAD(0x518, 0x130, 3, __NA_, 0, MX51_GPIO_PAD_CTRL) ··· 283 283 #define MX51_PAD_NANDF_CS2__NANDF_CS2 IOMUX_PAD(0x520, 0x138, 0, __NA_, 0, NO_PAD_CTRL) 284 284 #define MX51_PAD_NANDF_CS2__PATA_CS_0 IOMUX_PAD(0x520, 0x138, 1, __NA_, 0, NO_PAD_CTRL) 285 285 #define MX51_PAD_NANDF_CS2__SD4_CLK IOMUX_PAD(0x520, 0x138, 5, __NA_, 0, MX51_SDHCI_PAD_CTRL | PAD_CTL_HYS) 286 - #define MX51_PAD_NANDF_CS2__USBH3_H1_DP IOMUX_PAD(0x520, 0x138, 7, __NA_, 0, NO_PAD_CTRL) 286 + #define MX51_PAD_NANDF_CS2__USBH3_H1_DP IOMUX_PAD(0x520, 0x138, 0x17, __NA_, 0, NO_PAD_CTRL) 287 287 #define MX51_PAD_NANDF_CS3__FEC_MDC IOMUX_PAD(0x524, 0x13c, 2, __NA_, 0, MX51_PAD_CTRL_5) 288 288 #define MX51_PAD_NANDF_CS3__GPIO3_19 IOMUX_PAD(0x524, 0x13c, 3, __NA_, 0, MX51_GPIO_PAD_CTRL) 289 289 #define MX51_PAD_NANDF_CS3__NANDF_CS3 IOMUX_PAD(0x524, 0x13c, 0, __NA_, 0, NO_PAD_CTRL) 290 290 #define MX51_PAD_NANDF_CS3__PATA_CS_1 IOMUX_PAD(0x524, 0x13c, 1, __NA_, 0, NO_PAD_CTRL) 291 291 #define MX51_PAD_NANDF_CS3__SD4_DAT0 IOMUX_PAD(0x524, 0x13c, 5, __NA_, 0, MX51_SDHCI_PAD_CTRL) 292 - #define MX51_PAD_NANDF_CS3__USBH3_H1_DM IOMUX_PAD(0x524, 0x13c, 7, __NA_, 0, NO_PAD_CTRL) 292 + #define MX51_PAD_NANDF_CS3__USBH3_H1_DM IOMUX_PAD(0x524, 0x13c, 0x17, __NA_, 0, NO_PAD_CTRL) 293 293 #define MX51_PAD_NANDF_CS4__FEC_TDATA1 IOMUX_PAD(0x528, 0x140, 2, __NA_, 0, MX51_PAD_CTRL_5) 294 294 #define MX51_PAD_NANDF_CS4__GPIO3_20 IOMUX_PAD(0x528, 0x140, 3, __NA_, 0, MX51_GPIO_PAD_CTRL) 295 295 #define MX51_PAD_NANDF_CS4__NANDF_CS4 IOMUX_PAD(0x528, 0x140, 0, __NA_, 0, NO_PAD_CTRL) ··· 316 316 #define MX51_PAD_NANDF_RDY_INT__FEC_TX_CLK IOMUX_PAD(0x538, 0x150, 1, 0x974, 0, MX51_PAD_CTRL_4) 317 317 #define MX51_PAD_NANDF_RDY_INT__GPIO3_24 IOMUX_PAD(0x538, 0x150, 3, __NA_, 0, 
MX51_GPIO_PAD_CTRL) 318 318 #define MX51_PAD_NANDF_RDY_INT__NANDF_RDY_INT IOMUX_PAD(0x538, 0x150, 0, 0x938, 0, NO_PAD_CTRL) 319 - #define MX51_PAD_NANDF_RDY_INT__SD3_CMD IOMUX_PAD(0x538, 0x150, 5, __NA_, 0, MX51_SDHCI_PAD_CTRL) 319 + #define MX51_PAD_NANDF_RDY_INT__SD3_CMD IOMUX_PAD(0x538, 0x150, 0x15, __NA_, 0, MX51_SDHCI_PAD_CTRL) 320 320 #define MX51_PAD_NANDF_D15__ECSPI2_MOSI IOMUX_PAD(0x53c, 0x154, 2, __NA_, 0, MX51_ECSPI_PAD_CTRL) 321 321 #define MX51_PAD_NANDF_D15__GPIO3_25 IOMUX_PAD(0x53c, 0x154, 3, __NA_, 0, MX51_GPIO_PAD_CTRL) 322 322 #define MX51_PAD_NANDF_D15__NANDF_D15 IOMUX_PAD(0x53c, 0x154, 0, __NA_, 0, NO_PAD_CTRL) ··· 672 672 #define MX51_PAD_DISP2_DAT5__DISP2_DAT5 IOMUX_PAD(0x770, 0x368, 0, __NA_, 0, NO_PAD_CTRL) 673 673 #define MX51_PAD_DISP2_DAT6__DISP2_DAT6 IOMUX_PAD(0x774, 0x36c, 0, __NA_, 0, NO_PAD_CTRL) 674 674 #define MX51_PAD_DISP2_DAT6__FEC_TDATA1 IOMUX_PAD(0x774, 0x36c, 2, __NA_, 0, MX51_PAD_CTRL_5) 675 - #define MX51_PAD_DISP2_DAT6__GPIO1_19 IOMUX_PAD(0x774, 0x36c, 5, __NA_, 0, NO_PAD_CTRL) 675 + #define MX51_PAD_DISP2_DAT6__GPIO1_19 IOMUX_PAD(0x774, 0x36c, 5, __NA_, 0, MX51_GPIO_PAD_CTRL) 676 676 #define MX51_PAD_DISP2_DAT6__KEY_ROW4 IOMUX_PAD(0x774, 0x36c, 4, 0x9d0, 1, NO_PAD_CTRL) 677 677 #define MX51_PAD_DISP2_DAT6__USBH3_STP IOMUX_PAD(0x774, 0x36c, 3, 0xa24, 1, NO_PAD_CTRL) 678 678 #define MX51_PAD_DISP2_DAT7__DISP2_DAT7 IOMUX_PAD(0x778, 0x370, 0, __NA_, 0, NO_PAD_CTRL) 679 679 #define MX51_PAD_DISP2_DAT7__FEC_TDATA2 IOMUX_PAD(0x778, 0x370, 2, __NA_, 0, MX51_PAD_CTRL_5) 680 - #define MX51_PAD_DISP2_DAT7__GPIO1_29 IOMUX_PAD(0x778, 0x370, 5, __NA_, 0, NO_PAD_CTRL) 680 + #define MX51_PAD_DISP2_DAT7__GPIO1_29 IOMUX_PAD(0x778, 0x370, 5, __NA_, 0, MX51_GPIO_PAD_CTRL) 681 681 #define MX51_PAD_DISP2_DAT7__KEY_ROW5 IOMUX_PAD(0x778, 0x370, 4, 0x9d4, 1, NO_PAD_CTRL) 682 682 #define MX51_PAD_DISP2_DAT7__USBH3_NXT IOMUX_PAD(0x778, 0x370, 3, 0xa20, 1, NO_PAD_CTRL) 683 683 #define MX51_PAD_DISP2_DAT8__DISP2_DAT8 IOMUX_PAD(0x77c, 0x374, 0, __NA_, 
0, NO_PAD_CTRL) 684 684 #define MX51_PAD_DISP2_DAT8__FEC_TDATA3 IOMUX_PAD(0x77c, 0x374, 2, __NA_, 0, MX51_PAD_CTRL_5) 685 - #define MX51_PAD_DISP2_DAT8__GPIO1_30 IOMUX_PAD(0x77c, 0x374, 5, __NA_, 0, NO_PAD_CTRL) 685 + #define MX51_PAD_DISP2_DAT8__GPIO1_30 IOMUX_PAD(0x77c, 0x374, 5, __NA_, 0, MX51_GPIO_PAD_CTRL) 686 686 #define MX51_PAD_DISP2_DAT8__KEY_ROW6 IOMUX_PAD(0x77c, 0x374, 4, 0x9d8, 1, NO_PAD_CTRL) 687 687 #define MX51_PAD_DISP2_DAT8__USBH3_DATA0 IOMUX_PAD(0x77c, 0x374, 3, 0x9fc, 1, NO_PAD_CTRL) 688 688 #define MX51_PAD_DISP2_DAT9__AUD6_RXC IOMUX_PAD(0x780, 0x378, 4, 0x8f4, 1, NO_PAD_CTRL) 689 689 #define MX51_PAD_DISP2_DAT9__DISP2_DAT9 IOMUX_PAD(0x780, 0x378, 0, __NA_, 0, NO_PAD_CTRL) 690 690 #define MX51_PAD_DISP2_DAT9__FEC_TX_EN IOMUX_PAD(0x780, 0x378, 2, __NA_, 0, MX51_PAD_CTRL_5) 691 - #define MX51_PAD_DISP2_DAT9__GPIO1_31 IOMUX_PAD(0x780, 0x378, 5, __NA_, 0, NO_PAD_CTRL) 691 + #define MX51_PAD_DISP2_DAT9__GPIO1_31 IOMUX_PAD(0x780, 0x378, 5, __NA_, 0, MX51_GPIO_PAD_CTRL) 692 692 #define MX51_PAD_DISP2_DAT9__USBH3_DATA1 IOMUX_PAD(0x780, 0x378, 3, 0xa00, 1, NO_PAD_CTRL) 693 693 #define MX51_PAD_DISP2_DAT10__DISP2_DAT10 IOMUX_PAD(0x784, 0x37c, 0, __NA_, 0, NO_PAD_CTRL) 694 694 #define MX51_PAD_DISP2_DAT10__DISP2_SER_CS IOMUX_PAD(0x784, 0x37c, 5, __NA_, 0, NO_PAD_CTRL) ··· 698 698 #define MX51_PAD_DISP2_DAT11__AUD6_TXD IOMUX_PAD(0x788, 0x380, 4, 0x8f0, 1, NO_PAD_CTRL) 699 699 #define MX51_PAD_DISP2_DAT11__DISP2_DAT11 IOMUX_PAD(0x788, 0x380, 0, __NA_, 0, NO_PAD_CTRL) 700 700 #define MX51_PAD_DISP2_DAT11__FEC_RX_CLK IOMUX_PAD(0x788, 0x380, 2, 0x968, 1, NO_PAD_CTRL) 701 - #define MX51_PAD_DISP2_DAT11__GPIO1_10 IOMUX_PAD(0x788, 0x380, 7, __NA_, 0, NO_PAD_CTRL) 701 + #define MX51_PAD_DISP2_DAT11__GPIO1_10 IOMUX_PAD(0x788, 0x380, 7, __NA_, 0, MX51_GPIO_PAD_CTRL) 702 702 #define MX51_PAD_DISP2_DAT11__USBH3_DATA3 IOMUX_PAD(0x788, 0x380, 3, 0xa08, 1, NO_PAD_CTRL) 703 703 #define MX51_PAD_DISP2_DAT12__AUD6_RXD IOMUX_PAD(0x78c, 0x384, 4, 0x8ec, 1, NO_PAD_CTRL) 704 704 
#define MX51_PAD_DISP2_DAT12__DISP2_DAT12 IOMUX_PAD(0x78c, 0x384, 0, __NA_, 0, NO_PAD_CTRL) ··· 746 746 #define MX51_PAD_SD1_DATA3__CSPI_SS1 IOMUX_PAD(0x7b0, 0x3a8, 2, 0x920, 1, MX51_ECSPI_PAD_CTRL) 747 747 #define MX51_PAD_SD1_DATA3__SD1_DATA3 IOMUX_PAD(0x7b0, 0x3a8, 0x10, __NA_, 0, MX51_SDHCI_PAD_CTRL) 748 748 #define MX51_PAD_GPIO1_0__CSPI_SS2 IOMUX_PAD(0x7b4, 0x3ac, 2, 0x924, 0, MX51_ECSPI_PAD_CTRL) 749 - #define MX51_PAD_GPIO1_0__GPIO1_0 IOMUX_PAD(0x7b4, 0x3ac, 1, __NA_, 0, NO_PAD_CTRL) 749 + #define MX51_PAD_GPIO1_0__GPIO1_0 IOMUX_PAD(0x7b4, 0x3ac, 1, __NA_, 0, MX51_GPIO_PAD_CTRL) 750 750 #define MX51_PAD_GPIO1_0__SD1_CD IOMUX_PAD(0x7b4, 0x3ac, 0, __NA_, 0, MX51_ESDHC_PAD_CTRL) 751 751 #define MX51_PAD_GPIO1_1__CSPI_MISO IOMUX_PAD(0x7b8, 0x3b0, 2, 0x918, 2, MX51_ECSPI_PAD_CTRL) 752 - #define MX51_PAD_GPIO1_1__GPIO1_1 IOMUX_PAD(0x7b8, 0x3b0, 1, __NA_, 0, NO_PAD_CTRL) 752 + #define MX51_PAD_GPIO1_1__GPIO1_1 IOMUX_PAD(0x7b8, 0x3b0, 1, __NA_, 0, MX51_GPIO_PAD_CTRL) 753 753 #define MX51_PAD_GPIO1_1__SD1_WP IOMUX_PAD(0x7b8, 0x3b0, 0, __NA_, 0, MX51_ESDHC_PAD_CTRL) 754 754 #define MX51_PAD_EIM_DA12__EIM_DA12 IOMUX_PAD(__NA_, 0x04c, 0, 0x000, 0, NO_PAD_CTRL) 755 755 #define MX51_PAD_EIM_DA13__EIM_DA13 IOMUX_PAD(__NA_, 0x050, 0, 0x000, 0, NO_PAD_CTRL) 756 756 #define MX51_PAD_EIM_DA14__EIM_DA14 IOMUX_PAD(__NA_, 0x054, 0, 0x000, 0, NO_PAD_CTRL) 757 757 #define MX51_PAD_EIM_DA15__EIM_DA15 IOMUX_PAD(__NA_, 0x058, 0, 0x000, 0, NO_PAD_CTRL) 758 - #define MX51_PAD_SD2_CMD__CSPI_MOSI IOMUX_PAD(__NA_, 0x3b4, 2, 0x91c, 3, MX51_ECSPI_PAD_CTRL) 758 + #define MX51_PAD_SD2_CMD__CSPI_MOSI IOMUX_PAD(0x7bc, 0x3b4, 2, 0x91c, 3, MX51_ECSPI_PAD_CTRL) 759 759 #define MX51_PAD_SD2_CMD__I2C1_SCL IOMUX_PAD(0x7bc, 0x3b4, 0x11, 0x9b0, 2, MX51_I2C_PAD_CTRL) 760 760 #define MX51_PAD_SD2_CMD__SD2_CMD IOMUX_PAD(0x7bc, 0x3b4, 0x10, __NA_, 0, MX51_SDHCI_PAD_CTRL) 761 761 #define MX51_PAD_SD2_CLK__CSPI_SCLK IOMUX_PAD(0x7c0, 0x3b8, 2, 0x914, 3, MX51_ECSPI_PAD_CTRL) ··· 766 766 #define 
MX51_PAD_SD2_DATA0__SD2_DATA0 IOMUX_PAD(0x7c4, 0x3bc, 0x10, __NA_, 0, MX51_SDHCI_PAD_CTRL) 767 767 #define MX51_PAD_SD2_DATA1__SD1_DAT5 IOMUX_PAD(0x7c8, 0x3c0, 1, __NA_, 0, NO_PAD_CTRL) 768 768 #define MX51_PAD_SD2_DATA1__SD2_DATA1 IOMUX_PAD(0x7c8, 0x3c0, 0x10, __NA_, 0, MX51_SDHCI_PAD_CTRL) 769 - #define MX51_PAD_SD2_DATA1__USBH3_H2_DP IOMUX_PAD(0x7c8, 0x3c0, 2, __NA_, 0, NO_PAD_CTRL) 769 + #define MX51_PAD_SD2_DATA1__USBH3_H2_DP IOMUX_PAD(0x7c8, 0x3c0, 0x12, __NA_, 0, NO_PAD_CTRL) 770 770 #define MX51_PAD_SD2_DATA2__SD1_DAT6 IOMUX_PAD(0x7cc, 0x3c4, 1, __NA_, 0, NO_PAD_CTRL) 771 771 #define MX51_PAD_SD2_DATA2__SD2_DATA2 IOMUX_PAD(0x7cc, 0x3c4, 0x10, __NA_, 0, MX51_SDHCI_PAD_CTRL) 772 - #define MX51_PAD_SD2_DATA2__USBH3_H2_DM IOMUX_PAD(0x7cc, 0x3c4, 2, __NA_, 0, NO_PAD_CTRL) 772 + #define MX51_PAD_SD2_DATA2__USBH3_H2_DM IOMUX_PAD(0x7cc, 0x3c4, 0x12, __NA_, 0, NO_PAD_CTRL) 773 773 #define MX51_PAD_SD2_DATA3__CSPI_SS2 IOMUX_PAD(0x7d0, 0x3c8, 2, 0x924, 1, MX51_ECSPI_PAD_CTRL) 774 774 #define MX51_PAD_SD2_DATA3__SD1_DAT7 IOMUX_PAD(0x7d0, 0x3c8, 1, __NA_, 0, NO_PAD_CTRL) 775 775 #define MX51_PAD_SD2_DATA3__SD2_DATA3 IOMUX_PAD(0x7d0, 0x3c8, 0x10, __NA_, 0, MX51_SDHCI_PAD_CTRL) 776 776 #define MX51_PAD_GPIO1_2__CCM_OUT_2 IOMUX_PAD(0x7d4, 0x3cc, 5, __NA_, 0, NO_PAD_CTRL) 777 - #define MX51_PAD_GPIO1_2__GPIO1_2 IOMUX_PAD(0x7d4, 0x3cc, 0, __NA_, 0, NO_PAD_CTRL) 777 + #define MX51_PAD_GPIO1_2__GPIO1_2 IOMUX_PAD(0x7d4, 0x3cc, 0, __NA_, 0, MX51_GPIO_PAD_CTRL) 778 778 #define MX51_PAD_GPIO1_2__I2C2_SCL IOMUX_PAD(0x7d4, 0x3cc, 0x12, 0x9b8, 3, MX51_I2C_PAD_CTRL) 779 779 #define MX51_PAD_GPIO1_2__PLL1_BYP IOMUX_PAD(0x7d4, 0x3cc, 7, 0x90c, 1, NO_PAD_CTRL) 780 780 #define MX51_PAD_GPIO1_2__PWM1_PWMO IOMUX_PAD(0x7d4, 0x3cc, 1, __NA_, 0, NO_PAD_CTRL) 781 - #define MX51_PAD_GPIO1_3__GPIO1_3 IOMUX_PAD(0x7d8, 0x3d0, 0, __NA_, 0, NO_PAD_CTRL) 781 + #define MX51_PAD_GPIO1_3__GPIO1_3 IOMUX_PAD(0x7d8, 0x3d0, 0, __NA_, 0, MX51_GPIO_PAD_CTRL) 782 782 #define MX51_PAD_GPIO1_3__I2C2_SDA 
IOMUX_PAD(0x7d8, 0x3d0, 0x12, 0x9bc, 3, MX51_I2C_PAD_CTRL) 783 783 #define MX51_PAD_GPIO1_3__PLL2_BYP IOMUX_PAD(0x7d8, 0x3d0, 7, 0x910, 1, NO_PAD_CTRL) 784 784 #define MX51_PAD_GPIO1_3__PWM2_PWMO IOMUX_PAD(0x7d8, 0x3d0, 1, __NA_, 0, NO_PAD_CTRL) ··· 786 786 #define MX51_PAD_PMIC_INT_REQ__PMIC_PMU_IRQ_B IOMUX_PAD(0x7fc, 0x3d4, 1, __NA_, 0, NO_PAD_CTRL) 787 787 #define MX51_PAD_GPIO1_4__DISP2_EXT_CLK IOMUX_PAD(0x804, 0x3d8, 4, 0x908, 1, NO_PAD_CTRL) 788 788 #define MX51_PAD_GPIO1_4__EIM_RDY IOMUX_PAD(0x804, 0x3d8, 3, 0x938, 1, NO_PAD_CTRL) 789 - #define MX51_PAD_GPIO1_4__GPIO1_4 IOMUX_PAD(0x804, 0x3d8, 0, __NA_, 0, NO_PAD_CTRL) 789 + #define MX51_PAD_GPIO1_4__GPIO1_4 IOMUX_PAD(0x804, 0x3d8, 0, __NA_, 0, MX51_GPIO_PAD_CTRL) 790 790 #define MX51_PAD_GPIO1_4__WDOG1_WDOG_B IOMUX_PAD(0x804, 0x3d8, 2, __NA_, 0, NO_PAD_CTRL) 791 791 #define MX51_PAD_GPIO1_5__CSI2_MCLK IOMUX_PAD(0x808, 0x3dc, 6, __NA_, 0, NO_PAD_CTRL) 792 792 #define MX51_PAD_GPIO1_5__DISP2_PIN16 IOMUX_PAD(0x808, 0x3dc, 3, __NA_, 0, NO_PAD_CTRL) 793 - #define MX51_PAD_GPIO1_5__GPIO1_5 IOMUX_PAD(0x808, 0x3dc, 0, __NA_, 0, NO_PAD_CTRL) 793 + #define MX51_PAD_GPIO1_5__GPIO1_5 IOMUX_PAD(0x808, 0x3dc, 0, __NA_, 0, MX51_GPIO_PAD_CTRL) 794 794 #define MX51_PAD_GPIO1_5__WDOG2_WDOG_B IOMUX_PAD(0x808, 0x3dc, 2, __NA_, 0, NO_PAD_CTRL) 795 795 #define MX51_PAD_GPIO1_6__DISP2_PIN17 IOMUX_PAD(0x80c, 0x3e0, 4, __NA_, 0, NO_PAD_CTRL) 796 - #define MX51_PAD_GPIO1_6__GPIO1_6 IOMUX_PAD(0x80c, 0x3e0, 0, __NA_, 0, NO_PAD_CTRL) 796 + #define MX51_PAD_GPIO1_6__GPIO1_6 IOMUX_PAD(0x80c, 0x3e0, 0, __NA_, 0, MX51_GPIO_PAD_CTRL) 797 797 #define MX51_PAD_GPIO1_6__REF_EN_B IOMUX_PAD(0x80c, 0x3e0, 3, __NA_, 0, NO_PAD_CTRL) 798 798 #define MX51_PAD_GPIO1_7__CCM_OUT_0 IOMUX_PAD(0x810, 0x3e4, 3, __NA_, 0, NO_PAD_CTRL) 799 - #define MX51_PAD_GPIO1_7__GPIO1_7 IOMUX_PAD(0x810, 0x3e4, 0, __NA_, 0, NO_PAD_CTRL) 799 + #define MX51_PAD_GPIO1_7__GPIO1_7 IOMUX_PAD(0x810, 0x3e4, 0, __NA_, 0, MX51_GPIO_PAD_CTRL) 800 800 #define MX51_PAD_GPIO1_7__SD2_WP 
IOMUX_PAD(0x810, 0x3e4, 6, __NA_, 0, MX51_ESDHC_PAD_CTRL) 801 801 #define MX51_PAD_GPIO1_7__SPDIF_OUT1 IOMUX_PAD(0x810, 0x3e4, 2, __NA_, 0, NO_PAD_CTRL) 802 802 #define MX51_PAD_GPIO1_8__CSI2_DATA_EN IOMUX_PAD(0x814, 0x3e8, 2, 0x99c, 2, NO_PAD_CTRL) 803 - #define MX51_PAD_GPIO1_8__GPIO1_8 IOMUX_PAD(0x814, 0x3e8, 0, __NA_, 0, NO_PAD_CTRL) 803 + #define MX51_PAD_GPIO1_8__GPIO1_8 IOMUX_PAD(0x814, 0x3e8, 0, __NA_, 0, MX51_GPIO_PAD_CTRL) 804 804 #define MX51_PAD_GPIO1_8__SD2_CD IOMUX_PAD(0x814, 0x3e8, 6, __NA_, 0, MX51_ESDHC_PAD_CTRL) 805 805 #define MX51_PAD_GPIO1_8__USBH3_PWR IOMUX_PAD(0x814, 0x3e8, 1, __NA_, 0, NO_PAD_CTRL) 806 806 #define MX51_PAD_GPIO1_9__CCM_OUT_1 IOMUX_PAD(0x818, 0x3ec, 3, __NA_, 0, NO_PAD_CTRL) 807 807 #define MX51_PAD_GPIO1_9__DISP2_D1_CS IOMUX_PAD(0x818, 0x3ec, 2, __NA_, 0, NO_PAD_CTRL) 808 808 #define MX51_PAD_GPIO1_9__DISP2_SER_CS IOMUX_PAD(0x818, 0x3ec, 7, __NA_, 0, NO_PAD_CTRL) 809 - #define MX51_PAD_GPIO1_9__GPIO1_9 IOMUX_PAD(0x818, 0x3ec, 0, __NA_, 0, NO_PAD_CTRL) 809 + #define MX51_PAD_GPIO1_9__GPIO1_9 IOMUX_PAD(0x818, 0x3ec, 0, __NA_, 0, MX51_GPIO_PAD_CTRL) 810 810 #define MX51_PAD_GPIO1_9__SD2_LCTL IOMUX_PAD(0x818, 0x3ec, 6, __NA_, 0, NO_PAD_CTRL) 811 811 #define MX51_PAD_GPIO1_9__USBH3_OC IOMUX_PAD(0x818, 0x3ec, 1, __NA_, 0, NO_PAD_CTRL) 812 812
+2 -2
arch/arm/plat-mxc/include/mach/iomux-mx53.h
··· 573 573 #define MX53_PAD_EIM_D28__UART2_CTS IOMUX_PAD(0x494, 0x14C, 2, __NA_, 0, MX53_UART_PAD_CTRL) 574 574 #define MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO IOMUX_PAD(0x494, 0x14C, 3, 0x82C, 1, NO_PAD_CTRL) 575 575 #define MX53_PAD_EIM_D28__CSPI_MOSI IOMUX_PAD(0x494, 0x14C, 4, 0x788, 1, NO_PAD_CTRL) 576 - #define MX53_PAD_EIM_D28__I2C1_SDA IOMUX_PAD(0x494, 0x14C, 5 | IOMUX_CONFIG_SION, 0x818, 1, PAD_CTRL_I2C) 576 + #define MX53_PAD_EIM_D28__I2C1_SDA IOMUX_PAD(0x494, 0x14C, 5 | IOMUX_CONFIG_SION, 0x818, 1, NO_PAD_CTRL) 577 577 #define MX53_PAD_EIM_D28__IPU_EXT_TRIG IOMUX_PAD(0x494, 0x14C, 6, __NA_, 0, NO_PAD_CTRL) 578 578 #define MX53_PAD_EIM_D28__IPU_DI0_PIN13 IOMUX_PAD(0x494, 0x14C, 7, __NA_, 0, NO_PAD_CTRL) 579 579 #define MX53_PAD_EIM_D29__EMI_WEIM_D_29 IOMUX_PAD(0x498, 0x150, 0, __NA_, 0, NO_PAD_CTRL) ··· 1187 1187 #define MX53_PAD_GPIO_8__ESAI1_TX5_RX0 IOMUX_PAD(0x6C8, 0x338, 0, 0x7F8, 1, NO_PAD_CTRL) 1188 1188 #define MX53_PAD_GPIO_8__GPIO1_8 IOMUX_PAD(0x6C8, 0x338, 1, __NA_, 0, NO_PAD_CTRL) 1189 1189 #define MX53_PAD_GPIO_8__EPIT2_EPITO IOMUX_PAD(0x6C8, 0x338, 2, __NA_, 0, NO_PAD_CTRL) 1190 - #define MX53_PAD_GPIO_8__CAN1_RXCAN IOMUX_PAD(0x6C8, 0x338, 3, 0x760, 3, NO_PAD_CTRL) 1190 + #define MX53_PAD_GPIO_8__CAN1_RXCAN IOMUX_PAD(0x6C8, 0x338, 3, 0x760, 2, NO_PAD_CTRL) 1191 1191 #define MX53_PAD_GPIO_8__UART2_RXD_MUX IOMUX_PAD(0x6C8, 0x338, 4, 0x880, 5, MX53_UART_PAD_CTRL) 1192 1192 #define MX53_PAD_GPIO_8__FIRI_TXD IOMUX_PAD(0x6C8, 0x338, 5, __NA_, 0, NO_PAD_CTRL) 1193 1193 #define MX53_PAD_GPIO_8__SPDIF_SRCLK IOMUX_PAD(0x6C8, 0x338, 6, __NA_, 0, NO_PAD_CTRL)
+3 -1
arch/arm/plat-omap/include/plat/omap_hwmod.h
··· 305 305 * @rev_offs: IP block revision register offset (from module base addr) 306 306 * @sysc_offs: OCP_SYSCONFIG register offset (from module base addr) 307 307 * @syss_offs: OCP_SYSSTATUS register offset (from module base addr) 308 + * @srst_udelay: Delay needed after doing a softreset in usecs 308 309 * @idlemodes: One or more of {SIDLE,MSTANDBY}_{OFF,FORCE,SMART} 309 310 * @sysc_flags: SYS{C,S}_HAS* flags indicating SYSCONFIG bits supported 310 311 * @clockact: the default value of the module CLOCKACTIVITY bits ··· 331 330 u16 sysc_offs; 332 331 u16 syss_offs; 333 332 u16 sysc_flags; 333 + struct omap_hwmod_sysc_fields *sysc_fields; 334 + u8 srst_udelay; 334 335 u8 idlemodes; 335 336 u8 clockact; 336 - struct omap_hwmod_sysc_fields *sysc_fields; 337 337 }; 338 338 339 339 /**
+6 -6
arch/arm/plat-omap/sram.c
··· 348 348 sdrc_actim_ctrl_b_1, sdrc_mr_1); 349 349 } 350 350 351 - #ifdef CONFIG_PM 352 351 void omap3_sram_restore_context(void) 353 352 { 354 353 omap_sram_ceil = omap_sram_base + omap_sram_size; ··· 357 358 omap3_sram_configure_core_dpll_sz); 358 359 omap_push_sram_idle(); 359 360 } 360 - #endif /* CONFIG_PM */ 361 - 362 - #endif /* CONFIG_ARCH_OMAP3 */ 363 361 364 362 static inline int omap34xx_sram_init(void) 365 363 { 366 - #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) 367 364 omap3_sram_restore_context(); 368 - #endif 369 365 return 0; 370 366 } 367 + #else 368 + static inline int omap34xx_sram_init(void) 369 + { 370 + return 0; 371 + } 372 + #endif /* CONFIG_ARCH_OMAP3 */ 371 373 372 374 static inline int am33xx_sram_init(void) 373 375 {
+5 -4
arch/ia64/include/asm/futex.h
··· 106 106 return -EFAULT; 107 107 108 108 { 109 - register unsigned long r8 __asm ("r8") = 0; 109 + register unsigned long r8 __asm ("r8"); 110 110 unsigned long prev; 111 111 __asm__ __volatile__( 112 112 " mf;; \n" 113 - " mov ar.ccv=%3;; \n" 114 - "[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n" 113 + " mov %0=r0 \n" 114 + " mov ar.ccv=%4;; \n" 115 + "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n" 115 116 " .xdata4 \"__ex_table\", 1b-., 2f-. \n" 116 117 "[2:]" 117 - : "=r" (prev) 118 + : "=r" (r8), "=r" (prev) 118 119 : "r" (uaddr), "r" (newval), 119 120 "rO" ((long) (unsigned) oldval) 120 121 : "memory");
+4 -14
arch/ia64/kernel/perfmon.c
··· 604 604 spin_unlock(&(x)->ctx_lock); 605 605 } 606 606 607 - static inline unsigned int 608 - pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct) 609 - { 610 - return do_munmap(mm, addr, len); 611 - } 612 - 613 607 static inline unsigned long 614 608 pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec) 615 609 { ··· 1452 1458 * a PROTECT_CTX() section. 1453 1459 */ 1454 1460 static int 1455 - pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size) 1461 + pfm_remove_smpl_mapping(void *vaddr, unsigned long size) 1456 1462 { 1463 + struct task_struct *task = current; 1457 1464 int r; 1458 1465 1459 1466 /* sanity checks */ ··· 1468 1473 /* 1469 1474 * does the actual unmapping 1470 1475 */ 1471 - down_write(&task->mm->mmap_sem); 1476 + r = vm_munmap((unsigned long)vaddr, size); 1472 1477 1473 - DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size)); 1474 - 1475 - r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0); 1476 - 1477 - up_write(&task->mm->mmap_sem); 1478 1478 if (r !=0) { 1479 1479 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size); 1480 1480 } ··· 1935 1945 * because some VM function reenables interrupts. 1936 1946 * 1937 1947 */ 1938 - if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size); 1948 + if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size); 1939 1949 1940 1950 return 0; 1941 1951 }
-1
arch/m68k/configs/m5275evb_defconfig
··· 49 49 CONFIG_NETDEVICES=y 50 50 CONFIG_NET_ETHERNET=y 51 51 CONFIG_FEC=y 52 - CONFIG_FEC2=y 53 52 # CONFIG_NETDEV_1000 is not set 54 53 # CONFIG_NETDEV_10000 is not set 55 54 CONFIG_PPP=y
-2
arch/m68k/platform/527x/config.c
··· 74 74 writew(par | 0xf00, MCF_IPSBAR + 0x100082); 75 75 v = readb(MCF_IPSBAR + 0x100078); 76 76 writeb(v | 0xc0, MCF_IPSBAR + 0x100078); 77 - #endif 78 77 79 - #ifdef CONFIG_FEC2 80 78 /* Set multi-function pins to ethernet mode for fec1 */ 81 79 par = readw(MCF_IPSBAR + 0x100082); 82 80 writew(par | 0xa0, MCF_IPSBAR + 0x100082);
-6
arch/m68k/platform/68EZ328/Makefile
··· 3 3 # 4 4 5 5 obj-y := config.o 6 - 7 - extra-y := bootlogo.rh 8 - 9 - $(obj)/bootlogo.rh: $(src)/bootlogo.h 10 - perl $(src)/../68328/bootlogo.pl < $(src)/bootlogo.h \ 11 - > $(obj)/bootlogo.rh
+1 -1
arch/m68k/platform/68EZ328/bootlogo.h arch/m68k/platform/68VZ328/bootlogo.h
··· 1 1 #define splash_width 640 2 2 #define splash_height 480 3 - static unsigned char splash_bits[] = { 3 + unsigned char __attribute__ ((aligned(16))) bootlogo_bits[] = { 4 4 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 5 5 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 6 6 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+2 -7
arch/m68k/platform/68VZ328/Makefile
··· 3 3 # 4 4 5 5 obj-y := config.o 6 - logo-$(UCDIMM) := bootlogo.rh 7 - logo-$(DRAGEN2) := screen.h 8 - extra-y := $(logo-y) 9 - 10 - $(obj)/bootlogo.rh: $(src)/../68EZ328/bootlogo.h 11 - perl $(src)/bootlogo.pl < $(src)/../68328/bootlogo.h > $(obj)/bootlogo.rh 6 + extra-$(DRAGEN2):= screen.h 12 7 13 8 $(obj)/screen.h: $(src)/screen.xbm $(src)/xbm2lcd.pl 14 9 perl $(src)/xbm2lcd.pl < $(src)/screen.xbm > $(obj)/screen.h 15 10 16 - clean-files := $(obj)/screen.h $(obj)/bootlogo.rh 11 + clean-files := $(obj)/screen.h
+1 -1
arch/m68k/platform/coldfire/device.c
··· 114 114 115 115 static struct platform_device mcf_fec1 = { 116 116 .name = "fec", 117 - .id = 0, 117 + .id = 1, 118 118 .num_resources = ARRAY_SIZE(mcf_fec1_resources), 119 119 .resource = mcf_fec1_resources, 120 120 };
-1
arch/s390/Kconfig
··· 90 90 select HAVE_KERNEL_XZ 91 91 select HAVE_ARCH_MUTEX_CPU_RELAX 92 92 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 93 - select HAVE_RCU_TABLE_FREE if SMP 94 93 select ARCH_SAVE_PAGE_KEYS if HIBERNATION 95 94 select HAVE_MEMBLOCK 96 95 select HAVE_MEMBLOCK_NODE_MAP
+18 -19
arch/s390/defconfig
··· 1 1 CONFIG_EXPERIMENTAL=y 2 2 CONFIG_SYSVIPC=y 3 3 CONFIG_POSIX_MQUEUE=y 4 + CONFIG_FHANDLE=y 5 + CONFIG_TASKSTATS=y 6 + CONFIG_TASK_DELAY_ACCT=y 7 + CONFIG_TASK_XACCT=y 8 + CONFIG_TASK_IO_ACCOUNTING=y 4 9 CONFIG_AUDIT=y 5 - CONFIG_RCU_TRACE=y 6 10 CONFIG_IKCONFIG=y 7 11 CONFIG_IKCONFIG_PROC=y 8 12 CONFIG_CGROUPS=y ··· 18 14 CONFIG_CGROUP_SCHED=y 19 15 CONFIG_RT_GROUP_SCHED=y 20 16 CONFIG_BLK_CGROUP=y 17 + CONFIG_NAMESPACES=y 21 18 CONFIG_BLK_DEV_INITRD=y 22 - # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 19 + CONFIG_RD_BZIP2=y 20 + CONFIG_RD_LZMA=y 21 + CONFIG_RD_XZ=y 22 + CONFIG_RD_LZO=y 23 + CONFIG_EXPERT=y 23 24 # CONFIG_COMPAT_BRK is not set 24 - CONFIG_SLAB=y 25 25 CONFIG_PROFILING=y 26 26 CONFIG_OPROFILE=y 27 27 CONFIG_KPROBES=y 28 28 CONFIG_MODULES=y 29 29 CONFIG_MODULE_UNLOAD=y 30 30 CONFIG_MODVERSIONS=y 31 + CONFIG_PARTITION_ADVANCED=y 32 + CONFIG_IBM_PARTITION=y 31 33 CONFIG_DEFAULT_DEADLINE=y 32 34 CONFIG_NO_HZ=y 33 35 CONFIG_HIGH_RES_TIMERS=y ··· 44 34 CONFIG_BINFMT_MISC=m 45 35 CONFIG_CMM=m 46 36 CONFIG_HZ_100=y 47 - CONFIG_KEXEC=y 48 - CONFIG_PM=y 37 + CONFIG_CRASH_DUMP=y 49 38 CONFIG_HIBERNATION=y 50 39 CONFIG_PACKET=y 51 40 CONFIG_UNIX=y 52 41 CONFIG_NET_KEY=y 53 - CONFIG_AFIUCV=m 54 42 CONFIG_INET=y 55 43 CONFIG_IP_MULTICAST=y 56 44 # CONFIG_INET_LRO is not set 57 45 CONFIG_IPV6=y 58 - CONFIG_NET_SCTPPROBE=m 59 46 CONFIG_L2TP=m 60 47 CONFIG_L2TP_DEBUGFS=m 61 48 CONFIG_VLAN_8021Q=y ··· 91 84 CONFIG_SCSI_LOGGING=y 92 85 CONFIG_SCSI_SCAN_ASYNC=y 93 86 CONFIG_ZFCP=y 94 - CONFIG_ZFCP_DIF=y 95 87 CONFIG_NETDEVICES=y 96 - CONFIG_DUMMY=m 97 88 CONFIG_BONDING=m 89 + CONFIG_DUMMY=m 98 90 CONFIG_EQUALIZER=m 99 91 CONFIG_TUN=m 100 - CONFIG_NET_ETHERNET=y 101 92 CONFIG_VIRTIO_NET=y 102 93 CONFIG_RAW_DRIVER=m 94 + CONFIG_VIRTIO_BALLOON=y 103 95 CONFIG_EXT2_FS=y 104 96 CONFIG_EXT3_FS=y 105 97 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set ··· 109 103 CONFIG_TMPFS=y 110 104 CONFIG_TMPFS_POSIX_ACL=y 111 105 # CONFIG_NETWORK_FILESYSTEMS is not set 112 - 
CONFIG_PARTITION_ADVANCED=y 113 - CONFIG_IBM_PARTITION=y 114 - CONFIG_DLM=m 115 106 CONFIG_MAGIC_SYSRQ=y 116 - CONFIG_DEBUG_KERNEL=y 117 107 CONFIG_TIMER_STATS=y 118 108 CONFIG_PROVE_LOCKING=y 119 109 CONFIG_PROVE_RCU=y 120 110 CONFIG_LOCK_STAT=y 121 111 CONFIG_DEBUG_LOCKDEP=y 122 - CONFIG_DEBUG_SPINLOCK_SLEEP=y 123 112 CONFIG_DEBUG_LIST=y 124 113 CONFIG_DEBUG_NOTIFIERS=y 125 - # CONFIG_RCU_CPU_STALL_DETECTOR is not set 114 + CONFIG_RCU_TRACE=y 126 115 CONFIG_KPROBES_SANITY_TEST=y 127 116 CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y 128 117 CONFIG_CPU_NOTIFIER_ERROR_INJECT=m 129 118 CONFIG_LATENCYTOP=y 130 - CONFIG_SYSCTL_SYSCALL_CHECK=y 131 119 CONFIG_DEBUG_PAGEALLOC=y 132 - # CONFIG_FTRACE is not set 120 + CONFIG_BLK_DEV_IO_TRACE=y 133 121 # CONFIG_STRICT_DEVMEM is not set 134 122 CONFIG_CRYPTO_NULL=m 135 123 CONFIG_CRYPTO_CRYPTD=m ··· 173 173 CONFIG_CRYPTO_DES_S390=m 174 174 CONFIG_CRYPTO_AES_S390=m 175 175 CONFIG_CRC7=m 176 - CONFIG_VIRTIO_BALLOON=y
+1 -2
arch/s390/include/asm/facility.h
··· 38 38 unsigned long nr; 39 39 40 40 preempt_disable(); 41 - S390_lowcore.stfl_fac_list = 0; 42 41 asm volatile( 43 42 " .insn s,0xb2b10000,0(0)\n" /* stfl */ 44 43 "0:\n" 45 44 EX_TABLE(0b, 0b) 46 - : "=m" (S390_lowcore.stfl_fac_list)); 45 + : "+m" (S390_lowcore.stfl_fac_list)); 47 46 nr = 4; /* bytes stored by stfl */ 48 47 memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4); 49 48 if (S390_lowcore.stfl_fac_list & 0x01000000) {
-3
arch/s390/include/asm/pgalloc.h
··· 22 22 23 23 unsigned long *page_table_alloc(struct mm_struct *, unsigned long); 24 24 void page_table_free(struct mm_struct *, unsigned long *); 25 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 26 25 void page_table_free_rcu(struct mmu_gather *, unsigned long *); 27 - void __tlb_remove_table(void *_table); 28 - #endif 29 26 30 27 static inline void clear_table(unsigned long *s, unsigned long val, size_t n) 31 28 {
+1 -1
arch/s390/include/asm/swab.h
··· 77 77 78 78 asm volatile( 79 79 #ifndef __s390x__ 80 - " icm %0,2,%O+1(%R1)\n" 80 + " icm %0,2,%O1+1(%R1)\n" 81 81 " ic %0,%1\n" 82 82 : "=&d" (result) : "Q" (*x) : "cc"); 83 83 #else /* __s390x__ */
+1 -21
arch/s390/include/asm/tlb.h
··· 30 30 31 31 struct mmu_gather { 32 32 struct mm_struct *mm; 33 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 34 33 struct mmu_table_batch *batch; 35 - #endif 36 34 unsigned int fullmm; 37 - unsigned int need_flush; 38 35 }; 39 36 40 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 41 37 struct mmu_table_batch { 42 38 struct rcu_head rcu; 43 39 unsigned int nr; ··· 45 49 46 50 extern void tlb_table_flush(struct mmu_gather *tlb); 47 51 extern void tlb_remove_table(struct mmu_gather *tlb, void *table); 48 - #endif 49 52 50 53 static inline void tlb_gather_mmu(struct mmu_gather *tlb, 51 54 struct mm_struct *mm, ··· 52 57 { 53 58 tlb->mm = mm; 54 59 tlb->fullmm = full_mm_flush; 55 - tlb->need_flush = 0; 56 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 57 60 tlb->batch = NULL; 58 - #endif 59 61 if (tlb->fullmm) 60 62 __tlb_flush_mm(mm); 61 63 } 62 64 63 65 static inline void tlb_flush_mmu(struct mmu_gather *tlb) 64 66 { 65 - if (!tlb->need_flush) 66 - return; 67 - tlb->need_flush = 0; 68 - __tlb_flush_mm(tlb->mm); 69 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 70 67 tlb_table_flush(tlb); 71 - #endif 72 68 } 73 69 74 70 static inline void tlb_finish_mmu(struct mmu_gather *tlb, 75 71 unsigned long start, unsigned long end) 76 72 { 77 - tlb_flush_mmu(tlb); 73 + tlb_table_flush(tlb); 78 74 } 79 75 80 76 /* ··· 91 105 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, 92 106 unsigned long address) 93 107 { 94 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 95 108 if (!tlb->fullmm) 96 109 return page_table_free_rcu(tlb, (unsigned long *) pte); 97 - #endif 98 110 page_table_free(tlb->mm, (unsigned long *) pte); 99 111 } 100 112 ··· 109 125 #ifdef __s390x__ 110 126 if (tlb->mm->context.asce_limit <= (1UL << 31)) 111 127 return; 112 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 113 128 if (!tlb->fullmm) 114 129 return tlb_remove_table(tlb, pmd); 115 - #endif 116 130 crst_table_free(tlb->mm, (unsigned long *) pmd); 117 131 #endif 118 132 } ··· 128 146 #ifdef __s390x__ 129 147 if (tlb->mm->context.asce_limit <= (1UL << 
42)) 130 148 return; 131 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 132 149 if (!tlb->fullmm) 133 150 return tlb_remove_table(tlb, pud); 134 - #endif 135 151 crst_table_free(tlb->mm, (unsigned long *) pud); 136 152 #endif 137 153 }
+1 -1
arch/s390/kernel/head.S
··· 474 474 stck __LC_LAST_UPDATE_CLOCK 475 475 spt 5f-.LPG0(%r13) 476 476 mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13) 477 + xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST 477 478 #ifndef CONFIG_MARCH_G5 478 479 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} 479 - xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST 480 480 .insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list 481 481 tm __LC_STFL_FAC_LIST,0x01 # stfle available ? 482 482 jz 0f
+6 -3
arch/s390/kernel/irq.c
··· 118 118 "a" (__do_softirq) 119 119 : "0", "1", "2", "3", "4", "5", "14", 120 120 "cc", "memory" ); 121 - } else 121 + } else { 122 122 /* We are already on the async stack. */ 123 123 __do_softirq(); 124 + } 124 125 } 125 126 126 127 local_irq_restore(flags); ··· 193 192 int index = ext_hash(code); 194 193 195 194 spin_lock_irqsave(&ext_int_hash_lock, flags); 196 - list_for_each_entry_rcu(p, &ext_int_hash[index], entry) 195 + list_for_each_entry_rcu(p, &ext_int_hash[index], entry) { 197 196 if (p->code == code && p->handler == handler) { 198 197 list_del_rcu(&p->entry); 199 198 kfree_rcu(p, rcu); 200 199 } 200 + } 201 201 spin_unlock_irqrestore(&ext_int_hash_lock, flags); 202 202 return 0; 203 203 } ··· 213 211 214 212 old_regs = set_irq_regs(regs); 215 213 irq_enter(); 216 - if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) 214 + if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) { 217 215 /* Serve timer interrupts first. */ 218 216 clock_comparator_work(); 217 + } 219 218 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; 220 219 if (ext_code.code != 0x1004) 221 220 __get_cpu_var(s390_idle).nohz_delay = 1;
+2 -2
arch/s390/kernel/perf_cpum_cf.c
··· 178 178 err = lcctl(cpuhw->state); 179 179 if (err) { 180 180 pr_err("Enabling the performance measuring unit " 181 - "failed with rc=%lx\n", err); 181 + "failed with rc=%x\n", err); 182 182 return; 183 183 } 184 184 ··· 203 203 err = lcctl(inactive); 204 204 if (err) { 205 205 pr_err("Disabling the performance measuring unit " 206 - "failed with rc=%lx\n", err); 206 + "failed with rc=%x\n", err); 207 207 return; 208 208 } 209 209
+18 -9
arch/s390/mm/maccess.c
··· 61 61 return copied < 0 ? -EFAULT : 0; 62 62 } 63 63 64 - /* 65 - * Copy memory in real mode (kernel to kernel) 66 - */ 67 - int memcpy_real(void *dest, void *src, size_t count) 64 + static int __memcpy_real(void *dest, void *src, size_t count) 68 65 { 69 66 register unsigned long _dest asm("2") = (unsigned long) dest; 70 67 register unsigned long _len1 asm("3") = (unsigned long) count; 71 68 register unsigned long _src asm("4") = (unsigned long) src; 72 69 register unsigned long _len2 asm("5") = (unsigned long) count; 73 - unsigned long flags; 74 70 int rc = -EFAULT; 75 71 76 - if (!count) 77 - return 0; 78 - flags = __arch_local_irq_stnsm(0xf8UL); 79 72 asm volatile ( 80 73 "0: mvcle %1,%2,0x0\n" 81 74 "1: jo 0b\n" ··· 79 86 "+d" (_len2), "=m" (*((long *) dest)) 80 87 : "m" (*((long *) src)) 81 88 : "cc", "memory"); 82 - arch_local_irq_restore(flags); 89 + return rc; 90 + } 91 + 92 + /* 93 + * Copy memory in real mode (kernel to kernel) 94 + */ 95 + int memcpy_real(void *dest, void *src, size_t count) 96 + { 97 + unsigned long flags; 98 + int rc; 99 + 100 + if (!count) 101 + return 0; 102 + local_irq_save(flags); 103 + __arch_local_irq_stnsm(0xfbUL); 104 + rc = __memcpy_real(dest, src, count); 105 + local_irq_restore(flags); 83 106 return rc; 84 107 } 85 108
+60 -3
arch/s390/mm/pgtable.c
··· 678 678 } 679 679 } 680 680 681 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 682 - 683 681 static void __page_table_free_rcu(void *table, unsigned bit) 684 682 { 685 683 struct page *page; ··· 731 733 free_pages((unsigned long) table, ALLOC_ORDER); 732 734 } 733 735 734 - #endif 736 + static void tlb_remove_table_smp_sync(void *arg) 737 + { 738 + /* Simply deliver the interrupt */ 739 + } 740 + 741 + static void tlb_remove_table_one(void *table) 742 + { 743 + /* 744 + * This isn't an RCU grace period and hence the page-tables cannot be 745 + * assumed to be actually RCU-freed. 746 + * 747 + * It is however sufficient for software page-table walkers that rely 748 + * on IRQ disabling. See the comment near struct mmu_table_batch. 749 + */ 750 + smp_call_function(tlb_remove_table_smp_sync, NULL, 1); 751 + __tlb_remove_table(table); 752 + } 753 + 754 + static void tlb_remove_table_rcu(struct rcu_head *head) 755 + { 756 + struct mmu_table_batch *batch; 757 + int i; 758 + 759 + batch = container_of(head, struct mmu_table_batch, rcu); 760 + 761 + for (i = 0; i < batch->nr; i++) 762 + __tlb_remove_table(batch->tables[i]); 763 + 764 + free_page((unsigned long)batch); 765 + } 766 + 767 + void tlb_table_flush(struct mmu_gather *tlb) 768 + { 769 + struct mmu_table_batch **batch = &tlb->batch; 770 + 771 + if (*batch) { 772 + __tlb_flush_mm(tlb->mm); 773 + call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); 774 + *batch = NULL; 775 + } 776 + } 777 + 778 + void tlb_remove_table(struct mmu_gather *tlb, void *table) 779 + { 780 + struct mmu_table_batch **batch = &tlb->batch; 781 + 782 + if (*batch == NULL) { 783 + *batch = (struct mmu_table_batch *) 784 + __get_free_page(GFP_NOWAIT | __GFP_NOWARN); 785 + if (*batch == NULL) { 786 + __tlb_flush_mm(tlb->mm); 787 + tlb_remove_table_one(table); 788 + return; 789 + } 790 + (*batch)->nr = 0; 791 + } 792 + (*batch)->tables[(*batch)->nr++] = table; 793 + if ((*batch)->nr == MAX_TABLE_BATCH) 794 + tlb_table_flush(tlb); 795 + } 735 796 736 797 
/* 737 798 * switch on pgstes for its userspace process (for kvm)
+3
arch/sparc/kernel/leon_smp.c
··· 23 23 #include <linux/pm.h> 24 24 #include <linux/delay.h> 25 25 #include <linux/gfp.h> 26 + #include <linux/cpu.h> 26 27 27 28 #include <asm/cacheflush.h> 28 29 #include <asm/tlbflush.h> ··· 78 77 local_flush_cache_all(); 79 78 local_flush_tlb_all(); 80 79 leon_configure_cache_smp(); 80 + 81 + notify_cpu_starting(cpuid); 81 82 82 83 /* Get our local ticker going. */ 83 84 smp_setup_percpu_timer();
+1 -6
arch/sparc/kernel/sys_sparc_64.c
··· 566 566 567 567 SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len) 568 568 { 569 - long ret; 570 - 571 569 if (invalid_64bit_range(addr, len)) 572 570 return -EINVAL; 573 571 574 - down_write(&current->mm->mmap_sem); 575 - ret = do_munmap(current->mm, addr, len); 576 - up_write(&current->mm->mmap_sem); 577 - return ret; 572 + return vm_munmap(addr, len); 578 573 } 579 574 580 575 extern unsigned long do_mremap(unsigned long addr,
+1 -3
arch/tile/kernel/single_step.c
··· 346 346 } 347 347 348 348 /* allocate a cache line of writable, executable memory */ 349 - down_write(&current->mm->mmap_sem); 350 - buffer = (void __user *) do_mmap(NULL, 0, 64, 349 + buffer = (void __user *) vm_mmap(NULL, 0, 64, 351 350 PROT_EXEC | PROT_READ | PROT_WRITE, 352 351 MAP_PRIVATE | MAP_ANONYMOUS, 353 352 0); 354 - up_write(&current->mm->mmap_sem); 355 353 356 354 if (IS_ERR((void __force *)buffer)) { 357 355 kfree(state);
+8 -24
arch/x86/ia32/ia32_aout.c
··· 119 119 end = PAGE_ALIGN(end); 120 120 if (end <= start) 121 121 return; 122 - down_write(&current->mm->mmap_sem); 123 - do_brk(start, end - start); 124 - up_write(&current->mm->mmap_sem); 122 + vm_brk(start, end - start); 125 123 } 126 124 127 125 #ifdef CORE_DUMP ··· 330 332 pos = 32; 331 333 map_size = ex.a_text+ex.a_data; 332 334 333 - down_write(&current->mm->mmap_sem); 334 - error = do_brk(text_addr & PAGE_MASK, map_size); 335 - up_write(&current->mm->mmap_sem); 335 + error = vm_brk(text_addr & PAGE_MASK, map_size); 336 336 337 337 if (error != (text_addr & PAGE_MASK)) { 338 338 send_sig(SIGKILL, current, 0); ··· 369 373 if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) { 370 374 loff_t pos = fd_offset; 371 375 372 - down_write(&current->mm->mmap_sem); 373 - do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data); 374 - up_write(&current->mm->mmap_sem); 376 + vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data); 375 377 bprm->file->f_op->read(bprm->file, 376 378 (char __user *)N_TXTADDR(ex), 377 379 ex.a_text+ex.a_data, &pos); ··· 379 385 goto beyond_if; 380 386 } 381 387 382 - down_write(&current->mm->mmap_sem); 383 - error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text, 388 + error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text, 384 389 PROT_READ | PROT_EXEC, 385 390 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | 386 391 MAP_EXECUTABLE | MAP_32BIT, 387 392 fd_offset); 388 - up_write(&current->mm->mmap_sem); 389 393 390 394 if (error != N_TXTADDR(ex)) { 391 395 send_sig(SIGKILL, current, 0); 392 396 return error; 393 397 } 394 398 395 - down_write(&current->mm->mmap_sem); 396 - error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data, 399 + error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data, 397 400 PROT_READ | PROT_WRITE | PROT_EXEC, 398 401 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | 399 402 MAP_EXECUTABLE | MAP_32BIT, 400 403 fd_offset + ex.a_text); 401 - up_write(&current->mm->mmap_sem); 402 404 if (error != N_DATADDR(ex)) { 403 405 send_sig(SIGKILL, current, 0); 404 
406 return error; ··· 466 476 error_time = jiffies; 467 477 } 468 478 #endif 469 - down_write(&current->mm->mmap_sem); 470 - do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); 471 - up_write(&current->mm->mmap_sem); 479 + vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); 472 480 473 481 file->f_op->read(file, (char __user *)start_addr, 474 482 ex.a_text + ex.a_data, &pos); ··· 478 490 goto out; 479 491 } 480 492 /* Now use mmap to map the library into memory. */ 481 - down_write(&current->mm->mmap_sem); 482 - error = do_mmap(file, start_addr, ex.a_text + ex.a_data, 493 + error = vm_mmap(file, start_addr, ex.a_text + ex.a_data, 483 494 PROT_READ | PROT_WRITE | PROT_EXEC, 484 495 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_32BIT, 485 496 N_TXTOFF(ex)); 486 - up_write(&current->mm->mmap_sem); 487 497 retval = error; 488 498 if (error != start_addr) 489 499 goto out; ··· 489 503 len = PAGE_ALIGN(ex.a_text + ex.a_data); 490 504 bss = ex.a_text + ex.a_data + ex.a_bss; 491 505 if (bss > len) { 492 - down_write(&current->mm->mmap_sem); 493 - error = do_brk(start_addr + len, bss - len); 494 - up_write(&current->mm->mmap_sem); 506 + error = vm_brk(start_addr + len, bss - len); 495 507 retval = error; 496 508 if (error != start_addr + len) 497 509 goto out;
+9 -9
arch/x86/kvm/pmu.c
··· 459 459 pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1); 460 460 461 461 if (pmu->version == 1) { 462 - pmu->global_ctrl = (1 << pmu->nr_arch_gp_counters) - 1; 463 - return; 462 + pmu->nr_arch_fixed_counters = 0; 463 + } else { 464 + pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f), 465 + X86_PMC_MAX_FIXED); 466 + pmu->counter_bitmask[KVM_PMC_FIXED] = 467 + ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1; 464 468 } 465 469 466 - pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f), 467 - X86_PMC_MAX_FIXED); 468 - pmu->counter_bitmask[KVM_PMC_FIXED] = 469 - ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1; 470 - pmu->global_ctrl_mask = ~(((1 << pmu->nr_arch_gp_counters) - 1) 471 - | (((1ull << pmu->nr_arch_fixed_counters) - 1) 472 - << X86_PMC_IDX_FIXED)); 470 + pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | 471 + (((1ull << pmu->nr_arch_fixed_counters) - 1) << X86_PMC_IDX_FIXED); 472 + pmu->global_ctrl_mask = ~pmu->global_ctrl; 473 473 } 474 474 475 475 void kvm_pmu_init(struct kvm_vcpu *vcpu)
+4 -1
arch/x86/kvm/vmx.c
··· 2210 2210 msr = find_msr_entry(vmx, msr_index); 2211 2211 if (msr) { 2212 2212 msr->data = data; 2213 - if (msr - vmx->guest_msrs < vmx->save_nmsrs) 2213 + if (msr - vmx->guest_msrs < vmx->save_nmsrs) { 2214 + preempt_disable(); 2214 2215 kvm_set_shared_msr(msr->index, msr->data, 2215 2216 msr->mask); 2217 + preempt_enable(); 2218 + } 2216 2219 break; 2217 2220 } 2218 2221 ret = kvm_set_msr_common(vcpu, msr_index, data);
+2 -6
arch/x86/kvm/x86.c
··· 6336 6336 if (npages && !old.rmap) { 6337 6337 unsigned long userspace_addr; 6338 6338 6339 - down_write(&current->mm->mmap_sem); 6340 - userspace_addr = do_mmap(NULL, 0, 6339 + userspace_addr = vm_mmap(NULL, 0, 6341 6340 npages * PAGE_SIZE, 6342 6341 PROT_READ | PROT_WRITE, 6343 6342 map_flags, 6344 6343 0); 6345 - up_write(&current->mm->mmap_sem); 6346 6344 6347 6345 if (IS_ERR((void *)userspace_addr)) 6348 6346 return PTR_ERR((void *)userspace_addr); ··· 6364 6366 if (!user_alloc && !old.user_alloc && old.rmap && !npages) { 6365 6367 int ret; 6366 6368 6367 - down_write(&current->mm->mmap_sem); 6368 - ret = do_munmap(current->mm, old.userspace_addr, 6369 + ret = vm_munmap(old.userspace_addr, 6369 6370 old.npages * PAGE_SIZE); 6370 - up_write(&current->mm->mmap_sem); 6371 6371 if (ret < 0) 6372 6372 printk(KERN_WARNING 6373 6373 "kvm_vm_ioctl_set_memory_region: "
+36 -17
arch/x86/lib/insn.c
··· 379 379 return; 380 380 } 381 381 382 - /* Decode moffset16/32/64 */ 383 - static void __get_moffset(struct insn *insn) 382 + /* Decode moffset16/32/64. Return 0 if failed */ 383 + static int __get_moffset(struct insn *insn) 384 384 { 385 385 switch (insn->addr_bytes) { 386 386 case 2: ··· 397 397 insn->moffset2.value = get_next(int, insn); 398 398 insn->moffset2.nbytes = 4; 399 399 break; 400 + default: /* opnd_bytes must be modified manually */ 401 + goto err_out; 400 402 } 401 403 insn->moffset1.got = insn->moffset2.got = 1; 402 404 405 + return 1; 406 + 403 407 err_out: 404 - return; 408 + return 0; 405 409 } 406 410 407 - /* Decode imm v32(Iz) */ 408 - static void __get_immv32(struct insn *insn) 411 + /* Decode imm v32(Iz). Return 0 if failed */ 412 + static int __get_immv32(struct insn *insn) 409 413 { 410 414 switch (insn->opnd_bytes) { 411 415 case 2: ··· 421 417 insn->immediate.value = get_next(int, insn); 422 418 insn->immediate.nbytes = 4; 423 419 break; 420 + default: /* opnd_bytes must be modified manually */ 421 + goto err_out; 424 422 } 425 423 424 + return 1; 425 + 426 426 err_out: 427 - return; 427 + return 0; 428 428 } 429 429 430 - /* Decode imm v64(Iv/Ov) */ 431 - static void __get_immv(struct insn *insn) 430 + /* Decode imm v64(Iv/Ov), Return 0 if failed */ 431 + static int __get_immv(struct insn *insn) 432 432 { 433 433 switch (insn->opnd_bytes) { 434 434 case 2: ··· 449 441 insn->immediate2.value = get_next(int, insn); 450 442 insn->immediate2.nbytes = 4; 451 443 break; 444 + default: /* opnd_bytes must be modified manually */ 445 + goto err_out; 452 446 } 453 447 insn->immediate1.got = insn->immediate2.got = 1; 454 448 449 + return 1; 455 450 err_out: 456 - return; 451 + return 0; 457 452 } 458 453 459 454 /* Decode ptr16:16/32(Ap) */ 460 - static void __get_immptr(struct insn *insn) 455 + static int __get_immptr(struct insn *insn) 461 456 { 462 457 switch (insn->opnd_bytes) { 463 458 case 2: ··· 473 462 break; 474 463 case 8: 475 464 /* 
ptr16:64 is not exist (no segment) */ 476 - return; 465 + return 0; 466 + default: /* opnd_bytes must be modified manually */ 467 + goto err_out; 477 468 } 478 469 insn->immediate2.value = get_next(unsigned short, insn); 479 470 insn->immediate2.nbytes = 2; 480 471 insn->immediate1.got = insn->immediate2.got = 1; 481 472 473 + return 1; 482 474 err_out: 483 - return; 475 + return 0; 484 476 } 485 477 486 478 /** ··· 503 489 insn_get_displacement(insn); 504 490 505 491 if (inat_has_moffset(insn->attr)) { 506 - __get_moffset(insn); 492 + if (!__get_moffset(insn)) 493 + goto err_out; 507 494 goto done; 508 495 } 509 496 ··· 532 517 insn->immediate2.nbytes = 4; 533 518 break; 534 519 case INAT_IMM_PTR: 535 - __get_immptr(insn); 520 + if (!__get_immptr(insn)) 521 + goto err_out; 536 522 break; 537 523 case INAT_IMM_VWORD32: 538 - __get_immv32(insn); 524 + if (!__get_immv32(insn)) 525 + goto err_out; 539 526 break; 540 527 case INAT_IMM_VWORD: 541 - __get_immv(insn); 528 + if (!__get_immv(insn)) 529 + goto err_out; 542 530 break; 543 531 default: 544 - break; 532 + /* Here, insn must have an immediate, but failed */ 533 + goto err_out; 545 534 } 546 535 if (inat_has_second_immediate(insn->attr)) { 547 536 insn->immediate2.value = get_next(char, insn);
+1 -1
crypto/sha512_generic.c
··· 174 174 index = sctx->count[0] & 0x7f; 175 175 176 176 /* Update number of bytes */ 177 - if (!(sctx->count[0] += len)) 177 + if ((sctx->count[0] += len) < len) 178 178 sctx->count[1]++; 179 179 180 180 part_len = 128 - index;
+2 -1
drivers/acpi/acpica/hwxface.c
··· 74 74 75 75 /* Check if the reset register is supported */ 76 76 77 - if (!reset_reg->address) { 77 + if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) || 78 + !reset_reg->address) { 78 79 return_ACPI_STATUS(AE_NOT_EXIST); 79 80 } 80 81
+1 -2
drivers/acpi/osl.c
··· 607 607 608 608 acpi_irq_handler = handler; 609 609 acpi_irq_context = context; 610 - if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED, "acpi", 611 - acpi_irq)) { 610 + if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) { 612 611 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); 613 612 acpi_irq_handler = NULL; 614 613 return AE_NOT_ACQUIRED;
+2 -1
drivers/acpi/reboot.c
··· 23 23 /* Is the reset register supported? The spec says we should be 24 24 * checking the bit width and bit offset, but Windows ignores 25 25 * these fields */ 26 - /* Ignore also acpi_gbl_FADT.flags.ACPI_FADT_RESET_REGISTER */ 26 + if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)) 27 + return; 27 28 28 29 reset_value = acpi_gbl_FADT.reset_value; 29 30
+2
drivers/ata/ata_piix.c
··· 329 329 { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 330 330 /* SATA Controller IDE (Lynx Point) */ 331 331 { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 332 + /* SATA Controller IDE (DH89xxCC) */ 333 + { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 332 334 { } /* terminate list */ 333 335 }; 334 336
+2 -2
drivers/ata/libata-core.c
··· 95 95 static void ata_dev_xfermask(struct ata_device *dev); 96 96 static unsigned long ata_dev_blacklisted(const struct ata_device *dev); 97 97 98 - unsigned int ata_print_id = 1; 98 + atomic_t ata_print_id = ATOMIC_INIT(1); 99 99 100 100 struct ata_force_param { 101 101 const char *name; ··· 6029 6029 6030 6030 /* give ports names and add SCSI hosts */ 6031 6031 for (i = 0; i < host->n_ports; i++) 6032 - host->ports[i]->print_id = ata_print_id++; 6032 + host->ports[i]->print_id = atomic_inc_return(&ata_print_id); 6033 6033 6034 6034 6035 6035 /* Create associated sysfs transport objects */
+2 -2
drivers/ata/libata-scsi.c
··· 3843 3843 int rc = ap->ops->port_start(ap); 3844 3844 3845 3845 if (!rc) { 3846 - ap->print_id = ata_print_id++; 3846 + ap->print_id = atomic_inc_return(&ata_print_id); 3847 3847 __ata_port_probe(ap); 3848 3848 } 3849 3849 ··· 3867 3867 int rc = ap->ops->port_start(ap); 3868 3868 3869 3869 if (!rc) { 3870 - ap->print_id = ata_print_id++; 3870 + ap->print_id = atomic_inc_return(&ata_print_id); 3871 3871 rc = ata_port_probe(ap); 3872 3872 } 3873 3873
+1
drivers/ata/libata-transport.c
··· 294 294 device_enable_async_suspend(dev); 295 295 pm_runtime_set_active(dev); 296 296 pm_runtime_enable(dev); 297 + pm_runtime_forbid(dev); 297 298 298 299 transport_add_device(dev); 299 300 transport_configure_device(dev);
+1 -1
drivers/ata/libata.h
··· 53 53 ATA_DNXFER_QUIET = (1 << 31), 54 54 }; 55 55 56 - extern unsigned int ata_print_id; 56 + extern atomic_t ata_print_id; 57 57 extern int atapi_passthru16; 58 58 extern int libata_fua; 59 59 extern int libata_noacpi;
+2 -1
drivers/ata/sata_mv.c
··· 4025 4025 struct ata_host *host; 4026 4026 struct mv_host_priv *hpriv; 4027 4027 struct resource *res; 4028 - int n_ports, rc; 4028 + int n_ports = 0; 4029 + int rc; 4029 4030 4030 4031 ata_print_version_once(&pdev->dev, DRV_VERSION); 4031 4032
+29 -12
drivers/block/virtio_blk.c
··· 375 375 return err; 376 376 } 377 377 378 + /* 379 + * Legacy naming scheme used for virtio devices. We are stuck with it for 380 + * virtio blk but don't ever use it for any new driver. 381 + */ 382 + static int virtblk_name_format(char *prefix, int index, char *buf, int buflen) 383 + { 384 + const int base = 'z' - 'a' + 1; 385 + char *begin = buf + strlen(prefix); 386 + char *end = buf + buflen; 387 + char *p; 388 + int unit; 389 + 390 + p = end - 1; 391 + *p = '\0'; 392 + unit = base; 393 + do { 394 + if (p == begin) 395 + return -EINVAL; 396 + *--p = 'a' + (index % unit); 397 + index = (index / unit) - 1; 398 + } while (index >= 0); 399 + 400 + memmove(begin, p, end - p); 401 + memcpy(buf, prefix, strlen(prefix)); 402 + 403 + return 0; 404 + } 405 + 378 406 static int __devinit virtblk_probe(struct virtio_device *vdev) 379 407 { 380 408 struct virtio_blk *vblk; ··· 471 443 472 444 q->queuedata = vblk; 473 445 474 - if (index < 26) { 475 - sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26); 476 - } else if (index < (26 + 1) * 26) { 477 - sprintf(vblk->disk->disk_name, "vd%c%c", 478 - 'a' + index / 26 - 1, 'a' + index % 26); 479 - } else { 480 - const unsigned int m1 = (index / 26 - 1) / 26 - 1; 481 - const unsigned int m2 = (index / 26 - 1) % 26; 482 - const unsigned int m3 = index % 26; 483 - sprintf(vblk->disk->disk_name, "vd%c%c%c", 484 - 'a' + m1, 'a' + m2, 'a' + m3); 485 - } 446 + virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); 486 447 487 448 vblk->disk->major = major; 488 449 vblk->disk->first_minor = index_to_minor(index);
+1 -1
drivers/block/xen-blkback/xenbus.c
··· 416 416 "discard-secure", "%d", 417 417 blkif->vbd.discard_secure); 418 418 if (err) { 419 - dev_warn(dev-dev, "writing discard-secure (%d)", err); 419 + dev_warn(&dev->dev, "writing discard-secure (%d)", err); 420 420 return; 421 421 } 422 422 }
+1
drivers/crypto/ixp4xx_crypto.c
··· 18 18 #include <linux/interrupt.h> 19 19 #include <linux/spinlock.h> 20 20 #include <linux/gfp.h> 21 + #include <linux/module.h> 21 22 22 23 #include <crypto/ctr.h> 23 24 #include <crypto/des.h>
+17 -3
drivers/crypto/talitos.c
··· 124 124 void __iomem *reg; 125 125 int irq[2]; 126 126 127 + /* SEC global registers lock */ 128 + spinlock_t reg_lock ____cacheline_aligned; 129 + 127 130 /* SEC version geometry (from device tree node) */ 128 131 unsigned int num_channels; 129 132 unsigned int chfifo_len; ··· 415 412 { \ 416 413 struct device *dev = (struct device *)data; \ 417 414 struct talitos_private *priv = dev_get_drvdata(dev); \ 415 + unsigned long flags; \ 418 416 \ 419 417 if (ch_done_mask & 1) \ 420 418 flush_channel(dev, 0, 0, 0); \ ··· 431 427 out: \ 432 428 /* At this point, all completed channels have been processed */ \ 433 429 /* Unmask done interrupts for channels completed later on. */ \ 430 + spin_lock_irqsave(&priv->reg_lock, flags); \ 434 431 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ 435 432 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \ 433 + spin_unlock_irqrestore(&priv->reg_lock, flags); \ 436 434 } 437 435 DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE) 438 436 DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE) ··· 625 619 struct device *dev = data; \ 626 620 struct talitos_private *priv = dev_get_drvdata(dev); \ 627 621 u32 isr, isr_lo; \ 622 + unsigned long flags; \ 628 623 \ 624 + spin_lock_irqsave(&priv->reg_lock, flags); \ 629 625 isr = in_be32(priv->reg + TALITOS_ISR); \ 630 626 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \ 631 627 /* Acknowledge interrupt */ \ 632 628 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \ 633 629 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \ 634 630 \ 635 - if (unlikely((isr & ~TALITOS_ISR_4CHDONE) & ch_err_mask || isr_lo)) \ 636 - talitos_error(dev, isr, isr_lo); \ 637 - else \ 631 + if (unlikely(isr & ch_err_mask || isr_lo)) { \ 632 + spin_unlock_irqrestore(&priv->reg_lock, flags); \ 633 + talitos_error(dev, isr & ch_err_mask, isr_lo); \ 634 + } \ 635 + else { \ 638 636 if (likely(isr & ch_done_mask)) { \ 639 637 /* mask further done interrupts. 
*/ \ 640 638 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ 641 639 /* done_task will unmask done interrupts at exit */ \ 642 640 tasklet_schedule(&priv->done_task[tlet]); \ 643 641 } \ 642 + spin_unlock_irqrestore(&priv->reg_lock, flags); \ 643 + } \ 644 644 \ 645 645 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \ 646 646 IRQ_NONE; \ ··· 2730 2718 dev_set_drvdata(dev, priv); 2731 2719 2732 2720 priv->ofdev = ofdev; 2721 + 2722 + spin_lock_init(&priv->reg_lock); 2733 2723 2734 2724 err = talitos_probe_irq(ofdev); 2735 2725 if (err)
+2 -3
drivers/dma/Kconfig
··· 91 91 92 92 config AT_HDMAC 93 93 tristate "Atmel AHB DMA support" 94 - depends on ARCH_AT91SAM9RL || ARCH_AT91SAM9G45 94 + depends on ARCH_AT91 95 95 select DMA_ENGINE 96 96 help 97 - Support the Atmel AHB DMA controller. This can be integrated in 98 - chips such as the Atmel AT91SAM9RL. 97 + Support the Atmel AHB DMA controller. 99 98 100 99 config FSL_DMA 101 100 tristate "Freescale Elo and Elo Plus DMA support"
+4 -8
drivers/gpu/drm/drm_bufs.c
··· 1510 1510 * \param arg pointer to a drm_buf_map structure. 1511 1511 * \return zero on success or a negative number on failure. 1512 1512 * 1513 - * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information 1514 - * about each buffer into user space. For PCI buffers, it calls do_mmap() with 1513 + * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information 1514 + * about each buffer into user space. For PCI buffers, it calls vm_mmap() with 1515 1515 * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls 1516 1516 * drm_mmap_dma(). 1517 1517 */ ··· 1553 1553 retcode = -EINVAL; 1554 1554 goto done; 1555 1555 } 1556 - down_write(&current->mm->mmap_sem); 1557 - virtual = do_mmap(file_priv->filp, 0, map->size, 1556 + virtual = vm_mmap(file_priv->filp, 0, map->size, 1558 1557 PROT_READ | PROT_WRITE, 1559 1558 MAP_SHARED, 1560 1559 token); 1561 - up_write(&current->mm->mmap_sem); 1562 1560 } else { 1563 - down_write(&current->mm->mmap_sem); 1564 - virtual = do_mmap(file_priv->filp, 0, dma->byte_count, 1561 + virtual = vm_mmap(file_priv->filp, 0, dma->byte_count, 1565 1562 PROT_READ | PROT_WRITE, 1566 1563 MAP_SHARED, 0); 1567 - up_write(&current->mm->mmap_sem); 1568 1564 } 1569 1565 if (virtual > -1024UL) { 1570 1566 /* Real error */
+6 -4
drivers/gpu/drm/drm_crtc.c
··· 3335 3335 3336 3336 ret = crtc->funcs->page_flip(crtc, fb, e); 3337 3337 if (ret) { 3338 - spin_lock_irqsave(&dev->event_lock, flags); 3339 - file_priv->event_space += sizeof e->event; 3340 - spin_unlock_irqrestore(&dev->event_lock, flags); 3341 - kfree(e); 3338 + if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { 3339 + spin_lock_irqsave(&dev->event_lock, flags); 3340 + file_priv->event_space += sizeof e->event; 3341 + spin_unlock_irqrestore(&dev->event_lock, flags); 3342 + kfree(e); 3343 + } 3342 3344 } 3343 3345 3344 3346 out:
+3 -3
drivers/gpu/drm/drm_fops.c
··· 507 507 508 508 drm_events_release(file_priv); 509 509 510 - if (dev->driver->driver_features & DRIVER_GEM) 511 - drm_gem_release(dev, file_priv); 512 - 513 510 if (dev->driver->driver_features & DRIVER_MODESET) 514 511 drm_fb_release(file_priv); 512 + 513 + if (dev->driver->driver_features & DRIVER_GEM) 514 + drm_gem_release(dev, file_priv); 515 515 516 516 mutex_lock(&dev->ctxlist_mutex); 517 517 if (!list_empty(&dev->ctxlist)) {
+5 -1
drivers/gpu/drm/drm_usb.c
··· 1 1 #include "drmP.h" 2 2 #include <linux/usb.h> 3 - #include <linux/export.h> 3 + #include <linux/module.h> 4 4 5 5 int drm_get_usb_dev(struct usb_interface *interface, 6 6 const struct usb_device_id *id, ··· 114 114 usb_deregister(udriver); 115 115 } 116 116 EXPORT_SYMBOL(drm_usb_exit); 117 + 118 + MODULE_AUTHOR("David Airlie"); 119 + MODULE_DESCRIPTION("USB DRM support"); 120 + MODULE_LICENSE("GPL and additional rights");
+1 -3
drivers/gpu/drm/exynos/exynos_drm_gem.c
··· 581 581 obj->filp->f_op = &exynos_drm_gem_fops; 582 582 obj->filp->private_data = obj; 583 583 584 - down_write(&current->mm->mmap_sem); 585 - addr = do_mmap(obj->filp, 0, args->size, 584 + addr = vm_mmap(obj->filp, 0, args->size, 586 585 PROT_READ | PROT_WRITE, MAP_SHARED, 0); 587 - up_write(&current->mm->mmap_sem); 588 586 589 587 drm_gem_object_unreference_unlocked(obj); 590 588
-1
drivers/gpu/drm/gma500/mdfld_dsi_output.h
··· 29 29 #define __MDFLD_DSI_OUTPUT_H__ 30 30 31 31 #include <linux/backlight.h> 32 - #include <linux/version.h> 33 32 #include <drm/drmP.h> 34 33 #include <drm/drm.h> 35 34 #include <drm/drm_crtc.h>
+2 -4
drivers/gpu/drm/i810/i810_dma.c
··· 129 129 if (buf_priv->currently_mapped == I810_BUF_MAPPED) 130 130 return -EINVAL; 131 131 132 + /* This is all entirely broken */ 132 133 down_write(&current->mm->mmap_sem); 133 134 old_fops = file_priv->filp->f_op; 134 135 file_priv->filp->f_op = &i810_buffer_fops; ··· 158 157 if (buf_priv->currently_mapped != I810_BUF_MAPPED) 159 158 return -EINVAL; 160 159 161 - down_write(&current->mm->mmap_sem); 162 - retcode = do_munmap(current->mm, 163 - (unsigned long)buf_priv->virtual, 160 + retcode = vm_munmap((unsigned long)buf_priv->virtual, 164 161 (size_t) buf->total); 165 - up_write(&current->mm->mmap_sem); 166 162 167 163 buf_priv->currently_mapped = I810_BUF_UNMAPPED; 168 164 buf_priv->virtual = NULL;
+1 -3
drivers/gpu/drm/i915/i915_gem.c
··· 1087 1087 if (obj == NULL) 1088 1088 return -ENOENT; 1089 1089 1090 - down_write(&current->mm->mmap_sem); 1091 - addr = do_mmap(obj->filp, 0, args->size, 1090 + addr = vm_mmap(obj->filp, 0, args->size, 1092 1091 PROT_READ | PROT_WRITE, MAP_SHARED, 1093 1092 args->offset); 1094 - up_write(&current->mm->mmap_sem); 1095 1093 drm_gem_object_unreference_unlocked(obj); 1096 1094 if (IS_ERR((void *)addr)) 1097 1095 return addr;
+12 -3
drivers/gpu/drm/i915/intel_display.c
··· 3478 3478 return false; 3479 3479 } 3480 3480 3481 - /* All interlaced capable intel hw wants timings in frames. */ 3482 - drm_mode_set_crtcinfo(adjusted_mode, 0); 3481 + /* All interlaced capable intel hw wants timings in frames. Note though 3482 + * that intel_lvds_mode_fixup does some funny tricks with the crtc 3483 + * timings, so we need to be careful not to clobber these.*/ 3484 + if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET)) 3485 + drm_mode_set_crtcinfo(adjusted_mode, 0); 3483 3486 3484 3487 return true; 3485 3488 } ··· 7468 7465 OUT_RING(fb->pitches[0] | obj->tiling_mode); 7469 7466 OUT_RING(obj->gtt_offset); 7470 7467 7471 - pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; 7468 + /* Contrary to the suggestions in the documentation, 7469 + * "Enable Panel Fitter" does not seem to be required when page 7470 + * flipping with a non-native mode, and worse causes a normal 7471 + * modeset to fail. 7472 + * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; 7473 + */ 7474 + pf = 0; 7472 7475 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 7473 7476 OUT_RING(pf | pipesrc); 7474 7477 ADVANCE_LP_RING();
+4
drivers/gpu/drm/i915/intel_drv.h
··· 105 105 #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0) 106 106 #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT) 107 107 #define INTEL_MODE_DP_FORCE_6BPC (0x10) 108 + /* This flag must be set by the encoder's mode_fixup if it changes the crtc 109 + * timings in the mode to prevent the crtc fixup from overwriting them. 110 + * Currently only lvds needs that. */ 111 + #define INTEL_MODE_CRTC_TIMINGS_SET (0x20) 108 112 109 113 static inline void 110 114 intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
+4
drivers/gpu/drm/i915/intel_fb.c
··· 279 279 struct drm_mode_config *config = &dev->mode_config; 280 280 struct drm_plane *plane; 281 281 282 + mutex_lock(&dev->mode_config.mutex); 283 + 282 284 ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); 283 285 if (ret) 284 286 DRM_DEBUG("failed to restore crtc mode\n"); ··· 288 286 /* Be sure to shut off any planes that may be active */ 289 287 list_for_each_entry(plane, &config->plane_list, head) 290 288 plane->funcs->disable_plane(plane); 289 + 290 + mutex_unlock(&dev->mode_config.mutex); 291 291 }
+6
drivers/gpu/drm/i915/intel_lvds.c
··· 187 187 188 188 mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos; 189 189 mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width; 190 + 191 + mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET; 190 192 } 191 193 192 194 static void ··· 210 208 211 209 mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos; 212 210 mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width; 211 + 212 + mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET; 213 213 } 214 214 215 215 static inline u32 panel_fitter_scaling(u32 source, u32 target) ··· 286 282 */ 287 283 for_each_pipe(pipe) 288 284 I915_WRITE(BCLRPAT(pipe), 0); 285 + 286 + drm_mode_set_crtcinfo(adjusted_mode, 0); 289 287 290 288 switch (intel_lvds->fitting_mode) { 291 289 case DRM_MODE_SCALE_CENTER:
-2
drivers/gpu/drm/i915/intel_panel.c
··· 47 47 adjusted_mode->vtotal = fixed_mode->vtotal; 48 48 49 49 adjusted_mode->clock = fixed_mode->clock; 50 - 51 - drm_mode_set_crtcinfo(adjusted_mode, 0); 52 50 } 53 51 54 52 /* adjusted_mode has been preset to be the panel's fixed mode */
+1
drivers/gpu/drm/nouveau/nouveau_pm.c
··· 235 235 return -EPERM; 236 236 237 237 strncpy(string, profile, sizeof(string)); 238 + string[sizeof(string) - 1] = 0; 238 239 if ((ptr = strchr(string, '\n'))) 239 240 *ptr = '\0'; 240 241
+1 -1
drivers/gpu/drm/nouveau/nv50_sor.c
··· 42 42 struct drm_nouveau_private *dev_priv = dev->dev_private; 43 43 static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */ 44 44 static const u8 nv50[] = { 16, 8, 0, 24 }; 45 - if (dev_priv->card_type == 0xaf) 45 + if (dev_priv->chipset == 0xaf) 46 46 return nvaf[lane]; 47 47 return nv50[lane]; 48 48 }
+2 -2
drivers/gpu/drm/radeon/r600.c
··· 1135 1135 } 1136 1136 if (rdev->flags & RADEON_IS_AGP) { 1137 1137 size_bf = mc->gtt_start; 1138 - size_af = 0xFFFFFFFF - mc->gtt_end + 1; 1138 + size_af = 0xFFFFFFFF - mc->gtt_end; 1139 1139 if (size_bf > size_af) { 1140 1140 if (mc->mc_vram_size > size_bf) { 1141 1141 dev_warn(rdev->dev, "limiting VRAM\n"); ··· 1149 1149 mc->real_vram_size = size_af; 1150 1150 mc->mc_vram_size = size_af; 1151 1151 } 1152 - mc->vram_start = mc->gtt_end; 1152 + mc->vram_start = mc->gtt_end + 1; 1153 1153 } 1154 1154 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 1155 1155 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+2 -2
drivers/gpu/drm/radeon/radeon_connectors.c
··· 970 970 971 971 encoder = obj_to_encoder(obj); 972 972 973 - if (encoder->encoder_type != DRM_MODE_ENCODER_DAC || 973 + if (encoder->encoder_type != DRM_MODE_ENCODER_DAC && 974 974 encoder->encoder_type != DRM_MODE_ENCODER_TVDAC) 975 975 continue; 976 976 ··· 1000 1000 * cases the DVI port is actually a virtual KVM port connected to the service 1001 1001 * processor. 1002 1002 */ 1003 + out: 1003 1004 if ((!rdev->is_atom_bios) && 1004 1005 (ret == connector_status_disconnected) && 1005 1006 rdev->mode_info.bios_hardcoded_edid_size) { ··· 1008 1007 ret = connector_status_connected; 1009 1008 } 1010 1009 1011 - out: 1012 1010 /* updated in get modes as well since we need to know if it's analog or digital */ 1013 1011 radeon_connector_update_scratch_regs(connector, ret); 1014 1012 return ret;
+6
drivers/gpu/drm/radeon/radeon_irq_kms.c
··· 147 147 (rdev->pdev->subsystem_device == 0x01fd)) 148 148 return true; 149 149 150 + /* RV515 seems to have MSI issues where it loses 151 + * MSI rearms occasionally. This leads to lockups and freezes. 152 + * disable it by default. 153 + */ 154 + if (rdev->family == CHIP_RV515) 155 + return false; 150 156 if (rdev->flags & RADEON_IS_IGP) { 151 157 /* APUs work fine with MSIs */ 152 158 if (rdev->family >= CHIP_PALM)
+2 -2
drivers/gpu/drm/radeon/rv770.c
··· 969 969 } 970 970 if (rdev->flags & RADEON_IS_AGP) { 971 971 size_bf = mc->gtt_start; 972 - size_af = 0xFFFFFFFF - mc->gtt_end + 1; 972 + size_af = 0xFFFFFFFF - mc->gtt_end; 973 973 if (size_bf > size_af) { 974 974 if (mc->mc_vram_size > size_bf) { 975 975 dev_warn(rdev->dev, "limiting VRAM\n"); ··· 983 983 mc->real_vram_size = size_af; 984 984 mc->mc_vram_size = size_af; 985 985 } 986 - mc->vram_start = mc->gtt_end; 986 + mc->vram_start = mc->gtt_end + 1; 987 987 } 988 988 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 989 989 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+2 -3
drivers/gpu/drm/radeon/si.c
··· 2999 2999 } 3000 3000 r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, 3001 3001 &rdev->rlc.save_restore_gpu_addr); 3002 + radeon_bo_unreserve(rdev->rlc.save_restore_obj); 3002 3003 if (r) { 3003 - radeon_bo_unreserve(rdev->rlc.save_restore_obj); 3004 3004 dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r); 3005 3005 si_rlc_fini(rdev); 3006 3006 return r; ··· 3023 3023 } 3024 3024 r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, 3025 3025 &rdev->rlc.clear_state_gpu_addr); 3026 + radeon_bo_unreserve(rdev->rlc.clear_state_obj); 3026 3027 if (r) { 3027 - 3028 - radeon_bo_unreserve(rdev->rlc.clear_state_obj); 3029 3028 dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); 3030 3029 si_rlc_fini(rdev); 3031 3030 return r;
+1 -1
drivers/hid/Kconfig
··· 34 34 config HID_BATTERY_STRENGTH 35 35 bool 36 36 depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY 37 - default y 37 + default n 38 38 39 39 config HIDRAW 40 40 bool "/dev/hidraw raw HID device support"
+1 -1
drivers/hid/hid-tivo.c
··· 62 62 63 63 static const struct hid_device_id tivo_devices[] = { 64 64 /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */ 65 - { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, 65 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, 66 66 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, 67 67 { } 68 68 };
+16 -17
drivers/hwmon/ads1015.c
··· 59 59 struct ads1015_channel_data channel_data[ADS1015_CHANNELS]; 60 60 }; 61 61 62 - static int ads1015_read_value(struct i2c_client *client, unsigned int channel, 63 - int *value) 62 + static int ads1015_read_adc(struct i2c_client *client, unsigned int channel) 64 63 { 65 64 u16 config; 66 - s16 conversion; 67 65 struct ads1015_data *data = i2c_get_clientdata(client); 68 66 unsigned int pga = data->channel_data[channel].pga; 69 - int fullscale; 70 67 unsigned int data_rate = data->channel_data[channel].data_rate; 71 68 unsigned int conversion_time_ms; 72 69 int res; ··· 75 78 if (res < 0) 76 79 goto err_unlock; 77 80 config = res; 78 - fullscale = fullscale_table[pga]; 79 81 conversion_time_ms = DIV_ROUND_UP(1000, data_rate_table[data_rate]); 80 82 81 83 /* setup and start single conversion */ ··· 101 105 } 102 106 103 107 res = i2c_smbus_read_word_swapped(client, ADS1015_CONVERSION); 104 - if (res < 0) 105 - goto err_unlock; 106 - conversion = res; 107 - 108 - mutex_unlock(&data->update_lock); 109 - 110 - *value = DIV_ROUND_CLOSEST(conversion * fullscale, 0x7ff0); 111 - 112 - return 0; 113 108 114 109 err_unlock: 115 110 mutex_unlock(&data->update_lock); 116 111 return res; 112 + } 113 + 114 + static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel, 115 + s16 reg) 116 + { 117 + struct ads1015_data *data = i2c_get_clientdata(client); 118 + unsigned int pga = data->channel_data[channel].pga; 119 + int fullscale = fullscale_table[pga]; 120 + 121 + return DIV_ROUND_CLOSEST(reg * fullscale, 0x7ff0); 117 122 } 118 123 119 124 /* sysfs callback function */ ··· 123 126 { 124 127 struct sensor_device_attribute *attr = to_sensor_dev_attr(da); 125 128 struct i2c_client *client = to_i2c_client(dev); 126 - int in; 127 129 int res; 130 + int index = attr->index; 128 131 129 - res = ads1015_read_value(client, attr->index, &in); 132 + res = ads1015_read_adc(client, index); 133 + if (res < 0) 134 + return res; 130 135 131 - return (res < 0) ? 
res : sprintf(buf, "%d\n", in); 136 + return sprintf(buf, "%d\n", ads1015_reg_to_mv(client, index, res)); 132 137 } 133 138 134 139 static const struct sensor_device_attribute ads1015_in[] = {
+39
drivers/hwmon/fam15h_power.c
··· 122 122 return true; 123 123 } 124 124 125 + /* 126 + * Newer BKDG versions have an updated recommendation on how to properly 127 + * initialize the running average range (was: 0xE, now: 0x9). This avoids 128 + * counter saturations resulting in bogus power readings. 129 + * We correct this value ourselves to cope with older BIOSes. 130 + */ 131 + static void __devinit tweak_runavg_range(struct pci_dev *pdev) 132 + { 133 + u32 val; 134 + const struct pci_device_id affected_device = { 135 + PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }; 136 + 137 + /* 138 + * let this quirk apply only to the current version of the 139 + * northbridge, since future versions may change the behavior 140 + */ 141 + if (!pci_match_id(&affected_device, pdev)) 142 + return; 143 + 144 + pci_bus_read_config_dword(pdev->bus, 145 + PCI_DEVFN(PCI_SLOT(pdev->devfn), 5), 146 + REG_TDP_RUNNING_AVERAGE, &val); 147 + if ((val & 0xf) != 0xe) 148 + return; 149 + 150 + val &= ~0xf; 151 + val |= 0x9; 152 + pci_bus_write_config_dword(pdev->bus, 153 + PCI_DEVFN(PCI_SLOT(pdev->devfn), 5), 154 + REG_TDP_RUNNING_AVERAGE, val); 155 + } 156 + 125 157 static void __devinit fam15h_power_init_data(struct pci_dev *f4, 126 158 struct fam15h_power_data *data) 127 159 { ··· 186 154 struct fam15h_power_data *data; 187 155 struct device *dev; 188 156 int err; 157 + 158 + /* 159 + * though we ignore every other northbridge, we still have to 160 + * do the tweaking on _each_ node in MCM processors as the counters 161 + * are working hand-in-hand 162 + */ 163 + tweak_runavg_range(pdev); 189 164 190 165 if (!fam15h_power_is_internal_node0(pdev)) { 191 166 err = -ENODEV;
+1 -2
drivers/input/misc/Kconfig
··· 380 380 381 381 config INPUT_TWL6040_VIBRA 382 382 tristate "Support for TWL6040 Vibrator" 383 - depends on TWL4030_CORE 384 - select TWL6040_CORE 383 + depends on TWL6040_CORE 385 384 select INPUT_FF_MEMLESS 386 385 help 387 386 This option enables support for TWL6040 Vibrator Driver.
+2 -2
drivers/input/misc/twl6040-vibra.c
··· 28 28 #include <linux/module.h> 29 29 #include <linux/platform_device.h> 30 30 #include <linux/workqueue.h> 31 - #include <linux/i2c/twl.h> 31 + #include <linux/input.h> 32 32 #include <linux/mfd/twl6040.h> 33 33 #include <linux/slab.h> 34 34 #include <linux/delay.h> ··· 257 257 258 258 static int __devinit twl6040_vibra_probe(struct platform_device *pdev) 259 259 { 260 - struct twl4030_vibra_data *pdata = pdev->dev.platform_data; 260 + struct twl6040_vibra_data *pdata = pdev->dev.platform_data; 261 261 struct vibra_info *info; 262 262 int ret; 263 263
+1 -1
drivers/leds/leds-atmel-pwm.c
··· 35 35 * NOTE: we reuse the platform_data structure of GPIO leds, 36 36 * but repurpose its "gpio" number as a PWM channel number. 37 37 */ 38 - static int __init pwmled_probe(struct platform_device *pdev) 38 + static int __devinit pwmled_probe(struct platform_device *pdev) 39 39 { 40 40 const struct gpio_led_platform_data *pdata; 41 41 struct pwmled *leds;
+35 -4
drivers/media/common/tuners/xc5000.c
··· 54 54 struct list_head hybrid_tuner_instance_list; 55 55 56 56 u32 if_khz; 57 + u32 xtal_khz; 57 58 u32 freq_hz; 58 59 u32 bandwidth; 59 60 u8 video_standard; ··· 215 214 .size = 12401, 216 215 }; 217 216 218 - static const struct xc5000_fw_cfg xc5000c_41_024_5_31875 = { 219 - .name = "dvb-fe-xc5000c-41.024.5-31875.fw", 220 - .size = 16503, 217 + static const struct xc5000_fw_cfg xc5000c_41_024_5 = { 218 + .name = "dvb-fe-xc5000c-41.024.5.fw", 219 + .size = 16497, 221 220 }; 222 221 223 222 static inline const struct xc5000_fw_cfg *xc5000_assign_firmware(int chip_id) ··· 227 226 case XC5000A: 228 227 return &xc5000a_1_6_114; 229 228 case XC5000C: 230 - return &xc5000c_41_024_5_31875; 229 + return &xc5000c_41_024_5; 231 230 } 232 231 } 233 232 ··· 573 572 return found; 574 573 } 575 574 575 + static int xc_set_xtal(struct dvb_frontend *fe) 576 + { 577 + struct xc5000_priv *priv = fe->tuner_priv; 578 + int ret = XC_RESULT_SUCCESS; 579 + 580 + switch (priv->chip_id) { 581 + default: 582 + case XC5000A: 583 + /* 32.000 MHz xtal is default */ 584 + break; 585 + case XC5000C: 586 + switch (priv->xtal_khz) { 587 + default: 588 + case 32000: 589 + /* 32.000 MHz xtal is default */ 590 + break; 591 + case 31875: 592 + /* 31.875 MHz xtal configuration */ 593 + ret = xc_write_reg(priv, 0x000f, 0x8081); 594 + break; 595 + } 596 + break; 597 + } 598 + return ret; 599 + } 576 600 577 601 static int xc5000_fwupload(struct dvb_frontend *fe) 578 602 { ··· 629 603 } else { 630 604 printk(KERN_INFO "xc5000: firmware uploading...\n"); 631 605 ret = xc_load_i2c_sequence(fe, fw->data); 606 + if (XC_RESULT_SUCCESS == ret) 607 + ret = xc_set_xtal(fe); 632 608 printk(KERN_INFO "xc5000: firmware upload complete...\n"); 633 609 } 634 610 ··· 1191 1163 call to xc5000_attach occurs before the digital side) */ 1192 1164 priv->if_khz = cfg->if_khz; 1193 1165 } 1166 + 1167 + if (priv->xtal_khz == 0) 1168 + priv->xtal_khz = cfg->xtal_khz; 1194 1169 1195 1170 if (priv->radio_input == 0) 1196 
1171 priv->radio_input = cfg->radio_input;
+1
drivers/media/common/tuners/xc5000.h
··· 34 34 u8 i2c_address; 35 35 u32 if_khz; 36 36 u8 radio_input; 37 + u32 xtal_khz; 37 38 38 39 int chip_id; 39 40 };
+24 -1
drivers/media/dvb/dvb-core/dvb_frontend.c
··· 1446 1446 __func__); 1447 1447 return -EINVAL; 1448 1448 } 1449 + /* 1450 + * Get a delivery system that is compatible with DVBv3 1451 + * NOTE: in order for this to work with softwares like Kaffeine that 1452 + * uses a DVBv5 call for DVB-S2 and a DVBv3 call to go back to 1453 + * DVB-S, drivers that support both should put the SYS_DVBS entry 1454 + * before the SYS_DVBS2, otherwise it won't switch back to DVB-S. 1455 + * The real fix is that userspace applications should not use DVBv3 1456 + * and not trust on calling FE_SET_FRONTEND to switch the delivery 1457 + * system. 1458 + */ 1459 + ncaps = 0; 1460 + while (fe->ops.delsys[ncaps] && ncaps < MAX_DELSYS) { 1461 + if (fe->ops.delsys[ncaps] == desired_system) { 1462 + delsys = desired_system; 1463 + break; 1464 + } 1465 + ncaps++; 1466 + } 1467 + if (delsys == SYS_UNDEFINED) { 1468 + dprintk("%s() Couldn't find a delivery system that matches %d\n", 1469 + __func__, desired_system); 1470 + } 1449 1471 } else { 1450 1472 /* 1451 1473 * This is a DVBv5 call. So, it likely knows the supported ··· 1516 1494 __func__); 1517 1495 return -EINVAL; 1518 1496 } 1519 - c->delivery_system = delsys; 1520 1497 } 1498 + 1499 + c->delivery_system = delsys; 1521 1500 1522 1501 /* 1523 1502 * The DVBv3 or DVBv5 call is requesting a different system. So,
+4 -2
drivers/media/dvb/frontends/drxk_hard.c
··· 1520 1520 dprintk(1, "\n"); 1521 1521 1522 1522 if ((cmd == 0) || ((parameterLen > 0) && (parameter == NULL)) || 1523 - ((resultLen > 0) && (result == NULL))) 1524 - goto error; 1523 + ((resultLen > 0) && (result == NULL))) { 1524 + printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); 1525 + return status; 1526 + } 1525 1527 1526 1528 mutex_lock(&state->mutex); 1527 1529
+1
drivers/media/rc/winbond-cir.c
··· 1046 1046 goto exit_unregister_led; 1047 1047 } 1048 1048 1049 + data->dev->driver_type = RC_DRIVER_IR_RAW; 1049 1050 data->dev->driver_name = WBCIR_NAME; 1050 1051 data->dev->input_name = WBCIR_NAME; 1051 1052 data->dev->input_phys = "wbcir/cir0";
+1 -1
drivers/media/video/Kconfig
··· 492 492 493 493 config VIDEO_MT9M032 494 494 tristate "MT9M032 camera sensor support" 495 - depends on I2C && VIDEO_V4L2 495 + depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API 496 496 select VIDEO_APTINA_PLL 497 497 ---help--- 498 498 This driver supports MT9M032 camera sensors from Aptina, monochrome
+3 -2
drivers/media/video/mt9m032.c
··· 392 392 } 393 393 394 394 /* Scaling is not supported, the format is thus fixed. */ 395 - ret = mt9m032_get_pad_format(subdev, fh, fmt); 395 + fmt->format = *__mt9m032_get_pad_format(sensor, fh, fmt->which); 396 + ret = 0; 396 397 397 398 done: 398 - mutex_lock(&sensor->lock); 399 + mutex_unlock(&sensor->lock); 399 400 return ret; 400 401 } 401 402
+9 -2
drivers/mfd/Kconfig
··· 268 268 This is used to control charging LED brightness. 269 269 270 270 config TWL6040_CORE 271 - bool 272 - depends on TWL4030_CORE && GENERIC_HARDIRQS 271 + bool "Support for TWL6040 audio codec" 272 + depends on I2C=y && GENERIC_HARDIRQS 273 273 select MFD_CORE 274 + select REGMAP_I2C 274 275 default n 276 + help 277 + Say yes here if you want support for Texas Instruments TWL6040 audio 278 + codec. 279 + This driver provides common support for accessing the device, 280 + additional drivers must be enabled in order to use the 281 + functionality of the device (audio, vibra). 275 282 276 283 config MFD_STMPE 277 284 bool "Support STMicroelectronics STMPE"
+3 -1
drivers/mfd/asic3.c
··· 527 527 528 528 static int asic3_gpio_to_irq(struct gpio_chip *chip, unsigned offset) 529 529 { 530 - return (offset < ASIC3_NUM_GPIOS) ? IRQ_BOARD_START + offset : -ENXIO; 530 + struct asic3 *asic = container_of(chip, struct asic3, gpio); 531 + 532 + return (offset < ASIC3_NUM_GPIOS) ? asic->irq_base + offset : -ENXIO; 531 533 } 532 534 533 535 static __init int asic3_gpio_probe(struct platform_device *pdev,
-44
drivers/mfd/omap-usb-host.c
··· 25 25 #include <linux/clk.h> 26 26 #include <linux/dma-mapping.h> 27 27 #include <linux/spinlock.h> 28 - #include <linux/gpio.h> 29 28 #include <plat/usb.h> 30 29 #include <linux/pm_runtime.h> 31 30 ··· 501 502 pm_runtime_get_sync(dev); 502 503 spin_lock_irqsave(&omap->lock, flags); 503 504 504 - if (pdata->ehci_data->phy_reset) { 505 - if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) 506 - gpio_request_one(pdata->ehci_data->reset_gpio_port[0], 507 - GPIOF_OUT_INIT_LOW, "USB1 PHY reset"); 508 - 509 - if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) 510 - gpio_request_one(pdata->ehci_data->reset_gpio_port[1], 511 - GPIOF_OUT_INIT_LOW, "USB2 PHY reset"); 512 - 513 - /* Hold the PHY in RESET for enough time till DIR is high */ 514 - udelay(10); 515 - } 516 - 517 505 omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION); 518 506 dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev); 519 507 ··· 579 593 usbhs_omap_tll_init(dev, OMAP_TLL_CHANNEL_COUNT); 580 594 } 581 595 582 - if (pdata->ehci_data->phy_reset) { 583 - /* Hold the PHY in RESET for enough time till 584 - * PHY is settled and ready 585 - */ 586 - udelay(10); 587 - 588 - if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) 589 - gpio_set_value 590 - (pdata->ehci_data->reset_gpio_port[0], 1); 591 - 592 - if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) 593 - gpio_set_value 594 - (pdata->ehci_data->reset_gpio_port[1], 1); 595 - } 596 - 597 596 spin_unlock_irqrestore(&omap->lock, flags); 598 597 pm_runtime_put_sync(dev); 599 - } 600 - 601 - static void omap_usbhs_deinit(struct device *dev) 602 - { 603 - struct usbhs_hcd_omap *omap = dev_get_drvdata(dev); 604 - struct usbhs_omap_platform_data *pdata = &omap->platdata; 605 - 606 - if (pdata->ehci_data->phy_reset) { 607 - if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) 608 - gpio_free(pdata->ehci_data->reset_gpio_port[0]); 609 - 610 - if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) 611 - 
gpio_free(pdata->ehci_data->reset_gpio_port[1]); 612 - } 613 598 } 614 599 615 600 ··· 817 860 { 818 861 struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev); 819 862 820 - omap_usbhs_deinit(&pdev->dev); 821 863 iounmap(omap->tll_base); 822 864 iounmap(omap->uhh_base); 823 865 clk_put(omap->init_60m_fclk);
+1 -38
drivers/mfd/rc5t583.c
··· 80 80 {.name = "rc5t583-key", } 81 81 }; 82 82 83 - int rc5t583_write(struct device *dev, uint8_t reg, uint8_t val) 84 - { 85 - struct rc5t583 *rc5t583 = dev_get_drvdata(dev); 86 - return regmap_write(rc5t583->regmap, reg, val); 87 - } 88 - 89 - int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val) 90 - { 91 - struct rc5t583 *rc5t583 = dev_get_drvdata(dev); 92 - unsigned int ival; 93 - int ret; 94 - ret = regmap_read(rc5t583->regmap, reg, &ival); 95 - if (!ret) 96 - *val = (uint8_t)ival; 97 - return ret; 98 - } 99 - 100 - int rc5t583_set_bits(struct device *dev, unsigned int reg, 101 - unsigned int bit_mask) 102 - { 103 - struct rc5t583 *rc5t583 = dev_get_drvdata(dev); 104 - return regmap_update_bits(rc5t583->regmap, reg, bit_mask, bit_mask); 105 - } 106 - 107 - int rc5t583_clear_bits(struct device *dev, unsigned int reg, 108 - unsigned int bit_mask) 109 - { 110 - struct rc5t583 *rc5t583 = dev_get_drvdata(dev); 111 - return regmap_update_bits(rc5t583->regmap, reg, bit_mask, 0); 112 - } 113 - 114 - int rc5t583_update(struct device *dev, unsigned int reg, 115 - unsigned int val, unsigned int mask) 116 - { 117 - struct rc5t583 *rc5t583 = dev_get_drvdata(dev); 118 - return regmap_update_bits(rc5t583->regmap, reg, mask, val); 119 - } 120 - 121 83 static int __rc5t583_set_ext_pwrreq1_control(struct device *dev, 122 84 int id, int ext_pwr, int slots) 123 85 { ··· 159 197 ds_id, ext_pwr_req); 160 198 return 0; 161 199 } 200 + EXPORT_SYMBOL(rc5t583_ext_power_req_config); 162 201 163 202 static int rc5t583_clear_ext_power_req(struct rc5t583 *rc5t583, 164 203 struct rc5t583_platform_data *pdata)
+68 -46
drivers/mfd/twl6040-core.c
··· 30 30 #include <linux/platform_device.h> 31 31 #include <linux/gpio.h> 32 32 #include <linux/delay.h> 33 - #include <linux/i2c/twl.h> 33 + #include <linux/i2c.h> 34 + #include <linux/regmap.h> 35 + #include <linux/err.h> 34 36 #include <linux/mfd/core.h> 35 37 #include <linux/mfd/twl6040.h> 36 38 ··· 41 39 int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg) 42 40 { 43 41 int ret; 44 - u8 val = 0; 42 + unsigned int val; 45 43 46 44 mutex_lock(&twl6040->io_mutex); 47 45 /* Vibra control registers from cache */ ··· 49 47 reg == TWL6040_REG_VIBCTLR)) { 50 48 val = twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)]; 51 49 } else { 52 - ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg); 50 + ret = regmap_read(twl6040->regmap, reg, &val); 53 51 if (ret < 0) { 54 52 mutex_unlock(&twl6040->io_mutex); 55 53 return ret; ··· 66 64 int ret; 67 65 68 66 mutex_lock(&twl6040->io_mutex); 69 - ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg); 67 + ret = regmap_write(twl6040->regmap, reg, val); 70 68 /* Cache the vibra control registers */ 71 69 if (reg == TWL6040_REG_VIBCTLL || reg == TWL6040_REG_VIBCTLR) 72 70 twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)] = val; ··· 79 77 int twl6040_set_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask) 80 78 { 81 79 int ret; 82 - u8 val; 83 80 84 81 mutex_lock(&twl6040->io_mutex); 85 - ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg); 86 - if (ret) 87 - goto out; 88 - 89 - val |= mask; 90 - ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg); 91 - out: 82 + ret = regmap_update_bits(twl6040->regmap, reg, mask, mask); 92 83 mutex_unlock(&twl6040->io_mutex); 93 84 return ret; 94 85 } ··· 90 95 int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask) 91 96 { 92 97 int ret; 93 - u8 val; 94 98 95 99 mutex_lock(&twl6040->io_mutex); 96 - ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg); 97 - if (ret) 98 - goto out; 99 - 100 - val &= ~mask; 101 - ret = 
twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg); 102 - out: 100 + ret = regmap_update_bits(twl6040->regmap, reg, mask, 0); 103 101 mutex_unlock(&twl6040->io_mutex); 104 102 return ret; 105 103 } ··· 482 494 }, 483 495 }; 484 496 485 - static int __devinit twl6040_probe(struct platform_device *pdev) 497 + static bool twl6040_readable_reg(struct device *dev, unsigned int reg) 486 498 { 487 - struct twl4030_audio_data *pdata = pdev->dev.platform_data; 499 + /* Register 0 is not readable */ 500 + if (!reg) 501 + return false; 502 + return true; 503 + } 504 + 505 + static struct regmap_config twl6040_regmap_config = { 506 + .reg_bits = 8, 507 + .val_bits = 8, 508 + .max_register = TWL6040_REG_STATUS, /* 0x2e */ 509 + 510 + .readable_reg = twl6040_readable_reg, 511 + }; 512 + 513 + static int __devinit twl6040_probe(struct i2c_client *client, 514 + const struct i2c_device_id *id) 515 + { 516 + struct twl6040_platform_data *pdata = client->dev.platform_data; 488 517 struct twl6040 *twl6040; 489 518 struct mfd_cell *cell = NULL; 490 519 int ret, children = 0; 491 520 492 521 if (!pdata) { 493 - dev_err(&pdev->dev, "Platform data is missing\n"); 522 + dev_err(&client->dev, "Platform data is missing\n"); 494 523 return -EINVAL; 495 524 } 496 525 497 526 /* In order to operate correctly we need valid interrupt config */ 498 - if (!pdata->naudint_irq || !pdata->irq_base) { 499 - dev_err(&pdev->dev, "Invalid IRQ configuration\n"); 527 + if (!client->irq || !pdata->irq_base) { 528 + dev_err(&client->dev, "Invalid IRQ configuration\n"); 500 529 return -EINVAL; 501 530 } 502 531 503 - twl6040 = kzalloc(sizeof(struct twl6040), GFP_KERNEL); 504 - if (!twl6040) 505 - return -ENOMEM; 532 + twl6040 = devm_kzalloc(&client->dev, sizeof(struct twl6040), 533 + GFP_KERNEL); 534 + if (!twl6040) { 535 + ret = -ENOMEM; 536 + goto err; 537 + } 506 538 507 - platform_set_drvdata(pdev, twl6040); 539 + twl6040->regmap = regmap_init_i2c(client, &twl6040_regmap_config); 540 + if 
(IS_ERR(twl6040->regmap)) { 541 + ret = PTR_ERR(twl6040->regmap); 542 + goto err; 543 + } 508 544 509 - twl6040->dev = &pdev->dev; 510 - twl6040->irq = pdata->naudint_irq; 545 + i2c_set_clientdata(client, twl6040); 546 + 547 + twl6040->dev = &client->dev; 548 + twl6040->irq = client->irq; 511 549 twl6040->irq_base = pdata->irq_base; 512 550 513 551 mutex_init(&twl6040->mutex); ··· 602 588 } 603 589 604 590 if (children) { 605 - ret = mfd_add_devices(&pdev->dev, pdev->id, twl6040->cells, 591 + ret = mfd_add_devices(&client->dev, -1, twl6040->cells, 606 592 children, NULL, 0); 607 593 if (ret) 608 594 goto mfd_err; 609 595 } else { 610 - dev_err(&pdev->dev, "No platform data found for children\n"); 596 + dev_err(&client->dev, "No platform data found for children\n"); 611 597 ret = -ENODEV; 612 598 goto mfd_err; 613 599 } ··· 622 608 if (gpio_is_valid(twl6040->audpwron)) 623 609 gpio_free(twl6040->audpwron); 624 610 gpio1_err: 625 - platform_set_drvdata(pdev, NULL); 626 - kfree(twl6040); 611 + i2c_set_clientdata(client, NULL); 612 + regmap_exit(twl6040->regmap); 613 + err: 627 614 return ret; 628 615 } 629 616 630 - static int __devexit twl6040_remove(struct platform_device *pdev) 617 + static int __devexit twl6040_remove(struct i2c_client *client) 631 618 { 632 - struct twl6040 *twl6040 = platform_get_drvdata(pdev); 619 + struct twl6040 *twl6040 = i2c_get_clientdata(client); 633 620 634 621 if (twl6040->power_count) 635 622 twl6040_power(twl6040, 0); ··· 641 626 free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040); 642 627 twl6040_irq_exit(twl6040); 643 628 644 - mfd_remove_devices(&pdev->dev); 645 - platform_set_drvdata(pdev, NULL); 646 - kfree(twl6040); 629 + mfd_remove_devices(&client->dev); 630 + i2c_set_clientdata(client, NULL); 631 + regmap_exit(twl6040->regmap); 647 632 648 633 return 0; 649 634 } 650 635 651 - static struct platform_driver twl6040_driver = { 636 + static const struct i2c_device_id twl6040_i2c_id[] = { 637 + { "twl6040", 0, }, 638 + { }, 
639 + }; 640 + MODULE_DEVICE_TABLE(i2c, twl6040_i2c_id); 641 + 642 + static struct i2c_driver twl6040_driver = { 643 + .driver = { 644 + .name = "twl6040", 645 + .owner = THIS_MODULE, 646 + }, 652 647 .probe = twl6040_probe, 653 648 .remove = __devexit_p(twl6040_remove), 654 - .driver = { 655 - .owner = THIS_MODULE, 656 - .name = "twl6040", 657 - }, 649 + .id_table = twl6040_i2c_id, 658 650 }; 659 651 660 - module_platform_driver(twl6040_driver); 652 + module_i2c_driver(twl6040_driver); 661 653 662 654 MODULE_DESCRIPTION("TWL6040 MFD"); 663 655 MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
+40 -18
drivers/mmc/card/block.c
··· 873 873 { 874 874 struct mmc_blk_data *md = mq->data; 875 875 struct mmc_card *card = md->queue.card; 876 - unsigned int from, nr, arg; 876 + unsigned int from, nr, arg, trim_arg, erase_arg; 877 877 int err = 0, type = MMC_BLK_SECDISCARD; 878 878 879 879 if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) { ··· 881 881 goto out; 882 882 } 883 883 884 - /* The sanitize operation is supported at v4.5 only */ 885 - if (mmc_can_sanitize(card)) { 886 - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 887 - EXT_CSD_SANITIZE_START, 1, 0); 888 - goto out; 889 - } 890 - 891 884 from = blk_rq_pos(req); 892 885 nr = blk_rq_sectors(req); 893 886 894 - if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) 895 - arg = MMC_SECURE_TRIM1_ARG; 896 - else 897 - arg = MMC_SECURE_ERASE_ARG; 887 + /* The sanitize operation is supported at v4.5 only */ 888 + if (mmc_can_sanitize(card)) { 889 + erase_arg = MMC_ERASE_ARG; 890 + trim_arg = MMC_TRIM_ARG; 891 + } else { 892 + erase_arg = MMC_SECURE_ERASE_ARG; 893 + trim_arg = MMC_SECURE_TRIM1_ARG; 894 + } 895 + 896 + if (mmc_erase_group_aligned(card, from, nr)) 897 + arg = erase_arg; 898 + else if (mmc_can_trim(card)) 899 + arg = trim_arg; 900 + else { 901 + err = -EINVAL; 902 + goto out; 903 + } 898 904 retry: 899 905 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 900 906 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, ··· 910 904 INAND_CMD38_ARG_SECERASE, 911 905 0); 912 906 if (err) 913 - goto out; 907 + goto out_retry; 914 908 } 909 + 915 910 err = mmc_erase(card, from, nr, arg); 916 - if (!err && arg == MMC_SECURE_TRIM1_ARG) { 911 + if (err == -EIO) 912 + goto out_retry; 913 + if (err) 914 + goto out; 915 + 916 + if (arg == MMC_SECURE_TRIM1_ARG) { 917 917 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 918 918 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 919 919 INAND_CMD38_ARG_EXT_CSD, 920 920 INAND_CMD38_ARG_SECTRIM2, 921 921 0); 922 922 if (err) 923 - goto out; 923 + goto out_retry; 924 924 } 925 + 925 926 err 
= mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); 927 + if (err == -EIO) 928 + goto out_retry; 929 + if (err) 930 + goto out; 926 931 } 927 - out: 928 - if (err == -EIO && !mmc_blk_reset(md, card->host, type)) 932 + 933 + if (mmc_can_sanitize(card)) 934 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 935 + EXT_CSD_SANITIZE_START, 1, 0); 936 + out_retry: 937 + if (err && !mmc_blk_reset(md, card->host, type)) 929 938 goto retry; 930 939 if (!err) 931 940 mmc_blk_reset_success(md, type); 941 + out: 932 942 spin_lock_irq(&md->lock); 933 943 __blk_end_request(req, err, blk_rq_bytes(req)); 934 944 spin_unlock_irq(&md->lock); ··· 1824 1802 } 1825 1803 1826 1804 #ifdef CONFIG_PM 1827 - static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state) 1805 + static int mmc_blk_suspend(struct mmc_card *card) 1828 1806 { 1829 1807 struct mmc_blk_data *part_md; 1830 1808 struct mmc_blk_data *md = mmc_get_drvdata(card);
+1 -1
drivers/mmc/card/queue.c
··· 139 139 140 140 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); 141 141 q->limits.max_discard_sectors = max_discard; 142 - if (card->erased_byte == 0) 142 + if (card->erased_byte == 0 && !mmc_can_discard(card)) 143 143 q->limits.discard_zeroes_data = 1; 144 144 q->limits.discard_granularity = card->pref_erase << 9; 145 145 /* granularity must not be greater than max. discard */
+9 -17
drivers/mmc/core/bus.c
··· 122 122 return 0; 123 123 } 124 124 125 - static int mmc_bus_suspend(struct device *dev, pm_message_t state) 125 + static int mmc_bus_suspend(struct device *dev) 126 126 { 127 127 struct mmc_driver *drv = to_mmc_driver(dev->driver); 128 128 struct mmc_card *card = mmc_dev_to_card(dev); 129 129 int ret = 0; 130 130 131 131 if (dev->driver && drv->suspend) 132 - ret = drv->suspend(card, state); 132 + ret = drv->suspend(card); 133 133 return ret; 134 134 } 135 135 ··· 165 165 return pm_runtime_suspend(dev); 166 166 } 167 167 168 - static const struct dev_pm_ops mmc_bus_pm_ops = { 169 - .runtime_suspend = mmc_runtime_suspend, 170 - .runtime_resume = mmc_runtime_resume, 171 - .runtime_idle = mmc_runtime_idle, 172 - }; 173 - 174 - #define MMC_PM_OPS_PTR (&mmc_bus_pm_ops) 175 - 176 - #else /* !CONFIG_PM_RUNTIME */ 177 - 178 - #define MMC_PM_OPS_PTR NULL 179 - 180 168 #endif /* !CONFIG_PM_RUNTIME */ 169 + 170 + static const struct dev_pm_ops mmc_bus_pm_ops = { 171 + SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, 172 + mmc_runtime_idle) 173 + SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume) 174 + }; 181 175 182 176 static struct bus_type mmc_bus_type = { 183 177 .name = "mmc", ··· 180 186 .uevent = mmc_bus_uevent, 181 187 .probe = mmc_bus_probe, 182 188 .remove = mmc_bus_remove, 183 - .suspend = mmc_bus_suspend, 184 - .resume = mmc_bus_resume, 185 - .pm = MMC_PM_OPS_PTR, 189 + .pm = &mmc_bus_pm_ops, 186 190 }; 187 191 188 192 int mmc_register_bus(void)
+1
drivers/mmc/core/cd-gpio.c
··· 12 12 #include <linux/gpio.h> 13 13 #include <linux/interrupt.h> 14 14 #include <linux/jiffies.h> 15 + #include <linux/mmc/cd-gpio.h> 15 16 #include <linux/mmc/host.h> 16 17 #include <linux/module.h> 17 18 #include <linux/slab.h>
+26 -38
drivers/mmc/core/core.c
··· 1409 1409 { 1410 1410 unsigned int erase_timeout; 1411 1411 1412 - if (card->ext_csd.erase_group_def & 1) { 1412 + if (arg == MMC_DISCARD_ARG || 1413 + (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) { 1414 + erase_timeout = card->ext_csd.trim_timeout; 1415 + } else if (card->ext_csd.erase_group_def & 1) { 1413 1416 /* High Capacity Erase Group Size uses HC timeouts */ 1414 1417 if (arg == MMC_TRIM_ARG) 1415 1418 erase_timeout = card->ext_csd.trim_timeout; ··· 1684 1681 { 1685 1682 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) 1686 1683 return 1; 1687 - if (mmc_can_discard(card)) 1688 - return 1; 1689 1684 return 0; 1690 1685 } 1691 1686 EXPORT_SYMBOL(mmc_can_trim); ··· 1702 1701 1703 1702 int mmc_can_sanitize(struct mmc_card *card) 1704 1703 { 1704 + if (!mmc_can_trim(card) && !mmc_can_erase(card)) 1705 + return 0; 1705 1706 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE) 1706 1707 return 1; 1707 1708 return 0; ··· 2238 2235 mmc_card_is_removable(host)) 2239 2236 return err; 2240 2237 2238 + mmc_claim_host(host); 2241 2239 if (card && mmc_card_mmc(card) && 2242 2240 (card->ext_csd.cache_size > 0)) { 2243 2241 enable = !!enable; ··· 2256 2252 card->ext_csd.cache_ctrl = enable; 2257 2253 } 2258 2254 } 2255 + mmc_release_host(host); 2259 2256 2260 2257 return err; 2261 2258 } ··· 2274 2269 2275 2270 cancel_delayed_work(&host->detect); 2276 2271 mmc_flush_scheduled_work(); 2277 - if (mmc_try_claim_host(host)) { 2278 - err = mmc_cache_ctrl(host, 0); 2279 - mmc_release_host(host); 2280 - } else { 2281 - err = -EBUSY; 2282 - } 2283 2272 2273 + err = mmc_cache_ctrl(host, 0); 2284 2274 if (err) 2285 2275 goto out; 2286 2276 2287 2277 mmc_bus_get(host); 2288 2278 if (host->bus_ops && !host->bus_dead) { 2289 2279 2290 - /* 2291 - * A long response time is not acceptable for device drivers 2292 - * when doing suspend. 
Prevent mmc_claim_host in the suspend 2293 - * sequence, to potentially wait "forever" by trying to 2294 - * pre-claim the host. 2295 - */ 2296 - if (mmc_try_claim_host(host)) { 2297 - if (host->bus_ops->suspend) { 2298 - err = host->bus_ops->suspend(host); 2299 - } 2300 - mmc_release_host(host); 2280 + if (host->bus_ops->suspend) 2281 + err = host->bus_ops->suspend(host); 2301 2282 2302 - if (err == -ENOSYS || !host->bus_ops->resume) { 2303 - /* 2304 - * We simply "remove" the card in this case. 2305 - * It will be redetected on resume. (Calling 2306 - * bus_ops->remove() with a claimed host can 2307 - * deadlock.) 2308 - */ 2309 - if (host->bus_ops->remove) 2310 - host->bus_ops->remove(host); 2311 - mmc_claim_host(host); 2312 - mmc_detach_bus(host); 2313 - mmc_power_off(host); 2314 - mmc_release_host(host); 2315 - host->pm_flags = 0; 2316 - err = 0; 2317 - } 2318 - } else { 2319 - err = -EBUSY; 2283 + if (err == -ENOSYS || !host->bus_ops->resume) { 2284 + /* 2285 + * We simply "remove" the card in this case. 2286 + * It will be redetected on resume. (Calling 2287 + * bus_ops->remove() with a claimed host can 2288 + * deadlock.) 2289 + */ 2290 + if (host->bus_ops->remove) 2291 + host->bus_ops->remove(host); 2292 + mmc_claim_host(host); 2293 + mmc_detach_bus(host); 2294 + mmc_power_off(host); 2295 + mmc_release_host(host); 2296 + host->pm_flags = 0; 2297 + err = 0; 2320 2298 } 2321 2299 } 2322 2300 mmc_bus_put(host);
+5 -2
drivers/mmc/host/dw_mmc.c
··· 526 526 return -ENODEV; 527 527 528 528 sg_len = dw_mci_pre_dma_transfer(host, data, 0); 529 - if (sg_len < 0) 529 + if (sg_len < 0) { 530 + host->dma_ops->stop(host); 530 531 return sg_len; 532 + } 531 533 532 534 host->using_dma = 1; 533 535 ··· 1881 1879 if (!host->dma_ops) 1882 1880 goto no_dma; 1883 1881 1884 - if (host->dma_ops->init) { 1882 + if (host->dma_ops->init && host->dma_ops->start && 1883 + host->dma_ops->stop && host->dma_ops->cleanup) { 1885 1884 if (host->dma_ops->init(host)) { 1886 1885 dev_err(&host->dev, "%s: Unable to initialize " 1887 1886 "DMA Controller.\n", __func__);
+3 -3
drivers/mmc/host/omap_hsmmc.c
··· 249 249 * the pbias cell programming support is still missing when 250 250 * booting with Device tree 251 251 */ 252 - if (of_have_populated_dt() && !vdd) 252 + if (dev->of_node && !vdd) 253 253 return 0; 254 254 255 255 if (mmc_slot(host).before_set_reg) ··· 1549 1549 * can't be allowed when booting with device 1550 1550 * tree. 1551 1551 */ 1552 - (!of_have_populated_dt())) { 1552 + !host->dev->of_node) { 1553 1553 /* 1554 1554 * The mmc_select_voltage fn of the core does 1555 1555 * not seem to set the power_mode to ··· 1741 1741 .data = &omap4_reg_offset, 1742 1742 }, 1743 1743 {}, 1744 - } 1744 + }; 1745 1745 MODULE_DEVICE_TABLE(of, omap_mmc_of_match); 1746 1746 1747 1747 static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
+1 -2
drivers/mmc/host/sdhci-esdhc-imx.c
··· 467 467 clk_prepare_enable(clk); 468 468 pltfm_host->clk = clk; 469 469 470 - if (!is_imx25_esdhc(imx_data)) 471 - host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; 470 + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; 472 471 473 472 if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data)) 474 473 /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
+1 -1
drivers/mmc/host/sdhci.c
··· 147 147 u32 present, irqs; 148 148 149 149 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 150 - !mmc_card_is_removable(host->mmc)) 150 + (host->mmc->caps & MMC_CAP_NONREMOVABLE)) 151 151 return; 152 152 153 153 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+15 -9
drivers/pci/pci.c
··· 991 991 } 992 992 } 993 993 994 - static void pci_restore_config_space(struct pci_dev *pdev, int start, int end, 995 - int retry) 994 + static void pci_restore_config_space_range(struct pci_dev *pdev, 995 + int start, int end, int retry) 996 996 { 997 997 int index; 998 998 ··· 1000 1000 pci_restore_config_dword(pdev, 4 * index, 1001 1001 pdev->saved_config_space[index], 1002 1002 retry); 1003 + } 1004 + 1005 + static void pci_restore_config_space(struct pci_dev *pdev) 1006 + { 1007 + if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) { 1008 + pci_restore_config_space_range(pdev, 10, 15, 0); 1009 + /* Restore BARs before the command register. */ 1010 + pci_restore_config_space_range(pdev, 4, 9, 10); 1011 + pci_restore_config_space_range(pdev, 0, 3, 0); 1012 + } else { 1013 + pci_restore_config_space_range(pdev, 0, 15, 0); 1014 + } 1003 1015 } 1004 1016 1005 1017 /** ··· 1027 1015 pci_restore_pcie_state(dev); 1028 1016 pci_restore_ats_state(dev); 1029 1017 1030 - pci_restore_config_space(dev, 10, 15, 0); 1031 - /* 1032 - * The Base Address register should be programmed before the command 1033 - * register(s) 1034 - */ 1035 - pci_restore_config_space(dev, 4, 9, 10); 1036 - pci_restore_config_space(dev, 0, 3, 0); 1018 + pci_restore_config_space(dev); 1037 1019 1038 1020 pci_restore_pcix_state(dev); 1039 1021 pci_restore_msi_state(dev);
+21 -4
drivers/pinctrl/core.c
··· 908 908 const struct pinctrl_ops *ops = pctldev->desc->pctlops; 909 909 unsigned selector = 0; 910 910 911 - /* No grouping */ 912 - if (!ops) 913 - return 0; 914 - 915 911 mutex_lock(&pinctrl_mutex); 916 912 917 913 seq_puts(s, "registered pin groups:\n"); ··· 1221 1225 1222 1226 #endif 1223 1227 1228 + static int pinctrl_check_ops(struct pinctrl_dev *pctldev) 1229 + { 1230 + const struct pinctrl_ops *ops = pctldev->desc->pctlops; 1231 + 1232 + if (!ops || 1233 + !ops->list_groups || 1234 + !ops->get_group_name || 1235 + !ops->get_group_pins) 1236 + return -EINVAL; 1237 + 1238 + return 0; 1239 + } 1240 + 1224 1241 /** 1225 1242 * pinctrl_register() - register a pin controller device 1226 1243 * @pctldesc: descriptor for this pin controller ··· 1264 1255 INIT_RADIX_TREE(&pctldev->pin_desc_tree, GFP_KERNEL); 1265 1256 INIT_LIST_HEAD(&pctldev->gpio_ranges); 1266 1257 pctldev->dev = dev; 1258 + 1259 + /* check core ops for sanity */ 1260 + ret = pinctrl_check_ops(pctldev); 1261 + if (ret) { 1262 + pr_err("%s pinctrl ops lacks necessary functions\n", 1263 + pctldesc->name); 1264 + goto out_err; 1265 + } 1267 1266 1268 1267 /* If we're implementing pinmuxing, check the ops for sanity */ 1269 1268 if (pctldesc->pmxops) {
+16 -8
drivers/s390/block/dasd_eckd.c
··· 2844 2844 sector_t recid, trkid; 2845 2845 unsigned int offs; 2846 2846 unsigned int count, count_to_trk_end; 2847 + int ret; 2847 2848 2848 2849 basedev = block->base; 2849 2850 if (rq_data_dir(req) == READ) { ··· 2885 2884 2886 2885 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0); 2887 2886 if (IS_ERR(itcw)) { 2888 - dasd_sfree_request(cqr, startdev); 2889 - return ERR_PTR(-EINVAL); 2887 + ret = -EINVAL; 2888 + goto out_error; 2890 2889 } 2891 2890 cqr->cpaddr = itcw_get_tcw(itcw); 2892 2891 if (prepare_itcw(itcw, first_trk, last_trk, ··· 2898 2897 /* Clock not in sync and XRC is enabled. 2899 2898 * Try again later. 2900 2899 */ 2901 - dasd_sfree_request(cqr, startdev); 2902 - return ERR_PTR(-EAGAIN); 2900 + ret = -EAGAIN; 2901 + goto out_error; 2903 2902 } 2904 2903 len_to_track_end = 0; 2905 2904 /* ··· 2938 2937 tidaw_flags = 0; 2939 2938 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags, 2940 2939 dst, part_len); 2941 - if (IS_ERR(last_tidaw)) 2942 - return ERR_PTR(-EINVAL); 2940 + if (IS_ERR(last_tidaw)) { 2941 + ret = -EINVAL; 2942 + goto out_error; 2943 + } 2943 2944 dst += part_len; 2944 2945 } 2945 2946 } ··· 2950 2947 dst = page_address(bv->bv_page) + bv->bv_offset; 2951 2948 last_tidaw = itcw_add_tidaw(itcw, 0x00, 2952 2949 dst, bv->bv_len); 2953 - if (IS_ERR(last_tidaw)) 2954 - return ERR_PTR(-EINVAL); 2950 + if (IS_ERR(last_tidaw)) { 2951 + ret = -EINVAL; 2952 + goto out_error; 2953 + } 2955 2954 } 2956 2955 } 2957 2956 last_tidaw->flags |= TIDAW_FLAGS_LAST; ··· 2973 2968 cqr->buildclk = get_clock(); 2974 2969 cqr->status = DASD_CQR_FILLED; 2975 2970 return cqr; 2971 + out_error: 2972 + dasd_sfree_request(cqr, startdev); 2973 + return ERR_PTR(ret); 2976 2974 } 2977 2975 2978 2976 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
+1 -1
drivers/s390/char/vmur.c
··· 903 903 goto fail_urdev_put; 904 904 } 905 905 906 - cdev_init(urd->char_device, &ur_fops); 906 + urd->char_device->ops = &ur_fops; 907 907 urd->char_device->dev = MKDEV(major, minor); 908 908 urd->char_device->owner = ur_fops.owner; 909 909
+3 -1
drivers/tty/amiserial.c
··· 1073 1073 (new_serial.close_delay != port->close_delay) || 1074 1074 (new_serial.xmit_fifo_size != state->xmit_fifo_size) || 1075 1075 ((new_serial.flags & ~ASYNC_USR_MASK) != 1076 - (port->flags & ~ASYNC_USR_MASK))) 1076 + (port->flags & ~ASYNC_USR_MASK))) { 1077 + tty_unlock(); 1077 1078 return -EPERM; 1079 + } 1078 1080 port->flags = ((port->flags & ~ASYNC_USR_MASK) | 1079 1081 (new_serial.flags & ASYNC_USR_MASK)); 1080 1082 state->custom_divisor = new_serial.custom_divisor;
+8 -6
drivers/tty/serial/clps711x.c
··· 154 154 port->x_char = 0; 155 155 return IRQ_HANDLED; 156 156 } 157 - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { 158 - clps711xuart_stop_tx(port); 159 - return IRQ_HANDLED; 160 - } 157 + 158 + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) 159 + goto disable_tx_irq; 161 160 162 161 count = port->fifosize >> 1; 163 162 do { ··· 170 171 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 171 172 uart_write_wakeup(port); 172 173 173 - if (uart_circ_empty(xmit)) 174 - clps711xuart_stop_tx(port); 174 + if (uart_circ_empty(xmit)) { 175 + disable_tx_irq: 176 + disable_irq_nosync(TX_IRQ(port)); 177 + tx_enabled(port) = 0; 178 + } 175 179 176 180 return IRQ_HANDLED; 177 181 }
+3 -1
drivers/tty/serial/pch_uart.c
··· 1447 1447 __func__); 1448 1448 return -EOPNOTSUPP; 1449 1449 #endif 1450 - priv->use_dma = 1; 1451 1450 priv->use_dma_flag = 1; 1452 1451 dev_info(priv->port.dev, "PCH UART : Use DMA Mode\n"); 1452 + if (!priv->use_dma) 1453 + pch_request_dma(port); 1454 + priv->use_dma = 1; 1453 1455 } 1454 1456 1455 1457 return 0;
-3
drivers/usb/core/hub.c
··· 1667 1667 { 1668 1668 struct usb_device *udev = *pdev; 1669 1669 int i; 1670 - struct usb_hcd *hcd = bus_to_hcd(udev->bus); 1671 1670 1672 1671 /* mark the device as inactive, so any further urb submissions for 1673 1672 * this device (and any of its children) will fail immediately. ··· 1689 1690 * so that the hardware is now fully quiesced. 1690 1691 */ 1691 1692 dev_dbg (&udev->dev, "unregistering device\n"); 1692 - mutex_lock(hcd->bandwidth_mutex); 1693 1693 usb_disable_device(udev, 0); 1694 - mutex_unlock(hcd->bandwidth_mutex); 1695 1694 usb_hcd_synchronize_unlinks(udev); 1696 1695 1697 1696 usb_remove_ep_devs(&udev->ep0);
+3 -3
drivers/usb/core/message.c
··· 1136 1136 * Deallocates hcd/hardware state for the endpoints (nuking all or most 1137 1137 * pending urbs) and usbcore state for the interfaces, so that usbcore 1138 1138 * must usb_set_configuration() before any interfaces could be used. 1139 - * 1140 - * Must be called with hcd->bandwidth_mutex held. 1141 1139 */ 1142 1140 void usb_disable_device(struct usb_device *dev, int skip_ep0) 1143 1141 { ··· 1188 1190 usb_disable_endpoint(dev, i + USB_DIR_IN, false); 1189 1191 } 1190 1192 /* Remove endpoints from the host controller internal state */ 1193 + mutex_lock(hcd->bandwidth_mutex); 1191 1194 usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); 1195 + mutex_unlock(hcd->bandwidth_mutex); 1192 1196 /* Second pass: remove endpoint pointers */ 1193 1197 } 1194 1198 for (i = skip_ep0; i < 16; ++i) { ··· 1750 1750 /* if it's already configured, clear out old state first. 1751 1751 * getting rid of old interfaces means unbinding their drivers. 1752 1752 */ 1753 - mutex_lock(hcd->bandwidth_mutex); 1754 1753 if (dev->state != USB_STATE_ADDRESS) 1755 1754 usb_disable_device(dev, 1); /* Skip ep0 */ 1756 1755 ··· 1762 1763 * host controller will not allow submissions to dropped endpoints. If 1763 1764 * this call fails, the device state is unchanged. 1764 1765 */ 1766 + mutex_lock(hcd->bandwidth_mutex); 1765 1767 ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); 1766 1768 if (ret < 0) { 1767 1769 mutex_unlock(hcd->bandwidth_mutex);
+3 -3
drivers/usb/dwc3/core.c
··· 206 206 207 207 for (i = 0; i < dwc->num_event_buffers; i++) { 208 208 evt = dwc->ev_buffs[i]; 209 - if (evt) { 209 + if (evt) 210 210 dwc3_free_one_event_buffer(dwc, evt); 211 - dwc->ev_buffs[i] = NULL; 212 - } 213 211 } 212 + 213 + kfree(dwc->ev_buffs); 214 214 } 215 215 216 216 /**
+10 -2
drivers/usb/dwc3/ep0.c
··· 353 353 354 354 dwc->test_mode_nr = wIndex >> 8; 355 355 dwc->test_mode = true; 356 + break; 357 + default: 358 + return -EINVAL; 356 359 } 357 360 break; 358 361 ··· 562 559 length = trb->size & DWC3_TRB_SIZE_MASK; 563 560 564 561 if (dwc->ep0_bounced) { 562 + unsigned transfer_size = ur->length; 563 + unsigned maxp = ep0->endpoint.maxpacket; 564 + 565 + transfer_size += (maxp - (transfer_size % maxp)); 565 566 transferred = min_t(u32, ur->length, 566 - ep0->endpoint.maxpacket - length); 567 + transfer_size - length); 567 568 memcpy(ur->buf, dwc->ep0_bounce, transferred); 568 569 dwc->ep0_bounced = false; 569 570 } else { 570 571 transferred = ur->length - length; 571 - ur->actual += transferred; 572 572 } 573 + 574 + ur->actual += transferred; 573 575 574 576 if ((epnum & 1) && ur->actual < ur->length) { 575 577 /* for some reason we did not get everything out */
+4 -4
drivers/usb/gadget/at91_udc.c
··· 1863 1863 mod_timer(&udc->vbus_timer, 1864 1864 jiffies + VBUS_POLL_TIMEOUT); 1865 1865 } else { 1866 - if (request_irq(udc->board.vbus_pin, at91_vbus_irq, 1867 - 0, driver_name, udc)) { 1866 + if (request_irq(gpio_to_irq(udc->board.vbus_pin), 1867 + at91_vbus_irq, 0, driver_name, udc)) { 1868 1868 DBG("request vbus irq %d failed\n", 1869 1869 udc->board.vbus_pin); 1870 1870 retval = -EBUSY; ··· 1886 1886 return 0; 1887 1887 fail4: 1888 1888 if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled) 1889 - free_irq(udc->board.vbus_pin, udc); 1889 + free_irq(gpio_to_irq(udc->board.vbus_pin), udc); 1890 1890 fail3: 1891 1891 if (gpio_is_valid(udc->board.vbus_pin)) 1892 1892 gpio_free(udc->board.vbus_pin); ··· 1924 1924 device_init_wakeup(&pdev->dev, 0); 1925 1925 remove_debug_file(udc); 1926 1926 if (gpio_is_valid(udc->board.vbus_pin)) { 1927 - free_irq(udc->board.vbus_pin, udc); 1927 + free_irq(gpio_to_irq(udc->board.vbus_pin), udc); 1928 1928 gpio_free(udc->board.vbus_pin); 1929 1929 } 1930 1930 free_irq(udc->udp_irq, udc);
+2 -1
drivers/usb/gadget/f_fs.c
··· 712 712 if (code == FUNCTIONFS_INTERFACE_REVMAP) { 713 713 struct ffs_function *func = ffs->func; 714 714 ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV; 715 - } else if (gadget->ops->ioctl) { 715 + } else if (gadget && gadget->ops->ioctl) { 716 716 ret = gadget->ops->ioctl(gadget, code, value); 717 717 } else { 718 718 ret = -ENOTTY; ··· 1382 1382 ffs->ep0req = NULL; 1383 1383 ffs->gadget = NULL; 1384 1384 ffs_data_put(ffs); 1385 + clear_bit(FFS_FL_BOUND, &ffs->flags); 1385 1386 } 1386 1387 } 1387 1388
+1
drivers/usb/gadget/f_rndis.c
··· 500 500 if (buf) { 501 501 memcpy(req->buf, buf, n); 502 502 req->complete = rndis_response_complete; 503 + req->context = rndis; 503 504 rndis_free_response(rndis->config, buf); 504 505 value = n; 505 506 }
+16 -9
drivers/usb/gadget/fsl_udc_core.c
··· 730 730 : (1 << (ep_index(ep))); 731 731 732 732 /* check if the pipe is empty */ 733 - if (!(list_empty(&ep->queue))) { 733 + if (!(list_empty(&ep->queue)) && !(ep_index(ep) == 0)) { 734 734 /* Add td to the end */ 735 735 struct fsl_req *lastreq; 736 736 lastreq = list_entry(ep->queue.prev, struct fsl_req, queue); ··· 917 917 } else { 918 918 return -ENOMEM; 919 919 } 920 - 921 - /* Update ep0 state */ 922 - if ((ep_index(ep) == 0)) 923 - udc->ep0_state = DATA_STATE_XMIT; 924 920 925 921 /* irq handler advances the queue */ 926 922 if (req != NULL) ··· 1275 1279 udc->ep0_dir = USB_DIR_OUT; 1276 1280 1277 1281 ep = &udc->eps[0]; 1278 - udc->ep0_state = WAIT_FOR_OUT_STATUS; 1282 + if (udc->ep0_state != DATA_STATE_XMIT) 1283 + udc->ep0_state = WAIT_FOR_OUT_STATUS; 1279 1284 1280 1285 req->ep = ep; 1281 1286 req->req.length = 0; ··· 1381 1384 1382 1385 list_add_tail(&req->queue, &ep->queue); 1383 1386 udc->ep0_state = DATA_STATE_XMIT; 1387 + if (ep0_prime_status(udc, EP_DIR_OUT)) 1388 + ep0stall(udc); 1389 + 1384 1390 return; 1385 1391 stall: 1386 1392 ep0stall(udc); ··· 1492 1492 spin_lock(&udc->lock); 1493 1493 udc->ep0_state = (setup->bRequestType & USB_DIR_IN) 1494 1494 ? DATA_STATE_XMIT : DATA_STATE_RECV; 1495 + /* 1496 + * If the data stage is IN, send status prime immediately. 1497 + * See 2.0 Spec chapter 8.5.3.3 for detail. 1498 + */ 1499 + if (udc->ep0_state == DATA_STATE_XMIT) 1500 + if (ep0_prime_status(udc, EP_DIR_OUT)) 1501 + ep0stall(udc); 1502 + 1495 1503 } else { 1496 1504 /* No data phase, IN status from gadget */ 1497 1505 udc->ep0_dir = USB_DIR_IN; ··· 1528 1520 1529 1521 switch (udc->ep0_state) { 1530 1522 case DATA_STATE_XMIT: 1531 - /* receive status phase */ 1532 - if (ep0_prime_status(udc, EP_DIR_OUT)) 1533 - ep0stall(udc); 1523 + /* already primed at setup_received_irq */ 1524 + udc->ep0_state = WAIT_FOR_OUT_STATUS; 1534 1525 break; 1535 1526 case DATA_STATE_RECV: 1536 1527 /* send status phase */
+2 -2
drivers/usb/gadget/g_ffs.c
··· 161 161 static struct ffs_data *gfs_ffs_data; 162 162 static unsigned long gfs_registered; 163 163 164 - static int gfs_init(void) 164 + static int __init gfs_init(void) 165 165 { 166 166 ENTER(); 167 167 ··· 169 169 } 170 170 module_init(gfs_init); 171 171 172 - static void gfs_exit(void) 172 + static void __exit gfs_exit(void) 173 173 { 174 174 ENTER(); 175 175
+10 -7
drivers/usb/gadget/s3c-hsotg.c
··· 340 340 /* currently we allocate TX FIFOs for all possible endpoints, 341 341 * and assume that they are all the same size. */ 342 342 343 - for (ep = 0; ep <= 15; ep++) { 343 + for (ep = 1; ep <= 15; ep++) { 344 344 val = addr; 345 345 val |= size << S3C_DPTXFSIZn_DPTxFSize_SHIFT; 346 346 addr += size; ··· 741 741 /* write size / packets */ 742 742 writel(epsize, hsotg->regs + epsize_reg); 743 743 744 - if (using_dma(hsotg)) { 744 + if (using_dma(hsotg) && !continuing) { 745 745 unsigned int dma_reg; 746 746 747 747 /* write DMA address to control register, buffer already ··· 1696 1696 reg |= mpsval; 1697 1697 writel(reg, regs + S3C_DIEPCTL(ep)); 1698 1698 1699 - reg = readl(regs + S3C_DOEPCTL(ep)); 1700 - reg &= ~S3C_DxEPCTL_MPS_MASK; 1701 - reg |= mpsval; 1702 - writel(reg, regs + S3C_DOEPCTL(ep)); 1699 + if (ep) { 1700 + reg = readl(regs + S3C_DOEPCTL(ep)); 1701 + reg &= ~S3C_DxEPCTL_MPS_MASK; 1702 + reg |= mpsval; 1703 + writel(reg, regs + S3C_DOEPCTL(ep)); 1704 + } 1703 1705 1704 1706 return; 1705 1707 ··· 1921 1919 ints & S3C_DIEPMSK_TxFIFOEmpty) { 1922 1920 dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n", 1923 1921 __func__, idx); 1924 - s3c_hsotg_trytx(hsotg, hs_ep); 1922 + if (!using_dma(hsotg)) 1923 + s3c_hsotg_trytx(hsotg, hs_ep); 1925 1924 } 1926 1925 } 1927 1926 }
+5 -1
drivers/usb/gadget/udc-core.c
··· 264 264 if (udc_is_newstyle(udc)) { 265 265 udc->driver->disconnect(udc->gadget); 266 266 udc->driver->unbind(udc->gadget); 267 - usb_gadget_udc_stop(udc->gadget, udc->driver); 268 267 usb_gadget_disconnect(udc->gadget); 268 + usb_gadget_udc_stop(udc->gadget, udc->driver); 269 269 } else { 270 270 usb_gadget_stop(udc->gadget, udc->driver); 271 271 } ··· 411 411 struct usb_udc *udc = container_of(dev, struct usb_udc, dev); 412 412 413 413 if (sysfs_streq(buf, "connect")) { 414 + if (udc_is_newstyle(udc)) 415 + usb_gadget_udc_start(udc->gadget, udc->driver); 414 416 usb_gadget_connect(udc->gadget); 415 417 } else if (sysfs_streq(buf, "disconnect")) { 418 + if (udc_is_newstyle(udc)) 419 + usb_gadget_udc_stop(udc->gadget, udc->driver); 416 420 usb_gadget_disconnect(udc->gadget); 417 421 } else { 418 422 dev_err(dev, "unsupported command '%s'\n", buf);
+1 -3
drivers/usb/gadget/uvc_queue.c
··· 543 543 return ret; 544 544 } 545 545 546 + /* called with queue->irqlock held.. */ 546 547 static struct uvc_buffer * 547 548 uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf) 548 549 { 549 550 struct uvc_buffer *nextbuf; 550 - unsigned long flags; 551 551 552 552 if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) && 553 553 buf->buf.length != buf->buf.bytesused) { ··· 556 556 return buf; 557 557 } 558 558 559 - spin_lock_irqsave(&queue->irqlock, flags); 560 559 list_del(&buf->queue); 561 560 if (!list_empty(&queue->irqqueue)) 562 561 nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer, 563 562 queue); 564 563 else 565 564 nextbuf = NULL; 566 - spin_unlock_irqrestore(&queue->irqlock, flags); 567 565 568 566 buf->buf.sequence = queue->sequence++; 569 567 do_gettimeofday(&buf->buf.timestamp);
+6 -1
drivers/usb/host/ehci-fsl.c
··· 218 218 u32 portsc; 219 219 struct usb_hcd *hcd = ehci_to_hcd(ehci); 220 220 void __iomem *non_ehci = hcd->regs; 221 + struct fsl_usb2_platform_data *pdata; 222 + 223 + pdata = hcd->self.controller->platform_data; 221 224 222 225 portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]); 223 226 portsc &= ~(PORT_PTS_MSK | PORT_PTS_PTW); ··· 237 234 /* fall through */ 238 235 case FSL_USB2_PHY_UTMI: 239 236 /* enable UTMI PHY */ 240 - setbits32(non_ehci + FSL_SOC_USB_CTRL, CTRL_UTMI_PHY_EN); 237 + if (pdata->have_sysif_regs) 238 + setbits32(non_ehci + FSL_SOC_USB_CTRL, 239 + CTRL_UTMI_PHY_EN); 241 240 portsc |= PORT_PTS_UTMI; 242 241 break; 243 242 case FSL_USB2_PHY_NONE:
+7 -2
drivers/usb/host/ehci-hcd.c
··· 858 858 goto dead; 859 859 } 860 860 861 + /* 862 + * We don't use STS_FLR, but some controllers don't like it to 863 + * remain on, so mask it out along with the other status bits. 864 + */ 865 + masked_status = status & (INTR_MASK | STS_FLR); 866 + 861 867 /* Shared IRQ? */ 862 - masked_status = status & INTR_MASK; 863 868 if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) { 864 869 spin_unlock(&ehci->lock); 865 870 return IRQ_NONE; ··· 915 910 pcd_status = status; 916 911 917 912 /* resume root hub? */ 918 - if (!(cmd & CMD_RUN)) 913 + if (ehci->rh_state == EHCI_RH_SUSPENDED) 919 914 usb_hcd_resume_root_hub(hcd); 920 915 921 916 /* get per-port change detect bits */
+37 -2
drivers/usb/host/ehci-omap.c
··· 42 42 #include <plat/usb.h> 43 43 #include <linux/regulator/consumer.h> 44 44 #include <linux/pm_runtime.h> 45 + #include <linux/gpio.h> 45 46 46 47 /* EHCI Register Set */ 47 48 #define EHCI_INSNREG04 (0xA0) ··· 192 191 } 193 192 } 194 193 194 + if (pdata->phy_reset) { 195 + if (gpio_is_valid(pdata->reset_gpio_port[0])) 196 + gpio_request_one(pdata->reset_gpio_port[0], 197 + GPIOF_OUT_INIT_LOW, "USB1 PHY reset"); 198 + 199 + if (gpio_is_valid(pdata->reset_gpio_port[1])) 200 + gpio_request_one(pdata->reset_gpio_port[1], 201 + GPIOF_OUT_INIT_LOW, "USB2 PHY reset"); 202 + 203 + /* Hold the PHY in RESET for enough time till DIR is high */ 204 + udelay(10); 205 + } 206 + 195 207 pm_runtime_enable(dev); 196 208 pm_runtime_get_sync(dev); 197 209 ··· 251 237 /* root ports should always stay powered */ 252 238 ehci_port_power(omap_ehci, 1); 253 239 240 + if (pdata->phy_reset) { 241 + /* Hold the PHY in RESET for enough time till 242 + * PHY is settled and ready 243 + */ 244 + udelay(10); 245 + 246 + if (gpio_is_valid(pdata->reset_gpio_port[0])) 247 + gpio_set_value(pdata->reset_gpio_port[0], 1); 248 + 249 + if (gpio_is_valid(pdata->reset_gpio_port[1])) 250 + gpio_set_value(pdata->reset_gpio_port[1], 1); 251 + } 252 + 254 253 return 0; 255 254 256 255 err_add_hcd: ··· 286 259 */ 287 260 static int ehci_hcd_omap_remove(struct platform_device *pdev) 288 261 { 289 - struct device *dev = &pdev->dev; 290 - struct usb_hcd *hcd = dev_get_drvdata(dev); 262 + struct device *dev = &pdev->dev; 263 + struct usb_hcd *hcd = dev_get_drvdata(dev); 264 + struct ehci_hcd_omap_platform_data *pdata = dev->platform_data; 291 265 292 266 usb_remove_hcd(hcd); 293 267 disable_put_regulator(dev->platform_data); ··· 297 269 pm_runtime_put_sync(dev); 298 270 pm_runtime_disable(dev); 299 271 272 + if (pdata->phy_reset) { 273 + if (gpio_is_valid(pdata->reset_gpio_port[0])) 274 + gpio_free(pdata->reset_gpio_port[0]); 275 + 276 + if (gpio_is_valid(pdata->reset_gpio_port[1])) 277 + 
gpio_free(pdata->reset_gpio_port[1]); 278 + } 300 279 return 0; 301 280 } 302 281
-1
drivers/usb/host/ehci-tegra.c
··· 731 731 err = -ENODEV; 732 732 goto fail; 733 733 } 734 - set_irq_flags(irq, IRQF_VALID); 735 734 736 735 #ifdef CONFIG_USB_OTG_UTILS 737 736 if (pdata->operating_mode == TEGRA_USB_OTG) {
+6 -6
drivers/usb/host/ohci-at91.c
··· 94 94 95 95 /*-------------------------------------------------------------------------*/ 96 96 97 - static void usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *); 97 + static void __devexit usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *); 98 98 99 99 /* configure so an HC device and id are always provided */ 100 100 /* always called with process context; sleeping is OK */ ··· 108 108 * then invokes the start() method for the HCD associated with it 109 109 * through the hotplug entry's driver_data. 110 110 */ 111 - static int usb_hcd_at91_probe(const struct hc_driver *driver, 111 + static int __devinit usb_hcd_at91_probe(const struct hc_driver *driver, 112 112 struct platform_device *pdev) 113 113 { 114 114 int retval; ··· 203 203 * context, "rmmod" or something similar. 204 204 * 205 205 */ 206 - static void usb_hcd_at91_remove(struct usb_hcd *hcd, 206 + static void __devexit usb_hcd_at91_remove(struct usb_hcd *hcd, 207 207 struct platform_device *pdev) 208 208 { 209 209 usb_remove_hcd(hcd); ··· 545 545 546 546 /*-------------------------------------------------------------------------*/ 547 547 548 - static int ohci_hcd_at91_drv_probe(struct platform_device *pdev) 548 + static int __devinit ohci_hcd_at91_drv_probe(struct platform_device *pdev) 549 549 { 550 550 struct at91_usbh_data *pdata; 551 551 int i; ··· 620 620 return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev); 621 621 } 622 622 623 - static int ohci_hcd_at91_drv_remove(struct platform_device *pdev) 623 + static int __devexit ohci_hcd_at91_drv_remove(struct platform_device *pdev) 624 624 { 625 625 struct at91_usbh_data *pdata = pdev->dev.platform_data; 626 626 int i; ··· 696 696 697 697 static struct platform_driver ohci_hcd_at91_driver = { 698 698 .probe = ohci_hcd_at91_drv_probe, 699 - .remove = ohci_hcd_at91_drv_remove, 699 + .remove = __devexit_p(ohci_hcd_at91_drv_remove), 700 700 .shutdown = usb_hcd_platform_shutdown, 701 701 .suspend = ohci_hcd_at91_drv_suspend, 
702 702 .resume = ohci_hcd_at91_drv_resume,
+6 -3
drivers/usb/misc/usbtest.c
··· 423 423 unsigned i; 424 424 unsigned size = max; 425 425 426 - sg = kmalloc(nents * sizeof *sg, GFP_KERNEL); 426 + sg = kmalloc_array(nents, sizeof *sg, GFP_KERNEL); 427 427 if (!sg) 428 428 return NULL; 429 429 sg_init_table(sg, nents); ··· 903 903 struct urb **urb; 904 904 struct ctrl_ctx context; 905 905 int i; 906 + 907 + if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen) 908 + return -EOPNOTSUPP; 906 909 907 910 spin_lock_init(&context.lock); 908 911 context.dev = dev; ··· 1984 1981 1985 1982 /* queued control messaging */ 1986 1983 case 10: 1987 - if (param->sglen == 0) 1988 - break; 1989 1984 retval = 0; 1990 1985 dev_info(&intf->dev, 1991 1986 "TEST 10: queue %d control calls, %d times\n", ··· 2277 2276 if (status < 0) { 2278 2277 WARNING(dev, "couldn't get endpoints, %d\n", 2279 2278 status); 2279 + kfree(dev->buf); 2280 + kfree(dev); 2280 2281 return status; 2281 2282 } 2282 2283 /* may find bulk or ISO pipes */
+3 -7
drivers/usb/misc/yurex.c
··· 99 99 usb_put_dev(dev->udev); 100 100 if (dev->cntl_urb) { 101 101 usb_kill_urb(dev->cntl_urb); 102 - if (dev->cntl_req) 103 - usb_free_coherent(dev->udev, YUREX_BUF_SIZE, 104 - dev->cntl_req, dev->cntl_urb->setup_dma); 102 + kfree(dev->cntl_req); 105 103 if (dev->cntl_buffer) 106 104 usb_free_coherent(dev->udev, YUREX_BUF_SIZE, 107 105 dev->cntl_buffer, dev->cntl_urb->transfer_dma); ··· 232 234 } 233 235 234 236 /* allocate buffer for control req */ 235 - dev->cntl_req = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE, 236 - GFP_KERNEL, 237 - &dev->cntl_urb->setup_dma); 237 + dev->cntl_req = kmalloc(YUREX_BUF_SIZE, GFP_KERNEL); 238 238 if (!dev->cntl_req) { 239 239 err("Could not allocate cntl_req"); 240 240 goto error; ··· 282 286 usb_rcvintpipe(dev->udev, dev->int_in_endpointAddr), 283 287 dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt, 284 288 dev, 1); 285 - dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 289 + dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 286 290 if (usb_submit_urb(dev->urb, GFP_KERNEL)) { 287 291 retval = -EIO; 288 292 err("Could not submitting URB");
+33 -7
drivers/usb/musb/musb_core.c
··· 137 137 int i = 0; 138 138 u8 r; 139 139 u8 power; 140 + int ret; 141 + 142 + pm_runtime_get_sync(phy->io_dev); 140 143 141 144 /* Make sure the transceiver is not in low power mode */ 142 145 power = musb_readb(addr, MUSB_POWER); ··· 157 154 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) 158 155 & MUSB_ULPI_REG_CMPLT)) { 159 156 i++; 160 - if (i == 10000) 161 - return -ETIMEDOUT; 157 + if (i == 10000) { 158 + ret = -ETIMEDOUT; 159 + goto out; 160 + } 162 161 163 162 } 164 163 r = musb_readb(addr, MUSB_ULPI_REG_CONTROL); 165 164 r &= ~MUSB_ULPI_REG_CMPLT; 166 165 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r); 167 166 168 - return musb_readb(addr, MUSB_ULPI_REG_DATA); 167 + ret = musb_readb(addr, MUSB_ULPI_REG_DATA); 168 + 169 + out: 170 + pm_runtime_put(phy->io_dev); 171 + 172 + return ret; 169 173 } 170 174 171 175 static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data) ··· 181 171 int i = 0; 182 172 u8 r = 0; 183 173 u8 power; 174 + int ret = 0; 175 + 176 + pm_runtime_get_sync(phy->io_dev); 184 177 185 178 /* Make sure the transceiver is not in low power mode */ 186 179 power = musb_readb(addr, MUSB_POWER); ··· 197 184 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) 198 185 & MUSB_ULPI_REG_CMPLT)) { 199 186 i++; 200 - if (i == 10000) 201 - return -ETIMEDOUT; 187 + if (i == 10000) { 188 + ret = -ETIMEDOUT; 189 + goto out; 190 + } 202 191 } 203 192 204 193 r = musb_readb(addr, MUSB_ULPI_REG_CONTROL); 205 194 r &= ~MUSB_ULPI_REG_CMPLT; 206 195 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r); 207 196 208 - return 0; 197 + out: 198 + pm_runtime_put(phy->io_dev); 199 + 200 + return ret; 209 201 } 210 202 #else 211 203 #define musb_ulpi_read NULL ··· 1922 1904 1923 1905 if (!musb->isr) { 1924 1906 status = -ENODEV; 1925 - goto fail3; 1907 + goto fail2; 1926 1908 } 1927 1909 1928 1910 if (!musb->xceiv->io_ops) { 1911 + musb->xceiv->io_dev = musb->controller; 1929 1912 musb->xceiv->io_priv = musb->mregs; 1930 1913 musb->xceiv->io_ops = &musb_ulpi_access; 
1931 1914 } 1915 + 1916 + pm_runtime_get_sync(musb->controller); 1932 1917 1933 1918 #ifndef CONFIG_MUSB_PIO_ONLY 1934 1919 if (use_dma && dev->dma_mask) { ··· 2044 2023 goto fail5; 2045 2024 #endif 2046 2025 2026 + pm_runtime_put(musb->controller); 2027 + 2047 2028 dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n", 2048 2029 ({char *s; 2049 2030 switch (musb->board_mode) { ··· 2070 2047 musb_gadget_cleanup(musb); 2071 2048 2072 2049 fail3: 2050 + pm_runtime_put_sync(musb->controller); 2051 + 2052 + fail2: 2073 2053 if (musb->irq_wake) 2074 2054 device_init_wakeup(dev, 0); 2075 2055 musb_platform_exit(musb);
+1 -1
drivers/usb/musb/musb_host.c
··· 2098 2098 } 2099 2099 2100 2100 /* turn off DMA requests, discard state, stop polling ... */ 2101 - if (is_in) { 2101 + if (ep->epnum && is_in) { 2102 2102 /* giveback saves bulk toggle */ 2103 2103 csr = musb_h_flush_rxfifo(ep, 0); 2104 2104
+18 -13
drivers/usb/musb/omap2430.c
··· 282 282 283 283 static int omap2430_musb_init(struct musb *musb) 284 284 { 285 - u32 l, status = 0; 285 + u32 l; 286 + int status = 0; 286 287 struct device *dev = musb->controller; 287 288 struct musb_hdrc_platform_data *plat = dev->platform_data; 288 289 struct omap_musb_board_data *data = plat->board_data; ··· 302 301 303 302 status = pm_runtime_get_sync(dev); 304 303 if (status < 0) { 305 - dev_err(dev, "pm_runtime_get_sync FAILED"); 304 + dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status); 306 305 goto err1; 307 306 } 308 307 ··· 334 333 335 334 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); 336 335 336 + pm_runtime_put_noidle(musb->controller); 337 337 return 0; 338 338 339 339 err1: ··· 454 452 goto err2; 455 453 } 456 454 455 + pm_runtime_enable(&pdev->dev); 456 + 457 457 ret = platform_device_add(musb); 458 458 if (ret) { 459 459 dev_err(&pdev->dev, "failed to register musb device\n"); 460 460 goto err2; 461 461 } 462 - 463 - pm_runtime_enable(&pdev->dev); 464 462 465 463 return 0; 466 464 ··· 480 478 481 479 platform_device_del(glue->musb); 482 480 platform_device_put(glue->musb); 483 - pm_runtime_put(&pdev->dev); 484 481 kfree(glue); 485 482 486 483 return 0; ··· 492 491 struct omap2430_glue *glue = dev_get_drvdata(dev); 493 492 struct musb *musb = glue_to_musb(glue); 494 493 495 - musb->context.otg_interfsel = musb_readl(musb->mregs, 496 - OTG_INTERFSEL); 494 + if (musb) { 495 + musb->context.otg_interfsel = musb_readl(musb->mregs, 496 + OTG_INTERFSEL); 497 497 498 - omap2430_low_level_exit(musb); 499 - usb_phy_set_suspend(musb->xceiv, 1); 498 + omap2430_low_level_exit(musb); 499 + usb_phy_set_suspend(musb->xceiv, 1); 500 + } 500 501 501 502 return 0; 502 503 } ··· 508 505 struct omap2430_glue *glue = dev_get_drvdata(dev); 509 506 struct musb *musb = glue_to_musb(glue); 510 507 511 - omap2430_low_level_init(musb); 512 - musb_writel(musb->mregs, OTG_INTERFSEL, 513 - musb->context.otg_interfsel); 508 + if (musb) { 509 + 
omap2430_low_level_init(musb); 510 + musb_writel(musb->mregs, OTG_INTERFSEL, 511 + musb->context.otg_interfsel); 514 512 515 - usb_phy_set_suspend(musb->xceiv, 0); 513 + usb_phy_set_suspend(musb->xceiv, 0); 514 + } 516 515 517 516 return 0; 518 517 }
+6 -3
drivers/usb/serial/cp210x.c
··· 287 287 /* Issue the request, attempting to read 'size' bytes */ 288 288 result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 289 289 request, REQTYPE_DEVICE_TO_HOST, 0x0000, 290 - port_priv->bInterfaceNumber, buf, size, 300); 290 + port_priv->bInterfaceNumber, buf, size, 291 + USB_CTRL_GET_TIMEOUT); 291 292 292 293 /* Convert data into an array of integers */ 293 294 for (i = 0; i < length; i++) ··· 341 340 result = usb_control_msg(serial->dev, 342 341 usb_sndctrlpipe(serial->dev, 0), 343 342 request, REQTYPE_HOST_TO_DEVICE, 0x0000, 344 - port_priv->bInterfaceNumber, buf, size, 300); 343 + port_priv->bInterfaceNumber, buf, size, 344 + USB_CTRL_SET_TIMEOUT); 345 345 } else { 346 346 result = usb_control_msg(serial->dev, 347 347 usb_sndctrlpipe(serial->dev, 0), 348 348 request, REQTYPE_HOST_TO_DEVICE, data[0], 349 - port_priv->bInterfaceNumber, NULL, 0, 300); 349 + port_priv->bInterfaceNumber, NULL, 0, 350 + USB_CTRL_SET_TIMEOUT); 350 351 } 351 352 352 353 kfree(buf);
+4 -2
drivers/usb/serial/sierra.c
··· 221 221 }; 222 222 223 223 /* 'blacklist' of interfaces not served by this driver */ 224 - static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 }; 224 + static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11, 19, 20 }; 225 225 static const struct sierra_iface_info direct_ip_interface_blacklist = { 226 226 .infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces), 227 227 .ifaceinfo = direct_ip_non_serial_ifaces, ··· 289 289 { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */ 290 290 { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */ 291 291 { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */ 292 - { USB_DEVICE(0x1199, 0x68A2) }, /* Sierra Wireless MC7710 */ 293 292 /* Sierra Wireless C885 */ 294 293 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)}, 295 294 /* Sierra Wireless C888, Air Card 501, USB 303, USB 304 */ ··· 298 299 /* Sierra Wireless HSPA Non-Composite Device */ 299 300 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, 300 301 { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */ 302 + { USB_DEVICE(0x1199, 0x68A2), /* Sierra Wireless MC77xx in QMI mode */ 303 + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 304 + }, 301 305 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ 302 306 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 303 307 },
+2 -1
drivers/uwb/hwa-rc.c
··· 645 645 dev_err(dev, "NEEP: URB error %d\n", urb->status); 646 646 } 647 647 result = usb_submit_urb(urb, GFP_ATOMIC); 648 - if (result < 0) { 648 + if (result < 0 && result != -ENODEV && result != -EPERM) { 649 + /* ignoring unrecoverable errors */ 649 650 dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n", 650 651 result); 651 652 goto error;
+11 -1
drivers/uwb/neh.c
··· 107 107 u8 evt_type; 108 108 __le16 evt; 109 109 u8 context; 110 + u8 completed; 110 111 uwb_rc_cmd_cb_f cb; 111 112 void *arg; 112 113 ··· 410 409 struct device *dev = &rc->uwb_dev.dev; 411 410 struct uwb_rc_neh *neh; 412 411 struct uwb_rceb *notif; 412 + unsigned long flags; 413 413 414 414 if (rceb->bEventContext == 0) { 415 415 notif = kmalloc(size, GFP_ATOMIC); ··· 424 422 } else { 425 423 neh = uwb_rc_neh_lookup(rc, rceb); 426 424 if (neh) { 427 - del_timer_sync(&neh->timer); 425 + spin_lock_irqsave(&rc->neh_lock, flags); 426 + /* to guard against a timeout */ 427 + neh->completed = 1; 428 + del_timer(&neh->timer); 429 + spin_unlock_irqrestore(&rc->neh_lock, flags); 428 430 uwb_rc_neh_cb(neh, rceb, size); 429 431 } else 430 432 dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n", ··· 574 568 unsigned long flags; 575 569 576 570 spin_lock_irqsave(&rc->neh_lock, flags); 571 + if (neh->completed) { 572 + spin_unlock_irqrestore(&rc->neh_lock, flags); 573 + return; 574 + } 577 575 if (neh->context) 578 576 __uwb_rc_neh_rm(rc, neh); 579 577 else
+1 -1
drivers/vhost/test.c
··· 155 155 156 156 vhost_test_stop(n, &private); 157 157 vhost_test_flush(n); 158 - vhost_dev_cleanup(&n->dev); 158 + vhost_dev_cleanup(&n->dev, false); 159 159 /* We do an extra flush before freeing memory, 160 160 * since jobs can re-queue themselves. */ 161 161 vhost_test_flush(n);
+46 -12
drivers/virtio/virtio_balloon.c
··· 28 28 #include <linux/slab.h> 29 29 #include <linux/module.h> 30 30 31 + /* 32 + * Balloon device works in 4K page units. So each page is pointed to by 33 + * multiple balloon pages. All memory counters in this driver are in balloon 34 + * page units. 35 + */ 36 + #define VIRTIO_BALLOON_PAGES_PER_PAGE (PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT) 37 + 31 38 struct virtio_balloon 32 39 { 33 40 struct virtio_device *vdev; ··· 49 42 /* Waiting for host to ack the pages we released. */ 50 43 struct completion acked; 51 44 52 - /* The pages we've told the Host we're not using. */ 45 + /* Number of balloon pages we've told the Host we're not using. */ 53 46 unsigned int num_pages; 47 + /* 48 + * The pages we've told the Host we're not using. 49 + * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE 50 + * to num_pages above. 51 + */ 54 52 struct list_head pages; 55 53 56 54 /* The array of pfns we tell the Host about. */ ··· 78 66 79 67 BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT); 80 68 /* Convert pfn from Linux page size to balloon page size. */ 81 - return pfn >> (PAGE_SHIFT - VIRTIO_BALLOON_PFN_SHIFT); 69 + return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE; 70 + } 71 + 72 + static struct page *balloon_pfn_to_page(u32 pfn) 73 + { 74 + BUG_ON(pfn % VIRTIO_BALLOON_PAGES_PER_PAGE); 75 + return pfn_to_page(pfn / VIRTIO_BALLOON_PAGES_PER_PAGE); 82 76 } 83 77 84 78 static void balloon_ack(struct virtqueue *vq) ··· 114 96 wait_for_completion(&vb->acked); 115 97 } 116 98 99 + static void set_page_pfns(u32 pfns[], struct page *page) 100 + { 101 + unsigned int i; 102 + 103 + /* Set balloon pfns pointing at this page. 104 + * Note that the first pfn points at start of the page. */ 105 + for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++) 106 + pfns[i] = page_to_balloon_pfn(page) + i; 107 + } 108 + 117 109 static void fill_balloon(struct virtio_balloon *vb, size_t num) 118 110 { 119 111 /* We can only do one array worth at a time. 
*/ 120 112 num = min(num, ARRAY_SIZE(vb->pfns)); 121 113 122 - for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) { 114 + for (vb->num_pfns = 0; vb->num_pfns < num; 115 + vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) { 123 116 struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY | 124 117 __GFP_NOMEMALLOC | __GFP_NOWARN); 125 118 if (!page) { ··· 142 113 msleep(200); 143 114 break; 144 115 } 145 - vb->pfns[vb->num_pfns] = page_to_balloon_pfn(page); 116 + set_page_pfns(vb->pfns + vb->num_pfns, page); 117 + vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE; 146 118 totalram_pages--; 147 - vb->num_pages++; 148 119 list_add(&page->lru, &vb->pages); 149 120 } 150 121 ··· 159 130 { 160 131 unsigned int i; 161 132 162 - for (i = 0; i < num; i++) { 163 - __free_page(pfn_to_page(pfns[i])); 133 + /* Find pfns pointing at start of each page, get pages and free them. */ 134 + for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) { 135 + __free_page(balloon_pfn_to_page(pfns[i])); 164 136 totalram_pages++; 165 137 } 166 138 } ··· 173 143 /* We can only do one array worth at a time. 
*/ 174 144 num = min(num, ARRAY_SIZE(vb->pfns)); 175 145 176 - for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) { 146 + for (vb->num_pfns = 0; vb->num_pfns < num; 147 + vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) { 177 148 page = list_first_entry(&vb->pages, struct page, lru); 178 149 list_del(&page->lru); 179 - vb->pfns[vb->num_pfns] = page_to_balloon_pfn(page); 180 - vb->num_pages--; 150 + set_page_pfns(vb->pfns + vb->num_pfns, page); 151 + vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE; 181 152 } 182 153 183 154 /* ··· 265 234 266 235 static inline s64 towards_target(struct virtio_balloon *vb) 267 236 { 268 - u32 v; 237 + __le32 v; 238 + s64 target; 239 + 269 240 vb->vdev->config->get(vb->vdev, 270 241 offsetof(struct virtio_balloon_config, num_pages), 271 242 &v, sizeof(v)); 272 - return (s64)v - vb->num_pages; 243 + target = le32_to_cpu(v); 244 + return target - vb->num_pages; 273 245 } 274 246 275 247 static void update_balloon_size(struct virtio_balloon *vb)
+1 -1
drivers/xen/gntdev.c
··· 722 722 vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND; 723 723 724 724 if (use_ptemod) 725 - vma->vm_flags |= VM_DONTCOPY|VM_PFNMAP; 725 + vma->vm_flags |= VM_DONTCOPY; 726 726 727 727 vma->vm_private_data = map; 728 728
+9 -4
drivers/xen/grant-table.c
··· 1029 1029 int i; 1030 1030 unsigned int max_nr_glist_frames, nr_glist_frames; 1031 1031 unsigned int nr_init_grefs; 1032 + int ret; 1032 1033 1033 1034 nr_grant_frames = 1; 1034 1035 boot_max_nr_grant_frames = __max_nr_grant_frames(); ··· 1048 1047 nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; 1049 1048 for (i = 0; i < nr_glist_frames; i++) { 1050 1049 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); 1051 - if (gnttab_list[i] == NULL) 1050 + if (gnttab_list[i] == NULL) { 1051 + ret = -ENOMEM; 1052 1052 goto ini_nomem; 1053 + } 1053 1054 } 1054 1055 1055 - if (gnttab_resume() < 0) 1056 - return -ENODEV; 1056 + if (gnttab_resume() < 0) { 1057 + ret = -ENODEV; 1058 + goto ini_nomem; 1059 + } 1057 1060 1058 1061 nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME; 1059 1062 ··· 1075 1070 for (i--; i >= 0; i--) 1076 1071 free_page((unsigned long)gnttab_list[i]); 1077 1072 kfree(gnttab_list); 1078 - return -ENOMEM; 1073 + return ret; 1079 1074 } 1080 1075 EXPORT_SYMBOL_GPL(gnttab_init); 1081 1076
+1
drivers/xen/manage.c
··· 132 132 err = dpm_suspend_end(PMSG_FREEZE); 133 133 if (err) { 134 134 printk(KERN_ERR "dpm_suspend_end failed: %d\n", err); 135 + si.cancelled = 0; 135 136 goto out_resume; 136 137 } 137 138
+52 -15
drivers/xen/xenbus/xenbus_probe_frontend.c
··· 135 135 return xenbus_read_otherend_details(xendev, "backend-id", "backend"); 136 136 } 137 137 138 - static int is_device_connecting(struct device *dev, void *data) 138 + static int is_device_connecting(struct device *dev, void *data, bool ignore_nonessential) 139 139 { 140 140 struct xenbus_device *xendev = to_xenbus_device(dev); 141 141 struct device_driver *drv = data; ··· 152 152 if (drv && (dev->driver != drv)) 153 153 return 0; 154 154 155 + if (ignore_nonessential) { 156 + /* With older QEMU, for PVonHVM guests the guest config files 157 + * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0'] 158 + * which is nonsensical as there is no PV FB (there can be 159 + * a PVKB) running as HVM guest. */ 160 + 161 + if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0)) 162 + return 0; 163 + 164 + if ((strncmp(xendev->nodename, "device/vfb", 10) == 0)) 165 + return 0; 166 + } 155 167 xendrv = to_xenbus_driver(dev->driver); 156 168 return (xendev->state < XenbusStateConnected || 157 169 (xendev->state == XenbusStateConnected && 158 170 xendrv->is_ready && !xendrv->is_ready(xendev))); 159 171 } 172 + static int essential_device_connecting(struct device *dev, void *data) 173 + { 174 + return is_device_connecting(dev, data, true /* ignore PV[KBB+FB] */); 175 + } 176 + static int non_essential_device_connecting(struct device *dev, void *data) 177 + { 178 + return is_device_connecting(dev, data, false); 179 + } 160 180 161 - static int exists_connecting_device(struct device_driver *drv) 181 + static int exists_essential_connecting_device(struct device_driver *drv) 162 182 { 163 183 return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, 164 - is_device_connecting); 184 + essential_device_connecting); 185 + } 186 + static int exists_non_essential_connecting_device(struct device_driver *drv) 187 + { 188 + return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, 189 + non_essential_device_connecting); 165 190 } 166 191 167 192 static int print_device_status(struct 
device *dev, void *data) ··· 217 192 /* We only wait for device setup after most initcalls have run. */ 218 193 static int ready_to_wait_for_devices; 219 194 195 + static bool wait_loop(unsigned long start, unsigned int max_delay, 196 + unsigned int *seconds_waited) 197 + { 198 + if (time_after(jiffies, start + (*seconds_waited+5)*HZ)) { 199 + if (!*seconds_waited) 200 + printk(KERN_WARNING "XENBUS: Waiting for " 201 + "devices to initialise: "); 202 + *seconds_waited += 5; 203 + printk("%us...", max_delay - *seconds_waited); 204 + if (*seconds_waited == max_delay) 205 + return true; 206 + } 207 + 208 + schedule_timeout_interruptible(HZ/10); 209 + 210 + return false; 211 + } 220 212 /* 221 213 * On a 5-minute timeout, wait for all devices currently configured. We need 222 214 * to do this to guarantee that the filesystems and / or network devices ··· 257 215 if (!ready_to_wait_for_devices || !xen_domain()) 258 216 return; 259 217 260 - while (exists_connecting_device(drv)) { 261 - if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { 262 - if (!seconds_waited) 263 - printk(KERN_WARNING "XENBUS: Waiting for " 264 - "devices to initialise: "); 265 - seconds_waited += 5; 266 - printk("%us...", 300 - seconds_waited); 267 - if (seconds_waited == 300) 268 - break; 269 - } 218 + while (exists_non_essential_connecting_device(drv)) 219 + if (wait_loop(start, 30, &seconds_waited)) 220 + break; 270 221 271 - schedule_timeout_interruptible(HZ/10); 272 - } 222 + /* Skips PVKB and PVFB check.*/ 223 + while (exists_essential_connecting_device(drv)) 224 + if (wait_loop(start, 270, &seconds_waited)) 225 + break; 273 226 274 227 if (seconds_waited) 275 228 printk("\n");
+13 -3
fs/aio.c
··· 93 93 put_page(info->ring_pages[i]); 94 94 95 95 if (info->mmap_size) { 96 - down_write(&ctx->mm->mmap_sem); 97 - do_munmap(ctx->mm, info->mmap_base, info->mmap_size); 98 - up_write(&ctx->mm->mmap_sem); 96 + BUG_ON(ctx->mm != current->mm); 97 + vm_munmap(info->mmap_base, info->mmap_size); 99 98 } 100 99 101 100 if (info->ring_pages && info->ring_pages != info->internal_pages) ··· 388 389 "exit_aio:ioctx still alive: %d %d %d\n", 389 390 atomic_read(&ctx->users), ctx->dead, 390 391 ctx->reqs_active); 392 + /* 393 + * We don't need to bother with munmap() here - 394 + * exit_mmap(mm) is coming and it'll unmap everything. 395 + * Since aio_free_ring() uses non-zero ->mmap_size 396 + * as indicator that it needs to unmap the area, 397 + * just set it to 0; aio_free_ring() is the only 398 + * place that uses ->mmap_size, so it's safe. 399 + * That way we get all munmap done to current->mm - 400 + * all other callers have ctx->mm == current->mm. 401 + */ 402 + ctx->ring_info.mmap_size = 0; 391 403 put_ioctx(ctx); 392 404 } 393 405 }
+8 -24
fs/binfmt_aout.c
··· 50 50 end = PAGE_ALIGN(end); 51 51 if (end > start) { 52 52 unsigned long addr; 53 - down_write(&current->mm->mmap_sem); 54 - addr = do_brk(start, end - start); 55 - up_write(&current->mm->mmap_sem); 53 + addr = vm_brk(start, end - start); 56 54 if (BAD_ADDR(addr)) 57 55 return addr; 58 56 } ··· 278 280 pos = 32; 279 281 map_size = ex.a_text+ex.a_data; 280 282 #endif 281 - down_write(&current->mm->mmap_sem); 282 - error = do_brk(text_addr & PAGE_MASK, map_size); 283 - up_write(&current->mm->mmap_sem); 283 + error = vm_brk(text_addr & PAGE_MASK, map_size); 284 284 if (error != (text_addr & PAGE_MASK)) { 285 285 send_sig(SIGKILL, current, 0); 286 286 return error; ··· 309 313 310 314 if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) { 311 315 loff_t pos = fd_offset; 312 - down_write(&current->mm->mmap_sem); 313 - do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data); 314 - up_write(&current->mm->mmap_sem); 316 + vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data); 315 317 bprm->file->f_op->read(bprm->file, 316 318 (char __user *)N_TXTADDR(ex), 317 319 ex.a_text+ex.a_data, &pos); ··· 319 325 goto beyond_if; 320 326 } 321 327 322 - down_write(&current->mm->mmap_sem); 323 - error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text, 328 + error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text, 324 329 PROT_READ | PROT_EXEC, 325 330 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, 326 331 fd_offset); 327 - up_write(&current->mm->mmap_sem); 328 332 329 333 if (error != N_TXTADDR(ex)) { 330 334 send_sig(SIGKILL, current, 0); 331 335 return error; 332 336 } 333 337 334 - down_write(&current->mm->mmap_sem); 335 - error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data, 338 + error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data, 336 339 PROT_READ | PROT_WRITE | PROT_EXEC, 337 340 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, 338 341 fd_offset + ex.a_text); 339 - up_write(&current->mm->mmap_sem); 340 342 if (error != N_DATADDR(ex)) { 341 343 send_sig(SIGKILL, current, 
0); 342 344 return error; ··· 402 412 "N_TXTOFF is not page aligned. Please convert library: %s\n", 403 413 file->f_path.dentry->d_name.name); 404 414 } 405 - down_write(&current->mm->mmap_sem); 406 - do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); 407 - up_write(&current->mm->mmap_sem); 415 + vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); 408 416 409 417 file->f_op->read(file, (char __user *)start_addr, 410 418 ex.a_text + ex.a_data, &pos); ··· 413 425 goto out; 414 426 } 415 427 /* Now use mmap to map the library into memory. */ 416 - down_write(&current->mm->mmap_sem); 417 - error = do_mmap(file, start_addr, ex.a_text + ex.a_data, 428 + error = vm_mmap(file, start_addr, ex.a_text + ex.a_data, 418 429 PROT_READ | PROT_WRITE | PROT_EXEC, 419 430 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE, 420 431 N_TXTOFF(ex)); 421 - up_write(&current->mm->mmap_sem); 422 432 retval = error; 423 433 if (error != start_addr) 424 434 goto out; ··· 424 438 len = PAGE_ALIGN(ex.a_text + ex.a_data); 425 439 bss = ex.a_text + ex.a_data + ex.a_bss; 426 440 if (bss > len) { 427 - down_write(&current->mm->mmap_sem); 428 - error = do_brk(start_addr + len, bss - len); 429 - up_write(&current->mm->mmap_sem); 441 + error = vm_brk(start_addr + len, bss - len); 430 442 retval = error; 431 443 if (error != start_addr + len) 432 444 goto out;
+6 -17
fs/binfmt_elf.c
··· 82 82 end = ELF_PAGEALIGN(end); 83 83 if (end > start) { 84 84 unsigned long addr; 85 - down_write(&current->mm->mmap_sem); 86 - addr = do_brk(start, end - start); 87 - up_write(&current->mm->mmap_sem); 85 + addr = vm_brk(start, end - start); 88 86 if (BAD_ADDR(addr)) 89 87 return addr; 90 88 } ··· 512 514 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); 513 515 514 516 /* Map the last of the bss segment */ 515 - down_write(&current->mm->mmap_sem); 516 - error = do_brk(elf_bss, last_bss - elf_bss); 517 - up_write(&current->mm->mmap_sem); 517 + error = vm_brk(elf_bss, last_bss - elf_bss); 518 518 if (BAD_ADDR(error)) 519 519 goto out_close; 520 520 } ··· 958 962 and some applications "depend" upon this behavior. 959 963 Since we do not have the power to recompile these, we 960 964 emulate the SVr4 behavior. Sigh. */ 961 - down_write(&current->mm->mmap_sem); 962 - error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC, 965 + error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC, 963 966 MAP_FIXED | MAP_PRIVATE, 0); 964 - up_write(&current->mm->mmap_sem); 965 967 } 966 968 967 969 #ifdef ELF_PLAT_INIT ··· 1044 1050 eppnt++; 1045 1051 1046 1052 /* Now use mmap to map the library into memory. 
*/ 1047 - down_write(&current->mm->mmap_sem); 1048 - error = do_mmap(file, 1053 + error = vm_mmap(file, 1049 1054 ELF_PAGESTART(eppnt->p_vaddr), 1050 1055 (eppnt->p_filesz + 1051 1056 ELF_PAGEOFFSET(eppnt->p_vaddr)), ··· 1052 1059 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE, 1053 1060 (eppnt->p_offset - 1054 1061 ELF_PAGEOFFSET(eppnt->p_vaddr))); 1055 - up_write(&current->mm->mmap_sem); 1056 1062 if (error != ELF_PAGESTART(eppnt->p_vaddr)) 1057 1063 goto out_free_ph; 1058 1064 ··· 1064 1072 len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + 1065 1073 ELF_MIN_ALIGN - 1); 1066 1074 bss = eppnt->p_memsz + eppnt->p_vaddr; 1067 - if (bss > len) { 1068 - down_write(&current->mm->mmap_sem); 1069 - do_brk(len, bss - len); 1070 - up_write(&current->mm->mmap_sem); 1071 - } 1075 + if (bss > len) 1076 + vm_brk(len, bss - len); 1072 1077 error = 0; 1073 1078 1074 1079 out_free_ph:
+4 -14
fs/binfmt_elf_fdpic.c
··· 390 390 (executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC)) 391 391 stack_prot |= PROT_EXEC; 392 392 393 - down_write(&current->mm->mmap_sem); 394 - current->mm->start_brk = do_mmap(NULL, 0, stack_size, stack_prot, 393 + current->mm->start_brk = vm_mmap(NULL, 0, stack_size, stack_prot, 395 394 MAP_PRIVATE | MAP_ANONYMOUS | 396 395 MAP_UNINITIALIZED | MAP_GROWSDOWN, 397 396 0); 398 397 399 398 if (IS_ERR_VALUE(current->mm->start_brk)) { 400 - up_write(&current->mm->mmap_sem); 401 399 retval = current->mm->start_brk; 402 400 current->mm->start_brk = 0; 403 401 goto error_kill; 404 402 } 405 - 406 - up_write(&current->mm->mmap_sem); 407 403 408 404 current->mm->brk = current->mm->start_brk; 409 405 current->mm->context.end_brk = current->mm->start_brk; ··· 951 955 if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE) 952 956 mflags |= MAP_EXECUTABLE; 953 957 954 - down_write(&mm->mmap_sem); 955 - maddr = do_mmap(NULL, load_addr, top - base, 958 + maddr = vm_mmap(NULL, load_addr, top - base, 956 959 PROT_READ | PROT_WRITE | PROT_EXEC, mflags, 0); 957 - up_write(&mm->mmap_sem); 958 960 if (IS_ERR_VALUE(maddr)) 959 961 return (int) maddr; 960 962 ··· 1090 1096 1091 1097 /* create the mapping */ 1092 1098 disp = phdr->p_vaddr & ~PAGE_MASK; 1093 - down_write(&mm->mmap_sem); 1094 - maddr = do_mmap(file, maddr, phdr->p_memsz + disp, prot, flags, 1099 + maddr = vm_mmap(file, maddr, phdr->p_memsz + disp, prot, flags, 1095 1100 phdr->p_offset - disp); 1096 - up_write(&mm->mmap_sem); 1097 1101 1098 1102 kdebug("mmap[%d] <file> sz=%lx pr=%x fl=%x of=%lx --> %08lx", 1099 1103 loop, phdr->p_memsz + disp, prot, flags, ··· 1135 1143 unsigned long xmaddr; 1136 1144 1137 1145 flags |= MAP_FIXED | MAP_ANONYMOUS; 1138 - down_write(&mm->mmap_sem); 1139 - xmaddr = do_mmap(NULL, xaddr, excess - excess1, 1146 + xmaddr = vm_mmap(NULL, xaddr, excess - excess1, 1140 1147 prot, flags, 0); 1141 - up_write(&mm->mmap_sem); 1142 1148 1143 1149 kdebug("mmap[%d] <anon>" 1144 1150 " ad=%lx 
sz=%lx pr=%x fl=%x of=0 --> %08lx",
+3 -9
fs/binfmt_flat.c
··· 542 542 */ 543 543 DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n"); 544 544 545 - down_write(&current->mm->mmap_sem); 546 - textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC, 545 + textpos = vm_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC, 547 546 MAP_PRIVATE|MAP_EXECUTABLE, 0); 548 - up_write(&current->mm->mmap_sem); 549 547 if (!textpos || IS_ERR_VALUE(textpos)) { 550 548 if (!textpos) 551 549 textpos = (unsigned long) -ENOMEM; ··· 554 556 555 557 len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long); 556 558 len = PAGE_ALIGN(len); 557 - down_write(&current->mm->mmap_sem); 558 - realdatastart = do_mmap(0, 0, len, 559 + realdatastart = vm_mmap(0, 0, len, 559 560 PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0); 560 - up_write(&current->mm->mmap_sem); 561 561 562 562 if (realdatastart == 0 || IS_ERR_VALUE(realdatastart)) { 563 563 if (!realdatastart) ··· 599 603 600 604 len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long); 601 605 len = PAGE_ALIGN(len); 602 - down_write(&current->mm->mmap_sem); 603 - textpos = do_mmap(0, 0, len, 606 + textpos = vm_mmap(0, 0, len, 604 607 PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0); 605 - up_write(&current->mm->mmap_sem); 606 608 607 609 if (!textpos || IS_ERR_VALUE(textpos)) { 608 610 if (!textpos)
+3 -9
fs/binfmt_som.c
··· 147 147 code_size = SOM_PAGEALIGN(hpuxhdr->exec_tsize); 148 148 current->mm->start_code = code_start; 149 149 current->mm->end_code = code_start + code_size; 150 - down_write(&current->mm->mmap_sem); 151 - retval = do_mmap(file, code_start, code_size, prot, 150 + retval = vm_mmap(file, code_start, code_size, prot, 152 151 flags, SOM_PAGESTART(hpuxhdr->exec_tfile)); 153 - up_write(&current->mm->mmap_sem); 154 152 if (retval < 0 && retval > -1024) 155 153 goto out; 156 154 ··· 156 158 data_size = SOM_PAGEALIGN(hpuxhdr->exec_dsize); 157 159 current->mm->start_data = data_start; 158 160 current->mm->end_data = bss_start = data_start + data_size; 159 - down_write(&current->mm->mmap_sem); 160 - retval = do_mmap(file, data_start, data_size, 161 + retval = vm_mmap(file, data_start, data_size, 161 162 prot | PROT_WRITE, flags, 162 163 SOM_PAGESTART(hpuxhdr->exec_dfile)); 163 - up_write(&current->mm->mmap_sem); 164 164 if (retval < 0 && retval > -1024) 165 165 goto out; 166 166 167 167 som_brk = bss_start + SOM_PAGEALIGN(hpuxhdr->exec_bsize); 168 168 current->mm->start_brk = current->mm->brk = som_brk; 169 - down_write(&current->mm->mmap_sem); 170 - retval = do_mmap(NULL, bss_start, som_brk - bss_start, 169 + retval = vm_mmap(NULL, bss_start, som_brk - bss_start, 171 170 prot | PROT_WRITE, MAP_FIXED | MAP_PRIVATE, 0); 172 - up_write(&current->mm->mmap_sem); 173 171 if (retval > 0 || retval < -1024) 174 172 retval = 0; 175 173 out:
+1 -1
fs/btrfs/ctree.h
··· 2166 2166 2167 2167 static inline bool btrfs_root_readonly(struct btrfs_root *root) 2168 2168 { 2169 - return root->root_item.flags & BTRFS_ROOT_SUBVOL_RDONLY; 2169 + return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0; 2170 2170 } 2171 2171 2172 2172 /* struct btrfs_root_backup */
+20 -62
fs/cifs/connect.c
··· 109 109 110 110 /* Options which could be blank */ 111 111 Opt_blank_pass, 112 + Opt_blank_user, 113 + Opt_blank_ip, 112 114 113 115 Opt_err 114 116 }; ··· 185 183 { Opt_wsize, "wsize=%s" }, 186 184 { Opt_actimeo, "actimeo=%s" }, 187 185 186 + { Opt_blank_user, "user=" }, 187 + { Opt_blank_user, "username=" }, 188 188 { Opt_user, "user=%s" }, 189 189 { Opt_user, "username=%s" }, 190 190 { Opt_blank_pass, "pass=" }, 191 191 { Opt_pass, "pass=%s" }, 192 192 { Opt_pass, "password=%s" }, 193 + { Opt_blank_ip, "ip=" }, 194 + { Opt_blank_ip, "addr=" }, 193 195 { Opt_ip, "ip=%s" }, 194 196 { Opt_ip, "addr=%s" }, 195 197 { Opt_unc, "unc=%s" }, ··· 1123 1117 string = match_strdup(args); 1124 1118 if (string == NULL) 1125 1119 return -ENOMEM; 1126 - rc = kstrtoul(string, 10, option); 1120 + rc = kstrtoul(string, 0, option); 1127 1121 kfree(string); 1128 1122 1129 1123 return rc; ··· 1540 1534 1541 1535 /* String Arguments */ 1542 1536 1537 + case Opt_blank_user: 1538 + /* null user, ie. anonymous authentication */ 1539 + vol->nullauth = 1; 1540 + vol->username = NULL; 1541 + break; 1543 1542 case Opt_user: 1544 1543 string = match_strdup(args); 1545 1544 if (string == NULL) 1546 1545 goto out_nomem; 1547 1546 1548 - if (!*string) { 1549 - /* null user, ie. 
anonymous authentication */ 1550 - vol->nullauth = 1; 1551 - } else if (strnlen(string, MAX_USERNAME_SIZE) > 1547 + if (strnlen(string, MAX_USERNAME_SIZE) > 1552 1548 MAX_USERNAME_SIZE) { 1553 1549 printk(KERN_WARNING "CIFS: username too long\n"); 1554 1550 goto cifs_parse_mount_err; ··· 1619 1611 } 1620 1612 vol->password[j] = '\0'; 1621 1613 break; 1614 + case Opt_blank_ip: 1615 + vol->UNCip = NULL; 1616 + break; 1622 1617 case Opt_ip: 1623 1618 string = match_strdup(args); 1624 1619 if (string == NULL) 1625 1620 goto out_nomem; 1626 1621 1627 - if (!*string) { 1628 - vol->UNCip = NULL; 1629 - } else if (strnlen(string, INET6_ADDRSTRLEN) > 1622 + if (strnlen(string, INET6_ADDRSTRLEN) > 1630 1623 INET6_ADDRSTRLEN) { 1631 1624 printk(KERN_WARNING "CIFS: ip address " 1632 1625 "too long\n"); ··· 1644 1635 string = match_strdup(args); 1645 1636 if (string == NULL) 1646 1637 goto out_nomem; 1647 - 1648 - if (!*string) { 1649 - printk(KERN_WARNING "CIFS: invalid path to " 1650 - "network resource\n"); 1651 - goto cifs_parse_mount_err; 1652 - } 1653 1638 1654 1639 temp_len = strnlen(string, 300); 1655 1640 if (temp_len == 300) { ··· 1673 1670 if (string == NULL) 1674 1671 goto out_nomem; 1675 1672 1676 - if (!*string) { 1677 - printk(KERN_WARNING "CIFS: invalid domain" 1678 - " name\n"); 1679 - goto cifs_parse_mount_err; 1680 - } else if (strnlen(string, 256) == 256) { 1673 + if (strnlen(string, 256) == 256) { 1681 1674 printk(KERN_WARNING "CIFS: domain name too" 1682 1675 " long\n"); 1683 1676 goto cifs_parse_mount_err; ··· 1692 1693 if (string == NULL) 1693 1694 goto out_nomem; 1694 1695 1695 - if (!*string) { 1696 - printk(KERN_WARNING "CIFS: srcaddr value not" 1697 - " specified\n"); 1698 - goto cifs_parse_mount_err; 1699 - } else if (!cifs_convert_address( 1696 + if (!cifs_convert_address( 1700 1697 (struct sockaddr *)&vol->srcaddr, 1701 1698 string, strlen(string))) { 1702 1699 printk(KERN_WARNING "CIFS: Could not parse" ··· 1705 1710 if (string == NULL) 1706 1711 
goto out_nomem; 1707 1712 1708 - if (!*string) { 1709 - printk(KERN_WARNING "CIFS: Invalid path" 1710 - " prefix\n"); 1711 - goto cifs_parse_mount_err; 1712 - } 1713 1713 temp_len = strnlen(string, 1024); 1714 1714 if (string[0] != '/') 1715 1715 temp_len++; /* missing leading slash */ ··· 1732 1742 if (string == NULL) 1733 1743 goto out_nomem; 1734 1744 1735 - if (!*string) { 1736 - printk(KERN_WARNING "CIFS: Invalid iocharset" 1737 - " specified\n"); 1738 - goto cifs_parse_mount_err; 1739 - } else if (strnlen(string, 1024) >= 65) { 1745 + if (strnlen(string, 1024) >= 65) { 1740 1746 printk(KERN_WARNING "CIFS: iocharset name " 1741 1747 "too long.\n"); 1742 1748 goto cifs_parse_mount_err; ··· 1757 1771 if (string == NULL) 1758 1772 goto out_nomem; 1759 1773 1760 - if (!*string) { 1761 - printk(KERN_WARNING "CIFS: No socket option" 1762 - " specified\n"); 1763 - goto cifs_parse_mount_err; 1764 - } 1765 1774 if (strnicmp(string, "TCP_NODELAY", 11) == 0) 1766 1775 vol->sockopt_tcp_nodelay = 1; 1767 1776 break; ··· 1764 1783 string = match_strdup(args); 1765 1784 if (string == NULL) 1766 1785 goto out_nomem; 1767 - 1768 - if (!*string) { 1769 - printk(KERN_WARNING "CIFS: Invalid (empty)" 1770 - " netbiosname\n"); 1771 - break; 1772 - } 1773 1786 1774 1787 memset(vol->source_rfc1001_name, 0x20, 1775 1788 RFC1001_NAME_LEN); ··· 1792 1817 if (string == NULL) 1793 1818 goto out_nomem; 1794 1819 1795 - if (!*string) { 1796 - printk(KERN_WARNING "CIFS: Empty server" 1797 - " netbiosname specified\n"); 1798 - break; 1799 - } 1800 1820 /* last byte, type, is 0x20 for servr type */ 1801 1821 memset(vol->target_rfc1001_name, 0x20, 1802 1822 RFC1001_NAME_LEN_WITH_NULL); ··· 1818 1848 if (string == NULL) 1819 1849 goto out_nomem; 1820 1850 1821 - if (!*string) { 1822 - cERROR(1, "no protocol version specified" 1823 - " after vers= mount option"); 1824 - goto cifs_parse_mount_err; 1825 - } 1826 - 1827 1851 if (strnicmp(string, "cifs", 4) == 0 || 1828 1852 strnicmp(string, "1", 1) 
== 0) { 1829 1853 /* This is the default */ ··· 1831 1867 string = match_strdup(args); 1832 1868 if (string == NULL) 1833 1869 goto out_nomem; 1834 - 1835 - if (!*string) { 1836 - printk(KERN_WARNING "CIFS: no security flavor" 1837 - " specified\n"); 1838 - break; 1839 - } 1840 1870 1841 1871 if (cifs_parse_security_flavors(string, vol) != 0) 1842 1872 goto cifs_parse_mount_err;
-3
fs/ext4/ext4.h
··· 1203 1203 unsigned long s_ext_blocks; 1204 1204 unsigned long s_ext_extents; 1205 1205 #endif 1206 - /* ext4 extent cache stats */ 1207 - unsigned long extent_cache_hits; 1208 - unsigned long extent_cache_misses; 1209 1206 1210 1207 /* for buddy allocator */ 1211 1208 struct ext4_group_info ***s_group_info;
+1 -5
fs/ext4/extents.c
··· 2066 2066 ret = 1; 2067 2067 } 2068 2068 errout: 2069 - if (!ret) 2070 - sbi->extent_cache_misses++; 2071 - else 2072 - sbi->extent_cache_hits++; 2073 2069 trace_ext4_ext_in_cache(inode, block, ret); 2074 2070 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 2075 2071 return ret; ··· 2878 2882 if (err) 2879 2883 goto fix_extent_len; 2880 2884 /* update the extent length and mark as initialized */ 2881 - ex->ee_len = cpu_to_le32(ee_len); 2885 + ex->ee_len = cpu_to_le16(ee_len); 2882 2886 ext4_ext_try_to_merge(inode, path, ex); 2883 2887 err = ext4_ext_dirty(handle, inode, path + depth); 2884 2888 goto out;
+15 -33
fs/ext4/super.c
··· 1305 1305 ext4_msg(sb, KERN_ERR, 1306 1306 "Cannot change journaled " 1307 1307 "quota options when quota turned on"); 1308 - return 0; 1308 + return -1; 1309 1309 } 1310 1310 qname = match_strdup(args); 1311 1311 if (!qname) { 1312 1312 ext4_msg(sb, KERN_ERR, 1313 1313 "Not enough memory for storing quotafile name"); 1314 - return 0; 1314 + return -1; 1315 1315 } 1316 1316 if (sbi->s_qf_names[qtype] && 1317 1317 strcmp(sbi->s_qf_names[qtype], qname)) { 1318 1318 ext4_msg(sb, KERN_ERR, 1319 1319 "%s quota file already specified", QTYPE2NAME(qtype)); 1320 1320 kfree(qname); 1321 - return 0; 1321 + return -1; 1322 1322 } 1323 1323 sbi->s_qf_names[qtype] = qname; 1324 1324 if (strchr(sbi->s_qf_names[qtype], '/')) { ··· 1326 1326 "quotafile must be on filesystem root"); 1327 1327 kfree(sbi->s_qf_names[qtype]); 1328 1328 sbi->s_qf_names[qtype] = NULL; 1329 - return 0; 1329 + return -1; 1330 1330 } 1331 1331 set_opt(sb, QUOTA); 1332 1332 return 1; ··· 1341 1341 sbi->s_qf_names[qtype]) { 1342 1342 ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options" 1343 1343 " when quota turned on"); 1344 - return 0; 1344 + return -1; 1345 1345 } 1346 1346 /* 1347 1347 * The space will be released later when all options are confirmed ··· 1450 1450 const struct mount_opts *m; 1451 1451 int arg = 0; 1452 1452 1453 + #ifdef CONFIG_QUOTA 1454 + if (token == Opt_usrjquota) 1455 + return set_qf_name(sb, USRQUOTA, &args[0]); 1456 + else if (token == Opt_grpjquota) 1457 + return set_qf_name(sb, GRPQUOTA, &args[0]); 1458 + else if (token == Opt_offusrjquota) 1459 + return clear_qf_name(sb, USRQUOTA); 1460 + else if (token == Opt_offgrpjquota) 1461 + return clear_qf_name(sb, GRPQUOTA); 1462 + #endif 1453 1463 if (args->from && match_int(args, &arg)) 1454 1464 return -1; 1455 1465 switch (token) { ··· 1559 1549 sbi->s_mount_opt |= m->mount_opt; 1560 1550 } 1561 1551 #ifdef CONFIG_QUOTA 1562 - } else if (token == Opt_usrjquota) { 1563 - if (!set_qf_name(sb, USRQUOTA, &args[0])) 1564 - 
return -1; 1565 - } else if (token == Opt_grpjquota) { 1566 - if (!set_qf_name(sb, GRPQUOTA, &args[0])) 1567 - return -1; 1568 - } else if (token == Opt_offusrjquota) { 1569 - if (!clear_qf_name(sb, USRQUOTA)) 1570 - return -1; 1571 - } else if (token == Opt_offgrpjquota) { 1572 - if (!clear_qf_name(sb, GRPQUOTA)) 1573 - return -1; 1574 1552 } else if (m->flags & MOPT_QFMT) { 1575 1553 if (sb_any_quota_loaded(sb) && 1576 1554 sbi->s_jquota_fmt != m->mount_opt) { ··· 2364 2366 EXT4_SB(sb)->s_sectors_written_start) >> 1))); 2365 2367 } 2366 2368 2367 - static ssize_t extent_cache_hits_show(struct ext4_attr *a, 2368 - struct ext4_sb_info *sbi, char *buf) 2369 - { 2370 - return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_hits); 2371 - } 2372 - 2373 - static ssize_t extent_cache_misses_show(struct ext4_attr *a, 2374 - struct ext4_sb_info *sbi, char *buf) 2375 - { 2376 - return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_misses); 2377 - } 2378 - 2379 2369 static ssize_t inode_readahead_blks_store(struct ext4_attr *a, 2380 2370 struct ext4_sb_info *sbi, 2381 2371 const char *buf, size_t count) ··· 2421 2435 EXT4_RO_ATTR(delayed_allocation_blocks); 2422 2436 EXT4_RO_ATTR(session_write_kbytes); 2423 2437 EXT4_RO_ATTR(lifetime_write_kbytes); 2424 - EXT4_RO_ATTR(extent_cache_hits); 2425 - EXT4_RO_ATTR(extent_cache_misses); 2426 2438 EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show, 2427 2439 inode_readahead_blks_store, s_inode_readahead_blks); 2428 2440 EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal); ··· 2436 2452 ATTR_LIST(delayed_allocation_blocks), 2437 2453 ATTR_LIST(session_write_kbytes), 2438 2454 ATTR_LIST(lifetime_write_kbytes), 2439 - ATTR_LIST(extent_cache_hits), 2440 - ATTR_LIST(extent_cache_misses), 2441 2455 ATTR_LIST(inode_readahead_blks), 2442 2456 ATTR_LIST(inode_goal), 2443 2457 ATTR_LIST(mb_stats),
+15 -10
fs/fuse/dir.c
··· 387 387 if (fc->no_create) 388 388 return -ENOSYS; 389 389 390 - if (flags & O_DIRECT) 391 - return -EINVAL; 392 - 393 390 forget = fuse_alloc_forget(); 394 391 if (!forget) 395 392 return -ENOMEM; ··· 641 644 fuse_put_request(fc, req); 642 645 if (!err) { 643 646 struct inode *inode = entry->d_inode; 647 + struct fuse_inode *fi = get_fuse_inode(inode); 644 648 645 - /* 646 - * Set nlink to zero so the inode can be cleared, if the inode 647 - * does have more links this will be discovered at the next 648 - * lookup/getattr. 649 - */ 650 - clear_nlink(inode); 649 + spin_lock(&fc->lock); 650 + fi->attr_version = ++fc->attr_version; 651 + drop_nlink(inode); 652 + spin_unlock(&fc->lock); 651 653 fuse_invalidate_attr(inode); 652 654 fuse_invalidate_attr(dir); 653 655 fuse_invalidate_entry_cache(entry); ··· 758 762 will reflect changes in the backing inode (link count, 759 763 etc.) 760 764 */ 761 - if (!err || err == -EINTR) 765 + if (!err) { 766 + struct fuse_inode *fi = get_fuse_inode(inode); 767 + 768 + spin_lock(&fc->lock); 769 + fi->attr_version = ++fc->attr_version; 770 + inc_nlink(inode); 771 + spin_unlock(&fc->lock); 762 772 fuse_invalidate_attr(inode); 773 + } else if (err == -EINTR) { 774 + fuse_invalidate_attr(inode); 775 + } 763 776 return err; 764 777 } 765 778
+112 -17
fs/fuse/file.c
··· 194 194 struct fuse_conn *fc = get_fuse_conn(inode); 195 195 int err; 196 196 197 - /* VFS checks this, but only _after_ ->open() */ 198 - if (file->f_flags & O_DIRECT) 199 - return -EINVAL; 200 - 201 197 err = generic_file_open(inode, file); 202 198 if (err) 203 199 return err; ··· 928 932 struct file *file = iocb->ki_filp; 929 933 struct address_space *mapping = file->f_mapping; 930 934 size_t count = 0; 935 + size_t ocount = 0; 931 936 ssize_t written = 0; 937 + ssize_t written_buffered = 0; 932 938 struct inode *inode = mapping->host; 933 939 ssize_t err; 934 940 struct iov_iter i; 941 + loff_t endbyte = 0; 935 942 936 943 WARN_ON(iocb->ki_pos != pos); 937 944 938 - err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ); 945 + ocount = 0; 946 + err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ); 939 947 if (err) 940 948 return err; 949 + 950 + count = ocount; 941 951 942 952 mutex_lock(&inode->i_mutex); 943 953 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); ··· 964 962 965 963 file_update_time(file); 966 964 967 - iov_iter_init(&i, iov, nr_segs, count, 0); 968 - written = fuse_perform_write(file, mapping, &i, pos); 969 - if (written >= 0) 970 - iocb->ki_pos = pos + written; 965 + if (file->f_flags & O_DIRECT) { 966 + written = generic_file_direct_write(iocb, iov, &nr_segs, 967 + pos, &iocb->ki_pos, 968 + count, ocount); 969 + if (written < 0 || written == count) 970 + goto out; 971 971 972 + pos += written; 973 + count -= written; 974 + 975 + iov_iter_init(&i, iov, nr_segs, count, written); 976 + written_buffered = fuse_perform_write(file, mapping, &i, pos); 977 + if (written_buffered < 0) { 978 + err = written_buffered; 979 + goto out; 980 + } 981 + endbyte = pos + written_buffered - 1; 982 + 983 + err = filemap_write_and_wait_range(file->f_mapping, pos, 984 + endbyte); 985 + if (err) 986 + goto out; 987 + 988 + invalidate_mapping_pages(file->f_mapping, 989 + pos >> PAGE_CACHE_SHIFT, 990 + endbyte >> PAGE_CACHE_SHIFT); 991 + 992 + 
written += written_buffered; 993 + iocb->ki_pos = pos + written_buffered; 994 + } else { 995 + iov_iter_init(&i, iov, nr_segs, count, 0); 996 + written = fuse_perform_write(file, mapping, &i, pos); 997 + if (written >= 0) 998 + iocb->ki_pos = pos + written; 999 + } 972 1000 out: 973 1001 current->backing_dev_info = NULL; 974 1002 mutex_unlock(&inode->i_mutex); ··· 1133 1101 return res; 1134 1102 } 1135 1103 1104 + static ssize_t __fuse_direct_write(struct file *file, const char __user *buf, 1105 + size_t count, loff_t *ppos) 1106 + { 1107 + struct inode *inode = file->f_path.dentry->d_inode; 1108 + ssize_t res; 1109 + 1110 + res = generic_write_checks(file, ppos, &count, 0); 1111 + if (!res) { 1112 + res = fuse_direct_io(file, buf, count, ppos, 1); 1113 + if (res > 0) 1114 + fuse_write_update_size(inode, *ppos); 1115 + } 1116 + 1117 + fuse_invalidate_attr(inode); 1118 + 1119 + return res; 1120 + } 1121 + 1136 1122 static ssize_t fuse_direct_write(struct file *file, const char __user *buf, 1137 1123 size_t count, loff_t *ppos) 1138 1124 { ··· 1162 1112 1163 1113 /* Don't allow parallel writes to the same file */ 1164 1114 mutex_lock(&inode->i_mutex); 1165 - res = generic_write_checks(file, ppos, &count, 0); 1166 - if (!res) { 1167 - res = fuse_direct_io(file, buf, count, ppos, 1); 1168 - if (res > 0) 1169 - fuse_write_update_size(inode, *ppos); 1170 - } 1115 + res = __fuse_direct_write(file, buf, count, ppos); 1171 1116 mutex_unlock(&inode->i_mutex); 1172 - 1173 - fuse_invalidate_attr(inode); 1174 1117 1175 1118 return res; 1176 1119 } ··· 2120 2077 return 0; 2121 2078 } 2122 2079 2080 + static ssize_t fuse_loop_dio(struct file *filp, const struct iovec *iov, 2081 + unsigned long nr_segs, loff_t *ppos, int rw) 2082 + { 2083 + const struct iovec *vector = iov; 2084 + ssize_t ret = 0; 2085 + 2086 + while (nr_segs > 0) { 2087 + void __user *base; 2088 + size_t len; 2089 + ssize_t nr; 2090 + 2091 + base = vector->iov_base; 2092 + len = vector->iov_len; 2093 + vector++; 
2094 + nr_segs--; 2095 + 2096 + if (rw == WRITE) 2097 + nr = __fuse_direct_write(filp, base, len, ppos); 2098 + else 2099 + nr = fuse_direct_read(filp, base, len, ppos); 2100 + 2101 + if (nr < 0) { 2102 + if (!ret) 2103 + ret = nr; 2104 + break; 2105 + } 2106 + ret += nr; 2107 + if (nr != len) 2108 + break; 2109 + } 2110 + 2111 + return ret; 2112 + } 2113 + 2114 + 2115 + static ssize_t 2116 + fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, 2117 + loff_t offset, unsigned long nr_segs) 2118 + { 2119 + ssize_t ret = 0; 2120 + struct file *file = NULL; 2121 + loff_t pos = 0; 2122 + 2123 + file = iocb->ki_filp; 2124 + pos = offset; 2125 + 2126 + ret = fuse_loop_dio(file, iov, nr_segs, &pos, rw); 2127 + 2128 + return ret; 2129 + } 2130 + 2123 2131 static const struct file_operations fuse_file_operations = { 2124 2132 .llseek = fuse_file_llseek, 2125 2133 .read = do_sync_read, ··· 2214 2120 .readpages = fuse_readpages, 2215 2121 .set_page_dirty = __set_page_dirty_nobuffers, 2216 2122 .bmap = fuse_bmap, 2123 + .direct_IO = fuse_direct_IO, 2217 2124 }; 2218 2125 2219 2126 void fuse_init_file_inode(struct inode *inode)
+1
fs/fuse/inode.c
··· 947 947 sb->s_magic = FUSE_SUPER_MAGIC; 948 948 sb->s_op = &fuse_super_operations; 949 949 sb->s_maxbytes = MAX_LFS_FILESIZE; 950 + sb->s_time_gran = 1; 950 951 sb->s_export_op = &fuse_export_operations; 951 952 952 953 file = fget(d.fd);
+1 -1
fs/lockd/clnt4xdr.c
··· 241 241 p = xdr_inline_decode(xdr, 4); 242 242 if (unlikely(p == NULL)) 243 243 goto out_overflow; 244 - if (unlikely(*p > nlm4_failed)) 244 + if (unlikely(ntohl(*p) > ntohl(nlm4_failed))) 245 245 goto out_bad_xdr; 246 246 *stat = *p; 247 247 return 0;
+1 -1
fs/lockd/clntxdr.c
··· 236 236 p = xdr_inline_decode(xdr, 4); 237 237 if (unlikely(p == NULL)) 238 238 goto out_overflow; 239 - if (unlikely(*p > nlm_lck_denied_grace_period)) 239 + if (unlikely(ntohl(*p) > ntohl(nlm_lck_denied_grace_period))) 240 240 goto out_enum; 241 241 *stat = *p; 242 242 return 0;
+8 -14
fs/nfsd/nfs3xdr.c
··· 803 803 return p; 804 804 } 805 805 806 - static int 806 + static __be32 807 807 compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp, 808 808 const char *name, int namlen) 809 809 { 810 810 struct svc_export *exp; 811 811 struct dentry *dparent, *dchild; 812 - int rv = 0; 812 + __be32 rv = nfserr_noent; 813 813 814 814 dparent = cd->fh.fh_dentry; 815 815 exp = cd->fh.fh_export; ··· 817 817 if (isdotent(name, namlen)) { 818 818 if (namlen == 2) { 819 819 dchild = dget_parent(dparent); 820 - if (dchild == dparent) { 821 - /* filesystem root - cannot return filehandle for ".." */ 822 - dput(dchild); 823 - return -ENOENT; 824 - } 820 + /* filesystem root - cannot return filehandle for ".." */ 821 + if (dchild == dparent) 822 + goto out; 825 823 } else 826 824 dchild = dget(dparent); 827 825 } else 828 826 dchild = lookup_one_len(name, dparent, namlen); 829 827 if (IS_ERR(dchild)) 830 - return -ENOENT; 831 - rv = -ENOENT; 828 + return rv; 832 829 if (d_mountpoint(dchild)) 833 - goto out; 834 - rv = fh_compose(fhp, exp, dchild, &cd->fh); 835 - if (rv) 836 830 goto out; 837 831 if (!dchild->d_inode) 838 832 goto out; 839 - rv = 0; 833 + rv = fh_compose(fhp, exp, dchild, &cd->fh); 840 834 out: 841 835 dput(dchild); 842 836 return rv; ··· 839 845 static __be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen) 840 846 { 841 847 struct svc_fh fh; 842 - int err; 848 + __be32 err; 843 849 844 850 fh_init(&fh, NFS3_FHSIZE); 845 851 err = compose_entry_fh(cd, &fh, name, namlen);
+8 -7
fs/nfsd/nfs4proc.c
··· 235 235 */ 236 236 if (open->op_createmode == NFS4_CREATE_EXCLUSIVE && status == 0) 237 237 open->op_bmval[1] = (FATTR4_WORD1_TIME_ACCESS | 238 - FATTR4_WORD1_TIME_MODIFY); 238 + FATTR4_WORD1_TIME_MODIFY); 239 239 } else { 240 240 status = nfsd_lookup(rqstp, current_fh, 241 241 open->op_fname.data, open->op_fname.len, resfh); 242 242 fh_unlock(current_fh); 243 - if (status) 244 - goto out; 245 - status = nfsd_check_obj_isreg(resfh); 246 243 } 244 + if (status) 245 + goto out; 246 + status = nfsd_check_obj_isreg(resfh); 247 247 if (status) 248 248 goto out; 249 249 ··· 841 841 struct nfsd4_setattr *setattr) 842 842 { 843 843 __be32 status = nfs_ok; 844 + int err; 844 845 845 846 if (setattr->sa_iattr.ia_valid & ATTR_SIZE) { 846 847 nfs4_lock_state(); ··· 853 852 return status; 854 853 } 855 854 } 856 - status = fh_want_write(&cstate->current_fh); 857 - if (status) 858 - return status; 855 + err = fh_want_write(&cstate->current_fh); 856 + if (err) 857 + return nfserrno(err); 859 858 status = nfs_ok; 860 859 861 860 status = check_attr_support(rqstp, cstate, setattr->sa_bmval,
+9 -14
fs/nfsd/nfs4state.c
··· 4211 4211 * vfs_test_lock. (Arguably perhaps test_lock should be done with an 4212 4212 * inode operation.) 4213 4213 */ 4214 - static int nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock) 4214 + static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock) 4215 4215 { 4216 4216 struct file *file; 4217 - int err; 4218 - 4219 - err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file); 4220 - if (err) 4221 - return err; 4222 - err = vfs_test_lock(file, lock); 4223 - nfsd_close(file); 4217 + __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file); 4218 + if (!err) { 4219 + err = nfserrno(vfs_test_lock(file, lock)); 4220 + nfsd_close(file); 4221 + } 4224 4222 return err; 4225 4223 } 4226 4224 ··· 4232 4234 struct inode *inode; 4233 4235 struct file_lock file_lock; 4234 4236 struct nfs4_lockowner *lo; 4235 - int error; 4236 4237 __be32 status; 4237 4238 4238 4239 if (locks_in_grace()) ··· 4277 4280 4278 4281 nfs4_transform_lock_offset(&file_lock); 4279 4282 4280 - status = nfs_ok; 4281 - error = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock); 4282 - if (error) { 4283 - status = nfserrno(error); 4283 + status = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock); 4284 + if (status) 4284 4285 goto out; 4285 - } 4286 + 4286 4287 if (file_lock.fl_type != F_UNLCK) { 4287 4288 status = nfserr_denied; 4288 4289 nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
+2 -2
fs/nfsd/nfs4xdr.c
··· 1392 1392 for (i = 0; i < test_stateid->ts_num_ids; i++) { 1393 1393 stateid = kmalloc(sizeof(struct nfsd4_test_stateid_id), GFP_KERNEL); 1394 1394 if (!stateid) { 1395 - status = PTR_ERR(stateid); 1395 + status = nfserrno(-ENOMEM); 1396 1396 goto out; 1397 1397 } 1398 1398 ··· 3410 3410 *p++ = htonl(test_stateid->ts_num_ids); 3411 3411 3412 3412 list_for_each_entry_safe(stateid, next, &test_stateid->ts_stateid_list, ts_id_list) { 3413 - *p++ = htonl(stateid->ts_id_status); 3413 + *p++ = stateid->ts_id_status; 3414 3414 } 3415 3415 3416 3416 ADJUST_ARGS();
+1 -1
fs/nfsd/vfs.c
··· 1458 1458 switch (createmode) { 1459 1459 case NFS3_CREATE_UNCHECKED: 1460 1460 if (! S_ISREG(dchild->d_inode->i_mode)) 1461 - err = nfserr_exist; 1461 + goto out; 1462 1462 else if (truncp) { 1463 1463 /* in nfsv4, we need to treat this case a little 1464 1464 * differently. we don't want to truncate the
+1 -1
fs/ocfs2/alloc.c
··· 1134 1134 } 1135 1135 1136 1136 el = path_leaf_el(path); 1137 - rec = &el->l_recs[le32_to_cpu(el->l_next_free_rec) - 1]; 1137 + rec = &el->l_recs[le16_to_cpu(el->l_next_free_rec) - 1]; 1138 1138 1139 1139 ocfs2_adjust_rightmost_records(handle, et, path, rec); 1140 1140
+6 -6
fs/ocfs2/refcounttree.c
··· 1036 1036 1037 1037 tmp_el = left_path->p_node[subtree_root].el; 1038 1038 blkno = left_path->p_node[subtree_root+1].bh->b_blocknr; 1039 - for (i = 0; i < le32_to_cpu(tmp_el->l_next_free_rec); i++) { 1039 + for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) { 1040 1040 if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) { 1041 1041 *cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos); 1042 1042 break; 1043 1043 } 1044 1044 } 1045 1045 1046 - BUG_ON(i == le32_to_cpu(tmp_el->l_next_free_rec)); 1046 + BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec)); 1047 1047 1048 1048 out: 1049 1049 ocfs2_free_path(left_path); ··· 1468 1468 1469 1469 trace_ocfs2_divide_leaf_refcount_block( 1470 1470 (unsigned long long)ref_leaf_bh->b_blocknr, 1471 - le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used)); 1471 + le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used)); 1472 1472 1473 1473 /* 1474 1474 * XXX: Improvement later. ··· 2411 2411 rb = (struct ocfs2_refcount_block *) 2412 2412 prev_bh->b_data; 2413 2413 2414 - if (le64_to_cpu(rb->rf_records.rl_used) + 2414 + if (le16_to_cpu(rb->rf_records.rl_used) + 2415 2415 recs_add > 2416 2416 le16_to_cpu(rb->rf_records.rl_count)) 2417 2417 ref_blocks++; ··· 2476 2476 if (prev_bh) { 2477 2477 rb = (struct ocfs2_refcount_block *)prev_bh->b_data; 2478 2478 2479 - if (le64_to_cpu(rb->rf_records.rl_used) + recs_add > 2479 + if (le16_to_cpu(rb->rf_records.rl_used) + recs_add > 2480 2480 le16_to_cpu(rb->rf_records.rl_count)) 2481 2481 ref_blocks++; 2482 2482 ··· 3629 3629 * one will split a refcount rec, so totally we need 3630 3630 * clusters * 2 new refcount rec. 3631 3631 */ 3632 - if (le64_to_cpu(rb->rf_records.rl_used) + clusters * 2 > 3632 + if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 > 3633 3633 le16_to_cpu(rb->rf_records.rl_count)) 3634 3634 ref_blocks++; 3635 3635
+2 -2
fs/ocfs2/suballoc.c
··· 600 600 ret = ocfs2_free_clusters(handle, cluster_ac->ac_inode, 601 601 cluster_ac->ac_bh, 602 602 le64_to_cpu(rec->e_blkno), 603 - le32_to_cpu(rec->e_leaf_clusters)); 603 + le16_to_cpu(rec->e_leaf_clusters)); 604 604 if (ret) 605 605 mlog_errno(ret); 606 606 /* Try all the clusters to free */ ··· 1628 1628 { 1629 1629 unsigned int bpc = le16_to_cpu(cl->cl_bpc); 1630 1630 unsigned int bitoff = le32_to_cpu(rec->e_cpos) * bpc; 1631 - unsigned int bitcount = le32_to_cpu(rec->e_leaf_clusters) * bpc; 1631 + unsigned int bitcount = le16_to_cpu(rec->e_leaf_clusters) * bpc; 1632 1632 1633 1633 if (res->sr_bit_offset < bitoff) 1634 1634 return 0;
+1 -1
include/linux/fuse.h
··· 593 593 __u64 off; 594 594 __u32 namelen; 595 595 __u32 type; 596 - char name[0]; 596 + char name[]; 597 597 }; 598 598 599 599 #define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name)
-12
include/linux/i2c/twl.h
··· 666 666 unsigned int check_defaults:1; 667 667 unsigned int reset_registers:1; 668 668 unsigned int hs_extmute:1; 669 - u16 hs_left_step; 670 - u16 hs_right_step; 671 - u16 hf_left_step; 672 - u16 hf_right_step; 673 669 void (*set_hs_extmute)(int mute); 674 670 }; 675 671 676 672 struct twl4030_vibra_data { 677 673 unsigned int coexist; 678 - 679 - /* twl6040 */ 680 - unsigned int vibldrv_res; /* left driver resistance */ 681 - unsigned int vibrdrv_res; /* right driver resistance */ 682 - unsigned int viblmotor_res; /* left motor resistance */ 683 - unsigned int vibrmotor_res; /* right motor resistance */ 684 - int vddvibl_uV; /* VDDVIBL volt, set 0 for fixed reg */ 685 - int vddvibr_uV; /* VDDVIBR volt, set 0 for fixed reg */ 686 674 }; 687 675 688 676 struct twl4030_audio_data {
+6
include/linux/kvm_host.h
··· 596 596 597 597 #ifdef CONFIG_IOMMU_API 598 598 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot); 599 + void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot); 599 600 int kvm_iommu_map_guest(struct kvm *kvm); 600 601 int kvm_iommu_unmap_guest(struct kvm *kvm); 601 602 int kvm_assign_device(struct kvm *kvm, ··· 608 607 struct kvm_memory_slot *slot) 609 608 { 610 609 return 0; 610 + } 611 + 612 + static inline void kvm_iommu_unmap_pages(struct kvm *kvm, 613 + struct kvm_memory_slot *slot) 614 + { 611 615 } 612 616 613 617 static inline int kvm_iommu_map_guest(struct kvm *kvm)
+53 -67
include/linux/mfd/db5500-prcmu.h
··· 8 8 #ifndef __MFD_DB5500_PRCMU_H 9 9 #define __MFD_DB5500_PRCMU_H 10 10 11 - #ifdef CONFIG_MFD_DB5500_PRCMU 12 - 13 - void db5500_prcmu_early_init(void); 14 - int db5500_prcmu_set_epod(u16 epod_id, u8 epod_state); 15 - int db5500_prcmu_set_display_clocks(void); 16 - int db5500_prcmu_disable_dsipll(void); 17 - int db5500_prcmu_enable_dsipll(void); 18 - int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size); 19 - int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size); 20 - void db5500_prcmu_enable_wakeups(u32 wakeups); 21 - int db5500_prcmu_request_clock(u8 clock, bool enable); 22 - void db5500_prcmu_config_abb_event_readout(u32 abb_events); 23 - void db5500_prcmu_get_abb_event_buffer(void __iomem **buf); 24 - int prcmu_resetout(u8 resoutn, u8 state); 25 - int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, 26 - bool keep_ap_pll); 27 - int db5500_prcmu_config_esram0_deep_sleep(u8 state); 28 - void db5500_prcmu_system_reset(u16 reset_code); 29 - u16 db5500_prcmu_get_reset_code(void); 30 - bool db5500_prcmu_is_ac_wake_requested(void); 31 - int db5500_prcmu_set_arm_opp(u8 opp); 32 - int db5500_prcmu_get_arm_opp(void); 33 - 34 - #else /* !CONFIG_UX500_SOC_DB5500 */ 35 - 36 - static inline void db5500_prcmu_early_init(void) {} 37 - 38 - static inline int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size) 39 - { 40 - return -ENOSYS; 41 - } 42 - 43 - static inline int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) 44 - { 45 - return -ENOSYS; 46 - } 47 - 48 - static inline int db5500_prcmu_request_clock(u8 clock, bool enable) 49 - { 50 - return 0; 51 - } 52 - 53 - static inline int db5500_prcmu_set_display_clocks(void) 54 - { 55 - return 0; 56 - } 57 - 58 - static inline int db5500_prcmu_disable_dsipll(void) 59 - { 60 - return 0; 61 - } 62 - 63 - static inline int db5500_prcmu_enable_dsipll(void) 64 - { 65 - return 0; 66 - } 67 - 68 - static inline int db5500_prcmu_config_esram0_deep_sleep(u8 state) 69 - { 70 - return 
0; 71 - } 72 - 73 - static inline void db5500_prcmu_enable_wakeups(u32 wakeups) {} 74 - 75 11 static inline int prcmu_resetout(u8 resoutn, u8 state) 76 12 { 77 13 return 0; ··· 18 82 return 0; 19 83 } 20 84 21 - static inline void db5500_prcmu_get_abb_event_buffer(void __iomem **buf) {} 22 - static inline void db5500_prcmu_config_abb_event_readout(u32 abb_events) {} 85 + static inline int db5500_prcmu_request_clock(u8 clock, bool enable) 86 + { 87 + return 0; 88 + } 23 89 24 90 static inline int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, 25 91 bool keep_ap_pll) ··· 29 91 return 0; 30 92 } 31 93 32 - static inline void db5500_prcmu_system_reset(u16 reset_code) {} 94 + static inline int db5500_prcmu_config_esram0_deep_sleep(u8 state) 95 + { 96 + return 0; 97 + } 33 98 34 99 static inline u16 db5500_prcmu_get_reset_code(void) 35 100 { ··· 54 113 return 0; 55 114 } 56 115 116 + static inline void db5500_prcmu_config_abb_event_readout(u32 abb_events) {} 117 + 118 + static inline void db5500_prcmu_get_abb_event_buffer(void __iomem **buf) {} 119 + 120 + static inline void db5500_prcmu_system_reset(u16 reset_code) {} 121 + 122 + static inline void db5500_prcmu_enable_wakeups(u32 wakeups) {} 123 + 124 + #ifdef CONFIG_MFD_DB5500_PRCMU 125 + 126 + void db5500_prcmu_early_init(void); 127 + int db5500_prcmu_set_display_clocks(void); 128 + int db5500_prcmu_disable_dsipll(void); 129 + int db5500_prcmu_enable_dsipll(void); 130 + int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size); 131 + int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size); 132 + 133 + #else /* !CONFIG_UX500_SOC_DB5500 */ 134 + 135 + static inline void db5500_prcmu_early_init(void) {} 136 + 137 + static inline int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size) 138 + { 139 + return -ENOSYS; 140 + } 141 + 142 + static inline int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) 143 + { 144 + return -ENOSYS; 145 + } 146 + 147 + static inline int 
db5500_prcmu_set_display_clocks(void) 148 + { 149 + return 0; 150 + } 151 + 152 + static inline int db5500_prcmu_disable_dsipll(void) 153 + { 154 + return 0; 155 + } 156 + 157 + static inline int db5500_prcmu_enable_dsipll(void) 158 + { 159 + return 0; 160 + } 57 161 58 162 #endif /* CONFIG_MFD_DB5500_PRCMU */ 59 163
+39 -8
include/linux/mfd/rc5t583.h
··· 26 26 27 27 #include <linux/mutex.h> 28 28 #include <linux/types.h> 29 + #include <linux/regmap.h> 29 30 30 31 #define RC5T583_MAX_REGS 0xF8 31 32 ··· 280 279 bool enable_shutdown; 281 280 }; 282 281 283 - int rc5t583_write(struct device *dev, u8 reg, uint8_t val); 284 - int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val); 285 - int rc5t583_set_bits(struct device *dev, unsigned int reg, 286 - unsigned int bit_mask); 287 - int rc5t583_clear_bits(struct device *dev, unsigned int reg, 288 - unsigned int bit_mask); 289 - int rc5t583_update(struct device *dev, unsigned int reg, 290 - unsigned int val, unsigned int mask); 282 + static inline int rc5t583_write(struct device *dev, uint8_t reg, uint8_t val) 283 + { 284 + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); 285 + return regmap_write(rc5t583->regmap, reg, val); 286 + } 287 + 288 + static inline int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val) 289 + { 290 + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); 291 + unsigned int ival; 292 + int ret; 293 + ret = regmap_read(rc5t583->regmap, reg, &ival); 294 + if (!ret) 295 + *val = (uint8_t)ival; 296 + return ret; 297 + } 298 + 299 + static inline int rc5t583_set_bits(struct device *dev, unsigned int reg, 300 + unsigned int bit_mask) 301 + { 302 + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); 303 + return regmap_update_bits(rc5t583->regmap, reg, bit_mask, bit_mask); 304 + } 305 + 306 + static inline int rc5t583_clear_bits(struct device *dev, unsigned int reg, 307 + unsigned int bit_mask) 308 + { 309 + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); 310 + return regmap_update_bits(rc5t583->regmap, reg, bit_mask, 0); 311 + } 312 + 313 + static inline int rc5t583_update(struct device *dev, unsigned int reg, 314 + unsigned int val, unsigned int mask) 315 + { 316 + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); 317 + return regmap_update_bits(rc5t583->regmap, reg, mask, val); 318 + } 319 + 291 320 int rc5t583_ext_power_req_config(struct device 
*dev, int deepsleep_id, 292 321 int ext_pwr_req, int deepsleep_slot_nr); 293 322 int rc5t583_irq_init(struct rc5t583 *rc5t583, int irq, int irq_base);
+27
include/linux/mfd/twl6040.h
··· 174 174 #define TWL6040_SYSCLK_SEL_LPPLL 0 175 175 #define TWL6040_SYSCLK_SEL_HPPLL 1 176 176 177 + struct twl6040_codec_data { 178 + u16 hs_left_step; 179 + u16 hs_right_step; 180 + u16 hf_left_step; 181 + u16 hf_right_step; 182 + }; 183 + 184 + struct twl6040_vibra_data { 185 + unsigned int vibldrv_res; /* left driver resistance */ 186 + unsigned int vibrdrv_res; /* right driver resistance */ 187 + unsigned int viblmotor_res; /* left motor resistance */ 188 + unsigned int vibrmotor_res; /* right motor resistance */ 189 + int vddvibl_uV; /* VDDVIBL volt, set 0 for fixed reg */ 190 + int vddvibr_uV; /* VDDVIBR volt, set 0 for fixed reg */ 191 + }; 192 + 193 + struct twl6040_platform_data { 194 + int audpwron_gpio; /* audio power-on gpio */ 195 + unsigned int irq_base; 196 + 197 + struct twl6040_codec_data *codec; 198 + struct twl6040_vibra_data *vibra; 199 + }; 200 + 201 + struct regmap; 202 + 177 203 struct twl6040 { 178 204 struct device *dev; 205 + struct regmap *regmap; 179 206 struct mutex mutex; 180 207 struct mutex io_mutex; 181 208 struct mutex irq_mutex;
+9 -18
include/linux/mm.h
··· 1393 1393 1394 1394 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); 1395 1395 1396 - extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, 1397 - unsigned long len, unsigned long prot, 1398 - unsigned long flag, unsigned long pgoff); 1399 1396 extern unsigned long mmap_region(struct file *file, unsigned long addr, 1400 1397 unsigned long len, unsigned long flags, 1401 1398 vm_flags_t vm_flags, unsigned long pgoff); 1402 - 1403 - static inline unsigned long do_mmap(struct file *file, unsigned long addr, 1404 - unsigned long len, unsigned long prot, 1405 - unsigned long flag, unsigned long offset) 1406 - { 1407 - unsigned long ret = -EINVAL; 1408 - if ((offset + PAGE_ALIGN(len)) < offset) 1409 - goto out; 1410 - if (!(offset & ~PAGE_MASK)) 1411 - ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); 1412 - out: 1413 - return ret; 1414 - } 1415 - 1399 + extern unsigned long do_mmap(struct file *, unsigned long, 1400 + unsigned long, unsigned long, 1401 + unsigned long, unsigned long); 1416 1402 extern int do_munmap(struct mm_struct *, unsigned long, size_t); 1417 1403 1418 - extern unsigned long do_brk(unsigned long, unsigned long); 1404 + /* These take the mm semaphore themselves */ 1405 + extern unsigned long vm_brk(unsigned long, unsigned long); 1406 + extern int vm_munmap(unsigned long, size_t); 1407 + extern unsigned long vm_mmap(struct file *, unsigned long, 1408 + unsigned long, unsigned long, 1409 + unsigned long, unsigned long); 1419 1410 1420 1411 /* truncate.c */ 1421 1412 extern void truncate_inode_pages(struct address_space *, loff_t);
+1 -1
include/linux/mmc/card.h
··· 481 481 struct device_driver drv; 482 482 int (*probe)(struct mmc_card *); 483 483 void (*remove)(struct mmc_card *); 484 - int (*suspend)(struct mmc_card *, pm_message_t); 484 + int (*suspend)(struct mmc_card *); 485 485 int (*resume)(struct mmc_card *); 486 486 }; 487 487
+1
include/linux/nfsd/Kbuild
··· 1 + header-y += cld.h 1 2 header-y += debug.h 2 3 header-y += export.h 3 4 header-y += nfsfh.h
+3 -1
include/linux/pinctrl/machine.h
··· 12 12 #ifndef __LINUX_PINCTRL_MACHINE_H 13 13 #define __LINUX_PINCTRL_MACHINE_H 14 14 15 + #include <linux/bug.h> 16 + 15 17 #include "pinctrl-state.h" 16 18 17 19 enum pinctrl_map_type { ··· 150 148 #define PIN_MAP_CONFIGS_GROUP_HOG_DEFAULT(dev, grp, cfgs) \ 151 149 PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, cfgs) 152 150 153 - #ifdef CONFIG_PINMUX 151 + #ifdef CONFIG_PINCTRL 154 152 155 153 extern int pinctrl_register_mappings(struct pinctrl_map const *map, 156 154 unsigned num_maps);
+1
include/linux/usb/otg.h
··· 94 94 95 95 struct usb_otg *otg; 96 96 97 + struct device *io_dev; 97 98 struct usb_phy_io_ops *io_ops; 98 99 void __iomem *io_priv; 99 100
+3 -2
lib/mpi/mpi-bit.c
··· 177 177 */ 178 178 int mpi_lshift_limbs(MPI a, unsigned int count) 179 179 { 180 - mpi_ptr_t ap = a->d; 181 - int n = a->nlimbs; 180 + const int n = a->nlimbs; 181 + mpi_ptr_t ap; 182 182 int i; 183 183 184 184 if (!count || !n) ··· 187 187 if (RESIZE_IF_NEEDED(a, n + count) < 0) 188 188 return -ENOMEM; 189 189 190 + ap = a->d; 190 191 for (i = n - 1; i >= 0; i--) 191 192 ap[i + count] = ap[i]; 192 193 for (i = 0; i < count; i++)
+6 -1
mm/memblock.c
··· 330 330 phys_addr_t end = base + memblock_cap_size(base, &size); 331 331 int i, nr_new; 332 332 333 + if (!size) 334 + return 0; 335 + 333 336 /* special case for empty array */ 334 337 if (type->regions[0].size == 0) { 335 338 WARN_ON(type->cnt != 1 || type->total_size); ··· 433 430 434 431 *start_rgn = *end_rgn = 0; 435 432 433 + if (!size) 434 + return 0; 435 + 436 436 /* we'll create at most two more regions */ 437 437 while (type->cnt + 2 > type->max) 438 438 if (memblock_double_array(type) < 0) ··· 520 514 (unsigned long long)base, 521 515 (unsigned long long)base + size, 522 516 (void *)_RET_IP_); 523 - BUG_ON(0 == size); 524 517 525 518 return memblock_add_region(_rgn, base, size, MAX_NUMNODES); 526 519 }
+1
mm/memcontrol.c
··· 3392 3392 * the newpage may be on LRU(or pagevec for LRU) already. We lock 3393 3393 * LRU while we overwrite pc->mem_cgroup. 3394 3394 */ 3395 + pc = lookup_page_cgroup(newpage); 3395 3396 __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true); 3396 3397 } 3397 3398
+50 -9
mm/mmap.c
··· 240 240 return next; 241 241 } 242 242 243 + static unsigned long do_brk(unsigned long addr, unsigned long len); 244 + 243 245 SYSCALL_DEFINE1(brk, unsigned long, brk) 244 246 { 245 247 unsigned long rlim, retval; ··· 953 951 * The caller must hold down_write(&current->mm->mmap_sem). 954 952 */ 955 953 956 - unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, 954 + static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, 957 955 unsigned long len, unsigned long prot, 958 956 unsigned long flags, unsigned long pgoff) 959 957 { ··· 1089 1087 1090 1088 return mmap_region(file, addr, len, flags, vm_flags, pgoff); 1091 1089 } 1092 - EXPORT_SYMBOL(do_mmap_pgoff); 1090 + 1091 + unsigned long do_mmap(struct file *file, unsigned long addr, 1092 + unsigned long len, unsigned long prot, 1093 + unsigned long flag, unsigned long offset) 1094 + { 1095 + if (unlikely(offset + PAGE_ALIGN(len) < offset)) 1096 + return -EINVAL; 1097 + if (unlikely(offset & ~PAGE_MASK)) 1098 + return -EINVAL; 1099 + return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); 1100 + } 1101 + EXPORT_SYMBOL(do_mmap); 1102 + 1103 + unsigned long vm_mmap(struct file *file, unsigned long addr, 1104 + unsigned long len, unsigned long prot, 1105 + unsigned long flag, unsigned long offset) 1106 + { 1107 + unsigned long ret; 1108 + struct mm_struct *mm = current->mm; 1109 + 1110 + down_write(&mm->mmap_sem); 1111 + ret = do_mmap(file, addr, len, prot, flag, offset); 1112 + up_write(&mm->mmap_sem); 1113 + return ret; 1114 + } 1115 + EXPORT_SYMBOL(vm_mmap); 1093 1116 1094 1117 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, 1095 1118 unsigned long, prot, unsigned long, flags, ··· 2132 2105 2133 2106 return 0; 2134 2107 } 2135 - 2136 2108 EXPORT_SYMBOL(do_munmap); 2137 2109 2138 - SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) 2110 + int vm_munmap(unsigned long start, size_t len) 2139 2111 { 2140 2112 int ret; 2141 2113 struct 
mm_struct *mm = current->mm; 2142 2114 2143 - profile_munmap(addr); 2144 - 2145 2115 down_write(&mm->mmap_sem); 2146 - ret = do_munmap(mm, addr, len); 2116 + ret = do_munmap(mm, start, len); 2147 2117 up_write(&mm->mmap_sem); 2148 2118 return ret; 2119 + } 2120 + EXPORT_SYMBOL(vm_munmap); 2121 + 2122 + SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) 2123 + { 2124 + profile_munmap(addr); 2125 + return vm_munmap(addr, len); 2149 2126 } 2150 2127 2151 2128 static inline void verify_mm_writelocked(struct mm_struct *mm) ··· 2167 2136 * anonymous maps. eventually we may be able to do some 2168 2137 * brk-specific accounting here. 2169 2138 */ 2170 - unsigned long do_brk(unsigned long addr, unsigned long len) 2139 + static unsigned long do_brk(unsigned long addr, unsigned long len) 2171 2140 { 2172 2141 struct mm_struct * mm = current->mm; 2173 2142 struct vm_area_struct * vma, * prev; ··· 2263 2232 return addr; 2264 2233 } 2265 2234 2266 - EXPORT_SYMBOL(do_brk); 2235 + unsigned long vm_brk(unsigned long addr, unsigned long len) 2236 + { 2237 + struct mm_struct *mm = current->mm; 2238 + unsigned long ret; 2239 + 2240 + down_write(&mm->mmap_sem); 2241 + ret = do_brk(addr, len); 2242 + up_write(&mm->mmap_sem); 2243 + return ret; 2244 + } 2245 + EXPORT_SYMBOL(vm_brk); 2267 2246 2268 2247 /* Release all mmaps. */ 2269 2248 void exit_mmap(struct mm_struct *mm)
+36 -5
mm/nommu.c
··· 1233 1233 /* 1234 1234 * handle mapping creation for uClinux 1235 1235 */ 1236 - unsigned long do_mmap_pgoff(struct file *file, 1236 + static unsigned long do_mmap_pgoff(struct file *file, 1237 1237 unsigned long addr, 1238 1238 unsigned long len, 1239 1239 unsigned long prot, ··· 1470 1470 show_free_areas(0); 1471 1471 return -ENOMEM; 1472 1472 } 1473 - EXPORT_SYMBOL(do_mmap_pgoff); 1473 + 1474 + unsigned long do_mmap(struct file *file, unsigned long addr, 1475 + unsigned long len, unsigned long prot, 1476 + unsigned long flag, unsigned long offset) 1477 + { 1478 + if (unlikely(offset + PAGE_ALIGN(len) < offset)) 1479 + return -EINVAL; 1480 + if (unlikely(offset & ~PAGE_MASK)) 1481 + return -EINVAL; 1482 + return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); 1483 + } 1484 + EXPORT_SYMBOL(do_mmap); 1485 + 1486 + unsigned long vm_mmap(struct file *file, unsigned long addr, 1487 + unsigned long len, unsigned long prot, 1488 + unsigned long flag, unsigned long offset) 1489 + { 1490 + unsigned long ret; 1491 + struct mm_struct *mm = current->mm; 1492 + 1493 + down_write(&mm->mmap_sem); 1494 + ret = do_mmap(file, addr, len, prot, flag, offset); 1495 + up_write(&mm->mmap_sem); 1496 + return ret; 1497 + } 1498 + EXPORT_SYMBOL(vm_mmap); 1474 1499 1475 1500 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, 1476 1501 unsigned long, prot, unsigned long, flags, ··· 1734 1709 } 1735 1710 EXPORT_SYMBOL(do_munmap); 1736 1711 1737 - SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) 1712 + int vm_munmap(unsigned long addr, size_t len) 1738 1713 { 1739 - int ret; 1740 1714 struct mm_struct *mm = current->mm; 1715 + int ret; 1741 1716 1742 1717 down_write(&mm->mmap_sem); 1743 1718 ret = do_munmap(mm, addr, len); 1744 1719 up_write(&mm->mmap_sem); 1745 1720 return ret; 1721 + } 1722 + EXPORT_SYMBOL(vm_munmap); 1723 + 1724 + SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) 1725 + { 1726 + return vm_munmap(addr, len); 1746 1727 
} 1747 1728 1748 1729 /* ··· 1775 1744 kleave(""); 1776 1745 } 1777 1746 1778 - unsigned long do_brk(unsigned long addr, unsigned long len) 1747 + unsigned long vm_brk(unsigned long addr, unsigned long len) 1779 1748 { 1780 1749 return -ENOMEM; 1781 1750 }
-6
scripts/checkpatch.pl
··· 1869 1869 "No space is necessary after a cast\n" . $hereprev); 1870 1870 } 1871 1871 1872 - if ($rawline =~ /^\+[ \t]*\/\*[ \t]*$/ && 1873 - $prevrawline =~ /^\+[ \t]*$/) { 1874 - CHK("BLOCK_COMMENT_STYLE", 1875 - "Don't begin block comments with only a /* line, use /* comment...\n" . $hereprev); 1876 - } 1877 - 1878 1872 # check for spaces at the beginning of a line. 1879 1873 # Exceptions: 1880 1874 # 1) within comments
+2 -2
scripts/xz_wrap.sh
··· 12 12 BCJ= 13 13 LZMA2OPTS= 14 14 15 - case $ARCH in 16 - x86|x86_64) BCJ=--x86 ;; 15 + case $SRCARCH in 16 + x86) BCJ=--x86 ;; 17 17 powerpc) BCJ=--powerpc ;; 18 18 ia64) BCJ=--ia64; LZMA2OPTS=pb=4 ;; 19 19 arm) BCJ=--arm ;;
+6
security/commoncap.c
··· 29 29 #include <linux/securebits.h> 30 30 #include <linux/user_namespace.h> 31 31 #include <linux/binfmts.h> 32 + #include <linux/personality.h> 32 33 33 34 /* 34 35 * If a non-root user executes a setuid-root binary in ··· 505 504 effective = true; 506 505 } 507 506 skip: 507 + 508 + /* if we have fs caps, clear dangerous personality flags */ 509 + if (!cap_issubset(new->cap_permitted, old->cap_permitted)) 510 + bprm->per_clear |= PER_CLEAR_ON_SETID; 511 + 508 512 509 513 /* Don't let someone trace a set[ug]id/setpcap binary with the revised 510 514 * credentials unless they have the appropriate permit
+33 -11
security/smack/smack_lsm.c
··· 3640 3640 }; 3641 3641 3642 3642 3643 - static __init void init_smack_know_list(void) 3643 + static __init void init_smack_known_list(void) 3644 3644 { 3645 + /* 3646 + * Initialize CIPSO locks 3647 + */ 3648 + spin_lock_init(&smack_known_huh.smk_cipsolock); 3649 + spin_lock_init(&smack_known_hat.smk_cipsolock); 3650 + spin_lock_init(&smack_known_star.smk_cipsolock); 3651 + spin_lock_init(&smack_known_floor.smk_cipsolock); 3652 + spin_lock_init(&smack_known_invalid.smk_cipsolock); 3653 + spin_lock_init(&smack_known_web.smk_cipsolock); 3654 + /* 3655 + * Initialize rule list locks 3656 + */ 3657 + mutex_init(&smack_known_huh.smk_rules_lock); 3658 + mutex_init(&smack_known_hat.smk_rules_lock); 3659 + mutex_init(&smack_known_floor.smk_rules_lock); 3660 + mutex_init(&smack_known_star.smk_rules_lock); 3661 + mutex_init(&smack_known_invalid.smk_rules_lock); 3662 + mutex_init(&smack_known_web.smk_rules_lock); 3663 + /* 3664 + * Initialize rule lists 3665 + */ 3666 + INIT_LIST_HEAD(&smack_known_huh.smk_rules); 3667 + INIT_LIST_HEAD(&smack_known_hat.smk_rules); 3668 + INIT_LIST_HEAD(&smack_known_star.smk_rules); 3669 + INIT_LIST_HEAD(&smack_known_floor.smk_rules); 3670 + INIT_LIST_HEAD(&smack_known_invalid.smk_rules); 3671 + INIT_LIST_HEAD(&smack_known_web.smk_rules); 3672 + /* 3673 + * Create the known labels list 3674 + */ 3645 3675 list_add(&smack_known_huh.list, &smack_known_list); 3646 3676 list_add(&smack_known_hat.list, &smack_known_list); 3647 3677 list_add(&smack_known_star.list, &smack_known_list); ··· 3706 3676 cred = (struct cred *) current->cred; 3707 3677 cred->security = tsp; 3708 3678 3709 - /* initialize the smack_know_list */ 3710 - init_smack_know_list(); 3711 - /* 3712 - * Initialize locks 3713 - */ 3714 - spin_lock_init(&smack_known_huh.smk_cipsolock); 3715 - spin_lock_init(&smack_known_hat.smk_cipsolock); 3716 - spin_lock_init(&smack_known_star.smk_cipsolock); 3717 - spin_lock_init(&smack_known_floor.smk_cipsolock); 3718 - spin_lock_init(&smack_known_invalid.smk_cipsolock); 3679 + /* initialize the smack_known_list */ 3680 + init_smack_known_list(); 3719 3681 3720 3682 /* 3721 3683 * Register with LSM
-14
security/smack/smackfs.c
··· 1614 1614 smk_cipso_doi(); 1615 1615 smk_unlbl_ambient(NULL); 1616 1616 1617 - mutex_init(&smack_known_floor.smk_rules_lock); 1618 - mutex_init(&smack_known_hat.smk_rules_lock); 1619 - mutex_init(&smack_known_huh.smk_rules_lock); 1620 - mutex_init(&smack_known_invalid.smk_rules_lock); 1621 - mutex_init(&smack_known_star.smk_rules_lock); 1622 - mutex_init(&smack_known_web.smk_rules_lock); 1623 - 1624 - INIT_LIST_HEAD(&smack_known_floor.smk_rules); 1625 - INIT_LIST_HEAD(&smack_known_hat.smk_rules); 1626 - INIT_LIST_HEAD(&smack_known_huh.smk_rules); 1627 - INIT_LIST_HEAD(&smack_known_invalid.smk_rules); 1628 - INIT_LIST_HEAD(&smack_known_star.smk_rules); 1629 - INIT_LIST_HEAD(&smack_known_web.smk_rules); 1630 - 1631 1617 return err; 1632 1618 } 1633 1619
+1
sound/core/vmaster.c
··· 419 419 * snd_ctl_add_vmaster_hook - Add a hook to a vmaster control 420 420 * @kcontrol: vmaster kctl element 421 421 * @hook: the hook function 422 + * @private_data: the private_data pointer to be saved 422 423 * 423 424 * Adds the given hook to the vmaster control element so that it's called 424 425 * at each time when the value is changed.
+1 -1
sound/last.c
··· 38 38 return 0; 39 39 } 40 40 41 - __initcall(alsa_sound_last_init); 41 + late_initcall_sync(alsa_sound_last_init);
+32 -7
sound/pci/hda/patch_conexant.c
··· 3971 3971 int i; 3972 3972 3973 3973 mute_outputs(codec, spec->multiout.num_dacs, spec->multiout.dac_nids); 3974 - for (i = 0; i < cfg->hp_outs; i++) 3974 + for (i = 0; i < cfg->hp_outs; i++) { 3975 + unsigned int val = PIN_OUT; 3976 + if (snd_hda_query_pin_caps(codec, cfg->hp_pins[i]) & 3977 + AC_PINCAP_HP_DRV) 3978 + val |= AC_PINCTL_HP_EN; 3975 3979 snd_hda_codec_write(codec, cfg->hp_pins[i], 0, 3976 - AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP); 3980 + AC_VERB_SET_PIN_WIDGET_CONTROL, val); 3981 + } 3977 3982 mute_outputs(codec, cfg->hp_outs, cfg->hp_pins); 3978 3983 mute_outputs(codec, cfg->line_outs, cfg->line_out_pins); 3979 3984 mute_outputs(codec, cfg->speaker_outs, cfg->speaker_pins); ··· 4396 4391 4397 4392 enum { 4398 4393 CXT_PINCFG_LENOVO_X200, 4394 + CXT_PINCFG_LENOVO_TP410, 4399 4395 }; 4400 4396 4397 + /* ThinkPad X200 & co with cxt5051 */ 4401 4398 static const struct cxt_pincfg cxt_pincfg_lenovo_x200[] = { 4402 4399 { 0x16, 0x042140ff }, /* HP (seq# overridden) */ 4403 4400 { 0x17, 0x21a11000 }, /* dock-mic */ ··· 4408 4401 {} 4409 4402 }; 4410 4403 4411 - static const struct cxt_pincfg *cxt_pincfg_tbl[] = { 4412 - [CXT_PINCFG_LENOVO_X200] = cxt_pincfg_lenovo_x200, 4404 + /* ThinkPad 410/420/510/520, X201 & co with cxt5066 */ 4405 + static const struct cxt_pincfg cxt_pincfg_lenovo_tp410[] = { 4406 + { 0x19, 0x042110ff }, /* HP (seq# overridden) */ 4407 + { 0x1a, 0x21a190f0 }, /* dock-mic */ 4408 + { 0x1c, 0x212140ff }, /* dock-HP */ 4409 + {} 4413 4410 }; 4414 4411 4415 - static const struct snd_pci_quirk cxt_fixups[] = { 4412 + static const struct cxt_pincfg *cxt_pincfg_tbl[] = { 4413 + [CXT_PINCFG_LENOVO_X200] = cxt_pincfg_lenovo_x200, 4414 + [CXT_PINCFG_LENOVO_TP410] = cxt_pincfg_lenovo_tp410, 4415 + }; 4416 + 4417 + static const struct snd_pci_quirk cxt5051_fixups[] = { 4416 4418 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200), 4419 + {} 4420 + }; 4421 + 4422 + static const struct snd_pci_quirk cxt5066_fixups[] = { 4423 + SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), 4424 + SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410), 4425 + SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410), 4426 + SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410), 4427 + SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410), 4417 4428 {} 4418 4429 }; 4419 4430 ··· 4471 4446 case 0x14f15051: 4472 4447 add_cx5051_fake_mutes(codec); 4473 4448 codec->pin_amp_workaround = 1; 4449 + apply_pin_fixup(codec, cxt5051_fixups, cxt_pincfg_tbl); 4474 4450 break; 4475 4451 default: 4476 4452 codec->pin_amp_workaround = 1; 4453 + apply_pin_fixup(codec, cxt5066_fixups, cxt_pincfg_tbl); 4477 4454 } 4478 - 4479 - apply_pin_fixup(codec, cxt_fixups, cxt_pincfg_tbl); 4480 4455 4481 4456 /* Show mute-led control only on HP laptops 4482 4457 * This is a sort of white-list: on HP laptops, EAPD corresponds
+45 -4
sound/pci/hda/patch_realtek.c
··· 1445 1445 ALC_FIXUP_ACT_BUILD, 1446 1446 }; 1447 1447 1448 + static void alc_apply_pincfgs(struct hda_codec *codec, 1449 + const struct alc_pincfg *cfg) 1450 + { 1451 + for (; cfg->nid; cfg++) 1452 + snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val); 1453 + } 1454 + 1448 1455 static void alc_apply_fixup(struct hda_codec *codec, int action) 1449 1456 { 1450 1457 struct alc_spec *spec = codec->spec; ··· 1485 1478 snd_printdd(KERN_INFO "hda_codec: %s: " 1486 1479 "Apply pincfg for %s\n", 1487 1480 codec->chip_name, modelname); 1488 - for (; cfg->nid; cfg++) 1489 - snd_hda_codec_set_pincfg(codec, cfg->nid, 1490 - cfg->val); 1481 + alc_apply_pincfgs(codec, cfg); 1491 1482 break; 1492 1483 case ALC_FIXUP_VERBS: 1493 1484 if (action != ALC_FIXUP_ACT_PROBE || !fix->v.verbs) ··· 4866 4861 ALC260_FIXUP_GPIO1_TOGGLE, 4867 4862 ALC260_FIXUP_REPLACER, 4868 4863 ALC260_FIXUP_HP_B1900, 4864 + ALC260_FIXUP_KN1, 4869 4865 }; 4870 4866 4871 4867 static void alc260_gpio1_automute(struct hda_codec *codec) ··· 4891 4885 snd_hda_jack_detect_enable(codec, 0x0f, ALC_HP_EVENT); 4892 4886 spec->unsol_event = alc_sku_unsol_event; 4893 4887 add_verb(codec->spec, alc_gpio1_init_verbs); 4888 + } 4889 + } 4890 + 4891 + static void alc260_fixup_kn1(struct hda_codec *codec, 4892 + const struct alc_fixup *fix, int action) 4893 + { 4894 + struct alc_spec *spec = codec->spec; 4895 + static const struct alc_pincfg pincfgs[] = { 4896 + { 0x0f, 0x02214000 }, /* HP/speaker */ 4897 + { 0x12, 0x90a60160 }, /* int mic */ 4898 + { 0x13, 0x02a19000 }, /* ext mic */ 4899 + { 0x18, 0x01446000 }, /* SPDIF out */ 4900 + /* disable bogus I/O pins */ 4901 + { 0x10, 0x411111f0 }, 4902 + { 0x11, 0x411111f0 }, 4903 + { 0x14, 0x411111f0 }, 4904 + { 0x15, 0x411111f0 }, 4905 + { 0x16, 0x411111f0 }, 4906 + { 0x17, 0x411111f0 }, 4907 + { 0x19, 0x411111f0 }, 4908 + { } 4909 + }; 4910 + 4911 + switch (action) { 4912 + case ALC_FIXUP_ACT_PRE_PROBE: 4913 + alc_apply_pincfgs(codec, pincfgs); 4914 + break; 4915 + case ALC_FIXUP_ACT_PROBE: 4916 + spec->init_amp = ALC_INIT_NONE; 4917 + break; 4894 4918 } 4895 4919 } 4896 4920 ··· 4974 4938 .v.func = alc260_fixup_gpio1_toggle, 4975 4939 .chained = true, 4976 4940 .chain_id = ALC260_FIXUP_COEF, 4977 - } 4941 + }, 4942 + [ALC260_FIXUP_KN1] = { 4943 + .type = ALC_FIXUP_FUNC, 4944 + .v.func = alc260_fixup_kn1, 4945 + }, 4978 4946 }; 4979 4947 4980 4948 static const struct snd_pci_quirk alc260_fixup_tbl[] = { ··· 4988 4948 SND_PCI_QUIRK(0x103c, 0x280a, "HP dc5750", ALC260_FIXUP_HP_DC5750), 4989 4949 SND_PCI_QUIRK(0x103c, 0x30ba, "HP Presario B1900", ALC260_FIXUP_HP_B1900), 4990 4950 SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FIXUP_GPIO1), 4951 + SND_PCI_QUIRK(0x152d, 0x0729, "Quanta KN1", ALC260_FIXUP_KN1), 4991 4952 SND_PCI_QUIRK(0x161f, 0x2057, "Replacer 672V", ALC260_FIXUP_REPLACER), 4992 4953 SND_PCI_QUIRK(0x1631, 0xc017, "PB V7900", ALC260_FIXUP_COEF), 4993 4954 {}
+2 -3
sound/pci/hda/patch_sigmatel.c
··· 5063 5063 if (spec->gpio_led_polarity) 5064 5064 muted = !muted; 5065 5065 5066 - /*polarity defines *not* muted state level*/ 5067 5066 if (!spec->vref_mute_led_nid) { 5068 5067 if (muted) 5069 - spec->gpio_data &= ~spec->gpio_led; /* orange */ 5068 + spec->gpio_data |= spec->gpio_led; 5070 5069 else 5071 - spec->gpio_data |= spec->gpio_led; /* white */ 5070 + spec->gpio_data &= ~spec->gpio_led; 5072 5071 stac_gpio_set(codec, spec->gpio_mask, 5073 5072 spec->gpio_dir, spec->gpio_data); 5074 5073 } else {
+1 -2
sound/soc/codecs/Kconfig
··· 57 57 select SND_SOC_TPA6130A2 if I2C 58 58 select SND_SOC_TLV320DAC33 if I2C 59 59 select SND_SOC_TWL4030 if TWL4030_CORE 60 - select SND_SOC_TWL6040 if TWL4030_CORE 60 + select SND_SOC_TWL6040 if TWL6040_CORE 61 61 select SND_SOC_UDA134X 62 62 select SND_SOC_UDA1380 if I2C 63 63 select SND_SOC_WL1273 if MFD_WL1273_CORE ··· 276 276 tristate 277 277 278 278 config SND_SOC_TWL6040 279 - select TWL6040_CORE 280 279 tristate 281 280 282 281 config SND_SOC_UDA134X
+1 -2
sound/soc/codecs/twl6040.c
··· 26 26 #include <linux/pm.h> 27 27 #include <linux/platform_device.h> 28 28 #include <linux/slab.h> 29 - #include <linux/i2c/twl.h> 30 29 #include <linux/mfd/twl6040.h> 31 30 32 31 #include <sound/core.h> ··· 1527 1528 static int twl6040_probe(struct snd_soc_codec *codec) 1528 1529 { 1529 1530 struct twl6040_data *priv; 1530 - struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev); 1531 + struct twl6040_codec_data *pdata = dev_get_platdata(codec->dev); 1531 1532 struct platform_device *pdev = container_of(codec->dev, 1532 1533 struct platform_device, dev); 1533 1534 int ret = 0;
+1 -1
sound/soc/omap/Kconfig
··· 97 97 98 98 config SND_OMAP_SOC_OMAP_ABE_TWL6040 99 99 tristate "SoC Audio support for OMAP boards using ABE and twl6040 codec" 100 - depends on TWL4030_CORE && SND_OMAP_SOC && ARCH_OMAP4 100 + depends on TWL6040_CORE && SND_OMAP_SOC && ARCH_OMAP4 101 101 select SND_OMAP_SOC_DMIC 102 102 select SND_OMAP_SOC_MCPDM 103 103 select SND_SOC_TWL6040
+2
tools/perf/.gitignore
··· 19 19 cscope* 20 20 config.mak 21 21 config.mak.autogen 22 + *-bison.* 23 + *-flex.*
+9 -12
tools/perf/Makefile
··· 237 237 FLEX = $(CROSS_COMPILE)flex 238 238 BISON= $(CROSS_COMPILE)bison 239 239 240 - event-parser: 241 - $(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c 240 $(OUTPUT)util/parse-events-flex.c: util/parse-events.l 242 241 $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c 244 243 245 - $(OUTPUT)util/parse-events-flex.c: event-parser 245 - $(OUTPUT)util/parse-events-bison.c: event-parser 243 $(OUTPUT)util/parse-events-bison.c: util/parse-events.y 244 $(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c 246 245 247 - pmu-parser: 248 - $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c 246 $(OUTPUT)util/pmu-flex.c: util/pmu.l 249 247 $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c 251 249 252 - $(OUTPUT)util/pmu-flex.c: pmu-parser 252 - $(OUTPUT)util/pmu-bison.c: pmu-parser 249 $(OUTPUT)util/pmu-bison.c: util/pmu.y 250 $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c 253 251 254 - $(OUTPUT)util/parse-events.o: event-parser pmu-parser 252 $(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c 253 $(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c 255 254 256 255 LIB_FILE=$(OUTPUT)libperf.a 257 256 ··· 526 527 endif 527 528 528 529 ifdef NO_GTK2 529 - BASIC_CFLAGS += -DNO_GTK2 530 + BASIC_CFLAGS += -DNO_GTK2_SUPPORT 530 531 else 531 532 FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0) 532 533 ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2)),y) ··· 851 852 @echo ' html - make html documentation' 852 853 @echo ' info - make GNU info documentation (access with info <foo>)' 853 854 @echo ' pdf - make pdf documentation' 854 - @echo ' event-parser - make event parser code' 855 - @echo ' pmu-parser - make pmu format parser code' 856 855 @echo ' TAGS - use etags to make tag information for source browsing' 857 856 @echo ' tags - use ctags to make tag information for source browsing' 858 857 @echo ' cscope - use cscope to make interactive browsing database'
+2 -1
tools/perf/perf-archive.sh
··· 29 29 fi 30 30 31 31 MANIFEST=$(mktemp /tmp/perf-archive-manifest.XXXXXX) 32 + PERF_BUILDID_LINKDIR=$(readlink -f $PERF_BUILDID_DIR)/ 32 33 33 34 cut -d ' ' -f 1 $BUILDIDS | \ 34 35 while read build_id ; do 35 36 linkname=$PERF_BUILDID_DIR.build-id/${build_id:0:2}/${build_id:2} 36 37 filename=$(readlink -f $linkname) 37 38 echo ${linkname#$PERF_BUILDID_DIR} >> $MANIFEST 38 - echo ${filename#$PERF_BUILDID_DIR} >> $MANIFEST 39 + echo ${filename#$PERF_BUILDID_LINKDIR} >> $MANIFEST 39 40 done 40 41 41 42 tar cfj $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST
+2 -2
tools/perf/util/session.c
··· 876 876 dump_sample(session, event, sample); 877 877 if (evsel == NULL) { 878 878 ++session->hists.stats.nr_unknown_id; 879 - return -1; 879 + return 0; 880 880 } 881 881 if (machine == NULL) { 882 882 ++session->hists.stats.nr_unprocessable_samples; 883 - return -1; 883 + return 0; 884 884 } 885 885 return tool->sample(tool, event, sample, evsel, machine); 886 886 case PERF_RECORD_MMAP:
+21 -9
virt/kvm/iommu.c
··· 240 240 return -ENODEV; 241 241 } 242 242 243 + mutex_lock(&kvm->slots_lock); 244 + 243 245 kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type); 244 - if (!kvm->arch.iommu_domain) 245 - return -ENOMEM; 246 + if (!kvm->arch.iommu_domain) { 247 + r = -ENOMEM; 248 + goto out_unlock; 249 + } 246 250 247 251 if (!allow_unsafe_assigned_interrupts && 248 252 !iommu_domain_has_cap(kvm->arch.iommu_domain, ··· 257 253 " module option.\n", __func__); 258 254 iommu_domain_free(kvm->arch.iommu_domain); 259 255 kvm->arch.iommu_domain = NULL; 260 - return -EPERM; 256 + r = -EPERM; 257 + goto out_unlock; 261 258 } 262 259 263 260 r = kvm_iommu_map_memslots(kvm); 264 261 if (r) 265 - goto out_unmap; 262 + kvm_iommu_unmap_memslots(kvm); 266 263 267 - return 0; 268 - 269 - out_unmap: 270 - kvm_iommu_unmap_memslots(kvm); 264 + out_unlock: 265 + mutex_unlock(&kvm->slots_lock); 271 266 return r; 272 267 } 273 268 ··· 313 310 } 314 311 } 315 312 313 + void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot) 314 + { 315 + kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages); 316 + } 317 + 316 318 static int kvm_iommu_unmap_memslots(struct kvm *kvm) 317 319 { 318 320 int idx; ··· 328 320 slots = kvm_memslots(kvm); 329 321 330 322 kvm_for_each_memslot(memslot, slots) 331 - kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages); 323 + kvm_iommu_unmap_pages(kvm, memslot); 332 324 333 325 srcu_read_unlock(&kvm->srcu, idx); 334 326 ··· 343 335 if (!domain) 344 336 return 0; 345 337 338 + mutex_lock(&kvm->slots_lock); 346 339 kvm_iommu_unmap_memslots(kvm); 340 + kvm->arch.iommu_domain = NULL; 341 + mutex_unlock(&kvm->slots_lock); 342 + 347 343 iommu_domain_free(domain); 348 344 return 0; 349 345 }
+3 -2
virt/kvm/kvm_main.c
··· 808 808 if (r) 809 809 goto out_free; 810 810 811 - /* map the pages in iommu page table */ 811 + /* map/unmap the pages in iommu page table */ 812 812 if (npages) { 813 813 r = kvm_iommu_map_pages(kvm, &new); 814 814 if (r) 815 815 goto out_free; 816 - } 816 + } else 817 + kvm_iommu_unmap_pages(kvm, &old); 817 818 818 819 r = -ENOMEM; 819 820 slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),