Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

USB: Merge 2.6.37-rc5 into usb-next

This is to resolve the conflict in the file
drivers/usb/gadget/composite.c that was due to a revert in Linus's tree
needed for the 2.6.37 release.

Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

+2087 -1569
-5
Documentation/kernel-parameters.txt
··· 2175 2175 reset_devices [KNL] Force drivers to reset the underlying device 2176 2176 during initialization. 2177 2177 2178 - resource_alloc_from_bottom 2179 - Allocate new resources from the beginning of available 2180 - space, not the end. If you need to use this, please 2181 - report a bug. 2182 - 2183 2178 resume= [SWSUSP] 2184 2179 Specify the partition device for software suspend 2185 2180
+2 -2
Documentation/power/runtime_pm.txt
··· 379 379 zero) 380 380 381 381 bool pm_runtime_suspended(struct device *dev); 382 - - return true if the device's runtime PM status is 'suspended', or false 383 - otherwise 382 + - return true if the device's runtime PM status is 'suspended' and its 383 + 'power.disable_depth' field is equal to zero, or false otherwise 384 384 385 385 void pm_runtime_allow(struct device *dev); 386 386 - set the power.runtime_auto flag for the device and decrease its usage
+1 -1
Makefile
··· 1 1 VERSION = 2 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 37 4 - EXTRAVERSION = -rc6 4 + EXTRAVERSION = -rc7 5 5 NAME = Flesh-Eating Bats with Fangs 6 6 7 7 # *DOCUMENTATION*
+1 -1
arch/arm/mach-at91/Makefile
··· 65 65 obj-$(CONFIG_MACH_CPU9G20) += board-cpu9krea.o 66 66 obj-$(CONFIG_MACH_STAMP9G20) += board-stamp9g20.o 67 67 obj-$(CONFIG_MACH_PORTUXG20) += board-stamp9g20.o 68 - obj-$(CONFIG_MACH_PCONTROL_G20) += board-pcontrol-g20.o 68 + obj-$(CONFIG_MACH_PCONTROL_G20) += board-pcontrol-g20.o board-stamp9g20.o 69 69 70 70 # AT91SAM9260/AT91SAM9G20 board-specific support 71 71 obj-$(CONFIG_MACH_SNAPPER_9260) += board-snapper9260.o
+3 -95
arch/arm/mach-at91/board-pcontrol-g20.c
··· 31 31 32 32 #include <mach/board.h> 33 33 #include <mach/at91sam9_smc.h> 34 + #include <mach/stamp9g20.h> 34 35 35 36 #include "sam9_smc.h" 36 37 #include "generic.h" ··· 39 38 40 39 static void __init pcontrol_g20_map_io(void) 41 40 { 42 - /* Initialize processor: 18.432 MHz crystal */ 43 - at91sam9260_initialize(18432000); 44 - 45 - /* DGBU on ttyS0. (Rx, Tx) only TTL -> JTAG connector X7 17,19 ) */ 46 - at91_register_uart(0, 0, 0); 41 + stamp9g20_map_io(); 47 42 48 43 /* USART0 on ttyS1. (Rx, Tx, CTS, RTS) piggyback A2 */ 49 44 at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS ··· 51 54 52 55 /* USART2 on ttyS3. (Rx, Tx) 9bit-Bus Multidrop-mode X4 */ 53 56 at91_register_uart(AT91SAM9260_ID_US4, 3, 0); 54 - 55 - /* set serial console to ttyS0 (ie, DBGU) */ 56 - at91_set_serial_console(0); 57 57 } 58 58 59 59 ··· 59 65 at91sam9260_init_interrupts(NULL); 60 66 } 61 67 62 - 63 - /* 64 - * NAND flash 512MiB 1,8V 8-bit, sector size 128 KiB 65 - */ 66 - static struct atmel_nand_data __initdata nand_data = { 67 - .ale = 21, 68 - .cle = 22, 69 - .rdy_pin = AT91_PIN_PC13, 70 - .enable_pin = AT91_PIN_PC14, 71 - }; 72 - 73 - /* 74 - * Bus timings; unit = 7.57ns 75 - */ 76 - static struct sam9_smc_config __initdata nand_smc_config = { 77 - .ncs_read_setup = 0, 78 - .nrd_setup = 2, 79 - .ncs_write_setup = 0, 80 - .nwe_setup = 2, 81 - 82 - .ncs_read_pulse = 4, 83 - .nrd_pulse = 4, 84 - .ncs_write_pulse = 4, 85 - .nwe_pulse = 4, 86 - 87 - .read_cycle = 7, 88 - .write_cycle = 7, 89 - 90 - .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE 91 - | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8, 92 - .tdf_cycles = 3, 93 - }; 94 68 95 69 static struct sam9_smc_config __initdata pcontrol_smc_config[2] = { { 96 70 .ncs_read_setup = 16, ··· 100 138 .tdf_cycles = 1, 101 139 } }; 102 140 103 - static void __init add_device_nand(void) 104 - { 105 - /* configure chip-select 3 (NAND) */ 106 - sam9_smc_configure(3, &nand_smc_config); 107 - at91_add_device_nand(&nand_data); 108 - } 109 - 
110 - 111 141 static void __init add_device_pcontrol(void) 112 142 { 113 143 /* configure chip-select 4 (IO compatible to 8051 X4 ) */ ··· 107 153 /* configure chip-select 7 (FerroRAM 256KiBx16bit MR2A16A D4 ) */ 108 154 sam9_smc_configure(7, &pcontrol_smc_config[1]); 109 155 } 110 - 111 - 112 - /* 113 - * MCI (SD/MMC) 114 - * det_pin, wp_pin and vcc_pin are not connected 115 - */ 116 - #if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE) 117 - static struct mci_platform_data __initdata mmc_data = { 118 - .slot[0] = { 119 - .bus_width = 4, 120 - }, 121 - }; 122 - #else 123 - static struct at91_mmc_data __initdata mmc_data = { 124 - .wire4 = 1, 125 - }; 126 - #endif 127 156 128 157 129 158 /* ··· 202 265 }; 203 266 204 267 205 - /* 206 - * Dallas 1-Wire DS2431 207 - */ 208 - static struct w1_gpio_platform_data w1_gpio_pdata = { 209 - .pin = AT91_PIN_PA29, 210 - .is_open_drain = 1, 211 - }; 212 - 213 - static struct platform_device w1_device = { 214 - .name = "w1-gpio", 215 - .id = -1, 216 - .dev.platform_data = &w1_gpio_pdata, 217 - }; 218 - 219 - static void add_wire1(void) 220 - { 221 - at91_set_GPIO_periph(w1_gpio_pdata.pin, 1); 222 - at91_set_multi_drive(w1_gpio_pdata.pin, 1); 223 - platform_device_register(&w1_device); 224 - } 225 - 226 - 227 268 static void __init pcontrol_g20_board_init(void) 228 269 { 229 - at91_add_device_serial(); 230 - add_device_nand(); 231 - #if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE) 232 - at91_add_device_mci(0, &mmc_data); 233 - #else 234 - at91_add_device_mmc(0, &mmc_data); 235 - #endif 270 + stamp9g20_board_init(); 236 271 at91_add_device_usbh(&usbh_data); 237 272 at91_add_device_eth(&macb_data); 238 273 at91_add_device_i2c(pcontrol_g20_i2c_devices, 239 274 ARRAY_SIZE(pcontrol_g20_i2c_devices)); 240 - add_wire1(); 241 275 add_device_pcontrol(); 242 276 at91_add_device_spi(pcontrol_g20_spi_devices, 243 277 ARRAY_SIZE(pcontrol_g20_spi_devices));
+43 -39
arch/arm/mach-at91/board-stamp9g20.c
··· 32 32 #include "generic.h" 33 33 34 34 35 - static void __init portuxg20_map_io(void) 35 + void __init stamp9g20_map_io(void) 36 36 { 37 37 /* Initialize processor: 18.432 MHz crystal */ 38 38 at91sam9260_initialize(18432000); 39 39 40 40 /* DGBU on ttyS0. (Rx & Tx only) */ 41 41 at91_register_uart(0, 0, 0); 42 + 43 + /* set serial console to ttyS0 (ie, DBGU) */ 44 + at91_set_serial_console(0); 45 + } 46 + 47 + static void __init stamp9g20evb_map_io(void) 48 + { 49 + stamp9g20_map_io(); 50 + 51 + /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ 52 + at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS 53 + | ATMEL_UART_DTR | ATMEL_UART_DSR 54 + | ATMEL_UART_DCD | ATMEL_UART_RI); 55 + } 56 + 57 + static void __init portuxg20_map_io(void) 58 + { 59 + stamp9g20_map_io(); 42 60 43 61 /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ 44 62 at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS ··· 74 56 75 57 /* USART5 on ttyS6. (Rx, Tx only) */ 76 58 at91_register_uart(AT91SAM9260_ID_US5, 6, 0); 77 - 78 - /* set serial console to ttyS0 (ie, DBGU) */ 79 - at91_set_serial_console(0); 80 - } 81 - 82 - static void __init stamp9g20_map_io(void) 83 - { 84 - /* Initialize processor: 18.432 MHz crystal */ 85 - at91sam9260_initialize(18432000); 86 - 87 - /* DGBU on ttyS0. (Rx & Tx only) */ 88 - at91_register_uart(0, 0, 0); 89 - 90 - /* USART0 on ttyS1. 
(Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ 91 - at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS 92 - | ATMEL_UART_DTR | ATMEL_UART_DSR 93 - | ATMEL_UART_DCD | ATMEL_UART_RI); 94 - 95 - /* set serial console to ttyS0 (ie, DBGU) */ 96 - at91_set_serial_console(0); 97 59 } 98 60 99 61 static void __init init_irq(void) ··· 154 156 .pullup_pin = 0, /* pull-up driven by UDC */ 155 157 }; 156 158 157 - static struct at91_udc_data __initdata stamp9g20_udc_data = { 159 + static struct at91_udc_data __initdata stamp9g20evb_udc_data = { 158 160 .vbus_pin = AT91_PIN_PA22, 159 161 .pullup_pin = 0, /* pull-up driven by UDC */ 160 162 }; ··· 188 190 } 189 191 }; 190 192 191 - static struct gpio_led stamp9g20_leds[] = { 193 + static struct gpio_led stamp9g20evb_leds[] = { 192 194 { 193 195 .name = "D8", 194 196 .gpio = AT91_PIN_PB18, ··· 248 250 } 249 251 250 252 251 - static void __init generic_board_init(void) 253 + void __init stamp9g20_board_init(void) 252 254 { 253 255 /* Serial */ 254 256 at91_add_device_serial(); ··· 260 262 #else 261 263 at91_add_device_mmc(0, &mmc_data); 262 264 #endif 263 - /* USB Host */ 264 - at91_add_device_usbh(&usbh_data); 265 - /* Ethernet */ 266 - at91_add_device_eth(&macb_data); 267 - /* I2C */ 268 - at91_add_device_i2c(NULL, 0); 269 265 /* W1 */ 270 266 add_w1(); 271 267 } 272 268 273 269 static void __init portuxg20_board_init(void) 274 270 { 275 - generic_board_init(); 276 - /* SPI */ 277 - at91_add_device_spi(portuxg20_spi_devices, ARRAY_SIZE(portuxg20_spi_devices)); 271 + stamp9g20_board_init(); 272 + /* USB Host */ 273 + at91_add_device_usbh(&usbh_data); 278 274 /* USB Device */ 279 275 at91_add_device_udc(&portuxg20_udc_data); 276 + /* Ethernet */ 277 + at91_add_device_eth(&macb_data); 278 + /* I2C */ 279 + at91_add_device_i2c(NULL, 0); 280 + /* SPI */ 281 + at91_add_device_spi(portuxg20_spi_devices, ARRAY_SIZE(portuxg20_spi_devices)); 280 282 /* LEDs */ 281 283 at91_gpio_leds(portuxg20_leds, 
ARRAY_SIZE(portuxg20_leds)); 282 284 } 283 285 284 - static void __init stamp9g20_board_init(void) 286 + static void __init stamp9g20evb_board_init(void) 285 287 { 286 - generic_board_init(); 288 + stamp9g20_board_init(); 289 + /* USB Host */ 290 + at91_add_device_usbh(&usbh_data); 287 291 /* USB Device */ 288 - at91_add_device_udc(&stamp9g20_udc_data); 292 + at91_add_device_udc(&stamp9g20evb_udc_data); 293 + /* Ethernet */ 294 + at91_add_device_eth(&macb_data); 295 + /* I2C */ 296 + at91_add_device_i2c(NULL, 0); 289 297 /* LEDs */ 290 - at91_gpio_leds(stamp9g20_leds, ARRAY_SIZE(stamp9g20_leds)); 298 + at91_gpio_leds(stamp9g20evb_leds, ARRAY_SIZE(stamp9g20evb_leds)); 291 299 } 292 300 293 301 MACHINE_START(PORTUXG20, "taskit PortuxG20") ··· 309 305 /* Maintainer: taskit GmbH */ 310 306 .boot_params = AT91_SDRAM_BASE + 0x100, 311 307 .timer = &at91sam926x_timer, 312 - .map_io = stamp9g20_map_io, 308 + .map_io = stamp9g20evb_map_io, 313 309 .init_irq = init_irq, 314 - .init_machine = stamp9g20_board_init, 310 + .init_machine = stamp9g20evb_board_init, 315 311 MACHINE_END
+1 -1
arch/arm/mach-at91/clock.c
··· 658 658 /* Now set uhpck values */ 659 659 uhpck.parent = &utmi_clk; 660 660 uhpck.pmc_mask = AT91SAM926x_PMC_UHP; 661 - uhpck.rate_hz = utmi_clk.parent->rate_hz; 661 + uhpck.rate_hz = utmi_clk.rate_hz; 662 662 uhpck.rate_hz /= 1 + ((at91_sys_read(AT91_PMC_USB) & AT91_PMC_OHCIUSBDIV) >> 8); 663 663 } 664 664
+7
arch/arm/mach-at91/include/mach/stamp9g20.h
··· 1 + #ifndef __MACH_STAMP9G20_H 2 + #define __MACH_STAMP9G20_H 3 + 4 + void stamp9g20_map_io(void); 5 + void stamp9g20_board_init(void); 6 + 7 + #endif
+7
arch/arm/mach-s3c2412/Kconfig
··· 28 28 29 29 config S3C2412_PM 30 30 bool 31 + select S3C2412_PM_SLEEP 31 32 help 32 33 Internal config node to apply S3C2412 power management 34 + 35 + config S3C2412_PM_SLEEP 36 + bool 37 + help 38 + Internal config node to apply sleep for S3C2412 power management. 39 + Can be selected by another SoCs with similar sleep procedure. 33 40 34 41 # Note, the S3C2412 IOtiming support is in plat-s3c24xx 35 42
+2 -1
arch/arm/mach-s3c2412/Makefile
··· 14 14 obj-$(CONFIG_CPU_S3C2412) += clock.o 15 15 obj-$(CONFIG_CPU_S3C2412) += gpio.o 16 16 obj-$(CONFIG_S3C2412_DMA) += dma.o 17 - obj-$(CONFIG_S3C2412_PM) += pm.o sleep.o 17 + obj-$(CONFIG_S3C2412_PM) += pm.o 18 + obj-$(CONFIG_S3C2412_PM_SLEEP) += sleep.o 18 19 obj-$(CONFIG_S3C2412_CPUFREQ) += cpu-freq.o 19 20 20 21 # Machine support
+1
arch/arm/mach-s3c2416/Kconfig
··· 27 27 28 28 config S3C2416_PM 29 29 bool 30 + select S3C2412_PM_SLEEP 30 31 help 31 32 Internal config node to apply S3C2416 power management 32 33
+6
arch/arm/mach-s5pv210/mach-aquila.c
··· 378 378 static struct max8998_platform_data aquila_max8998_pdata = { 379 379 .num_regulators = ARRAY_SIZE(aquila_regulators), 380 380 .regulators = aquila_regulators, 381 + .buck1_set1 = S5PV210_GPH0(3), 382 + .buck1_set2 = S5PV210_GPH0(4), 383 + .buck2_set3 = S5PV210_GPH0(5), 384 + .buck1_max_voltage1 = 1200000, 385 + .buck1_max_voltage2 = 1200000, 386 + .buck2_max_voltage = 1200000, 381 387 }; 382 388 #endif 383 389
+6
arch/arm/mach-s5pv210/mach-goni.c
··· 518 518 static struct max8998_platform_data goni_max8998_pdata = { 519 519 .num_regulators = ARRAY_SIZE(goni_regulators), 520 520 .regulators = goni_regulators, 521 + .buck1_set1 = S5PV210_GPH0(3), 522 + .buck1_set2 = S5PV210_GPH0(4), 523 + .buck2_set3 = S5PV210_GPH0(5), 524 + .buck1_max_voltage1 = 1200000, 525 + .buck1_max_voltage2 = 1200000, 526 + .buck2_max_voltage = 1200000, 521 527 }; 522 528 #endif 523 529
+26 -4
arch/arm/mach-shmobile/include/mach/entry-macro.S
··· 1 1 /* 2 + * Copyright (C) 2010 Magnus Damm 2 3 * Copyright (C) 2008 Renesas Solutions Corp. 3 4 * 4 5 * This program is free software; you can redistribute it and/or modify ··· 15 14 * along with this program; if not, write to the Free Software 16 15 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 16 */ 18 - #include <mach/hardware.h> 19 17 #include <mach/irqs.h> 18 + 19 + #define INTCA_BASE 0xe6980000 20 + #define INTFLGA_OFFS 0x00000018 /* accept pending interrupt */ 21 + #define INTEVTA_OFFS 0x00000020 /* vector number of accepted interrupt */ 22 + #define INTLVLA_OFFS 0x00000030 /* priority level of accepted interrupt */ 23 + #define INTLVLB_OFFS 0x00000034 /* previous priority level */ 20 24 21 25 .macro disable_fiq 22 26 .endm 23 27 24 28 .macro get_irqnr_preamble, base, tmp 25 - ldr \base, =INTFLGA 29 + ldr \base, =INTCA_BASE 26 30 .endm 27 31 28 32 .macro arch_ret_to_user, tmp1, tmp2 29 33 .endm 30 34 31 35 .macro get_irqnr_and_base, irqnr, irqstat, base, tmp 32 - ldr \irqnr, [\base] 36 + /* The single INTFLGA read access below results in the following: 37 + * 38 + * 1. INTLVLB is updated with old priority value from INTLVLA 39 + * 2. Highest priority interrupt is accepted 40 + * 3. INTLVLA is updated to contain priority of accepted interrupt 41 + * 4. Accepted interrupt vector is stored in INTFLGA and INTEVTA 42 + */ 43 + ldr \irqnr, [\base, #INTFLGA_OFFS] 44 + 45 + /* Restore INTLVLA with the value saved in INTLVLB. 46 + * This is required to support interrupt priorities properly. 47 + */ 48 + ldrb \tmp, [\base, #INTLVLB_OFFS] 49 + strb \tmp, [\base, #INTLVLA_OFFS] 50 + 51 + /* Handle invalid vector number case */ 33 52 cmp \irqnr, #0 34 53 beq 1000f 35 - /* intevt to irq number */ 54 + 55 + /* Convert vector to irq number, same as the evt2irq() macro */ 36 56 lsr \irqnr, \irqnr, #0x5 37 57 subs \irqnr, \irqnr, #16 38 58
+1 -1
arch/arm/mach-shmobile/include/mach/vmalloc.h
··· 2 2 #define __ASM_MACH_VMALLOC_H 3 3 4 4 /* Vmalloc at ... - 0xe5ffffff */ 5 - #define VMALLOC_END 0xe6000000 5 + #define VMALLOC_END 0xe6000000UL 6 6 7 7 #endif /* __ASM_MACH_VMALLOC_H */
+1 -1
arch/arm/plat-s3c24xx/Kconfig
··· 8 8 default y 9 9 select NO_IOPORT 10 10 select ARCH_REQUIRE_GPIOLIB 11 - select S3C_DEVICE_NAND 11 + select S3C_DEV_NAND 12 12 select S3C_GPIO_CFG_S3C24XX 13 13 help 14 14 Base platform code for any Samsung S3C24XX device
+24 -14
arch/mips/Kconfig
··· 19 19 select GENERIC_ATOMIC64 if !64BIT 20 20 select HAVE_DMA_ATTRS 21 21 select HAVE_DMA_API_DEBUG 22 + select HAVE_GENERIC_HARDIRQS 23 + select GENERIC_IRQ_PROBE 22 24 23 25 menu "Machine selection" 24 26 ··· 1666 1664 1667 1665 endchoice 1668 1666 1667 + config FORCE_MAX_ZONEORDER 1668 + int "Maximum zone order" 1669 + range 13 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB 1670 + default "13" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB 1671 + range 12 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB 1672 + default "12" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB 1673 + range 11 64 1674 + default "11" 1675 + help 1676 + The kernel memory allocator divides physically contiguous memory 1677 + blocks into "zones", where each zone is a power of two number of 1678 + pages. This option selects the largest power of two that the kernel 1679 + keeps in the memory allocator. If you need to allocate very large 1680 + blocks of physically contiguous memory, then you may need to 1681 + increase this value. 1682 + 1683 + This config option is actually maximum order plus one. For example, 1684 + a value of 11 means that the largest free memory block is 2^10 pages. 1685 + 1686 + The page size is not necessarily 4KB. Keep this in mind 1687 + when choosing a value for this option. 1688 + 1669 1689 config BOARD_SCACHE 1670 1690 bool 1671 1691 ··· 1943 1919 select CPU_R4400_WORKAROUNDS 1944 1920 1945 1921 config CPU_R4400_WORKAROUNDS 1946 - bool 1947 - 1948 - # 1949 - # Use the generic interrupt handling code in kernel/irq/: 1950 - # 1951 - config GENERIC_HARDIRQS 1952 - bool 1953 - default y 1954 - 1955 - config GENERIC_IRQ_PROBE 1956 - bool 1957 - default y 1958 - 1959 - config IRQ_PER_CPU 1960 1922 bool 1961 1923 1962 1924 #
+2
arch/mips/alchemy/common/platform.c
··· 27 27 static void alchemy_8250_pm(struct uart_port *port, unsigned int state, 28 28 unsigned int old_state) 29 29 { 30 + #ifdef CONFIG_SERIAL_8250 30 31 switch (state) { 31 32 case 0: 32 33 if ((__raw_readl(port->membase + UART_MOD_CNTRL) & 3) != 3) { ··· 50 49 serial8250_do_pm(port, state, old_state); 51 50 break; 52 51 } 52 + #endif 53 53 } 54 54 55 55 #define PORT(_base, _irq) \
+2 -3
arch/mips/alchemy/devboards/prom.c
··· 54 54 55 55 prom_init_cmdline(); 56 56 memsize_str = prom_getenv("memsize"); 57 - if (!memsize_str) 57 + if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize)) 58 58 memsize = ALCHEMY_BOARD_DEFAULT_MEMSIZE; 59 - else 60 - strict_strtoul(memsize_str, 0, &memsize); 59 + 61 60 add_memory_region(0, memsize, BOOT_MEM_RAM); 62 61 } 63 62
+3 -6
arch/mips/ar7/clock.c
··· 239 239 calculate(base_clock, frequency, &prediv, &postdiv, &mul); 240 240 241 241 writel(((prediv - 1) << PREDIV_SHIFT) | (postdiv - 1), &clock->ctrl); 242 - msleep(1); 242 + mdelay(1); 243 243 writel(4, &clock->pll); 244 244 while (readl(&clock->pll) & PLL_STATUS) 245 245 ; 246 246 writel(((mul - 1) << MUL_SHIFT) | (0xff << 3) | 0x0e, &clock->pll); 247 - msleep(75); 247 + mdelay(75); 248 248 } 249 249 250 250 static void __init tnetd7300_init_clocks(void) ··· 456 456 } 457 457 EXPORT_SYMBOL(clk_put); 458 458 459 - int __init ar7_init_clocks(void) 459 + void __init ar7_init_clocks(void) 460 460 { 461 461 switch (ar7_chip_id()) { 462 462 case AR7_CHIP_7100: ··· 472 472 } 473 473 /* adjust vbus clock rate */ 474 474 vbus_clk.rate = bus_clk.rate / 2; 475 - 476 - return 0; 477 475 } 478 - arch_initcall(ar7_init_clocks);
+3
arch/mips/ar7/time.c
··· 30 30 { 31 31 struct clk *cpu_clk; 32 32 33 + /* Initialize ar7 clocks so the CPU clock frequency is correct */ 34 + ar7_init_clocks(); 35 + 33 36 cpu_clk = clk_get(NULL, "cpu"); 34 37 if (IS_ERR(cpu_clk)) { 35 38 printk(KERN_ERR "unable to get cpu clock\n");
+105 -46
arch/mips/bcm47xx/setup.c
··· 32 32 #include <asm/reboot.h> 33 33 #include <asm/time.h> 34 34 #include <bcm47xx.h> 35 - #include <asm/fw/cfe/cfe_api.h> 36 35 #include <asm/mach-bcm47xx/nvram.h> 37 36 38 37 struct ssb_bus ssb_bcm47xx; ··· 56 57 cpu_relax(); 57 58 } 58 59 59 - static void str2eaddr(char *str, char *dest) 60 + #define READ_FROM_NVRAM(_outvar, name, buf) \ 61 + if (nvram_getenv(name, buf, sizeof(buf)) >= 0)\ 62 + sprom->_outvar = simple_strtoul(buf, NULL, 0); 63 + 64 + static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) 60 65 { 61 - int i = 0; 66 + char buf[100]; 67 + u32 boardflags; 62 68 63 - if (str == NULL) { 64 - memset(dest, 0, 6); 65 - return; 69 + memset(sprom, 0, sizeof(struct ssb_sprom)); 70 + 71 + sprom->revision = 1; /* Fallback: Old hardware does not define this. */ 72 + READ_FROM_NVRAM(revision, "sromrev", buf); 73 + if (nvram_getenv("il0macaddr", buf, sizeof(buf)) >= 0) 74 + nvram_parse_macaddr(buf, sprom->il0mac); 75 + if (nvram_getenv("et0macaddr", buf, sizeof(buf)) >= 0) 76 + nvram_parse_macaddr(buf, sprom->et0mac); 77 + if (nvram_getenv("et1macaddr", buf, sizeof(buf)) >= 0) 78 + nvram_parse_macaddr(buf, sprom->et1mac); 79 + READ_FROM_NVRAM(et0phyaddr, "et0phyaddr", buf); 80 + READ_FROM_NVRAM(et1phyaddr, "et1phyaddr", buf); 81 + READ_FROM_NVRAM(et0mdcport, "et0mdcport", buf); 82 + READ_FROM_NVRAM(et1mdcport, "et1mdcport", buf); 83 + READ_FROM_NVRAM(board_rev, "boardrev", buf); 84 + READ_FROM_NVRAM(country_code, "ccode", buf); 85 + READ_FROM_NVRAM(ant_available_a, "aa5g", buf); 86 + READ_FROM_NVRAM(ant_available_bg, "aa2g", buf); 87 + READ_FROM_NVRAM(pa0b0, "pa0b0", buf); 88 + READ_FROM_NVRAM(pa0b1, "pa0b1", buf); 89 + READ_FROM_NVRAM(pa0b2, "pa0b2", buf); 90 + READ_FROM_NVRAM(pa1b0, "pa1b0", buf); 91 + READ_FROM_NVRAM(pa1b1, "pa1b1", buf); 92 + READ_FROM_NVRAM(pa1b2, "pa1b2", buf); 93 + READ_FROM_NVRAM(pa1lob0, "pa1lob0", buf); 94 + READ_FROM_NVRAM(pa1lob2, "pa1lob1", buf); 95 + READ_FROM_NVRAM(pa1lob1, "pa1lob2", buf); 96 + READ_FROM_NVRAM(pa1hib0, 
"pa1hib0", buf); 97 + READ_FROM_NVRAM(pa1hib2, "pa1hib1", buf); 98 + READ_FROM_NVRAM(pa1hib1, "pa1hib2", buf); 99 + READ_FROM_NVRAM(gpio0, "wl0gpio0", buf); 100 + READ_FROM_NVRAM(gpio1, "wl0gpio1", buf); 101 + READ_FROM_NVRAM(gpio2, "wl0gpio2", buf); 102 + READ_FROM_NVRAM(gpio3, "wl0gpio3", buf); 103 + READ_FROM_NVRAM(maxpwr_bg, "pa0maxpwr", buf); 104 + READ_FROM_NVRAM(maxpwr_al, "pa1lomaxpwr", buf); 105 + READ_FROM_NVRAM(maxpwr_a, "pa1maxpwr", buf); 106 + READ_FROM_NVRAM(maxpwr_ah, "pa1himaxpwr", buf); 107 + READ_FROM_NVRAM(itssi_a, "pa1itssit", buf); 108 + READ_FROM_NVRAM(itssi_bg, "pa0itssit", buf); 109 + READ_FROM_NVRAM(tri2g, "tri2g", buf); 110 + READ_FROM_NVRAM(tri5gl, "tri5gl", buf); 111 + READ_FROM_NVRAM(tri5g, "tri5g", buf); 112 + READ_FROM_NVRAM(tri5gh, "tri5gh", buf); 113 + READ_FROM_NVRAM(rxpo2g, "rxpo2g", buf); 114 + READ_FROM_NVRAM(rxpo5g, "rxpo5g", buf); 115 + READ_FROM_NVRAM(rssisav2g, "rssisav2g", buf); 116 + READ_FROM_NVRAM(rssismc2g, "rssismc2g", buf); 117 + READ_FROM_NVRAM(rssismf2g, "rssismf2g", buf); 118 + READ_FROM_NVRAM(bxa2g, "bxa2g", buf); 119 + READ_FROM_NVRAM(rssisav5g, "rssisav5g", buf); 120 + READ_FROM_NVRAM(rssismc5g, "rssismc5g", buf); 121 + READ_FROM_NVRAM(rssismf5g, "rssismf5g", buf); 122 + READ_FROM_NVRAM(bxa5g, "bxa5g", buf); 123 + READ_FROM_NVRAM(cck2gpo, "cck2gpo", buf); 124 + READ_FROM_NVRAM(ofdm2gpo, "ofdm2gpo", buf); 125 + READ_FROM_NVRAM(ofdm5glpo, "ofdm5glpo", buf); 126 + READ_FROM_NVRAM(ofdm5gpo, "ofdm5gpo", buf); 127 + READ_FROM_NVRAM(ofdm5ghpo, "ofdm5ghpo", buf); 128 + 129 + if (nvram_getenv("boardflags", buf, sizeof(buf)) >= 0) { 130 + boardflags = simple_strtoul(buf, NULL, 0); 131 + if (boardflags) { 132 + sprom->boardflags_lo = (boardflags & 0x0000FFFFU); 133 + sprom->boardflags_hi = (boardflags & 0xFFFF0000U) >> 16; 134 + } 66 135 } 67 - 68 - for (;;) { 69 - dest[i++] = (char) simple_strtoul(str, NULL, 16); 70 - str += 2; 71 - if (!*str++ || i == 6) 72 - break; 136 + if (nvram_getenv("boardflags2", buf, sizeof(buf)) 
>= 0) { 137 + boardflags = simple_strtoul(buf, NULL, 0); 138 + if (boardflags) { 139 + sprom->boardflags2_lo = (boardflags & 0x0000FFFFU); 140 + sprom->boardflags2_hi = (boardflags & 0xFFFF0000U) >> 16; 141 + } 73 142 } 74 143 } 75 144 76 145 static int bcm47xx_get_invariants(struct ssb_bus *bus, 77 146 struct ssb_init_invariants *iv) 78 147 { 79 - char buf[100]; 148 + char buf[20]; 80 149 81 150 /* Fill boardinfo structure */ 82 151 memset(&(iv->boardinfo), 0 , sizeof(struct ssb_boardinfo)); 83 152 84 - if (cfe_getenv("boardvendor", buf, sizeof(buf)) >= 0 || 85 - nvram_getenv("boardvendor", buf, sizeof(buf)) >= 0) 153 + if (nvram_getenv("boardvendor", buf, sizeof(buf)) >= 0) 154 + iv->boardinfo.vendor = (u16)simple_strtoul(buf, NULL, 0); 155 + else 156 + iv->boardinfo.vendor = SSB_BOARDVENDOR_BCM; 157 + if (nvram_getenv("boardtype", buf, sizeof(buf)) >= 0) 86 158 iv->boardinfo.type = (u16)simple_strtoul(buf, NULL, 0); 87 - if (cfe_getenv("boardtype", buf, sizeof(buf)) >= 0 || 88 - nvram_getenv("boardtype", buf, sizeof(buf)) >= 0) 89 - iv->boardinfo.type = (u16)simple_strtoul(buf, NULL, 0); 90 - if (cfe_getenv("boardrev", buf, sizeof(buf)) >= 0 || 91 - nvram_getenv("boardrev", buf, sizeof(buf)) >= 0) 159 + if (nvram_getenv("boardrev", buf, sizeof(buf)) >= 0) 92 160 iv->boardinfo.rev = (u16)simple_strtoul(buf, NULL, 0); 93 161 94 - /* Fill sprom structure */ 95 - memset(&(iv->sprom), 0, sizeof(struct ssb_sprom)); 96 - iv->sprom.revision = 3; 162 + bcm47xx_fill_sprom(&iv->sprom); 97 163 98 - if (cfe_getenv("et0macaddr", buf, sizeof(buf)) >= 0 || 99 - nvram_getenv("et0macaddr", buf, sizeof(buf)) >= 0) 100 - str2eaddr(buf, iv->sprom.et0mac); 101 - 102 - if (cfe_getenv("et1macaddr", buf, sizeof(buf)) >= 0 || 103 - nvram_getenv("et1macaddr", buf, sizeof(buf)) >= 0) 104 - str2eaddr(buf, iv->sprom.et1mac); 105 - 106 - if (cfe_getenv("et0phyaddr", buf, sizeof(buf)) >= 0 || 107 - nvram_getenv("et0phyaddr", buf, sizeof(buf)) >= 0) 108 - iv->sprom.et0phyaddr = 
simple_strtoul(buf, NULL, 0); 109 - 110 - if (cfe_getenv("et1phyaddr", buf, sizeof(buf)) >= 0 || 111 - nvram_getenv("et1phyaddr", buf, sizeof(buf)) >= 0) 112 - iv->sprom.et1phyaddr = simple_strtoul(buf, NULL, 0); 113 - 114 - if (cfe_getenv("et0mdcport", buf, sizeof(buf)) >= 0 || 115 - nvram_getenv("et0mdcport", buf, sizeof(buf)) >= 0) 116 - iv->sprom.et0mdcport = simple_strtoul(buf, NULL, 10); 117 - 118 - if (cfe_getenv("et1mdcport", buf, sizeof(buf)) >= 0 || 119 - nvram_getenv("et1mdcport", buf, sizeof(buf)) >= 0) 120 - iv->sprom.et1mdcport = simple_strtoul(buf, NULL, 10); 164 + if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0) 165 + iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10); 121 166 122 167 return 0; 123 168 } ··· 169 126 void __init plat_mem_setup(void) 170 127 { 171 128 int err; 129 + char buf[100]; 130 + struct ssb_mipscore *mcore; 172 131 173 132 err = ssb_bus_ssbbus_register(&ssb_bcm47xx, SSB_ENUM_BASE, 174 133 bcm47xx_get_invariants); 175 134 if (err) 176 135 panic("Failed to initialize SSB bus (err %d)\n", err); 136 + 137 + mcore = &ssb_bcm47xx.mipscore; 138 + if (nvram_getenv("kernel_args", buf, sizeof(buf)) >= 0) { 139 + if (strstr(buf, "console=ttyS1")) { 140 + struct ssb_serial_port port; 141 + 142 + printk(KERN_DEBUG "Swapping serial ports!\n"); 143 + /* swap serial ports */ 144 + memcpy(&port, &mcore->serial_ports[0], sizeof(port)); 145 + memcpy(&mcore->serial_ports[0], &mcore->serial_ports[1], 146 + sizeof(port)); 147 + memcpy(&mcore->serial_ports[1], &port, sizeof(port)); 148 + } 149 + } 177 150 178 151 _machine_restart = bcm47xx_machine_restart; 179 152 _machine_halt = bcm47xx_machine_halt;
+2 -2
arch/mips/include/asm/cpu.h
··· 111 111 * These are the PRID's for when 23:16 == PRID_COMP_BROADCOM 112 112 */ 113 113 114 - #define PRID_IMP_BMIPS4KC 0x4000 115 - #define PRID_IMP_BMIPS32 0x8000 114 + #define PRID_IMP_BMIPS32_REV4 0x4000 115 + #define PRID_IMP_BMIPS32_REV8 0x8000 116 116 #define PRID_IMP_BMIPS3300 0x9000 117 117 #define PRID_IMP_BMIPS3300_ALT 0x9100 118 118 #define PRID_IMP_BMIPS3300_BUG 0x0000
+6 -2
arch/mips/include/asm/elf.h
··· 249 249 250 250 #define SET_PERSONALITY(ex) \ 251 251 do { \ 252 - set_personality(PER_LINUX); \ 252 + if (personality(current->personality) != PER_LINUX) \ 253 + set_personality(PER_LINUX); \ 253 254 \ 254 255 current->thread.abi = &mips_abi; \ 255 256 } while (0) ··· 297 296 298 297 #define SET_PERSONALITY(ex) \ 299 298 do { \ 299 + unsigned int p; \ 300 + \ 300 301 clear_thread_flag(TIF_32BIT_REGS); \ 301 302 clear_thread_flag(TIF_32BIT_ADDR); \ 302 303 \ ··· 307 304 else \ 308 305 current->thread.abi = &mips_abi; \ 309 306 \ 310 - if (current->personality != PER_LINUX32) \ 307 + p = personality(current->personality); \ 308 + if (p != PER_LINUX32 && p != PER_LINUX) \ 311 309 set_personality(PER_LINUX); \ 312 310 } while (0) 313 311
+10 -2
arch/mips/include/asm/io.h
··· 329 329 "dsrl32 %L0, %L0, 0" "\n\t" \ 330 330 "dsll32 %M0, %M0, 0" "\n\t" \ 331 331 "or %L0, %L0, %M0" "\n\t" \ 332 + ".set push" "\n\t" \ 333 + ".set noreorder" "\n\t" \ 334 + ".set nomacro" "\n\t" \ 332 335 "sd %L0, %2" "\n\t" \ 336 + ".set pop" "\n\t" \ 333 337 ".set mips0" "\n" \ 334 338 : "=r" (__tmp) \ 335 - : "0" (__val), "m" (*__mem)); \ 339 + : "0" (__val), "R" (*__mem)); \ 336 340 if (irq) \ 337 341 local_irq_restore(__flags); \ 338 342 } else \ ··· 359 355 local_irq_save(__flags); \ 360 356 __asm__ __volatile__( \ 361 357 ".set mips3" "\t\t# __readq" "\n\t" \ 358 + ".set push" "\n\t" \ 359 + ".set noreorder" "\n\t" \ 360 + ".set nomacro" "\n\t" \ 362 361 "ld %L0, %1" "\n\t" \ 362 + ".set pop" "\n\t" \ 363 363 "dsra32 %M0, %L0, 0" "\n\t" \ 364 364 "sll %L0, %L0, 0" "\n\t" \ 365 365 ".set mips0" "\n" \ 366 366 : "=r" (__val) \ 367 - : "m" (*__mem)); \ 367 + : "R" (*__mem)); \ 368 368 if (irq) \ 369 369 local_irq_restore(__flags); \ 370 370 } else { \
+1 -2
arch/mips/include/asm/mach-ar7/ar7.h
··· 201 201 } 202 202 203 203 int __init ar7_gpio_init(void); 204 - 205 - int __init ar7_gpio_init(void); 204 + void __init ar7_init_clocks(void); 206 205 207 206 #endif /* __AR7_H__ */
+7
arch/mips/include/asm/mach-bcm47xx/nvram.h
··· 12 12 #define __NVRAM_H 13 13 14 14 #include <linux/types.h> 15 + #include <linux/kernel.h> 15 16 16 17 struct nvram_header { 17 18 u32 magic; ··· 36 35 #define NVRAM_ERR_ENVNOTFOUND -9 37 36 38 37 extern int nvram_getenv(char *name, char *val, size_t val_len); 38 + 39 + static inline void nvram_parse_macaddr(char *buf, u8 *macaddr) 40 + { 41 + sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], &macaddr[1], 42 + &macaddr[2], &macaddr[3], &macaddr[4], &macaddr[5]); 43 + } 39 44 40 45 #endif
+2 -2
arch/mips/jz4740/board-qi_lb60.c
··· 5 5 * 6 6 * Copyright (c) 2009 Qi Hardware inc., 7 7 * Author: Xiangfu Liu <xiangfu@qi-hardware.com> 8 - * Copyright 2010, Lars-Petrer Clausen <lars@metafoo.de> 8 + * Copyright 2010, Lars-Peter Clausen <lars@metafoo.de> 9 9 * 10 10 * This program is free software; you can redistribute it and/or modify 11 11 * it under the terms of the GNU General Public License version 2 or later ··· 235 235 QI_LB60_GPIO_KEYIN(3), 236 236 QI_LB60_GPIO_KEYIN(4), 237 237 QI_LB60_GPIO_KEYIN(5), 238 - QI_LB60_GPIO_KEYIN(7), 238 + QI_LB60_GPIO_KEYIN(6), 239 239 QI_LB60_GPIO_KEYIN8, 240 240 }; 241 241
+1 -1
arch/mips/jz4740/platform.c
··· 208 208 209 209 /* PCM */ 210 210 struct platform_device jz4740_pcm_device = { 211 - .name = "jz4740-pcm", 211 + .name = "jz4740-pcm-audio", 212 212 .id = -1, 213 213 }; 214 214
+1 -1
arch/mips/jz4740/prom.c
··· 23 23 #include <asm/bootinfo.h> 24 24 #include <asm/mach-jz4740/base.h> 25 25 26 - void jz4740_init_cmdline(int argc, char *argv[]) 26 + static __init void jz4740_init_cmdline(int argc, char *argv[]) 27 27 { 28 28 unsigned int count = COMMAND_LINE_SIZE - 1; 29 29 int i;
+1 -1
arch/mips/kernel/cevt-r4k.c
··· 32 32 cnt = read_c0_count(); 33 33 cnt += delta; 34 34 write_c0_compare(cnt); 35 - res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; 35 + res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0; 36 36 return res; 37 37 } 38 38
+2 -5
arch/mips/kernel/cpu-probe.c
··· 905 905 { 906 906 decode_configs(c); 907 907 switch (c->processor_id & 0xff00) { 908 - case PRID_IMP_BMIPS32: 908 + case PRID_IMP_BMIPS32_REV4: 909 + case PRID_IMP_BMIPS32_REV8: 909 910 c->cputype = CPU_BMIPS32; 910 911 __cpu_name[cpu] = "Broadcom BMIPS32"; 911 912 break; ··· 933 932 c->cputype = CPU_BMIPS5000; 934 933 __cpu_name[cpu] = "Broadcom BMIPS5000"; 935 934 c->options |= MIPS_CPU_ULRI; 936 - break; 937 - case PRID_IMP_BMIPS4KC: 938 - c->cputype = CPU_4KC; 939 - __cpu_name[cpu] = "MIPS 4Kc"; 940 935 break; 941 936 } 942 937 }
+7 -6
arch/mips/kernel/linux32.c
··· 251 251 252 252 SYSCALL_DEFINE1(32_personality, unsigned long, personality) 253 253 { 254 + unsigned int p = personality & 0xffffffff; 254 255 int ret; 255 - personality &= 0xffffffff; 256 + 256 257 if (personality(current->personality) == PER_LINUX32 && 257 - personality == PER_LINUX) 258 - personality = PER_LINUX32; 259 - ret = sys_personality(personality); 260 - if (ret == PER_LINUX32) 261 - ret = PER_LINUX; 258 + personality(p) == PER_LINUX) 259 + p = (p & ~PER_MASK) | PER_LINUX32; 260 + ret = sys_personality(p); 261 + if (ret != -1 && personality(ret) == PER_LINUX32) 262 + ret = (ret & ~PER_MASK) | PER_LINUX; 262 263 return ret; 263 264 } 264 265
-1
arch/mips/kernel/process.c
··· 142 142 childregs->regs[7] = 0; /* Clear error flag */ 143 143 144 144 childregs->regs[2] = 0; /* Child gets zero as return value */ 145 - regs->regs[2] = p->pid; 146 145 147 146 if (childregs->cp0_status & ST0_CU0) { 148 147 childregs->regs[28] = (unsigned long) ti;
+1 -1
arch/mips/kernel/prom.c
··· 100 100 return; 101 101 102 102 base = virt_to_phys((void *)initial_boot_params); 103 - size = initial_boot_params->totalsize; 103 + size = be32_to_cpu(initial_boot_params->totalsize); 104 104 105 105 /* Before we do anything, lets reserve the dt blob */ 106 106 reserve_mem_mach(base, size);
+1 -1
arch/mips/kernel/smp-mt.c
··· 153 153 { 154 154 extern int gic_present; 155 155 156 - /* This is Malta specific: IPI,performance and timer inetrrupts */ 156 + /* This is Malta specific: IPI,performance and timer interrupts */ 157 157 if (gic_present) 158 158 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | 159 159 STATUSF_IP6 | STATUSF_IP7);
+35 -9
arch/mips/kernel/traps.c
··· 83 83 extern asmlinkage void handle_reserved(void); 84 84 85 85 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, 86 - struct mips_fpu_struct *ctx, int has_fpu); 86 + struct mips_fpu_struct *ctx, int has_fpu, 87 + void *__user *fault_addr); 87 88 88 89 void (*board_be_init)(void); 89 90 int (*board_be_handler)(struct pt_regs *regs, int is_fixup); ··· 662 661 force_sig_info(SIGFPE, &info, current); 663 662 } 664 663 664 + static int process_fpemu_return(int sig, void __user *fault_addr) 665 + { 666 + if (sig == SIGSEGV || sig == SIGBUS) { 667 + struct siginfo si = {0}; 668 + si.si_addr = fault_addr; 669 + si.si_signo = sig; 670 + if (sig == SIGSEGV) { 671 + if (find_vma(current->mm, (unsigned long)fault_addr)) 672 + si.si_code = SEGV_ACCERR; 673 + else 674 + si.si_code = SEGV_MAPERR; 675 + } else { 676 + si.si_code = BUS_ADRERR; 677 + } 678 + force_sig_info(sig, &si, current); 679 + return 1; 680 + } else if (sig) { 681 + force_sig(sig, current); 682 + return 1; 683 + } else { 684 + return 0; 685 + } 686 + } 687 + 665 688 /* 666 689 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX 667 690 */ 668 691 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) 669 692 { 670 - siginfo_t info; 693 + siginfo_t info = {0}; 671 694 672 695 if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE) 673 696 == NOTIFY_STOP) ··· 700 675 701 676 if (fcr31 & FPU_CSR_UNI_X) { 702 677 int sig; 678 + void __user *fault_addr = NULL; 703 679 704 680 /* 705 681 * Unimplemented operation exception. If we've got the full ··· 716 690 lose_fpu(1); 717 691 718 692 /* Run the emulator */ 719 - sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1); 693 + sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, 694 + &fault_addr); 720 695 721 696 /* 722 697 * We can't allow the emulated instruction to leave any of ··· 729 702 own_fpu(1); /* Using the FPU again. 
*/ 730 703 731 704 /* If something went wrong, signal */ 732 - if (sig) 733 - force_sig(sig, current); 705 + process_fpemu_return(sig, fault_addr); 734 706 735 707 return; 736 708 } else if (fcr31 & FPU_CSR_INV_X) ··· 1022 996 1023 997 if (!raw_cpu_has_fpu) { 1024 998 int sig; 999 + void __user *fault_addr = NULL; 1025 1000 sig = fpu_emulator_cop1Handler(regs, 1026 - &current->thread.fpu, 0); 1027 - if (sig) 1028 - force_sig(sig, current); 1029 - else 1001 + &current->thread.fpu, 1002 + 0, &fault_addr); 1003 + if (!process_fpemu_return(sig, fault_addr)) 1030 1004 mt_ase_fp_affinity(); 1031 1005 } 1032 1006
+6 -8
arch/mips/kernel/vpe.c
··· 1092 1092 1093 1093 /* this of-course trashes what was there before... */ 1094 1094 v->pbuffer = vmalloc(P_SIZE); 1095 + if (!v->pbuffer) { 1096 + pr_warning("VPE loader: unable to allocate memory\n"); 1097 + return -ENOMEM; 1098 + } 1095 1099 v->plen = P_SIZE; 1096 1100 v->load_addr = NULL; 1097 1101 v->len = 0; ··· 1153 1149 if (ret < 0) 1154 1150 v->shared_ptr = NULL; 1155 1151 1156 - // cleanup any temp buffers 1157 - if (v->pbuffer) 1158 - vfree(v->pbuffer); 1152 + vfree(v->pbuffer); 1159 1153 v->plen = 0; 1154 + 1160 1155 return ret; 1161 1156 } 1162 1157 ··· 1171 1168 v = get_vpe(tclimit); 1172 1169 if (v == NULL) 1173 1170 return -ENODEV; 1174 - 1175 - if (v->pbuffer == NULL) { 1176 - printk(KERN_ERR "VPE loader: no buffer for program\n"); 1177 - return -ENOMEM; 1178 - } 1179 1171 1180 1172 if ((count + v->len) > v->plen) { 1181 1173 printk(KERN_WARNING
+2 -2
arch/mips/lib/memset.S
··· 161 161 162 162 .Lfwd_fixup: 163 163 PTR_L t0, TI_TASK($28) 164 - LONG_L t0, THREAD_BUADDR(t0) 165 164 andi a2, 0x3f 165 + LONG_L t0, THREAD_BUADDR(t0) 166 166 LONG_ADDU a2, t1 167 167 jr ra 168 168 LONG_SUBU a2, t0 169 169 170 170 .Lpartial_fixup: 171 171 PTR_L t0, TI_TASK($28) 172 - LONG_L t0, THREAD_BUADDR(t0) 173 172 andi a2, LONGMASK 173 + LONG_L t0, THREAD_BUADDR(t0) 174 174 LONG_ADDU a2, t1 175 175 jr ra 176 176 LONG_SUBU a2, t0
+2 -2
arch/mips/loongson/common/env.c
··· 29 29 30 30 #define parse_even_earlier(res, option, p) \ 31 31 do { \ 32 + int ret; \ 32 33 if (strncmp(option, (char *)p, strlen(option)) == 0) \ 33 - strict_strtol((char *)p + strlen(option"="), \ 34 - 10, &res); \ 34 + ret = strict_strtol((char *)p + strlen(option"="), 10, &res); \ 35 35 } while (0) 36 36 37 37 void __init prom_init_env(void)
+95 -21
arch/mips/math-emu/cp1emu.c
··· 64 64 65 65 #if __mips >= 4 && __mips != 32 66 66 static int fpux_emu(struct pt_regs *, 67 - struct mips_fpu_struct *, mips_instruction); 67 + struct mips_fpu_struct *, mips_instruction, void *__user *); 68 68 #endif 69 69 70 70 /* Further private data for which no space exists in mips_fpu_struct */ ··· 208 208 * Two instructions if the instruction is in a branch delay slot. 209 209 */ 210 210 211 - static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx) 211 + static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, 212 + void *__user *fault_addr) 212 213 { 213 214 mips_instruction ir; 214 215 unsigned long emulpc, contpc; 215 216 unsigned int cond; 216 217 217 - if (get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) { 218 + if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) { 218 219 MIPS_FPU_EMU_INC_STATS(errors); 220 + *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 219 221 return SIGBUS; 222 + } 223 + if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) { 224 + MIPS_FPU_EMU_INC_STATS(errors); 225 + *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 226 + return SIGSEGV; 220 227 } 221 228 222 229 /* XXX NEC Vr54xx bug workaround */ ··· 252 245 #endif 253 246 return SIGILL; 254 247 } 255 - if (get_user(ir, (mips_instruction __user *) emulpc)) { 248 + if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) { 256 249 MIPS_FPU_EMU_INC_STATS(errors); 250 + *fault_addr = (mips_instruction __user *)emulpc; 257 251 return SIGBUS; 252 + } 253 + if (__get_user(ir, (mips_instruction __user *) emulpc)) { 254 + MIPS_FPU_EMU_INC_STATS(errors); 255 + *fault_addr = (mips_instruction __user *)emulpc; 256 + return SIGSEGV; 258 257 } 259 258 /* __compute_return_epc() will have updated cp0_epc */ 260 259 contpc = xcp->cp0_epc; ··· 282 269 u64 val; 283 270 284 271 MIPS_FPU_EMU_INC_STATS(loads); 285 - if (get_user(val, va)) { 272 + 273 + if (!access_ok(VERIFY_READ, va, sizeof(u64))) { 286 274 
MIPS_FPU_EMU_INC_STATS(errors); 275 + *fault_addr = va; 287 276 return SIGBUS; 277 + } 278 + if (__get_user(val, va)) { 279 + MIPS_FPU_EMU_INC_STATS(errors); 280 + *fault_addr = va; 281 + return SIGSEGV; 288 282 } 289 283 DITOREG(val, MIPSInst_RT(ir)); 290 284 break; ··· 304 284 305 285 MIPS_FPU_EMU_INC_STATS(stores); 306 286 DIFROMREG(val, MIPSInst_RT(ir)); 307 - if (put_user(val, va)) { 287 + if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) { 308 288 MIPS_FPU_EMU_INC_STATS(errors); 289 + *fault_addr = va; 309 290 return SIGBUS; 291 + } 292 + if (__put_user(val, va)) { 293 + MIPS_FPU_EMU_INC_STATS(errors); 294 + *fault_addr = va; 295 + return SIGSEGV; 310 296 } 311 297 break; 312 298 } ··· 323 297 u32 val; 324 298 325 299 MIPS_FPU_EMU_INC_STATS(loads); 326 - if (get_user(val, va)) { 300 + if (!access_ok(VERIFY_READ, va, sizeof(u32))) { 327 301 MIPS_FPU_EMU_INC_STATS(errors); 302 + *fault_addr = va; 328 303 return SIGBUS; 304 + } 305 + if (__get_user(val, va)) { 306 + MIPS_FPU_EMU_INC_STATS(errors); 307 + *fault_addr = va; 308 + return SIGSEGV; 329 309 } 330 310 SITOREG(val, MIPSInst_RT(ir)); 331 311 break; ··· 344 312 345 313 MIPS_FPU_EMU_INC_STATS(stores); 346 314 SIFROMREG(val, MIPSInst_RT(ir)); 347 - if (put_user(val, va)) { 315 + if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) { 348 316 MIPS_FPU_EMU_INC_STATS(errors); 317 + *fault_addr = va; 349 318 return SIGBUS; 319 + } 320 + if (__put_user(val, va)) { 321 + MIPS_FPU_EMU_INC_STATS(errors); 322 + *fault_addr = va; 323 + return SIGSEGV; 350 324 } 351 325 break; 352 326 } ··· 478 440 contpc = (xcp->cp0_epc + 479 441 (MIPSInst_SIMM(ir) << 2)); 480 442 481 - if (get_user(ir, 443 + if (!access_ok(VERIFY_READ, xcp->cp0_epc, 444 + sizeof(mips_instruction))) { 445 + MIPS_FPU_EMU_INC_STATS(errors); 446 + *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 447 + return SIGBUS; 448 + } 449 + if (__get_user(ir, 482 450 (mips_instruction __user *) xcp->cp0_epc)) { 483 451 MIPS_FPU_EMU_INC_STATS(errors); 484 - return 
SIGBUS; 452 + *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 453 + return SIGSEGV; 485 454 } 486 455 487 456 switch (MIPSInst_OPCODE(ir)) { ··· 551 506 552 507 #if __mips >= 4 && __mips != 32 553 508 case cop1x_op:{ 554 - int sig; 555 - 556 - if ((sig = fpux_emu(xcp, ctx, ir))) 509 + int sig = fpux_emu(xcp, ctx, ir, fault_addr); 510 + if (sig) 557 511 return sig; 558 512 break; 559 513 } ··· 648 604 DEF3OP(nmsub, dp, ieee754dp_mul, ieee754dp_sub, ieee754dp_neg); 649 605 650 606 static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, 651 - mips_instruction ir) 607 + mips_instruction ir, void *__user *fault_addr) 652 608 { 653 609 unsigned rcsr = 0; /* resulting csr */ 654 610 ··· 668 624 xcp->regs[MIPSInst_FT(ir)]); 669 625 670 626 MIPS_FPU_EMU_INC_STATS(loads); 671 - if (get_user(val, va)) { 627 + if (!access_ok(VERIFY_READ, va, sizeof(u32))) { 672 628 MIPS_FPU_EMU_INC_STATS(errors); 629 + *fault_addr = va; 673 630 return SIGBUS; 631 + } 632 + if (__get_user(val, va)) { 633 + MIPS_FPU_EMU_INC_STATS(errors); 634 + *fault_addr = va; 635 + return SIGSEGV; 674 636 } 675 637 SITOREG(val, MIPSInst_FD(ir)); 676 638 break; ··· 688 638 MIPS_FPU_EMU_INC_STATS(stores); 689 639 690 640 SIFROMREG(val, MIPSInst_FS(ir)); 641 + if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) { 642 + MIPS_FPU_EMU_INC_STATS(errors); 643 + *fault_addr = va; 644 + return SIGBUS; 645 + } 691 646 if (put_user(val, va)) { 692 647 MIPS_FPU_EMU_INC_STATS(errors); 693 - return SIGBUS; 648 + *fault_addr = va; 649 + return SIGSEGV; 694 650 } 695 651 break; 696 652 ··· 757 701 xcp->regs[MIPSInst_FT(ir)]); 758 702 759 703 MIPS_FPU_EMU_INC_STATS(loads); 760 - if (get_user(val, va)) { 704 + if (!access_ok(VERIFY_READ, va, sizeof(u64))) { 761 705 MIPS_FPU_EMU_INC_STATS(errors); 706 + *fault_addr = va; 762 707 return SIGBUS; 708 + } 709 + if (__get_user(val, va)) { 710 + MIPS_FPU_EMU_INC_STATS(errors); 711 + *fault_addr = va; 712 + return SIGSEGV; 763 713 } 764 714 DITOREG(val, 
MIPSInst_FD(ir)); 765 715 break; ··· 776 714 777 715 MIPS_FPU_EMU_INC_STATS(stores); 778 716 DIFROMREG(val, MIPSInst_FS(ir)); 779 - if (put_user(val, va)) { 717 + if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) { 780 718 MIPS_FPU_EMU_INC_STATS(errors); 719 + *fault_addr = va; 781 720 return SIGBUS; 721 + } 722 + if (__put_user(val, va)) { 723 + MIPS_FPU_EMU_INC_STATS(errors); 724 + *fault_addr = va; 725 + return SIGSEGV; 782 726 } 783 727 break; 784 728 ··· 1310 1242 } 1311 1243 1312 1244 int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, 1313 - int has_fpu) 1245 + int has_fpu, void *__user *fault_addr) 1314 1246 { 1315 1247 unsigned long oldepc, prevepc; 1316 1248 mips_instruction insn; ··· 1320 1252 do { 1321 1253 prevepc = xcp->cp0_epc; 1322 1254 1323 - if (get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) { 1255 + if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) { 1324 1256 MIPS_FPU_EMU_INC_STATS(errors); 1257 + *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 1325 1258 return SIGBUS; 1259 + } 1260 + if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) { 1261 + MIPS_FPU_EMU_INC_STATS(errors); 1262 + *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 1263 + return SIGSEGV; 1326 1264 } 1327 1265 if (insn == 0) 1328 1266 xcp->cp0_epc += 4; /* skip nops */ ··· 1341 1267 */ 1342 1268 /* convert to ieee library modes */ 1343 1269 ieee754_csr.rm = ieee_rm[ieee754_csr.rm]; 1344 - sig = cop1Emulate(xcp, ctx); 1270 + sig = cop1Emulate(xcp, ctx, fault_addr); 1345 1271 /* revert to mips rounding mode */ 1346 1272 ieee754_csr.rm = mips_rm[ieee754_csr.rm]; 1347 1273 }
+3 -1
arch/mips/mm/dma-default.c
··· 288 288 return plat_dma_supported(dev, mask); 289 289 } 290 290 291 - void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size, 291 + void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 292 292 enum dma_data_direction direction) 293 293 { 294 294 BUG_ON(direction == DMA_NONE); ··· 297 297 if (!plat_device_is_coherent(dev)) 298 298 __dma_sync((unsigned long)vaddr, size, direction); 299 299 } 300 + 301 + EXPORT_SYMBOL(dma_cache_sync); 300 302 301 303 static struct dma_map_ops mips_default_dma_map_ops = { 302 304 .alloc_coherent = mips_dma_alloc_coherent,
+4
arch/mips/mm/sc-mips.c
··· 68 68 */ 69 69 static inline int mips_sc_is_activated(struct cpuinfo_mips *c) 70 70 { 71 + unsigned int config2 = read_c0_config2(); 72 + unsigned int tmp; 73 + 71 74 /* Check the bypass bit (L2B) */ 72 75 switch (c->cputype) { 73 76 case CPU_34K: ··· 86 83 c->scache.linesz = 2 << tmp; 87 84 else 88 85 return 0; 86 + return 1; 89 87 } 90 88 91 89 static inline int __init mips_sc_probe(void)
+10 -2
arch/mips/pmc-sierra/yosemite/py-console.c
··· 65 65 66 66 __asm__ __volatile__ ( 67 67 " .set mips3 \n" 68 + " .set push \n" 69 + " .set noreorder \n" 70 + " .set nomacro \n" 68 71 " ld %0, %1 \n" 72 + " .set pop \n" 69 73 " lbu %0, (%0) \n" 70 74 " .set mips0 \n" 71 75 : "=r" (res) 72 - : "m" (vaddr)); 76 + : "R" (vaddr)); 73 77 74 78 write_c0_status(sr); 75 79 ssnop_4(); ··· 93 89 94 90 __asm__ __volatile__ ( 95 91 " .set mips3 \n" 92 + " .set push \n" 93 + " .set noreorder \n" 94 + " .set nomacro \n" 96 95 " ld %0, %1 \n" 96 + " .set pop \n" 97 97 " sb %2, (%0) \n" 98 98 " .set mips0 \n" 99 99 : "=&r" (tmp) 100 - : "m" (vaddr), "r" (c)); 100 + : "R" (vaddr), "r" (c)); 101 101 102 102 write_c0_status(sr); 103 103 ssnop_4();
+4 -4
arch/mips/sibyte/swarm/setup.c
··· 82 82 enum swarm_rtc_type { 83 83 RTC_NONE, 84 84 RTC_XICOR, 85 - RTC_M4LT81 85 + RTC_M41T81, 86 86 }; 87 87 88 88 enum swarm_rtc_type swarm_rtc_type; ··· 96 96 sec = xicor_get_time(); 97 97 break; 98 98 99 - case RTC_M4LT81: 99 + case RTC_M41T81: 100 100 sec = m41t81_get_time(); 101 101 break; 102 102 ··· 115 115 case RTC_XICOR: 116 116 return xicor_set_time(sec); 117 117 118 - case RTC_M4LT81: 118 + case RTC_M41T81: 119 119 return m41t81_set_time(sec); 120 120 121 121 case RTC_NONE: ··· 141 141 if (xicor_probe()) 142 142 swarm_rtc_type = RTC_XICOR; 143 143 if (m41t81_probe()) 144 - swarm_rtc_type = RTC_M4LT81; 144 + swarm_rtc_type = RTC_M41T81; 145 145 146 146 #ifdef CONFIG_VT 147 147 screen_info = (struct screen_info) {
+3 -7
arch/mn10300/kernel/time.c
··· 40 40 unsigned long long ll; 41 41 unsigned l[2]; 42 42 } tsc64, result; 43 - unsigned long tsc, tmp; 43 + unsigned long tmp; 44 44 unsigned product[3]; /* 96-bit intermediate value */ 45 45 46 46 /* cnt32_to_63() is not safe with preemption */ 47 47 preempt_disable(); 48 48 49 - /* read the TSC value 50 - */ 51 - tsc = get_cycles(); 52 - 53 - /* expand to 64-bits. 49 + /* expand the tsc to 64-bits. 54 50 * - sched_clock() must be called once a minute or better or the 55 51 * following will go horribly wrong - see cnt32_to_63() 56 52 */ 57 - tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL; 53 + tsc64.ll = cnt32_to_63(get_cycles()) & 0x7fffffffffffffffULL; 58 54 59 55 preempt_enable(); 60 56
+1 -1
arch/tile/include/asm/signal.h
··· 25 25 26 26 #if defined(__KERNEL__) && !defined(__ASSEMBLY__) 27 27 struct pt_regs; 28 - int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *); 28 + int restore_sigcontext(struct pt_regs *, struct sigcontext __user *); 29 29 int setup_sigcontext(struct sigcontext __user *, struct pt_regs *); 30 30 void do_signal(struct pt_regs *regs); 31 31 #endif
+3 -3
arch/tile/kernel/compat_signal.c
··· 290 290 return ret; 291 291 } 292 292 293 + /* The assembly shim for this function arranges to ignore the return value. */ 293 294 long compat_sys_rt_sigreturn(struct pt_regs *regs) 294 295 { 295 296 struct compat_rt_sigframe __user *frame = 296 297 (struct compat_rt_sigframe __user *) compat_ptr(regs->sp); 297 298 sigset_t set; 298 - long r0; 299 299 300 300 if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 301 301 goto badframe; ··· 308 308 recalc_sigpending(); 309 309 spin_unlock_irq(&current->sighand->siglock); 310 310 311 - if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) 311 + if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) 312 312 goto badframe; 313 313 314 314 if (compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0) 315 315 goto badframe; 316 316 317 - return r0; 317 + return 0; 318 318 319 319 badframe: 320 320 force_sig(SIGSEGV, current);
+21 -3
arch/tile/kernel/intvec_32.S
··· 1342 1342 lw r20, r20 1343 1343 1344 1344 /* Jump to syscall handler. */ 1345 - jalr r20; .Lhandle_syscall_link: 1346 - FEEDBACK_REENTER(handle_syscall) 1345 + jalr r20 1346 + .Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */ 1347 1347 1348 1348 /* 1349 1349 * Write our r0 onto the stack so it gets restored instead ··· 1351 1351 */ 1352 1352 PTREGS_PTR(r29, PTREGS_OFFSET_REG(0)) 1353 1353 sw r29, r0 1354 + 1355 + .Lsyscall_sigreturn_skip: 1356 + FEEDBACK_REENTER(handle_syscall) 1354 1357 1355 1358 /* Do syscall trace again, if requested. */ 1356 1359 lw r30, r31 ··· 1539 1536 }; \ 1540 1537 STD_ENDPROC(_##x) 1541 1538 1539 + /* 1540 + * Special-case sigreturn to not write r0 to the stack on return. 1541 + * This is technically more efficient, but it also avoids difficulties 1542 + * in the 64-bit OS when handling 32-bit compat code, since we must not 1543 + * sign-extend r0 for the sigreturn return-value case. 1544 + */ 1545 + #define PTREGS_SYSCALL_SIGRETURN(x, reg) \ 1546 + STD_ENTRY(_##x); \ 1547 + addli lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \ 1548 + { \ 1549 + PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \ 1550 + j x \ 1551 + }; \ 1552 + STD_ENDPROC(_##x) 1553 + 1542 1554 PTREGS_SYSCALL(sys_execve, r3) 1543 1555 PTREGS_SYSCALL(sys_sigaltstack, r2) 1544 - PTREGS_SYSCALL(sys_rt_sigreturn, r0) 1556 + PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0) 1545 1557 PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1) 1546 1558 1547 1559 /* Save additional callee-saves to pt_regs, put address in r4 and jump. */
+8
arch/tile/kernel/process.c
··· 212 212 childregs->sp = sp; /* override with new user stack pointer */ 213 213 214 214 /* 215 + * If CLONE_SETTLS is set, set "tp" in the new task to "r4", 216 + * which is passed in as arg #5 to sys_clone(). 217 + */ 218 + if (clone_flags & CLONE_SETTLS) 219 + childregs->tp = regs->regs[4]; 220 + 221 + /* 215 222 * Copy the callee-saved registers from the passed pt_regs struct 216 223 * into the context-switch callee-saved registers area. 217 224 * This way when we start the interrupt-return sequence, the ··· 546 539 return __switch_to(prev, next, next_current_ksp0(next)); 547 540 } 548 541 542 + /* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */ 549 543 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 550 544 void __user *, parent_tidptr, void __user *, child_tidptr, 551 545 struct pt_regs *, regs)
+4 -6
arch/tile/kernel/signal.c
··· 52 52 */ 53 53 54 54 int restore_sigcontext(struct pt_regs *regs, 55 - struct sigcontext __user *sc, long *pr0) 55 + struct sigcontext __user *sc) 56 56 { 57 57 int err = 0; 58 58 int i; ··· 75 75 76 76 regs->faultnum = INT_SWINT_1_SIGRETURN; 77 77 78 - err |= __get_user(*pr0, &sc->gregs[0]); 79 78 return err; 80 79 } 81 80 82 - /* sigreturn() returns long since it restores r0 in the interrupted code. */ 81 + /* The assembly shim for this function arranges to ignore the return value. */ 83 82 SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs) 84 83 { 85 84 struct rt_sigframe __user *frame = 86 85 (struct rt_sigframe __user *)(regs->sp); 87 86 sigset_t set; 88 - long r0; 89 87 90 88 if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 91 89 goto badframe; ··· 96 98 recalc_sigpending(); 97 99 spin_unlock_irq(&current->sighand->siglock); 98 100 99 - if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) 101 + if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) 100 102 goto badframe; 101 103 102 104 if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) 103 105 goto badframe; 104 106 105 - return r0; 107 + return 0; 106 108 107 109 badframe: 108 110 force_sig(SIGSEGV, current);
+1 -1
arch/x86/boot/compressed/misc.c
··· 355 355 if (heap > 0x3fffffffffffUL) 356 356 error("Destination address too large"); 357 357 #else 358 - if (heap > ((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff)) 358 + if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff)) 359 359 error("Destination address too large"); 360 360 #endif 361 361 #ifndef CONFIG_RELOCATABLE
+3
arch/x86/include/asm/e820.h
··· 72 72 #define BIOS_BEGIN 0x000a0000 73 73 #define BIOS_END 0x00100000 74 74 75 + #define BIOS_ROM_BASE 0xffe00000 76 + #define BIOS_ROM_END 0xffffffff 77 + 75 78 #ifdef __KERNEL__ 76 79 /* see comment in arch/x86/kernel/e820.c */ 77 80 extern struct e820map e820;
+1 -1
arch/x86/include/asm/kvm_host.h
··· 79 79 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT) 80 80 #define KVM_MIN_FREE_MMU_PAGES 5 81 81 #define KVM_REFILL_PAGES 25 82 - #define KVM_MAX_CPUID_ENTRIES 40 82 + #define KVM_MAX_CPUID_ENTRIES 80 83 83 #define KVM_NR_FIXED_MTRR_REGION 88 84 84 #define KVM_NR_VAR_MTRR 8 85 85
+1
arch/x86/kernel/Makefile
··· 45 45 obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o 46 46 obj-y += tsc.o io_delay.o rtc.o 47 47 obj-y += pci-iommu_table.o 48 + obj-y += resource.o 48 49 49 50 obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o 50 51 obj-y += process.o
+8
arch/x86/kernel/apic/apic.c
··· 1389 1389 1390 1390 setup_apic_nmi_watchdog(NULL); 1391 1391 apic_pm_activate(); 1392 + 1393 + /* 1394 + * Now that local APIC setup is completed for BP, configure the fault 1395 + * handling for interrupt remapping. 1396 + */ 1397 + if (!smp_processor_id() && intr_remapping_enabled) 1398 + enable_drhd_fault_handling(); 1399 + 1392 1400 } 1393 1401 1394 1402 #ifdef CONFIG_X86_X2APIC
+2 -2
arch/x86/kernel/apic/io_apic.c
··· 2430 2430 { 2431 2431 struct irq_cfg *cfg = data->chip_data; 2432 2432 int i, do_unmask_irq = 0, irq = data->irq; 2433 - struct irq_desc *desc = irq_to_desc(irq); 2434 2433 unsigned long v; 2435 2434 2436 2435 irq_complete_move(cfg); 2437 2436 #ifdef CONFIG_GENERIC_PENDING_IRQ 2438 2437 /* If we are moving the irq we need to mask it */ 2439 - if (unlikely(desc->status & IRQ_MOVE_PENDING)) { 2438 + if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) { 2440 2439 do_unmask_irq = 1; 2441 2440 mask_ioapic(cfg); 2442 2441 } ··· 3412 3413 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3413 3414 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3414 3415 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3416 + msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); 3415 3417 3416 3418 dmar_msi_write(irq, &msg); 3417 3419
-7
arch/x86/kernel/apic/probe_64.c
··· 79 79 /* need to update phys_pkg_id */ 80 80 apic->phys_pkg_id = apicid_phys_pkg_id; 81 81 } 82 - 83 - /* 84 - * Now that apic routing model is selected, configure the 85 - * fault handling for intr remapping. 86 - */ 87 - if (intr_remapping_enabled) 88 - enable_drhd_fault_handling(); 89 82 } 90 83 91 84 /* Same for both flat and physical. */
+9 -7
arch/x86/kernel/head_32.S
··· 60 60 #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD) 61 61 #endif 62 62 63 + /* Number of possible pages in the lowmem region */ 64 + LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) 65 + 63 66 /* Enough space to fit pagetables for the low memory linear map */ 64 - MAPPING_BEYOND_END = \ 65 - PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT 67 + MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT 66 68 67 69 /* 68 70 * Worst-case size of the kernel mapping we need to make: 69 - * the worst-case size of the kernel itself, plus the extra we need 70 - * to map for the linear map. 71 + * a relocatable kernel can live anywhere in lowmem, so we need to be able 72 + * to map all of lowmem. 71 73 */ 72 - KERNEL_PAGES = (KERNEL_IMAGE_SIZE + MAPPING_BEYOND_END)>>PAGE_SHIFT 74 + KERNEL_PAGES = LOWMEM_PAGES 73 75 74 76 INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm 75 77 RESERVE_BRK(pagetables, INIT_MAP_SIZE) ··· 622 620 __PAGE_ALIGNED_BSS 623 621 .align PAGE_SIZE_asm 624 622 #ifdef CONFIG_X86_PAE 625 - initial_pg_pmd: 623 + ENTRY(initial_pg_pmd) 626 624 .fill 1024*KPMDS,4,0 627 625 #else 628 626 ENTRY(initial_page_table) 629 627 .fill 1024,4,0 630 628 #endif 631 - initial_pg_fixmap: 629 + ENTRY(initial_pg_fixmap) 632 630 .fill 1024,4,0 633 631 ENTRY(empty_zero_page) 634 632 .fill 4096,1,0
+16 -10
arch/x86/kernel/hpet.c
··· 27 27 #define HPET_DEV_FSB_CAP 0x1000 28 28 #define HPET_DEV_PERI_CAP 0x2000 29 29 30 + #define HPET_MIN_CYCLES 128 31 + #define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1)) 32 + 30 33 #define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt) 31 34 32 35 /* ··· 302 299 /* Calculate the min / max delta */ 303 300 hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, 304 301 &hpet_clockevent); 305 - /* 5 usec minimum reprogramming delta. */ 306 - hpet_clockevent.min_delta_ns = 5000; 302 + /* Setup minimum reprogramming delta. */ 303 + hpet_clockevent.min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, 304 + &hpet_clockevent); 307 305 308 306 /* 309 307 * Start hpet with the boot cpu mask and make it ··· 397 393 * the wraparound into account) nor a simple count down event 398 394 * mode. Further the write to the comparator register is 399 395 * delayed internally up to two HPET clock cycles in certain 400 - * chipsets (ATI, ICH9,10). We worked around that by reading 401 - * back the compare register, but that required another 402 - * workaround for ICH9,10 chips where the first readout after 403 - * write can return the old stale value. We already have a 404 - * minimum delta of 5us enforced, but a NMI or SMI hitting 396 + * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even 397 + * longer delays. We worked around that by reading back the 398 + * compare register, but that required another workaround for 399 + * ICH9,10 chips where the first readout after write can 400 + * return the old stale value. We already had a minimum 401 + * programming delta of 5us enforced, but a NMI or SMI hitting 405 402 * between the counter readout and the comparator write can 406 403 * move us behind that point easily. 
Now instead of reading 407 404 * the compare register back several times, we make the ETIME 408 405 * decision based on the following: Return ETIME if the 409 - * counter value after the write is less than 8 HPET cycles 406 + * counter value after the write is less than HPET_MIN_CYCLES 410 407 * away from the event or if the counter is already ahead of 411 - * the event. 408 + * the event. The minimum programming delta for the generic 409 + * clockevents code is set to 1.5 * HPET_MIN_CYCLES. 412 410 */ 413 411 res = (s32)(cnt - hpet_readl(HPET_COUNTER)); 414 412 415 - return res < 8 ? -ETIME : 0; 413 + return res < HPET_MIN_CYCLES ? -ETIME : 0; 416 414 } 417 415 418 416 static void hpet_legacy_set_mode(enum clock_event_mode mode,
+48
arch/x86/kernel/resource.c
··· 1 + #include <linux/ioport.h> 2 + #include <asm/e820.h> 3 + 4 + static void resource_clip(struct resource *res, resource_size_t start, 5 + resource_size_t end) 6 + { 7 + resource_size_t low = 0, high = 0; 8 + 9 + if (res->end < start || res->start > end) 10 + return; /* no conflict */ 11 + 12 + if (res->start < start) 13 + low = start - res->start; 14 + 15 + if (res->end > end) 16 + high = res->end - end; 17 + 18 + /* Keep the area above or below the conflict, whichever is larger */ 19 + if (low > high) 20 + res->end = start - 1; 21 + else 22 + res->start = end + 1; 23 + } 24 + 25 + static void remove_e820_regions(struct resource *avail) 26 + { 27 + int i; 28 + struct e820entry *entry; 29 + 30 + for (i = 0; i < e820.nr_map; i++) { 31 + entry = &e820.map[i]; 32 + 33 + resource_clip(avail, entry->addr, 34 + entry->addr + entry->size - 1); 35 + } 36 + } 37 + 38 + void arch_remove_reservations(struct resource *avail) 39 + { 40 + /* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */ 41 + if (avail->flags & IORESOURCE_MEM) { 42 + if (avail->start < BIOS_END) 43 + avail->start = BIOS_END; 44 + resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END); 45 + 46 + remove_e820_regions(avail); 47 + } 48 + }
-1
arch/x86/kernel/setup.c
··· 769 769 770 770 x86_init.oem.arch_setup(); 771 771 772 - resource_alloc_from_bottom = 0; 773 772 iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; 774 773 setup_memory_map(); 775 774 parse_setup_data();
+2 -1
arch/x86/kernel/xsave.c
··· 394 394 * Setup init_xstate_buf to represent the init state of 395 395 * all the features managed by the xsave 396 396 */ 397 - init_xstate_buf = alloc_bootmem(xstate_size); 397 + init_xstate_buf = alloc_bootmem_align(xstate_size, 398 + __alignof__(struct xsave_struct)); 398 399 init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT; 399 400 400 401 clts();
+4
arch/x86/kvm/svm.c
··· 3494 3494 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) 3495 3495 { 3496 3496 switch (func) { 3497 + case 0x00000001: 3498 + /* Mask out xsave bit as long as it is not supported by SVM */ 3499 + entry->ecx &= ~(bit(X86_FEATURE_XSAVE)); 3500 + break; 3497 3501 case 0x80000001: 3498 3502 if (nested) 3499 3503 entry->ecx |= (1 << 2); /* Set SVM bit */
-5
arch/x86/kvm/vmx.c
··· 4227 4227 return PT_PDPE_LEVEL; 4228 4228 } 4229 4229 4230 - static inline u32 bit(int bitno) 4231 - { 4232 - return 1 << (bitno & 31); 4233 - } 4234 - 4235 4230 static void vmx_cpuid_update(struct kvm_vcpu *vcpu) 4236 4231 { 4237 4232 struct kvm_cpuid_entry2 *best;
+5 -6
arch/x86/kvm/x86.c
··· 155 155 156 156 u64 __read_mostly host_xcr0; 157 157 158 - static inline u32 bit(int bitno) 159 - { 160 - return 1 << (bitno & 31); 161 - } 162 - 163 158 static void kvm_on_user_return(struct user_return_notifier *urn) 164 159 { 165 160 unsigned slot; ··· 4564 4569 #ifdef CONFIG_CPU_FREQ 4565 4570 struct cpufreq_policy policy; 4566 4571 memset(&policy, 0, sizeof(policy)); 4567 - cpufreq_get_policy(&policy, get_cpu()); 4572 + cpu = get_cpu(); 4573 + cpufreq_get_policy(&policy, cpu); 4568 4574 if (policy.cpuinfo.max_freq) 4569 4575 max_tsc_khz = policy.cpuinfo.max_freq; 4576 + put_cpu(); 4570 4577 #endif 4571 4578 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, 4572 4579 CPUFREQ_TRANSITION_NOTIFIER); ··· 5519 5522 5520 5523 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 5521 5524 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 5525 + if (sregs->cr4 & X86_CR4_OSXSAVE) 5526 + update_cpuid(vcpu); 5522 5527 if (!is_long_mode(vcpu) && is_pae(vcpu)) { 5523 5528 load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3); 5524 5529 mmu_reset_needed = 1;
+5
arch/x86/kvm/x86.h
··· 70 70 return kvm_read_cr0_bits(vcpu, X86_CR0_PG); 71 71 } 72 72 73 + static inline u32 bit(int bitno) 74 + { 75 + return 1 << (bitno & 31); 76 + } 77 + 73 78 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); 74 79 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); 75 80 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq);
+8 -8
arch/x86/lguest/boot.c
··· 531 531 { 532 532 lguest_data.pgdir = cr3; 533 533 lazy_hcall1(LHCALL_NEW_PGTABLE, cr3); 534 - cr3_changed = true; 534 + 535 + /* These two page tables are simple, linear, and used during boot */ 536 + if (cr3 != __pa(swapper_pg_dir) && cr3 != __pa(initial_page_table)) 537 + cr3_changed = true; 535 538 } 536 539 537 540 static unsigned long lguest_read_cr3(void) ··· 706 703 * to forget all of them. Fortunately, this is very rare. 707 704 * 708 705 * ... except in early boot when the kernel sets up the initial pagetables, 709 - * which makes booting astonishingly slow: 1.83 seconds! So we don't even tell 710 - * the Host anything changed until we've done the first page table switch, 711 - * which brings boot back to 0.25 seconds. 706 + * which makes booting astonishingly slow: 48 seconds! So we don't even tell 707 + * the Host anything changed until we've done the first real page table switch, 708 + * which brings boot back to 4.3 seconds. 712 709 */ 713 710 static void lguest_set_pte(pte_t *ptep, pte_t pteval) 714 711 { ··· 1005 1002 clockevents_register_device(&lguest_clockevent); 1006 1003 1007 1004 /* Finally, we unblock the timer interrupt. */ 1008 - enable_lguest_irq(0); 1005 + clear_bit(0, lguest_data.blocked_interrupts); 1009 1006 } 1010 1007 1011 1008 /* ··· 1351 1348 * per-cpu segment descriptor register %fs as well. 1352 1349 */ 1353 1350 switch_to_new_gdt(0); 1354 - 1355 - /* We actually boot with all memory mapped, but let's say 128MB. */ 1356 - max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT; 1357 1351 1358 1352 /* 1359 1353 * The Host<->Guest Switcher lives at the top of our address space, and
+105
arch/x86/lguest/i386_head.S
··· 4 4 #include <asm/asm-offsets.h> 5 5 #include <asm/thread_info.h> 6 6 #include <asm/processor-flags.h> 7 + #include <asm/pgtable.h> 7 8 8 9 /*G:020 9 10 * Our story starts with the kernel booting into startup_32 in ··· 38 37 /* Set up the initial stack so we can run C code. */ 39 38 movl $(init_thread_union+THREAD_SIZE),%esp 40 39 40 + call init_pagetables 41 + 41 42 /* Jumps are relative: we're running __PAGE_OFFSET too low. */ 42 43 jmp lguest_init+__PAGE_OFFSET 44 + 45 + /* 46 + * Initialize page tables. This creates a PDE and a set of page 47 + * tables, which are located immediately beyond __brk_base. The variable 48 + * _brk_end is set up to point to the first "safe" location. 49 + * Mappings are created both at virtual address 0 (identity mapping) 50 + * and PAGE_OFFSET for up to _end. 51 + * 52 + * FIXME: This code is taken verbatim from arch/x86/kernel/head_32.S: they 53 + * don't have a stack at this point, so we can't just use call and ret. 54 + */ 55 + init_pagetables: 56 + #if PTRS_PER_PMD > 1 57 + #define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD) 58 + #else 59 + #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD) 60 + #endif 61 + #define pa(X) ((X) - __PAGE_OFFSET) 62 + 63 + /* Enough space to fit pagetables for the low memory linear map */ 64 + MAPPING_BEYOND_END = \ 65 + PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT 66 + #ifdef CONFIG_X86_PAE 67 + 68 + /* 69 + * In PAE mode initial_page_table is statically defined to contain 70 + * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3 71 + * entries). The identity mapping is handled by pointing two PGD entries 72 + * to the first kernel PMD. 73 + * 74 + * Note the upper half of each PMD or PTE are always zero at this stage. 
75 + */ 76 + 77 + #define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */ 78 + 79 + xorl %ebx,%ebx /* %ebx is kept at zero */ 80 + 81 + movl $pa(__brk_base), %edi 82 + movl $pa(initial_pg_pmd), %edx 83 + movl $PTE_IDENT_ATTR, %eax 84 + 10: 85 + leal PDE_IDENT_ATTR(%edi),%ecx /* Create PMD entry */ 86 + movl %ecx,(%edx) /* Store PMD entry */ 87 + /* Upper half already zero */ 88 + addl $8,%edx 89 + movl $512,%ecx 90 + 11: 91 + stosl 92 + xchgl %eax,%ebx 93 + stosl 94 + xchgl %eax,%ebx 95 + addl $0x1000,%eax 96 + loop 11b 97 + 98 + /* 99 + * End condition: we must map up to the end + MAPPING_BEYOND_END. 100 + */ 101 + movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp 102 + cmpl %ebp,%eax 103 + jb 10b 104 + 1: 105 + addl $__PAGE_OFFSET, %edi 106 + movl %edi, pa(_brk_end) 107 + shrl $12, %eax 108 + movl %eax, pa(max_pfn_mapped) 109 + 110 + /* Do early initialization of the fixmap area */ 111 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax 112 + movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8) 113 + #else /* Not PAE */ 114 + 115 + page_pde_offset = (__PAGE_OFFSET >> 20); 116 + 117 + movl $pa(__brk_base), %edi 118 + movl $pa(initial_page_table), %edx 119 + movl $PTE_IDENT_ATTR, %eax 120 + 10: 121 + leal PDE_IDENT_ATTR(%edi),%ecx /* Create PDE entry */ 122 + movl %ecx,(%edx) /* Store identity PDE entry */ 123 + movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */ 124 + addl $4,%edx 125 + movl $1024, %ecx 126 + 11: 127 + stosl 128 + addl $0x1000,%eax 129 + loop 11b 130 + /* 131 + * End condition: we must map up to the end + MAPPING_BEYOND_END. 
132 + */ 133 + movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp 134 + cmpl %ebp,%eax 135 + jb 10b 136 + addl $__PAGE_OFFSET, %edi 137 + movl %edi, pa(_brk_end) 138 + shrl $12, %eax 139 + movl %eax, pa(max_pfn_mapped) 140 + 141 + /* Do early initialization of the fixmap area */ 142 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax 143 + movl %eax,pa(initial_page_table+0xffc) 144 + #endif 145 + ret 43 146 44 147 /*G:055 45 148 * We create a macro which puts the assembler code between lgstart_ and lgend_
+5 -13
arch/x86/pci/i386.c
··· 65 65 resource_size_t size, resource_size_t align) 66 66 { 67 67 struct pci_dev *dev = data; 68 - resource_size_t start = round_down(res->end - size + 1, align); 68 + resource_size_t start = res->start; 69 69 70 70 if (res->flags & IORESOURCE_IO) { 71 - 72 - /* 73 - * If we're avoiding ISA aliases, the largest contiguous I/O 74 - * port space is 256 bytes. Clearing bits 9 and 10 preserves 75 - * all 256-byte and smaller alignments, so the result will 76 - * still be correctly aligned. 77 - */ 78 - if (!skip_isa_ioresource_align(dev)) 79 - start &= ~0x300; 80 - } else if (res->flags & IORESOURCE_MEM) { 81 - if (start < BIOS_END) 82 - start = res->end; /* fail; no space */ 71 + if (skip_isa_ioresource_align(dev)) 72 + return start; 73 + if (start & 0x300) 74 + start = (start + 0x3ff) & ~0x3ff; 83 75 } 84 76 return start; 85 77 }
+2 -2
arch/x86/vdso/Makefile
··· 25 25 26 26 export CPPFLAGS_vdso.lds += -P -C 27 27 28 - VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -Wl,-soname=linux-vdso.so.1 \ 28 + VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ 29 29 -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 30 30 31 31 $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so ··· 69 69 vdso32-images = $(vdso32.so-y:%=vdso32-%.so) 70 70 71 71 CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds) 72 - VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1 72 + VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-soname=linux-gate.so.1 73 73 74 74 # This makes sure the $(obj) subdirectory exists even though vdso32/ 75 75 # is not a kbuild sub-make subdirectory.
+3 -2
block/blk-map.c
··· 201 201 for (i = 0; i < iov_count; i++) { 202 202 unsigned long uaddr = (unsigned long)iov[i].iov_base; 203 203 204 + if (!iov[i].iov_len) 205 + return -EINVAL; 206 + 204 207 if (uaddr & queue_dma_alignment(q)) { 205 208 unaligned = 1; 206 209 break; 207 210 } 208 - if (!iov[i].iov_len) 209 - return -EINVAL; 210 211 } 211 212 212 213 if (unaligned || (q->dma_pad_mask & len) || map_data)
+3 -3
block/blk-merge.c
··· 21 21 return 0; 22 22 23 23 fbio = bio; 24 - cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 24 + cluster = blk_queue_cluster(q); 25 25 seg_size = 0; 26 26 nr_phys_segs = 0; 27 27 for_each_bio(bio) { ··· 87 87 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, 88 88 struct bio *nxt) 89 89 { 90 - if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags)) 90 + if (!blk_queue_cluster(q)) 91 91 return 0; 92 92 93 93 if (bio->bi_seg_back_size + nxt->bi_seg_front_size > ··· 123 123 int nsegs, cluster; 124 124 125 125 nsegs = 0; 126 - cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 126 + cluster = blk_queue_cluster(q); 127 127 128 128 /* 129 129 * for each bio in rq
+22 -29
block/blk-settings.c
··· 126 126 lim->alignment_offset = 0; 127 127 lim->io_opt = 0; 128 128 lim->misaligned = 0; 129 - lim->no_cluster = 0; 129 + lim->cluster = 1; 130 130 } 131 131 EXPORT_SYMBOL(blk_set_default_limits); 132 132 ··· 229 229 EXPORT_SYMBOL(blk_queue_bounce_limit); 230 230 231 231 /** 232 - * blk_queue_max_hw_sectors - set max sectors for a request for this queue 233 - * @q: the request queue for the device 232 + * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request 233 + * @limits: the queue limits 234 234 * @max_hw_sectors: max hardware sectors in the usual 512b unit 235 235 * 236 236 * Description: ··· 244 244 * per-device basis in /sys/block/<device>/queue/max_sectors_kb. 245 245 * The soft limit can not exceed max_hw_sectors. 246 246 **/ 247 - void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) 247 + void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors) 248 248 { 249 249 if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { 250 250 max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); ··· 252 252 __func__, max_hw_sectors); 253 253 } 254 254 255 - q->limits.max_hw_sectors = max_hw_sectors; 256 - q->limits.max_sectors = min_t(unsigned int, max_hw_sectors, 257 - BLK_DEF_MAX_SECTORS); 255 + limits->max_hw_sectors = max_hw_sectors; 256 + limits->max_sectors = min_t(unsigned int, max_hw_sectors, 257 + BLK_DEF_MAX_SECTORS); 258 + } 259 + EXPORT_SYMBOL(blk_limits_max_hw_sectors); 260 + 261 + /** 262 + * blk_queue_max_hw_sectors - set max sectors for a request for this queue 263 + * @q: the request queue for the device 264 + * @max_hw_sectors: max hardware sectors in the usual 512b unit 265 + * 266 + * Description: 267 + * See description for blk_limits_max_hw_sectors(). 
268 + **/ 269 + void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) 270 + { 271 + blk_limits_max_hw_sectors(&q->limits, max_hw_sectors); 258 272 } 259 273 EXPORT_SYMBOL(blk_queue_max_hw_sectors); 260 274 ··· 478 464 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) 479 465 { 480 466 blk_stack_limits(&t->limits, &b->limits, 0); 481 - 482 - if (!t->queue_lock) 483 - WARN_ON_ONCE(1); 484 - else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { 485 - unsigned long flags; 486 - spin_lock_irqsave(t->queue_lock, flags); 487 - queue_flag_clear(QUEUE_FLAG_CLUSTER, t); 488 - spin_unlock_irqrestore(t->queue_lock, flags); 489 - } 490 467 } 491 468 EXPORT_SYMBOL(blk_queue_stack_limits); 492 469 ··· 550 545 t->io_min = max(t->io_min, b->io_min); 551 546 t->io_opt = lcm(t->io_opt, b->io_opt); 552 547 553 - t->no_cluster |= b->no_cluster; 548 + t->cluster &= b->cluster; 554 549 t->discard_zeroes_data &= b->discard_zeroes_data; 555 550 556 551 /* Physical block size a multiple of the logical block size? */ ··· 646 641 sector_t offset) 647 642 { 648 643 struct request_queue *t = disk->queue; 649 - struct request_queue *b = bdev_get_queue(bdev); 650 644 651 645 if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) { 652 646 char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE]; ··· 655 651 656 652 printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", 657 653 top, bottom); 658 - } 659 - 660 - if (!t->queue_lock) 661 - WARN_ON_ONCE(1); 662 - else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { 663 - unsigned long flags; 664 - 665 - spin_lock_irqsave(t->queue_lock, flags); 666 - if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) 667 - queue_flag_clear(QUEUE_FLAG_CLUSTER, t); 668 - spin_unlock_irqrestore(t->queue_lock, flags); 669 654 } 670 655 } 671 656 EXPORT_SYMBOL(disk_stack_limits);
+1 -1
block/blk-sysfs.c
··· 119 119 120 120 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) 121 121 { 122 - if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags)) 122 + if (blk_queue_cluster(q)) 123 123 return queue_var_show(queue_max_segment_size(q), (page)); 124 124 125 125 return queue_var_show(PAGE_CACHE_SIZE, (page));
+25 -14
block/blk-throttle.c
··· 355 355 tg->slice_end[rw], jiffies); 356 356 } 357 357 358 + static inline void throtl_set_slice_end(struct throtl_data *td, 359 + struct throtl_grp *tg, bool rw, unsigned long jiffy_end) 360 + { 361 + tg->slice_end[rw] = roundup(jiffy_end, throtl_slice); 362 + } 363 + 358 364 static inline void throtl_extend_slice(struct throtl_data *td, 359 365 struct throtl_grp *tg, bool rw, unsigned long jiffy_end) 360 366 { ··· 396 390 */ 397 391 if (throtl_slice_used(td, tg, rw)) 398 392 return; 393 + 394 + /* 395 + * A bio has been dispatched. Also adjust slice_end. It might happen 396 + * that initially cgroup limit was very low resulting in high 397 + * slice_end, but later limit was bumped up and bio was dispached 398 + * sooner, then we need to reduce slice_end. A high bogus slice_end 399 + * is bad because it does not allow new slice to start. 400 + */ 401 + 402 + throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice); 399 403 400 404 time_elapsed = jiffies - tg->slice_start[rw]; 401 405 ··· 725 709 struct throtl_grp *tg; 726 710 struct hlist_node *pos, *n; 727 711 728 - /* 729 - * Make sure atomic_inc() effects from 730 - * throtl_update_blkio_group_read_bps(), group of functions are 731 - * visible. 732 - * Is this required or smp_mb__after_atomic_inc() was suffcient 733 - * after the atomic_inc(). 734 - */ 735 - smp_rmb(); 736 712 if (!atomic_read(&td->limits_changed)) 737 713 return; 738 714 739 715 throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed)); 740 716 741 - hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) { 742 - /* 743 - * Do I need an smp_rmb() here to make sure tg->limits_changed 744 - * update is visible. I am relying on smp_rmb() at the 745 - * beginning of function and not putting a new one here. 746 - */ 717 + /* 718 + * Make sure updates from throtl_update_blkio_group_read_bps() group 719 + * of functions to tg->limits_changed are visible. 
We do not 720 + * want update td->limits_changed to be visible but update to 721 + * tg->limits_changed not being visible yet on this cpu. Hence 722 + * the read barrier. 723 + */ 724 + smp_rmb(); 747 725 726 + hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) { 748 727 if (throtl_tg_on_rr(tg) && tg->limits_changed) { 749 728 throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu" 750 729 " riops=%u wiops=%u", tg->bps[READ],
+2
drivers/block/cciss.c
··· 2834 2834 InquiryData_struct *inq_buff = NULL; 2835 2835 2836 2836 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { 2837 + if (!h->drv[logvol]) 2838 + continue; 2837 2839 if (memcmp(h->drv[logvol]->LunID, drv->LunID, 2838 2840 sizeof(drv->LunID)) == 0) { 2839 2841 FOUND = 1;
+8 -6
drivers/block/drbd/drbd_receiver.c
··· 3627 3627 } 3628 3628 3629 3629 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header); 3630 - rv = drbd_recv(mdev, &header->h80.payload, shs); 3631 - if (unlikely(rv != shs)) { 3632 - dev_err(DEV, "short read while reading sub header: rv=%d\n", rv); 3633 - goto err_out; 3634 - } 3635 - 3636 3630 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) { 3637 3631 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size); 3638 3632 goto err_out; 3633 + } 3634 + 3635 + if (shs) { 3636 + rv = drbd_recv(mdev, &header->h80.payload, shs); 3637 + if (unlikely(rv != shs)) { 3638 + dev_err(DEV, "short read while reading sub header: rv=%d\n", rv); 3639 + goto err_out; 3640 + } 3639 3641 } 3640 3642 3641 3643 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
+2 -1
drivers/block/drbd/drbd_req.h
··· 339 339 } 340 340 341 341 /* completion of master bio is outside of spinlock. 342 - * If you need it irqsave, do it your self! */ 342 + * If you need it irqsave, do it your self! 343 + * Which means: don't use from bio endio callback. */ 343 344 static inline int req_mod(struct drbd_request *req, 344 345 enum drbd_req_event what) 345 346 {
+9 -1
drivers/block/drbd/drbd_worker.c
··· 193 193 */ 194 194 void drbd_endio_pri(struct bio *bio, int error) 195 195 { 196 + unsigned long flags; 196 197 struct drbd_request *req = bio->bi_private; 197 198 struct drbd_conf *mdev = req->mdev; 199 + struct bio_and_error m; 198 200 enum drbd_req_event what; 199 201 int uptodate = bio_flagged(bio, BIO_UPTODATE); 200 202 ··· 222 220 bio_put(req->private_bio); 223 221 req->private_bio = ERR_PTR(error); 224 222 225 - req_mod(req, what); 223 + /* not req_mod(), we need irqsave here! */ 224 + spin_lock_irqsave(&mdev->req_lock, flags); 225 + __req_mod(req, what, &m); 226 + spin_unlock_irqrestore(&mdev->req_lock, flags); 227 + 228 + if (m.bio) 229 + complete_master_bio(mdev, &m); 226 230 } 227 231 228 232 int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+12 -7
drivers/clocksource/sh_cmt.c
··· 283 283 } while (delay); 284 284 } 285 285 286 + static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) 287 + { 288 + if (delta > p->max_match_value) 289 + dev_warn(&p->pdev->dev, "delta out of range\n"); 290 + 291 + p->next_match_value = delta; 292 + sh_cmt_clock_event_program_verify(p, 0); 293 + } 294 + 286 295 static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) 287 296 { 288 297 unsigned long flags; 289 298 290 - if (delta > p->max_match_value) 291 - dev_warn(&p->pdev->dev, "delta out of range\n"); 292 - 293 299 spin_lock_irqsave(&p->lock, flags); 294 - p->next_match_value = delta; 295 - sh_cmt_clock_event_program_verify(p, 0); 300 + __sh_cmt_set_next(p, delta); 296 301 spin_unlock_irqrestore(&p->lock, flags); 297 302 } 298 303 ··· 364 359 365 360 /* setup timeout if no clockevent */ 366 361 if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) 367 - sh_cmt_set_next(p, p->max_match_value); 362 + __sh_cmt_set_next(p, p->max_match_value); 368 363 out: 369 364 spin_unlock_irqrestore(&p->lock, flags); 370 365 ··· 386 381 387 382 /* adjust the timeout to maximum if only clocksource left */ 388 383 if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) 389 - sh_cmt_set_next(p, p->max_match_value); 384 + __sh_cmt_set_next(p, p->max_match_value); 390 385 391 386 spin_unlock_irqrestore(&p->lock, flags); 392 387 }
+65 -62
drivers/input/evdev.c
··· 534 534 } 535 535 #undef OLD_KEY_MAX 536 536 537 - static int evdev_handle_get_keycode(struct input_dev *dev, 538 - void __user *p, size_t size) 537 + static int evdev_handle_get_keycode(struct input_dev *dev, void __user *p) 538 + { 539 + struct input_keymap_entry ke = { 540 + .len = sizeof(unsigned int), 541 + .flags = 0, 542 + }; 543 + int __user *ip = (int __user *)p; 544 + int error; 545 + 546 + /* legacy case */ 547 + if (copy_from_user(ke.scancode, p, sizeof(unsigned int))) 548 + return -EFAULT; 549 + 550 + error = input_get_keycode(dev, &ke); 551 + if (error) 552 + return error; 553 + 554 + if (put_user(ke.keycode, ip + 1)) 555 + return -EFAULT; 556 + 557 + return 0; 558 + } 559 + 560 + static int evdev_handle_get_keycode_v2(struct input_dev *dev, void __user *p) 539 561 { 540 562 struct input_keymap_entry ke; 541 563 int error; 542 564 543 - memset(&ke, 0, sizeof(ke)); 565 + if (copy_from_user(&ke, p, sizeof(ke))) 566 + return -EFAULT; 544 567 545 - if (size == sizeof(unsigned int[2])) { 546 - /* legacy case */ 547 - int __user *ip = (int __user *)p; 568 + error = input_get_keycode(dev, &ke); 569 + if (error) 570 + return error; 548 571 549 - if (copy_from_user(ke.scancode, p, sizeof(unsigned int))) 550 - return -EFAULT; 572 + if (copy_to_user(p, &ke, sizeof(ke))) 573 + return -EFAULT; 551 574 552 - ke.len = sizeof(unsigned int); 553 - ke.flags = 0; 554 - 555 - error = input_get_keycode(dev, &ke); 556 - if (error) 557 - return error; 558 - 559 - if (put_user(ke.keycode, ip + 1)) 560 - return -EFAULT; 561 - 562 - } else { 563 - size = min(size, sizeof(ke)); 564 - 565 - if (copy_from_user(&ke, p, size)) 566 - return -EFAULT; 567 - 568 - error = input_get_keycode(dev, &ke); 569 - if (error) 570 - return error; 571 - 572 - if (copy_to_user(p, &ke, size)) 573 - return -EFAULT; 574 - } 575 575 return 0; 576 576 } 577 577 578 - static int evdev_handle_set_keycode(struct input_dev *dev, 579 - void __user *p, size_t size) 578 + static int 
evdev_handle_set_keycode(struct input_dev *dev, void __user *p) 579 + { 580 + struct input_keymap_entry ke = { 581 + .len = sizeof(unsigned int), 582 + .flags = 0, 583 + }; 584 + int __user *ip = (int __user *)p; 585 + 586 + if (copy_from_user(ke.scancode, p, sizeof(unsigned int))) 587 + return -EFAULT; 588 + 589 + if (get_user(ke.keycode, ip + 1)) 590 + return -EFAULT; 591 + 592 + return input_set_keycode(dev, &ke); 593 + } 594 + 595 + static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p) 580 596 { 581 597 struct input_keymap_entry ke; 582 598 583 - memset(&ke, 0, sizeof(ke)); 599 + if (copy_from_user(&ke, p, sizeof(ke))) 600 + return -EFAULT; 584 601 585 - if (size == sizeof(unsigned int[2])) { 586 - /* legacy case */ 587 - int __user *ip = (int __user *)p; 588 - 589 - if (copy_from_user(ke.scancode, p, sizeof(unsigned int))) 590 - return -EFAULT; 591 - 592 - if (get_user(ke.keycode, ip + 1)) 593 - return -EFAULT; 594 - 595 - ke.len = sizeof(unsigned int); 596 - ke.flags = 0; 597 - 598 - } else { 599 - size = min(size, sizeof(ke)); 600 - 601 - if (copy_from_user(&ke, p, size)) 602 - return -EFAULT; 603 - 604 - if (ke.len > sizeof(ke.scancode)) 605 - return -EINVAL; 606 - } 602 + if (ke.len > sizeof(ke.scancode)) 603 + return -EINVAL; 607 604 608 605 return input_set_keycode(dev, &ke); 609 606 } ··· 666 669 return evdev_grab(evdev, client); 667 670 else 668 671 return evdev_ungrab(evdev, client); 672 + 673 + case EVIOCGKEYCODE: 674 + return evdev_handle_get_keycode(dev, p); 675 + 676 + case EVIOCSKEYCODE: 677 + return evdev_handle_set_keycode(dev, p); 678 + 679 + case EVIOCGKEYCODE_V2: 680 + return evdev_handle_get_keycode_v2(dev, p); 681 + 682 + case EVIOCSKEYCODE_V2: 683 + return evdev_handle_set_keycode_v2(dev, p); 669 684 } 670 685 671 686 size = _IOC_SIZE(cmd); ··· 717 708 return -EFAULT; 718 709 719 710 return error; 720 - 721 - case EVIOC_MASK_SIZE(EVIOCGKEYCODE): 722 - return evdev_handle_get_keycode(dev, p, size); 723 - 724 - case 
EVIOC_MASK_SIZE(EVIOCSKEYCODE): 725 - return evdev_handle_set_keycode(dev, p, size); 726 711 } 727 712 728 713 /* Multi-number variable-length handlers */
+3
drivers/input/tablet/wacom_wac.c
··· 1436 1436 { "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT }; 1437 1437 static struct wacom_features wacom_features_0xD3 = 1438 1438 { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT }; 1439 + static const struct wacom_features wacom_features_0xD4 = 1440 + { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 255, 63, BAMBOO_PT }; 1439 1441 static struct wacom_features wacom_features_0xD8 = 1440 1442 { "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT }; 1441 1443 static struct wacom_features wacom_features_0xDA = ··· 1512 1510 { USB_DEVICE_WACOM(0xD1) }, 1513 1511 { USB_DEVICE_WACOM(0xD2) }, 1514 1512 { USB_DEVICE_WACOM(0xD3) }, 1513 + { USB_DEVICE_WACOM(0xD4) }, 1515 1514 { USB_DEVICE_WACOM(0xD8) }, 1516 1515 { USB_DEVICE_WACOM(0xDA) }, 1517 1516 { USB_DEVICE_WACOM(0xDB) },
+2 -8
drivers/md/dm-table.c
··· 517 517 */ 518 518 519 519 if (q->merge_bvec_fn && !ti->type->merge) 520 - limits->max_sectors = 521 - min_not_zero(limits->max_sectors, 522 - (unsigned int) (PAGE_SIZE >> 9)); 520 + blk_limits_max_hw_sectors(limits, 521 + (unsigned int) (PAGE_SIZE >> 9)); 523 522 return 0; 524 523 } 525 524 EXPORT_SYMBOL_GPL(dm_set_device_limits); ··· 1129 1130 * Copy table's limits to the DM device's request_queue 1130 1131 */ 1131 1132 q->limits = *limits; 1132 - 1133 - if (limits->no_cluster) 1134 - queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); 1135 - else 1136 - queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); 1137 1133 1138 1134 if (!dm_table_supports_discards(t)) 1139 1135 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
-3
drivers/md/md.c
··· 4295 4295 goto abort; 4296 4296 mddev->queue->queuedata = mddev; 4297 4297 4298 - /* Can be unlocked because the queue is new: no concurrency */ 4299 - queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue); 4300 - 4301 4298 blk_queue_make_request(mddev->queue, md_make_request); 4302 4299 4303 4300 disk = alloc_disk(1 << shift);
+4 -4
drivers/media/common/saa7146_hlp.c
··· 558 558 static void saa7146_set_position(struct saa7146_dev *dev, int w_x, int w_y, int w_height, enum v4l2_field field, u32 pixelformat) 559 559 { 560 560 struct saa7146_vv *vv = dev->vv_data; 561 - struct saa7146_format *sfmt = format_by_fourcc(dev, pixelformat); 561 + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pixelformat); 562 562 563 563 int b_depth = vv->ov_fmt->depth; 564 564 int b_bpl = vv->ov_fb.fmt.bytesperline; ··· 702 702 struct saa7146_vv *vv = dev->vv_data; 703 703 struct saa7146_video_dma vdma1; 704 704 705 - struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); 705 + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat); 706 706 707 707 int width = buf->fmt->width; 708 708 int height = buf->fmt->height; ··· 827 827 struct saa7146_video_dma vdma2; 828 828 struct saa7146_video_dma vdma3; 829 829 830 - struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); 830 + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat); 831 831 832 832 int width = buf->fmt->width; 833 833 int height = buf->fmt->height; ··· 994 994 995 995 void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next) 996 996 { 997 - struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); 997 + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat); 998 998 struct saa7146_vv *vv = dev->vv_data; 999 999 u32 vdma1_prot_addr; 1000 1000
+8 -8
drivers/media/common/saa7146_video.c
··· 84 84 85 85 static int NUM_FORMATS = sizeof(formats)/sizeof(struct saa7146_format); 86 86 87 - struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc) 87 + struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc) 88 88 { 89 89 int i, j = NUM_FORMATS; 90 90 ··· 266 266 struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); 267 267 struct scatterlist *list = dma->sglist; 268 268 int length = dma->sglen; 269 - struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); 269 + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat); 270 270 271 271 DEB_EE(("dev:%p, buf:%p, sg_len:%d\n",dev,buf,length)); 272 272 ··· 408 408 } 409 409 } 410 410 411 - fmt = format_by_fourcc(dev,fh->video_fmt.pixelformat); 411 + fmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat); 412 412 /* we need to have a valid format set here */ 413 413 BUG_ON(NULL == fmt); 414 414 ··· 460 460 return -EBUSY; 461 461 } 462 462 463 - fmt = format_by_fourcc(dev,fh->video_fmt.pixelformat); 463 + fmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat); 464 464 /* we need to have a valid format set here */ 465 465 BUG_ON(NULL == fmt); 466 466 ··· 536 536 return -EPERM; 537 537 538 538 /* check args */ 539 - fmt = format_by_fourcc(dev, fb->fmt.pixelformat); 539 + fmt = saa7146_format_by_fourcc(dev, fb->fmt.pixelformat); 540 540 if (NULL == fmt) 541 541 return -EINVAL; 542 542 ··· 760 760 761 761 DEB_EE(("V4L2_BUF_TYPE_VIDEO_CAPTURE: dev:%p, fh:%p\n", dev, fh)); 762 762 763 - fmt = format_by_fourcc(dev, f->fmt.pix.pixelformat); 763 + fmt = saa7146_format_by_fourcc(dev, f->fmt.pix.pixelformat); 764 764 if (NULL == fmt) 765 765 return -EINVAL; 766 766 ··· 1264 1264 buf->fmt = &fh->video_fmt; 1265 1265 buf->vb.field = fh->video_fmt.field; 1266 1266 1267 - sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); 1267 + sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat); 1268 1268 1269 1269 
release_all_pagetables(dev, buf); 1270 1270 if( 0 != IS_PLANAR(sfmt->trans)) { ··· 1378 1378 fh->video_fmt.pixelformat = V4L2_PIX_FMT_BGR24; 1379 1379 fh->video_fmt.bytesperline = 0; 1380 1380 fh->video_fmt.field = V4L2_FIELD_ANY; 1381 - sfmt = format_by_fourcc(dev,fh->video_fmt.pixelformat); 1381 + sfmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat); 1382 1382 fh->video_fmt.sizeimage = (fh->video_fmt.width * fh->video_fmt.height * sfmt->depth)/8; 1383 1383 1384 1384 videobuf_queue_sg_init(&fh->video_q, &video_qops,
+8 -8
drivers/media/radio/radio-aimslab.c
··· 361 361 362 362 static const struct v4l2_file_operations rtrack_fops = { 363 363 .owner = THIS_MODULE, 364 - .ioctl = video_ioctl2, 364 + .unlocked_ioctl = video_ioctl2, 365 365 }; 366 366 367 367 static const struct v4l2_ioctl_ops rtrack_ioctl_ops = { ··· 412 412 rt->vdev.release = video_device_release_empty; 413 413 video_set_drvdata(&rt->vdev, rt); 414 414 415 - if (video_register_device(&rt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 416 - v4l2_device_unregister(&rt->v4l2_dev); 417 - release_region(rt->io, 2); 418 - return -EINVAL; 419 - } 420 - v4l2_info(v4l2_dev, "AIMSlab RadioTrack/RadioReveal card driver.\n"); 421 - 422 415 /* Set up the I/O locking */ 423 416 424 417 mutex_init(&rt->lock); ··· 422 429 outb(0x48, rt->io); /* volume down but still "on" */ 423 430 sleep_delay(2000000); /* make sure it's totally down */ 424 431 outb(0xc0, rt->io); /* steady volume, mute card */ 432 + 433 + if (video_register_device(&rt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 434 + v4l2_device_unregister(&rt->v4l2_dev); 435 + release_region(rt->io, 2); 436 + return -EINVAL; 437 + } 438 + v4l2_info(v4l2_dev, "AIMSlab RadioTrack/RadioReveal card driver.\n"); 425 439 426 440 return 0; 427 441 }
+3 -3
drivers/media/radio/radio-aztech.c
··· 324 324 325 325 static const struct v4l2_file_operations aztech_fops = { 326 326 .owner = THIS_MODULE, 327 - .ioctl = video_ioctl2, 327 + .unlocked_ioctl = video_ioctl2, 328 328 }; 329 329 330 330 static const struct v4l2_ioctl_ops aztech_ioctl_ops = { ··· 375 375 az->vdev.ioctl_ops = &aztech_ioctl_ops; 376 376 az->vdev.release = video_device_release_empty; 377 377 video_set_drvdata(&az->vdev, az); 378 + /* mute card - prevents noisy bootups */ 379 + outb(0, az->io); 378 380 379 381 if (video_register_device(&az->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 380 382 v4l2_device_unregister(v4l2_dev); ··· 385 383 } 386 384 387 385 v4l2_info(v4l2_dev, "Aztech radio card driver v1.00/19990224 rkroll@exploits.org\n"); 388 - /* mute card - prevents noisy bootups */ 389 - outb(0, az->io); 390 386 return 0; 391 387 } 392 388
+9 -3
drivers/media/radio/radio-cadet.c
··· 328 328 unsigned char readbuf[RDS_BUFFER]; 329 329 int i = 0; 330 330 331 + mutex_lock(&dev->lock); 331 332 if (dev->rdsstat == 0) { 332 - mutex_lock(&dev->lock); 333 333 dev->rdsstat = 1; 334 334 outb(0x80, dev->io); /* Select RDS fifo */ 335 - mutex_unlock(&dev->lock); 336 335 init_timer(&dev->readtimer); 337 336 dev->readtimer.function = cadet_handler; 338 337 dev->readtimer.data = (unsigned long)dev; ··· 339 340 add_timer(&dev->readtimer); 340 341 } 341 342 if (dev->rdsin == dev->rdsout) { 343 + mutex_unlock(&dev->lock); 342 344 if (file->f_flags & O_NONBLOCK) 343 345 return -EWOULDBLOCK; 344 346 interruptible_sleep_on(&dev->read_queue); 347 + mutex_lock(&dev->lock); 345 348 } 346 349 while (i < count && dev->rdsin != dev->rdsout) 347 350 readbuf[i++] = dev->rdsbuf[dev->rdsout++]; 351 + mutex_unlock(&dev->lock); 348 352 349 353 if (copy_to_user(data, readbuf, i)) 350 354 return -EFAULT; ··· 527 525 { 528 526 struct cadet *dev = video_drvdata(file); 529 527 528 + mutex_lock(&dev->lock); 530 529 dev->users++; 531 530 if (1 == dev->users) 532 531 init_waitqueue_head(&dev->read_queue); 532 + mutex_unlock(&dev->lock); 533 533 return 0; 534 534 } 535 535 ··· 539 535 { 540 536 struct cadet *dev = video_drvdata(file); 541 537 538 + mutex_lock(&dev->lock); 542 539 dev->users--; 543 540 if (0 == dev->users) { 544 541 del_timer_sync(&dev->readtimer); 545 542 dev->rdsstat = 0; 546 543 } 544 + mutex_unlock(&dev->lock); 547 545 return 0; 548 546 } 549 547 ··· 565 559 .open = cadet_open, 566 560 .release = cadet_release, 567 561 .read = cadet_read, 568 - .ioctl = video_ioctl2, 562 + .unlocked_ioctl = video_ioctl2, 569 563 .poll = cadet_poll, 570 564 }; 571 565
+3 -3
drivers/media/radio/radio-gemtek-pci.c
··· 361 361 362 362 static const struct v4l2_file_operations gemtek_pci_fops = { 363 363 .owner = THIS_MODULE, 364 - .ioctl = video_ioctl2, 364 + .unlocked_ioctl = video_ioctl2, 365 365 }; 366 366 367 367 static const struct v4l2_ioctl_ops gemtek_pci_ioctl_ops = { ··· 422 422 card->vdev.release = video_device_release_empty; 423 423 video_set_drvdata(&card->vdev, card); 424 424 425 + gemtek_pci_mute(card); 426 + 425 427 if (video_register_device(&card->vdev, VFL_TYPE_RADIO, nr_radio) < 0) 426 428 goto err_video; 427 - 428 - gemtek_pci_mute(card); 429 429 430 430 v4l2_info(v4l2_dev, "Gemtek PCI Radio (rev. %d) found at 0x%04x-0x%04x.\n", 431 431 pdev->revision, card->iobase, card->iobase + card->length - 1);
+7 -7
drivers/media/radio/radio-gemtek.c
··· 378 378 379 379 static const struct v4l2_file_operations gemtek_fops = { 380 380 .owner = THIS_MODULE, 381 - .ioctl = video_ioctl2, 381 + .unlocked_ioctl = video_ioctl2, 382 382 }; 383 383 384 384 static int vidioc_querycap(struct file *file, void *priv, ··· 577 577 gt->vdev.release = video_device_release_empty; 578 578 video_set_drvdata(&gt->vdev, gt); 579 579 580 - if (video_register_device(&gt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 581 - v4l2_device_unregister(v4l2_dev); 582 - release_region(gt->io, 1); 583 - return -EBUSY; 584 - } 585 - 586 580 /* Set defaults */ 587 581 gt->lastfreq = GEMTEK_LOWFREQ; 588 582 gt->bu2614data = 0; 589 583 590 584 if (initmute) 591 585 gemtek_mute(gt); 586 + 587 + if (video_register_device(&gt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 588 + v4l2_device_unregister(v4l2_dev); 589 + release_region(gt->io, 1); 590 + return -EBUSY; 591 + } 592 592 593 593 return 0; 594 594 }
+6 -8
drivers/media/radio/radio-maestro.c
··· 299 299 300 300 static const struct v4l2_file_operations maestro_fops = { 301 301 .owner = THIS_MODULE, 302 - .ioctl = video_ioctl2, 302 + .unlocked_ioctl = video_ioctl2, 303 303 }; 304 304 305 305 static const struct v4l2_ioctl_ops maestro_ioctl_ops = { ··· 383 383 dev->vdev.release = video_device_release_empty; 384 384 video_set_drvdata(&dev->vdev, dev); 385 385 386 + if (!radio_power_on(dev)) { 387 + retval = -EIO; 388 + goto errfr1; 389 + } 390 + 386 391 retval = video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr); 387 392 if (retval) { 388 393 v4l2_err(v4l2_dev, "can't register video device!\n"); 389 394 goto errfr1; 390 395 } 391 396 392 - if (!radio_power_on(dev)) { 393 - retval = -EIO; 394 - goto errunr; 395 - } 396 - 397 397 v4l2_info(v4l2_dev, "version " DRIVER_VERSION "\n"); 398 398 399 399 return 0; 400 - errunr: 401 - video_unregister_device(&dev->vdev); 402 400 errfr1: 403 401 v4l2_device_unregister(v4l2_dev); 404 402 errfr:
+1 -1
drivers/media/radio/radio-maxiradio.c
··· 346 346 347 347 static const struct v4l2_file_operations maxiradio_fops = { 348 348 .owner = THIS_MODULE, 349 - .ioctl = video_ioctl2, 349 + .unlocked_ioctl = video_ioctl2, 350 350 }; 351 351 352 352 static const struct v4l2_ioctl_ops maxiradio_ioctl_ops = {
+4 -2
drivers/media/radio/radio-miropcm20.c
··· 33 33 unsigned long freq; 34 34 int muted; 35 35 struct snd_miro_aci *aci; 36 + struct mutex lock; 36 37 }; 37 38 38 39 static struct pcm20 pcm20_card = { ··· 73 72 74 73 static const struct v4l2_file_operations pcm20_fops = { 75 74 .owner = THIS_MODULE, 76 - .ioctl = video_ioctl2, 75 + .unlocked_ioctl = video_ioctl2, 77 76 }; 78 77 79 78 static int vidioc_querycap(struct file *file, void *priv, ··· 230 229 return -ENODEV; 231 230 } 232 231 strlcpy(v4l2_dev->name, "miropcm20", sizeof(v4l2_dev->name)); 233 - 232 + mutex_init(&dev->lock); 234 233 235 234 res = v4l2_device_register(NULL, v4l2_dev); 236 235 if (res < 0) { ··· 243 242 dev->vdev.fops = &pcm20_fops; 244 243 dev->vdev.ioctl_ops = &pcm20_ioctl_ops; 245 244 dev->vdev.release = video_device_release_empty; 245 + dev->vdev.lock = &dev->lock; 246 246 video_set_drvdata(&dev->vdev, dev); 247 247 248 248 if (video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr) < 0)
+5 -5
drivers/media/radio/radio-rtrack2.c
··· 266 266 267 267 static const struct v4l2_file_operations rtrack2_fops = { 268 268 .owner = THIS_MODULE, 269 - .ioctl = video_ioctl2, 269 + .unlocked_ioctl = video_ioctl2, 270 270 }; 271 271 272 272 static const struct v4l2_ioctl_ops rtrack2_ioctl_ops = { ··· 315 315 dev->vdev.release = video_device_release_empty; 316 316 video_set_drvdata(&dev->vdev, dev); 317 317 318 + /* mute card - prevents noisy bootups */ 319 + outb(1, dev->io); 320 + dev->muted = 1; 321 + 318 322 mutex_init(&dev->lock); 319 323 if (video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 320 324 v4l2_device_unregister(v4l2_dev); ··· 327 323 } 328 324 329 325 v4l2_info(v4l2_dev, "AIMSlab Radiotrack II card driver.\n"); 330 - 331 - /* mute card - prevents noisy bootups */ 332 - outb(1, dev->io); 333 - dev->muted = 1; 334 326 335 327 return 0; 336 328 }
+4 -3
drivers/media/radio/radio-sf16fmi.c
··· 260 260 261 261 static const struct v4l2_file_operations fmi_fops = { 262 262 .owner = THIS_MODULE, 263 - .ioctl = video_ioctl2, 263 + .unlocked_ioctl = video_ioctl2, 264 264 }; 265 265 266 266 static const struct v4l2_ioctl_ops fmi_ioctl_ops = { ··· 382 382 383 383 mutex_init(&fmi->lock); 384 384 385 + /* mute card - prevents noisy bootups */ 386 + fmi_mute(fmi); 387 + 385 388 if (video_register_device(&fmi->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 386 389 v4l2_device_unregister(v4l2_dev); 387 390 release_region(fmi->io, 2); ··· 394 391 } 395 392 396 393 v4l2_info(v4l2_dev, "card driver at 0x%x\n", fmi->io); 397 - /* mute card - prevents noisy bootups */ 398 - fmi_mute(fmi); 399 394 return 0; 400 395 } 401 396
+5 -6
drivers/media/radio/radio-sf16fmr2.c
··· 376 376 377 377 static const struct v4l2_file_operations fmr2_fops = { 378 378 .owner = THIS_MODULE, 379 - .ioctl = video_ioctl2, 379 + .unlocked_ioctl = video_ioctl2, 380 380 }; 381 381 382 382 static const struct v4l2_ioctl_ops fmr2_ioctl_ops = { ··· 424 424 fmr2->vdev.release = video_device_release_empty; 425 425 video_set_drvdata(&fmr2->vdev, fmr2); 426 426 427 + /* mute card - prevents noisy bootups */ 428 + fmr2_mute(fmr2->io); 429 + fmr2_product_info(fmr2); 430 + 427 431 if (video_register_device(&fmr2->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 428 432 v4l2_device_unregister(v4l2_dev); 429 433 release_region(fmr2->io, 2); ··· 435 431 } 436 432 437 433 v4l2_info(v4l2_dev, "SF16FMR2 radio card driver at 0x%x.\n", fmr2->io); 438 - /* mute card - prevents noisy bootups */ 439 - mutex_lock(&fmr2->lock); 440 - fmr2_mute(fmr2->io); 441 - fmr2_product_info(fmr2); 442 - mutex_unlock(&fmr2->lock); 443 434 debug_print((KERN_DEBUG "card_type %d\n", fmr2->card_type)); 444 435 return 0; 445 436 }
+2 -1
drivers/media/radio/radio-si4713.c
··· 53 53 /* radio_si4713_fops - file operations interface */ 54 54 static const struct v4l2_file_operations radio_si4713_fops = { 55 55 .owner = THIS_MODULE, 56 - .ioctl = video_ioctl2, 56 + /* Note: locking is done at the subdev level in the i2c driver. */ 57 + .unlocked_ioctl = video_ioctl2, 57 58 }; 58 59 59 60 /* Video4Linux Interface */
+9 -40
drivers/media/radio/radio-tea5764.c
··· 142 142 struct video_device *videodev; 143 143 struct tea5764_regs regs; 144 144 struct mutex mutex; 145 - int users; 146 145 }; 147 146 148 147 /* I2C code related */ ··· 457 458 return 0; 458 459 } 459 460 460 - static int tea5764_open(struct file *file) 461 - { 462 - /* Currently we support only one device */ 463 - struct tea5764_device *radio = video_drvdata(file); 464 - 465 - mutex_lock(&radio->mutex); 466 - /* Only exclusive access */ 467 - if (radio->users) { 468 - mutex_unlock(&radio->mutex); 469 - return -EBUSY; 470 - } 471 - radio->users++; 472 - mutex_unlock(&radio->mutex); 473 - file->private_data = radio; 474 - return 0; 475 - } 476 - 477 - static int tea5764_close(struct file *file) 478 - { 479 - struct tea5764_device *radio = video_drvdata(file); 480 - 481 - if (!radio) 482 - return -ENODEV; 483 - mutex_lock(&radio->mutex); 484 - radio->users--; 485 - mutex_unlock(&radio->mutex); 486 - return 0; 487 - } 488 - 489 461 /* File system interface */ 490 462 static const struct v4l2_file_operations tea5764_fops = { 491 463 .owner = THIS_MODULE, 492 - .open = tea5764_open, 493 - .release = tea5764_close, 494 - .ioctl = video_ioctl2, 464 + .unlocked_ioctl = video_ioctl2, 495 465 }; 496 466 497 467 static const struct v4l2_ioctl_ops tea5764_ioctl_ops = { ··· 495 527 int ret; 496 528 497 529 PDEBUG("probe"); 498 - radio = kmalloc(sizeof(struct tea5764_device), GFP_KERNEL); 530 + radio = kzalloc(sizeof(struct tea5764_device), GFP_KERNEL); 499 531 if (!radio) 500 532 return -ENOMEM; 501 533 ··· 523 555 524 556 i2c_set_clientdata(client, radio); 525 557 video_set_drvdata(radio->videodev, radio); 526 - 527 - ret = video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr); 528 - if (ret < 0) { 529 - PWARN("Could not register video device!"); 530 - goto errrel; 531 - } 558 + radio->videodev->lock = &radio->mutex; 532 559 533 560 /* initialize and power off the chip */ 534 561 tea5764_i2c_read(radio); 535 562 tea5764_set_audout_mode(radio, 
V4L2_TUNER_MODE_STEREO); 536 563 tea5764_mute(radio, 1); 537 564 tea5764_power_down(radio); 565 + 566 + ret = video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr); 567 + if (ret < 0) { 568 + PWARN("Could not register video device!"); 569 + goto errrel; 570 + } 538 571 539 572 PINFO("registered."); 540 573 return 0;
+4 -4
drivers/media/radio/radio-terratec.c
··· 338 338 339 339 static const struct v4l2_file_operations terratec_fops = { 340 340 .owner = THIS_MODULE, 341 - .ioctl = video_ioctl2, 341 + .unlocked_ioctl = video_ioctl2, 342 342 }; 343 343 344 344 static const struct v4l2_ioctl_ops terratec_ioctl_ops = { ··· 389 389 390 390 mutex_init(&tt->lock); 391 391 392 + /* mute card - prevents noisy bootups */ 393 + tt_write_vol(tt, 0); 394 + 392 395 if (video_register_device(&tt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 393 396 v4l2_device_unregister(&tt->v4l2_dev); 394 397 release_region(tt->io, 2); ··· 399 396 } 400 397 401 398 v4l2_info(v4l2_dev, "TERRATEC ActivRadio Standalone card driver.\n"); 402 - 403 - /* mute card - prevents noisy bootups */ 404 - tt_write_vol(tt, 0); 405 399 return 0; 406 400 } 407 401
+4 -1
drivers/media/radio/radio-timb.c
··· 34 34 struct v4l2_subdev *sd_dsp; 35 35 struct video_device video_dev; 36 36 struct v4l2_device v4l2_dev; 37 + struct mutex lock; 37 38 }; 38 39 39 40 ··· 143 142 144 143 static const struct v4l2_file_operations timbradio_fops = { 145 144 .owner = THIS_MODULE, 146 - .ioctl = video_ioctl2, 145 + .unlocked_ioctl = video_ioctl2, 147 146 }; 148 147 149 148 static int __devinit timbradio_probe(struct platform_device *pdev) ··· 165 164 } 166 165 167 166 tr->pdata = *pdata; 167 + mutex_init(&tr->lock); 168 168 169 169 strlcpy(tr->video_dev.name, "Timberdale Radio", 170 170 sizeof(tr->video_dev.name)); ··· 173 171 tr->video_dev.ioctl_ops = &timbradio_ioctl_ops; 174 172 tr->video_dev.release = video_device_release_empty; 175 173 tr->video_dev.minor = -1; 174 + tr->video_dev.lock = &tr->lock; 176 175 177 176 strlcpy(tr->v4l2_dev.name, DRIVER_NAME, sizeof(tr->v4l2_dev.name)); 178 177 err = v4l2_device_register(NULL, &tr->v4l2_dev);
+9 -9
drivers/media/radio/radio-trust.c
··· 344 344 345 345 static const struct v4l2_file_operations trust_fops = { 346 346 .owner = THIS_MODULE, 347 - .ioctl = video_ioctl2, 347 + .unlocked_ioctl = video_ioctl2, 348 348 }; 349 349 350 350 static const struct v4l2_ioctl_ops trust_ioctl_ops = { ··· 396 396 tr->vdev.release = video_device_release_empty; 397 397 video_set_drvdata(&tr->vdev, tr); 398 398 399 - if (video_register_device(&tr->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 400 - v4l2_device_unregister(v4l2_dev); 401 - release_region(tr->io, 2); 402 - return -EINVAL; 403 - } 404 - 405 - v4l2_info(v4l2_dev, "Trust FM Radio card driver v1.0.\n"); 406 - 407 399 write_i2c(tr, 2, TDA7318_ADDR, 0x80); /* speaker att. LF = 0 dB */ 408 400 write_i2c(tr, 2, TDA7318_ADDR, 0xa0); /* speaker att. RF = 0 dB */ 409 401 write_i2c(tr, 2, TDA7318_ADDR, 0xc0); /* speaker att. LR = 0 dB */ ··· 409 417 410 418 /* mute card - prevents noisy bootups */ 411 419 tr_setmute(tr, 1); 420 + 421 + if (video_register_device(&tr->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 422 + v4l2_device_unregister(v4l2_dev); 423 + release_region(tr->io, 2); 424 + return -EINVAL; 425 + } 426 + 427 + v4l2_info(v4l2_dev, "Trust FM Radio card driver v1.0.\n"); 412 428 413 429 return 0; 414 430 }
+8 -8
drivers/media/radio/radio-typhoon.c
··· 317 317 318 318 static const struct v4l2_file_operations typhoon_fops = { 319 319 .owner = THIS_MODULE, 320 - .ioctl = video_ioctl2, 320 + .unlocked_ioctl = video_ioctl2, 321 321 }; 322 322 323 323 static const struct v4l2_ioctl_ops typhoon_ioctl_ops = { ··· 344 344 345 345 strlcpy(v4l2_dev->name, "typhoon", sizeof(v4l2_dev->name)); 346 346 dev->io = io; 347 - dev->curfreq = dev->mutefreq = mutefreq; 348 347 349 348 if (dev->io == -1) { 350 349 v4l2_err(v4l2_dev, "You must set an I/O address with io=0x316 or io=0x336\n"); 351 350 return -EINVAL; 352 351 } 353 352 354 - if (dev->mutefreq < 87000 || dev->mutefreq > 108500) { 353 + if (mutefreq < 87000 || mutefreq > 108500) { 355 354 v4l2_err(v4l2_dev, "You must set a frequency (in kHz) used when muting the card,\n"); 356 355 v4l2_err(v4l2_dev, "e.g. with \"mutefreq=87500\" (87000 <= mutefreq <= 108500)\n"); 357 356 return -EINVAL; 358 357 } 358 + dev->curfreq = dev->mutefreq = mutefreq << 4; 359 359 360 360 mutex_init(&dev->lock); 361 361 if (!request_region(dev->io, 8, "typhoon")) { ··· 378 378 dev->vdev.ioctl_ops = &typhoon_ioctl_ops; 379 379 dev->vdev.release = video_device_release_empty; 380 380 video_set_drvdata(&dev->vdev, dev); 381 + 382 + /* mute card - prevents noisy bootups */ 383 + typhoon_mute(dev); 384 + 381 385 if (video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 382 386 v4l2_device_unregister(&dev->v4l2_dev); 383 387 release_region(dev->io, 8); 384 388 return -EINVAL; 385 389 } 386 390 v4l2_info(v4l2_dev, "port 0x%x.\n", dev->io); 387 - v4l2_info(v4l2_dev, "mute frequency is %lu kHz.\n", dev->mutefreq); 388 - dev->mutefreq <<= 4; 389 - 390 - /* mute card - prevents noisy bootups */ 391 - typhoon_mute(dev); 391 + v4l2_info(v4l2_dev, "mute frequency is %lu kHz.\n", mutefreq); 392 392 393 393 return 0; 394 394 }
+15 -15
drivers/media/radio/radio-zoltrix.c
··· 377 377 static const struct v4l2_file_operations zoltrix_fops = 378 378 { 379 379 .owner = THIS_MODULE, 380 - .ioctl = video_ioctl2, 380 + .unlocked_ioctl = video_ioctl2, 381 381 }; 382 382 383 383 static const struct v4l2_ioctl_ops zoltrix_ioctl_ops = { ··· 424 424 return res; 425 425 } 426 426 427 - strlcpy(zol->vdev.name, v4l2_dev->name, sizeof(zol->vdev.name)); 428 - zol->vdev.v4l2_dev = v4l2_dev; 429 - zol->vdev.fops = &zoltrix_fops; 430 - zol->vdev.ioctl_ops = &zoltrix_ioctl_ops; 431 - zol->vdev.release = video_device_release_empty; 432 - video_set_drvdata(&zol->vdev, zol); 433 - 434 - if (video_register_device(&zol->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 435 - v4l2_device_unregister(v4l2_dev); 436 - release_region(zol->io, 2); 437 - return -EINVAL; 438 - } 439 - v4l2_info(v4l2_dev, "Zoltrix Radio Plus card driver.\n"); 440 - 441 427 mutex_init(&zol->lock); 442 428 443 429 /* mute card - prevents noisy bootups */ ··· 437 451 438 452 zol->curvol = 0; 439 453 zol->stereo = 1; 454 + 455 + strlcpy(zol->vdev.name, v4l2_dev->name, sizeof(zol->vdev.name)); 456 + zol->vdev.v4l2_dev = v4l2_dev; 457 + zol->vdev.fops = &zoltrix_fops; 458 + zol->vdev.ioctl_ops = &zoltrix_ioctl_ops; 459 + zol->vdev.release = video_device_release_empty; 460 + video_set_drvdata(&zol->vdev, zol); 461 + 462 + if (video_register_device(&zol->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 463 + v4l2_device_unregister(v4l2_dev); 464 + release_region(zol->io, 2); 465 + return -EINVAL; 466 + } 467 + v4l2_info(v4l2_dev, "Zoltrix Radio Plus card driver.\n"); 440 468 441 469 return 0; 442 470 }
+1 -1
drivers/media/video/arv.c
··· 712 712 static const struct v4l2_file_operations ar_fops = { 713 713 .owner = THIS_MODULE, 714 714 .read = ar_read, 715 - .ioctl = video_ioctl2, 715 + .unlocked_ioctl = video_ioctl2, 716 716 }; 717 717 718 718 static const struct v4l2_ioctl_ops ar_ioctl_ops = {
+3 -114
drivers/media/video/bt8xx/bttv-driver.c
··· 854 854 xbits |= RESOURCE_VIDEO_READ | RESOURCE_VIDEO_STREAM; 855 855 856 856 /* is it free? */ 857 - mutex_lock(&btv->lock); 858 857 if (btv->resources & xbits) { 859 858 /* no, someone else uses it */ 860 859 goto fail; ··· 883 884 /* it's free, grab it */ 884 885 fh->resources |= bit; 885 886 btv->resources |= bit; 886 - mutex_unlock(&btv->lock); 887 887 return 1; 888 888 889 889 fail: 890 - mutex_unlock(&btv->lock); 891 890 return 0; 892 891 } 893 892 ··· 937 940 /* trying to free ressources not allocated by us ... */ 938 941 printk("bttv: BUG! (btres)\n"); 939 942 } 940 - mutex_lock(&btv->lock); 941 943 fh->resources &= ~bits; 942 944 btv->resources &= ~bits; 943 945 ··· 947 951 948 952 if (0 == (bits & VBI_RESOURCES)) 949 953 disclaim_vbi_lines(btv); 950 - 951 - mutex_unlock(&btv->lock); 952 954 } 953 955 954 956 /* ----------------------------------------------------------------------- */ ··· 1707 1713 1708 1714 /* Make sure tvnorm and vbi_end remain consistent 1709 1715 until we're done. */ 1710 - mutex_lock(&btv->lock); 1711 1716 1712 1717 norm = btv->tvnorm; 1713 1718 1714 1719 /* In this mode capturing always starts at defrect.top 1715 1720 (default VDELAY), ignoring cropping parameters. 
*/ 1716 1721 if (btv->vbi_end > bttv_tvnorms[norm].cropcap.defrect.top) { 1717 - mutex_unlock(&btv->lock); 1718 1722 return -EINVAL; 1719 1723 } 1720 1724 1721 - mutex_unlock(&btv->lock); 1722 - 1723 1725 c.rect = bttv_tvnorms[norm].cropcap.defrect; 1724 1726 } else { 1725 - mutex_lock(&btv->lock); 1726 - 1727 1727 norm = btv->tvnorm; 1728 1728 c = btv->crop[!!fh->do_crop]; 1729 - 1730 - mutex_unlock(&btv->lock); 1731 1729 1732 1730 if (width < c.min_scaled_width || 1733 1731 width > c.max_scaled_width || ··· 1844 1858 unsigned int i; 1845 1859 int err; 1846 1860 1847 - mutex_lock(&btv->lock); 1848 1861 err = v4l2_prio_check(&btv->prio, fh->prio); 1849 1862 if (err) 1850 1863 goto err; ··· 1859 1874 set_tvnorm(btv, i); 1860 1875 1861 1876 err: 1862 - mutex_unlock(&btv->lock); 1863 1877 1864 1878 return err; 1865 1879 } ··· 1882 1898 struct bttv *btv = fh->btv; 1883 1899 int rc = 0; 1884 1900 1885 - mutex_lock(&btv->lock); 1886 1901 if (i->index >= bttv_tvcards[btv->c.type].video_inputs) { 1887 1902 rc = -EINVAL; 1888 1903 goto err; ··· 1911 1928 i->std = BTTV_NORMS; 1912 1929 1913 1930 err: 1914 - mutex_unlock(&btv->lock); 1915 1931 1916 1932 return rc; 1917 1933 } ··· 1920 1938 struct bttv_fh *fh = priv; 1921 1939 struct bttv *btv = fh->btv; 1922 1940 1923 - mutex_lock(&btv->lock); 1924 1941 *i = btv->input; 1925 - mutex_unlock(&btv->lock); 1926 1942 1927 1943 return 0; 1928 1944 } ··· 1932 1952 1933 1953 int err; 1934 1954 1935 - mutex_lock(&btv->lock); 1936 1955 err = v4l2_prio_check(&btv->prio, fh->prio); 1937 1956 if (unlikely(err)) 1938 1957 goto err; ··· 1944 1965 set_input(btv, i, btv->tvnorm); 1945 1966 1946 1967 err: 1947 - mutex_unlock(&btv->lock); 1948 1968 return 0; 1949 1969 } 1950 1970 ··· 1957 1979 if (unlikely(0 != t->index)) 1958 1980 return -EINVAL; 1959 1981 1960 - mutex_lock(&btv->lock); 1961 1982 if (unlikely(btv->tuner_type == TUNER_ABSENT)) { 1962 1983 err = -EINVAL; 1963 1984 goto err; ··· 1972 1995 btv->audio_mode_gpio(btv, t, 1); 1973 
1996 1974 1997 err: 1975 - mutex_unlock(&btv->lock); 1976 1998 1977 1999 return 0; 1978 2000 } ··· 1982 2006 struct bttv_fh *fh = priv; 1983 2007 struct bttv *btv = fh->btv; 1984 2008 1985 - mutex_lock(&btv->lock); 1986 2009 f->type = btv->radio_user ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; 1987 2010 f->frequency = btv->freq; 1988 - mutex_unlock(&btv->lock); 1989 2011 1990 2012 return 0; 1991 2013 } ··· 1998 2024 if (unlikely(f->tuner != 0)) 1999 2025 return -EINVAL; 2000 2026 2001 - mutex_lock(&btv->lock); 2002 2027 err = v4l2_prio_check(&btv->prio, fh->prio); 2003 2028 if (unlikely(err)) 2004 2029 goto err; ··· 2012 2039 if (btv->has_matchbox && btv->radio_user) 2013 2040 tea5757_set_freq(btv, btv->freq); 2014 2041 err: 2015 - mutex_unlock(&btv->lock); 2016 2042 2017 2043 return 0; 2018 2044 } ··· 2144 2172 2145 2173 /* Make sure tvnorm, vbi_end and the current cropping parameters 2146 2174 remain consistent until we're done. */ 2147 - mutex_lock(&btv->lock); 2148 2175 2149 2176 b = &bttv_tvnorms[btv->tvnorm].cropcap.bounds; 2150 2177 ··· 2221 2250 rc = 0; /* success */ 2222 2251 2223 2252 fail: 2224 - mutex_unlock(&btv->lock); 2225 2253 2226 2254 return rc; 2227 2255 } ··· 2252 2282 if (V4L2_FIELD_ANY == field) { 2253 2283 __s32 height2; 2254 2284 2255 - mutex_lock(&fh->btv->lock); 2256 2285 height2 = fh->btv->crop[!!fh->do_crop].rect.height >> 1; 2257 - mutex_unlock(&fh->btv->lock); 2258 2286 field = (win->w.height > height2) 2259 2287 ? V4L2_FIELD_INTERLACED 2260 2288 : V4L2_FIELD_TOP; ··· 2328 2360 } 2329 2361 } 2330 2362 2331 - mutex_lock(&fh->cap.vb_lock); 2332 2363 /* clip against screen */ 2333 2364 if (NULL != btv->fbuf.base) 2334 2365 n = btcx_screen_clips(btv->fbuf.fmt.width, btv->fbuf.fmt.height, ··· 2358 2391 fh->ov.field = win->field; 2359 2392 fh->ov.setup_ok = 1; 2360 2393 2361 - /* 2362 - * FIXME: btv is protected by btv->lock mutex, while btv->init 2363 - * is protected by fh->cap.vb_lock. 
This seems to open the 2364 - * possibility for some race situations. Maybe the better would 2365 - * be to unify those locks or to use another way to store the 2366 - * init values that will be consumed by videobuf callbacks 2367 - */ 2368 2394 btv->init.ov.w.width = win->w.width; 2369 2395 btv->init.ov.w.height = win->w.height; 2370 2396 btv->init.ov.field = win->field; ··· 2372 2412 bttv_overlay_risc(btv, &fh->ov, fh->ovfmt, new); 2373 2413 retval = bttv_switch_overlay(btv,fh,new); 2374 2414 } 2375 - mutex_unlock(&fh->cap.vb_lock); 2376 2415 return retval; 2377 2416 } 2378 2417 ··· 2485 2526 if (V4L2_FIELD_ANY == field) { 2486 2527 __s32 height2; 2487 2528 2488 - mutex_lock(&btv->lock); 2489 2529 height2 = btv->crop[!!fh->do_crop].rect.height >> 1; 2490 - mutex_unlock(&btv->lock); 2491 2530 field = (f->fmt.pix.height > height2) 2492 2531 ? V4L2_FIELD_INTERLACED 2493 2532 : V4L2_FIELD_BOTTOM; ··· 2571 2614 fmt = format_by_fourcc(f->fmt.pix.pixelformat); 2572 2615 2573 2616 /* update our state informations */ 2574 - mutex_lock(&fh->cap.vb_lock); 2575 2617 fh->fmt = fmt; 2576 2618 fh->cap.field = f->fmt.pix.field; 2577 2619 fh->cap.last = V4L2_FIELD_NONE; ··· 2579 2623 btv->init.fmt = fmt; 2580 2624 btv->init.width = f->fmt.pix.width; 2581 2625 btv->init.height = f->fmt.pix.height; 2582 - mutex_unlock(&fh->cap.vb_lock); 2583 2626 2584 2627 return 0; 2585 2628 } ··· 2604 2649 unsigned int i; 2605 2650 struct bttv_fh *fh = priv; 2606 2651 2607 - mutex_lock(&fh->cap.vb_lock); 2608 2652 retval = __videobuf_mmap_setup(&fh->cap, gbuffers, gbufsize, 2609 2653 V4L2_MEMORY_MMAP); 2610 2654 if (retval < 0) { 2611 - mutex_unlock(&fh->cap.vb_lock); 2612 2655 return retval; 2613 2656 } 2614 2657 ··· 2618 2665 for (i = 0; i < gbuffers; i++) 2619 2666 mbuf->offsets[i] = i * gbufsize; 2620 2667 2621 - mutex_unlock(&fh->cap.vb_lock); 2622 2668 return 0; 2623 2669 } 2624 2670 #endif ··· 2727 2775 int retval = 0; 2728 2776 2729 2777 if (on) { 2730 - mutex_lock(&fh->cap.vb_lock); 2731 
2778 /* verify args */ 2732 2779 if (unlikely(!btv->fbuf.base)) { 2733 - mutex_unlock(&fh->cap.vb_lock); 2734 2780 return -EINVAL; 2735 2781 } 2736 2782 if (unlikely(!fh->ov.setup_ok)) { ··· 2737 2787 } 2738 2788 if (retval) 2739 2789 return retval; 2740 - mutex_unlock(&fh->cap.vb_lock); 2741 2790 } 2742 2791 2743 2792 if (!check_alloc_btres_lock(btv, fh, RESOURCE_OVERLAY)) 2744 2793 return -EBUSY; 2745 2794 2746 - mutex_lock(&fh->cap.vb_lock); 2747 2795 if (on) { 2748 2796 fh->ov.tvnorm = btv->tvnorm; 2749 2797 new = videobuf_sg_alloc(sizeof(*new)); ··· 2753 2805 2754 2806 /* switch over */ 2755 2807 retval = bttv_switch_overlay(btv, fh, new); 2756 - mutex_unlock(&fh->cap.vb_lock); 2757 2808 return retval; 2758 2809 } 2759 2810 ··· 2791 2844 } 2792 2845 2793 2846 /* ok, accept it */ 2794 - mutex_lock(&fh->cap.vb_lock); 2795 2847 btv->fbuf.base = fb->base; 2796 2848 btv->fbuf.fmt.width = fb->fmt.width; 2797 2849 btv->fbuf.fmt.height = fb->fmt.height; ··· 2822 2876 retval = bttv_switch_overlay(btv, fh, new); 2823 2877 } 2824 2878 } 2825 - mutex_unlock(&fh->cap.vb_lock); 2826 2879 return retval; 2827 2880 } 2828 2881 ··· 2900 2955 c->id >= V4L2_CID_PRIVATE_LASTP1)) 2901 2956 return -EINVAL; 2902 2957 2903 - mutex_lock(&btv->lock); 2904 2958 if (!btv->volume_gpio && (c->id == V4L2_CID_AUDIO_VOLUME)) 2905 2959 *c = no_ctl; 2906 2960 else { ··· 2907 2963 2908 2964 *c = (NULL != ctrl) ? 
*ctrl : no_ctl; 2909 2965 } 2910 - mutex_unlock(&btv->lock); 2911 2966 2912 2967 return 0; 2913 2968 } ··· 2917 2974 struct bttv_fh *fh = f; 2918 2975 struct bttv *btv = fh->btv; 2919 2976 2920 - mutex_lock(&btv->lock); 2921 2977 v4l2_video_std_frame_period(bttv_tvnorms[btv->tvnorm].v4l2_id, 2922 2978 &parm->parm.capture.timeperframe); 2923 - mutex_unlock(&btv->lock); 2924 2979 2925 2980 return 0; 2926 2981 } ··· 2934 2993 if (0 != t->index) 2935 2994 return -EINVAL; 2936 2995 2937 - mutex_lock(&btv->lock); 2938 2996 t->rxsubchans = V4L2_TUNER_SUB_MONO; 2939 2997 bttv_call_all(btv, tuner, g_tuner, t); 2940 2998 strcpy(t->name, "Television"); ··· 2945 3005 if (btv->audio_mode_gpio) 2946 3006 btv->audio_mode_gpio(btv, t, 0); 2947 3007 2948 - mutex_unlock(&btv->lock); 2949 3008 return 0; 2950 3009 } 2951 3010 ··· 2953 3014 struct bttv_fh *fh = f; 2954 3015 struct bttv *btv = fh->btv; 2955 3016 2956 - mutex_lock(&btv->lock); 2957 3017 *p = v4l2_prio_max(&btv->prio); 2958 - mutex_unlock(&btv->lock); 2959 3018 2960 3019 return 0; 2961 3020 } ··· 2965 3028 struct bttv *btv = fh->btv; 2966 3029 int rc; 2967 3030 2968 - mutex_lock(&btv->lock); 2969 3031 rc = v4l2_prio_change(&btv->prio, &fh->prio, prio); 2970 - mutex_unlock(&btv->lock); 2971 3032 2972 3033 return rc; 2973 3034 } ··· 2980 3045 cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY) 2981 3046 return -EINVAL; 2982 3047 2983 - mutex_lock(&btv->lock); 2984 3048 *cap = bttv_tvnorms[btv->tvnorm].cropcap; 2985 - mutex_unlock(&btv->lock); 2986 3049 2987 3050 return 0; 2988 3051 } ··· 2998 3065 inconsistent with fh->width or fh->height and apps 2999 3066 do not expect a change here. */ 3000 3067 3001 - mutex_lock(&btv->lock); 3002 3068 crop->c = btv->crop[!!fh->do_crop].rect; 3003 - mutex_unlock(&btv->lock); 3004 3069 3005 3070 return 0; 3006 3071 } ··· 3022 3091 /* Make sure tvnorm, vbi_end and the current cropping 3023 3092 parameters remain consistent until we're done. 
Note 3024 3093 read() may change vbi_end in check_alloc_btres_lock(). */ 3025 - mutex_lock(&btv->lock); 3026 3094 retval = v4l2_prio_check(&btv->prio, fh->prio); 3027 3095 if (0 != retval) { 3028 - mutex_unlock(&btv->lock); 3029 3096 return retval; 3030 3097 } 3031 3098 3032 3099 retval = -EBUSY; 3033 3100 3034 3101 if (locked_btres(fh->btv, VIDEO_RESOURCES)) { 3035 - mutex_unlock(&btv->lock); 3036 3102 return retval; 3037 3103 } 3038 3104 ··· 3041 3113 3042 3114 b_top = max(b->top, btv->vbi_end); 3043 3115 if (b_top + 32 >= b_bottom) { 3044 - mutex_unlock(&btv->lock); 3045 3116 return retval; 3046 3117 } 3047 3118 ··· 3063 3136 3064 3137 btv->crop[1] = c; 3065 3138 3066 - mutex_unlock(&btv->lock); 3067 - 3068 3139 fh->do_crop = 1; 3069 - 3070 - mutex_lock(&fh->cap.vb_lock); 3071 3140 3072 3141 if (fh->width < c.min_scaled_width) { 3073 3142 fh->width = c.min_scaled_width; ··· 3080 3157 fh->height = c.max_scaled_height; 3081 3158 btv->init.height = c.max_scaled_height; 3082 3159 } 3083 - 3084 - mutex_unlock(&fh->cap.vb_lock); 3085 3160 3086 3161 return 0; 3087 3162 } ··· 3148 3227 return videobuf_poll_stream(file, &fh->vbi, wait); 3149 3228 } 3150 3229 3151 - mutex_lock(&fh->cap.vb_lock); 3152 3230 if (check_btres(fh,RESOURCE_VIDEO_STREAM)) { 3153 3231 /* streaming capture */ 3154 3232 if (list_empty(&fh->cap.stream)) ··· 3182 3262 else 3183 3263 rc = 0; 3184 3264 err: 3185 - mutex_unlock(&fh->cap.vb_lock); 3186 3265 return rc; 3187 3266 } 3188 3267 ··· 3212 3293 return -ENOMEM; 3213 3294 file->private_data = fh; 3214 3295 3215 - /* 3216 - * btv is protected by btv->lock mutex, while btv->init and other 3217 - * streaming vars are protected by fh->cap.vb_lock. We need to take 3218 - * care of both locks to avoid troubles. However, vb_lock is used also 3219 - * inside videobuf, without calling buf->lock. So, it is a very bad 3220 - * idea to hold both locks at the same time. 
3221 - * Let's first copy btv->init at fh, holding cap.vb_lock, and then work 3222 - * with the rest of init, holding btv->lock. 3223 - */ 3224 - mutex_lock(&fh->cap.vb_lock); 3225 3296 *fh = btv->init; 3226 - mutex_unlock(&fh->cap.vb_lock); 3227 3297 3228 3298 fh->type = type; 3229 3299 fh->ov.setup_ok = 0; 3230 3300 3231 - mutex_lock(&btv->lock); 3232 3301 v4l2_prio_open(&btv->prio, &fh->prio); 3233 3302 3234 3303 videobuf_queue_sg_init(&fh->cap, &bttv_video_qops, ··· 3224 3317 V4L2_BUF_TYPE_VIDEO_CAPTURE, 3225 3318 V4L2_FIELD_INTERLACED, 3226 3319 sizeof(struct bttv_buffer), 3227 - fh, NULL); 3320 + fh, &btv->lock); 3228 3321 videobuf_queue_sg_init(&fh->vbi, &bttv_vbi_qops, 3229 3322 &btv->c.pci->dev, &btv->s_lock, 3230 3323 V4L2_BUF_TYPE_VBI_CAPTURE, 3231 3324 V4L2_FIELD_SEQ_TB, 3232 3325 sizeof(struct bttv_buffer), 3233 - fh, NULL); 3326 + fh, &btv->lock); 3234 3327 set_tvnorm(btv,btv->tvnorm); 3235 3328 set_input(btv, btv->input, btv->tvnorm); 3236 3329 ··· 3253 3346 bttv_vbi_fmt_reset(&fh->vbi_fmt, btv->tvnorm); 3254 3347 3255 3348 bttv_field_count(btv); 3256 - mutex_unlock(&btv->lock); 3257 3349 return 0; 3258 3350 } 3259 3351 ··· 3261 3355 struct bttv_fh *fh = file->private_data; 3262 3356 struct bttv *btv = fh->btv; 3263 3357 3264 - mutex_lock(&btv->lock); 3265 3358 /* turn off overlay */ 3266 3359 if (check_btres(fh, RESOURCE_OVERLAY)) 3267 3360 bttv_switch_overlay(btv,fh,NULL); ··· 3286 3381 3287 3382 /* free stuff */ 3288 3383 3289 - /* 3290 - * videobuf uses cap.vb_lock - we should avoid holding btv->lock, 3291 - * otherwise we may have dead lock conditions 3292 - */ 3293 - mutex_unlock(&btv->lock); 3294 3384 videobuf_mmap_free(&fh->cap); 3295 3385 videobuf_mmap_free(&fh->vbi); 3296 - mutex_lock(&btv->lock); 3297 3386 v4l2_prio_close(&btv->prio, fh->prio); 3298 3387 file->private_data = NULL; 3299 3388 kfree(fh); ··· 3297 3398 3298 3399 if (!btv->users) 3299 3400 audio_mute(btv, 1); 3300 - mutex_unlock(&btv->lock); 3301 3401 3302 3402 return 0; 3303 
3403 } ··· 3400 3502 if (unlikely(!fh)) 3401 3503 return -ENOMEM; 3402 3504 file->private_data = fh; 3403 - mutex_lock(&fh->cap.vb_lock); 3404 3505 *fh = btv->init; 3405 - mutex_unlock(&fh->cap.vb_lock); 3406 3506 3407 - mutex_lock(&btv->lock); 3408 3507 v4l2_prio_open(&btv->prio, &fh->prio); 3409 3508 3410 3509 btv->radio_user++; ··· 3409 3514 bttv_call_all(btv, tuner, s_radio); 3410 3515 audio_input(btv,TVAUDIO_INPUT_RADIO); 3411 3516 3412 - mutex_unlock(&btv->lock); 3413 3517 return 0; 3414 3518 } 3415 3519 ··· 3418 3524 struct bttv *btv = fh->btv; 3419 3525 struct rds_command cmd; 3420 3526 3421 - mutex_lock(&btv->lock); 3422 3527 v4l2_prio_close(&btv->prio, fh->prio); 3423 3528 file->private_data = NULL; 3424 3529 kfree(fh); ··· 3425 3532 btv->radio_user--; 3426 3533 3427 3534 bttv_call_all(btv, core, ioctl, RDS_CMD_CLOSE, &cmd); 3428 - mutex_unlock(&btv->lock); 3429 3535 3430 3536 return 0; 3431 3537 } ··· 3453 3561 return -EINVAL; 3454 3562 if (0 != t->index) 3455 3563 return -EINVAL; 3456 - mutex_lock(&btv->lock); 3457 3564 strcpy(t->name, "Radio"); 3458 3565 t->type = V4L2_TUNER_RADIO; 3459 3566 ··· 3460 3569 3461 3570 if (btv->audio_mode_gpio) 3462 3571 btv->audio_mode_gpio(btv, t, 0); 3463 - 3464 - mutex_unlock(&btv->lock); 3465 3572 3466 3573 return 0; 3467 3574 } ··· 3581 3692 .open = radio_open, 3582 3693 .read = radio_read, 3583 3694 .release = radio_release, 3584 - .ioctl = video_ioctl2, 3695 + .unlocked_ioctl = video_ioctl2, 3585 3696 .poll = radio_poll, 3586 3697 }; 3587 3698
+1 -1
drivers/media/video/bw-qcam.c
··· 860 860 861 861 static const struct v4l2_file_operations qcam_fops = { 862 862 .owner = THIS_MODULE, 863 - .ioctl = video_ioctl2, 863 + .unlocked_ioctl = video_ioctl2, 864 864 .read = qcam_read, 865 865 }; 866 866
+1 -1
drivers/media/video/c-qcam.c
··· 718 718 719 719 static const struct v4l2_file_operations qcam_fops = { 720 720 .owner = THIS_MODULE, 721 - .ioctl = video_ioctl2, 721 + .unlocked_ioctl = video_ioctl2, 722 722 .read = qcam_read, 723 723 }; 724 724
+1 -1
drivers/media/video/cafe_ccic.c
··· 1775 1775 .read = cafe_v4l_read, 1776 1776 .poll = cafe_v4l_poll, 1777 1777 .mmap = cafe_v4l_mmap, 1778 - .ioctl = video_ioctl2, 1778 + .unlocked_ioctl = video_ioctl2, 1779 1779 }; 1780 1780 1781 1781 static const struct v4l2_ioctl_ops cafe_v4l_ioctl_ops = {
+7 -1
drivers/media/video/cx18/cx18-alsa-pcm.c
··· 218 218 static int snd_cx18_pcm_ioctl(struct snd_pcm_substream *substream, 219 219 unsigned int cmd, void *arg) 220 220 { 221 - return snd_pcm_lib_ioctl(substream, cmd, arg); 221 + struct snd_cx18_card *cxsc = snd_pcm_substream_chip(substream); 222 + int ret; 223 + 224 + snd_cx18_lock(cxsc); 225 + ret = snd_pcm_lib_ioctl(substream, cmd, arg); 226 + snd_cx18_unlock(cxsc); 227 + return ret; 222 228 } 223 229 224 230
+1 -1
drivers/media/video/cx18/cx18-streams.c
··· 41 41 .read = cx18_v4l2_read, 42 42 .open = cx18_v4l2_open, 43 43 /* FIXME change to video_ioctl2 if serialization lock can be removed */ 44 - .ioctl = cx18_v4l2_ioctl, 44 + .unlocked_ioctl = cx18_v4l2_ioctl, 45 45 .release = cx18_v4l2_close, 46 46 .poll = cx18_v4l2_enc_poll, 47 47 };
+1 -1
drivers/media/video/et61x251/et61x251_core.c
··· 2530 2530 .owner = THIS_MODULE, 2531 2531 .open = et61x251_open, 2532 2532 .release = et61x251_release, 2533 - .ioctl = et61x251_ioctl, 2533 + .unlocked_ioctl = et61x251_ioctl, 2534 2534 .read = et61x251_read, 2535 2535 .poll = et61x251_poll, 2536 2536 .mmap = et61x251_mmap,
+184 -232
drivers/media/video/gspca/sonixj.c
··· 63 63 #define QUALITY_DEF 80 64 64 u8 jpegqual; /* webcam quality */ 65 65 66 + u8 reg01; 67 + u8 reg17; 66 68 u8 reg18; 69 + u8 flags; 67 70 68 71 s8 ag_cnt; 69 72 #define AG_CNT_START 13 ··· 98 95 SENSOR_SOI768, 99 96 SENSOR_SP80708, 100 97 }; 98 + 99 + /* device flags */ 100 + #define PDN_INV 1 /* inverse pin S_PWR_DN / sn_xxx tables */ 101 + 102 + /* sn9c1xx definitions */ 103 + /* register 0x01 */ 104 + #define S_PWR_DN 0x01 /* sensor power down */ 105 + #define S_PDN_INV 0x02 /* inverse pin S_PWR_DN */ 106 + #define V_TX_EN 0x04 /* video transfer enable */ 107 + #define LED 0x08 /* output to pin LED */ 108 + #define SCL_SEL_OD 0x20 /* open-drain mode */ 109 + #define SYS_SEL_48M 0x40 /* system clock 0: 24MHz, 1: 48MHz */ 110 + /* register 0x17 */ 111 + #define MCK_SIZE_MASK 0x1f /* sensor master clock */ 112 + #define SEN_CLK_EN 0x20 /* enable sensor clock */ 113 + #define DEF_EN 0x80 /* defect pixel by 0: soft, 1: hard */ 101 114 102 115 /* V4L2 controls supported by the driver */ 103 116 static void setbrightness(struct gspca_dev *gspca_dev); ··· 1774 1755 } 1775 1756 } 1776 1757 1777 - static void bridge_init(struct gspca_dev *gspca_dev, 1778 - const u8 *sn9c1xx) 1779 - { 1780 - struct sd *sd = (struct sd *) gspca_dev; 1781 - u8 reg0102[2]; 1782 - const u8 *reg9a; 1783 - static const u8 reg9a_def[] = 1784 - {0x00, 0x40, 0x20, 0x00, 0x00, 0x00}; 1785 - static const u8 reg9a_spec[] = 1786 - {0x00, 0x40, 0x38, 0x30, 0x00, 0x20}; 1787 - static const u8 regd4[] = {0x60, 0x00, 0x00}; 1788 - 1789 - /* sensor clock already enabled in sd_init */ 1790 - /* reg_w1(gspca_dev, 0xf1, 0x00); */ 1791 - reg_w1(gspca_dev, 0x01, sn9c1xx[1]); 1792 - 1793 - /* configure gpio */ 1794 - reg0102[0] = sn9c1xx[1]; 1795 - reg0102[1] = sn9c1xx[2]; 1796 - if (gspca_dev->audio) 1797 - reg0102[1] |= 0x04; /* keep the audio connection */ 1798 - reg_w(gspca_dev, 0x01, reg0102, 2); 1799 - reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2); 1800 - reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5); 1801 - 
switch (sd->sensor) { 1802 - case SENSOR_GC0307: 1803 - case SENSOR_OV7660: 1804 - case SENSOR_PO1030: 1805 - case SENSOR_PO2030N: 1806 - case SENSOR_SOI768: 1807 - case SENSOR_SP80708: 1808 - reg9a = reg9a_spec; 1809 - break; 1810 - default: 1811 - reg9a = reg9a_def; 1812 - break; 1813 - } 1814 - reg_w(gspca_dev, 0x9a, reg9a, 6); 1815 - 1816 - reg_w(gspca_dev, 0xd4, regd4, sizeof regd4); 1817 - 1818 - reg_w(gspca_dev, 0x03, &sn9c1xx[3], 0x0f); 1819 - 1820 - switch (sd->sensor) { 1821 - case SENSOR_ADCM1700: 1822 - reg_w1(gspca_dev, 0x01, 0x43); 1823 - reg_w1(gspca_dev, 0x17, 0x62); 1824 - reg_w1(gspca_dev, 0x01, 0x42); 1825 - reg_w1(gspca_dev, 0x01, 0x42); 1826 - break; 1827 - case SENSOR_GC0307: 1828 - msleep(50); 1829 - reg_w1(gspca_dev, 0x01, 0x61); 1830 - reg_w1(gspca_dev, 0x17, 0x22); 1831 - reg_w1(gspca_dev, 0x01, 0x60); 1832 - reg_w1(gspca_dev, 0x01, 0x40); 1833 - msleep(50); 1834 - break; 1835 - case SENSOR_MI0360B: 1836 - reg_w1(gspca_dev, 0x01, 0x61); 1837 - reg_w1(gspca_dev, 0x17, 0x60); 1838 - reg_w1(gspca_dev, 0x01, 0x60); 1839 - reg_w1(gspca_dev, 0x01, 0x40); 1840 - break; 1841 - case SENSOR_MT9V111: 1842 - reg_w1(gspca_dev, 0x01, 0x61); 1843 - reg_w1(gspca_dev, 0x17, 0x61); 1844 - reg_w1(gspca_dev, 0x01, 0x60); 1845 - reg_w1(gspca_dev, 0x01, 0x40); 1846 - break; 1847 - case SENSOR_OM6802: 1848 - msleep(10); 1849 - reg_w1(gspca_dev, 0x02, 0x73); 1850 - reg_w1(gspca_dev, 0x17, 0x60); 1851 - reg_w1(gspca_dev, 0x01, 0x22); 1852 - msleep(100); 1853 - reg_w1(gspca_dev, 0x01, 0x62); 1854 - reg_w1(gspca_dev, 0x17, 0x64); 1855 - reg_w1(gspca_dev, 0x17, 0x64); 1856 - reg_w1(gspca_dev, 0x01, 0x42); 1857 - msleep(10); 1858 - reg_w1(gspca_dev, 0x01, 0x42); 1859 - i2c_w8(gspca_dev, om6802_init0[0]); 1860 - i2c_w8(gspca_dev, om6802_init0[1]); 1861 - msleep(15); 1862 - reg_w1(gspca_dev, 0x02, 0x71); 1863 - msleep(150); 1864 - break; 1865 - case SENSOR_OV7630: 1866 - reg_w1(gspca_dev, 0x01, 0x61); 1867 - reg_w1(gspca_dev, 0x17, 0xe2); 1868 - reg_w1(gspca_dev, 0x01, 
0x60); 1869 - reg_w1(gspca_dev, 0x01, 0x40); 1870 - break; 1871 - case SENSOR_OV7648: 1872 - reg_w1(gspca_dev, 0x01, 0x63); 1873 - reg_w1(gspca_dev, 0x17, 0x20); 1874 - reg_w1(gspca_dev, 0x01, 0x62); 1875 - reg_w1(gspca_dev, 0x01, 0x42); 1876 - break; 1877 - case SENSOR_PO1030: 1878 - case SENSOR_SOI768: 1879 - reg_w1(gspca_dev, 0x01, 0x61); 1880 - reg_w1(gspca_dev, 0x17, 0x20); 1881 - reg_w1(gspca_dev, 0x01, 0x60); 1882 - reg_w1(gspca_dev, 0x01, 0x40); 1883 - break; 1884 - case SENSOR_PO2030N: 1885 - case SENSOR_OV7660: 1886 - reg_w1(gspca_dev, 0x01, 0x63); 1887 - reg_w1(gspca_dev, 0x17, 0x20); 1888 - reg_w1(gspca_dev, 0x01, 0x62); 1889 - reg_w1(gspca_dev, 0x01, 0x42); 1890 - break; 1891 - case SENSOR_SP80708: 1892 - reg_w1(gspca_dev, 0x01, 0x63); 1893 - reg_w1(gspca_dev, 0x17, 0x20); 1894 - reg_w1(gspca_dev, 0x01, 0x62); 1895 - reg_w1(gspca_dev, 0x01, 0x42); 1896 - msleep(100); 1897 - reg_w1(gspca_dev, 0x02, 0x62); 1898 - break; 1899 - default: 1900 - /* case SENSOR_HV7131R: */ 1901 - /* case SENSOR_MI0360: */ 1902 - /* case SENSOR_MO4000: */ 1903 - reg_w1(gspca_dev, 0x01, 0x43); 1904 - reg_w1(gspca_dev, 0x17, 0x61); 1905 - reg_w1(gspca_dev, 0x01, 0x42); 1906 - if (sd->sensor == SENSOR_HV7131R) 1907 - hv7131r_probe(gspca_dev); 1908 - break; 1909 - } 1910 - } 1911 - 1912 1758 /* this function is called at probe time */ 1913 1759 static int sd_config(struct gspca_dev *gspca_dev, 1914 1760 const struct usb_device_id *id) ··· 1782 1898 struct cam *cam; 1783 1899 1784 1900 sd->bridge = id->driver_info >> 16; 1785 - sd->sensor = id->driver_info; 1901 + sd->sensor = id->driver_info >> 8; 1902 + sd->flags = id->driver_info; 1786 1903 1787 1904 cam = &gspca_dev->cam; 1788 1905 if (sd->sensor == SENSOR_ADCM1700) { ··· 1814 1929 /* setup a selector by bridge */ 1815 1930 reg_w1(gspca_dev, 0xf1, 0x01); 1816 1931 reg_r(gspca_dev, 0x00, 1); 1817 - reg_w1(gspca_dev, 0xf1, gspca_dev->usb_buf[0]); 1932 + reg_w1(gspca_dev, 0xf1, 0x00); 1818 1933 reg_r(gspca_dev, 0x00, 1); /* get 
sonix chip id */ 1819 1934 regF1 = gspca_dev->usb_buf[0]; 1820 1935 if (gspca_dev->usb_err < 0) ··· 2308 2423 { 2309 2424 struct sd *sd = (struct sd *) gspca_dev; 2310 2425 int i; 2311 - u8 reg1, reg17; 2426 + u8 reg01, reg17; 2427 + u8 reg0102[2]; 2312 2428 const u8 *sn9c1xx; 2313 2429 const u8 (*init)[8]; 2430 + const u8 *reg9a; 2314 2431 int mode; 2432 + static const u8 reg9a_def[] = 2433 + {0x00, 0x40, 0x20, 0x00, 0x00, 0x00}; 2434 + static const u8 reg9a_spec[] = 2435 + {0x00, 0x40, 0x38, 0x30, 0x00, 0x20}; 2436 + static const u8 regd4[] = {0x60, 0x00, 0x00}; 2315 2437 static const u8 C0[] = { 0x2d, 0x2d, 0x3a, 0x05, 0x04, 0x3f }; 2316 2438 static const u8 CA[] = { 0x28, 0xd8, 0x14, 0xec }; 2317 2439 static const u8 CA_adcm1700[] = ··· 2340 2448 2341 2449 /* initialize the bridge */ 2342 2450 sn9c1xx = sn_tb[sd->sensor]; 2343 - bridge_init(gspca_dev, sn9c1xx); 2451 + 2452 + /* sensor clock already enabled in sd_init */ 2453 + /* reg_w1(gspca_dev, 0xf1, 0x00); */ 2454 + reg01 = sn9c1xx[1]; 2455 + if (sd->flags & PDN_INV) 2456 + reg01 ^= S_PDN_INV; /* power down inverted */ 2457 + reg_w1(gspca_dev, 0x01, reg01); 2458 + 2459 + /* configure gpio */ 2460 + reg0102[0] = reg01; 2461 + reg0102[1] = sn9c1xx[2]; 2462 + if (gspca_dev->audio) 2463 + reg0102[1] |= 0x04; /* keep the audio connection */ 2464 + reg_w(gspca_dev, 0x01, reg0102, 2); 2465 + reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2); 2466 + reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5); 2467 + switch (sd->sensor) { 2468 + case SENSOR_GC0307: 2469 + case SENSOR_OV7660: 2470 + case SENSOR_PO1030: 2471 + case SENSOR_PO2030N: 2472 + case SENSOR_SOI768: 2473 + case SENSOR_SP80708: 2474 + reg9a = reg9a_spec; 2475 + break; 2476 + default: 2477 + reg9a = reg9a_def; 2478 + break; 2479 + } 2480 + reg_w(gspca_dev, 0x9a, reg9a, 6); 2481 + 2482 + reg_w(gspca_dev, 0xd4, regd4, sizeof regd4); 2483 + 2484 + reg_w(gspca_dev, 0x03, &sn9c1xx[3], 0x0f); 2485 + 2486 + reg17 = sn9c1xx[0x17]; 2487 + switch (sd->sensor) { 2488 + case 
SENSOR_GC0307: 2489 + msleep(50); /*fixme: is it useful? */ 2490 + break; 2491 + case SENSOR_OM6802: 2492 + msleep(10); 2493 + reg_w1(gspca_dev, 0x02, 0x73); 2494 + reg17 |= SEN_CLK_EN; 2495 + reg_w1(gspca_dev, 0x17, reg17); 2496 + reg_w1(gspca_dev, 0x01, 0x22); 2497 + msleep(100); 2498 + reg01 = SCL_SEL_OD | S_PDN_INV; 2499 + reg17 &= MCK_SIZE_MASK; 2500 + reg17 |= 0x04; /* clock / 4 */ 2501 + break; 2502 + } 2503 + reg01 |= SYS_SEL_48M; 2504 + reg_w1(gspca_dev, 0x01, reg01); 2505 + reg17 |= SEN_CLK_EN; 2506 + reg_w1(gspca_dev, 0x17, reg17); 2507 + reg01 &= ~S_PWR_DN; /* sensor power on */ 2508 + reg_w1(gspca_dev, 0x01, reg01); 2509 + reg01 &= ~SYS_SEL_48M; 2510 + reg_w1(gspca_dev, 0x01, reg01); 2511 + 2512 + switch (sd->sensor) { 2513 + case SENSOR_HV7131R: 2514 + hv7131r_probe(gspca_dev); /*fixme: is it useful? */ 2515 + break; 2516 + case SENSOR_OM6802: 2517 + msleep(10); 2518 + reg_w1(gspca_dev, 0x01, reg01); 2519 + i2c_w8(gspca_dev, om6802_init0[0]); 2520 + i2c_w8(gspca_dev, om6802_init0[1]); 2521 + msleep(15); 2522 + reg_w1(gspca_dev, 0x02, 0x71); 2523 + msleep(150); 2524 + break; 2525 + case SENSOR_SP80708: 2526 + msleep(100); 2527 + reg_w1(gspca_dev, 0x02, 0x62); 2528 + break; 2529 + } 2344 2530 2345 2531 /* initialize the sensor */ 2346 2532 i2c_w_seq(gspca_dev, sensor_init[sd->sensor]); ··· 2446 2476 } 2447 2477 reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]); 2448 2478 switch (sd->sensor) { 2449 - case SENSOR_GC0307: 2450 - reg17 = 0xa2; 2451 - break; 2452 - case SENSOR_MT9V111: 2453 - case SENSOR_MI0360B: 2454 - reg17 = 0xe0; 2455 - break; 2456 - case SENSOR_ADCM1700: 2457 - case SENSOR_OV7630: 2458 - reg17 = 0xe2; 2459 - break; 2460 - case SENSOR_OV7648: 2461 - reg17 = 0x20; 2462 - break; 2463 - case SENSOR_OV7660: 2464 - case SENSOR_SOI768: 2465 - reg17 = 0xa0; 2466 - break; 2467 - case SENSOR_PO1030: 2468 - case SENSOR_PO2030N: 2469 - reg17 = 0xa0; 2479 + case SENSOR_OM6802: 2480 + /* case SENSOR_OV7648: * fixme: sometimes */ 2470 2481 break; 2471 2482 
default: 2472 - reg17 = 0x60; 2483 + reg17 |= DEF_EN; 2473 2484 break; 2474 2485 } 2475 2486 reg_w1(gspca_dev, 0x17, reg17); ··· 2497 2546 2498 2547 init = NULL; 2499 2548 mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; 2500 - if (mode) 2501 - reg1 = 0x46; /* 320x240: clk 48Mhz, video trf enable */ 2502 - else 2503 - reg1 = 0x06; /* 640x480: clk 24Mhz, video trf enable */ 2504 - reg17 = 0x61; /* 0x:20: enable sensor clock */ 2549 + reg01 |= SYS_SEL_48M | V_TX_EN; 2550 + reg17 &= ~MCK_SIZE_MASK; 2551 + reg17 |= 0x02; /* clock / 2 */ 2505 2552 switch (sd->sensor) { 2506 2553 case SENSOR_ADCM1700: 2507 2554 init = adcm1700_sensor_param1; 2508 - reg1 = 0x46; 2509 - reg17 = 0xe2; 2510 2555 break; 2511 2556 case SENSOR_GC0307: 2512 2557 init = gc0307_sensor_param1; 2513 - reg17 = 0xa2; 2514 - reg1 = 0x44; 2558 + break; 2559 + case SENSOR_HV7131R: 2560 + case SENSOR_MI0360: 2561 + if (mode) 2562 + reg01 |= SYS_SEL_48M; /* 320x240: clk 48Mhz */ 2563 + else 2564 + reg01 &= ~SYS_SEL_48M; /* 640x480: clk 24Mhz */ 2565 + reg17 &= ~MCK_SIZE_MASK; 2566 + reg17 |= 0x01; /* clock / 1 */ 2515 2567 break; 2516 2568 case SENSOR_MI0360B: 2517 2569 init = mi0360b_sensor_param1; 2518 - reg1 &= ~0x02; /* don't inverse pin S_PWR_DN */ 2519 - reg17 = 0xe2; 2520 2570 break; 2521 2571 case SENSOR_MO4000: 2522 - if (mode) { 2523 - /* reg1 = 0x46; * 320 clk 48Mhz 60fp/s */ 2524 - reg1 = 0x06; /* clk 24Mz */ 2525 - } else { 2526 - reg17 = 0x22; /* 640 MCKSIZE */ 2527 - /* reg1 = 0x06; * 640 clk 24Mz (done) */ 2572 + if (mode) { /* if 320x240 */ 2573 + reg01 &= ~SYS_SEL_48M; /* clk 24Mz */ 2574 + reg17 &= ~MCK_SIZE_MASK; 2575 + reg17 |= 0x01; /* clock / 1 */ 2528 2576 } 2529 2577 break; 2530 2578 case SENSOR_MT9V111: 2531 2579 init = mt9v111_sensor_param1; 2532 - if (mode) { 2533 - reg1 = 0x04; /* 320 clk 48Mhz */ 2534 - } else { 2535 - /* reg1 = 0x06; * 640 clk 24Mz (done) */ 2536 - reg17 = 0xc2; 2537 - } 2538 2580 break; 2539 2581 case SENSOR_OM6802: 2540 2582 init = 
om6802_sensor_param1; 2541 - reg17 = 0x64; /* 640 MCKSIZE */ 2583 + if (!mode) { /* if 640x480 */ 2584 + reg17 &= ~MCK_SIZE_MASK; 2585 + reg17 |= 0x01; /* clock / 4 */ 2586 + } 2542 2587 break; 2543 2588 case SENSOR_OV7630: 2544 2589 init = ov7630_sensor_param1; 2545 - reg17 = 0xe2; 2546 - reg1 = 0x44; 2547 2590 break; 2548 2591 case SENSOR_OV7648: 2549 2592 init = ov7648_sensor_param1; 2550 - reg17 = 0x21; 2551 - /* reg1 = 0x42; * 42 - 46? */ 2593 + reg17 &= ~MCK_SIZE_MASK; 2594 + reg17 |= 0x01; /* clock / 1 */ 2552 2595 break; 2553 2596 case SENSOR_OV7660: 2554 2597 init = ov7660_sensor_param1; 2555 - if (sd->bridge == BRIDGE_SN9C120) { 2556 - if (mode) { /* 320x240 - 160x120 */ 2557 - reg17 = 0xa2; 2558 - reg1 = 0x44; /* 48 Mhz, video trf eneble */ 2559 - } 2560 - } else { 2561 - reg17 = 0x22; 2562 - reg1 = 0x06; /* 24 Mhz, video trf eneble 2563 - * inverse power down */ 2564 - } 2565 2598 break; 2566 2599 case SENSOR_PO1030: 2567 2600 init = po1030_sensor_param1; 2568 - reg17 = 0xa2; 2569 - reg1 = 0x44; 2570 2601 break; 2571 2602 case SENSOR_PO2030N: 2572 2603 init = po2030n_sensor_param1; 2573 - reg1 = 0x46; 2574 - reg17 = 0xa2; 2575 2604 break; 2576 2605 case SENSOR_SOI768: 2577 2606 init = soi768_sensor_param1; 2578 - reg1 = 0x44; 2579 - reg17 = 0xa2; 2580 2607 break; 2581 2608 case SENSOR_SP80708: 2582 2609 init = sp80708_sensor_param1; 2583 - if (mode) { 2584 - /*?? 
reg1 = 0x04; * 320 clk 48Mhz */ 2585 - } else { 2586 - reg1 = 0x46; /* 640 clk 48Mz */ 2587 - reg17 = 0xa2; 2588 - } 2589 2610 break; 2590 2611 } 2591 2612 ··· 2607 2684 setjpegqual(gspca_dev); 2608 2685 2609 2686 reg_w1(gspca_dev, 0x17, reg17); 2610 - reg_w1(gspca_dev, 0x01, reg1); 2687 + reg_w1(gspca_dev, 0x01, reg01); 2688 + sd->reg01 = reg01; 2689 + sd->reg17 = reg17; 2611 2690 2612 2691 sethvflip(gspca_dev); 2613 2692 setbrightness(gspca_dev); ··· 2631 2706 { 0xa1, 0x21, 0x76, 0x20, 0x00, 0x00, 0x00, 0x10 }; 2632 2707 static const u8 stopsoi768[] = 2633 2708 { 0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10 }; 2634 - u8 data; 2635 - const u8 *sn9c1xx; 2709 + u8 reg01; 2710 + u8 reg17; 2636 2711 2637 - data = 0x0b; 2712 + reg01 = sd->reg01; 2713 + reg17 = sd->reg17 & ~SEN_CLK_EN; 2638 2714 switch (sd->sensor) { 2715 + case SENSOR_ADCM1700: 2639 2716 case SENSOR_GC0307: 2640 - data = 0x29; 2717 + case SENSOR_PO2030N: 2718 + case SENSOR_SP80708: 2719 + reg01 |= LED; 2720 + reg_w1(gspca_dev, 0x01, reg01); 2721 + reg01 &= ~(LED | V_TX_EN); 2722 + reg_w1(gspca_dev, 0x01, reg01); 2723 + /* reg_w1(gspca_dev, 0x02, 0x??); * LED off ? */ 2641 2724 break; 2642 2725 case SENSOR_HV7131R: 2726 + reg01 &= ~V_TX_EN; 2727 + reg_w1(gspca_dev, 0x01, reg01); 2643 2728 i2c_w8(gspca_dev, stophv7131); 2644 - data = 0x2b; 2645 2729 break; 2646 2730 case SENSOR_MI0360: 2647 2731 case SENSOR_MI0360B: 2732 + reg01 &= ~V_TX_EN; 2733 + reg_w1(gspca_dev, 0x01, reg01); 2734 + /* reg_w1(gspca_dev, 0x02, 0x40); * LED off ? 
*/ 2648 2735 i2c_w8(gspca_dev, stopmi0360); 2649 - data = 0x29; 2650 2736 break; 2651 - case SENSOR_OV7648: 2652 - i2c_w8(gspca_dev, stopov7648); 2653 - /* fall thru */ 2654 2737 case SENSOR_MT9V111: 2655 - case SENSOR_OV7630: 2738 + case SENSOR_OM6802: 2656 2739 case SENSOR_PO1030: 2657 - data = 0x29; 2740 + reg01 &= ~V_TX_EN; 2741 + reg_w1(gspca_dev, 0x01, reg01); 2742 + break; 2743 + case SENSOR_OV7630: 2744 + case SENSOR_OV7648: 2745 + reg01 &= ~V_TX_EN; 2746 + reg_w1(gspca_dev, 0x01, reg01); 2747 + i2c_w8(gspca_dev, stopov7648); 2748 + break; 2749 + case SENSOR_OV7660: 2750 + reg01 &= ~V_TX_EN; 2751 + reg_w1(gspca_dev, 0x01, reg01); 2658 2752 break; 2659 2753 case SENSOR_SOI768: 2660 2754 i2c_w8(gspca_dev, stopsoi768); 2661 - data = 0x29; 2662 2755 break; 2663 2756 } 2664 - sn9c1xx = sn_tb[sd->sensor]; 2665 - reg_w1(gspca_dev, 0x01, sn9c1xx[1]); 2666 - reg_w1(gspca_dev, 0x17, sn9c1xx[0x17]); 2667 - reg_w1(gspca_dev, 0x01, sn9c1xx[1]); 2668 - reg_w1(gspca_dev, 0x01, data); 2757 + 2758 + reg01 |= SCL_SEL_OD; 2759 + reg_w1(gspca_dev, 0x01, reg01); 2760 + reg01 |= S_PWR_DN; /* sensor power down */ 2761 + reg_w1(gspca_dev, 0x01, reg01); 2762 + reg_w1(gspca_dev, 0x17, reg17); 2763 + reg01 &= ~SYS_SEL_48M; /* clock 24MHz */ 2764 + reg_w1(gspca_dev, 0x01, reg01); 2765 + reg01 |= LED; 2766 + reg_w1(gspca_dev, 0x01, reg01); 2669 2767 /* Don't disable sensor clock as that disables the button on the cam */ 2670 2768 /* reg_w1(gspca_dev, 0xf1, 0x01); */ 2671 2769 } ··· 2902 2954 /* -- module initialisation -- */ 2903 2955 #define BS(bridge, sensor) \ 2904 2956 .driver_info = (BRIDGE_ ## bridge << 16) \ 2905 - | SENSOR_ ## sensor 2957 + | (SENSOR_ ## sensor << 8) 2958 + #define BSF(bridge, sensor, flags) \ 2959 + .driver_info = (BRIDGE_ ## bridge << 16) \ 2960 + | (SENSOR_ ## sensor << 8) \ 2961 + | (flags) 2906 2962 static const __devinitdata struct usb_device_id device_table[] = { 2907 2963 #if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE 2908 2964 
{USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)}, 2909 2965 {USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)}, 2910 2966 #endif 2911 - {USB_DEVICE(0x045e, 0x00f5), BS(SN9C105, OV7660)}, 2912 - {USB_DEVICE(0x045e, 0x00f7), BS(SN9C105, OV7660)}, 2967 + {USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, PDN_INV)}, 2968 + {USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, PDN_INV)}, 2913 2969 {USB_DEVICE(0x0471, 0x0327), BS(SN9C105, MI0360)}, 2914 2970 {USB_DEVICE(0x0471, 0x0328), BS(SN9C105, MI0360)}, 2915 2971 {USB_DEVICE(0x0471, 0x0330), BS(SN9C105, MI0360)},
+7 -7
drivers/media/video/meye.c
··· 1659 1659 .open = meye_open, 1660 1660 .release = meye_release, 1661 1661 .mmap = meye_mmap, 1662 - .ioctl = video_ioctl2, 1662 + .unlocked_ioctl = video_ioctl2, 1663 1663 .poll = meye_poll, 1664 1664 }; 1665 1665 ··· 1831 1831 msleep(1); 1832 1832 mchip_set(MCHIP_MM_INTA, MCHIP_MM_INTA_HIC_1_MASK); 1833 1833 1834 - if (video_register_device(meye.vdev, VFL_TYPE_GRABBER, 1835 - video_nr) < 0) { 1836 - v4l2_err(v4l2_dev, "video_register_device failed\n"); 1837 - goto outvideoreg; 1838 - } 1839 - 1840 1834 mutex_init(&meye.lock); 1841 1835 init_waitqueue_head(&meye.proc_list); 1842 1836 meye.brightness = 32 << 10; ··· 1851 1857 sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERASHARPNESS, 32); 1852 1858 sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAPICTURE, 0); 1853 1859 sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAAGC, 48); 1860 + 1861 + if (video_register_device(meye.vdev, VFL_TYPE_GRABBER, 1862 + video_nr) < 0) { 1863 + v4l2_err(v4l2_dev, "video_register_device failed\n"); 1864 + goto outvideoreg; 1865 + } 1854 1866 1855 1867 v4l2_info(v4l2_dev, "Motion Eye Camera Driver v%s.\n", 1856 1868 MEYE_DRIVER_VERSION);
+1 -1
drivers/media/video/pms.c
··· 932 932 933 933 static const struct v4l2_file_operations pms_fops = { 934 934 .owner = THIS_MODULE, 935 - .ioctl = video_ioctl2, 935 + .unlocked_ioctl = video_ioctl2, 936 936 .read = pms_read, 937 937 }; 938 938
+8 -5
drivers/media/video/sh_vou.c
··· 75 75 int pix_idx; 76 76 struct videobuf_buffer *active; 77 77 enum sh_vou_status status; 78 + struct mutex fop_lock; 78 79 }; 79 80 80 81 struct sh_vou_file { ··· 236 235 vb->state = VIDEOBUF_NEEDS_INIT; 237 236 } 238 237 239 - /* Locking: caller holds vq->vb_lock mutex */ 238 + /* Locking: caller holds fop_lock mutex */ 240 239 static int sh_vou_buf_setup(struct videobuf_queue *vq, unsigned int *count, 241 240 unsigned int *size) 242 241 { ··· 258 257 return 0; 259 258 } 260 259 261 - /* Locking: caller holds vq->vb_lock mutex */ 260 + /* Locking: caller holds fop_lock mutex */ 262 261 static int sh_vou_buf_prepare(struct videobuf_queue *vq, 263 262 struct videobuf_buffer *vb, 264 263 enum v4l2_field field) ··· 307 306 return 0; 308 307 } 309 308 310 - /* Locking: caller holds vq->vb_lock mutex and vq->irqlock spinlock */ 309 + /* Locking: caller holds fop_lock mutex and vq->irqlock spinlock */ 311 310 static void sh_vou_buf_queue(struct videobuf_queue *vq, 312 311 struct videobuf_buffer *vb) 313 312 { ··· 1191 1190 V4L2_BUF_TYPE_VIDEO_OUTPUT, 1192 1191 V4L2_FIELD_NONE, 1193 1192 sizeof(struct videobuf_buffer), vdev, 1194 - NULL); 1193 + &vou_dev->fop_lock); 1195 1194 1196 1195 return 0; 1197 1196 } ··· 1293 1292 .owner = THIS_MODULE, 1294 1293 .open = sh_vou_open, 1295 1294 .release = sh_vou_release, 1296 - .ioctl = video_ioctl2, 1295 + .unlocked_ioctl = video_ioctl2, 1297 1296 .mmap = sh_vou_mmap, 1298 1297 .poll = sh_vou_poll, 1299 1298 }; ··· 1332 1331 1333 1332 INIT_LIST_HEAD(&vou_dev->queue); 1334 1333 spin_lock_init(&vou_dev->lock); 1334 + mutex_init(&vou_dev->fop_lock); 1335 1335 atomic_set(&vou_dev->use_count, 0); 1336 1336 vou_dev->pdata = vou_pdata; 1337 1337 vou_dev->status = SH_VOU_IDLE; ··· 1390 1388 vdev->tvnorms |= V4L2_STD_PAL; 1391 1389 vdev->v4l2_dev = &vou_dev->v4l2_dev; 1392 1390 vdev->release = video_device_release; 1391 + vdev->lock = &vou_dev->fop_lock; 1393 1392 1394 1393 vou_dev->vdev = vdev; 1395 1394 video_set_drvdata(vdev, 
vou_dev);
+1 -1
drivers/media/video/sn9c102/sn9c102_core.c
··· 3238 3238 .owner = THIS_MODULE, 3239 3239 .open = sn9c102_open, 3240 3240 .release = sn9c102_release, 3241 - .ioctl = sn9c102_ioctl, 3241 + .unlocked_ioctl = sn9c102_ioctl, 3242 3242 .read = sn9c102_read, 3243 3243 .poll = sn9c102_poll, 3244 3244 .mmap = sn9c102_mmap,
+47 -1
drivers/media/video/uvc/uvc_ctrl.c
··· 785 785 } 786 786 } 787 787 788 - struct uvc_control *uvc_find_control(struct uvc_video_chain *chain, 788 + static struct uvc_control *uvc_find_control(struct uvc_video_chain *chain, 789 789 __u32 v4l2_id, struct uvc_control_mapping **mapping) 790 790 { 791 791 struct uvc_control *ctrl = NULL; ··· 938 938 if (ctrl->info.flags & UVC_CONTROL_GET_RES) 939 939 v4l2_ctrl->step = mapping->get(mapping, UVC_GET_RES, 940 940 uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES)); 941 + 942 + done: 943 + mutex_unlock(&chain->ctrl_mutex); 944 + return ret; 945 + } 946 + 947 + /* 948 + * Mapping V4L2 controls to UVC controls can be straighforward if done well. 949 + * Most of the UVC controls exist in V4L2, and can be mapped directly. Some 950 + * must be grouped (for instance the Red Balance, Blue Balance and Do White 951 + * Balance V4L2 controls use the White Balance Component UVC control) or 952 + * otherwise translated. The approach we take here is to use a translation 953 + * table for the controls that can be mapped directly, and handle the others 954 + * manually. 
955 + */ 956 + int uvc_query_v4l2_menu(struct uvc_video_chain *chain, 957 + struct v4l2_querymenu *query_menu) 958 + { 959 + struct uvc_menu_info *menu_info; 960 + struct uvc_control_mapping *mapping; 961 + struct uvc_control *ctrl; 962 + u32 index = query_menu->index; 963 + u32 id = query_menu->id; 964 + int ret; 965 + 966 + memset(query_menu, 0, sizeof(*query_menu)); 967 + query_menu->id = id; 968 + query_menu->index = index; 969 + 970 + ret = mutex_lock_interruptible(&chain->ctrl_mutex); 971 + if (ret < 0) 972 + return -ERESTARTSYS; 973 + 974 + ctrl = uvc_find_control(chain, query_menu->id, &mapping); 975 + if (ctrl == NULL || mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) { 976 + ret = -EINVAL; 977 + goto done; 978 + } 979 + 980 + if (query_menu->index >= mapping->menu_count) { 981 + ret = -EINVAL; 982 + goto done; 983 + } 984 + 985 + menu_info = &mapping->menu_info[query_menu->index]; 986 + strlcpy(query_menu->name, menu_info->name, sizeof query_menu->name); 941 987 942 988 done: 943 989 mutex_unlock(&chain->ctrl_mutex);
+110 -23
drivers/media/video/uvc/uvc_queue.c
··· 90 90 } 91 91 92 92 /* 93 + * Free the video buffers. 94 + * 95 + * This function must be called with the queue lock held. 96 + */ 97 + static int __uvc_free_buffers(struct uvc_video_queue *queue) 98 + { 99 + unsigned int i; 100 + 101 + for (i = 0; i < queue->count; ++i) { 102 + if (queue->buffer[i].vma_use_count != 0) 103 + return -EBUSY; 104 + } 105 + 106 + if (queue->count) { 107 + vfree(queue->mem); 108 + queue->count = 0; 109 + } 110 + 111 + return 0; 112 + } 113 + 114 + int uvc_free_buffers(struct uvc_video_queue *queue) 115 + { 116 + int ret; 117 + 118 + mutex_lock(&queue->mutex); 119 + ret = __uvc_free_buffers(queue); 120 + mutex_unlock(&queue->mutex); 121 + 122 + return ret; 123 + } 124 + 125 + /* 93 126 * Allocate the video buffers. 94 127 * 95 128 * Pages are reserved to make sure they will not be swapped, as they will be ··· 143 110 144 111 mutex_lock(&queue->mutex); 145 112 146 - if ((ret = uvc_free_buffers(queue)) < 0) 113 + if ((ret = __uvc_free_buffers(queue)) < 0) 147 114 goto done; 148 115 149 116 /* Bail out if no buffers should be allocated. */ ··· 182 149 done: 183 150 mutex_unlock(&queue->mutex); 184 151 return ret; 185 - } 186 - 187 - /* 188 - * Free the video buffers. 189 - * 190 - * This function must be called with the queue lock held. 191 - */ 192 - int uvc_free_buffers(struct uvc_video_queue *queue) 193 - { 194 - unsigned int i; 195 - 196 - for (i = 0; i < queue->count; ++i) { 197 - if (queue->buffer[i].vma_use_count != 0) 198 - return -EBUSY; 199 - } 200 - 201 - if (queue->count) { 202 - vfree(queue->mem); 203 - queue->count = 0; 204 - } 205 - 206 - return 0; 207 152 } 208 153 209 154 /* ··· 373 362 374 363 list_del(&buf->stream); 375 364 __uvc_query_buffer(buf, v4l2_buf); 365 + 366 + done: 367 + mutex_unlock(&queue->mutex); 368 + return ret; 369 + } 370 + 371 + /* 372 + * VMA operations. 
373 + */ 374 + static void uvc_vm_open(struct vm_area_struct *vma) 375 + { 376 + struct uvc_buffer *buffer = vma->vm_private_data; 377 + buffer->vma_use_count++; 378 + } 379 + 380 + static void uvc_vm_close(struct vm_area_struct *vma) 381 + { 382 + struct uvc_buffer *buffer = vma->vm_private_data; 383 + buffer->vma_use_count--; 384 + } 385 + 386 + static const struct vm_operations_struct uvc_vm_ops = { 387 + .open = uvc_vm_open, 388 + .close = uvc_vm_close, 389 + }; 390 + 391 + /* 392 + * Memory-map a video buffer. 393 + * 394 + * This function implements video buffers memory mapping and is intended to be 395 + * used by the device mmap handler. 396 + */ 397 + int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma) 398 + { 399 + struct uvc_buffer *uninitialized_var(buffer); 400 + struct page *page; 401 + unsigned long addr, start, size; 402 + unsigned int i; 403 + int ret = 0; 404 + 405 + start = vma->vm_start; 406 + size = vma->vm_end - vma->vm_start; 407 + 408 + mutex_lock(&queue->mutex); 409 + 410 + for (i = 0; i < queue->count; ++i) { 411 + buffer = &queue->buffer[i]; 412 + if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff) 413 + break; 414 + } 415 + 416 + if (i == queue->count || size != queue->buf_size) { 417 + ret = -EINVAL; 418 + goto done; 419 + } 420 + 421 + /* 422 + * VM_IO marks the area as being an mmaped region for I/O to a 423 + * device. It also prevents the region from being core dumped. 424 + */ 425 + vma->vm_flags |= VM_IO; 426 + 427 + addr = (unsigned long)queue->mem + buffer->buf.m.offset; 428 + while (size > 0) { 429 + page = vmalloc_to_page((void *)addr); 430 + if ((ret = vm_insert_page(vma, start, page)) < 0) 431 + goto done; 432 + 433 + start += PAGE_SIZE; 434 + addr += PAGE_SIZE; 435 + size -= PAGE_SIZE; 436 + } 437 + 438 + vma->vm_ops = &uvc_vm_ops; 439 + vma->vm_private_data = buffer; 440 + uvc_vm_open(vma); 376 441 377 442 done: 378 443 mutex_unlock(&queue->mutex);
+58 -127
drivers/media/video/uvc/uvc_v4l2.c
··· 101 101 */ 102 102 103 103 /* 104 - * Mapping V4L2 controls to UVC controls can be straighforward if done well. 105 - * Most of the UVC controls exist in V4L2, and can be mapped directly. Some 106 - * must be grouped (for instance the Red Balance, Blue Balance and Do White 107 - * Balance V4L2 controls use the White Balance Component UVC control) or 108 - * otherwise translated. The approach we take here is to use a translation 109 - * table for the controls that can be mapped directly, and handle the others 110 - * manually. 111 - */ 112 - static int uvc_v4l2_query_menu(struct uvc_video_chain *chain, 113 - struct v4l2_querymenu *query_menu) 114 - { 115 - struct uvc_menu_info *menu_info; 116 - struct uvc_control_mapping *mapping; 117 - struct uvc_control *ctrl; 118 - u32 index = query_menu->index; 119 - u32 id = query_menu->id; 120 - 121 - ctrl = uvc_find_control(chain, query_menu->id, &mapping); 122 - if (ctrl == NULL || mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) 123 - return -EINVAL; 124 - 125 - if (query_menu->index >= mapping->menu_count) 126 - return -EINVAL; 127 - 128 - memset(query_menu, 0, sizeof(*query_menu)); 129 - query_menu->id = id; 130 - query_menu->index = index; 131 - 132 - menu_info = &mapping->menu_info[query_menu->index]; 133 - strlcpy(query_menu->name, menu_info->name, sizeof query_menu->name); 134 - return 0; 135 - } 136 - 137 - /* 138 104 * Find the frame interval closest to the requested frame interval for the 139 105 * given frame format and size. This should be done by the device as part of 140 106 * the Video Probe and Commit negotiation, but some hardware don't implement ··· 226 260 * developers test their webcams with the Linux driver as well as with 227 261 * the Windows driver). 228 262 */ 263 + mutex_lock(&stream->mutex); 229 264 if (stream->dev->quirks & UVC_QUIRK_PROBE_EXTRAFIELDS) 230 265 probe->dwMaxVideoFrameSize = 231 266 stream->ctrl.dwMaxVideoFrameSize; 232 267 233 268 /* Probe the device. 
*/ 234 269 ret = uvc_probe_video(stream, probe); 270 + mutex_unlock(&stream->mutex); 235 271 if (ret < 0) 236 272 goto done; 237 273 ··· 257 289 static int uvc_v4l2_get_format(struct uvc_streaming *stream, 258 290 struct v4l2_format *fmt) 259 291 { 260 - struct uvc_format *format = stream->cur_format; 261 - struct uvc_frame *frame = stream->cur_frame; 292 + struct uvc_format *format; 293 + struct uvc_frame *frame; 294 + int ret = 0; 262 295 263 296 if (fmt->type != stream->type) 264 297 return -EINVAL; 265 298 266 - if (format == NULL || frame == NULL) 267 - return -EINVAL; 299 + mutex_lock(&stream->mutex); 300 + format = stream->cur_format; 301 + frame = stream->cur_frame; 302 + 303 + if (format == NULL || frame == NULL) { 304 + ret = -EINVAL; 305 + goto done; 306 + } 268 307 269 308 fmt->fmt.pix.pixelformat = format->fcc; 270 309 fmt->fmt.pix.width = frame->wWidth; ··· 282 307 fmt->fmt.pix.colorspace = format->colorspace; 283 308 fmt->fmt.pix.priv = 0; 284 309 285 - return 0; 310 + done: 311 + mutex_unlock(&stream->mutex); 312 + return ret; 286 313 } 287 314 288 315 static int uvc_v4l2_set_format(struct uvc_streaming *stream, ··· 298 321 if (fmt->type != stream->type) 299 322 return -EINVAL; 300 323 301 - if (uvc_queue_allocated(&stream->queue)) 302 - return -EBUSY; 303 - 304 324 ret = uvc_v4l2_try_format(stream, fmt, &probe, &format, &frame); 305 325 if (ret < 0) 306 326 return ret; 327 + 328 + mutex_lock(&stream->mutex); 329 + 330 + if (uvc_queue_allocated(&stream->queue)) { 331 + ret = -EBUSY; 332 + goto done; 333 + } 307 334 308 335 memcpy(&stream->ctrl, &probe, sizeof probe); 309 336 stream->cur_format = format; 310 337 stream->cur_frame = frame; 311 338 312 - return 0; 339 + done: 340 + mutex_unlock(&stream->mutex); 341 + return ret; 313 342 } 314 343 315 344 static int uvc_v4l2_get_streamparm(struct uvc_streaming *stream, ··· 326 343 if (parm->type != stream->type) 327 344 return -EINVAL; 328 345 346 + mutex_lock(&stream->mutex); 329 347 numerator = 
stream->ctrl.dwFrameInterval; 348 + mutex_unlock(&stream->mutex); 349 + 330 350 denominator = 10000000; 331 351 uvc_simplify_fraction(&numerator, &denominator, 8, 333); 332 352 ··· 356 370 static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream, 357 371 struct v4l2_streamparm *parm) 358 372 { 359 - struct uvc_frame *frame = stream->cur_frame; 360 373 struct uvc_streaming_control probe; 361 374 struct v4l2_fract timeperframe; 362 375 uint32_t interval; ··· 364 379 if (parm->type != stream->type) 365 380 return -EINVAL; 366 381 367 - if (uvc_queue_streaming(&stream->queue)) 368 - return -EBUSY; 369 - 370 382 if (parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 371 383 timeperframe = parm->parm.capture.timeperframe; 372 384 else 373 385 timeperframe = parm->parm.output.timeperframe; 374 386 375 - memcpy(&probe, &stream->ctrl, sizeof probe); 376 387 interval = uvc_fraction_to_interval(timeperframe.numerator, 377 388 timeperframe.denominator); 378 - 379 389 uvc_trace(UVC_TRACE_FORMAT, "Setting frame interval to %u/%u (%u).\n", 380 390 timeperframe.numerator, timeperframe.denominator, interval); 381 - probe.dwFrameInterval = uvc_try_frame_interval(frame, interval); 391 + 392 + mutex_lock(&stream->mutex); 393 + 394 + if (uvc_queue_streaming(&stream->queue)) { 395 + mutex_unlock(&stream->mutex); 396 + return -EBUSY; 397 + } 398 + 399 + memcpy(&probe, &stream->ctrl, sizeof probe); 400 + probe.dwFrameInterval = 401 + uvc_try_frame_interval(stream->cur_frame, interval); 382 402 383 403 /* Probe the device with the new settings. */ 384 404 ret = uvc_probe_video(stream, &probe); 385 - if (ret < 0) 405 + if (ret < 0) { 406 + mutex_unlock(&stream->mutex); 386 407 return ret; 408 + } 387 409 388 410 memcpy(&stream->ctrl, &probe, sizeof probe); 411 + mutex_unlock(&stream->mutex); 389 412 390 413 /* Return the actual frame period. 
*/ 391 414 timeperframe.numerator = probe.dwFrameInterval; ··· 521 528 if (uvc_has_privileges(handle)) { 522 529 uvc_video_enable(stream, 0); 523 530 524 - mutex_lock(&stream->queue.mutex); 525 531 if (uvc_free_buffers(&stream->queue) < 0) 526 532 uvc_printk(KERN_ERR, "uvc_v4l2_release: Unable to " 527 533 "free buffers.\n"); 528 - mutex_unlock(&stream->queue.mutex); 529 534 } 530 535 531 536 /* Release the file handle. */ ··· 615 624 } 616 625 617 626 case VIDIOC_QUERYMENU: 618 - return uvc_v4l2_query_menu(chain, arg); 627 + return uvc_query_v4l2_menu(chain, arg); 619 628 620 629 case VIDIOC_G_EXT_CTRLS: 621 630 { ··· 896 905 case VIDIOC_CROPCAP: 897 906 { 898 907 struct v4l2_cropcap *ccap = arg; 899 - struct uvc_frame *frame = stream->cur_frame; 900 908 901 909 if (ccap->type != stream->type) 902 910 return -EINVAL; 903 911 904 912 ccap->bounds.left = 0; 905 913 ccap->bounds.top = 0; 906 - ccap->bounds.width = frame->wWidth; 907 - ccap->bounds.height = frame->wHeight; 914 + 915 + mutex_lock(&stream->mutex); 916 + ccap->bounds.width = stream->cur_frame->wWidth; 917 + ccap->bounds.height = stream->cur_frame->wHeight; 918 + mutex_unlock(&stream->mutex); 908 919 909 920 ccap->defrect = ccap->bounds; 910 921 ··· 923 930 case VIDIOC_REQBUFS: 924 931 { 925 932 struct v4l2_requestbuffers *rb = arg; 926 - unsigned int bufsize = 927 - stream->ctrl.dwMaxVideoFrameSize; 928 933 929 934 if (rb->type != stream->type || 930 935 rb->memory != V4L2_MEMORY_MMAP) ··· 931 940 if ((ret = uvc_acquire_privileges(handle)) < 0) 932 941 return ret; 933 942 934 - ret = uvc_alloc_buffers(&stream->queue, rb->count, bufsize); 943 + mutex_lock(&stream->mutex); 944 + ret = uvc_alloc_buffers(&stream->queue, rb->count, 945 + stream->ctrl.dwMaxVideoFrameSize); 946 + mutex_unlock(&stream->mutex); 935 947 if (ret < 0) 936 948 return ret; 937 949 ··· 982 988 if (!uvc_has_privileges(handle)) 983 989 return -EBUSY; 984 990 991 + mutex_lock(&stream->mutex); 985 992 ret = uvc_video_enable(stream, 1); 993 
+ mutex_unlock(&stream->mutex); 986 994 if (ret < 0) 987 995 return ret; 988 996 break; ··· 1064 1068 return -EINVAL; 1065 1069 } 1066 1070 1067 - /* 1068 - * VMA operations. 1069 - */ 1070 - static void uvc_vm_open(struct vm_area_struct *vma) 1071 - { 1072 - struct uvc_buffer *buffer = vma->vm_private_data; 1073 - buffer->vma_use_count++; 1074 - } 1075 - 1076 - static void uvc_vm_close(struct vm_area_struct *vma) 1077 - { 1078 - struct uvc_buffer *buffer = vma->vm_private_data; 1079 - buffer->vma_use_count--; 1080 - } 1081 - 1082 - static const struct vm_operations_struct uvc_vm_ops = { 1083 - .open = uvc_vm_open, 1084 - .close = uvc_vm_close, 1085 - }; 1086 - 1087 1071 static int uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma) 1088 1072 { 1089 1073 struct uvc_fh *handle = file->private_data; 1090 1074 struct uvc_streaming *stream = handle->stream; 1091 - struct uvc_video_queue *queue = &stream->queue; 1092 - struct uvc_buffer *uninitialized_var(buffer); 1093 - struct page *page; 1094 - unsigned long addr, start, size; 1095 - unsigned int i; 1096 - int ret = 0; 1097 1075 1098 1076 uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_mmap\n"); 1099 1077 1100 - start = vma->vm_start; 1101 - size = vma->vm_end - vma->vm_start; 1102 - 1103 - mutex_lock(&queue->mutex); 1104 - 1105 - for (i = 0; i < queue->count; ++i) { 1106 - buffer = &queue->buffer[i]; 1107 - if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff) 1108 - break; 1109 - } 1110 - 1111 - if (i == queue->count || size != queue->buf_size) { 1112 - ret = -EINVAL; 1113 - goto done; 1114 - } 1115 - 1116 - /* 1117 - * VM_IO marks the area as being an mmaped region for I/O to a 1118 - * device. It also prevents the region from being core dumped. 
1119 - */ 1120 - vma->vm_flags |= VM_IO; 1121 - 1122 - addr = (unsigned long)queue->mem + buffer->buf.m.offset; 1123 - while (size > 0) { 1124 - page = vmalloc_to_page((void *)addr); 1125 - if ((ret = vm_insert_page(vma, start, page)) < 0) 1126 - goto done; 1127 - 1128 - start += PAGE_SIZE; 1129 - addr += PAGE_SIZE; 1130 - size -= PAGE_SIZE; 1131 - } 1132 - 1133 - vma->vm_ops = &uvc_vm_ops; 1134 - vma->vm_private_data = buffer; 1135 - uvc_vm_open(vma); 1136 - 1137 - done: 1138 - mutex_unlock(&queue->mutex); 1139 - return ret; 1078 + return uvc_queue_mmap(&stream->queue, vma); 1140 1079 } 1141 1080 1142 1081 static unsigned int uvc_v4l2_poll(struct file *file, poll_table *wait) ··· 1088 1157 .owner = THIS_MODULE, 1089 1158 .open = uvc_v4l2_open, 1090 1159 .release = uvc_v4l2_release, 1091 - .ioctl = uvc_v4l2_ioctl, 1160 + .unlocked_ioctl = uvc_v4l2_ioctl, 1092 1161 .read = uvc_v4l2_read, 1093 1162 .mmap = uvc_v4l2_mmap, 1094 1163 .poll = uvc_v4l2_poll,
-3
drivers/media/video/uvc/uvc_video.c
··· 293 293 unsigned int i; 294 294 int ret; 295 295 296 - mutex_lock(&stream->mutex); 297 - 298 296 /* Perform probing. The device should adjust the requested values 299 297 * according to its capabilities. However, some devices, namely the 300 298 * first generation UVC Logitech webcams, don't implement the Video ··· 344 346 } 345 347 346 348 done: 347 - mutex_unlock(&stream->mutex); 348 349 return ret; 349 350 } 350 351
+7 -3
drivers/media/video/uvc/uvcvideo.h
··· 436 436 struct uvc_streaming_control ctrl; 437 437 struct uvc_format *cur_format; 438 438 struct uvc_frame *cur_frame; 439 - 439 + /* Protect access to ctrl, cur_format, cur_frame and hardware video 440 + * probe control. 441 + */ 440 442 struct mutex mutex; 441 443 442 444 unsigned int frozen : 1; ··· 576 574 extern void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect); 577 575 extern struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue, 578 576 struct uvc_buffer *buf); 577 + extern int uvc_queue_mmap(struct uvc_video_queue *queue, 578 + struct vm_area_struct *vma); 579 579 extern unsigned int uvc_queue_poll(struct uvc_video_queue *queue, 580 580 struct file *file, poll_table *wait); 581 581 extern int uvc_queue_allocated(struct uvc_video_queue *queue); ··· 610 606 extern int uvc_status_resume(struct uvc_device *dev); 611 607 612 608 /* Controls */ 613 - extern struct uvc_control *uvc_find_control(struct uvc_video_chain *chain, 614 - __u32 v4l2_id, struct uvc_control_mapping **mapping); 615 609 extern int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain, 616 610 struct v4l2_queryctrl *v4l2_ctrl); 611 + extern int uvc_query_v4l2_menu(struct uvc_video_chain *chain, 612 + struct v4l2_querymenu *query_menu); 617 613 618 614 extern int uvc_ctrl_add_mapping(struct uvc_video_chain *chain, 619 615 const struct uvc_control_mapping *mapping);
+51 -18
drivers/media/video/v4l2-dev.c
··· 186 186 size_t sz, loff_t *off) 187 187 { 188 188 struct video_device *vdev = video_devdata(filp); 189 - int ret = -EIO; 189 + int ret = -ENODEV; 190 190 191 191 if (!vdev->fops->read) 192 192 return -EINVAL; 193 - if (vdev->lock) 194 - mutex_lock(vdev->lock); 193 + if (vdev->lock && mutex_lock_interruptible(vdev->lock)) 194 + return -ERESTARTSYS; 195 195 if (video_is_registered(vdev)) 196 196 ret = vdev->fops->read(filp, buf, sz, off); 197 197 if (vdev->lock) ··· 203 203 size_t sz, loff_t *off) 204 204 { 205 205 struct video_device *vdev = video_devdata(filp); 206 - int ret = -EIO; 206 + int ret = -ENODEV; 207 207 208 208 if (!vdev->fops->write) 209 209 return -EINVAL; 210 - if (vdev->lock) 211 - mutex_lock(vdev->lock); 210 + if (vdev->lock && mutex_lock_interruptible(vdev->lock)) 211 + return -ERESTARTSYS; 212 212 if (video_is_registered(vdev)) 213 213 ret = vdev->fops->write(filp, buf, sz, off); 214 214 if (vdev->lock) ··· 219 219 static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll) 220 220 { 221 221 struct video_device *vdev = video_devdata(filp); 222 - int ret = DEFAULT_POLLMASK; 222 + int ret = POLLERR | POLLHUP; 223 223 224 224 if (!vdev->fops->poll) 225 - return ret; 225 + return DEFAULT_POLLMASK; 226 226 if (vdev->lock) 227 227 mutex_lock(vdev->lock); 228 228 if (video_is_registered(vdev)) ··· 238 238 int ret = -ENODEV; 239 239 240 240 if (vdev->fops->unlocked_ioctl) { 241 - if (vdev->lock) 242 - mutex_lock(vdev->lock); 241 + if (vdev->lock && mutex_lock_interruptible(vdev->lock)) 242 + return -ERESTARTSYS; 243 243 if (video_is_registered(vdev)) 244 244 ret = vdev->fops->unlocked_ioctl(filp, cmd, arg); 245 245 if (vdev->lock) 246 246 mutex_unlock(vdev->lock); 247 247 } else if (vdev->fops->ioctl) { 248 - /* TODO: convert all drivers to unlocked_ioctl */ 248 + /* This code path is a replacement for the BKL. 
It is a major 249 + * hack but it will have to do for those drivers that are not 250 + * yet converted to use unlocked_ioctl. 251 + * 252 + * There are two options: if the driver implements struct 253 + * v4l2_device, then the lock defined there is used to 254 + * serialize the ioctls. Otherwise the v4l2 core lock defined 255 + * below is used. This lock is really bad since it serializes 256 + * completely independent devices. 257 + * 258 + * Both variants suffer from the same problem: if the driver 259 + * sleeps, then it blocks all ioctls since the lock is still 260 + * held. This is very common for VIDIOC_DQBUF since that 261 + * normally waits for a frame to arrive. As a result any other 262 + * ioctl calls will proceed very, very slowly since each call 263 + * will have to wait for the VIDIOC_QBUF to finish. Things that 264 + * should take 0.01s may now take 10-20 seconds. 265 + * 266 + * The workaround is to *not* take the lock for VIDIOC_DQBUF. 267 + * This actually works OK for videobuf-based drivers, since 268 + * videobuf will take its own internal lock. 269 + */ 249 270 static DEFINE_MUTEX(v4l2_ioctl_mutex); 271 + struct mutex *m = vdev->v4l2_dev ? 
272 + &vdev->v4l2_dev->ioctl_lock : &v4l2_ioctl_mutex; 250 273 251 - mutex_lock(&v4l2_ioctl_mutex); 274 + if (cmd != VIDIOC_DQBUF && mutex_lock_interruptible(m)) 275 + return -ERESTARTSYS; 252 276 if (video_is_registered(vdev)) 253 277 ret = vdev->fops->ioctl(filp, cmd, arg); 254 - mutex_unlock(&v4l2_ioctl_mutex); 278 + if (cmd != VIDIOC_DQBUF) 279 + mutex_unlock(m); 255 280 } else 256 281 ret = -ENOTTY; 257 282 ··· 290 265 291 266 if (!vdev->fops->mmap) 292 267 return ret; 293 - if (vdev->lock) 294 - mutex_lock(vdev->lock); 268 + if (vdev->lock && mutex_lock_interruptible(vdev->lock)) 269 + return -ERESTARTSYS; 295 270 if (video_is_registered(vdev)) 296 271 ret = vdev->fops->mmap(filp, vm); 297 272 if (vdev->lock) ··· 309 284 mutex_lock(&videodev_lock); 310 285 vdev = video_devdata(filp); 311 286 /* return ENODEV if the video device has already been removed. */ 312 - if (vdev == NULL) { 287 + if (vdev == NULL || !video_is_registered(vdev)) { 313 288 mutex_unlock(&videodev_lock); 314 289 return -ENODEV; 315 290 } ··· 317 292 video_get(vdev); 318 293 mutex_unlock(&videodev_lock); 319 294 if (vdev->fops->open) { 320 - if (vdev->lock) 321 - mutex_lock(vdev->lock); 295 + if (vdev->lock && mutex_lock_interruptible(vdev->lock)) { 296 + ret = -ERESTARTSYS; 297 + goto err; 298 + } 322 299 if (video_is_registered(vdev)) 323 300 ret = vdev->fops->open(filp); 324 301 else ··· 329 302 mutex_unlock(vdev->lock); 330 303 } 331 304 305 + err: 332 306 /* decrease the refcount in case of an error */ 333 307 if (ret) 334 308 video_put(vdev); ··· 624 596 if (!vdev || !video_is_registered(vdev)) 625 597 return; 626 598 599 + mutex_lock(&videodev_lock); 600 + /* This must be in a critical section to prevent a race with v4l2_open. 601 + * Once this bit has been cleared video_get may never be called again. 
602 + */ 627 603 clear_bit(V4L2_FL_REGISTERED, &vdev->flags); 604 + mutex_unlock(&videodev_lock); 628 605 device_unregister(&vdev->dev); 629 606 } 630 607 EXPORT_SYMBOL(video_unregister_device);
+1
drivers/media/video/v4l2-device.c
··· 35 35 36 36 INIT_LIST_HEAD(&v4l2_dev->subdevs); 37 37 spin_lock_init(&v4l2_dev->lock); 38 + mutex_init(&v4l2_dev->ioctl_lock); 38 39 v4l2_dev->dev = dev; 39 40 if (dev == NULL) { 40 41 /* If dev == NULL, then name must be filled in by the caller */
+1 -1
drivers/media/video/w9966.c
··· 815 815 816 816 static const struct v4l2_file_operations w9966_fops = { 817 817 .owner = THIS_MODULE, 818 - .ioctl = video_ioctl2, 818 + .unlocked_ioctl = video_ioctl2, 819 819 .read = w9966_v4l_read, 820 820 }; 821 821
+5 -76
drivers/pci/bus.c
··· 64 64 } 65 65 } 66 66 67 - static bool pci_bus_resource_better(struct resource *res1, bool pos1, 68 - struct resource *res2, bool pos2) 69 - { 70 - /* If exactly one is positive decode, always prefer that one */ 71 - if (pos1 != pos2) 72 - return pos1 ? true : false; 73 - 74 - /* Prefer the one that contains the highest address */ 75 - if (res1->end != res2->end) 76 - return (res1->end > res2->end) ? true : false; 77 - 78 - /* Otherwise, prefer the one with highest "center of gravity" */ 79 - if (res1->start != res2->start) 80 - return (res1->start > res2->start) ? true : false; 81 - 82 - /* Otherwise, choose one arbitrarily (but consistently) */ 83 - return (res1 > res2) ? true : false; 84 - } 85 - 86 - static bool pci_bus_resource_positive(struct pci_bus *bus, struct resource *res) 87 - { 88 - struct pci_bus_resource *bus_res; 89 - 90 - /* 91 - * This relies on the fact that pci_bus.resource[] refers to P2P or 92 - * CardBus bridge base/limit registers, which are always positively 93 - * decoded. The pci_bus.resources list contains host bridge or 94 - * subtractively decoded resources. 95 - */ 96 - list_for_each_entry(bus_res, &bus->resources, list) { 97 - if (bus_res->res == res) 98 - return (bus_res->flags & PCI_SUBTRACTIVE_DECODE) ? 99 - false : true; 100 - } 101 - return true; 102 - } 103 - 104 - /* 105 - * Find the next-best bus resource after the cursor "res". If the cursor is 106 - * NULL, return the best resource. "Best" means that we prefer positive 107 - * decode regions over subtractive decode, then those at higher addresses. 
108 - */ 109 - static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus, 110 - unsigned int type, 111 - struct resource *res) 112 - { 113 - bool res_pos, r_pos, prev_pos = false; 114 - struct resource *r, *prev = NULL; 115 - int i; 116 - 117 - res_pos = pci_bus_resource_positive(bus, res); 118 - pci_bus_for_each_resource(bus, r, i) { 119 - if (!r) 120 - continue; 121 - 122 - if ((r->flags & IORESOURCE_TYPE_BITS) != type) 123 - continue; 124 - 125 - r_pos = pci_bus_resource_positive(bus, r); 126 - if (!res || pci_bus_resource_better(res, res_pos, r, r_pos)) { 127 - if (!prev || pci_bus_resource_better(r, r_pos, 128 - prev, prev_pos)) { 129 - prev = r; 130 - prev_pos = r_pos; 131 - } 132 - } 133 - } 134 - 135 - return prev; 136 - } 137 - 138 67 /** 139 68 * pci_bus_alloc_resource - allocate a resource from a parent bus 140 69 * @bus: PCI bus ··· 89 160 resource_size_t), 90 161 void *alignf_data) 91 162 { 92 - int ret = -ENOMEM; 163 + int i, ret = -ENOMEM; 93 164 struct resource *r; 94 165 resource_size_t max = -1; 95 - unsigned int type = res->flags & IORESOURCE_TYPE_BITS; 96 166 97 167 type_mask |= IORESOURCE_IO | IORESOURCE_MEM; 98 168 ··· 99 171 if (!(res->flags & IORESOURCE_MEM_64)) 100 172 max = PCIBIOS_MAX_MEM_32; 101 173 102 - /* Look for space at highest addresses first */ 103 - r = pci_bus_find_resource_prev(bus, type, NULL); 104 - for ( ; r; r = pci_bus_find_resource_prev(bus, type, r)) { 174 + pci_bus_for_each_resource(bus, r, i) { 175 + if (!r) 176 + continue; 177 + 105 178 /* type_mask must match */ 106 179 if ((res->flags ^ r->flags) & type_mask) 107 180 continue;
+5
drivers/pci/dmar.c
··· 1417 1417 (unsigned long long)drhd->reg_base_addr, ret); 1418 1418 return -1; 1419 1419 } 1420 + 1421 + /* 1422 + * Clear any previous faults. 1423 + */ 1424 + dmar_fault(iommu->irq, iommu); 1420 1425 } 1421 1426 1422 1427 return 0;
+26
drivers/pci/quirks.c
··· 2329 2329 { 2330 2330 u32 cfg; 2331 2331 2332 + if (!pci_find_capability(dev, PCI_CAP_ID_HT)) 2333 + return; 2334 + 2332 2335 pci_read_config_dword(dev, 0x74, &cfg); 2333 2336 2334 2337 if (cfg & ((1 << 2) | (1 << 15))) { ··· 2767 2764 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); 2768 2765 #endif /*CONFIG_MMC_RICOH_MMC*/ 2769 2766 2767 + #if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) 2768 + #define VTUNCERRMSK_REG 0x1ac 2769 + #define VTD_MSK_SPEC_ERRORS (1 << 31) 2770 + /* 2771 + * This is a quirk for masking vt-d spec defined errors to platform error 2772 + * handling logic. With out this, platforms using Intel 7500, 5500 chipsets 2773 + * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based 2774 + * on the RAS config settings of the platform) when a vt-d fault happens. 2775 + * The resulting SMI caused the system to hang. 2776 + * 2777 + * VT-d spec related errors are already handled by the VT-d OS code, so no 2778 + * need to report the same error through other channels. 2779 + */ 2780 + static void vtd_mask_spec_errors(struct pci_dev *dev) 2781 + { 2782 + u32 word; 2783 + 2784 + pci_read_config_dword(dev, VTUNCERRMSK_REG, &word); 2785 + pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS); 2786 + } 2787 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors); 2788 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors); 2789 + #endif 2770 2790 2771 2791 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, 2772 2792 struct pci_fixup *end)
+1 -2
drivers/scsi/scsi_lib.c
··· 1637 1637 1638 1638 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); 1639 1639 1640 - /* New queue, no concurrency on queue_flags */ 1641 1640 if (!shost->use_clustering) 1642 - queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); 1641 + q->limits.cluster = 0; 1643 1642 1644 1643 /* 1645 1644 * set a reasonable default alignment on word boundaries: the
+4 -4
drivers/staging/cx25821/cx25821-video.c
··· 92 92 return ARRAY_SIZE(formats); 93 93 } 94 94 95 - struct cx25821_fmt *format_by_fourcc(unsigned int fourcc) 95 + struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc) 96 96 { 97 97 unsigned int i; 98 98 ··· 848 848 pix_format = 849 849 (dev->channels[ch_id].pixel_formats == 850 850 PIXEL_FRMT_411) ? V4L2_PIX_FMT_Y41P : V4L2_PIX_FMT_YUYV; 851 - fh->fmt = format_by_fourcc(pix_format); 851 + fh->fmt = cx25821_format_by_fourcc(pix_format); 852 852 853 853 v4l2_prio_open(&dev->channels[ch_id].prio, &fh->prio); 854 854 ··· 1010 1010 if (0 != err) 1011 1011 return err; 1012 1012 1013 - fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat); 1013 + fh->fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat); 1014 1014 fh->vidq.field = f->fmt.pix.field; 1015 1015 1016 1016 /* check if width and height is valid based on set standard */ ··· 1119 1119 enum v4l2_field field; 1120 1120 unsigned int maxw, maxh; 1121 1121 1122 - fmt = format_by_fourcc(f->fmt.pix.pixelformat); 1122 + fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat); 1123 1123 if (NULL == fmt) 1124 1124 return -EINVAL; 1125 1125
+1 -1
drivers/staging/cx25821/cx25821-video.h
··· 87 87 88 88 #define FORMAT_FLAGS_PACKED 0x01 89 89 extern struct cx25821_fmt formats[]; 90 - extern struct cx25821_fmt *format_by_fourcc(unsigned int fourcc); 90 + extern struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc); 91 91 extern struct cx25821_data timeout_data[MAX_VID_CHANNEL_NUM]; 92 92 93 93 extern void cx25821_dump_video_queue(struct cx25821_dev *dev,
+4 -2
drivers/tty/n_gsm.c
··· 716 716 if (msg->len < 128) 717 717 *--dp = (msg->len << 1) | EA; 718 718 else { 719 - *--dp = ((msg->len & 127) << 1) | EA; 720 - *--dp = (msg->len >> 6) & 0xfe; 719 + *--dp = (msg->len >> 7); /* bits 7 - 15 */ 720 + *--dp = (msg->len & 127) << 1; /* bits 0 - 6 */ 721 721 } 722 722 } 723 723 ··· 968 968 { 969 969 struct gsm_msg *msg; 970 970 msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype); 971 + if (msg == NULL) 972 + return; 971 973 msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */ 972 974 msg->data[1] = (dlen << 1) | EA; 973 975 memcpy(msg->data + 2, data, dlen);
+9 -1
drivers/usb/core/Kconfig
··· 107 107 If you are unsure about this, say N here. 108 108 109 109 config USB_OTG 110 - bool 110 + bool "OTG support" 111 111 depends on USB && EXPERIMENTAL 112 112 depends on USB_SUSPEND 113 113 default n 114 + help 115 + The most notable feature of USB OTG is support for a 116 + "Dual-Role" device, which can act as either a device 117 + or a host. The initial role is decided by the type of 118 + plug inserted and can be changed later when two dual 119 + role devices talk to each other. 114 120 121 + Select this only if your board has Mini-AB/Micro-AB 122 + connector. 115 123 116 124 config USB_OTG_WHITELIST 117 125 bool "Rely on OTG Targeted Peripherals List"
+9 -9
drivers/usb/gadget/composite.c
··· 1047 1047 kfree(cdev->req->buf); 1048 1048 usb_ep_free_request(gadget->ep0, cdev->req); 1049 1049 } 1050 + device_remove_file(&gadget->dev, &dev_attr_suspended); 1050 1051 kfree(cdev); 1051 1052 set_gadget_data(gadget, NULL); 1052 - device_remove_file(&gadget->dev, &dev_attr_suspended); 1053 1053 composite = NULL; 1054 1054 } 1055 1055 ··· 1107 1107 */ 1108 1108 usb_ep_autoconfig_reset(cdev->gadget); 1109 1109 1110 - /* standardized runtime overrides for device ID data */ 1111 - if (idVendor) 1112 - cdev->desc.idVendor = cpu_to_le16(idVendor); 1113 - if (idProduct) 1114 - cdev->desc.idProduct = cpu_to_le16(idProduct); 1115 - if (bcdDevice) 1116 - cdev->desc.bcdDevice = cpu_to_le16(bcdDevice); 1117 - 1118 1110 /* composite gadget needs to assign strings for whole device (like 1119 1111 * serial number), register function drivers, potentially update 1120 1112 * power state and consumption, etc ··· 1117 1125 1118 1126 cdev->desc = *composite->dev; 1119 1127 cdev->desc.bMaxPacketSize0 = gadget->ep0->maxpacket; 1128 + 1129 + /* standardized runtime overrides for device ID data */ 1130 + if (idVendor) 1131 + cdev->desc.idVendor = cpu_to_le16(idVendor); 1132 + if (idProduct) 1133 + cdev->desc.idProduct = cpu_to_le16(idProduct); 1134 + if (bcdDevice) 1135 + cdev->desc.bcdDevice = cpu_to_le16(bcdDevice); 1120 1136 1121 1137 /* string overrides */ 1122 1138 if (iManufacturer || !cdev->desc.iManufacturer) {
+15 -10
drivers/usb/host/xhci-mem.c
··· 1680 1680 xhci->port_array[i] = (u8) -1; 1681 1681 } 1682 1682 /* FIXME: Should we disable the port? */ 1683 + continue; 1683 1684 } 1684 1685 xhci->port_array[i] = major_revision; 1685 1686 if (major_revision == 0x03) ··· 1759 1758 return -ENOMEM; 1760 1759 1761 1760 port_index = 0; 1762 - for (i = 0; i < num_ports; i++) 1763 - if (xhci->port_array[i] != 0x03) { 1764 - xhci->usb2_ports[port_index] = 1765 - &xhci->op_regs->port_status_base + 1766 - NUM_PORT_REGS*i; 1767 - xhci_dbg(xhci, "USB 2.0 port at index %u, " 1768 - "addr = %p\n", i, 1769 - xhci->usb2_ports[port_index]); 1770 - port_index++; 1771 - } 1761 + for (i = 0; i < num_ports; i++) { 1762 + if (xhci->port_array[i] == 0x03 || 1763 + xhci->port_array[i] == 0 || 1764 + xhci->port_array[i] == -1) 1765 + continue; 1766 + 1767 + xhci->usb2_ports[port_index] = 1768 + &xhci->op_regs->port_status_base + 1769 + NUM_PORT_REGS*i; 1770 + xhci_dbg(xhci, "USB 2.0 port at index %u, " 1771 + "addr = %p\n", i, 1772 + xhci->usb2_ports[port_index]); 1773 + port_index++; 1774 + } 1772 1775 } 1773 1776 if (xhci->num_usb3_ports) { 1774 1777 xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
+3 -1
drivers/usb/misc/uss720.c
··· 3 3 /* 4 4 * uss720.c -- USS720 USB Parport Cable. 5 5 * 6 - * Copyright (C) 1999, 2005 6 + * Copyright (C) 1999, 2005, 2010 7 7 * Thomas Sailer (t.sailer@alumni.ethz.ch) 8 8 * 9 9 * This program is free software; you can redistribute it and/or modify ··· 776 776 { USB_DEVICE(0x0557, 0x2001) }, 777 777 { USB_DEVICE(0x0729, 0x1284) }, 778 778 { USB_DEVICE(0x1293, 0x0002) }, 779 + { USB_DEVICE(0x1293, 0x0002) }, 780 + { USB_DEVICE(0x050d, 0x0002) }, 779 781 { } /* Terminating entry */ 780 782 }; 781 783
+1
drivers/usb/serial/ftdi_sio.c
··· 797 797 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) }, 798 798 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, 799 799 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, 800 + { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, 800 801 { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), 801 802 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 802 803 { }, /* Optional parameter entry */
+5
drivers/usb/serial/ftdi_sio_ids.h
··· 1081 1081 #define MJSG_HD_RADIO_PID 0x937C 1082 1082 1083 1083 /* 1084 + * D.O.Tec products (http://www.directout.eu) 1085 + */ 1086 + #define FTDI_DOTEC_PID 0x9868 1087 + 1088 + /* 1084 1089 * Xverve Signalyzer tools (http://www.signalyzer.com/) 1085 1090 */ 1086 1091 #define XVERVE_SIGNALYZER_ST_PID 0xBCA0
+7
drivers/usb/storage/unusual_devs.h
··· 481 481 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 482 482 US_FL_MAX_SECTORS_64), 483 483 484 + /* Reported by Vitaly Kuznetsov <vitty@altlinux.ru> */ 485 + UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999, 486 + "Samsung", 487 + "YP-CP3", 488 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 489 + US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG), 490 + 484 491 /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>. 485 492 * Device uses standards-violating 32-byte Bulk Command Block Wrappers and 486 493 * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
+2 -2
drivers/video/omap/Kconfig
··· 1 1 config FB_OMAP 2 2 tristate "OMAP frame buffer support (EXPERIMENTAL)" 3 - depends on FB && ARCH_OMAP && (OMAP2_DSS = "n") 4 - 3 + depends on FB && (OMAP2_DSS = "n") 4 + depends on ARCH_OMAP1 || ARCH_OMAP2 || ARCH_OMAP3 5 5 select FB_CFB_FILLRECT 6 6 select FB_CFB_COPYAREA 7 7 select FB_CFB_IMAGEBLIT
+2 -2
drivers/video/omap2/vram.c
··· 551 551 if (!size) 552 552 return; 553 553 554 - size = PAGE_ALIGN(size); 554 + size = ALIGN(size, SZ_2M); 555 555 556 556 if (paddr) { 557 557 if (paddr & ~PAGE_MASK) { ··· 576 576 return; 577 577 } 578 578 } else { 579 - paddr = memblock_alloc(size, PAGE_SIZE); 579 + paddr = memblock_alloc(size, SZ_2M); 580 580 } 581 581 582 582 memblock_free(paddr, size);
+1 -1
fs/btrfs/export.c
··· 166 166 static struct dentry *btrfs_get_parent(struct dentry *child) 167 167 { 168 168 struct inode *dir = child->d_inode; 169 - static struct dentry *dentry; 169 + struct dentry *dentry; 170 170 struct btrfs_root *root = BTRFS_I(dir)->root; 171 171 struct btrfs_path *path; 172 172 struct extent_buffer *leaf;
+2 -1
fs/ceph/dir.c
··· 40 40 if (dentry->d_fsdata) 41 41 return 0; 42 42 43 - if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) 43 + if (dentry->d_parent == NULL || /* nfs fh_to_dentry */ 44 + ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) 44 45 dentry->d_op = &ceph_dentry_ops; 45 46 else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR) 46 47 dentry->d_op = &ceph_snapdir_dentry_ops;
+23 -16
fs/ceph/file.c
··· 282 282 static int striped_read(struct inode *inode, 283 283 u64 off, u64 len, 284 284 struct page **pages, int num_pages, 285 - int *checkeof, bool align_to_pages) 285 + int *checkeof, bool align_to_pages, 286 + unsigned long buf_align) 286 287 { 287 288 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 288 289 struct ceph_inode_info *ci = ceph_inode(inode); ··· 308 307 309 308 more: 310 309 if (align_to_pages) 311 - page_align = (pos - io_align) & ~PAGE_MASK; 310 + page_align = (pos - io_align + buf_align) & ~PAGE_MASK; 312 311 else 313 312 page_align = pos & ~PAGE_MASK; 314 313 this_len = left; ··· 377 376 struct inode *inode = file->f_dentry->d_inode; 378 377 struct page **pages; 379 378 u64 off = *poff; 380 - int num_pages = calc_pages_for(off, len); 381 - int ret; 379 + int num_pages, ret; 382 380 383 381 dout("sync_read on file %p %llu~%u %s\n", file, off, len, 384 382 (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); 385 383 386 - if (file->f_flags & O_DIRECT) 387 - pages = ceph_get_direct_page_vector(data, num_pages); 388 - else 384 + if (file->f_flags & O_DIRECT) { 385 + num_pages = calc_pages_for((unsigned long)data, len); 386 + pages = ceph_get_direct_page_vector(data, num_pages, true); 387 + } else { 388 + num_pages = calc_pages_for(off, len); 389 389 pages = ceph_alloc_page_vector(num_pages, GFP_NOFS); 390 + } 390 391 if (IS_ERR(pages)) 391 392 return PTR_ERR(pages); 392 393 ··· 403 400 goto done; 404 401 405 402 ret = striped_read(inode, off, len, pages, num_pages, checkeof, 406 - file->f_flags & O_DIRECT); 403 + file->f_flags & O_DIRECT, 404 + (unsigned long)data & ~PAGE_MASK); 407 405 408 406 if (ret >= 0 && (file->f_flags & O_DIRECT) == 0) 409 407 ret = ceph_copy_page_vector_to_user(pages, data, off, ret); ··· 413 409 414 410 done: 415 411 if (file->f_flags & O_DIRECT) 416 - ceph_put_page_vector(pages, num_pages); 412 + ceph_put_page_vector(pages, num_pages, true); 417 413 else 418 414 ceph_release_page_vector(pages, num_pages); 419 415 
dout("sync_read result %d\n", ret); ··· 460 456 int do_sync = 0; 461 457 int check_caps = 0; 462 458 int page_align, io_align; 459 + unsigned long buf_align; 463 460 int ret; 464 461 struct timespec mtime = CURRENT_TIME; 465 462 ··· 476 471 pos = *offset; 477 472 478 473 io_align = pos & ~PAGE_MASK; 474 + buf_align = (unsigned long)data & ~PAGE_MASK; 479 475 480 476 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left); 481 477 if (ret < 0) ··· 502 496 */ 503 497 more: 504 498 len = left; 505 - if (file->f_flags & O_DIRECT) 499 + if (file->f_flags & O_DIRECT) { 506 500 /* write from beginning of first page, regardless of 507 501 io alignment */ 508 - page_align = (pos - io_align) & ~PAGE_MASK; 509 - else 502 + page_align = (pos - io_align + buf_align) & ~PAGE_MASK; 503 + num_pages = calc_pages_for((unsigned long)data, len); 504 + } else { 510 505 page_align = pos & ~PAGE_MASK; 506 + num_pages = calc_pages_for(pos, len); 507 + } 511 508 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 512 509 ceph_vino(inode), pos, &len, 513 510 CEPH_OSD_OP_WRITE, flags, ··· 521 512 if (!req) 522 513 return -ENOMEM; 523 514 524 - num_pages = calc_pages_for(pos, len); 525 - 526 515 if (file->f_flags & O_DIRECT) { 527 - pages = ceph_get_direct_page_vector(data, num_pages); 516 + pages = ceph_get_direct_page_vector(data, num_pages, false); 528 517 if (IS_ERR(pages)) { 529 518 ret = PTR_ERR(pages); 530 519 goto out; ··· 572 565 } 573 566 574 567 if (file->f_flags & O_DIRECT) 575 - ceph_put_page_vector(pages, num_pages); 568 + ceph_put_page_vector(pages, num_pages, false); 576 569 else if (file->f_flags & O_SYNC) 577 570 ceph_release_page_vector(pages, num_pages); 578 571
+3
fs/namei.c
··· 1748 1748 if (!(open_flag & O_CREAT)) 1749 1749 mode = 0; 1750 1750 1751 + /* Must never be set by userspace */ 1752 + open_flag &= ~FMODE_NONOTIFY; 1753 + 1751 1754 /* 1752 1755 * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only 1753 1756 * check for O_DSYNC if the need any syncing at all we enforce it's
-9
fs/nilfs2/gcinode.c
··· 176 176 int nilfs_init_gcinode(struct inode *inode) 177 177 { 178 178 struct nilfs_inode_info *ii = NILFS_I(inode); 179 - struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs; 180 179 181 180 inode->i_mode = S_IFREG; 182 181 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); ··· 184 185 185 186 ii->i_flags = 0; 186 187 nilfs_bmap_init_gc(ii->i_bmap); 187 - 188 - /* 189 - * Add the inode to GC inode list. Garbage Collection 190 - * is serialized and no two processes manipulate the 191 - * list simultaneously. 192 - */ 193 - igrab(inode); 194 - list_add(&NILFS_I(inode)->i_dirty, &nilfs->ns_gc_inodes); 195 188 196 189 return 0; 197 190 }
+12
fs/nilfs2/ioctl.c
··· 337 337 struct nilfs_argv *argv, void *buf) 338 338 { 339 339 size_t nmembs = argv->v_nmembs; 340 + struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs; 340 341 struct inode *inode; 341 342 struct nilfs_vdesc *vdesc; 342 343 struct buffer_head *bh, *n; ··· 354 353 ret = PTR_ERR(inode); 355 354 goto failed; 356 355 } 356 + if (list_empty(&NILFS_I(inode)->i_dirty)) { 357 + /* 358 + * Add the inode to GC inode list. Garbage Collection 359 + * is serialized and no two processes manipulate the 360 + * list simultaneously. 361 + */ 362 + igrab(inode); 363 + list_add(&NILFS_I(inode)->i_dirty, 364 + &nilfs->ns_gc_inodes); 365 + } 366 + 357 367 do { 358 368 ret = nilfs_ioctl_move_inode_block(inode, vdesc, 359 369 &buffers);
+5 -1
fs/notify/fanotify/fanotify.c
··· 92 92 93 93 pr_debug("%s: group=%p event=%p\n", __func__, group, event); 94 94 95 - wait_event(group->fanotify_data.access_waitq, event->response); 95 + wait_event(group->fanotify_data.access_waitq, event->response || 96 + atomic_read(&group->fanotify_data.bypass_perm)); 97 + 98 + if (!event->response) /* bypass_perm set */ 99 + return 0; 96 100 97 101 /* userspace responded, convert to something usable */ 98 102 spin_lock(&event->lock);
+51 -30
fs/notify/fanotify/fanotify_user.c
··· 106 106 return client_fd; 107 107 } 108 108 109 - static ssize_t fill_event_metadata(struct fsnotify_group *group, 109 + static int fill_event_metadata(struct fsnotify_group *group, 110 110 struct fanotify_event_metadata *metadata, 111 111 struct fsnotify_event *event) 112 112 { 113 + int ret = 0; 114 + 113 115 pr_debug("%s: group=%p metadata=%p event=%p\n", __func__, 114 116 group, metadata, event); 115 117 116 118 metadata->event_len = FAN_EVENT_METADATA_LEN; 119 + metadata->metadata_len = FAN_EVENT_METADATA_LEN; 117 120 metadata->vers = FANOTIFY_METADATA_VERSION; 118 121 metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS; 119 122 metadata->pid = pid_vnr(event->tgid); 120 - metadata->fd = create_fd(group, event); 123 + if (unlikely(event->mask & FAN_Q_OVERFLOW)) 124 + metadata->fd = FAN_NOFD; 125 + else { 126 + metadata->fd = create_fd(group, event); 127 + if (metadata->fd < 0) 128 + ret = metadata->fd; 129 + } 121 130 122 - return metadata->fd; 131 + return ret; 123 132 } 124 133 125 134 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS ··· 209 200 210 201 mutex_lock(&group->fanotify_data.access_mutex); 211 202 212 - if (group->fanotify_data.bypass_perm) { 203 + if (atomic_read(&group->fanotify_data.bypass_perm)) { 213 204 mutex_unlock(&group->fanotify_data.access_mutex); 214 205 kmem_cache_free(fanotify_response_event_cache, re); 215 206 event->response = FAN_ALLOW; ··· 266 257 267 258 pr_debug("%s: group=%p event=%p\n", __func__, group, event); 268 259 269 - fd = fill_event_metadata(group, &fanotify_event_metadata, event); 270 - if (fd < 0) 271 - return fd; 260 + ret = fill_event_metadata(group, &fanotify_event_metadata, event); 261 + if (ret < 0) 262 + goto out; 272 263 264 + fd = fanotify_event_metadata.fd; 273 265 ret = prepare_for_access_response(group, event, fd); 274 266 if (ret) 275 267 goto out_close_fd; 276 268 277 269 ret = -EFAULT; 278 - if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN)) 270 + if (copy_to_user(buf, 
&fanotify_event_metadata, 271 + fanotify_event_metadata.event_len)) 279 272 goto out_kill_access_response; 280 273 281 - return FAN_EVENT_METADATA_LEN; 274 + return fanotify_event_metadata.event_len; 282 275 283 276 out_kill_access_response: 284 277 remove_access_response(group, event, fd); 285 278 out_close_fd: 286 - sys_close(fd); 279 + if (fd != FAN_NOFD) 280 + sys_close(fd); 281 + out: 282 + #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS 283 + if (event->mask & FAN_ALL_PERM_EVENTS) { 284 + event->response = FAN_DENY; 285 + wake_up(&group->fanotify_data.access_waitq); 286 + } 287 + #endif 287 288 return ret; 288 289 } 289 290 ··· 401 382 402 383 mutex_lock(&group->fanotify_data.access_mutex); 403 384 404 - group->fanotify_data.bypass_perm = true; 385 + atomic_inc(&group->fanotify_data.bypass_perm); 405 386 406 387 list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) { 407 388 pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group, ··· 605 586 { 606 587 struct fsnotify_mark *fsn_mark; 607 588 __u32 added; 589 + int ret = 0; 608 590 609 591 fsn_mark = fsnotify_find_vfsmount_mark(group, mnt); 610 592 if (!fsn_mark) { 611 - int ret; 612 - 613 593 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) 614 594 return -ENOSPC; 615 595 ··· 618 600 619 601 fsnotify_init_mark(fsn_mark, fanotify_free_mark); 620 602 ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0); 621 - if (ret) { 622 - fanotify_free_mark(fsn_mark); 623 - return ret; 624 - } 603 + if (ret) 604 + goto err; 625 605 } 626 606 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); 627 - fsnotify_put_mark(fsn_mark); 607 + 628 608 if (added & ~mnt->mnt_fsnotify_mask) 629 609 fsnotify_recalc_vfsmount_mask(mnt); 630 - 631 - return 0; 610 + err: 611 + fsnotify_put_mark(fsn_mark); 612 + return ret; 632 613 } 633 614 634 615 static int fanotify_add_inode_mark(struct fsnotify_group *group, ··· 636 619 { 637 620 struct fsnotify_mark *fsn_mark; 638 621 __u32 added; 622 + 
int ret = 0; 639 623 640 624 pr_debug("%s: group=%p inode=%p\n", __func__, group, inode); 641 625 ··· 652 634 653 635 fsn_mark = fsnotify_find_inode_mark(group, inode); 654 636 if (!fsn_mark) { 655 - int ret; 656 - 657 637 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) 658 638 return -ENOSPC; 659 639 ··· 661 645 662 646 fsnotify_init_mark(fsn_mark, fanotify_free_mark); 663 647 ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0); 664 - if (ret) { 665 - fanotify_free_mark(fsn_mark); 666 - return ret; 667 - } 648 + if (ret) 649 + goto err; 668 650 } 669 651 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); 670 - fsnotify_put_mark(fsn_mark); 652 + 671 653 if (added & ~inode->i_fsnotify_mask) 672 654 fsnotify_recalc_inode_mask(inode); 673 - return 0; 655 + err: 656 + fsnotify_put_mark(fsn_mark); 657 + return ret; 674 658 } 675 659 676 660 /* fanotify syscalls */ ··· 703 687 704 688 /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */ 705 689 group = fsnotify_alloc_group(&fanotify_fsnotify_ops); 706 - if (IS_ERR(group)) 690 + if (IS_ERR(group)) { 691 + free_uid(user); 707 692 return PTR_ERR(group); 693 + } 708 694 709 695 group->fanotify_data.user = user; 710 696 atomic_inc(&user->fanotify_listeners); ··· 716 698 mutex_init(&group->fanotify_data.access_mutex); 717 699 init_waitqueue_head(&group->fanotify_data.access_waitq); 718 700 INIT_LIST_HEAD(&group->fanotify_data.access_list); 701 + atomic_set(&group->fanotify_data.bypass_perm, 0); 719 702 #endif 720 703 switch (flags & FAN_ALL_CLASS_BITS) { 721 704 case FAN_CLASS_NOTIF: ··· 783 764 if (flags & ~FAN_ALL_MARK_FLAGS) 784 765 return -EINVAL; 785 766 switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) { 786 - case FAN_MARK_ADD: 767 + case FAN_MARK_ADD: /* fallthrough */ 787 768 case FAN_MARK_REMOVE: 769 + if (!mask) 770 + return -EINVAL; 788 771 case FAN_MARK_FLUSH: 789 772 break; 790 773 default:
+1
fs/notify/inotify/inotify_user.c
··· 752 752 if (ret >= 0) 753 753 return ret; 754 754 755 + fsnotify_put_group(group); 755 756 atomic_dec(&user->inotify_devs); 756 757 out_free_uid: 757 758 free_uid(user);
+7 -3
include/linux/blkdev.h
··· 250 250 251 251 unsigned char misaligned; 252 252 unsigned char discard_misaligned; 253 - unsigned char no_cluster; 253 + unsigned char cluster; 254 254 signed char discard_zeroes_data; 255 255 }; 256 256 ··· 380 380 #endif 381 381 }; 382 382 383 - #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ 384 383 #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 385 384 #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ 386 385 #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ ··· 402 403 #define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */ 403 404 404 405 #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 405 - (1 << QUEUE_FLAG_CLUSTER) | \ 406 406 (1 << QUEUE_FLAG_STACKABLE) | \ 407 407 (1 << QUEUE_FLAG_SAME_COMP) | \ 408 408 (1 << QUEUE_FLAG_ADD_RANDOM)) ··· 507 509 #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) 508 510 509 511 #define rq_data_dir(rq) ((rq)->cmd_flags & 1) 512 + 513 + static inline unsigned int blk_queue_cluster(struct request_queue *q) 514 + { 515 + return q->limits.cluster; 516 + } 510 517 511 518 /* 512 519 * We regard a request as sync, if either a read or a sync write ··· 808 805 extern void blk_cleanup_queue(struct request_queue *); 809 806 extern void blk_queue_make_request(struct request_queue *, make_request_fn *); 810 807 extern void blk_queue_bounce_limit(struct request_queue *, u64); 808 + extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int); 811 809 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); 812 810 extern void blk_queue_max_segments(struct request_queue *, unsigned short); 813 811 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+2
include/linux/bootmem.h
··· 105 105 106 106 #define alloc_bootmem(x) \ 107 107 __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 108 + #define alloc_bootmem_align(x, align) \ 109 + __alloc_bootmem(x, align, __pa(MAX_DMA_ADDRESS)) 108 110 #define alloc_bootmem_nopanic(x) \ 109 111 __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 110 112 #define alloc_bootmem_pages(x) \
+4 -2
include/linux/ceph/libceph.h
··· 227 227 extern void ceph_release_page_vector(struct page **pages, int num_pages); 228 228 229 229 extern struct page **ceph_get_direct_page_vector(const char __user *data, 230 - int num_pages); 231 - extern void ceph_put_page_vector(struct page **pages, int num_pages); 230 + int num_pages, 231 + bool write_page); 232 + extern void ceph_put_page_vector(struct page **pages, int num_pages, 233 + bool dirty); 232 234 extern void ceph_release_page_vector(struct page **pages, int num_pages); 233 235 extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); 234 236 extern int ceph_copy_user_to_page_vector(struct page **pages,
+19 -1
include/linux/cnt32_to_63.h
··· 61 61 * 62 62 * 2) this code must not be preempted for a duration longer than the 63 63 * 32-bit counter half period minus the longest period between two 64 - * calls to this code. 64 + * calls to this code; 65 65 * 66 66 * Those requirements ensure proper update to the state bit in memory. 67 67 * This is usually not a problem in practice, but if it is then a kernel 68 68 * timer should be scheduled to manage for this code to be executed often 69 69 * enough. 70 + * 71 + * And finally: 72 + * 73 + * 3) the cnt_lo argument must be seen as a globally incrementing value, 74 + * meaning that it should be a direct reference to the counter data which 75 + * can be evaluated according to a specific ordering within the macro, 76 + * and not the result of a previous evaluation stored in a variable. 77 + * 78 + * For example, this is wrong: 79 + * 80 + * u32 partial = get_hw_count(); 81 + * u64 full = cnt32_to_63(partial); 82 + * return full; 83 + * 84 + * This is fine: 85 + * 86 + * u64 full = cnt32_to_63(get_hw_count()); 87 + * return full; 70 88 * 71 89 * Note that the top bit (bit 63) in the returned value should be considered 72 90 * as garbage. It is not cleared here because callers are likely to use a
+7 -3
include/linux/fanotify.h
··· 83 83 FAN_ALL_PERM_EVENTS |\ 84 84 FAN_Q_OVERFLOW) 85 85 86 - #define FANOTIFY_METADATA_VERSION 2 86 + #define FANOTIFY_METADATA_VERSION 3 87 87 88 88 struct fanotify_event_metadata { 89 89 __u32 event_len; 90 - __u32 vers; 90 + __u8 vers; 91 + __u8 reserved; 92 + __u16 metadata_len; 91 93 __aligned_u64 mask; 92 94 __s32 fd; 93 95 __s32 pid; ··· 98 96 struct fanotify_response { 99 97 __s32 fd; 100 98 __u32 response; 101 - } __attribute__ ((packed)); 99 + }; 102 100 103 101 /* Legit userspace responses to a _PERM event */ 104 102 #define FAN_ALLOW 0x01 105 103 #define FAN_DENY 0x02 104 + /* No fd set in event */ 105 + #define FAN_NOFD -1 106 106 107 107 /* Helper functions to deal with fanotify_event_metadata buffers */ 108 108 #define FAN_EVENT_METADATA_LEN (sizeof(struct fanotify_event_metadata))
-3
include/linux/fsnotify.h
··· 235 235 if (S_ISDIR(inode->i_mode)) 236 236 mask |= FS_ISDIR; 237 237 238 - /* FMODE_NONOTIFY must never be set from user */ 239 - file->f_mode &= ~FMODE_NONOTIFY; 240 - 241 238 fsnotify_parent(path, NULL, mask); 242 239 fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); 243 240 }
+1 -1
include/linux/fsnotify_backend.h
··· 166 166 struct mutex access_mutex; 167 167 struct list_head access_list; 168 168 wait_queue_head_t access_waitq; 169 - bool bypass_perm; /* protected by access_mutex */ 169 + atomic_t bypass_perm; 170 170 #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */ 171 171 int f_flags; 172 172 unsigned int max_marks;
+4 -2
include/linux/input.h
··· 104 104 #define EVIOCGREP _IOR('E', 0x03, unsigned int[2]) /* get repeat settings */ 105 105 #define EVIOCSREP _IOW('E', 0x03, unsigned int[2]) /* set repeat settings */ 106 106 107 - #define EVIOCGKEYCODE _IOR('E', 0x04, struct input_keymap_entry) /* get keycode */ 108 - #define EVIOCSKEYCODE _IOW('E', 0x04, struct input_keymap_entry) /* set keycode */ 107 + #define EVIOCGKEYCODE _IOR('E', 0x04, unsigned int[2]) /* get keycode */ 108 + #define EVIOCGKEYCODE_V2 _IOR('E', 0x04, struct input_keymap_entry) 109 + #define EVIOCSKEYCODE _IOW('E', 0x04, unsigned int[2]) /* set keycode */ 110 + #define EVIOCSKEYCODE_V2 _IOW('E', 0x04, struct input_keymap_entry) 109 111 110 112 #define EVIOCGNAME(len) _IOC(_IOC_READ, 'E', 0x06, len) /* get device name */ 111 113 #define EVIOCGPHYS(len) _IOC(_IOC_READ, 'E', 0x07, len) /* get physical location */
+1 -1
include/linux/ioport.h
··· 112 112 /* PC/ISA/whatever - the normal PC address spaces: IO and memory */ 113 113 extern struct resource ioport_resource; 114 114 extern struct resource iomem_resource; 115 - extern int resource_alloc_from_bottom; 116 115 117 116 extern struct resource *request_resource_conflict(struct resource *root, struct resource *new); 118 117 extern int request_resource(struct resource *root, struct resource *new); ··· 123 124 extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new); 124 125 extern int insert_resource(struct resource *parent, struct resource *new); 125 126 extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); 127 + extern void arch_remove_reservations(struct resource *avail); 126 128 extern int allocate_resource(struct resource *root, struct resource *new, 127 129 resource_size_t size, resource_size_t min, 128 130 resource_size_t max, resource_size_t align,
+1
include/linux/perf_event.h
··· 887 887 int exclusive; 888 888 struct list_head rotation_list; 889 889 int jiffies_interval; 890 + struct pmu *active_pmu; 890 891 }; 891 892 892 893 struct perf_output_handle {
+2 -1
include/linux/pm_runtime.h
··· 77 77 78 78 static inline bool pm_runtime_suspended(struct device *dev) 79 79 { 80 - return dev->power.runtime_status == RPM_SUSPENDED; 80 + return dev->power.runtime_status == RPM_SUSPENDED 81 + && !dev->power.disable_depth; 81 82 } 82 83 83 84 static inline void pm_runtime_mark_last_busy(struct device *dev)
+1 -1
include/linux/sched.h
··· 143 143 extern unsigned long this_cpu_load(void); 144 144 145 145 146 - extern void calc_global_load(void); 146 + extern void calc_global_load(unsigned long ticks); 147 147 148 148 extern unsigned long get_parent_ip(unsigned long addr); 149 149
+11 -6
include/linux/ssb/ssb_driver_gige.h
··· 96 96 return 0; 97 97 } 98 98 99 - extern char * nvram_get(const char *name); 99 + #ifdef CONFIG_BCM47XX 100 + #include <asm/mach-bcm47xx/nvram.h> 100 101 /* Get the device MAC address */ 101 102 static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) 102 103 { 103 - #ifdef CONFIG_BCM47XX 104 - char *res = nvram_get("et0macaddr"); 105 - if (res) 106 - memcpy(macaddr, res, 6); 107 - #endif 104 + char buf[20]; 105 + if (nvram_getenv("et0macaddr", buf, sizeof(buf)) < 0) 106 + return; 107 + nvram_parse_macaddr(buf, macaddr); 108 108 } 109 + #else 110 + static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) 111 + { 112 + } 113 + #endif 109 114 110 115 extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev, 111 116 struct pci_dev *pdev);
+1 -1
include/media/saa7146.h
··· 161 161 extern struct mutex saa7146_devices_lock; 162 162 int saa7146_register_extension(struct saa7146_extension*); 163 163 int saa7146_unregister_extension(struct saa7146_extension*); 164 - struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc); 164 + struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc); 165 165 int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt); 166 166 void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt); 167 167 int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt, struct scatterlist *list, int length );
+2
include/media/v4l2-device.h
··· 51 51 unsigned int notification, void *arg); 52 52 /* The control handler. May be NULL. */ 53 53 struct v4l2_ctrl_handler *ctrl_handler; 54 + /* BKL replacement mutex. Temporary solution only. */ 55 + struct mutex ioctl_lock; 54 56 }; 55 57 56 58 /* Initialize v4l2_dev and make dev->driver_data point to v4l2_dev.
+1
kernel/fork.c
··· 273 273 274 274 setup_thread_stack(tsk, orig); 275 275 clear_user_return_notifier(tsk); 276 + clear_tsk_need_resched(tsk); 276 277 stackend = end_of_stack(tsk); 277 278 *stackend = STACK_END_MAGIC; /* for overflow detection */ 278 279
+30 -7
kernel/perf_event.c
··· 3824 3824 rcu_read_lock(); 3825 3825 list_for_each_entry_rcu(pmu, &pmus, entry) { 3826 3826 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 3827 + if (cpuctx->active_pmu != pmu) 3828 + goto next; 3827 3829 perf_event_task_ctx(&cpuctx->ctx, task_event); 3828 3830 3829 3831 ctx = task_event->task_ctx; ··· 3961 3959 rcu_read_lock(); 3962 3960 list_for_each_entry_rcu(pmu, &pmus, entry) { 3963 3961 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 3962 + if (cpuctx->active_pmu != pmu) 3963 + goto next; 3964 3964 perf_event_comm_ctx(&cpuctx->ctx, comm_event); 3965 3965 3966 3966 ctxn = pmu->task_ctx_nr; ··· 4148 4144 rcu_read_lock(); 4149 4145 list_for_each_entry_rcu(pmu, &pmus, entry) { 4150 4146 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 4147 + if (cpuctx->active_pmu != pmu) 4148 + goto next; 4151 4149 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, 4152 4150 vma->vm_flags & VM_EXEC); 4153 4151 ··· 4719 4713 break; 4720 4714 } 4721 4715 4722 - if (event_id > PERF_COUNT_SW_MAX) 4716 + if (event_id >= PERF_COUNT_SW_MAX) 4723 4717 return -ENOENT; 4724 4718 4725 4719 if (!event->parent) { ··· 5151 5145 return NULL; 5152 5146 } 5153 5147 5154 - static void free_pmu_context(void * __percpu cpu_context) 5148 + static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) 5155 5149 { 5156 - struct pmu *pmu; 5150 + int cpu; 5151 + 5152 + for_each_possible_cpu(cpu) { 5153 + struct perf_cpu_context *cpuctx; 5154 + 5155 + cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 5156 + 5157 + if (cpuctx->active_pmu == old_pmu) 5158 + cpuctx->active_pmu = pmu; 5159 + } 5160 + } 5161 + 5162 + static void free_pmu_context(struct pmu *pmu) 5163 + { 5164 + struct pmu *i; 5157 5165 5158 5166 mutex_lock(&pmus_lock); 5159 5167 /* 5160 5168 * Like a real lame refcount. 
5161 5169 */ 5162 - list_for_each_entry(pmu, &pmus, entry) { 5163 - if (pmu->pmu_cpu_context == cpu_context) 5170 + list_for_each_entry(i, &pmus, entry) { 5171 + if (i->pmu_cpu_context == pmu->pmu_cpu_context) { 5172 + update_pmu_context(i, pmu); 5164 5173 goto out; 5174 + } 5165 5175 } 5166 5176 5167 - free_percpu(cpu_context); 5177 + free_percpu(pmu->pmu_cpu_context); 5168 5178 out: 5169 5179 mutex_unlock(&pmus_lock); 5170 5180 } ··· 5212 5190 cpuctx->ctx.pmu = pmu; 5213 5191 cpuctx->jiffies_interval = 1; 5214 5192 INIT_LIST_HEAD(&cpuctx->rotation_list); 5193 + cpuctx->active_pmu = pmu; 5215 5194 } 5216 5195 5217 5196 got_cpu_context: ··· 5264 5241 synchronize_rcu(); 5265 5242 5266 5243 free_percpu(pmu->pmu_disable_count); 5267 - free_pmu_context(pmu->pmu_cpu_context); 5244 + free_pmu_context(pmu); 5268 5245 } 5269 5246 5270 5247 struct pmu *perf_init_event(struct perf_event *event)
+1 -1
kernel/power/swap.c
··· 30 30 31 31 #include "power.h" 32 32 33 - #define HIBERNATE_SIG "LINHIB0001" 33 + #define HIBERNATE_SIG "S1SUSPEND" 34 34 35 35 /* 36 36 * The swap map is a data structure used for keeping track of each page
+1 -1
kernel/power/user.c
··· 137 137 free_all_swap_pages(data->swap); 138 138 if (data->frozen) 139 139 thaw_processes(); 140 - pm_notifier_call_chain(data->mode == O_WRONLY ? 140 + pm_notifier_call_chain(data->mode == O_RDONLY ? 141 141 PM_POST_HIBERNATION : PM_POST_RESTORE); 142 142 atomic_inc(&snapshot_device_available); 143 143
+10 -94
kernel/resource.c
··· 40 40 41 41 static DEFINE_RWLOCK(resource_lock); 42 42 43 - /* 44 - * By default, we allocate free space bottom-up. The architecture can request 45 - * top-down by clearing this flag. The user can override the architecture's 46 - * choice with the "resource_alloc_from_bottom" kernel boot option, but that 47 - * should only be a debugging tool. 48 - */ 49 - int resource_alloc_from_bottom = 1; 50 - 51 - static __init int setup_alloc_from_bottom(char *s) 52 - { 53 - printk(KERN_INFO 54 - "resource: allocating from bottom-up; please report a bug\n"); 55 - resource_alloc_from_bottom = 1; 56 - return 0; 57 - } 58 - early_param("resource_alloc_from_bottom", setup_alloc_from_bottom); 59 - 60 43 static void *r_next(struct seq_file *m, void *v, loff_t *pos) 61 44 { 62 45 struct resource *p = v; ··· 357 374 return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1; 358 375 } 359 376 377 + void __weak arch_remove_reservations(struct resource *avail) 378 + { 379 + } 380 + 360 381 static resource_size_t simple_align_resource(void *data, 361 382 const struct resource *avail, 362 383 resource_size_t size, ··· 384 397 } 385 398 386 399 /* 387 - * Find the resource before "child" in the sibling list of "root" children. 388 - */ 389 - static struct resource *find_sibling_prev(struct resource *root, struct resource *child) 390 - { 391 - struct resource *this; 392 - 393 - for (this = root->child; this; this = this->sibling) 394 - if (this->sibling == child) 395 - return this; 396 - 397 - return NULL; 398 - } 399 - 400 - /* 401 400 * Find empty slot in the resource tree given range and alignment. 402 - * This version allocates from the end of the root resource first. 
403 - */ 404 - static int find_resource_from_top(struct resource *root, struct resource *new, 405 - resource_size_t size, resource_size_t min, 406 - resource_size_t max, resource_size_t align, 407 - resource_size_t (*alignf)(void *, 408 - const struct resource *, 409 - resource_size_t, 410 - resource_size_t), 411 - void *alignf_data) 412 - { 413 - struct resource *this; 414 - struct resource tmp, avail, alloc; 415 - 416 - tmp.start = root->end; 417 - tmp.end = root->end; 418 - 419 - this = find_sibling_prev(root, NULL); 420 - for (;;) { 421 - if (this) { 422 - if (this->end < root->end) 423 - tmp.start = this->end + 1; 424 - } else 425 - tmp.start = root->start; 426 - 427 - resource_clip(&tmp, min, max); 428 - 429 - /* Check for overflow after ALIGN() */ 430 - avail = *new; 431 - avail.start = ALIGN(tmp.start, align); 432 - avail.end = tmp.end; 433 - if (avail.start >= tmp.start) { 434 - alloc.start = alignf(alignf_data, &avail, size, align); 435 - alloc.end = alloc.start + size - 1; 436 - if (resource_contains(&avail, &alloc)) { 437 - new->start = alloc.start; 438 - new->end = alloc.end; 439 - return 0; 440 - } 441 - } 442 - 443 - if (!this || this->start == root->start) 444 - break; 445 - 446 - tmp.end = this->start - 1; 447 - this = find_sibling_prev(root, this); 448 - } 449 - return -EBUSY; 450 - } 451 - 452 - /* 453 - * Find empty slot in the resource tree given range and alignment. 454 - * This version allocates from the beginning of the root resource first. 455 401 */ 456 402 static int find_resource(struct resource *root, struct resource *new, 457 403 resource_size_t size, resource_size_t min, ··· 398 478 struct resource *this = root->child; 399 479 struct resource tmp = *new, avail, alloc; 400 480 481 + tmp.flags = new->flags; 401 482 tmp.start = root->start; 402 483 /* 403 - * Skip past an allocated resource that starts at 0, since the 404 - * assignment of this->start - 1 to tmp->end below would cause an 405 - * underflow. 
484 + * Skip past an allocated resource that starts at 0, since the assignment 485 + * of this->start - 1 to tmp->end below would cause an underflow. 406 486 */ 407 487 if (this && this->start == 0) { 408 488 tmp.start = this->end + 1; 409 489 this = this->sibling; 410 490 } 411 - for (;;) { 491 + for(;;) { 412 492 if (this) 413 493 tmp.end = this->start - 1; 414 494 else 415 495 tmp.end = root->end; 416 496 417 497 resource_clip(&tmp, min, max); 498 + arch_remove_reservations(&tmp); 418 499 419 500 /* Check for overflow after ALIGN() */ 420 501 avail = *new; ··· 430 509 return 0; 431 510 } 432 511 } 433 - 434 512 if (!this) 435 513 break; 436 - 437 514 tmp.start = this->end + 1; 438 515 this = this->sibling; 439 516 } ··· 464 545 alignf = simple_align_resource; 465 546 466 547 write_lock(&resource_lock); 467 - if (resource_alloc_from_bottom) 468 - err = find_resource(root, new, size, min, max, align, alignf, alignf_data); 469 - else 470 - err = find_resource_from_top(root, new, size, min, max, align, alignf, alignf_data); 548 + err = find_resource(root, new, size, min, max, align, alignf, alignf_data); 471 549 if (err >= 0 && __request_resource(root, new)) 472 550 err = -EBUSY; 473 551 write_unlock(&resource_lock);
+238 -53
kernel/sched.c
··· 636 636 637 637 #endif /* CONFIG_CGROUP_SCHED */ 638 638 639 - static u64 irq_time_cpu(int cpu); 640 - static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); 639 + static void update_rq_clock_task(struct rq *rq, s64 delta); 641 640 642 - inline void update_rq_clock(struct rq *rq) 641 + static void update_rq_clock(struct rq *rq) 643 642 { 644 - if (!rq->skip_clock_update) { 645 - int cpu = cpu_of(rq); 646 - u64 irq_time; 643 + s64 delta; 647 644 648 - rq->clock = sched_clock_cpu(cpu); 649 - irq_time = irq_time_cpu(cpu); 650 - if (rq->clock - irq_time > rq->clock_task) 651 - rq->clock_task = rq->clock - irq_time; 645 + if (rq->skip_clock_update) 646 + return; 652 647 653 - sched_irq_time_avg_update(rq, irq_time); 654 - } 648 + delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; 649 + rq->clock += delta; 650 + update_rq_clock_task(rq, delta); 655 651 } 656 652 657 653 /* ··· 1920 1924 * They are read and saved off onto struct rq in update_rq_clock(). 1921 1925 * This may result in other CPU reading this CPU's irq time and can 1922 1926 * race with irq/account_system_vtime on this CPU. We would either get old 1923 - * or new value (or semi updated value on 32 bit) with a side effect of 1924 - * accounting a slice of irq time to wrong task when irq is in progress 1925 - * while we read rq->clock. That is a worthy compromise in place of having 1926 - * locks on each irq in account_system_time. 1927 + * or new value with a side effect of accounting a slice of irq time to wrong 1928 + * task when irq is in progress while we read rq->clock. That is a worthy 1929 + * compromise in place of having locks on each irq in account_system_time. 
1927 1930 */ 1928 1931 static DEFINE_PER_CPU(u64, cpu_hardirq_time); 1929 1932 static DEFINE_PER_CPU(u64, cpu_softirq_time); ··· 1940 1945 sched_clock_irqtime = 0; 1941 1946 } 1942 1947 1943 - static u64 irq_time_cpu(int cpu) 1944 - { 1945 - if (!sched_clock_irqtime) 1946 - return 0; 1948 + #ifndef CONFIG_64BIT 1949 + static DEFINE_PER_CPU(seqcount_t, irq_time_seq); 1947 1950 1948 - return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); 1951 + static inline void irq_time_write_begin(void) 1952 + { 1953 + __this_cpu_inc(irq_time_seq.sequence); 1954 + smp_wmb(); 1949 1955 } 1950 1956 1957 + static inline void irq_time_write_end(void) 1958 + { 1959 + smp_wmb(); 1960 + __this_cpu_inc(irq_time_seq.sequence); 1961 + } 1962 + 1963 + static inline u64 irq_time_read(int cpu) 1964 + { 1965 + u64 irq_time; 1966 + unsigned seq; 1967 + 1968 + do { 1969 + seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu)); 1970 + irq_time = per_cpu(cpu_softirq_time, cpu) + 1971 + per_cpu(cpu_hardirq_time, cpu); 1972 + } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq)); 1973 + 1974 + return irq_time; 1975 + } 1976 + #else /* CONFIG_64BIT */ 1977 + static inline void irq_time_write_begin(void) 1978 + { 1979 + } 1980 + 1981 + static inline void irq_time_write_end(void) 1982 + { 1983 + } 1984 + 1985 + static inline u64 irq_time_read(int cpu) 1986 + { 1987 + return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); 1988 + } 1989 + #endif /* CONFIG_64BIT */ 1990 + 1991 + /* 1992 + * Called before incrementing preempt_count on {soft,}irq_enter 1993 + * and before decrementing preempt_count on {soft,}irq_exit. 
1994 + */ 1951 1995 void account_system_vtime(struct task_struct *curr) 1952 1996 { 1953 1997 unsigned long flags; 1998 + s64 delta; 1954 1999 int cpu; 1955 - u64 now, delta; 1956 2000 1957 2001 if (!sched_clock_irqtime) 1958 2002 return; ··· 1999 1965 local_irq_save(flags); 2000 1966 2001 1967 cpu = smp_processor_id(); 2002 - now = sched_clock_cpu(cpu); 2003 - delta = now - per_cpu(irq_start_time, cpu); 2004 - per_cpu(irq_start_time, cpu) = now; 1968 + delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time); 1969 + __this_cpu_add(irq_start_time, delta); 1970 + 1971 + irq_time_write_begin(); 2005 1972 /* 2006 1973 * We do not account for softirq time from ksoftirqd here. 2007 1974 * We want to continue accounting softirq time to ksoftirqd thread ··· 2010 1975 * that do not consume any time, but still wants to run. 2011 1976 */ 2012 1977 if (hardirq_count()) 2013 - per_cpu(cpu_hardirq_time, cpu) += delta; 1978 + __this_cpu_add(cpu_hardirq_time, delta); 2014 1979 else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD)) 2015 - per_cpu(cpu_softirq_time, cpu) += delta; 1980 + __this_cpu_add(cpu_softirq_time, delta); 2016 1981 1982 + irq_time_write_end(); 2017 1983 local_irq_restore(flags); 2018 1984 } 2019 1985 EXPORT_SYMBOL_GPL(account_system_vtime); 2020 1986 2021 - static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) 1987 + static void update_rq_clock_task(struct rq *rq, s64 delta) 2022 1988 { 2023 - if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) { 2024 - u64 delta_irq = curr_irq_time - rq->prev_irq_time; 2025 - rq->prev_irq_time = curr_irq_time; 2026 - sched_rt_avg_update(rq, delta_irq); 2027 - } 1989 + s64 irq_delta; 1990 + 1991 + irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; 1992 + 1993 + /* 1994 + * Since irq_time is only updated on {soft,}irq_exit, we might run into 1995 + * this case when a previous update_rq_clock() happened inside a 1996 + * {soft,}irq region. 
1997 + * 1998 + * When this happens, we stop ->clock_task and only update the 1999 + * prev_irq_time stamp to account for the part that fit, so that a next 2000 + * update will consume the rest. This ensures ->clock_task is 2001 + * monotonic. 2002 + * 2003 + * It does however cause some slight miss-attribution of {soft,}irq 2004 + * time, a more accurate solution would be to update the irq_time using 2005 + * the current rq->clock timestamp, except that would require using 2006 + * atomic ops. 2007 + */ 2008 + if (irq_delta > delta) 2009 + irq_delta = delta; 2010 + 2011 + rq->prev_irq_time += irq_delta; 2012 + delta -= irq_delta; 2013 + rq->clock_task += delta; 2014 + 2015 + if (irq_delta && sched_feat(NONIRQ_POWER)) 2016 + sched_rt_avg_update(rq, irq_delta); 2028 2017 } 2029 2018 2030 - #else 2019 + #else /* CONFIG_IRQ_TIME_ACCOUNTING */ 2031 2020 2032 - static u64 irq_time_cpu(int cpu) 2021 + static void update_rq_clock_task(struct rq *rq, s64 delta) 2033 2022 { 2034 - return 0; 2023 + rq->clock_task += delta; 2035 2024 } 2036 2025 2037 - static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { } 2038 - 2039 - #endif 2026 + #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 2040 2027 2041 2028 #include "sched_idletask.c" 2042 2029 #include "sched_fair.c" ··· 2186 2129 * A queue event has occurred, and we're going to schedule. In 2187 2130 * this case, we can save a useless back to back clock update. 
2188 2131 */ 2189 - if (test_tsk_need_resched(rq->curr)) 2132 + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) 2190 2133 rq->skip_clock_update = 1; 2191 2134 } 2192 2135 ··· 3176 3119 return delta; 3177 3120 } 3178 3121 3122 + static unsigned long 3123 + calc_load(unsigned long load, unsigned long exp, unsigned long active) 3124 + { 3125 + load *= exp; 3126 + load += active * (FIXED_1 - exp); 3127 + load += 1UL << (FSHIFT - 1); 3128 + return load >> FSHIFT; 3129 + } 3130 + 3179 3131 #ifdef CONFIG_NO_HZ 3180 3132 /* 3181 3133 * For NO_HZ we delay the active fold to the next LOAD_FREQ update. ··· 3214 3148 3215 3149 return delta; 3216 3150 } 3151 + 3152 + /** 3153 + * fixed_power_int - compute: x^n, in O(log n) time 3154 + * 3155 + * @x: base of the power 3156 + * @frac_bits: fractional bits of @x 3157 + * @n: power to raise @x to. 3158 + * 3159 + * By exploiting the relation between the definition of the natural power 3160 + * function: x^n := x*x*...*x (x multiplied by itself for n times), and 3161 + * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i, 3162 + * (where: n_i \elem {0, 1}, the binary vector representing n), 3163 + * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is 3164 + * of course trivially computable in O(log_2 n), the length of our binary 3165 + * vector. 
3166 + */ 3167 + static unsigned long 3168 + fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n) 3169 + { 3170 + unsigned long result = 1UL << frac_bits; 3171 + 3172 + if (n) for (;;) { 3173 + if (n & 1) { 3174 + result *= x; 3175 + result += 1UL << (frac_bits - 1); 3176 + result >>= frac_bits; 3177 + } 3178 + n >>= 1; 3179 + if (!n) 3180 + break; 3181 + x *= x; 3182 + x += 1UL << (frac_bits - 1); 3183 + x >>= frac_bits; 3184 + } 3185 + 3186 + return result; 3187 + } 3188 + 3189 + /* 3190 + * a1 = a0 * e + a * (1 - e) 3191 + * 3192 + * a2 = a1 * e + a * (1 - e) 3193 + * = (a0 * e + a * (1 - e)) * e + a * (1 - e) 3194 + * = a0 * e^2 + a * (1 - e) * (1 + e) 3195 + * 3196 + * a3 = a2 * e + a * (1 - e) 3197 + * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e) 3198 + * = a0 * e^3 + a * (1 - e) * (1 + e + e^2) 3199 + * 3200 + * ... 3201 + * 3202 + * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1] 3203 + * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e) 3204 + * = a0 * e^n + a * (1 - e^n) 3205 + * 3206 + * [1] application of the geometric series: 3207 + * 3208 + * n 1 - x^(n+1) 3209 + * S_n := \Sum x^i = ------------- 3210 + * i=0 1 - x 3211 + */ 3212 + static unsigned long 3213 + calc_load_n(unsigned long load, unsigned long exp, 3214 + unsigned long active, unsigned int n) 3215 + { 3216 + 3217 + return calc_load(load, fixed_power_int(exp, FSHIFT, n), active); 3218 + } 3219 + 3220 + /* 3221 + * NO_HZ can leave us missing all per-cpu ticks calling 3222 + * calc_load_account_active(), but since an idle CPU folds its delta into 3223 + * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold 3224 + * in the pending idle delta if our idle period crossed a load cycle boundary. 3225 + * 3226 + * Once we've updated the global active value, we need to apply the exponential 3227 + * weights adjusted to the number of cycles missed. 
3228 + */ 3229 + static void calc_global_nohz(unsigned long ticks) 3230 + { 3231 + long delta, active, n; 3232 + 3233 + if (time_before(jiffies, calc_load_update)) 3234 + return; 3235 + 3236 + /* 3237 + * If we crossed a calc_load_update boundary, make sure to fold 3238 + * any pending idle changes, the respective CPUs might have 3239 + * missed the tick driven calc_load_account_active() update 3240 + * due to NO_HZ. 3241 + */ 3242 + delta = calc_load_fold_idle(); 3243 + if (delta) 3244 + atomic_long_add(delta, &calc_load_tasks); 3245 + 3246 + /* 3247 + * If we were idle for multiple load cycles, apply them. 3248 + */ 3249 + if (ticks >= LOAD_FREQ) { 3250 + n = ticks / LOAD_FREQ; 3251 + 3252 + active = atomic_long_read(&calc_load_tasks); 3253 + active = active > 0 ? active * FIXED_1 : 0; 3254 + 3255 + avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n); 3256 + avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); 3257 + avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); 3258 + 3259 + calc_load_update += n * LOAD_FREQ; 3260 + } 3261 + 3262 + /* 3263 + * Its possible the remainder of the above division also crosses 3264 + * a LOAD_FREQ period, the regular check in calc_global_load() 3265 + * which comes after this will take care of that. 3266 + * 3267 + * Consider us being 11 ticks before a cycle completion, and us 3268 + * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will 3269 + * age us 4 cycles, and the test in calc_global_load() will 3270 + * pick up the final one. 
3271 + */ 3272 + } 3217 3273 #else 3218 3274 static void calc_load_account_idle(struct rq *this_rq) 3219 3275 { ··· 3344 3156 static inline long calc_load_fold_idle(void) 3345 3157 { 3346 3158 return 0; 3159 + } 3160 + 3161 + static void calc_global_nohz(unsigned long ticks) 3162 + { 3347 3163 } 3348 3164 #endif 3349 3165 ··· 3366 3174 loads[2] = (avenrun[2] + offset) << shift; 3367 3175 } 3368 3176 3369 - static unsigned long 3370 - calc_load(unsigned long load, unsigned long exp, unsigned long active) 3371 - { 3372 - load *= exp; 3373 - load += active * (FIXED_1 - exp); 3374 - return load >> FSHIFT; 3375 - } 3376 - 3377 3177 /* 3378 3178 * calc_load - update the avenrun load estimates 10 ticks after the 3379 3179 * CPUs have updated calc_load_tasks. 3380 3180 */ 3381 - void calc_global_load(void) 3181 + void calc_global_load(unsigned long ticks) 3382 3182 { 3383 - unsigned long upd = calc_load_update + 10; 3384 3183 long active; 3385 3184 3386 - if (time_before(jiffies, upd)) 3185 + calc_global_nohz(ticks); 3186 + 3187 + if (time_before(jiffies, calc_load_update + 10)) 3387 3188 return; 3388 3189 3389 3190 active = atomic_long_read(&calc_load_tasks); ··· 4030 3845 { 4031 3846 if (prev->se.on_rq) 4032 3847 update_rq_clock(rq); 4033 - rq->skip_clock_update = 0; 4034 3848 prev->sched_class->put_prev_task(rq, prev); 4035 3849 } 4036 3850 ··· 4087 3903 hrtick_clear(rq); 4088 3904 4089 3905 raw_spin_lock_irq(&rq->lock); 4090 - clear_tsk_need_resched(prev); 4091 3906 4092 3907 switch_count = &prev->nivcsw; 4093 3908 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { ··· 4118 3935 4119 3936 put_prev_task(rq, prev); 4120 3937 next = pick_next_task(rq); 3938 + clear_tsk_need_resched(prev); 3939 + rq->skip_clock_update = 0; 4121 3940 4122 3941 if (likely(prev != next)) { 4123 3942 sched_info_switch(prev, next);
+7 -1
kernel/timer.c
··· 1252 1252 struct tvec_base *base = __get_cpu_var(tvec_bases); 1253 1253 unsigned long expires; 1254 1254 1255 + /* 1256 + * Pretend that there is no timer pending if the cpu is offline. 1257 + * Possible pending timers will be migrated later to an active cpu. 1258 + */ 1259 + if (cpu_is_offline(smp_processor_id())) 1260 + return now + NEXT_TIMER_MAX_DELTA; 1255 1261 spin_lock(&base->lock); 1256 1262 if (time_before_eq(base->next_timer, base->timer_jiffies)) 1257 1263 base->next_timer = __next_timer_interrupt(base); ··· 1325 1319 { 1326 1320 jiffies_64 += ticks; 1327 1321 update_wall_time(); 1328 - calc_global_load(); 1322 + calc_global_load(ticks); 1329 1323 } 1330 1324 1331 1325 #ifdef __ARCH_WANT_SYS_ALARM
+9 -1
kernel/trace/trace.c
··· 2338 2338 return count; 2339 2339 } 2340 2340 2341 + static loff_t tracing_seek(struct file *file, loff_t offset, int origin) 2342 + { 2343 + if (file->f_mode & FMODE_READ) 2344 + return seq_lseek(file, offset, origin); 2345 + else 2346 + return 0; 2347 + } 2348 + 2341 2349 static const struct file_operations tracing_fops = { 2342 2350 .open = tracing_open, 2343 2351 .read = seq_read, 2344 2352 .write = tracing_write_stub, 2345 - .llseek = seq_lseek, 2353 + .llseek = tracing_seek, 2346 2354 .release = tracing_release, 2347 2355 }; 2348 2356
+3 -5
net/ceph/messenger.c
··· 97 97 int ceph_msgr_init(void) 98 98 { 99 99 ceph_msgr_wq = create_workqueue("ceph-msgr"); 100 - if (IS_ERR(ceph_msgr_wq)) { 101 - int ret = PTR_ERR(ceph_msgr_wq); 102 - pr_err("msgr_init failed to create workqueue: %d\n", ret); 103 - ceph_msgr_wq = NULL; 104 - return ret; 100 + if (!ceph_msgr_wq) { 101 + pr_err("msgr_init failed to create workqueue\n"); 102 + return -ENOMEM; 105 103 } 106 104 return 0; 107 105 }
+9 -6
net/ceph/pagevec.c
··· 13 13 * build a vector of user pages 14 14 */ 15 15 struct page **ceph_get_direct_page_vector(const char __user *data, 16 - int num_pages) 16 + int num_pages, bool write_page) 17 17 { 18 18 struct page **pages; 19 19 int rc; ··· 24 24 25 25 down_read(&current->mm->mmap_sem); 26 26 rc = get_user_pages(current, current->mm, (unsigned long)data, 27 - num_pages, 0, 0, pages, NULL); 27 + num_pages, write_page, 0, pages, NULL); 28 28 up_read(&current->mm->mmap_sem); 29 - if (rc < 0) 29 + if (rc < num_pages) 30 30 goto fail; 31 31 return pages; 32 32 33 33 fail: 34 - kfree(pages); 34 + ceph_put_page_vector(pages, rc > 0 ? rc : 0, false); 35 35 return ERR_PTR(rc); 36 36 } 37 37 EXPORT_SYMBOL(ceph_get_direct_page_vector); 38 38 39 - void ceph_put_page_vector(struct page **pages, int num_pages) 39 + void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) 40 40 { 41 41 int i; 42 42 43 - for (i = 0; i < num_pages; i++) 43 + for (i = 0; i < num_pages; i++) { 44 + if (dirty) 45 + set_page_dirty_lock(pages[i]); 44 46 put_page(pages[i]); 47 + } 45 48 kfree(pages); 46 49 } 47 50 EXPORT_SYMBOL(ceph_put_page_vector);
+1 -1
scripts/recordmcount.h
··· 119 119 120 120 static void fn_ELF_R_INFO(Elf_Rel *const rp, unsigned sym, unsigned type) 121 121 { 122 - rp->r_info = ELF_R_INFO(sym, type); 122 + rp->r_info = _w(ELF_R_INFO(sym, type)); 123 123 } 124 124 static void (*Elf_r_info)(Elf_Rel *const rp, unsigned sym, unsigned type) = fn_ELF_R_INFO; 125 125
+3 -1
scripts/tags.sh
··· 125 125 -I DEFINE_TRACE,EXPORT_TRACEPOINT_SYMBOL,EXPORT_TRACEPOINT_SYMBOL_GPL \ 126 126 --extra=+f --c-kinds=-px \ 127 127 --regex-asm='/^ENTRY\(([^)]*)\).*/\1/' \ 128 - --regex-c='/^SYSCALL_DEFINE[[:digit:]]?\(([^,)]*).*/sys_\1/' 128 + --regex-c='/^SYSCALL_DEFINE[[:digit:]]?\(([^,)]*).*/sys_\1/' \ 129 + --regex-c++='/^TRACE_EVENT\(([^,)]*).*/trace_\1/' \ 130 + --regex-c++='/^DEFINE_EVENT\(([^,)]*).*/trace_\1/' 129 131 130 132 all_kconfigs | xargs $1 -a \ 131 133 --langdef=kconfig --language-force=kconfig \
+22 -2
sound/pci/hda/patch_realtek.c
··· 10830 10830 { 10831 10831 struct alc_spec *spec = codec->spec; 10832 10832 struct auto_pin_cfg *cfg = &spec->autocfg; 10833 - int i, err; 10833 + int i, err, type; 10834 + int type_idx = 0; 10834 10835 hda_nid_t nid; 10835 10836 10836 10837 for (i = 0; i < cfg->num_inputs; i++) { ··· 10840 10839 nid = cfg->inputs[i].pin; 10841 10840 if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP) { 10842 10841 char label[32]; 10842 + type = cfg->inputs[i].type; 10843 + if (i > 0 && type == cfg->inputs[i - 1].type) 10844 + type_idx++; 10845 + else 10846 + type_idx = 0; 10843 10847 snprintf(label, sizeof(label), "%s Boost", 10844 10848 hda_get_autocfg_input_label(codec, cfg, i)); 10845 - err = add_control(spec, ALC_CTL_WIDGET_VOL, label, 0, 10849 + err = add_control(spec, ALC_CTL_WIDGET_VOL, label, 10850 + type_idx, 10846 10851 HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT)); 10847 10852 if (err < 0) 10848 10853 return err; ··· 14807 14800 enum { 14808 14801 ALC269_FIXUP_SONY_VAIO, 14809 14802 ALC269_FIXUP_DELL_M101Z, 14803 + ALC269_FIXUP_LENOVO_EDGE14, 14804 + ALC269_FIXUP_ASUS_G73JW, 14810 14805 }; 14811 14806 14812 14807 static const struct alc_fixup alc269_fixups[] = { ··· 14826 14817 {} 14827 14818 } 14828 14819 }, 14820 + [ALC269_FIXUP_LENOVO_EDGE14] = { 14821 + .sku = ALC_FIXUP_SKU_IGNORE, 14822 + }, 14823 + [ALC269_FIXUP_ASUS_G73JW] = { 14824 + .pins = (const struct alc_pincfg[]) { 14825 + { 0x17, 0x99130111 }, /* subwoofer */ 14826 + { } 14827 + } 14828 + }, 14829 14829 }; 14830 14830 14831 14831 static struct snd_pci_quirk alc269_fixup_tbl[] = { 14832 14832 SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), 14833 14833 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), 14834 + SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_LENOVO_EDGE14), 14835 + SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), 14834 14836 {} 14835 14837 }; 14836 14838
+1 -1
sound/soc/codecs/wm8580.c
··· 161 161 static const u16 wm8580_reg[] = { 162 162 0x0121, 0x017e, 0x007d, 0x0014, /*R3*/ 163 163 0x0121, 0x017e, 0x007d, 0x0194, /*R7*/ 164 - 0x001c, 0x0002, 0x0002, 0x00c2, /*R11*/ 164 + 0x0010, 0x0002, 0x0002, 0x00c2, /*R11*/ 165 165 0x0182, 0x0082, 0x000a, 0x0024, /*R15*/ 166 166 0x0009, 0x0000, 0x00ff, 0x0000, /*R19*/ 167 167 0x00ff, 0x00ff, 0x00ff, 0x00ff, /*R23*/
+2 -1
sound/soc/codecs/wm8904.c
··· 818 818 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 819 819 struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); 820 820 821 - return wm8904->deemph; 821 + ucontrol->value.enumerated.item[0] = wm8904->deemph; 822 + return 0; 822 823 } 823 824 824 825 static int wm8904_put_deemph(struct snd_kcontrol *kcontrol,
+2 -1
sound/soc/codecs/wm8955.c
··· 380 380 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 381 381 struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); 382 382 383 - return wm8955->deemph; 383 + ucontrol->value.enumerated.item[0] = wm8955->deemph; 384 + return 0; 384 385 } 385 386 386 387 static int wm8955_put_deemph(struct snd_kcontrol *kcontrol,
+2 -1
sound/soc/codecs/wm8960.c
··· 138 138 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 139 139 struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); 140 140 141 - return wm8960->deemph; 141 + ucontrol->value.enumerated.item[0] = wm8960->deemph; 142 + return 0; 142 143 } 143 144 144 145 static int wm8960_put_deemph(struct snd_kcontrol *kcontrol,
+3
sound/soc/soc-dapm.c
··· 944 944 case SND_SOC_DAPM_STREAM_RESUME: 945 945 sys_power = 1; 946 946 break; 947 + case SND_SOC_DAPM_STREAM_STOP: 948 + sys_power = !!codec->active; 949 + break; 947 950 case SND_SOC_DAPM_STREAM_SUSPEND: 948 951 sys_power = 0; 949 952 break;