Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge commit 'v2.6.37-rc7' into perf/core

Merge reason: Pick up the latest -rc.

Signed-off-by: Ingo Molnar <mingo@elte.hu>

+4236 -3160
+6 -1
Documentation/filesystems/Locking
··· 173 173 sector_t (*bmap)(struct address_space *, sector_t); 174 174 int (*invalidatepage) (struct page *, unsigned long); 175 175 int (*releasepage) (struct page *, int); 176 + void (*freepage)(struct page *); 176 177 int (*direct_IO)(int, struct kiocb *, const struct iovec *iov, 177 178 loff_t offset, unsigned long nr_segs); 178 179 int (*launder_page) (struct page *); 179 180 180 181 locking rules: 181 - All except set_page_dirty may block 182 + All except set_page_dirty and freepage may block 182 183 183 184 BKL PageLocked(page) i_mutex 184 185 writepage: no yes, unlocks (see below) ··· 194 193 bmap: no 195 194 invalidatepage: no yes 196 195 releasepage: no yes 196 + freepage: no yes 197 197 direct_IO: no 198 198 launder_page: no yes 199 199 ··· 289 287 buffers from the page in preparation for freeing it. It returns zero to 290 288 indicate that the buffers are (or may be) freeable. If ->releasepage is zero, 291 289 the kernel assumes that the fs has no private interest in the buffers. 290 + 291 + ->freepage() is called when the kernel is done dropping the page 292 + from the page cache. 292 293 293 294 ->launder_page() may be called prior to releasing a page if 294 295 it is still found to be dirty. It returns zero if the page was successfully
+7
Documentation/filesystems/vfs.txt
··· 534 534 sector_t (*bmap)(struct address_space *, sector_t); 535 535 int (*invalidatepage) (struct page *, unsigned long); 536 536 int (*releasepage) (struct page *, int); 537 + void (*freepage)(struct page *); 537 538 ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, 538 539 loff_t offset, unsigned long nr_segs); 539 540 struct page* (*get_xip_page)(struct address_space *, sector_t, ··· 678 677 that all pages are invalidated, then its releasepage will 679 678 need to ensure this. Possibly it can clear the PageUptodate 680 679 bit if it cannot free private data yet. 680 + 681 + freepage: freepage is called once the page is no longer visible in 682 + the page cache in order to allow the cleanup of any private 683 + data. Since it may be called by the memory reclaimer, it 684 + should not assume that the original address_space mapping still 685 + exists, and it should not block. 681 686 682 687 direct_IO: called by the generic read/write routines to perform 683 688 direct_IO - that is IO requests which bypass the page cache
-5
Documentation/kernel-parameters.txt
··· 2167 2167 reset_devices [KNL] Force drivers to reset the underlying device 2168 2168 during initialization. 2169 2169 2170 - resource_alloc_from_bottom 2171 - Allocate new resources from the beginning of available 2172 - space, not the end. If you need to use this, please 2173 - report a bug. 2174 - 2175 2170 resume= [SWSUSP] 2176 2171 Specify the partition device for software suspend 2177 2172
+2 -2
Documentation/power/runtime_pm.txt
··· 379 379 zero) 380 380 381 381 bool pm_runtime_suspended(struct device *dev); 382 - - return true if the device's runtime PM status is 'suspended', or false 383 - otherwise 382 + - return true if the device's runtime PM status is 'suspended' and its 383 + 'power.disable_depth' field is equal to zero, or false otherwise 384 384 385 385 void pm_runtime_allow(struct device *dev); 386 386 - set the power.runtime_auto flag for the device and decrease its usage
+12 -4
MAINTAINERS
··· 559 559 S: Maintained 560 560 561 561 ARM/BCMRING ARM ARCHITECTURE 562 - M: Leo Chen <leochen@broadcom.com> 562 + M: Jiandong Zheng <jdzheng@broadcom.com> 563 563 M: Scott Branden <sbranden@broadcom.com> 564 564 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 565 565 S: Maintained 566 566 F: arch/arm/mach-bcmring 567 567 568 568 ARM/BCMRING MTD NAND DRIVER 569 - M: Leo Chen <leochen@broadcom.com> 569 + M: Jiandong Zheng <jdzheng@broadcom.com> 570 570 M: Scott Branden <sbranden@broadcom.com> 571 571 L: linux-mtd@lists.infradead.org 572 572 S: Maintained ··· 815 815 F: drivers/mmc/host/msm_sdcc.h 816 816 F: drivers/serial/msm_serial.h 817 817 F: drivers/serial/msm_serial.c 818 - T: git git://codeaurora.org/quic/kernel/dwalker/linux-msm.git 818 + T: git git://codeaurora.org/quic/kernel/davidb/linux-msm.git 819 819 S: Maintained 820 820 821 821 ARM/TOSA MACHINE SUPPORT ··· 5932 5932 5933 5933 TULIP NETWORK DRIVERS 5934 5934 M: Grant Grundler <grundler@parisc-linux.org> 5935 - M: Kyle McMartin <kyle@mcmartin.ca> 5936 5935 L: netdev@vger.kernel.org 5937 5936 S: Maintained 5938 5937 F: drivers/net/tulip/ ··· 6582 6583 F: include/linux/mfd/wm8400* 6583 6584 F: include/sound/wm????.h 6584 6585 F: sound/soc/codecs/wm* 6586 + 6587 + WORKQUEUE 6588 + M: Tejun Heo <tj@kernel.org> 6589 + L: linux-kernel@vger.kernel.org 6590 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git 6591 + S: Maintained 6592 + F: include/linux/workqueue.h 6593 + F: kernel/workqueue.c 6594 + F: Documentation/workqueue.txt 6585 6595 6586 6596 X.25 NETWORK LAYER 6587 6597 M: Andrew Hendry <andrew.hendry@gmail.com>
+1 -1
Makefile
··· 1 1 VERSION = 2 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 37 4 - EXTRAVERSION = -rc5 4 + EXTRAVERSION = -rc7 5 5 NAME = Flesh-Eating Bats with Fangs 6 6 7 7 # *DOCUMENTATION*
+2 -2
arch/arm/Kconfig
··· 1311 1311 1312 1312 config THUMB2_KERNEL 1313 1313 bool "Compile the kernel in Thumb-2 mode" 1314 - depends on CPU_V7 && EXPERIMENTAL 1314 + depends on CPU_V7 && !CPU_V6 && EXPERIMENTAL 1315 1315 select AEABI 1316 1316 select ARM_ASM_UNIFIED 1317 1317 help ··· 1759 1759 1760 1760 config FPE_NWFPE 1761 1761 bool "NWFPE math emulation" 1762 - depends on !AEABI || OABI_COMPAT 1762 + depends on (!AEABI || OABI_COMPAT) && !THUMB2_KERNEL 1763 1763 ---help--- 1764 1764 Say Y to include the NWFPE floating point emulator in the kernel. 1765 1765 This is necessary to run most binaries. Linux does not currently
+1 -1
arch/arm/mach-at91/Makefile
··· 65 65 obj-$(CONFIG_MACH_CPU9G20) += board-cpu9krea.o 66 66 obj-$(CONFIG_MACH_STAMP9G20) += board-stamp9g20.o 67 67 obj-$(CONFIG_MACH_PORTUXG20) += board-stamp9g20.o 68 - obj-$(CONFIG_MACH_PCONTROL_G20) += board-pcontrol-g20.o 68 + obj-$(CONFIG_MACH_PCONTROL_G20) += board-pcontrol-g20.o board-stamp9g20.o 69 69 70 70 # AT91SAM9260/AT91SAM9G20 board-specific support 71 71 obj-$(CONFIG_MACH_SNAPPER_9260) += board-snapper9260.o
+3 -95
arch/arm/mach-at91/board-pcontrol-g20.c
··· 31 31 32 32 #include <mach/board.h> 33 33 #include <mach/at91sam9_smc.h> 34 + #include <mach/stamp9g20.h> 34 35 35 36 #include "sam9_smc.h" 36 37 #include "generic.h" ··· 39 38 40 39 static void __init pcontrol_g20_map_io(void) 41 40 { 42 - /* Initialize processor: 18.432 MHz crystal */ 43 - at91sam9260_initialize(18432000); 44 - 45 - /* DGBU on ttyS0. (Rx, Tx) only TTL -> JTAG connector X7 17,19 ) */ 46 - at91_register_uart(0, 0, 0); 41 + stamp9g20_map_io(); 47 42 48 43 /* USART0 on ttyS1. (Rx, Tx, CTS, RTS) piggyback A2 */ 49 44 at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS ··· 51 54 52 55 /* USART2 on ttyS3. (Rx, Tx) 9bit-Bus Multidrop-mode X4 */ 53 56 at91_register_uart(AT91SAM9260_ID_US4, 3, 0); 54 - 55 - /* set serial console to ttyS0 (ie, DBGU) */ 56 - at91_set_serial_console(0); 57 57 } 58 58 59 59 ··· 59 65 at91sam9260_init_interrupts(NULL); 60 66 } 61 67 62 - 63 - /* 64 - * NAND flash 512MiB 1,8V 8-bit, sector size 128 KiB 65 - */ 66 - static struct atmel_nand_data __initdata nand_data = { 67 - .ale = 21, 68 - .cle = 22, 69 - .rdy_pin = AT91_PIN_PC13, 70 - .enable_pin = AT91_PIN_PC14, 71 - }; 72 - 73 - /* 74 - * Bus timings; unit = 7.57ns 75 - */ 76 - static struct sam9_smc_config __initdata nand_smc_config = { 77 - .ncs_read_setup = 0, 78 - .nrd_setup = 2, 79 - .ncs_write_setup = 0, 80 - .nwe_setup = 2, 81 - 82 - .ncs_read_pulse = 4, 83 - .nrd_pulse = 4, 84 - .ncs_write_pulse = 4, 85 - .nwe_pulse = 4, 86 - 87 - .read_cycle = 7, 88 - .write_cycle = 7, 89 - 90 - .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE 91 - | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8, 92 - .tdf_cycles = 3, 93 - }; 94 68 95 69 static struct sam9_smc_config __initdata pcontrol_smc_config[2] = { { 96 70 .ncs_read_setup = 16, ··· 100 138 .tdf_cycles = 1, 101 139 } }; 102 140 103 - static void __init add_device_nand(void) 104 - { 105 - /* configure chip-select 3 (NAND) */ 106 - sam9_smc_configure(3, &nand_smc_config); 107 - at91_add_device_nand(&nand_data); 108 - } 109 - 
110 - 111 141 static void __init add_device_pcontrol(void) 112 142 { 113 143 /* configure chip-select 4 (IO compatible to 8051 X4 ) */ ··· 107 153 /* configure chip-select 7 (FerroRAM 256KiBx16bit MR2A16A D4 ) */ 108 154 sam9_smc_configure(7, &pcontrol_smc_config[1]); 109 155 } 110 - 111 - 112 - /* 113 - * MCI (SD/MMC) 114 - * det_pin, wp_pin and vcc_pin are not connected 115 - */ 116 - #if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE) 117 - static struct mci_platform_data __initdata mmc_data = { 118 - .slot[0] = { 119 - .bus_width = 4, 120 - }, 121 - }; 122 - #else 123 - static struct at91_mmc_data __initdata mmc_data = { 124 - .wire4 = 1, 125 - }; 126 - #endif 127 156 128 157 129 158 /* ··· 202 265 }; 203 266 204 267 205 - /* 206 - * Dallas 1-Wire DS2431 207 - */ 208 - static struct w1_gpio_platform_data w1_gpio_pdata = { 209 - .pin = AT91_PIN_PA29, 210 - .is_open_drain = 1, 211 - }; 212 - 213 - static struct platform_device w1_device = { 214 - .name = "w1-gpio", 215 - .id = -1, 216 - .dev.platform_data = &w1_gpio_pdata, 217 - }; 218 - 219 - static void add_wire1(void) 220 - { 221 - at91_set_GPIO_periph(w1_gpio_pdata.pin, 1); 222 - at91_set_multi_drive(w1_gpio_pdata.pin, 1); 223 - platform_device_register(&w1_device); 224 - } 225 - 226 - 227 268 static void __init pcontrol_g20_board_init(void) 228 269 { 229 - at91_add_device_serial(); 230 - add_device_nand(); 231 - #if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE) 232 - at91_add_device_mci(0, &mmc_data); 233 - #else 234 - at91_add_device_mmc(0, &mmc_data); 235 - #endif 270 + stamp9g20_board_init(); 236 271 at91_add_device_usbh(&usbh_data); 237 272 at91_add_device_eth(&macb_data); 238 273 at91_add_device_i2c(pcontrol_g20_i2c_devices, 239 274 ARRAY_SIZE(pcontrol_g20_i2c_devices)); 240 - add_wire1(); 241 275 add_device_pcontrol(); 242 276 at91_add_device_spi(pcontrol_g20_spi_devices, 243 277 ARRAY_SIZE(pcontrol_g20_spi_devices));
+43 -39
arch/arm/mach-at91/board-stamp9g20.c
··· 32 32 #include "generic.h" 33 33 34 34 35 - static void __init portuxg20_map_io(void) 35 + void __init stamp9g20_map_io(void) 36 36 { 37 37 /* Initialize processor: 18.432 MHz crystal */ 38 38 at91sam9260_initialize(18432000); 39 39 40 40 /* DGBU on ttyS0. (Rx & Tx only) */ 41 41 at91_register_uart(0, 0, 0); 42 + 43 + /* set serial console to ttyS0 (ie, DBGU) */ 44 + at91_set_serial_console(0); 45 + } 46 + 47 + static void __init stamp9g20evb_map_io(void) 48 + { 49 + stamp9g20_map_io(); 50 + 51 + /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ 52 + at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS 53 + | ATMEL_UART_DTR | ATMEL_UART_DSR 54 + | ATMEL_UART_DCD | ATMEL_UART_RI); 55 + } 56 + 57 + static void __init portuxg20_map_io(void) 58 + { 59 + stamp9g20_map_io(); 42 60 43 61 /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ 44 62 at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS ··· 74 56 75 57 /* USART5 on ttyS6. (Rx, Tx only) */ 76 58 at91_register_uart(AT91SAM9260_ID_US5, 6, 0); 77 - 78 - /* set serial console to ttyS0 (ie, DBGU) */ 79 - at91_set_serial_console(0); 80 - } 81 - 82 - static void __init stamp9g20_map_io(void) 83 - { 84 - /* Initialize processor: 18.432 MHz crystal */ 85 - at91sam9260_initialize(18432000); 86 - 87 - /* DGBU on ttyS0. (Rx & Tx only) */ 88 - at91_register_uart(0, 0, 0); 89 - 90 - /* USART0 on ttyS1. 
(Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ 91 - at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS 92 - | ATMEL_UART_DTR | ATMEL_UART_DSR 93 - | ATMEL_UART_DCD | ATMEL_UART_RI); 94 - 95 - /* set serial console to ttyS0 (ie, DBGU) */ 96 - at91_set_serial_console(0); 97 59 } 98 60 99 61 static void __init init_irq(void) ··· 154 156 .pullup_pin = 0, /* pull-up driven by UDC */ 155 157 }; 156 158 157 - static struct at91_udc_data __initdata stamp9g20_udc_data = { 159 + static struct at91_udc_data __initdata stamp9g20evb_udc_data = { 158 160 .vbus_pin = AT91_PIN_PA22, 159 161 .pullup_pin = 0, /* pull-up driven by UDC */ 160 162 }; ··· 188 190 } 189 191 }; 190 192 191 - static struct gpio_led stamp9g20_leds[] = { 193 + static struct gpio_led stamp9g20evb_leds[] = { 192 194 { 193 195 .name = "D8", 194 196 .gpio = AT91_PIN_PB18, ··· 248 250 } 249 251 250 252 251 - static void __init generic_board_init(void) 253 + void __init stamp9g20_board_init(void) 252 254 { 253 255 /* Serial */ 254 256 at91_add_device_serial(); ··· 260 262 #else 261 263 at91_add_device_mmc(0, &mmc_data); 262 264 #endif 263 - /* USB Host */ 264 - at91_add_device_usbh(&usbh_data); 265 - /* Ethernet */ 266 - at91_add_device_eth(&macb_data); 267 - /* I2C */ 268 - at91_add_device_i2c(NULL, 0); 269 265 /* W1 */ 270 266 add_w1(); 271 267 } 272 268 273 269 static void __init portuxg20_board_init(void) 274 270 { 275 - generic_board_init(); 276 - /* SPI */ 277 - at91_add_device_spi(portuxg20_spi_devices, ARRAY_SIZE(portuxg20_spi_devices)); 271 + stamp9g20_board_init(); 272 + /* USB Host */ 273 + at91_add_device_usbh(&usbh_data); 278 274 /* USB Device */ 279 275 at91_add_device_udc(&portuxg20_udc_data); 276 + /* Ethernet */ 277 + at91_add_device_eth(&macb_data); 278 + /* I2C */ 279 + at91_add_device_i2c(NULL, 0); 280 + /* SPI */ 281 + at91_add_device_spi(portuxg20_spi_devices, ARRAY_SIZE(portuxg20_spi_devices)); 280 282 /* LEDs */ 281 283 at91_gpio_leds(portuxg20_leds, 
ARRAY_SIZE(portuxg20_leds)); 282 284 } 283 285 284 - static void __init stamp9g20_board_init(void) 286 + static void __init stamp9g20evb_board_init(void) 285 287 { 286 - generic_board_init(); 288 + stamp9g20_board_init(); 289 + /* USB Host */ 290 + at91_add_device_usbh(&usbh_data); 287 291 /* USB Device */ 288 - at91_add_device_udc(&stamp9g20_udc_data); 292 + at91_add_device_udc(&stamp9g20evb_udc_data); 293 + /* Ethernet */ 294 + at91_add_device_eth(&macb_data); 295 + /* I2C */ 296 + at91_add_device_i2c(NULL, 0); 289 297 /* LEDs */ 290 - at91_gpio_leds(stamp9g20_leds, ARRAY_SIZE(stamp9g20_leds)); 298 + at91_gpio_leds(stamp9g20evb_leds, ARRAY_SIZE(stamp9g20evb_leds)); 291 299 } 292 300 293 301 MACHINE_START(PORTUXG20, "taskit PortuxG20") ··· 309 305 /* Maintainer: taskit GmbH */ 310 306 .boot_params = AT91_SDRAM_BASE + 0x100, 311 307 .timer = &at91sam926x_timer, 312 - .map_io = stamp9g20_map_io, 308 + .map_io = stamp9g20evb_map_io, 313 309 .init_irq = init_irq, 314 - .init_machine = stamp9g20_board_init, 310 + .init_machine = stamp9g20evb_board_init, 315 311 MACHINE_END
+1 -1
arch/arm/mach-at91/clock.c
··· 658 658 /* Now set uhpck values */ 659 659 uhpck.parent = &utmi_clk; 660 660 uhpck.pmc_mask = AT91SAM926x_PMC_UHP; 661 - uhpck.rate_hz = utmi_clk.parent->rate_hz; 661 + uhpck.rate_hz = utmi_clk.rate_hz; 662 662 uhpck.rate_hz /= 1 + ((at91_sys_read(AT91_PMC_USB) & AT91_PMC_OHCIUSBDIV) >> 8); 663 663 } 664 664
+7
arch/arm/mach-at91/include/mach/stamp9g20.h
··· 1 + #ifndef __MACH_STAMP9G20_H 2 + #define __MACH_STAMP9G20_H 3 + 4 + void stamp9g20_map_io(void); 5 + void stamp9g20_board_init(void); 6 + 7 + #endif
-1
arch/arm/mach-mmp/mmp2.c
··· 126 126 static APBC_CLK(twsi4, MMP2_TWSI4, 0, 26000000); 127 127 static APBC_CLK(twsi5, MMP2_TWSI5, 0, 26000000); 128 128 static APBC_CLK(twsi6, MMP2_TWSI6, 0, 26000000); 129 - static APBC_CLK(rtc, MMP2_RTC, 0, 32768); 130 129 131 130 static APMU_CLK(nand, NAND, 0xbf, 100000000); 132 131
+1 -1
arch/arm/mach-omap2/board-zoom-peripherals.c
··· 216 216 { 217 217 .name = "wl1271", 218 218 .mmc = 3, 219 - .caps = MMC_CAP_4_BIT_DATA, 219 + .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD, 220 220 .gpio_wp = -EINVAL, 221 221 .gpio_cd = -EINVAL, 222 222 .nonremovable = true,
+1 -1
arch/arm/mach-omap2/io.c
··· 297 297 return 0; 298 298 299 299 dpll3_m2_ck = clk_get(NULL, "dpll3_m2_ck"); 300 - if (!dpll3_m2_ck) 300 + if (IS_ERR(dpll3_m2_ck)) 301 301 return -EINVAL; 302 302 303 303 rate = clk_get_rate(dpll3_m2_ck);
+17 -17
arch/arm/mach-omap2/pm-debug.c
··· 161 161 printk(KERN_INFO "%-20s: 0x%08x\n", regs[i].name, regs[i].val); 162 162 } 163 163 164 + void omap2_pm_wakeup_on_timer(u32 seconds, u32 milliseconds) 165 + { 166 + u32 tick_rate, cycles; 167 + 168 + if (!seconds && !milliseconds) 169 + return; 170 + 171 + tick_rate = clk_get_rate(omap_dm_timer_get_fclk(gptimer_wakeup)); 172 + cycles = tick_rate * seconds + tick_rate * milliseconds / 1000; 173 + omap_dm_timer_stop(gptimer_wakeup); 174 + omap_dm_timer_set_load_start(gptimer_wakeup, 0, 0xffffffff - cycles); 175 + 176 + pr_info("PM: Resume timer in %u.%03u secs" 177 + " (%d ticks at %d ticks/sec.)\n", 178 + seconds, milliseconds, cycles, tick_rate); 179 + } 180 + 164 181 #ifdef CONFIG_DEBUG_FS 165 182 #include <linux/debugfs.h> 166 183 #include <linux/seq_file.h> ··· 369 352 pwrdm->state_timer[prev] += t - pwrdm->timer; 370 353 371 354 pwrdm->timer = t; 372 - } 373 - 374 - void omap2_pm_wakeup_on_timer(u32 seconds, u32 milliseconds) 375 - { 376 - u32 tick_rate, cycles; 377 - 378 - if (!seconds && !milliseconds) 379 - return; 380 - 381 - tick_rate = clk_get_rate(omap_dm_timer_get_fclk(gptimer_wakeup)); 382 - cycles = tick_rate * seconds + tick_rate * milliseconds / 1000; 383 - omap_dm_timer_stop(gptimer_wakeup); 384 - omap_dm_timer_set_load_start(gptimer_wakeup, 0, 0xffffffff - cycles); 385 - 386 - pr_info("PM: Resume timer in %u.%03u secs" 387 - " (%d ticks at %d ticks/sec.)\n", 388 - seconds, milliseconds, cycles, tick_rate); 389 355 } 390 356 391 357 static int clkdm_dbg_show_counter(struct clockdomain *clkdm, void *user)
+31 -3
arch/arm/mach-omap2/pm24xx.c
··· 53 53 #include <plat/powerdomain.h> 54 54 #include <plat/clockdomain.h> 55 55 56 + #ifdef CONFIG_SUSPEND 57 + static suspend_state_t suspend_state = PM_SUSPEND_ON; 58 + static inline bool is_suspending(void) 59 + { 60 + return (suspend_state != PM_SUSPEND_ON); 61 + } 62 + #else 63 + static inline bool is_suspending(void) 64 + { 65 + return false; 66 + } 67 + #endif 68 + 56 69 static void (*omap2_sram_idle)(void); 57 70 static void (*omap2_sram_suspend)(u32 dllctrl, void __iomem *sdrc_dlla_ctrl, 58 71 void __iomem *sdrc_power); ··· 133 120 goto no_sleep; 134 121 135 122 /* Block console output in case it is on one of the OMAP UARTs */ 136 - if (try_acquire_console_sem()) 137 - goto no_sleep; 123 + if (!is_suspending()) 124 + if (try_acquire_console_sem()) 125 + goto no_sleep; 138 126 139 127 omap_uart_prepare_idle(0); 140 128 omap_uart_prepare_idle(1); ··· 150 136 omap_uart_resume_idle(1); 151 137 omap_uart_resume_idle(0); 152 138 153 - release_console_sem(); 139 + if (!is_suspending()) 140 + release_console_sem(); 154 141 155 142 no_sleep: 156 143 if (omap2_pm_debug) { ··· 299 284 local_irq_enable(); 300 285 } 301 286 287 + static int omap2_pm_begin(suspend_state_t state) 288 + { 289 + suspend_state = state; 290 + return 0; 291 + } 292 + 302 293 static int omap2_pm_prepare(void) 303 294 { 304 295 /* We cannot sleep in idle until we have resumed */ ··· 354 333 enable_hlt(); 355 334 } 356 335 336 + static void omap2_pm_end(void) 337 + { 338 + suspend_state = PM_SUSPEND_ON; 339 + } 340 + 357 341 static struct platform_suspend_ops omap_pm_ops = { 342 + .begin = omap2_pm_begin, 358 343 .prepare = omap2_pm_prepare, 359 344 .enter = omap2_pm_enter, 360 345 .finish = omap2_pm_finish, 346 + .end = omap2_pm_end, 361 347 .valid = suspend_valid_only_mem, 362 348 }; 363 349
+20 -7
arch/arm/mach-omap2/pm34xx.c
··· 50 50 #include "sdrc.h" 51 51 #include "control.h" 52 52 53 + #ifdef CONFIG_SUSPEND 54 + static suspend_state_t suspend_state = PM_SUSPEND_ON; 55 + static inline bool is_suspending(void) 56 + { 57 + return (suspend_state != PM_SUSPEND_ON); 58 + } 59 + #else 60 + static inline bool is_suspending(void) 61 + { 62 + return false; 63 + } 64 + #endif 65 + 53 66 /* Scratchpad offsets */ 54 67 #define OMAP343X_TABLE_ADDRESS_OFFSET 0xc4 55 68 #define OMAP343X_TABLE_VALUE_OFFSET 0xc0 ··· 400 387 } 401 388 402 389 /* Block console output in case it is on one of the OMAP UARTs */ 403 - if (per_next_state < PWRDM_POWER_ON || 404 - core_next_state < PWRDM_POWER_ON) 405 - if (try_acquire_console_sem()) 406 - goto console_still_active; 390 + if (!is_suspending()) 391 + if (per_next_state < PWRDM_POWER_ON || 392 + core_next_state < PWRDM_POWER_ON) 393 + if (try_acquire_console_sem()) 394 + goto console_still_active; 407 395 408 396 /* PER */ 409 397 if (per_next_state < PWRDM_POWER_ON) { ··· 484 470 omap_uart_resume_idle(3); 485 471 } 486 472 487 - release_console_sem(); 473 + if (!is_suspending()) 474 + release_console_sem(); 488 475 489 476 console_still_active: 490 477 /* Disable IO-PAD and IO-CHAIN wakeup */ ··· 529 514 } 530 515 531 516 #ifdef CONFIG_SUSPEND 532 - static suspend_state_t suspend_state; 533 - 534 517 static int omap3_pm_prepare(void) 535 518 { 536 519 disable_hlt();
+6 -5
arch/arm/mach-omap2/prcm-common.h
··· 243 243 #define OMAP24XX_EN_GPT1_MASK (1 << 0) 244 244 245 245 /* PM_WKST_WKUP, CM_IDLEST_WKUP shared bits */ 246 - #define OMAP24XX_ST_GPIOS_SHIFT (1 << 2) 247 - #define OMAP24XX_ST_GPIOS_MASK 2 248 - #define OMAP24XX_ST_GPT1_SHIFT (1 << 0) 249 - #define OMAP24XX_ST_GPT1_MASK 0 246 + #define OMAP24XX_ST_GPIOS_SHIFT 2 247 + #define OMAP24XX_ST_GPIOS_MASK (1 << 2) 248 + #define OMAP24XX_ST_GPT1_SHIFT 0 249 + #define OMAP24XX_ST_GPT1_MASK (1 << 0) 250 250 251 251 /* CM_IDLEST_MDM and PM_WKST_MDM shared bits */ 252 - #define OMAP2430_ST_MDM_SHIFT (1 << 0) 252 + #define OMAP2430_ST_MDM_SHIFT 0 253 + #define OMAP2430_ST_MDM_MASK (1 << 0) 253 254 254 255 255 256 /* 3430 register bits shared between CM & PRM registers */
+2 -1
arch/arm/mach-pxa/palmtx.c
··· 241 241 /****************************************************************************** 242 242 * NAND Flash 243 243 ******************************************************************************/ 244 - #if defined(CONFIG_MTD_NAND_GPIO) || defined(CONFIG_MTD_NAND_GPIO_MODULE) 244 + #if defined(CONFIG_MTD_NAND_PLATFORM) || \ 245 + defined(CONFIG_MTD_NAND_PLATFORM_MODULE) 245 246 static void palmtx_nand_cmd_ctl(struct mtd_info *mtd, int cmd, 246 247 unsigned int ctrl) 247 248 {
+7
arch/arm/mach-s3c2412/Kconfig
··· 28 28 29 29 config S3C2412_PM 30 30 bool 31 + select S3C2412_PM_SLEEP 31 32 help 32 33 Internal config node to apply S3C2412 power management 34 + 35 + config S3C2412_PM_SLEEP 36 + bool 37 + help 38 + Internal config node to apply sleep for S3C2412 power management. 39 + Can be selected by another SoCs with similar sleep procedure. 33 40 34 41 # Note, the S3C2412 IOtiming support is in plat-s3c24xx 35 42
+2 -1
arch/arm/mach-s3c2412/Makefile
··· 14 14 obj-$(CONFIG_CPU_S3C2412) += clock.o 15 15 obj-$(CONFIG_CPU_S3C2412) += gpio.o 16 16 obj-$(CONFIG_S3C2412_DMA) += dma.o 17 - obj-$(CONFIG_S3C2412_PM) += pm.o sleep.o 17 + obj-$(CONFIG_S3C2412_PM) += pm.o 18 + obj-$(CONFIG_S3C2412_PM_SLEEP) += sleep.o 18 19 obj-$(CONFIG_S3C2412_CPUFREQ) += cpu-freq.o 19 20 20 21 # Machine support
+1
arch/arm/mach-s3c2416/Kconfig
··· 27 27 28 28 config S3C2416_PM 29 29 bool 30 + select S3C2412_PM_SLEEP 30 31 help 31 32 Internal config node to apply S3C2416 power management 32 33
+6
arch/arm/mach-s5pv210/mach-aquila.c
··· 378 378 static struct max8998_platform_data aquila_max8998_pdata = { 379 379 .num_regulators = ARRAY_SIZE(aquila_regulators), 380 380 .regulators = aquila_regulators, 381 + .buck1_set1 = S5PV210_GPH0(3), 382 + .buck1_set2 = S5PV210_GPH0(4), 383 + .buck2_set3 = S5PV210_GPH0(5), 384 + .buck1_max_voltage1 = 1200000, 385 + .buck1_max_voltage2 = 1200000, 386 + .buck2_max_voltage = 1200000, 381 387 }; 382 388 #endif 383 389
+6
arch/arm/mach-s5pv210/mach-goni.c
··· 518 518 static struct max8998_platform_data goni_max8998_pdata = { 519 519 .num_regulators = ARRAY_SIZE(goni_regulators), 520 520 .regulators = goni_regulators, 521 + .buck1_set1 = S5PV210_GPH0(3), 522 + .buck1_set2 = S5PV210_GPH0(4), 523 + .buck2_set3 = S5PV210_GPH0(5), 524 + .buck1_max_voltage1 = 1200000, 525 + .buck1_max_voltage2 = 1200000, 526 + .buck2_max_voltage = 1200000, 521 527 }; 522 528 #endif 523 529
+26 -4
arch/arm/mach-shmobile/include/mach/entry-macro.S
··· 1 1 /* 2 + * Copyright (C) 2010 Magnus Damm 2 3 * Copyright (C) 2008 Renesas Solutions Corp. 3 4 * 4 5 * This program is free software; you can redistribute it and/or modify ··· 15 14 * along with this program; if not, write to the Free Software 16 15 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 16 */ 18 - #include <mach/hardware.h> 19 17 #include <mach/irqs.h> 18 + 19 + #define INTCA_BASE 0xe6980000 20 + #define INTFLGA_OFFS 0x00000018 /* accept pending interrupt */ 21 + #define INTEVTA_OFFS 0x00000020 /* vector number of accepted interrupt */ 22 + #define INTLVLA_OFFS 0x00000030 /* priority level of accepted interrupt */ 23 + #define INTLVLB_OFFS 0x00000034 /* previous priority level */ 20 24 21 25 .macro disable_fiq 22 26 .endm 23 27 24 28 .macro get_irqnr_preamble, base, tmp 25 - ldr \base, =INTFLGA 29 + ldr \base, =INTCA_BASE 26 30 .endm 27 31 28 32 .macro arch_ret_to_user, tmp1, tmp2 29 33 .endm 30 34 31 35 .macro get_irqnr_and_base, irqnr, irqstat, base, tmp 32 - ldr \irqnr, [\base] 36 + /* The single INTFLGA read access below results in the following: 37 + * 38 + * 1. INTLVLB is updated with old priority value from INTLVLA 39 + * 2. Highest priority interrupt is accepted 40 + * 3. INTLVLA is updated to contain priority of accepted interrupt 41 + * 4. Accepted interrupt vector is stored in INTFLGA and INTEVTA 42 + */ 43 + ldr \irqnr, [\base, #INTFLGA_OFFS] 44 + 45 + /* Restore INTLVLA with the value saved in INTLVLB. 46 + * This is required to support interrupt priorities properly. 47 + */ 48 + ldrb \tmp, [\base, #INTLVLB_OFFS] 49 + strb \tmp, [\base, #INTLVLA_OFFS] 50 + 51 + /* Handle invalid vector number case */ 33 52 cmp \irqnr, #0 34 53 beq 1000f 35 - /* intevt to irq number */ 54 + 55 + /* Convert vector to irq number, same as the evt2irq() macro */ 36 56 lsr \irqnr, \irqnr, #0x5 37 57 subs \irqnr, \irqnr, #16 38 58
+1 -1
arch/arm/mach-shmobile/include/mach/vmalloc.h
··· 2 2 #define __ASM_MACH_VMALLOC_H 3 3 4 4 /* Vmalloc at ... - 0xe5ffffff */ 5 - #define VMALLOC_END 0xe6000000 5 + #define VMALLOC_END 0xe6000000UL 6 6 7 7 #endif /* __ASM_MACH_VMALLOC_H */
+20 -8
arch/arm/mm/cache-v6.S
··· 203 203 * - end - virtual end address of region 204 204 */ 205 205 v6_dma_inv_range: 206 + #ifdef CONFIG_DMA_CACHE_RWFO 207 + ldrb r2, [r0] @ read for ownership 208 + strb r2, [r0] @ write for ownership 209 + #endif 206 210 tst r0, #D_CACHE_LINE_SIZE - 1 207 211 bic r0, r0, #D_CACHE_LINE_SIZE - 1 208 212 #ifdef HARVARD_CACHE ··· 215 211 mcrne p15, 0, r0, c7, c11, 1 @ clean unified line 216 212 #endif 217 213 tst r1, #D_CACHE_LINE_SIZE - 1 214 + #ifdef CONFIG_DMA_CACHE_RWFO 215 + ldrneb r2, [r1, #-1] @ read for ownership 216 + strneb r2, [r1, #-1] @ write for ownership 217 + #endif 218 218 bic r1, r1, #D_CACHE_LINE_SIZE - 1 219 219 #ifdef HARVARD_CACHE 220 220 mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line ··· 226 218 mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line 227 219 #endif 228 220 1: 229 - #ifdef CONFIG_DMA_CACHE_RWFO 230 - ldr r2, [r0] @ read for ownership 231 - str r2, [r0] @ write for ownership 232 - #endif 233 221 #ifdef HARVARD_CACHE 234 222 mcr p15, 0, r0, c7, c6, 1 @ invalidate D line 235 223 #else ··· 233 229 #endif 234 230 add r0, r0, #D_CACHE_LINE_SIZE 235 231 cmp r0, r1 232 + #ifdef CONFIG_DMA_CACHE_RWFO 233 + ldrlo r2, [r0] @ read for ownership 234 + strlo r2, [r0] @ write for ownership 235 + #endif 236 236 blo 1b 237 237 mov r0, #0 238 238 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer ··· 271 263 * - end - virtual end address of region 272 264 */ 273 265 ENTRY(v6_dma_flush_range) 266 + #ifdef CONFIG_DMA_CACHE_RWFO 267 + ldrb r2, [r0] @ read for ownership 268 + strb r2, [r0] @ write for ownership 269 + #endif 274 270 bic r0, r0, #D_CACHE_LINE_SIZE - 1 275 271 1: 276 - #ifdef CONFIG_DMA_CACHE_RWFO 277 - ldr r2, [r0] @ read for ownership 278 - str r2, [r0] @ write for ownership 279 - #endif 280 272 #ifdef HARVARD_CACHE 281 273 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line 282 274 #else ··· 284 276 #endif 285 277 add r0, r0, #D_CACHE_LINE_SIZE 286 278 cmp r0, r1 279 + #ifdef CONFIG_DMA_CACHE_RWFO 280 + ldrlob 
r2, [r0] @ read for ownership 281 + strlob r2, [r0] @ write for ownership 282 + #endif 287 283 blo 1b 288 284 mov r0, #0 289 285 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+18 -11
arch/arm/mm/cache-v7.S
··· 173 173 UNWIND(.fnstart ) 174 174 dcache_line_size r2, r3 175 175 sub r3, r2, #1 176 - bic r0, r0, r3 176 + bic r12, r0, r3 177 177 1: 178 - USER( mcr p15, 0, r0, c7, c11, 1 ) @ clean D line to the point of unification 179 - dsb 180 - USER( mcr p15, 0, r0, c7, c5, 1 ) @ invalidate I line 181 - add r0, r0, r2 182 - 2: 183 - cmp r0, r1 178 + USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification 179 + add r12, r12, r2 180 + cmp r12, r1 184 181 blo 1b 182 + dsb 183 + icache_line_size r2, r3 184 + sub r3, r2, #1 185 + bic r12, r0, r3 186 + 2: 187 + USER( mcr p15, 0, r12, c7, c5, 1 ) @ invalidate I line 188 + add r12, r12, r2 189 + cmp r12, r1 190 + blo 2b 191 + 3: 185 192 mov r0, #0 186 193 ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable 187 194 ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB ··· 201 194 * isn't mapped, just try the next page. 202 195 */ 203 196 9001: 204 - mov r0, r0, lsr #12 205 - mov r0, r0, lsl #12 206 - add r0, r0, #4096 207 - b 2b 197 + mov r12, r12, lsr #12 198 + mov r12, r12, lsl #12 199 + add r12, r12, #4096 200 + b 3b 208 201 UNWIND(.fnend ) 209 202 ENDPROC(v7_coherent_kern_range) 210 203 ENDPROC(v7_coherent_user_range)
+16 -6
arch/arm/mm/proc-macros.S
··· 61 61 .endm 62 62 63 63 /* 64 - * cache_line_size - get the cache line size from the CSIDR register 65 - * (available on ARMv7+). It assumes that the CSSR register was configured 66 - * to access the L1 data cache CSIDR. 64 + * dcache_line_size - get the minimum D-cache line size from the CTR register 65 + * on ARMv7. 67 66 */ 68 67 .macro dcache_line_size, reg, tmp 69 - mrc p15, 1, \tmp, c0, c0, 0 @ read CSIDR 70 - and \tmp, \tmp, #7 @ cache line size encoding 71 - mov \reg, #16 @ size offset 68 + mrc p15, 0, \tmp, c0, c0, 1 @ read ctr 69 + lsr \tmp, \tmp, #16 70 + and \tmp, \tmp, #0xf @ cache line size encoding 71 + mov \reg, #4 @ bytes per word 72 72 mov \reg, \reg, lsl \tmp @ actual cache line size 73 73 .endm 74 74 75 + /* 76 + * icache_line_size - get the minimum I-cache line size from the CTR register 77 + * on ARMv7. 78 + */ 79 + .macro icache_line_size, reg, tmp 80 + mrc p15, 0, \tmp, c0, c0, 1 @ read ctr 81 + and \tmp, \tmp, #0xf @ cache line size encoding 82 + mov \reg, #4 @ bytes per word 83 + mov \reg, \reg, lsl \tmp @ actual cache line size 84 + .endm 75 85 76 86 /* 77 87 * Sanity check the PTE configuration for the code below - which makes
+2 -1
arch/arm/plat-omap/counter_32k.c
··· 16 16 #include <linux/init.h> 17 17 #include <linux/clk.h> 18 18 #include <linux/io.h> 19 + #include <linux/err.h> 19 20 20 21 #include <plat/common.h> 21 22 #include <plat/board.h> ··· 165 164 return -ENODEV; 166 165 167 166 sync_32k_ick = clk_get(NULL, "omap_32ksync_ick"); 168 - if (sync_32k_ick) 167 + if (!IS_ERR(sync_32k_ick)) 169 168 clk_enable(sync_32k_ick); 170 169 171 170 clocksource_32k.mult = clocksource_hz2mult(32768,
+1 -1
arch/arm/plat-omap/sram.c
··· 166 166 cpu_is_omap1710()) 167 167 omap_sram_size = 0x4000; /* 16K */ 168 168 else if (cpu_is_omap1611()) 169 - omap_sram_size = 0x3e800; /* 250K */ 169 + omap_sram_size = SZ_256K; 170 170 else { 171 171 printk(KERN_ERR "Could not detect SRAM size\n"); 172 172 omap_sram_size = 0x4000;
+1 -1
arch/arm/plat-s3c24xx/Kconfig
··· 8 8 default y 9 9 select NO_IOPORT 10 10 select ARCH_REQUIRE_GPIOLIB 11 - select S3C_DEVICE_NAND 11 + select S3C_DEV_NAND 12 12 select S3C_GPIO_CFG_S3C24XX 13 13 help 14 14 Base platform code for any Samsung S3C24XX device
+179 -4
arch/arm/tools/mach-types
··· 12 12 # 13 13 # http://www.arm.linux.org.uk/developer/machines/?action=new 14 14 # 15 - # Last update: Thu Sep 9 22:43:01 2010 15 + # Last update: Sun Dec 12 23:24:27 2010 16 16 # 17 17 # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number 18 18 # ··· 2321 2321 u380 MACH_U380 U380 2333 2322 2322 oamp3_hualu MACH_HUALU_BOARD HUALU_BOARD 2334 2323 2323 npcmx50 MACH_NPCMX50 NPCMX50 2335 2324 - mx51_lange51 MACH_MX51_LANGE51 MX51_LANGE51 2336 2324 + mx51_efikamx MACH_MX51_EFIKAMX MX51_EFIKAMX 2336 2325 2325 mx51_lange52 MACH_MX51_LANGE52 MX51_LANGE52 2337 2326 2326 riom MACH_RIOM RIOM 2338 2327 2327 comcas MACH_COMCAS COMCAS 2339 ··· 2355 2355 csb732 MACH_CSB732 CSB732 2367 2356 2356 u8500 MACH_U8500 U8500 2368 2357 2357 huqiu MACH_HUQIU HUQIU 2369 2358 - mx51_kunlun MACH_MX51_KUNLUN MX51_KUNLUN 2370 2358 + mx51_efikasb MACH_MX51_EFIKASB MX51_EFIKASB 2370 2359 2359 pmt1g MACH_PMT1G PMT1G 2371 2360 2360 htcelf MACH_HTCELF HTCELF 2372 2361 2361 armadillo420 MACH_ARMADILLO420 ARMADILLO420 2373 ··· 2971 2971 wasabi MACH_WASABI WASABI 2986 2972 2972 vivow MACH_VIVOW VIVOW 2987 2973 2973 mx50_rdp MACH_MX50_RDP MX50_RDP 2988 2974 - universal MACH_UNIVERSAL UNIVERSAL 2989 2974 + universal_c210 MACH_UNIVERSAL_C210 UNIVERSAL_C210 2989 2975 2975 real6410 MACH_REAL6410 REAL6410 2990 2976 2976 spx_sakura MACH_SPX_SAKURA SPX_SAKURA 2991 2977 2977 ij3k_2440 MACH_IJ3K_2440 IJ3K_2440 2992 ··· 3044 3044 msm8x60_qrdc MACH_MSM8X60_QRDC MSM8X60_QRDC 3060 3045 3045 spear900 MACH_SPEAR900 SPEAR900 3061 3046 3046 pcontrol_g20 MACH_PCONTROL_G20 PCONTROL_G20 3062 3047 + rdstor MACH_RDSTOR RDSTOR 3063 3048 + usdloader MACH_USDLOADER USDLOADER 3064 3049 + tsoploader MACH_TSOPLOADER TSOPLOADER 3065 3050 + kronos MACH_KRONOS KRONOS 3066 3051 + ffcore MACH_FFCORE FFCORE 3067 3052 + mone MACH_MONE MONE 3068 3053 + unit2s MACH_UNIT2S UNIT2S 3069 3054 + acer_a5 MACH_ACER_A5 ACER_A5 3070 3055 + etherpro_isp MACH_ETHERPRO_ISP ETHERPRO_ISP 3071 3056 + stretchs7000 MACH_STRETCHS7000 STRETCHS7000 3072 3057 
+ p87_smartsim MACH_P87_SMARTSIM P87_SMARTSIM 3073 3058 + tulip MACH_TULIP TULIP 3074 3059 + sunflower MACH_SUNFLOWER SUNFLOWER 3075 3060 + rib MACH_RIB RIB 3076 3061 + clod MACH_CLOD CLOD 3077 3062 + rump MACH_RUMP RUMP 3078 3063 + tenderloin MACH_TENDERLOIN TENDERLOIN 3079 3064 + shortloin MACH_SHORTLOIN SHORTLOIN 3080 3065 + crespo MACH_CRESPO CRESPO 3081 3066 + antares MACH_ANTARES ANTARES 3082 3067 + wb40n MACH_WB40N WB40N 3083 3068 + herring MACH_HERRING HERRING 3084 3069 + naxy400 MACH_NAXY400 NAXY400 3085 3070 + naxy1200 MACH_NAXY1200 NAXY1200 3086 3071 + vpr200 MACH_VPR200 VPR200 3087 3072 + bug20 MACH_BUG20 BUG20 3088 3073 + goflexnet MACH_GOFLEXNET GOFLEXNET 3089 3074 + torbreck MACH_TORBRECK TORBRECK 3090 3075 + saarb_mg1 MACH_SAARB_MG1 SAARB_MG1 3091 3076 + callisto MACH_CALLISTO CALLISTO 3092 3077 + multhsu MACH_MULTHSU MULTHSU 3093 3078 + saluda MACH_SALUDA SALUDA 3094 3079 + pemp_omap3_apollo MACH_PEMP_OMAP3_APOLLO PEMP_OMAP3_APOLLO 3095 3080 + vc0718 MACH_VC0718 VC0718 3096 3081 + mvblx MACH_MVBLX MVBLX 3097 3082 + inhand_apeiron MACH_INHAND_APEIRON INHAND_APEIRON 3098 3083 + inhand_fury MACH_INHAND_FURY INHAND_FURY 3099 3084 + inhand_siren MACH_INHAND_SIREN INHAND_SIREN 3100 3085 + hdnvp MACH_HDNVP HDNVP 3101 3086 + softwinner MACH_SOFTWINNER SOFTWINNER 3102 3087 + prima2_evb MACH_PRIMA2_EVB PRIMA2_EVB 3103 3088 + nas6210 MACH_NAS6210 NAS6210 3104 3089 + unisdev MACH_UNISDEV UNISDEV 3105 3090 + sbca11 MACH_SBCA11 SBCA11 3106 3091 + saga MACH_SAGA SAGA 3107 3092 + ns_k330 MACH_NS_K330 NS_K330 3108 3093 + tanna MACH_TANNA TANNA 3109 3094 + imate8502 MACH_IMATE8502 IMATE8502 3110 3095 + aspen MACH_ASPEN ASPEN 3111 3096 + daintree_cwac MACH_DAINTREE_CWAC DAINTREE_CWAC 3112 3097 + zmx25 MACH_ZMX25 ZMX25 3113 3098 + maple1 MACH_MAPLE1 MAPLE1 3114 3099 + qsd8x72_surf MACH_QSD8X72_SURF QSD8X72_SURF 3115 3100 + qsd8x72_ffa MACH_QSD8X72_FFA QSD8X72_FFA 3116 3101 + abilene MACH_ABILENE ABILENE 3117 3102 + eigen_ttr MACH_EIGEN_TTR EIGEN_TTR 3118 3103 + 
iomega_ix2_200 MACH_IOMEGA_IX2_200 IOMEGA_IX2_200 3119 3104 + coretec_vcx7400 MACH_CORETEC_VCX7400 CORETEC_VCX7400 3120 3105 + santiago MACH_SANTIAGO SANTIAGO 3121 3106 + mx257sol MACH_MX257SOL MX257SOL 3122 3107 + strasbourg MACH_STRASBOURG STRASBOURG 3123 3108 + msm8x60_fluid MACH_MSM8X60_FLUID MSM8X60_FLUID 3124 3109 + smartqv5 MACH_SMARTQV5 SMARTQV5 3125 3110 + smartqv3 MACH_SMARTQV3 SMARTQV3 3126 3111 + smartqv7 MACH_SMARTQV7 SMARTQV7 3127 3112 + paz00 MACH_PAZ00 PAZ00 3128 3113 + acmenetusfoxg20 MACH_ACMENETUSFOXG20 ACMENETUSFOXG20 3129 3114 + htcwillow MACH_HTCWILLOW HTCWILLOW 3130 3115 + fwbd_0404 MACH_FWBD_0404 FWBD_0404 3131 3116 + hdgu MACH_HDGU HDGU 3132 3117 + pyramid MACH_PYRAMID PYRAMID 3133 3118 + epiphan MACH_EPIPHAN EPIPHAN 3134 3119 + omap_bender MACH_OMAP_BENDER OMAP_BENDER 3135 3120 + gurnard MACH_GURNARD GURNARD 3136 3121 + gtl_it5100 MACH_GTL_IT5100 GTL_IT5100 3137 3122 + bcm2708 MACH_BCM2708 BCM2708 3138 3123 + mx51_ggc MACH_MX51_GGC MX51_GGC 3139 3124 + sharespace MACH_SHARESPACE SHARESPACE 3140 3125 + haba_knx_explorer MACH_HABA_KNX_EXPLORER HABA_KNX_EXPLORER 3141 3126 + simtec_kirkmod MACH_SIMTEC_KIRKMOD SIMTEC_KIRKMOD 3142 3127 + crux MACH_CRUX CRUX 3143 3128 + mx51_bravo MACH_MX51_BRAVO MX51_BRAVO 3144 3129 + charon MACH_CHARON CHARON 3145 3130 + picocom3 MACH_PICOCOM3 PICOCOM3 3146 3131 + picocom4 MACH_PICOCOM4 PICOCOM4 3147 3132 + serrano MACH_SERRANO SERRANO 3148 3133 + doubleshot MACH_DOUBLESHOT DOUBLESHOT 3149 3134 + evsy MACH_EVSY EVSY 3150 3135 + huashan MACH_HUASHAN HUASHAN 3151 3136 + lausanne MACH_LAUSANNE LAUSANNE 3152 3137 + emerald MACH_EMERALD EMERALD 3153 3138 + tqma35 MACH_TQMA35 TQMA35 3154 3139 + marvel MACH_MARVEL MARVEL 3155 3140 + manuae MACH_MANUAE MANUAE 3156 3141 + chacha MACH_CHACHA CHACHA 3157 3142 + lemon MACH_LEMON LEMON 3158 3143 + csc MACH_CSC CSC 3159 3144 + gira_knxip_router MACH_GIRA_KNXIP_ROUTER GIRA_KNXIP_ROUTER 3160 3145 + t20 MACH_T20 T20 3161 3146 + hdmini MACH_HDMINI HDMINI 3162 3147 + sciphone_g2 
MACH_SCIPHONE_G2 SCIPHONE_G2 3163 3148 + express MACH_EXPRESS EXPRESS 3164 3149 + express_kt MACH_EXPRESS_KT EXPRESS_KT 3165 3150 + maximasp MACH_MAXIMASP MAXIMASP 3166 3151 + nitrogen_imx51 MACH_NITROGEN_IMX51 NITROGEN_IMX51 3167 3152 + nitrogen_imx53 MACH_NITROGEN_IMX53 NITROGEN_IMX53 3168 3153 + sunfire MACH_SUNFIRE SUNFIRE 3169 3154 + arowana MACH_AROWANA AROWANA 3170 3155 + tegra_daytona MACH_TEGRA_DAYTONA TEGRA_DAYTONA 3171 3156 + tegra_swordfish MACH_TEGRA_SWORDFISH TEGRA_SWORDFISH 3172 3157 + edison MACH_EDISON EDISON 3173 3158 + svp8500v1 MACH_SVP8500V1 SVP8500V1 3174 3159 + svp8500v2 MACH_SVP8500V2 SVP8500V2 3175 3160 + svp5500 MACH_SVP5500 SVP5500 3176 3161 + b5500 MACH_B5500 B5500 3177 3162 + s5500 MACH_S5500 S5500 3178 3163 + icon MACH_ICON ICON 3179 3164 + elephant MACH_ELEPHANT ELEPHANT 3180 3165 + msm8x60_fusion MACH_MSM8X60_FUSION MSM8X60_FUSION 3181 3166 + shooter MACH_SHOOTER SHOOTER 3182 3167 + spade_lte MACH_SPADE_LTE SPADE_LTE 3183 3168 + philhwani MACH_PHILHWANI PHILHWANI 3184 3169 + gsncomm MACH_GSNCOMM GSNCOMM 3185 3170 + strasbourg_a2 MACH_STRASBOURG_A2 STRASBOURG_A2 3186 3171 + mmm MACH_MMM MMM 3187 3172 + davinci_dm365_bv MACH_DAVINCI_DM365_BV DAVINCI_DM365_BV 3188 3173 + ag5evm MACH_AG5EVM AG5EVM 3189 3174 + sc575plc MACH_SC575PLC SC575PLC 3190 3175 + sc575hmi MACH_SC575IPC SC575IPC 3191 3176 + omap3_tdm3730 MACH_OMAP3_TDM3730 OMAP3_TDM3730 3192 3177 + g7 MACH_G7 G7 3193 3178 + top9000_eval MACH_TOP9000_EVAL TOP9000_EVAL 3194 3179 + top9000_su MACH_TOP9000_SU TOP9000_SU 3195 3180 + utm300 MACH_UTM300 UTM300 3196 3181 + tsunagi MACH_TSUNAGI TSUNAGI 3197 3182 + ts75xx MACH_TS75XX TS75XX 3198 3183 + msm8x60_fusn_ffa MACH_MSM8X60_FUSN_FFA MSM8X60_FUSN_FFA 3199 3184 + ts47xx MACH_TS47XX TS47XX 3200 3185 + da850_k5 MACH_DA850_K5 DA850_K5 3201 3186 + ax502 MACH_AX502 AX502 3202 3187 + igep0032 MACH_IGEP0032 IGEP0032 3203 3188 + antero MACH_ANTERO ANTERO 3204 3189 + synergy MACH_SYNERGY SYNERGY 3205 3190 + ics_if_voip MACH_ICS_IF_VOIP 
ICS_IF_VOIP 3206 3191 + wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207 3192 + punica MACH_PUNICA PUNICA 3208 3193 + sbc_nt250 MACH_SBC_NT250 SBC_NT250 3209 3194 + mx27_wmultra MACH_MX27_WMULTRA MX27_WMULTRA 3210 3195 + mackerel MACH_MACKEREL MACKEREL 3211 3196 + fa9x27 MACH_FA9X27 FA9X27 3213 3197 + ns2816tb MACH_NS2816TB NS2816TB 3214 3198 + ns2816_ntpad MACH_NS2816_NTPAD NS2816_NTPAD 3215 3199 + ns2816_ntnb MACH_NS2816_NTNB NS2816_NTNB 3216 3200 + kaen MACH_KAEN KAEN 3217 3201 + nv1000 MACH_NV1000 NV1000 3218 3202 + nuc950ts MACH_NUC950TS NUC950TS 3219 3203 + nokia_rm680 MACH_NOKIA_RM680 NOKIA_RM680 3220 3204 + ast2200 MACH_AST2200 AST2200 3221 3205 + lead MACH_LEAD LEAD 3222 3206 + unino1 MACH_UNINO1 UNINO1 3223 3207 + greeco MACH_GREECO GREECO 3224 3208 + verdi MACH_VERDI VERDI 3225 3209 + dm6446_adbox MACH_DM6446_ADBOX DM6446_ADBOX 3226 3210 + quad_salsa MACH_QUAD_SALSA QUAD_SALSA 3227 3211 + abb_gma_1_1 MACH_ABB_GMA_1_1 ABB_GMA_1_1 3228 3212 + svcid MACH_SVCID SVCID 3229 3213 + msm8960_sim MACH_MSM8960_SIM MSM8960_SIM 3230 3214 + msm8960_rumi3 MACH_MSM8960_RUMI3 MSM8960_RUMI3 3231 3215 + icon_g MACH_ICON_G ICON_G 3232 3216 + mb3 MACH_MB3 MB3 3233 3217 + gsia18s MACH_GSIA18S GSIA18S 3234 3218 + pivicc MACH_PIVICC PIVICC 3235 3219 + pcm048 MACH_PCM048 PCM048 3236 3220 + dds MACH_DDS DDS 3237 3221 + chalten_xa1 MACH_CHALTEN_XA1 CHALTEN_XA1 3238
+24 -14
arch/mips/Kconfig
··· 19 19 select GENERIC_ATOMIC64 if !64BIT 20 20 select HAVE_DMA_ATTRS 21 21 select HAVE_DMA_API_DEBUG 22 + select HAVE_GENERIC_HARDIRQS 23 + select GENERIC_IRQ_PROBE 22 24 23 25 menu "Machine selection" 24 26 ··· 1666 1664 1667 1665 endchoice 1668 1666 1667 + config FORCE_MAX_ZONEORDER 1668 + int "Maximum zone order" 1669 + range 13 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB 1670 + default "13" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB 1671 + range 12 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB 1672 + default "12" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB 1673 + range 11 64 1674 + default "11" 1675 + help 1676 + The kernel memory allocator divides physically contiguous memory 1677 + blocks into "zones", where each zone is a power of two number of 1678 + pages. This option selects the largest power of two that the kernel 1679 + keeps in the memory allocator. If you need to allocate very large 1680 + blocks of physically contiguous memory, then you may need to 1681 + increase this value. 1682 + 1683 + This config option is actually maximum order plus one. For example, 1684 + a value of 11 means that the largest free memory block is 2^10 pages. 1685 + 1686 + The page size is not necessarily 4KB. Keep this in mind 1687 + when choosing a value for this option. 1688 + 1669 1689 config BOARD_SCACHE 1670 1690 bool 1671 1691 ··· 1943 1919 select CPU_R4400_WORKAROUNDS 1944 1920 1945 1921 config CPU_R4400_WORKAROUNDS 1946 - bool 1947 - 1948 - # 1949 - # Use the generic interrupt handling code in kernel/irq/: 1950 - # 1951 - config GENERIC_HARDIRQS 1952 - bool 1953 - default y 1954 - 1955 - config GENERIC_IRQ_PROBE 1956 - bool 1957 - default y 1958 - 1959 - config IRQ_PER_CPU 1960 1922 bool 1961 1923 1962 1924 #
+2
arch/mips/alchemy/common/platform.c
··· 27 27 static void alchemy_8250_pm(struct uart_port *port, unsigned int state, 28 28 unsigned int old_state) 29 29 { 30 + #ifdef CONFIG_SERIAL_8250 30 31 switch (state) { 31 32 case 0: 32 33 if ((__raw_readl(port->membase + UART_MOD_CNTRL) & 3) != 3) { ··· 50 49 serial8250_do_pm(port, state, old_state); 51 50 break; 52 51 } 52 + #endif 53 53 } 54 54 55 55 #define PORT(_base, _irq) \
+2 -3
arch/mips/alchemy/devboards/prom.c
··· 54 54 55 55 prom_init_cmdline(); 56 56 memsize_str = prom_getenv("memsize"); 57 - if (!memsize_str) 57 + if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize)) 58 58 memsize = ALCHEMY_BOARD_DEFAULT_MEMSIZE; 59 - else 60 - strict_strtoul(memsize_str, 0, &memsize); 59 + 61 60 add_memory_region(0, memsize, BOOT_MEM_RAM); 62 61 } 63 62
+3 -6
arch/mips/ar7/clock.c
··· 239 239 calculate(base_clock, frequency, &prediv, &postdiv, &mul); 240 240 241 241 writel(((prediv - 1) << PREDIV_SHIFT) | (postdiv - 1), &clock->ctrl); 242 - msleep(1); 242 + mdelay(1); 243 243 writel(4, &clock->pll); 244 244 while (readl(&clock->pll) & PLL_STATUS) 245 245 ; 246 246 writel(((mul - 1) << MUL_SHIFT) | (0xff << 3) | 0x0e, &clock->pll); 247 - msleep(75); 247 + mdelay(75); 248 248 } 249 249 250 250 static void __init tnetd7300_init_clocks(void) ··· 456 456 } 457 457 EXPORT_SYMBOL(clk_put); 458 458 459 - int __init ar7_init_clocks(void) 459 + void __init ar7_init_clocks(void) 460 460 { 461 461 switch (ar7_chip_id()) { 462 462 case AR7_CHIP_7100: ··· 472 472 } 473 473 /* adjust vbus clock rate */ 474 474 vbus_clk.rate = bus_clk.rate / 2; 475 - 476 - return 0; 477 475 } 478 - arch_initcall(ar7_init_clocks);
+3
arch/mips/ar7/time.c
··· 30 30 { 31 31 struct clk *cpu_clk; 32 32 33 + /* Initialize ar7 clocks so the CPU clock frequency is correct */ 34 + ar7_init_clocks(); 35 + 33 36 cpu_clk = clk_get(NULL, "cpu"); 34 37 if (IS_ERR(cpu_clk)) { 35 38 printk(KERN_ERR "unable to get cpu clock\n");
+105 -46
arch/mips/bcm47xx/setup.c
··· 32 32 #include <asm/reboot.h> 33 33 #include <asm/time.h> 34 34 #include <bcm47xx.h> 35 - #include <asm/fw/cfe/cfe_api.h> 36 35 #include <asm/mach-bcm47xx/nvram.h> 37 36 38 37 struct ssb_bus ssb_bcm47xx; ··· 56 57 cpu_relax(); 57 58 } 58 59 59 - static void str2eaddr(char *str, char *dest) 60 + #define READ_FROM_NVRAM(_outvar, name, buf) \ 61 + if (nvram_getenv(name, buf, sizeof(buf)) >= 0)\ 62 + sprom->_outvar = simple_strtoul(buf, NULL, 0); 63 + 64 + static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) 60 65 { 61 - int i = 0; 66 + char buf[100]; 67 + u32 boardflags; 62 68 63 - if (str == NULL) { 64 - memset(dest, 0, 6); 65 - return; 69 + memset(sprom, 0, sizeof(struct ssb_sprom)); 70 + 71 + sprom->revision = 1; /* Fallback: Old hardware does not define this. */ 72 + READ_FROM_NVRAM(revision, "sromrev", buf); 73 + if (nvram_getenv("il0macaddr", buf, sizeof(buf)) >= 0) 74 + nvram_parse_macaddr(buf, sprom->il0mac); 75 + if (nvram_getenv("et0macaddr", buf, sizeof(buf)) >= 0) 76 + nvram_parse_macaddr(buf, sprom->et0mac); 77 + if (nvram_getenv("et1macaddr", buf, sizeof(buf)) >= 0) 78 + nvram_parse_macaddr(buf, sprom->et1mac); 79 + READ_FROM_NVRAM(et0phyaddr, "et0phyaddr", buf); 80 + READ_FROM_NVRAM(et1phyaddr, "et1phyaddr", buf); 81 + READ_FROM_NVRAM(et0mdcport, "et0mdcport", buf); 82 + READ_FROM_NVRAM(et1mdcport, "et1mdcport", buf); 83 + READ_FROM_NVRAM(board_rev, "boardrev", buf); 84 + READ_FROM_NVRAM(country_code, "ccode", buf); 85 + READ_FROM_NVRAM(ant_available_a, "aa5g", buf); 86 + READ_FROM_NVRAM(ant_available_bg, "aa2g", buf); 87 + READ_FROM_NVRAM(pa0b0, "pa0b0", buf); 88 + READ_FROM_NVRAM(pa0b1, "pa0b1", buf); 89 + READ_FROM_NVRAM(pa0b2, "pa0b2", buf); 90 + READ_FROM_NVRAM(pa1b0, "pa1b0", buf); 91 + READ_FROM_NVRAM(pa1b1, "pa1b1", buf); 92 + READ_FROM_NVRAM(pa1b2, "pa1b2", buf); 93 + READ_FROM_NVRAM(pa1lob0, "pa1lob0", buf); 94 + READ_FROM_NVRAM(pa1lob2, "pa1lob1", buf); 95 + READ_FROM_NVRAM(pa1lob1, "pa1lob2", buf); 96 + READ_FROM_NVRAM(pa1hib0, 
"pa1hib0", buf); 97 + READ_FROM_NVRAM(pa1hib2, "pa1hib1", buf); 98 + READ_FROM_NVRAM(pa1hib1, "pa1hib2", buf); 99 + READ_FROM_NVRAM(gpio0, "wl0gpio0", buf); 100 + READ_FROM_NVRAM(gpio1, "wl0gpio1", buf); 101 + READ_FROM_NVRAM(gpio2, "wl0gpio2", buf); 102 + READ_FROM_NVRAM(gpio3, "wl0gpio3", buf); 103 + READ_FROM_NVRAM(maxpwr_bg, "pa0maxpwr", buf); 104 + READ_FROM_NVRAM(maxpwr_al, "pa1lomaxpwr", buf); 105 + READ_FROM_NVRAM(maxpwr_a, "pa1maxpwr", buf); 106 + READ_FROM_NVRAM(maxpwr_ah, "pa1himaxpwr", buf); 107 + READ_FROM_NVRAM(itssi_a, "pa1itssit", buf); 108 + READ_FROM_NVRAM(itssi_bg, "pa0itssit", buf); 109 + READ_FROM_NVRAM(tri2g, "tri2g", buf); 110 + READ_FROM_NVRAM(tri5gl, "tri5gl", buf); 111 + READ_FROM_NVRAM(tri5g, "tri5g", buf); 112 + READ_FROM_NVRAM(tri5gh, "tri5gh", buf); 113 + READ_FROM_NVRAM(rxpo2g, "rxpo2g", buf); 114 + READ_FROM_NVRAM(rxpo5g, "rxpo5g", buf); 115 + READ_FROM_NVRAM(rssisav2g, "rssisav2g", buf); 116 + READ_FROM_NVRAM(rssismc2g, "rssismc2g", buf); 117 + READ_FROM_NVRAM(rssismf2g, "rssismf2g", buf); 118 + READ_FROM_NVRAM(bxa2g, "bxa2g", buf); 119 + READ_FROM_NVRAM(rssisav5g, "rssisav5g", buf); 120 + READ_FROM_NVRAM(rssismc5g, "rssismc5g", buf); 121 + READ_FROM_NVRAM(rssismf5g, "rssismf5g", buf); 122 + READ_FROM_NVRAM(bxa5g, "bxa5g", buf); 123 + READ_FROM_NVRAM(cck2gpo, "cck2gpo", buf); 124 + READ_FROM_NVRAM(ofdm2gpo, "ofdm2gpo", buf); 125 + READ_FROM_NVRAM(ofdm5glpo, "ofdm5glpo", buf); 126 + READ_FROM_NVRAM(ofdm5gpo, "ofdm5gpo", buf); 127 + READ_FROM_NVRAM(ofdm5ghpo, "ofdm5ghpo", buf); 128 + 129 + if (nvram_getenv("boardflags", buf, sizeof(buf)) >= 0) { 130 + boardflags = simple_strtoul(buf, NULL, 0); 131 + if (boardflags) { 132 + sprom->boardflags_lo = (boardflags & 0x0000FFFFU); 133 + sprom->boardflags_hi = (boardflags & 0xFFFF0000U) >> 16; 134 + } 66 135 } 67 - 68 - for (;;) { 69 - dest[i++] = (char) simple_strtoul(str, NULL, 16); 70 - str += 2; 71 - if (!*str++ || i == 6) 72 - break; 136 + if (nvram_getenv("boardflags2", buf, sizeof(buf)) 
>= 0) { 137 + boardflags = simple_strtoul(buf, NULL, 0); 138 + if (boardflags) { 139 + sprom->boardflags2_lo = (boardflags & 0x0000FFFFU); 140 + sprom->boardflags2_hi = (boardflags & 0xFFFF0000U) >> 16; 141 + } 73 142 } 74 143 } 75 144 76 145 static int bcm47xx_get_invariants(struct ssb_bus *bus, 77 146 struct ssb_init_invariants *iv) 78 147 { 79 - char buf[100]; 148 + char buf[20]; 80 149 81 150 /* Fill boardinfo structure */ 82 151 memset(&(iv->boardinfo), 0 , sizeof(struct ssb_boardinfo)); 83 152 84 - if (cfe_getenv("boardvendor", buf, sizeof(buf)) >= 0 || 85 - nvram_getenv("boardvendor", buf, sizeof(buf)) >= 0) 153 + if (nvram_getenv("boardvendor", buf, sizeof(buf)) >= 0) 154 + iv->boardinfo.vendor = (u16)simple_strtoul(buf, NULL, 0); 155 + else 156 + iv->boardinfo.vendor = SSB_BOARDVENDOR_BCM; 157 + if (nvram_getenv("boardtype", buf, sizeof(buf)) >= 0) 86 158 iv->boardinfo.type = (u16)simple_strtoul(buf, NULL, 0); 87 - if (cfe_getenv("boardtype", buf, sizeof(buf)) >= 0 || 88 - nvram_getenv("boardtype", buf, sizeof(buf)) >= 0) 89 - iv->boardinfo.type = (u16)simple_strtoul(buf, NULL, 0); 90 - if (cfe_getenv("boardrev", buf, sizeof(buf)) >= 0 || 91 - nvram_getenv("boardrev", buf, sizeof(buf)) >= 0) 159 + if (nvram_getenv("boardrev", buf, sizeof(buf)) >= 0) 92 160 iv->boardinfo.rev = (u16)simple_strtoul(buf, NULL, 0); 93 161 94 - /* Fill sprom structure */ 95 - memset(&(iv->sprom), 0, sizeof(struct ssb_sprom)); 96 - iv->sprom.revision = 3; 162 + bcm47xx_fill_sprom(&iv->sprom); 97 163 98 - if (cfe_getenv("et0macaddr", buf, sizeof(buf)) >= 0 || 99 - nvram_getenv("et0macaddr", buf, sizeof(buf)) >= 0) 100 - str2eaddr(buf, iv->sprom.et0mac); 101 - 102 - if (cfe_getenv("et1macaddr", buf, sizeof(buf)) >= 0 || 103 - nvram_getenv("et1macaddr", buf, sizeof(buf)) >= 0) 104 - str2eaddr(buf, iv->sprom.et1mac); 105 - 106 - if (cfe_getenv("et0phyaddr", buf, sizeof(buf)) >= 0 || 107 - nvram_getenv("et0phyaddr", buf, sizeof(buf)) >= 0) 108 - iv->sprom.et0phyaddr = 
simple_strtoul(buf, NULL, 0); 109 - 110 - if (cfe_getenv("et1phyaddr", buf, sizeof(buf)) >= 0 || 111 - nvram_getenv("et1phyaddr", buf, sizeof(buf)) >= 0) 112 - iv->sprom.et1phyaddr = simple_strtoul(buf, NULL, 0); 113 - 114 - if (cfe_getenv("et0mdcport", buf, sizeof(buf)) >= 0 || 115 - nvram_getenv("et0mdcport", buf, sizeof(buf)) >= 0) 116 - iv->sprom.et0mdcport = simple_strtoul(buf, NULL, 10); 117 - 118 - if (cfe_getenv("et1mdcport", buf, sizeof(buf)) >= 0 || 119 - nvram_getenv("et1mdcport", buf, sizeof(buf)) >= 0) 120 - iv->sprom.et1mdcport = simple_strtoul(buf, NULL, 10); 164 + if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0) 165 + iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10); 121 166 122 167 return 0; 123 168 } ··· 169 126 void __init plat_mem_setup(void) 170 127 { 171 128 int err; 129 + char buf[100]; 130 + struct ssb_mipscore *mcore; 172 131 173 132 err = ssb_bus_ssbbus_register(&ssb_bcm47xx, SSB_ENUM_BASE, 174 133 bcm47xx_get_invariants); 175 134 if (err) 176 135 panic("Failed to initialize SSB bus (err %d)\n", err); 136 + 137 + mcore = &ssb_bcm47xx.mipscore; 138 + if (nvram_getenv("kernel_args", buf, sizeof(buf)) >= 0) { 139 + if (strstr(buf, "console=ttyS1")) { 140 + struct ssb_serial_port port; 141 + 142 + printk(KERN_DEBUG "Swapping serial ports!\n"); 143 + /* swap serial ports */ 144 + memcpy(&port, &mcore->serial_ports[0], sizeof(port)); 145 + memcpy(&mcore->serial_ports[0], &mcore->serial_ports[1], 146 + sizeof(port)); 147 + memcpy(&mcore->serial_ports[1], &port, sizeof(port)); 148 + } 149 + } 177 150 178 151 _machine_restart = bcm47xx_machine_restart; 179 152 _machine_halt = bcm47xx_machine_halt;
+2 -2
arch/mips/include/asm/cpu.h
··· 111 111 * These are the PRID's for when 23:16 == PRID_COMP_BROADCOM 112 112 */ 113 113 114 - #define PRID_IMP_BMIPS4KC 0x4000 115 - #define PRID_IMP_BMIPS32 0x8000 114 + #define PRID_IMP_BMIPS32_REV4 0x4000 115 + #define PRID_IMP_BMIPS32_REV8 0x8000 116 116 #define PRID_IMP_BMIPS3300 0x9000 117 117 #define PRID_IMP_BMIPS3300_ALT 0x9100 118 118 #define PRID_IMP_BMIPS3300_BUG 0x0000
+6 -2
arch/mips/include/asm/elf.h
··· 249 249 250 250 #define SET_PERSONALITY(ex) \ 251 251 do { \ 252 - set_personality(PER_LINUX); \ 252 + if (personality(current->personality) != PER_LINUX) \ 253 + set_personality(PER_LINUX); \ 253 254 \ 254 255 current->thread.abi = &mips_abi; \ 255 256 } while (0) ··· 297 296 298 297 #define SET_PERSONALITY(ex) \ 299 298 do { \ 299 + unsigned int p; \ 300 + \ 300 301 clear_thread_flag(TIF_32BIT_REGS); \ 301 302 clear_thread_flag(TIF_32BIT_ADDR); \ 302 303 \ ··· 307 304 else \ 308 305 current->thread.abi = &mips_abi; \ 309 306 \ 310 - if (current->personality != PER_LINUX32) \ 307 + p = personality(current->personality); \ 308 + if (p != PER_LINUX32 && p != PER_LINUX) \ 311 309 set_personality(PER_LINUX); \ 312 310 } while (0) 313 311
+10 -2
arch/mips/include/asm/io.h
··· 329 329 "dsrl32 %L0, %L0, 0" "\n\t" \ 330 330 "dsll32 %M0, %M0, 0" "\n\t" \ 331 331 "or %L0, %L0, %M0" "\n\t" \ 332 + ".set push" "\n\t" \ 333 + ".set noreorder" "\n\t" \ 334 + ".set nomacro" "\n\t" \ 332 335 "sd %L0, %2" "\n\t" \ 336 + ".set pop" "\n\t" \ 333 337 ".set mips0" "\n" \ 334 338 : "=r" (__tmp) \ 335 - : "0" (__val), "m" (*__mem)); \ 339 + : "0" (__val), "R" (*__mem)); \ 336 340 if (irq) \ 337 341 local_irq_restore(__flags); \ 338 342 } else \ ··· 359 355 local_irq_save(__flags); \ 360 356 __asm__ __volatile__( \ 361 357 ".set mips3" "\t\t# __readq" "\n\t" \ 358 + ".set push" "\n\t" \ 359 + ".set noreorder" "\n\t" \ 360 + ".set nomacro" "\n\t" \ 362 361 "ld %L0, %1" "\n\t" \ 362 + ".set pop" "\n\t" \ 363 363 "dsra32 %M0, %L0, 0" "\n\t" \ 364 364 "sll %L0, %L0, 0" "\n\t" \ 365 365 ".set mips0" "\n" \ 366 366 : "=r" (__val) \ 367 - : "m" (*__mem)); \ 367 + : "R" (*__mem)); \ 368 368 if (irq) \ 369 369 local_irq_restore(__flags); \ 370 370 } else { \
+1 -2
arch/mips/include/asm/mach-ar7/ar7.h
··· 201 201 } 202 202 203 203 int __init ar7_gpio_init(void); 204 - 205 - int __init ar7_gpio_init(void); 204 + void __init ar7_init_clocks(void); 206 205 207 206 #endif /* __AR7_H__ */
+7
arch/mips/include/asm/mach-bcm47xx/nvram.h
··· 12 12 #define __NVRAM_H 13 13 14 14 #include <linux/types.h> 15 + #include <linux/kernel.h> 15 16 16 17 struct nvram_header { 17 18 u32 magic; ··· 36 35 #define NVRAM_ERR_ENVNOTFOUND -9 37 36 38 37 extern int nvram_getenv(char *name, char *val, size_t val_len); 38 + 39 + static inline void nvram_parse_macaddr(char *buf, u8 *macaddr) 40 + { 41 + sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], &macaddr[1], 42 + &macaddr[2], &macaddr[3], &macaddr[4], &macaddr[5]); 43 + } 39 44 40 45 #endif
+2 -2
arch/mips/jz4740/board-qi_lb60.c
··· 5 5 * 6 6 * Copyright (c) 2009 Qi Hardware inc., 7 7 * Author: Xiangfu Liu <xiangfu@qi-hardware.com> 8 - * Copyright 2010, Lars-Petrer Clausen <lars@metafoo.de> 8 + * Copyright 2010, Lars-Peter Clausen <lars@metafoo.de> 9 9 * 10 10 * This program is free software; you can redistribute it and/or modify 11 11 * it under the terms of the GNU General Public License version 2 or later ··· 235 235 QI_LB60_GPIO_KEYIN(3), 236 236 QI_LB60_GPIO_KEYIN(4), 237 237 QI_LB60_GPIO_KEYIN(5), 238 - QI_LB60_GPIO_KEYIN(7), 238 + QI_LB60_GPIO_KEYIN(6), 239 239 QI_LB60_GPIO_KEYIN8, 240 240 }; 241 241
+1 -1
arch/mips/jz4740/platform.c
··· 208 208 209 209 /* PCM */ 210 210 struct platform_device jz4740_pcm_device = { 211 - .name = "jz4740-pcm", 211 + .name = "jz4740-pcm-audio", 212 212 .id = -1, 213 213 }; 214 214
+1 -1
arch/mips/jz4740/prom.c
··· 23 23 #include <asm/bootinfo.h> 24 24 #include <asm/mach-jz4740/base.h> 25 25 26 - void jz4740_init_cmdline(int argc, char *argv[]) 26 + static __init void jz4740_init_cmdline(int argc, char *argv[]) 27 27 { 28 28 unsigned int count = COMMAND_LINE_SIZE - 1; 29 29 int i;
+1 -1
arch/mips/kernel/cevt-r4k.c
··· 32 32 cnt = read_c0_count(); 33 33 cnt += delta; 34 34 write_c0_compare(cnt); 35 - res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; 35 + res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0; 36 36 return res; 37 37 } 38 38
+2 -5
arch/mips/kernel/cpu-probe.c
··· 905 905 { 906 906 decode_configs(c); 907 907 switch (c->processor_id & 0xff00) { 908 - case PRID_IMP_BMIPS32: 908 + case PRID_IMP_BMIPS32_REV4: 909 + case PRID_IMP_BMIPS32_REV8: 909 910 c->cputype = CPU_BMIPS32; 910 911 __cpu_name[cpu] = "Broadcom BMIPS32"; 911 912 break; ··· 933 932 c->cputype = CPU_BMIPS5000; 934 933 __cpu_name[cpu] = "Broadcom BMIPS5000"; 935 934 c->options |= MIPS_CPU_ULRI; 936 - break; 937 - case PRID_IMP_BMIPS4KC: 938 - c->cputype = CPU_4KC; 939 - __cpu_name[cpu] = "MIPS 4Kc"; 940 935 break; 941 936 } 942 937 }
+7 -6
arch/mips/kernel/linux32.c
··· 251 251 252 252 SYSCALL_DEFINE1(32_personality, unsigned long, personality) 253 253 { 254 + unsigned int p = personality & 0xffffffff; 254 255 int ret; 255 - personality &= 0xffffffff; 256 + 256 257 if (personality(current->personality) == PER_LINUX32 && 257 - personality == PER_LINUX) 258 - personality = PER_LINUX32; 259 - ret = sys_personality(personality); 260 - if (ret == PER_LINUX32) 261 - ret = PER_LINUX; 258 + personality(p) == PER_LINUX) 259 + p = (p & ~PER_MASK) | PER_LINUX32; 260 + ret = sys_personality(p); 261 + if (ret != -1 && personality(ret) == PER_LINUX32) 262 + ret = (ret & ~PER_MASK) | PER_LINUX; 262 263 return ret; 263 264 } 264 265
-1
arch/mips/kernel/process.c
··· 142 142 childregs->regs[7] = 0; /* Clear error flag */ 143 143 144 144 childregs->regs[2] = 0; /* Child gets zero as return value */ 145 - regs->regs[2] = p->pid; 146 145 147 146 if (childregs->cp0_status & ST0_CU0) { 148 147 childregs->regs[28] = (unsigned long) ti;
+1 -1
arch/mips/kernel/prom.c
··· 100 100 return; 101 101 102 102 base = virt_to_phys((void *)initial_boot_params); 103 - size = initial_boot_params->totalsize; 103 + size = be32_to_cpu(initial_boot_params->totalsize); 104 104 105 105 /* Before we do anything, lets reserve the dt blob */ 106 106 reserve_mem_mach(base, size);
+1 -1
arch/mips/kernel/smp-mt.c
··· 153 153 { 154 154 extern int gic_present; 155 155 156 - /* This is Malta specific: IPI,performance and timer inetrrupts */ 156 + /* This is Malta specific: IPI,performance and timer interrupts */ 157 157 if (gic_present) 158 158 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | 159 159 STATUSF_IP6 | STATUSF_IP7);
+35 -9
arch/mips/kernel/traps.c
··· 83 83 extern asmlinkage void handle_reserved(void); 84 84 85 85 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, 86 - struct mips_fpu_struct *ctx, int has_fpu); 86 + struct mips_fpu_struct *ctx, int has_fpu, 87 + void *__user *fault_addr); 87 88 88 89 void (*board_be_init)(void); 89 90 int (*board_be_handler)(struct pt_regs *regs, int is_fixup); ··· 662 661 force_sig_info(SIGFPE, &info, current); 663 662 } 664 663 664 + static int process_fpemu_return(int sig, void __user *fault_addr) 665 + { 666 + if (sig == SIGSEGV || sig == SIGBUS) { 667 + struct siginfo si = {0}; 668 + si.si_addr = fault_addr; 669 + si.si_signo = sig; 670 + if (sig == SIGSEGV) { 671 + if (find_vma(current->mm, (unsigned long)fault_addr)) 672 + si.si_code = SEGV_ACCERR; 673 + else 674 + si.si_code = SEGV_MAPERR; 675 + } else { 676 + si.si_code = BUS_ADRERR; 677 + } 678 + force_sig_info(sig, &si, current); 679 + return 1; 680 + } else if (sig) { 681 + force_sig(sig, current); 682 + return 1; 683 + } else { 684 + return 0; 685 + } 686 + } 687 + 665 688 /* 666 689 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX 667 690 */ 668 691 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) 669 692 { 670 - siginfo_t info; 693 + siginfo_t info = {0}; 671 694 672 695 if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE) 673 696 == NOTIFY_STOP) ··· 700 675 701 676 if (fcr31 & FPU_CSR_UNI_X) { 702 677 int sig; 678 + void __user *fault_addr = NULL; 703 679 704 680 /* 705 681 * Unimplemented operation exception. If we've got the full ··· 716 690 lose_fpu(1); 717 691 718 692 /* Run the emulator */ 719 - sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1); 693 + sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, 694 + &fault_addr); 720 695 721 696 /* 722 697 * We can't allow the emulated instruction to leave any of ··· 729 702 own_fpu(1); /* Using the FPU again. 
*/ 730 703 731 704 /* If something went wrong, signal */ 732 - if (sig) 733 - force_sig(sig, current); 705 + process_fpemu_return(sig, fault_addr); 734 706 735 707 return; 736 708 } else if (fcr31 & FPU_CSR_INV_X) ··· 1022 996 1023 997 if (!raw_cpu_has_fpu) { 1024 998 int sig; 999 + void __user *fault_addr = NULL; 1025 1000 sig = fpu_emulator_cop1Handler(regs, 1026 - &current->thread.fpu, 0); 1027 - if (sig) 1028 - force_sig(sig, current); 1029 - else 1001 + &current->thread.fpu, 1002 + 0, &fault_addr); 1003 + if (!process_fpemu_return(sig, fault_addr)) 1030 1004 mt_ase_fp_affinity(); 1031 1005 } 1032 1006
+6 -8
arch/mips/kernel/vpe.c
··· 1092 1092 1093 1093 /* this of-course trashes what was there before... */ 1094 1094 v->pbuffer = vmalloc(P_SIZE); 1095 + if (!v->pbuffer) { 1096 + pr_warning("VPE loader: unable to allocate memory\n"); 1097 + return -ENOMEM; 1098 + } 1095 1099 v->plen = P_SIZE; 1096 1100 v->load_addr = NULL; 1097 1101 v->len = 0; ··· 1153 1149 if (ret < 0) 1154 1150 v->shared_ptr = NULL; 1155 1151 1156 - // cleanup any temp buffers 1157 - if (v->pbuffer) 1158 - vfree(v->pbuffer); 1152 + vfree(v->pbuffer); 1159 1153 v->plen = 0; 1154 + 1160 1155 return ret; 1161 1156 } 1162 1157 ··· 1171 1168 v = get_vpe(tclimit); 1172 1169 if (v == NULL) 1173 1170 return -ENODEV; 1174 - 1175 - if (v->pbuffer == NULL) { 1176 - printk(KERN_ERR "VPE loader: no buffer for program\n"); 1177 - return -ENOMEM; 1178 - } 1179 1171 1180 1172 if ((count + v->len) > v->plen) { 1181 1173 printk(KERN_WARNING
+2 -2
arch/mips/lib/memset.S
··· 161 161 162 162 .Lfwd_fixup: 163 163 PTR_L t0, TI_TASK($28) 164 - LONG_L t0, THREAD_BUADDR(t0) 165 164 andi a2, 0x3f 165 + LONG_L t0, THREAD_BUADDR(t0) 166 166 LONG_ADDU a2, t1 167 167 jr ra 168 168 LONG_SUBU a2, t0 169 169 170 170 .Lpartial_fixup: 171 171 PTR_L t0, TI_TASK($28) 172 - LONG_L t0, THREAD_BUADDR(t0) 173 172 andi a2, LONGMASK 173 + LONG_L t0, THREAD_BUADDR(t0) 174 174 LONG_ADDU a2, t1 175 175 jr ra 176 176 LONG_SUBU a2, t0
+2 -2
arch/mips/loongson/common/env.c
··· 29 29 30 30 #define parse_even_earlier(res, option, p) \ 31 31 do { \ 32 + int ret; \ 32 33 if (strncmp(option, (char *)p, strlen(option)) == 0) \ 33 - strict_strtol((char *)p + strlen(option"="), \ 34 - 10, &res); \ 34 + ret = strict_strtol((char *)p + strlen(option"="), 10, &res); \ 35 35 } while (0) 36 36 37 37 void __init prom_init_env(void)
+95 -21
arch/mips/math-emu/cp1emu.c
··· 64 64 65 65 #if __mips >= 4 && __mips != 32 66 66 static int fpux_emu(struct pt_regs *, 67 - struct mips_fpu_struct *, mips_instruction); 67 + struct mips_fpu_struct *, mips_instruction, void *__user *); 68 68 #endif 69 69 70 70 /* Further private data for which no space exists in mips_fpu_struct */ ··· 208 208 * Two instructions if the instruction is in a branch delay slot. 209 209 */ 210 210 211 - static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx) 211 + static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, 212 + void *__user *fault_addr) 212 213 { 213 214 mips_instruction ir; 214 215 unsigned long emulpc, contpc; 215 216 unsigned int cond; 216 217 217 - if (get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) { 218 + if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) { 218 219 MIPS_FPU_EMU_INC_STATS(errors); 220 + *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 219 221 return SIGBUS; 222 + } 223 + if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) { 224 + MIPS_FPU_EMU_INC_STATS(errors); 225 + *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 226 + return SIGSEGV; 220 227 } 221 228 222 229 /* XXX NEC Vr54xx bug workaround */ ··· 252 245 #endif 253 246 return SIGILL; 254 247 } 255 - if (get_user(ir, (mips_instruction __user *) emulpc)) { 248 + if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) { 256 249 MIPS_FPU_EMU_INC_STATS(errors); 250 + *fault_addr = (mips_instruction __user *)emulpc; 257 251 return SIGBUS; 252 + } 253 + if (__get_user(ir, (mips_instruction __user *) emulpc)) { 254 + MIPS_FPU_EMU_INC_STATS(errors); 255 + *fault_addr = (mips_instruction __user *)emulpc; 256 + return SIGSEGV; 258 257 } 259 258 /* __compute_return_epc() will have updated cp0_epc */ 260 259 contpc = xcp->cp0_epc; ··· 282 269 u64 val; 283 270 284 271 MIPS_FPU_EMU_INC_STATS(loads); 285 - if (get_user(val, va)) { 272 + 273 + if (!access_ok(VERIFY_READ, va, sizeof(u64))) { 286 274 
MIPS_FPU_EMU_INC_STATS(errors); 275 + *fault_addr = va; 287 276 return SIGBUS; 277 + } 278 + if (__get_user(val, va)) { 279 + MIPS_FPU_EMU_INC_STATS(errors); 280 + *fault_addr = va; 281 + return SIGSEGV; 288 282 } 289 283 DITOREG(val, MIPSInst_RT(ir)); 290 284 break; ··· 304 284 305 285 MIPS_FPU_EMU_INC_STATS(stores); 306 286 DIFROMREG(val, MIPSInst_RT(ir)); 307 - if (put_user(val, va)) { 287 + if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) { 308 288 MIPS_FPU_EMU_INC_STATS(errors); 289 + *fault_addr = va; 309 290 return SIGBUS; 291 + } 292 + if (__put_user(val, va)) { 293 + MIPS_FPU_EMU_INC_STATS(errors); 294 + *fault_addr = va; 295 + return SIGSEGV; 310 296 } 311 297 break; 312 298 } ··· 323 297 u32 val; 324 298 325 299 MIPS_FPU_EMU_INC_STATS(loads); 326 - if (get_user(val, va)) { 300 + if (!access_ok(VERIFY_READ, va, sizeof(u32))) { 327 301 MIPS_FPU_EMU_INC_STATS(errors); 302 + *fault_addr = va; 328 303 return SIGBUS; 304 + } 305 + if (__get_user(val, va)) { 306 + MIPS_FPU_EMU_INC_STATS(errors); 307 + *fault_addr = va; 308 + return SIGSEGV; 329 309 } 330 310 SITOREG(val, MIPSInst_RT(ir)); 331 311 break; ··· 344 312 345 313 MIPS_FPU_EMU_INC_STATS(stores); 346 314 SIFROMREG(val, MIPSInst_RT(ir)); 347 - if (put_user(val, va)) { 315 + if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) { 348 316 MIPS_FPU_EMU_INC_STATS(errors); 317 + *fault_addr = va; 349 318 return SIGBUS; 319 + } 320 + if (__put_user(val, va)) { 321 + MIPS_FPU_EMU_INC_STATS(errors); 322 + *fault_addr = va; 323 + return SIGSEGV; 350 324 } 351 325 break; 352 326 } ··· 478 440 contpc = (xcp->cp0_epc + 479 441 (MIPSInst_SIMM(ir) << 2)); 480 442 481 - if (get_user(ir, 443 + if (!access_ok(VERIFY_READ, xcp->cp0_epc, 444 + sizeof(mips_instruction))) { 445 + MIPS_FPU_EMU_INC_STATS(errors); 446 + *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 447 + return SIGBUS; 448 + } 449 + if (__get_user(ir, 482 450 (mips_instruction __user *) xcp->cp0_epc)) { 483 451 MIPS_FPU_EMU_INC_STATS(errors); 484 - return 
SIGBUS; 452 + *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 453 + return SIGSEGV; 485 454 } 486 455 487 456 switch (MIPSInst_OPCODE(ir)) { ··· 551 506 552 507 #if __mips >= 4 && __mips != 32 553 508 case cop1x_op:{ 554 - int sig; 555 - 556 - if ((sig = fpux_emu(xcp, ctx, ir))) 509 + int sig = fpux_emu(xcp, ctx, ir, fault_addr); 510 + if (sig) 557 511 return sig; 558 512 break; 559 513 } ··· 648 604 DEF3OP(nmsub, dp, ieee754dp_mul, ieee754dp_sub, ieee754dp_neg); 649 605 650 606 static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, 651 - mips_instruction ir) 607 + mips_instruction ir, void *__user *fault_addr) 652 608 { 653 609 unsigned rcsr = 0; /* resulting csr */ 654 610 ··· 668 624 xcp->regs[MIPSInst_FT(ir)]); 669 625 670 626 MIPS_FPU_EMU_INC_STATS(loads); 671 - if (get_user(val, va)) { 627 + if (!access_ok(VERIFY_READ, va, sizeof(u32))) { 672 628 MIPS_FPU_EMU_INC_STATS(errors); 629 + *fault_addr = va; 673 630 return SIGBUS; 631 + } 632 + if (__get_user(val, va)) { 633 + MIPS_FPU_EMU_INC_STATS(errors); 634 + *fault_addr = va; 635 + return SIGSEGV; 674 636 } 675 637 SITOREG(val, MIPSInst_FD(ir)); 676 638 break; ··· 688 638 MIPS_FPU_EMU_INC_STATS(stores); 689 639 690 640 SIFROMREG(val, MIPSInst_FS(ir)); 641 + if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) { 642 + MIPS_FPU_EMU_INC_STATS(errors); 643 + *fault_addr = va; 644 + return SIGBUS; 645 + } 691 646 if (put_user(val, va)) { 692 647 MIPS_FPU_EMU_INC_STATS(errors); 693 - return SIGBUS; 648 + *fault_addr = va; 649 + return SIGSEGV; 694 650 } 695 651 break; 696 652 ··· 757 701 xcp->regs[MIPSInst_FT(ir)]); 758 702 759 703 MIPS_FPU_EMU_INC_STATS(loads); 760 - if (get_user(val, va)) { 704 + if (!access_ok(VERIFY_READ, va, sizeof(u64))) { 761 705 MIPS_FPU_EMU_INC_STATS(errors); 706 + *fault_addr = va; 762 707 return SIGBUS; 708 + } 709 + if (__get_user(val, va)) { 710 + MIPS_FPU_EMU_INC_STATS(errors); 711 + *fault_addr = va; 712 + return SIGSEGV; 763 713 } 764 714 DITOREG(val, 
MIPSInst_FD(ir)); 765 715 break; ··· 776 714 777 715 MIPS_FPU_EMU_INC_STATS(stores); 778 716 DIFROMREG(val, MIPSInst_FS(ir)); 779 - if (put_user(val, va)) { 717 + if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) { 780 718 MIPS_FPU_EMU_INC_STATS(errors); 719 + *fault_addr = va; 781 720 return SIGBUS; 721 + } 722 + if (__put_user(val, va)) { 723 + MIPS_FPU_EMU_INC_STATS(errors); 724 + *fault_addr = va; 725 + return SIGSEGV; 782 726 } 783 727 break; 784 728 ··· 1310 1242 } 1311 1243 1312 1244 int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, 1313 - int has_fpu) 1245 + int has_fpu, void *__user *fault_addr) 1314 1246 { 1315 1247 unsigned long oldepc, prevepc; 1316 1248 mips_instruction insn; ··· 1320 1252 do { 1321 1253 prevepc = xcp->cp0_epc; 1322 1254 1323 - if (get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) { 1255 + if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) { 1324 1256 MIPS_FPU_EMU_INC_STATS(errors); 1257 + *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 1325 1258 return SIGBUS; 1259 + } 1260 + if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) { 1261 + MIPS_FPU_EMU_INC_STATS(errors); 1262 + *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 1263 + return SIGSEGV; 1326 1264 } 1327 1265 if (insn == 0) 1328 1266 xcp->cp0_epc += 4; /* skip nops */ ··· 1341 1267 */ 1342 1268 /* convert to ieee library modes */ 1343 1269 ieee754_csr.rm = ieee_rm[ieee754_csr.rm]; 1344 - sig = cop1Emulate(xcp, ctx); 1270 + sig = cop1Emulate(xcp, ctx, fault_addr); 1345 1271 /* revert to mips rounding mode */ 1346 1272 ieee754_csr.rm = mips_rm[ieee754_csr.rm]; 1347 1273 }
+3 -1
arch/mips/mm/dma-default.c
··· 288 288 return plat_dma_supported(dev, mask); 289 289 } 290 290 291 - void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size, 291 + void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 292 292 enum dma_data_direction direction) 293 293 { 294 294 BUG_ON(direction == DMA_NONE); ··· 297 297 if (!plat_device_is_coherent(dev)) 298 298 __dma_sync((unsigned long)vaddr, size, direction); 299 299 } 300 + 301 + EXPORT_SYMBOL(dma_cache_sync); 300 302 301 303 static struct dma_map_ops mips_default_dma_map_ops = { 302 304 .alloc_coherent = mips_dma_alloc_coherent,
+4
arch/mips/mm/sc-mips.c
··· 68 68 */ 69 69 static inline int mips_sc_is_activated(struct cpuinfo_mips *c) 70 70 { 71 + unsigned int config2 = read_c0_config2(); 72 + unsigned int tmp; 73 + 71 74 /* Check the bypass bit (L2B) */ 72 75 switch (c->cputype) { 73 76 case CPU_34K: ··· 86 83 c->scache.linesz = 2 << tmp; 87 84 else 88 85 return 0; 86 + return 1; 89 87 } 90 88 91 89 static inline int __init mips_sc_probe(void)
+10 -2
arch/mips/pmc-sierra/yosemite/py-console.c
··· 65 65 66 66 __asm__ __volatile__ ( 67 67 " .set mips3 \n" 68 + " .set push \n" 69 + " .set noreorder \n" 70 + " .set nomacro \n" 68 71 " ld %0, %1 \n" 72 + " .set pop \n" 69 73 " lbu %0, (%0) \n" 70 74 " .set mips0 \n" 71 75 : "=r" (res) 72 - : "m" (vaddr)); 76 + : "R" (vaddr)); 73 77 74 78 write_c0_status(sr); 75 79 ssnop_4(); ··· 93 89 94 90 __asm__ __volatile__ ( 95 91 " .set mips3 \n" 92 + " .set push \n" 93 + " .set noreorder \n" 94 + " .set nomacro \n" 96 95 " ld %0, %1 \n" 96 + " .set pop \n" 97 97 " sb %2, (%0) \n" 98 98 " .set mips0 \n" 99 99 : "=&r" (tmp) 100 - : "m" (vaddr), "r" (c)); 100 + : "R" (vaddr), "r" (c)); 101 101 102 102 write_c0_status(sr); 103 103 ssnop_4();
+4 -4
arch/mips/sibyte/swarm/setup.c
··· 82 82 enum swarm_rtc_type { 83 83 RTC_NONE, 84 84 RTC_XICOR, 85 - RTC_M4LT81 85 + RTC_M41T81, 86 86 }; 87 87 88 88 enum swarm_rtc_type swarm_rtc_type; ··· 96 96 sec = xicor_get_time(); 97 97 break; 98 98 99 - case RTC_M4LT81: 99 + case RTC_M41T81: 100 100 sec = m41t81_get_time(); 101 101 break; 102 102 ··· 115 115 case RTC_XICOR: 116 116 return xicor_set_time(sec); 117 117 118 - case RTC_M4LT81: 118 + case RTC_M41T81: 119 119 return m41t81_set_time(sec); 120 120 121 121 case RTC_NONE: ··· 141 141 if (xicor_probe()) 142 142 swarm_rtc_type = RTC_XICOR; 143 143 if (m41t81_probe()) 144 - swarm_rtc_type = RTC_M4LT81; 144 + swarm_rtc_type = RTC_M41T81; 145 145 146 146 #ifdef CONFIG_VT 147 147 screen_info = (struct screen_info) {
+3 -7
arch/mn10300/kernel/time.c
··· 40 40 unsigned long long ll; 41 41 unsigned l[2]; 42 42 } tsc64, result; 43 - unsigned long tsc, tmp; 43 + unsigned long tmp; 44 44 unsigned product[3]; /* 96-bit intermediate value */ 45 45 46 46 /* cnt32_to_63() is not safe with preemption */ 47 47 preempt_disable(); 48 48 49 - /* read the TSC value 50 - */ 51 - tsc = get_cycles(); 52 - 53 - /* expand to 64-bits. 49 + /* expand the tsc to 64-bits. 54 50 * - sched_clock() must be called once a minute or better or the 55 51 * following will go horribly wrong - see cnt32_to_63() 56 52 */ 57 - tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL; 53 + tsc64.ll = cnt32_to_63(get_cycles()) & 0x7fffffffffffffffULL; 58 54 59 55 preempt_enable(); 60 56
+2 -1
arch/sh/Kconfig
··· 22 22 select HAVE_SPARSE_IRQ 23 23 select RTC_LIB 24 24 select GENERIC_ATOMIC64 25 - select GENERIC_HARDIRQS_NO_DEPRECATED 25 + # Support the deprecated APIs until MFD and GPIOLIB catch up. 26 + select GENERIC_HARDIRQS_NO_DEPRECATED if !MFD_SUPPORT && !GPIOLIB 26 27 help 27 28 The SuperH is a RISC processor targeted for use in embedded systems 28 29 and consumer electronics; it was also used in the Sega Dreamcast
+2 -1
arch/sh/include/asm/unistd_32.h
··· 368 368 #define __NR_sendmsg 355 369 369 #define __NR_recvmsg 356 370 370 #define __NR_recvmmsg 357 371 + #define __NR_accept4 358 371 372 372 - #define NR_syscalls 358 373 + #define NR_syscalls 359 373 374 374 375 #ifdef __KERNEL__ 375 376
+1
arch/sh/kernel/syscalls_32.S
··· 375 375 .long sys_sendmsg /* 355 */ 376 376 .long sys_recvmsg 377 377 .long sys_recvmmsg 378 + .long sys_accept4
+1 -1
arch/sparc/include/asm/openprom.h
··· 39 39 int (*v2_dev_open)(char *devpath); 40 40 void (*v2_dev_close)(int d); 41 41 int (*v2_dev_read)(int d, char *buf, int nbytes); 42 - int (*v2_dev_write)(int d, char *buf, int nbytes); 42 + int (*v2_dev_write)(int d, const char *buf, int nbytes); 43 43 int (*v2_dev_seek)(int d, int hi, int lo); 44 44 45 45 /* Never issued (multistage load support) */
+2 -33
arch/sparc/include/asm/oplib_32.h
··· 60 60 extern char *prom_mapio(char *virt_hint, int io_space, unsigned int phys_addr, unsigned int num_bytes); 61 61 extern void prom_unmapio(char *virt_addr, unsigned int num_bytes); 62 62 63 - /* Device operations. */ 64 - 65 - /* Open the device described by the passed string. Note, that the format 66 - * of the string is different on V0 vs. V2->higher proms. The caller must 67 - * know what he/she is doing! Returns the device descriptor, an int. 68 - */ 69 - extern int prom_devopen(char *device_string); 70 - 71 - /* Close a previously opened device described by the passed integer 72 - * descriptor. 73 - */ 74 - extern int prom_devclose(int device_handle); 75 - 76 - /* Do a seek operation on the device described by the passed integer 77 - * descriptor. 78 - */ 79 - extern void prom_seek(int device_handle, unsigned int seek_hival, 80 - unsigned int seek_lowval); 81 - 82 63 /* Miscellaneous routines, don't really fit in any category per se. */ 83 64 84 65 /* Reboot the machine with the command line passed. */ ··· 102 121 /* Get the prom firmware revision. */ 103 122 extern int prom_getprev(void); 104 123 105 - /* Character operations to/from the console.... */ 106 - 107 - /* Non-blocking get character from console. */ 108 - extern int prom_nbgetchar(void); 109 - 110 - /* Non-blocking put character to console. */ 111 - extern int prom_nbputchar(char character); 112 - 113 - /* Blocking get character from console. */ 114 - extern char prom_getchar(void); 115 - 116 - /* Blocking put character to console. */ 117 - extern void prom_putchar(char character); 124 + /* Write a buffer of characters to the console. */ 125 + extern void prom_console_write_buf(const char *buf, int len); 118 126 119 127 /* Prom's internal routines, don't use in kernel/boot code. 
*/ 120 128 extern void prom_printf(const char *fmt, ...); ··· 208 238 extern int prom_setprop(phandle node, const char *prop_name, char *prop_value, 209 239 int value_size); 210 240 211 - extern phandle prom_pathtoinode(char *path); 212 241 extern phandle prom_inst2pkg(int); 213 242 214 243 /* Dorking with Bus ranges... */
+2 -44
arch/sparc/include/asm/oplib_64.h
··· 67 67 /* Boot argument acquisition, returns the boot command line string. */ 68 68 extern char *prom_getbootargs(void); 69 69 70 - /* Device utilities. */ 71 - 72 - /* Device operations. */ 73 - 74 - /* Open the device described by the passed string. Note, that the format 75 - * of the string is different on V0 vs. V2->higher proms. The caller must 76 - * know what he/she is doing! Returns the device descriptor, an int. 77 - */ 78 - extern int prom_devopen(const char *device_string); 79 - 80 - /* Close a previously opened device described by the passed integer 81 - * descriptor. 82 - */ 83 - extern int prom_devclose(int device_handle); 84 - 85 - /* Do a seek operation on the device described by the passed integer 86 - * descriptor. 87 - */ 88 - extern void prom_seek(int device_handle, unsigned int seek_hival, 89 - unsigned int seek_lowval); 90 - 91 70 /* Miscellaneous routines, don't really fit in any category per se. */ 92 71 93 72 /* Reboot the machine with the command line passed. */ ··· 88 109 /* Halt and power-off the machine. */ 89 110 extern void prom_halt_power_off(void) __attribute__ ((noreturn)); 90 111 91 - /* Set the PROM 'sync' callback function to the passed function pointer. 92 - * When the user gives the 'sync' command at the prom prompt while the 93 - * kernel is still active, the prom will call this routine. 94 - * 95 - */ 96 - typedef int (*callback_func_t)(long *cmd); 97 - extern void prom_setcallback(callback_func_t func_ptr); 98 - 99 112 /* Acquire the IDPROM of the root node in the prom device tree. This 100 113 * gets passed a buffer where you would like it stuffed. The return value 101 114 * is the format type of this idprom or 0xff on error. 102 115 */ 103 116 extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size); 104 117 105 - /* Character operations to/from the console.... */ 106 - 107 - /* Non-blocking get character from console. 
*/ 108 - extern int prom_nbgetchar(void); 109 - 110 - /* Non-blocking put character to console. */ 111 - extern int prom_nbputchar(char character); 112 - 113 - /* Blocking get character from console. */ 114 - extern char prom_getchar(void); 115 - 116 - /* Blocking put character to console. */ 117 - extern void prom_putchar(char character); 118 + /* Write a buffer of characters to the console. */ 119 + extern void prom_console_write_buf(const char *buf, int len); 118 120 119 121 /* Prom's internal routines, don't use in kernel/boot code. */ 120 122 extern void prom_printf(const char *fmt, ...); ··· 239 279 extern int prom_setprop(phandle node, const char *prop_name, char *prop_value, 240 280 int value_size); 241 281 242 - extern phandle prom_pathtoinode(const char *path); 243 282 extern phandle prom_inst2pkg(int); 244 - extern int prom_service_exists(const char *service_name); 245 283 extern void prom_sun4v_guest_soft_state(void); 246 284 247 285 extern int prom_ihandle2path(int handle, char *buffer, int bufsize);
+2 -2
arch/sparc/kernel/leon_kernel.c
··· 114 114 if (leon3_gptimer_regs && leon3_irqctrl_regs) { 115 115 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].val, 0); 116 116 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].rld, 117 - (((1000000 / 100) - 1))); 117 + (((1000000 / HZ) - 1))); 118 118 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0); 119 119 120 120 #ifdef CONFIG_SMP ··· 128 128 } 129 129 130 130 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].val, 0); 131 - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/100) - 1))); 131 + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/HZ) - 1))); 132 132 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl, 0); 133 133 # endif 134 134
-1
arch/sparc/prom/Makefile
··· 6 6 7 7 lib-y := bootstr_$(BITS).o 8 8 lib-$(CONFIG_SPARC32) += devmap.o 9 - lib-y += devops_$(BITS).o 10 9 lib-y += init_$(BITS).o 11 10 lib-$(CONFIG_SPARC32) += memory.o 12 11 lib-y += misc_$(BITS).o
+12 -53
arch/sparc/prom/console_32.c
··· 16 16 17 17 extern void restore_current(void); 18 18 19 - /* Non blocking get character from console input device, returns -1 20 - * if no input was taken. This can be used for polling. 21 - */ 22 - int 23 - prom_nbgetchar(void) 24 - { 25 - static char inc; 26 - int i = -1; 27 - unsigned long flags; 28 - 29 - spin_lock_irqsave(&prom_lock, flags); 30 - switch(prom_vers) { 31 - case PROM_V0: 32 - i = (*(romvec->pv_nbgetchar))(); 33 - break; 34 - case PROM_V2: 35 - case PROM_V3: 36 - if( (*(romvec->pv_v2devops).v2_dev_read)(*romvec->pv_v2bootargs.fd_stdin , &inc, 0x1) == 1) { 37 - i = inc; 38 - } else { 39 - i = -1; 40 - } 41 - break; 42 - default: 43 - i = -1; 44 - break; 45 - }; 46 - restore_current(); 47 - spin_unlock_irqrestore(&prom_lock, flags); 48 - return i; /* Ugh, we could spin forever on unsupported proms ;( */ 49 - } 50 - 51 19 /* Non blocking put character to console device, returns -1 if 52 20 * unsuccessful. 53 21 */ 54 - int 55 - prom_nbputchar(char c) 22 + static int prom_nbputchar(const char *buf) 56 23 { 57 - static char outc; 58 24 unsigned long flags; 59 25 int i = -1; 60 26 61 27 spin_lock_irqsave(&prom_lock, flags); 62 28 switch(prom_vers) { 63 29 case PROM_V0: 64 - i = (*(romvec->pv_nbputchar))(c); 30 + i = (*(romvec->pv_nbputchar))(*buf); 65 31 break; 66 32 case PROM_V2: 67 33 case PROM_V3: 68 - outc = c; 69 - if( (*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout, &outc, 0x1) == 1) 34 + if ((*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout, 35 + buf, 0x1) == 1) 70 36 i = 0; 71 - else 72 - i = -1; 73 37 break; 74 38 default: 75 - i = -1; 76 39 break; 77 40 }; 78 41 restore_current(); ··· 43 80 return i; /* Ugh, we could spin forever on unsupported proms ;( */ 44 81 } 45 82 46 - /* Blocking version of get character routine above. 
*/ 47 - char 48 - prom_getchar(void) 83 + void prom_console_write_buf(const char *buf, int len) 49 84 { 50 - int character; 51 - while((character = prom_nbgetchar()) == -1) ; 52 - return (char) character; 85 + while (len) { 86 + int n = prom_nbputchar(buf); 87 + if (n) 88 + continue; 89 + len--; 90 + buf++; 91 + } 53 92 } 54 93 55 - /* Blocking version of put character routine above. */ 56 - void 57 - prom_putchar(char c) 58 - { 59 - while(prom_nbputchar(c) == -1) ; 60 - }
+15 -66
arch/sparc/prom/console_64.c
··· 15 15 16 16 extern int prom_stdin, prom_stdout; 17 17 18 - /* Non blocking get character from console input device, returns -1 19 - * if no input was taken. This can be used for polling. 20 - */ 21 - inline int 22 - prom_nbgetchar(void) 18 + static int __prom_console_write_buf(const char *buf, int len) 23 19 { 24 20 unsigned long args[7]; 25 - char inc; 26 - 27 - args[0] = (unsigned long) "read"; 28 - args[1] = 3; 29 - args[2] = 1; 30 - args[3] = (unsigned int) prom_stdin; 31 - args[4] = (unsigned long) &inc; 32 - args[5] = 1; 33 - args[6] = (unsigned long) -1; 34 - 35 - p1275_cmd_direct(args); 36 - 37 - if (args[6] == 1) 38 - return inc; 39 - return -1; 40 - } 41 - 42 - /* Non blocking put character to console device, returns -1 if 43 - * unsuccessful. 44 - */ 45 - inline int 46 - prom_nbputchar(char c) 47 - { 48 - unsigned long args[7]; 49 - char outc; 50 - 51 - outc = c; 21 + int ret; 52 22 53 23 args[0] = (unsigned long) "write"; 54 24 args[1] = 3; 55 25 args[2] = 1; 56 26 args[3] = (unsigned int) prom_stdout; 57 - args[4] = (unsigned long) &outc; 58 - args[5] = 1; 27 + args[4] = (unsigned long) buf; 28 + args[5] = (unsigned int) len; 59 29 args[6] = (unsigned long) -1; 60 30 61 31 p1275_cmd_direct(args); 62 32 63 - if (args[6] == 1) 64 - return 0; 65 - else 33 + ret = (int) args[6]; 34 + if (ret < 0) 66 35 return -1; 36 + return ret; 67 37 } 68 38 69 - /* Blocking version of get character routine above. */ 70 - char 71 - prom_getchar(void) 39 + void prom_console_write_buf(const char *buf, int len) 72 40 { 73 - int character; 74 - while((character = prom_nbgetchar()) == -1) ; 75 - return (char) character; 76 - } 77 - 78 - /* Blocking version of put character routine above. 
*/ 79 - void 80 - prom_putchar(char c) 81 - { 82 - prom_nbputchar(c); 83 - } 84 - 85 - void 86 - prom_puts(const char *s, int len) 87 - { 88 - unsigned long args[7]; 89 - 90 - args[0] = (unsigned long) "write"; 91 - args[1] = 3; 92 - args[2] = 1; 93 - args[3] = (unsigned int) prom_stdout; 94 - args[4] = (unsigned long) s; 95 - args[5] = len; 96 - args[6] = (unsigned long) -1; 97 - 98 - p1275_cmd_direct(args); 41 + while (len) { 42 + int n = __prom_console_write_buf(buf, len); 43 + if (n < 0) 44 + continue; 45 + len -= n; 46 + buf += len; 47 + } 99 48 }
-87
arch/sparc/prom/devops_32.c
··· 1 - /* 2 - * devops.c: Device operations using the PROM. 3 - * 4 - * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 5 - */ 6 - #include <linux/types.h> 7 - #include <linux/kernel.h> 8 - #include <linux/sched.h> 9 - 10 - #include <asm/openprom.h> 11 - #include <asm/oplib.h> 12 - 13 - extern void restore_current(void); 14 - 15 - /* Open the device described by the string 'dstr'. Returns the handle 16 - * to that device used for subsequent operations on that device. 17 - * Returns -1 on failure. 18 - */ 19 - int 20 - prom_devopen(char *dstr) 21 - { 22 - int handle; 23 - unsigned long flags; 24 - spin_lock_irqsave(&prom_lock, flags); 25 - switch(prom_vers) { 26 - case PROM_V0: 27 - handle = (*(romvec->pv_v0devops.v0_devopen))(dstr); 28 - if(handle == 0) handle = -1; 29 - break; 30 - case PROM_V2: 31 - case PROM_V3: 32 - handle = (*(romvec->pv_v2devops.v2_dev_open))(dstr); 33 - break; 34 - default: 35 - handle = -1; 36 - break; 37 - }; 38 - restore_current(); 39 - spin_unlock_irqrestore(&prom_lock, flags); 40 - 41 - return handle; 42 - } 43 - 44 - /* Close the device described by device handle 'dhandle'. */ 45 - int 46 - prom_devclose(int dhandle) 47 - { 48 - unsigned long flags; 49 - spin_lock_irqsave(&prom_lock, flags); 50 - switch(prom_vers) { 51 - case PROM_V0: 52 - (*(romvec->pv_v0devops.v0_devclose))(dhandle); 53 - break; 54 - case PROM_V2: 55 - case PROM_V3: 56 - (*(romvec->pv_v2devops.v2_dev_close))(dhandle); 57 - break; 58 - default: 59 - break; 60 - }; 61 - restore_current(); 62 - spin_unlock_irqrestore(&prom_lock, flags); 63 - return 0; 64 - } 65 - 66 - /* Seek to specified location described by 'seekhi' and 'seeklo' 67 - * for device 'dhandle'. 
68 - */ 69 - void 70 - prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo) 71 - { 72 - unsigned long flags; 73 - spin_lock_irqsave(&prom_lock, flags); 74 - switch(prom_vers) { 75 - case PROM_V0: 76 - (*(romvec->pv_v0devops.v0_seekdev))(dhandle, seekhi, seeklo); 77 - break; 78 - case PROM_V2: 79 - case PROM_V3: 80 - (*(romvec->pv_v2devops.v2_dev_seek))(dhandle, seekhi, seeklo); 81 - break; 82 - default: 83 - break; 84 - }; 85 - restore_current(); 86 - spin_unlock_irqrestore(&prom_lock, flags); 87 - }
-67
arch/sparc/prom/devops_64.c
··· 1 - /* 2 - * devops.c: Device operations using the PROM. 3 - * 4 - * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 5 - * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 6 - */ 7 - #include <linux/types.h> 8 - #include <linux/kernel.h> 9 - #include <linux/sched.h> 10 - 11 - #include <asm/openprom.h> 12 - #include <asm/oplib.h> 13 - 14 - /* Open the device described by the string 'dstr'. Returns the handle 15 - * to that device used for subsequent operations on that device. 16 - * Returns 0 on failure. 17 - */ 18 - int 19 - prom_devopen(const char *dstr) 20 - { 21 - unsigned long args[5]; 22 - 23 - args[0] = (unsigned long) "open"; 24 - args[1] = 1; 25 - args[2] = 1; 26 - args[3] = (unsigned long) dstr; 27 - args[4] = (unsigned long) -1; 28 - 29 - p1275_cmd_direct(args); 30 - 31 - return (int) args[4]; 32 - } 33 - 34 - /* Close the device described by device handle 'dhandle'. */ 35 - int 36 - prom_devclose(int dhandle) 37 - { 38 - unsigned long args[4]; 39 - 40 - args[0] = (unsigned long) "close"; 41 - args[1] = 1; 42 - args[2] = 0; 43 - args[3] = (unsigned int) dhandle; 44 - 45 - p1275_cmd_direct(args); 46 - 47 - return 0; 48 - } 49 - 50 - /* Seek to specified location described by 'seekhi' and 'seeklo' 51 - * for device 'dhandle'. 52 - */ 53 - void 54 - prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo) 55 - { 56 - unsigned long args[7]; 57 - 58 - args[0] = (unsigned long) "seek"; 59 - args[1] = 3; 60 - args[2] = 1; 61 - args[3] = (unsigned int) dhandle; 62 - args[4] = seekhi; 63 - args[5] = seeklo; 64 - args[6] = (unsigned long) -1; 65 - 66 - p1275_cmd_direct(args); 67 - }
+1 -15
arch/sparc/prom/misc_64.c
··· 18 18 #include <asm/system.h> 19 19 #include <asm/ldc.h> 20 20 21 - int prom_service_exists(const char *service_name) 21 + static int prom_service_exists(const char *service_name) 22 22 { 23 23 unsigned long args[5]; 24 24 ··· 148 148 149 149 /* if nothing else helps, we just halt */ 150 150 prom_halt(); 151 - } 152 - 153 - /* Set prom sync handler to call function 'funcp'. */ 154 - void prom_setcallback(callback_func_t funcp) 155 - { 156 - unsigned long args[5]; 157 - if (!funcp) 158 - return; 159 - args[0] = (unsigned long) "set-callback"; 160 - args[1] = 1; 161 - args[2] = 1; 162 - args[3] = (unsigned long) funcp; 163 - args[4] = (unsigned long) -1; 164 - p1275_cmd_direct(args); 165 151 } 166 152 167 153 /* Get the idprom and stuff it into buffer 'idbuf'. Returns the
+29 -6
arch/sparc/prom/printf.c
··· 15 15 16 16 #include <linux/kernel.h> 17 17 #include <linux/compiler.h> 18 + #include <linux/spinlock.h> 18 19 19 20 #include <asm/openprom.h> 20 21 #include <asm/oplib.h> 21 22 23 + #define CONSOLE_WRITE_BUF_SIZE 1024 24 + 22 25 static char ppbuf[1024]; 26 + static char console_write_buf[CONSOLE_WRITE_BUF_SIZE]; 27 + static DEFINE_RAW_SPINLOCK(console_write_lock); 23 28 24 29 void notrace prom_write(const char *buf, unsigned int n) 25 30 { 26 - char ch; 31 + unsigned int dest_len; 32 + unsigned long flags; 33 + char *dest; 27 34 28 - while (n != 0) { 29 - --n; 30 - if ((ch = *buf++) == '\n') 31 - prom_putchar('\r'); 32 - prom_putchar(ch); 35 + dest = console_write_buf; 36 + raw_spin_lock_irqsave(&console_write_lock, flags); 37 + 38 + dest_len = 0; 39 + while (n-- != 0) { 40 + char ch = *buf++; 41 + if (ch == '\n') { 42 + *dest++ = '\r'; 43 + dest_len++; 44 + } 45 + *dest++ = ch; 46 + dest_len++; 47 + if (dest_len >= CONSOLE_WRITE_BUF_SIZE - 1) { 48 + prom_console_write_buf(console_write_buf, dest_len); 49 + dest = console_write_buf; 50 + dest_len = 0; 51 + } 33 52 } 53 + if (dest_len) 54 + prom_console_write_buf(console_write_buf, dest_len); 55 + 56 + raw_spin_unlock_irqrestore(&console_write_lock, flags); 34 57 } 35 58 36 59 void notrace prom_printf(const char *fmt, ...)
-16
arch/sparc/prom/tree_32.c
··· 342 342 if (node == -1) return 0; 343 343 return node; 344 344 } 345 - 346 - /* Return 'node' assigned to a particular prom 'path' 347 - * FIXME: Should work for v0 as well 348 - */ 349 - phandle prom_pathtoinode(char *path) 350 - { 351 - phandle node; 352 - int inst; 353 - 354 - inst = prom_devopen (path); 355 - if (inst == -1) return 0; 356 - node = prom_inst2pkg (inst); 357 - prom_devclose (inst); 358 - if (node == -1) return 0; 359 - return node; 360 - }
-18
arch/sparc/prom/tree_64.c
··· 374 374 return node; 375 375 } 376 376 377 - /* Return 'node' assigned to a particular prom 'path' 378 - * FIXME: Should work for v0 as well 379 - */ 380 - phandle prom_pathtoinode(const char *path) 381 - { 382 - phandle node; 383 - int inst; 384 - 385 - inst = prom_devopen (path); 386 - if (inst == 0) 387 - return 0; 388 - node = prom_inst2pkg(inst); 389 - prom_devclose(inst); 390 - if (node == -1) 391 - return 0; 392 - return node; 393 - } 394 - 395 377 int prom_ihandle2path(int handle, char *buffer, int bufsize) 396 378 { 397 379 unsigned long args[7];
+1 -1
arch/tile/include/asm/signal.h
··· 25 25 26 26 #if defined(__KERNEL__) && !defined(__ASSEMBLY__) 27 27 struct pt_regs; 28 - int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *); 28 + int restore_sigcontext(struct pt_regs *, struct sigcontext __user *); 29 29 int setup_sigcontext(struct sigcontext __user *, struct pt_regs *); 30 30 void do_signal(struct pt_regs *regs); 31 31 #endif
+3 -3
arch/tile/kernel/compat_signal.c
··· 290 290 return ret; 291 291 } 292 292 293 + /* The assembly shim for this function arranges to ignore the return value. */ 293 294 long compat_sys_rt_sigreturn(struct pt_regs *regs) 294 295 { 295 296 struct compat_rt_sigframe __user *frame = 296 297 (struct compat_rt_sigframe __user *) compat_ptr(regs->sp); 297 298 sigset_t set; 298 - long r0; 299 299 300 300 if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 301 301 goto badframe; ··· 308 308 recalc_sigpending(); 309 309 spin_unlock_irq(&current->sighand->siglock); 310 310 311 - if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) 311 + if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) 312 312 goto badframe; 313 313 314 314 if (compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0) 315 315 goto badframe; 316 316 317 - return r0; 317 + return 0; 318 318 319 319 badframe: 320 320 force_sig(SIGSEGV, current);
+21 -3
arch/tile/kernel/intvec_32.S
··· 1342 1342 lw r20, r20 1343 1343 1344 1344 /* Jump to syscall handler. */ 1345 - jalr r20; .Lhandle_syscall_link: 1346 - FEEDBACK_REENTER(handle_syscall) 1345 + jalr r20 1346 + .Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */ 1347 1347 1348 1348 /* 1349 1349 * Write our r0 onto the stack so it gets restored instead ··· 1351 1351 */ 1352 1352 PTREGS_PTR(r29, PTREGS_OFFSET_REG(0)) 1353 1353 sw r29, r0 1354 + 1355 + .Lsyscall_sigreturn_skip: 1356 + FEEDBACK_REENTER(handle_syscall) 1354 1357 1355 1358 /* Do syscall trace again, if requested. */ 1356 1359 lw r30, r31 ··· 1539 1536 }; \ 1540 1537 STD_ENDPROC(_##x) 1541 1538 1539 + /* 1540 + * Special-case sigreturn to not write r0 to the stack on return. 1541 + * This is technically more efficient, but it also avoids difficulties 1542 + * in the 64-bit OS when handling 32-bit compat code, since we must not 1543 + * sign-extend r0 for the sigreturn return-value case. 1544 + */ 1545 + #define PTREGS_SYSCALL_SIGRETURN(x, reg) \ 1546 + STD_ENTRY(_##x); \ 1547 + addli lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \ 1548 + { \ 1549 + PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \ 1550 + j x \ 1551 + }; \ 1552 + STD_ENDPROC(_##x) 1553 + 1542 1554 PTREGS_SYSCALL(sys_execve, r3) 1543 1555 PTREGS_SYSCALL(sys_sigaltstack, r2) 1544 - PTREGS_SYSCALL(sys_rt_sigreturn, r0) 1556 + PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0) 1545 1557 PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1) 1546 1558 1547 1559 /* Save additional callee-saves to pt_regs, put address in r4 and jump. */
+8
arch/tile/kernel/process.c
··· 212 212 childregs->sp = sp; /* override with new user stack pointer */ 213 213 214 214 /* 215 + * If CLONE_SETTLS is set, set "tp" in the new task to "r4", 216 + * which is passed in as arg #5 to sys_clone(). 217 + */ 218 + if (clone_flags & CLONE_SETTLS) 219 + childregs->tp = regs->regs[4]; 220 + 221 + /* 215 222 * Copy the callee-saved registers from the passed pt_regs struct 216 223 * into the context-switch callee-saved registers area. 217 224 * This way when we start the interrupt-return sequence, the ··· 546 539 return __switch_to(prev, next, next_current_ksp0(next)); 547 540 } 548 541 542 + /* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */ 549 543 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 550 544 void __user *, parent_tidptr, void __user *, child_tidptr, 551 545 struct pt_regs *, regs)
+4 -6
arch/tile/kernel/signal.c
··· 52 52 */ 53 53 54 54 int restore_sigcontext(struct pt_regs *regs, 55 - struct sigcontext __user *sc, long *pr0) 55 + struct sigcontext __user *sc) 56 56 { 57 57 int err = 0; 58 58 int i; ··· 75 75 76 76 regs->faultnum = INT_SWINT_1_SIGRETURN; 77 77 78 - err |= __get_user(*pr0, &sc->gregs[0]); 79 78 return err; 80 79 } 81 80 82 - /* sigreturn() returns long since it restores r0 in the interrupted code. */ 81 + /* The assembly shim for this function arranges to ignore the return value. */ 83 82 SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs) 84 83 { 85 84 struct rt_sigframe __user *frame = 86 85 (struct rt_sigframe __user *)(regs->sp); 87 86 sigset_t set; 88 - long r0; 89 87 90 88 if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 91 89 goto badframe; ··· 96 98 recalc_sigpending(); 97 99 spin_unlock_irq(&current->sighand->siglock); 98 100 99 - if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) 101 + if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) 100 102 goto badframe; 101 103 102 104 if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) 103 105 goto badframe; 104 106 105 - return r0; 107 + return 0; 106 108 107 109 badframe: 108 110 force_sig(SIGSEGV, current);
+1 -1
arch/x86/boot/compressed/misc.c
··· 355 355 if (heap > 0x3fffffffffffUL) 356 356 error("Destination address too large"); 357 357 #else 358 - if (heap > ((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff)) 358 + if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff)) 359 359 error("Destination address too large"); 360 360 #endif 361 361 #ifndef CONFIG_RELOCATABLE
+1
arch/x86/crypto/ghash-clmulni-intel_glue.c
··· 10 10 * by the Free Software Foundation. 11 11 */ 12 12 13 + #include <linux/err.h> 13 14 #include <linux/module.h> 14 15 #include <linux/init.h> 15 16 #include <linux/kernel.h>
+3
arch/x86/include/asm/e820.h
··· 72 72 #define BIOS_BEGIN 0x000a0000 73 73 #define BIOS_END 0x00100000 74 74 75 + #define BIOS_ROM_BASE 0xffe00000 76 + #define BIOS_ROM_END 0xffffffff 77 + 75 78 #ifdef __KERNEL__ 76 79 /* see comment in arch/x86/kernel/e820.c */ 77 80 extern struct e820map e820;
+1 -1
arch/x86/include/asm/kvm_host.h
··· 79 79 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT) 80 80 #define KVM_MIN_FREE_MMU_PAGES 5 81 81 #define KVM_REFILL_PAGES 25 82 - #define KVM_MAX_CPUID_ENTRIES 40 82 + #define KVM_MAX_CPUID_ENTRIES 80 83 83 #define KVM_NR_FIXED_MTRR_REGION 88 84 84 #define KVM_NR_VAR_MTRR 8 85 85
+1
arch/x86/kernel/Makefile
··· 45 45 obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o 46 46 obj-y += tsc.o io_delay.o rtc.o 47 47 obj-y += pci-iommu_table.o 48 + obj-y += resource.o 48 49 49 50 obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o 50 51 obj-y += process.o
+8
arch/x86/kernel/apic/apic.c
··· 1383 1383 #endif 1384 1384 1385 1385 apic_pm_activate(); 1386 + 1387 + /* 1388 + * Now that local APIC setup is completed for BP, configure the fault 1389 + * handling for interrupt remapping. 1390 + */ 1391 + if (!smp_processor_id() && intr_remapping_enabled) 1392 + enable_drhd_fault_handling(); 1393 + 1386 1394 } 1387 1395 1388 1396 #ifdef CONFIG_X86_X2APIC
+2 -2
arch/x86/kernel/apic/io_apic.c
··· 2429 2429 { 2430 2430 struct irq_cfg *cfg = data->chip_data; 2431 2431 int i, do_unmask_irq = 0, irq = data->irq; 2432 - struct irq_desc *desc = irq_to_desc(irq); 2433 2432 unsigned long v; 2434 2433 2435 2434 irq_complete_move(cfg); 2436 2435 #ifdef CONFIG_GENERIC_PENDING_IRQ 2437 2436 /* If we are moving the irq we need to mask it */ 2438 - if (unlikely(desc->status & IRQ_MOVE_PENDING)) { 2437 + if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) { 2439 2438 do_unmask_irq = 1; 2440 2439 mask_ioapic(cfg); 2441 2440 } ··· 3366 3367 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3367 3368 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3368 3369 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3370 + msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); 3369 3371 3370 3372 dmar_msi_write(irq, &msg); 3371 3373
-7
arch/x86/kernel/apic/probe_64.c
··· 79 79 /* need to update phys_pkg_id */ 80 80 apic->phys_pkg_id = apicid_phys_pkg_id; 81 81 } 82 - 83 - /* 84 - * Now that apic routing model is selected, configure the 85 - * fault handling for intr remapping. 86 - */ 87 - if (intr_remapping_enabled) 88 - enable_drhd_fault_handling(); 89 82 } 90 83 91 84 /* Same for both flat and physical. */
+9 -7
arch/x86/kernel/head_32.S
··· 60 60 #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD) 61 61 #endif 62 62 63 + /* Number of possible pages in the lowmem region */ 64 + LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) 65 + 63 66 /* Enough space to fit pagetables for the low memory linear map */ 64 - MAPPING_BEYOND_END = \ 65 - PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT 67 + MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT 66 68 67 69 /* 68 70 * Worst-case size of the kernel mapping we need to make: 69 - * the worst-case size of the kernel itself, plus the extra we need 70 - * to map for the linear map. 71 + * a relocatable kernel can live anywhere in lowmem, so we need to be able 72 + * to map all of lowmem. 71 73 */ 72 - KERNEL_PAGES = (KERNEL_IMAGE_SIZE + MAPPING_BEYOND_END)>>PAGE_SHIFT 74 + KERNEL_PAGES = LOWMEM_PAGES 73 75 74 76 INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm 75 77 RESERVE_BRK(pagetables, INIT_MAP_SIZE) ··· 622 620 __PAGE_ALIGNED_BSS 623 621 .align PAGE_SIZE_asm 624 622 #ifdef CONFIG_X86_PAE 625 - initial_pg_pmd: 623 + ENTRY(initial_pg_pmd) 626 624 .fill 1024*KPMDS,4,0 627 625 #else 628 626 ENTRY(initial_page_table) 629 627 .fill 1024,4,0 630 628 #endif 631 - initial_pg_fixmap: 629 + ENTRY(initial_pg_fixmap) 632 630 .fill 1024,4,0 633 631 ENTRY(empty_zero_page) 634 632 .fill 4096,1,0
+16 -10
arch/x86/kernel/hpet.c
··· 27 27 #define HPET_DEV_FSB_CAP 0x1000 28 28 #define HPET_DEV_PERI_CAP 0x2000 29 29 30 + #define HPET_MIN_CYCLES 128 31 + #define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1)) 32 + 30 33 #define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt) 31 34 32 35 /* ··· 302 299 /* Calculate the min / max delta */ 303 300 hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, 304 301 &hpet_clockevent); 305 - /* 5 usec minimum reprogramming delta. */ 306 - hpet_clockevent.min_delta_ns = 5000; 302 + /* Setup minimum reprogramming delta. */ 303 + hpet_clockevent.min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, 304 + &hpet_clockevent); 307 305 308 306 /* 309 307 * Start hpet with the boot cpu mask and make it ··· 397 393 * the wraparound into account) nor a simple count down event 398 394 * mode. Further the write to the comparator register is 399 395 * delayed internally up to two HPET clock cycles in certain 400 - * chipsets (ATI, ICH9,10). We worked around that by reading 401 - * back the compare register, but that required another 402 - * workaround for ICH9,10 chips where the first readout after 403 - * write can return the old stale value. We already have a 404 - * minimum delta of 5us enforced, but a NMI or SMI hitting 396 + * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even 397 + * longer delays. We worked around that by reading back the 398 + * compare register, but that required another workaround for 399 + * ICH9,10 chips where the first readout after write can 400 + * return the old stale value. We already had a minimum 401 + * programming delta of 5us enforced, but a NMI or SMI hitting 405 402 * between the counter readout and the comparator write can 406 403 * move us behind that point easily.
Now instead of reading 407 404 * the compare register back several times, we make the ETIME 408 405 * decision based on the following: Return ETIME if the 409 - * counter value after the write is less than 8 HPET cycles 406 + * counter value after the write is less than HPET_MIN_CYCLES 410 407 * away from the event or if the counter is already ahead of 411 - * the event. 408 + * the event. The minimum programming delta for the generic 409 + * clockevents code is set to 1.5 * HPET_MIN_CYCLES. 412 410 */ 413 411 res = (s32)(cnt - hpet_readl(HPET_COUNTER)); 414 412 415 - return res < 8 ? -ETIME : 0; 413 + return res < HPET_MIN_CYCLES ? -ETIME : 0; 416 414 } 417 415 418 416 static void hpet_legacy_set_mode(enum clock_event_mode mode,
+48
arch/x86/kernel/resource.c
··· 1 + #include <linux/ioport.h> 2 + #include <asm/e820.h> 3 + 4 + static void resource_clip(struct resource *res, resource_size_t start, 5 + resource_size_t end) 6 + { 7 + resource_size_t low = 0, high = 0; 8 + 9 + if (res->end < start || res->start > end) 10 + return; /* no conflict */ 11 + 12 + if (res->start < start) 13 + low = start - res->start; 14 + 15 + if (res->end > end) 16 + high = res->end - end; 17 + 18 + /* Keep the area above or below the conflict, whichever is larger */ 19 + if (low > high) 20 + res->end = start - 1; 21 + else 22 + res->start = end + 1; 23 + } 24 + 25 + static void remove_e820_regions(struct resource *avail) 26 + { 27 + int i; 28 + struct e820entry *entry; 29 + 30 + for (i = 0; i < e820.nr_map; i++) { 31 + entry = &e820.map[i]; 32 + 33 + resource_clip(avail, entry->addr, 34 + entry->addr + entry->size - 1); 35 + } 36 + } 37 + 38 + void arch_remove_reservations(struct resource *avail) 39 + { 40 + /* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */ 41 + if (avail->flags & IORESOURCE_MEM) { 42 + if (avail->start < BIOS_END) 43 + avail->start = BIOS_END; 44 + resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END); 45 + 46 + remove_e820_regions(avail); 47 + } 48 + }
-1
arch/x86/kernel/setup.c
··· 769 769 770 770 x86_init.oem.arch_setup(); 771 771 772 - resource_alloc_from_bottom = 0; 773 772 iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; 774 773 setup_memory_map(); 775 774 parse_setup_data();
+2 -1
arch/x86/kernel/xsave.c
··· 394 394 * Setup init_xstate_buf to represent the init state of 395 395 * all the features managed by the xsave 396 396 */ 397 - init_xstate_buf = alloc_bootmem(xstate_size); 397 + init_xstate_buf = alloc_bootmem_align(xstate_size, 398 + __alignof__(struct xsave_struct)); 398 399 init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT; 399 400 400 401 clts();
+4
arch/x86/kvm/svm.c
··· 3494 3494 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) 3495 3495 { 3496 3496 switch (func) { 3497 + case 0x00000001: 3498 + /* Mask out xsave bit as long as it is not supported by SVM */ 3499 + entry->ecx &= ~(bit(X86_FEATURE_XSAVE)); 3500 + break; 3497 3501 case 0x80000001: 3498 3502 if (nested) 3499 3503 entry->ecx |= (1 << 2); /* Set SVM bit */
-5
arch/x86/kvm/vmx.c
··· 4227 4227 return PT_PDPE_LEVEL; 4228 4228 } 4229 4229 4230 - static inline u32 bit(int bitno) 4231 - { 4232 - return 1 << (bitno & 31); 4233 - } 4234 - 4235 4230 static void vmx_cpuid_update(struct kvm_vcpu *vcpu) 4236 4231 { 4237 4232 struct kvm_cpuid_entry2 *best;
+5 -6
arch/x86/kvm/x86.c
··· 155 155 156 156 u64 __read_mostly host_xcr0; 157 157 158 - static inline u32 bit(int bitno) 159 - { 160 - return 1 << (bitno & 31); 161 - } 162 - 163 158 static void kvm_on_user_return(struct user_return_notifier *urn) 164 159 { 165 160 unsigned slot; ··· 4564 4569 #ifdef CONFIG_CPU_FREQ 4565 4570 struct cpufreq_policy policy; 4566 4571 memset(&policy, 0, sizeof(policy)); 4567 - cpufreq_get_policy(&policy, get_cpu()); 4572 + cpu = get_cpu(); 4573 + cpufreq_get_policy(&policy, cpu); 4568 4574 if (policy.cpuinfo.max_freq) 4569 4575 max_tsc_khz = policy.cpuinfo.max_freq; 4576 + put_cpu(); 4570 4577 #endif 4571 4578 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, 4572 4579 CPUFREQ_TRANSITION_NOTIFIER); ··· 5519 5522 5520 5523 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 5521 5524 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 5525 + if (sregs->cr4 & X86_CR4_OSXSAVE) 5526 + update_cpuid(vcpu); 5522 5527 if (!is_long_mode(vcpu) && is_pae(vcpu)) { 5523 5528 load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3); 5524 5529 mmu_reset_needed = 1;
+5
arch/x86/kvm/x86.h
··· 70 70 return kvm_read_cr0_bits(vcpu, X86_CR0_PG); 71 71 } 72 72 73 + static inline u32 bit(int bitno) 74 + { 75 + return 1 << (bitno & 31); 76 + } 77 + 73 78 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); 74 79 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); 75 80 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq);
+8 -8
arch/x86/lguest/boot.c
··· 531 531 { 532 532 lguest_data.pgdir = cr3; 533 533 lazy_hcall1(LHCALL_NEW_PGTABLE, cr3); 534 - cr3_changed = true; 534 + 535 + /* These two page tables are simple, linear, and used during boot */ 536 + if (cr3 != __pa(swapper_pg_dir) && cr3 != __pa(initial_page_table)) 537 + cr3_changed = true; 535 538 } 536 539 537 540 static unsigned long lguest_read_cr3(void) ··· 706 703 * to forget all of them. Fortunately, this is very rare. 707 704 * 708 705 * ... except in early boot when the kernel sets up the initial pagetables, 709 - * which makes booting astonishingly slow: 1.83 seconds! So we don't even tell 710 - * the Host anything changed until we've done the first page table switch, 711 - * which brings boot back to 0.25 seconds. 706 + * which makes booting astonishingly slow: 48 seconds! So we don't even tell 707 + * the Host anything changed until we've done the first real page table switch, 708 + * which brings boot back to 4.3 seconds. 712 709 */ 713 710 static void lguest_set_pte(pte_t *ptep, pte_t pteval) 714 711 { ··· 1005 1002 clockevents_register_device(&lguest_clockevent); 1006 1003 1007 1004 /* Finally, we unblock the timer interrupt. */ 1008 - enable_lguest_irq(0); 1005 + clear_bit(0, lguest_data.blocked_interrupts); 1009 1006 } 1010 1007 1011 1008 /* ··· 1351 1348 * per-cpu segment descriptor register %fs as well. 1352 1349 */ 1353 1350 switch_to_new_gdt(0); 1354 - 1355 - /* We actually boot with all memory mapped, but let's say 128MB. */ 1356 - max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT; 1357 1351 1358 1352 /* 1359 1353 * The Host<->Guest Switcher lives at the top of our address space, and
+105
arch/x86/lguest/i386_head.S
··· 4 4 #include <asm/asm-offsets.h> 5 5 #include <asm/thread_info.h> 6 6 #include <asm/processor-flags.h> 7 + #include <asm/pgtable.h> 7 8 8 9 /*G:020 9 10 * Our story starts with the kernel booting into startup_32 in ··· 38 37 /* Set up the initial stack so we can run C code. */ 39 38 movl $(init_thread_union+THREAD_SIZE),%esp 40 39 40 + call init_pagetables 41 + 41 42 /* Jumps are relative: we're running __PAGE_OFFSET too low. */ 42 43 jmp lguest_init+__PAGE_OFFSET 44 + 45 + /* 46 + * Initialize page tables. This creates a PDE and a set of page 47 + * tables, which are located immediately beyond __brk_base. The variable 48 + * _brk_end is set up to point to the first "safe" location. 49 + * Mappings are created both at virtual address 0 (identity mapping) 50 + * and PAGE_OFFSET for up to _end. 51 + * 52 + * FIXME: This code is taken verbatim from arch/x86/kernel/head_32.S: they 53 + * don't have a stack at this point, so we can't just use call and ret. 54 + */ 55 + init_pagetables: 56 + #if PTRS_PER_PMD > 1 57 + #define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD) 58 + #else 59 + #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD) 60 + #endif 61 + #define pa(X) ((X) - __PAGE_OFFSET) 62 + 63 + /* Enough space to fit pagetables for the low memory linear map */ 64 + MAPPING_BEYOND_END = \ 65 + PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT 66 + #ifdef CONFIG_X86_PAE 67 + 68 + /* 69 + * In PAE mode initial_page_table is statically defined to contain 70 + * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3 71 + * entries). The identity mapping is handled by pointing two PGD entries 72 + * to the first kernel PMD. 73 + * 74 + * Note the upper half of each PMD or PTE are always zero at this stage.
 75 + */ 76 + 77 + #define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */ 78 + 79 + xorl %ebx,%ebx /* %ebx is kept at zero */ 80 + 81 + movl $pa(__brk_base), %edi 82 + movl $pa(initial_pg_pmd), %edx 83 + movl $PTE_IDENT_ATTR, %eax 84 + 10: 85 + leal PDE_IDENT_ATTR(%edi),%ecx /* Create PMD entry */ 86 + movl %ecx,(%edx) /* Store PMD entry */ 87 + /* Upper half already zero */ 88 + addl $8,%edx 89 + movl $512,%ecx 90 + 11: 91 + stosl 92 + xchgl %eax,%ebx 93 + stosl 94 + xchgl %eax,%ebx 95 + addl $0x1000,%eax 96 + loop 11b 97 + 98 + /* 99 + * End condition: we must map up to the end + MAPPING_BEYOND_END. 100 + */ 101 + movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp 102 + cmpl %ebp,%eax 103 + jb 10b 104 + 1: 105 + addl $__PAGE_OFFSET, %edi 106 + movl %edi, pa(_brk_end) 107 + shrl $12, %eax 108 + movl %eax, pa(max_pfn_mapped) 109 + 110 + /* Do early initialization of the fixmap area */ 111 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax 112 + movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8) 113 + #else /* Not PAE */ 114 + 115 + page_pde_offset = (__PAGE_OFFSET >> 20); 116 + 117 + movl $pa(__brk_base), %edi 118 + movl $pa(initial_page_table), %edx 119 + movl $PTE_IDENT_ATTR, %eax 120 + 10: 121 + leal PDE_IDENT_ATTR(%edi),%ecx /* Create PDE entry */ 122 + movl %ecx,(%edx) /* Store identity PDE entry */ 123 + movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */ 124 + addl $4,%edx 125 + movl $1024, %ecx 126 + 11: 127 + stosl 128 + addl $0x1000,%eax 129 + loop 11b 130 + /* 131 + * End condition: we must map up to the end + MAPPING_BEYOND_END.
132 + */ 133 + movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp 134 + cmpl %ebp,%eax 135 + jb 10b 136 + addl $__PAGE_OFFSET, %edi 137 + movl %edi, pa(_brk_end) 138 + shrl $12, %eax 139 + movl %eax, pa(max_pfn_mapped) 140 + 141 + /* Do early initialization of the fixmap area */ 142 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax 143 + movl %eax,pa(initial_page_table+0xffc) 144 + #endif 145 + ret 43 146 44 147 /*G:055 45 148 * We create a macro which puts the assembler code between lgstart_ and lgend_
+5 -13
arch/x86/pci/i386.c
··· 65 65 resource_size_t size, resource_size_t align) 66 66 { 67 67 struct pci_dev *dev = data; 68 - resource_size_t start = round_down(res->end - size + 1, align); 68 + resource_size_t start = res->start; 69 69 70 70 if (res->flags & IORESOURCE_IO) { 71 - 72 - /* 73 - * If we're avoiding ISA aliases, the largest contiguous I/O 74 - * port space is 256 bytes. Clearing bits 9 and 10 preserves 75 - * all 256-byte and smaller alignments, so the result will 76 - * still be correctly aligned. 77 - */ 78 - if (!skip_isa_ioresource_align(dev)) 79 - start &= ~0x300; 80 - } else if (res->flags & IORESOURCE_MEM) { 81 - if (start < BIOS_END) 82 - start = res->end; /* fail; no space */ 71 + if (skip_isa_ioresource_align(dev)) 72 + return start; 73 + if (start & 0x300) 74 + start = (start + 0x3ff) & ~0x3ff; 83 75 } 84 76 return start; 85 77 }
+2 -2
arch/x86/vdso/Makefile
··· 25 25 26 26 export CPPFLAGS_vdso.lds += -P -C 27 27 28 - VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -Wl,-soname=linux-vdso.so.1 \ 28 + VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ 29 29 -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 30 30 31 31 $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so ··· 69 69 vdso32-images = $(vdso32.so-y:%=vdso32-%.so) 70 70 71 71 CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds) 72 - VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1 72 + VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-soname=linux-gate.so.1 73 73 74 74 # This makes sure the $(obj) subdirectory exists even though vdso32/ 75 75 # is not a kbuild sub-make subdirectory.
+3 -2
block/blk-map.c
··· 201 201 for (i = 0; i < iov_count; i++) { 202 202 unsigned long uaddr = (unsigned long)iov[i].iov_base; 203 203 204 + if (!iov[i].iov_len) 205 + return -EINVAL; 206 + 204 207 if (uaddr & queue_dma_alignment(q)) { 205 208 unaligned = 1; 206 209 break; 207 210 } 208 - if (!iov[i].iov_len) 209 - return -EINVAL; 210 211 } 211 212 212 213 if (unaligned || (q->dma_pad_mask & len) || map_data)
+3 -3
block/blk-merge.c
··· 21 21 return 0; 22 22 23 23 fbio = bio; 24 - cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 24 + cluster = blk_queue_cluster(q); 25 25 seg_size = 0; 26 26 nr_phys_segs = 0; 27 27 for_each_bio(bio) { ··· 87 87 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, 88 88 struct bio *nxt) 89 89 { 90 - if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags)) 90 + if (!blk_queue_cluster(q)) 91 91 return 0; 92 92 93 93 if (bio->bi_seg_back_size + nxt->bi_seg_front_size > ··· 123 123 int nsegs, cluster; 124 124 125 125 nsegs = 0; 126 - cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 126 + cluster = blk_queue_cluster(q); 127 127 128 128 /* 129 129 * for each bio in rq
+22 -29
block/blk-settings.c
··· 126 126 lim->alignment_offset = 0; 127 127 lim->io_opt = 0; 128 128 lim->misaligned = 0; 129 - lim->no_cluster = 0; 129 + lim->cluster = 1; 130 130 } 131 131 EXPORT_SYMBOL(blk_set_default_limits); 132 132 ··· 229 229 EXPORT_SYMBOL(blk_queue_bounce_limit); 230 230 231 231 /** 232 - * blk_queue_max_hw_sectors - set max sectors for a request for this queue 233 - * @q: the request queue for the device 232 + * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request 233 + * @limits: the queue limits 234 234 * @max_hw_sectors: max hardware sectors in the usual 512b unit 235 235 * 236 236 * Description: ··· 244 244 * per-device basis in /sys/block/<device>/queue/max_sectors_kb. 245 245 * The soft limit can not exceed max_hw_sectors. 246 246 **/ 247 - void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) 247 + void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors) 248 248 { 249 249 if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { 250 250 max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); ··· 252 252 __func__, max_hw_sectors); 253 253 } 254 254 255 - q->limits.max_hw_sectors = max_hw_sectors; 256 - q->limits.max_sectors = min_t(unsigned int, max_hw_sectors, 257 - BLK_DEF_MAX_SECTORS); 255 + limits->max_hw_sectors = max_hw_sectors; 256 + limits->max_sectors = min_t(unsigned int, max_hw_sectors, 257 + BLK_DEF_MAX_SECTORS); 258 + } 259 + EXPORT_SYMBOL(blk_limits_max_hw_sectors); 260 + 261 + /** 262 + * blk_queue_max_hw_sectors - set max sectors for a request for this queue 263 + * @q: the request queue for the device 264 + * @max_hw_sectors: max hardware sectors in the usual 512b unit 265 + * 266 + * Description: 267 + * See description for blk_limits_max_hw_sectors().
268 + **/ 269 + void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) 270 + { 271 + blk_limits_max_hw_sectors(&q->limits, max_hw_sectors); 258 272 } 259 273 EXPORT_SYMBOL(blk_queue_max_hw_sectors); 260 274 ··· 478 464 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) 479 465 { 480 466 blk_stack_limits(&t->limits, &b->limits, 0); 481 - 482 - if (!t->queue_lock) 483 - WARN_ON_ONCE(1); 484 - else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { 485 - unsigned long flags; 486 - spin_lock_irqsave(t->queue_lock, flags); 487 - queue_flag_clear(QUEUE_FLAG_CLUSTER, t); 488 - spin_unlock_irqrestore(t->queue_lock, flags); 489 - } 490 467 } 491 468 EXPORT_SYMBOL(blk_queue_stack_limits); 492 469 ··· 550 545 t->io_min = max(t->io_min, b->io_min); 551 546 t->io_opt = lcm(t->io_opt, b->io_opt); 552 547 553 - t->no_cluster |= b->no_cluster; 548 + t->cluster &= b->cluster; 554 549 t->discard_zeroes_data &= b->discard_zeroes_data; 555 550 556 551 /* Physical block size a multiple of the logical block size? */ ··· 646 641 sector_t offset) 647 642 { 648 643 struct request_queue *t = disk->queue; 649 - struct request_queue *b = bdev_get_queue(bdev); 650 644 651 645 if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) { 652 646 char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE]; ··· 655 651 656 652 printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", 657 653 top, bottom); 658 - } 659 - 660 - if (!t->queue_lock) 661 - WARN_ON_ONCE(1); 662 - else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { 663 - unsigned long flags; 664 - 665 - spin_lock_irqsave(t->queue_lock, flags); 666 - if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) 667 - queue_flag_clear(QUEUE_FLAG_CLUSTER, t); 668 - spin_unlock_irqrestore(t->queue_lock, flags); 669 654 } 670 655 } 671 656 EXPORT_SYMBOL(disk_stack_limits);
+1 -1
block/blk-sysfs.c
··· 119 119 120 120 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) 121 121 { 122 - if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags)) 122 + if (blk_queue_cluster(q)) 123 123 return queue_var_show(queue_max_segment_size(q), (page)); 124 124 125 125 return queue_var_show(PAGE_CACHE_SIZE, (page));
+25 -14
block/blk-throttle.c
··· 355 355 tg->slice_end[rw], jiffies); 356 356 } 357 357 358 + static inline void throtl_set_slice_end(struct throtl_data *td, 359 + struct throtl_grp *tg, bool rw, unsigned long jiffy_end) 360 + { 361 + tg->slice_end[rw] = roundup(jiffy_end, throtl_slice); 362 + } 363 + 358 364 static inline void throtl_extend_slice(struct throtl_data *td, 359 365 struct throtl_grp *tg, bool rw, unsigned long jiffy_end) 360 366 { ··· 396 390 */ 397 391 if (throtl_slice_used(td, tg, rw)) 398 392 return; 393 + 394 + /* 395 + * A bio has been dispatched. Also adjust slice_end. It might happen 396 + * that initially cgroup limit was very low resulting in high 397 + * slice_end, but later limit was bumped up and bio was dispached 398 + * sooner, then we need to reduce slice_end. A high bogus slice_end 399 + * is bad because it does not allow new slice to start. 400 + */ 401 + 402 + throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice); 399 403 400 404 time_elapsed = jiffies - tg->slice_start[rw]; 401 405 ··· 725 709 struct throtl_grp *tg; 726 710 struct hlist_node *pos, *n; 727 711 728 - /* 729 - * Make sure atomic_inc() effects from 730 - * throtl_update_blkio_group_read_bps(), group of functions are 731 - * visible. 732 - * Is this required or smp_mb__after_atomic_inc() was suffcient 733 - * after the atomic_inc(). 734 - */ 735 - smp_rmb(); 736 712 if (!atomic_read(&td->limits_changed)) 737 713 return; 738 714 739 715 throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed)); 740 716 741 - hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) { 742 - /* 743 - * Do I need an smp_rmb() here to make sure tg->limits_changed 744 - * update is visible. I am relying on smp_rmb() at the 745 - * beginning of function and not putting a new one here. 746 - */ 717 + /* 718 + * Make sure updates from throtl_update_blkio_group_read_bps() group 719 + * of functions to tg->limits_changed are visible.
We do not 720 + * want update td->limits_changed to be visible but update to 721 + * tg->limits_changed not being visible yet on this cpu. Hence 722 + * the read barrier. 723 + */ 724 + smp_rmb(); 747 725 726 + hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) { 748 727 if (throtl_tg_on_rr(tg) && tg->limits_changed) { 749 728 throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu" 750 729 " riops=%u wiops=%u", tg->bps[READ],
+8
block/bsg.c
··· 250 250 int ret, rw; 251 251 unsigned int dxfer_len; 252 252 void *dxferp = NULL; 253 + struct bsg_class_device *bcd = &q->bsg_dev; 254 + 255 + /* if the LLD has been removed then the bsg_unregister_queue will 256 + * eventually be called and the class_dev was freed, so we can no 257 + * longer use this request_queue. Return no such address. 258 + */ 259 + if (!bcd->class_dev) 260 + return ERR_PTR(-ENXIO); 253 261 254 262 dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp, 255 263 hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
+29 -17
drivers/acpi/ac.c
··· 100 100 .release = single_release, 101 101 }; 102 102 #endif 103 - static int get_ac_property(struct power_supply *psy, 104 - enum power_supply_property psp, 105 - union power_supply_propval *val) 106 - { 107 - struct acpi_ac *ac = to_acpi_ac(psy); 108 - switch (psp) { 109 - case POWER_SUPPLY_PROP_ONLINE: 110 - val->intval = ac->state; 111 - break; 112 - default: 113 - return -EINVAL; 114 - } 115 - return 0; 116 - } 117 103 118 - static enum power_supply_property ac_props[] = { 119 - POWER_SUPPLY_PROP_ONLINE, 120 - }; 121 104 /* -------------------------------------------------------------------------- 122 105 AC Adapter Management 123 106 -------------------------------------------------------------------------- */ ··· 122 139 123 140 return 0; 124 141 } 142 + 143 + /* -------------------------------------------------------------------------- 144 + sysfs I/F 145 + -------------------------------------------------------------------------- */ 146 + static int get_ac_property(struct power_supply *psy, 147 + enum power_supply_property psp, 148 + union power_supply_propval *val) 149 + { 150 + struct acpi_ac *ac = to_acpi_ac(psy); 151 + 152 + if (!ac) 153 + return -ENODEV; 154 + 155 + if (acpi_ac_get_state(ac)) 156 + return -ENODEV; 157 + 158 + switch (psp) { 159 + case POWER_SUPPLY_PROP_ONLINE: 160 + val->intval = ac->state; 161 + break; 162 + default: 163 + return -EINVAL; 164 + } 165 + return 0; 166 + } 167 + 168 + static enum power_supply_property ac_props[] = { 169 + POWER_SUPPLY_PROP_ONLINE, 170 + }; 125 171 126 172 #ifdef CONFIG_ACPI_PROCFS_POWER 127 173 /* --------------------------------------------------------------------------
+17 -17
drivers/acpi/apei/erst.c
··· 86 86 * It is used to provide exclusive accessing for ERST Error Log 87 87 * Address Range too. 88 88 */ 89 - static DEFINE_SPINLOCK(erst_lock); 89 + static DEFINE_RAW_SPINLOCK(erst_lock); 90 90 91 91 static inline int erst_errno(int command_status) 92 92 { ··· 421 421 if (erst_disable) 422 422 return -ENODEV; 423 423 424 - spin_lock_irqsave(&erst_lock, flags); 424 + raw_spin_lock_irqsave(&erst_lock, flags); 425 425 count = __erst_get_record_count(); 426 - spin_unlock_irqrestore(&erst_lock, flags); 426 + raw_spin_unlock_irqrestore(&erst_lock, flags); 427 427 428 428 return count; 429 429 } ··· 456 456 if (erst_disable) 457 457 return -ENODEV; 458 458 459 - spin_lock_irqsave(&erst_lock, flags); 459 + raw_spin_lock_irqsave(&erst_lock, flags); 460 460 rc = __erst_get_next_record_id(record_id); 461 - spin_unlock_irqrestore(&erst_lock, flags); 461 + raw_spin_unlock_irqrestore(&erst_lock, flags); 462 462 463 463 return rc; 464 464 } ··· 624 624 return -EINVAL; 625 625 626 626 if (erst_erange.attr & ERST_RANGE_NVRAM) { 627 - if (!spin_trylock_irqsave(&erst_lock, flags)) 627 + if (!raw_spin_trylock_irqsave(&erst_lock, flags)) 628 628 return -EBUSY; 629 629 rc = __erst_write_to_nvram(record); 630 - spin_unlock_irqrestore(&erst_lock, flags); 630 + raw_spin_unlock_irqrestore(&erst_lock, flags); 631 631 return rc; 632 632 } 633 633 634 634 if (record->record_length > erst_erange.size) 635 635 return -EINVAL; 636 636 637 - if (!spin_trylock_irqsave(&erst_lock, flags)) 637 + if (!raw_spin_trylock_irqsave(&erst_lock, flags)) 638 638 return -EBUSY; 639 639 memcpy(erst_erange.vaddr, record, record->record_length); 640 640 rcd_erange = erst_erange.vaddr; ··· 642 642 memcpy(&rcd_erange->persistence_information, "ER", 2); 643 643 644 644 rc = __erst_write_to_storage(0); 645 - spin_unlock_irqrestore(&erst_lock, flags); 645 + raw_spin_unlock_irqrestore(&erst_lock, flags); 646 646 647 647 return rc; 648 648 } ··· 696 696 if (erst_disable) 697 697 return -ENODEV; 698 698 699 -
spin_lock_irqsave(&erst_lock, flags); 699 + raw_spin_lock_irqsave(&erst_lock, flags); 700 700 len = __erst_read(record_id, record, buflen); 701 - spin_unlock_irqrestore(&erst_lock, flags); 701 + raw_spin_unlock_irqrestore(&erst_lock, flags); 702 702 return len; 703 703 } 704 704 EXPORT_SYMBOL_GPL(erst_read); ··· 719 719 if (erst_disable) 720 720 return -ENODEV; 721 721 722 - spin_lock_irqsave(&erst_lock, flags); 722 + raw_spin_lock_irqsave(&erst_lock, flags); 723 723 rc = __erst_get_next_record_id(&record_id); 724 724 if (rc) { 725 - spin_unlock_irqrestore(&erst_lock, flags); 725 + raw_spin_unlock_irqrestore(&erst_lock, flags); 726 726 return rc; 727 727 } 728 728 /* no more record */ 729 729 if (record_id == APEI_ERST_INVALID_RECORD_ID) { 730 - spin_unlock_irqrestore(&erst_lock, flags); 730 + raw_spin_unlock_irqrestore(&erst_lock, flags); 731 731 return 0; 732 732 } 733 733 734 734 len = __erst_read(record_id, record, buflen); 735 - spin_unlock_irqrestore(&erst_lock, flags); 735 + raw_spin_unlock_irqrestore(&erst_lock, flags); 736 736 737 737 return len; 738 738 } ··· 746 746 if (erst_disable) 747 747 return -ENODEV; 748 748 749 - spin_lock_irqsave(&erst_lock, flags); 749 + raw_spin_lock_irqsave(&erst_lock, flags); 750 750 if (erst_erange.attr & ERST_RANGE_NVRAM) 751 751 rc = __erst_clear_from_nvram(record_id); 752 752 else 753 753 rc = __erst_clear_from_storage(record_id); 754 - spin_unlock_irqrestore(&erst_lock, flags); 754 + raw_spin_unlock_irqrestore(&erst_lock, flags); 755 755 756 756 return rc; 757 757 }
+5 -5
drivers/acpi/apei/hest.c
··· 46 46 47 47 /* HEST table parsing */ 48 48 49 - static struct acpi_table_hest *hest_tab; 49 + static struct acpi_table_hest *__read_mostly hest_tab; 50 50 51 - static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = { 51 + static const int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = { 52 52 [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */ 53 53 [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1, 54 54 [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi), ··· 126 126 unsigned int count; 127 127 }; 128 128 129 - static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data) 129 + static int __init hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data) 130 130 { 131 131 int *count = data; 132 132 ··· 135 135 return 0; 136 136 } 137 137 138 - static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) 138 + static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) 139 139 { 140 140 struct platform_device *ghes_dev; 141 141 struct ghes_arr *ghes_arr = data; ··· 165 165 return rc; 166 166 } 167 167 168 - static int hest_ghes_dev_register(unsigned int ghes_count) 168 + static int __init hest_ghes_dev_register(unsigned int ghes_count) 169 169 { 170 170 int rc, i; 171 171 struct ghes_arr ghes_arr;
+5
drivers/acpi/battery.c
··· 130 130 unsigned long flags; 131 131 }; 132 132 133 + static int acpi_battery_update(struct acpi_battery *battery); 134 + 133 135 #define to_acpi_battery(x) container_of(x, struct acpi_battery, bat); 134 136 135 137 inline int acpi_battery_present(struct acpi_battery *battery) ··· 185 183 { 186 184 int ret = 0; 187 185 struct acpi_battery *battery = to_acpi_battery(psy); 186 + 187 + if (acpi_battery_update(battery)) 188 + return -ENODEV; 188 189 189 190 if (acpi_battery_present(battery)) { 190 191 /* run battery update only if it is present */
+3
drivers/acpi/ec.c
··· 934 934 ec_flag_msi, "MSI hardware", { 935 935 DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL}, 936 936 { 937 + ec_flag_msi, "MSI hardware", { 938 + DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL}, 939 + { 937 940 ec_validate_ecdt, "ASUS hardware", { 938 941 DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, 939 942 {},
+77 -36
drivers/acpi/osl.c
··· 110 110 static LIST_HEAD(acpi_ioremaps); 111 111 static DEFINE_SPINLOCK(acpi_ioremap_lock); 112 112 113 - #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */ 114 - static char osi_setup_string[OSI_STRING_LENGTH_MAX]; 115 - 116 113 static void __init acpi_osi_setup_late(void); 117 114 118 115 /* ··· 149 152 unsigned int enable:1; 150 153 unsigned int dmi:1; 151 154 unsigned int cmdline:1; 152 - unsigned int known:1; 153 - } osi_linux = { 0, 0, 0, 0}; 155 + } osi_linux = {0, 0, 0}; 154 156 155 157 static u32 acpi_osi_handler(acpi_string interface, u32 supported) 156 158 { ··· 1051 1055 1052 1056 __setup("acpi_os_name=", acpi_os_name_setup); 1053 1057 1058 + #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */ 1059 + #define OSI_STRING_ENTRIES_MAX 16 /* arbitrary */ 1060 + 1061 + struct osi_setup_entry { 1062 + char string[OSI_STRING_LENGTH_MAX]; 1063 + bool enable; 1064 + }; 1065 + 1066 + static struct osi_setup_entry __initdata osi_setup_entries[OSI_STRING_ENTRIES_MAX]; 1067 + 1068 + void __init acpi_osi_setup(char *str) 1069 + { 1070 + struct osi_setup_entry *osi; 1071 + bool enable = true; 1072 + int i; 1073 + 1074 + if (!acpi_gbl_create_osi_method) 1075 + return; 1076 + 1077 + if (str == NULL || *str == '\0') { 1078 + printk(KERN_INFO PREFIX "_OSI method disabled\n"); 1079 + acpi_gbl_create_osi_method = FALSE; 1080 + return; 1081 + } 1082 + 1083 + if (*str == '!') { 1084 + str++; 1085 + enable = false; 1086 + } 1087 + 1088 + for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) { 1089 + osi = &osi_setup_entries[i]; 1090 + if (!strcmp(osi->string, str)) { 1091 + osi->enable = enable; 1092 + break; 1093 + } else if (osi->string[0] == '\0') { 1094 + osi->enable = enable; 1095 + strncpy(osi->string, str, OSI_STRING_LENGTH_MAX); 1096 + break; 1097 + } 1098 + } 1099 + } 1100 + 1054 1101 static void __init set_osi_linux(unsigned int enable) 1055 1102 { 1056 - if (osi_linux.enable != enable) { 1103 + if (osi_linux.enable != enable) 1057 1104 osi_linux.enable = enable; 1058 - 
printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n", 1059 - enable ? "Add": "Delet"); 1060 - } 1061 1105 1062 1106 if (osi_linux.enable) 1063 1107 acpi_osi_setup("Linux"); ··· 1109 1073 1110 1074 static void __init acpi_cmdline_osi_linux(unsigned int enable) 1111 1075 { 1112 - osi_linux.cmdline = 1; /* cmdline set the default */ 1076 + osi_linux.cmdline = 1; /* cmdline set the default and override DMI */ 1077 + osi_linux.dmi = 0; 1113 1078 set_osi_linux(enable); 1114 1079 1115 1080 return; ··· 1118 1081 1119 1082 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d) 1120 1083 { 1121 - osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */ 1122 - 1123 1084 printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); 1124 1085 1125 1086 if (enable == -1) 1126 1087 return; 1127 1088 1128 - osi_linux.known = 1; /* DMI knows which OSI(Linux) default needed */ 1129 - 1089 + osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */ 1130 1090 set_osi_linux(enable); 1131 1091 1132 1092 return; ··· 1138 1104 */ 1139 1105 static void __init acpi_osi_setup_late(void) 1140 1106 { 1141 - char *str = osi_setup_string; 1107 + struct osi_setup_entry *osi; 1108 + char *str; 1109 + int i; 1110 + acpi_status status; 1142 1111 1143 - if (*str == '\0') 1144 - return; 1112 + for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) { 1113 + osi = &osi_setup_entries[i]; 1114 + str = osi->string; 1145 1115 1146 - if (!strcmp("!Linux", str)) { 1147 - acpi_cmdline_osi_linux(0); /* !enable */ 1148 - } else if (*str == '!') { 1149 - if (acpi_remove_interface(++str) == AE_OK) 1150 - printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str); 1151 - } else if (!strcmp("Linux", str)) { 1152 - acpi_cmdline_osi_linux(1); /* enable */ 1153 - } else { 1154 - if (acpi_install_interface(str) == AE_OK) 1155 - printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str); 1116 + if (*str == '\0') 1117 + break; 1118 + if (osi->enable) { 1119 + status = acpi_install_interface(str); 1120 + 1121 + if 
(ACPI_SUCCESS(status)) 1122 + printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str); 1123 + } else { 1124 + status = acpi_remove_interface(str); 1125 + 1126 + if (ACPI_SUCCESS(status)) 1127 + printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str); 1128 + } 1156 1129 } 1157 1130 } 1158 1131 1159 - int __init acpi_osi_setup(char *str) 1132 + static int __init osi_setup(char *str) 1160 1133 { 1161 - if (str == NULL || *str == '\0') { 1162 - printk(KERN_INFO PREFIX "_OSI method disabled\n"); 1163 - acpi_gbl_create_osi_method = FALSE; 1164 - } else { 1165 - strncpy(osi_setup_string, str, OSI_STRING_LENGTH_MAX); 1166 - } 1134 + if (str && !strcmp("Linux", str)) 1135 + acpi_cmdline_osi_linux(1); 1136 + else if (str && !strcmp("!Linux", str)) 1137 + acpi_cmdline_osi_linux(0); 1138 + else 1139 + acpi_osi_setup(str); 1167 1140 1168 1141 return 1; 1169 1142 } 1170 1143 1171 - __setup("acpi_osi=", acpi_osi_setup); 1144 + __setup("acpi_osi=", osi_setup); 1172 1145 1173 1146 /* enable serialization to combat AE_ALREADY_EXISTS errors */ 1174 1147 static int __init acpi_serialize_setup(char *str) ··· 1571 1530 return AE_OK; 1572 1531 } 1573 1532 1574 - acpi_status acpi_os_initialize1(void) 1533 + acpi_status __init acpi_os_initialize1(void) 1575 1534 { 1576 1535 kacpid_wq = create_workqueue("kacpid"); 1577 1536 kacpi_notify_wq = create_workqueue("kacpi_notify");
+6 -6
drivers/acpi/power.c
··· 213 213 resource->name)); 214 214 } else { 215 215 result = __acpi_power_on(resource); 216 + if (result) 217 + resource->ref_count--; 216 218 } 217 219 218 220 mutex_unlock(&resource->resource_lock); 219 221 220 - return 0; 222 + return result; 221 223 } 222 224 223 225 static int acpi_power_off_device(acpi_handle handle) ··· 467 465 struct acpi_handle_list *tl = NULL; /* Target Resources */ 468 466 int i = 0; 469 467 470 - 471 468 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) 472 469 return -EINVAL; 470 + 471 + if (device->power.state == state) 472 + return 0; 473 473 474 474 if ((device->power.state < ACPI_STATE_D0) 475 475 || (device->power.state > ACPI_STATE_D3)) ··· 490 486 result = acpi_power_on(tl->handles[i]); 491 487 if (result) 492 488 goto end; 493 - } 494 - 495 - if (device->power.state == state) { 496 - goto end; 497 489 } 498 490 499 491 /*
-9
drivers/acpi/processor_thermal.c
··· 156 156 return 0; 157 157 } 158 158 159 - static int acpi_thermal_cpufreq_increase(unsigned int cpu) 160 - { 161 - return -ENODEV; 162 - } 163 - static int acpi_thermal_cpufreq_decrease(unsigned int cpu) 164 - { 165 - return -ENODEV; 166 - } 167 - 168 159 #endif 169 160 170 161 int acpi_processor_get_limit_info(struct acpi_processor *pr)
+10 -2
drivers/acpi/sleep.c
··· 27 27 28 28 static u8 sleep_states[ACPI_S_STATE_COUNT]; 29 29 30 - static u32 acpi_target_sleep_state = ACPI_STATE_S0; 31 - 32 30 static void acpi_sleep_tts_switch(u32 acpi_state) 33 31 { 34 32 union acpi_object in_arg = { ACPI_TYPE_INTEGER }; ··· 79 81 } 80 82 81 83 #ifdef CONFIG_ACPI_SLEEP 84 + static u32 acpi_target_sleep_state = ACPI_STATE_S0; 85 + 82 86 /* 83 87 * The ACPI specification wants us to save NVS memory regions during hibernation 84 88 * and to restore them during the subsequent resume. Windows does that also for ··· 425 425 .matches = { 426 426 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), 427 427 DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"), 428 + }, 429 + }, 430 + { 431 + .callback = init_nvs_nosave, 432 + .ident = "Sony Vaio VGN-NW130D", 433 + .matches = { 434 + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), 435 + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), 428 436 }, 429 437 }, 430 438 {},
+1 -1
drivers/atm/adummy.c
··· 154 154 err = -ENOMEM; 155 155 goto out; 156 156 } 157 - atm_dev = atm_dev_register(DEV_LABEL, &adummy_ops, -1, NULL); 157 + atm_dev = atm_dev_register(DEV_LABEL, NULL, &adummy_ops, -1, NULL); 158 158 if (!atm_dev) { 159 159 printk(KERN_ERR DEV_LABEL ": atm_dev_register() failed\n"); 160 160 err = -ENODEV;
+2 -1
drivers/atm/ambassador.c
··· 2244 2244 goto out_reset; 2245 2245 } 2246 2246 2247 - dev->atm_dev = atm_dev_register (DEV_LABEL, &amb_ops, -1, NULL); 2247 + dev->atm_dev = atm_dev_register (DEV_LABEL, &pci_dev->dev, &amb_ops, -1, 2248 + NULL); 2248 2249 if (!dev->atm_dev) { 2249 2250 PRINTD (DBG_ERR, "failed to register Madge ATM adapter"); 2250 2251 err = -EINVAL;
+1 -1
drivers/atm/atmtcp.c
··· 366 366 if (!dev_data) 367 367 return -ENOMEM; 368 368 369 - dev = atm_dev_register(DEV_LABEL,&atmtcp_v_dev_ops,itf,NULL); 369 + dev = atm_dev_register(DEV_LABEL,NULL,&atmtcp_v_dev_ops,itf,NULL); 370 370 if (!dev) { 371 371 kfree(dev_data); 372 372 return itf == -1 ? -ENOMEM : -EBUSY;
+1 -1
drivers/atm/eni.c
··· 2244 2244 &zeroes); 2245 2245 if (!cpu_zeroes) goto out1; 2246 2246 } 2247 - dev = atm_dev_register(DEV_LABEL,&ops,-1,NULL); 2247 + dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL); 2248 2248 if (!dev) goto out2; 2249 2249 pci_set_drvdata(pci_dev, dev); 2250 2250 eni_dev->pci_dev = pci_dev;
+1 -1
drivers/atm/firestream.c
··· 1911 1911 fs_dev, sizeof (struct fs_dev)); 1912 1912 if (!fs_dev) 1913 1913 goto err_out; 1914 - atm_dev = atm_dev_register("fs", &ops, -1, NULL); 1914 + atm_dev = atm_dev_register("fs", &pci_dev->dev, &ops, -1, NULL); 1915 1915 if (!atm_dev) 1916 1916 goto err_out_free_fs_dev; 1917 1917
+7 -7
drivers/atm/fore200e.c
··· 2567 2567 2568 2568 2569 2569 static int __devinit 2570 - fore200e_register(struct fore200e* fore200e) 2570 + fore200e_register(struct fore200e* fore200e, struct device *parent) 2571 2571 { 2572 2572 struct atm_dev* atm_dev; 2573 2573 2574 2574 DPRINTK(2, "device %s being registered\n", fore200e->name); 2575 2575 2576 - atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1, 2577 - NULL); 2576 + atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops, 2577 + -1, NULL); 2578 2578 if (atm_dev == NULL) { 2579 2579 printk(FORE200E "unable to register device %s\n", fore200e->name); 2580 2580 return -ENODEV; ··· 2594 2594 2595 2595 2596 2596 static int __devinit 2597 - fore200e_init(struct fore200e* fore200e) 2597 + fore200e_init(struct fore200e* fore200e, struct device *parent) 2598 2598 { 2599 - if (fore200e_register(fore200e) < 0) 2599 + if (fore200e_register(fore200e, parent) < 0) 2600 2600 return -ENODEV; 2601 2601 2602 2602 if (fore200e->bus->configure(fore200e) < 0) ··· 2662 2662 2663 2663 sprintf(fore200e->name, "%s-%d", bus->model_name, index); 2664 2664 2665 - err = fore200e_init(fore200e); 2665 + err = fore200e_init(fore200e, &op->dev); 2666 2666 if (err < 0) { 2667 2667 fore200e_shutdown(fore200e); 2668 2668 kfree(fore200e); ··· 2740 2740 2741 2741 sprintf(fore200e->name, "%s-%d", bus->model_name, index); 2742 2742 2743 - err = fore200e_init(fore200e); 2743 + err = fore200e_init(fore200e, &pci_dev->dev); 2744 2744 if (err < 0) { 2745 2745 fore200e_shutdown(fore200e); 2746 2746 goto out_free;
+1 -1
drivers/atm/he.c
··· 366 366 goto init_one_failure; 367 367 } 368 368 369 - atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL); 369 + atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL); 370 370 if (!atm_dev) { 371 371 err = -ENODEV; 372 372 goto init_one_failure;
+2 -1
drivers/atm/horizon.c
··· 2733 2733 PRINTD(DBG_INFO, "found Madge ATM adapter (hrz) at: IO %x, IRQ %u, MEM %p", 2734 2734 iobase, irq, membase); 2735 2735 2736 - dev->atm_dev = atm_dev_register(DEV_LABEL, &hrz_ops, -1, NULL); 2736 + dev->atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &hrz_ops, -1, 2737 + NULL); 2737 2738 if (!(dev->atm_dev)) { 2738 2739 PRINTD(DBG_ERR, "failed to register Madge ATM adapter"); 2739 2740 err = -EINVAL;
+2 -1
drivers/atm/idt77252.c
··· 3698 3698 goto err_out_iounmap; 3699 3699 } 3700 3700 3701 - dev = atm_dev_register("idt77252", &idt77252_ops, -1, NULL); 3701 + dev = atm_dev_register("idt77252", &pcidev->dev, &idt77252_ops, -1, 3702 + NULL); 3702 3703 if (!dev) { 3703 3704 printk("%s: can't register atm device\n", card->name); 3704 3705 err = -EIO;
+1 -1
drivers/atm/iphase.c
··· 3172 3172 ret = -ENODEV; 3173 3173 goto err_out_free_iadev; 3174 3174 } 3175 - dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL); 3175 + dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL); 3176 3176 if (!dev) { 3177 3177 ret = -ENOMEM; 3178 3178 goto err_out_disable_dev;
+1 -1
drivers/atm/lanai.c
··· 2591 2591 return -ENOMEM; 2592 2592 } 2593 2593 2594 - atmdev = atm_dev_register(DEV_LABEL, &ops, -1, NULL); 2594 + atmdev = atm_dev_register(DEV_LABEL, &pci->dev, &ops, -1, NULL); 2595 2595 if (atmdev == NULL) { 2596 2596 printk(KERN_ERR DEV_LABEL 2597 2597 ": couldn't register atm device!\n");
+2 -1
drivers/atm/nicstar.c
··· 771 771 } 772 772 773 773 /* Register device */ 774 - card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL); 774 + card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops, 775 + -1, NULL); 775 776 if (card->atmdev == NULL) { 776 777 printk("nicstar%d: can't register device.\n", i); 777 778 error = 17;
+4 -4
drivers/atm/solos-pci.c
··· 166 166 static struct atm_vcc* find_vcc(struct atm_dev *dev, short vpi, int vci); 167 167 static int list_vccs(int vci); 168 168 static void release_vccs(struct atm_dev *dev); 169 - static int atm_init(struct solos_card *); 169 + static int atm_init(struct solos_card *, struct device *); 170 170 static void atm_remove(struct solos_card *); 171 171 static int send_command(struct solos_card *card, int dev, const char *buf, size_t size); 172 172 static void solos_bh(unsigned long); ··· 1210 1210 if (db_firmware_upgrade) 1211 1211 flash_upgrade(card, 3); 1212 1212 1213 - err = atm_init(card); 1213 + err = atm_init(card, &dev->dev); 1214 1214 if (err) 1215 1215 goto out_free_irq; 1216 1216 ··· 1233 1233 return err; 1234 1234 } 1235 1235 1236 - static int atm_init(struct solos_card *card) 1236 + static int atm_init(struct solos_card *card, struct device *parent) 1237 1237 { 1238 1238 int i; 1239 1239 ··· 1244 1244 skb_queue_head_init(&card->tx_queue[i]); 1245 1245 skb_queue_head_init(&card->cli_queue[i]); 1246 1246 1247 - card->atmdev[i] = atm_dev_register("solos-pci", &fpga_ops, -1, NULL); 1247 + card->atmdev[i] = atm_dev_register("solos-pci", parent, &fpga_ops, -1, NULL); 1248 1248 if (!card->atmdev[i]) { 1249 1249 dev_err(&card->dev->dev, "Could not register ATM device %d\n", i); 1250 1250 atm_remove(card);
+1 -1
drivers/atm/zatm.c
··· 1597 1597 goto out; 1598 1598 } 1599 1599 1600 - dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL); 1600 + dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL); 1601 1601 if (!dev) 1602 1602 goto out_free; 1603 1603
+2
drivers/block/cciss.c
··· 2834 2834 InquiryData_struct *inq_buff = NULL; 2835 2835 2836 2836 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { 2837 + if (!h->drv[logvol]) 2838 + continue; 2837 2839 if (memcmp(h->drv[logvol]->LunID, drv->LunID, 2838 2840 sizeof(drv->LunID)) == 0) { 2839 2841 FOUND = 1;
+8 -6
drivers/block/drbd/drbd_receiver.c
··· 3627 3627 } 3628 3628 3629 3629 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header); 3630 - rv = drbd_recv(mdev, &header->h80.payload, shs); 3631 - if (unlikely(rv != shs)) { 3632 - dev_err(DEV, "short read while reading sub header: rv=%d\n", rv); 3633 - goto err_out; 3634 - } 3635 - 3636 3630 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) { 3637 3631 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size); 3638 3632 goto err_out; 3633 + } 3634 + 3635 + if (shs) { 3636 + rv = drbd_recv(mdev, &header->h80.payload, shs); 3637 + if (unlikely(rv != shs)) { 3638 + dev_err(DEV, "short read while reading sub header: rv=%d\n", rv); 3639 + goto err_out; 3640 + } 3639 3641 } 3640 3642 3641 3643 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
+2 -1
drivers/block/drbd/drbd_req.h
··· 339 339 } 340 340 341 341 /* completion of master bio is outside of spinlock. 342 - * If you need it irqsave, do it your self! */ 342 + * If you need it irqsave, do it your self! 343 + * Which means: don't use from bio endio callback. */ 343 344 static inline int req_mod(struct drbd_request *req, 344 345 enum drbd_req_event what) 345 346 {
+9 -1
drivers/block/drbd/drbd_worker.c
··· 193 193 */ 194 194 void drbd_endio_pri(struct bio *bio, int error) 195 195 { 196 + unsigned long flags; 196 197 struct drbd_request *req = bio->bi_private; 197 198 struct drbd_conf *mdev = req->mdev; 199 + struct bio_and_error m; 198 200 enum drbd_req_event what; 199 201 int uptodate = bio_flagged(bio, BIO_UPTODATE); 200 202 ··· 222 220 bio_put(req->private_bio); 223 221 req->private_bio = ERR_PTR(error); 224 222 225 - req_mod(req, what); 223 + /* not req_mod(), we need irqsave here! */ 224 + spin_lock_irqsave(&mdev->req_lock, flags); 225 + __req_mod(req, what, &m); 226 + spin_unlock_irqrestore(&mdev->req_lock, flags); 227 + 228 + if (m.bio) 229 + complete_master_bio(mdev, &m); 226 230 } 227 231 228 232 int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+1 -1
drivers/block/xen-blkfront.c
··· 72 72 static DEFINE_MUTEX(blkfront_mutex); 73 73 static const struct block_device_operations xlvbd_block_fops; 74 74 75 - #define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE) 75 + #define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE) 76 76 77 77 /* 78 78 * We have one of these per vbd, whether ide, scsi or 'other'. They
+4
drivers/bluetooth/ath3k.c
··· 35 35 static struct usb_device_id ath3k_table[] = { 36 36 /* Atheros AR3011 */ 37 37 { USB_DEVICE(0x0CF3, 0x3000) }, 38 + 39 + /* Atheros AR3011 with sflash firmware*/ 40 + { USB_DEVICE(0x0CF3, 0x3002) }, 41 + 38 42 { } /* Terminating entry */ 39 43 }; 40 44
+9 -3
drivers/bluetooth/btusb.c
··· 99 99 /* Broadcom BCM2033 without firmware */ 100 100 { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE }, 101 101 102 + /* Atheros 3011 with sflash firmware */ 103 + { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, 104 + 102 105 /* Broadcom BCM2035 */ 103 106 { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, 104 107 { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, ··· 242 239 243 240 err = usb_submit_urb(urb, GFP_ATOMIC); 244 241 if (err < 0) { 245 - BT_ERR("%s urb %p failed to resubmit (%d)", 242 + if (err != -EPERM) 243 + BT_ERR("%s urb %p failed to resubmit (%d)", 246 244 hdev->name, urb, -err); 247 245 usb_unanchor_urb(urb); 248 246 } ··· 327 323 328 324 err = usb_submit_urb(urb, GFP_ATOMIC); 329 325 if (err < 0) { 330 - BT_ERR("%s urb %p failed to resubmit (%d)", 326 + if (err != -EPERM) 327 + BT_ERR("%s urb %p failed to resubmit (%d)", 331 328 hdev->name, urb, -err); 332 329 usb_unanchor_urb(urb); 333 330 } ··· 417 412 418 413 err = usb_submit_urb(urb, GFP_ATOMIC); 419 414 if (err < 0) { 420 - BT_ERR("%s urb %p failed to resubmit (%d)", 415 + if (err != -EPERM) 416 + BT_ERR("%s urb %p failed to resubmit (%d)", 421 417 hdev->name, urb, -err); 422 418 usb_unanchor_urb(urb); 423 419 }
+12 -7
drivers/clocksource/sh_cmt.c
··· 283 283 } while (delay); 284 284 } 285 285 286 + static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) 287 + { 288 + if (delta > p->max_match_value) 289 + dev_warn(&p->pdev->dev, "delta out of range\n"); 290 + 291 + p->next_match_value = delta; 292 + sh_cmt_clock_event_program_verify(p, 0); 293 + } 294 + 286 295 static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) 287 296 { 288 297 unsigned long flags; 289 298 290 - if (delta > p->max_match_value) 291 - dev_warn(&p->pdev->dev, "delta out of range\n"); 292 - 293 299 spin_lock_irqsave(&p->lock, flags); 294 - p->next_match_value = delta; 295 - sh_cmt_clock_event_program_verify(p, 0); 300 + __sh_cmt_set_next(p, delta); 296 301 spin_unlock_irqrestore(&p->lock, flags); 297 302 } 298 303 ··· 364 359 365 360 /* setup timeout if no clockevent */ 366 361 if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) 367 - sh_cmt_set_next(p, p->max_match_value); 362 + __sh_cmt_set_next(p, p->max_match_value); 368 363 out: 369 364 spin_unlock_irqrestore(&p->lock, flags); 370 365 ··· 386 381 387 382 /* adjust the timeout to maximum if only clocksource left */ 388 383 if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) 389 - sh_cmt_set_next(p, p->max_match_value); 384 + __sh_cmt_set_next(p, p->max_match_value); 390 385 391 386 spin_unlock_irqrestore(&p->lock, flags); 392 387 }
+1
drivers/connector/connector.c
··· 36 36 MODULE_LICENSE("GPL"); 37 37 MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); 38 38 MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector."); 39 + MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR); 39 40 40 41 static struct cn_dev cdev; 41 42
+2 -2
drivers/dma/Makefile
··· 1 1 ifeq ($(CONFIG_DMADEVICES_DEBUG),y) 2 - EXTRA_CFLAGS += -DDEBUG 2 + ccflags-y += -DDEBUG 3 3 endif 4 4 ifeq ($(CONFIG_DMADEVICES_VDEBUG),y) 5 - EXTRA_CFLAGS += -DVERBOSE_DEBUG 5 + ccflags-y += -DVERBOSE_DEBUG 6 6 endif 7 7 8 8 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
+1 -1
drivers/dma/at_hdmac.c
··· 722 722 desc->lli.daddr = mem; 723 723 desc->lli.ctrla = ctrla 724 724 | ATC_DST_WIDTH(mem_width) 725 - | len >> mem_width; 725 + | len >> reg_width; 726 726 desc->lli.ctrlb = ctrlb; 727 727 728 728 if (!first) {
+4 -2
drivers/dma/fsldma.c
··· 50 50 * EIE - Error interrupt enable 51 51 * EOSIE - End of segments interrupt enable (basic mode) 52 52 * EOLNIE - End of links interrupt enable 53 + * BWC - Bandwidth sharing among channels 53 54 */ 54 - DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE 55 - | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); 55 + DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC 56 + | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE 57 + | FSL_DMA_MR_EOSIE, 32); 56 58 break; 57 59 case FSL_DMA_IP_83XX: 58 60 /* Set the channel to below modes:
+8 -1
drivers/dma/fsldma.h
··· 1 1 /* 2 - * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 2 + * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved. 3 3 * 4 4 * Author: 5 5 * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 ··· 35 35 #define FSL_DMA_MR_EMS_EN 0x00040000 36 36 #define FSL_DMA_MR_DAHE 0x00002000 37 37 #define FSL_DMA_MR_SAHE 0x00001000 38 + 39 + /* 40 + * Bandwidth/pause control determines how many bytes a given 41 + * channel is allowed to transfer before the DMA engine pauses 42 + * the current channel and switches to the next channel 43 + */ 44 + #define FSL_DMA_MR_BWC 0x08000000 38 45 39 46 /* Special MR definition for MPC8349 */ 40 47 #define FSL_DMA_MR_EOTIE 0x00000080
+1 -1
drivers/dma/imx-dma.c
··· 379 379 return 0; 380 380 381 381 err_init: 382 - while (i-- >= 0) { 382 + while (--i >= 0) { 383 383 struct imxdma_channel *imxdmac = &imxdma->channel[i]; 384 384 imx_dma_free(imxdmac->imxdma_channel); 385 385 }
+2 -2
drivers/dma/imx-sdma.c
··· 951 951 struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; 952 952 int param; 953 953 954 - bd->buffer_addr = sgl->dma_address; 954 + bd->buffer_addr = sg->dma_address; 955 955 956 956 count = sg->length; 957 957 ··· 1385 1385 { 1386 1386 return platform_driver_probe(&sdma_driver, sdma_probe); 1387 1387 } 1388 - subsys_initcall(sdma_module_init); 1388 + module_init(sdma_module_init); 1389 1389 1390 1390 MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); 1391 1391 MODULE_DESCRIPTION("i.MX SDMA driver");
+3 -5
drivers/dma/intel_mid_dma.c
··· 1075 1075 if (NULL == dma->dma_pool) { 1076 1076 pr_err("ERR_MDMA:pci_pool_create failed\n"); 1077 1077 err = -ENOMEM; 1078 - kfree(dma); 1079 1078 goto err_dma_pool; 1080 1079 } 1081 1080 ··· 1185 1186 free_irq(pdev->irq, dma); 1186 1187 err_irq: 1187 1188 pci_pool_destroy(dma->dma_pool); 1188 - kfree(dma); 1189 1189 err_dma_pool: 1190 1190 pr_err("ERR_MDMA:setup_dma failed: %d\n", err); 1191 1191 return err; ··· 1411 1413 .runtime_idle = dma_runtime_idle, 1412 1414 }; 1413 1415 1414 - static struct pci_driver intel_mid_dma_pci = { 1416 + static struct pci_driver intel_mid_dma_pci_driver = { 1415 1417 .name = "Intel MID DMA", 1416 1418 .id_table = intel_mid_dma_ids, 1417 1419 .probe = intel_mid_dma_probe, ··· 1429 1431 { 1430 1432 pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", 1431 1433 INTEL_MID_DMA_DRIVER_VERSION); 1432 - return pci_register_driver(&intel_mid_dma_pci); 1434 + return pci_register_driver(&intel_mid_dma_pci_driver); 1433 1435 } 1434 1436 fs_initcall(intel_mid_dma_init); 1435 1437 1436 1438 static void __exit intel_mid_dma_exit(void) 1437 1439 { 1438 - pci_unregister_driver(&intel_mid_dma_pci); 1440 + pci_unregister_driver(&intel_mid_dma_pci_driver); 1439 1441 } 1440 1442 module_exit(intel_mid_dma_exit); 1441 1443
+1 -1
drivers/dma/ioat/Makefile
··· 1 1 obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o 2 - ioatdma-objs := pci.o dma.o dma_v2.o dma_v3.o dca.o 2 + ioatdma-y := pci.o dma.o dma_v2.o dma_v3.o dca.o
+8 -7
drivers/dma/pch_dma.c
··· 259 259 return; 260 260 } 261 261 262 - channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr); 263 - channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr); 264 - channel_writel(pd_chan, SIZE, desc->regs.size); 265 - channel_writel(pd_chan, NEXT, desc->regs.next); 266 - 267 262 dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n", 268 263 pd_chan->chan.chan_id, desc->regs.dev_addr); 269 264 dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n", ··· 268 273 dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n", 269 274 pd_chan->chan.chan_id, desc->regs.next); 270 275 271 - if (list_empty(&desc->tx_list)) 276 + if (list_empty(&desc->tx_list)) { 277 + channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr); 278 + channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr); 279 + channel_writel(pd_chan, SIZE, desc->regs.size); 280 + channel_writel(pd_chan, NEXT, desc->regs.next); 272 281 pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT); 273 - else 282 + } else { 283 + channel_writel(pd_chan, NEXT, desc->txd.phys); 274 284 pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG); 285 + } 275 286 276 287 val = dma_readl(pd, CTL2); 277 288 val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id);
+2 -3
drivers/dma/ppc4xx/adma.c
··· 4449 4449 4450 4450 if (!request_mem_region(res.start, resource_size(&res), 4451 4451 dev_driver_string(&ofdev->dev))) { 4452 - dev_err(&ofdev->dev, "failed to request memory region " 4453 - "(0x%016llx-0x%016llx)\n", 4454 - (u64)res.start, (u64)res.end); 4452 + dev_err(&ofdev->dev, "failed to request memory region %pR\n", 4453 + &res); 4455 4454 initcode = PPC_ADMA_INIT_MEMREG; 4456 4455 ret = -EBUSY; 4457 4456 goto out;
+1 -1
drivers/edac/amd64_edac.c
··· 1572 1572 debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n", 1573 1573 hole_off, hole_valid, intlv_sel); 1574 1574 1575 - if (intlv_en || 1575 + if (intlv_en && 1576 1576 (intlv_sel != ((sys_addr >> 12) & intlv_en))) 1577 1577 return -EINVAL; 1578 1578
+3 -3
drivers/edac/edac_core.h
··· 41 41 #define MC_PROC_NAME_MAX_LEN 7 42 42 43 43 #if PAGE_SHIFT < 20 44 - #define PAGES_TO_MiB( pages ) ( ( pages ) >> ( 20 - PAGE_SHIFT ) ) 45 - #define MiB_TO_PAGES(mb) ((mb) >> (20 - PAGE_SHIFT)) 44 + #define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT)) 45 + #define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) 46 46 #else /* PAGE_SHIFT > 20 */ 47 - #define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) ) 47 + #define PAGES_TO_MiB(pages) ((pages) << (PAGE_SHIFT - 20)) 48 48 #define MiB_TO_PAGES(mb) ((mb) >> (PAGE_SHIFT - 20)) 49 49 #endif 50 50
+6 -4
drivers/edac/edac_mc.c
··· 586 586 return NULL; 587 587 } 588 588 589 - /* marking MCI offline */ 590 - mci->op_state = OP_OFFLINE; 591 - 592 589 del_mc_from_global_list(mci); 593 590 mutex_unlock(&mem_ctls_mutex); 594 591 595 - /* flush workq processes and remove sysfs */ 592 + /* flush workq processes */ 596 593 edac_mc_workq_teardown(mci); 594 + 595 + /* marking MCI offline */ 596 + mci->op_state = OP_OFFLINE; 597 + 598 + /* remove from sysfs */ 597 599 edac_remove_sysfs_mci_device(mci); 598 600 599 601 edac_printk(KERN_INFO, EDAC_MC,
+33 -14
drivers/firewire/ohci.c
··· 242 242 243 243 static char ohci_driver_name[] = KBUILD_MODNAME; 244 244 245 + #define PCI_DEVICE_ID_AGERE_FW643 0x5901 245 246 #define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 246 247 #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 247 248 ··· 254 253 255 254 /* In case of multiple matches in ohci_quirks[], only the first one is used. */ 256 255 static const struct { 257 - unsigned short vendor, device, flags; 256 + unsigned short vendor, device, revision, flags; 258 257 } ohci_quirks[] = { 259 - {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER | 260 - QUIRK_RESET_PACKET | 261 - QUIRK_NO_1394A}, 262 - {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, 263 - {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 264 - {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI}, 265 - {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 266 - {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 267 - {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 268 - {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, 258 + {PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID, 259 + QUIRK_CYCLE_TIMER}, 260 + 261 + {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID, 262 + QUIRK_BE_HEADERS}, 263 + 264 + {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, 265 + QUIRK_NO_MSI}, 266 + 267 + {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID, 268 + QUIRK_NO_MSI}, 269 + 270 + {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID, 271 + QUIRK_CYCLE_TIMER}, 272 + 273 + {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, 274 + QUIRK_CYCLE_TIMER}, 275 + 276 + {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID, 277 + QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A}, 278 + 279 + {PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID, 280 + QUIRK_RESET_PACKET}, 281 + 282 + {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID, 283 + QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, 269 284 }; 270 285 271 286 /* This overrides anything that was found in ohci_quirks[]. 
*/ ··· 2944 2927 } 2945 2928 2946 2929 for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++) 2947 - if (ohci_quirks[i].vendor == dev->vendor && 2948 - (ohci_quirks[i].device == dev->device || 2949 - ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) { 2930 + if ((ohci_quirks[i].vendor == dev->vendor) && 2931 + (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID || 2932 + ohci_quirks[i].device == dev->device) && 2933 + (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID || 2934 + ohci_quirks[i].revision >= dev->revision)) { 2950 2935 ohci->quirks = ohci_quirks[i].flags; 2951 2936 break; 2952 2937 }
+5 -5
drivers/gpu/drm/drm_crtc.c
··· 156 156 { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 }, 157 157 { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 }, 158 158 { DRM_MODE_CONNECTOR_Component, "Component", 0 }, 159 - { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 }, 160 - { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 }, 161 - { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 }, 162 - { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 }, 159 + { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 }, 160 + { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 }, 161 + { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 }, 162 + { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 }, 163 163 { DRM_MODE_CONNECTOR_TV, "TV", 0 }, 164 - { DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 }, 164 + { DRM_MODE_CONNECTOR_eDP, "eDP", 0 }, 165 165 }; 166 166 167 167 static struct drm_prop_enum_list drm_encoder_enum_list[] =
+2 -2
drivers/gpu/drm/drm_irq.c
··· 628 628 if ((seq - vblwait->request.sequence) <= (1 << 23)) { 629 629 e->event.tv_sec = now.tv_sec; 630 630 e->event.tv_usec = now.tv_usec; 631 - drm_vblank_put(dev, e->pipe); 631 + drm_vblank_put(dev, pipe); 632 632 list_add_tail(&e->base.link, &e->base.file_priv->event_list); 633 633 wake_up_interruptible(&e->base.file_priv->event_wait); 634 634 trace_drm_vblank_event_delivered(current->pid, pipe, ··· 645 645 spin_unlock_irqrestore(&dev->event_lock, flags); 646 646 kfree(e); 647 647 err_put: 648 - drm_vblank_put(dev, e->pipe); 648 + drm_vblank_put(dev, pipe); 649 649 return ret; 650 650 } 651 651
+8 -3
drivers/gpu/drm/radeon/r600.c
··· 878 878 u32 tmp; 879 879 880 880 /* flush hdp cache so updates hit vram */ 881 - if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { 881 + if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && 882 + !(rdev->flags & RADEON_IS_AGP)) { 882 883 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; 883 884 u32 tmp; 884 885 885 886 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 886 887 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL 888 + * This seems to cause problems on some AGP cards. Just use the old 889 + * method for them. 887 890 */ 888 891 WREG32(HDP_DEBUG1, 0); 889 892 tmp = readl((void __iomem *)ptr); ··· 3488 3485 void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) 3489 3486 { 3490 3487 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 3491 - * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL 3488 + * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL. 3489 + * This seems to cause problems on some AGP cards. Just use the old 3490 + * method for them. 3492 3491 */ 3493 3492 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && 3494 - rdev->vram_scratch.ptr) { 3493 + rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) { 3495 3494 void __iomem *ptr = (void *)rdev->vram_scratch.ptr; 3496 3495 u32 tmp; 3497 3496
+10 -10
drivers/hwmon/adm1026.c
··· 916 916 int nr = sensor_attr->index; 917 917 struct i2c_client *client = to_i2c_client(dev); 918 918 struct adm1026_data *data = i2c_get_clientdata(client); 919 - int val, orig_div, new_div, shift; 919 + int val, orig_div, new_div; 920 920 921 921 val = simple_strtol(buf, NULL, 10); 922 922 new_div = DIV_TO_REG(val); 923 - if (new_div == 0) { 924 - return -EINVAL; 925 - } 923 + 926 924 mutex_lock(&data->update_lock); 927 925 orig_div = data->fan_div[nr]; 928 926 data->fan_div[nr] = DIV_FROM_REG(new_div); 929 927 930 928 if (nr < 4) { /* 0 <= nr < 4 */ 931 - shift = 2 * nr; 932 929 adm1026_write_value(client, ADM1026_REG_FAN_DIV_0_3, 933 - ((DIV_TO_REG(orig_div) & (~(0x03 << shift))) | 934 - (new_div << shift))); 930 + (DIV_TO_REG(data->fan_div[0]) << 0) | 931 + (DIV_TO_REG(data->fan_div[1]) << 2) | 932 + (DIV_TO_REG(data->fan_div[2]) << 4) | 933 + (DIV_TO_REG(data->fan_div[3]) << 6)); 935 934 } else { /* 3 < nr < 8 */ 936 - shift = 2 * (nr - 4); 937 935 adm1026_write_value(client, ADM1026_REG_FAN_DIV_4_7, 938 - ((DIV_TO_REG(orig_div) & (~(0x03 << (2 * shift)))) | 939 - (new_div << shift))); 936 + (DIV_TO_REG(data->fan_div[4]) << 0) | 937 + (DIV_TO_REG(data->fan_div[5]) << 2) | 938 + (DIV_TO_REG(data->fan_div[6]) << 4) | 939 + (DIV_TO_REG(data->fan_div[7]) << 6)); 940 940 } 941 941 942 942 if (data->fan_div[nr] != orig_div) {
+45 -16
drivers/hwmon/it87.c
··· 187 187 #define IT87_REG_FAN_MAIN_CTRL 0x13 188 188 #define IT87_REG_FAN_CTL 0x14 189 189 #define IT87_REG_PWM(nr) (0x15 + (nr)) 190 + #define IT87_REG_PWM_DUTY(nr) (0x63 + (nr) * 8) 190 191 191 192 #define IT87_REG_VIN(nr) (0x20 + (nr)) 192 193 #define IT87_REG_TEMP(nr) (0x29 + (nr)) ··· 252 251 u8 fan_main_ctrl; /* Register value */ 253 252 u8 fan_ctl; /* Register value */ 254 253 255 - /* The following 3 arrays correspond to the same registers. The 256 - * meaning of bits 6-0 depends on the value of bit 7, and we want 257 - * to preserve settings on mode changes, so we have to track all 258 - * values separately. */ 254 + /* The following 3 arrays correspond to the same registers up to 255 + * the IT8720F. The meaning of bits 6-0 depends on the value of bit 256 + * 7, and we want to preserve settings on mode changes, so we have 257 + * to track all values separately. 258 + * Starting with the IT8721F, the manual PWM duty cycles are stored 259 + * in separate registers (8-bit values), so the separate tracking 260 + * is no longer needed, but it is still done to keep the driver 261 + * simple. */ 259 262 u8 pwm_ctrl[3]; /* Register value */ 260 - u8 pwm_duty[3]; /* Manual PWM value set by user (bit 6-0) */ 263 + u8 pwm_duty[3]; /* Manual PWM value set by user */ 261 264 u8 pwm_temp_map[3]; /* PWM to temp. chan. mapping (bits 1-0) */ 262 265 263 266 /* Automatic fan speed control registers */ ··· 837 832 data->fan_main_ctrl); 838 833 } else { 839 834 if (val == 1) /* Manual mode */ 840 - data->pwm_ctrl[nr] = data->pwm_duty[nr]; 835 + data->pwm_ctrl[nr] = data->type == it8721 ? 
836 + data->pwm_temp_map[nr] : 837 + data->pwm_duty[nr]; 841 838 else /* Automatic mode */ 842 839 data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr]; 843 840 it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]); ··· 865 858 return -EINVAL; 866 859 867 860 mutex_lock(&data->update_lock); 868 - data->pwm_duty[nr] = pwm_to_reg(data, val); 869 - /* If we are in manual mode, write the duty cycle immediately; 870 - * otherwise, just store it for later use. */ 871 - if (!(data->pwm_ctrl[nr] & 0x80)) { 872 - data->pwm_ctrl[nr] = data->pwm_duty[nr]; 873 - it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]); 861 + if (data->type == it8721) { 862 + /* If we are in automatic mode, the PWM duty cycle register 863 + * is read-only so we can't write the value */ 864 + if (data->pwm_ctrl[nr] & 0x80) { 865 + mutex_unlock(&data->update_lock); 866 + return -EBUSY; 867 + } 868 + data->pwm_duty[nr] = pwm_to_reg(data, val); 869 + it87_write_value(data, IT87_REG_PWM_DUTY(nr), 870 + data->pwm_duty[nr]); 871 + } else { 872 + data->pwm_duty[nr] = pwm_to_reg(data, val); 873 + /* If we are in manual mode, write the duty cycle immediately; 874 + * otherwise, just store it for later use. */ 875 + if (!(data->pwm_ctrl[nr] & 0x80)) { 876 + data->pwm_ctrl[nr] = data->pwm_duty[nr]; 877 + it87_write_value(data, IT87_REG_PWM(nr), 878 + data->pwm_ctrl[nr]); 879 + } 874 880 } 875 881 mutex_unlock(&data->update_lock); 876 882 return count; ··· 1978 1958 * channels to use when later setting to automatic mode later. 1979 1959 * Use a 1:1 mapping by default (we are clueless.) 1980 1960 * In both cases, the value can (and should) be changed by the user 1981 - * prior to switching to a different mode. */ 1961 + * prior to switching to a different mode. 1962 + * Note that this is no longer needed for the IT8721F and later, as 1963 + * these have separate registers for the temperature mapping and the 1964 + * manual duty cycle. 
*/ 1982 1965 for (i = 0; i < 3; i++) { 1983 1966 data->pwm_temp_map[i] = i; 1984 1967 data->pwm_duty[i] = 0x7f; /* Full speed */ ··· 2057 2034 static void it87_update_pwm_ctrl(struct it87_data *data, int nr) 2058 2035 { 2059 2036 data->pwm_ctrl[nr] = it87_read_value(data, IT87_REG_PWM(nr)); 2060 - if (data->pwm_ctrl[nr] & 0x80) /* Automatic mode */ 2037 + if (data->type == it8721) { 2061 2038 data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03; 2062 - else /* Manual mode */ 2063 - data->pwm_duty[nr] = data->pwm_ctrl[nr] & 0x7f; 2039 + data->pwm_duty[nr] = it87_read_value(data, 2040 + IT87_REG_PWM_DUTY(nr)); 2041 + } else { 2042 + if (data->pwm_ctrl[nr] & 0x80) /* Automatic mode */ 2043 + data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03; 2044 + else /* Manual mode */ 2045 + data->pwm_duty[nr] = data->pwm_ctrl[nr] & 0x7f; 2046 + } 2064 2047 2065 2048 if (has_old_autopwm(data)) { 2066 2049 int i;
+2 -2
drivers/hwmon/ltc4215.c
··· 205 205 206 206 /* Power (virtual) */ 207 207 LTC4215_POWER(power1_input); 208 - LTC4215_ALARM(power1_alarm, (1 << 3), LTC4215_STATUS); 209 208 210 209 /* Input Voltage */ 211 210 LTC4215_VOLTAGE(in1_input, LTC4215_ADIN); ··· 213 214 214 215 /* Output Voltage */ 215 216 LTC4215_VOLTAGE(in2_input, LTC4215_SOURCE); 217 + LTC4215_ALARM(in2_min_alarm, (1 << 3), LTC4215_STATUS); 216 218 217 219 /* Finally, construct an array of pointers to members of the above objects, 218 220 * as required for sysfs_create_group() ··· 223 223 &sensor_dev_attr_curr1_max_alarm.dev_attr.attr, 224 224 225 225 &sensor_dev_attr_power1_input.dev_attr.attr, 226 - &sensor_dev_attr_power1_alarm.dev_attr.attr, 227 226 228 227 &sensor_dev_attr_in1_input.dev_attr.attr, 229 228 &sensor_dev_attr_in1_max_alarm.dev_attr.attr, 230 229 &sensor_dev_attr_in1_min_alarm.dev_attr.attr, 231 230 232 231 &sensor_dev_attr_in2_input.dev_attr.attr, 232 + &sensor_dev_attr_in2_min_alarm.dev_attr.attr, 233 233 234 234 NULL, 235 235 };
+1 -1
drivers/i2c/busses/i2c-intel-mid.c
··· 999 999 1000 1000 /* Initialize struct members */ 1001 1001 snprintf(mrst->adap.name, sizeof(mrst->adap.name), 1002 - "MRST/Medfield I2C at %lx", start); 1002 + "Intel MID I2C at %lx", start); 1003 1003 mrst->adap.owner = THIS_MODULE; 1004 1004 mrst->adap.algo = &intel_mid_i2c_algorithm; 1005 1005 mrst->adap.dev.parent = &dev->dev;
+3 -9
drivers/idle/intel_idle.c
··· 273 273 274 274 pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates); 275 275 276 - if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ 277 - lapic_timer_reliable_states = 0xFFFFFFFF; 278 276 279 277 if (boot_cpu_data.x86 != 6) /* family 6 */ 280 278 return -ENODEV; ··· 284 286 case 0x1F: /* Core i7 and i5 Processor - Nehalem */ 285 287 case 0x2E: /* Nehalem-EX Xeon */ 286 288 case 0x2F: /* Westmere-EX Xeon */ 287 - lapic_timer_reliable_states = (1 << 1); /* C1 */ 288 - 289 289 case 0x25: /* Westmere */ 290 290 case 0x2C: /* Westmere */ 291 291 cpuidle_state_table = nehalem_cstates; ··· 291 295 292 296 case 0x1C: /* 28 - Atom Processor */ 293 297 case 0x26: /* 38 - Lincroft Atom Processor */ 294 - lapic_timer_reliable_states = (1 << 1); /* C1 */ 295 298 cpuidle_state_table = atom_cstates; 296 299 break; 297 300 ··· 298 303 case 0x2D: /* SNB Xeon */ 299 304 cpuidle_state_table = snb_cstates; 300 305 break; 301 - #ifdef FUTURE_USE 302 - case 0x17: /* 23 - Core 2 Duo */ 303 - lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */ 304 - #endif 305 306 306 307 default: 307 308 pr_debug(PREFIX "does not run on family %d model %d\n", 308 309 boot_cpu_data.x86, boot_cpu_data.x86_model); 309 310 return -ENODEV; 310 311 } 312 + 313 + if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ 314 + lapic_timer_reliable_states = 0xFFFFFFFF; 311 315 312 316 pr_debug(PREFIX "v" INTEL_IDLE_VERSION 313 317 " model 0x%X\n", boot_cpu_data.x86_model);
+59 -46
drivers/infiniband/core/uverbs_cmd.c
··· 893 893 return ret ? ret : in_len; 894 894 } 895 895 896 + static int copy_wc_to_user(void __user *dest, struct ib_wc *wc) 897 + { 898 + struct ib_uverbs_wc tmp; 899 + 900 + tmp.wr_id = wc->wr_id; 901 + tmp.status = wc->status; 902 + tmp.opcode = wc->opcode; 903 + tmp.vendor_err = wc->vendor_err; 904 + tmp.byte_len = wc->byte_len; 905 + tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data; 906 + tmp.qp_num = wc->qp->qp_num; 907 + tmp.src_qp = wc->src_qp; 908 + tmp.wc_flags = wc->wc_flags; 909 + tmp.pkey_index = wc->pkey_index; 910 + tmp.slid = wc->slid; 911 + tmp.sl = wc->sl; 912 + tmp.dlid_path_bits = wc->dlid_path_bits; 913 + tmp.port_num = wc->port_num; 914 + tmp.reserved = 0; 915 + 916 + if (copy_to_user(dest, &tmp, sizeof tmp)) 917 + return -EFAULT; 918 + 919 + return 0; 920 + } 921 + 896 922 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file, 897 923 const char __user *buf, int in_len, 898 924 int out_len) 899 925 { 900 926 struct ib_uverbs_poll_cq cmd; 901 - struct ib_uverbs_poll_cq_resp *resp; 927 + struct ib_uverbs_poll_cq_resp resp; 928 + u8 __user *header_ptr; 929 + u8 __user *data_ptr; 902 930 struct ib_cq *cq; 903 - struct ib_wc *wc; 904 - int ret = 0; 905 - int i; 906 - int rsize; 931 + struct ib_wc wc; 932 + int ret; 907 933 908 934 if (copy_from_user(&cmd, buf, sizeof cmd)) 909 935 return -EFAULT; 910 936 911 - wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL); 912 - if (!wc) 913 - return -ENOMEM; 914 - 915 - rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc); 916 - resp = kmalloc(rsize, GFP_KERNEL); 917 - if (!resp) { 918 - ret = -ENOMEM; 919 - goto out_wc; 920 - } 921 - 922 937 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); 923 - if (!cq) { 924 - ret = -EINVAL; 925 - goto out; 938 + if (!cq) 939 + return -EINVAL; 940 + 941 + /* we copy a struct ib_uverbs_poll_cq_resp to user space */ 942 + header_ptr = (void __user *)(unsigned long) cmd.response; 943 + data_ptr = header_ptr + sizeof resp; 944 + 945 + memset(&resp, 0, sizeof resp); 
946 + while (resp.count < cmd.ne) { 947 + ret = ib_poll_cq(cq, 1, &wc); 948 + if (ret < 0) 949 + goto out_put; 950 + if (!ret) 951 + break; 952 + 953 + ret = copy_wc_to_user(data_ptr, &wc); 954 + if (ret) 955 + goto out_put; 956 + 957 + data_ptr += sizeof(struct ib_uverbs_wc); 958 + ++resp.count; 926 959 } 927 960 928 - resp->count = ib_poll_cq(cq, cmd.ne, wc); 929 - 930 - put_cq_read(cq); 931 - 932 - for (i = 0; i < resp->count; i++) { 933 - resp->wc[i].wr_id = wc[i].wr_id; 934 - resp->wc[i].status = wc[i].status; 935 - resp->wc[i].opcode = wc[i].opcode; 936 - resp->wc[i].vendor_err = wc[i].vendor_err; 937 - resp->wc[i].byte_len = wc[i].byte_len; 938 - resp->wc[i].ex.imm_data = (__u32 __force) wc[i].ex.imm_data; 939 - resp->wc[i].qp_num = wc[i].qp->qp_num; 940 - resp->wc[i].src_qp = wc[i].src_qp; 941 - resp->wc[i].wc_flags = wc[i].wc_flags; 942 - resp->wc[i].pkey_index = wc[i].pkey_index; 943 - resp->wc[i].slid = wc[i].slid; 944 - resp->wc[i].sl = wc[i].sl; 945 - resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits; 946 - resp->wc[i].port_num = wc[i].port_num; 947 - } 948 - 949 - if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize)) 961 + if (copy_to_user(header_ptr, &resp, sizeof resp)) { 950 962 ret = -EFAULT; 963 + goto out_put; 964 + } 951 965 952 - out: 953 - kfree(resp); 966 + ret = in_len; 954 967 955 - out_wc: 956 - kfree(wc); 957 - return ret ? ret : in_len; 968 + out_put: 969 + put_cq_read(cq); 970 + return ret; 958 971 } 959 972 960 973 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
+65 -62
drivers/input/evdev.c
··· 534 534 } 535 535 #undef OLD_KEY_MAX 536 536 537 - static int evdev_handle_get_keycode(struct input_dev *dev, 538 - void __user *p, size_t size) 537 + static int evdev_handle_get_keycode(struct input_dev *dev, void __user *p) 538 + { 539 + struct input_keymap_entry ke = { 540 + .len = sizeof(unsigned int), 541 + .flags = 0, 542 + }; 543 + int __user *ip = (int __user *)p; 544 + int error; 545 + 546 + /* legacy case */ 547 + if (copy_from_user(ke.scancode, p, sizeof(unsigned int))) 548 + return -EFAULT; 549 + 550 + error = input_get_keycode(dev, &ke); 551 + if (error) 552 + return error; 553 + 554 + if (put_user(ke.keycode, ip + 1)) 555 + return -EFAULT; 556 + 557 + return 0; 558 + } 559 + 560 + static int evdev_handle_get_keycode_v2(struct input_dev *dev, void __user *p) 539 561 { 540 562 struct input_keymap_entry ke; 541 563 int error; 542 564 543 - memset(&ke, 0, sizeof(ke)); 565 + if (copy_from_user(&ke, p, sizeof(ke))) 566 + return -EFAULT; 544 567 545 - if (size == sizeof(unsigned int[2])) { 546 - /* legacy case */ 547 - int __user *ip = (int __user *)p; 568 + error = input_get_keycode(dev, &ke); 569 + if (error) 570 + return error; 548 571 549 - if (copy_from_user(ke.scancode, p, sizeof(unsigned int))) 550 - return -EFAULT; 572 + if (copy_to_user(p, &ke, sizeof(ke))) 573 + return -EFAULT; 551 574 552 - ke.len = sizeof(unsigned int); 553 - ke.flags = 0; 554 - 555 - error = input_get_keycode(dev, &ke); 556 - if (error) 557 - return error; 558 - 559 - if (put_user(ke.keycode, ip + 1)) 560 - return -EFAULT; 561 - 562 - } else { 563 - size = min(size, sizeof(ke)); 564 - 565 - if (copy_from_user(&ke, p, size)) 566 - return -EFAULT; 567 - 568 - error = input_get_keycode(dev, &ke); 569 - if (error) 570 - return error; 571 - 572 - if (copy_to_user(p, &ke, size)) 573 - return -EFAULT; 574 - } 575 575 return 0; 576 576 } 577 577 578 - static int evdev_handle_set_keycode(struct input_dev *dev, 579 - void __user *p, size_t size) 578 + static int 
evdev_handle_set_keycode(struct input_dev *dev, void __user *p) 579 + { 580 + struct input_keymap_entry ke = { 581 + .len = sizeof(unsigned int), 582 + .flags = 0, 583 + }; 584 + int __user *ip = (int __user *)p; 585 + 586 + if (copy_from_user(ke.scancode, p, sizeof(unsigned int))) 587 + return -EFAULT; 588 + 589 + if (get_user(ke.keycode, ip + 1)) 590 + return -EFAULT; 591 + 592 + return input_set_keycode(dev, &ke); 593 + } 594 + 595 + static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p) 580 596 { 581 597 struct input_keymap_entry ke; 582 598 583 - memset(&ke, 0, sizeof(ke)); 599 + if (copy_from_user(&ke, p, sizeof(ke))) 600 + return -EFAULT; 584 601 585 - if (size == sizeof(unsigned int[2])) { 586 - /* legacy case */ 587 - int __user *ip = (int __user *)p; 588 - 589 - if (copy_from_user(ke.scancode, p, sizeof(unsigned int))) 590 - return -EFAULT; 591 - 592 - if (get_user(ke.keycode, ip + 1)) 593 - return -EFAULT; 594 - 595 - ke.len = sizeof(unsigned int); 596 - ke.flags = 0; 597 - 598 - } else { 599 - size = min(size, sizeof(ke)); 600 - 601 - if (copy_from_user(&ke, p, size)) 602 - return -EFAULT; 603 - 604 - if (ke.len > sizeof(ke.scancode)) 605 - return -EINVAL; 606 - } 602 + if (ke.len > sizeof(ke.scancode)) 603 + return -EINVAL; 607 604 608 605 return input_set_keycode(dev, &ke); 609 606 } ··· 666 669 return evdev_grab(evdev, client); 667 670 else 668 671 return evdev_ungrab(evdev, client); 672 + 673 + case EVIOCGKEYCODE: 674 + return evdev_handle_get_keycode(dev, p); 675 + 676 + case EVIOCSKEYCODE: 677 + return evdev_handle_set_keycode(dev, p); 678 + 679 + case EVIOCGKEYCODE_V2: 680 + return evdev_handle_get_keycode_v2(dev, p); 681 + 682 + case EVIOCSKEYCODE_V2: 683 + return evdev_handle_set_keycode_v2(dev, p); 669 684 } 670 685 671 686 size = _IOC_SIZE(cmd); ··· 717 708 return -EFAULT; 718 709 719 710 return error; 720 - 721 - case EVIOC_MASK_SIZE(EVIOCGKEYCODE): 722 - return evdev_handle_get_keycode(dev, p, size); 723 - 724 - case 
EVIOC_MASK_SIZE(EVIOCSKEYCODE): 725 - return evdev_handle_set_keycode(dev, p, size); 726 711 } 727 712 728 713 /* Multi-number variable-length handlers */
+3
drivers/input/tablet/wacom_wac.c
··· 1436 1436 { "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT }; 1437 1437 static struct wacom_features wacom_features_0xD3 = 1438 1438 { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT }; 1439 + static const struct wacom_features wacom_features_0xD4 = 1440 + { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 255, 63, BAMBOO_PT }; 1439 1441 static struct wacom_features wacom_features_0xD8 = 1440 1442 { "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT }; 1441 1443 static struct wacom_features wacom_features_0xDA = ··· 1512 1510 { USB_DEVICE_WACOM(0xD1) }, 1513 1511 { USB_DEVICE_WACOM(0xD2) }, 1514 1512 { USB_DEVICE_WACOM(0xD3) }, 1513 + { USB_DEVICE_WACOM(0xD4) }, 1515 1514 { USB_DEVICE_WACOM(0xD8) }, 1516 1515 { USB_DEVICE_WACOM(0xDA) }, 1517 1516 { USB_DEVICE_WACOM(0xDB) },
+2 -8
drivers/md/dm-table.c
··· 517 517 */ 518 518 519 519 if (q->merge_bvec_fn && !ti->type->merge) 520 - limits->max_sectors = 521 - min_not_zero(limits->max_sectors, 522 - (unsigned int) (PAGE_SIZE >> 9)); 520 + blk_limits_max_hw_sectors(limits, 521 + (unsigned int) (PAGE_SIZE >> 9)); 523 522 return 0; 524 523 } 525 524 EXPORT_SYMBOL_GPL(dm_set_device_limits); ··· 1129 1130 * Copy table's limits to the DM device's request_queue 1130 1131 */ 1131 1132 q->limits = *limits; 1132 - 1133 - if (limits->no_cluster) 1134 - queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); 1135 - else 1136 - queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); 1137 1133 1138 1134 if (!dm_table_supports_discards(t)) 1139 1135 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+20 -22
drivers/md/md.c
··· 371 371 bio_put(bio); 372 372 } 373 373 374 - static void submit_flushes(mddev_t *mddev) 374 + static void md_submit_flush_data(struct work_struct *ws); 375 + 376 + static void submit_flushes(struct work_struct *ws) 375 377 { 378 + mddev_t *mddev = container_of(ws, mddev_t, flush_work); 376 379 mdk_rdev_t *rdev; 377 380 381 + INIT_WORK(&mddev->flush_work, md_submit_flush_data); 382 + atomic_set(&mddev->flush_pending, 1); 378 383 rcu_read_lock(); 379 384 list_for_each_entry_rcu(rdev, &mddev->disks, same_set) 380 385 if (rdev->raid_disk >= 0 && ··· 402 397 rdev_dec_pending(rdev, mddev); 403 398 } 404 399 rcu_read_unlock(); 400 + if (atomic_dec_and_test(&mddev->flush_pending)) 401 + queue_work(md_wq, &mddev->flush_work); 405 402 } 406 403 407 404 static void md_submit_flush_data(struct work_struct *ws) 408 405 { 409 406 mddev_t *mddev = container_of(ws, mddev_t, flush_work); 410 407 struct bio *bio = mddev->flush_bio; 411 - 412 - atomic_set(&mddev->flush_pending, 1); 413 408 414 409 if (bio->bi_size == 0) 415 410 /* an empty barrier - all done */ ··· 419 414 if (mddev->pers->make_request(mddev, bio)) 420 415 generic_make_request(bio); 421 416 } 422 - if (atomic_dec_and_test(&mddev->flush_pending)) { 423 - mddev->flush_bio = NULL; 424 - wake_up(&mddev->sb_wait); 425 - } 417 + 418 + mddev->flush_bio = NULL; 419 + wake_up(&mddev->sb_wait); 426 420 } 427 421 428 422 void md_flush_request(mddev_t *mddev, struct bio *bio) ··· 433 429 mddev->flush_bio = bio; 434 430 spin_unlock_irq(&mddev->write_lock); 435 431 436 - atomic_set(&mddev->flush_pending, 1); 437 - INIT_WORK(&mddev->flush_work, md_submit_flush_data); 438 - 439 - submit_flushes(mddev); 440 - 441 - if (atomic_dec_and_test(&mddev->flush_pending)) 442 - queue_work(md_wq, &mddev->flush_work); 432 + INIT_WORK(&mddev->flush_work, submit_flushes); 433 + queue_work(md_wq, &mddev->flush_work); 443 434 } 444 435 EXPORT_SYMBOL(md_flush_request); 445 436 ··· 4295 4296 goto abort; 4296 4297 mddev->queue->queuedata = mddev; 
4297 4298 4298 - /* Can be unlocked because the queue is new: no concurrency */ 4299 - queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue); 4300 - 4301 4299 blk_queue_make_request(mddev->queue, md_make_request); 4302 4300 4303 4301 disk = alloc_disk(1 << shift); ··· 5156 5160 PTR_ERR(rdev)); 5157 5161 return PTR_ERR(rdev); 5158 5162 } 5159 - /* set save_raid_disk if appropriate */ 5163 + /* set saved_raid_disk if appropriate */ 5160 5164 if (!mddev->persistent) { 5161 5165 if (info->state & (1<<MD_DISK_SYNC) && 5162 5166 info->raid_disk < mddev->raid_disks) ··· 5166 5170 } else 5167 5171 super_types[mddev->major_version]. 5168 5172 validate_super(mddev, rdev); 5169 - rdev->saved_raid_disk = rdev->raid_disk; 5173 + if (test_bit(In_sync, &rdev->flags)) 5174 + rdev->saved_raid_disk = rdev->raid_disk; 5175 + else 5176 + rdev->saved_raid_disk = -1; 5170 5177 5171 5178 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 5172 5179 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) ··· 6041 6042 || kthread_should_stop(), 6042 6043 thread->timeout); 6043 6044 6044 - clear_bit(THREAD_WAKEUP, &thread->flags); 6045 - 6046 - thread->run(thread->mddev); 6045 + if (test_and_clear_bit(THREAD_WAKEUP, &thread->flags)) 6046 + thread->run(thread->mddev); 6047 6047 } 6048 6048 6049 6049 return 0;
+1 -1
drivers/md/raid10.c
··· 2397 2397 return 0; 2398 2398 2399 2399 out_free_conf: 2400 + md_unregister_thread(mddev->thread); 2400 2401 if (conf->r10bio_pool) 2401 2402 mempool_destroy(conf->r10bio_pool); 2402 2403 safe_put_page(conf->tmppage); 2403 2404 kfree(conf->mirrors); 2404 2405 kfree(conf); 2405 2406 mddev->private = NULL; 2406 - md_unregister_thread(mddev->thread); 2407 2407 out: 2408 2408 return -EIO; 2409 2409 }
+4 -4
drivers/media/common/saa7146_hlp.c
··· 558 558 static void saa7146_set_position(struct saa7146_dev *dev, int w_x, int w_y, int w_height, enum v4l2_field field, u32 pixelformat) 559 559 { 560 560 struct saa7146_vv *vv = dev->vv_data; 561 - struct saa7146_format *sfmt = format_by_fourcc(dev, pixelformat); 561 + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pixelformat); 562 562 563 563 int b_depth = vv->ov_fmt->depth; 564 564 int b_bpl = vv->ov_fb.fmt.bytesperline; ··· 702 702 struct saa7146_vv *vv = dev->vv_data; 703 703 struct saa7146_video_dma vdma1; 704 704 705 - struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); 705 + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat); 706 706 707 707 int width = buf->fmt->width; 708 708 int height = buf->fmt->height; ··· 827 827 struct saa7146_video_dma vdma2; 828 828 struct saa7146_video_dma vdma3; 829 829 830 - struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); 830 + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat); 831 831 832 832 int width = buf->fmt->width; 833 833 int height = buf->fmt->height; ··· 994 994 995 995 void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next) 996 996 { 997 - struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); 997 + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat); 998 998 struct saa7146_vv *vv = dev->vv_data; 999 999 u32 vdma1_prot_addr; 1000 1000
+8 -8
drivers/media/common/saa7146_video.c
··· 84 84 85 85 static int NUM_FORMATS = sizeof(formats)/sizeof(struct saa7146_format); 86 86 87 - struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc) 87 + struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc) 88 88 { 89 89 int i, j = NUM_FORMATS; 90 90 ··· 266 266 struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); 267 267 struct scatterlist *list = dma->sglist; 268 268 int length = dma->sglen; 269 - struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); 269 + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat); 270 270 271 271 DEB_EE(("dev:%p, buf:%p, sg_len:%d\n",dev,buf,length)); 272 272 ··· 408 408 } 409 409 } 410 410 411 - fmt = format_by_fourcc(dev,fh->video_fmt.pixelformat); 411 + fmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat); 412 412 /* we need to have a valid format set here */ 413 413 BUG_ON(NULL == fmt); 414 414 ··· 460 460 return -EBUSY; 461 461 } 462 462 463 - fmt = format_by_fourcc(dev,fh->video_fmt.pixelformat); 463 + fmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat); 464 464 /* we need to have a valid format set here */ 465 465 BUG_ON(NULL == fmt); 466 466 ··· 536 536 return -EPERM; 537 537 538 538 /* check args */ 539 - fmt = format_by_fourcc(dev, fb->fmt.pixelformat); 539 + fmt = saa7146_format_by_fourcc(dev, fb->fmt.pixelformat); 540 540 if (NULL == fmt) 541 541 return -EINVAL; 542 542 ··· 760 760 761 761 DEB_EE(("V4L2_BUF_TYPE_VIDEO_CAPTURE: dev:%p, fh:%p\n", dev, fh)); 762 762 763 - fmt = format_by_fourcc(dev, f->fmt.pix.pixelformat); 763 + fmt = saa7146_format_by_fourcc(dev, f->fmt.pix.pixelformat); 764 764 if (NULL == fmt) 765 765 return -EINVAL; 766 766 ··· 1264 1264 buf->fmt = &fh->video_fmt; 1265 1265 buf->vb.field = fh->video_fmt.field; 1266 1266 1267 - sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); 1267 + sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat); 1268 1268 1269 1269 
release_all_pagetables(dev, buf); 1270 1270 if( 0 != IS_PLANAR(sfmt->trans)) { ··· 1378 1378 fh->video_fmt.pixelformat = V4L2_PIX_FMT_BGR24; 1379 1379 fh->video_fmt.bytesperline = 0; 1380 1380 fh->video_fmt.field = V4L2_FIELD_ANY; 1381 - sfmt = format_by_fourcc(dev,fh->video_fmt.pixelformat); 1381 + sfmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat); 1382 1382 fh->video_fmt.sizeimage = (fh->video_fmt.width * fh->video_fmt.height * sfmt->depth)/8; 1383 1383 1384 1384 videobuf_queue_sg_init(&fh->video_q, &video_qops,
+8 -8
drivers/media/radio/radio-aimslab.c
··· 361 361 362 362 static const struct v4l2_file_operations rtrack_fops = { 363 363 .owner = THIS_MODULE, 364 - .ioctl = video_ioctl2, 364 + .unlocked_ioctl = video_ioctl2, 365 365 }; 366 366 367 367 static const struct v4l2_ioctl_ops rtrack_ioctl_ops = { ··· 412 412 rt->vdev.release = video_device_release_empty; 413 413 video_set_drvdata(&rt->vdev, rt); 414 414 415 - if (video_register_device(&rt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 416 - v4l2_device_unregister(&rt->v4l2_dev); 417 - release_region(rt->io, 2); 418 - return -EINVAL; 419 - } 420 - v4l2_info(v4l2_dev, "AIMSlab RadioTrack/RadioReveal card driver.\n"); 421 - 422 415 /* Set up the I/O locking */ 423 416 424 417 mutex_init(&rt->lock); ··· 422 429 outb(0x48, rt->io); /* volume down but still "on" */ 423 430 sleep_delay(2000000); /* make sure it's totally down */ 424 431 outb(0xc0, rt->io); /* steady volume, mute card */ 432 + 433 + if (video_register_device(&rt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 434 + v4l2_device_unregister(&rt->v4l2_dev); 435 + release_region(rt->io, 2); 436 + return -EINVAL; 437 + } 438 + v4l2_info(v4l2_dev, "AIMSlab RadioTrack/RadioReveal card driver.\n"); 425 439 426 440 return 0; 427 441 }
+3 -3
drivers/media/radio/radio-aztech.c
··· 324 324 325 325 static const struct v4l2_file_operations aztech_fops = { 326 326 .owner = THIS_MODULE, 327 - .ioctl = video_ioctl2, 327 + .unlocked_ioctl = video_ioctl2, 328 328 }; 329 329 330 330 static const struct v4l2_ioctl_ops aztech_ioctl_ops = { ··· 375 375 az->vdev.ioctl_ops = &aztech_ioctl_ops; 376 376 az->vdev.release = video_device_release_empty; 377 377 video_set_drvdata(&az->vdev, az); 378 + /* mute card - prevents noisy bootups */ 379 + outb(0, az->io); 378 380 379 381 if (video_register_device(&az->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 380 382 v4l2_device_unregister(v4l2_dev); ··· 385 383 } 386 384 387 385 v4l2_info(v4l2_dev, "Aztech radio card driver v1.00/19990224 rkroll@exploits.org\n"); 388 - /* mute card - prevents noisy bootups */ 389 - outb(0, az->io); 390 386 return 0; 391 387 } 392 388
+9 -3
drivers/media/radio/radio-cadet.c
··· 328 328 unsigned char readbuf[RDS_BUFFER]; 329 329 int i = 0; 330 330 331 + mutex_lock(&dev->lock); 331 332 if (dev->rdsstat == 0) { 332 - mutex_lock(&dev->lock); 333 333 dev->rdsstat = 1; 334 334 outb(0x80, dev->io); /* Select RDS fifo */ 335 - mutex_unlock(&dev->lock); 336 335 init_timer(&dev->readtimer); 337 336 dev->readtimer.function = cadet_handler; 338 337 dev->readtimer.data = (unsigned long)dev; ··· 339 340 add_timer(&dev->readtimer); 340 341 } 341 342 if (dev->rdsin == dev->rdsout) { 343 + mutex_unlock(&dev->lock); 342 344 if (file->f_flags & O_NONBLOCK) 343 345 return -EWOULDBLOCK; 344 346 interruptible_sleep_on(&dev->read_queue); 347 + mutex_lock(&dev->lock); 345 348 } 346 349 while (i < count && dev->rdsin != dev->rdsout) 347 350 readbuf[i++] = dev->rdsbuf[dev->rdsout++]; 351 + mutex_unlock(&dev->lock); 348 352 349 353 if (copy_to_user(data, readbuf, i)) 350 354 return -EFAULT; ··· 527 525 { 528 526 struct cadet *dev = video_drvdata(file); 529 527 528 + mutex_lock(&dev->lock); 530 529 dev->users++; 531 530 if (1 == dev->users) 532 531 init_waitqueue_head(&dev->read_queue); 532 + mutex_unlock(&dev->lock); 533 533 return 0; 534 534 } 535 535 ··· 539 535 { 540 536 struct cadet *dev = video_drvdata(file); 541 537 538 + mutex_lock(&dev->lock); 542 539 dev->users--; 543 540 if (0 == dev->users) { 544 541 del_timer_sync(&dev->readtimer); 545 542 dev->rdsstat = 0; 546 543 } 544 + mutex_unlock(&dev->lock); 547 545 return 0; 548 546 } 549 547 ··· 565 559 .open = cadet_open, 566 560 .release = cadet_release, 567 561 .read = cadet_read, 568 - .ioctl = video_ioctl2, 562 + .unlocked_ioctl = video_ioctl2, 569 563 .poll = cadet_poll, 570 564 }; 571 565
+3 -3
drivers/media/radio/radio-gemtek-pci.c
··· 361 361 362 362 static const struct v4l2_file_operations gemtek_pci_fops = { 363 363 .owner = THIS_MODULE, 364 - .ioctl = video_ioctl2, 364 + .unlocked_ioctl = video_ioctl2, 365 365 }; 366 366 367 367 static const struct v4l2_ioctl_ops gemtek_pci_ioctl_ops = { ··· 422 422 card->vdev.release = video_device_release_empty; 423 423 video_set_drvdata(&card->vdev, card); 424 424 425 + gemtek_pci_mute(card); 426 + 425 427 if (video_register_device(&card->vdev, VFL_TYPE_RADIO, nr_radio) < 0) 426 428 goto err_video; 427 - 428 - gemtek_pci_mute(card); 429 429 430 430 v4l2_info(v4l2_dev, "Gemtek PCI Radio (rev. %d) found at 0x%04x-0x%04x.\n", 431 431 pdev->revision, card->iobase, card->iobase + card->length - 1);
+7 -7
drivers/media/radio/radio-gemtek.c
··· 378 378 379 379 static const struct v4l2_file_operations gemtek_fops = { 380 380 .owner = THIS_MODULE, 381 - .ioctl = video_ioctl2, 381 + .unlocked_ioctl = video_ioctl2, 382 382 }; 383 383 384 384 static int vidioc_querycap(struct file *file, void *priv, ··· 577 577 gt->vdev.release = video_device_release_empty; 578 578 video_set_drvdata(&gt->vdev, gt); 579 579 580 - if (video_register_device(&gt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 581 - v4l2_device_unregister(v4l2_dev); 582 - release_region(gt->io, 1); 583 - return -EBUSY; 584 - } 585 - 586 580 /* Set defaults */ 587 581 gt->lastfreq = GEMTEK_LOWFREQ; 588 582 gt->bu2614data = 0; 589 583 590 584 if (initmute) 591 585 gemtek_mute(gt); 586 + 587 + if (video_register_device(&gt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 588 + v4l2_device_unregister(v4l2_dev); 589 + release_region(gt->io, 1); 590 + return -EBUSY; 591 + } 592 592 593 593 return 0; 594 594 }
+6 -8
drivers/media/radio/radio-maestro.c
··· 299 299 300 300 static const struct v4l2_file_operations maestro_fops = { 301 301 .owner = THIS_MODULE, 302 - .ioctl = video_ioctl2, 302 + .unlocked_ioctl = video_ioctl2, 303 303 }; 304 304 305 305 static const struct v4l2_ioctl_ops maestro_ioctl_ops = { ··· 383 383 dev->vdev.release = video_device_release_empty; 384 384 video_set_drvdata(&dev->vdev, dev); 385 385 386 + if (!radio_power_on(dev)) { 387 + retval = -EIO; 388 + goto errfr1; 389 + } 390 + 386 391 retval = video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr); 387 392 if (retval) { 388 393 v4l2_err(v4l2_dev, "can't register video device!\n"); 389 394 goto errfr1; 390 395 } 391 396 392 - if (!radio_power_on(dev)) { 393 - retval = -EIO; 394 - goto errunr; 395 - } 396 - 397 397 v4l2_info(v4l2_dev, "version " DRIVER_VERSION "\n"); 398 398 399 399 return 0; 400 - errunr: 401 - video_unregister_device(&dev->vdev); 402 400 errfr1: 403 401 v4l2_device_unregister(v4l2_dev); 404 402 errfr:
+1 -1
drivers/media/radio/radio-maxiradio.c
··· 346 346 347 347 static const struct v4l2_file_operations maxiradio_fops = { 348 348 .owner = THIS_MODULE, 349 - .ioctl = video_ioctl2, 349 + .unlocked_ioctl = video_ioctl2, 350 350 }; 351 351 352 352 static const struct v4l2_ioctl_ops maxiradio_ioctl_ops = {
+4 -2
drivers/media/radio/radio-miropcm20.c
··· 33 33 unsigned long freq; 34 34 int muted; 35 35 struct snd_miro_aci *aci; 36 + struct mutex lock; 36 37 }; 37 38 38 39 static struct pcm20 pcm20_card = { ··· 73 72 74 73 static const struct v4l2_file_operations pcm20_fops = { 75 74 .owner = THIS_MODULE, 76 - .ioctl = video_ioctl2, 75 + .unlocked_ioctl = video_ioctl2, 77 76 }; 78 77 79 78 static int vidioc_querycap(struct file *file, void *priv, ··· 230 229 return -ENODEV; 231 230 } 232 231 strlcpy(v4l2_dev->name, "miropcm20", sizeof(v4l2_dev->name)); 233 - 232 + mutex_init(&dev->lock); 234 233 235 234 res = v4l2_device_register(NULL, v4l2_dev); 236 235 if (res < 0) { ··· 243 242 dev->vdev.fops = &pcm20_fops; 244 243 dev->vdev.ioctl_ops = &pcm20_ioctl_ops; 245 244 dev->vdev.release = video_device_release_empty; 245 + dev->vdev.lock = &dev->lock; 246 246 video_set_drvdata(&dev->vdev, dev); 247 247 248 248 if (video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr) < 0)
+5 -5
drivers/media/radio/radio-rtrack2.c
··· 266 266 267 267 static const struct v4l2_file_operations rtrack2_fops = { 268 268 .owner = THIS_MODULE, 269 - .ioctl = video_ioctl2, 269 + .unlocked_ioctl = video_ioctl2, 270 270 }; 271 271 272 272 static const struct v4l2_ioctl_ops rtrack2_ioctl_ops = { ··· 315 315 dev->vdev.release = video_device_release_empty; 316 316 video_set_drvdata(&dev->vdev, dev); 317 317 318 + /* mute card - prevents noisy bootups */ 319 + outb(1, dev->io); 320 + dev->muted = 1; 321 + 318 322 mutex_init(&dev->lock); 319 323 if (video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 320 324 v4l2_device_unregister(v4l2_dev); ··· 327 323 } 328 324 329 325 v4l2_info(v4l2_dev, "AIMSlab Radiotrack II card driver.\n"); 330 - 331 - /* mute card - prevents noisy bootups */ 332 - outb(1, dev->io); 333 - dev->muted = 1; 334 326 335 327 return 0; 336 328 }
+4 -3
drivers/media/radio/radio-sf16fmi.c
··· 260 260 261 261 static const struct v4l2_file_operations fmi_fops = { 262 262 .owner = THIS_MODULE, 263 - .ioctl = video_ioctl2, 263 + .unlocked_ioctl = video_ioctl2, 264 264 }; 265 265 266 266 static const struct v4l2_ioctl_ops fmi_ioctl_ops = { ··· 382 382 383 383 mutex_init(&fmi->lock); 384 384 385 + /* mute card - prevents noisy bootups */ 386 + fmi_mute(fmi); 387 + 385 388 if (video_register_device(&fmi->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 386 389 v4l2_device_unregister(v4l2_dev); 387 390 release_region(fmi->io, 2); ··· 394 391 } 395 392 396 393 v4l2_info(v4l2_dev, "card driver at 0x%x\n", fmi->io); 397 - /* mute card - prevents noisy bootups */ 398 - fmi_mute(fmi); 399 394 return 0; 400 395 } 401 396
+5 -6
drivers/media/radio/radio-sf16fmr2.c
··· 376 376 377 377 static const struct v4l2_file_operations fmr2_fops = { 378 378 .owner = THIS_MODULE, 379 - .ioctl = video_ioctl2, 379 + .unlocked_ioctl = video_ioctl2, 380 380 }; 381 381 382 382 static const struct v4l2_ioctl_ops fmr2_ioctl_ops = { ··· 424 424 fmr2->vdev.release = video_device_release_empty; 425 425 video_set_drvdata(&fmr2->vdev, fmr2); 426 426 427 + /* mute card - prevents noisy bootups */ 428 + fmr2_mute(fmr2->io); 429 + fmr2_product_info(fmr2); 430 + 427 431 if (video_register_device(&fmr2->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 428 432 v4l2_device_unregister(v4l2_dev); 429 433 release_region(fmr2->io, 2); ··· 435 431 } 436 432 437 433 v4l2_info(v4l2_dev, "SF16FMR2 radio card driver at 0x%x.\n", fmr2->io); 438 - /* mute card - prevents noisy bootups */ 439 - mutex_lock(&fmr2->lock); 440 - fmr2_mute(fmr2->io); 441 - fmr2_product_info(fmr2); 442 - mutex_unlock(&fmr2->lock); 443 434 debug_print((KERN_DEBUG "card_type %d\n", fmr2->card_type)); 444 435 return 0; 445 436 }
+2 -1
drivers/media/radio/radio-si4713.c
··· 53 53 /* radio_si4713_fops - file operations interface */ 54 54 static const struct v4l2_file_operations radio_si4713_fops = { 55 55 .owner = THIS_MODULE, 56 - .ioctl = video_ioctl2, 56 + /* Note: locking is done at the subdev level in the i2c driver. */ 57 + .unlocked_ioctl = video_ioctl2, 57 58 }; 58 59 59 60 /* Video4Linux Interface */
+9 -40
drivers/media/radio/radio-tea5764.c
··· 142 142 struct video_device *videodev; 143 143 struct tea5764_regs regs; 144 144 struct mutex mutex; 145 - int users; 146 145 }; 147 146 148 147 /* I2C code related */ ··· 457 458 return 0; 458 459 } 459 460 460 - static int tea5764_open(struct file *file) 461 - { 462 - /* Currently we support only one device */ 463 - struct tea5764_device *radio = video_drvdata(file); 464 - 465 - mutex_lock(&radio->mutex); 466 - /* Only exclusive access */ 467 - if (radio->users) { 468 - mutex_unlock(&radio->mutex); 469 - return -EBUSY; 470 - } 471 - radio->users++; 472 - mutex_unlock(&radio->mutex); 473 - file->private_data = radio; 474 - return 0; 475 - } 476 - 477 - static int tea5764_close(struct file *file) 478 - { 479 - struct tea5764_device *radio = video_drvdata(file); 480 - 481 - if (!radio) 482 - return -ENODEV; 483 - mutex_lock(&radio->mutex); 484 - radio->users--; 485 - mutex_unlock(&radio->mutex); 486 - return 0; 487 - } 488 - 489 461 /* File system interface */ 490 462 static const struct v4l2_file_operations tea5764_fops = { 491 463 .owner = THIS_MODULE, 492 - .open = tea5764_open, 493 - .release = tea5764_close, 494 - .ioctl = video_ioctl2, 464 + .unlocked_ioctl = video_ioctl2, 495 465 }; 496 466 497 467 static const struct v4l2_ioctl_ops tea5764_ioctl_ops = { ··· 495 527 int ret; 496 528 497 529 PDEBUG("probe"); 498 - radio = kmalloc(sizeof(struct tea5764_device), GFP_KERNEL); 530 + radio = kzalloc(sizeof(struct tea5764_device), GFP_KERNEL); 499 531 if (!radio) 500 532 return -ENOMEM; 501 533 ··· 523 555 524 556 i2c_set_clientdata(client, radio); 525 557 video_set_drvdata(radio->videodev, radio); 526 - 527 - ret = video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr); 528 - if (ret < 0) { 529 - PWARN("Could not register video device!"); 530 - goto errrel; 531 - } 558 + radio->videodev->lock = &radio->mutex; 532 559 533 560 /* initialize and power off the chip */ 534 561 tea5764_i2c_read(radio); 535 562 tea5764_set_audout_mode(radio, 
V4L2_TUNER_MODE_STEREO); 536 563 tea5764_mute(radio, 1); 537 564 tea5764_power_down(radio); 565 + 566 + ret = video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr); 567 + if (ret < 0) { 568 + PWARN("Could not register video device!"); 569 + goto errrel; 570 + } 538 571 539 572 PINFO("registered."); 540 573 return 0;
+4 -4
drivers/media/radio/radio-terratec.c
··· 338 338 339 339 static const struct v4l2_file_operations terratec_fops = { 340 340 .owner = THIS_MODULE, 341 - .ioctl = video_ioctl2, 341 + .unlocked_ioctl = video_ioctl2, 342 342 }; 343 343 344 344 static const struct v4l2_ioctl_ops terratec_ioctl_ops = { ··· 389 389 390 390 mutex_init(&tt->lock); 391 391 392 + /* mute card - prevents noisy bootups */ 393 + tt_write_vol(tt, 0); 394 + 392 395 if (video_register_device(&tt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 393 396 v4l2_device_unregister(&tt->v4l2_dev); 394 397 release_region(tt->io, 2); ··· 399 396 } 400 397 401 398 v4l2_info(v4l2_dev, "TERRATEC ActivRadio Standalone card driver.\n"); 402 - 403 - /* mute card - prevents noisy bootups */ 404 - tt_write_vol(tt, 0); 405 399 return 0; 406 400 } 407 401
+4 -1
drivers/media/radio/radio-timb.c
··· 34 34 struct v4l2_subdev *sd_dsp; 35 35 struct video_device video_dev; 36 36 struct v4l2_device v4l2_dev; 37 + struct mutex lock; 37 38 }; 38 39 39 40 ··· 143 142 144 143 static const struct v4l2_file_operations timbradio_fops = { 145 144 .owner = THIS_MODULE, 146 - .ioctl = video_ioctl2, 145 + .unlocked_ioctl = video_ioctl2, 147 146 }; 148 147 149 148 static int __devinit timbradio_probe(struct platform_device *pdev) ··· 165 164 } 166 165 167 166 tr->pdata = *pdata; 167 + mutex_init(&tr->lock); 168 168 169 169 strlcpy(tr->video_dev.name, "Timberdale Radio", 170 170 sizeof(tr->video_dev.name)); ··· 173 171 tr->video_dev.ioctl_ops = &timbradio_ioctl_ops; 174 172 tr->video_dev.release = video_device_release_empty; 175 173 tr->video_dev.minor = -1; 174 + tr->video_dev.lock = &tr->lock; 176 175 177 176 strlcpy(tr->v4l2_dev.name, DRIVER_NAME, sizeof(tr->v4l2_dev.name)); 178 177 err = v4l2_device_register(NULL, &tr->v4l2_dev);
+9 -9
drivers/media/radio/radio-trust.c
··· 344 344 345 345 static const struct v4l2_file_operations trust_fops = { 346 346 .owner = THIS_MODULE, 347 - .ioctl = video_ioctl2, 347 + .unlocked_ioctl = video_ioctl2, 348 348 }; 349 349 350 350 static const struct v4l2_ioctl_ops trust_ioctl_ops = { ··· 396 396 tr->vdev.release = video_device_release_empty; 397 397 video_set_drvdata(&tr->vdev, tr); 398 398 399 - if (video_register_device(&tr->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 400 - v4l2_device_unregister(v4l2_dev); 401 - release_region(tr->io, 2); 402 - return -EINVAL; 403 - } 404 - 405 - v4l2_info(v4l2_dev, "Trust FM Radio card driver v1.0.\n"); 406 - 407 399 write_i2c(tr, 2, TDA7318_ADDR, 0x80); /* speaker att. LF = 0 dB */ 408 400 write_i2c(tr, 2, TDA7318_ADDR, 0xa0); /* speaker att. RF = 0 dB */ 409 401 write_i2c(tr, 2, TDA7318_ADDR, 0xc0); /* speaker att. LR = 0 dB */ ··· 409 417 410 418 /* mute card - prevents noisy bootups */ 411 419 tr_setmute(tr, 1); 420 + 421 + if (video_register_device(&tr->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 422 + v4l2_device_unregister(v4l2_dev); 423 + release_region(tr->io, 2); 424 + return -EINVAL; 425 + } 426 + 427 + v4l2_info(v4l2_dev, "Trust FM Radio card driver v1.0.\n"); 412 428 413 429 return 0; 414 430 }
+8 -8
drivers/media/radio/radio-typhoon.c
··· 317 317 318 318 static const struct v4l2_file_operations typhoon_fops = { 319 319 .owner = THIS_MODULE, 320 - .ioctl = video_ioctl2, 320 + .unlocked_ioctl = video_ioctl2, 321 321 }; 322 322 323 323 static const struct v4l2_ioctl_ops typhoon_ioctl_ops = { ··· 344 344 345 345 strlcpy(v4l2_dev->name, "typhoon", sizeof(v4l2_dev->name)); 346 346 dev->io = io; 347 - dev->curfreq = dev->mutefreq = mutefreq; 348 347 349 348 if (dev->io == -1) { 350 349 v4l2_err(v4l2_dev, "You must set an I/O address with io=0x316 or io=0x336\n"); 351 350 return -EINVAL; 352 351 } 353 352 354 - if (dev->mutefreq < 87000 || dev->mutefreq > 108500) { 353 + if (mutefreq < 87000 || mutefreq > 108500) { 355 354 v4l2_err(v4l2_dev, "You must set a frequency (in kHz) used when muting the card,\n"); 356 355 v4l2_err(v4l2_dev, "e.g. with \"mutefreq=87500\" (87000 <= mutefreq <= 108500)\n"); 357 356 return -EINVAL; 358 357 } 358 + dev->curfreq = dev->mutefreq = mutefreq << 4; 359 359 360 360 mutex_init(&dev->lock); 361 361 if (!request_region(dev->io, 8, "typhoon")) { ··· 378 378 dev->vdev.ioctl_ops = &typhoon_ioctl_ops; 379 379 dev->vdev.release = video_device_release_empty; 380 380 video_set_drvdata(&dev->vdev, dev); 381 + 382 + /* mute card - prevents noisy bootups */ 383 + typhoon_mute(dev); 384 + 381 385 if (video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 382 386 v4l2_device_unregister(&dev->v4l2_dev); 383 387 release_region(dev->io, 8); 384 388 return -EINVAL; 385 389 } 386 390 v4l2_info(v4l2_dev, "port 0x%x.\n", dev->io); 387 - v4l2_info(v4l2_dev, "mute frequency is %lu kHz.\n", dev->mutefreq); 388 - dev->mutefreq <<= 4; 389 - 390 - /* mute card - prevents noisy bootups */ 391 - typhoon_mute(dev); 391 + v4l2_info(v4l2_dev, "mute frequency is %lu kHz.\n", mutefreq); 392 392 393 393 return 0; 394 394 }
+15 -15
drivers/media/radio/radio-zoltrix.c
··· 377 377 static const struct v4l2_file_operations zoltrix_fops = 378 378 { 379 379 .owner = THIS_MODULE, 380 - .ioctl = video_ioctl2, 380 + .unlocked_ioctl = video_ioctl2, 381 381 }; 382 382 383 383 static const struct v4l2_ioctl_ops zoltrix_ioctl_ops = { ··· 424 424 return res; 425 425 } 426 426 427 - strlcpy(zol->vdev.name, v4l2_dev->name, sizeof(zol->vdev.name)); 428 - zol->vdev.v4l2_dev = v4l2_dev; 429 - zol->vdev.fops = &zoltrix_fops; 430 - zol->vdev.ioctl_ops = &zoltrix_ioctl_ops; 431 - zol->vdev.release = video_device_release_empty; 432 - video_set_drvdata(&zol->vdev, zol); 433 - 434 - if (video_register_device(&zol->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 435 - v4l2_device_unregister(v4l2_dev); 436 - release_region(zol->io, 2); 437 - return -EINVAL; 438 - } 439 - v4l2_info(v4l2_dev, "Zoltrix Radio Plus card driver.\n"); 440 - 441 427 mutex_init(&zol->lock); 442 428 443 429 /* mute card - prevents noisy bootups */ ··· 437 451 438 452 zol->curvol = 0; 439 453 zol->stereo = 1; 454 + 455 + strlcpy(zol->vdev.name, v4l2_dev->name, sizeof(zol->vdev.name)); 456 + zol->vdev.v4l2_dev = v4l2_dev; 457 + zol->vdev.fops = &zoltrix_fops; 458 + zol->vdev.ioctl_ops = &zoltrix_ioctl_ops; 459 + zol->vdev.release = video_device_release_empty; 460 + video_set_drvdata(&zol->vdev, zol); 461 + 462 + if (video_register_device(&zol->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 463 + v4l2_device_unregister(v4l2_dev); 464 + release_region(zol->io, 2); 465 + return -EINVAL; 466 + } 467 + v4l2_info(v4l2_dev, "Zoltrix Radio Plus card driver.\n"); 440 468 441 469 return 0; 442 470 }
+1 -1
drivers/media/video/arv.c
··· 712 712 static const struct v4l2_file_operations ar_fops = { 713 713 .owner = THIS_MODULE, 714 714 .read = ar_read, 715 - .ioctl = video_ioctl2, 715 + .unlocked_ioctl = video_ioctl2, 716 716 }; 717 717 718 718 static const struct v4l2_ioctl_ops ar_ioctl_ops = {
+3 -114
drivers/media/video/bt8xx/bttv-driver.c
··· 854 854 xbits |= RESOURCE_VIDEO_READ | RESOURCE_VIDEO_STREAM; 855 855 856 856 /* is it free? */ 857 - mutex_lock(&btv->lock); 858 857 if (btv->resources & xbits) { 859 858 /* no, someone else uses it */ 860 859 goto fail; ··· 883 884 /* it's free, grab it */ 884 885 fh->resources |= bit; 885 886 btv->resources |= bit; 886 - mutex_unlock(&btv->lock); 887 887 return 1; 888 888 889 889 fail: 890 - mutex_unlock(&btv->lock); 891 890 return 0; 892 891 } 893 892 ··· 937 940 /* trying to free ressources not allocated by us ... */ 938 941 printk("bttv: BUG! (btres)\n"); 939 942 } 940 - mutex_lock(&btv->lock); 941 943 fh->resources &= ~bits; 942 944 btv->resources &= ~bits; 943 945 ··· 947 951 948 952 if (0 == (bits & VBI_RESOURCES)) 949 953 disclaim_vbi_lines(btv); 950 - 951 - mutex_unlock(&btv->lock); 952 954 } 953 955 954 956 /* ----------------------------------------------------------------------- */ ··· 1707 1713 1708 1714 /* Make sure tvnorm and vbi_end remain consistent 1709 1715 until we're done. */ 1710 - mutex_lock(&btv->lock); 1711 1716 1712 1717 norm = btv->tvnorm; 1713 1718 1714 1719 /* In this mode capturing always starts at defrect.top 1715 1720 (default VDELAY), ignoring cropping parameters. 
*/ 1716 1721 if (btv->vbi_end > bttv_tvnorms[norm].cropcap.defrect.top) { 1717 - mutex_unlock(&btv->lock); 1718 1722 return -EINVAL; 1719 1723 } 1720 1724 1721 - mutex_unlock(&btv->lock); 1722 - 1723 1725 c.rect = bttv_tvnorms[norm].cropcap.defrect; 1724 1726 } else { 1725 - mutex_lock(&btv->lock); 1726 - 1727 1727 norm = btv->tvnorm; 1728 1728 c = btv->crop[!!fh->do_crop]; 1729 - 1730 - mutex_unlock(&btv->lock); 1731 1729 1732 1730 if (width < c.min_scaled_width || 1733 1731 width > c.max_scaled_width || ··· 1844 1858 unsigned int i; 1845 1859 int err; 1846 1860 1847 - mutex_lock(&btv->lock); 1848 1861 err = v4l2_prio_check(&btv->prio, fh->prio); 1849 1862 if (err) 1850 1863 goto err; ··· 1859 1874 set_tvnorm(btv, i); 1860 1875 1861 1876 err: 1862 - mutex_unlock(&btv->lock); 1863 1877 1864 1878 return err; 1865 1879 } ··· 1882 1898 struct bttv *btv = fh->btv; 1883 1899 int rc = 0; 1884 1900 1885 - mutex_lock(&btv->lock); 1886 1901 if (i->index >= bttv_tvcards[btv->c.type].video_inputs) { 1887 1902 rc = -EINVAL; 1888 1903 goto err; ··· 1911 1928 i->std = BTTV_NORMS; 1912 1929 1913 1930 err: 1914 - mutex_unlock(&btv->lock); 1915 1931 1916 1932 return rc; 1917 1933 } ··· 1920 1938 struct bttv_fh *fh = priv; 1921 1939 struct bttv *btv = fh->btv; 1922 1940 1923 - mutex_lock(&btv->lock); 1924 1941 *i = btv->input; 1925 - mutex_unlock(&btv->lock); 1926 1942 1927 1943 return 0; 1928 1944 } ··· 1932 1952 1933 1953 int err; 1934 1954 1935 - mutex_lock(&btv->lock); 1936 1955 err = v4l2_prio_check(&btv->prio, fh->prio); 1937 1956 if (unlikely(err)) 1938 1957 goto err; ··· 1944 1965 set_input(btv, i, btv->tvnorm); 1945 1966 1946 1967 err: 1947 - mutex_unlock(&btv->lock); 1948 1968 return 0; 1949 1969 } 1950 1970 ··· 1957 1979 if (unlikely(0 != t->index)) 1958 1980 return -EINVAL; 1959 1981 1960 - mutex_lock(&btv->lock); 1961 1982 if (unlikely(btv->tuner_type == TUNER_ABSENT)) { 1962 1983 err = -EINVAL; 1963 1984 goto err; ··· 1972 1995 btv->audio_mode_gpio(btv, t, 1); 1973 
1996 1974 1997 err: 1975 - mutex_unlock(&btv->lock); 1976 1998 1977 1999 return 0; 1978 2000 } ··· 1982 2006 struct bttv_fh *fh = priv; 1983 2007 struct bttv *btv = fh->btv; 1984 2008 1985 - mutex_lock(&btv->lock); 1986 2009 f->type = btv->radio_user ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; 1987 2010 f->frequency = btv->freq; 1988 - mutex_unlock(&btv->lock); 1989 2011 1990 2012 return 0; 1991 2013 } ··· 1998 2024 if (unlikely(f->tuner != 0)) 1999 2025 return -EINVAL; 2000 2026 2001 - mutex_lock(&btv->lock); 2002 2027 err = v4l2_prio_check(&btv->prio, fh->prio); 2003 2028 if (unlikely(err)) 2004 2029 goto err; ··· 2012 2039 if (btv->has_matchbox && btv->radio_user) 2013 2040 tea5757_set_freq(btv, btv->freq); 2014 2041 err: 2015 - mutex_unlock(&btv->lock); 2016 2042 2017 2043 return 0; 2018 2044 } ··· 2144 2172 2145 2173 /* Make sure tvnorm, vbi_end and the current cropping parameters 2146 2174 remain consistent until we're done. */ 2147 - mutex_lock(&btv->lock); 2148 2175 2149 2176 b = &bttv_tvnorms[btv->tvnorm].cropcap.bounds; 2150 2177 ··· 2221 2250 rc = 0; /* success */ 2222 2251 2223 2252 fail: 2224 - mutex_unlock(&btv->lock); 2225 2253 2226 2254 return rc; 2227 2255 } ··· 2252 2282 if (V4L2_FIELD_ANY == field) { 2253 2283 __s32 height2; 2254 2284 2255 - mutex_lock(&fh->btv->lock); 2256 2285 height2 = fh->btv->crop[!!fh->do_crop].rect.height >> 1; 2257 - mutex_unlock(&fh->btv->lock); 2258 2286 field = (win->w.height > height2) 2259 2287 ? V4L2_FIELD_INTERLACED 2260 2288 : V4L2_FIELD_TOP; ··· 2328 2360 } 2329 2361 } 2330 2362 2331 - mutex_lock(&fh->cap.vb_lock); 2332 2363 /* clip against screen */ 2333 2364 if (NULL != btv->fbuf.base) 2334 2365 n = btcx_screen_clips(btv->fbuf.fmt.width, btv->fbuf.fmt.height, ··· 2358 2391 fh->ov.field = win->field; 2359 2392 fh->ov.setup_ok = 1; 2360 2393 2361 - /* 2362 - * FIXME: btv is protected by btv->lock mutex, while btv->init 2363 - * is protected by fh->cap.vb_lock. 
This seems to open the 2364 - * possibility for some race situations. Maybe the better would 2365 - * be to unify those locks or to use another way to store the 2366 - * init values that will be consumed by videobuf callbacks 2367 - */ 2368 2394 btv->init.ov.w.width = win->w.width; 2369 2395 btv->init.ov.w.height = win->w.height; 2370 2396 btv->init.ov.field = win->field; ··· 2372 2412 bttv_overlay_risc(btv, &fh->ov, fh->ovfmt, new); 2373 2413 retval = bttv_switch_overlay(btv,fh,new); 2374 2414 } 2375 - mutex_unlock(&fh->cap.vb_lock); 2376 2415 return retval; 2377 2416 } 2378 2417 ··· 2485 2526 if (V4L2_FIELD_ANY == field) { 2486 2527 __s32 height2; 2487 2528 2488 - mutex_lock(&btv->lock); 2489 2529 height2 = btv->crop[!!fh->do_crop].rect.height >> 1; 2490 - mutex_unlock(&btv->lock); 2491 2530 field = (f->fmt.pix.height > height2) 2492 2531 ? V4L2_FIELD_INTERLACED 2493 2532 : V4L2_FIELD_BOTTOM; ··· 2571 2614 fmt = format_by_fourcc(f->fmt.pix.pixelformat); 2572 2615 2573 2616 /* update our state informations */ 2574 - mutex_lock(&fh->cap.vb_lock); 2575 2617 fh->fmt = fmt; 2576 2618 fh->cap.field = f->fmt.pix.field; 2577 2619 fh->cap.last = V4L2_FIELD_NONE; ··· 2579 2623 btv->init.fmt = fmt; 2580 2624 btv->init.width = f->fmt.pix.width; 2581 2625 btv->init.height = f->fmt.pix.height; 2582 - mutex_unlock(&fh->cap.vb_lock); 2583 2626 2584 2627 return 0; 2585 2628 } ··· 2604 2649 unsigned int i; 2605 2650 struct bttv_fh *fh = priv; 2606 2651 2607 - mutex_lock(&fh->cap.vb_lock); 2608 2652 retval = __videobuf_mmap_setup(&fh->cap, gbuffers, gbufsize, 2609 2653 V4L2_MEMORY_MMAP); 2610 2654 if (retval < 0) { 2611 - mutex_unlock(&fh->cap.vb_lock); 2612 2655 return retval; 2613 2656 } 2614 2657 ··· 2618 2665 for (i = 0; i < gbuffers; i++) 2619 2666 mbuf->offsets[i] = i * gbufsize; 2620 2667 2621 - mutex_unlock(&fh->cap.vb_lock); 2622 2668 return 0; 2623 2669 } 2624 2670 #endif ··· 2727 2775 int retval = 0; 2728 2776 2729 2777 if (on) { 2730 - mutex_lock(&fh->cap.vb_lock); 2731 
2778 /* verify args */ 2732 2779 if (unlikely(!btv->fbuf.base)) { 2733 - mutex_unlock(&fh->cap.vb_lock); 2734 2780 return -EINVAL; 2735 2781 } 2736 2782 if (unlikely(!fh->ov.setup_ok)) { ··· 2737 2787 } 2738 2788 if (retval) 2739 2789 return retval; 2740 - mutex_unlock(&fh->cap.vb_lock); 2741 2790 } 2742 2791 2743 2792 if (!check_alloc_btres_lock(btv, fh, RESOURCE_OVERLAY)) 2744 2793 return -EBUSY; 2745 2794 2746 - mutex_lock(&fh->cap.vb_lock); 2747 2795 if (on) { 2748 2796 fh->ov.tvnorm = btv->tvnorm; 2749 2797 new = videobuf_sg_alloc(sizeof(*new)); ··· 2753 2805 2754 2806 /* switch over */ 2755 2807 retval = bttv_switch_overlay(btv, fh, new); 2756 - mutex_unlock(&fh->cap.vb_lock); 2757 2808 return retval; 2758 2809 } 2759 2810 ··· 2791 2844 } 2792 2845 2793 2846 /* ok, accept it */ 2794 - mutex_lock(&fh->cap.vb_lock); 2795 2847 btv->fbuf.base = fb->base; 2796 2848 btv->fbuf.fmt.width = fb->fmt.width; 2797 2849 btv->fbuf.fmt.height = fb->fmt.height; ··· 2822 2876 retval = bttv_switch_overlay(btv, fh, new); 2823 2877 } 2824 2878 } 2825 - mutex_unlock(&fh->cap.vb_lock); 2826 2879 return retval; 2827 2880 } 2828 2881 ··· 2900 2955 c->id >= V4L2_CID_PRIVATE_LASTP1)) 2901 2956 return -EINVAL; 2902 2957 2903 - mutex_lock(&btv->lock); 2904 2958 if (!btv->volume_gpio && (c->id == V4L2_CID_AUDIO_VOLUME)) 2905 2959 *c = no_ctl; 2906 2960 else { ··· 2907 2963 2908 2964 *c = (NULL != ctrl) ? 
*ctrl : no_ctl; 2909 2965 } 2910 - mutex_unlock(&btv->lock); 2911 2966 2912 2967 return 0; 2913 2968 } ··· 2917 2974 struct bttv_fh *fh = f; 2918 2975 struct bttv *btv = fh->btv; 2919 2976 2920 - mutex_lock(&btv->lock); 2921 2977 v4l2_video_std_frame_period(bttv_tvnorms[btv->tvnorm].v4l2_id, 2922 2978 &parm->parm.capture.timeperframe); 2923 - mutex_unlock(&btv->lock); 2924 2979 2925 2980 return 0; 2926 2981 } ··· 2934 2993 if (0 != t->index) 2935 2994 return -EINVAL; 2936 2995 2937 - mutex_lock(&btv->lock); 2938 2996 t->rxsubchans = V4L2_TUNER_SUB_MONO; 2939 2997 bttv_call_all(btv, tuner, g_tuner, t); 2940 2998 strcpy(t->name, "Television"); ··· 2945 3005 if (btv->audio_mode_gpio) 2946 3006 btv->audio_mode_gpio(btv, t, 0); 2947 3007 2948 - mutex_unlock(&btv->lock); 2949 3008 return 0; 2950 3009 } 2951 3010 ··· 2953 3014 struct bttv_fh *fh = f; 2954 3015 struct bttv *btv = fh->btv; 2955 3016 2956 - mutex_lock(&btv->lock); 2957 3017 *p = v4l2_prio_max(&btv->prio); 2958 - mutex_unlock(&btv->lock); 2959 3018 2960 3019 return 0; 2961 3020 } ··· 2965 3028 struct bttv *btv = fh->btv; 2966 3029 int rc; 2967 3030 2968 - mutex_lock(&btv->lock); 2969 3031 rc = v4l2_prio_change(&btv->prio, &fh->prio, prio); 2970 - mutex_unlock(&btv->lock); 2971 3032 2972 3033 return rc; 2973 3034 } ··· 2980 3045 cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY) 2981 3046 return -EINVAL; 2982 3047 2983 - mutex_lock(&btv->lock); 2984 3048 *cap = bttv_tvnorms[btv->tvnorm].cropcap; 2985 - mutex_unlock(&btv->lock); 2986 3049 2987 3050 return 0; 2988 3051 } ··· 2998 3065 inconsistent with fh->width or fh->height and apps 2999 3066 do not expect a change here. */ 3000 3067 3001 - mutex_lock(&btv->lock); 3002 3068 crop->c = btv->crop[!!fh->do_crop].rect; 3003 - mutex_unlock(&btv->lock); 3004 3069 3005 3070 return 0; 3006 3071 } ··· 3022 3091 /* Make sure tvnorm, vbi_end and the current cropping 3023 3092 parameters remain consistent until we're done. 
Note 3024 3093 read() may change vbi_end in check_alloc_btres_lock(). */ 3025 - mutex_lock(&btv->lock); 3026 3094 retval = v4l2_prio_check(&btv->prio, fh->prio); 3027 3095 if (0 != retval) { 3028 - mutex_unlock(&btv->lock); 3029 3096 return retval; 3030 3097 } 3031 3098 3032 3099 retval = -EBUSY; 3033 3100 3034 3101 if (locked_btres(fh->btv, VIDEO_RESOURCES)) { 3035 - mutex_unlock(&btv->lock); 3036 3102 return retval; 3037 3103 } 3038 3104 ··· 3041 3113 3042 3114 b_top = max(b->top, btv->vbi_end); 3043 3115 if (b_top + 32 >= b_bottom) { 3044 - mutex_unlock(&btv->lock); 3045 3116 return retval; 3046 3117 } 3047 3118 ··· 3063 3136 3064 3137 btv->crop[1] = c; 3065 3138 3066 - mutex_unlock(&btv->lock); 3067 - 3068 3139 fh->do_crop = 1; 3069 - 3070 - mutex_lock(&fh->cap.vb_lock); 3071 3140 3072 3141 if (fh->width < c.min_scaled_width) { 3073 3142 fh->width = c.min_scaled_width; ··· 3080 3157 fh->height = c.max_scaled_height; 3081 3158 btv->init.height = c.max_scaled_height; 3082 3159 } 3083 - 3084 - mutex_unlock(&fh->cap.vb_lock); 3085 3160 3086 3161 return 0; 3087 3162 } ··· 3148 3227 return videobuf_poll_stream(file, &fh->vbi, wait); 3149 3228 } 3150 3229 3151 - mutex_lock(&fh->cap.vb_lock); 3152 3230 if (check_btres(fh,RESOURCE_VIDEO_STREAM)) { 3153 3231 /* streaming capture */ 3154 3232 if (list_empty(&fh->cap.stream)) ··· 3182 3262 else 3183 3263 rc = 0; 3184 3264 err: 3185 - mutex_unlock(&fh->cap.vb_lock); 3186 3265 return rc; 3187 3266 } 3188 3267 ··· 3212 3293 return -ENOMEM; 3213 3294 file->private_data = fh; 3214 3295 3215 - /* 3216 - * btv is protected by btv->lock mutex, while btv->init and other 3217 - * streaming vars are protected by fh->cap.vb_lock. We need to take 3218 - * care of both locks to avoid troubles. However, vb_lock is used also 3219 - * inside videobuf, without calling buf->lock. So, it is a very bad 3220 - * idea to hold both locks at the same time. 
3221 - * Let's first copy btv->init at fh, holding cap.vb_lock, and then work 3222 - * with the rest of init, holding btv->lock. 3223 - */ 3224 - mutex_lock(&fh->cap.vb_lock); 3225 3296 *fh = btv->init; 3226 - mutex_unlock(&fh->cap.vb_lock); 3227 3297 3228 3298 fh->type = type; 3229 3299 fh->ov.setup_ok = 0; 3230 3300 3231 - mutex_lock(&btv->lock); 3232 3301 v4l2_prio_open(&btv->prio, &fh->prio); 3233 3302 3234 3303 videobuf_queue_sg_init(&fh->cap, &bttv_video_qops, ··· 3224 3317 V4L2_BUF_TYPE_VIDEO_CAPTURE, 3225 3318 V4L2_FIELD_INTERLACED, 3226 3319 sizeof(struct bttv_buffer), 3227 - fh, NULL); 3320 + fh, &btv->lock); 3228 3321 videobuf_queue_sg_init(&fh->vbi, &bttv_vbi_qops, 3229 3322 &btv->c.pci->dev, &btv->s_lock, 3230 3323 V4L2_BUF_TYPE_VBI_CAPTURE, 3231 3324 V4L2_FIELD_SEQ_TB, 3232 3325 sizeof(struct bttv_buffer), 3233 - fh, NULL); 3326 + fh, &btv->lock); 3234 3327 set_tvnorm(btv,btv->tvnorm); 3235 3328 set_input(btv, btv->input, btv->tvnorm); 3236 3329 ··· 3253 3346 bttv_vbi_fmt_reset(&fh->vbi_fmt, btv->tvnorm); 3254 3347 3255 3348 bttv_field_count(btv); 3256 - mutex_unlock(&btv->lock); 3257 3349 return 0; 3258 3350 } 3259 3351 ··· 3261 3355 struct bttv_fh *fh = file->private_data; 3262 3356 struct bttv *btv = fh->btv; 3263 3357 3264 - mutex_lock(&btv->lock); 3265 3358 /* turn off overlay */ 3266 3359 if (check_btres(fh, RESOURCE_OVERLAY)) 3267 3360 bttv_switch_overlay(btv,fh,NULL); ··· 3286 3381 3287 3382 /* free stuff */ 3288 3383 3289 - /* 3290 - * videobuf uses cap.vb_lock - we should avoid holding btv->lock, 3291 - * otherwise we may have dead lock conditions 3292 - */ 3293 - mutex_unlock(&btv->lock); 3294 3384 videobuf_mmap_free(&fh->cap); 3295 3385 videobuf_mmap_free(&fh->vbi); 3296 - mutex_lock(&btv->lock); 3297 3386 v4l2_prio_close(&btv->prio, fh->prio); 3298 3387 file->private_data = NULL; 3299 3388 kfree(fh); ··· 3297 3398 3298 3399 if (!btv->users) 3299 3400 audio_mute(btv, 1); 3300 - mutex_unlock(&btv->lock); 3301 3401 3302 3402 return 0; 3303 
3403 } ··· 3400 3502 if (unlikely(!fh)) 3401 3503 return -ENOMEM; 3402 3504 file->private_data = fh; 3403 - mutex_lock(&fh->cap.vb_lock); 3404 3505 *fh = btv->init; 3405 - mutex_unlock(&fh->cap.vb_lock); 3406 3506 3407 - mutex_lock(&btv->lock); 3408 3507 v4l2_prio_open(&btv->prio, &fh->prio); 3409 3508 3410 3509 btv->radio_user++; ··· 3409 3514 bttv_call_all(btv, tuner, s_radio); 3410 3515 audio_input(btv,TVAUDIO_INPUT_RADIO); 3411 3516 3412 - mutex_unlock(&btv->lock); 3413 3517 return 0; 3414 3518 } 3415 3519 ··· 3418 3524 struct bttv *btv = fh->btv; 3419 3525 struct rds_command cmd; 3420 3526 3421 - mutex_lock(&btv->lock); 3422 3527 v4l2_prio_close(&btv->prio, fh->prio); 3423 3528 file->private_data = NULL; 3424 3529 kfree(fh); ··· 3425 3532 btv->radio_user--; 3426 3533 3427 3534 bttv_call_all(btv, core, ioctl, RDS_CMD_CLOSE, &cmd); 3428 - mutex_unlock(&btv->lock); 3429 3535 3430 3536 return 0; 3431 3537 } ··· 3453 3561 return -EINVAL; 3454 3562 if (0 != t->index) 3455 3563 return -EINVAL; 3456 - mutex_lock(&btv->lock); 3457 3564 strcpy(t->name, "Radio"); 3458 3565 t->type = V4L2_TUNER_RADIO; 3459 3566 ··· 3460 3569 3461 3570 if (btv->audio_mode_gpio) 3462 3571 btv->audio_mode_gpio(btv, t, 0); 3463 - 3464 - mutex_unlock(&btv->lock); 3465 3572 3466 3573 return 0; 3467 3574 } ··· 3581 3692 .open = radio_open, 3582 3693 .read = radio_read, 3583 3694 .release = radio_release, 3584 - .ioctl = video_ioctl2, 3695 + .unlocked_ioctl = video_ioctl2, 3585 3696 .poll = radio_poll, 3586 3697 }; 3587 3698
+1 -1
drivers/media/video/bw-qcam.c
··· 860 860 861 861 static const struct v4l2_file_operations qcam_fops = { 862 862 .owner = THIS_MODULE, 863 - .ioctl = video_ioctl2, 863 + .unlocked_ioctl = video_ioctl2, 864 864 .read = qcam_read, 865 865 }; 866 866
+1 -1
drivers/media/video/c-qcam.c
··· 718 718 719 719 static const struct v4l2_file_operations qcam_fops = { 720 720 .owner = THIS_MODULE, 721 - .ioctl = video_ioctl2, 721 + .unlocked_ioctl = video_ioctl2, 722 722 .read = qcam_read, 723 723 }; 724 724
+1 -1
drivers/media/video/cafe_ccic.c
··· 1775 1775 .read = cafe_v4l_read, 1776 1776 .poll = cafe_v4l_poll, 1777 1777 .mmap = cafe_v4l_mmap, 1778 - .ioctl = video_ioctl2, 1778 + .unlocked_ioctl = video_ioctl2, 1779 1779 }; 1780 1780 1781 1781 static const struct v4l2_ioctl_ops cafe_v4l_ioctl_ops = {
+7 -1
drivers/media/video/cx18/cx18-alsa-pcm.c
··· 218 218 static int snd_cx18_pcm_ioctl(struct snd_pcm_substream *substream, 219 219 unsigned int cmd, void *arg) 220 220 { 221 - return snd_pcm_lib_ioctl(substream, cmd, arg); 221 + struct snd_cx18_card *cxsc = snd_pcm_substream_chip(substream); 222 + int ret; 223 + 224 + snd_cx18_lock(cxsc); 225 + ret = snd_pcm_lib_ioctl(substream, cmd, arg); 226 + snd_cx18_unlock(cxsc); 227 + return ret; 222 228 } 223 229 224 230
+1 -1
drivers/media/video/cx18/cx18-streams.c
··· 41 41 .read = cx18_v4l2_read, 42 42 .open = cx18_v4l2_open, 43 43 /* FIXME change to video_ioctl2 if serialization lock can be removed */ 44 - .ioctl = cx18_v4l2_ioctl, 44 + .unlocked_ioctl = cx18_v4l2_ioctl, 45 45 .release = cx18_v4l2_close, 46 46 .poll = cx18_v4l2_enc_poll, 47 47 };
+1 -1
drivers/media/video/et61x251/et61x251_core.c
··· 2530 2530 .owner = THIS_MODULE, 2531 2531 .open = et61x251_open, 2532 2532 .release = et61x251_release, 2533 - .ioctl = et61x251_ioctl, 2533 + .unlocked_ioctl = et61x251_ioctl, 2534 2534 .read = et61x251_read, 2535 2535 .poll = et61x251_poll, 2536 2536 .mmap = et61x251_mmap,
+184 -232
drivers/media/video/gspca/sonixj.c
··· 63 63 #define QUALITY_DEF 80 64 64 u8 jpegqual; /* webcam quality */ 65 65 66 + u8 reg01; 67 + u8 reg17; 66 68 u8 reg18; 69 + u8 flags; 67 70 68 71 s8 ag_cnt; 69 72 #define AG_CNT_START 13 ··· 98 95 SENSOR_SOI768, 99 96 SENSOR_SP80708, 100 97 }; 98 + 99 + /* device flags */ 100 + #define PDN_INV 1 /* inverse pin S_PWR_DN / sn_xxx tables */ 101 + 102 + /* sn9c1xx definitions */ 103 + /* register 0x01 */ 104 + #define S_PWR_DN 0x01 /* sensor power down */ 105 + #define S_PDN_INV 0x02 /* inverse pin S_PWR_DN */ 106 + #define V_TX_EN 0x04 /* video transfer enable */ 107 + #define LED 0x08 /* output to pin LED */ 108 + #define SCL_SEL_OD 0x20 /* open-drain mode */ 109 + #define SYS_SEL_48M 0x40 /* system clock 0: 24MHz, 1: 48MHz */ 110 + /* register 0x17 */ 111 + #define MCK_SIZE_MASK 0x1f /* sensor master clock */ 112 + #define SEN_CLK_EN 0x20 /* enable sensor clock */ 113 + #define DEF_EN 0x80 /* defect pixel by 0: soft, 1: hard */ 101 114 102 115 /* V4L2 controls supported by the driver */ 103 116 static void setbrightness(struct gspca_dev *gspca_dev); ··· 1774 1755 } 1775 1756 } 1776 1757 1777 - static void bridge_init(struct gspca_dev *gspca_dev, 1778 - const u8 *sn9c1xx) 1779 - { 1780 - struct sd *sd = (struct sd *) gspca_dev; 1781 - u8 reg0102[2]; 1782 - const u8 *reg9a; 1783 - static const u8 reg9a_def[] = 1784 - {0x00, 0x40, 0x20, 0x00, 0x00, 0x00}; 1785 - static const u8 reg9a_spec[] = 1786 - {0x00, 0x40, 0x38, 0x30, 0x00, 0x20}; 1787 - static const u8 regd4[] = {0x60, 0x00, 0x00}; 1788 - 1789 - /* sensor clock already enabled in sd_init */ 1790 - /* reg_w1(gspca_dev, 0xf1, 0x00); */ 1791 - reg_w1(gspca_dev, 0x01, sn9c1xx[1]); 1792 - 1793 - /* configure gpio */ 1794 - reg0102[0] = sn9c1xx[1]; 1795 - reg0102[1] = sn9c1xx[2]; 1796 - if (gspca_dev->audio) 1797 - reg0102[1] |= 0x04; /* keep the audio connection */ 1798 - reg_w(gspca_dev, 0x01, reg0102, 2); 1799 - reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2); 1800 - reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5); 1801 - 
switch (sd->sensor) { 1802 - case SENSOR_GC0307: 1803 - case SENSOR_OV7660: 1804 - case SENSOR_PO1030: 1805 - case SENSOR_PO2030N: 1806 - case SENSOR_SOI768: 1807 - case SENSOR_SP80708: 1808 - reg9a = reg9a_spec; 1809 - break; 1810 - default: 1811 - reg9a = reg9a_def; 1812 - break; 1813 - } 1814 - reg_w(gspca_dev, 0x9a, reg9a, 6); 1815 - 1816 - reg_w(gspca_dev, 0xd4, regd4, sizeof regd4); 1817 - 1818 - reg_w(gspca_dev, 0x03, &sn9c1xx[3], 0x0f); 1819 - 1820 - switch (sd->sensor) { 1821 - case SENSOR_ADCM1700: 1822 - reg_w1(gspca_dev, 0x01, 0x43); 1823 - reg_w1(gspca_dev, 0x17, 0x62); 1824 - reg_w1(gspca_dev, 0x01, 0x42); 1825 - reg_w1(gspca_dev, 0x01, 0x42); 1826 - break; 1827 - case SENSOR_GC0307: 1828 - msleep(50); 1829 - reg_w1(gspca_dev, 0x01, 0x61); 1830 - reg_w1(gspca_dev, 0x17, 0x22); 1831 - reg_w1(gspca_dev, 0x01, 0x60); 1832 - reg_w1(gspca_dev, 0x01, 0x40); 1833 - msleep(50); 1834 - break; 1835 - case SENSOR_MI0360B: 1836 - reg_w1(gspca_dev, 0x01, 0x61); 1837 - reg_w1(gspca_dev, 0x17, 0x60); 1838 - reg_w1(gspca_dev, 0x01, 0x60); 1839 - reg_w1(gspca_dev, 0x01, 0x40); 1840 - break; 1841 - case SENSOR_MT9V111: 1842 - reg_w1(gspca_dev, 0x01, 0x61); 1843 - reg_w1(gspca_dev, 0x17, 0x61); 1844 - reg_w1(gspca_dev, 0x01, 0x60); 1845 - reg_w1(gspca_dev, 0x01, 0x40); 1846 - break; 1847 - case SENSOR_OM6802: 1848 - msleep(10); 1849 - reg_w1(gspca_dev, 0x02, 0x73); 1850 - reg_w1(gspca_dev, 0x17, 0x60); 1851 - reg_w1(gspca_dev, 0x01, 0x22); 1852 - msleep(100); 1853 - reg_w1(gspca_dev, 0x01, 0x62); 1854 - reg_w1(gspca_dev, 0x17, 0x64); 1855 - reg_w1(gspca_dev, 0x17, 0x64); 1856 - reg_w1(gspca_dev, 0x01, 0x42); 1857 - msleep(10); 1858 - reg_w1(gspca_dev, 0x01, 0x42); 1859 - i2c_w8(gspca_dev, om6802_init0[0]); 1860 - i2c_w8(gspca_dev, om6802_init0[1]); 1861 - msleep(15); 1862 - reg_w1(gspca_dev, 0x02, 0x71); 1863 - msleep(150); 1864 - break; 1865 - case SENSOR_OV7630: 1866 - reg_w1(gspca_dev, 0x01, 0x61); 1867 - reg_w1(gspca_dev, 0x17, 0xe2); 1868 - reg_w1(gspca_dev, 0x01, 
0x60); 1869 - reg_w1(gspca_dev, 0x01, 0x40); 1870 - break; 1871 - case SENSOR_OV7648: 1872 - reg_w1(gspca_dev, 0x01, 0x63); 1873 - reg_w1(gspca_dev, 0x17, 0x20); 1874 - reg_w1(gspca_dev, 0x01, 0x62); 1875 - reg_w1(gspca_dev, 0x01, 0x42); 1876 - break; 1877 - case SENSOR_PO1030: 1878 - case SENSOR_SOI768: 1879 - reg_w1(gspca_dev, 0x01, 0x61); 1880 - reg_w1(gspca_dev, 0x17, 0x20); 1881 - reg_w1(gspca_dev, 0x01, 0x60); 1882 - reg_w1(gspca_dev, 0x01, 0x40); 1883 - break; 1884 - case SENSOR_PO2030N: 1885 - case SENSOR_OV7660: 1886 - reg_w1(gspca_dev, 0x01, 0x63); 1887 - reg_w1(gspca_dev, 0x17, 0x20); 1888 - reg_w1(gspca_dev, 0x01, 0x62); 1889 - reg_w1(gspca_dev, 0x01, 0x42); 1890 - break; 1891 - case SENSOR_SP80708: 1892 - reg_w1(gspca_dev, 0x01, 0x63); 1893 - reg_w1(gspca_dev, 0x17, 0x20); 1894 - reg_w1(gspca_dev, 0x01, 0x62); 1895 - reg_w1(gspca_dev, 0x01, 0x42); 1896 - msleep(100); 1897 - reg_w1(gspca_dev, 0x02, 0x62); 1898 - break; 1899 - default: 1900 - /* case SENSOR_HV7131R: */ 1901 - /* case SENSOR_MI0360: */ 1902 - /* case SENSOR_MO4000: */ 1903 - reg_w1(gspca_dev, 0x01, 0x43); 1904 - reg_w1(gspca_dev, 0x17, 0x61); 1905 - reg_w1(gspca_dev, 0x01, 0x42); 1906 - if (sd->sensor == SENSOR_HV7131R) 1907 - hv7131r_probe(gspca_dev); 1908 - break; 1909 - } 1910 - } 1911 - 1912 1758 /* this function is called at probe time */ 1913 1759 static int sd_config(struct gspca_dev *gspca_dev, 1914 1760 const struct usb_device_id *id) ··· 1782 1898 struct cam *cam; 1783 1899 1784 1900 sd->bridge = id->driver_info >> 16; 1785 - sd->sensor = id->driver_info; 1901 + sd->sensor = id->driver_info >> 8; 1902 + sd->flags = id->driver_info; 1786 1903 1787 1904 cam = &gspca_dev->cam; 1788 1905 if (sd->sensor == SENSOR_ADCM1700) { ··· 1814 1929 /* setup a selector by bridge */ 1815 1930 reg_w1(gspca_dev, 0xf1, 0x01); 1816 1931 reg_r(gspca_dev, 0x00, 1); 1817 - reg_w1(gspca_dev, 0xf1, gspca_dev->usb_buf[0]); 1932 + reg_w1(gspca_dev, 0xf1, 0x00); 1818 1933 reg_r(gspca_dev, 0x00, 1); /* get 
sonix chip id */ 1819 1934 regF1 = gspca_dev->usb_buf[0]; 1820 1935 if (gspca_dev->usb_err < 0) ··· 2308 2423 { 2309 2424 struct sd *sd = (struct sd *) gspca_dev; 2310 2425 int i; 2311 - u8 reg1, reg17; 2426 + u8 reg01, reg17; 2427 + u8 reg0102[2]; 2312 2428 const u8 *sn9c1xx; 2313 2429 const u8 (*init)[8]; 2430 + const u8 *reg9a; 2314 2431 int mode; 2432 + static const u8 reg9a_def[] = 2433 + {0x00, 0x40, 0x20, 0x00, 0x00, 0x00}; 2434 + static const u8 reg9a_spec[] = 2435 + {0x00, 0x40, 0x38, 0x30, 0x00, 0x20}; 2436 + static const u8 regd4[] = {0x60, 0x00, 0x00}; 2315 2437 static const u8 C0[] = { 0x2d, 0x2d, 0x3a, 0x05, 0x04, 0x3f }; 2316 2438 static const u8 CA[] = { 0x28, 0xd8, 0x14, 0xec }; 2317 2439 static const u8 CA_adcm1700[] = ··· 2340 2448 2341 2449 /* initialize the bridge */ 2342 2450 sn9c1xx = sn_tb[sd->sensor]; 2343 - bridge_init(gspca_dev, sn9c1xx); 2451 + 2452 + /* sensor clock already enabled in sd_init */ 2453 + /* reg_w1(gspca_dev, 0xf1, 0x00); */ 2454 + reg01 = sn9c1xx[1]; 2455 + if (sd->flags & PDN_INV) 2456 + reg01 ^= S_PDN_INV; /* power down inverted */ 2457 + reg_w1(gspca_dev, 0x01, reg01); 2458 + 2459 + /* configure gpio */ 2460 + reg0102[0] = reg01; 2461 + reg0102[1] = sn9c1xx[2]; 2462 + if (gspca_dev->audio) 2463 + reg0102[1] |= 0x04; /* keep the audio connection */ 2464 + reg_w(gspca_dev, 0x01, reg0102, 2); 2465 + reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2); 2466 + reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5); 2467 + switch (sd->sensor) { 2468 + case SENSOR_GC0307: 2469 + case SENSOR_OV7660: 2470 + case SENSOR_PO1030: 2471 + case SENSOR_PO2030N: 2472 + case SENSOR_SOI768: 2473 + case SENSOR_SP80708: 2474 + reg9a = reg9a_spec; 2475 + break; 2476 + default: 2477 + reg9a = reg9a_def; 2478 + break; 2479 + } 2480 + reg_w(gspca_dev, 0x9a, reg9a, 6); 2481 + 2482 + reg_w(gspca_dev, 0xd4, regd4, sizeof regd4); 2483 + 2484 + reg_w(gspca_dev, 0x03, &sn9c1xx[3], 0x0f); 2485 + 2486 + reg17 = sn9c1xx[0x17]; 2487 + switch (sd->sensor) { 2488 + case 
SENSOR_GC0307: 2489 + msleep(50); /*fixme: is it useful? */ 2490 + break; 2491 + case SENSOR_OM6802: 2492 + msleep(10); 2493 + reg_w1(gspca_dev, 0x02, 0x73); 2494 + reg17 |= SEN_CLK_EN; 2495 + reg_w1(gspca_dev, 0x17, reg17); 2496 + reg_w1(gspca_dev, 0x01, 0x22); 2497 + msleep(100); 2498 + reg01 = SCL_SEL_OD | S_PDN_INV; 2499 + reg17 &= MCK_SIZE_MASK; 2500 + reg17 |= 0x04; /* clock / 4 */ 2501 + break; 2502 + } 2503 + reg01 |= SYS_SEL_48M; 2504 + reg_w1(gspca_dev, 0x01, reg01); 2505 + reg17 |= SEN_CLK_EN; 2506 + reg_w1(gspca_dev, 0x17, reg17); 2507 + reg01 &= ~S_PWR_DN; /* sensor power on */ 2508 + reg_w1(gspca_dev, 0x01, reg01); 2509 + reg01 &= ~SYS_SEL_48M; 2510 + reg_w1(gspca_dev, 0x01, reg01); 2511 + 2512 + switch (sd->sensor) { 2513 + case SENSOR_HV7131R: 2514 + hv7131r_probe(gspca_dev); /*fixme: is it useful? */ 2515 + break; 2516 + case SENSOR_OM6802: 2517 + msleep(10); 2518 + reg_w1(gspca_dev, 0x01, reg01); 2519 + i2c_w8(gspca_dev, om6802_init0[0]); 2520 + i2c_w8(gspca_dev, om6802_init0[1]); 2521 + msleep(15); 2522 + reg_w1(gspca_dev, 0x02, 0x71); 2523 + msleep(150); 2524 + break; 2525 + case SENSOR_SP80708: 2526 + msleep(100); 2527 + reg_w1(gspca_dev, 0x02, 0x62); 2528 + break; 2529 + } 2344 2530 2345 2531 /* initialize the sensor */ 2346 2532 i2c_w_seq(gspca_dev, sensor_init[sd->sensor]); ··· 2446 2476 } 2447 2477 reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]); 2448 2478 switch (sd->sensor) { 2449 - case SENSOR_GC0307: 2450 - reg17 = 0xa2; 2451 - break; 2452 - case SENSOR_MT9V111: 2453 - case SENSOR_MI0360B: 2454 - reg17 = 0xe0; 2455 - break; 2456 - case SENSOR_ADCM1700: 2457 - case SENSOR_OV7630: 2458 - reg17 = 0xe2; 2459 - break; 2460 - case SENSOR_OV7648: 2461 - reg17 = 0x20; 2462 - break; 2463 - case SENSOR_OV7660: 2464 - case SENSOR_SOI768: 2465 - reg17 = 0xa0; 2466 - break; 2467 - case SENSOR_PO1030: 2468 - case SENSOR_PO2030N: 2469 - reg17 = 0xa0; 2479 + case SENSOR_OM6802: 2480 + /* case SENSOR_OV7648: * fixme: sometimes */ 2470 2481 break; 2471 2482 
default: 2472 - reg17 = 0x60; 2483 + reg17 |= DEF_EN; 2473 2484 break; 2474 2485 } 2475 2486 reg_w1(gspca_dev, 0x17, reg17); ··· 2497 2546 2498 2547 init = NULL; 2499 2548 mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; 2500 - if (mode) 2501 - reg1 = 0x46; /* 320x240: clk 48Mhz, video trf enable */ 2502 - else 2503 - reg1 = 0x06; /* 640x480: clk 24Mhz, video trf enable */ 2504 - reg17 = 0x61; /* 0x:20: enable sensor clock */ 2549 + reg01 |= SYS_SEL_48M | V_TX_EN; 2550 + reg17 &= ~MCK_SIZE_MASK; 2551 + reg17 |= 0x02; /* clock / 2 */ 2505 2552 switch (sd->sensor) { 2506 2553 case SENSOR_ADCM1700: 2507 2554 init = adcm1700_sensor_param1; 2508 - reg1 = 0x46; 2509 - reg17 = 0xe2; 2510 2555 break; 2511 2556 case SENSOR_GC0307: 2512 2557 init = gc0307_sensor_param1; 2513 - reg17 = 0xa2; 2514 - reg1 = 0x44; 2558 + break; 2559 + case SENSOR_HV7131R: 2560 + case SENSOR_MI0360: 2561 + if (mode) 2562 + reg01 |= SYS_SEL_48M; /* 320x240: clk 48Mhz */ 2563 + else 2564 + reg01 &= ~SYS_SEL_48M; /* 640x480: clk 24Mhz */ 2565 + reg17 &= ~MCK_SIZE_MASK; 2566 + reg17 |= 0x01; /* clock / 1 */ 2515 2567 break; 2516 2568 case SENSOR_MI0360B: 2517 2569 init = mi0360b_sensor_param1; 2518 - reg1 &= ~0x02; /* don't inverse pin S_PWR_DN */ 2519 - reg17 = 0xe2; 2520 2570 break; 2521 2571 case SENSOR_MO4000: 2522 - if (mode) { 2523 - /* reg1 = 0x46; * 320 clk 48Mhz 60fp/s */ 2524 - reg1 = 0x06; /* clk 24Mz */ 2525 - } else { 2526 - reg17 = 0x22; /* 640 MCKSIZE */ 2527 - /* reg1 = 0x06; * 640 clk 24Mz (done) */ 2572 + if (mode) { /* if 320x240 */ 2573 + reg01 &= ~SYS_SEL_48M; /* clk 24Mz */ 2574 + reg17 &= ~MCK_SIZE_MASK; 2575 + reg17 |= 0x01; /* clock / 1 */ 2528 2576 } 2529 2577 break; 2530 2578 case SENSOR_MT9V111: 2531 2579 init = mt9v111_sensor_param1; 2532 - if (mode) { 2533 - reg1 = 0x04; /* 320 clk 48Mhz */ 2534 - } else { 2535 - /* reg1 = 0x06; * 640 clk 24Mz (done) */ 2536 - reg17 = 0xc2; 2537 - } 2538 2580 break; 2539 2581 case SENSOR_OM6802: 2540 2582 init = 
om6802_sensor_param1; 2541 - reg17 = 0x64; /* 640 MCKSIZE */ 2583 + if (!mode) { /* if 640x480 */ 2584 + reg17 &= ~MCK_SIZE_MASK; 2585 + reg17 |= 0x01; /* clock / 4 */ 2586 + } 2542 2587 break; 2543 2588 case SENSOR_OV7630: 2544 2589 init = ov7630_sensor_param1; 2545 - reg17 = 0xe2; 2546 - reg1 = 0x44; 2547 2590 break; 2548 2591 case SENSOR_OV7648: 2549 2592 init = ov7648_sensor_param1; 2550 - reg17 = 0x21; 2551 - /* reg1 = 0x42; * 42 - 46? */ 2593 + reg17 &= ~MCK_SIZE_MASK; 2594 + reg17 |= 0x01; /* clock / 1 */ 2552 2595 break; 2553 2596 case SENSOR_OV7660: 2554 2597 init = ov7660_sensor_param1; 2555 - if (sd->bridge == BRIDGE_SN9C120) { 2556 - if (mode) { /* 320x240 - 160x120 */ 2557 - reg17 = 0xa2; 2558 - reg1 = 0x44; /* 48 Mhz, video trf eneble */ 2559 - } 2560 - } else { 2561 - reg17 = 0x22; 2562 - reg1 = 0x06; /* 24 Mhz, video trf eneble 2563 - * inverse power down */ 2564 - } 2565 2598 break; 2566 2599 case SENSOR_PO1030: 2567 2600 init = po1030_sensor_param1; 2568 - reg17 = 0xa2; 2569 - reg1 = 0x44; 2570 2601 break; 2571 2602 case SENSOR_PO2030N: 2572 2603 init = po2030n_sensor_param1; 2573 - reg1 = 0x46; 2574 - reg17 = 0xa2; 2575 2604 break; 2576 2605 case SENSOR_SOI768: 2577 2606 init = soi768_sensor_param1; 2578 - reg1 = 0x44; 2579 - reg17 = 0xa2; 2580 2607 break; 2581 2608 case SENSOR_SP80708: 2582 2609 init = sp80708_sensor_param1; 2583 - if (mode) { 2584 - /*?? 
reg1 = 0x04; * 320 clk 48Mhz */ 2585 - } else { 2586 - reg1 = 0x46; /* 640 clk 48Mz */ 2587 - reg17 = 0xa2; 2588 - } 2589 2610 break; 2590 2611 } 2591 2612 ··· 2607 2684 setjpegqual(gspca_dev); 2608 2685 2609 2686 reg_w1(gspca_dev, 0x17, reg17); 2610 - reg_w1(gspca_dev, 0x01, reg1); 2687 + reg_w1(gspca_dev, 0x01, reg01); 2688 + sd->reg01 = reg01; 2689 + sd->reg17 = reg17; 2611 2690 2612 2691 sethvflip(gspca_dev); 2613 2692 setbrightness(gspca_dev); ··· 2631 2706 { 0xa1, 0x21, 0x76, 0x20, 0x00, 0x00, 0x00, 0x10 }; 2632 2707 static const u8 stopsoi768[] = 2633 2708 { 0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10 }; 2634 - u8 data; 2635 - const u8 *sn9c1xx; 2709 + u8 reg01; 2710 + u8 reg17; 2636 2711 2637 - data = 0x0b; 2712 + reg01 = sd->reg01; 2713 + reg17 = sd->reg17 & ~SEN_CLK_EN; 2638 2714 switch (sd->sensor) { 2715 + case SENSOR_ADCM1700: 2639 2716 case SENSOR_GC0307: 2640 - data = 0x29; 2717 + case SENSOR_PO2030N: 2718 + case SENSOR_SP80708: 2719 + reg01 |= LED; 2720 + reg_w1(gspca_dev, 0x01, reg01); 2721 + reg01 &= ~(LED | V_TX_EN); 2722 + reg_w1(gspca_dev, 0x01, reg01); 2723 + /* reg_w1(gspca_dev, 0x02, 0x??); * LED off ? */ 2641 2724 break; 2642 2725 case SENSOR_HV7131R: 2726 + reg01 &= ~V_TX_EN; 2727 + reg_w1(gspca_dev, 0x01, reg01); 2643 2728 i2c_w8(gspca_dev, stophv7131); 2644 - data = 0x2b; 2645 2729 break; 2646 2730 case SENSOR_MI0360: 2647 2731 case SENSOR_MI0360B: 2732 + reg01 &= ~V_TX_EN; 2733 + reg_w1(gspca_dev, 0x01, reg01); 2734 + /* reg_w1(gspca_dev, 0x02, 0x40); * LED off ? 
*/ 2648 2735 i2c_w8(gspca_dev, stopmi0360); 2649 - data = 0x29; 2650 2736 break; 2651 - case SENSOR_OV7648: 2652 - i2c_w8(gspca_dev, stopov7648); 2653 - /* fall thru */ 2654 2737 case SENSOR_MT9V111: 2655 - case SENSOR_OV7630: 2738 + case SENSOR_OM6802: 2656 2739 case SENSOR_PO1030: 2657 - data = 0x29; 2740 + reg01 &= ~V_TX_EN; 2741 + reg_w1(gspca_dev, 0x01, reg01); 2742 + break; 2743 + case SENSOR_OV7630: 2744 + case SENSOR_OV7648: 2745 + reg01 &= ~V_TX_EN; 2746 + reg_w1(gspca_dev, 0x01, reg01); 2747 + i2c_w8(gspca_dev, stopov7648); 2748 + break; 2749 + case SENSOR_OV7660: 2750 + reg01 &= ~V_TX_EN; 2751 + reg_w1(gspca_dev, 0x01, reg01); 2658 2752 break; 2659 2753 case SENSOR_SOI768: 2660 2754 i2c_w8(gspca_dev, stopsoi768); 2661 - data = 0x29; 2662 2755 break; 2663 2756 } 2664 - sn9c1xx = sn_tb[sd->sensor]; 2665 - reg_w1(gspca_dev, 0x01, sn9c1xx[1]); 2666 - reg_w1(gspca_dev, 0x17, sn9c1xx[0x17]); 2667 - reg_w1(gspca_dev, 0x01, sn9c1xx[1]); 2668 - reg_w1(gspca_dev, 0x01, data); 2757 + 2758 + reg01 |= SCL_SEL_OD; 2759 + reg_w1(gspca_dev, 0x01, reg01); 2760 + reg01 |= S_PWR_DN; /* sensor power down */ 2761 + reg_w1(gspca_dev, 0x01, reg01); 2762 + reg_w1(gspca_dev, 0x17, reg17); 2763 + reg01 &= ~SYS_SEL_48M; /* clock 24MHz */ 2764 + reg_w1(gspca_dev, 0x01, reg01); 2765 + reg01 |= LED; 2766 + reg_w1(gspca_dev, 0x01, reg01); 2669 2767 /* Don't disable sensor clock as that disables the button on the cam */ 2670 2768 /* reg_w1(gspca_dev, 0xf1, 0x01); */ 2671 2769 } ··· 2902 2954 /* -- module initialisation -- */ 2903 2955 #define BS(bridge, sensor) \ 2904 2956 .driver_info = (BRIDGE_ ## bridge << 16) \ 2905 - | SENSOR_ ## sensor 2957 + | (SENSOR_ ## sensor << 8) 2958 + #define BSF(bridge, sensor, flags) \ 2959 + .driver_info = (BRIDGE_ ## bridge << 16) \ 2960 + | (SENSOR_ ## sensor << 8) \ 2961 + | (flags) 2906 2962 static const __devinitdata struct usb_device_id device_table[] = { 2907 2963 #if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE 2908 2964 
{USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)}, 2909 2965 {USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)}, 2910 2966 #endif 2911 - {USB_DEVICE(0x045e, 0x00f5), BS(SN9C105, OV7660)}, 2912 - {USB_DEVICE(0x045e, 0x00f7), BS(SN9C105, OV7660)}, 2967 + {USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, PDN_INV)}, 2968 + {USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, PDN_INV)}, 2913 2969 {USB_DEVICE(0x0471, 0x0327), BS(SN9C105, MI0360)}, 2914 2970 {USB_DEVICE(0x0471, 0x0328), BS(SN9C105, MI0360)}, 2915 2971 {USB_DEVICE(0x0471, 0x0330), BS(SN9C105, MI0360)},
+7 -7
drivers/media/video/meye.c
··· 1659 1659 .open = meye_open, 1660 1660 .release = meye_release, 1661 1661 .mmap = meye_mmap, 1662 - .ioctl = video_ioctl2, 1662 + .unlocked_ioctl = video_ioctl2, 1663 1663 .poll = meye_poll, 1664 1664 }; 1665 1665 ··· 1831 1831 msleep(1); 1832 1832 mchip_set(MCHIP_MM_INTA, MCHIP_MM_INTA_HIC_1_MASK); 1833 1833 1834 - if (video_register_device(meye.vdev, VFL_TYPE_GRABBER, 1835 - video_nr) < 0) { 1836 - v4l2_err(v4l2_dev, "video_register_device failed\n"); 1837 - goto outvideoreg; 1838 - } 1839 - 1840 1834 mutex_init(&meye.lock); 1841 1835 init_waitqueue_head(&meye.proc_list); 1842 1836 meye.brightness = 32 << 10; ··· 1851 1857 sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERASHARPNESS, 32); 1852 1858 sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAPICTURE, 0); 1853 1859 sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAAGC, 48); 1860 + 1861 + if (video_register_device(meye.vdev, VFL_TYPE_GRABBER, 1862 + video_nr) < 0) { 1863 + v4l2_err(v4l2_dev, "video_register_device failed\n"); 1864 + goto outvideoreg; 1865 + } 1854 1866 1855 1867 v4l2_info(v4l2_dev, "Motion Eye Camera Driver v%s.\n", 1856 1868 MEYE_DRIVER_VERSION);
+1 -1
drivers/media/video/pms.c
··· 932 932 933 933 static const struct v4l2_file_operations pms_fops = { 934 934 .owner = THIS_MODULE, 935 - .ioctl = video_ioctl2, 935 + .unlocked_ioctl = video_ioctl2, 936 936 .read = pms_read, 937 937 }; 938 938
+8 -5
drivers/media/video/sh_vou.c
··· 75 75 int pix_idx; 76 76 struct videobuf_buffer *active; 77 77 enum sh_vou_status status; 78 + struct mutex fop_lock; 78 79 }; 79 80 80 81 struct sh_vou_file { ··· 236 235 vb->state = VIDEOBUF_NEEDS_INIT; 237 236 } 238 237 239 - /* Locking: caller holds vq->vb_lock mutex */ 238 + /* Locking: caller holds fop_lock mutex */ 240 239 static int sh_vou_buf_setup(struct videobuf_queue *vq, unsigned int *count, 241 240 unsigned int *size) 242 241 { ··· 258 257 return 0; 259 258 } 260 259 261 - /* Locking: caller holds vq->vb_lock mutex */ 260 + /* Locking: caller holds fop_lock mutex */ 262 261 static int sh_vou_buf_prepare(struct videobuf_queue *vq, 263 262 struct videobuf_buffer *vb, 264 263 enum v4l2_field field) ··· 307 306 return 0; 308 307 } 309 308 310 - /* Locking: caller holds vq->vb_lock mutex and vq->irqlock spinlock */ 309 + /* Locking: caller holds fop_lock mutex and vq->irqlock spinlock */ 311 310 static void sh_vou_buf_queue(struct videobuf_queue *vq, 312 311 struct videobuf_buffer *vb) 313 312 { ··· 1191 1190 V4L2_BUF_TYPE_VIDEO_OUTPUT, 1192 1191 V4L2_FIELD_NONE, 1193 1192 sizeof(struct videobuf_buffer), vdev, 1194 - NULL); 1193 + &vou_dev->fop_lock); 1195 1194 1196 1195 return 0; 1197 1196 } ··· 1293 1292 .owner = THIS_MODULE, 1294 1293 .open = sh_vou_open, 1295 1294 .release = sh_vou_release, 1296 - .ioctl = video_ioctl2, 1295 + .unlocked_ioctl = video_ioctl2, 1297 1296 .mmap = sh_vou_mmap, 1298 1297 .poll = sh_vou_poll, 1299 1298 }; ··· 1332 1331 1333 1332 INIT_LIST_HEAD(&vou_dev->queue); 1334 1333 spin_lock_init(&vou_dev->lock); 1334 + mutex_init(&vou_dev->fop_lock); 1335 1335 atomic_set(&vou_dev->use_count, 0); 1336 1336 vou_dev->pdata = vou_pdata; 1337 1337 vou_dev->status = SH_VOU_IDLE; ··· 1390 1388 vdev->tvnorms |= V4L2_STD_PAL; 1391 1389 vdev->v4l2_dev = &vou_dev->v4l2_dev; 1392 1390 vdev->release = video_device_release; 1391 + vdev->lock = &vou_dev->fop_lock; 1393 1392 1394 1393 vou_dev->vdev = vdev; 1395 1394 video_set_drvdata(vdev, 
vou_dev);
+1 -1
drivers/media/video/sn9c102/sn9c102_core.c
··· 3238 3238 .owner = THIS_MODULE, 3239 3239 .open = sn9c102_open, 3240 3240 .release = sn9c102_release, 3241 - .ioctl = sn9c102_ioctl, 3241 + .unlocked_ioctl = sn9c102_ioctl, 3242 3242 .read = sn9c102_read, 3243 3243 .poll = sn9c102_poll, 3244 3244 .mmap = sn9c102_mmap,
+47 -1
drivers/media/video/uvc/uvc_ctrl.c
··· 785 785 } 786 786 } 787 787 788 - struct uvc_control *uvc_find_control(struct uvc_video_chain *chain, 788 + static struct uvc_control *uvc_find_control(struct uvc_video_chain *chain, 789 789 __u32 v4l2_id, struct uvc_control_mapping **mapping) 790 790 { 791 791 struct uvc_control *ctrl = NULL; ··· 938 938 if (ctrl->info.flags & UVC_CONTROL_GET_RES) 939 939 v4l2_ctrl->step = mapping->get(mapping, UVC_GET_RES, 940 940 uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES)); 941 + 942 + done: 943 + mutex_unlock(&chain->ctrl_mutex); 944 + return ret; 945 + } 946 + 947 + /* 948 + * Mapping V4L2 controls to UVC controls can be straighforward if done well. 949 + * Most of the UVC controls exist in V4L2, and can be mapped directly. Some 950 + * must be grouped (for instance the Red Balance, Blue Balance and Do White 951 + * Balance V4L2 controls use the White Balance Component UVC control) or 952 + * otherwise translated. The approach we take here is to use a translation 953 + * table for the controls that can be mapped directly, and handle the others 954 + * manually. 
955 + */ 956 + int uvc_query_v4l2_menu(struct uvc_video_chain *chain, 957 + struct v4l2_querymenu *query_menu) 958 + { 959 + struct uvc_menu_info *menu_info; 960 + struct uvc_control_mapping *mapping; 961 + struct uvc_control *ctrl; 962 + u32 index = query_menu->index; 963 + u32 id = query_menu->id; 964 + int ret; 965 + 966 + memset(query_menu, 0, sizeof(*query_menu)); 967 + query_menu->id = id; 968 + query_menu->index = index; 969 + 970 + ret = mutex_lock_interruptible(&chain->ctrl_mutex); 971 + if (ret < 0) 972 + return -ERESTARTSYS; 973 + 974 + ctrl = uvc_find_control(chain, query_menu->id, &mapping); 975 + if (ctrl == NULL || mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) { 976 + ret = -EINVAL; 977 + goto done; 978 + } 979 + 980 + if (query_menu->index >= mapping->menu_count) { 981 + ret = -EINVAL; 982 + goto done; 983 + } 984 + 985 + menu_info = &mapping->menu_info[query_menu->index]; 986 + strlcpy(query_menu->name, menu_info->name, sizeof query_menu->name); 941 987 942 988 done: 943 989 mutex_unlock(&chain->ctrl_mutex);
+110 -23
drivers/media/video/uvc/uvc_queue.c
··· 90 90 } 91 91 92 92 /* 93 + * Free the video buffers. 94 + * 95 + * This function must be called with the queue lock held. 96 + */ 97 + static int __uvc_free_buffers(struct uvc_video_queue *queue) 98 + { 99 + unsigned int i; 100 + 101 + for (i = 0; i < queue->count; ++i) { 102 + if (queue->buffer[i].vma_use_count != 0) 103 + return -EBUSY; 104 + } 105 + 106 + if (queue->count) { 107 + vfree(queue->mem); 108 + queue->count = 0; 109 + } 110 + 111 + return 0; 112 + } 113 + 114 + int uvc_free_buffers(struct uvc_video_queue *queue) 115 + { 116 + int ret; 117 + 118 + mutex_lock(&queue->mutex); 119 + ret = __uvc_free_buffers(queue); 120 + mutex_unlock(&queue->mutex); 121 + 122 + return ret; 123 + } 124 + 125 + /* 93 126 * Allocate the video buffers. 94 127 * 95 128 * Pages are reserved to make sure they will not be swapped, as they will be ··· 143 110 144 111 mutex_lock(&queue->mutex); 145 112 146 - if ((ret = uvc_free_buffers(queue)) < 0) 113 + if ((ret = __uvc_free_buffers(queue)) < 0) 147 114 goto done; 148 115 149 116 /* Bail out if no buffers should be allocated. */ ··· 182 149 done: 183 150 mutex_unlock(&queue->mutex); 184 151 return ret; 185 - } 186 - 187 - /* 188 - * Free the video buffers. 189 - * 190 - * This function must be called with the queue lock held. 191 - */ 192 - int uvc_free_buffers(struct uvc_video_queue *queue) 193 - { 194 - unsigned int i; 195 - 196 - for (i = 0; i < queue->count; ++i) { 197 - if (queue->buffer[i].vma_use_count != 0) 198 - return -EBUSY; 199 - } 200 - 201 - if (queue->count) { 202 - vfree(queue->mem); 203 - queue->count = 0; 204 - } 205 - 206 - return 0; 207 152 } 208 153 209 154 /* ··· 373 362 374 363 list_del(&buf->stream); 375 364 __uvc_query_buffer(buf, v4l2_buf); 365 + 366 + done: 367 + mutex_unlock(&queue->mutex); 368 + return ret; 369 + } 370 + 371 + /* 372 + * VMA operations. 
373 + */ 374 + static void uvc_vm_open(struct vm_area_struct *vma) 375 + { 376 + struct uvc_buffer *buffer = vma->vm_private_data; 377 + buffer->vma_use_count++; 378 + } 379 + 380 + static void uvc_vm_close(struct vm_area_struct *vma) 381 + { 382 + struct uvc_buffer *buffer = vma->vm_private_data; 383 + buffer->vma_use_count--; 384 + } 385 + 386 + static const struct vm_operations_struct uvc_vm_ops = { 387 + .open = uvc_vm_open, 388 + .close = uvc_vm_close, 389 + }; 390 + 391 + /* 392 + * Memory-map a video buffer. 393 + * 394 + * This function implements video buffers memory mapping and is intended to be 395 + * used by the device mmap handler. 396 + */ 397 + int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma) 398 + { 399 + struct uvc_buffer *uninitialized_var(buffer); 400 + struct page *page; 401 + unsigned long addr, start, size; 402 + unsigned int i; 403 + int ret = 0; 404 + 405 + start = vma->vm_start; 406 + size = vma->vm_end - vma->vm_start; 407 + 408 + mutex_lock(&queue->mutex); 409 + 410 + for (i = 0; i < queue->count; ++i) { 411 + buffer = &queue->buffer[i]; 412 + if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff) 413 + break; 414 + } 415 + 416 + if (i == queue->count || size != queue->buf_size) { 417 + ret = -EINVAL; 418 + goto done; 419 + } 420 + 421 + /* 422 + * VM_IO marks the area as being an mmaped region for I/O to a 423 + * device. It also prevents the region from being core dumped. 424 + */ 425 + vma->vm_flags |= VM_IO; 426 + 427 + addr = (unsigned long)queue->mem + buffer->buf.m.offset; 428 + while (size > 0) { 429 + page = vmalloc_to_page((void *)addr); 430 + if ((ret = vm_insert_page(vma, start, page)) < 0) 431 + goto done; 432 + 433 + start += PAGE_SIZE; 434 + addr += PAGE_SIZE; 435 + size -= PAGE_SIZE; 436 + } 437 + 438 + vma->vm_ops = &uvc_vm_ops; 439 + vma->vm_private_data = buffer; 440 + uvc_vm_open(vma); 376 441 377 442 done: 378 443 mutex_unlock(&queue->mutex);
+58 -127
drivers/media/video/uvc/uvc_v4l2.c
··· 101 101 */ 102 102 103 103 /* 104 - * Mapping V4L2 controls to UVC controls can be straighforward if done well. 105 - * Most of the UVC controls exist in V4L2, and can be mapped directly. Some 106 - * must be grouped (for instance the Red Balance, Blue Balance and Do White 107 - * Balance V4L2 controls use the White Balance Component UVC control) or 108 - * otherwise translated. The approach we take here is to use a translation 109 - * table for the controls that can be mapped directly, and handle the others 110 - * manually. 111 - */ 112 - static int uvc_v4l2_query_menu(struct uvc_video_chain *chain, 113 - struct v4l2_querymenu *query_menu) 114 - { 115 - struct uvc_menu_info *menu_info; 116 - struct uvc_control_mapping *mapping; 117 - struct uvc_control *ctrl; 118 - u32 index = query_menu->index; 119 - u32 id = query_menu->id; 120 - 121 - ctrl = uvc_find_control(chain, query_menu->id, &mapping); 122 - if (ctrl == NULL || mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) 123 - return -EINVAL; 124 - 125 - if (query_menu->index >= mapping->menu_count) 126 - return -EINVAL; 127 - 128 - memset(query_menu, 0, sizeof(*query_menu)); 129 - query_menu->id = id; 130 - query_menu->index = index; 131 - 132 - menu_info = &mapping->menu_info[query_menu->index]; 133 - strlcpy(query_menu->name, menu_info->name, sizeof query_menu->name); 134 - return 0; 135 - } 136 - 137 - /* 138 104 * Find the frame interval closest to the requested frame interval for the 139 105 * given frame format and size. This should be done by the device as part of 140 106 * the Video Probe and Commit negotiation, but some hardware don't implement ··· 226 260 * developers test their webcams with the Linux driver as well as with 227 261 * the Windows driver). 228 262 */ 263 + mutex_lock(&stream->mutex); 229 264 if (stream->dev->quirks & UVC_QUIRK_PROBE_EXTRAFIELDS) 230 265 probe->dwMaxVideoFrameSize = 231 266 stream->ctrl.dwMaxVideoFrameSize; 232 267 233 268 /* Probe the device. 
*/ 234 269 ret = uvc_probe_video(stream, probe); 270 + mutex_unlock(&stream->mutex); 235 271 if (ret < 0) 236 272 goto done; 237 273 ··· 257 289 static int uvc_v4l2_get_format(struct uvc_streaming *stream, 258 290 struct v4l2_format *fmt) 259 291 { 260 - struct uvc_format *format = stream->cur_format; 261 - struct uvc_frame *frame = stream->cur_frame; 292 + struct uvc_format *format; 293 + struct uvc_frame *frame; 294 + int ret = 0; 262 295 263 296 if (fmt->type != stream->type) 264 297 return -EINVAL; 265 298 266 - if (format == NULL || frame == NULL) 267 - return -EINVAL; 299 + mutex_lock(&stream->mutex); 300 + format = stream->cur_format; 301 + frame = stream->cur_frame; 302 + 303 + if (format == NULL || frame == NULL) { 304 + ret = -EINVAL; 305 + goto done; 306 + } 268 307 269 308 fmt->fmt.pix.pixelformat = format->fcc; 270 309 fmt->fmt.pix.width = frame->wWidth; ··· 282 307 fmt->fmt.pix.colorspace = format->colorspace; 283 308 fmt->fmt.pix.priv = 0; 284 309 285 - return 0; 310 + done: 311 + mutex_unlock(&stream->mutex); 312 + return ret; 286 313 } 287 314 288 315 static int uvc_v4l2_set_format(struct uvc_streaming *stream, ··· 298 321 if (fmt->type != stream->type) 299 322 return -EINVAL; 300 323 301 - if (uvc_queue_allocated(&stream->queue)) 302 - return -EBUSY; 303 - 304 324 ret = uvc_v4l2_try_format(stream, fmt, &probe, &format, &frame); 305 325 if (ret < 0) 306 326 return ret; 327 + 328 + mutex_lock(&stream->mutex); 329 + 330 + if (uvc_queue_allocated(&stream->queue)) { 331 + ret = -EBUSY; 332 + goto done; 333 + } 307 334 308 335 memcpy(&stream->ctrl, &probe, sizeof probe); 309 336 stream->cur_format = format; 310 337 stream->cur_frame = frame; 311 338 312 - return 0; 339 + done: 340 + mutex_unlock(&stream->mutex); 341 + return ret; 313 342 } 314 343 315 344 static int uvc_v4l2_get_streamparm(struct uvc_streaming *stream, ··· 326 343 if (parm->type != stream->type) 327 344 return -EINVAL; 328 345 346 + mutex_lock(&stream->mutex); 329 347 numerator = 
stream->ctrl.dwFrameInterval; 348 + mutex_unlock(&stream->mutex); 349 + 330 350 denominator = 10000000; 331 351 uvc_simplify_fraction(&numerator, &denominator, 8, 333); 332 352 ··· 356 370 static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream, 357 371 struct v4l2_streamparm *parm) 358 372 { 359 - struct uvc_frame *frame = stream->cur_frame; 360 373 struct uvc_streaming_control probe; 361 374 struct v4l2_fract timeperframe; 362 375 uint32_t interval; ··· 364 379 if (parm->type != stream->type) 365 380 return -EINVAL; 366 381 367 - if (uvc_queue_streaming(&stream->queue)) 368 - return -EBUSY; 369 - 370 382 if (parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 371 383 timeperframe = parm->parm.capture.timeperframe; 372 384 else 373 385 timeperframe = parm->parm.output.timeperframe; 374 386 375 - memcpy(&probe, &stream->ctrl, sizeof probe); 376 387 interval = uvc_fraction_to_interval(timeperframe.numerator, 377 388 timeperframe.denominator); 378 - 379 389 uvc_trace(UVC_TRACE_FORMAT, "Setting frame interval to %u/%u (%u).\n", 380 390 timeperframe.numerator, timeperframe.denominator, interval); 381 - probe.dwFrameInterval = uvc_try_frame_interval(frame, interval); 391 + 392 + mutex_lock(&stream->mutex); 393 + 394 + if (uvc_queue_streaming(&stream->queue)) { 395 + mutex_unlock(&stream->mutex); 396 + return -EBUSY; 397 + } 398 + 399 + memcpy(&probe, &stream->ctrl, sizeof probe); 400 + probe.dwFrameInterval = 401 + uvc_try_frame_interval(stream->cur_frame, interval); 382 402 383 403 /* Probe the device with the new settings. */ 384 404 ret = uvc_probe_video(stream, &probe); 385 - if (ret < 0) 405 + if (ret < 0) { 406 + mutex_unlock(&stream->mutex); 386 407 return ret; 408 + } 387 409 388 410 memcpy(&stream->ctrl, &probe, sizeof probe); 411 + mutex_unlock(&stream->mutex); 389 412 390 413 /* Return the actual frame period. 
*/ 391 414 timeperframe.numerator = probe.dwFrameInterval; ··· 521 528 if (uvc_has_privileges(handle)) { 522 529 uvc_video_enable(stream, 0); 523 530 524 - mutex_lock(&stream->queue.mutex); 525 531 if (uvc_free_buffers(&stream->queue) < 0) 526 532 uvc_printk(KERN_ERR, "uvc_v4l2_release: Unable to " 527 533 "free buffers.\n"); 528 - mutex_unlock(&stream->queue.mutex); 529 534 } 530 535 531 536 /* Release the file handle. */ ··· 615 624 } 616 625 617 626 case VIDIOC_QUERYMENU: 618 - return uvc_v4l2_query_menu(chain, arg); 627 + return uvc_query_v4l2_menu(chain, arg); 619 628 620 629 case VIDIOC_G_EXT_CTRLS: 621 630 { ··· 896 905 case VIDIOC_CROPCAP: 897 906 { 898 907 struct v4l2_cropcap *ccap = arg; 899 - struct uvc_frame *frame = stream->cur_frame; 900 908 901 909 if (ccap->type != stream->type) 902 910 return -EINVAL; 903 911 904 912 ccap->bounds.left = 0; 905 913 ccap->bounds.top = 0; 906 - ccap->bounds.width = frame->wWidth; 907 - ccap->bounds.height = frame->wHeight; 914 + 915 + mutex_lock(&stream->mutex); 916 + ccap->bounds.width = stream->cur_frame->wWidth; 917 + ccap->bounds.height = stream->cur_frame->wHeight; 918 + mutex_unlock(&stream->mutex); 908 919 909 920 ccap->defrect = ccap->bounds; 910 921 ··· 923 930 case VIDIOC_REQBUFS: 924 931 { 925 932 struct v4l2_requestbuffers *rb = arg; 926 - unsigned int bufsize = 927 - stream->ctrl.dwMaxVideoFrameSize; 928 933 929 934 if (rb->type != stream->type || 930 935 rb->memory != V4L2_MEMORY_MMAP) ··· 931 940 if ((ret = uvc_acquire_privileges(handle)) < 0) 932 941 return ret; 933 942 934 - ret = uvc_alloc_buffers(&stream->queue, rb->count, bufsize); 943 + mutex_lock(&stream->mutex); 944 + ret = uvc_alloc_buffers(&stream->queue, rb->count, 945 + stream->ctrl.dwMaxVideoFrameSize); 946 + mutex_unlock(&stream->mutex); 935 947 if (ret < 0) 936 948 return ret; 937 949 ··· 982 988 if (!uvc_has_privileges(handle)) 983 989 return -EBUSY; 984 990 991 + mutex_lock(&stream->mutex); 985 992 ret = uvc_video_enable(stream, 1); 993 
+ mutex_unlock(&stream->mutex); 986 994 if (ret < 0) 987 995 return ret; 988 996 break; ··· 1064 1068 return -EINVAL; 1065 1069 } 1066 1070 1067 - /* 1068 - * VMA operations. 1069 - */ 1070 - static void uvc_vm_open(struct vm_area_struct *vma) 1071 - { 1072 - struct uvc_buffer *buffer = vma->vm_private_data; 1073 - buffer->vma_use_count++; 1074 - } 1075 - 1076 - static void uvc_vm_close(struct vm_area_struct *vma) 1077 - { 1078 - struct uvc_buffer *buffer = vma->vm_private_data; 1079 - buffer->vma_use_count--; 1080 - } 1081 - 1082 - static const struct vm_operations_struct uvc_vm_ops = { 1083 - .open = uvc_vm_open, 1084 - .close = uvc_vm_close, 1085 - }; 1086 - 1087 1071 static int uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma) 1088 1072 { 1089 1073 struct uvc_fh *handle = file->private_data; 1090 1074 struct uvc_streaming *stream = handle->stream; 1091 - struct uvc_video_queue *queue = &stream->queue; 1092 - struct uvc_buffer *uninitialized_var(buffer); 1093 - struct page *page; 1094 - unsigned long addr, start, size; 1095 - unsigned int i; 1096 - int ret = 0; 1097 1075 1098 1076 uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_mmap\n"); 1099 1077 1100 - start = vma->vm_start; 1101 - size = vma->vm_end - vma->vm_start; 1102 - 1103 - mutex_lock(&queue->mutex); 1104 - 1105 - for (i = 0; i < queue->count; ++i) { 1106 - buffer = &queue->buffer[i]; 1107 - if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff) 1108 - break; 1109 - } 1110 - 1111 - if (i == queue->count || size != queue->buf_size) { 1112 - ret = -EINVAL; 1113 - goto done; 1114 - } 1115 - 1116 - /* 1117 - * VM_IO marks the area as being an mmaped region for I/O to a 1118 - * device. It also prevents the region from being core dumped. 
1119 - */ 1120 - vma->vm_flags |= VM_IO; 1121 - 1122 - addr = (unsigned long)queue->mem + buffer->buf.m.offset; 1123 - while (size > 0) { 1124 - page = vmalloc_to_page((void *)addr); 1125 - if ((ret = vm_insert_page(vma, start, page)) < 0) 1126 - goto done; 1127 - 1128 - start += PAGE_SIZE; 1129 - addr += PAGE_SIZE; 1130 - size -= PAGE_SIZE; 1131 - } 1132 - 1133 - vma->vm_ops = &uvc_vm_ops; 1134 - vma->vm_private_data = buffer; 1135 - uvc_vm_open(vma); 1136 - 1137 - done: 1138 - mutex_unlock(&queue->mutex); 1139 - return ret; 1078 + return uvc_queue_mmap(&stream->queue, vma); 1140 1079 } 1141 1080 1142 1081 static unsigned int uvc_v4l2_poll(struct file *file, poll_table *wait) ··· 1088 1157 .owner = THIS_MODULE, 1089 1158 .open = uvc_v4l2_open, 1090 1159 .release = uvc_v4l2_release, 1091 - .ioctl = uvc_v4l2_ioctl, 1160 + .unlocked_ioctl = uvc_v4l2_ioctl, 1092 1161 .read = uvc_v4l2_read, 1093 1162 .mmap = uvc_v4l2_mmap, 1094 1163 .poll = uvc_v4l2_poll,
-3
drivers/media/video/uvc/uvc_video.c
··· 293 293 unsigned int i; 294 294 int ret; 295 295 296 - mutex_lock(&stream->mutex); 297 - 298 296 /* Perform probing. The device should adjust the requested values 299 297 * according to its capabilities. However, some devices, namely the 300 298 * first generation UVC Logitech webcams, don't implement the Video ··· 344 346 } 345 347 346 348 done: 347 - mutex_unlock(&stream->mutex); 348 349 return ret; 349 350 } 350 351
+7 -3
drivers/media/video/uvc/uvcvideo.h
··· 436 436 struct uvc_streaming_control ctrl; 437 437 struct uvc_format *cur_format; 438 438 struct uvc_frame *cur_frame; 439 - 439 + /* Protect access to ctrl, cur_format, cur_frame and hardware video 440 + * probe control. 441 + */ 440 442 struct mutex mutex; 441 443 442 444 unsigned int frozen : 1; ··· 576 574 extern void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect); 577 575 extern struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue, 578 576 struct uvc_buffer *buf); 577 + extern int uvc_queue_mmap(struct uvc_video_queue *queue, 578 + struct vm_area_struct *vma); 579 579 extern unsigned int uvc_queue_poll(struct uvc_video_queue *queue, 580 580 struct file *file, poll_table *wait); 581 581 extern int uvc_queue_allocated(struct uvc_video_queue *queue); ··· 610 606 extern int uvc_status_resume(struct uvc_device *dev); 611 607 612 608 /* Controls */ 613 - extern struct uvc_control *uvc_find_control(struct uvc_video_chain *chain, 614 - __u32 v4l2_id, struct uvc_control_mapping **mapping); 615 609 extern int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain, 616 610 struct v4l2_queryctrl *v4l2_ctrl); 611 + extern int uvc_query_v4l2_menu(struct uvc_video_chain *chain, 612 + struct v4l2_querymenu *query_menu); 617 613 618 614 extern int uvc_ctrl_add_mapping(struct uvc_video_chain *chain, 619 615 const struct uvc_control_mapping *mapping);
+51 -18
drivers/media/video/v4l2-dev.c
··· 186 186 size_t sz, loff_t *off) 187 187 { 188 188 struct video_device *vdev = video_devdata(filp); 189 - int ret = -EIO; 189 + int ret = -ENODEV; 190 190 191 191 if (!vdev->fops->read) 192 192 return -EINVAL; 193 - if (vdev->lock) 194 - mutex_lock(vdev->lock); 193 + if (vdev->lock && mutex_lock_interruptible(vdev->lock)) 194 + return -ERESTARTSYS; 195 195 if (video_is_registered(vdev)) 196 196 ret = vdev->fops->read(filp, buf, sz, off); 197 197 if (vdev->lock) ··· 203 203 size_t sz, loff_t *off) 204 204 { 205 205 struct video_device *vdev = video_devdata(filp); 206 - int ret = -EIO; 206 + int ret = -ENODEV; 207 207 208 208 if (!vdev->fops->write) 209 209 return -EINVAL; 210 - if (vdev->lock) 211 - mutex_lock(vdev->lock); 210 + if (vdev->lock && mutex_lock_interruptible(vdev->lock)) 211 + return -ERESTARTSYS; 212 212 if (video_is_registered(vdev)) 213 213 ret = vdev->fops->write(filp, buf, sz, off); 214 214 if (vdev->lock) ··· 219 219 static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll) 220 220 { 221 221 struct video_device *vdev = video_devdata(filp); 222 - int ret = DEFAULT_POLLMASK; 222 + int ret = POLLERR | POLLHUP; 223 223 224 224 if (!vdev->fops->poll) 225 - return ret; 225 + return DEFAULT_POLLMASK; 226 226 if (vdev->lock) 227 227 mutex_lock(vdev->lock); 228 228 if (video_is_registered(vdev)) ··· 238 238 int ret = -ENODEV; 239 239 240 240 if (vdev->fops->unlocked_ioctl) { 241 - if (vdev->lock) 242 - mutex_lock(vdev->lock); 241 + if (vdev->lock && mutex_lock_interruptible(vdev->lock)) 242 + return -ERESTARTSYS; 243 243 if (video_is_registered(vdev)) 244 244 ret = vdev->fops->unlocked_ioctl(filp, cmd, arg); 245 245 if (vdev->lock) 246 246 mutex_unlock(vdev->lock); 247 247 } else if (vdev->fops->ioctl) { 248 - /* TODO: convert all drivers to unlocked_ioctl */ 248 + /* This code path is a replacement for the BKL. 
It is a major 249 + * hack but it will have to do for those drivers that are not 250 + * yet converted to use unlocked_ioctl. 251 + * 252 + * There are two options: if the driver implements struct 253 + * v4l2_device, then the lock defined there is used to 254 + * serialize the ioctls. Otherwise the v4l2 core lock defined 255 + * below is used. This lock is really bad since it serializes 256 + * completely independent devices. 257 + * 258 + * Both variants suffer from the same problem: if the driver 259 + * sleeps, then it blocks all ioctls since the lock is still 260 + * held. This is very common for VIDIOC_DQBUF since that 261 + * normally waits for a frame to arrive. As a result any other 262 + * ioctl calls will proceed very, very slowly since each call 263 + * will have to wait for the VIDIOC_QBUF to finish. Things that 264 + * should take 0.01s may now take 10-20 seconds. 265 + * 266 + * The workaround is to *not* take the lock for VIDIOC_DQBUF. 267 + * This actually works OK for videobuf-based drivers, since 268 + * videobuf will take its own internal lock. 269 + */ 249 270 static DEFINE_MUTEX(v4l2_ioctl_mutex); 271 + struct mutex *m = vdev->v4l2_dev ? 
272 + &vdev->v4l2_dev->ioctl_lock : &v4l2_ioctl_mutex; 250 273 251 - mutex_lock(&v4l2_ioctl_mutex); 274 + if (cmd != VIDIOC_DQBUF && mutex_lock_interruptible(m)) 275 + return -ERESTARTSYS; 252 276 if (video_is_registered(vdev)) 253 277 ret = vdev->fops->ioctl(filp, cmd, arg); 254 - mutex_unlock(&v4l2_ioctl_mutex); 278 + if (cmd != VIDIOC_DQBUF) 279 + mutex_unlock(m); 255 280 } else 256 281 ret = -ENOTTY; 257 282 ··· 290 265 291 266 if (!vdev->fops->mmap) 292 267 return ret; 293 - if (vdev->lock) 294 - mutex_lock(vdev->lock); 268 + if (vdev->lock && mutex_lock_interruptible(vdev->lock)) 269 + return -ERESTARTSYS; 295 270 if (video_is_registered(vdev)) 296 271 ret = vdev->fops->mmap(filp, vm); 297 272 if (vdev->lock) ··· 309 284 mutex_lock(&videodev_lock); 310 285 vdev = video_devdata(filp); 311 286 /* return ENODEV if the video device has already been removed. */ 312 - if (vdev == NULL) { 287 + if (vdev == NULL || !video_is_registered(vdev)) { 313 288 mutex_unlock(&videodev_lock); 314 289 return -ENODEV; 315 290 } ··· 317 292 video_get(vdev); 318 293 mutex_unlock(&videodev_lock); 319 294 if (vdev->fops->open) { 320 - if (vdev->lock) 321 - mutex_lock(vdev->lock); 295 + if (vdev->lock && mutex_lock_interruptible(vdev->lock)) { 296 + ret = -ERESTARTSYS; 297 + goto err; 298 + } 322 299 if (video_is_registered(vdev)) 323 300 ret = vdev->fops->open(filp); 324 301 else ··· 329 302 mutex_unlock(vdev->lock); 330 303 } 331 304 305 + err: 332 306 /* decrease the refcount in case of an error */ 333 307 if (ret) 334 308 video_put(vdev); ··· 624 596 if (!vdev || !video_is_registered(vdev)) 625 597 return; 626 598 599 + mutex_lock(&videodev_lock); 600 + /* This must be in a critical section to prevent a race with v4l2_open. 601 + * Once this bit has been cleared video_get may never be called again. 
602 + */ 627 603 clear_bit(V4L2_FL_REGISTERED, &vdev->flags); 604 + mutex_unlock(&videodev_lock); 628 605 device_unregister(&vdev->dev); 629 606 } 630 607 EXPORT_SYMBOL(video_unregister_device);
+1
drivers/media/video/v4l2-device.c
··· 35 35 36 36 INIT_LIST_HEAD(&v4l2_dev->subdevs); 37 37 spin_lock_init(&v4l2_dev->lock); 38 + mutex_init(&v4l2_dev->ioctl_lock); 38 39 v4l2_dev->dev = dev; 39 40 if (dev == NULL) { 40 41 /* If dev == NULL, then name must be filled in by the caller */
+1 -1
drivers/media/video/w9966.c
··· 815 815 816 816 static const struct v4l2_file_operations w9966_fops = { 817 817 .owner = THIS_MODULE, 818 - .ioctl = video_ioctl2, 818 + .unlocked_ioctl = video_ioctl2, 819 819 .read = w9966_v4l_read, 820 820 }; 821 821
+1 -1
drivers/mtd/maps/pxa2xx-flash.c
··· 51 51 static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; 52 52 53 53 54 - static int __init pxa2xx_flash_probe(struct platform_device *pdev) 54 + static int __devinit pxa2xx_flash_probe(struct platform_device *pdev) 55 55 { 56 56 struct flash_platform_data *flash = pdev->dev.platform_data; 57 57 struct pxa2xx_flash_info *info;
-1
drivers/mtd/nand/omap2.c
··· 7 7 * it under the terms of the GNU General Public License version 2 as 8 8 * published by the Free Software Foundation. 9 9 */ 10 - #define CONFIG_MTD_NAND_OMAP_HWECC 11 10 12 11 #include <linux/platform_device.h> 13 12 #include <linux/dma-mapping.h>
+5 -6
drivers/net/b44.c
··· 381 381 __b44_set_flow_ctrl(bp, pause_enab); 382 382 } 383 383 384 - #ifdef SSB_DRIVER_MIPS 385 - extern char *nvram_get(char *name); 384 + #ifdef CONFIG_BCM47XX 385 + #include <asm/mach-bcm47xx/nvram.h> 386 386 static void b44_wap54g10_workaround(struct b44 *bp) 387 387 { 388 - const char *str; 388 + char buf[20]; 389 389 u32 val; 390 390 int err; 391 391 ··· 394 394 * see https://dev.openwrt.org/ticket/146 395 395 * check and reset bit "isolate" 396 396 */ 397 - str = nvram_get("boardnum"); 398 - if (!str) 397 + if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0) 399 398 return; 400 - if (simple_strtoul(str, NULL, 0) == 2) { 399 + if (simple_strtoul(buf, NULL, 0) == 2) { 401 400 err = __b44_readphy(bp, 0, MII_BMCR, &val); 402 401 if (err) 403 402 goto error;
+1 -1
drivers/net/benet/be_cmds.c
··· 1235 1235 1236 1236 i = 0; 1237 1237 netdev_for_each_mc_addr(ha, netdev) 1238 - memcpy(req->mac[i].byte, ha->addr, ETH_ALEN); 1238 + memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN); 1239 1239 } else { 1240 1240 req->promiscuous = 1; 1241 1241 }
+2 -2
drivers/net/bnx2x/bnx2x.h
··· 20 20 * (you will need to reboot afterwards) */ 21 21 /* #define BNX2X_STOP_ON_ERROR */ 22 22 23 - #define DRV_MODULE_VERSION "1.60.00-4" 24 - #define DRV_MODULE_RELDATE "2010/11/01" 23 + #define DRV_MODULE_VERSION "1.60.01-0" 24 + #define DRV_MODULE_RELDATE "2010/11/12" 25 25 #define BNX2X_BC_VER 0x040200 26 26 27 27 #define BNX2X_MULTI_QUEUE
+26 -16
drivers/net/bnx2x/bnx2x_cmn.c
··· 1782 1782 } 1783 1783 #endif 1784 1784 1785 - static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, 1786 - struct eth_tx_parse_bd_e2 *pbd, 1787 - u32 xmit_type) 1785 + static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, 1786 + u32 xmit_type) 1788 1787 { 1789 - pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) << 1790 - ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT; 1788 + *parsing_data |= (skb_shinfo(skb)->gso_size << 1789 + ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & 1790 + ETH_TX_PARSE_BD_E2_LSO_MSS; 1791 1791 if ((xmit_type & XMIT_GSO_V6) && 1792 1792 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) 1793 - pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; 1793 + *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; 1794 1794 } 1795 1795 1796 1796 /** ··· 1835 1835 * @return header len 1836 1836 */ 1837 1837 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, 1838 - struct eth_tx_parse_bd_e2 *pbd, 1839 - u32 xmit_type) 1838 + u32 *parsing_data, u32 xmit_type) 1840 1839 { 1841 - pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) << 1842 - ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT; 1840 + *parsing_data |= ((tcp_hdrlen(skb)/4) << 1841 + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 1842 + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; 1843 1843 1844 - pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) - 1845 - skb->data) / 2) << 1846 - ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT; 1844 + *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) << 1845 + ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & 1846 + ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; 1847 1847 1848 1848 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; 1849 1849 } ··· 1912 1912 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; 1913 1913 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; 1914 1914 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; 1915 + u32 pbd_e2_parsing_data = 0; 1915 1916 u16 pkt_prod, bd_prod; 1916 1917 int nbd, 
fp_index; 1917 1918 dma_addr_t mapping; ··· 2034 2033 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 2035 2034 /* Set PBD in checksum offload case */ 2036 2035 if (xmit_type & XMIT_CSUM) 2037 - hlen = bnx2x_set_pbd_csum_e2(bp, 2038 - skb, pbd_e2, xmit_type); 2036 + hlen = bnx2x_set_pbd_csum_e2(bp, skb, 2037 + &pbd_e2_parsing_data, 2038 + xmit_type); 2039 2039 } else { 2040 2040 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x; 2041 2041 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); ··· 2078 2076 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, 2079 2077 hlen, bd_prod, ++nbd); 2080 2078 if (CHIP_IS_E2(bp)) 2081 - bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type); 2079 + bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, 2080 + xmit_type); 2082 2081 else 2083 2082 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); 2084 2083 } 2084 + 2085 + /* Set the PBD's parsing_data field if not zero 2086 + * (for the chips newer than 57711). 2087 + */ 2088 + if (pbd_e2_parsing_data) 2089 + pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data); 2090 + 2085 2091 tx_data_bd = (struct eth_tx_bd *)tx_start_bd; 2086 2092 2087 2093 /* Handle fragmented skb */
+2 -2
drivers/net/bnx2x/bnx2x_init_ops.h
··· 838 838 /**************************************************************************** 839 839 * SRC initializations 840 840 ****************************************************************************/ 841 - 841 + #ifdef BCM_CNIC 842 842 /* called during init func stage */ 843 843 static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, 844 844 dma_addr_t t2_mapping, int src_cid_count) ··· 862 862 U64_HI((u64)t2_mapping + 863 863 (src_cid_count-1) * sizeof(struct src_ent))); 864 864 } 865 - 865 + #endif 866 866 #endif /* BNX2X_INIT_OPS_H */
+6 -13
drivers/net/bonding/bond_main.c
··· 171 171 /*----------------------------- Global variables ----------------------------*/ 172 172 173 173 #ifdef CONFIG_NET_POLL_CONTROLLER 174 - cpumask_var_t netpoll_block_tx; 174 + atomic_t netpoll_block_tx = ATOMIC_INIT(0); 175 175 #endif 176 176 177 177 static const char * const version = ··· 1576 1576 1577 1577 /* If this is the first slave, then we need to set the master's hardware 1578 1578 * address to be the same as the slave's. */ 1579 - if (bond->slave_cnt == 0) 1579 + if (is_zero_ether_addr(bond->dev->dev_addr)) 1580 1580 memcpy(bond->dev->dev_addr, slave_dev->dev_addr, 1581 1581 slave_dev->addr_len); 1582 1582 ··· 5299 5299 if (res) 5300 5300 goto out; 5301 5301 5302 - #ifdef CONFIG_NET_POLL_CONTROLLER 5303 - if (!alloc_cpumask_var(&netpoll_block_tx, GFP_KERNEL)) { 5304 - res = -ENOMEM; 5305 - goto out; 5306 - } 5307 - #endif 5308 - 5309 5302 res = register_pernet_subsys(&bond_net_ops); 5310 5303 if (res) 5311 5304 goto out; ··· 5327 5334 rtnl_link_unregister(&bond_link_ops); 5328 5335 err_link: 5329 5336 unregister_pernet_subsys(&bond_net_ops); 5330 - #ifdef CONFIG_NET_POLL_CONTROLLER 5331 - free_cpumask_var(netpoll_block_tx); 5332 - #endif 5333 5337 goto out; 5334 5338 5335 5339 } ··· 5343 5353 unregister_pernet_subsys(&bond_net_ops); 5344 5354 5345 5355 #ifdef CONFIG_NET_POLL_CONTROLLER 5346 - free_cpumask_var(netpoll_block_tx); 5356 + /* 5357 + * Make sure we don't have an imbalance on our netpoll blocking 5358 + */ 5359 + WARN_ON(atomic_read(&netpoll_block_tx)); 5347 5360 #endif 5348 5361 } 5349 5362
+4 -8
drivers/net/bonding/bonding.h
··· 119 119 120 120 121 121 #ifdef CONFIG_NET_POLL_CONTROLLER 122 - extern cpumask_var_t netpoll_block_tx; 122 + extern atomic_t netpoll_block_tx; 123 123 124 124 static inline void block_netpoll_tx(void) 125 125 { 126 - preempt_disable(); 127 - BUG_ON(cpumask_test_and_set_cpu(smp_processor_id(), 128 - netpoll_block_tx)); 126 + atomic_inc(&netpoll_block_tx); 129 127 } 130 128 131 129 static inline void unblock_netpoll_tx(void) 132 130 { 133 - BUG_ON(!cpumask_test_and_clear_cpu(smp_processor_id(), 134 - netpoll_block_tx)); 135 - preempt_enable(); 131 + atomic_dec(&netpoll_block_tx); 136 132 } 137 133 138 134 static inline int is_netpoll_tx_blocked(struct net_device *dev) 139 135 { 140 136 if (unlikely(dev->priv_flags & IFF_IN_NETPOLL)) 141 - return cpumask_test_cpu(smp_processor_id(), netpoll_block_tx); 137 + return atomic_read(&netpoll_block_tx); 142 138 return 0; 143 139 } 144 140 #else
+1 -1
drivers/net/caif/caif_shm_u5500.c
··· 5 5 * License terms: GNU General Public License (GPL) version 2 6 6 */ 7 7 8 - #define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt 8 + #define pr_fmt(fmt) KBUILD_MODNAME ":" fmt 9 9 10 10 #include <linux/version.h> 11 11 #include <linux/init.h>
+1 -1
drivers/net/caif/caif_shmcore.c
··· 6 6 * License terms: GNU General Public License (GPL) version 2 7 7 */ 8 8 9 - #define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt 9 + #define pr_fmt(fmt) KBUILD_MODNAME ":" fmt 10 10 11 11 #include <linux/spinlock.h> 12 12 #include <linux/sched.h>
+1 -1
drivers/net/cxgb4/t4_hw.c
··· 2408 2408 if (index < NEXACT_MAC) 2409 2409 ret++; 2410 2410 else if (hash) 2411 - *hash |= (1 << hash_mac_addr(addr[i])); 2411 + *hash |= (1ULL << hash_mac_addr(addr[i])); 2412 2412 } 2413 2413 return ret; 2414 2414 }
+11 -4
drivers/net/cxgb4vf/cxgb4vf_main.c
··· 2269 2269 { 2270 2270 struct sge *s = &adapter->sge; 2271 2271 int q10g, n10g, qidx, pidx, qs; 2272 + size_t iqe_size; 2272 2273 2273 2274 /* 2274 2275 * We should not be called till we know how many Queue Sets we can ··· 2314 2313 s->ethqsets = qidx; 2315 2314 2316 2315 /* 2316 + * The Ingress Queue Entry Size for our various Response Queues needs 2317 + * to be big enough to accommodate the largest message we can receive 2318 + * from the chip/firmware; which is 64 bytes ... 2319 + */ 2320 + iqe_size = 64; 2321 + 2322 + /* 2317 2323 * Set up default Queue Set parameters ... Start off with the 2318 2324 * shortest interrupt holdoff timer. 2319 2325 */ ··· 2328 2320 struct sge_eth_rxq *rxq = &s->ethrxq[qs]; 2329 2321 struct sge_eth_txq *txq = &s->ethtxq[qs]; 2330 2322 2331 - init_rspq(&rxq->rspq, 0, 0, 1024, L1_CACHE_BYTES); 2323 + init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size); 2332 2324 rxq->fl.size = 72; 2333 2325 txq->q.size = 1024; 2334 2326 } ··· 2337 2329 * The firmware event queue is used for link state changes and 2338 2330 * notifications of TX DMA completions. 2339 2331 */ 2340 - init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, 2341 - L1_CACHE_BYTES); 2332 + init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size); 2342 2333 2343 2334 /* 2344 2335 * The forwarded interrupt queue is used when we're in MSI interrupt ··· 2353 2346 * any time ... 2354 2347 */ 2355 2348 init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1, 2356 - L1_CACHE_BYTES); 2349 + iqe_size); 2357 2350 } 2358 2351 2359 2352 /*
+9
drivers/net/ehea/ehea_ethtool.c
··· 261 261 262 262 } 263 263 264 + static int ehea_set_flags(struct net_device *dev, u32 data) 265 + { 266 + return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO 267 + | ETH_FLAG_TXVLAN 268 + | ETH_FLAG_RXVLAN); 269 + } 270 + 264 271 const struct ethtool_ops ehea_ethtool_ops = { 265 272 .get_settings = ehea_get_settings, 266 273 .get_drvinfo = ehea_get_drvinfo, ··· 280 273 .get_ethtool_stats = ehea_get_ethtool_stats, 281 274 .get_rx_csum = ehea_get_rx_csum, 282 275 .set_settings = ehea_set_settings, 276 + .get_flags = ethtool_op_get_flags, 277 + .set_flags = ehea_set_flags, 283 278 .nway_reset = ehea_nway_reset, /* Restart autonegotiation */ 284 279 }; 285 280
+5 -2
drivers/net/ehea/ehea_main.c
··· 683 683 int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) && 684 684 pr->port->vgrp); 685 685 686 - if (use_lro) { 686 + if (skb->dev->features & NETIF_F_LRO) { 687 687 if (vlan_extracted) 688 688 lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb, 689 689 pr->port->vgrp, ··· 787 787 } 788 788 cqe = ehea_poll_rq1(qp, &wqe_index); 789 789 } 790 - if (use_lro) 790 + if (dev->features & NETIF_F_LRO) 791 791 lro_flush_all(&pr->lro_mgr); 792 792 793 793 pr->rx_packets += processed; ··· 3277 3277 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER 3278 3278 | NETIF_F_LLTX; 3279 3279 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; 3280 + 3281 + if (use_lro) 3282 + dev->features |= NETIF_F_LRO; 3280 3283 3281 3284 INIT_WORK(&port->reset_task, ehea_reset_port); 3282 3285
+2 -1
drivers/net/enic/enic_main.c
··· 1962 1962 case VNIC_DEV_INTR_MODE_MSIX: 1963 1963 for (i = 0; i < enic->rq_count; i++) { 1964 1964 intr = enic_msix_rq_intr(enic, i); 1965 - enic_isr_msix_rq(enic->msix_entry[intr].vector, enic); 1965 + enic_isr_msix_rq(enic->msix_entry[intr].vector, 1966 + &enic->napi[i]); 1966 1967 } 1967 1968 intr = enic_msix_wq_intr(enic, i); 1968 1969 enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
+2
drivers/net/ifb.c
··· 104 104 rcu_read_unlock(); 105 105 dev_kfree_skb(skb); 106 106 stats->tx_dropped++; 107 + if (skb_queue_len(&dp->tq) != 0) 108 + goto resched; 107 109 break; 108 110 } 109 111 rcu_read_unlock();
+3
drivers/net/ixgbe/ixgbe_main.c
··· 4771 4771 adapter->rx_ring[i] = NULL; 4772 4772 } 4773 4773 4774 + adapter->num_tx_queues = 0; 4775 + adapter->num_rx_queues = 0; 4776 + 4774 4777 ixgbe_free_q_vectors(adapter); 4775 4778 ixgbe_reset_interrupt_capability(adapter); 4776 4779 }
+1 -1
drivers/net/phy/Kconfig
··· 64 64 config ICPLUS_PHY 65 65 tristate "Drivers for ICPlus PHYs" 66 66 ---help--- 67 - Currently supports the IP175C PHY. 67 + Currently supports the IP175C and IP1001 PHYs. 68 68 69 69 config REALTEK_PHY 70 70 tristate "Drivers for Realtek PHYs"
+54 -5
drivers/net/phy/icplus.c
··· 30 30 #include <asm/irq.h> 31 31 #include <asm/uaccess.h> 32 32 33 - MODULE_DESCRIPTION("ICPlus IP175C PHY driver"); 33 + MODULE_DESCRIPTION("ICPlus IP175C/IC1001 PHY drivers"); 34 34 MODULE_AUTHOR("Michael Barkowski"); 35 35 MODULE_LICENSE("GPL"); 36 36 ··· 89 89 return 0; 90 90 } 91 91 92 + static int ip1001_config_init(struct phy_device *phydev) 93 + { 94 + int err, value; 95 + 96 + /* Software Reset PHY */ 97 + value = phy_read(phydev, MII_BMCR); 98 + value |= BMCR_RESET; 99 + err = phy_write(phydev, MII_BMCR, value); 100 + if (err < 0) 101 + return err; 102 + 103 + do { 104 + value = phy_read(phydev, MII_BMCR); 105 + } while (value & BMCR_RESET); 106 + 107 + /* Additional delay (2ns) used to adjust RX clock phase 108 + * at GMII/ RGMII interface */ 109 + value = phy_read(phydev, 16); 110 + value |= 0x3; 111 + 112 + err = phy_write(phydev, 16, value); 113 + if (err < 0) 114 + return err; 115 + 116 + return err; 117 + } 118 + 92 119 static int ip175c_read_status(struct phy_device *phydev) 93 120 { 94 121 if (phydev->addr == 4) /* WAN port */ ··· 148 121 .driver = { .owner = THIS_MODULE,}, 149 122 }; 150 123 151 - static int __init ip175c_init(void) 124 + static struct phy_driver ip1001_driver = { 125 + .phy_id = 0x02430d90, 126 + .name = "ICPlus IP1001", 127 + .phy_id_mask = 0x0ffffff0, 128 + .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | 129 + SUPPORTED_Asym_Pause, 130 + .config_init = &ip1001_config_init, 131 + .config_aneg = &genphy_config_aneg, 132 + .read_status = &genphy_read_status, 133 + .suspend = genphy_suspend, 134 + .resume = genphy_resume, 135 + .driver = { .owner = THIS_MODULE,}, 136 + }; 137 + 138 + static int __init icplus_init(void) 152 139 { 140 + int ret = 0; 141 + 142 + ret = phy_driver_register(&ip1001_driver); 143 + if (ret < 0) 144 + return -ENODEV; 145 + 153 146 return phy_driver_register(&ip175c_driver); 154 147 } 155 148 156 - static void __exit ip175c_exit(void) 149 + static void __exit icplus_exit(void) 157 150 { 151 + 
phy_driver_unregister(&ip1001_driver); 158 152 phy_driver_unregister(&ip175c_driver); 159 153 } 160 154 161 - module_init(ip175c_init); 162 - module_exit(ip175c_exit); 155 + module_init(icplus_init); 156 + module_exit(icplus_exit); 163 157 164 158 static struct mdio_device_id __maybe_unused icplus_tbl[] = { 165 159 { 0x02430d80, 0x0ffffff0 }, 160 + { 0x02430d90, 0x0ffffff0 }, 166 161 { } 167 162 }; 168 163
+1 -1
drivers/net/pppoe.c
··· 948 948 949 949 abort: 950 950 kfree_skb(skb); 951 - return 0; 951 + return 1; 952 952 } 953 953 954 954 /************************************************************************
+1
drivers/net/qlge/qlge.h
··· 2083 2083 u32 mailbox_in; 2084 2084 u32 mailbox_out; 2085 2085 struct mbox_params idc_mbc; 2086 + struct mutex mpi_mutex; 2086 2087 2087 2088 int tx_ring_size; 2088 2089 int rx_ring_size;
+1
drivers/net/qlge/qlge_main.c
··· 4629 4629 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); 4630 4630 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log); 4631 4631 init_completion(&qdev->ide_completion); 4632 + mutex_init(&qdev->mpi_mutex); 4632 4633 4633 4634 if (!cards_found) { 4634 4635 dev_info(&pdev->dev, "%s\n", DRV_STRING);
+4 -8
drivers/net/qlge/qlge_mpi.c
··· 534 534 int status; 535 535 unsigned long count; 536 536 537 + mutex_lock(&qdev->mpi_mutex); 537 538 538 539 /* Begin polled mode for MPI */ 539 540 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); ··· 604 603 end: 605 604 /* End polled mode for MPI */ 606 605 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); 606 + mutex_unlock(&qdev->mpi_mutex); 607 607 return status; 608 608 } 609 609 ··· 1101 1099 static int ql_set_port_cfg(struct ql_adapter *qdev) 1102 1100 { 1103 1101 int status; 1104 - rtnl_lock(); 1105 1102 status = ql_mb_set_port_cfg(qdev); 1106 - rtnl_unlock(); 1107 1103 if (status) 1108 1104 return status; 1109 1105 status = ql_idc_wait(qdev); ··· 1122 1122 container_of(work, struct ql_adapter, mpi_port_cfg_work.work); 1123 1123 int status; 1124 1124 1125 - rtnl_lock(); 1126 1125 status = ql_mb_get_port_cfg(qdev); 1127 - rtnl_unlock(); 1128 1126 if (status) { 1129 1127 netif_err(qdev, drv, qdev->ndev, 1130 1128 "Bug: Failed to get port config data.\n"); ··· 1165 1167 u32 aen; 1166 1168 int timeout; 1167 1169 1168 - rtnl_lock(); 1169 1170 aen = mbcp->mbox_out[1] >> 16; 1170 1171 timeout = (mbcp->mbox_out[1] >> 8) & 0xf; 1171 1172 ··· 1228 1231 } 1229 1232 break; 1230 1233 } 1231 - rtnl_unlock(); 1232 1234 } 1233 1235 1234 1236 void ql_mpi_work(struct work_struct *work) ··· 1238 1242 struct mbox_params *mbcp = &mbc; 1239 1243 int err = 0; 1240 1244 1241 - rtnl_lock(); 1245 + mutex_lock(&qdev->mpi_mutex); 1242 1246 /* Begin polled mode for MPI */ 1243 1247 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 1244 1248 ··· 1255 1259 1256 1260 /* End polled mode for MPI */ 1257 1261 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); 1258 - rtnl_unlock(); 1262 + mutex_unlock(&qdev->mpi_mutex); 1259 1263 ql_enable_completion_interrupt(qdev, 0); 1260 1264 } 1261 1265
+16 -10
drivers/net/r8169.c
··· 744 744 mdio_write(ioaddr, MII_BMCR, val & 0xffff); 745 745 } 746 746 747 - static void rtl8169_check_link_status(struct net_device *dev, 747 + static void __rtl8169_check_link_status(struct net_device *dev, 748 748 struct rtl8169_private *tp, 749 - void __iomem *ioaddr) 749 + void __iomem *ioaddr, 750 + bool pm) 750 751 { 751 752 unsigned long flags; 752 753 753 754 spin_lock_irqsave(&tp->lock, flags); 754 755 if (tp->link_ok(ioaddr)) { 755 756 /* This is to cancel a scheduled suspend if there's one. */ 756 - pm_request_resume(&tp->pci_dev->dev); 757 + if (pm) 758 + pm_request_resume(&tp->pci_dev->dev); 757 759 netif_carrier_on(dev); 758 760 netif_info(tp, ifup, dev, "link up\n"); 759 761 } else { 760 762 netif_carrier_off(dev); 761 763 netif_info(tp, ifdown, dev, "link down\n"); 762 - pm_schedule_suspend(&tp->pci_dev->dev, 100); 764 + if (pm) 765 + pm_schedule_suspend(&tp->pci_dev->dev, 100); 763 766 } 764 767 spin_unlock_irqrestore(&tp->lock, flags); 768 + } 769 + 770 + static void rtl8169_check_link_status(struct net_device *dev, 771 + struct rtl8169_private *tp, 772 + void __iomem *ioaddr) 773 + { 774 + __rtl8169_check_link_status(dev, tp, ioaddr, false); 765 775 } 766 776 767 777 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) ··· 4610 4600 } 4611 4601 4612 4602 if (status & LinkChg) 4613 - rtl8169_check_link_status(dev, tp, ioaddr); 4603 + __rtl8169_check_link_status(dev, tp, ioaddr, true); 4614 4604 4615 4605 /* We need to see the lastest version of tp->intr_mask to 4616 4606 * avoid ignoring an MSI interrupt and having to wait for ··· 4900 4890 struct net_device *dev = pci_get_drvdata(pdev); 4901 4891 struct rtl8169_private *tp = netdev_priv(dev); 4902 4892 4903 - if (!tp->TxDescArray) 4904 - return 0; 4905 - 4906 - rtl8169_check_link_status(dev, tp, tp->mmio_addr); 4907 - return -EBUSY; 4893 + return tp->TxDescArray ? -EBUSY : 0; 4908 4894 } 4909 4895 4910 4896 static const struct dev_pm_ops rtl8169_pm_ops = {
+29 -14
drivers/net/sfc/efx.c
··· 197 197 198 198 static void efx_remove_channels(struct efx_nic *efx); 199 199 static void efx_remove_port(struct efx_nic *efx); 200 + static void efx_init_napi(struct efx_nic *efx); 200 201 static void efx_fini_napi(struct efx_nic *efx); 202 + static void efx_fini_napi_channel(struct efx_channel *channel); 201 203 static void efx_fini_struct(struct efx_nic *efx); 202 204 static void efx_start_all(struct efx_nic *efx); 203 205 static void efx_stop_all(struct efx_nic *efx); ··· 337 335 338 336 /* Disable interrupts and wait for ISRs to complete */ 339 337 efx_nic_disable_interrupts(efx); 340 - if (efx->legacy_irq) 338 + if (efx->legacy_irq) { 341 339 synchronize_irq(efx->legacy_irq); 340 + efx->legacy_irq_enabled = false; 341 + } 342 342 if (channel->irq) 343 343 synchronize_irq(channel->irq); 344 344 ··· 355 351 efx_channel_processed(channel); 356 352 357 353 napi_enable(&channel->napi_str); 354 + if (efx->legacy_irq) 355 + efx->legacy_irq_enabled = true; 358 356 efx_nic_enable_interrupts(efx); 359 357 } 360 358 ··· 432 426 433 427 *channel = *old_channel; 434 428 429 + channel->napi_dev = NULL; 435 430 memset(&channel->eventq, 0, sizeof(channel->eventq)); 436 431 437 432 rx_queue = &channel->rx_queue; ··· 743 736 if (rc) 744 737 goto rollback; 745 738 739 + efx_init_napi(efx); 740 + 746 741 /* Destroy old channels */ 747 - for (i = 0; i < efx->n_channels; i++) 742 + for (i = 0; i < efx->n_channels; i++) { 743 + efx_fini_napi_channel(other_channel[i]); 748 744 efx_remove_channel(other_channel[i]); 745 + } 749 746 out: 750 747 /* Free unused channel structures */ 751 748 for (i = 0; i < efx->n_channels; i++) ··· 1411 1400 efx_start_channel(channel); 1412 1401 } 1413 1402 1403 + if (efx->legacy_irq) 1404 + efx->legacy_irq_enabled = true; 1414 1405 efx_nic_enable_interrupts(efx); 1415 1406 1416 1407 /* Switch to event based MCDI completions after enabling interrupts. 
··· 1473 1460 1474 1461 /* Disable interrupts and wait for ISR to complete */ 1475 1462 efx_nic_disable_interrupts(efx); 1476 - if (efx->legacy_irq) 1463 + if (efx->legacy_irq) { 1477 1464 synchronize_irq(efx->legacy_irq); 1465 + efx->legacy_irq_enabled = false; 1466 + } 1478 1467 efx_for_each_channel(channel, efx) { 1479 1468 if (channel->irq) 1480 1469 synchronize_irq(channel->irq); ··· 1608 1593 * 1609 1594 **************************************************************************/ 1610 1595 1611 - static int efx_init_napi(struct efx_nic *efx) 1596 + static void efx_init_napi(struct efx_nic *efx) 1612 1597 { 1613 1598 struct efx_channel *channel; 1614 1599 ··· 1617 1602 netif_napi_add(channel->napi_dev, &channel->napi_str, 1618 1603 efx_poll, napi_weight); 1619 1604 } 1620 - return 0; 1605 + } 1606 + 1607 + static void efx_fini_napi_channel(struct efx_channel *channel) 1608 + { 1609 + if (channel->napi_dev) 1610 + netif_napi_del(&channel->napi_str); 1611 + channel->napi_dev = NULL; 1621 1612 } 1622 1613 1623 1614 static void efx_fini_napi(struct efx_nic *efx) 1624 1615 { 1625 1616 struct efx_channel *channel; 1626 1617 1627 - efx_for_each_channel(channel, efx) { 1628 - if (channel->napi_dev) 1629 - netif_napi_del(&channel->napi_str); 1630 - channel->napi_dev = NULL; 1631 - } 1618 + efx_for_each_channel(channel, efx) 1619 + efx_fini_napi_channel(channel); 1632 1620 } 1633 1621 1634 1622 /************************************************************************** ··· 2353 2335 if (rc) 2354 2336 goto fail1; 2355 2337 2356 - rc = efx_init_napi(efx); 2357 - if (rc) 2358 - goto fail2; 2338 + efx_init_napi(efx); 2359 2339 2360 2340 rc = efx->type->init(efx); 2361 2341 if (rc) { ··· 2384 2368 efx->type->fini(efx); 2385 2369 fail3: 2386 2370 efx_fini_napi(efx); 2387 - fail2: 2388 2371 efx_remove_all(efx); 2389 2372 fail1: 2390 2373 return rc;
+2
drivers/net/sfc/net_driver.h
··· 621 621 * @pci_dev: The PCI device 622 622 * @type: Controller type attributes 623 623 * @legacy_irq: IRQ number 624 + * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)? 624 625 * @workqueue: Workqueue for port reconfigures and the HW monitor. 625 626 * Work items do not hold and must not acquire RTNL. 626 627 * @workqueue_name: Name of workqueue ··· 710 709 struct pci_dev *pci_dev; 711 710 const struct efx_nic_type *type; 712 711 int legacy_irq; 712 + bool legacy_irq_enabled; 713 713 struct workqueue_struct *workqueue; 714 714 char workqueue_name[16]; 715 715 struct work_struct reset_work;
+6
drivers/net/sfc/nic.c
··· 1418 1418 u32 queues; 1419 1419 int syserr; 1420 1420 1421 + /* Could this be ours? If interrupts are disabled then the 1422 + * channel state may not be valid. 1423 + */ 1424 + if (!efx->legacy_irq_enabled) 1425 + return result; 1426 + 1421 1427 /* Read the ISR which also ACKs the interrupts */ 1422 1428 efx_readd(efx, &reg, FR_BZ_INT_ISR0); 1423 1429 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
+2 -2
drivers/net/stmmac/stmmac_main.c
··· 1509 1509 pr_warning("\tno valid MAC address;" 1510 1510 "please, use ifconfig or nwhwconfig!\n"); 1511 1511 1512 + spin_lock_init(&priv->lock); 1513 + 1512 1514 ret = register_netdev(dev); 1513 1515 if (ret) { 1514 1516 pr_err("%s: ERROR %i registering the device\n", ··· 1521 1519 DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n", 1522 1520 dev->name, (dev->features & NETIF_F_SG) ? "on" : "off", 1523 1521 (dev->features & NETIF_F_HW_CSUM) ? "on" : "off"); 1524 - 1525 - spin_lock_init(&priv->lock); 1526 1522 1527 1523 return ret; 1528 1524 }
+3 -3
drivers/net/tulip/dmfe.c
··· 688 688 689 689 DMFE_DBUG(0, "dmfe_start_xmit", 0); 690 690 691 - /* Resource flag check */ 692 - netif_stop_queue(dev); 693 - 694 691 /* Too large packet check */ 695 692 if (skb->len > MAX_PACKET_SIZE) { 696 693 pr_err("big packet = %d\n", (u16)skb->len); 697 694 dev_kfree_skb(skb); 698 695 return NETDEV_TX_OK; 699 696 } 697 + 698 + /* Resource flag check */ 699 + netif_stop_queue(dev); 700 700 701 701 spin_lock_irqsave(&db->lock, flags); 702 702
-4
drivers/net/usb/hso.c
··· 958 958 /* Packet is complete. Inject into stack. */ 959 959 /* We have IP packet here */ 960 960 odev->skb_rx_buf->protocol = cpu_to_be16(ETH_P_IP); 961 - /* don't check it */ 962 - odev->skb_rx_buf->ip_summed = 963 - CHECKSUM_UNNECESSARY; 964 - 965 961 skb_reset_mac_header(odev->skb_rx_buf); 966 962 967 963 /* Ship it off to the kernel */
+4 -1
drivers/net/wan/hd64572.c
··· 293 293 struct net_device *dev = port->netdev; 294 294 card_t* card = port->card; 295 295 u8 stat; 296 + unsigned count = 0; 296 297 297 298 spin_lock(&port->lock); 298 299 ··· 317 316 dev->stats.tx_bytes += readw(&desc->len); 318 317 } 319 318 writeb(0, &desc->stat); /* Free descriptor */ 319 + count++; 320 320 port->txlast = (port->txlast + 1) % card->tx_ring_buffers; 321 321 } 322 322 323 - netif_wake_queue(dev); 323 + if (count) 324 + netif_wake_queue(dev); 324 325 spin_unlock(&port->lock); 325 326 } 326 327
+8 -5
drivers/net/wireless/ath/ath5k/base.c
··· 1917 1917 sc->bmisscount = 0; 1918 1918 } 1919 1919 1920 - if (sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) { 1920 + if ((sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) || 1921 + sc->opmode == NL80211_IFTYPE_MESH_POINT) { 1921 1922 u64 tsf = ath5k_hw_get_tsf64(ah); 1922 1923 u32 tsftu = TSF_TO_TU(tsf); 1923 1924 int slot = ((tsftu % sc->bintval) * ATH_BCBUF) / sc->bintval; ··· 1950 1949 /* NB: hw still stops DMA, so proceed */ 1951 1950 } 1952 1951 1953 - /* refresh the beacon for AP mode */ 1954 - if (sc->opmode == NL80211_IFTYPE_AP) 1952 + /* refresh the beacon for AP or MESH mode */ 1953 + if (sc->opmode == NL80211_IFTYPE_AP || 1954 + sc->opmode == NL80211_IFTYPE_MESH_POINT) 1955 1955 ath5k_beacon_update(sc->hw, vif); 1956 1956 1957 1957 ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr); ··· 2853 2851 2854 2852 /* Assign the vap/adhoc to a beacon xmit slot. */ 2855 2853 if ((avf->opmode == NL80211_IFTYPE_AP) || 2856 - (avf->opmode == NL80211_IFTYPE_ADHOC)) { 2854 + (avf->opmode == NL80211_IFTYPE_ADHOC) || 2855 + (avf->opmode == NL80211_IFTYPE_MESH_POINT)) { 2857 2856 int slot; 2858 2857 2859 2858 WARN_ON(list_empty(&sc->bcbuf)); ··· 2873 2870 sc->bslot[avf->bslot] = vif; 2874 2871 if (avf->opmode == NL80211_IFTYPE_AP) 2875 2872 sc->num_ap_vifs++; 2876 - else 2873 + else if (avf->opmode == NL80211_IFTYPE_ADHOC) 2877 2874 sc->num_adhoc_vifs++; 2878 2875 } 2879 2876
+38 -35
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
··· 55 55 #define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */ 56 56 #define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */ 57 57 58 + #define CTL(_tpower, _flag) ((_tpower) | ((_flag) << 6)) 59 + 58 60 static const struct ar9300_eeprom ar9300_default = { 59 61 .eepromVersion = 2, 60 62 .templateVersion = 2, ··· 292 290 } 293 291 }, 294 292 .ctlPowerData_2G = { 295 - { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, 296 - { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, 297 - { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } }, 293 + { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 294 + { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 295 + { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, 298 296 299 - { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } }, 300 - { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, 301 - { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, 297 + { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, 298 + { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 299 + { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 302 300 303 - { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } }, 304 - { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, 305 - { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, 301 + { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } }, 302 + { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 303 + { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 306 304 307 - { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, 308 - { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } }, 305 + { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 306 + { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, 307 + { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, 309 308 }, 310 309 .modalHeader5G = { 311 310 /* 4 idle,t1,t2,b (4 bits per setting) */ ··· 571 568 .ctlPowerData_5G = { 572 569 { 573 570 { 574 - {60, 1}, {60, 1}, {60, 1}, {60, 1}, 575 - {60, 1}, {60, 1}, {60, 1}, {60, 0}, 571 + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), 572 + 
CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), 576 573 } 577 574 }, 578 575 { 579 576 { 580 - {60, 1}, {60, 1}, {60, 1}, {60, 1}, 581 - {60, 1}, {60, 1}, {60, 1}, {60, 0}, 577 + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), 578 + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), 582 579 } 583 580 }, 584 581 { 585 582 { 586 - {60, 0}, {60, 1}, {60, 0}, {60, 1}, 587 - {60, 1}, {60, 1}, {60, 1}, {60, 1}, 583 + CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1), 584 + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), 588 585 } 589 586 }, 590 587 { 591 588 { 592 - {60, 0}, {60, 1}, {60, 1}, {60, 0}, 593 - {60, 1}, {60, 0}, {60, 0}, {60, 0}, 589 + CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0), 590 + CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), 594 591 } 595 592 }, 596 593 { 597 594 { 598 - {60, 1}, {60, 1}, {60, 1}, {60, 0}, 599 - {60, 0}, {60, 0}, {60, 0}, {60, 0}, 595 + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), 596 + CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), 600 597 } 601 598 }, 602 599 { 603 600 { 604 - {60, 1}, {60, 1}, {60, 1}, {60, 1}, 605 - {60, 1}, {60, 0}, {60, 0}, {60, 0}, 601 + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), 602 + CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), 606 603 } 607 604 }, 608 605 { 609 606 { 610 - {60, 1}, {60, 1}, {60, 1}, {60, 1}, 611 - {60, 1}, {60, 1}, {60, 1}, {60, 1}, 607 + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), 608 + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), 612 609 } 613 610 }, 614 611 { 615 612 { 616 - {60, 1}, {60, 1}, {60, 0}, {60, 1}, 617 - {60, 1}, {60, 1}, {60, 1}, {60, 0}, 613 + CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), 614 + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), 618 615 } 619 616 }, 620 617 { 621 618 { 622 - {60, 1}, {60, 0}, {60, 1}, {60, 1}, 623 - {60, 1}, {60, 1}, {60, 0}, {60, 1}, 619 + CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), 620 + CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), 624 621 } 625 622 }, 626 623 } ··· 1830 1827 struct cal_ctl_data_5g *ctl_5g = 
eep->ctlPowerData_5G; 1831 1828 1832 1829 if (is2GHz) 1833 - return ctl_2g[idx].ctlEdges[edge].tPower; 1830 + return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge]); 1834 1831 else 1835 - return ctl_5g[idx].ctlEdges[edge].tPower; 1832 + return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge]); 1836 1833 } 1837 1834 1838 1835 static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep, ··· 1850 1847 1851 1848 if (is2GHz) { 1852 1849 if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq && 1853 - ctl_2g[idx].ctlEdges[edge - 1].flag) 1854 - return ctl_2g[idx].ctlEdges[edge - 1].tPower; 1850 + CTL_EDGE_FLAGS(ctl_2g[idx].ctlEdges[edge - 1])) 1851 + return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge - 1]); 1855 1852 } else { 1856 1853 if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq && 1857 - ctl_5g[idx].ctlEdges[edge - 1].flag) 1858 - return ctl_5g[idx].ctlEdges[edge - 1].tPower; 1854 + CTL_EDGE_FLAGS(ctl_5g[idx].ctlEdges[edge - 1])) 1855 + return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge - 1]); 1859 1856 } 1860 1857 1861 1858 return AR9300_MAX_RATE_POWER;
+2 -7
drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
··· 261 261 u8 tPow2x[14]; 262 262 } __packed; 263 263 264 - struct cal_ctl_edge_pwr { 265 - u8 tPower:6, 266 - flag:2; 267 - } __packed; 268 - 269 264 struct cal_ctl_data_2g { 270 - struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_2G]; 265 + u8 ctlEdges[AR9300_NUM_BAND_EDGES_2G]; 271 266 } __packed; 272 267 273 268 struct cal_ctl_data_5g { 274 - struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_5G]; 269 + u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G]; 275 270 } __packed; 276 271 277 272 struct ar9300_eeprom {
+4 -2
drivers/net/wireless/ath/ath9k/ath9k.h
··· 21 21 #include <linux/device.h> 22 22 #include <linux/leds.h> 23 23 #include <linux/completion.h> 24 + #include <linux/pm_qos_params.h> 24 25 25 26 #include "debug.h" 26 27 #include "common.h" ··· 329 328 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype); 330 329 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq); 331 330 int ath_tx_setup(struct ath_softc *sc, int haltype); 332 - void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx); 331 + bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx); 333 332 void ath_draintxq(struct ath_softc *sc, 334 333 struct ath_txq *txq, bool retry_tx); 335 334 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an); ··· 647 646 struct ath_descdma txsdma; 648 647 649 648 struct ath_ant_comb ant_comb; 649 + 650 + struct pm_qos_request_list pm_qos_req; 650 651 }; 651 652 652 653 struct ath_wiphy { ··· 678 675 } 679 676 680 677 extern struct ieee80211_ops ath9k_ops; 681 - extern struct pm_qos_request_list ath9k_pm_qos_req; 682 678 extern int modparam_nohwcrypt; 683 679 extern int led_blink; 684 680
+3 -3
drivers/net/wireless/ath/ath9k/eeprom.c
··· 240 240 for (i = 0; (i < num_band_edges) && 241 241 (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) { 242 242 if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) { 243 - twiceMaxEdgePower = pRdEdgesPower[i].tPower; 243 + twiceMaxEdgePower = CTL_EDGE_TPOWER(pRdEdgesPower[i].ctl); 244 244 break; 245 245 } else if ((i > 0) && 246 246 (freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, 247 247 is2GHz))) { 248 248 if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel, 249 249 is2GHz) < freq && 250 - pRdEdgesPower[i - 1].flag) { 250 + CTL_EDGE_FLAGS(pRdEdgesPower[i - 1].ctl)) { 251 251 twiceMaxEdgePower = 252 - pRdEdgesPower[i - 1].tPower; 252 + CTL_EDGE_TPOWER(pRdEdgesPower[i - 1].ctl); 253 253 } 254 254 break; 255 255 }
+14 -13
drivers/net/wireless/ath/ath9k/eeprom.h
··· 233 233 234 234 #define AR9287_CHECKSUM_LOCATION (AR9287_EEP_START_LOC + 1) 235 235 236 + #define CTL_EDGE_TPOWER(_ctl) ((_ctl) & 0x3f) 237 + #define CTL_EDGE_FLAGS(_ctl) (((_ctl) >> 6) & 0x03) 238 + 239 + #define LNA_CTL_BUF_MODE BIT(0) 240 + #define LNA_CTL_ISEL_LO BIT(1) 241 + #define LNA_CTL_ISEL_HI BIT(2) 242 + #define LNA_CTL_BUF_IN BIT(3) 243 + #define LNA_CTL_FEM_BAND BIT(4) 244 + #define LNA_CTL_LOCAL_BIAS BIT(5) 245 + #define LNA_CTL_FORCE_XPA BIT(6) 246 + #define LNA_CTL_USE_ANT1 BIT(7) 247 + 236 248 enum eeprom_param { 237 249 EEP_NFTHRESH_5, 238 250 EEP_NFTHRESH_2, ··· 390 378 u8 xatten2Margin[AR5416_MAX_CHAINS]; 391 379 u8 ob_ch1; 392 380 u8 db_ch1; 393 - u8 useAnt1:1, 394 - force_xpaon:1, 395 - local_bias:1, 396 - femBandSelectUsed:1, xlnabufin:1, xlnaisel:2, xlnabufmode:1; 381 + u8 lna_ctl; 397 382 u8 miscBits; 398 383 u16 xpaBiasLvlFreq[3]; 399 384 u8 futureModal[6]; ··· 544 535 u8 tPow2x[8]; 545 536 } __packed; 546 537 547 - 548 - #ifdef __BIG_ENDIAN_BITFIELD 549 538 struct cal_ctl_edges { 550 539 u8 bChannel; 551 - u8 flag:2, tPower:6; 540 + u8 ctl; 552 541 } __packed; 553 - #else 554 - struct cal_ctl_edges { 555 - u8 bChannel; 556 - u8 tPower:6, flag:2; 557 - } __packed; 558 - #endif 559 542 560 543 struct cal_data_op_loop_ar9287 { 561 544 u8 pwrPdg[2][5];
+14 -9
drivers/net/wireless/ath/ath9k/eeprom_def.c
··· 451 451 ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2, 452 452 AR_AN_TOP2_LOCALBIAS, 453 453 AR_AN_TOP2_LOCALBIAS_S, 454 - pModal->local_bias); 454 + !!(pModal->lna_ctl & 455 + LNA_CTL_LOCAL_BIAS)); 455 456 REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG, 456 - pModal->force_xpaon); 457 + !!(pModal->lna_ctl & LNA_CTL_FORCE_XPA)); 457 458 } 458 459 459 460 REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, ··· 1063 1062 case 1: 1064 1063 break; 1065 1064 case 2: 1066 - scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; 1065 + if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN) 1066 + scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; 1067 + else 1068 + scaledPower = 0; 1067 1069 break; 1068 1070 case 3: 1069 - scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; 1071 + if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN) 1072 + scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; 1073 + else 1074 + scaledPower = 0; 1070 1075 break; 1071 1076 } 1072 - 1073 - scaledPower = max((u16)0, scaledPower); 1074 1077 1075 1078 if (IS_CHAN_2GHZ(chan)) { 1076 1079 numCtlModes = ARRAY_SIZE(ctlModesFor11g) - ··· 1433 1428 1434 1429 num_ant_config = 1; 1435 1430 1436 - if (pBase->version >= 0x0E0D) 1437 - if (pModal->useAnt1) 1438 - num_ant_config += 1; 1431 + if (pBase->version >= 0x0E0D && 1432 + (pModal->lna_ctl & LNA_CTL_USE_ANT1)) 1433 + num_ant_config += 1; 1439 1434 1440 1435 return num_ant_config; 1441 1436 }
+7
drivers/net/wireless/ath/ath9k/hif_usb.c
··· 1024 1024 struct hif_device_usb *hif_dev = 1025 1025 (struct hif_device_usb *) usb_get_intfdata(interface); 1026 1026 1027 + /* 1028 + * The device has to be set to FULLSLEEP mode in case no 1029 + * interface is up. 1030 + */ 1031 + if (!(hif_dev->flags & HIF_USB_START)) 1032 + ath9k_htc_suspend(hif_dev->htc_handle); 1033 + 1027 1034 ath9k_hif_usb_dealloc_urbs(hif_dev); 1028 1035 1029 1036 return 0;
+3
drivers/net/wireless/ath/ath9k/htc.h
··· 455 455 void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv); 456 456 void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv); 457 457 void ath9k_ps_work(struct work_struct *work); 458 + bool ath9k_htc_setpower(struct ath9k_htc_priv *priv, 459 + enum ath9k_power_mode mode); 458 460 459 461 void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv); 460 462 void ath9k_init_leds(struct ath9k_htc_priv *priv); ··· 466 464 u16 devid, char *product); 467 465 void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug); 468 466 #ifdef CONFIG_PM 467 + void ath9k_htc_suspend(struct htc_target *htc_handle); 469 468 int ath9k_htc_resume(struct htc_target *htc_handle); 470 469 #endif 471 470 #ifdef CONFIG_ATH9K_HTC_DEBUGFS
+6
drivers/net/wireless/ath/ath9k/htc_drv_init.c
··· 891 891 } 892 892 893 893 #ifdef CONFIG_PM 894 + 895 + void ath9k_htc_suspend(struct htc_target *htc_handle) 896 + { 897 + ath9k_htc_setpower(htc_handle->drv_priv, ATH9K_PM_FULL_SLEEP); 898 + } 899 + 894 900 int ath9k_htc_resume(struct htc_target *htc_handle) 895 901 { 896 902 int ret;
+2 -2
drivers/net/wireless/ath/ath9k/htc_drv_main.c
··· 63 63 return mode; 64 64 } 65 65 66 - static bool ath9k_htc_setpower(struct ath9k_htc_priv *priv, 67 - enum ath9k_power_mode mode) 66 + bool ath9k_htc_setpower(struct ath9k_htc_priv *priv, 67 + enum ath9k_power_mode mode) 68 68 { 69 69 bool ret; 70 70
+2 -1
drivers/net/wireless/ath/ath9k/hw.c
··· 2044 2044 val = REG_READ(ah, AR7010_GPIO_IN); 2045 2045 return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0; 2046 2046 } else if (AR_SREV_9300_20_OR_LATER(ah)) 2047 - return MS_REG_READ(AR9300, gpio) != 0; 2047 + return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) & 2048 + AR_GPIO_BIT(gpio)) != 0; 2048 2049 else if (AR_SREV_9271(ah)) 2049 2050 return MS_REG_READ(AR9271, gpio) != 0; 2050 2051 else if (AR_SREV_9287_11_OR_LATER(ah))
+4 -5
drivers/net/wireless/ath/ath9k/init.c
··· 15 15 */ 16 16 17 17 #include <linux/slab.h> 18 - #include <linux/pm_qos_params.h> 19 18 20 19 #include "ath9k.h" 21 20 ··· 178 179 .read = ath9k_ioread32, 179 180 .write = ath9k_iowrite32, 180 181 }; 181 - 182 - struct pm_qos_request_list ath9k_pm_qos_req; 183 182 184 183 /**************************/ 185 184 /* Initialization */ ··· 661 664 hw->flags |= IEEE80211_HW_MFP_CAPABLE; 662 665 663 666 hw->wiphy->interface_modes = 667 + BIT(NL80211_IFTYPE_P2P_GO) | 668 + BIT(NL80211_IFTYPE_P2P_CLIENT) | 664 669 BIT(NL80211_IFTYPE_AP) | 665 670 BIT(NL80211_IFTYPE_WDS) | 666 671 BIT(NL80211_IFTYPE_STATION) | ··· 758 759 ath_init_leds(sc); 759 760 ath_start_rfkill_poll(sc); 760 761 761 - pm_qos_add_request(&ath9k_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 762 + pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 762 763 PM_QOS_DEFAULT_VALUE); 763 764 764 765 return 0; ··· 829 830 } 830 831 831 832 ieee80211_unregister_hw(hw); 832 - pm_qos_remove_request(&ath9k_pm_qos_req); 833 + pm_qos_remove_request(&sc->pm_qos_req); 833 834 ath_rx_cleanup(sc); 834 835 ath_tx_cleanup(sc); 835 836 ath9k_deinit_softc(sc);
+1 -2
drivers/net/wireless/ath/ath9k/mac.c
··· 703 703 rs->rs_phyerr = phyerr; 704 704 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) 705 705 rs->rs_status |= ATH9K_RXERR_DECRYPT; 706 - else if ((ads.ds_rxstatus8 & AR_MichaelErr) && 707 - rs->rs_keyix != ATH9K_RXKEYIX_INVALID) 706 + else if (ads.ds_rxstatus8 & AR_MichaelErr) 708 707 rs->rs_status |= ATH9K_RXERR_MIC; 709 708 else if (ads.ds_rxstatus8 & AR_KeyMiss) 710 709 rs->rs_status |= ATH9K_RXERR_DECRYPT;
+15 -13
drivers/net/wireless/ath/ath9k/main.c
··· 15 15 */ 16 16 17 17 #include <linux/nl80211.h> 18 - #include <linux/pm_qos_params.h> 19 18 #include "ath9k.h" 20 19 #include "btcoex.h" 21 20 ··· 244 245 * the relevant bits of the h/w. 245 246 */ 246 247 ath9k_hw_set_interrupts(ah, 0); 247 - ath_drain_all_txq(sc, false); 248 + stopped = ath_drain_all_txq(sc, false); 248 249 249 250 spin_lock_bh(&sc->rx.pcu_lock); 250 251 251 - stopped = ath_stoprecv(sc); 252 + if (!ath_stoprecv(sc)) 253 + stopped = false; 252 254 253 255 /* XXX: do not flush receive queue here. We don't want 254 256 * to flush data frames already in queue because of ··· 1244 1244 ath9k_btcoex_timer_resume(sc); 1245 1245 } 1246 1246 1247 - pm_qos_update_request(&ath9k_pm_qos_req, 55); 1247 + pm_qos_update_request(&sc->pm_qos_req, 55); 1248 1248 1249 1249 mutex_unlock: 1250 1250 mutex_unlock(&sc->mutex); ··· 1423 1423 1424 1424 sc->sc_flags |= SC_OP_INVALID; 1425 1425 1426 - pm_qos_update_request(&ath9k_pm_qos_req, PM_QOS_DEFAULT_VALUE); 1426 + pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE); 1427 1427 1428 1428 mutex_unlock(&sc->mutex); 1429 1429 ··· 1520 1520 struct ath_softc *sc = aphy->sc; 1521 1521 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1522 1522 struct ath_vif *avp = (void *)vif->drv_priv; 1523 - int i; 1524 1523 1525 1524 ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n"); 1526 1525 ··· 1533 1534 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || 1534 1535 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || 1535 1536 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) { 1537 + /* Disable SWBA interrupt */ 1538 + sc->sc_ah->imask &= ~ATH9K_INT_SWBA; 1536 1539 ath9k_ps_wakeup(sc); 1540 + ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask); 1537 1541 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 1538 1542 ath9k_ps_restore(sc); 1543 + tasklet_kill(&sc->bcon_tasklet); 1539 1544 } 1540 1545 1541 1546 ath_beacon_return(sc, avp); 1542 1547 sc->sc_flags &= ~SC_OP_BEACONS; 1543 1548 1544 - for (i = 0; i < 
ARRAY_SIZE(sc->beacon.bslot); i++) { 1545 - if (sc->beacon.bslot[i] == vif) { 1546 - printk(KERN_DEBUG "%s: vif had allocated beacon " 1547 - "slot\n", __func__); 1548 - sc->beacon.bslot[i] = NULL; 1549 - sc->beacon.bslot_aphy[i] = NULL; 1550 - } 1549 + if (sc->nbcnvifs) { 1550 + /* Re-enable SWBA interrupt */ 1551 + sc->sc_ah->imask |= ATH9K_INT_SWBA; 1552 + ath9k_ps_wakeup(sc); 1553 + ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask); 1554 + ath9k_ps_restore(sc); 1551 1555 } 1552 1556 1553 1557 sc->nvifs--;
+8 -1
drivers/net/wireless/ath/ath9k/recv.c
··· 838 838 struct ath_rx_status *rx_stats, 839 839 bool *decrypt_error) 840 840 { 841 + #define is_mc_or_valid_tkip_keyix ((is_mc || \ 842 + (rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && \ 843 + test_bit(rx_stats->rs_keyix, common->tkip_keymap)))) 844 + 841 845 struct ath_hw *ah = common->ah; 842 846 __le16 fc; 843 847 u8 rx_status_len = ah->caps.rx_status_len; ··· 883 879 if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) { 884 880 *decrypt_error = true; 885 881 } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) { 882 + bool is_mc; 886 883 /* 887 884 * The MIC error bit is only valid if the frame 888 885 * is not a control frame or fragment, and it was 889 886 * decrypted using a valid TKIP key. 890 887 */ 888 + is_mc = !!is_multicast_ether_addr(hdr->addr1); 889 + 891 890 if (!ieee80211_is_ctl(fc) && 892 891 !ieee80211_has_morefrags(fc) && 893 892 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) && 894 - test_bit(rx_stats->rs_keyix, common->tkip_keymap)) 893 + is_mc_or_valid_tkip_keyix) 895 894 rxs->flag |= RX_FLAG_MMIC_ERROR; 896 895 else 897 896 rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
+4 -2
drivers/net/wireless/ath/ath9k/reg.h
··· 984 984 #define AR9287_GPIO_IN_VAL_S 11 985 985 #define AR9271_GPIO_IN_VAL 0xFFFF0000 986 986 #define AR9271_GPIO_IN_VAL_S 16 987 - #define AR9300_GPIO_IN_VAL 0x0001FFFF 988 - #define AR9300_GPIO_IN_VAL_S 0 989 987 #define AR7010_GPIO_IN_VAL 0x0000FFFF 990 988 #define AR7010_GPIO_IN_VAL_S 0 989 + 990 + #define AR_GPIO_IN 0x404c 991 + #define AR9300_GPIO_IN_VAL 0x0001FFFF 992 + #define AR9300_GPIO_IN_VAL_S 0 991 993 992 994 #define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c) 993 995 #define AR_GPIO_OE_OUT_DRV 0x3
+6 -16
drivers/net/wireless/ath/ath9k/xmit.c
··· 1120 1120 } 1121 1121 } 1122 1122 1123 - void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) 1123 + bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) 1124 1124 { 1125 1125 struct ath_hw *ah = sc->sc_ah; 1126 1126 struct ath_common *common = ath9k_hw_common(sc->sc_ah); ··· 1128 1128 int i, npend = 0; 1129 1129 1130 1130 if (sc->sc_flags & SC_OP_INVALID) 1131 - return; 1131 + return true; 1132 1132 1133 1133 /* Stop beacon queue */ 1134 1134 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); ··· 1142 1142 } 1143 1143 } 1144 1144 1145 - if (npend) { 1146 - int r; 1147 - 1148 - ath_print(common, ATH_DBG_FATAL, 1149 - "Failed to stop TX DMA. Resetting hardware!\n"); 1150 - 1151 - spin_lock_bh(&sc->sc_resetlock); 1152 - r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false); 1153 - if (r) 1154 - ath_print(common, ATH_DBG_FATAL, 1155 - "Unable to reset hardware; reset status %d\n", 1156 - r); 1157 - spin_unlock_bh(&sc->sc_resetlock); 1158 - } 1145 + if (npend) 1146 + ath_print(common, ATH_DBG_FATAL, "Failed to stop TX DMA!\n"); 1159 1147 1160 1148 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1161 1149 if (ATH_TXQ_SETUP(sc, i)) 1162 1150 ath_draintxq(sc, &sc->tx.txq[i], retry_tx); 1163 1151 } 1152 + 1153 + return !npend; 1164 1154 } 1165 1155 1166 1156 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
+2 -1
drivers/net/wireless/ath/carl9170/fw.c
··· 291 291 292 292 if (SUPP(CARL9170FW_WLANTX_CAB)) { 293 293 ar->hw->wiphy->interface_modes |= 294 - BIT(NL80211_IFTYPE_AP); 294 + BIT(NL80211_IFTYPE_AP) | 295 + BIT(NL80211_IFTYPE_P2P_GO); 295 296 } 296 297 } 297 298
+2 -1
drivers/net/wireless/ath/carl9170/main.c
··· 1631 1631 * supports these modes. The code which will add the 1632 1632 * additional interface_modes is in fw.c. 1633 1633 */ 1634 - hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1634 + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 1635 + BIT(NL80211_IFTYPE_P2P_CLIENT); 1635 1636 1636 1637 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS | 1637 1638 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+1 -1
drivers/net/wireless/ath/carl9170/tx.c
··· 810 810 811 811 mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION | 812 812 AR9170_TX_MAC_BACKOFF); 813 - mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) && 813 + mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) & 814 814 AR9170_TX_MAC_QOS); 815 815 816 816 no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
-1
drivers/net/wireless/libertas/if_sdio.c
··· 1170 1170 lbs_deb_sdio("call remove card\n"); 1171 1171 lbs_stop_card(card->priv); 1172 1172 lbs_remove_card(card->priv); 1173 - card->priv->surpriseremoved = 1; 1174 1173 1175 1174 flush_workqueue(card->workqueue); 1176 1175 destroy_workqueue(card->workqueue);
-1
drivers/net/wireless/libertas/if_spi.c
··· 1055 1055 lbs_stop_card(priv); 1056 1056 lbs_remove_card(priv); /* will call free_netdev */ 1057 1057 1058 - priv->surpriseremoved = 1; 1059 1058 free_irq(spi->irq, card); 1060 1059 if_spi_terminate_spi_thread(card); 1061 1060 if (card->pdata->teardown)
-2
drivers/net/wireless/libertas/main.c
··· 915 915 916 916 lbs_free_adapter(priv); 917 917 lbs_cfg_free(priv); 918 - 919 - priv->dev = NULL; 920 918 free_netdev(dev); 921 919 922 920 lbs_deb_leave(LBS_DEB_MAIN);
+11 -7
drivers/net/wireless/orinoco/main.c
··· 1392 1392 orinoco_add_hostscan_results(priv, buf, len); 1393 1393 1394 1394 kfree(buf); 1395 - } else if (priv->scan_request) { 1395 + } else { 1396 1396 /* Either abort or complete the scan */ 1397 - cfg80211_scan_done(priv->scan_request, (len < 0)); 1398 - priv->scan_request = NULL; 1397 + orinoco_scan_done(priv, (len < 0)); 1399 1398 } 1400 1399 1401 1400 spin_lock_irqsave(&priv->scan_lock, flags); ··· 1683 1684 hermes_write_regn(hw, EVACK, 0xffff); 1684 1685 } 1685 1686 1687 + orinoco_scan_done(priv, true); 1688 + 1686 1689 /* firmware will have to reassociate */ 1687 1690 netif_carrier_off(dev); 1688 1691 priv->last_linkstatus = 0xffff; ··· 1763 1762 orinoco_unlock(priv, &flags); 1764 1763 1765 1764 /* Scanning support: Notify scan cancellation */ 1766 - if (priv->scan_request) { 1767 - cfg80211_scan_done(priv->scan_request, 1); 1768 - priv->scan_request = NULL; 1769 - } 1765 + orinoco_scan_done(priv, true); 1770 1766 1771 1767 if (priv->hard_reset) { 1772 1768 err = (*priv->hard_reset)(priv); ··· 1810 1812 { 1811 1813 struct net_device *dev = priv->ndev; 1812 1814 int err = 0; 1815 + 1816 + /* If we've called commit, we are reconfiguring or bringing the 1817 + * interface up. Maintaining countermeasures across this would 1818 + * be confusing, so note that we've disabled them. The port will 1819 + * be enabled later in orinoco_commit or __orinoco_up. */ 1820 + priv->tkip_cm_active = 0; 1813 1821 1814 1822 err = orinoco_hw_program_rids(priv); 1815 1823
+7 -7
drivers/net/wireless/orinoco/orinoco_cs.c
··· 151 151 goto failed; 152 152 } 153 153 154 - ret = pcmcia_request_irq(link, orinoco_interrupt); 155 - if (ret) 156 - goto failed; 157 - 158 - /* We initialize the hermes structure before completing PCMCIA 159 - * configuration just in case the interrupt handler gets 160 - * called. */ 161 154 mem = ioport_map(link->resource[0]->start, 162 155 resource_size(link->resource[0])); 163 156 if (!mem) 164 157 goto failed; 165 158 159 + /* We initialize the hermes structure before completing PCMCIA 160 + * configuration just in case the interrupt handler gets 161 + * called. */ 166 162 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); 163 + 164 + ret = pcmcia_request_irq(link, orinoco_interrupt); 165 + if (ret) 166 + goto failed; 167 167 168 168 ret = pcmcia_enable_device(link); 169 169 if (ret)
+8
drivers/net/wireless/orinoco/scan.c
··· 229 229 priv->scan_request = NULL; 230 230 } 231 231 } 232 + 233 + void orinoco_scan_done(struct orinoco_private *priv, bool abort) 234 + { 235 + if (priv->scan_request) { 236 + cfg80211_scan_done(priv->scan_request, abort); 237 + priv->scan_request = NULL; 238 + } 239 + }
+1
drivers/net/wireless/orinoco/scan.h
··· 16 16 void orinoco_add_hostscan_results(struct orinoco_private *dev, 17 17 unsigned char *buf, 18 18 size_t len); 19 + void orinoco_scan_done(struct orinoco_private *priv, bool abort); 19 20 20 21 #endif /* _ORINOCO_SCAN_H_ */
+7 -7
drivers/net/wireless/orinoco/spectrum_cs.c
··· 214 214 goto failed; 215 215 } 216 216 217 - ret = pcmcia_request_irq(link, orinoco_interrupt); 218 - if (ret) 219 - goto failed; 220 - 221 - /* We initialize the hermes structure before completing PCMCIA 222 - * configuration just in case the interrupt handler gets 223 - * called. */ 224 217 mem = ioport_map(link->resource[0]->start, 225 218 resource_size(link->resource[0])); 226 219 if (!mem) 227 220 goto failed; 228 221 222 + /* We initialize the hermes structure before completing PCMCIA 223 + * configuration just in case the interrupt handler gets 224 + * called. */ 229 225 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); 230 226 hw->eeprom_pda = true; 227 + 228 + ret = pcmcia_request_irq(link, orinoco_interrupt); 229 + if (ret) 230 + goto failed; 231 231 232 232 ret = pcmcia_enable_device(link); 233 233 if (ret)
+2 -2
drivers/net/wireless/orinoco/wext.c
··· 911 911 */ 912 912 if (param->value) { 913 913 priv->tkip_cm_active = 1; 914 - ret = hermes_enable_port(hw, 0); 914 + ret = hermes_disable_port(hw, 0); 915 915 } else { 916 916 priv->tkip_cm_active = 0; 917 - ret = hermes_disable_port(hw, 0); 917 + ret = hermes_enable_port(hw, 0); 918 918 } 919 919 break; 920 920
+2 -2
drivers/net/xen-netfront.c
··· 66 66 67 67 #define GRANT_INVALID_REF 0 68 68 69 - #define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE) 70 - #define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE) 69 + #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) 70 + #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) 71 71 #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) 72 72 73 73 struct netfront_info {
+5 -76
drivers/pci/bus.c
··· 64 64 } 65 65 } 66 66 67 - static bool pci_bus_resource_better(struct resource *res1, bool pos1, 68 - struct resource *res2, bool pos2) 69 - { 70 - /* If exactly one is positive decode, always prefer that one */ 71 - if (pos1 != pos2) 72 - return pos1 ? true : false; 73 - 74 - /* Prefer the one that contains the highest address */ 75 - if (res1->end != res2->end) 76 - return (res1->end > res2->end) ? true : false; 77 - 78 - /* Otherwise, prefer the one with highest "center of gravity" */ 79 - if (res1->start != res2->start) 80 - return (res1->start > res2->start) ? true : false; 81 - 82 - /* Otherwise, choose one arbitrarily (but consistently) */ 83 - return (res1 > res2) ? true : false; 84 - } 85 - 86 - static bool pci_bus_resource_positive(struct pci_bus *bus, struct resource *res) 87 - { 88 - struct pci_bus_resource *bus_res; 89 - 90 - /* 91 - * This relies on the fact that pci_bus.resource[] refers to P2P or 92 - * CardBus bridge base/limit registers, which are always positively 93 - * decoded. The pci_bus.resources list contains host bridge or 94 - * subtractively decoded resources. 95 - */ 96 - list_for_each_entry(bus_res, &bus->resources, list) { 97 - if (bus_res->res == res) 98 - return (bus_res->flags & PCI_SUBTRACTIVE_DECODE) ? 99 - false : true; 100 - } 101 - return true; 102 - } 103 - 104 - /* 105 - * Find the next-best bus resource after the cursor "res". If the cursor is 106 - * NULL, return the best resource. "Best" means that we prefer positive 107 - * decode regions over subtractive decode, then those at higher addresses. 
108 - */ 109 - static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus, 110 - unsigned int type, 111 - struct resource *res) 112 - { 113 - bool res_pos, r_pos, prev_pos = false; 114 - struct resource *r, *prev = NULL; 115 - int i; 116 - 117 - res_pos = pci_bus_resource_positive(bus, res); 118 - pci_bus_for_each_resource(bus, r, i) { 119 - if (!r) 120 - continue; 121 - 122 - if ((r->flags & IORESOURCE_TYPE_BITS) != type) 123 - continue; 124 - 125 - r_pos = pci_bus_resource_positive(bus, r); 126 - if (!res || pci_bus_resource_better(res, res_pos, r, r_pos)) { 127 - if (!prev || pci_bus_resource_better(r, r_pos, 128 - prev, prev_pos)) { 129 - prev = r; 130 - prev_pos = r_pos; 131 - } 132 - } 133 - } 134 - 135 - return prev; 136 - } 137 - 138 67 /** 139 68 * pci_bus_alloc_resource - allocate a resource from a parent bus 140 69 * @bus: PCI bus ··· 89 160 resource_size_t), 90 161 void *alignf_data) 91 162 { 92 - int ret = -ENOMEM; 163 + int i, ret = -ENOMEM; 93 164 struct resource *r; 94 165 resource_size_t max = -1; 95 - unsigned int type = res->flags & IORESOURCE_TYPE_BITS; 96 166 97 167 type_mask |= IORESOURCE_IO | IORESOURCE_MEM; 98 168 ··· 99 171 if (!(res->flags & IORESOURCE_MEM_64)) 100 172 max = PCIBIOS_MAX_MEM_32; 101 173 102 - /* Look for space at highest addresses first */ 103 - r = pci_bus_find_resource_prev(bus, type, NULL); 104 - for ( ; r; r = pci_bus_find_resource_prev(bus, type, r)) { 174 + pci_bus_for_each_resource(bus, r, i) { 175 + if (!r) 176 + continue; 177 + 105 178 /* type_mask must match */ 106 179 if ((res->flags ^ r->flags) & type_mask) 107 180 continue;
+5
drivers/pci/dmar.c
··· 1417 1417 (unsigned long long)drhd->reg_base_addr, ret); 1418 1418 return -1; 1419 1419 } 1420 + 1421 + /* 1422 + * Clear any previous faults. 1423 + */ 1424 + dmar_fault(iommu->irq, iommu); 1420 1425 } 1421 1426 1422 1427 return 0;
+26
drivers/pci/quirks.c
··· 2329 2329 { 2330 2330 u32 cfg; 2331 2331 2332 + if (!pci_find_capability(dev, PCI_CAP_ID_HT)) 2333 + return; 2334 + 2332 2335 pci_read_config_dword(dev, 0x74, &cfg); 2333 2336 2334 2337 if (cfg & ((1 << 2) | (1 << 15))) { ··· 2767 2764 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); 2768 2765 #endif /*CONFIG_MMC_RICOH_MMC*/ 2769 2766 2767 + #if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) 2768 + #define VTUNCERRMSK_REG 0x1ac 2769 + #define VTD_MSK_SPEC_ERRORS (1 << 31) 2770 + /* 2771 + * This is a quirk for masking vt-d spec defined errors to platform error 2772 + * handling logic. With out this, platforms using Intel 7500, 5500 chipsets 2773 + * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based 2774 + * on the RAS config settings of the platform) when a vt-d fault happens. 2775 + * The resulting SMI caused the system to hang. 2776 + * 2777 + * VT-d spec related errors are already handled by the VT-d OS code, so no 2778 + * need to report the same error through other channels. 2779 + */ 2780 + static void vtd_mask_spec_errors(struct pci_dev *dev) 2781 + { 2782 + u32 word; 2783 + 2784 + pci_read_config_dword(dev, VTUNCERRMSK_REG, &word); 2785 + pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS); 2786 + } 2787 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors); 2788 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors); 2789 + #endif 2770 2790 2771 2791 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, 2772 2792 struct pci_fixup *end)
+1 -1
drivers/pnp/pnpacpi/core.c
··· 180 180 }; 181 181 EXPORT_SYMBOL(pnpacpi_protocol); 182 182 183 - static char *pnpacpi_get_id(struct acpi_device *device) 183 + static char *__init pnpacpi_get_id(struct acpi_device *device) 184 184 { 185 185 struct acpi_hardware_id *id; 186 186
+22 -11
drivers/regulator/tps6586x-regulator.c
··· 231 231 }; 232 232 233 233 #define TPS6586X_REGULATOR(_id, vdata, _ops, vreg, shift, nbits, \ 234 - ereg0, ebit0, ereg1, ebit1, goreg, gobit) \ 235 - { \ 234 + ereg0, ebit0, ereg1, ebit1) \ 236 235 .desc = { \ 237 236 .name = "REG-" #_id, \ 238 237 .ops = &tps6586x_regulator_##_ops, \ ··· 247 248 .enable_bit[0] = (ebit0), \ 248 249 .enable_reg[1] = TPS6586X_SUPPLY##ereg1, \ 249 250 .enable_bit[1] = (ebit1), \ 250 - .voltages = tps6586x_##vdata##_voltages, \ 251 - } 251 + .voltages = tps6586x_##vdata##_voltages, 252 + 253 + #define TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \ 254 + .go_reg = TPS6586X_##goreg, \ 255 + .go_bit = (gobit), 252 256 253 257 #define TPS6586X_LDO(_id, vdata, vreg, shift, nbits, \ 254 258 ereg0, ebit0, ereg1, ebit1) \ 259 + { \ 255 260 TPS6586X_REGULATOR(_id, vdata, ldo_ops, vreg, shift, nbits, \ 256 - ereg0, ebit0, ereg1, ebit1, 0, 0) 261 + ereg0, ebit0, ereg1, ebit1) \ 262 + } 257 263 258 264 #define TPS6586X_DVM(_id, vdata, vreg, shift, nbits, \ 259 265 ereg0, ebit0, ereg1, ebit1, goreg, gobit) \ 266 + { \ 260 267 TPS6586X_REGULATOR(_id, vdata, dvm_ops, vreg, shift, nbits, \ 261 - ereg0, ebit0, ereg1, ebit1, goreg, gobit) 268 + ereg0, ebit0, ereg1, ebit1) \ 269 + TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \ 270 + } 262 271 263 272 static struct tps6586x_regulator tps6586x_regulator[] = { 264 273 TPS6586X_LDO(LDO_0, ldo, SUPPLYV1, 5, 3, ENC, 0, END, 0), ··· 274 267 TPS6586X_LDO(LDO_5, ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6), 275 268 TPS6586X_LDO(LDO_6, ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4), 276 269 TPS6586X_LDO(LDO_7, ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5), 277 - TPS6586X_LDO(LDO_8, ldo, SUPPLYV1, 5, 3, ENC, 6, END, 6), 270 + TPS6586X_LDO(LDO_8, ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6), 278 271 TPS6586X_LDO(LDO_9, ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7), 279 - TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, ENE, 7, ENE, 7), 272 + TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7), 280 273 TPS6586X_LDO(LDO_1, dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1), 
281 - TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 1, END, 1), 274 + TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7), 282 275 283 276 TPS6586X_DVM(LDO_2, dvm, LDO2BV1, 0, 5, ENA, 3, ENB, 3, VCC2, 6), 284 277 TPS6586X_DVM(LDO_4, ldo4, LDO4V1, 0, 5, ENC, 3, END, 3, VCC1, 6), ··· 297 290 uint8_t val1, val2; 298 291 int ret; 299 292 293 + if (ri->enable_reg[0] == ri->enable_reg[1] && 294 + ri->enable_bit[0] == ri->enable_bit[1]) 295 + return 0; 296 + 300 297 ret = tps6586x_read(parent, ri->enable_reg[0], &val1); 301 298 if (ret) 302 299 return ret; ··· 309 298 if (ret) 310 299 return ret; 311 300 312 - if (!(val2 & ri->enable_bit[1])) 301 + if (!(val2 & (1 << ri->enable_bit[1]))) 313 302 return 0; 314 303 315 304 /* 316 305 * The regulator is on, but it's enabled with the bit we don't 317 306 * want to use, so we switch the enable bits 318 307 */ 319 - if (!(val1 & ri->enable_bit[0])) { 308 + if (!(val1 & (1 << ri->enable_bit[0]))) { 320 309 ret = tps6586x_set_bits(parent, ri->enable_reg[0], 321 310 1 << ri->enable_bit[0]); 322 311 if (ret)
+8 -3
drivers/s390/scsi/zfcp_erp.c
··· 156 156 if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) || 157 157 a_status & ZFCP_STATUS_COMMON_ERP_FAILED) 158 158 return 0; 159 + if (p_status & ZFCP_STATUS_COMMON_NOESC) 160 + return need; 159 161 if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED)) 160 162 need = ZFCP_ERP_ACTION_REOPEN_ADAPTER; 161 163 /* fall through */ ··· 190 188 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, 191 189 &zfcp_sdev->status); 192 190 erp_action = &zfcp_sdev->erp_action; 191 + memset(erp_action, 0, sizeof(struct zfcp_erp_action)); 192 + erp_action->port = port; 193 + erp_action->sdev = sdev; 193 194 if (!(atomic_read(&zfcp_sdev->status) & 194 195 ZFCP_STATUS_COMMON_RUNNING)) 195 196 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; ··· 205 200 zfcp_erp_action_dismiss_port(port); 206 201 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); 207 202 erp_action = &port->erp_action; 203 + memset(erp_action, 0, sizeof(struct zfcp_erp_action)); 204 + erp_action->port = port; 208 205 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING)) 209 206 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; 210 207 break; ··· 216 209 zfcp_erp_action_dismiss_adapter(adapter); 217 210 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); 218 211 erp_action = &adapter->erp_action; 212 + memset(erp_action, 0, sizeof(struct zfcp_erp_action)); 219 213 if (!(atomic_read(&adapter->status) & 220 214 ZFCP_STATUS_COMMON_RUNNING)) 221 215 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; ··· 226 218 return NULL; 227 219 } 228 220 229 - memset(erp_action, 0, sizeof(struct zfcp_erp_action)); 230 221 erp_action->adapter = adapter; 231 - erp_action->port = port; 232 - erp_action->sdev = sdev; 233 222 erp_action->action = need; 234 223 erp_action->status = act_status; 235 224
+6 -5
drivers/s390/scsi/zfcp_fsf.c
··· 851 851 852 852 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 853 853 854 - req->data = zfcp_sdev; 854 + req->data = sdev; 855 855 req->handler = zfcp_fsf_abort_fcp_command_handler; 856 856 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; 857 857 req->qtcb->header.port_handle = zfcp_sdev->port->handle; ··· 2069 2069 struct fcp_resp_with_ext *fcp_rsp; 2070 2070 unsigned long flags; 2071 2071 2072 - zfcp_fsf_fcp_handler_common(req); 2073 - 2074 2072 read_lock_irqsave(&req->adapter->abort_lock, flags); 2075 2073 2076 2074 scpnt = req->data; ··· 2076 2078 read_unlock_irqrestore(&req->adapter->abort_lock, flags); 2077 2079 return; 2078 2080 } 2081 + 2082 + zfcp_fsf_fcp_handler_common(req); 2079 2083 2080 2084 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { 2081 2085 set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED); ··· 2170 2170 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; 2171 2171 struct zfcp_qdio *qdio = adapter->qdio; 2172 2172 struct fsf_qtcb_bottom_io *io; 2173 + unsigned long flags; 2173 2174 2174 2175 if (unlikely(!(atomic_read(&zfcp_sdev->status) & 2175 2176 ZFCP_STATUS_COMMON_UNBLOCKED))) 2176 2177 return -EBUSY; 2177 2178 2178 - spin_lock(&qdio->req_q_lock); 2179 + spin_lock_irqsave(&qdio->req_q_lock, flags); 2179 2180 if (atomic_read(&qdio->req_q_free) <= 0) { 2180 2181 atomic_inc(&qdio->req_q_full); 2181 2182 goto out; ··· 2240 2239 zfcp_fsf_req_free(req); 2241 2240 scsi_cmnd->host_scribble = NULL; 2242 2241 out: 2243 - spin_unlock(&qdio->req_q_lock); 2242 + spin_unlock_irqrestore(&qdio->req_q_lock, flags); 2244 2243 return retval; 2245 2244 } 2246 2245
+2 -5
drivers/s390/scsi/zfcp_scsi.c
··· 76 76 scpnt->scsi_done(scpnt); 77 77 } 78 78 79 - static int zfcp_scsi_queuecommand_lck(struct scsi_cmnd *scpnt, 80 - void (*done) (struct scsi_cmnd *)) 79 + static 80 + int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt) 81 81 { 82 82 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); 83 83 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; ··· 87 87 /* reset the status for this request */ 88 88 scpnt->result = 0; 89 89 scpnt->host_scribble = NULL; 90 - scpnt->scsi_done = done; 91 90 92 91 scsi_result = fc_remote_port_chkready(rport); 93 92 if (unlikely(scsi_result)) { ··· 125 126 126 127 return ret; 127 128 } 128 - 129 - static DEF_SCSI_QCMD(zfcp_scsi_queuecommand) 130 129 131 130 static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) 132 131 {
+1 -7
drivers/scsi/hpsa.c
··· 90 90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252}, 91 91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253}, 92 92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254}, 93 - #define PCI_DEVICE_ID_HP_CISSF 0x333f 94 - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x333F}, 95 - {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 96 - PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, 97 - {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 93 + {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 98 94 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, 99 95 {0,} 100 96 }; ··· 109 113 {0x3249103C, "Smart Array P812", &SA5_access}, 110 114 {0x324a103C, "Smart Array P712m", &SA5_access}, 111 115 {0x324b103C, "Smart Array P711m", &SA5_access}, 112 - {0x3233103C, "StorageWorks P1210m", &SA5_access}, 113 - {0x333F103C, "StorageWorks P1210m", &SA5_access}, 114 116 {0x3250103C, "Smart Array", &SA5_access}, 115 117 {0x3250113C, "Smart Array", &SA5_access}, 116 118 {0x3250123C, "Smart Array", &SA5_access},
+2 -2
drivers/scsi/osd/osd_initiator.c
··· 951 951 /* create a bio for continuation segment */ 952 952 bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes, 953 953 GFP_KERNEL); 954 - if (unlikely(!bio)) 955 - return -ENOMEM; 954 + if (IS_ERR(bio)) 955 + return PTR_ERR(bio); 956 956 957 957 bio->bi_rw |= REQ_WRITE; 958 958
+3 -1
drivers/scsi/pmcraid.c
··· 62 62 static unsigned int pmcraid_debug_log; 63 63 static unsigned int pmcraid_disable_aen; 64 64 static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST; 65 + static unsigned int pmcraid_enable_msix; 65 66 66 67 /* 67 68 * Data structures to support multiple adapters by the LLD. ··· 4692 4691 int rc; 4693 4692 struct pci_dev *pdev = pinstance->pdev; 4694 4693 4695 - if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) { 4694 + if ((pmcraid_enable_msix) && 4695 + (pci_find_capability(pdev, PCI_CAP_ID_MSIX))) { 4696 4696 int num_hrrq = PMCRAID_NUM_MSIX_VECTORS; 4697 4697 struct msix_entry entries[PMCRAID_NUM_MSIX_VECTORS]; 4698 4698 int i;
+2 -4
drivers/scsi/pmcraid.h
··· 42 42 */ 43 43 #define PMCRAID_DRIVER_NAME "PMC MaxRAID" 44 44 #define PMCRAID_DEVFILE "pmcsas" 45 - #define PMCRAID_DRIVER_VERSION "2.0.3" 45 + #define PMCRAID_DRIVER_VERSION "1.0.3" 46 46 #define PMCRAID_DRIVER_DATE __DATE__ 47 47 48 48 #define PMCRAID_FW_VERSION_1 0x002 ··· 333 333 __u8 lun[PMCRAID_LUN_LEN]; 334 334 } __attribute__((packed, aligned(4))); 335 335 336 - /* extended configuration table sizes are of 64 bytes in size */ 337 - #define PMCRAID_CFGTE_EXT_SIZE 32 336 + /* extended configuration table sizes are also of 32 bytes in size */ 338 337 struct pmcraid_config_table_entry_ext { 339 338 struct pmcraid_config_table_entry cfgte; 340 - __u8 cfgte_ext[PMCRAID_CFGTE_EXT_SIZE]; 341 339 }; 342 340 343 341 /* resource types (config_table_entry.resource_type values) */
-1
drivers/scsi/qla2xxx/qla_def.h
··· 2409 2409 uint32_t enable_target_reset :1; 2410 2410 uint32_t enable_lip_full_login :1; 2411 2411 uint32_t enable_led_scheme :1; 2412 - uint32_t inta_enabled :1; 2413 2412 uint32_t msi_enabled :1; 2414 2413 uint32_t msix_enabled :1; 2415 2414 uint32_t disable_serdes :1;
+1
drivers/scsi/qla2xxx/qla_iocb.c
··· 1061 1061 fcp_cmnd->additional_cdb_len |= 2; 1062 1062 1063 1063 int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); 1064 + host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun)); 1064 1065 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 1065 1066 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); 1066 1067 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
+3 -2
drivers/scsi/qla2xxx/qla_isr.c
··· 2491 2491 skip_msi: 2492 2492 2493 2493 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 2494 - IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp); 2494 + ha->flags.msi_enabled ? 0 : IRQF_SHARED, 2495 + QLA2XXX_DRIVER_NAME, rsp); 2495 2496 if (ret) { 2496 2497 qla_printk(KERN_WARNING, ha, 2497 2498 "Failed to reserve interrupt %d already in use.\n", 2498 2499 ha->pdev->irq); 2499 2500 goto fail; 2500 2501 } 2501 - ha->flags.inta_enabled = 1; 2502 + 2502 2503 clear_risc_ints: 2503 2504 2504 2505 /*
+1
drivers/scsi/qla2xxx/qla_nx.c
··· 2749 2749 goto queuing_error_fcp_cmnd; 2750 2750 2751 2751 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 2752 + host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2752 2753 2753 2754 /* build FCP_CMND IU */ 2754 2755 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+2 -1
drivers/scsi/qla2xxx/qla_os.c
··· 829 829 { 830 830 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 831 831 srb_t *sp; 832 - int ret; 832 + int ret = SUCCESS; 833 833 unsigned int id, lun; 834 834 unsigned long flags; 835 835 int wait = 0; ··· 2064 2064 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2065 2065 ha->gid_list_info_size = 8; 2066 2066 ha->optrom_size = OPTROM_SIZE_82XX; 2067 + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2067 2068 ha->isp_ops = &qla82xx_isp_ops; 2068 2069 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2069 2070 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+2 -2
drivers/scsi/qla2xxx/qla_version.h
··· 7 7 /* 8 8 * Driver version 9 9 */ 10 - #define QLA2XXX_VERSION "8.03.04-k0" 10 + #define QLA2XXX_VERSION "8.03.05-k0" 11 11 12 12 #define QLA_DRIVER_MAJOR_VER 8 13 13 #define QLA_DRIVER_MINOR_VER 3 14 - #define QLA_DRIVER_PATCH_VER 4 14 + #define QLA_DRIVER_PATCH_VER 5 15 15 #define QLA_DRIVER_BETA_VER 0
+2 -24
drivers/scsi/scsi_error.c
··· 615 615 return rtn; 616 616 } 617 617 618 - static int __scsi_try_to_abort_cmd(struct scsi_cmnd *scmd) 618 + static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd) 619 619 { 620 620 if (!scmd->device->host->hostt->eh_abort_handler) 621 621 return FAILED; ··· 623 623 return scmd->device->host->hostt->eh_abort_handler(scmd); 624 624 } 625 625 626 - /** 627 - * scsi_try_to_abort_cmd - Ask host to abort a running command. 628 - * @scmd: SCSI cmd to abort from Lower Level. 629 - * 630 - * Notes: 631 - * This function will not return until the user's completion function 632 - * has been called. there is no timeout on this operation. if the 633 - * author of the low-level driver wishes this operation to be timed, 634 - * they can provide this facility themselves. helper functions in 635 - * scsi_error.c can be supplied to make this easier to do. 636 - */ 637 - static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd) 638 - { 639 - /* 640 - * scsi_done was called just after the command timed out and before 641 - * we had a chance to process it. (db) 642 - */ 643 - if (scmd->serial_number == 0) 644 - return SUCCESS; 645 - return __scsi_try_to_abort_cmd(scmd); 646 - } 647 - 648 626 static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd) 649 627 { 650 - if (__scsi_try_to_abort_cmd(scmd) != SUCCESS) 628 + if (scsi_try_to_abort_cmd(scmd) != SUCCESS) 651 629 if (scsi_try_bus_device_reset(scmd) != SUCCESS) 652 630 if (scsi_try_target_reset(scmd) != SUCCESS) 653 631 if (scsi_try_bus_reset(scmd) != SUCCESS)
+1 -7
drivers/scsi/scsi_lib.c
··· 1403 1403 1404 1404 INIT_LIST_HEAD(&cmd->eh_entry); 1405 1405 1406 - /* 1407 - * Set the serial numbers back to zero 1408 - */ 1409 - cmd->serial_number = 0; 1410 - 1411 1406 atomic_inc(&cmd->device->iodone_cnt); 1412 1407 if (cmd->result) 1413 1408 atomic_inc(&cmd->device->ioerr_cnt); ··· 1637 1642 1638 1643 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); 1639 1644 1640 - /* New queue, no concurrency on queue_flags */ 1641 1645 if (!shost->use_clustering) 1642 - queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); 1646 + q->limits.cluster = 0; 1643 1647 1644 1648 /* 1645 1649 * set a reasonable default alignment on word boundaries: the
+2 -1
drivers/serial/kgdboc.c
··· 90 90 91 91 static void kgdboc_restore_input(void) 92 92 { 93 - schedule_work(&kgdboc_restore_input_work); 93 + if (likely(system_state == SYSTEM_RUNNING)) 94 + schedule_work(&kgdboc_restore_input_work); 94 95 } 95 96 96 97 static int kgdboc_register_kbd(char **cptr)
+5
drivers/spi/dw_spi.c
··· 413 413 { 414 414 while (dws->write(dws)) 415 415 dws->read(dws); 416 + /* 417 + * There is a possibility that the last word of a transaction 418 + * will be lost if data is not ready. Re-read to solve this issue. 419 + */ 420 + dws->read(dws); 416 421 417 422 transfer_complete(dws); 418 423 }
+4 -4
drivers/staging/cx25821/cx25821-video.c
··· 92 92 return ARRAY_SIZE(formats); 93 93 } 94 94 95 - struct cx25821_fmt *format_by_fourcc(unsigned int fourcc) 95 + struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc) 96 96 { 97 97 unsigned int i; 98 98 ··· 848 848 pix_format = 849 849 (dev->channels[ch_id].pixel_formats == 850 850 PIXEL_FRMT_411) ? V4L2_PIX_FMT_Y41P : V4L2_PIX_FMT_YUYV; 851 - fh->fmt = format_by_fourcc(pix_format); 851 + fh->fmt = cx25821_format_by_fourcc(pix_format); 852 852 853 853 v4l2_prio_open(&dev->channels[ch_id].prio, &fh->prio); 854 854 ··· 1010 1010 if (0 != err) 1011 1011 return err; 1012 1012 1013 - fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat); 1013 + fh->fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat); 1014 1014 fh->vidq.field = f->fmt.pix.field; 1015 1015 1016 1016 /* check if width and height is valid based on set standard */ ··· 1119 1119 enum v4l2_field field; 1120 1120 unsigned int maxw, maxh; 1121 1121 1122 - fmt = format_by_fourcc(f->fmt.pix.pixelformat); 1122 + fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat); 1123 1123 if (NULL == fmt) 1124 1124 return -EINVAL; 1125 1125
+1 -1
drivers/staging/cx25821/cx25821-video.h
··· 87 87 88 88 #define FORMAT_FLAGS_PACKED 0x01 89 89 extern struct cx25821_fmt formats[]; 90 - extern struct cx25821_fmt *format_by_fourcc(unsigned int fourcc); 90 + extern struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc); 91 91 extern struct cx25821_data timeout_data[MAX_VID_CHANNEL_NUM]; 92 92 93 93 extern void cx25821_dump_video_queue(struct cx25821_dev *dev,
+4 -2
drivers/tty/n_gsm.c
··· 716 716 if (msg->len < 128) 717 717 *--dp = (msg->len << 1) | EA; 718 718 else { 719 - *--dp = ((msg->len & 127) << 1) | EA; 720 - *--dp = (msg->len >> 6) & 0xfe; 719 + *--dp = (msg->len >> 7); /* bits 7 - 15 */ 720 + *--dp = (msg->len & 127) << 1; /* bits 0 - 6 */ 721 721 } 722 722 } 723 723 ··· 968 968 { 969 969 struct gsm_msg *msg; 970 970 msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype); 971 + if (msg == NULL) 972 + return; 971 973 msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */ 972 974 msg->data[1] = (dlen << 1) | EA; 973 975 memcpy(msg->data + 2, data, dlen);
+3 -12
drivers/usb/atm/usbatm.c
··· 951 951 * condition: callbacks we register can be executed at once, before we have 952 952 * initialized the struct atm_dev. To protect against this, all callbacks 953 953 * abort if atm_dev->dev_data is NULL. */ 954 - atm_dev = atm_dev_register(instance->driver_name, &usbatm_atm_devops, -1, NULL); 954 + atm_dev = atm_dev_register(instance->driver_name, 955 + &instance->usb_intf->dev, &usbatm_atm_devops, 956 + -1, NULL); 955 957 if (!atm_dev) { 956 958 usb_err(instance, "%s: failed to register ATM device!\n", __func__); 957 959 return -1; ··· 967 965 968 966 /* temp init ATM device, set to 128kbit */ 969 967 atm_dev->link_rate = 128 * 1000 / 424; 970 - 971 - ret = sysfs_create_link(&atm_dev->class_dev.kobj, 972 - &instance->usb_intf->dev.kobj, "device"); 973 - if (ret) { 974 - atm_err(instance, "%s: sysfs_create_link failed: %d\n", 975 - __func__, ret); 976 - goto fail_sysfs; 977 - } 978 968 979 969 if (instance->driver->atm_start && ((ret = instance->driver->atm_start(instance, atm_dev)) < 0)) { 980 970 atm_err(instance, "%s: atm_start failed: %d!\n", __func__, ret); ··· 986 992 return 0; 987 993 988 994 fail: 989 - sysfs_remove_link(&atm_dev->class_dev.kobj, "device"); 990 - fail_sysfs: 991 995 instance->atm_dev = NULL; 992 996 atm_dev_deregister(atm_dev); /* usbatm_atm_dev_close will eventually be called */ 993 997 return ret; ··· 1321 1329 1322 1330 /* ATM finalize */ 1323 1331 if (instance->atm_dev) { 1324 - sysfs_remove_link(&instance->atm_dev->class_dev.kobj, "device"); 1325 1332 atm_dev_deregister(instance->atm_dev); 1326 1333 instance->atm_dev = NULL; 1327 1334 }
+9 -1
drivers/usb/core/Kconfig
··· 107 107 If you are unsure about this, say N here. 108 108 109 109 config USB_OTG 110 - bool 110 + bool "OTG support" 111 111 depends on USB && EXPERIMENTAL 112 112 depends on USB_SUSPEND 113 113 default n 114 + help 115 + The most notable feature of USB OTG is support for a 116 + "Dual-Role" device, which can act as either a device 117 + or a host. The initial role is decided by the type of 118 + plug inserted and can be changed later when two dual 119 + role devices talk to each other. 114 120 121 + Select this only if your board has Mini-AB/Micro-AB 122 + connector. 115 123 116 124 config USB_OTG_WHITELIST 117 125 bool "Rely on OTG Targeted Peripherals List"
+9 -9
drivers/usb/gadget/composite.c
··· 1047 1047 kfree(cdev->req->buf); 1048 1048 usb_ep_free_request(gadget->ep0, cdev->req); 1049 1049 } 1050 + device_remove_file(&gadget->dev, &dev_attr_suspended); 1050 1051 kfree(cdev); 1051 1052 set_gadget_data(gadget, NULL); 1052 - device_remove_file(&gadget->dev, &dev_attr_suspended); 1053 1053 composite = NULL; 1054 1054 } 1055 1055 ··· 1107 1107 */ 1108 1108 usb_ep_autoconfig_reset(cdev->gadget); 1109 1109 1110 - /* standardized runtime overrides for device ID data */ 1111 - if (idVendor) 1112 - cdev->desc.idVendor = cpu_to_le16(idVendor); 1113 - if (idProduct) 1114 - cdev->desc.idProduct = cpu_to_le16(idProduct); 1115 - if (bcdDevice) 1116 - cdev->desc.bcdDevice = cpu_to_le16(bcdDevice); 1117 - 1118 1110 /* composite gadget needs to assign strings for whole device (like 1119 1111 * serial number), register function drivers, potentially update 1120 1112 * power state and consumption, etc ··· 1117 1125 1118 1126 cdev->desc = *composite->dev; 1119 1127 cdev->desc.bMaxPacketSize0 = gadget->ep0->maxpacket; 1128 + 1129 + /* standardized runtime overrides for device ID data */ 1130 + if (idVendor) 1131 + cdev->desc.idVendor = cpu_to_le16(idVendor); 1132 + if (idProduct) 1133 + cdev->desc.idProduct = cpu_to_le16(idProduct); 1134 + if (bcdDevice) 1135 + cdev->desc.bcdDevice = cpu_to_le16(bcdDevice); 1120 1136 1121 1137 /* stirng overrides */ 1122 1138 if (iManufacturer || !cdev->desc.iManufacturer) {
+15 -10
drivers/usb/host/xhci-mem.c
··· 1680 1680 xhci->port_array[i] = (u8) -1; 1681 1681 } 1682 1682 /* FIXME: Should we disable the port? */ 1683 + continue; 1683 1684 } 1684 1685 xhci->port_array[i] = major_revision; 1685 1686 if (major_revision == 0x03) ··· 1759 1758 return -ENOMEM; 1760 1759 1761 1760 port_index = 0; 1762 - for (i = 0; i < num_ports; i++) 1763 - if (xhci->port_array[i] != 0x03) { 1764 - xhci->usb2_ports[port_index] = 1765 - &xhci->op_regs->port_status_base + 1766 - NUM_PORT_REGS*i; 1767 - xhci_dbg(xhci, "USB 2.0 port at index %u, " 1768 - "addr = %p\n", i, 1769 - xhci->usb2_ports[port_index]); 1770 - port_index++; 1771 - } 1761 + for (i = 0; i < num_ports; i++) { 1762 + if (xhci->port_array[i] == 0x03 || 1763 + xhci->port_array[i] == 0 || 1764 + xhci->port_array[i] == -1) 1765 + continue; 1766 + 1767 + xhci->usb2_ports[port_index] = 1768 + &xhci->op_regs->port_status_base + 1769 + NUM_PORT_REGS*i; 1770 + xhci_dbg(xhci, "USB 2.0 port at index %u, " 1771 + "addr = %p\n", i, 1772 + xhci->usb2_ports[port_index]); 1773 + port_index++; 1774 + } 1772 1775 } 1773 1776 if (xhci->num_usb3_ports) { 1774 1777 xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
+3 -1
drivers/usb/misc/uss720.c
··· 3 3 /* 4 4 * uss720.c -- USS720 USB Parport Cable. 5 5 * 6 - * Copyright (C) 1999, 2005 6 + * Copyright (C) 1999, 2005, 2010 7 7 * Thomas Sailer (t.sailer@alumni.ethz.ch) 8 8 * 9 9 * This program is free software; you can redistribute it and/or modify ··· 776 776 { USB_DEVICE(0x0557, 0x2001) }, 777 777 { USB_DEVICE(0x0729, 0x1284) }, 778 778 { USB_DEVICE(0x1293, 0x0002) }, 779 + { USB_DEVICE(0x1293, 0x0002) }, 780 + { USB_DEVICE(0x050d, 0x0002) }, 779 781 { } /* Terminating entry */ 780 782 }; 781 783
+1
drivers/usb/serial/ftdi_sio.c
··· 796 796 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) }, 797 797 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, 798 798 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, 799 + { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, 799 800 { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), 800 801 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 801 802 { }, /* Optional parameter entry */
+5
drivers/usb/serial/ftdi_sio_ids.h
··· 1081 1081 #define MJSG_HD_RADIO_PID 0x937C 1082 1082 1083 1083 /* 1084 + * D.O.Tec products (http://www.directout.eu) 1085 + */ 1086 + #define FTDI_DOTEC_PID 0x9868 1087 + 1088 + /* 1084 1089 * Xverve Signalyzer tools (http://www.signalyzer.com/) 1085 1090 */ 1086 1091 #define XVERVE_SIGNALYZER_ST_PID 0xBCA0
+7
drivers/usb/storage/unusual_devs.h
··· 481 481 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 482 482 US_FL_MAX_SECTORS_64), 483 483 484 + /* Reported by Vitaly Kuznetsov <vitty@altlinux.ru> */ 485 + UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999, 486 + "Samsung", 487 + "YP-CP3", 488 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 489 + US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG), 490 + 484 491 /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>. 485 492 * Device uses standards-violating 32-byte Bulk Command Block Wrappers and 486 493 * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
+2 -1
drivers/vhost/vhost.c
··· 884 884 int r; 885 885 if (!write_length) 886 886 return 0; 887 + write_length += write_address % VHOST_PAGE_SIZE; 887 888 write_address /= VHOST_PAGE_SIZE; 888 889 for (;;) { 889 890 u64 base = (u64)(unsigned long)log_base; ··· 898 897 if (write_length <= VHOST_PAGE_SIZE) 899 898 break; 900 899 write_length -= VHOST_PAGE_SIZE; 901 - write_address += VHOST_PAGE_SIZE; 900 + write_address += 1; 902 901 } 903 902 return r; 904 903 }
+1
drivers/video/modedb.c
··· 855 855 abs(cmode->yres - mode->yres); 856 856 if (diff > d) { 857 857 diff = d; 858 + diff_refresh = abs(cmode->refresh - mode->refresh); 858 859 best = cmode; 859 860 } else if (diff == d) { 860 861 d = abs(cmode->refresh - mode->refresh);
+2 -2
drivers/video/omap/Kconfig
··· 1 1 config FB_OMAP 2 2 tristate "OMAP frame buffer support (EXPERIMENTAL)" 3 - depends on FB && ARCH_OMAP && (OMAP2_DSS = "n") 4 - 3 + depends on FB && (OMAP2_DSS = "n") 4 + depends on ARCH_OMAP1 || ARCH_OMAP2 || ARCH_OMAP3 5 5 select FB_CFB_FILLRECT 6 6 select FB_CFB_COPYAREA 7 7 select FB_CFB_IMAGEBLIT
+2 -2
drivers/video/omap2/vram.c
··· 551 551 if (!size) 552 552 return; 553 553 554 - size = PAGE_ALIGN(size); 554 + size = ALIGN(size, SZ_2M); 555 555 556 556 if (paddr) { 557 557 if (paddr & ~PAGE_MASK) { ··· 576 576 return; 577 577 } 578 578 } else { 579 - paddr = memblock_alloc(size, PAGE_SIZE); 579 + paddr = memblock_alloc(size, SZ_2M); 580 580 } 581 581 582 582 memblock_free(paddr, size);
+6 -5
fs/btrfs/disk-io.c
··· 696 696 __btree_submit_bio_done); 697 697 } 698 698 699 + #ifdef CONFIG_MIGRATION 699 700 static int btree_migratepage(struct address_space *mapping, 700 701 struct page *newpage, struct page *page) 701 702 { ··· 713 712 if (page_has_private(page) && 714 713 !try_to_release_page(page, GFP_KERNEL)) 715 714 return -EAGAIN; 716 - #ifdef CONFIG_MIGRATION 717 715 return migrate_page(mapping, newpage, page); 718 - #else 719 - return -ENOSYS; 720 - #endif 721 716 } 717 + #endif 722 718 723 719 static int btree_writepage(struct page *page, struct writeback_control *wbc) 724 720 { ··· 1007 1009 blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item)); 1008 1010 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item), 1009 1011 blocksize, generation); 1010 - BUG_ON(!root->node); 1012 + if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) { 1013 + free_extent_buffer(root->node); 1014 + return -EIO; 1015 + } 1011 1016 root->commit_root = btrfs_root_node(root); 1012 1017 return 0; 1013 1018 }
+1 -1
fs/btrfs/export.c
··· 166 166 static struct dentry *btrfs_get_parent(struct dentry *child) 167 167 { 168 168 struct inode *dir = child->d_inode; 169 - static struct dentry *dentry; 169 + struct dentry *dentry; 170 170 struct btrfs_root *root = BTRFS_I(dir)->root; 171 171 struct btrfs_path *path; 172 172 struct extent_buffer *leaf;
+59 -16
fs/btrfs/extent-tree.c
··· 429 429 430 430 static int cache_block_group(struct btrfs_block_group_cache *cache, 431 431 struct btrfs_trans_handle *trans, 432 + struct btrfs_root *root, 432 433 int load_cache_only) 433 434 { 434 435 struct btrfs_fs_info *fs_info = cache->fs_info; ··· 443 442 444 443 /* 445 444 * We can't do the read from on-disk cache during a commit since we need 446 - * to have the normal tree locking. 445 + * to have the normal tree locking. Also if we are currently trying to 446 + * allocate blocks for the tree root we can't do the fast caching since 447 + * we likely hold important locks. 447 448 */ 448 - if (!trans->transaction->in_commit) { 449 + if (!trans->transaction->in_commit && 450 + (root && root != root->fs_info->tree_root)) { 449 451 spin_lock(&cache->lock); 450 452 if (cache->cached != BTRFS_CACHE_NO) { 451 453 spin_unlock(&cache->lock); ··· 2745 2741 struct btrfs_root *root = block_group->fs_info->tree_root; 2746 2742 struct inode *inode = NULL; 2747 2743 u64 alloc_hint = 0; 2744 + int dcs = BTRFS_DC_ERROR; 2748 2745 int num_pages = 0; 2749 2746 int retries = 0; 2750 2747 int ret = 0; ··· 2800 2795 2801 2796 spin_lock(&block_group->lock); 2802 2797 if (block_group->cached != BTRFS_CACHE_FINISHED) { 2798 + /* We're not cached, don't bother trying to write stuff out */ 2799 + dcs = BTRFS_DC_WRITTEN; 2803 2800 spin_unlock(&block_group->lock); 2804 2801 goto out_put; 2805 2802 } ··· 2828 2821 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, 2829 2822 num_pages, num_pages, 2830 2823 &alloc_hint); 2824 + if (!ret) 2825 + dcs = BTRFS_DC_SETUP; 2831 2826 btrfs_free_reserved_data_space(inode, num_pages); 2832 2827 out_put: 2833 2828 iput(inode); ··· 2837 2828 btrfs_release_path(root, path); 2838 2829 out: 2839 2830 spin_lock(&block_group->lock); 2840 - if (ret) 2841 - block_group->disk_cache_state = BTRFS_DC_ERROR; 2842 - else 2843 - block_group->disk_cache_state = BTRFS_DC_SETUP; 2831 + block_group->disk_cache_state = dcs; 2844 2832 
spin_unlock(&block_group->lock); 2845 2833 2846 2834 return ret; ··· 3043 3037 3044 3038 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) 3045 3039 { 3046 - u64 num_devices = root->fs_info->fs_devices->rw_devices; 3040 + /* 3041 + * we add in the count of missing devices because we want 3042 + * to make sure that any RAID levels on a degraded FS 3043 + * continue to be honored. 3044 + */ 3045 + u64 num_devices = root->fs_info->fs_devices->rw_devices + 3046 + root->fs_info->fs_devices->missing_devices; 3047 3047 3048 3048 if (num_devices == 1) 3049 3049 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0); ··· 4092 4080 * space back to the block group, otherwise we will leak space. 4093 4081 */ 4094 4082 if (!alloc && cache->cached == BTRFS_CACHE_NO) 4095 - cache_block_group(cache, trans, 1); 4083 + cache_block_group(cache, trans, NULL, 1); 4096 4084 4097 4085 byte_in_group = bytenr - cache->key.objectid; 4098 4086 WARN_ON(byte_in_group > cache->key.offset); ··· 4942 4930 btrfs_get_block_group(block_group); 4943 4931 search_start = block_group->key.objectid; 4944 4932 4933 + /* 4934 + * this can happen if we end up cycling through all the 4935 + * raid types, but we want to make sure we only allocate 4936 + * for the proper type. 4937 + */ 4938 + if (!block_group_bits(block_group, data)) { 4939 + u64 extra = BTRFS_BLOCK_GROUP_DUP | 4940 + BTRFS_BLOCK_GROUP_RAID1 | 4941 + BTRFS_BLOCK_GROUP_RAID10; 4942 + 4943 + /* 4944 + * if they asked for extra copies and this block group 4945 + * doesn't provide them, bail. This does allow us to 4946 + * fill raid0 from raid1. 
4947 + */ 4948 + if ((data & extra) && !(block_group->flags & extra)) 4949 + goto loop; 4950 + } 4951 + 4945 4952 have_block_group: 4946 4953 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) { 4947 4954 u64 free_percent; 4948 4955 4949 - ret = cache_block_group(block_group, trans, 1); 4956 + ret = cache_block_group(block_group, trans, 4957 + orig_root, 1); 4950 4958 if (block_group->cached == BTRFS_CACHE_FINISHED) 4951 4959 goto have_block_group; 4952 4960 ··· 4990 4958 if (loop > LOOP_CACHING_NOWAIT || 4991 4959 (loop > LOOP_FIND_IDEAL && 4992 4960 atomic_read(&space_info->caching_threads) < 2)) { 4993 - ret = cache_block_group(block_group, trans, 0); 4961 + ret = cache_block_group(block_group, trans, 4962 + orig_root, 0); 4994 4963 BUG_ON(ret); 4995 4964 } 4996 4965 found_uncached_bg = true; ··· 5548 5515 u64 num_bytes = ins->offset; 5549 5516 5550 5517 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); 5551 - cache_block_group(block_group, trans, 0); 5518 + cache_block_group(block_group, trans, NULL, 0); 5552 5519 caching_ctl = get_caching_control(block_group); 5553 5520 5554 5521 if (!caching_ctl) { ··· 6333 6300 NULL, NULL); 6334 6301 BUG_ON(ret < 0); 6335 6302 if (ret > 0) { 6336 - ret = btrfs_del_orphan_item(trans, tree_root, 6337 - root->root_key.objectid); 6338 - BUG_ON(ret); 6303 + /* if we fail to delete the orphan item this time 6304 + * around, it'll get picked up the next time. 6305 + * 6306 + * The most common failure here is just -ENOENT. 6307 + */ 6308 + btrfs_del_orphan_item(trans, tree_root, 6309 + root->root_key.objectid); 6339 6310 } 6340 6311 } 6341 6312 ··· 7915 7878 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 | 7916 7879 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10; 7917 7880 7918 - num_devices = root->fs_info->fs_devices->rw_devices; 7881 + /* 7882 + * we add in the count of missing devices because we want 7883 + * to make sure that any RAID levels on a degraded FS 7884 + * continue to be honored. 
7885 + */ 7886 + num_devices = root->fs_info->fs_devices->rw_devices + 7887 + root->fs_info->fs_devices->missing_devices; 7888 + 7919 7889 if (num_devices == 1) { 7920 7890 stripped |= BTRFS_BLOCK_GROUP_DUP; 7921 7891 stripped = flags & ~stripped; ··· 8291 8247 break; 8292 8248 if (ret != 0) 8293 8249 goto error; 8294 - 8295 8250 leaf = path->nodes[0]; 8296 8251 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 8297 8252 cache = kzalloc(sizeof(*cache), GFP_NOFS);
+61 -33
fs/btrfs/file.c
··· 48 48 struct page **prepared_pages, 49 49 struct iov_iter *i) 50 50 { 51 - size_t copied; 51 + size_t copied = 0; 52 52 int pg = 0; 53 53 int offset = pos & (PAGE_CACHE_SIZE - 1); 54 + int total_copied = 0; 54 55 55 56 while (write_bytes > 0) { 56 57 size_t count = min_t(size_t, 57 58 PAGE_CACHE_SIZE - offset, write_bytes); 58 59 struct page *page = prepared_pages[pg]; 59 - again: 60 - if (unlikely(iov_iter_fault_in_readable(i, count))) 61 - return -EFAULT; 62 - 63 - /* Copy data from userspace to the current page */ 64 - copied = iov_iter_copy_from_user(page, i, offset, count); 60 + /* 61 + * Copy data from userspace to the current page 62 + * 63 + * Disable pagefault to avoid recursive lock since 64 + * the pages are already locked 65 + */ 66 + pagefault_disable(); 67 + copied = iov_iter_copy_from_user_atomic(page, i, offset, count); 68 + pagefault_enable(); 65 69 66 70 /* Flush processor's dcache for this page */ 67 71 flush_dcache_page(page); 68 72 iov_iter_advance(i, copied); 69 73 write_bytes -= copied; 74 + total_copied += copied; 70 75 76 + /* Return to btrfs_file_aio_write to fault page */ 71 77 if (unlikely(copied == 0)) { 72 - count = min_t(size_t, PAGE_CACHE_SIZE - offset, 73 - iov_iter_single_seg_count(i)); 74 - goto again; 78 + break; 75 79 } 76 80 77 81 if (unlikely(copied < PAGE_CACHE_SIZE - offset)) { ··· 85 81 offset = 0; 86 82 } 87 83 } 88 - return 0; 84 + return total_copied; 89 85 } 90 86 91 87 /* ··· 858 854 unsigned long last_index; 859 855 int will_write; 860 856 int buffered = 0; 857 + int copied = 0; 858 + int dirty_pages = 0; 861 859 862 860 will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) || 863 861 (file->f_flags & O_DIRECT)); ··· 976 970 WARN_ON(num_pages > nrptrs); 977 971 memset(pages, 0, sizeof(struct page *) * nrptrs); 978 972 979 - ret = btrfs_delalloc_reserve_space(inode, write_bytes); 973 + /* 974 + * Fault pages before locking them in prepare_pages 975 + * to avoid recursive lock 976 + */ 977 + if 
(unlikely(iov_iter_fault_in_readable(&i, write_bytes))) { 978 + ret = -EFAULT; 979 + goto out; 980 + } 981 + 982 + ret = btrfs_delalloc_reserve_space(inode, 983 + num_pages << PAGE_CACHE_SHIFT); 980 984 if (ret) 981 985 goto out; 982 986 ··· 994 978 pos, first_index, last_index, 995 979 write_bytes); 996 980 if (ret) { 997 - btrfs_delalloc_release_space(inode, write_bytes); 981 + btrfs_delalloc_release_space(inode, 982 + num_pages << PAGE_CACHE_SHIFT); 998 983 goto out; 999 984 } 1000 985 1001 - ret = btrfs_copy_from_user(pos, num_pages, 986 + copied = btrfs_copy_from_user(pos, num_pages, 1002 987 write_bytes, pages, &i); 1003 - if (ret == 0) { 988 + dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >> 989 + PAGE_CACHE_SHIFT; 990 + 991 + if (num_pages > dirty_pages) { 992 + if (copied > 0) 993 + atomic_inc( 994 + &BTRFS_I(inode)->outstanding_extents); 995 + btrfs_delalloc_release_space(inode, 996 + (num_pages - dirty_pages) << 997 + PAGE_CACHE_SHIFT); 998 + } 999 + 1000 + if (copied > 0) { 1004 1001 dirty_and_release_pages(NULL, root, file, pages, 1005 - num_pages, pos, write_bytes); 1002 + dirty_pages, pos, copied); 1006 1003 } 1007 1004 1008 1005 btrfs_drop_pages(pages, num_pages); 1009 - if (ret) { 1010 - btrfs_delalloc_release_space(inode, write_bytes); 1011 - goto out; 1006 + 1007 + if (copied > 0) { 1008 + if (will_write) { 1009 + filemap_fdatawrite_range(inode->i_mapping, pos, 1010 + pos + copied - 1); 1011 + } else { 1012 + balance_dirty_pages_ratelimited_nr( 1013 + inode->i_mapping, 1014 + dirty_pages); 1015 + if (dirty_pages < 1016 + (root->leafsize >> PAGE_CACHE_SHIFT) + 1) 1017 + btrfs_btree_balance_dirty(root, 1); 1018 + btrfs_throttle(root); 1019 + } 1012 1020 } 1013 1021 1014 - if (will_write) { 1015 - filemap_fdatawrite_range(inode->i_mapping, pos, 1016 - pos + write_bytes - 1); 1017 - } else { 1018 - balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1019 - num_pages); 1020 - if (num_pages < 1021 - (root->leafsize >> PAGE_CACHE_SHIFT) + 1) 1022 - 
btrfs_btree_balance_dirty(root, 1); 1023 - btrfs_throttle(root); 1024 - } 1025 - 1026 - pos += write_bytes; 1027 - num_written += write_bytes; 1022 + pos += copied; 1023 + num_written += copied; 1028 1024 1029 1025 cond_resched(); 1030 1026 }
+7 -5
fs/btrfs/free-space-cache.c
··· 290 290 (unsigned long long)BTRFS_I(inode)->generation, 291 291 (unsigned long long)generation, 292 292 (unsigned long long)block_group->key.objectid); 293 - goto out; 293 + goto free_cache; 294 294 } 295 295 296 296 if (!num_entries) ··· 524 524 return 0; 525 525 } 526 526 527 + node = rb_first(&block_group->free_space_offset); 528 + if (!node) { 529 + iput(inode); 530 + return 0; 531 + } 532 + 527 533 last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; 528 534 filemap_write_and_wait(inode->i_mapping); 529 535 btrfs_wait_ordered_range(inode, inode->i_size & ··· 548 542 * our entries. 549 543 */ 550 544 first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64); 551 - 552 - node = rb_first(&block_group->free_space_offset); 553 - if (!node) 554 - goto out_free; 555 545 556 546 /* 557 547 * Lock all pages first so we can lock the extent safely.
+5 -6
fs/btrfs/inode.c
··· 495 495 add_async_extent(async_cow, start, num_bytes, 496 496 total_compressed, pages, nr_pages_ret); 497 497 498 - if (start + num_bytes < end && start + num_bytes < actual_end) { 498 + if (start + num_bytes < end) { 499 499 start += num_bytes; 500 500 pages = NULL; 501 501 cond_resched(); ··· 5712 5712 5713 5713 if (err) { 5714 5714 printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu " 5715 - "disk_bytenr %lu len %u err no %d\n", 5716 - dip->inode->i_ino, bio->bi_rw, bio->bi_sector, 5717 - bio->bi_size, err); 5715 + "sector %#Lx len %u err no %d\n", 5716 + dip->inode->i_ino, bio->bi_rw, 5717 + (unsigned long long)bio->bi_sector, bio->bi_size, err); 5718 5718 dip->errors = 1; 5719 5719 5720 5720 /* ··· 5934 5934 */ 5935 5935 if (write) { 5936 5936 struct btrfs_ordered_extent *ordered; 5937 - ordered = btrfs_lookup_ordered_extent(inode, 5938 - dip->logical_offset); 5937 + ordered = btrfs_lookup_ordered_extent(inode, file_offset); 5939 5938 if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) && 5940 5939 !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) 5941 5940 btrfs_free_reserved_extent(root, ordered->start,
+35 -23
fs/btrfs/ioctl.c
··· 947 947 948 948 static noinline int btrfs_ioctl_snap_create(struct file *file, 949 949 void __user *arg, int subvol, 950 - int async) 950 + int v2) 951 951 { 952 952 struct btrfs_ioctl_vol_args *vol_args = NULL; 953 - struct btrfs_ioctl_async_vol_args *async_vol_args = NULL; 953 + struct btrfs_ioctl_vol_args_v2 *vol_args_v2 = NULL; 954 954 char *name; 955 955 u64 fd; 956 - u64 transid = 0; 957 956 int ret; 958 957 959 - if (async) { 960 - async_vol_args = memdup_user(arg, sizeof(*async_vol_args)); 961 - if (IS_ERR(async_vol_args)) 962 - return PTR_ERR(async_vol_args); 958 + if (v2) { 959 + u64 transid = 0; 960 + u64 *ptr = NULL; 963 961 964 - name = async_vol_args->name; 965 - fd = async_vol_args->fd; 966 - async_vol_args->name[BTRFS_SNAPSHOT_NAME_MAX] = '\0'; 962 + vol_args_v2 = memdup_user(arg, sizeof(*vol_args_v2)); 963 + if (IS_ERR(vol_args_v2)) 964 + return PTR_ERR(vol_args_v2); 965 + 966 + if (vol_args_v2->flags & ~BTRFS_SUBVOL_CREATE_ASYNC) { 967 + ret = -EINVAL; 968 + goto out; 969 + } 970 + 971 + name = vol_args_v2->name; 972 + fd = vol_args_v2->fd; 973 + vol_args_v2->name[BTRFS_SUBVOL_NAME_MAX] = '\0'; 974 + 975 + if (vol_args_v2->flags & BTRFS_SUBVOL_CREATE_ASYNC) 976 + ptr = &transid; 977 + 978 + ret = btrfs_ioctl_snap_create_transid(file, name, fd, 979 + subvol, ptr); 980 + 981 + if (ret == 0 && ptr && 982 + copy_to_user(arg + 983 + offsetof(struct btrfs_ioctl_vol_args_v2, 984 + transid), ptr, sizeof(*ptr))) 985 + ret = -EFAULT; 967 986 } else { 968 987 vol_args = memdup_user(arg, sizeof(*vol_args)); 969 988 if (IS_ERR(vol_args)) ··· 990 971 name = vol_args->name; 991 972 fd = vol_args->fd; 992 973 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; 974 + 975 + ret = btrfs_ioctl_snap_create_transid(file, name, fd, 976 + subvol, NULL); 993 977 } 994 - 995 - ret = btrfs_ioctl_snap_create_transid(file, name, fd, 996 - subvol, &transid); 997 - 998 - if (!ret && async) { 999 - if (copy_to_user(arg + 1000 - offsetof(struct btrfs_ioctl_async_vol_args, 1001 - 
transid), &transid, sizeof(transid))) 1002 - return -EFAULT; 1003 - } 1004 - 978 + out: 1005 979 kfree(vol_args); 1006 - kfree(async_vol_args); 980 + kfree(vol_args_v2); 1007 981 1008 982 return ret; 1009 983 } ··· 2258 2246 return btrfs_ioctl_getversion(file, argp); 2259 2247 case BTRFS_IOC_SNAP_CREATE: 2260 2248 return btrfs_ioctl_snap_create(file, argp, 0, 0); 2261 - case BTRFS_IOC_SNAP_CREATE_ASYNC: 2249 + case BTRFS_IOC_SNAP_CREATE_V2: 2262 2250 return btrfs_ioctl_snap_create(file, argp, 0, 1); 2263 2251 case BTRFS_IOC_SUBVOL_CREATE: 2264 2252 return btrfs_ioctl_snap_create(file, argp, 1, 0);
+9 -5
fs/btrfs/ioctl.h
··· 30 30 char name[BTRFS_PATH_NAME_MAX + 1]; 31 31 }; 32 32 33 - #define BTRFS_SNAPSHOT_NAME_MAX 4079 34 - struct btrfs_ioctl_async_vol_args { 33 + #define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0) 34 + 35 + #define BTRFS_SUBVOL_NAME_MAX 4039 36 + struct btrfs_ioctl_vol_args_v2 { 35 37 __s64 fd; 36 38 __u64 transid; 37 - char name[BTRFS_SNAPSHOT_NAME_MAX + 1]; 39 + __u64 flags; 40 + __u64 unused[4]; 41 + char name[BTRFS_SUBVOL_NAME_MAX + 1]; 38 42 }; 39 43 40 44 #define BTRFS_INO_LOOKUP_PATH_MAX 4080 ··· 191 187 struct btrfs_ioctl_space_args) 192 188 #define BTRFS_IOC_START_SYNC _IOR(BTRFS_IOCTL_MAGIC, 24, __u64) 193 189 #define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64) 194 - #define BTRFS_IOC_SNAP_CREATE_ASYNC _IOW(BTRFS_IOCTL_MAGIC, 23, \ 195 - struct btrfs_ioctl_async_vol_args) 190 + #define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \ 191 + struct btrfs_ioctl_vol_args_v2) 196 192 #endif
+5 -1
fs/btrfs/orphan.c
··· 56 56 return -ENOMEM; 57 57 58 58 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 59 - if (ret) 59 + if (ret < 0) 60 60 goto out; 61 + if (ret) { 62 + ret = -ENOENT; 63 + goto out; 64 + } 61 65 62 66 ret = btrfs_del_item(trans, root, path); 63 67
+1 -1
fs/btrfs/super.c
··· 685 685 mutex_unlock(&root->d_inode->i_mutex); 686 686 687 687 if (IS_ERR(new_root)) { 688 + dput(root); 688 689 deactivate_locked_super(s); 689 690 error = PTR_ERR(new_root); 690 - dput(root); 691 691 goto error_free_subvol_name; 692 692 } 693 693 if (!new_root->d_inode) {
+19 -1
fs/btrfs/volumes.c
··· 412 412 413 413 device->fs_devices = fs_devices; 414 414 fs_devices->num_devices++; 415 - } else if (strcmp(device->name, path)) { 415 + } else if (!device->name || strcmp(device->name, path)) { 416 416 name = kstrdup(path, GFP_NOFS); 417 417 if (!name) 418 418 return -ENOMEM; 419 419 kfree(device->name); 420 420 device->name = name; 421 + if (device->missing) { 422 + fs_devices->missing_devices--; 423 + device->missing = 0; 424 + } 421 425 } 422 426 423 427 if (found_transid > fs_devices->latest_trans) { ··· 1239 1235 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 1240 1236 1241 1237 device->fs_devices->num_devices--; 1238 + 1239 + if (device->missing) 1240 + root->fs_info->fs_devices->missing_devices--; 1242 1241 1243 1242 next_device = list_entry(root->fs_info->fs_devices->devices.next, 1244 1243 struct btrfs_device, dev_list); ··· 3087 3080 device->devid = devid; 3088 3081 device->work.func = pending_bios_fn; 3089 3082 device->fs_devices = fs_devices; 3083 + device->missing = 1; 3090 3084 fs_devices->num_devices++; 3085 + fs_devices->missing_devices++; 3091 3086 spin_lock_init(&device->io_lock); 3092 3087 INIT_LIST_HEAD(&device->dev_alloc_list); 3093 3088 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE); ··· 3287 3278 device = add_missing_dev(root, devid, dev_uuid); 3288 3279 if (!device) 3289 3280 return -ENOMEM; 3281 + } else if (!device->missing) { 3282 + /* 3283 + * this happens when a device that was properly setup 3284 + * in the device info lists suddenly goes bad. 3285 + * device->bdev is NULL, and so we have to set 3286 + * device->missing to one here 3287 + */ 3288 + root->fs_info->fs_devices->missing_devices++; 3289 + device->missing = 1; 3290 3290 } 3291 3291 } 3292 3292
+2
fs/btrfs/volumes.h
··· 44 44 45 45 int writeable; 46 46 int in_fs_metadata; 47 + int missing; 47 48 48 49 spinlock_t io_lock; 49 50 ··· 94 93 u64 num_devices; 95 94 u64 open_devices; 96 95 u64 rw_devices; 96 + u64 missing_devices; 97 97 u64 total_rw_bytes; 98 98 struct block_device *latest_bdev; 99 99
+4 -3
fs/ceph/dir.c
··· 40 40 if (dentry->d_fsdata) 41 41 return 0; 42 42 43 - if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) 43 + if (dentry->d_parent == NULL || /* nfs fh_to_dentry */ 44 + ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) 44 45 dentry->d_op = &ceph_dentry_ops; 45 46 else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR) 46 47 dentry->d_op = &ceph_snapdir_dentry_ops; ··· 115 114 spin_lock(&dcache_lock); 116 115 117 116 /* start at beginning? */ 118 - if (filp->f_pos == 2 || (last && 119 - filp->f_pos < ceph_dentry(last)->offset)) { 117 + if (filp->f_pos == 2 || last == NULL || 118 + filp->f_pos < ceph_dentry(last)->offset) { 120 119 if (list_empty(&parent->d_subdirs)) 121 120 goto out_unlock; 122 121 p = parent->d_subdirs.prev;
+23 -16
fs/ceph/file.c
··· 282 282 static int striped_read(struct inode *inode, 283 283 u64 off, u64 len, 284 284 struct page **pages, int num_pages, 285 - int *checkeof, bool align_to_pages) 285 + int *checkeof, bool align_to_pages, 286 + unsigned long buf_align) 286 287 { 287 288 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 288 289 struct ceph_inode_info *ci = ceph_inode(inode); ··· 308 307 309 308 more: 310 309 if (align_to_pages) 311 - page_align = (pos - io_align) & ~PAGE_MASK; 310 + page_align = (pos - io_align + buf_align) & ~PAGE_MASK; 312 311 else 313 312 page_align = pos & ~PAGE_MASK; 314 313 this_len = left; ··· 377 376 struct inode *inode = file->f_dentry->d_inode; 378 377 struct page **pages; 379 378 u64 off = *poff; 380 - int num_pages = calc_pages_for(off, len); 381 - int ret; 379 + int num_pages, ret; 382 380 383 381 dout("sync_read on file %p %llu~%u %s\n", file, off, len, 384 382 (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); 385 383 386 - if (file->f_flags & O_DIRECT) 387 - pages = ceph_get_direct_page_vector(data, num_pages); 388 - else 384 + if (file->f_flags & O_DIRECT) { 385 + num_pages = calc_pages_for((unsigned long)data, len); 386 + pages = ceph_get_direct_page_vector(data, num_pages, true); 387 + } else { 388 + num_pages = calc_pages_for(off, len); 389 389 pages = ceph_alloc_page_vector(num_pages, GFP_NOFS); 390 + } 390 391 if (IS_ERR(pages)) 391 392 return PTR_ERR(pages); 392 393 ··· 403 400 goto done; 404 401 405 402 ret = striped_read(inode, off, len, pages, num_pages, checkeof, 406 - file->f_flags & O_DIRECT); 403 + file->f_flags & O_DIRECT, 404 + (unsigned long)data & ~PAGE_MASK); 407 405 408 406 if (ret >= 0 && (file->f_flags & O_DIRECT) == 0) 409 407 ret = ceph_copy_page_vector_to_user(pages, data, off, ret); ··· 413 409 414 410 done: 415 411 if (file->f_flags & O_DIRECT) 416 - ceph_put_page_vector(pages, num_pages); 412 + ceph_put_page_vector(pages, num_pages, true); 417 413 else 418 414 ceph_release_page_vector(pages, num_pages); 419 415 
dout("sync_read result %d\n", ret); ··· 460 456 int do_sync = 0; 461 457 int check_caps = 0; 462 458 int page_align, io_align; 459 + unsigned long buf_align; 463 460 int ret; 464 461 struct timespec mtime = CURRENT_TIME; 465 462 ··· 476 471 pos = *offset; 477 472 478 473 io_align = pos & ~PAGE_MASK; 474 + buf_align = (unsigned long)data & ~PAGE_MASK; 479 475 480 476 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left); 481 477 if (ret < 0) ··· 502 496 */ 503 497 more: 504 498 len = left; 505 - if (file->f_flags & O_DIRECT) 499 + if (file->f_flags & O_DIRECT) { 506 500 /* write from beginning of first page, regardless of 507 501 io alignment */ 508 - page_align = (pos - io_align) & ~PAGE_MASK; 509 - else 502 + page_align = (pos - io_align + buf_align) & ~PAGE_MASK; 503 + num_pages = calc_pages_for((unsigned long)data, len); 504 + } else { 510 505 page_align = pos & ~PAGE_MASK; 506 + num_pages = calc_pages_for(pos, len); 507 + } 511 508 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 512 509 ceph_vino(inode), pos, &len, 513 510 CEPH_OSD_OP_WRITE, flags, ··· 521 512 if (!req) 522 513 return -ENOMEM; 523 514 524 - num_pages = calc_pages_for(pos, len); 525 - 526 515 if (file->f_flags & O_DIRECT) { 527 - pages = ceph_get_direct_page_vector(data, num_pages); 516 + pages = ceph_get_direct_page_vector(data, num_pages, false); 528 517 if (IS_ERR(pages)) { 529 518 ret = PTR_ERR(pages); 530 519 goto out; ··· 572 565 } 573 566 574 567 if (file->f_flags & O_DIRECT) 575 - ceph_put_page_vector(pages, num_pages); 568 + ceph_put_page_vector(pages, num_pages, false); 576 569 else if (file->f_flags & O_SYNC) 577 570 ceph_release_page_vector(pages, num_pages); 578 571
+1 -1
fs/ceph/ioctl.h
··· 4 4 #include <linux/ioctl.h> 5 5 #include <linux/types.h> 6 6 7 - #define CEPH_IOCTL_MAGIC 0x98 7 + #define CEPH_IOCTL_MAGIC 0x97 8 8 9 9 /* just use u64 to align sanely on all archs */ 10 10 struct ceph_ioctl_layout {
+50 -44
fs/ceph/locks.c
··· 11 11 * Implement fcntl and flock locking functions. 12 12 */ 13 13 static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file, 14 - u64 pid, u64 pid_ns, 15 - int cmd, u64 start, u64 length, u8 wait) 14 + int cmd, u8 wait, struct file_lock *fl) 16 15 { 17 16 struct inode *inode = file->f_dentry->d_inode; 18 17 struct ceph_mds_client *mdsc = 19 18 ceph_sb_to_client(inode->i_sb)->mdsc; 20 19 struct ceph_mds_request *req; 21 20 int err; 21 + u64 length = 0; 22 22 23 23 req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS); 24 24 if (IS_ERR(req)) 25 25 return PTR_ERR(req); 26 26 req->r_inode = igrab(inode); 27 27 28 + /* mds requires start and length rather than start and end */ 29 + if (LLONG_MAX == fl->fl_end) 30 + length = 0; 31 + else 32 + length = fl->fl_end - fl->fl_start + 1; 33 + 28 34 dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, " 29 35 "length: %llu, wait: %d, type`: %d", (int)lock_type, 30 - (int)operation, pid, start, length, wait, cmd); 36 + (int)operation, (u64)fl->fl_pid, fl->fl_start, 37 + length, wait, fl->fl_type); 38 + 31 39 32 40 req->r_args.filelock_change.rule = lock_type; 33 41 req->r_args.filelock_change.type = cmd; 34 - req->r_args.filelock_change.pid = cpu_to_le64(pid); 42 + req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid); 35 43 /* This should be adjusted, but I'm not sure if 36 44 namespaces actually get id numbers*/ 37 45 req->r_args.filelock_change.pid_namespace = 38 - cpu_to_le64((u64)pid_ns); 39 - req->r_args.filelock_change.start = cpu_to_le64(start); 46 + cpu_to_le64((u64)(unsigned long)fl->fl_nspid); 47 + req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start); 40 48 req->r_args.filelock_change.length = cpu_to_le64(length); 41 49 req->r_args.filelock_change.wait = wait; 42 50 43 51 err = ceph_mdsc_do_request(mdsc, inode, req); 52 + 53 + if ( operation == CEPH_MDS_OP_GETFILELOCK){ 54 + fl->fl_pid = le64_to_cpu(req->r_reply_info.filelock_reply->pid); 55 + if 
(CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type) 56 + fl->fl_type = F_RDLCK; 57 + else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type) 58 + fl->fl_type = F_WRLCK; 59 + else 60 + fl->fl_type = F_UNLCK; 61 + 62 + fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start); 63 + length = le64_to_cpu(req->r_reply_info.filelock_reply->start) + 64 + le64_to_cpu(req->r_reply_info.filelock_reply->length); 65 + if (length >= 1) 66 + fl->fl_end = length -1; 67 + else 68 + fl->fl_end = 0; 69 + 70 + } 44 71 ceph_mdsc_put_request(req); 45 72 dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, " 46 - "length: %llu, wait: %d, type`: %d err code %d", (int)lock_type, 47 - (int)operation, pid, start, length, wait, cmd, err); 73 + "length: %llu, wait: %d, type`: %d, err code %d", (int)lock_type, 74 + (int)operation, (u64)fl->fl_pid, fl->fl_start, 75 + length, wait, fl->fl_type, err); 48 76 return err; 49 77 } 50 78 ··· 82 54 */ 83 55 int ceph_lock(struct file *file, int cmd, struct file_lock *fl) 84 56 { 85 - u64 length; 86 57 u8 lock_cmd; 87 58 int err; 88 59 u8 wait = 0; ··· 103 76 else 104 77 lock_cmd = CEPH_LOCK_UNLOCK; 105 78 106 - if (LLONG_MAX == fl->fl_end) 107 - length = 0; 108 - else 109 - length = fl->fl_end - fl->fl_start + 1; 110 - 111 - err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file, 112 - (u64)fl->fl_pid, 113 - (u64)(unsigned long)fl->fl_nspid, 114 - lock_cmd, fl->fl_start, 115 - length, wait); 79 + err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file, lock_cmd, wait, fl); 116 80 if (!err) { 117 - dout("mds locked, locking locally"); 118 - err = posix_lock_file(file, fl, NULL); 119 - if (err && (CEPH_MDS_OP_SETFILELOCK == op)) { 120 - /* undo! This should only happen if the kernel detects 121 - * local deadlock. 
*/ 122 - ceph_lock_message(CEPH_LOCK_FCNTL, op, file, 123 - (u64)fl->fl_pid, 124 - (u64)(unsigned long)fl->fl_nspid, 125 - CEPH_LOCK_UNLOCK, fl->fl_start, 126 - length, 0); 127 - dout("got %d on posix_lock_file, undid lock", err); 81 + if ( op != CEPH_MDS_OP_GETFILELOCK ){ 82 + dout("mds locked, locking locally"); 83 + err = posix_lock_file(file, fl, NULL); 84 + if (err && (CEPH_MDS_OP_SETFILELOCK == op)) { 85 + /* undo! This should only happen if the kernel detects 86 + * local deadlock. */ 87 + ceph_lock_message(CEPH_LOCK_FCNTL, op, file, 88 + CEPH_LOCK_UNLOCK, 0, fl); 89 + dout("got %d on posix_lock_file, undid lock", err); 90 + } 128 91 } 92 + 129 93 } else { 130 94 dout("mds returned error code %d", err); 131 95 } ··· 125 107 126 108 int ceph_flock(struct file *file, int cmd, struct file_lock *fl) 127 109 { 128 - u64 length; 129 110 u8 lock_cmd; 130 111 int err; 131 112 u8 wait = 1; ··· 144 127 lock_cmd = CEPH_LOCK_EXCL; 145 128 else 146 129 lock_cmd = CEPH_LOCK_UNLOCK; 147 - /* mds requires start and length rather than start and end */ 148 - if (LLONG_MAX == fl->fl_end) 149 - length = 0; 150 - else 151 - length = fl->fl_end - fl->fl_start + 1; 152 130 153 131 err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK, 154 - file, (u64)fl->fl_pid, 155 - (u64)(unsigned long)fl->fl_nspid, 156 - lock_cmd, fl->fl_start, 157 - length, wait); 132 + file, lock_cmd, wait, fl); 158 133 if (!err) { 159 134 err = flock_lock_file_wait(file, fl); 160 135 if (err) { 161 136 ceph_lock_message(CEPH_LOCK_FLOCK, 162 137 CEPH_MDS_OP_SETFILELOCK, 163 - file, (u64)fl->fl_pid, 164 - (u64)(unsigned long)fl->fl_nspid, 165 - CEPH_LOCK_UNLOCK, fl->fl_start, 166 - length, 0); 138 + file, CEPH_LOCK_UNLOCK, 0, fl); 167 139 dout("got %d on flock_lock_file_wait, undid lock", err); 168 140 } 169 141 } else {
+37 -4
fs/ceph/mds_client.c
··· 202 202 } 203 203 204 204 /* 205 + * parse fcntl F_GETLK results 206 + */ 207 + static int parse_reply_info_filelock(void **p, void *end, 208 + struct ceph_mds_reply_info_parsed *info) 209 + { 210 + if (*p + sizeof(*info->filelock_reply) > end) 211 + goto bad; 212 + 213 + info->filelock_reply = *p; 214 + *p += sizeof(*info->filelock_reply); 215 + 216 + if (unlikely(*p != end)) 217 + goto bad; 218 + return 0; 219 + 220 + bad: 221 + return -EIO; 222 + } 223 + 224 + /* 225 + * parse extra results 226 + */ 227 + static int parse_reply_info_extra(void **p, void *end, 228 + struct ceph_mds_reply_info_parsed *info) 229 + { 230 + if (info->head->op == CEPH_MDS_OP_GETFILELOCK) 231 + return parse_reply_info_filelock(p, end, info); 232 + else 233 + return parse_reply_info_dir(p, end, info); 234 + } 235 + 236 + /* 205 237 * parse entire mds reply 206 238 */ 207 239 static int parse_reply_info(struct ceph_msg *msg, ··· 255 223 goto out_bad; 256 224 } 257 225 258 - /* dir content */ 226 + /* extra */ 259 227 ceph_decode_32_safe(&p, end, len, bad); 260 228 if (len > 0) { 261 - err = parse_reply_info_dir(&p, p+len, info); 229 + err = parse_reply_info_extra(&p, p+len, info); 262 230 if (err < 0) 263 231 goto out_bad; 264 232 } ··· 2106 2074 2107 2075 mutex_lock(&session->s_mutex); 2108 2076 if (err < 0) { 2109 - pr_err("mdsc_handle_reply got corrupt reply mds%d\n", mds); 2077 + pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid); 2110 2078 ceph_msg_dump(msg); 2111 2079 goto out_err; 2112 2080 } ··· 2126 2094 mutex_lock(&req->r_fill_mutex); 2127 2095 err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session); 2128 2096 if (err == 0) { 2129 - if (result == 0 && rinfo->dir_nr) 2097 + if (result == 0 && req->r_op != CEPH_MDS_OP_GETFILELOCK && 2098 + rinfo->dir_nr) 2130 2099 ceph_readdir_prepopulate(req, req->r_session); 2131 2100 ceph_unreserve_caps(mdsc, &req->r_caps_reservation); 2132 2101 }
+21 -10
fs/ceph/mds_client.h
··· 42 42 }; 43 43 44 44 /* 45 - * parsed info about an mds reply, including information about the 46 - * target inode and/or its parent directory and dentry, and directory 47 - * contents (for readdir results). 45 + * parsed info about an mds reply, including information about 46 + * either: 1) the target inode and/or its parent directory and dentry, 47 + * and directory contents (for readdir results), or 48 + * 2) the file range lock info (for fcntl F_GETLK results). 48 49 */ 49 50 struct ceph_mds_reply_info_parsed { 50 51 struct ceph_mds_reply_head *head; 51 52 53 + /* trace */ 52 54 struct ceph_mds_reply_info_in diri, targeti; 53 55 struct ceph_mds_reply_dirfrag *dirfrag; 54 56 char *dname; 55 57 u32 dname_len; 56 58 struct ceph_mds_reply_lease *dlease; 57 59 58 - struct ceph_mds_reply_dirfrag *dir_dir; 59 - int dir_nr; 60 - char **dir_dname; 61 - u32 *dir_dname_len; 62 - struct ceph_mds_reply_lease **dir_dlease; 63 - struct ceph_mds_reply_info_in *dir_in; 64 - u8 dir_complete, dir_end; 60 + /* extra */ 61 + union { 62 + /* for fcntl F_GETLK results */ 63 + struct ceph_filelock *filelock_reply; 64 + 65 + /* for readdir results */ 66 + struct { 67 + struct ceph_mds_reply_dirfrag *dir_dir; 68 + int dir_nr; 69 + char **dir_dname; 70 + u32 *dir_dname_len; 71 + struct ceph_mds_reply_lease **dir_dlease; 72 + struct ceph_mds_reply_info_in *dir_in; 73 + u8 dir_complete, dir_end; 74 + }; 75 + }; 65 76 66 77 /* encoded blob describing snapshot contexts for certain 67 78 operations (e.g., open) */
+3 -1
fs/cifs/Makefile
··· 6 6 cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \ 7 7 link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o \ 8 8 md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o \ 9 - readdir.o ioctl.o sess.o export.o cifsacl.o 9 + readdir.o ioctl.o sess.o export.o 10 + 11 + cifs-$(CONFIG_CIFS_ACL) += cifsacl.o 10 12 11 13 cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o 12 14
+9
fs/cifs/README
··· 337 337 wsize default write size (default 57344) 338 338 maximum wsize currently allowed by CIFS is 57344 (fourteen 339 339 4096 byte pages) 340 + actimeo=n attribute cache timeout in seconds (default 1 second). 341 + After this timeout, the cifs client requests fresh attribute 342 + information from the server. This option allows to tune the 343 + attribute cache timeout to suit the workload needs. Shorter 344 + timeouts mean better the cache coherency, but increased number 345 + of calls to the server. Longer timeouts mean reduced number 346 + of calls to the server at the expense of less stricter cache 347 + coherency checks (i.e. incorrect attribute cache for a short 348 + period of time). 340 349 rw mount the network share read-write (note that the 341 350 server may still consider the share read-only) 342 351 ro mount network share read-only
+1
fs/cifs/cifs_fs_sb.h
··· 48 48 struct nls_table *local_nls; 49 49 unsigned int rsize; 50 50 unsigned int wsize; 51 + unsigned long actimeo; /* attribute cache timeout (jiffies) */ 51 52 atomic_t active; 52 53 uid_t mnt_uid; 53 54 gid_t mnt_gid;
-3
fs/cifs/cifsacl.c
··· 30 30 #include "cifs_debug.h" 31 31 32 32 33 - #ifdef CONFIG_CIFS_EXPERIMENTAL 34 - 35 33 static struct cifs_wksid wksidarr[NUM_WK_SIDS] = { 36 34 {{1, 0, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0} }, "null user"}, 37 35 {{1, 1, {0, 0, 0, 0, 0, 1}, {0, 0, 0, 0, 0} }, "nobody"}, ··· 772 774 773 775 return rc; 774 776 } 775 - #endif /* CONFIG_CIFS_EXPERIMENTAL */
-4
fs/cifs/cifsacl.h
··· 74 74 char sidname[SIDNAMELENGTH]; 75 75 } __attribute__((packed)); 76 76 77 - #ifdef CONFIG_CIFS_EXPERIMENTAL 78 - 79 77 extern int match_sid(struct cifs_sid *); 80 78 extern int compare_sids(const struct cifs_sid *, const struct cifs_sid *); 81 - 82 - #endif /* CONFIG_CIFS_EXPERIMENTAL */ 83 79 84 80 #endif /* _CIFSACL_H */
+2 -1
fs/cifs/cifsfs.c
··· 463 463 464 464 seq_printf(s, ",rsize=%d", cifs_sb->rsize); 465 465 seq_printf(s, ",wsize=%d", cifs_sb->wsize); 466 + /* convert actimeo and display it in seconds */ 467 + seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ); 466 468 467 469 return 0; 468 470 } ··· 937 935 GlobalCurrentXid = 0; 938 936 GlobalTotalActiveXid = 0; 939 937 GlobalMaxActiveXid = 0; 940 - memset(Local_System_Name, 0, 15); 941 938 spin_lock_init(&cifs_tcp_ses_lock); 942 939 spin_lock_init(&cifs_file_list_lock); 943 940 spin_lock_init(&GlobalMid_Lock);
+10 -2
fs/cifs/cifsglob.h
··· 45 45 #define CIFS_MIN_RCV_POOL 4 46 46 47 47 /* 48 + * default attribute cache timeout (jiffies) 49 + */ 50 + #define CIFS_DEF_ACTIMEO (1 * HZ) 51 + 52 + /* 53 + * max attribute cache timeout (jiffies) - 2^30 54 + */ 55 + #define CIFS_MAX_ACTIMEO (1 << 30) 56 + 57 + /* 48 58 * MAX_REQ is the maximum number of requests that WE will send 49 59 * on one socket concurrently. It also matches the most common 50 60 * value of max multiplex returned by servers. We may ··· 756 746 GLOBAL_EXTERN unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ 757 747 GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above & list operations */ 758 748 /* on midQ entries */ 759 - GLOBAL_EXTERN char Local_System_Name[15]; 760 - 761 749 /* 762 750 * Global counters, updated atomically 763 751 */
+2 -3
fs/cifs/cifsproto.h
··· 54 54 __func__, curr_xid, (int)rc); \ 55 55 } while (0) 56 56 extern char *build_path_from_dentry(struct dentry *); 57 - extern char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb); 57 + extern char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb, 58 + struct cifsTconInfo *tcon); 58 59 extern char *build_wildcard_path_from_dentry(struct dentry *direntry); 59 60 extern char *cifs_compose_mount_options(const char *sb_mountdata, 60 61 const char *fullpath, const struct dfs_info3_param *ref, ··· 80 79 struct TCP_Server_Info *); 81 80 extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof); 82 81 extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool); 83 - #ifdef CONFIG_CIFS_EXPERIMENTAL 84 82 extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool); 85 - #endif 86 83 extern unsigned int smbCalcSize(struct smb_hdr *ptr); 87 84 extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr); 88 85 extern int decode_negTokenInit(unsigned char *security_blob, int length,
+92 -91
fs/cifs/cifssmb.c
··· 2478 2478 } 2479 2479 2480 2480 #ifdef CONFIG_CIFS_EXPERIMENTAL 2481 - /* Initialize NT TRANSACT SMB into small smb request buffer. 2482 - This assumes that all NT TRANSACTS that we init here have 2483 - total parm and data under about 400 bytes (to fit in small cifs 2484 - buffer size), which is the case so far, it easily fits. NB: 2485 - Setup words themselves and ByteCount 2486 - MaxSetupCount (size of returned setup area) and 2487 - MaxParameterCount (returned parms size) must be set by caller */ 2488 - static int 2489 - smb_init_nttransact(const __u16 sub_command, const int setup_count, 2490 - const int parm_len, struct cifsTconInfo *tcon, 2491 - void **ret_buf) 2492 - { 2493 - int rc; 2494 - __u32 temp_offset; 2495 - struct smb_com_ntransact_req *pSMB; 2496 - 2497 - rc = small_smb_init(SMB_COM_NT_TRANSACT, 19 + setup_count, tcon, 2498 - (void **)&pSMB); 2499 - if (rc) 2500 - return rc; 2501 - *ret_buf = (void *)pSMB; 2502 - pSMB->Reserved = 0; 2503 - pSMB->TotalParameterCount = cpu_to_le32(parm_len); 2504 - pSMB->TotalDataCount = 0; 2505 - pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf - 2506 - MAX_CIFS_HDR_SIZE) & 0xFFFFFF00); 2507 - pSMB->ParameterCount = pSMB->TotalParameterCount; 2508 - pSMB->DataCount = pSMB->TotalDataCount; 2509 - temp_offset = offsetof(struct smb_com_ntransact_req, Parms) + 2510 - (setup_count * 2) - 4 /* for rfc1001 length itself */; 2511 - pSMB->ParameterOffset = cpu_to_le32(temp_offset); 2512 - pSMB->DataOffset = cpu_to_le32(temp_offset + parm_len); 2513 - pSMB->SetupCount = setup_count; /* no need to le convert byte fields */ 2514 - pSMB->SubCommand = cpu_to_le16(sub_command); 2515 - return 0; 2516 - } 2517 - 2518 - static int 2519 - validate_ntransact(char *buf, char **ppparm, char **ppdata, 2520 - __u32 *pparmlen, __u32 *pdatalen) 2521 - { 2522 - char *end_of_smb; 2523 - __u32 data_count, data_offset, parm_count, parm_offset; 2524 - struct smb_com_ntransact_rsp *pSMBr; 2525 - 2526 - *pdatalen = 0; 2527 - 
*pparmlen = 0; 2528 - 2529 - if (buf == NULL) 2530 - return -EINVAL; 2531 - 2532 - pSMBr = (struct smb_com_ntransact_rsp *)buf; 2533 - 2534 - /* ByteCount was converted from little endian in SendReceive */ 2535 - end_of_smb = 2 /* sizeof byte count */ + pSMBr->ByteCount + 2536 - (char *)&pSMBr->ByteCount; 2537 - 2538 - data_offset = le32_to_cpu(pSMBr->DataOffset); 2539 - data_count = le32_to_cpu(pSMBr->DataCount); 2540 - parm_offset = le32_to_cpu(pSMBr->ParameterOffset); 2541 - parm_count = le32_to_cpu(pSMBr->ParameterCount); 2542 - 2543 - *ppparm = (char *)&pSMBr->hdr.Protocol + parm_offset; 2544 - *ppdata = (char *)&pSMBr->hdr.Protocol + data_offset; 2545 - 2546 - /* should we also check that parm and data areas do not overlap? */ 2547 - if (*ppparm > end_of_smb) { 2548 - cFYI(1, "parms start after end of smb"); 2549 - return -EINVAL; 2550 - } else if (parm_count + *ppparm > end_of_smb) { 2551 - cFYI(1, "parm end after end of smb"); 2552 - return -EINVAL; 2553 - } else if (*ppdata > end_of_smb) { 2554 - cFYI(1, "data starts after end of smb"); 2555 - return -EINVAL; 2556 - } else if (data_count + *ppdata > end_of_smb) { 2557 - cFYI(1, "data %p + count %d (%p) past smb end %p start %p", 2558 - *ppdata, data_count, (data_count + *ppdata), 2559 - end_of_smb, pSMBr); 2560 - return -EINVAL; 2561 - } else if (parm_count + data_count > pSMBr->ByteCount) { 2562 - cFYI(1, "parm count and data count larger than SMB"); 2563 - return -EINVAL; 2564 - } 2565 - *pdatalen = data_count; 2566 - *pparmlen = parm_count; 2567 - return 0; 2568 - } 2569 - 2570 2481 int 2571 2482 CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon, 2572 2483 const unsigned char *searchName, ··· 2967 3056 2968 3057 #endif /* CONFIG_POSIX */ 2969 3058 2970 - #ifdef CONFIG_CIFS_EXPERIMENTAL 3059 + #ifdef CONFIG_CIFS_ACL 3060 + /* 3061 + * Initialize NT TRANSACT SMB into small smb request buffer. 
This assumes that 3062 + * all NT TRANSACTS that we init here have total parm and data under about 400 3063 + * bytes (to fit in small cifs buffer size), which is the case so far, it 3064 + * easily fits. NB: Setup words themselves and ByteCount MaxSetupCount (size of 3065 + * returned setup area) and MaxParameterCount (returned parms size) must be set 3066 + * by caller 3067 + */ 3068 + static int 3069 + smb_init_nttransact(const __u16 sub_command, const int setup_count, 3070 + const int parm_len, struct cifsTconInfo *tcon, 3071 + void **ret_buf) 3072 + { 3073 + int rc; 3074 + __u32 temp_offset; 3075 + struct smb_com_ntransact_req *pSMB; 3076 + 3077 + rc = small_smb_init(SMB_COM_NT_TRANSACT, 19 + setup_count, tcon, 3078 + (void **)&pSMB); 3079 + if (rc) 3080 + return rc; 3081 + *ret_buf = (void *)pSMB; 3082 + pSMB->Reserved = 0; 3083 + pSMB->TotalParameterCount = cpu_to_le32(parm_len); 3084 + pSMB->TotalDataCount = 0; 3085 + pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf - 3086 + MAX_CIFS_HDR_SIZE) & 0xFFFFFF00); 3087 + pSMB->ParameterCount = pSMB->TotalParameterCount; 3088 + pSMB->DataCount = pSMB->TotalDataCount; 3089 + temp_offset = offsetof(struct smb_com_ntransact_req, Parms) + 3090 + (setup_count * 2) - 4 /* for rfc1001 length itself */; 3091 + pSMB->ParameterOffset = cpu_to_le32(temp_offset); 3092 + pSMB->DataOffset = cpu_to_le32(temp_offset + parm_len); 3093 + pSMB->SetupCount = setup_count; /* no need to le convert byte fields */ 3094 + pSMB->SubCommand = cpu_to_le16(sub_command); 3095 + return 0; 3096 + } 3097 + 3098 + static int 3099 + validate_ntransact(char *buf, char **ppparm, char **ppdata, 3100 + __u32 *pparmlen, __u32 *pdatalen) 3101 + { 3102 + char *end_of_smb; 3103 + __u32 data_count, data_offset, parm_count, parm_offset; 3104 + struct smb_com_ntransact_rsp *pSMBr; 3105 + 3106 + *pdatalen = 0; 3107 + *pparmlen = 0; 3108 + 3109 + if (buf == NULL) 3110 + return -EINVAL; 3111 + 3112 + pSMBr = (struct smb_com_ntransact_rsp *)buf; 3113 + 
3114 + /* ByteCount was converted from little endian in SendReceive */ 3115 + end_of_smb = 2 /* sizeof byte count */ + pSMBr->ByteCount + 3116 + (char *)&pSMBr->ByteCount; 3117 + 3118 + data_offset = le32_to_cpu(pSMBr->DataOffset); 3119 + data_count = le32_to_cpu(pSMBr->DataCount); 3120 + parm_offset = le32_to_cpu(pSMBr->ParameterOffset); 3121 + parm_count = le32_to_cpu(pSMBr->ParameterCount); 3122 + 3123 + *ppparm = (char *)&pSMBr->hdr.Protocol + parm_offset; 3124 + *ppdata = (char *)&pSMBr->hdr.Protocol + data_offset; 3125 + 3126 + /* should we also check that parm and data areas do not overlap? */ 3127 + if (*ppparm > end_of_smb) { 3128 + cFYI(1, "parms start after end of smb"); 3129 + return -EINVAL; 3130 + } else if (parm_count + *ppparm > end_of_smb) { 3131 + cFYI(1, "parm end after end of smb"); 3132 + return -EINVAL; 3133 + } else if (*ppdata > end_of_smb) { 3134 + cFYI(1, "data starts after end of smb"); 3135 + return -EINVAL; 3136 + } else if (data_count + *ppdata > end_of_smb) { 3137 + cFYI(1, "data %p + count %d (%p) past smb end %p start %p", 3138 + *ppdata, data_count, (data_count + *ppdata), 3139 + end_of_smb, pSMBr); 3140 + return -EINVAL; 3141 + } else if (parm_count + data_count > pSMBr->ByteCount) { 3142 + cFYI(1, "parm count and data count larger than SMB"); 3143 + return -EINVAL; 3144 + } 3145 + *pdatalen = data_count; 3146 + *pparmlen = parm_count; 3147 + return 0; 3148 + } 3149 + 2971 3150 /* Get Security Descriptor (by handle) from remote server for a file or dir */ 2972 3151 int 2973 3152 CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid, ··· 3215 3214 return (rc); 3216 3215 } 3217 3216 3218 - #endif /* CONFIG_CIFS_EXPERIMENTAL */ 3217 + #endif /* CONFIG_CIFS_ACL */ 3219 3218 3220 3219 /* Legacy Query Path Information call for lookup to old servers such 3221 3220 as Win9x/WinME */
+27 -15
fs/cifs/connect.c
··· 105 105 unsigned int wsize; 106 106 bool sockopt_tcp_nodelay:1; 107 107 unsigned short int port; 108 + unsigned long actimeo; /* attribute cache timeout (jiffies) */ 108 109 char *prepath; 109 110 struct sockaddr_storage srcaddr; /* allow binding to a local IP */ 110 111 struct nls_table *local_nls; ··· 807 806 short int override_gid = -1; 808 807 bool uid_specified = false; 809 808 bool gid_specified = false; 809 + char *nodename = utsname()->nodename; 810 810 811 811 separator[0] = ','; 812 812 separator[1] = 0; 813 813 814 - if (Local_System_Name[0] != 0) 815 - memcpy(vol->source_rfc1001_name, Local_System_Name, 15); 816 - else { 817 - char *nodename = utsname()->nodename; 818 - int n = strnlen(nodename, 15); 819 - memset(vol->source_rfc1001_name, 0x20, 15); 820 - for (i = 0; i < n; i++) { 821 - /* does not have to be perfect mapping since field is 822 - informational, only used for servers that do not support 823 - port 445 and it can be overridden at mount time */ 824 - vol->source_rfc1001_name[i] = toupper(nodename[i]); 825 - } 826 - } 814 + /* 815 + * does not have to be perfect mapping since field is 816 + * informational, only used for servers that do not support 817 + * port 445 and it can be overridden at mount time 818 + */ 819 + memset(vol->source_rfc1001_name, 0x20, 15); 820 + for (i = 0; i < strnlen(nodename, 15); i++) 821 + vol->source_rfc1001_name[i] = toupper(nodename[i]); 822 + 827 823 vol->source_rfc1001_name[15] = 0; 828 824 /* null target name indicates to use *SMBSERVR default called name 829 825 if we end up sending RFC1001 session initialize */ ··· 837 839 vol->posix_paths = 1; 838 840 /* default to using server inode numbers where available */ 839 841 vol->server_ino = 1; 842 + 843 + vol->actimeo = CIFS_DEF_ACTIMEO; 840 844 841 845 if (!options) 842 846 return 1; ··· 1213 1213 if ((i == 15) && (value[i] != 0)) 1214 1214 printk(KERN_WARNING "CIFS: server net" 1215 1215 "biosname longer than 15 truncated.\n"); 1216 + } 1217 + } else if 
(strnicmp(data, "actimeo", 7) == 0) { 1218 + if (value && *value) { 1219 + vol->actimeo = HZ * simple_strtoul(value, 1220 + &value, 0); 1221 + if (vol->actimeo > CIFS_MAX_ACTIMEO) { 1222 + cERROR(1, "CIFS: attribute cache" 1223 + "timeout too large"); 1224 + return 1; 1225 + } 1216 1226 } 1217 1227 } else if (strnicmp(data, "credentials", 4) == 0) { 1218 1228 /* ignore */ ··· 2581 2571 cFYI(1, "file mode: 0x%x dir mode: 0x%x", 2582 2572 cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode); 2583 2573 2574 + cifs_sb->actimeo = pvolume_info->actimeo; 2575 + 2584 2576 if (pvolume_info->noperm) 2585 2577 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM; 2586 2578 if (pvolume_info->setuids) ··· 2833 2821 /* check if a whole path (including prepath) is not remote */ 2834 2822 if (!rc && cifs_sb->prepathlen && tcon) { 2835 2823 /* build_path_to_root works only when we have a valid tcon */ 2836 - full_path = cifs_build_path_to_root(cifs_sb); 2824 + full_path = cifs_build_path_to_root(cifs_sb, tcon); 2837 2825 if (full_path == NULL) { 2838 2826 rc = -ENOMEM; 2839 2827 goto mount_fail_check; 2840 2828 } 2841 2829 rc = is_path_accessible(xid, tcon, cifs_sb, full_path); 2842 - if (rc != -EREMOTE) { 2830 + if (rc != 0 && rc != -EREMOTE) { 2843 2831 kfree(full_path); 2844 2832 goto mount_fail_check; 2845 2833 }
-2
fs/cifs/file.c
··· 1108 1108 return total_written; 1109 1109 } 1110 1110 1111 - #ifdef CONFIG_CIFS_EXPERIMENTAL 1112 1111 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, 1113 1112 bool fsuid_only) 1114 1113 { ··· 1141 1142 spin_unlock(&cifs_file_list_lock); 1142 1143 return NULL; 1143 1144 } 1144 - #endif 1145 1145 1146 1146 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode, 1147 1147 bool fsuid_only)
+11 -10
fs/cifs/inode.c
··· 686 686 cFYI(1, "cifs_sfu_type failed: %d", tmprc); 687 687 } 688 688 689 - #ifdef CONFIG_CIFS_EXPERIMENTAL 689 + #ifdef CONFIG_CIFS_ACL 690 690 /* fill in 0777 bits from ACL */ 691 691 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { 692 692 rc = cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path, ··· 697 697 goto cgii_exit; 698 698 } 699 699 } 700 - #endif 700 + #endif /* CONFIG_CIFS_ACL */ 701 701 702 702 /* fill in remaining high mode bits e.g. SUID, VTX */ 703 703 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) ··· 728 728 .lookup = cifs_lookup, 729 729 }; 730 730 731 - char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb) 731 + char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb, 732 + struct cifsTconInfo *tcon) 732 733 { 733 734 int pplen = cifs_sb->prepathlen; 734 735 int dfsplen; 735 736 char *full_path = NULL; 736 - struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); 737 737 738 738 /* if no prefix path, simply set path to the root of share to "" */ 739 739 if (pplen == 0) { ··· 875 875 char *full_path; 876 876 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); 877 877 878 - full_path = cifs_build_path_to_root(cifs_sb); 878 + full_path = cifs_build_path_to_root(cifs_sb, tcon); 879 879 if (full_path == NULL) 880 880 return ERR_PTR(-ENOMEM); 881 881 ··· 1653 1653 cifs_inode_needs_reval(struct inode *inode) 1654 1654 { 1655 1655 struct cifsInodeInfo *cifs_i = CIFS_I(inode); 1656 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1656 1657 1657 1658 if (cifs_i->clientCanCacheRead) 1658 1659 return false; ··· 1664 1663 if (cifs_i->time == 0) 1665 1664 return true; 1666 1665 1667 - /* FIXME: the actimeo should be tunable */ 1668 - if (time_after_eq(jiffies, cifs_i->time + HZ)) 1666 + if (!time_in_range(jiffies, cifs_i->time, 1667 + cifs_i->time + cifs_sb->actimeo)) 1669 1668 return true; 1670 1669 1671 1670 /* hardlinked files w/ noserverino get "special" treatment */ 1672 - if (!(CIFS_SB(inode->i_sb)->mnt_cifs_flags 
& CIFS_MOUNT_SERVER_INUM) && 1671 + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) && 1673 1672 S_ISREG(inode->i_mode) && inode->i_nlink != 1) 1674 1673 return true; 1675 1674 ··· 2122 2121 2123 2122 if (attrs->ia_valid & ATTR_MODE) { 2124 2123 rc = 0; 2125 - #ifdef CONFIG_CIFS_EXPERIMENTAL 2124 + #ifdef CONFIG_CIFS_ACL 2126 2125 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { 2127 2126 rc = mode_to_cifs_acl(inode, full_path, mode); 2128 2127 if (rc) { ··· 2131 2130 goto cifs_setattr_exit; 2132 2131 } 2133 2132 } else 2134 - #endif 2133 + #endif /* CONFIG_CIFS_ACL */ 2135 2134 if (((mode & S_IWUGO) == 0) && 2136 2135 (cifsInode->cifsAttrs & ATTR_READONLY) == 0) { 2137 2136
-12
fs/cifs/readdir.c
··· 759 759 rc = filldir(direntry, qstring.name, qstring.len, file->f_pos, 760 760 ino, fattr.cf_dtype); 761 761 762 - /* 763 - * we can not return filldir errors to the caller since they are 764 - * "normal" when the stat blocksize is too small - we return remapped 765 - * error instead 766 - * 767 - * FIXME: This looks bogus. filldir returns -EOVERFLOW in the above 768 - * case already. Why should we be clobbering other errors from it? 769 - */ 770 - if (rc) { 771 - cFYI(1, "filldir rc = %d", rc); 772 - rc = -EOVERFLOW; 773 - } 774 762 dput(tmp_dentry); 775 763 return rc; 776 764 }
+5
fs/exec.c
··· 275 275 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; 276 276 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 277 277 INIT_LIST_HEAD(&vma->anon_vma_chain); 278 + 279 + err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1); 280 + if (err) 281 + goto err; 282 + 278 283 err = insert_vm_struct(mm, vma); 279 284 if (err) 280 285 goto err;
+1
fs/ext4/ext4.h
··· 910 910 #define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ 911 911 #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ 912 912 #define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */ 913 + #define EXT4_MOUNT_MBLK_IO_SUBMIT 0x4000000 /* multi-block io submits */ 913 914 #define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */ 914 915 #define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */ 915 916 #define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */
+4 -1
fs/ext4/inode.c
··· 2125 2125 */ 2126 2126 if (unlikely(journal_data && PageChecked(page))) 2127 2127 err = __ext4_journalled_writepage(page, len); 2128 - else 2128 + else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT)) 2129 2129 err = ext4_bio_write_page(&io_submit, page, 2130 2130 len, mpd->wbc); 2131 + else 2132 + err = block_write_full_page(page, 2133 + noalloc_get_block_write, mpd->wbc); 2131 2134 2132 2135 if (!err) 2133 2136 mpd->pages_written++;
+1 -1
fs/ext4/namei.c
··· 872 872 if (namelen > EXT4_NAME_LEN) 873 873 return NULL; 874 874 if ((namelen <= 2) && (name[0] == '.') && 875 - (name[1] == '.' || name[1] == '0')) { 875 + (name[1] == '.' || name[1] == '\0')) { 876 876 /* 877 877 * "." or ".." will only be in the first block 878 878 * NFS may look up ".."; "." should be handled by the VFS
+12 -2
fs/ext4/super.c
··· 1026 1026 !(def_mount_opts & EXT4_DEFM_NODELALLOC)) 1027 1027 seq_puts(seq, ",nodelalloc"); 1028 1028 1029 + if (test_opt(sb, MBLK_IO_SUBMIT)) 1030 + seq_puts(seq, ",mblk_io_submit"); 1029 1031 if (sbi->s_stripe) 1030 1032 seq_printf(seq, ",stripe=%lu", sbi->s_stripe); 1031 1033 /* ··· 1241 1239 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, 1242 1240 Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err, 1243 1241 Opt_resize, Opt_usrquota, Opt_grpquota, Opt_i_version, 1244 - Opt_stripe, Opt_delalloc, Opt_nodelalloc, 1245 - Opt_block_validity, Opt_noblock_validity, 1242 + Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit, 1243 + Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, 1246 1244 Opt_inode_readahead_blks, Opt_journal_ioprio, 1247 1245 Opt_dioread_nolock, Opt_dioread_lock, 1248 1246 Opt_discard, Opt_nodiscard, ··· 1306 1304 {Opt_resize, "resize"}, 1307 1305 {Opt_delalloc, "delalloc"}, 1308 1306 {Opt_nodelalloc, "nodelalloc"}, 1307 + {Opt_mblk_io_submit, "mblk_io_submit"}, 1308 + {Opt_nomblk_io_submit, "nomblk_io_submit"}, 1309 1309 {Opt_block_validity, "block_validity"}, 1310 1310 {Opt_noblock_validity, "noblock_validity"}, 1311 1311 {Opt_inode_readahead_blks, "inode_readahead_blks=%u"}, ··· 1728 1724 break; 1729 1725 case Opt_nodelalloc: 1730 1726 clear_opt(sbi->s_mount_opt, DELALLOC); 1727 + break; 1728 + case Opt_mblk_io_submit: 1729 + set_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT); 1730 + break; 1731 + case Opt_nomblk_io_submit: 1732 + clear_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT); 1731 1733 break; 1732 1734 case Opt_stripe: 1733 1735 if (match_int(&args[0], &option))
+66 -6
fs/fuse/file.c
··· 13 13 #include <linux/kernel.h> 14 14 #include <linux/sched.h> 15 15 #include <linux/module.h> 16 + #include <linux/compat.h> 16 17 17 18 static const struct file_operations fuse_direct_io_file_operations; 18 19 ··· 1629 1628 } 1630 1629 1631 1630 /* 1631 + * CUSE servers compiled on 32bit broke on 64bit kernels because the 1632 + * ABI was defined to be 'struct iovec' which is different on 32bit 1633 + * and 64bit. Fortunately we can determine which structure the server 1634 + * used from the size of the reply. 1635 + */ 1636 + static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, 1637 + size_t transferred, unsigned count, 1638 + bool is_compat) 1639 + { 1640 + #ifdef CONFIG_COMPAT 1641 + if (count * sizeof(struct compat_iovec) == transferred) { 1642 + struct compat_iovec *ciov = src; 1643 + unsigned i; 1644 + 1645 + /* 1646 + * With this interface a 32bit server cannot support 1647 + * non-compat (i.e. ones coming from 64bit apps) ioctl 1648 + * requests 1649 + */ 1650 + if (!is_compat) 1651 + return -EINVAL; 1652 + 1653 + for (i = 0; i < count; i++) { 1654 + dst[i].iov_base = compat_ptr(ciov[i].iov_base); 1655 + dst[i].iov_len = ciov[i].iov_len; 1656 + } 1657 + return 0; 1658 + } 1659 + #endif 1660 + 1661 + if (count * sizeof(struct iovec) != transferred) 1662 + return -EIO; 1663 + 1664 + memcpy(dst, src, transferred); 1665 + return 0; 1666 + } 1667 + 1668 + /* Make sure iov_length() won't overflow */ 1669 + static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) 1670 + { 1671 + size_t n; 1672 + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; 1673 + 1674 + for (n = 0; n < count; n++) { 1675 + if (iov->iov_len > (size_t) max) 1676 + return -ENOMEM; 1677 + max -= iov->iov_len; 1678 + } 1679 + return 0; 1680 + } 1681 + 1682 + /* 1632 1683 * For ioctls, there is no generic way to determine how much memory 1633 1684 * needs to be read and/or written. 
Furthermore, ioctls are allowed 1634 1685 * to dereference the passed pointer, so the parameter requires deep ··· 1861 1808 in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) 1862 1809 goto out; 1863 1810 1864 - err = -EIO; 1865 - if ((in_iovs + out_iovs) * sizeof(struct iovec) != transferred) 1866 - goto out; 1867 - 1868 - /* okay, copy in iovs and retry */ 1869 1811 vaddr = kmap_atomic(pages[0], KM_USER0); 1870 - memcpy(page_address(iov_page), vaddr, transferred); 1812 + err = fuse_copy_ioctl_iovec(page_address(iov_page), vaddr, 1813 + transferred, in_iovs + out_iovs, 1814 + (flags & FUSE_IOCTL_COMPAT) != 0); 1871 1815 kunmap_atomic(vaddr, KM_USER0); 1816 + if (err) 1817 + goto out; 1872 1818 1873 1819 in_iov = page_address(iov_page); 1874 1820 out_iov = in_iov + in_iovs; 1821 + 1822 + err = fuse_verify_ioctl_iov(in_iov, in_iovs); 1823 + if (err) 1824 + goto out; 1825 + 1826 + err = fuse_verify_ioctl_iov(out_iov, out_iovs); 1827 + if (err) 1828 + goto out; 1875 1829 1876 1830 goto retry; 1877 1831 }
+3
fs/namei.c
··· 1748 1748 if (!(open_flag & O_CREAT)) 1749 1749 mode = 0; 1750 1750 1751 + /* Must never be set by userspace */ 1752 + open_flag &= ~FMODE_NONOTIFY; 1753 + 1751 1754 /* 1752 1755 * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only 1753 1756 * check for O_DSYNC if the need any syncing at all we enforce it's
+35 -41
fs/nfs/dir.c
··· 57 57 struct inode *, struct dentry *); 58 58 static int nfs_fsync_dir(struct file *, int); 59 59 static loff_t nfs_llseek_dir(struct file *, loff_t, int); 60 - static int nfs_readdir_clear_array(struct page*, gfp_t); 60 + static void nfs_readdir_clear_array(struct page*); 61 61 62 62 const struct file_operations nfs_dir_operations = { 63 63 .llseek = nfs_llseek_dir, ··· 83 83 .setattr = nfs_setattr, 84 84 }; 85 85 86 - const struct address_space_operations nfs_dir_addr_space_ops = { 87 - .releasepage = nfs_readdir_clear_array, 86 + const struct address_space_operations nfs_dir_aops = { 87 + .freepage = nfs_readdir_clear_array, 88 88 }; 89 89 90 90 #ifdef CONFIG_NFS_V3 ··· 178 178 struct page *page; 179 179 unsigned long page_index; 180 180 u64 *dir_cookie; 181 + u64 last_cookie; 181 182 loff_t current_index; 182 183 decode_dirent_t decode; 183 184 ··· 214 213 * we are freeing strings created by nfs_add_to_readdir_array() 215 214 */ 216 215 static 217 - int nfs_readdir_clear_array(struct page *page, gfp_t mask) 216 + void nfs_readdir_clear_array(struct page *page) 218 217 { 219 - struct nfs_cache_array *array = nfs_readdir_get_array(page); 218 + struct nfs_cache_array *array; 220 219 int i; 221 220 222 - if (IS_ERR(array)) 223 - return PTR_ERR(array); 221 + array = kmap_atomic(page, KM_USER0); 224 222 for (i = 0; i < array->size; i++) 225 223 kfree(array->array[i].string.name); 226 - nfs_readdir_release_array(page); 227 - return 0; 224 + kunmap_atomic(array, KM_USER0); 228 225 } 229 226 230 227 /* ··· 271 272 goto out; 272 273 array->last_cookie = entry->cookie; 273 274 array->size++; 274 - if (entry->eof == 1) 275 + if (entry->eof != 0) 275 276 array->eof_index = array->size; 276 277 out: 277 278 nfs_readdir_release_array(page); ··· 311 312 for (i = 0; i < array->size; i++) { 312 313 if (array->array[i].cookie == *desc->dir_cookie) { 313 314 desc->cache_entry_index = i; 314 - status = 0; 315 - goto out; 315 + return 0; 316 316 } 317 317 } 318 - if (i == 
array->eof_index) { 319 - desc->eof = 1; 318 + if (array->eof_index >= 0) { 320 319 status = -EBADCOOKIE; 320 + if (*desc->dir_cookie == array->last_cookie) 321 + desc->eof = 1; 321 322 } 322 - out: 323 323 return status; 324 324 } 325 325 ··· 326 328 int nfs_readdir_search_array(nfs_readdir_descriptor_t *desc) 327 329 { 328 330 struct nfs_cache_array *array; 329 - int status = -EBADCOOKIE; 330 - 331 - if (desc->dir_cookie == NULL) 332 - goto out; 331 + int status; 333 332 334 333 array = nfs_readdir_get_array(desc->page); 335 334 if (IS_ERR(array)) { ··· 339 344 else 340 345 status = nfs_readdir_search_for_cookie(array, desc); 341 346 347 + if (status == -EAGAIN) { 348 + desc->last_cookie = array->last_cookie; 349 + desc->page_index++; 350 + } 342 351 nfs_readdir_release_array(desc->page); 343 352 out: 344 353 return status; ··· 489 490 490 491 count++; 491 492 492 - if (desc->plus == 1) 493 + if (desc->plus != 0) 493 494 nfs_prime_dcache(desc->file->f_path.dentry, entry); 494 495 495 496 status = nfs_readdir_add_to_array(entry, page); ··· 497 498 break; 498 499 } while (!entry->eof); 499 500 500 - if (count == 0 || (status == -EBADCOOKIE && entry->eof == 1)) { 501 + if (count == 0 || (status == -EBADCOOKIE && entry->eof != 0)) { 501 502 array = nfs_readdir_get_array(page); 502 503 if (!IS_ERR(array)) { 503 504 array->eof_index = array->size; ··· 562 563 unsigned int array_size = ARRAY_SIZE(pages); 563 564 564 565 entry.prev_cookie = 0; 565 - entry.cookie = *desc->dir_cookie; 566 + entry.cookie = desc->last_cookie; 566 567 entry.eof = 0; 567 568 entry.fh = nfs_alloc_fhandle(); 568 569 entry.fattr = nfs_alloc_fattr(); ··· 635 636 static 636 637 void cache_page_release(nfs_readdir_descriptor_t *desc) 637 638 { 639 + if (!desc->page->mapping) 640 + nfs_readdir_clear_array(desc->page); 638 641 page_cache_release(desc->page); 639 642 desc->page = NULL; 640 643 } ··· 661 660 return PTR_ERR(desc->page); 662 661 663 662 res = nfs_readdir_search_array(desc); 664 - if (res 
== 0) 665 - return 0; 666 - cache_page_release(desc); 663 + if (res != 0) 664 + cache_page_release(desc); 667 665 return res; 668 666 } 669 667 ··· 672 672 { 673 673 int res; 674 674 675 - if (desc->page_index == 0) 675 + if (desc->page_index == 0) { 676 676 desc->current_index = 0; 677 - while (1) { 678 - res = find_cache_page(desc); 679 - if (res != -EAGAIN) 680 - break; 681 - desc->page_index++; 677 + desc->last_cookie = 0; 682 678 } 679 + do { 680 + res = find_cache_page(desc); 681 + } while (res == -EAGAIN); 683 682 return res; 684 - } 685 - 686 - static inline unsigned int dt_type(struct inode *inode) 687 - { 688 - return (inode->i_mode >> 12) & 15; 689 683 } 690 684 691 685 /* ··· 711 717 break; 712 718 } 713 719 file->f_pos++; 714 - desc->cache_entry_index = i; 715 720 if (i < (array->size-1)) 716 721 *desc->dir_cookie = array->array[i+1].cookie; 717 722 else 718 723 *desc->dir_cookie = array->last_cookie; 719 724 } 720 - if (i == array->eof_index) 725 + if (array->eof_index >= 0) 721 726 desc->eof = 1; 722 727 723 728 nfs_readdir_release_array(desc->page); ··· 757 764 } 758 765 759 766 desc->page_index = 0; 767 + desc->last_cookie = *desc->dir_cookie; 760 768 desc->page = page; 761 769 762 770 status = nfs_readdir_xdr_to_array(desc, page, inode); ··· 785 791 struct inode *inode = dentry->d_inode; 786 792 nfs_readdir_descriptor_t my_desc, 787 793 *desc = &my_desc; 788 - int res = -ENOMEM; 794 + int res; 789 795 790 796 dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n", 791 797 dentry->d_parent->d_name.name, dentry->d_name.name, ··· 810 816 if (res < 0) 811 817 goto out; 812 818 813 - while (desc->eof != 1) { 819 + do { 814 820 res = readdir_search_pagecache(desc); 815 821 816 822 if (res == -EBADCOOKIE) { ··· 838 844 res = nfs_do_filldir(desc, dirent, filldir); 839 845 if (res < 0) 840 846 break; 841 - } 847 + } while (!desc->eof); 842 848 out: 843 849 nfs_unblock_sillyrename(dentry); 844 850 if (res > 0)
+2
fs/nfs/file.c
··· 693 693 { 694 694 struct inode *inode = filp->f_mapping->host; 695 695 int status = 0; 696 + unsigned int saved_type = fl->fl_type; 696 697 697 698 /* Try local locking first */ 698 699 posix_test_lock(filp, fl); ··· 701 700 /* found a conflict */ 702 701 goto out; 703 702 } 703 + fl->fl_type = saved_type; 704 704 705 705 if (nfs_have_delegation(inode, FMODE_READ)) 706 706 goto out_noconflict;
+1
fs/nfs/inode.c
··· 289 289 } else if (S_ISDIR(inode->i_mode)) { 290 290 inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops; 291 291 inode->i_fop = &nfs_dir_operations; 292 + inode->i_data.a_ops = &nfs_dir_aops; 292 293 if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS)) 293 294 set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags); 294 295 /* Deal with crossing mountpoints */
+2 -2
fs/nfs/mount_clnt.c
··· 505 505 506 506 static struct rpc_version mnt_version1 = { 507 507 .number = 1, 508 - .nrprocs = 2, 508 + .nrprocs = ARRAY_SIZE(mnt_procedures), 509 509 .procs = mnt_procedures, 510 510 }; 511 511 512 512 static struct rpc_version mnt_version3 = { 513 513 .number = 3, 514 - .nrprocs = 2, 514 + .nrprocs = ARRAY_SIZE(mnt3_procedures), 515 515 .procs = mnt3_procedures, 516 516 }; 517 517
+9
fs/nfs/nfs4proc.c
··· 3361 3361 ret = nfs_revalidate_inode(server, inode); 3362 3362 if (ret < 0) 3363 3363 return ret; 3364 + if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 3365 + nfs_zap_acl_cache(inode); 3364 3366 ret = nfs4_read_cached_acl(inode, buf, buflen); 3365 3367 if (ret != -ENOENT) 3366 3368 return ret; ··· 3391 3389 nfs_inode_return_delegation(inode); 3392 3390 buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 3393 3391 ret = nfs4_call_sync(server, &msg, &arg, &res, 1); 3392 + /* 3393 + * Acl update can result in inode attribute update. 3394 + * so mark the attribute cache invalid. 3395 + */ 3396 + spin_lock(&inode->i_lock); 3397 + NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; 3398 + spin_unlock(&inode->i_lock); 3394 3399 nfs_access_zap_cache(inode); 3395 3400 nfs_zap_acl_cache(inode); 3396 3401 return ret;
+2 -2
fs/nfs/pagelist.c
··· 115 115 { 116 116 if (!nfs_lock_request_dontget(req)) 117 117 return 0; 118 - if (req->wb_page != NULL) 118 + if (test_bit(PG_MAPPED, &req->wb_flags)) 119 119 radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); 120 120 return 1; 121 121 } ··· 125 125 */ 126 126 void nfs_clear_page_tag_locked(struct nfs_page *req) 127 127 { 128 - if (req->wb_page != NULL) { 128 + if (test_bit(PG_MAPPED, &req->wb_flags)) { 129 129 struct inode *inode = req->wb_context->path.dentry->d_inode; 130 130 struct nfs_inode *nfsi = NFS_I(inode); 131 131
-1
fs/nfs/read.c
··· 152 152 (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), 153 153 req->wb_bytes, 154 154 (long long)req_offset(req)); 155 - nfs_clear_request(req); 156 155 nfs_release_request(req); 157 156 } 158 157
-4
fs/nfs/super.c
··· 1069 1069 mnt->flags |= NFS_MOUNT_VER3; 1070 1070 mnt->version = 3; 1071 1071 break; 1072 - #ifdef CONFIG_NFS_V4 1073 1072 case Opt_v4: 1074 1073 mnt->flags &= ~NFS_MOUNT_VER3; 1075 1074 mnt->version = 4; 1076 1075 break; 1077 - #endif 1078 1076 case Opt_udp: 1079 1077 mnt->flags &= ~NFS_MOUNT_TCP; 1080 1078 mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP; ··· 1284 1286 mnt->flags |= NFS_MOUNT_VER3; 1285 1287 mnt->version = 3; 1286 1288 break; 1287 - #ifdef CONFIG_NFS_V4 1288 1289 case NFS4_VERSION: 1289 1290 mnt->flags &= ~NFS_MOUNT_VER3; 1290 1291 mnt->version = 4; 1291 1292 break; 1292 - #endif 1293 1293 default: 1294 1294 goto out_invalid_value; 1295 1295 }
+2 -1
fs/nfs/write.c
··· 390 390 if (nfs_have_delegation(inode, FMODE_WRITE)) 391 391 nfsi->change_attr++; 392 392 } 393 + set_bit(PG_MAPPED, &req->wb_flags); 393 394 SetPagePrivate(req->wb_page); 394 395 set_page_private(req->wb_page, (unsigned long)req); 395 396 nfsi->npages++; ··· 416 415 spin_lock(&inode->i_lock); 417 416 set_page_private(req->wb_page, 0); 418 417 ClearPagePrivate(req->wb_page); 418 + clear_bit(PG_MAPPED, &req->wb_flags); 419 419 radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); 420 420 nfsi->npages--; 421 421 if (!nfsi->npages) { ··· 424 422 iput(inode); 425 423 } else 426 424 spin_unlock(&inode->i_lock); 427 - nfs_clear_request(req); 428 425 nfs_release_request(req); 429 426 } 430 427
+4 -2
fs/nfsd/nfs3xdr.c
··· 260 260 err = vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry, 261 261 &fhp->fh_post_attr); 262 262 fhp->fh_post_change = fhp->fh_dentry->d_inode->i_version; 263 - if (err) 263 + if (err) { 264 264 fhp->fh_post_saved = 0; 265 - else 265 + /* Grab the ctime anyway - set_change_info might use it */ 266 + fhp->fh_post_attr.ctime = fhp->fh_dentry->d_inode->i_ctime; 267 + } else 266 268 fhp->fh_post_saved = 1; 267 269 } 268 270
+10 -11
fs/nfsd/xdr4.h
··· 484 484 static inline void 485 485 set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp) 486 486 { 487 - BUG_ON(!fhp->fh_pre_saved || !fhp->fh_post_saved); 488 - cinfo->atomic = 1; 487 + BUG_ON(!fhp->fh_pre_saved); 488 + cinfo->atomic = fhp->fh_post_saved; 489 489 cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode); 490 - if (cinfo->change_supported) { 491 - cinfo->before_change = fhp->fh_pre_change; 492 - cinfo->after_change = fhp->fh_post_change; 493 - } else { 494 - cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec; 495 - cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec; 496 - cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec; 497 - cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec; 498 - } 490 + 491 + cinfo->before_change = fhp->fh_pre_change; 492 + cinfo->after_change = fhp->fh_post_change; 493 + cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec; 494 + cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec; 495 + cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec; 496 + cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec; 497 + 499 498 } 500 499 501 500 int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
-9
fs/nilfs2/gcinode.c
··· 176 176 int nilfs_init_gcinode(struct inode *inode) 177 177 { 178 178 struct nilfs_inode_info *ii = NILFS_I(inode); 179 - struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs; 180 179 181 180 inode->i_mode = S_IFREG; 182 181 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); ··· 184 185 185 186 ii->i_flags = 0; 186 187 nilfs_bmap_init_gc(ii->i_bmap); 187 - 188 - /* 189 - * Add the inode to GC inode list. Garbage Collection 190 - * is serialized and no two processes manipulate the 191 - * list simultaneously. 192 - */ 193 - igrab(inode); 194 - list_add(&NILFS_I(inode)->i_dirty, &nilfs->ns_gc_inodes); 195 188 196 189 return 0; 197 190 }
+12
fs/nilfs2/ioctl.c
··· 337 337 struct nilfs_argv *argv, void *buf) 338 338 { 339 339 size_t nmembs = argv->v_nmembs; 340 + struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs; 340 341 struct inode *inode; 341 342 struct nilfs_vdesc *vdesc; 342 343 struct buffer_head *bh, *n; ··· 354 353 ret = PTR_ERR(inode); 355 354 goto failed; 356 355 } 356 + if (list_empty(&NILFS_I(inode)->i_dirty)) { 357 + /* 358 + * Add the inode to GC inode list. Garbage Collection 359 + * is serialized and no two processes manipulate the 360 + * list simultaneously. 361 + */ 362 + igrab(inode); 363 + list_add(&NILFS_I(inode)->i_dirty, 364 + &nilfs->ns_gc_inodes); 365 + } 366 + 357 367 do { 358 368 ret = nilfs_ioctl_move_inode_block(inode, vdesc, 359 369 &buffers);
+5 -1
fs/notify/fanotify/fanotify.c
··· 92 92 93 93 pr_debug("%s: group=%p event=%p\n", __func__, group, event); 94 94 95 - wait_event(group->fanotify_data.access_waitq, event->response); 95 + wait_event(group->fanotify_data.access_waitq, event->response || 96 + atomic_read(&group->fanotify_data.bypass_perm)); 97 + 98 + if (!event->response) /* bypass_perm set */ 99 + return 0; 96 100 97 101 /* userspace responded, convert to something usable */ 98 102 spin_lock(&event->lock);
+51 -30
fs/notify/fanotify/fanotify_user.c
··· 106 106 return client_fd; 107 107 } 108 108 109 - static ssize_t fill_event_metadata(struct fsnotify_group *group, 109 + static int fill_event_metadata(struct fsnotify_group *group, 110 110 struct fanotify_event_metadata *metadata, 111 111 struct fsnotify_event *event) 112 112 { 113 + int ret = 0; 114 + 113 115 pr_debug("%s: group=%p metadata=%p event=%p\n", __func__, 114 116 group, metadata, event); 115 117 116 118 metadata->event_len = FAN_EVENT_METADATA_LEN; 119 + metadata->metadata_len = FAN_EVENT_METADATA_LEN; 117 120 metadata->vers = FANOTIFY_METADATA_VERSION; 118 121 metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS; 119 122 metadata->pid = pid_vnr(event->tgid); 120 - metadata->fd = create_fd(group, event); 123 + if (unlikely(event->mask & FAN_Q_OVERFLOW)) 124 + metadata->fd = FAN_NOFD; 125 + else { 126 + metadata->fd = create_fd(group, event); 127 + if (metadata->fd < 0) 128 + ret = metadata->fd; 129 + } 121 130 122 - return metadata->fd; 131 + return ret; 123 132 } 124 133 125 134 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS ··· 209 200 210 201 mutex_lock(&group->fanotify_data.access_mutex); 211 202 212 - if (group->fanotify_data.bypass_perm) { 203 + if (atomic_read(&group->fanotify_data.bypass_perm)) { 213 204 mutex_unlock(&group->fanotify_data.access_mutex); 214 205 kmem_cache_free(fanotify_response_event_cache, re); 215 206 event->response = FAN_ALLOW; ··· 266 257 267 258 pr_debug("%s: group=%p event=%p\n", __func__, group, event); 268 259 269 - fd = fill_event_metadata(group, &fanotify_event_metadata, event); 270 - if (fd < 0) 271 - return fd; 260 + ret = fill_event_metadata(group, &fanotify_event_metadata, event); 261 + if (ret < 0) 262 + goto out; 272 263 264 + fd = fanotify_event_metadata.fd; 273 265 ret = prepare_for_access_response(group, event, fd); 274 266 if (ret) 275 267 goto out_close_fd; 276 268 277 269 ret = -EFAULT; 278 - if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN)) 270 + if (copy_to_user(buf, 
&fanotify_event_metadata, 271 + fanotify_event_metadata.event_len)) 279 272 goto out_kill_access_response; 280 273 281 - return FAN_EVENT_METADATA_LEN; 274 + return fanotify_event_metadata.event_len; 282 275 283 276 out_kill_access_response: 284 277 remove_access_response(group, event, fd); 285 278 out_close_fd: 286 - sys_close(fd); 279 + if (fd != FAN_NOFD) 280 + sys_close(fd); 281 + out: 282 + #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS 283 + if (event->mask & FAN_ALL_PERM_EVENTS) { 284 + event->response = FAN_DENY; 285 + wake_up(&group->fanotify_data.access_waitq); 286 + } 287 + #endif 287 288 return ret; 288 289 } 289 290 ··· 401 382 402 383 mutex_lock(&group->fanotify_data.access_mutex); 403 384 404 - group->fanotify_data.bypass_perm = true; 385 + atomic_inc(&group->fanotify_data.bypass_perm); 405 386 406 387 list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) { 407 388 pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group, ··· 605 586 { 606 587 struct fsnotify_mark *fsn_mark; 607 588 __u32 added; 589 + int ret = 0; 608 590 609 591 fsn_mark = fsnotify_find_vfsmount_mark(group, mnt); 610 592 if (!fsn_mark) { 611 - int ret; 612 - 613 593 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) 614 594 return -ENOSPC; 615 595 ··· 618 600 619 601 fsnotify_init_mark(fsn_mark, fanotify_free_mark); 620 602 ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0); 621 - if (ret) { 622 - fanotify_free_mark(fsn_mark); 623 - return ret; 624 - } 603 + if (ret) 604 + goto err; 625 605 } 626 606 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); 627 - fsnotify_put_mark(fsn_mark); 607 + 628 608 if (added & ~mnt->mnt_fsnotify_mask) 629 609 fsnotify_recalc_vfsmount_mask(mnt); 630 - 631 - return 0; 610 + err: 611 + fsnotify_put_mark(fsn_mark); 612 + return ret; 632 613 } 633 614 634 615 static int fanotify_add_inode_mark(struct fsnotify_group *group, ··· 636 619 { 637 620 struct fsnotify_mark *fsn_mark; 638 621 __u32 added; 622 + 
int ret = 0; 639 623 640 624 pr_debug("%s: group=%p inode=%p\n", __func__, group, inode); 641 625 ··· 652 634 653 635 fsn_mark = fsnotify_find_inode_mark(group, inode); 654 636 if (!fsn_mark) { 655 - int ret; 656 - 657 637 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) 658 638 return -ENOSPC; 659 639 ··· 661 645 662 646 fsnotify_init_mark(fsn_mark, fanotify_free_mark); 663 647 ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0); 664 - if (ret) { 665 - fanotify_free_mark(fsn_mark); 666 - return ret; 667 - } 648 + if (ret) 649 + goto err; 668 650 } 669 651 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); 670 - fsnotify_put_mark(fsn_mark); 652 + 671 653 if (added & ~inode->i_fsnotify_mask) 672 654 fsnotify_recalc_inode_mask(inode); 673 - return 0; 655 + err: 656 + fsnotify_put_mark(fsn_mark); 657 + return ret; 674 658 } 675 659 676 660 /* fanotify syscalls */ ··· 703 687 704 688 /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */ 705 689 group = fsnotify_alloc_group(&fanotify_fsnotify_ops); 706 - if (IS_ERR(group)) 690 + if (IS_ERR(group)) { 691 + free_uid(user); 707 692 return PTR_ERR(group); 693 + } 708 694 709 695 group->fanotify_data.user = user; 710 696 atomic_inc(&user->fanotify_listeners); ··· 716 698 mutex_init(&group->fanotify_data.access_mutex); 717 699 init_waitqueue_head(&group->fanotify_data.access_waitq); 718 700 INIT_LIST_HEAD(&group->fanotify_data.access_list); 701 + atomic_set(&group->fanotify_data.bypass_perm, 0); 719 702 #endif 720 703 switch (flags & FAN_ALL_CLASS_BITS) { 721 704 case FAN_CLASS_NOTIF: ··· 783 764 if (flags & ~FAN_ALL_MARK_FLAGS) 784 765 return -EINVAL; 785 766 switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) { 786 - case FAN_MARK_ADD: 767 + case FAN_MARK_ADD: /* fallthrough */ 787 768 case FAN_MARK_REMOVE: 769 + if (!mask) 770 + return -EINVAL; 788 771 case FAN_MARK_FLUSH: 789 772 break; 790 773 default:
+1
fs/notify/inotify/inotify_user.c
··· 752 752 if (ret >= 0) 753 753 return ret; 754 754 755 + fsnotify_put_group(group); 755 756 atomic_dec(&user->inotify_devs); 756 757 out_free_uid: 757 758 free_uid(user);
+1
fs/xfs/xfs_rename.c
··· 297 297 * it and some incremental backup programs won't work without it. 298 298 */ 299 299 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG); 300 + xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE); 300 301 301 302 /* 302 303 * Adjust the link count on src_dp. This is necessary when
+4 -1
include/acpi/video.h
··· 1 1 #ifndef __ACPI_VIDEO_H 2 2 #define __ACPI_VIDEO_H 3 3 4 + #include <linux/errno.h> /* for ENODEV */ 5 + 6 + struct acpi_device; 7 + 4 8 #define ACPI_VIDEO_DISPLAY_CRT 1 5 9 #define ACPI_VIDEO_DISPLAY_TV 2 6 10 #define ACPI_VIDEO_DISPLAY_DVI 3 ··· 30 26 #endif 31 27 32 28 #endif 33 -
+1 -1
include/linux/acpi.h
··· 219 219 220 220 extern int acpi_blacklisted(void); 221 221 extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d); 222 - extern int acpi_osi_setup(char *str); 222 + extern void acpi_osi_setup(char *str); 223 223 224 224 #ifdef CONFIG_ACPI_NUMA 225 225 int acpi_get_pxm(acpi_handle handle);
+4 -2
include/linux/atmdev.h
··· 427 427 428 428 #define ATM_SKB(skb) (((struct atm_skb_data *) (skb)->cb)) 429 429 430 - struct atm_dev *atm_dev_register(const char *type,const struct atmdev_ops *ops, 431 - int number,unsigned long *flags); /* number == -1: pick first available */ 430 + struct atm_dev *atm_dev_register(const char *type, struct device *parent, 431 + const struct atmdev_ops *ops, 432 + int number, /* -1 == pick first available */ 433 + unsigned long *flags); 432 434 struct atm_dev *atm_dev_lookup(int number); 433 435 void atm_dev_deregister(struct atm_dev *dev); 434 436
+7 -3
include/linux/blkdev.h
··· 250 250 251 251 unsigned char misaligned; 252 252 unsigned char discard_misaligned; 253 - unsigned char no_cluster; 253 + unsigned char cluster; 254 254 signed char discard_zeroes_data; 255 255 }; 256 256 ··· 380 380 #endif 381 381 }; 382 382 383 - #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ 384 383 #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 385 384 #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ 386 385 #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ ··· 402 403 #define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */ 403 404 404 405 #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 405 - (1 << QUEUE_FLAG_CLUSTER) | \ 406 406 (1 << QUEUE_FLAG_STACKABLE) | \ 407 407 (1 << QUEUE_FLAG_SAME_COMP) | \ 408 408 (1 << QUEUE_FLAG_ADD_RANDOM)) ··· 507 509 #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) 508 510 509 511 #define rq_data_dir(rq) ((rq)->cmd_flags & 1) 512 + 513 + static inline unsigned int blk_queue_cluster(struct request_queue *q) 514 + { 515 + return q->limits.cluster; 516 + } 510 517 511 518 /* 512 519 * We regard a request as sync, if either a read or a sync write ··· 808 805 extern void blk_cleanup_queue(struct request_queue *); 809 806 extern void blk_queue_make_request(struct request_queue *, make_request_fn *); 810 807 extern void blk_queue_bounce_limit(struct request_queue *, u64); 808 + extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int); 811 809 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); 812 810 extern void blk_queue_max_segments(struct request_queue *, unsigned short); 813 811 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+2
include/linux/bootmem.h
··· 105 105 106 106 #define alloc_bootmem(x) \ 107 107 __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 108 + #define alloc_bootmem_align(x, align) \ 109 + __alloc_bootmem(x, align, __pa(MAX_DMA_ADDRESS)) 108 110 #define alloc_bootmem_nopanic(x) \ 109 111 __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 110 112 #define alloc_bootmem_pages(x) \
+4 -2
include/linux/ceph/libceph.h
··· 227 227 extern void ceph_release_page_vector(struct page **pages, int num_pages); 228 228 229 229 extern struct page **ceph_get_direct_page_vector(const char __user *data, 230 - int num_pages); 231 - extern void ceph_put_page_vector(struct page **pages, int num_pages); 230 + int num_pages, 231 + bool write_page); 232 + extern void ceph_put_page_vector(struct page **pages, int num_pages, 233 + bool dirty); 232 234 extern void ceph_release_page_vector(struct page **pages, int num_pages); 233 235 extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); 234 236 extern int ceph_copy_user_to_page_vector(struct page **pages,
+19 -1
include/linux/cnt32_to_63.h
··· 61 61 * 62 62 * 2) this code must not be preempted for a duration longer than the 63 63 * 32-bit counter half period minus the longest period between two 64 - * calls to this code. 64 + * calls to this code; 65 65 * 66 66 * Those requirements ensure proper update to the state bit in memory. 67 67 * This is usually not a problem in practice, but if it is then a kernel 68 68 * timer should be scheduled to manage for this code to be executed often 69 69 * enough. 70 + * 71 + * And finally: 72 + * 73 + * 3) the cnt_lo argument must be seen as a globally incrementing value, 74 + * meaning that it should be a direct reference to the counter data which 75 + * can be evaluated according to a specific ordering within the macro, 76 + * and not the result of a previous evaluation stored in a variable. 77 + * 78 + * For example, this is wrong: 79 + * 80 + * u32 partial = get_hw_count(); 81 + * u64 full = cnt32_to_63(partial); 82 + * return full; 83 + * 84 + * This is fine: 85 + * 86 + * u64 full = cnt32_to_63(get_hw_count()); 87 + * return full; 70 88 * 71 89 * Note that the top bit (bit 63) in the returned value should be considered 72 90 * as garbage. It is not cleared here because callers are likely to use a
+7 -3
include/linux/fanotify.h
··· 83 83 FAN_ALL_PERM_EVENTS |\ 84 84 FAN_Q_OVERFLOW) 85 85 86 - #define FANOTIFY_METADATA_VERSION 2 86 + #define FANOTIFY_METADATA_VERSION 3 87 87 88 88 struct fanotify_event_metadata { 89 89 __u32 event_len; 90 - __u32 vers; 90 + __u8 vers; 91 + __u8 reserved; 92 + __u16 metadata_len; 91 93 __aligned_u64 mask; 92 94 __s32 fd; 93 95 __s32 pid; ··· 98 96 struct fanotify_response { 99 97 __s32 fd; 100 98 __u32 response; 101 - } __attribute__ ((packed)); 99 + }; 102 100 103 101 /* Legit userspace responses to a _PERM event */ 104 102 #define FAN_ALLOW 0x01 105 103 #define FAN_DENY 0x02 104 + /* No fd set in event */ 105 + #define FAN_NOFD -1 106 106 107 107 /* Helper functions to deal with fanotify_event_metadata buffers */ 108 108 #define FAN_EVENT_METADATA_LEN (sizeof(struct fanotify_event_metadata))
+1
include/linux/fs.h
··· 602 602 sector_t (*bmap)(struct address_space *, sector_t); 603 603 void (*invalidatepage) (struct page *, unsigned long); 604 604 int (*releasepage) (struct page *, gfp_t); 605 + void (*freepage)(struct page *); 605 606 ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, 606 607 loff_t offset, unsigned long nr_segs); 607 608 int (*get_xip_mem)(struct address_space *, pgoff_t, int,
-3
include/linux/fsnotify.h
··· 235 235 if (S_ISDIR(inode->i_mode)) 236 236 mask |= FS_ISDIR; 237 237 238 - /* FMODE_NONOTIFY must never be set from user */ 239 - file->f_mode &= ~FMODE_NONOTIFY; 240 - 241 238 fsnotify_parent(path, NULL, mask); 242 239 fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); 243 240 }
+1 -1
include/linux/fsnotify_backend.h
··· 166 166 struct mutex access_mutex; 167 167 struct list_head access_list; 168 168 wait_queue_head_t access_waitq; 169 - bool bypass_perm; /* protected by access_mutex */ 169 + atomic_t bypass_perm; 170 170 #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */ 171 171 int f_flags; 172 172 unsigned int max_marks;
+4 -2
include/linux/input.h
··· 104 104 #define EVIOCGREP _IOR('E', 0x03, unsigned int[2]) /* get repeat settings */ 105 105 #define EVIOCSREP _IOW('E', 0x03, unsigned int[2]) /* set repeat settings */ 106 106 107 - #define EVIOCGKEYCODE _IOR('E', 0x04, struct input_keymap_entry) /* get keycode */ 108 - #define EVIOCSKEYCODE _IOW('E', 0x04, struct input_keymap_entry) /* set keycode */ 107 + #define EVIOCGKEYCODE _IOR('E', 0x04, unsigned int[2]) /* get keycode */ 108 + #define EVIOCGKEYCODE_V2 _IOR('E', 0x04, struct input_keymap_entry) 109 + #define EVIOCSKEYCODE _IOW('E', 0x04, unsigned int[2]) /* set keycode */ 110 + #define EVIOCSKEYCODE_V2 _IOW('E', 0x04, struct input_keymap_entry) 109 111 110 112 #define EVIOCGNAME(len) _IOC(_IOC_READ, 'E', 0x06, len) /* get device name */ 111 113 #define EVIOCGPHYS(len) _IOC(_IOC_READ, 'E', 0x07, len) /* get physical location */
+1 -1
include/linux/ioport.h
··· 112 112 /* PC/ISA/whatever - the normal PC address spaces: IO and memory */ 113 113 extern struct resource ioport_resource; 114 114 extern struct resource iomem_resource; 115 - extern int resource_alloc_from_bottom; 116 115 117 116 extern struct resource *request_resource_conflict(struct resource *root, struct resource *new); 118 117 extern int request_resource(struct resource *root, struct resource *new); ··· 123 124 extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new); 124 125 extern int insert_resource(struct resource *parent, struct resource *new); 125 126 extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); 127 + extern void arch_remove_reservations(struct resource *avail); 126 128 extern int allocate_resource(struct resource *root, struct resource *new, 127 129 resource_size_t size, resource_size_t min, 128 130 resource_size_t max, resource_size_t align,
+1 -1
include/linux/mfd/wm8994/pdata.h
··· 29 29 #define WM8994_CONFIGURE_GPIO 0x8000 30 30 31 31 #define WM8994_DRC_REGS 5 32 - #define WM8994_EQ_REGS 19 32 + #define WM8994_EQ_REGS 20 33 33 34 34 /** 35 35 * DRC configurations are specified with a label and a set of register
+1
include/linux/nfs_fs.h
··· 401 401 #endif /* CONFIG_NFS_V3 */ 402 402 extern const struct file_operations nfs_file_operations; 403 403 extern const struct address_space_operations nfs_file_aops; 404 + extern const struct address_space_operations nfs_dir_aops; 404 405 405 406 static inline struct nfs_open_context *nfs_file_open_context(struct file *filp) 406 407 {
+1
include/linux/nfs_page.h
··· 29 29 */ 30 30 enum { 31 31 PG_BUSY = 0, 32 + PG_MAPPED, 32 33 PG_CLEAN, 33 34 PG_NEED_COMMIT, 34 35 PG_NEED_RESCHED,
+2 -1
include/linux/pm_runtime.h
··· 77 77 78 78 static inline bool pm_runtime_suspended(struct device *dev) 79 79 { 80 - return dev->power.runtime_status == RPM_SUSPENDED; 80 + return dev->power.runtime_status == RPM_SUSPENDED 81 + && !dev->power.disable_depth; 81 82 } 82 83 83 84 static inline void pm_runtime_mark_last_busy(struct device *dev)
+1 -1
include/linux/sched.h
··· 143 143 extern unsigned long this_cpu_load(void); 144 144 145 145 146 - extern void calc_global_load(void); 146 + extern void calc_global_load(unsigned long ticks); 147 147 148 148 extern unsigned long get_parent_ip(unsigned long addr); 149 149
+1
include/linux/snmp.h
··· 230 230 LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */ 231 231 LINUX_MIB_TCPDEFERACCEPTDROP, 232 232 LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */ 233 + LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */ 233 234 __LINUX_MIB_MAX 234 235 }; 235 236
+11 -6
include/linux/ssb/ssb_driver_gige.h
··· 96 96 return 0; 97 97 } 98 98 99 - extern char * nvram_get(const char *name); 99 + #ifdef CONFIG_BCM47XX 100 + #include <asm/mach-bcm47xx/nvram.h> 100 101 /* Get the device MAC address */ 101 102 static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) 102 103 { 103 - #ifdef CONFIG_BCM47XX 104 - char *res = nvram_get("et0macaddr"); 105 - if (res) 106 - memcpy(macaddr, res, 6); 107 - #endif 104 + char buf[20]; 105 + if (nvram_getenv("et0macaddr", buf, sizeof(buf)) < 0) 106 + return; 107 + nvram_parse_macaddr(buf, macaddr); 108 108 } 109 + #else 110 + static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) 111 + { 112 + } 113 + #endif 109 114 110 115 extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev, 111 116 struct pci_dev *pdev);
+15
include/linux/video_output.h
··· 23 23 #ifndef _LINUX_VIDEO_OUTPUT_H 24 24 #define _LINUX_VIDEO_OUTPUT_H 25 25 #include <linux/device.h> 26 + #include <linux/err.h> 26 27 struct output_device; 27 28 struct output_properties { 28 29 int (*set_state)(struct output_device *); ··· 35 34 struct device dev; 36 35 }; 37 36 #define to_output_device(obj) container_of(obj, struct output_device, dev) 37 + #if defined(CONFIG_VIDEO_OUTPUT_CONTROL) || defined(CONFIG_VIDEO_OUTPUT_CONTROL_MODULE) 38 38 struct output_device *video_output_register(const char *name, 39 39 struct device *dev, 40 40 void *devdata, 41 41 struct output_properties *op); 42 42 void video_output_unregister(struct output_device *dev); 43 + #else 44 + static struct output_device *video_output_register(const char *name, 45 + struct device *dev, 46 + void *devdata, 47 + struct output_properties *op) 48 + { 49 + return ERR_PTR(-ENODEV); 50 + } 51 + static void video_output_unregister(struct output_device *dev) 52 + { 53 + return; 54 + } 55 + #endif 43 56 #endif
+1 -1
include/media/saa7146.h
··· 161 161 extern struct mutex saa7146_devices_lock; 162 162 int saa7146_register_extension(struct saa7146_extension*); 163 163 int saa7146_unregister_extension(struct saa7146_extension*); 164 - struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc); 164 + struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc); 165 165 int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt); 166 166 void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt); 167 167 int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt, struct scatterlist *list, int length );
+2
include/media/v4l2-device.h
··· 51 51 unsigned int notification, void *arg); 52 52 /* The control handler. May be NULL. */ 53 53 struct v4l2_ctrl_handler *ctrl_handler; 54 + /* BKL replacement mutex. Temporary solution only. */ 55 + struct mutex ioctl_lock; 54 56 }; 55 57 56 58 /* Initialize v4l2_dev and make dev->driver_data point to v4l2_dev.
+3 -1
include/net/sock.h
··· 1155 1155 /* Initialise core socket variables */ 1156 1156 extern void sock_init_data(struct socket *sock, struct sock *sk); 1157 1157 1158 + extern void sk_filter_release_rcu(struct rcu_head *rcu); 1159 + 1158 1160 /** 1159 1161 * sk_filter_release - release a socket filter 1160 1162 * @fp: filter to remove ··· 1167 1165 static inline void sk_filter_release(struct sk_filter *fp) 1168 1166 { 1169 1167 if (atomic_dec_and_test(&fp->refcnt)) 1170 - kfree(fp); 1168 + call_rcu_bh(&fp->rcu, sk_filter_release_rcu); 1171 1169 } 1172 1170 1173 1171 static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
+9 -2
include/xen/interface/io/ring.h
··· 24 24 * A ring contains as many entries as will fit, rounded down to the nearest 25 25 * power of two (so we can mask with (size-1) to loop around). 26 26 */ 27 - #define __RING_SIZE(_s, _sz) \ 28 - (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) 27 + #define __CONST_RING_SIZE(_s, _sz) \ 28 + (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ 29 + sizeof(((struct _s##_sring *)0)->ring[0]))) 30 + 31 + /* 32 + * The same for passing in an actual pointer instead of a name tag. 33 + */ 34 + #define __RING_SIZE(_s, _sz) \ 35 + (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) 29 36 30 37 /* 31 38 * Macros to make the correct C datatypes for a new kind of ring.
+1
kernel/fork.c
··· 273 273 274 274 setup_thread_stack(tsk, orig); 275 275 clear_user_return_notifier(tsk); 276 + clear_tsk_need_resched(tsk); 276 277 stackend = end_of_stack(tsk); 277 278 *stackend = STACK_END_MAGIC; /* for overflow detection */ 278 279
+1 -1
kernel/power/swap.c
··· 30 30 31 31 #include "power.h" 32 32 33 - #define HIBERNATE_SIG "LINHIB0001" 33 + #define HIBERNATE_SIG "S1SUSPEND" 34 34 35 35 /* 36 36 * The swap map is a data structure used for keeping track of each page
+1 -1
kernel/power/user.c
··· 137 137 free_all_swap_pages(data->swap); 138 138 if (data->frozen) 139 139 thaw_processes(); 140 - pm_notifier_call_chain(data->mode == O_WRONLY ? 140 + pm_notifier_call_chain(data->mode == O_RDONLY ? 141 141 PM_POST_HIBERNATION : PM_POST_RESTORE); 142 142 atomic_inc(&snapshot_device_available); 143 143
+10 -94
kernel/resource.c
··· 40 40 41 41 static DEFINE_RWLOCK(resource_lock); 42 42 43 - /* 44 - * By default, we allocate free space bottom-up. The architecture can request 45 - * top-down by clearing this flag. The user can override the architecture's 46 - * choice with the "resource_alloc_from_bottom" kernel boot option, but that 47 - * should only be a debugging tool. 48 - */ 49 - int resource_alloc_from_bottom = 1; 50 - 51 - static __init int setup_alloc_from_bottom(char *s) 52 - { 53 - printk(KERN_INFO 54 - "resource: allocating from bottom-up; please report a bug\n"); 55 - resource_alloc_from_bottom = 1; 56 - return 0; 57 - } 58 - early_param("resource_alloc_from_bottom", setup_alloc_from_bottom); 59 - 60 43 static void *r_next(struct seq_file *m, void *v, loff_t *pos) 61 44 { 62 45 struct resource *p = v; ··· 357 374 return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1; 358 375 } 359 376 377 + void __weak arch_remove_reservations(struct resource *avail) 378 + { 379 + } 380 + 360 381 static resource_size_t simple_align_resource(void *data, 361 382 const struct resource *avail, 362 383 resource_size_t size, ··· 384 397 } 385 398 386 399 /* 387 - * Find the resource before "child" in the sibling list of "root" children. 388 - */ 389 - static struct resource *find_sibling_prev(struct resource *root, struct resource *child) 390 - { 391 - struct resource *this; 392 - 393 - for (this = root->child; this; this = this->sibling) 394 - if (this->sibling == child) 395 - return this; 396 - 397 - return NULL; 398 - } 399 - 400 - /* 401 400 * Find empty slot in the resource tree given range and alignment. 402 - * This version allocates from the end of the root resource first. 
403 - */ 404 - static int find_resource_from_top(struct resource *root, struct resource *new, 405 - resource_size_t size, resource_size_t min, 406 - resource_size_t max, resource_size_t align, 407 - resource_size_t (*alignf)(void *, 408 - const struct resource *, 409 - resource_size_t, 410 - resource_size_t), 411 - void *alignf_data) 412 - { 413 - struct resource *this; 414 - struct resource tmp, avail, alloc; 415 - 416 - tmp.start = root->end; 417 - tmp.end = root->end; 418 - 419 - this = find_sibling_prev(root, NULL); 420 - for (;;) { 421 - if (this) { 422 - if (this->end < root->end) 423 - tmp.start = this->end + 1; 424 - } else 425 - tmp.start = root->start; 426 - 427 - resource_clip(&tmp, min, max); 428 - 429 - /* Check for overflow after ALIGN() */ 430 - avail = *new; 431 - avail.start = ALIGN(tmp.start, align); 432 - avail.end = tmp.end; 433 - if (avail.start >= tmp.start) { 434 - alloc.start = alignf(alignf_data, &avail, size, align); 435 - alloc.end = alloc.start + size - 1; 436 - if (resource_contains(&avail, &alloc)) { 437 - new->start = alloc.start; 438 - new->end = alloc.end; 439 - return 0; 440 - } 441 - } 442 - 443 - if (!this || this->start == root->start) 444 - break; 445 - 446 - tmp.end = this->start - 1; 447 - this = find_sibling_prev(root, this); 448 - } 449 - return -EBUSY; 450 - } 451 - 452 - /* 453 - * Find empty slot in the resource tree given range and alignment. 454 - * This version allocates from the beginning of the root resource first. 455 401 */ 456 402 static int find_resource(struct resource *root, struct resource *new, 457 403 resource_size_t size, resource_size_t min, ··· 398 478 struct resource *this = root->child; 399 479 struct resource tmp = *new, avail, alloc; 400 480 481 + tmp.flags = new->flags; 401 482 tmp.start = root->start; 402 483 /* 403 - * Skip past an allocated resource that starts at 0, since the 404 - * assignment of this->start - 1 to tmp->end below would cause an 405 - * underflow. 
484 + * Skip past an allocated resource that starts at 0, since the assignment 485 + * of this->start - 1 to tmp->end below would cause an underflow. 406 486 */ 407 487 if (this && this->start == 0) { 408 488 tmp.start = this->end + 1; 409 489 this = this->sibling; 410 490 } 411 - for (;;) { 491 + for(;;) { 412 492 if (this) 413 493 tmp.end = this->start - 1; 414 494 else 415 495 tmp.end = root->end; 416 496 417 497 resource_clip(&tmp, min, max); 498 + arch_remove_reservations(&tmp); 418 499 419 500 /* Check for overflow after ALIGN() */ 420 501 avail = *new; ··· 430 509 return 0; 431 510 } 432 511 } 433 - 434 512 if (!this) 435 513 break; 436 - 437 514 tmp.start = this->end + 1; 438 515 this = this->sibling; 439 516 } ··· 464 545 alignf = simple_align_resource; 465 546 466 547 write_lock(&resource_lock); 467 - if (resource_alloc_from_bottom) 468 - err = find_resource(root, new, size, min, max, align, alignf, alignf_data); 469 - else 470 - err = find_resource_from_top(root, new, size, min, max, align, alignf, alignf_data); 548 + err = find_resource(root, new, size, min, max, align, alignf, alignf_data); 471 549 if (err >= 0 && __request_resource(root, new)) 472 550 err = -EBUSY; 473 551 write_unlock(&resource_lock);
+238 -53
kernel/sched.c
··· 636 636 637 637 #endif /* CONFIG_CGROUP_SCHED */ 638 638 639 - static u64 irq_time_cpu(int cpu); 640 - static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); 639 + static void update_rq_clock_task(struct rq *rq, s64 delta); 641 640 642 - inline void update_rq_clock(struct rq *rq) 641 + static void update_rq_clock(struct rq *rq) 643 642 { 644 - if (!rq->skip_clock_update) { 645 - int cpu = cpu_of(rq); 646 - u64 irq_time; 643 + s64 delta; 647 644 648 - rq->clock = sched_clock_cpu(cpu); 649 - irq_time = irq_time_cpu(cpu); 650 - if (rq->clock - irq_time > rq->clock_task) 651 - rq->clock_task = rq->clock - irq_time; 645 + if (rq->skip_clock_update) 646 + return; 652 647 653 - sched_irq_time_avg_update(rq, irq_time); 654 - } 648 + delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; 649 + rq->clock += delta; 650 + update_rq_clock_task(rq, delta); 655 651 } 656 652 657 653 /* ··· 1920 1924 * They are read and saved off onto struct rq in update_rq_clock(). 1921 1925 * This may result in other CPU reading this CPU's irq time and can 1922 1926 * race with irq/account_system_vtime on this CPU. We would either get old 1923 - * or new value (or semi updated value on 32 bit) with a side effect of 1924 - * accounting a slice of irq time to wrong task when irq is in progress 1925 - * while we read rq->clock. That is a worthy compromise in place of having 1926 - * locks on each irq in account_system_time. 1927 + * or new value with a side effect of accounting a slice of irq time to wrong 1928 + * task when irq is in progress while we read rq->clock. That is a worthy 1929 + * compromise in place of having locks on each irq in account_system_time. 
1927 1930 */ 1928 1931 static DEFINE_PER_CPU(u64, cpu_hardirq_time); 1929 1932 static DEFINE_PER_CPU(u64, cpu_softirq_time); ··· 1940 1945 sched_clock_irqtime = 0; 1941 1946 } 1942 1947 1943 - static u64 irq_time_cpu(int cpu) 1944 - { 1945 - if (!sched_clock_irqtime) 1946 - return 0; 1948 + #ifndef CONFIG_64BIT 1949 + static DEFINE_PER_CPU(seqcount_t, irq_time_seq); 1947 1950 1948 - return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); 1951 + static inline void irq_time_write_begin(void) 1952 + { 1953 + __this_cpu_inc(irq_time_seq.sequence); 1954 + smp_wmb(); 1949 1955 } 1950 1956 1957 + static inline void irq_time_write_end(void) 1958 + { 1959 + smp_wmb(); 1960 + __this_cpu_inc(irq_time_seq.sequence); 1961 + } 1962 + 1963 + static inline u64 irq_time_read(int cpu) 1964 + { 1965 + u64 irq_time; 1966 + unsigned seq; 1967 + 1968 + do { 1969 + seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu)); 1970 + irq_time = per_cpu(cpu_softirq_time, cpu) + 1971 + per_cpu(cpu_hardirq_time, cpu); 1972 + } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq)); 1973 + 1974 + return irq_time; 1975 + } 1976 + #else /* CONFIG_64BIT */ 1977 + static inline void irq_time_write_begin(void) 1978 + { 1979 + } 1980 + 1981 + static inline void irq_time_write_end(void) 1982 + { 1983 + } 1984 + 1985 + static inline u64 irq_time_read(int cpu) 1986 + { 1987 + return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); 1988 + } 1989 + #endif /* CONFIG_64BIT */ 1990 + 1991 + /* 1992 + * Called before incrementing preempt_count on {soft,}irq_enter 1993 + * and before decrementing preempt_count on {soft,}irq_exit. 
1994 + */ 1951 1995 void account_system_vtime(struct task_struct *curr) 1952 1996 { 1953 1997 unsigned long flags; 1998 + s64 delta; 1954 1999 int cpu; 1955 - u64 now, delta; 1956 2000 1957 2001 if (!sched_clock_irqtime) 1958 2002 return; ··· 1999 1965 local_irq_save(flags); 2000 1966 2001 1967 cpu = smp_processor_id(); 2002 - now = sched_clock_cpu(cpu); 2003 - delta = now - per_cpu(irq_start_time, cpu); 2004 - per_cpu(irq_start_time, cpu) = now; 1968 + delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time); 1969 + __this_cpu_add(irq_start_time, delta); 1970 + 1971 + irq_time_write_begin(); 2005 1972 /* 2006 1973 * We do not account for softirq time from ksoftirqd here. 2007 1974 * We want to continue accounting softirq time to ksoftirqd thread ··· 2010 1975 * that do not consume any time, but still wants to run. 2011 1976 */ 2012 1977 if (hardirq_count()) 2013 - per_cpu(cpu_hardirq_time, cpu) += delta; 1978 + __this_cpu_add(cpu_hardirq_time, delta); 2014 1979 else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD)) 2015 - per_cpu(cpu_softirq_time, cpu) += delta; 1980 + __this_cpu_add(cpu_softirq_time, delta); 2016 1981 1982 + irq_time_write_end(); 2017 1983 local_irq_restore(flags); 2018 1984 } 2019 1985 EXPORT_SYMBOL_GPL(account_system_vtime); 2020 1986 2021 - static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) 1987 + static void update_rq_clock_task(struct rq *rq, s64 delta) 2022 1988 { 2023 - if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) { 2024 - u64 delta_irq = curr_irq_time - rq->prev_irq_time; 2025 - rq->prev_irq_time = curr_irq_time; 2026 - sched_rt_avg_update(rq, delta_irq); 2027 - } 1989 + s64 irq_delta; 1990 + 1991 + irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; 1992 + 1993 + /* 1994 + * Since irq_time is only updated on {soft,}irq_exit, we might run into 1995 + * this case when a previous update_rq_clock() happened inside a 1996 + * {soft,}irq region. 
1997 + * 1998 + * When this happens, we stop ->clock_task and only update the 1999 + * prev_irq_time stamp to account for the part that fit, so that a next 2000 + * update will consume the rest. This ensures ->clock_task is 2001 + * monotonic. 2002 + * 2003 + * It does however cause some slight miss-attribution of {soft,}irq 2004 + * time, a more accurate solution would be to update the irq_time using 2005 + * the current rq->clock timestamp, except that would require using 2006 + * atomic ops. 2007 + */ 2008 + if (irq_delta > delta) 2009 + irq_delta = delta; 2010 + 2011 + rq->prev_irq_time += irq_delta; 2012 + delta -= irq_delta; 2013 + rq->clock_task += delta; 2014 + 2015 + if (irq_delta && sched_feat(NONIRQ_POWER)) 2016 + sched_rt_avg_update(rq, irq_delta); 2028 2017 } 2029 2018 2030 - #else 2019 + #else /* CONFIG_IRQ_TIME_ACCOUNTING */ 2031 2020 2032 - static u64 irq_time_cpu(int cpu) 2021 + static void update_rq_clock_task(struct rq *rq, s64 delta) 2033 2022 { 2034 - return 0; 2023 + rq->clock_task += delta; 2035 2024 } 2036 2025 2037 - static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { } 2038 - 2039 - #endif 2026 + #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 2040 2027 2041 2028 #include "sched_idletask.c" 2042 2029 #include "sched_fair.c" ··· 2186 2129 * A queue event has occurred, and we're going to schedule. In 2187 2130 * this case, we can save a useless back to back clock update. 
2188 2131 */ 2189 - if (test_tsk_need_resched(rq->curr)) 2132 + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) 2190 2133 rq->skip_clock_update = 1; 2191 2134 } 2192 2135 ··· 3176 3119 return delta; 3177 3120 } 3178 3121 3122 + static unsigned long 3123 + calc_load(unsigned long load, unsigned long exp, unsigned long active) 3124 + { 3125 + load *= exp; 3126 + load += active * (FIXED_1 - exp); 3127 + load += 1UL << (FSHIFT - 1); 3128 + return load >> FSHIFT; 3129 + } 3130 + 3179 3131 #ifdef CONFIG_NO_HZ 3180 3132 /* 3181 3133 * For NO_HZ we delay the active fold to the next LOAD_FREQ update. ··· 3214 3148 3215 3149 return delta; 3216 3150 } 3151 + 3152 + /** 3153 + * fixed_power_int - compute: x^n, in O(log n) time 3154 + * 3155 + * @x: base of the power 3156 + * @frac_bits: fractional bits of @x 3157 + * @n: power to raise @x to. 3158 + * 3159 + * By exploiting the relation between the definition of the natural power 3160 + * function: x^n := x*x*...*x (x multiplied by itself for n times), and 3161 + * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i, 3162 + * (where: n_i \elem {0, 1}, the binary vector representing n), 3163 + * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is 3164 + * of course trivially computable in O(log_2 n), the length of our binary 3165 + * vector. 
3166 + */ 3167 + static unsigned long 3168 + fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n) 3169 + { 3170 + unsigned long result = 1UL << frac_bits; 3171 + 3172 + if (n) for (;;) { 3173 + if (n & 1) { 3174 + result *= x; 3175 + result += 1UL << (frac_bits - 1); 3176 + result >>= frac_bits; 3177 + } 3178 + n >>= 1; 3179 + if (!n) 3180 + break; 3181 + x *= x; 3182 + x += 1UL << (frac_bits - 1); 3183 + x >>= frac_bits; 3184 + } 3185 + 3186 + return result; 3187 + } 3188 + 3189 + /* 3190 + * a1 = a0 * e + a * (1 - e) 3191 + * 3192 + * a2 = a1 * e + a * (1 - e) 3193 + * = (a0 * e + a * (1 - e)) * e + a * (1 - e) 3194 + * = a0 * e^2 + a * (1 - e) * (1 + e) 3195 + * 3196 + * a3 = a2 * e + a * (1 - e) 3197 + * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e) 3198 + * = a0 * e^3 + a * (1 - e) * (1 + e + e^2) 3199 + * 3200 + * ... 3201 + * 3202 + * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1] 3203 + * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e) 3204 + * = a0 * e^n + a * (1 - e^n) 3205 + * 3206 + * [1] application of the geometric series: 3207 + * 3208 + * n 1 - x^(n+1) 3209 + * S_n := \Sum x^i = ------------- 3210 + * i=0 1 - x 3211 + */ 3212 + static unsigned long 3213 + calc_load_n(unsigned long load, unsigned long exp, 3214 + unsigned long active, unsigned int n) 3215 + { 3216 + 3217 + return calc_load(load, fixed_power_int(exp, FSHIFT, n), active); 3218 + } 3219 + 3220 + /* 3221 + * NO_HZ can leave us missing all per-cpu ticks calling 3222 + * calc_load_account_active(), but since an idle CPU folds its delta into 3223 + * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold 3224 + * in the pending idle delta if our idle period crossed a load cycle boundary. 3225 + * 3226 + * Once we've updated the global active value, we need to apply the exponential 3227 + * weights adjusted to the number of cycles missed. 
3228 + */ 3229 + static void calc_global_nohz(unsigned long ticks) 3230 + { 3231 + long delta, active, n; 3232 + 3233 + if (time_before(jiffies, calc_load_update)) 3234 + return; 3235 + 3236 + /* 3237 + * If we crossed a calc_load_update boundary, make sure to fold 3238 + * any pending idle changes, the respective CPUs might have 3239 + * missed the tick driven calc_load_account_active() update 3240 + * due to NO_HZ. 3241 + */ 3242 + delta = calc_load_fold_idle(); 3243 + if (delta) 3244 + atomic_long_add(delta, &calc_load_tasks); 3245 + 3246 + /* 3247 + * If we were idle for multiple load cycles, apply them. 3248 + */ 3249 + if (ticks >= LOAD_FREQ) { 3250 + n = ticks / LOAD_FREQ; 3251 + 3252 + active = atomic_long_read(&calc_load_tasks); 3253 + active = active > 0 ? active * FIXED_1 : 0; 3254 + 3255 + avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n); 3256 + avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); 3257 + avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); 3258 + 3259 + calc_load_update += n * LOAD_FREQ; 3260 + } 3261 + 3262 + /* 3263 + * Its possible the remainder of the above division also crosses 3264 + * a LOAD_FREQ period, the regular check in calc_global_load() 3265 + * which comes after this will take care of that. 3266 + * 3267 + * Consider us being 11 ticks before a cycle completion, and us 3268 + * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will 3269 + * age us 4 cycles, and the test in calc_global_load() will 3270 + * pick up the final one. 
3271 + */ 3272 + } 3217 3273 #else 3218 3274 static void calc_load_account_idle(struct rq *this_rq) 3219 3275 { ··· 3344 3156 static inline long calc_load_fold_idle(void) 3345 3157 { 3346 3158 return 0; 3159 + } 3160 + 3161 + static void calc_global_nohz(unsigned long ticks) 3162 + { 3347 3163 } 3348 3164 #endif 3349 3165 ··· 3366 3174 loads[2] = (avenrun[2] + offset) << shift; 3367 3175 } 3368 3176 3369 - static unsigned long 3370 - calc_load(unsigned long load, unsigned long exp, unsigned long active) 3371 - { 3372 - load *= exp; 3373 - load += active * (FIXED_1 - exp); 3374 - return load >> FSHIFT; 3375 - } 3376 - 3377 3177 /* 3378 3178 * calc_load - update the avenrun load estimates 10 ticks after the 3379 3179 * CPUs have updated calc_load_tasks. 3380 3180 */ 3381 - void calc_global_load(void) 3181 + void calc_global_load(unsigned long ticks) 3382 3182 { 3383 - unsigned long upd = calc_load_update + 10; 3384 3183 long active; 3385 3184 3386 - if (time_before(jiffies, upd)) 3185 + calc_global_nohz(ticks); 3186 + 3187 + if (time_before(jiffies, calc_load_update + 10)) 3387 3188 return; 3388 3189 3389 3190 active = atomic_long_read(&calc_load_tasks); ··· 4030 3845 { 4031 3846 if (prev->se.on_rq) 4032 3847 update_rq_clock(rq); 4033 - rq->skip_clock_update = 0; 4034 3848 prev->sched_class->put_prev_task(rq, prev); 4035 3849 } 4036 3850 ··· 4087 3903 hrtick_clear(rq); 4088 3904 4089 3905 raw_spin_lock_irq(&rq->lock); 4090 - clear_tsk_need_resched(prev); 4091 3906 4092 3907 switch_count = &prev->nivcsw; 4093 3908 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { ··· 4118 3935 4119 3936 put_prev_task(rq, prev); 4120 3937 next = pick_next_task(rq); 3938 + clear_tsk_need_resched(prev); 3939 + rq->skip_clock_update = 0; 4121 3940 4122 3941 if (likely(prev != next)) { 4123 3942 sched_info_switch(prev, next);
+7 -1
kernel/timer.c
··· 1252 1252 struct tvec_base *base = __get_cpu_var(tvec_bases); 1253 1253 unsigned long expires; 1254 1254 1255 + /* 1256 + * Pretend that there is no timer pending if the cpu is offline. 1257 + * Possible pending timers will be migrated later to an active cpu. 1258 + */ 1259 + if (cpu_is_offline(smp_processor_id())) 1260 + return now + NEXT_TIMER_MAX_DELTA; 1255 1261 spin_lock(&base->lock); 1256 1262 if (time_before_eq(base->next_timer, base->timer_jiffies)) 1257 1263 base->next_timer = __next_timer_interrupt(base); ··· 1325 1319 { 1326 1320 jiffies_64 += ticks; 1327 1321 update_wall_time(); 1328 - calc_global_load(); 1322 + calc_global_load(ticks); 1329 1323 } 1330 1324 1331 1325 #ifdef __ARCH_WANT_SYS_ALARM
+4 -3
kernel/workqueue.c
··· 661 661 { 662 662 struct worker *worker = kthread_data(task); 663 663 664 - if (likely(!(worker->flags & WORKER_NOT_RUNNING))) 664 + if (!(worker->flags & WORKER_NOT_RUNNING)) 665 665 atomic_inc(get_gcwq_nr_running(cpu)); 666 666 } 667 667 ··· 687 687 struct global_cwq *gcwq = get_gcwq(cpu); 688 688 atomic_t *nr_running = get_gcwq_nr_running(cpu); 689 689 690 - if (unlikely(worker->flags & WORKER_NOT_RUNNING)) 690 + if (worker->flags & WORKER_NOT_RUNNING) 691 691 return NULL; 692 692 693 693 /* this can only happen on the local cpu */ ··· 3692 3692 system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0); 3693 3693 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 3694 3694 WQ_UNBOUND_MAX_ACTIVE); 3695 - BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq); 3695 + BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq || 3696 + !system_unbound_wq); 3696 3697 return 0; 3697 3698 } 3698 3699 early_initcall(init_workqueues);
+5
mm/filemap.c
··· 143 143 void remove_from_page_cache(struct page *page) 144 144 { 145 145 struct address_space *mapping = page->mapping; 146 + void (*freepage)(struct page *); 146 147 147 148 BUG_ON(!PageLocked(page)); 148 149 150 + freepage = mapping->a_ops->freepage; 149 151 spin_lock_irq(&mapping->tree_lock); 150 152 __remove_from_page_cache(page); 151 153 spin_unlock_irq(&mapping->tree_lock); 152 154 mem_cgroup_uncharge_cache_page(page); 155 + 156 + if (freepage) 157 + freepage(page); 153 158 } 154 159 EXPORT_SYMBOL(remove_from_page_cache); 155 160
+12 -4
mm/mmap.c
··· 2462 2462 unsigned long addr, unsigned long len, 2463 2463 unsigned long vm_flags, struct page **pages) 2464 2464 { 2465 + int ret; 2465 2466 struct vm_area_struct *vma; 2466 2467 2467 2468 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ··· 2480 2479 vma->vm_ops = &special_mapping_vmops; 2481 2480 vma->vm_private_data = pages; 2482 2481 2483 - if (unlikely(insert_vm_struct(mm, vma))) { 2484 - kmem_cache_free(vm_area_cachep, vma); 2485 - return -ENOMEM; 2486 - } 2482 + ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1); 2483 + if (ret) 2484 + goto out; 2485 + 2486 + ret = insert_vm_struct(mm, vma); 2487 + if (ret) 2488 + goto out; 2487 2489 2488 2490 mm->total_vm += len >> PAGE_SHIFT; 2489 2491 2490 2492 perf_event_mmap(vma); 2491 2493 2492 2494 return 0; 2495 + 2496 + out: 2497 + kmem_cache_free(vm_area_cachep, vma); 2498 + return ret; 2493 2499 } 2494 2500 2495 2501 static DEFINE_MUTEX(mm_all_locks_mutex);
+4
mm/truncate.c
··· 390 390 __remove_from_page_cache(page); 391 391 spin_unlock_irq(&mapping->tree_lock); 392 392 mem_cgroup_uncharge_cache_page(page); 393 + 394 + if (mapping->a_ops->freepage) 395 + mapping->a_ops->freepage(page); 396 + 393 397 page_cache_release(page); /* pagecache ref */ 394 398 return 1; 395 399 failed:
+7
mm/vmscan.c
··· 494 494 spin_unlock_irq(&mapping->tree_lock); 495 495 swapcache_free(swap, page); 496 496 } else { 497 + void (*freepage)(struct page *); 498 + 499 + freepage = mapping->a_ops->freepage; 500 + 497 501 __remove_from_page_cache(page); 498 502 spin_unlock_irq(&mapping->tree_lock); 499 503 mem_cgroup_uncharge_cache_page(page); 504 + 505 + if (freepage != NULL) 506 + freepage(page); 500 507 } 501 508 502 509 return 1;
+2 -1
net/atm/atm_sysfs.c
··· 143 143 .dev_uevent = atm_uevent, 144 144 }; 145 145 146 - int atm_register_sysfs(struct atm_dev *adev) 146 + int atm_register_sysfs(struct atm_dev *adev, struct device *parent) 147 147 { 148 148 struct device *cdev = &adev->class_dev; 149 149 int i, j, err; 150 150 151 151 cdev->class = &atm_class; 152 + cdev->parent = parent; 152 153 dev_set_drvdata(cdev, adev); 153 154 154 155 dev_set_name(cdev, "%s%d", adev->type, adev->number);
+4 -3
net/atm/resources.c
··· 74 74 } 75 75 EXPORT_SYMBOL(atm_dev_lookup); 76 76 77 - struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops, 78 - int number, unsigned long *flags) 77 + struct atm_dev *atm_dev_register(const char *type, struct device *parent, 78 + const struct atmdev_ops *ops, int number, 79 + unsigned long *flags) 79 80 { 80 81 struct atm_dev *dev, *inuse; 81 82 ··· 116 115 goto out_fail; 117 116 } 118 117 119 - if (atm_register_sysfs(dev) < 0) { 118 + if (atm_register_sysfs(dev, parent) < 0) { 120 119 pr_err("atm_register_sysfs failed for dev %s\n", type); 121 120 atm_proc_dev_deregister(dev); 122 121 goto out_fail;
+1 -1
net/atm/resources.h
··· 42 42 43 43 #endif /* CONFIG_PROC_FS */ 44 44 45 - int atm_register_sysfs(struct atm_dev *adev); 45 + int atm_register_sysfs(struct atm_dev *adev, struct device *parent); 46 46 void atm_unregister_sysfs(struct atm_dev *adev); 47 47 #endif
+3 -3
net/bluetooth/sco.c
··· 882 882 int lm = 0; 883 883 884 884 if (type != SCO_LINK && type != ESCO_LINK) 885 - return 0; 885 + return -EINVAL; 886 886 887 887 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); 888 888 ··· 908 908 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); 909 909 910 910 if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) 911 - return 0; 911 + return -EINVAL; 912 912 913 913 if (!status) { 914 914 struct sco_conn *conn; ··· 927 927 BT_DBG("hcon %p reason %d", hcon, reason); 928 928 929 929 if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) 930 - return 0; 930 + return -EINVAL; 931 931 932 932 sco_conn_del(hcon, bt_err(reason)); 933 933
+3 -5
net/ceph/messenger.c
··· 97 97 int ceph_msgr_init(void) 98 98 { 99 99 ceph_msgr_wq = create_workqueue("ceph-msgr"); 100 - if (IS_ERR(ceph_msgr_wq)) { 101 - int ret = PTR_ERR(ceph_msgr_wq); 102 - pr_err("msgr_init failed to create workqueue: %d\n", ret); 103 - ceph_msgr_wq = NULL; 104 - return ret; 100 + if (!ceph_msgr_wq) { 101 + pr_err("msgr_init failed to create workqueue\n"); 102 + return -ENOMEM; 105 103 } 106 104 return 0; 107 105 }
+9 -6
net/ceph/pagevec.c
··· 13 13 * build a vector of user pages 14 14 */ 15 15 struct page **ceph_get_direct_page_vector(const char __user *data, 16 - int num_pages) 16 + int num_pages, bool write_page) 17 17 { 18 18 struct page **pages; 19 19 int rc; ··· 24 24 25 25 down_read(&current->mm->mmap_sem); 26 26 rc = get_user_pages(current, current->mm, (unsigned long)data, 27 - num_pages, 0, 0, pages, NULL); 27 + num_pages, write_page, 0, pages, NULL); 28 28 up_read(&current->mm->mmap_sem); 29 - if (rc < 0) 29 + if (rc < num_pages) 30 30 goto fail; 31 31 return pages; 32 32 33 33 fail: 34 - kfree(pages); 34 + ceph_put_page_vector(pages, rc > 0 ? rc : 0, false); 35 35 return ERR_PTR(rc); 36 36 } 37 37 EXPORT_SYMBOL(ceph_get_direct_page_vector); 38 38 39 - void ceph_put_page_vector(struct page **pages, int num_pages) 39 + void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) 40 40 { 41 41 int i; 42 42 43 - for (i = 0; i < num_pages; i++) 43 + for (i = 0; i < num_pages; i++) { 44 + if (dirty) 45 + set_page_dirty_lock(pages[i]); 44 46 put_page(pages[i]); 47 + } 45 48 kfree(pages); 46 49 } 47 50 EXPORT_SYMBOL(ceph_put_page_vector);
+6 -13
net/core/filter.c
··· 589 589 EXPORT_SYMBOL(sk_chk_filter); 590 590 591 591 /** 592 - * sk_filter_rcu_release - Release a socket filter by rcu_head 592 + * sk_filter_release_rcu - Release a socket filter by rcu_head 593 593 * @rcu: rcu_head that contains the sk_filter to free 594 594 */ 595 - static void sk_filter_rcu_release(struct rcu_head *rcu) 595 + void sk_filter_release_rcu(struct rcu_head *rcu) 596 596 { 597 597 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); 598 598 599 - sk_filter_release(fp); 599 + kfree(fp); 600 600 } 601 - 602 - static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp) 603 - { 604 - unsigned int size = sk_filter_len(fp); 605 - 606 - atomic_sub(size, &sk->sk_omem_alloc); 607 - call_rcu_bh(&fp->rcu, sk_filter_rcu_release); 608 - } 601 + EXPORT_SYMBOL(sk_filter_release_rcu); 609 602 610 603 /** 611 604 * sk_attach_filter - attach a socket filter ··· 642 649 rcu_assign_pointer(sk->sk_filter, fp); 643 650 644 651 if (old_fp) 645 - sk_filter_delayed_uncharge(sk, old_fp); 652 + sk_filter_uncharge(sk, old_fp); 646 653 return 0; 647 654 } 648 655 EXPORT_SYMBOL_GPL(sk_attach_filter); ··· 656 663 sock_owned_by_user(sk)); 657 664 if (filter) { 658 665 rcu_assign_pointer(sk->sk_filter, NULL); 659 - sk_filter_delayed_uncharge(sk, filter); 666 + sk_filter_uncharge(sk, filter); 660 667 ret = 0; 661 668 } 662 669 return ret;
+4 -2
net/core/timestamping.c
··· 96 96 struct phy_device *phydev; 97 97 unsigned int type; 98 98 99 - skb_push(skb, ETH_HLEN); 99 + if (skb_headroom(skb) < ETH_HLEN) 100 + return false; 101 + __skb_push(skb, ETH_HLEN); 100 102 101 103 type = classify(skb); 102 104 103 - skb_pull(skb, ETH_HLEN); 105 + __skb_pull(skb, ETH_HLEN); 104 106 105 107 switch (type) { 106 108 case PTP_CLASS_V1_IPV4:
+9 -3
net/econet/af_econet.c
··· 661 661 err = 0; 662 662 switch (cmd) { 663 663 case SIOCSIFADDR: 664 - if (!capable(CAP_NET_ADMIN)) 665 - return -EPERM; 664 + if (!capable(CAP_NET_ADMIN)) { 665 + err = -EPERM; 666 + break; 667 + } 666 668 667 669 edev = dev->ec_ptr; 668 670 if (edev == NULL) { ··· 851 849 { 852 850 struct iphdr *ip = ip_hdr(skb); 853 851 unsigned char stn = ntohl(ip->saddr) & 0xff; 852 + struct dst_entry *dst = skb_dst(skb); 853 + struct ec_device *edev = NULL; 854 854 struct sock *sk = NULL; 855 855 struct sk_buff *newskb; 856 - struct ec_device *edev = skb->dev->ec_ptr; 856 + 857 + if (dst) 858 + edev = dst->dev->ec_ptr; 857 859 858 860 if (! edev) 859 861 goto bad;
+1
net/ipv4/proc.c
··· 253 253 SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), 254 254 SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), 255 255 SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), 256 + SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), 256 257 SNMP_MIB_SENTINEL 257 258 }; 258 259
+1 -1
net/ipv4/tcp_minisocks.c
··· 347 347 * socket up. We've got bigger problems than 348 348 * non-graceful socket closings. 349 349 */ 350 - LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n"); 350 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW); 351 351 } 352 352 353 353 tcp_update_metrics(sk);
+23 -19
net/ipv4/tcp_output.c
··· 231 231 /* when initializing use the value from init_rcv_wnd 232 232 * rather than the default from above 233 233 */ 234 - if (init_rcv_wnd && 235 - (*rcv_wnd > init_rcv_wnd * mss)) 236 - *rcv_wnd = init_rcv_wnd * mss; 237 - else if (*rcv_wnd > init_cwnd * mss) 238 - *rcv_wnd = init_cwnd * mss; 234 + if (init_rcv_wnd) 235 + *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); 236 + else 237 + *rcv_wnd = min(*rcv_wnd, init_cwnd * mss); 239 238 } 240 239 241 240 /* Set the clamp no higher than max representable value */ ··· 385 386 */ 386 387 static u8 tcp_cookie_size_check(u8 desired) 387 388 { 388 - if (desired > 0) { 389 + int cookie_size; 390 + 391 + if (desired > 0) 389 392 /* previously specified */ 390 393 return desired; 391 - } 392 - if (sysctl_tcp_cookie_size <= 0) { 394 + 395 + cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size); 396 + if (cookie_size <= 0) 393 397 /* no default specified */ 394 398 return 0; 395 - } 396 - if (sysctl_tcp_cookie_size <= TCP_COOKIE_MIN) { 399 + 400 + if (cookie_size <= TCP_COOKIE_MIN) 397 401 /* value too small, specify minimum */ 398 402 return TCP_COOKIE_MIN; 399 - } 400 - if (sysctl_tcp_cookie_size >= TCP_COOKIE_MAX) { 403 + 404 + if (cookie_size >= TCP_COOKIE_MAX) 401 405 /* value too large, specify maximum */ 402 406 return TCP_COOKIE_MAX; 403 - } 404 - if (0x1 & sysctl_tcp_cookie_size) { 407 + 408 + if (cookie_size & 1) 405 409 /* 8-bit multiple, illegal, fix it */ 406 - return (u8)(sysctl_tcp_cookie_size + 0x1); 407 - } 408 - return (u8)sysctl_tcp_cookie_size; 410 + cookie_size++; 411 + 412 + return (u8)cookie_size; 409 413 } 410 414 411 415 /* Write previously computed TCP options to the packet. 
··· 1515 1513 struct tcp_sock *tp = tcp_sk(sk); 1516 1514 const struct inet_connection_sock *icsk = inet_csk(sk); 1517 1515 u32 send_win, cong_win, limit, in_flight; 1516 + int win_divisor; 1518 1517 1519 1518 if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) 1520 1519 goto send_now; ··· 1547 1544 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) 1548 1545 goto send_now; 1549 1546 1550 - if (sysctl_tcp_tso_win_divisor) { 1547 + win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor); 1548 + if (win_divisor) { 1551 1549 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 1552 1550 1553 1551 /* If at least some fraction of a window is available, 1554 1552 * just use it. 1555 1553 */ 1556 - chunk /= sysctl_tcp_tso_win_divisor; 1554 + chunk /= win_divisor; 1557 1555 if (limit >= chunk) 1558 1556 goto send_now; 1559 1557 } else {
+2 -2
net/ipv6/addrconf.c
··· 4021 4021 kfree_skb(skb); 4022 4022 goto errout; 4023 4023 } 4024 - rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); 4024 + rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC); 4025 4025 return; 4026 4026 errout: 4027 4027 if (err < 0) 4028 - rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err); 4028 + rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err); 4029 4029 } 4030 4030 4031 4031 static inline size_t inet6_prefix_nlmsg_size(void)
+7
net/ipv6/ip6_tunnel.c
··· 1175 1175 sizeof (struct ipv6hdr); 1176 1176 1177 1177 dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr); 1178 + if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1179 + dev->mtu-=8; 1178 1180 1179 1181 if (dev->mtu < IPV6_MIN_MTU) 1180 1182 dev->mtu = IPV6_MIN_MTU; ··· 1365 1363 1366 1364 static void ip6_tnl_dev_setup(struct net_device *dev) 1367 1365 { 1366 + struct ip6_tnl *t; 1367 + 1368 1368 dev->netdev_ops = &ip6_tnl_netdev_ops; 1369 1369 dev->destructor = ip6_dev_free; 1370 1370 1371 1371 dev->type = ARPHRD_TUNNEL6; 1372 1372 dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr); 1373 1373 dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr); 1374 + t = netdev_priv(dev); 1375 + if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1376 + dev->mtu-=8; 1374 1377 dev->flags |= IFF_NOARP; 1375 1378 dev->addr_len = sizeof(struct in6_addr); 1376 1379 dev->features |= NETIF_F_NETNS_LOCAL;
+2 -1
net/ipv6/sit.c
··· 606 606 return 0; 607 607 } 608 608 609 - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 609 + /* no tunnel matched, let upstream know, ipsec may handle it */ 610 610 rcu_read_unlock(); 611 + return 1; 611 612 out: 612 613 kfree_skb(skb); 613 614 return 0;
+5 -1
net/l2tp/l2tp_ip.c
··· 674 674 MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); 675 675 MODULE_DESCRIPTION("L2TP over IP"); 676 676 MODULE_VERSION("1.0"); 677 - MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP); 677 + 678 + /* Use the value of SOCK_DGRAM (2) directory, because __stringify does't like 679 + * enums 680 + */ 681 + MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
+3 -2
net/llc/af_llc.c
··· 317 317 goto out; 318 318 rc = -ENODEV; 319 319 rtnl_lock(); 320 + rcu_read_lock(); 320 321 if (sk->sk_bound_dev_if) { 321 - llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); 322 + llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if); 322 323 if (llc->dev) { 323 324 if (!addr->sllc_arphrd) 324 325 addr->sllc_arphrd = llc->dev->type; ··· 330 329 !llc_mac_match(addr->sllc_mac, 331 330 llc->dev->dev_addr)) { 332 331 rc = -EINVAL; 333 - dev_put(llc->dev); 334 332 llc->dev = NULL; 335 333 } 336 334 } 337 335 } else 338 336 llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd, 339 337 addr->sllc_mac); 338 + rcu_read_unlock(); 340 339 rtnl_unlock(); 341 340 if (!llc->dev) 342 341 goto out;
+6
net/mac80211/rx.c
··· 2247 2247 break; 2248 2248 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 2249 2249 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 2250 + if (is_multicast_ether_addr(mgmt->da) && 2251 + !is_broadcast_ether_addr(mgmt->da)) 2252 + return RX_DROP_MONITOR; 2253 + 2250 2254 /* process only for station */ 2251 2255 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2252 2256 return RX_DROP_MONITOR; ··· 2745 2741 2746 2742 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 2747 2743 return; 2744 + goto out; 2748 2745 } 2749 2746 } 2750 2747 ··· 2785 2780 return; 2786 2781 } 2787 2782 2783 + out: 2788 2784 dev_kfree_skb(skb); 2789 2785 } 2790 2786
+24 -4
net/mac80211/tx.c
··· 1587 1587 list) { 1588 1588 if (!ieee80211_sdata_running(tmp_sdata)) 1589 1589 continue; 1590 - if (tmp_sdata->vif.type != NL80211_IFTYPE_AP) 1590 + if (tmp_sdata->vif.type == 1591 + NL80211_IFTYPE_MONITOR || 1592 + tmp_sdata->vif.type == 1593 + NL80211_IFTYPE_AP_VLAN || 1594 + tmp_sdata->vif.type == 1595 + NL80211_IFTYPE_WDS) 1591 1596 continue; 1592 1597 if (compare_ether_addr(tmp_sdata->vif.addr, 1593 1598 hdr->addr2) == 0) { ··· 1737 1732 int nh_pos, h_pos; 1738 1733 struct sta_info *sta = NULL; 1739 1734 u32 sta_flags = 0; 1735 + struct sk_buff *tmp_skb; 1740 1736 1741 1737 if (unlikely(skb->len < ETH_HLEN)) { 1742 1738 ret = NETDEV_TX_OK; 1743 1739 goto fail; 1744 1740 } 1745 - 1746 - nh_pos = skb_network_header(skb) - skb->data; 1747 - h_pos = skb_transport_header(skb) - skb->data; 1748 1741 1749 1742 /* convert Ethernet header to proper 802.11 header (based on 1750 1743 * operation mode) */ ··· 1916 1913 goto fail; 1917 1914 } 1918 1915 1916 + /* 1917 + * If the skb is shared we need to obtain our own copy. 1918 + */ 1919 + if (skb_shared(skb)) { 1920 + tmp_skb = skb; 1921 + skb = skb_copy(skb, GFP_ATOMIC); 1922 + kfree_skb(tmp_skb); 1923 + 1924 + if (!skb) { 1925 + ret = NETDEV_TX_OK; 1926 + goto fail; 1927 + } 1928 + } 1929 + 1919 1930 hdr.frame_control = fc; 1920 1931 hdr.duration_id = 0; 1921 1932 hdr.seq_ctrl = 0; ··· 1947 1930 encaps_data = NULL; 1948 1931 encaps_len = 0; 1949 1932 } 1933 + 1934 + nh_pos = skb_network_header(skb) - skb->data; 1935 + h_pos = skb_transport_header(skb) - skb->data; 1950 1936 1951 1937 skb_pull(skb, skip_header_bytes); 1952 1938 nh_pos -= skip_header_bytes;
+8
net/sctp/socket.c
··· 2932 2932 struct sctp_association *asoc = NULL; 2933 2933 struct sctp_setpeerprim prim; 2934 2934 struct sctp_chunk *chunk; 2935 + struct sctp_af *af; 2935 2936 int err; 2936 2937 2937 2938 sp = sctp_sk(sk); ··· 2959 2958 2960 2959 if (!sctp_state(asoc, ESTABLISHED)) 2961 2960 return -ENOTCONN; 2961 + 2962 + af = sctp_get_af_specific(prim.sspp_addr.ss_family); 2963 + if (!af) 2964 + return -EINVAL; 2965 + 2966 + if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) 2967 + return -EADDRNOTAVAIL; 2962 2968 2963 2969 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) 2964 2970 return -EADDRNOTAVAIL;
+15
net/socket.c
··· 732 732 return ret; 733 733 } 734 734 735 + /** 736 + * kernel_recvmsg - Receive a message from a socket (kernel space) 737 + * @sock: The socket to receive the message from 738 + * @msg: Received message 739 + * @vec: Input s/g array for message data 740 + * @num: Size of input s/g array 741 + * @size: Number of bytes to read 742 + * @flags: Message flags (MSG_DONTWAIT, etc...) 743 + * 744 + * On return the msg structure contains the scatter/gather array passed in the 745 + * vec argument. The array is modified so that it consists of the unfilled 746 + * portion of the original array. 747 + * 748 + * The returned value is the total number of bytes received, or an error. 749 + */ 735 750 int kernel_recvmsg(struct socket *sock, struct msghdr *msg, 736 751 struct kvec *vec, size_t num, size_t size, int flags) 737 752 {
+8 -1
net/sunrpc/svc_xprt.c
··· 212 212 spin_lock(&svc_xprt_class_lock); 213 213 list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) { 214 214 struct svc_xprt *newxprt; 215 + unsigned short newport; 215 216 216 217 if (strcmp(xprt_name, xcl->xcl_name)) 217 218 continue; ··· 231 230 spin_lock_bh(&serv->sv_lock); 232 231 list_add(&newxprt->xpt_list, &serv->sv_permsocks); 233 232 spin_unlock_bh(&serv->sv_lock); 233 + newport = svc_xprt_local_port(newxprt); 234 234 clear_bit(XPT_BUSY, &newxprt->xpt_flags); 235 - return svc_xprt_local_port(newxprt); 235 + return newport; 236 236 } 237 237 err: 238 238 spin_unlock(&svc_xprt_class_lock); ··· 427 425 { 428 426 BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); 429 427 xprt->xpt_pool = NULL; 428 + /* As soon as we clear busy, the xprt could be closed and 429 + * 'put', so we need a reference to call svc_xprt_enqueue with: 430 + */ 431 + svc_xprt_get(xprt); 430 432 clear_bit(XPT_BUSY, &xprt->xpt_flags); 431 433 svc_xprt_enqueue(xprt); 434 + svc_xprt_put(xprt); 432 435 } 433 436 EXPORT_SYMBOL_GPL(svc_xprt_received); 434 437
+1
net/x25/x25_link.c
··· 394 394 list_for_each_safe(entry, tmp, &x25_neigh_list) { 395 395 nb = list_entry(entry, struct x25_neigh, node); 396 396 __x25_remove_neigh(nb); 397 + dev_put(nb->dev); 397 398 } 398 399 write_unlock_bh(&x25_neigh_list_lock); 399 400 }
+1 -1
net/xfrm/xfrm_state.c
··· 1268 1268 1269 1269 return xc; 1270 1270 error: 1271 - kfree(xc); 1271 + xfrm_state_put(xc); 1272 1272 return NULL; 1273 1273 } 1274 1274 EXPORT_SYMBOL(xfrm_state_migrate);
+10 -14
sound/pci/hda/hda_eld.c
··· 189 189 a->channels = GRAB_BITS(buf, 0, 0, 3); 190 190 a->channels++; 191 191 192 + a->sample_bits = 0; 193 + a->max_bitrate = 0; 194 + 192 195 a->format = GRAB_BITS(buf, 0, 3, 4); 193 196 switch (a->format) { 194 197 case AUDIO_CODING_TYPE_REF_STREAM_HEADER: ··· 201 198 202 199 case AUDIO_CODING_TYPE_LPCM: 203 200 val = GRAB_BITS(buf, 2, 0, 3); 204 - a->sample_bits = 0; 205 201 for (i = 0; i < 3; i++) 206 202 if (val & (1 << i)) 207 203 a->sample_bits |= cea_sample_sizes[i + 1]; ··· 600 598 { 601 599 int i; 602 600 603 - pcm->rates = 0; 604 - pcm->formats = 0; 605 - pcm->maxbps = 0; 606 - pcm->channels_min = -1; 607 - pcm->channels_max = 0; 601 + /* assume basic audio support (the basic audio flag is not in ELD; 602 + * however, all audio capable sinks are required to support basic 603 + * audio) */ 604 + pcm->rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000; 605 + pcm->formats = SNDRV_PCM_FMTBIT_S16_LE; 606 + pcm->maxbps = 16; 607 + pcm->channels_max = 2; 608 608 for (i = 0; i < eld->sad_count; i++) { 609 609 struct cea_sad *a = &eld->sad[i]; 610 610 pcm->rates |= a->rates; 611 - if (a->channels < pcm->channels_min) 612 - pcm->channels_min = a->channels; 613 611 if (a->channels > pcm->channels_max) 614 612 pcm->channels_max = a->channels; 615 613 if (a->format == AUDIO_CODING_TYPE_LPCM) { 616 - if (a->sample_bits & AC_SUPPCM_BITS_16) { 617 - pcm->formats |= SNDRV_PCM_FMTBIT_S16_LE; 618 - if (pcm->maxbps < 16) 619 - pcm->maxbps = 16; 620 - } 621 614 if (a->sample_bits & AC_SUPPCM_BITS_20) { 622 615 pcm->formats |= SNDRV_PCM_FMTBIT_S32_LE; 623 616 if (pcm->maxbps < 20) ··· 632 635 /* restrict the parameters by the values the codec provides */ 633 636 pcm->rates &= codec_pars->rates; 634 637 pcm->formats &= codec_pars->formats; 635 - pcm->channels_min = max(pcm->channels_min, codec_pars->channels_min); 636 638 pcm->channels_max = min(pcm->channels_max, codec_pars->channels_max); 637 639 pcm->maxbps = min(pcm->maxbps, 
codec_pars->maxbps); 638 640 }
+1
sound/pci/hda/hda_intel.c
··· 2296 2296 */ 2297 2297 static struct snd_pci_quirk position_fix_list[] __devinitdata = { 2298 2298 SND_PCI_QUIRK(0x1025, 0x009f, "Acer Aspire 5110", POS_FIX_LPIB), 2299 + SND_PCI_QUIRK(0x1025, 0x026f, "Acer Aspire 5538", POS_FIX_LPIB), 2299 2300 SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB), 2300 2301 SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB), 2301 2302 SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
+18 -17
sound/pci/hda/patch_conexant.c
··· 2116 2116 struct conexant_spec *spec = codec->spec; 2117 2117 unsigned int pinctl; 2118 2118 2119 - snd_printdd("CXT5066: update speaker, hp_present=%d\n", 2120 - spec->hp_present); 2119 + snd_printdd("CXT5066: update speaker, hp_present=%d, cur_eapd=%d\n", 2120 + spec->hp_present, spec->cur_eapd); 2121 2121 2122 2122 /* Port A (HP) */ 2123 2123 pinctl = ((spec->hp_present & 1) && spec->cur_eapd) ? PIN_HP : 0; ··· 2125 2125 pinctl); 2126 2126 2127 2127 /* Port D (HP/LO) */ 2128 - pinctl = ((spec->hp_present & 2) && spec->cur_eapd) 2129 - ? spec->port_d_mode : 0; 2130 - /* Mute if Port A is connected on Thinkpad */ 2131 - if (spec->thinkpad && (spec->hp_present & 1)) 2132 - pinctl = 0; 2128 + if (spec->dell_automute) { 2129 + /* DELL AIO Port Rule: PortA> PortD> IntSpk */ 2130 + pinctl = (!(spec->hp_present & 1) && spec->cur_eapd) 2131 + ? PIN_OUT : 0; 2132 + } else if (spec->thinkpad) { 2133 + if (spec->cur_eapd) 2134 + pinctl = spec->port_d_mode; 2135 + /* Mute dock line-out if Port A (laptop HP) is present */ 2136 + if (spec->hp_present& 1) 2137 + pinctl = 0; 2138 + } else { 2139 + pinctl = ((spec->hp_present & 2) && spec->cur_eapd) 2140 + ? spec->port_d_mode : 0; 2141 + } 2133 2142 snd_hda_codec_write(codec, 0x1c, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 2134 2143 pinctl); 2135 2144 ··· 2146 2137 pinctl = (!spec->hp_present && spec->cur_eapd) ? PIN_OUT : 0; 2147 2138 snd_hda_codec_write(codec, 0x1f, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 2148 2139 pinctl); 2149 - 2150 - if (spec->dell_automute) { 2151 - /* DELL AIO Port Rule: PortA > PortD > IntSpk */ 2152 - pinctl = (!(spec->hp_present & 1) && spec->cur_eapd) 2153 - ? 
PIN_OUT : 0; 2154 - snd_hda_codec_write(codec, 0x1c, 0, 2155 - AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl); 2156 - } 2157 2140 } 2158 2141 2159 2142 /* turn on/off EAPD (+ mute HP) as a master switch */ ··· 3096 3095 static struct snd_pci_quirk cxt5066_cfg_tbl[] = { 3097 3096 SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD), 3098 3097 SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO), 3099 - SND_PCI_QUIRK(0x1028, 0x02f5, "Dell", 3100 - CXT5066_DELL_LAPTOP), 3098 + SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD), 3101 3099 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), 3102 3100 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), 3103 3101 SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), ··· 3109 3109 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5), 3110 3110 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD), 3111 3111 SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), 3112 + SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD), 3112 3113 SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD), 3113 3114 SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), 3114 3115 SND_PCI_QUIRK(0x17aa, 0x21c8, "Thinkpad Edge 11", CXT5066_IDEAPAD),
-1
sound/pci/hda/patch_hdmi.c
··· 834 834 return -ENODEV; 835 835 } else { 836 836 /* fallback to the codec default */ 837 - hinfo->channels_min = codec_pars->channels_min; 838 837 hinfo->channels_max = codec_pars->channels_max; 839 838 hinfo->rates = codec_pars->rates; 840 839 hinfo->formats = codec_pars->formats;
+23 -2
sound/pci/hda/patch_realtek.c
··· 4595 4595 SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU), 4596 4596 SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW), 4597 4597 SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG), 4598 + SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_LG), 4598 4599 SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_LG), 4599 4600 SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_LG_LW), 4600 4601 SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_TCL_S700), ··· 10830 10829 { 10831 10830 struct alc_spec *spec = codec->spec; 10832 10831 struct auto_pin_cfg *cfg = &spec->autocfg; 10833 - int i, err; 10832 + int i, err, type; 10833 + int type_idx = 0; 10834 10834 hda_nid_t nid; 10835 10835 10836 10836 for (i = 0; i < cfg->num_inputs; i++) { ··· 10840 10838 nid = cfg->inputs[i].pin; 10841 10839 if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP) { 10842 10840 char label[32]; 10841 + type = cfg->inputs[i].type; 10842 + if (i > 0 && type == cfg->inputs[i - 1].type) 10843 + type_idx++; 10844 + else 10845 + type_idx = 0; 10843 10846 snprintf(label, sizeof(label), "%s Boost", 10844 10847 hda_get_autocfg_input_label(codec, cfg, i)); 10845 - err = add_control(spec, ALC_CTL_WIDGET_VOL, label, 0, 10848 + err = add_control(spec, ALC_CTL_WIDGET_VOL, label, 10849 + type_idx, 10846 10850 HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT)); 10847 10851 if (err < 0) 10848 10852 return err; ··· 14807 14799 enum { 14808 14800 ALC269_FIXUP_SONY_VAIO, 14809 14801 ALC269_FIXUP_DELL_M101Z, 14802 + ALC269_FIXUP_LENOVO_EDGE14, 14803 + ALC269_FIXUP_ASUS_G73JW, 14810 14804 }; 14811 14805 14812 14806 static const struct alc_fixup alc269_fixups[] = { ··· 14826 14816 {} 14827 14817 } 14828 14818 }, 14819 + [ALC269_FIXUP_LENOVO_EDGE14] = { 14820 + .sku = ALC_FIXUP_SKU_IGNORE, 14821 + }, 14822 + [ALC269_FIXUP_ASUS_G73JW] = { 14823 + .pins = (const struct alc_pincfg[]) { 14824 + { 0x17, 0x99130111 }, /* subwoofer */ 14825 + { } 14826 + } 14827 + }, 14829 14828 }; 14830 14829 14831 14830 static struct snd_pci_quirk 
alc269_fixup_tbl[] = { 14832 14831 SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), 14833 14832 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), 14833 + SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_LENOVO_EDGE14), 14834 + SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), 14834 14835 {} 14835 14836 }; 14836 14837
+5 -5
sound/soc/codecs/wm8580.c
··· 161 161 static const u16 wm8580_reg[] = { 162 162 0x0121, 0x017e, 0x007d, 0x0014, /*R3*/ 163 163 0x0121, 0x017e, 0x007d, 0x0194, /*R7*/ 164 - 0x001c, 0x0002, 0x0002, 0x00c2, /*R11*/ 164 + 0x0010, 0x0002, 0x0002, 0x00c2, /*R11*/ 165 165 0x0182, 0x0082, 0x000a, 0x0024, /*R15*/ 166 166 0x0009, 0x0000, 0x00ff, 0x0000, /*R19*/ 167 167 0x00ff, 0x00ff, 0x00ff, 0x00ff, /*R23*/ ··· 491 491 paifa |= 0x8; 492 492 break; 493 493 case SNDRV_PCM_FORMAT_S20_3LE: 494 - paifa |= 0x10; 494 + paifa |= 0x0; 495 495 paifb |= WM8580_AIF_LENGTH_20; 496 496 break; 497 497 case SNDRV_PCM_FORMAT_S24_LE: 498 - paifa |= 0x10; 498 + paifa |= 0x0; 499 499 paifb |= WM8580_AIF_LENGTH_24; 500 500 break; 501 501 case SNDRV_PCM_FORMAT_S32_LE: 502 - paifa |= 0x10; 503 - paifb |= WM8580_AIF_LENGTH_24; 502 + paifa |= 0x0; 503 + paifb |= WM8580_AIF_LENGTH_32; 504 504 break; 505 505 default: 506 506 return -EINVAL;
+2 -1
sound/soc/codecs/wm8904.c
··· 818 818 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 819 819 struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); 820 820 821 - return wm8904->deemph; 821 + ucontrol->value.enumerated.item[0] = wm8904->deemph; 822 + return 0; 822 823 } 823 824 824 825 static int wm8904_put_deemph(struct snd_kcontrol *kcontrol,
+2 -1
sound/soc/codecs/wm8955.c
··· 380 380 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 381 381 struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); 382 382 383 - return wm8955->deemph; 383 + ucontrol->value.enumerated.item[0] = wm8955->deemph; 384 + return 0; 384 385 } 385 386 386 387 static int wm8955_put_deemph(struct snd_kcontrol *kcontrol,
+2 -1
sound/soc/codecs/wm8960.c
··· 138 138 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 139 139 struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); 140 140 141 - return wm8960->deemph; 141 + ucontrol->value.enumerated.item[0] = wm8960->deemph; 142 + return 0; 142 143 } 143 144 144 145 static int wm8960_put_deemph(struct snd_kcontrol *kcontrol,
+1 -1
sound/soc/codecs/wm8962.c
··· 3339 3339 int mask; 3340 3340 int active; 3341 3341 3342 - mask = snd_soc_read(codec, WM8962_INTERRUPT_STATUS_2); 3342 + mask = snd_soc_read(codec, WM8962_INTERRUPT_STATUS_2_MASK); 3343 3343 3344 3344 active = snd_soc_read(codec, WM8962_INTERRUPT_STATUS_2); 3345 3345 active &= ~mask;
+1 -1
sound/soc/codecs/wm_hubs.c
··· 293 293 SOC_DOUBLE_R("Speaker ZC Switch", 294 294 WM8993_SPEAKER_VOLUME_LEFT, WM8993_SPEAKER_VOLUME_RIGHT, 295 295 7, 1, 0), 296 - SOC_DOUBLE_TLV("Speaker Boost Volume", WM8993_SPKOUT_BOOST, 0, 3, 7, 0, 296 + SOC_DOUBLE_TLV("Speaker Boost Volume", WM8993_SPKOUT_BOOST, 3, 0, 7, 0, 297 297 spkboost_tlv), 298 298 SOC_ENUM("Speaker Reference", speaker_ref), 299 299 SOC_ENUM("Speaker Mode", speaker_mode),
+9 -5
sound/soc/soc-core.c
··· 1619 1619 #ifdef CONFIG_SND_SOC_AC97_BUS 1620 1620 /* register any AC97 codecs */ 1621 1621 for (i = 0; i < card->num_rtd; i++) { 1622 - ret = soc_register_ac97_dai_link(&card->rtd[i]); 1623 - if (ret < 0) { 1624 - printk(KERN_ERR "asoc: failed to register AC97 %s\n", card->name); 1625 - goto probe_dai_err; 1626 - } 1622 + ret = soc_register_ac97_dai_link(&card->rtd[i]); 1623 + if (ret < 0) { 1624 + printk(KERN_ERR "asoc: failed to register AC97 %s\n", card->name); 1625 + while (--i >= 0) 1626 + soc_unregister_ac97_dai_link(&card->rtd[i]); 1627 + goto probe_dai_err; 1627 1628 } 1629 + } 1628 1630 #endif 1629 1631 1630 1632 card->instantiated = 1; ··· 3074 3072 pr_debug("Registered DAI '%s'\n", dai->name); 3075 3073 } 3076 3074 3075 + mutex_lock(&client_mutex); 3077 3076 snd_soc_instantiate_cards(); 3077 + mutex_unlock(&client_mutex); 3078 3078 return 0; 3079 3079 3080 3080 err:
+3
sound/soc/soc-dapm.c
··· 944 944 case SND_SOC_DAPM_STREAM_RESUME: 945 945 sys_power = 1; 946 946 break; 947 + case SND_SOC_DAPM_STREAM_STOP: 948 + sys_power = !!codec->active; 949 + break; 947 950 case SND_SOC_DAPM_STREAM_SUSPEND: 948 951 sys_power = 0; 949 952 break;