Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge commit 'v2.6.35-rc4' into perf/core

Merge reason: Pick up the latest perf fixes

Signed-off-by: Ingo Molnar <mingo@elte.hu>

+1876 -1461
+5
Documentation/watchdog/watchdog-parameters.txt
··· 125 125 nowayout: Watchdog cannot be stopped once started 126 126 (default=kernel config parameter) 127 127 ------------------------------------------------- 128 + imx2_wdt: 129 + timeout: Watchdog timeout in seconds (default 60 s) 130 + nowayout: Watchdog cannot be stopped once started 131 + (default=kernel config parameter) 132 + ------------------------------------------------- 128 133 indydog: 129 134 nowayout: Watchdog cannot be stopped once started 130 135 (default=kernel config parameter)
+23 -8
MAINTAINERS
··· 896 896 897 897 ARM/SAMSUNG ARM ARCHITECTURES 898 898 M: Ben Dooks <ben-linux@fluff.org> 899 + M: Kukjin Kim <kgene.kim@samsung.com> 899 900 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 900 901 W: http://www.fluff.org/ben/linux/ 901 902 S: Maintained 902 - F: arch/arm/plat-s3c/ 903 + F: arch/arm/plat-samsung/ 903 904 F: arch/arm/plat-s3c24xx/ 905 + F: arch/arm/plat-s5p/ 904 906 905 907 ARM/S3C2410 ARM ARCHITECTURE 906 908 M: Ben Dooks <ben-linux@fluff.org> ··· 1150 1148 F: drivers/mmc/host/atmel-mci-regs.h 1151 1149 1152 1150 ATMEL AT91 / AT32 SERIAL DRIVER 1153 - M: Haavard Skinnemoen <hskinnemoen@atmel.com> 1151 + M: Nicolas Ferre <nicolas.ferre@atmel.com> 1154 1152 S: Supported 1155 1153 F: drivers/serial/atmel_serial.c 1156 1154 ··· 1162 1160 F: include/video/atmel_lcdc.h 1163 1161 1164 1162 ATMEL MACB ETHERNET DRIVER 1165 - M: Haavard Skinnemoen <hskinnemoen@atmel.com> 1163 + M: Nicolas Ferre <nicolas.ferre@atmel.com> 1166 1164 S: Supported 1167 1165 F: drivers/net/macb.* 1168 1166 1169 1167 ATMEL SPI DRIVER 1170 - M: Haavard Skinnemoen <hskinnemoen@atmel.com> 1168 + M: Nicolas Ferre <nicolas.ferre@atmel.com> 1171 1169 S: Supported 1172 1170 F: drivers/spi/atmel_spi.* 1173 1171 1174 1172 ATMEL USBA UDC DRIVER 1175 - M: Haavard Skinnemoen <hskinnemoen@atmel.com> 1176 - L: kernel@avr32linux.org 1173 + M: Nicolas Ferre <nicolas.ferre@atmel.com> 1174 + L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1177 1175 W: http://avr32linux.org/twiki/bin/view/Main/AtmelUsbDeviceDriver 1178 1176 S: Supported 1179 1177 F: drivers/usb/gadget/atmel_usba_udc.* ··· 2111 2109 2112 2110 EDAC-I5400 2113 2111 M: Mauro Carvalho Chehab <mchehab@redhat.com> 2114 - L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) 2112 + L: linux-edac@vger.kernel.org 2115 2113 W: bluesmoke.sourceforge.net 2116 2114 S: Maintained 2117 2115 F: drivers/edac/i5400_edac.c 2116 + 2117 + EDAC-I7CORE 2118 + M: Mauro Carvalho Chehab 
<mchehab@redhat.com> 2119 + L: linux-edac@vger.kernel.org 2120 + W: bluesmoke.sourceforge.net 2121 + S: Maintained 2122 + F: drivers/edac/i7core_edac.c linux/edac_mce.h drivers/edac/edac_mce.c 2118 2123 2119 2124 EDAC-I82975X 2120 2125 M: Ranganathan Desikan <ravi@jetztechnologies.com> ··· 3382 3373 M: Ananth N Mavinakayanahalli <ananth@in.ibm.com> 3383 3374 M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 3384 3375 M: "David S. Miller" <davem@davemloft.net> 3385 - M: Masami Hiramatsu <mhiramat@redhat.com> 3376 + M: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> 3386 3377 S: Maintained 3387 3378 F: Documentation/kprobes.txt 3388 3379 F: include/linux/kprobes.h ··· 4629 4620 M: Robert Jarzmik <robert.jarzmik@free.fr> 4630 4621 L: rtc-linux@googlegroups.com 4631 4622 S: Maintained 4623 + 4624 + QLOGIC QLA1280 SCSI DRIVER 4625 + M: Michael Reed <mdr@sgi.com> 4626 + L: linux-scsi@vger.kernel.org 4627 + S: Maintained 4628 + F: drivers/scsi/qla1280.[ch] 4632 4629 4633 4630 QLOGIC QLA2XXX FC-SCSI DRIVER 4634 4631 M: Andrew Vasquez <andrew.vasquez@qlogic.com>
+2 -72
Makefile
··· 1 1 VERSION = 2 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 35 4 - EXTRAVERSION = -rc3 4 + EXTRAVERSION = -rc4 5 5 NAME = Sheep on Meth 6 6 7 7 # *DOCUMENTATION* ··· 883 883 $(vmlinux-dirs): prepare scripts 884 884 $(Q)$(MAKE) $(build)=$@ 885 885 886 - # Build the kernel release string 887 - # 888 - # The KERNELRELEASE value built here is stored in the file 889 - # include/config/kernel.release, and is used when executing several 890 - # make targets, such as "make install" or "make modules_install." 891 - # 892 - # The eventual kernel release string consists of the following fields, 893 - # shown in a hierarchical format to show how smaller parts are concatenated 894 - # to form the larger and final value, with values coming from places like 895 - # the Makefile, kernel config options, make command line options and/or 896 - # SCM tag information. 897 - # 898 - # $(KERNELVERSION) 899 - # $(VERSION) eg, 2 900 - # $(PATCHLEVEL) eg, 6 901 - # $(SUBLEVEL) eg, 18 902 - # $(EXTRAVERSION) eg, -rc6 903 - # $(localver-full) 904 - # $(localver) 905 - # localversion* (files without backups, containing '~') 906 - # $(CONFIG_LOCALVERSION) (from kernel config setting) 907 - # $(LOCALVERSION) (from make command line, if provided) 908 - # $(localver-extra) 909 - # $(scm-identifier) (unique SCM tag, if one exists) 910 - # ./scripts/setlocalversion (only with CONFIG_LOCALVERSION_AUTO) 911 - # .scmversion (only with CONFIG_LOCALVERSION_AUTO) 912 - # + (only without CONFIG_LOCALVERSION_AUTO 913 - # and without LOCALVERSION= and 914 - # repository is at non-tagged commit) 915 - # 916 - # For kernels without CONFIG_LOCALVERSION_AUTO compiled from an SCM that has 917 - # been revised beyond a tagged commit, `+' is appended to the version string 918 - # when not overridden by using "make LOCALVERSION=". This indicates that the 919 - # kernel is not a vanilla release version and has been modified. 
920 - 921 - pattern = ".*/localversion[^~]*" 922 - string = $(shell cat /dev/null \ 923 - `find $(objtree) $(srctree) -maxdepth 1 -regex $(pattern) | sort -u`) 924 - 925 - localver = $(subst $(space),, $(string) \ 926 - $(patsubst "%",%,$(CONFIG_LOCALVERSION))) 927 - 928 - # scripts/setlocalversion is called to create a unique identifier if the source 929 - # is managed by a known SCM and the repository has been revised since the last 930 - # tagged (release) commit. The format of the identifier is determined by the 931 - # SCM's implementation. 932 - # 933 - # .scmversion is used when generating rpm packages so we do not loose 934 - # the version information from the SCM when we do the build of the kernel 935 - # from the copied source 936 - ifeq ($(wildcard .scmversion),) 937 - scm-identifier = $(shell $(CONFIG_SHELL) \ 938 - $(srctree)/scripts/setlocalversion $(srctree)) 939 - else 940 - scm-identifier = $(shell cat .scmversion 2> /dev/null) 941 - endif 942 - 943 - ifdef CONFIG_LOCALVERSION_AUTO 944 - localver-extra = $(scm-identifier) 945 - else 946 - ifneq ($(scm-identifier),) 947 - ifeq ("$(origin LOCALVERSION)", "undefined") 948 - localver-extra = + 949 - endif 950 - endif 951 - endif 952 - 953 - localver-full = $(localver)$(LOCALVERSION)$(localver-extra) 954 - 955 886 # Store (new) KERNELRELASE string in include/config/kernel.release 956 - kernelrelease = $(KERNELVERSION)$(localver-full) 957 887 include/config/kernel.release: include/config/auto.conf FORCE 958 888 $(Q)rm -f $@ 959 - $(Q)echo $(kernelrelease) > $@ 889 + $(Q)echo "$(KERNELVERSION)$$($(CONFIG_SHELL) scripts/setlocalversion $(srctree))" > $@ 960 890 961 891 962 892 # Things we need to do before we recursively start building the kernel
+2 -1
arch/arm/Kconfig
··· 955 955 default y 956 956 957 957 config CPU_HAS_PMU 958 - depends on CPU_V6 || CPU_V7 || XSCALE_PMU 958 + depends on (CPU_V6 || CPU_V7 || XSCALE_PMU) && \ 959 + (!ARCH_OMAP3 || OMAP3_EMU) 959 960 default y 960 961 bool 961 962
+2 -2
arch/arm/include/asm/mach/udc_pxa2xx.h
··· 21 21 * here. Note that sometimes the signals go through inverters... 22 22 */ 23 23 bool gpio_vbus_inverted; 24 - u16 gpio_vbus; /* high == vbus present */ 24 + int gpio_vbus; /* high == vbus present */ 25 25 bool gpio_pullup_inverted; 26 - u16 gpio_pullup; /* high == pullup activated */ 26 + int gpio_pullup; /* high == pullup activated */ 27 27 }; 28 28
+4
arch/arm/include/asm/processor.h
··· 91 91 92 92 unsigned long get_wchan(struct task_struct *p); 93 93 94 + #if __LINUX_ARM_ARCH__ == 6 95 + #define cpu_relax() smp_mb() 96 + #else 94 97 #define cpu_relax() barrier() 98 + #endif 95 99 96 100 /* 97 101 * Create a new kernel thread
+1 -1
arch/arm/kernel/perf_event.c
··· 201 201 { 202 202 int shift = 64 - 32; 203 203 s64 prev_raw_count, new_raw_count; 204 - s64 delta; 204 + u64 delta; 205 205 206 206 again: 207 207 prev_raw_count = local64_read(&hwc->prev_count);
+22 -11
arch/arm/mach-mx3/mach-mx31lilly.c
··· 115 115 116 116 /* USB */ 117 117 118 + #if defined(CONFIG_USB_ULPI) 119 + 118 120 #define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \ 119 121 PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU) 120 122 ··· 246 244 .flags = MXC_EHCI_POWER_PINS_ENABLED, 247 245 }; 248 246 249 - static struct platform_device *devices[] __initdata = { 250 - &smsc91x_device, 251 - &physmap_flash_device, 252 - }; 247 + static void lilly1131_usb_init(void) 248 + { 249 + usbotg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, 250 + USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT); 251 + usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, 252 + USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT); 253 + 254 + mxc_register_device(&mxc_usbh1, &usbh1_pdata); 255 + mxc_register_device(&mxc_usbh2, &usbh2_pdata); 256 + } 257 + 258 + #else 259 + static inline void lilly1131_usb_init(void) {} 260 + #endif /* CONFIG_USB_ULPI */ 253 261 254 262 /* SPI */ 255 263 ··· 289 277 .bus_num = 1, 290 278 .chip_select = 0, 291 279 .platform_data = &mc13783_pdata, 280 + }; 281 + 282 + static struct platform_device *devices[] __initdata = { 283 + &smsc91x_device, 284 + &physmap_flash_device, 292 285 }; 293 286 294 287 static int mx31lilly_baseboard; ··· 338 321 platform_add_devices(devices, ARRAY_SIZE(devices)); 339 322 340 323 /* USB */ 341 - usbotg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, 342 - USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT); 343 - usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, 344 - USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT); 345 - 346 - mxc_register_device(&mxc_usbh1, &usbh1_pdata); 347 - mxc_register_device(&mxc_usbh2, &usbh2_pdata); 324 + lilly1131_usb_init(); 348 325 } 349 326 350 327 static void __init mx31lilly_timer_init(void)
+1 -3
arch/arm/mach-omap2/board-omap3stalker.c
··· 538 538 printk(KERN_ERR "can't get ads7846 pen down GPIO\n"); 539 539 540 540 gpio_direction_input(OMAP3_STALKER_TS_GPIO); 541 - 542 - omap_set_gpio_debounce(OMAP3_STALKER_TS_GPIO, 1); 543 - omap_set_gpio_debounce_time(OMAP3_STALKER_TS_GPIO, 0xa); 541 + gpio_set_debounce(OMAP3_STALKER_TS_GPIO, 310); 544 542 } 545 543 546 544 static int ads7846_get_pendown_state(void)
+2
arch/arm/mach-omap2/clock44xx_data.c
··· 1369 1369 .ops = &clkops_omap2_dflt, 1370 1370 .enable_reg = OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL, 1371 1371 .enable_bit = OMAP4430_MODULEMODE_HWCTRL, 1372 + .flags = ENABLE_ON_INIT, 1372 1373 .clkdm_name = "l3_emif_clkdm", 1373 1374 .parent = &ddrphy_ck, 1374 1375 .recalc = &followparent_recalc, ··· 1380 1379 .ops = &clkops_omap2_dflt, 1381 1380 .enable_reg = OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL, 1382 1381 .enable_bit = OMAP4430_MODULEMODE_HWCTRL, 1382 + .flags = ENABLE_ON_INIT, 1383 1383 .clkdm_name = "l3_emif_clkdm", 1384 1384 .parent = &ddrphy_ck, 1385 1385 .recalc = &followparent_recalc,
+6 -3
arch/arm/mach-omap2/omap_hwmod.c
··· 409 409 return 0; 410 410 411 411 oh->_clk = omap_clk_get_by_name(oh->main_clk); 412 - if (!oh->_clk) 412 + if (!oh->_clk) { 413 413 pr_warning("omap_hwmod: %s: cannot clk_get main_clk %s\n", 414 414 oh->name, oh->main_clk); 415 415 return -EINVAL; 416 + } 416 417 417 418 if (!oh->_clk->clkdm) 418 419 pr_warning("omap_hwmod: %s: missing clockdomain for %s.\n", ··· 445 444 continue; 446 445 447 446 c = omap_clk_get_by_name(os->clk); 448 - if (!c) 447 + if (!c) { 449 448 pr_warning("omap_hwmod: %s: cannot clk_get interface_clk %s\n", 450 449 oh->name, os->clk); 451 450 ret = -EINVAL; 451 + } 452 452 os->_clk = c; 453 453 } 454 454 ··· 472 470 473 471 for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) { 474 472 c = omap_clk_get_by_name(oc->clk); 475 - if (!c) 473 + if (!c) { 476 474 pr_warning("omap_hwmod: %s: cannot clk_get opt_clk %s\n", 477 475 oh->name, oc->clk); 478 476 ret = -EINVAL; 477 + } 479 478 oc->_clk = c; 480 479 } 481 480
+2 -2
arch/arm/mach-omap2/pm34xx.c
··· 99 99 /* Do a readback to assure write has been done */ 100 100 prm_read_mod_reg(WKUP_MOD, PM_WKEN); 101 101 102 - while (!(prm_read_mod_reg(WKUP_MOD, PM_WKST) & 102 + while (!(prm_read_mod_reg(WKUP_MOD, PM_WKEN) & 103 103 OMAP3430_ST_IO_CHAIN_MASK)) { 104 104 timeout++; 105 105 if (timeout > 1000) { ··· 108 108 return; 109 109 } 110 110 prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK, 111 - WKUP_MOD, PM_WKST); 111 + WKUP_MOD, PM_WKEN); 112 112 } 113 113 } 114 114 }
+2
arch/arm/mach-omap2/usb-ehci.c
··· 20 20 #include <linux/delay.h> 21 21 #include <linux/platform_device.h> 22 22 #include <linux/clk.h> 23 + #include <linux/dma-mapping.h> 24 + 23 25 #include <asm/io.h> 24 26 #include <plat/mux.h> 25 27
+1 -1
arch/arm/mach-pxa/mioa701.c
··· 697 697 }; 698 698 699 699 /* Board I2C devices. */ 700 - static struct i2c_board_info __initdata mioa701_i2c_devices[] = { 700 + static struct i2c_board_info mioa701_i2c_devices[] = { 701 701 { 702 702 I2C_BOARD_INFO("mt9m111", 0x5d), 703 703 },
+10 -16
arch/arm/mach-pxa/z2.c
··· 3 3 * 4 4 * Support for the Zipit Z2 Handheld device. 5 5 * 6 - * Author: Ken McGuire 7 - * Created: Jan 25, 2009 6 + * Copyright (C) 2009-2010 Marek Vasut <marek.vasut@gmail.com> 7 + * 8 + * Based on research and code by: Ken McGuire 8 9 * Based on mainstone.c as modified for the Zipit Z2. 9 10 * 10 11 * This program is free software; you can redistribute it and/or modify ··· 158 157 { 159 158 .name = "U-Boot Bootloader", 160 159 .offset = 0x0, 161 - .size = 0x20000, 162 - }, 163 - { 164 - .name = "Linux Kernel", 165 - .offset = 0x20000, 166 - .size = 0x220000, 167 - }, 168 - { 169 - .name = "Filesystem", 170 - .offset = 0x240000, 171 - .size = 0x5b0000, 172 - }, 173 - { 160 + .size = 0x40000, 161 + }, { 174 162 .name = "U-Boot Environment", 175 - .offset = 0x7f0000, 163 + .offset = 0x40000, 164 + .size = 0x60000, 165 + }, { 166 + .name = "Flash", 167 + .offset = 0x60000, 176 168 .size = MTDPART_SIZ_FULL, 177 169 }, 178 170 };
+2
arch/arm/mach-realview/Kconfig
··· 18 18 bool "Support ARM11MPCore tile" 19 19 depends on MACH_REALVIEW_EB 20 20 select CPU_V6 21 + select ARCH_HAS_BARRIERS if SMP 21 22 help 22 23 Enable support for the ARM11MPCore tile on the Realview platform. 23 24 ··· 36 35 select CPU_V6 37 36 select ARM_GIC 38 37 select HAVE_PATA_PLATFORM 38 + select ARCH_HAS_BARRIERS if SMP 39 39 help 40 40 Include support for the ARM(R) RealView MPCore Platform Baseboard. 41 41 PB11MPCore is a platform with an on-board ARM11MPCore and has
+8
arch/arm/mach-realview/include/mach/barriers.h
··· 1 + /* 2 + * Barriers redefined for RealView ARM11MPCore platforms with L220 cache 3 + * controller to work around hardware errata causing the outer_sync() 4 + * operation to deadlock the system. 5 + */ 6 + #define mb() dsb() 7 + #define rmb() dmb() 8 + #define wmb() mb()
+2 -1
arch/arm/mach-realview/realview_eb.c
··· 32 32 #include <asm/leds.h> 33 33 #include <asm/mach-types.h> 34 34 #include <asm/pmu.h> 35 + #include <asm/pgtable.h> 35 36 #include <asm/hardware/gic.h> 36 37 #include <asm/hardware/cache-l2x0.h> 37 38 #include <asm/localtimer.h> ··· 458 457 459 458 MACHINE_START(REALVIEW_EB, "ARM-RealView EB") 460 459 /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */ 461 - .phys_io = REALVIEW_EB_UART0_BASE, 460 + .phys_io = REALVIEW_EB_UART0_BASE & SECTION_MASK, 462 461 .io_pg_offst = (IO_ADDRESS(REALVIEW_EB_UART0_BASE) >> 18) & 0xfffc, 463 462 .boot_params = PHYS_OFFSET + 0x00000100, 464 463 .fixup = realview_fixup,
+2 -1
arch/arm/mach-realview/realview_pb1176.c
··· 32 32 #include <asm/leds.h> 33 33 #include <asm/mach-types.h> 34 34 #include <asm/pmu.h> 35 + #include <asm/pgtable.h> 35 36 #include <asm/hardware/gic.h> 36 37 #include <asm/hardware/cache-l2x0.h> 37 38 ··· 352 351 353 352 MACHINE_START(REALVIEW_PB1176, "ARM-RealView PB1176") 354 353 /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */ 355 - .phys_io = REALVIEW_PB1176_UART0_BASE, 354 + .phys_io = REALVIEW_PB1176_UART0_BASE & SECTION_MASK, 356 355 .io_pg_offst = (IO_ADDRESS(REALVIEW_PB1176_UART0_BASE) >> 18) & 0xfffc, 357 356 .boot_params = PHYS_OFFSET + 0x00000100, 358 357 .fixup = realview_pb1176_fixup,
+2 -1
arch/arm/mach-realview/realview_pb11mp.c
··· 32 32 #include <asm/leds.h> 33 33 #include <asm/mach-types.h> 34 34 #include <asm/pmu.h> 35 + #include <asm/pgtable.h> 35 36 #include <asm/hardware/gic.h> 36 37 #include <asm/hardware/cache-l2x0.h> 37 38 #include <asm/localtimer.h> ··· 374 373 375 374 MACHINE_START(REALVIEW_PB11MP, "ARM-RealView PB11MPCore") 376 375 /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */ 377 - .phys_io = REALVIEW_PB11MP_UART0_BASE, 376 + .phys_io = REALVIEW_PB11MP_UART0_BASE & SECTION_MASK, 378 377 .io_pg_offst = (IO_ADDRESS(REALVIEW_PB11MP_UART0_BASE) >> 18) & 0xfffc, 379 378 .boot_params = PHYS_OFFSET + 0x00000100, 380 379 .fixup = realview_fixup,
+2 -1
arch/arm/mach-realview/realview_pba8.c
··· 31 31 #include <asm/leds.h> 32 32 #include <asm/mach-types.h> 33 33 #include <asm/pmu.h> 34 + #include <asm/pgtable.h> 34 35 #include <asm/hardware/gic.h> 35 36 36 37 #include <asm/mach/arch.h> ··· 324 323 325 324 MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8") 326 325 /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */ 327 - .phys_io = REALVIEW_PBA8_UART0_BASE, 326 + .phys_io = REALVIEW_PBA8_UART0_BASE & SECTION_MASK, 328 327 .io_pg_offst = (IO_ADDRESS(REALVIEW_PBA8_UART0_BASE) >> 18) & 0xfffc, 329 328 .boot_params = PHYS_OFFSET + 0x00000100, 330 329 .fixup = realview_fixup,
+2 -1
arch/arm/mach-realview/realview_pbx.c
··· 31 31 #include <asm/mach-types.h> 32 32 #include <asm/pmu.h> 33 33 #include <asm/smp_twd.h> 34 + #include <asm/pgtable.h> 34 35 #include <asm/hardware/gic.h> 35 36 #include <asm/hardware/cache-l2x0.h> 36 37 ··· 410 409 411 410 MACHINE_START(REALVIEW_PBX, "ARM-RealView PBX") 412 411 /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */ 413 - .phys_io = REALVIEW_PBX_UART0_BASE, 412 + .phys_io = REALVIEW_PBX_UART0_BASE & SECTION_MASK, 414 413 .io_pg_offst = (IO_ADDRESS(REALVIEW_PBX_UART0_BASE) >> 18) & 0xfffc, 415 414 .boot_params = PHYS_OFFSET + 0x00000100, 416 415 .fixup = realview_pbx_fixup,
+2 -1
arch/arm/mach-vexpress/ct-ca9x4.c
··· 10 10 #include <linux/amba/clcd.h> 11 11 12 12 #include <asm/clkdev.h> 13 + #include <asm/pgtable.h> 13 14 #include <asm/hardware/arm_timer.h> 14 15 #include <asm/hardware/cache-l2x0.h> 15 16 #include <asm/hardware/gic.h> ··· 237 236 } 238 237 239 238 MACHINE_START(VEXPRESS, "ARM-Versatile Express CA9x4") 240 - .phys_io = V2M_UART0, 239 + .phys_io = V2M_UART0 & SECTION_MASK, 241 240 .io_pg_offst = (__MMIO_P2V(V2M_UART0) >> 18) & 0xfffc, 242 241 .boot_params = PHYS_OFFSET + 0x00000100, 243 242 .map_io = ct_ca9x4_map_io,
+21
arch/arm/mm/Kconfig
··· 735 735 Forget about fast user space cmpxchg support. 736 736 It is just not possible. 737 737 738 + config DMA_CACHE_RWFO 739 + bool "Enable read/write for ownership DMA cache maintenance" 740 + depends on CPU_V6 && SMP 741 + default y 742 + help 743 + The Snoop Control Unit on ARM11MPCore does not detect the 744 + cache maintenance operations and the dma_{map,unmap}_area() 745 + functions may leave stale cache entries on other CPUs. By 746 + enabling this option, Read or Write For Ownership in the ARMv6 747 + DMA cache maintenance functions is performed. These LDR/STR 748 + instructions change the cache line state to shared or modified 749 + so that the cache operation has the desired effect. 750 + 751 + Note that the workaround is only valid on processors that do 752 + not perform speculative loads into the D-cache. For such 753 + processors, if cache maintenance operations are not broadcast 754 + in hardware, other workarounds are needed (e.g. cache 755 + maintenance broadcasting in software via FIQ). 756 + 738 757 config OUTER_CACHE 739 758 bool 740 759 ··· 813 794 814 795 config ARM_DMA_MEM_BUFFERABLE 815 796 bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7 797 + depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \ 798 + MACH_REALVIEW_PB11MP) 816 799 default y if CPU_V6 || CPU_V7 817 800 help 818 801 Historically, the kernel has used strongly ordered mappings to
+14 -4
arch/arm/mm/cache-v6.S
··· 211 211 mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line 212 212 #endif 213 213 1: 214 - #ifdef CONFIG_SMP 215 - str r0, [r0] @ write for ownership 214 + #ifdef CONFIG_DMA_CACHE_RWFO 215 + ldr r2, [r0] @ read for ownership 216 + str r2, [r0] @ write for ownership 216 217 #endif 217 218 #ifdef HARVARD_CACHE 218 219 mcr p15, 0, r0, c7, c6, 1 @ invalidate D line ··· 235 234 v6_dma_clean_range: 236 235 bic r0, r0, #D_CACHE_LINE_SIZE - 1 237 236 1: 238 - #ifdef CONFIG_SMP 237 + #ifdef CONFIG_DMA_CACHE_RWFO 239 238 ldr r2, [r0] @ read for ownership 240 239 #endif 241 240 #ifdef HARVARD_CACHE ··· 258 257 ENTRY(v6_dma_flush_range) 259 258 bic r0, r0, #D_CACHE_LINE_SIZE - 1 260 259 1: 261 - #ifdef CONFIG_SMP 260 + #ifdef CONFIG_DMA_CACHE_RWFO 262 261 ldr r2, [r0] @ read for ownership 263 262 str r2, [r0] @ write for ownership 264 263 #endif ··· 284 283 add r1, r1, r0 285 284 teq r2, #DMA_FROM_DEVICE 286 285 beq v6_dma_inv_range 286 + #ifndef CONFIG_DMA_CACHE_RWFO 287 + b v6_dma_clean_range 288 + #else 287 289 teq r2, #DMA_TO_DEVICE 288 290 beq v6_dma_clean_range 289 291 b v6_dma_flush_range 292 + #endif 290 293 ENDPROC(v6_dma_map_area) 291 294 292 295 /* ··· 300 295 * - dir - DMA direction 301 296 */ 302 297 ENTRY(v6_dma_unmap_area) 298 + #ifndef CONFIG_DMA_CACHE_RWFO 299 + add r1, r1, r0 300 + teq r2, #DMA_TO_DEVICE 301 + bne v6_dma_inv_range 302 + #endif 303 303 mov pc, lr 304 304 ENDPROC(v6_dma_unmap_area) 305 305
+9 -9
arch/arm/mm/dma-mapping.c
··· 24 24 #include <asm/tlbflush.h> 25 25 #include <asm/sizes.h> 26 26 27 - /* Sanity check size */ 28 - #if (CONSISTENT_DMA_SIZE % SZ_2M) 29 - #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB" 30 - #endif 31 - 32 - #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) 33 - #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT) 34 - #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT) 35 - 36 27 static u64 get_coherent_dma_mask(struct device *dev) 37 28 { 38 29 u64 mask = ISA_DMA_THRESHOLD; ··· 114 123 } 115 124 116 125 #ifdef CONFIG_MMU 126 + /* Sanity check size */ 127 + #if (CONSISTENT_DMA_SIZE % SZ_2M) 128 + #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB" 129 + #endif 130 + 131 + #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) 132 + #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT) 133 + #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT) 134 + 117 135 /* 118 136 * These are the page tables (2MB each) covering uncached, DMA consistent allocations 119 137 */
+3 -3
arch/arm/plat-omap/dmtimer.c
··· 541 541 * timer is stopped 542 542 */ 543 543 udelay(3500000 / clk_get_rate(timer->fclk) + 1); 544 - /* Ack possibly pending interrupt */ 545 - omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG, 546 - OMAP_TIMER_INT_OVERFLOW); 547 544 #endif 548 545 } 546 + /* Ack possibly pending interrupt */ 547 + omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG, 548 + OMAP_TIMER_INT_OVERFLOW); 549 549 } 550 550 EXPORT_SYMBOL_GPL(omap_dm_timer_stop); 551 551
+1
arch/arm/plat-omap/gpio.c
··· 673 673 if (cpu_is_omap34xx() || cpu_is_omap44xx()) 674 674 clk_disable(bank->dbck); 675 675 } 676 + bank->dbck_enable_mask = val; 676 677 677 678 __raw_writel(val, reg); 678 679 }
+3 -1
arch/arm/plat-omap/iovmm.c
··· 140 140 return ERR_PTR(-ENOMEM); 141 141 142 142 err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL); 143 - if (err) 143 + if (err) { 144 + kfree(sgt); 144 145 return ERR_PTR(err); 146 + } 145 147 146 148 pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries); 147 149
+2 -1
arch/arm/plat-pxa/Makefile
··· 2 2 # Makefile for code common across different PXA processor families 3 3 # 4 4 5 - obj-y := dma.o pmu.o 5 + obj-y := dma.o 6 6 7 + obj-$(CONFIG_ARCH_PXA) += pmu.o 7 8 obj-$(CONFIG_GENERIC_GPIO) += gpio.o 8 9 obj-$(CONFIG_PXA3xx) += mfp.o 9 10 obj-$(CONFIG_ARCH_MMP) += mfp.o
+1 -1
arch/ia64/mm/tlb.c
··· 121 121 ia64_invala(); 122 122 123 123 for (;;) { 124 - asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory"); 124 + asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory"); 125 125 if (time_before(t, serve)) 126 126 return; 127 127 cpu_relax();
+1
arch/um/os-Linux/mem.c
··· 10 10 #include <errno.h> 11 11 #include <fcntl.h> 12 12 #include <string.h> 13 + #include <sys/stat.h> 13 14 #include <sys/mman.h> 14 15 #include <sys/param.h> 15 16 #include "init.h"
+2 -2
arch/x86/kernel/cpu/perf_event_amd.c
··· 102 102 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, 103 103 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, 104 104 [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, 105 - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, 106 - [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, 105 + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, 106 + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, 107 107 }; 108 108 109 109 static u64 amd_pmu_event_map(int hw_event)
+10 -1
arch/x86/kernel/traps.c
··· 526 526 dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) 527 527 { 528 528 struct task_struct *tsk = current; 529 + int user_icebp = 0; 529 530 unsigned long dr6; 530 531 int si_code; 531 532 ··· 534 533 535 534 /* Filter out all the reserved bits which are preset to 1 */ 536 535 dr6 &= ~DR6_RESERVED; 536 + 537 + /* 538 + * If dr6 has no reason to give us about the origin of this trap, 539 + * then it's very likely the result of an icebp/int01 trap. 540 + * User wants a sigtrap for that. 541 + */ 542 + if (!dr6 && user_mode(regs)) 543 + user_icebp = 1; 537 544 538 545 /* Catch kmemcheck conditions first of all! */ 539 546 if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) ··· 584 575 regs->flags &= ~X86_EFLAGS_TF; 585 576 } 586 577 si_code = get_si_code(tsk->thread.debugreg6); 587 - if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS)) 578 + if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) 588 579 send_sigtrap(tsk, regs, error_code, si_code); 589 580 preempt_conditional_cli(regs); 590 581
+3 -6
block/blk-core.c
··· 1149 1149 else 1150 1150 req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK; 1151 1151 1152 - if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) { 1152 + if (bio_rw_flagged(bio, BIO_RW_DISCARD)) 1153 1153 req->cmd_flags |= REQ_DISCARD; 1154 - if (bio_rw_flagged(bio, BIO_RW_BARRIER)) 1155 - req->cmd_flags |= REQ_SOFTBARRIER; 1156 - } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) 1154 + if (bio_rw_flagged(bio, BIO_RW_BARRIER)) 1157 1155 req->cmd_flags |= REQ_HARDBARRIER; 1158 - 1159 1156 if (bio_rw_flagged(bio, BIO_RW_SYNCIO)) 1160 1157 req->cmd_flags |= REQ_RW_SYNC; 1161 1158 if (bio_rw_flagged(bio, BIO_RW_META)) ··· 1583 1586 * If it's a regular read/write or a barrier with data attached, 1584 1587 * go through the normal accounting stuff before submission. 1585 1588 */ 1586 - if (bio_has_data(bio)) { 1589 + if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) { 1587 1590 if (rw & WRITE) { 1588 1591 count_vm_events(PGPGOUT, count); 1589 1592 } else {
+38 -29
block/cfq-iosched.c
··· 14 14 #include <linux/rbtree.h> 15 15 #include <linux/ioprio.h> 16 16 #include <linux/blktrace_api.h> 17 - #include "blk-cgroup.h" 17 + #include "cfq.h" 18 18 19 19 /* 20 20 * tunables ··· 879 879 if (!RB_EMPTY_NODE(&cfqg->rb_node)) 880 880 cfq_rb_erase(&cfqg->rb_node, st); 881 881 cfqg->saved_workload_slice = 0; 882 - blkiocg_update_dequeue_stats(&cfqg->blkg, 1); 882 + cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1); 883 883 } 884 884 885 885 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq) ··· 939 939 940 940 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, 941 941 st->min_vdisktime); 942 - blkiocg_update_timeslice_used(&cfqg->blkg, used_sl); 943 - blkiocg_set_start_empty_time(&cfqg->blkg); 942 + cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl); 943 + cfq_blkiocg_set_start_empty_time(&cfqg->blkg); 944 944 } 945 945 946 946 #ifdef CONFIG_CFQ_GROUP_IOSCHED ··· 995 995 996 996 /* Add group onto cgroup list */ 997 997 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor); 998 - blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd, 998 + cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd, 999 999 MKDEV(major, minor)); 1000 1000 cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev); 1001 1001 ··· 1079 1079 * it from cgroup list, then it will take care of destroying 1080 1080 * cfqg also. 
1081 1081 */ 1082 - if (!blkiocg_del_blkio_group(&cfqg->blkg)) 1082 + if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg)) 1083 1083 cfq_destroy_cfqg(cfqd, cfqg); 1084 1084 } 1085 1085 } ··· 1421 1421 { 1422 1422 elv_rb_del(&cfqq->sort_list, rq); 1423 1423 cfqq->queued[rq_is_sync(rq)]--; 1424 - blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq), 1425 - rq_is_sync(rq)); 1424 + cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, 1425 + rq_data_dir(rq), rq_is_sync(rq)); 1426 1426 cfq_add_rq_rb(rq); 1427 - blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg, 1427 + cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg, 1428 1428 &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq), 1429 1429 rq_is_sync(rq)); 1430 1430 } ··· 1482 1482 cfq_del_rq_rb(rq); 1483 1483 1484 1484 cfqq->cfqd->rq_queued--; 1485 - blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq), 1486 - rq_is_sync(rq)); 1485 + cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, 1486 + rq_data_dir(rq), rq_is_sync(rq)); 1487 1487 if (rq_is_meta(rq)) { 1488 1488 WARN_ON(!cfqq->meta_pending); 1489 1489 cfqq->meta_pending--; ··· 1518 1518 static void cfq_bio_merged(struct request_queue *q, struct request *req, 1519 1519 struct bio *bio) 1520 1520 { 1521 - blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg, bio_data_dir(bio), 1522 - cfq_bio_sync(bio)); 1521 + cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg, 1522 + bio_data_dir(bio), cfq_bio_sync(bio)); 1523 1523 } 1524 1524 1525 1525 static void ··· 1539 1539 if (cfqq->next_rq == next) 1540 1540 cfqq->next_rq = rq; 1541 1541 cfq_remove_request(next); 1542 - blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next), 1543 - rq_is_sync(next)); 1542 + cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, 1543 + rq_data_dir(next), rq_is_sync(next)); 1544 1544 } 1545 1545 1546 1546 static int cfq_allow_merge(struct request_queue *q, struct request *rq, ··· 1571 1571 static inline void cfq_del_timer(struct cfq_data 
*cfqd, struct cfq_queue *cfqq) 1572 1572 { 1573 1573 del_timer(&cfqd->idle_slice_timer); 1574 - blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg); 1574 + cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg); 1575 1575 } 1576 1576 1577 1577 static void __cfq_set_active_queue(struct cfq_data *cfqd, ··· 1580 1580 if (cfqq) { 1581 1581 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d", 1582 1582 cfqd->serving_prio, cfqd->serving_type); 1583 - blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg); 1583 + cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg); 1584 1584 cfqq->slice_start = 0; 1585 1585 cfqq->dispatch_start = jiffies; 1586 1586 cfqq->allocated_slice = 0; ··· 1911 1911 sl = cfqd->cfq_slice_idle; 1912 1912 1913 1913 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); 1914 - blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg); 1914 + cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg); 1915 1915 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl); 1916 1916 } 1917 1917 ··· 1931 1931 elv_dispatch_sort(q, rq); 1932 1932 1933 1933 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; 1934 - blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq), 1934 + cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq), 1935 1935 rq_data_dir(rq), rq_is_sync(rq)); 1936 1936 } 1937 1937 ··· 1986 1986 int process_refs, new_process_refs; 1987 1987 struct cfq_queue *__cfqq; 1988 1988 1989 + /* 1990 + * If there are no process references on the new_cfqq, then it is 1991 + * unsafe to follow the ->new_cfqq chain as other cfqq's in the 1992 + * chain may have dropped their last reference (not just their 1993 + * last process reference). 
1994 + */ 1995 + if (!cfqq_process_refs(new_cfqq)) 1996 + return; 1997 + 1989 1998 /* Avoid a circular list and skip interim queue merges */ 1990 1999 while ((__cfqq = new_cfqq->new_cfqq)) { 1991 2000 if (__cfqq == cfqq) ··· 2003 1994 } 2004 1995 2005 1996 process_refs = cfqq_process_refs(cfqq); 1997 + new_process_refs = cfqq_process_refs(new_cfqq); 2006 1998 /* 2007 1999 * If the process for the cfqq has gone away, there is no 2008 2000 * sense in merging the queues. 2009 2001 */ 2010 - if (process_refs == 0) 2002 + if (process_refs == 0 || new_process_refs == 0) 2011 2003 return; 2012 2004 2013 2005 /* 2014 2006 * Merge in the direction of the lesser amount of work. 2015 2007 */ 2016 - new_process_refs = cfqq_process_refs(new_cfqq); 2017 2008 if (new_process_refs >= process_refs) { 2018 2009 cfqq->new_cfqq = new_cfqq; 2019 2010 atomic_add(process_refs, &new_cfqq->ref); ··· 3257 3248 cfq_clear_cfqq_wait_request(cfqq); 3258 3249 __blk_run_queue(cfqd->queue); 3259 3250 } else { 3260 - blkiocg_update_idle_time_stats( 3251 + cfq_blkiocg_update_idle_time_stats( 3261 3252 &cfqq->cfqg->blkg); 3262 3253 cfq_mark_cfqq_must_dispatch(cfqq); 3263 3254 } ··· 3285 3276 rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]); 3286 3277 list_add_tail(&rq->queuelist, &cfqq->fifo); 3287 3278 cfq_add_rq_rb(rq); 3288 - blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg, 3279 + cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg, 3289 3280 &cfqd->serving_group->blkg, rq_data_dir(rq), 3290 3281 rq_is_sync(rq)); 3291 3282 cfq_rq_enqueued(cfqd, cfqq, rq); ··· 3373 3364 WARN_ON(!cfqq->dispatched); 3374 3365 cfqd->rq_in_driver--; 3375 3366 cfqq->dispatched--; 3376 - blkiocg_update_completion_stats(&cfqq->cfqg->blkg, rq_start_time_ns(rq), 3377 - rq_io_start_time_ns(rq), rq_data_dir(rq), 3378 - rq_is_sync(rq)); 3367 + cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg, 3368 + rq_start_time_ns(rq), rq_io_start_time_ns(rq), 3369 + rq_data_dir(rq), rq_is_sync(rq)); 3379 3370 
3380 3371 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--; 3381 3372 ··· 3739 3730 3740 3731 cfq_put_async_queues(cfqd); 3741 3732 cfq_release_cfq_groups(cfqd); 3742 - blkiocg_del_blkio_group(&cfqd->root_group.blkg); 3733 + cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg); 3743 3734 3744 3735 spin_unlock_irq(q->queue_lock); 3745 3736 ··· 3807 3798 */ 3808 3799 atomic_set(&cfqg->ref, 1); 3809 3800 rcu_read_lock(); 3810 - blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd, 3811 - 0); 3801 + cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, 3802 + (void *)cfqd, 0); 3812 3803 rcu_read_unlock(); 3813 3804 #endif 3814 3805 /*
+115
block/cfq.h
··· 1 + #ifndef _CFQ_H 2 + #define _CFQ_H 3 + #include "blk-cgroup.h" 4 + 5 + #ifdef CONFIG_CFQ_GROUP_IOSCHED 6 + static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg, 7 + struct blkio_group *curr_blkg, bool direction, bool sync) 8 + { 9 + blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync); 10 + } 11 + 12 + static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg, 13 + unsigned long dequeue) 14 + { 15 + blkiocg_update_dequeue_stats(blkg, dequeue); 16 + } 17 + 18 + static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg, 19 + unsigned long time) 20 + { 21 + blkiocg_update_timeslice_used(blkg, time); 22 + } 23 + 24 + static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) 25 + { 26 + blkiocg_set_start_empty_time(blkg); 27 + } 28 + 29 + static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg, 30 + bool direction, bool sync) 31 + { 32 + blkiocg_update_io_remove_stats(blkg, direction, sync); 33 + } 34 + 35 + static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg, 36 + bool direction, bool sync) 37 + { 38 + blkiocg_update_io_merged_stats(blkg, direction, sync); 39 + } 40 + 41 + static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg) 42 + { 43 + blkiocg_update_idle_time_stats(blkg); 44 + } 45 + 46 + static inline void 47 + cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) 48 + { 49 + blkiocg_update_avg_queue_size_stats(blkg); 50 + } 51 + 52 + static inline void 53 + cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) 54 + { 55 + blkiocg_update_set_idle_time_stats(blkg); 56 + } 57 + 58 + static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg, 59 + uint64_t bytes, bool direction, bool sync) 60 + { 61 + blkiocg_update_dispatch_stats(blkg, bytes, direction, sync); 62 + } 63 + 64 + static inline void cfq_blkiocg_update_completion_stats(struct blkio_group 
*blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) 65 + { 66 + blkiocg_update_completion_stats(blkg, start_time, io_start_time, 67 + direction, sync); 68 + } 69 + 70 + static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 71 + struct blkio_group *blkg, void *key, dev_t dev) { 72 + blkiocg_add_blkio_group(blkcg, blkg, key, dev); 73 + } 74 + 75 + static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg) 76 + { 77 + return blkiocg_del_blkio_group(blkg); 78 + } 79 + 80 + #else /* CFQ_GROUP_IOSCHED */ 81 + static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg, 82 + struct blkio_group *curr_blkg, bool direction, bool sync) {} 83 + 84 + static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg, 85 + unsigned long dequeue) {} 86 + 87 + static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg, 88 + unsigned long time) {} 89 + static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {} 90 + static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg, 91 + bool direction, bool sync) {} 92 + static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg, 93 + bool direction, bool sync) {} 94 + static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg) 95 + { 96 + } 97 + static inline void 98 + cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {} 99 + 100 + static inline void 101 + cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {} 102 + 103 + static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg, 104 + uint64_t bytes, bool direction, bool sync) {} 105 + static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {} 106 + 107 + static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 108 + struct blkio_group *blkg, 
void *key, dev_t dev) {} 109 + static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg) 110 + { 111 + return 0; 112 + } 113 + 114 + #endif /* CFQ_GROUP_IOSCHED */ 115 + #endif
+1 -1
drivers/acpi/apei/erst.c
··· 781 781 status = acpi_get_table(ACPI_SIG_ERST, 0, 782 782 (struct acpi_table_header **)&erst_tab); 783 783 if (status == AE_NOT_FOUND) { 784 - pr_err(ERST_PFX "Table is not found!\n"); 784 + pr_info(ERST_PFX "Table is not found!\n"); 785 785 goto err; 786 786 } else if (ACPI_FAILURE(status)) { 787 787 const char *msg = acpi_format_exception(status);
+10
drivers/ata/ahci.c
··· 1053 1053 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable) 1054 1054 return -ENODEV; 1055 1055 1056 + /* 1057 + * For some reason, MCP89 on MacBook 7,1 doesn't work with 1058 + * ahci, use ata_generic instead. 1059 + */ 1060 + if (pdev->vendor == PCI_VENDOR_ID_NVIDIA && 1061 + pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA && 1062 + pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE && 1063 + pdev->subsystem_device == 0xcb89) 1064 + return -ENODEV; 1065 + 1056 1066 /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode. 1057 1067 * At the moment, we can only use the AHCI mode. Let the users know 1058 1068 * that for SAS drives they're out of luck.
+24 -6
drivers/ata/ata_generic.c
··· 32 32 * A generic parallel ATA driver using libata 33 33 */ 34 34 35 + enum { 36 + ATA_GEN_CLASS_MATCH = (1 << 0), 37 + ATA_GEN_FORCE_DMA = (1 << 1), 38 + }; 39 + 35 40 /** 36 41 * generic_set_mode - mode setting 37 42 * @link: link to set up ··· 51 46 static int generic_set_mode(struct ata_link *link, struct ata_device **unused) 52 47 { 53 48 struct ata_port *ap = link->ap; 49 + const struct pci_device_id *id = ap->host->private_data; 54 50 int dma_enabled = 0; 55 51 struct ata_device *dev; 56 52 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 57 53 58 - /* Bits 5 and 6 indicate if DMA is active on master/slave */ 59 - if (ap->ioaddr.bmdma_addr) 54 + if (id->driver_data & ATA_GEN_FORCE_DMA) { 55 + dma_enabled = 0xff; 56 + } else if (ap->ioaddr.bmdma_addr) { 57 + /* Bits 5 and 6 indicate if DMA is active on master/slave */ 60 58 dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 59 + } 61 60 62 61 if (pdev->vendor == PCI_VENDOR_ID_CENATEK) 63 62 dma_enabled = 0xFF; ··· 135 126 const struct ata_port_info *ppi[] = { &info, NULL }; 136 127 137 128 /* Don't use the generic entry unless instructed to do so */ 138 - if (id->driver_data == 1 && all_generic_ide == 0) 129 + if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0) 139 130 return -ENODEV; 140 131 141 132 /* Devices that need care */ ··· 164 155 return rc; 165 156 pcim_pin_device(dev); 166 157 } 167 - return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, NULL, 0); 158 + return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, (void *)id, 0); 168 159 } 169 160 170 161 static struct pci_device_id ata_generic[] = { ··· 176 167 { PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), }, 177 168 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), }, 178 169 { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), }, 179 - { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), }, 170 + { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), 171 + 
.driver_data = ATA_GEN_FORCE_DMA }, 172 + /* 173 + * For some reason, MCP89 on MacBook 7,1 doesn't work with 174 + * ahci, use ata_generic instead. 175 + */ 176 + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA, 177 + PCI_VENDOR_ID_APPLE, 0xcb89, 178 + .driver_data = ATA_GEN_FORCE_DMA }, 180 179 #if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE) 181 180 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), }, 182 181 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), }, ··· 192 175 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), }, 193 176 #endif 194 177 /* Must come last. If you add entries adjust this table appropriately */ 195 - { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1}, 178 + { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL), 179 + .driver_data = ATA_GEN_CLASS_MATCH }, 196 180 { 0, }, 197 181 }; 198 182
+3 -2
drivers/ata/libahci.c
··· 324 324 struct ahci_host_priv *hpriv = ap->host->private_data; 325 325 void __iomem *mmio = hpriv->mmio; 326 326 void __iomem *em_mmio = mmio + hpriv->em_loc; 327 + const unsigned char *msg_buf = buf; 327 328 u32 em_ctl, msg; 328 329 unsigned long flags; 329 330 int i; ··· 344 343 } 345 344 346 345 for (i = 0; i < size; i += 4) { 347 - msg = buf[i] | buf[i + 1] << 8 | 348 - buf[i + 2] << 16 | buf[i + 3] << 24; 346 + msg = msg_buf[i] | msg_buf[i + 1] << 8 | 347 + msg_buf[i + 2] << 16 | msg_buf[i + 3] << 24; 349 348 writel(msg, em_mmio + i); 350 349 } 351 350
+1
drivers/block/cciss_scsi.c
··· 861 861 sh->n_io_port = 0; // I don't think we use these two... 862 862 sh->this_id = SELF_SCSI_ID; 863 863 sh->sg_tablesize = hba[ctlr]->maxsgentries; 864 + sh->max_cmd_len = MAX_COMMAND_SIZE; 864 865 865 866 ((struct cciss_scsi_adapter_data_t *) 866 867 hba[ctlr]->scsi_ctlr)->scsi_host = sh;
+3 -3
drivers/block/cpqarray.c
··· 386 386 } 387 387 388 388 /* pdev is NULL for eisa */ 389 - static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev) 389 + static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev) 390 390 { 391 391 struct request_queue *q; 392 392 int j; ··· 503 503 return -1; 504 504 } 505 505 506 - static int __init cpqarray_init_one( struct pci_dev *pdev, 506 + static int __devinit cpqarray_init_one( struct pci_dev *pdev, 507 507 const struct pci_device_id *ent) 508 508 { 509 509 int i; ··· 740 740 /* 741 741 * Find an EISA controller's signature. Set up an hba if we find it. 742 742 */ 743 - static int __init cpqarray_eisa_detect(void) 743 + static int __devinit cpqarray_eisa_detect(void) 744 744 { 745 745 int i=0, j; 746 746 __u32 board_id;
-2
drivers/block/drbd/drbd_main.c
··· 1236 1236 /* Last part of the attaching process ... */ 1237 1237 if (ns.conn >= C_CONNECTED && 1238 1238 os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) { 1239 - kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */ 1240 - mdev->p_uuid = NULL; /* ...to not use the old ones in the mean time */ 1241 1239 drbd_send_sizes(mdev, 0, 0); /* to start sync... */ 1242 1240 drbd_send_uuids(mdev); 1243 1241 drbd_send_state(mdev);
+6
drivers/block/drbd/drbd_nl.c
··· 1114 1114 mdev->new_state_tmp.i = ns.i; 1115 1115 ns.i = os.i; 1116 1116 ns.disk = D_NEGOTIATING; 1117 + 1118 + /* We expect to receive up-to-date UUIDs soon. 1119 + To avoid a race in receive_state, free p_uuid while 1120 + holding req_lock. I.e. atomic with the state change */ 1121 + kfree(mdev->p_uuid); 1122 + mdev->p_uuid = NULL; 1117 1123 } 1118 1124 1119 1125 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
+2 -4
drivers/char/agp/generic.c
··· 97 97 void agp_alloc_page_array(size_t size, struct agp_memory *mem) 98 98 { 99 99 mem->pages = NULL; 100 - mem->vmalloc_flag = false; 101 100 102 101 if (size <= 2*PAGE_SIZE) 103 - mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NORETRY); 102 + mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); 104 103 if (mem->pages == NULL) { 105 104 mem->pages = vmalloc(size); 106 - mem->vmalloc_flag = true; 107 105 } 108 106 } 109 107 EXPORT_SYMBOL(agp_alloc_page_array); 110 108 111 109 void agp_free_page_array(struct agp_memory *mem) 112 110 { 113 - if (mem->vmalloc_flag) { 111 + if (is_vmalloc_addr(mem->pages)) { 114 112 vfree(mem->pages); 115 113 } else { 116 114 kfree(mem->pages);
+18 -5
drivers/char/ipmi/ipmi_si_intf.c
··· 302 302 303 303 static int force_kipmid[SI_MAX_PARMS]; 304 304 static int num_force_kipmid; 305 + #ifdef CONFIG_PCI 306 + static int pci_registered; 307 + #endif 308 + #ifdef CONFIG_PPC_OF 309 + static int of_registered; 310 + #endif 305 311 306 312 static unsigned int kipmid_max_busy_us[SI_MAX_PARMS]; 307 313 static int num_max_busy_us; ··· 1024 1018 else if (smi_result == SI_SM_IDLE) 1025 1019 schedule_timeout_interruptible(100); 1026 1020 else 1027 - schedule_timeout_interruptible(0); 1021 + schedule_timeout_interruptible(1); 1028 1022 } 1029 1023 return 0; 1030 1024 } ··· 3320 3314 rv = pci_register_driver(&ipmi_pci_driver); 3321 3315 if (rv) 3322 3316 printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv); 3317 + else 3318 + pci_registered = 1; 3323 3319 #endif 3324 3320 3325 3321 #ifdef CONFIG_ACPI ··· 3338 3330 3339 3331 #ifdef CONFIG_PPC_OF 3340 3332 of_register_platform_driver(&ipmi_of_platform_driver); 3333 + of_registered = 1; 3341 3334 #endif 3342 3335 3343 3336 /* We prefer devices with interrupts, but in the case of a machine ··· 3392 3383 if (unload_when_empty && list_empty(&smi_infos)) { 3393 3384 mutex_unlock(&smi_infos_lock); 3394 3385 #ifdef CONFIG_PCI 3395 - pci_unregister_driver(&ipmi_pci_driver); 3386 + if (pci_registered) 3387 + pci_unregister_driver(&ipmi_pci_driver); 3396 3388 #endif 3397 3389 3398 3390 #ifdef CONFIG_PPC_OF 3399 - of_unregister_platform_driver(&ipmi_of_platform_driver); 3391 + if (of_registered) 3392 + of_unregister_platform_driver(&ipmi_of_platform_driver); 3400 3393 #endif 3401 3394 driver_unregister(&ipmi_driver.driver); 3402 3395 printk(KERN_WARNING PFX ··· 3489 3478 return; 3490 3479 3491 3480 #ifdef CONFIG_PCI 3492 - pci_unregister_driver(&ipmi_pci_driver); 3481 + if (pci_registered) 3482 + pci_unregister_driver(&ipmi_pci_driver); 3493 3483 #endif 3494 3484 #ifdef CONFIG_ACPI 3495 3485 pnp_unregister_driver(&ipmi_pnp_driver); 3496 3486 #endif 3497 3487 3498 3488 #ifdef CONFIG_PPC_OF 3499 - 
of_unregister_platform_driver(&ipmi_of_platform_driver); 3489 + if (of_registered) 3490 + of_unregister_platform_driver(&ipmi_of_platform_driver); 3500 3491 #endif 3501 3492 3502 3493 mutex_lock(&smi_infos_lock);
+2 -2
drivers/cpuidle/governors/menu.c
··· 143 143 * This allows us to calculate 144 144 * E(duration)|iowait 145 145 */ 146 - if (nr_iowait_cpu()) 146 + if (nr_iowait_cpu(smp_processor_id())) 147 147 bucket = BUCKETS/2; 148 148 149 149 if (duration < 10) ··· 175 175 mult += 2 * get_loadavg(); 176 176 177 177 /* for IO wait tasks (per cpu!) we add 5x each */ 178 - mult += 10 * nr_iowait_cpu(); 178 + mult += 10 * nr_iowait_cpu(smp_processor_id()); 179 179 180 180 return mult; 181 181 }
+3 -1
drivers/dma/ppc4xx/adma.c
··· 4257 4257 struct ppc440spe_adma_chan *chan, 4258 4258 int *initcode) 4259 4259 { 4260 + struct of_device *ofdev; 4260 4261 struct device_node *np; 4261 4262 int ret; 4262 4263 4263 - np = container_of(adev->dev, struct of_device, dev)->node; 4264 + ofdev = container_of(adev->dev, struct of_device, dev); 4265 + np = ofdev->dev.of_node; 4264 4266 if (adev->id != PPC440SPE_XOR_ID) { 4265 4267 adev->err_irq = irq_of_parse_and_map(np, 1); 4266 4268 if (adev->err_irq == NO_IRQ) {
+12 -12
drivers/edac/amd64_edac.c
··· 1958 1958 u32 value = 0; 1959 1959 int err_sym = 0; 1960 1960 1961 - amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value); 1961 + if (boot_cpu_data.x86 == 0x10) { 1962 1962 1963 - /* F3x180[EccSymbolSize]=1, x8 symbols */ 1964 - if (boot_cpu_data.x86 == 0x10 && 1965 - boot_cpu_data.x86_model > 7 && 1966 - value & BIT(25)) { 1967 - err_sym = decode_syndrome(syndrome, x8_vectors, 1968 - ARRAY_SIZE(x8_vectors), 8); 1969 - return map_err_sym_to_channel(err_sym, 8); 1970 - } else { 1971 - err_sym = decode_syndrome(syndrome, x4_vectors, 1972 - ARRAY_SIZE(x4_vectors), 4); 1973 - return map_err_sym_to_channel(err_sym, 4); 1963 + amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value); 1964 + 1965 + /* F3x180[EccSymbolSize]=1 => x8 symbols */ 1966 + if (boot_cpu_data.x86_model > 7 && 1967 + value & BIT(25)) { 1968 + err_sym = decode_syndrome(syndrome, x8_vectors, 1969 + ARRAY_SIZE(x8_vectors), 8); 1970 + return map_err_sym_to_channel(err_sym, 8); 1971 + } 1974 1972 } 1973 + err_sym = decode_syndrome(syndrome, x4_vectors, ARRAY_SIZE(x4_vectors), 4); 1974 + return map_err_sym_to_channel(err_sym, 4); 1975 1975 } 1976 1976 1977 1977 /*
+40 -13
drivers/edac/i7core_edac.c
··· 1233 1233 for (i = 0; i < MAX_SOCKET_BUSES; i++) 1234 1234 pcibios_scan_specific_bus(255-i); 1235 1235 } 1236 + pci_dev_put(pdev); 1236 1237 table++; 1237 1238 } 1239 + } 1240 + 1241 + static unsigned i7core_pci_lastbus(void) 1242 + { 1243 + int last_bus = 0, bus; 1244 + struct pci_bus *b = NULL; 1245 + 1246 + while ((b = pci_find_next_bus(b)) != NULL) { 1247 + bus = b->number; 1248 + debugf0("Found bus %d\n", bus); 1249 + if (bus > last_bus) 1250 + last_bus = bus; 1251 + } 1252 + 1253 + debugf0("Last bus %d\n", last_bus); 1254 + 1255 + return last_bus; 1238 1256 } 1239 1257 1240 1258 /* ··· 1262 1244 * Need to 'get' device 16 func 1 and func 2 1263 1245 */ 1264 1246 int i7core_get_onedevice(struct pci_dev **prev, int devno, 1265 - struct pci_id_descr *dev_descr, unsigned n_devs) 1247 + struct pci_id_descr *dev_descr, unsigned n_devs, 1248 + unsigned last_bus) 1266 1249 { 1267 1250 struct i7core_dev *i7core_dev; 1268 1251 ··· 1310 1291 } 1311 1292 bus = pdev->bus->number; 1312 1293 1313 - if (bus == 0x3f) 1314 - socket = 0; 1315 - else 1316 - socket = 255 - bus; 1294 + socket = last_bus - bus; 1317 1295 1318 1296 i7core_dev = get_i7core_dev(socket); 1319 1297 if (!i7core_dev) { ··· 1374 1358 1375 1359 static int i7core_get_devices(struct pci_id_table *table) 1376 1360 { 1377 - int i, rc; 1361 + int i, rc, last_bus; 1378 1362 struct pci_dev *pdev = NULL; 1379 1363 struct pci_id_descr *dev_descr; 1364 + 1365 + last_bus = i7core_pci_lastbus(); 1380 1366 1381 1367 while (table && table->descr) { 1382 1368 dev_descr = table->descr; 1383 1369 for (i = 0; i < table->n_devs; i++) { 1384 1370 pdev = NULL; 1385 1371 do { 1386 - rc = i7core_get_onedevice(&pdev, i, &dev_descr[i], 1387 - table->n_devs); 1372 + rc = i7core_get_onedevice(&pdev, i, 1373 + &dev_descr[i], 1374 + table->n_devs, 1375 + last_bus); 1388 1376 if (rc < 0) { 1389 1377 if (i == 0) { 1390 1378 i = table->n_devs; ··· 1947 1927 * 0 for FOUND a device 1948 1928 * < 0 for error code 1949 1929 */ 1930 + 1931 
+ static int probed = 0; 1932 + 1950 1933 static int __devinit i7core_probe(struct pci_dev *pdev, 1951 1934 const struct pci_device_id *id) 1952 1935 { 1953 - int dev_idx = id->driver_data; 1954 1936 int rc; 1955 1937 struct i7core_dev *i7core_dev; 1938 + 1939 + /* get the pci devices we want to reserve for our use */ 1940 + mutex_lock(&i7core_edac_lock); 1956 1941 1957 1942 /* 1958 1943 * All memory controllers are allocated at the first pass. 1959 1944 */ 1960 - if (unlikely(dev_idx >= 1)) 1945 + if (unlikely(probed >= 1)) { 1946 + mutex_unlock(&i7core_edac_lock); 1961 1947 return -EINVAL; 1962 - 1963 - /* get the pci devices we want to reserve for our use */ 1964 - mutex_lock(&i7core_edac_lock); 1948 + } 1949 + probed++; 1965 1950 1966 1951 rc = i7core_get_devices(pci_dev_table); 1967 1952 if (unlikely(rc < 0)) ··· 2038 2013 i7core_dev->socket); 2039 2014 } 2040 2015 } 2016 + probed--; 2017 + 2041 2018 mutex_unlock(&i7core_edac_lock); 2042 2019 } 2043 2020
+1 -1
drivers/gpio/Kconfig
··· 1 1 # 2 - # GPIO infrastructure and expanders 2 + # platform-neutral GPIO infrastructure and expanders 3 3 # 4 4 5 5 config ARCH_WANT_OPTIONAL_GPIOLIB
+5 -1
drivers/gpio/Makefile
··· 1 - # gpio support: dedicated expander chips, etc 1 + # generic gpio support: dedicated expander chips, etc 2 + # 3 + # NOTE: platform-specific GPIO drivers don't belong in the 4 + # drivers/gpio directory; put them with other platform setup 5 + # code, IRQ controllers, board init, etc. 2 6 3 7 ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG 4 8
+13 -6
drivers/gpu/drm/drm_fb_helper.c
··· 146 146 cvt = 1; 147 147 break; 148 148 case 'R': 149 - if (!cvt) 149 + if (cvt) 150 150 rb = 1; 151 151 break; 152 152 case 'm': ··· 1024 1024 } 1025 1025 1026 1026 create_mode: 1027 - mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres, 1028 - cmdline_mode->yres, 1029 - cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, 1030 - cmdline_mode->rb, cmdline_mode->interlace, 1031 - cmdline_mode->margins); 1027 + if (cmdline_mode->cvt) 1028 + mode = drm_cvt_mode(fb_helper_conn->connector->dev, 1029 + cmdline_mode->xres, cmdline_mode->yres, 1030 + cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, 1031 + cmdline_mode->rb, cmdline_mode->interlace, 1032 + cmdline_mode->margins); 1033 + else 1034 + mode = drm_gtf_mode(fb_helper_conn->connector->dev, 1035 + cmdline_mode->xres, cmdline_mode->yres, 1036 + cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, 1037 + cmdline_mode->interlace, 1038 + cmdline_mode->margins); 1032 1039 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); 1033 1040 list_add(&mode->head, &fb_helper_conn->connector->modes); 1034 1041 return mode;
+1 -1
drivers/gpu/drm/i915/dvo_tfp410.c
··· 208 208 uint8_t ctl2; 209 209 210 210 if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) { 211 - if (ctl2 & TFP410_CTL_2_HTPLG) 211 + if (ctl2 & TFP410_CTL_2_RSEN) 212 212 ret = connector_status_connected; 213 213 else 214 214 ret = connector_status_disconnected;
+1 -1
drivers/gpu/drm/i915/i915_debugfs.c
··· 620 620 drm_i915_private_t *dev_priv = dev->dev_private; 621 621 bool sr_enabled = false; 622 622 623 - if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev)) 623 + if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev)) 624 624 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 625 625 else if (IS_I915GM(dev)) 626 626 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+7 -1
drivers/gpu/drm/i915/i915_dma.c
··· 128 128 if (dev->irq_enabled) 129 129 drm_irq_uninstall(dev); 130 130 131 + mutex_lock(&dev->struct_mutex); 131 132 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); 132 133 if (HAS_BSD(dev)) 133 134 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); 135 + mutex_unlock(&dev->struct_mutex); 134 136 135 137 /* Clear the HWS virtual address at teardown */ 136 138 if (I915_NEED_GFX_HWS(dev)) ··· 1231 1229 static void i915_setup_compression(struct drm_device *dev, int size) 1232 1230 { 1233 1231 struct drm_i915_private *dev_priv = dev->dev_private; 1234 - struct drm_mm_node *compressed_fb, *compressed_llb; 1232 + struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); 1235 1233 unsigned long cfb_base; 1236 1234 unsigned long ll_base = 0; 1237 1235 ··· 1411 1409 i915_switcheroo_can_switch); 1412 1410 if (ret) 1413 1411 goto cleanup_vga_client; 1412 + 1413 + /* IIR "flip pending" bit means done if this bit is set */ 1414 + if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE)) 1415 + dev_priv->flip_pending_is_done = true; 1414 1416 1415 1417 intel_modeset_init(dev); 1416 1418
+2 -1
drivers/gpu/drm/i915/i915_drv.h
··· 596 596 struct drm_crtc *plane_to_crtc_mapping[2]; 597 597 struct drm_crtc *pipe_to_crtc_mapping[2]; 598 598 wait_queue_head_t pending_flip_queue; 599 + bool flip_pending_is_done; 599 600 600 601 /* Reclocking support */ 601 602 bool render_reclock_avail; ··· 1077 1076 drm_i915_private_t *dev_priv = dev->dev_private; \ 1078 1077 if (I915_VERBOSE) \ 1079 1078 DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ 1080 - intel_ring_begin(dev, &dev_priv->render_ring, 4*(n)); \ 1079 + intel_ring_begin(dev, &dev_priv->render_ring, (n)); \ 1081 1080 } while (0) 1082 1081 1083 1082
+1 -1
drivers/gpu/drm/i915/i915_gem.c
··· 2239 2239 mapping = inode->i_mapping; 2240 2240 for (i = 0; i < page_count; i++) { 2241 2241 page = read_cache_page_gfp(mapping, i, 2242 - mapping_gfp_mask (mapping) | 2242 + GFP_HIGHUSER | 2243 2243 __GFP_COLD | 2244 2244 gfpmask); 2245 2245 if (IS_ERR(page))
+45 -31
drivers/gpu/drm/i915/i915_irq.c
··· 940 940 if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) 941 941 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); 942 942 943 - if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) 943 + if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { 944 944 intel_prepare_page_flip(dev, 0); 945 + if (dev_priv->flip_pending_is_done) 946 + intel_finish_page_flip_plane(dev, 0); 947 + } 945 948 946 - if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) 949 + if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { 947 950 intel_prepare_page_flip(dev, 1); 951 + if (dev_priv->flip_pending_is_done) 952 + intel_finish_page_flip_plane(dev, 1); 953 + } 948 954 949 955 if (pipea_stats & vblank_status) { 950 956 vblank++; 951 957 drm_handle_vblank(dev, 0); 952 - intel_finish_page_flip(dev, 0); 958 + if (!dev_priv->flip_pending_is_done) 959 + intel_finish_page_flip(dev, 0); 953 960 } 954 961 955 962 if (pipeb_stats & vblank_status) { 956 963 vblank++; 957 964 drm_handle_vblank(dev, 1); 958 - intel_finish_page_flip(dev, 1); 965 + if (!dev_priv->flip_pending_is_done) 966 + intel_finish_page_flip(dev, 1); 959 967 } 960 968 961 969 if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || ··· 1395 1387 dev_priv->pipestat[1] = 0; 1396 1388 1397 1389 if (I915_HAS_HOTPLUG(dev)) { 1398 - u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 1399 - 1400 - /* Note HDMI and DP share bits */ 1401 - if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 1402 - hotplug_en |= HDMIB_HOTPLUG_INT_EN; 1403 - if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 1404 - hotplug_en |= HDMIC_HOTPLUG_INT_EN; 1405 - if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 1406 - hotplug_en |= HDMID_HOTPLUG_INT_EN; 1407 - if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) 1408 - hotplug_en |= SDVOC_HOTPLUG_INT_EN; 1409 - if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) 1410 - hotplug_en |= SDVOB_HOTPLUG_INT_EN; 1411 - if (dev_priv->hotplug_supported_mask & 
CRT_HOTPLUG_INT_STATUS) 1412 - hotplug_en |= CRT_HOTPLUG_INT_EN; 1413 - /* Ignore TV since it's buggy */ 1414 - 1415 - I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 1416 - 1417 1390 /* Enable in IER... */ 1418 1391 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 1419 1392 /* and unmask in IMR */ 1420 - i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT); 1393 + dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT; 1421 1394 } 1422 1395 1423 1396 /* ··· 1416 1427 } 1417 1428 I915_WRITE(EMR, error_mask); 1418 1429 1419 - /* Disable pipe interrupt enables, clear pending pipe status */ 1420 - I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); 1421 - I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); 1422 - /* Clear pending interrupt status */ 1423 - I915_WRITE(IIR, I915_READ(IIR)); 1424 - 1425 - I915_WRITE(IER, enable_mask); 1426 1430 I915_WRITE(IMR, dev_priv->irq_mask_reg); 1431 + I915_WRITE(IER, enable_mask); 1427 1432 (void) I915_READ(IER); 1433 + 1434 + if (I915_HAS_HOTPLUG(dev)) { 1435 + u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 1436 + 1437 + /* Note HDMI and DP share bits */ 1438 + if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 1439 + hotplug_en |= HDMIB_HOTPLUG_INT_EN; 1440 + if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 1441 + hotplug_en |= HDMIC_HOTPLUG_INT_EN; 1442 + if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 1443 + hotplug_en |= HDMID_HOTPLUG_INT_EN; 1444 + if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) 1445 + hotplug_en |= SDVOC_HOTPLUG_INT_EN; 1446 + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) 1447 + hotplug_en |= SDVOB_HOTPLUG_INT_EN; 1448 + if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 1449 + hotplug_en |= CRT_HOTPLUG_INT_EN; 1450 + 1451 + /* Programming the CRT detection parameters tends 1452 + to generate a spurious hotplug event about three 1453 + seconds later. So just do it once. 
1454 + */ 1455 + if (IS_G4X(dev)) 1456 + hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 1457 + hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 1458 + } 1459 + 1460 + /* Ignore TV since it's buggy */ 1461 + 1462 + I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 1463 + } 1428 1464 1429 1465 opregion_enable_asle(dev); 1430 1466
+4 -1
drivers/gpu/drm/i915/i915_reg.h
··· 178 178 #define MI_OVERLAY_OFF (0x2<<21) 179 179 #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) 180 180 #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) 181 + #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) 181 182 #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) 182 183 #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) 183 184 #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ ··· 369 368 #define CM0_RC_OP_FLUSH_DISABLE (1<<0) 370 369 #define BB_ADDR 0x02140 /* 8 bytes */ 371 370 #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ 371 + #define ECOSKPD 0x021d0 372 + #define ECO_GATING_CX_ONLY (1<<3) 373 + #define ECO_FLIP_DONE (1<<0) 372 374 373 375 /* GEN6 interrupt control */ 374 376 #define GEN6_RENDER_HWSTAM 0x2098 ··· 1134 1130 #define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) 1135 1131 #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) 1136 1132 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 1137 - #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ 1138 1133 1139 1134 #define PORT_HOTPLUG_STAT 0x61114 1140 1135 #define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
-6
drivers/gpu/drm/i915/intel_crt.c
··· 234 234 else 235 235 tries = 1; 236 236 hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN); 237 - hotplug_en &= CRT_HOTPLUG_MASK; 238 237 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; 239 - 240 - if (IS_G4X(dev)) 241 - hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 242 - 243 - hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 244 238 245 239 for (i = 0; i < tries ; i++) { 246 240 unsigned long timeout;
+47 -13
drivers/gpu/drm/i915/intel_display.c
··· 2970 2970 if (srwm < 0) 2971 2971 srwm = 1; 2972 2972 srwm &= 0x3f; 2973 - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2973 + if (IS_I965GM(dev)) 2974 + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2974 2975 } else { 2975 2976 /* Turn off self refresh if both pipes are enabled */ 2976 - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 2977 - & ~FW_BLC_SELF_EN); 2977 + if (IS_I965GM(dev)) 2978 + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 2979 + & ~FW_BLC_SELF_EN); 2978 2980 } 2979 2981 2980 2982 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", ··· 4485 4483 struct drm_device *dev = dev_priv->dev; 4486 4484 struct drm_crtc *crtc; 4487 4485 struct intel_crtc *intel_crtc; 4486 + int enabled = 0; 4488 4487 4489 4488 if (!i915_powersave) 4490 4489 return; ··· 4494 4491 4495 4492 i915_update_gfx_val(dev_priv); 4496 4493 4497 - if (IS_I945G(dev) || IS_I945GM(dev)) { 4498 - DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); 4499 - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); 4500 - } 4501 - 4502 4494 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4503 4495 /* Skip inactive CRTCs */ 4504 4496 if (!crtc->fb) 4505 4497 continue; 4506 4498 4499 + enabled++; 4507 4500 intel_crtc = to_intel_crtc(crtc); 4508 4501 if (!intel_crtc->busy) 4509 4502 intel_decrease_pllclock(crtc); 4503 + } 4504 + 4505 + if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) { 4506 + DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); 4507 + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); 4510 4508 } 4511 4509 4512 4510 mutex_unlock(&dev->struct_mutex); ··· 4605 4601 kfree(work); 4606 4602 } 4607 4603 4608 - void intel_finish_page_flip(struct drm_device *dev, int pipe) 4604 + static void do_intel_finish_page_flip(struct drm_device *dev, 4605 + struct drm_crtc *crtc) 4609 4606 { 4610 4607 drm_i915_private_t *dev_priv = dev->dev_private; 4611 - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 4612 4608 struct 
intel_crtc *intel_crtc = to_intel_crtc(crtc); 4613 4609 struct intel_unpin_work *work; 4614 4610 struct drm_i915_gem_object *obj_priv; ··· 4652 4648 schedule_work(&work->work); 4653 4649 } 4654 4650 4651 + void intel_finish_page_flip(struct drm_device *dev, int pipe) 4652 + { 4653 + drm_i915_private_t *dev_priv = dev->dev_private; 4654 + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 4655 + 4656 + do_intel_finish_page_flip(dev, crtc); 4657 + } 4658 + 4659 + void intel_finish_page_flip_plane(struct drm_device *dev, int plane) 4660 + { 4661 + drm_i915_private_t *dev_priv = dev->dev_private; 4662 + struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; 4663 + 4664 + do_intel_finish_page_flip(dev, crtc); 4665 + } 4666 + 4655 4667 void intel_prepare_page_flip(struct drm_device *dev, int plane) 4656 4668 { 4657 4669 drm_i915_private_t *dev_priv = dev->dev_private; ··· 4698 4678 unsigned long flags; 4699 4679 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; 4700 4680 int ret, pipesrc; 4681 + u32 flip_mask; 4701 4682 4702 4683 work = kzalloc(sizeof *work, GFP_KERNEL); 4703 4684 if (work == NULL) ··· 4752 4731 atomic_inc(&obj_priv->pending_flip); 4753 4732 work->pending_flip_obj = obj; 4754 4733 4734 + if (intel_crtc->plane) 4735 + flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4736 + else 4737 + flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; 4738 + 4739 + /* Wait for any previous flip to finish */ 4740 + if (IS_GEN3(dev)) 4741 + while (I915_READ(ISR) & flip_mask) 4742 + ; 4743 + 4755 4744 BEGIN_LP_RING(4); 4756 - OUT_RING(MI_DISPLAY_FLIP | 4757 - MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 4758 - OUT_RING(fb->pitch); 4759 4745 if (IS_I965G(dev)) { 4746 + OUT_RING(MI_DISPLAY_FLIP | 4747 + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 4748 + OUT_RING(fb->pitch); 4760 4749 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); 4761 4750 pipesrc = I915_READ(pipesrc_reg); 4762 4751 OUT_RING(pipesrc & 0x0fff0fff); 4763 
4752 } else { 4753 + OUT_RING(MI_DISPLAY_FLIP_I915 | 4754 + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 4755 + OUT_RING(fb->pitch); 4764 4756 OUT_RING(obj_priv->gtt_offset); 4765 4757 OUT_RING(MI_NOOP); 4766 4758 }
+24 -3
drivers/gpu/drm/i915/intel_dp.c
··· 136 136 } 137 137 138 138 static int 139 + intel_dp_max_data_rate(int max_link_clock, int max_lanes) 140 + { 141 + return (max_link_clock * max_lanes * 8) / 10; 142 + } 143 + 144 + static int 139 145 intel_dp_mode_valid(struct drm_connector *connector, 140 146 struct drm_display_mode *mode) 141 147 { ··· 150 144 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); 151 145 int max_lanes = intel_dp_max_lane_count(intel_encoder); 152 146 153 - if (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) 154 - > max_link_clock * max_lanes) 147 + /* only refuse the mode on non eDP since we have seen some wierd eDP panels 148 + which are outside spec tolerances but somehow work by magic */ 149 + if (!IS_eDP(intel_encoder) && 150 + (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) 151 + > intel_dp_max_data_rate(max_link_clock, max_lanes))) 155 152 return MODE_CLOCK_HIGH; 156 153 157 154 if (mode->clock < 10000) ··· 515 506 516 507 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 517 508 for (clock = 0; clock <= max_clock; clock++) { 518 - int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; 509 + int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 519 510 520 511 if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) 521 512 <= link_avail) { ··· 529 520 return true; 530 521 } 531 522 } 523 + } 524 + 525 + if (IS_eDP(intel_encoder)) { 526 + /* okay we failed just pick the highest */ 527 + dp_priv->lane_count = max_lane_count; 528 + dp_priv->link_bw = bws[max_clock]; 529 + adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); 530 + DRM_DEBUG_KMS("Force picking display port link bw %02x lane " 531 + "count %d clock %d\n", 532 + dp_priv->link_bw, dp_priv->lane_count, 533 + adjusted_mode->clock); 534 + return true; 532 535 } 533 536 return false; 534 537 }
+1
drivers/gpu/drm/i915/intel_drv.h
··· 224 224 225 225 extern void intel_prepare_page_flip(struct drm_device *dev, int plane); 226 226 extern void intel_finish_page_flip(struct drm_device *dev, int pipe); 227 + extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); 227 228 228 229 extern void intel_setup_overlay(struct drm_device *dev); 229 230 extern void intel_cleanup_overlay(struct drm_device *dev);
+2 -2
drivers/gpu/drm/i915/intel_lvds.c
··· 983 983 984 984 drm_connector_attach_property(&intel_connector->base, 985 985 dev->mode_config.scaling_mode_property, 986 - DRM_MODE_SCALE_FULLSCREEN); 987 - lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; 986 + DRM_MODE_SCALE_ASPECT); 987 + lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT; 988 988 /* 989 989 * LVDS discovery: 990 990 * 1) check for EDID on DDC
+6 -4
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 94 94 #if WATCH_EXEC 95 95 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); 96 96 #endif 97 - intel_ring_begin(dev, ring, 8); 97 + intel_ring_begin(dev, ring, 2); 98 98 intel_ring_emit(dev, ring, cmd); 99 99 intel_ring_emit(dev, ring, MI_NOOP); 100 100 intel_ring_advance(dev, ring); ··· 358 358 u32 invalidate_domains, 359 359 u32 flush_domains) 360 360 { 361 - intel_ring_begin(dev, ring, 8); 361 + intel_ring_begin(dev, ring, 2); 362 362 intel_ring_emit(dev, ring, MI_FLUSH); 363 363 intel_ring_emit(dev, ring, MI_NOOP); 364 364 intel_ring_advance(dev, ring); ··· 687 687 *virt++ = MI_NOOP; 688 688 689 689 ring->tail = 0; 690 + ring->space = ring->head - 8; 690 691 691 692 return 0; 692 693 } ··· 722 721 } 723 722 724 723 void intel_ring_begin(struct drm_device *dev, 725 - struct intel_ring_buffer *ring, int n) 724 + struct intel_ring_buffer *ring, int num_dwords) 726 725 { 726 + int n = 4*num_dwords; 727 727 if (unlikely(ring->tail + n > ring->size)) 728 728 intel_wrap_ring_buffer(dev, ring); 729 729 if (unlikely(ring->space < n)) ··· 754 752 { 755 753 unsigned int *virt = ring->virtual_start + ring->tail; 756 754 BUG_ON((len&~(4-1)) != 0); 757 - intel_ring_begin(dev, ring, len); 755 + intel_ring_begin(dev, ring, len/4); 758 756 memcpy(virt, data, len); 759 757 ring->tail += len; 760 758 ring->tail &= ring->size - 1;
+1 -1
drivers/gpu/drm/radeon/atombios_crtc.c
··· 498 498 if ((rdev->family == CHIP_RS600) || 499 499 (rdev->family == CHIP_RS690) || 500 500 (rdev->family == CHIP_RS740)) 501 - pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV | 501 + pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/ 502 502 RADEON_PLL_PREFER_CLOSEST_LOWER); 503 503 504 504 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
+29 -6
drivers/gpu/drm/radeon/evergreen.c
··· 607 607 WREG32(MC_VM_FB_LOCATION, tmp); 608 608 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); 609 609 WREG32(HDP_NONSURFACE_INFO, (2 << 7)); 610 - WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); 610 + WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); 611 611 if (rdev->flags & RADEON_IS_AGP) { 612 612 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); 613 613 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); ··· 1222 1222 ps_thread_count = 128; 1223 1223 1224 1224 sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); 1225 - sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; 1226 - sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; 1227 - sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; 1228 - sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; 1229 - sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; 1225 + sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); 1226 + sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); 1227 + sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); 1228 + sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); 1229 + sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); 1230 1230 1231 1231 sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); 1232 1232 sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); ··· 
1260 1260 WREG32(VGT_GS_VERTEX_REUSE, 16); 1261 1261 WREG32(PA_SC_LINE_STIPPLE_STATE, 0); 1262 1262 1263 + WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); 1264 + WREG32(VGT_OUT_DEALLOC_CNTL, 16); 1265 + 1263 1266 WREG32(CB_PERF_CTR0_SEL_0, 0); 1264 1267 WREG32(CB_PERF_CTR0_SEL_1, 0); 1265 1268 WREG32(CB_PERF_CTR1_SEL_0, 0); ··· 1271 1268 WREG32(CB_PERF_CTR2_SEL_1, 0); 1272 1269 WREG32(CB_PERF_CTR3_SEL_0, 0); 1273 1270 WREG32(CB_PERF_CTR3_SEL_1, 0); 1271 + 1272 + /* clear render buffer base addresses */ 1273 + WREG32(CB_COLOR0_BASE, 0); 1274 + WREG32(CB_COLOR1_BASE, 0); 1275 + WREG32(CB_COLOR2_BASE, 0); 1276 + WREG32(CB_COLOR3_BASE, 0); 1277 + WREG32(CB_COLOR4_BASE, 0); 1278 + WREG32(CB_COLOR5_BASE, 0); 1279 + WREG32(CB_COLOR6_BASE, 0); 1280 + WREG32(CB_COLOR7_BASE, 0); 1281 + WREG32(CB_COLOR8_BASE, 0); 1282 + WREG32(CB_COLOR9_BASE, 0); 1283 + WREG32(CB_COLOR10_BASE, 0); 1284 + WREG32(CB_COLOR11_BASE, 0); 1285 + 1286 + /* set the shader const cache sizes to 0 */ 1287 + for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4) 1288 + WREG32(i, 0); 1289 + for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4) 1290 + WREG32(i, 0); 1274 1291 1275 1292 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); 1276 1293 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+2 -2
drivers/gpu/drm/radeon/evergreen_cs.c
··· 1197 1197 DRM_ERROR("bad SET_RESOURCE (tex)\n"); 1198 1198 return -EINVAL; 1199 1199 } 1200 - ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1200 + ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1201 1201 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1202 1202 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 1203 1203 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) ··· 1209 1209 DRM_ERROR("bad SET_RESOURCE (tex)\n"); 1210 1210 return -EINVAL; 1211 1211 } 1212 - ib[idx+1+(i*8)+4] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1212 + ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1213 1213 mipmap = reloc->robj; 1214 1214 r = evergreen_check_texture_resource(p, idx+1+(i*8), 1215 1215 texture, mipmap);
+3
drivers/gpu/drm/radeon/evergreend.h
··· 713 713 #define SQ_GSVS_RING_OFFSET_2 0x28930 714 714 #define SQ_GSVS_RING_OFFSET_3 0x28934 715 715 716 + #define SQ_ALU_CONST_BUFFER_SIZE_PS_0 0x28140 717 + #define SQ_ALU_CONST_BUFFER_SIZE_HS_0 0x28f80 718 + 716 719 #define SQ_ALU_CONST_CACHE_PS_0 0x28940 717 720 #define SQ_ALU_CONST_CACHE_PS_1 0x28944 718 721 #define SQ_ALU_CONST_CACHE_PS_2 0x28948
+48 -33
drivers/gpu/drm/radeon/r100.c
··· 1628 1628 case RADEON_TXFORMAT_RGB332: 1629 1629 case RADEON_TXFORMAT_Y8: 1630 1630 track->textures[i].cpp = 1; 1631 + track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1631 1632 break; 1632 1633 case RADEON_TXFORMAT_AI88: 1633 1634 case RADEON_TXFORMAT_ARGB1555: ··· 1640 1639 case RADEON_TXFORMAT_LDUDV655: 1641 1640 case RADEON_TXFORMAT_DUDV88: 1642 1641 track->textures[i].cpp = 2; 1642 + track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1643 1643 break; 1644 1644 case RADEON_TXFORMAT_ARGB8888: 1645 1645 case RADEON_TXFORMAT_RGBA8888: 1646 1646 case RADEON_TXFORMAT_SHADOW32: 1647 1647 case RADEON_TXFORMAT_LDUDUV8888: 1648 1648 track->textures[i].cpp = 4; 1649 + track->textures[i].compress_format = R100_TRACK_COMP_NONE; 1649 1650 break; 1650 1651 case RADEON_TXFORMAT_DXT1: 1651 1652 track->textures[i].cpp = 1; ··· 2607 2604 int surf_index = reg * 16; 2608 2605 int flags = 0; 2609 2606 2610 - /* r100/r200 divide by 16 */ 2611 - if (rdev->family < CHIP_R300) 2612 - flags = pitch / 16; 2613 - else 2614 - flags = pitch / 8; 2615 - 2616 2607 if (rdev->family <= CHIP_RS200) { 2617 2608 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 2618 2609 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) ··· 2629 2632 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP; 2630 2633 if (tiling_flags & RADEON_TILING_SWAP_32BIT) 2631 2634 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; 2635 + 2636 + /* when we aren't tiling the pitch seems to needs to be furtherdivided down. 
- tested on power5 + rn50 server */ 2637 + if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) { 2638 + if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO))) 2639 + if (ASIC_IS_RN50(rdev)) 2640 + pitch /= 16; 2641 + } 2642 + 2643 + /* r100/r200 divide by 16 */ 2644 + if (rdev->family < CHIP_R300) 2645 + flags |= pitch / 16; 2646 + else 2647 + flags |= pitch / 8; 2648 + 2632 2649 2633 2650 DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); 2634 2651 WREG32(RADEON_SURFACE0_INFO + surf_index, flags); ··· 3158 3147 DRM_ERROR("compress format %d\n", t->compress_format); 3159 3148 } 3160 3149 3161 - static int r100_cs_track_cube(struct radeon_device *rdev, 3162 - struct r100_cs_track *track, unsigned idx) 3163 - { 3164 - unsigned face, w, h; 3165 - struct radeon_bo *cube_robj; 3166 - unsigned long size; 3167 - 3168 - for (face = 0; face < 5; face++) { 3169 - cube_robj = track->textures[idx].cube_info[face].robj; 3170 - w = track->textures[idx].cube_info[face].width; 3171 - h = track->textures[idx].cube_info[face].height; 3172 - 3173 - size = w * h; 3174 - size *= track->textures[idx].cpp; 3175 - 3176 - size += track->textures[idx].cube_info[face].offset; 3177 - 3178 - if (size > radeon_bo_size(cube_robj)) { 3179 - DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", 3180 - size, radeon_bo_size(cube_robj)); 3181 - r100_cs_track_texture_print(&track->textures[idx]); 3182 - return -1; 3183 - } 3184 - } 3185 - return 0; 3186 - } 3187 - 3188 3150 static int r100_track_compress_size(int compress_format, int w, int h) 3189 3151 { 3190 3152 int block_width, block_height, block_bytes; ··· 3186 3202 wblocks = min_wblocks; 3187 3203 sz = wblocks * hblocks * block_bytes; 3188 3204 return sz; 3205 + } 3206 + 3207 + static int r100_cs_track_cube(struct radeon_device *rdev, 3208 + struct r100_cs_track *track, unsigned idx) 3209 + { 3210 + unsigned face, w, h; 3211 + struct radeon_bo *cube_robj; 3212 + 
unsigned long size; 3213 + unsigned compress_format = track->textures[idx].compress_format; 3214 + 3215 + for (face = 0; face < 5; face++) { 3216 + cube_robj = track->textures[idx].cube_info[face].robj; 3217 + w = track->textures[idx].cube_info[face].width; 3218 + h = track->textures[idx].cube_info[face].height; 3219 + 3220 + if (compress_format) { 3221 + size = r100_track_compress_size(compress_format, w, h); 3222 + } else 3223 + size = w * h; 3224 + size *= track->textures[idx].cpp; 3225 + 3226 + size += track->textures[idx].cube_info[face].offset; 3227 + 3228 + if (size > radeon_bo_size(cube_robj)) { 3229 + DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", 3230 + size, radeon_bo_size(cube_robj)); 3231 + r100_cs_track_texture_print(&track->textures[idx]); 3232 + return -1; 3233 + } 3234 + } 3235 + return 0; 3189 3236 } 3190 3237 3191 3238 static int r100_cs_track_texture_check(struct radeon_device *rdev,
+5
drivers/gpu/drm/radeon/r200.c
··· 415 415 /* 2D, 3D, CUBE */ 416 416 switch (tmp) { 417 417 case 0: 418 + case 3: 419 + case 4: 418 420 case 5: 419 421 case 6: 420 422 case 7: ··· 452 450 case R200_TXFORMAT_RGB332: 453 451 case R200_TXFORMAT_Y8: 454 452 track->textures[i].cpp = 1; 453 + track->textures[i].compress_format = R100_TRACK_COMP_NONE; 455 454 break; 456 455 case R200_TXFORMAT_AI88: 457 456 case R200_TXFORMAT_ARGB1555: ··· 464 461 case R200_TXFORMAT_DVDU88: 465 462 case R200_TXFORMAT_AVYU4444: 466 463 track->textures[i].cpp = 2; 464 + track->textures[i].compress_format = R100_TRACK_COMP_NONE; 467 465 break; 468 466 case R200_TXFORMAT_ARGB8888: 469 467 case R200_TXFORMAT_RGBA8888: ··· 472 468 case R200_TXFORMAT_BGR111110: 473 469 case R200_TXFORMAT_LDVDU8888: 474 470 track->textures[i].cpp = 4; 471 + track->textures[i].compress_format = R100_TRACK_COMP_NONE; 475 472 break; 476 473 case R200_TXFORMAT_DXT1: 477 474 track->textures[i].cpp = 1;
+5
drivers/gpu/drm/radeon/r300.c
··· 881 881 case R300_TX_FORMAT_Y4X4: 882 882 case R300_TX_FORMAT_Z3Y3X2: 883 883 track->textures[i].cpp = 1; 884 + track->textures[i].compress_format = R100_TRACK_COMP_NONE; 884 885 break; 885 886 case R300_TX_FORMAT_X16: 886 887 case R300_TX_FORMAT_Y8X8: ··· 893 892 case R300_TX_FORMAT_B8G8_B8G8: 894 893 case R300_TX_FORMAT_G8R8_G8B8: 895 894 track->textures[i].cpp = 2; 895 + track->textures[i].compress_format = R100_TRACK_COMP_NONE; 896 896 break; 897 897 case R300_TX_FORMAT_Y16X16: 898 898 case R300_TX_FORMAT_Z11Y11X10: ··· 904 902 case R300_TX_FORMAT_FL_I32: 905 903 case 0x1e: 906 904 track->textures[i].cpp = 4; 905 + track->textures[i].compress_format = R100_TRACK_COMP_NONE; 907 906 break; 908 907 case R300_TX_FORMAT_W16Z16Y16X16: 909 908 case R300_TX_FORMAT_FL_R16G16B16A16: 910 909 case R300_TX_FORMAT_FL_I32A32: 911 910 track->textures[i].cpp = 8; 911 + track->textures[i].compress_format = R100_TRACK_COMP_NONE; 912 912 break; 913 913 case R300_TX_FORMAT_FL_R32G32B32A32: 914 914 track->textures[i].cpp = 16; 915 + track->textures[i].compress_format = R100_TRACK_COMP_NONE; 915 916 break; 916 917 case R300_TX_FORMAT_DXT1: 917 918 track->textures[i].cpp = 1;
+12 -5
drivers/gpu/drm/radeon/r600.c
··· 130 130 break; 131 131 } 132 132 } 133 - } else 134 - rdev->pm.requested_power_state_index = 135 - rdev->pm.current_power_state_index - 1; 133 + } else { 134 + if (rdev->pm.current_power_state_index == 0) 135 + rdev->pm.requested_power_state_index = 136 + rdev->pm.num_power_states - 1; 137 + else 138 + rdev->pm.requested_power_state_index = 139 + rdev->pm.current_power_state_index - 1; 140 + } 136 141 } 137 142 rdev->pm.requested_clock_mode_index = 0; 138 143 /* don't use the power state if crtcs are active and no display flag is set */ ··· 1102 1097 WREG32(MC_VM_FB_LOCATION, tmp); 1103 1098 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); 1104 1099 WREG32(HDP_NONSURFACE_INFO, (2 << 7)); 1105 - WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF); 1100 + WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); 1106 1101 if (rdev->flags & RADEON_IS_AGP) { 1107 1102 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); 1108 1103 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); ··· 1224 1219 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1225 1220 r600_vram_gtt_location(rdev, &rdev->mc); 1226 1221 1227 - if (rdev->flags & RADEON_IS_IGP) 1222 + if (rdev->flags & RADEON_IS_IGP) { 1223 + rs690_pm_info(rdev); 1228 1224 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 1225 + } 1229 1226 radeon_update_bandwidth_info(rdev); 1230 1227 return 0; 1231 1228 }
+3 -1
drivers/gpu/drm/radeon/radeon.h
··· 177 177 void radeon_combios_get_power_modes(struct radeon_device *rdev); 178 178 void radeon_atombios_get_power_modes(struct radeon_device *rdev); 179 179 void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); 180 + void rs690_pm_info(struct radeon_device *rdev); 180 181 181 182 /* 182 183 * Fences. ··· 620 619 DYNPM_STATE_DISABLED, 621 620 DYNPM_STATE_MINIMUM, 622 621 DYNPM_STATE_PAUSED, 623 - DYNPM_STATE_ACTIVE 622 + DYNPM_STATE_ACTIVE, 623 + DYNPM_STATE_SUSPENDED, 624 624 }; 625 625 enum radeon_dynpm_action { 626 626 DYNPM_ACTION_NONE,
+7
drivers/gpu/drm/radeon/radeon_asic.c
··· 780 780 case CHIP_R423: 781 781 case CHIP_RV410: 782 782 rdev->asic = &r420_asic; 783 + /* handle macs */ 784 + if (rdev->bios == NULL) { 785 + rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock; 786 + rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock; 787 + rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock; 788 + rdev->asic->set_memory_clock = NULL; 789 + } 783 790 break; 784 791 case CHIP_RS400: 785 792 case CHIP_RS480:
+4
drivers/gpu/drm/radeon/radeon_bios.c
··· 48 48 resource_size_t vram_base; 49 49 resource_size_t size = 256 * 1024; /* ??? */ 50 50 51 + if (!(rdev->flags & RADEON_IS_IGP)) 52 + if (!radeon_card_posted(rdev)) 53 + return false; 54 + 51 55 rdev->bios = NULL; 52 56 vram_base = drm_get_resource_start(rdev->ddev, 0); 53 57 bios = ioremap(vram_base, size);
+40 -9
drivers/gpu/drm/radeon/radeon_combios.c
··· 1411 1411 rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; 1412 1412 } else 1413 1413 #endif /* CONFIG_PPC_PMAC */ 1414 + #ifdef CONFIG_PPC64 1415 + if (ASIC_IS_RN50(rdev)) 1416 + rdev->mode_info.connector_table = CT_RN50_POWER; 1417 + else 1418 + #endif 1414 1419 rdev->mode_info.connector_table = CT_GENERIC; 1415 1420 } 1416 1421 ··· 1858 1853 CONNECTOR_OBJECT_ID_SVIDEO, 1859 1854 &hpd); 1860 1855 break; 1856 + case CT_RN50_POWER: 1857 + DRM_INFO("Connector Table: %d (rn50-power)\n", 1858 + rdev->mode_info.connector_table); 1859 + /* VGA - primary dac */ 1860 + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); 1861 + hpd.hpd = RADEON_HPD_NONE; 1862 + radeon_add_legacy_encoder(dev, 1863 + radeon_get_encoder_id(dev, 1864 + ATOM_DEVICE_CRT1_SUPPORT, 1865 + 1), 1866 + ATOM_DEVICE_CRT1_SUPPORT); 1867 + radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, 1868 + DRM_MODE_CONNECTOR_VGA, &ddc_i2c, 1869 + CONNECTOR_OBJECT_ID_VGA, 1870 + &hpd); 1871 + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); 1872 + hpd.hpd = RADEON_HPD_NONE; 1873 + radeon_add_legacy_encoder(dev, 1874 + radeon_get_encoder_id(dev, 1875 + ATOM_DEVICE_CRT2_SUPPORT, 1876 + 2), 1877 + ATOM_DEVICE_CRT2_SUPPORT); 1878 + radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, 1879 + DRM_MODE_CONNECTOR_VGA, &ddc_i2c, 1880 + CONNECTOR_OBJECT_ID_VGA, 1881 + &hpd); 1882 + break; 1861 1883 default: 1862 1884 DRM_INFO("Connector table: %d (invalid)\n", 1863 1885 rdev->mode_info.connector_table); ··· 1936 1904 if (*legacy_connector == CONNECTOR_CRT_LEGACY && 1937 1905 ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC) 1938 1906 return false; 1939 - } 1940 - 1941 - /* Some RV100 cards with 2 VGA ports show up with DVI+VGA */ 1942 - if (dev->pdev->device == 0x5159 && 1943 - dev->pdev->subsystem_vendor == 0x1002 && 1944 - dev->pdev->subsystem_device == 0x013a) { 1945 - if (*legacy_connector == CONNECTOR_DVI_I_LEGACY) 1946 - *legacy_connector = CONNECTOR_CRT_LEGACY; 1947 - 1948 
1907 } 1949 1908 1950 1909 /* X300 card with extra non-existent DVI port */ ··· 3041 3018 /* write CONFIG_MEMSIZE */ 3042 3019 combios_write_ram_size(dev); 3043 3020 } 3021 + 3022 + /* quirk for rs4xx HP nx6125 laptop to make it resume 3023 + * - it hangs on resume inside the dynclk 1 table. 3024 + */ 3025 + if (rdev->family == CHIP_RS480 && 3026 + rdev->pdev->subsystem_vendor == 0x103c && 3027 + rdev->pdev->subsystem_device == 0x308b) 3028 + return; 3044 3029 3045 3030 /* DYN CLK 1 */ 3046 3031 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
+1 -1
drivers/gpu/drm/radeon/radeon_cursor.c
··· 194 194 fail: 195 195 drm_gem_object_unreference_unlocked(obj); 196 196 197 - return 0; 197 + return ret; 198 198 } 199 199 200 200 int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+7
drivers/gpu/drm/radeon/radeon_device.c
··· 779 779 780 780 int radeon_resume_kms(struct drm_device *dev) 781 781 { 782 + struct drm_connector *connector; 782 783 struct radeon_device *rdev = dev->dev_private; 783 784 784 785 if (rdev->powered_down) ··· 798 797 radeon_resume(rdev); 799 798 radeon_pm_resume(rdev); 800 799 radeon_restore_bios_scratch_regs(rdev); 800 + 801 + /* turn on display hw */ 802 + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 803 + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 804 + } 805 + 801 806 radeon_fbdev_set_suspend(rdev, 0); 802 807 release_console_sem(); 803 808
+2 -2
drivers/gpu/drm/radeon/radeon_encoders.c
··· 1072 1072 if (is_dig) { 1073 1073 switch (mode) { 1074 1074 case DRM_MODE_DPMS_ON: 1075 + if (!ASIC_IS_DCE4(rdev)) 1076 + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); 1075 1077 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { 1076 1078 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1077 1079 ··· 1081 1079 if (ASIC_IS_DCE4(rdev)) 1082 1080 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); 1083 1081 } 1084 - if (!ASIC_IS_DCE4(rdev)) 1085 - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); 1086 1082 break; 1087 1083 case DRM_MODE_DPMS_STANDBY: 1088 1084 case DRM_MODE_DPMS_SUSPEND:
+9 -13
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
··· 928 928 if (ASIC_IS_R300(rdev)) { 929 929 gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1; 930 930 disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); 931 - } 932 - 933 - if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) 934 - disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL); 935 - else 931 + } else if (rdev->family != CHIP_R200) 936 932 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); 937 - 938 - if (rdev->family == CHIP_R200) 933 + else if (rdev->family == CHIP_R200) 939 934 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); 935 + 936 + if (rdev->family >= CHIP_R200) 937 + disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL); 940 938 941 939 if (is_tv) { 942 940 uint32_t dac_cntl; ··· 1000 1002 if (ASIC_IS_R300(rdev)) { 1001 1003 WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); 1002 1004 WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); 1003 - } 1005 + } else if (rdev->family != CHIP_R200) 1006 + WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); 1007 + else if (rdev->family == CHIP_R200) 1008 + WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); 1004 1009 1005 1010 if (rdev->family >= CHIP_R200) 1006 1011 WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl); 1007 - else 1008 - WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); 1009 - 1010 - if (rdev->family == CHIP_R200) 1011 - WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); 1012 1012 1013 1013 if (is_tv) 1014 1014 radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode);
+1
drivers/gpu/drm/radeon/radeon_mode.h
··· 206 206 CT_MINI_INTERNAL, 207 207 CT_IMAC_G5_ISIGHT, 208 208 CT_EMAC, 209 + CT_RN50_POWER, 209 210 }; 210 211 211 212 enum radeon_dvo_chip {
+34 -7
drivers/gpu/drm/radeon/radeon_pm.c
··· 397 397 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 398 398 mutex_unlock(&rdev->pm.mutex); 399 399 } else if (strncmp("profile", buf, strlen("profile")) == 0) { 400 + bool flush_wq = false; 401 + 400 402 mutex_lock(&rdev->pm.mutex); 401 - rdev->pm.pm_method = PM_METHOD_PROFILE; 403 + if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 404 + cancel_delayed_work(&rdev->pm.dynpm_idle_work); 405 + flush_wq = true; 406 + } 402 407 /* disable dynpm */ 403 408 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 404 409 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 405 - cancel_delayed_work(&rdev->pm.dynpm_idle_work); 410 + rdev->pm.pm_method = PM_METHOD_PROFILE; 406 411 mutex_unlock(&rdev->pm.mutex); 412 + if (flush_wq) 413 + flush_workqueue(rdev->wq); 407 414 } else { 408 415 DRM_ERROR("invalid power method!\n"); 409 416 goto fail; ··· 425 418 426 419 void radeon_pm_suspend(struct radeon_device *rdev) 427 420 { 421 + bool flush_wq = false; 422 + 428 423 mutex_lock(&rdev->pm.mutex); 429 - cancel_delayed_work(&rdev->pm.dynpm_idle_work); 424 + if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 425 + cancel_delayed_work(&rdev->pm.dynpm_idle_work); 426 + if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) 427 + rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; 428 + flush_wq = true; 429 + } 430 430 mutex_unlock(&rdev->pm.mutex); 431 + if (flush_wq) 432 + flush_workqueue(rdev->wq); 431 433 } 432 434 433 435 void radeon_pm_resume(struct radeon_device *rdev) ··· 448 432 rdev->pm.current_sclk = rdev->clock.default_sclk; 449 433 rdev->pm.current_mclk = rdev->clock.default_mclk; 450 434 rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; 435 + if (rdev->pm.pm_method == PM_METHOD_DYNPM 436 + && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { 437 + rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 438 + queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, 439 + msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 440 + } 451 441 
mutex_unlock(&rdev->pm.mutex); 452 442 radeon_pm_compute_clocks(rdev); 453 443 } ··· 508 486 void radeon_pm_fini(struct radeon_device *rdev) 509 487 { 510 488 if (rdev->pm.num_power_states > 1) { 489 + bool flush_wq = false; 490 + 511 491 mutex_lock(&rdev->pm.mutex); 512 492 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 513 493 rdev->pm.profile = PM_PROFILE_DEFAULT; ··· 517 493 radeon_pm_set_clocks(rdev); 518 494 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 519 495 /* cancel work */ 520 - cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 496 + cancel_delayed_work(&rdev->pm.dynpm_idle_work); 497 + flush_wq = true; 521 498 /* reset default clocks */ 522 499 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 523 500 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 524 501 radeon_pm_set_clocks(rdev); 525 502 } 526 503 mutex_unlock(&rdev->pm.mutex); 504 + if (flush_wq) 505 + flush_workqueue(rdev->wq); 527 506 528 507 device_remove_file(rdev->dev, &dev_attr_power_profile); 529 508 device_remove_file(rdev->dev, &dev_attr_power_method); ··· 747 720 radeon_pm_get_dynpm_state(rdev); 748 721 radeon_pm_set_clocks(rdev); 749 722 } 723 + 724 + queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, 725 + msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 750 726 } 751 727 mutex_unlock(&rdev->pm.mutex); 752 728 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 753 - 754 - queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, 755 - msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 756 729 } 757 730 758 731 /*
+5 -5
drivers/gpu/drm/radeon/reg_srcs/evergreen
··· 80 80 0x00028010 DB_RENDER_OVERRIDE2 81 81 0x00028028 DB_STENCIL_CLEAR 82 82 0x0002802C DB_DEPTH_CLEAR 83 - 0x00028034 PA_SC_SCREEN_SCISSOR_BR 84 83 0x00028030 PA_SC_SCREEN_SCISSOR_TL 84 + 0x00028034 PA_SC_SCREEN_SCISSOR_BR 85 85 0x0002805C DB_DEPTH_SLICE 86 86 0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 87 87 0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 ··· 460 460 0x00028844 SQ_PGM_RESOURCES_PS 461 461 0x00028848 SQ_PGM_RESOURCES_2_PS 462 462 0x0002884C SQ_PGM_EXPORTS_PS 463 - 0x0002885C SQ_PGM_RESOURCES_VS 464 - 0x00028860 SQ_PGM_RESOURCES_2_VS 463 + 0x00028860 SQ_PGM_RESOURCES_VS 464 + 0x00028864 SQ_PGM_RESOURCES_2_VS 465 465 0x00028878 SQ_PGM_RESOURCES_GS 466 466 0x0002887C SQ_PGM_RESOURCES_2_GS 467 467 0x00028890 SQ_PGM_RESOURCES_ES ··· 469 469 0x000288A8 SQ_PGM_RESOURCES_FS 470 470 0x000288BC SQ_PGM_RESOURCES_HS 471 471 0x000288C0 SQ_PGM_RESOURCES_2_HS 472 - 0x000288D0 SQ_PGM_RESOURCES_LS 473 - 0x000288D4 SQ_PGM_RESOURCES_2_LS 472 + 0x000288D4 SQ_PGM_RESOURCES_LS 473 + 0x000288D8 SQ_PGM_RESOURCES_2_LS 474 474 0x000288E8 SQ_LDS_ALLOC 475 475 0x000288EC SQ_LDS_ALLOC_PS 476 476 0x000288F0 SQ_VTX_SEMANTIC_CLEAR
+20 -21
drivers/gpu/drm/radeon/rs690.c
··· 79 79 tmp.full = dfixed_const(100); 80 80 rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); 81 81 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); 82 - rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); 82 + if (info->info.usK8MemoryClock) 83 + rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); 84 + else if (rdev->clock.default_mclk) { 85 + rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); 86 + rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); 87 + } else 88 + rdev->pm.igp_system_mclk.full = dfixed_const(400); 83 89 rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock)); 84 90 rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth); 85 91 break; ··· 93 87 tmp.full = dfixed_const(100); 94 88 rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); 95 89 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); 96 - rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); 90 + if (info->info_v2.ulBootUpUMAClock) 91 + rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); 92 + else if (rdev->clock.default_mclk) 93 + rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); 94 + else 95 + rdev->pm.igp_system_mclk.full = dfixed_const(66700); 97 96 rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); 98 97 rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); 99 98 rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); 100 99 rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); 101 100 break; 102 101 default: 103 - tmp.full = dfixed_const(100); 104 102 /* We assume the slower possible clock ie worst case */ 105 - /* DDR 333Mhz */ 106 - 
rdev->pm.igp_sideport_mclk.full = dfixed_const(333); 107 - /* FIXME: system clock ? */ 108 - rdev->pm.igp_system_mclk.full = dfixed_const(100); 109 - rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); 110 - rdev->pm.igp_ht_link_clk.full = dfixed_const(200); 103 + rdev->pm.igp_sideport_mclk.full = dfixed_const(200); 104 + rdev->pm.igp_system_mclk.full = dfixed_const(200); 105 + rdev->pm.igp_ht_link_clk.full = dfixed_const(1000); 111 106 rdev->pm.igp_ht_link_width.full = dfixed_const(8); 112 107 DRM_ERROR("No integrated system info for your GPU, using safe default\n"); 113 108 break; 114 109 } 115 110 } else { 116 - tmp.full = dfixed_const(100); 117 111 /* We assume the slower possible clock ie worst case */ 118 - /* DDR 333Mhz */ 119 - rdev->pm.igp_sideport_mclk.full = dfixed_const(333); 120 - /* FIXME: system clock ? */ 121 - rdev->pm.igp_system_mclk.full = dfixed_const(100); 122 - rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); 123 - rdev->pm.igp_ht_link_clk.full = dfixed_const(200); 112 + rdev->pm.igp_sideport_mclk.full = dfixed_const(200); 113 + rdev->pm.igp_system_mclk.full = dfixed_const(200); 114 + rdev->pm.igp_ht_link_clk.full = dfixed_const(1000); 124 115 rdev->pm.igp_ht_link_width.full = dfixed_const(8); 125 116 DRM_ERROR("No integrated system info for your GPU, using safe default\n"); 126 117 } ··· 231 228 fixed20_12 a, b, c; 232 229 fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; 233 230 fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; 234 - /* FIXME: detect IGP with sideport memory, i don't think there is any 235 - * such product available 236 - */ 237 - bool sideport = false; 238 231 239 232 if (!crtc->base.enabled) { 240 233 /* FIXME: wouldn't it better to set priority mark to maximum */ ··· 299 300 300 301 /* Maximun bandwidth is the minimun bandwidth of all component */ 301 302 rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; 302 - if (sideport) { 303 
+ if (rdev->mc.igp_sideport_enabled) { 303 304 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && 304 305 rdev->pm.sideport_bandwidth.full) 305 306 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
+1 -1
drivers/gpu/drm/radeon/rv770.c
··· 224 224 WREG32(MC_VM_FB_LOCATION, tmp); 225 225 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); 226 226 WREG32(HDP_NONSURFACE_INFO, (2 << 7)); 227 - WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); 227 + WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); 228 228 if (rdev->flags & RADEON_IS_AGP) { 229 229 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); 230 230 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
+1 -1
drivers/gpu/drm/ttm/ttm_page_alloc.c
··· 667 667 { 668 668 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); 669 669 struct page *p = NULL; 670 - int gfp_flags = 0; 670 + int gfp_flags = GFP_USER; 671 671 int r; 672 672 673 673 /* set zero flag for page allocation if required */
+3 -9
drivers/power/z2_battery.c
··· 9 9 * 10 10 */ 11 11 12 - #include <linux/init.h> 13 - #include <linux/kernel.h> 14 12 #include <linux/module.h> 15 - #include <linux/platform_device.h> 16 - #include <linux/power_supply.h> 17 - #include <linux/i2c.h> 18 - #include <linux/spinlock.h> 19 - #include <linux/interrupt.h> 20 13 #include <linux/gpio.h> 14 + #include <linux/i2c.h> 21 15 #include <linux/interrupt.h> 22 16 #include <linux/irq.h> 23 - #include <asm/irq.h> 24 - #include <asm/mach/irq.h> 17 + #include <linux/power_supply.h> 18 + #include <linux/slab.h> 25 19 #include <linux/z2_battery.h> 26 20 27 21 #define Z2_DEFAULT_NAME "Z2"
+2 -2
drivers/rtc/rtc-ds1307.c
··· 777 777 778 778 read_rtc: 779 779 /* read RTC registers */ 780 - tmp = ds1307->read_block_data(ds1307->client, 0, 8, buf); 780 + tmp = ds1307->read_block_data(ds1307->client, ds1307->offset, 8, buf); 781 781 if (tmp != 8) { 782 782 pr_debug("read error %d\n", tmp); 783 783 err = -EIO; ··· 862 862 if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM) 863 863 tmp += 12; 864 864 i2c_smbus_write_byte_data(client, 865 - DS1307_REG_HOUR, 865 + ds1307->offset + DS1307_REG_HOUR, 866 866 bin2bcd(tmp)); 867 867 } 868 868
+79 -64
drivers/serial/cpm_uart/cpm_uart_core.c
··· 930 930 } 931 931 } 932 932 933 + #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_CPM_CONSOLE) 934 + /* 935 + * Write a string to the serial port 936 + * Note that this is called with interrupts already disabled 937 + */ 938 + static void cpm_uart_early_write(struct uart_cpm_port *pinfo, 939 + const char *string, u_int count) 940 + { 941 + unsigned int i; 942 + cbd_t __iomem *bdp, *bdbase; 943 + unsigned char *cpm_outp_addr; 944 + 945 + /* Get the address of the host memory buffer. 946 + */ 947 + bdp = pinfo->tx_cur; 948 + bdbase = pinfo->tx_bd_base; 949 + 950 + /* 951 + * Now, do each character. This is not as bad as it looks 952 + * since this is a holding FIFO and not a transmitting FIFO. 953 + * We could add the complexity of filling the entire transmit 954 + * buffer, but we would just wait longer between accesses...... 955 + */ 956 + for (i = 0; i < count; i++, string++) { 957 + /* Wait for transmitter fifo to empty. 958 + * Ready indicates output is ready, and xmt is doing 959 + * that, not that it is ready for us to send. 960 + */ 961 + while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) 962 + ; 963 + 964 + /* Send the character out. 965 + * If the buffer address is in the CPM DPRAM, don't 966 + * convert it. 967 + */ 968 + cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), 969 + pinfo); 970 + *cpm_outp_addr = *string; 971 + 972 + out_be16(&bdp->cbd_datlen, 1); 973 + setbits16(&bdp->cbd_sc, BD_SC_READY); 974 + 975 + if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) 976 + bdp = bdbase; 977 + else 978 + bdp++; 979 + 980 + /* if a LF, also do CR... 
*/ 981 + if (*string == 10) { 982 + while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) 983 + ; 984 + 985 + cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), 986 + pinfo); 987 + *cpm_outp_addr = 13; 988 + 989 + out_be16(&bdp->cbd_datlen, 1); 990 + setbits16(&bdp->cbd_sc, BD_SC_READY); 991 + 992 + if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) 993 + bdp = bdbase; 994 + else 995 + bdp++; 996 + } 997 + } 998 + 999 + /* 1000 + * Finally, Wait for transmitter & holding register to empty 1001 + * and restore the IER 1002 + */ 1003 + while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) 1004 + ; 1005 + 1006 + pinfo->tx_cur = bdp; 1007 + } 1008 + #endif 1009 + 933 1010 #ifdef CONFIG_CONSOLE_POLL 934 1011 /* Serial polling routines for writing and reading from the uart while 935 1012 * in an interrupt or debug context. ··· 1076 999 static char ch[2]; 1077 1000 1078 1001 ch[0] = (char)c; 1079 - cpm_uart_early_write(pinfo->port.line, ch, 1); 1002 + cpm_uart_early_write(pinfo, ch, 1); 1080 1003 } 1081 1004 #endif /* CONFIG_CONSOLE_POLL */ 1082 1005 ··· 1207 1130 u_int count) 1208 1131 { 1209 1132 struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index]; 1210 - unsigned int i; 1211 - cbd_t __iomem *bdp, *bdbase; 1212 - unsigned char *cp; 1213 1133 unsigned long flags; 1214 1134 int nolock = oops_in_progress; 1215 1135 ··· 1216 1142 spin_lock_irqsave(&pinfo->port.lock, flags); 1217 1143 } 1218 1144 1219 - /* Get the address of the host memory buffer. 1220 - */ 1221 - bdp = pinfo->tx_cur; 1222 - bdbase = pinfo->tx_bd_base; 1223 - 1224 - /* 1225 - * Now, do each character. This is not as bad as it looks 1226 - * since this is a holding FIFO and not a transmitting FIFO. 1227 - * We could add the complexity of filling the entire transmit 1228 - * buffer, but we would just wait longer between accesses...... 1229 - */ 1230 - for (i = 0; i < count; i++, s++) { 1231 - /* Wait for transmitter fifo to empty. 
1232 - * Ready indicates output is ready, and xmt is doing 1233 - * that, not that it is ready for us to send. 1234 - */ 1235 - while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) 1236 - ; 1237 - 1238 - /* Send the character out. 1239 - * If the buffer address is in the CPM DPRAM, don't 1240 - * convert it. 1241 - */ 1242 - cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo); 1243 - *cp = *s; 1244 - 1245 - out_be16(&bdp->cbd_datlen, 1); 1246 - setbits16(&bdp->cbd_sc, BD_SC_READY); 1247 - 1248 - if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) 1249 - bdp = bdbase; 1250 - else 1251 - bdp++; 1252 - 1253 - /* if a LF, also do CR... */ 1254 - if (*s == 10) { 1255 - while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) 1256 - ; 1257 - 1258 - cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo); 1259 - *cp = 13; 1260 - 1261 - out_be16(&bdp->cbd_datlen, 1); 1262 - setbits16(&bdp->cbd_sc, BD_SC_READY); 1263 - 1264 - if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) 1265 - bdp = bdbase; 1266 - else 1267 - bdp++; 1268 - } 1269 - } 1270 - 1271 - /* 1272 - * Finally, Wait for transmitter & holding register to empty 1273 - * and restore the IER 1274 - */ 1275 - while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) 1276 - ; 1277 - 1278 - pinfo->tx_cur = bdp; 1145 + cpm_uart_early_write(pinfo, s, count); 1279 1146 1280 1147 if (unlikely(nolock)) { 1281 1148 local_irq_restore(flags);
+12 -12
drivers/staging/batman-adv/bat_sysfs.c
··· 225 225 NULL, 226 226 }; 227 227 228 - static ssize_t transtable_local_read(struct kobject *kobj, 229 - struct bin_attribute *bin_attr, 230 - char *buff, loff_t off, size_t count) 228 + static ssize_t transtable_local_read(struct file *filp, struct kobject *kobj, 229 + struct bin_attribute *bin_attr, 230 + char *buff, loff_t off, size_t count) 231 231 { 232 232 struct device *dev = to_dev(kobj->parent); 233 233 struct net_device *net_dev = to_net_dev(dev); ··· 235 235 return hna_local_fill_buffer_text(net_dev, buff, count, off); 236 236 } 237 237 238 - static ssize_t transtable_global_read(struct kobject *kobj, 239 - struct bin_attribute *bin_attr, 240 - char *buff, loff_t off, size_t count) 238 + static ssize_t transtable_global_read(struct file *filp, struct kobject *kobj, 239 + struct bin_attribute *bin_attr, 240 + char *buff, loff_t off, size_t count) 241 241 { 242 242 struct device *dev = to_dev(kobj->parent); 243 243 struct net_device *net_dev = to_net_dev(dev); ··· 245 245 return hna_global_fill_buffer_text(net_dev, buff, count, off); 246 246 } 247 247 248 - static ssize_t originators_read(struct kobject *kobj, 249 - struct bin_attribute *bin_attr, 250 - char *buff, loff_t off, size_t count) 248 + static ssize_t originators_read(struct file *filp, struct kobject *kobj, 249 + struct bin_attribute *bin_attr, 250 + char *buff, loff_t off, size_t count) 251 251 { 252 252 struct device *dev = to_dev(kobj->parent); 253 253 struct net_device *net_dev = to_net_dev(dev); ··· 255 255 return orig_fill_buffer_text(net_dev, buff, count, off); 256 256 } 257 257 258 - static ssize_t vis_data_read(struct kobject *kobj, 259 - struct bin_attribute *bin_attr, 260 - char *buff, loff_t off, size_t count) 258 + static ssize_t vis_data_read(struct file *filp, struct kobject *kobj, 259 + struct bin_attribute *bin_attr, 260 + char *buff, loff_t off, size_t count) 261 261 { 262 262 struct device *dev = to_dev(kobj->parent); 263 263 struct net_device *net_dev = to_net_dev(dev);
+1 -1
drivers/staging/batman-adv/device.c
··· 196 196 kfree(device_packet); 197 197 198 198 if (error) 199 - return error; 199 + return -EFAULT; 200 200 201 201 return sizeof(struct icmp_packet); 202 202 }
+6 -3
drivers/staging/comedi/drivers/adl_pci9111.c
··· 824 824 plx9050_interrupt_control(dev_private->lcr_io_base, true, true, 825 825 false, true, true); 826 826 827 - dev_private->scan_delay = 828 - (async_cmd->scan_begin_arg / (async_cmd->convert_arg * 829 - async_cmd->chanlist_len)) - 1; 827 + if (async_cmd->scan_begin_src == TRIG_TIMER) { 828 + dev_private->scan_delay = 829 + (async_cmd->scan_begin_arg / 830 + (async_cmd->convert_arg * 831 + async_cmd->chanlist_len)) - 1; 832 + } 830 833 831 834 break; 832 835
+1 -2
drivers/staging/comedi/drivers/cb_pcidda.c
··· 52 52 #include "8255.h" 53 53 54 54 #define PCI_VENDOR_ID_CB 0x1307 /* PCI vendor number of ComputerBoards */ 55 - #define N_BOARDS 10 /* Number of boards in cb_pcidda_boards */ 56 55 #define EEPROM_SIZE 128 /* number of entries in eeprom */ 57 56 #define MAX_AO_CHANNELS 8 /* maximum number of ao channels for supported boards */ 58 57 ··· 306 307 continue; 307 308 } 308 309 } 309 - for (index = 0; index < N_BOARDS; index++) { 310 + for (index = 0; index < ARRAY_SIZE(cb_pcidda_boards); index++) { 310 311 if (cb_pcidda_boards[index].device_id == 311 312 pcidev->device) { 312 313 goto found;
+30 -11
drivers/staging/hv/channel_mgmt.c
··· 23 23 #include <linux/slab.h> 24 24 #include <linux/list.h> 25 25 #include <linux/module.h> 26 + #include <linux/completion.h> 26 27 #include "osd.h" 27 28 #include "logging.h" 28 29 #include "vmbus_private.h" ··· 294 293 Channel); 295 294 } 296 295 296 + 297 + DECLARE_COMPLETION(hv_channel_ready); 298 + 299 + /* 300 + * Count initialized channels, and ensure all channels are ready when hv_vmbus 301 + * module loading completes. 302 + */ 303 + static void count_hv_channel(void) 304 + { 305 + static int counter; 306 + unsigned long flags; 307 + 308 + spin_lock_irqsave(&gVmbusConnection.channel_lock, flags); 309 + if (++counter == MAX_MSG_TYPES) 310 + complete(&hv_channel_ready); 311 + spin_unlock_irqrestore(&gVmbusConnection.channel_lock, flags); 312 + } 313 + 314 + 297 315 /* 298 316 * VmbusChannelProcessOffer - Process the offer by creating a channel/device 299 317 * associated with this offer ··· 393 373 * can cleanup properly 394 374 */ 395 375 newChannel->State = CHANNEL_OPEN_STATE; 396 - cnt = 0; 397 376 398 - while (cnt != MAX_MSG_TYPES) { 377 + /* Open IC channels */ 378 + for (cnt = 0; cnt < MAX_MSG_TYPES; cnt++) { 399 379 if (memcmp(&newChannel->OfferMsg.Offer.InterfaceType, 400 380 &hv_cb_utils[cnt].data, 401 - sizeof(struct hv_guid)) == 0) { 381 + sizeof(struct hv_guid)) == 0 && 382 + VmbusChannelOpen(newChannel, 2 * PAGE_SIZE, 383 + 2 * PAGE_SIZE, NULL, 0, 384 + hv_cb_utils[cnt].callback, 385 + newChannel) == 0) { 386 + hv_cb_utils[cnt].channel = newChannel; 402 387 DPRINT_INFO(VMBUS, "%s", 403 - hv_cb_utils[cnt].log_msg); 404 - 405 - if (VmbusChannelOpen(newChannel, 2 * PAGE_SIZE, 406 - 2 * PAGE_SIZE, NULL, 0, 407 - hv_cb_utils[cnt].callback, 408 - newChannel) == 0) 409 - hv_cb_utils[cnt].channel = newChannel; 388 + hv_cb_utils[cnt].log_msg); 389 + count_hv_channel(); 410 390 } 411 - cnt++; 412 391 } 413 392 } 414 393 DPRINT_EXIT(VMBUS);
+28
drivers/staging/hv/hv_utils.c
··· 24 24 #include <linux/slab.h> 25 25 #include <linux/sysctl.h> 26 26 #include <linux/reboot.h> 27 + #include <linux/dmi.h> 28 + #include <linux/pci.h> 27 29 28 30 #include "logging.h" 29 31 #include "osd.h" ··· 253 251 DPRINT_EXIT(VMBUS); 254 252 } 255 253 254 + static const struct pci_device_id __initconst 255 + hv_utils_pci_table[] __maybe_unused = { 256 + { PCI_DEVICE(0x1414, 0x5353) }, /* Hyper-V emulated VGA controller */ 257 + { 0 } 258 + }; 259 + MODULE_DEVICE_TABLE(pci, hv_utils_pci_table); 260 + 261 + 262 + static const struct dmi_system_id __initconst 263 + hv_utils_dmi_table[] __maybe_unused = { 264 + { 265 + .ident = "Hyper-V", 266 + .matches = { 267 + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), 268 + DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"), 269 + DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"), 270 + }, 271 + }, 272 + { }, 273 + }; 274 + MODULE_DEVICE_TABLE(dmi, hv_utils_dmi_table); 275 + 276 + 256 277 static int __init init_hyperv_utils(void) 257 278 { 258 279 printk(KERN_INFO "Registering HyperV Utility Driver\n"); 280 + 281 + if (!dmi_check_system(hv_utils_dmi_table)) 282 + return -ENODEV; 259 283 260 284 hv_cb_utils[HV_SHUTDOWN_MSG].channel->OnChannelCallback = 261 285 &shutdown_onchannelcallback;
+2
drivers/staging/hv/vmbus.h
··· 74 74 void vmbus_child_driver_unregister(struct driver_context *driver_ctx); 75 75 void vmbus_get_interface(struct vmbus_channel_interface *interface); 76 76 77 + extern struct completion hv_channel_ready; 78 + 77 79 #endif /* _VMBUS_H_ */
+3
drivers/staging/hv/vmbus_drv.c
··· 27 27 #include <linux/pci.h> 28 28 #include <linux/dmi.h> 29 29 #include <linux/slab.h> 30 + #include <linux/completion.h> 30 31 #include "version_info.h" 31 32 #include "osd.h" 32 33 #include "logging.h" ··· 356 355 357 356 358 357 vmbus_drv_obj->GetChannelOffers(); 358 + 359 + wait_for_completion(&hv_channel_ready); 359 360 360 361 cleanup: 361 362 DPRINT_EXIT(VMBUS_DRV);
+1 -1
drivers/staging/mrst-touchscreen/intel-mid-touch.c
··· 817 817 free_irq(mrstouchdevp->irq, mrstouchdevp); 818 818 input_unregister_device(mrstouchdevp->input); 819 819 input_free_device(mrstouchdevp->input); 820 - kfree(mrstouchdevp); 821 820 if (mrstouchdevp->pendet_thrd) 822 821 kthread_stop(mrstouchdevp->pendet_thrd); 822 + kfree(mrstouchdevp); 823 823 return 0; 824 824 } 825 825
+1
drivers/staging/rt2860/usb_main_dev.c
··· 77 77 {USB_DEVICE(0x083A, 0x7522)}, /* Arcadyan */ 78 78 {USB_DEVICE(0x0CDE, 0x0022)}, /* ZCOM */ 79 79 {USB_DEVICE(0x0586, 0x3416)}, /* Zyxel */ 80 + {USB_DEVICE(0x0586, 0x341a)}, /* Zyxel NWD-270N */ 80 81 {USB_DEVICE(0x0CDE, 0x0025)}, /* Zyxel */ 81 82 {USB_DEVICE(0x1740, 0x9701)}, /* EnGenius */ 82 83 {USB_DEVICE(0x1740, 0x9702)}, /* EnGenius */
-2
drivers/staging/rtl8187se/r8180_core.c
··· 66 66 static int hwwep = 0; 67 67 static int channels = 0x3fff; 68 68 69 - #define eqMacAddr(a, b) (((a)[0] == (b)[0] && (a)[1] == (b)[1] && (a)[2] == (b)[2] && (a)[3] == (b)[3] && (a)[4] == (b)[4] && (a)[5] == (b)[5]) ? 1 : 0) 70 - #define cpMacAddr(des, src) ((des)[0] = (src)[0], (des)[1] = (src)[1], (des)[2] = (src)[2], (des)[3] = (src)[3], (des)[4] = (src)[4], (des)[5] = (src)[5]) 71 69 MODULE_LICENSE("GPL"); 72 70 MODULE_DEVICE_TABLE(pci, rtl8180_pci_id_tbl); 73 71 MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
+22 -21
drivers/staging/rtl8192su/r8192U_core.c
··· 112 112 #define CAM_CONTENT_COUNT 8 113 113 114 114 static const struct usb_device_id rtl8192_usb_id_tbl[] = { 115 - /* Realtek */ 116 - {USB_DEVICE(0x0bda, 0x8171)}, 117 - {USB_DEVICE(0x0bda, 0x8192)}, 118 - {USB_DEVICE(0x0bda, 0x8709)}, 119 - /* Corega */ 120 - {USB_DEVICE(0x07aa, 0x0043)}, 121 - /* Belkin */ 122 - {USB_DEVICE(0x050d, 0x805E)}, 123 - {USB_DEVICE(0x050d, 0x815F)}, /* Belkin F5D8053 v6 */ 124 - /* Sitecom */ 125 - {USB_DEVICE(0x0df6, 0x0031)}, 126 - {USB_DEVICE(0x0df6, 0x004b)}, /* WL-349 */ 127 - /* EnGenius */ 128 - {USB_DEVICE(0x1740, 0x9201)}, 129 - /* Dlink */ 130 - {USB_DEVICE(0x2001, 0x3301)}, 131 - /* Zinwell */ 132 - {USB_DEVICE(0x5a57, 0x0290)}, 133 - /* Guillemot */ 134 - {USB_DEVICE(0x06f8, 0xe031)}, 135 - //92SU 115 + {USB_DEVICE(0x0bda, 0x8171)}, /* Realtek */ 136 116 {USB_DEVICE(0x0bda, 0x8172)}, 117 + {USB_DEVICE(0x0bda, 0x8173)}, 118 + {USB_DEVICE(0x0bda, 0x8174)}, 119 + {USB_DEVICE(0x0bda, 0x8712)}, 120 + {USB_DEVICE(0x0bda, 0x8713)}, 121 + {USB_DEVICE(0x07aa, 0x0047)}, 122 + {USB_DEVICE(0x07d1, 0x3303)}, 123 + {USB_DEVICE(0x07d1, 0x3302)}, 124 + {USB_DEVICE(0x07d1, 0x3300)}, 125 + {USB_DEVICE(0x1740, 0x9603)}, 126 + {USB_DEVICE(0x1740, 0x9605)}, 127 + {USB_DEVICE(0x050d, 0x815F)}, 128 + {USB_DEVICE(0x06f8, 0xe031)}, 129 + {USB_DEVICE(0x7392, 0x7611)}, 130 + {USB_DEVICE(0x7392, 0x7612)}, 131 + {USB_DEVICE(0x7392, 0x7622)}, 132 + {USB_DEVICE(0x0DF6, 0x0045)}, 133 + {USB_DEVICE(0x0E66, 0x0015)}, 134 + {USB_DEVICE(0x0E66, 0x0016)}, 135 + {USB_DEVICE(0x0b05, 0x1786)}, 136 + /* these are not in the official list */ 137 + {USB_DEVICE(0x0df6, 0x004b)}, /* WL-349 */ 137 138 {} 138 139 }; 139 140
+2
drivers/staging/rtl8192u/r8192U_core.c
··· 121 121 {USB_DEVICE(0x2001, 0x3301)}, 122 122 /* Zinwell */ 123 123 {USB_DEVICE(0x5a57, 0x0290)}, 124 + /* LG */ 125 + {USB_DEVICE(0x043e, 0x7a01)}, 124 126 {} 125 127 }; 126 128
+33 -13
drivers/staging/usbip/usbip_common.c
··· 378 378 complete_and_exit(&ut->thread_done, 0); 379 379 } 380 380 381 + static void stop_rx_thread(struct usbip_device *ud) 382 + { 383 + if (ud->tcp_rx.thread != NULL) { 384 + send_sig(SIGKILL, ud->tcp_rx.thread, 1); 385 + wait_for_completion(&ud->tcp_rx.thread_done); 386 + usbip_udbg("rx_thread for ud %p has finished\n", ud); 387 + } 388 + } 389 + 390 + static void stop_tx_thread(struct usbip_device *ud) 391 + { 392 + if (ud->tcp_tx.thread != NULL) { 393 + send_sig(SIGKILL, ud->tcp_tx.thread, 1); 394 + wait_for_completion(&ud->tcp_tx.thread_done); 395 + usbip_udbg("tx_thread for ud %p has finished\n", ud); 396 + } 397 + } 398 + 381 399 int usbip_start_threads(struct usbip_device *ud) 382 400 { 383 401 /* 384 402 * threads are invoked per one device (per one connection). 385 403 */ 386 404 struct task_struct *th; 405 + int err = 0; 387 406 388 407 th = kthread_run(usbip_thread, (void *)&ud->tcp_rx, "usbip"); 389 408 if (IS_ERR(th)) { 390 409 printk(KERN_WARNING 391 410 "Unable to start control thread\n"); 392 - return PTR_ERR(th); 411 + err = PTR_ERR(th); 412 + goto ust_exit; 393 413 } 414 + 394 415 th = kthread_run(usbip_thread, (void *)&ud->tcp_tx, "usbip"); 395 416 if (IS_ERR(th)) { 396 417 printk(KERN_WARNING 397 418 "Unable to start control thread\n"); 398 - return PTR_ERR(th); 419 + err = PTR_ERR(th); 420 + goto tx_thread_err; 399 421 } 400 422 401 423 /* confirm threads are starting */ 402 424 wait_for_completion(&ud->tcp_rx.thread_done); 403 425 wait_for_completion(&ud->tcp_tx.thread_done); 426 + 404 427 return 0; 428 + 429 + tx_thread_err: 430 + stop_rx_thread(ud); 431 + 432 + ust_exit: 433 + return err; 405 434 } 406 435 EXPORT_SYMBOL_GPL(usbip_start_threads); 407 436 408 437 void usbip_stop_threads(struct usbip_device *ud) 409 438 { 410 439 /* kill threads related to this sdev, if v.c. 
exists */ 411 - if (ud->tcp_rx.thread != NULL) { 412 - send_sig(SIGKILL, ud->tcp_rx.thread, 1); 413 - wait_for_completion(&ud->tcp_rx.thread_done); 414 - usbip_udbg("rx_thread for ud %p has finished\n", ud); 415 - } 416 - 417 - if (ud->tcp_tx.thread != NULL) { 418 - send_sig(SIGKILL, ud->tcp_tx.thread, 1); 419 - wait_for_completion(&ud->tcp_tx.thread_done); 420 - usbip_udbg("tx_thread for ud %p has finished\n", ud); 421 - } 440 + stop_rx_thread(ud); 441 + stop_tx_thread(ud); 422 442 } 423 443 EXPORT_SYMBOL_GPL(usbip_stop_threads); 424 444
+1
drivers/staging/wlags49_h2/wl_enc.c
··· 62 62 /******************************************************************************* 63 63 * include files 64 64 ******************************************************************************/ 65 + #include <linux/string.h> 65 66 #include <wl_version.h> 66 67 67 68 #include <debug.h>
+2 -2
drivers/staging/wlags49_h2/wl_sysfs.h
··· 2 2 extern void register_wlags_sysfs(struct net_device *); 3 3 extern void unregister_wlags_sysfs(struct net_device *); 4 4 #else 5 - static void register_wlags_sysfs(struct net_device *) { return; }; 6 - static void unregister_wlags_sysfs(struct net_device *) { return; }; 5 + static inline void register_wlags_sysfs(struct net_device *net) { } 6 + static inline void unregister_wlags_sysfs(struct net_device *net) { } 7 7 #endif
+3 -10
drivers/usb/core/driver.c
··· 1272 1272 1273 1273 static void choose_wakeup(struct usb_device *udev, pm_message_t msg) 1274 1274 { 1275 - int w, i; 1276 - struct usb_interface *intf; 1275 + int w; 1277 1276 1278 1277 /* Remote wakeup is needed only when we actually go to sleep. 1279 1278 * For things like FREEZE and QUIESCE, if the device is already ··· 1284 1285 return; 1285 1286 } 1286 1287 1287 - /* If remote wakeup is permitted, see whether any interface drivers 1288 + /* Enable remote wakeup if it is allowed, even if no interface drivers 1288 1289 * actually want it. 1289 1290 */ 1290 - w = 0; 1291 - if (device_may_wakeup(&udev->dev) && udev->actconfig) { 1292 - for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { 1293 - intf = udev->actconfig->interface[i]; 1294 - w |= intf->needs_remote_wakeup; 1295 - } 1296 - } 1291 + w = device_may_wakeup(&udev->dev); 1297 1292 1298 1293 /* If the device is autosuspended with the wrong wakeup setting, 1299 1294 * autoresume now so the setting can be changed.
+5 -2
drivers/usb/core/message.c
··· 416 416 /* A length of zero means transfer the whole sg list */ 417 417 len = length; 418 418 if (len == 0) { 419 - for_each_sg(sg, sg, nents, i) 420 - len += sg->length; 419 + struct scatterlist *sg2; 420 + int j; 421 + 422 + for_each_sg(sg, sg2, nents, j) 423 + len += sg2->length; 421 424 } 422 425 } else { 423 426 /*
+1 -2
drivers/usb/gadget/f_eem.c
··· 469 469 crc = get_unaligned_le32(skb->data + len 470 470 - ETH_FCS_LEN); 471 471 crc2 = ~crc32_le(~0, 472 - skb->data, 473 - skb->len - ETH_FCS_LEN); 472 + skb->data, len - ETH_FCS_LEN); 474 473 } else { 475 474 crc = get_unaligned_be32(skb->data + len 476 475 - ETH_FCS_LEN);
+73 -121
drivers/usb/gadget/f_mass_storage.c
··· 321 321 /* Data shared by all the FSG instances. */ 322 322 struct fsg_common { 323 323 struct usb_gadget *gadget; 324 - struct fsg_dev *fsg; 325 - struct fsg_dev *prev_fsg; 324 + struct fsg_dev *fsg, *new_fsg; 325 + wait_queue_head_t fsg_wait; 326 326 327 327 /* filesem protects: backing files in use */ 328 328 struct rw_semaphore filesem; ··· 351 351 enum fsg_state state; /* For exception handling */ 352 352 unsigned int exception_req_tag; 353 353 354 - u8 config, new_config; 355 354 enum data_direction data_dir; 356 355 u32 data_size; 357 356 u32 data_size_from_cmnd; ··· 594 595 u16 w_value = le16_to_cpu(ctrl->wValue); 595 596 u16 w_length = le16_to_cpu(ctrl->wLength); 596 597 597 - if (!fsg->common->config) 598 + if (!fsg_is_set(fsg->common)) 598 599 return -EOPNOTSUPP; 599 600 600 601 switch (ctrl->bRequest) { ··· 2302 2303 return -ENOMEM; 2303 2304 } 2304 2305 2305 - /* 2306 - * Reset interface setting and re-init endpoint state (toggle etc). 2307 - * Call with altsetting < 0 to disable the interface. The only other 2308 - * available altsetting is 0, which enables the interface. 2309 - */ 2310 - static int do_set_interface(struct fsg_common *common, int altsetting) 2306 + /* Reset interface setting and re-init endpoint state (toggle etc). 
*/ 2307 + static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg) 2311 2308 { 2312 - int rc = 0; 2313 - int i; 2314 - const struct usb_endpoint_descriptor *d; 2309 + const struct usb_endpoint_descriptor *d; 2310 + struct fsg_dev *fsg; 2311 + int i, rc = 0; 2315 2312 2316 2313 if (common->running) 2317 2314 DBG(common, "reset interface\n"); 2318 2315 2319 2316 reset: 2320 2317 /* Deallocate the requests */ 2321 - if (common->prev_fsg) { 2322 - struct fsg_dev *fsg = common->prev_fsg; 2318 + if (common->fsg) { 2319 + fsg = common->fsg; 2323 2320 2324 2321 for (i = 0; i < FSG_NUM_BUFFERS; ++i) { 2325 2322 struct fsg_buffhd *bh = &common->buffhds[i]; ··· 2340 2345 fsg->bulk_out_enabled = 0; 2341 2346 } 2342 2347 2343 - common->prev_fsg = 0; 2348 + common->fsg = NULL; 2349 + wake_up(&common->fsg_wait); 2344 2350 } 2345 2351 2346 2352 common->running = 0; 2347 - if (altsetting < 0 || rc != 0) 2353 + if (!new_fsg || rc) 2348 2354 return rc; 2349 2355 2350 - DBG(common, "set interface %d\n", altsetting); 2356 + common->fsg = new_fsg; 2357 + fsg = common->fsg; 2351 2358 2352 - if (fsg_is_set(common)) { 2353 - struct fsg_dev *fsg = common->fsg; 2354 - common->prev_fsg = common->fsg; 2359 + /* Enable the endpoints */ 2360 + d = fsg_ep_desc(common->gadget, 2361 + &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc); 2362 + rc = enable_endpoint(common, fsg->bulk_in, d); 2363 + if (rc) 2364 + goto reset; 2365 + fsg->bulk_in_enabled = 1; 2355 2366 2356 - /* Enable the endpoints */ 2357 - d = fsg_ep_desc(common->gadget, 2358 - &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc); 2359 - rc = enable_endpoint(common, fsg->bulk_in, d); 2367 + d = fsg_ep_desc(common->gadget, 2368 + &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc); 2369 + rc = enable_endpoint(common, fsg->bulk_out, d); 2370 + if (rc) 2371 + goto reset; 2372 + fsg->bulk_out_enabled = 1; 2373 + common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize); 2374 + clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 2375 + 
2376 + /* Allocate the requests */ 2377 + for (i = 0; i < FSG_NUM_BUFFERS; ++i) { 2378 + struct fsg_buffhd *bh = &common->buffhds[i]; 2379 + 2380 + rc = alloc_request(common, fsg->bulk_in, &bh->inreq); 2360 2381 if (rc) 2361 2382 goto reset; 2362 - fsg->bulk_in_enabled = 1; 2363 - 2364 - d = fsg_ep_desc(common->gadget, 2365 - &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc); 2366 - rc = enable_endpoint(common, fsg->bulk_out, d); 2383 + rc = alloc_request(common, fsg->bulk_out, &bh->outreq); 2367 2384 if (rc) 2368 2385 goto reset; 2369 - fsg->bulk_out_enabled = 1; 2370 - common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize); 2371 - clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 2372 - 2373 - /* Allocate the requests */ 2374 - for (i = 0; i < FSG_NUM_BUFFERS; ++i) { 2375 - struct fsg_buffhd *bh = &common->buffhds[i]; 2376 - 2377 - rc = alloc_request(common, fsg->bulk_in, &bh->inreq); 2378 - if (rc) 2379 - goto reset; 2380 - rc = alloc_request(common, fsg->bulk_out, &bh->outreq); 2381 - if (rc) 2382 - goto reset; 2383 - bh->inreq->buf = bh->outreq->buf = bh->buf; 2384 - bh->inreq->context = bh->outreq->context = bh; 2385 - bh->inreq->complete = bulk_in_complete; 2386 - bh->outreq->complete = bulk_out_complete; 2387 - } 2388 - 2389 - common->running = 1; 2390 - for (i = 0; i < common->nluns; ++i) 2391 - common->luns[i].unit_attention_data = SS_RESET_OCCURRED; 2392 - return rc; 2393 - } else { 2394 - return -EIO; 2395 - } 2396 - } 2397 - 2398 - 2399 - /* 2400 - * Change our operational configuration. This code must agree with the code 2401 - * that returns config descriptors, and with interface altsetting code. 2402 - * 2403 - * It's also responsible for power management interactions. Some 2404 - * configurations might not work with our current power sources. 2405 - * For now we just assume the gadget is always self-powered. 
2406 - */ 2407 - static int do_set_config(struct fsg_common *common, u8 new_config) 2408 - { 2409 - int rc = 0; 2410 - 2411 - /* Disable the single interface */ 2412 - if (common->config != 0) { 2413 - DBG(common, "reset config\n"); 2414 - common->config = 0; 2415 - rc = do_set_interface(common, -1); 2386 + bh->inreq->buf = bh->outreq->buf = bh->buf; 2387 + bh->inreq->context = bh->outreq->context = bh; 2388 + bh->inreq->complete = bulk_in_complete; 2389 + bh->outreq->complete = bulk_out_complete; 2416 2390 } 2417 2391 2418 - /* Enable the interface */ 2419 - if (new_config != 0) { 2420 - common->config = new_config; 2421 - rc = do_set_interface(common, 0); 2422 - if (rc != 0) 2423 - common->config = 0; /* Reset on errors */ 2424 - } 2392 + common->running = 1; 2393 + for (i = 0; i < common->nluns; ++i) 2394 + common->luns[i].unit_attention_data = SS_RESET_OCCURRED; 2425 2395 return rc; 2426 2396 } 2427 2397 ··· 2397 2437 static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) 2398 2438 { 2399 2439 struct fsg_dev *fsg = fsg_from_func(f); 2400 - fsg->common->prev_fsg = fsg->common->fsg; 2401 - fsg->common->fsg = fsg; 2402 - fsg->common->new_config = 1; 2440 + fsg->common->new_fsg = fsg; 2403 2441 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 2404 2442 return 0; 2405 2443 } ··· 2405 2447 static void fsg_disable(struct usb_function *f) 2406 2448 { 2407 2449 struct fsg_dev *fsg = fsg_from_func(f); 2408 - fsg->common->prev_fsg = fsg->common->fsg; 2409 - fsg->common->fsg = fsg; 2410 - fsg->common->new_config = 0; 2450 + fsg->common->new_fsg = NULL; 2411 2451 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 2412 2452 } 2413 2453 ··· 2415 2459 static void handle_exception(struct fsg_common *common) 2416 2460 { 2417 2461 siginfo_t info; 2418 - int sig; 2419 2462 int i; 2420 2463 struct fsg_buffhd *bh; 2421 2464 enum fsg_state old_state; 2422 - u8 new_config; 2423 2465 struct fsg_lun *curlun; 2424 2466 unsigned int exception_req_tag; 2425 - 
int rc; 2426 2467 2427 2468 /* Clear the existing signals. Anything but SIGUSR1 is converted 2428 2469 * into a high-priority EXIT exception. */ 2429 2470 for (;;) { 2430 - sig = dequeue_signal_lock(current, &current->blocked, &info); 2471 + int sig = 2472 + dequeue_signal_lock(current, &current->blocked, &info); 2431 2473 if (!sig) 2432 2474 break; 2433 2475 if (sig != SIGUSR1) { ··· 2436 2482 } 2437 2483 2438 2484 /* Cancel all the pending transfers */ 2439 - if (fsg_is_set(common)) { 2485 + if (likely(common->fsg)) { 2440 2486 for (i = 0; i < FSG_NUM_BUFFERS; ++i) { 2441 2487 bh = &common->buffhds[i]; 2442 2488 if (bh->inreq_busy) ··· 2477 2523 common->next_buffhd_to_fill = &common->buffhds[0]; 2478 2524 common->next_buffhd_to_drain = &common->buffhds[0]; 2479 2525 exception_req_tag = common->exception_req_tag; 2480 - new_config = common->new_config; 2481 2526 old_state = common->state; 2482 2527 2483 2528 if (old_state == FSG_STATE_ABORT_BULK_OUT) ··· 2526 2573 break; 2527 2574 2528 2575 case FSG_STATE_CONFIG_CHANGE: 2529 - rc = do_set_config(common, new_config); 2576 + do_set_interface(common, common->new_fsg); 2530 2577 break; 2531 2578 2532 2579 case FSG_STATE_EXIT: 2533 2580 case FSG_STATE_TERMINATED: 2534 - do_set_config(common, 0); /* Free resources */ 2581 + do_set_interface(common, NULL); /* Free resources */ 2535 2582 spin_lock_irq(&common->lock); 2536 2583 common->state = FSG_STATE_TERMINATED; /* Stop the thread */ 2537 2584 spin_unlock_irq(&common->lock); ··· 2816 2863 goto error_release; 2817 2864 } 2818 2865 init_completion(&common->thread_notifier); 2866 + init_waitqueue_head(&common->fsg_wait); 2819 2867 #undef OR 2820 2868 2821 2869 ··· 2911 2957 static void fsg_unbind(struct usb_configuration *c, struct usb_function *f) 2912 2958 { 2913 2959 struct fsg_dev *fsg = fsg_from_func(f); 2960 + struct fsg_common *common = fsg->common; 2914 2961 2915 2962 DBG(fsg, "unbind\n"); 2916 - fsg_common_put(fsg->common); 2963 + if (fsg->common->fsg == fsg) { 
2964 + fsg->common->new_fsg = NULL; 2965 + raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 2966 + /* FIXME: make interruptible or killable somehow? */ 2967 + wait_event(common->fsg_wait, common->fsg != fsg); 2968 + } 2969 + 2970 + fsg_common_put(common); 2917 2971 usb_free_descriptors(fsg->function.descriptors); 2918 2972 usb_free_descriptors(fsg->function.hs_descriptors); 2919 2973 kfree(fsg); ··· 2932 2970 { 2933 2971 struct fsg_dev *fsg = fsg_from_func(f); 2934 2972 struct usb_gadget *gadget = c->cdev->gadget; 2935 - int rc; 2936 2973 int i; 2937 2974 struct usb_ep *ep; 2938 2975 ··· 2957 2996 ep->driver_data = fsg->common; /* claim the endpoint */ 2958 2997 fsg->bulk_out = ep; 2959 2998 2999 + /* Copy descriptors */ 3000 + f->descriptors = usb_copy_descriptors(fsg_fs_function); 3001 + if (unlikely(!f->descriptors)) 3002 + return -ENOMEM; 3003 + 2960 3004 if (gadget_is_dualspeed(gadget)) { 2961 3005 /* Assume endpoint addresses are the same for both speeds */ 2962 3006 fsg_hs_bulk_in_desc.bEndpointAddress = ··· 2969 3003 fsg_hs_bulk_out_desc.bEndpointAddress = 2970 3004 fsg_fs_bulk_out_desc.bEndpointAddress; 2971 3005 f->hs_descriptors = usb_copy_descriptors(fsg_hs_function); 2972 - if (unlikely(!f->hs_descriptors)) 3006 + if (unlikely(!f->hs_descriptors)) { 3007 + usb_free_descriptors(f->descriptors); 2973 3008 return -ENOMEM; 3009 + } 2974 3010 } 2975 3011 2976 3012 return 0; 2977 3013 2978 3014 autoconf_fail: 2979 3015 ERROR(fsg, "unable to autoconfigure all endpoints\n"); 2980 - rc = -ENOTSUPP; 2981 - return rc; 3016 + return -ENOTSUPP; 2982 3017 } 2983 3018 2984 3019 ··· 3003 3036 3004 3037 fsg->function.name = FSG_DRIVER_DESC; 3005 3038 fsg->function.strings = fsg_strings_array; 3006 - fsg->function.descriptors = usb_copy_descriptors(fsg_fs_function); 3007 - if (unlikely(!fsg->function.descriptors)) { 3008 - rc = -ENOMEM; 3009 - goto error_free_fsg; 3010 - } 3011 3039 fsg->function.bind = fsg_bind; 3012 3040 fsg->function.unbind = fsg_unbind; 3013 
3041 fsg->function.setup = fsg_setup; ··· 3018 3056 3019 3057 rc = usb_add_function(c, &fsg->function); 3020 3058 if (unlikely(rc)) 3021 - goto error_free_all; 3022 - 3023 - fsg_common_get(fsg->common); 3024 - return 0; 3025 - 3026 - error_free_all: 3027 - usb_free_descriptors(fsg->function.descriptors); 3028 - /* fsg_bind() might have copied those; or maybe not? who cares 3029 - * -- free it just in case. */ 3030 - usb_free_descriptors(fsg->function.hs_descriptors); 3031 - error_free_fsg: 3032 - kfree(fsg); 3033 - 3059 + kfree(fsg); 3060 + else 3061 + fsg_common_get(fsg->common); 3034 3062 return rc; 3035 3063 } 3036 3064
+11
drivers/usb/gadget/g_ffs.c
··· 392 392 	if (unlikely(ret < 0)) 393 393 	return ret; 394 394 395 + /* After previous do_configs there may be some invalid 396 + * pointers in c->interface array. This happens every time 397 + * a user space function with fewer interfaces than a user 398 + * space function that was run before the new one is run. The 399 + * composite's set_config() assumes that if there are no more 400 + * than MAX_CONFIG_INTERFACES interfaces in a configuration 401 + * then there is a NULL pointer after the last interface in 402 + * c->interface array. We need to make sure this is true. */ 403 + if (c->next_interface_id < ARRAY_SIZE(c->interface)) 404 + c->interface[c->next_interface_id] = NULL; 405 + 395 406 return 0; 396 407 } 397 408
+16 -16
drivers/usb/gadget/printer.c
··· 82 82 struct printer_dev { 83 83 spinlock_t lock; /* lock this structure */ 84 84 /* lock buffer lists during read/write calls */ 85 - spinlock_t lock_printer_io; 85 + struct mutex lock_printer_io; 86 86 struct usb_gadget *gadget; 87 87 struct usb_request *req; /* for control responses */ 88 88 u8 config; ··· 567 567 568 568 DBG(dev, "printer_read trying to read %d bytes\n", (int)len); 569 569 570 - spin_lock(&dev->lock_printer_io); 570 + mutex_lock(&dev->lock_printer_io); 571 571 spin_lock_irqsave(&dev->lock, flags); 572 572 573 573 /* We will use this flag later to check if a printer reset happened ··· 601 601 * call or not. 602 602 */ 603 603 if (fd->f_flags & (O_NONBLOCK|O_NDELAY)) { 604 - spin_unlock(&dev->lock_printer_io); 604 + mutex_unlock(&dev->lock_printer_io); 605 605 return -EAGAIN; 606 606 } 607 607 ··· 648 648 if (dev->reset_printer) { 649 649 list_add(&current_rx_req->list, &dev->rx_reqs); 650 650 spin_unlock_irqrestore(&dev->lock, flags); 651 - spin_unlock(&dev->lock_printer_io); 651 + mutex_unlock(&dev->lock_printer_io); 652 652 return -EAGAIN; 653 653 } 654 654 ··· 673 673 dev->current_rx_buf = current_rx_buf; 674 674 675 675 spin_unlock_irqrestore(&dev->lock, flags); 676 - spin_unlock(&dev->lock_printer_io); 676 + mutex_unlock(&dev->lock_printer_io); 677 677 678 678 DBG(dev, "printer_read returned %d bytes\n", (int)bytes_copied); 679 679 ··· 697 697 if (len == 0) 698 698 return -EINVAL; 699 699 700 - spin_lock(&dev->lock_printer_io); 700 + mutex_lock(&dev->lock_printer_io); 701 701 spin_lock_irqsave(&dev->lock, flags); 702 702 703 703 /* Check if a printer reset happens while we have interrupts on */ ··· 713 713 * a NON-Blocking call or not. 
714 714 */ 715 715 if (fd->f_flags & (O_NONBLOCK|O_NDELAY)) { 716 - spin_unlock(&dev->lock_printer_io); 716 + mutex_unlock(&dev->lock_printer_io); 717 717 return -EAGAIN; 718 718 } 719 719 ··· 752 752 753 753 if (copy_from_user(req->buf, buf, size)) { 754 754 list_add(&req->list, &dev->tx_reqs); 755 - spin_unlock(&dev->lock_printer_io); 755 + mutex_unlock(&dev->lock_printer_io); 756 756 return bytes_copied; 757 757 } 758 758 ··· 766 766 if (dev->reset_printer) { 767 767 list_add(&req->list, &dev->tx_reqs); 768 768 spin_unlock_irqrestore(&dev->lock, flags); 769 - spin_unlock(&dev->lock_printer_io); 769 + mutex_unlock(&dev->lock_printer_io); 770 770 return -EAGAIN; 771 771 } 772 772 773 773 if (usb_ep_queue(dev->in_ep, req, GFP_ATOMIC)) { 774 774 list_add(&req->list, &dev->tx_reqs); 775 775 spin_unlock_irqrestore(&dev->lock, flags); 776 - spin_unlock(&dev->lock_printer_io); 776 + mutex_unlock(&dev->lock_printer_io); 777 777 return -EAGAIN; 778 778 } 779 779 ··· 782 782 } 783 783 784 784 spin_unlock_irqrestore(&dev->lock, flags); 785 - spin_unlock(&dev->lock_printer_io); 785 + mutex_unlock(&dev->lock_printer_io); 786 786 787 787 DBG(dev, "printer_write sent %d bytes\n", (int)bytes_copied); 788 788 ··· 820 820 unsigned long flags; 821 821 int status = 0; 822 822 823 - spin_lock(&dev->lock_printer_io); 823 + mutex_lock(&dev->lock_printer_io); 824 824 spin_lock_irqsave(&dev->lock, flags); 825 825 setup_rx_reqs(dev); 826 826 spin_unlock_irqrestore(&dev->lock, flags); 827 - spin_unlock(&dev->lock_printer_io); 827 + mutex_unlock(&dev->lock_printer_io); 828 828 829 829 poll_wait(fd, &dev->rx_wait, wait); 830 830 poll_wait(fd, &dev->tx_wait, wait); ··· 1461 1461 } 1462 1462 1463 1463 spin_lock_init(&dev->lock); 1464 - spin_lock_init(&dev->lock_printer_io); 1464 + mutex_init(&dev->lock_printer_io); 1465 1465 INIT_LIST_HEAD(&dev->tx_reqs); 1466 1466 INIT_LIST_HEAD(&dev->tx_reqs_active); 1467 1467 INIT_LIST_HEAD(&dev->rx_reqs); ··· 1594 1594 { 1595 1595 int status; 1596 1596 
1597 - spin_lock(&usb_printer_gadget.lock_printer_io); 1597 + mutex_lock(&usb_printer_gadget.lock_printer_io); 1598 1598 class_destroy(usb_gadget_class); 1599 1599 unregister_chrdev_region(g_printer_devno, 2); 1600 1600 ··· 1602 1602 if (status) 1603 1603 ERROR(dev, "usb_gadget_unregister_driver %x\n", status); 1604 1604 1605 - spin_unlock(&usb_printer_gadget.lock_printer_io); 1605 + mutex_unlock(&usb_printer_gadget.lock_printer_io); 1606 1606 } 1607 1607 module_exit(cleanup);
+5 -1
drivers/usb/gadget/s3c2410_udc.c
··· 1700 1700 if (!driver || driver != udc->driver || !driver->unbind) 1701 1701 return -EINVAL; 1702 1702 1703 - dprintk(DEBUG_NORMAL,"usb_gadget_register_driver() '%s'\n", 1703 + dprintk(DEBUG_NORMAL, "usb_gadget_unregister_driver() '%s'\n", 1704 1704 driver->driver.name); 1705 + 1706 + /* report disconnect */ 1707 + if (driver->disconnect) 1708 + driver->disconnect(&udc->gadget); 1705 1709 1706 1710 driver->unbind(&udc->gadget); 1707 1711
+3 -13
drivers/usb/gadget/u_serial.c
··· 536 536 list_move(&req->list, &port->read_pool); 537 537 } 538 538 539 - /* Push from tty to ldisc; this is immediate with low_latency, and 540 - * may trigger callbacks to this driver ... so drop the spinlock. 539 + /* Push from tty to ldisc; without low_latency set this is handled by 540 + * a workqueue, so we won't get callbacks and can hold port_lock 541 541 */ 542 542 if (tty && do_push) { 543 - spin_unlock_irq(&port->port_lock); 544 543 tty_flip_buffer_push(tty); 545 - wake_up_interruptible(&tty->read_wait); 546 - spin_lock_irq(&port->port_lock); 547 - 548 - /* tty may have been closed */ 549 - tty = port->port_tty; 550 544 } 551 545 552 546 ··· 777 783 778 784 port->open_count = 1; 779 785 port->openclose = false; 780 - 781 - /* low_latency means ldiscs work in tasklet context, without 782 - * needing a workqueue schedule ... easier to keep up. 783 - */ 784 - tty->low_latency = 1; 785 786 786 787 /* if connected, start the I/O stream */ 787 788 if (port->port_usb) { ··· 1184 1195 n_ports = 0; 1185 1196 1186 1197 tty_unregister_driver(gs_tty_driver); 1198 + put_tty_driver(gs_tty_driver); 1187 1199 gs_tty_driver = NULL; 1188 1200 1189 1201 pr_debug("%s: cleaned up ttyGS* support\n", __func__);
+10 -3
drivers/usb/host/ehci-mxc.c
··· 207 207 /* Initialize the transceiver */ 208 208 if (pdata->otg) { 209 209 pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET; 210 - if (otg_init(pdata->otg) != 0) 211 - dev_err(dev, "unable to init transceiver\n"); 212 - else if (otg_set_vbus(pdata->otg, 1) != 0) 210 + ret = otg_init(pdata->otg); 211 + if (ret) { 212 + dev_err(dev, "unable to init transceiver, probably missing\n"); 213 + ret = -ENODEV; 214 + goto err_add; 215 + } 216 + ret = otg_set_vbus(pdata->otg, 1); 217 + if (ret) { 213 218 dev_err(dev, "unable to enable vbus on transceiver\n"); 219 + goto err_add; 220 + } 214 221 } 215 222 216 223 priv->hcd = hcd;
+10 -5
drivers/usb/host/isp1362-hcd.c
··· 2224 2224 2225 2225 /*-------------------------------------------------------------------------*/ 2226 2226 2227 - static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) 2227 + static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) 2228 2228 { 2229 2229 int tmp = 20; 2230 - unsigned long flags; 2231 - 2232 - spin_lock_irqsave(&isp1362_hcd->lock, flags); 2233 2230 2234 2231 isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC); 2235 2232 isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR); ··· 2237 2240 } 2238 2241 if (!tmp) 2239 2242 pr_err("Software reset timeout\n"); 2243 + } 2244 + 2245 + static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) 2246 + { 2247 + unsigned long flags; 2248 + 2249 + spin_lock_irqsave(&isp1362_hcd->lock, flags); 2250 + __isp1362_sw_reset(isp1362_hcd); 2240 2251 spin_unlock_irqrestore(&isp1362_hcd->lock, flags); 2241 2252 } 2242 2253 ··· 2423 2418 if (isp1362_hcd->board && isp1362_hcd->board->reset) 2424 2419 isp1362_hcd->board->reset(hcd->self.controller, 1); 2425 2420 else 2426 - isp1362_sw_reset(isp1362_hcd); 2421 + __isp1362_sw_reset(isp1362_hcd); 2427 2422 2428 2423 if (isp1362_hcd->board && isp1362_hcd->board->clock) 2429 2424 isp1362_hcd->board->clock(hcd->self.controller, 0);
+1 -1
drivers/usb/host/r8a66597-hcd.c
··· 1065 1065 else if (speed == LSMODE) 1066 1066 rh->port |= USB_PORT_STAT_LOW_SPEED; 1067 1067 1068 - rh->port &= USB_PORT_STAT_RESET; 1068 + rh->port &= ~USB_PORT_STAT_RESET; 1069 1069 rh->port |= USB_PORT_STAT_ENABLE; 1070 1070 } 1071 1071
+46 -16
drivers/usb/host/xhci-ring.c
··· 182 182 * set, but other sections talk about dealing with the chain bit set. This was 183 183 * fixed in the 0.96 specification errata, but we have to assume that all 0.95 184 184 * xHCI hardware can't handle the chain bit being cleared on a link TRB. 185 + * 186 + * @more_trbs_coming: Will you enqueue more TRBs before calling 187 + * prepare_transfer()? 185 188 */ 186 - static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) 189 + static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, 190 + bool consumer, bool more_trbs_coming) 187 191 { 188 192 u32 chain; 189 193 union xhci_trb *next; ··· 203 199 while (last_trb(xhci, ring, ring->enq_seg, next)) { 204 200 if (!consumer) { 205 201 if (ring != xhci->event_ring) { 206 - if (chain) { 207 - next->link.control |= TRB_CHAIN; 208 - 209 - /* Give this link TRB to the hardware */ 210 - wmb(); 211 - next->link.control ^= TRB_CYCLE; 212 - } else { 202 + /* 203 + * If the caller doesn't plan on enqueueing more 204 + * TDs before ringing the doorbell, then we 205 + * don't want to give the link TRB to the 206 + * hardware just yet. We'll give the link TRB 207 + * back in prepare_ring() just before we enqueue 208 + * the TD at the top of the ring. 209 + */ 210 + if (!chain && !more_trbs_coming) 213 211 break; 212 + 213 + /* If we're not dealing with 0.95 hardware, 214 + * carry over the chain bit of the previous TRB 215 + * (which may mean the chain bit is cleared). 216 + */ 217 + if (!xhci_link_trb_quirk(xhci)) { 218 + next->link.control &= ~TRB_CHAIN; 219 + next->link.control |= chain; 214 220 } 221 + /* Give this link TRB to the hardware */ 222 + wmb(); 223 + next->link.control ^= TRB_CYCLE; 215 224 } 216 225 /* Toggle the cycle bit after the last ring segment. */ 217 226 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { ··· 1724 1707 /* 1725 1708 * Generic function for queueing a TRB on a ring. 
1726 1709 * The caller must have checked to make sure there's room on the ring. 1710 + * 1711 + * @more_trbs_coming: Will you enqueue more TRBs before calling 1712 + * prepare_transfer()? 1727 1713 */ 1728 1714 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, 1729 - bool consumer, 1715 + bool consumer, bool more_trbs_coming, 1730 1716 u32 field1, u32 field2, u32 field3, u32 field4) 1731 1717 { 1732 1718 struct xhci_generic_trb *trb; ··· 1739 1719 trb->field[1] = field2; 1740 1720 trb->field[2] = field3; 1741 1721 trb->field[3] = field4; 1742 - inc_enq(xhci, ring, consumer); 1722 + inc_enq(xhci, ring, consumer, more_trbs_coming); 1743 1723 } 1744 1724 1745 1725 /* ··· 2008 1988 int trb_buff_len, this_sg_len, running_total; 2009 1989 bool first_trb; 2010 1990 u64 addr; 1991 + bool more_trbs_coming; 2011 1992 2012 1993 struct xhci_generic_trb *start_trb; 2013 1994 int start_cycle; ··· 2094 2073 length_field = TRB_LEN(trb_buff_len) | 2095 2074 remainder | 2096 2075 TRB_INTR_TARGET(0); 2097 - queue_trb(xhci, ep_ring, false, 2076 + if (num_trbs > 1) 2077 + more_trbs_coming = true; 2078 + else 2079 + more_trbs_coming = false; 2080 + queue_trb(xhci, ep_ring, false, more_trbs_coming, 2098 2081 lower_32_bits(addr), 2099 2082 upper_32_bits(addr), 2100 2083 length_field, ··· 2149 2124 int num_trbs; 2150 2125 struct xhci_generic_trb *start_trb; 2151 2126 bool first_trb; 2127 + bool more_trbs_coming; 2152 2128 int start_cycle; 2153 2129 u32 field, length_field; 2154 2130 ··· 2238 2212 length_field = TRB_LEN(trb_buff_len) | 2239 2213 remainder | 2240 2214 TRB_INTR_TARGET(0); 2241 - queue_trb(xhci, ep_ring, false, 2215 + if (num_trbs > 1) 2216 + more_trbs_coming = true; 2217 + else 2218 + more_trbs_coming = false; 2219 + queue_trb(xhci, ep_ring, false, more_trbs_coming, 2242 2220 lower_32_bits(addr), 2243 2221 upper_32_bits(addr), 2244 2222 length_field, ··· 2321 2291 /* Queue setup TRB - see section 6.4.1.2.1 */ 2322 2292 /* FIXME better way to translate 
setup_packet into two u32 fields? */ 2323 2293 setup = (struct usb_ctrlrequest *) urb->setup_packet; 2324 - queue_trb(xhci, ep_ring, false, 2294 + queue_trb(xhci, ep_ring, false, true, 2325 2295 /* FIXME endianness is probably going to bite my ass here. */ 2326 2296 setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16, 2327 2297 setup->wIndex | setup->wLength << 16, ··· 2337 2307 if (urb->transfer_buffer_length > 0) { 2338 2308 if (setup->bRequestType & USB_DIR_IN) 2339 2309 field |= TRB_DIR_IN; 2340 - queue_trb(xhci, ep_ring, false, 2310 + queue_trb(xhci, ep_ring, false, true, 2341 2311 lower_32_bits(urb->transfer_dma), 2342 2312 upper_32_bits(urb->transfer_dma), 2343 2313 length_field, ··· 2354 2324 field = 0; 2355 2325 else 2356 2326 field = TRB_DIR_IN; 2357 - queue_trb(xhci, ep_ring, false, 2327 + queue_trb(xhci, ep_ring, false, false, 2358 2328 0, 2359 2329 0, 2360 2330 TRB_INTR_TARGET(0), ··· 2391 2361 "unfailable commands failed.\n"); 2392 2362 return -ENOMEM; 2393 2363 } 2394 - queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, 2364 + queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3, 2395 2365 field4 | xhci->cmd_ring->cycle_state); 2396 2366 return 0; 2397 2367 }
+5 -8
drivers/usb/musb/musb_core.c
··· 219 219 return 0; 220 220 } 221 221 #else 222 - #define musb_ulpi_read(a, b) NULL 223 - #define musb_ulpi_write(a, b, c) NULL 222 + #define musb_ulpi_read NULL 223 + #define musb_ulpi_write NULL 224 224 #endif 225 225 226 226 static struct otg_io_access_ops musb_ulpi_access = { ··· 451 451 * @param power 452 452 */ 453 453 454 - #define STAGE0_MASK (MUSB_INTR_RESUME | MUSB_INTR_SESSREQ \ 455 - | MUSB_INTR_VBUSERROR | MUSB_INTR_CONNECT \ 456 - | MUSB_INTR_RESET) 457 - 458 454 static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, 459 455 u8 devctl, u8 power) 460 456 { ··· 638 642 handled = IRQ_HANDLED; 639 643 } 640 644 641 - 645 + #endif 642 646 if (int_usb & MUSB_INTR_SUSPEND) { 643 647 DBG(1, "SUSPEND (%s) devctl %02x power %02x\n", 644 648 otg_state_string(musb), devctl, power); ··· 701 705 } 702 706 } 703 707 708 + #ifdef CONFIG_USB_MUSB_HDRC_HCD 704 709 if (int_usb & MUSB_INTR_CONNECT) { 705 710 struct usb_hcd *hcd = musb_to_hcd(musb); 706 711 void __iomem *mbase = musb->mregs; ··· 1594 1597 /* the core can interrupt us for multiple reasons; docs have 1595 1598 * a generic interrupt flowchart to follow 1596 1599 */ 1597 - if (musb->int_usb & STAGE0_MASK) 1600 + if (musb->int_usb) 1598 1601 retval |= musb_stage0_irq(musb, musb->int_usb, 1599 1602 devctl, power); 1600 1603
+4 -2
drivers/usb/musb/musb_core.h
··· 470 470 471 471 struct musb_context_registers { 472 472 473 - #ifdef CONFIG_PM 473 + #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ 474 + defined(CONFIG_ARCH_OMAP4) 474 475 u32 otg_sysconfig, otg_forcestandby; 475 476 #endif 476 477 u8 power; ··· 485 484 struct musb_csr_regs index_regs[MUSB_C_NUM_EPS]; 486 485 }; 487 486 488 - #ifdef CONFIG_PM 487 + #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ 488 + defined(CONFIG_ARCH_OMAP4) 489 489 extern void musb_platform_save_context(struct musb *musb, 490 490 struct musb_context_registers *musb_context); 491 491 extern void musb_platform_restore_context(struct musb *musb,
+2 -11
drivers/usb/musb/musbhsdma.c
··· 132 132 if (mode) { 133 133 csr |= 1 << MUSB_HSDMA_MODE1_SHIFT; 134 134 BUG_ON(len < packet_sz); 135 - 136 - if (packet_sz >= 64) { 137 - csr |= MUSB_HSDMA_BURSTMODE_INCR16 138 - << MUSB_HSDMA_BURSTMODE_SHIFT; 139 - } else if (packet_sz >= 32) { 140 - csr |= MUSB_HSDMA_BURSTMODE_INCR8 141 - << MUSB_HSDMA_BURSTMODE_SHIFT; 142 - } else if (packet_sz >= 16) { 143 - csr |= MUSB_HSDMA_BURSTMODE_INCR4 144 - << MUSB_HSDMA_BURSTMODE_SHIFT; 145 - } 146 135 } 136 + csr |= MUSB_HSDMA_BURSTMODE_INCR16 137 + << MUSB_HSDMA_BURSTMODE_SHIFT; 147 138 148 139 csr |= (musb_channel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT) 149 140 | (1 << MUSB_HSDMA_ENABLE_SHIFT)
+10 -5
drivers/usb/otg/ulpi.c
··· 59 59 60 60 static int ulpi_init(struct otg_transceiver *otg) 61 61 { 62 - int i, vid, pid; 62 + int i, vid, pid, ret; 63 + u32 ulpi_id = 0; 63 64 64 - vid = (otg_io_read(otg, ULPI_VENDOR_ID_HIGH) << 8) | 65 - otg_io_read(otg, ULPI_VENDOR_ID_LOW); 66 - pid = (otg_io_read(otg, ULPI_PRODUCT_ID_HIGH) << 8) | 67 - otg_io_read(otg, ULPI_PRODUCT_ID_LOW); 65 + for (i = 0; i < 4; i++) { 66 + ret = otg_io_read(otg, ULPI_PRODUCT_ID_HIGH - i); 67 + if (ret < 0) 68 + return ret; 69 + ulpi_id = (ulpi_id << 8) | ret; 70 + } 71 + vid = ulpi_id & 0xffff; 72 + pid = ulpi_id >> 16; 68 73 69 74 pr_info("ULPI transceiver vendor/product ID 0x%04x/0x%04x\n", vid, pid); 70 75
-1
drivers/usb/serial/ftdi_sio.c
··· 653 653 { USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) }, 654 654 { USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) }, 655 655 { USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) }, 656 - { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, 657 656 { USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) }, 658 657 { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) }, 659 658 { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) },
-7
drivers/usb/serial/ftdi_sio_ids.h
··· 501 501 #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */ 502 502 503 503 /* 504 - * Contec products (http://www.contec.com) 505 - * Submitted by Daniel Sangorrin 506 - */ 507 - #define CONTEC_VID 0x06CE /* Vendor ID */ 508 - #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */ 509 - 510 - /* 511 504 * Definitions for B&B Electronics products. 512 505 */ 513 506 #define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
+3
drivers/usb/serial/qcserial.c
··· 139 139 "Could not set interface, error %d\n", 140 140 retval); 141 141 retval = -ENODEV; 142 + kfree(data); 142 143 } 143 144 return retval; 144 145 } ··· 156 155 "Could not set interface, error %d\n", 157 156 retval); 158 157 retval = -ENODEV; 158 + kfree(data); 159 159 } 160 160 return retval; 161 161 } ··· 165 163 default: 166 164 dev_err(&serial->dev->dev, 167 165 "unknown number of interfaces: %d\n", nintf); 166 + kfree(data); 168 167 return -ENODEV; 169 168 } 170 169
+12 -8
drivers/video/geode/gxfb_core.c
··· 40 40 static int vt_switch; 41 41 42 42 /* Modes relevant to the GX (taken from modedb.c) */ 43 - static struct fb_videomode gx_modedb[] __initdata = { 43 + static struct fb_videomode gx_modedb[] __devinitdata = { 44 44 /* 640x480-60 VESA */ 45 45 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2, 46 46 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, ··· 110 110 #ifdef CONFIG_OLPC 111 111 #include <asm/olpc.h> 112 112 113 - static struct fb_videomode gx_dcon_modedb[] __initdata = { 113 + static struct fb_videomode gx_dcon_modedb[] __devinitdata = { 114 114 /* The only mode the DCON has is 1200x900 */ 115 115 { NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3, 116 116 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, 117 117 FB_VMODE_NONINTERLACED, 0 } 118 118 }; 119 119 120 - static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size) 120 + static void __devinit get_modedb(struct fb_videomode **modedb, 121 + unsigned int *size) 121 122 { 122 123 if (olpc_has_dcon()) { 123 124 *modedb = (struct fb_videomode *) gx_dcon_modedb; ··· 130 129 } 131 130 132 131 #else 133 - static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size) 132 + static void __devinit get_modedb(struct fb_videomode **modedb, 133 + unsigned int *size) 134 134 { 135 135 *modedb = (struct fb_videomode *) gx_modedb; 136 136 *size = ARRAY_SIZE(gx_modedb); ··· 228 226 return gx_blank_display(info, blank_mode); 229 227 } 230 228 231 - static int __init gxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev) 229 + static int __devinit gxfb_map_video_memory(struct fb_info *info, 230 + struct pci_dev *dev) 232 231 { 233 232 struct gxfb_par *par = info->par; 234 233 int ret; ··· 293 290 .fb_imageblit = cfb_imageblit, 294 291 }; 295 292 296 - static struct fb_info * __init gxfb_init_fbinfo(struct device *dev) 293 + static struct fb_info *__devinit gxfb_init_fbinfo(struct device *dev) 297 294 { 298 295 struct gxfb_par *par; 299 296 struct fb_info *info; ··· 374 371 
} 375 372 #endif 376 373 377 - static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id) 374 + static int __devinit gxfb_probe(struct pci_dev *pdev, 375 + const struct pci_device_id *id) 378 376 { 379 377 struct gxfb_par *par; 380 378 struct fb_info *info; ··· 455 451 return ret; 456 452 } 457 453 458 - static void gxfb_remove(struct pci_dev *pdev) 454 + static void __devexit gxfb_remove(struct pci_dev *pdev) 459 455 { 460 456 struct fb_info *info = pci_get_drvdata(pdev); 461 457 struct gxfb_par *par = info->par;
+10 -8
drivers/video/geode/lxfb_core.c
··· 35 35 * we try to make it something sane - 640x480-60 is sane 36 36 */ 37 37 38 - static struct fb_videomode geode_modedb[] __initdata = { 38 + static struct fb_videomode geode_modedb[] __devinitdata = { 39 39 /* 640x480-60 */ 40 40 { NULL, 60, 640, 480, 39682, 48, 8, 25, 2, 88, 2, 41 41 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, ··· 219 219 #ifdef CONFIG_OLPC 220 220 #include <asm/olpc.h> 221 221 222 - static struct fb_videomode olpc_dcon_modedb[] __initdata = { 222 + static struct fb_videomode olpc_dcon_modedb[] __devinitdata = { 223 223 /* The only mode the DCON has is 1200x900 */ 224 224 { NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3, 225 225 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, 226 226 FB_VMODE_NONINTERLACED, 0 } 227 227 }; 228 228 229 - static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size) 229 + static void __devinit get_modedb(struct fb_videomode **modedb, 230 + unsigned int *size) 230 231 { 231 232 if (olpc_has_dcon()) { 232 233 *modedb = (struct fb_videomode *) olpc_dcon_modedb; ··· 239 238 } 240 239 241 240 #else 242 - static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size) 241 + static void __devinit get_modedb(struct fb_videomode **modedb, 242 + unsigned int *size) 243 243 { 244 244 *modedb = (struct fb_videomode *) geode_modedb; 245 245 *size = ARRAY_SIZE(geode_modedb); ··· 336 334 } 337 335 338 336 339 - static int __init lxfb_map_video_memory(struct fb_info *info, 337 + static int __devinit lxfb_map_video_memory(struct fb_info *info, 340 338 struct pci_dev *dev) 341 339 { 342 340 struct lxfb_par *par = info->par; ··· 414 412 .fb_imageblit = cfb_imageblit, 415 413 }; 416 414 417 - static struct fb_info * __init lxfb_init_fbinfo(struct device *dev) 415 + static struct fb_info * __devinit lxfb_init_fbinfo(struct device *dev) 418 416 { 419 417 struct lxfb_par *par; 420 418 struct fb_info *info; ··· 498 496 #define lxfb_resume NULL 499 497 #endif 500 498 501 - static int __init 
lxfb_probe(struct pci_dev *pdev, 499 + static int __devinit lxfb_probe(struct pci_dev *pdev, 502 500 const struct pci_device_id *id) 503 501 { 504 502 struct lxfb_par *par; ··· 590 588 return ret; 591 589 } 592 590 593 - static void lxfb_remove(struct pci_dev *pdev) 591 + static void __devexit lxfb_remove(struct pci_dev *pdev) 594 592 { 595 593 struct fb_info *info = pci_get_drvdata(pdev); 596 594 struct lxfb_par *par = info->par;
-2
drivers/video/nuc900fb.c
··· 596 596 goto release_regs; 597 597 } 598 598 599 - nuc900_driver_clksrc_div(&pdev->dev, "ext", 0x2); 600 - 601 599 fbi->clk = clk_get(&pdev->dev, NULL); 602 600 if (!fbi->clk || IS_ERR(fbi->clk)) { 603 601 printk(KERN_ERR "nuc900-lcd:failed to get lcd clock source\n");
+1 -5
fs/binfmt_flat.c
··· 68 68 * Here we can be a bit looser than the data sections since this 69 69 * needs to only meet arch ABI requirements. 70 70 */ 71 - #ifdef ARCH_SLAB_MINALIGN 72 - #define FLAT_STACK_ALIGN (ARCH_SLAB_MINALIGN) 73 - #else 74 - #define FLAT_STACK_ALIGN (sizeof(void *)) 75 - #endif 71 + #define FLAT_STACK_ALIGN max_t(unsigned long, sizeof(void *), ARCH_SLAB_MINALIGN) 76 72 77 73 #define RELOC_FAILED 0xff00ff01 /* Relocation incorrect somewhere */ 78 74 #define UNLOADED_LIB 0x7ff000ff /* Placeholder for unused library */
+2
fs/dcache.c
··· 590 590 up_read(&sb->s_umount); 591 591 } 592 592 spin_lock(&sb_lock); 593 + /* lock was dropped, must reset next */ 594 + list_safe_reset_next(sb, n, s_list); 593 595 count -= pruned; 594 596 __put_super(sb); 595 597 /* more work left to do? */
+4 -2
fs/fcntl.c
··· 733 733 { 734 734 while (fa) { 735 735 struct fown_struct *fown; 736 + unsigned long flags; 737 + 736 738 if (fa->magic != FASYNC_MAGIC) { 737 739 printk(KERN_ERR "kill_fasync: bad magic number in " 738 740 "fasync_struct!\n"); 739 741 return; 740 742 } 741 - spin_lock(&fa->fa_lock); 743 + spin_lock_irqsave(&fa->fa_lock, flags); 742 744 if (fa->fa_file) { 743 745 fown = &fa->fa_file->f_owner; 744 746 /* Don't send SIGURG to processes which have not set a ··· 749 747 if (!(sig == SIGURG && fown->signum == 0)) 750 748 send_sigio(fown, fa->fa_fd, band); 751 749 } 752 - spin_unlock(&fa->fa_lock); 750 + spin_unlock_irqrestore(&fa->fa_lock, flags); 753 751 fa = rcu_dereference(fa->fa_next); 754 752 } 755 753 }
+115 -155
fs/fs-writeback.c
··· 63 63 }; 64 64 65 65 enum { 66 - WS_USED_B = 0, 67 - WS_ONSTACK_B, 66 + WS_INPROGRESS = 0, 67 + WS_ONSTACK, 68 68 }; 69 - 70 - #define WS_USED (1 << WS_USED_B) 71 - #define WS_ONSTACK (1 << WS_ONSTACK_B) 72 - 73 - static inline bool bdi_work_on_stack(struct bdi_work *work) 74 - { 75 - return test_bit(WS_ONSTACK_B, &work->state); 76 - } 77 69 78 70 static inline void bdi_work_init(struct bdi_work *work, 79 71 struct wb_writeback_args *args) 80 72 { 81 73 INIT_RCU_HEAD(&work->rcu_head); 82 74 work->args = *args; 83 - work->state = WS_USED; 75 + __set_bit(WS_INPROGRESS, &work->state); 84 76 } 85 77 86 78 /** ··· 87 95 return !list_empty(&bdi->work_list); 88 96 } 89 97 90 - static void bdi_work_clear(struct bdi_work *work) 91 - { 92 - clear_bit(WS_USED_B, &work->state); 93 - smp_mb__after_clear_bit(); 94 - /* 95 - * work can have disappeared at this point. bit waitq functions 96 - * should be able to tolerate this, provided bdi_sched_wait does 97 - * not dereference it's pointer argument. 98 - */ 99 - wake_up_bit(&work->state, WS_USED_B); 100 - } 101 - 102 98 static void bdi_work_free(struct rcu_head *head) 103 99 { 104 100 struct bdi_work *work = container_of(head, struct bdi_work, rcu_head); 105 101 106 - if (!bdi_work_on_stack(work)) 102 + clear_bit(WS_INPROGRESS, &work->state); 103 + smp_mb__after_clear_bit(); 104 + wake_up_bit(&work->state, WS_INPROGRESS); 105 + 106 + if (!test_bit(WS_ONSTACK, &work->state)) 107 107 kfree(work); 108 - else 109 - bdi_work_clear(work); 110 - } 111 - 112 - static void wb_work_complete(struct bdi_work *work) 113 - { 114 - const enum writeback_sync_modes sync_mode = work->args.sync_mode; 115 - int onstack = bdi_work_on_stack(work); 116 - 117 - /* 118 - * For allocated work, we can clear the done/seen bit right here. 119 - * For on-stack work, we need to postpone both the clear and free 120 - * to after the RCU grace period, since the stack could be invalidated 121 - * as soon as bdi_work_clear() has done the wakeup. 
122 - */ 123 - if (!onstack) 124 - bdi_work_clear(work); 125 - if (sync_mode == WB_SYNC_NONE || onstack) 126 - call_rcu(&work->rcu_head, bdi_work_free); 127 108 } 128 109 129 110 static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work) ··· 112 147 list_del_rcu(&work->list); 113 148 spin_unlock(&bdi->wb_lock); 114 149 115 - wb_work_complete(work); 150 + call_rcu(&work->rcu_head, bdi_work_free); 116 151 } 117 152 } 118 153 ··· 150 185 * Used for on-stack allocated work items. The caller needs to wait until 151 186 * the wb threads have acked the work before it's safe to continue. 152 187 */ 153 - static void bdi_wait_on_work_clear(struct bdi_work *work) 188 + static void bdi_wait_on_work_done(struct bdi_work *work) 154 189 { 155 - wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait, 190 + wait_on_bit(&work->state, WS_INPROGRESS, bdi_sched_wait, 156 191 TASK_UNINTERRUPTIBLE); 157 192 } 158 193 ··· 178 213 } 179 214 180 215 /** 181 - * bdi_sync_writeback - start and wait for writeback 182 - * @bdi: the backing device to write from 216 + * bdi_queue_work_onstack - start and wait for writeback 183 217 * @sb: write inodes from this super_block 184 218 * 185 219 * Description: 186 - * This does WB_SYNC_ALL data integrity writeback and waits for the 187 - * IO to complete. Callers must hold the sb s_umount semaphore for 220 + * This function initiates writeback and waits for the operation to 221 + * complete. Callers must hold the sb s_umount semaphore for 188 222 * reading, to avoid having the super disappear before we are done. 
189 223 */ 190 - static void bdi_sync_writeback(struct backing_dev_info *bdi, 191 - struct super_block *sb) 224 + static void bdi_queue_work_onstack(struct wb_writeback_args *args) 192 225 { 193 - struct wb_writeback_args args = { 194 - .sb = sb, 195 - .sync_mode = WB_SYNC_ALL, 196 - .nr_pages = LONG_MAX, 197 - .range_cyclic = 0, 198 - }; 199 226 struct bdi_work work; 200 227 201 - bdi_work_init(&work, &args); 202 - work.state |= WS_ONSTACK; 228 + bdi_work_init(&work, args); 229 + __set_bit(WS_ONSTACK, &work.state); 203 230 204 - bdi_queue_work(bdi, &work); 205 - bdi_wait_on_work_clear(&work); 231 + bdi_queue_work(args->sb->s_bdi, &work); 232 + bdi_wait_on_work_done(&work); 206 233 } 207 234 208 235 /** 209 236 * bdi_start_writeback - start writeback 210 237 * @bdi: the backing device to write from 211 - * @sb: write inodes from this super_block 212 238 * @nr_pages: the number of pages to write 213 239 * 214 240 * Description: ··· 208 252 * completion. Caller need not hold sb s_umount semaphore. 209 253 * 210 254 */ 211 - void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb, 212 - long nr_pages) 255 + void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages) 213 256 { 214 257 struct wb_writeback_args args = { 215 - .sb = sb, 216 258 .sync_mode = WB_SYNC_NONE, 217 259 .nr_pages = nr_pages, 218 260 .range_cyclic = 1, 219 261 }; 220 262 221 - /* 222 - * We treat @nr_pages=0 as the special case to do background writeback, 223 - * ie. to sync pages until the background dirty threshold is reached. 224 - */ 225 - if (!nr_pages) { 226 - args.nr_pages = LONG_MAX; 227 - args.for_background = 1; 228 - } 263 + bdi_alloc_queue_work(bdi, &args); 264 + } 229 265 266 + /** 267 + * bdi_start_background_writeback - start background writeback 268 + * @bdi: the backing device to write from 269 + * 270 + * Description: 271 + * This does WB_SYNC_NONE background writeback. 
The IO is only 272 + * started when this function returns, we make no guarentees on 273 + * completion. Caller need not hold sb s_umount semaphore. 274 + */ 275 + void bdi_start_background_writeback(struct backing_dev_info *bdi) 276 + { 277 + struct wb_writeback_args args = { 278 + .sync_mode = WB_SYNC_NONE, 279 + .nr_pages = LONG_MAX, 280 + .for_background = 1, 281 + .range_cyclic = 1, 282 + }; 230 283 bdi_alloc_queue_work(bdi, &args); 231 284 } 232 285 ··· 526 561 return ret; 527 562 } 528 563 529 - static void unpin_sb_for_writeback(struct super_block *sb) 530 - { 531 - up_read(&sb->s_umount); 532 - put_super(sb); 533 - } 534 - 535 - enum sb_pin_state { 536 - SB_PINNED, 537 - SB_NOT_PINNED, 538 - SB_PIN_FAILED 539 - }; 540 - 541 564 /* 542 - * For WB_SYNC_NONE writeback, the caller does not have the sb pinned 565 + * For background writeback the caller does not have the sb pinned 543 566 * before calling writeback. So make sure that we do pin it, so it doesn't 544 567 * go away while we are writing inodes from it. 
545 568 */ 546 - static enum sb_pin_state pin_sb_for_writeback(struct writeback_control *wbc, 547 - struct super_block *sb) 569 + static bool pin_sb_for_writeback(struct super_block *sb) 548 570 { 549 - /* 550 - * Caller must already hold the ref for this 551 - */ 552 - if (wbc->sync_mode == WB_SYNC_ALL) { 553 - WARN_ON(!rwsem_is_locked(&sb->s_umount)); 554 - return SB_NOT_PINNED; 555 - } 556 571 spin_lock(&sb_lock); 572 + if (list_empty(&sb->s_instances)) { 573 + spin_unlock(&sb_lock); 574 + return false; 575 + } 576 + 557 577 sb->s_count++; 578 + spin_unlock(&sb_lock); 579 + 558 580 if (down_read_trylock(&sb->s_umount)) { 559 - if (sb->s_root) { 560 - spin_unlock(&sb_lock); 561 - return SB_PINNED; 562 - } 563 - /* 564 - * umounted, drop rwsem again and fall through to failure 565 - */ 581 + if (sb->s_root) 582 + return true; 566 583 up_read(&sb->s_umount); 567 584 } 568 - sb->s_count--; 569 - spin_unlock(&sb_lock); 570 - return SB_PIN_FAILED; 585 + 586 + put_super(sb); 587 + return false; 571 588 } 572 589 573 590 /* ··· 628 681 struct inode *inode = list_entry(wb->b_io.prev, 629 682 struct inode, i_list); 630 683 struct super_block *sb = inode->i_sb; 631 - enum sb_pin_state state; 632 684 633 - if (wbc->sb && sb != wbc->sb) { 634 - /* super block given and doesn't 635 - match, skip this inode */ 636 - redirty_tail(inode); 637 - continue; 685 + if (wbc->sb) { 686 + /* 687 + * We are requested to write out inodes for a specific 688 + * superblock. This means we already have s_umount 689 + * taken by the caller which also waits for us to 690 + * complete the writeout. 
691 + */ 692 + if (sb != wbc->sb) { 693 + redirty_tail(inode); 694 + continue; 695 + } 696 + 697 + WARN_ON(!rwsem_is_locked(&sb->s_umount)); 698 + 699 + ret = writeback_sb_inodes(sb, wb, wbc); 700 + } else { 701 + if (!pin_sb_for_writeback(sb)) { 702 + requeue_io(inode); 703 + continue; 704 + } 705 + ret = writeback_sb_inodes(sb, wb, wbc); 706 + drop_super(sb); 638 707 } 639 - state = pin_sb_for_writeback(wbc, sb); 640 708 641 - if (state == SB_PIN_FAILED) { 642 - requeue_io(inode); 643 - continue; 644 - } 645 - ret = writeback_sb_inodes(sb, wb, wbc); 646 - 647 - if (state == SB_PINNED) 648 - unpin_sb_for_writeback(sb); 649 709 if (ret) 650 710 break; 651 711 } ··· 865 911 * If this isn't a data integrity operation, just notify 866 912 * that we have seen this work and we are now starting it. 867 913 */ 868 - if (args.sync_mode == WB_SYNC_NONE) 914 + if (!test_bit(WS_ONSTACK, &work->state)) 869 915 wb_clear_pending(wb, work); 870 916 871 917 wrote += wb_writeback(wb, &args); ··· 874 920 * This is a data integrity writeback, so only do the 875 921 * notification when we have completed the work. 876 922 */ 877 - if (args.sync_mode == WB_SYNC_ALL) 923 + if (test_bit(WS_ONSTACK, &work->state)) 878 924 wb_clear_pending(wb, work); 879 925 } 880 926 ··· 932 978 } 933 979 934 980 /* 935 - * Schedule writeback for all backing devices. This does WB_SYNC_NONE 936 - * writeback, for integrity writeback see bdi_sync_writeback(). 937 - */ 938 - static void bdi_writeback_all(struct super_block *sb, long nr_pages) 939 - { 940 - struct wb_writeback_args args = { 941 - .sb = sb, 942 - .nr_pages = nr_pages, 943 - .sync_mode = WB_SYNC_NONE, 944 - }; 945 - struct backing_dev_info *bdi; 946 - 947 - rcu_read_lock(); 948 - 949 - list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { 950 - if (!bdi_has_dirty_io(bdi)) 951 - continue; 952 - 953 - bdi_alloc_queue_work(bdi, &args); 954 - } 955 - 956 - rcu_read_unlock(); 957 - } 958 - 959 - /* 960 981 * Start writeback of `nr_pages' pages. 
If `nr_pages' is zero, write back 961 982 * the whole world. 962 983 */ 963 984 void wakeup_flusher_threads(long nr_pages) 964 985 { 965 - if (nr_pages == 0) 966 - nr_pages = global_page_state(NR_FILE_DIRTY) + 986 + struct backing_dev_info *bdi; 987 + struct wb_writeback_args args = { 988 + .sync_mode = WB_SYNC_NONE, 989 + }; 990 + 991 + if (nr_pages) { 992 + args.nr_pages = nr_pages; 993 + } else { 994 + args.nr_pages = global_page_state(NR_FILE_DIRTY) + 967 995 global_page_state(NR_UNSTABLE_NFS); 968 - bdi_writeback_all(NULL, nr_pages); 996 + } 997 + 998 + rcu_read_lock(); 999 + list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { 1000 + if (!bdi_has_dirty_io(bdi)) 1001 + continue; 1002 + bdi_alloc_queue_work(bdi, &args); 1003 + } 1004 + rcu_read_unlock(); 969 1005 } 970 1006 971 1007 static noinline void block_dump___mark_inode_dirty(struct inode *inode) ··· 1162 1218 { 1163 1219 unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY); 1164 1220 unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS); 1165 - long nr_to_write; 1221 + struct wb_writeback_args args = { 1222 + .sb = sb, 1223 + .sync_mode = WB_SYNC_NONE, 1224 + }; 1166 1225 1167 - nr_to_write = nr_dirty + nr_unstable + 1226 + WARN_ON(!rwsem_is_locked(&sb->s_umount)); 1227 + 1228 + args.nr_pages = nr_dirty + nr_unstable + 1168 1229 (inodes_stat.nr_inodes - inodes_stat.nr_unused); 1169 1230 1170 - bdi_start_writeback(sb->s_bdi, sb, nr_to_write); 1231 + bdi_queue_work_onstack(&args); 1171 1232 } 1172 1233 EXPORT_SYMBOL(writeback_inodes_sb); 1173 1234 ··· 1186 1237 int writeback_inodes_sb_if_idle(struct super_block *sb) 1187 1238 { 1188 1239 if (!writeback_in_progress(sb->s_bdi)) { 1240 + down_read(&sb->s_umount); 1189 1241 writeback_inodes_sb(sb); 1242 + up_read(&sb->s_umount); 1190 1243 return 1; 1191 1244 } else 1192 1245 return 0; ··· 1204 1253 */ 1205 1254 void sync_inodes_sb(struct super_block *sb) 1206 1255 { 1207 - bdi_sync_writeback(sb->s_bdi, sb); 1256 + struct wb_writeback_args args = 
{ 1257 + .sb = sb, 1258 + .sync_mode = WB_SYNC_ALL, 1259 + .nr_pages = LONG_MAX, 1260 + .range_cyclic = 0, 1261 + }; 1262 + 1263 + WARN_ON(!rwsem_is_locked(&sb->s_umount)); 1264 + 1265 + bdi_queue_work_onstack(&args); 1208 1266 wait_sb_inodes(sb); 1209 1267 } 1210 1268 EXPORT_SYMBOL(sync_inodes_sb);
+16 -4
fs/proc/task_nommu.c
··· 122 122 return size; 123 123 } 124 124 125 + static void pad_len_spaces(struct seq_file *m, int len) 126 + { 127 + len = 25 + sizeof(void*) * 6 - len; 128 + if (len < 1) 129 + len = 1; 130 + seq_printf(m, "%*c", len, ' '); 131 + } 132 + 125 133 /* 126 134 * display a single VMA to a sequenced file 127 135 */ 128 136 static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) 129 137 { 138 + struct mm_struct *mm = vma->vm_mm; 130 139 unsigned long ino = 0; 131 140 struct file *file; 132 141 dev_t dev = 0; ··· 164 155 MAJOR(dev), MINOR(dev), ino, &len); 165 156 166 157 if (file) { 167 - len = 25 + sizeof(void *) * 6 - len; 168 - if (len < 1) 169 - len = 1; 170 - seq_printf(m, "%*c", len, ' '); 158 + pad_len_spaces(m, len); 171 159 seq_path(m, &file->f_path, ""); 160 + } else if (mm) { 161 + if (vma->vm_start <= mm->start_stack && 162 + vma->vm_end >= mm->start_stack) { 163 + pad_len_spaces(m, len); 164 + seq_puts(m, "[stack]"); 165 + } 172 166 } 173 167 174 168 seq_putc(m, '\n');
+6
fs/super.c
··· 374 374 up_read(&sb->s_umount); 375 375 376 376 spin_lock(&sb_lock); 377 + /* lock was dropped, must reset next */ 378 + list_safe_reset_next(sb, n, s_list); 377 379 __put_super(sb); 378 380 } 379 381 } ··· 407 405 up_read(&sb->s_umount); 408 406 409 407 spin_lock(&sb_lock); 408 + /* lock was dropped, must reset next */ 409 + list_safe_reset_next(sb, n, s_list); 410 410 __put_super(sb); 411 411 } 412 412 spin_unlock(&sb_lock); ··· 589 585 } 590 586 up_write(&sb->s_umount); 591 587 spin_lock(&sb_lock); 588 + /* lock was dropped, must reset next */ 589 + list_safe_reset_next(sb, n, s_list); 592 590 __put_super(sb); 593 591 } 594 592 spin_unlock(&sb_lock);
+5 -1
fs/sysv/ialloc.c
··· 25 25 #include <linux/stat.h> 26 26 #include <linux/string.h> 27 27 #include <linux/buffer_head.h> 28 + #include <linux/writeback.h> 28 29 #include "sysv.h" 29 30 30 31 /* We don't trust the value of ··· 140 139 struct inode *inode; 141 140 sysv_ino_t ino; 142 141 unsigned count; 142 + struct writeback_control wbc = { 143 + .sync_mode = WB_SYNC_NONE 144 + }; 143 145 144 146 inode = new_inode(sb); 145 147 if (!inode) ··· 172 168 insert_inode_hash(inode); 173 169 mark_inode_dirty(inode); 174 170 175 - sysv_write_inode(inode, 0); /* ensure inode not allocated again */ 171 + sysv_write_inode(inode, &wbc); /* ensure inode not allocated again */ 176 172 mark_inode_dirty(inode); /* cleared by sysv_write_inode() */ 177 173 /* That's it. */ 178 174 unlock_super(sb);
+2
fs/ubifs/budget.c
··· 62 62 */ 63 63 static void shrink_liability(struct ubifs_info *c, int nr_to_write) 64 64 { 65 + down_read(&c->vfs_sb->s_umount); 65 66 writeback_inodes_sb(c->vfs_sb); 67 + up_read(&c->vfs_sb->s_umount); 66 68 } 67 69 68 70 /**
+5 -6
fs/xfs/linux-2.6/xfs_export.c
··· 128 128 return ERR_PTR(-ESTALE); 129 129 130 130 /* 131 - * The XFS_IGET_BULKSTAT means that an invalid inode number is just 132 - * fine and not an indication of a corrupted filesystem. Because 133 - * clients can send any kind of invalid file handle, e.g. after 134 - * a restore on the server we have to deal with this case gracefully. 131 + * The XFS_IGET_UNTRUSTED means that an invalid inode number is just 132 + * fine and not an indication of a corrupted filesystem as clients can 133 + * send invalid file handles and we have to handle it gracefully.. 135 134 */ 136 - error = xfs_iget(mp, NULL, ino, XFS_IGET_BULKSTAT, 137 - XFS_ILOCK_SHARED, &ip, 0); 135 + error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, 136 + XFS_ILOCK_SHARED, &ip); 138 137 if (error) { 139 138 /* 140 139 * EINVAL means the inode cluster doesn't exist anymore.
+3 -4
fs/xfs/linux-2.6/xfs_ioctl.c
··· 679 679 error = xfs_bulkstat_single(mp, &inlast, 680 680 bulkreq.ubuffer, &done); 681 681 else /* XFS_IOC_FSBULKSTAT */ 682 - error = xfs_bulkstat(mp, &inlast, &count, 683 - (bulkstat_one_pf)xfs_bulkstat_one, NULL, 684 - sizeof(xfs_bstat_t), bulkreq.ubuffer, 685 - BULKSTAT_FG_QUICK, &done); 682 + error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one, 683 + sizeof(xfs_bstat_t), bulkreq.ubuffer, 684 + &done); 686 685 687 686 if (error) 688 687 return -error;
+5 -10
fs/xfs/linux-2.6/xfs_ioctl32.c
··· 237 237 xfs_ino_t ino, /* inode number to get data for */ 238 238 void __user *buffer, /* buffer to place output in */ 239 239 int ubsize, /* size of buffer */ 240 - void *private_data, /* my private data */ 241 - xfs_daddr_t bno, /* starting bno of inode cluster */ 242 240 int *ubused, /* bytes used by me */ 243 - void *dibuff, /* on-disk inode buffer */ 244 241 int *stat) /* BULKSTAT_RV_... */ 245 242 { 246 243 return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, 247 - xfs_bulkstat_one_fmt_compat, bno, 248 - ubused, dibuff, stat); 244 + xfs_bulkstat_one_fmt_compat, 245 + ubused, stat); 249 246 } 250 247 251 248 /* copied from xfs_ioctl.c */ ··· 295 298 int res; 296 299 297 300 error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer, 298 - sizeof(compat_xfs_bstat_t), 299 - NULL, 0, NULL, NULL, &res); 301 + sizeof(compat_xfs_bstat_t), 0, &res); 300 302 } else if (cmd == XFS_IOC_FSBULKSTAT_32) { 301 303 error = xfs_bulkstat(mp, &inlast, &count, 302 - xfs_bulkstat_one_compat, NULL, 303 - sizeof(compat_xfs_bstat_t), bulkreq.ubuffer, 304 - BULKSTAT_FG_QUICK, &done); 304 + xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t), 305 + bulkreq.ubuffer, &done); 305 306 } else 306 307 error = XFS_ERROR(EINVAL); 307 308 if (error)
+8 -10
fs/xfs/quota/xfs_qm.c
··· 1632 1632 xfs_ino_t ino, /* inode number to get data for */ 1633 1633 void __user *buffer, /* not used */ 1634 1634 int ubsize, /* not used */ 1635 - void *private_data, /* not used */ 1636 - xfs_daddr_t bno, /* starting block of inode cluster */ 1637 1635 int *ubused, /* not used */ 1638 - void *dip, /* on-disk inode pointer (not used) */ 1639 1636 int *res) /* result code value */ 1640 1637 { 1641 1638 xfs_inode_t *ip; ··· 1657 1660 * the case in all other instances. It's OK that we do this because 1658 1661 * quotacheck is done only at mount time. 1659 1662 */ 1660 - if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) { 1663 + if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip))) { 1661 1664 *res = BULKSTAT_RV_NOTHING; 1662 1665 return error; 1663 1666 } ··· 1793 1796 * Iterate thru all the inodes in the file system, 1794 1797 * adjusting the corresponding dquot counters in core. 1795 1798 */ 1796 - if ((error = xfs_bulkstat(mp, &lastino, &count, 1797 - xfs_qm_dqusage_adjust, NULL, 1798 - structsz, NULL, BULKSTAT_FG_IGET, &done))) 1799 + error = xfs_bulkstat(mp, &lastino, &count, 1800 + xfs_qm_dqusage_adjust, 1801 + structsz, NULL, &done); 1802 + if (error) 1799 1803 break; 1800 1804 1801 - } while (! done); 1805 + } while (!done); 1802 1806 1803 1807 /* 1804 1808 * We've made all the changes that we need to make incore. ··· 1887 1889 mp->m_sb.sb_uquotino != NULLFSINO) { 1888 1890 ASSERT(mp->m_sb.sb_uquotino > 0); 1889 1891 if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 1890 - 0, 0, &uip, 0))) 1892 + 0, 0, &uip))) 1891 1893 return XFS_ERROR(error); 1892 1894 } 1893 1895 if (XFS_IS_OQUOTA_ON(mp) && 1894 1896 mp->m_sb.sb_gquotino != NULLFSINO) { 1895 1897 ASSERT(mp->m_sb.sb_gquotino > 0); 1896 1898 if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 1897 - 0, 0, &gip, 0))) { 1899 + 0, 0, &gip))) { 1898 1900 if (uip) 1899 1901 IRELE(uip); 1900 1902 return XFS_ERROR(error);
+12 -15
fs/xfs/quota/xfs_qm_syscalls.c
··· 262 262 } 263 263 264 264 if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) { 265 - error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0); 265 + error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip); 266 266 if (!error) { 267 267 error = xfs_truncate_file(mp, qip); 268 268 IRELE(qip); ··· 271 271 272 272 if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) && 273 273 mp->m_sb.sb_gquotino != NULLFSINO) { 274 - error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0); 274 + error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip); 275 275 if (!error2) { 276 276 error2 = xfs_truncate_file(mp, qip); 277 277 IRELE(qip); ··· 417 417 } 418 418 if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { 419 419 if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 420 - 0, 0, &uip, 0) == 0) 420 + 0, 0, &uip) == 0) 421 421 tempuqip = B_TRUE; 422 422 } 423 423 if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { 424 424 if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 425 - 0, 0, &gip, 0) == 0) 425 + 0, 0, &gip) == 0) 426 426 tempgqip = B_TRUE; 427 427 } 428 428 if (uip) { ··· 1109 1109 xfs_ino_t ino, /* inode number to get data for */ 1110 1110 void __user *buffer, /* not used */ 1111 1111 int ubsize, /* not used */ 1112 - void *private_data, /* not used */ 1113 - xfs_daddr_t bno, /* starting block of inode cluster */ 1114 1112 int *ubused, /* not used */ 1115 - void *dip, /* not used */ 1116 1113 int *res) /* bulkstat result code */ 1117 1114 { 1118 1115 xfs_inode_t *ip; ··· 1131 1134 ipreleased = B_FALSE; 1132 1135 again: 1133 1136 lock_flags = XFS_ILOCK_SHARED; 1134 - if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip, bno))) { 1137 + if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip))) { 1135 1138 *res = BULKSTAT_RV_NOTHING; 1136 1139 return (error); 1137 1140 } ··· 1202 1205 * Iterate thru all the inodes in the file system, 1203 1206 * adjusting the corresponding dquot counters 1204 1207 */ 1205 - if ((error = xfs_bulkstat(mp, &lastino, &count, 1206 
- xfs_qm_internalqcheck_adjust, NULL, 1207 - 0, NULL, BULKSTAT_FG_IGET, &done))) { 1208 + error = xfs_bulkstat(mp, &lastino, &count, 1209 + xfs_qm_internalqcheck_adjust, 1210 + 0, NULL, &done); 1211 + if (error) { 1212 + cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error); 1208 1213 break; 1209 1214 } 1210 - } while (! done); 1211 - if (error) { 1212 - cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error); 1213 - } 1215 + } while (!done); 1216 + 1214 1217 cmn_err(CE_DEBUG, "Checking results against system dquots"); 1215 1218 for (i = 0; i < qmtest_hashmask; i++) { 1216 1219 xfs_dqtest_t *d, *n;
+4 -1
fs/xfs/xfs_dfrag.c
··· 69 69 goto out; 70 70 } 71 71 72 - if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) { 72 + if (!(file->f_mode & FMODE_WRITE) || 73 + !(file->f_mode & FMODE_READ) || 74 + (file->f_flags & O_APPEND)) { 73 75 error = XFS_ERROR(EBADF); 74 76 goto out_put_file; 75 77 } ··· 83 81 } 84 82 85 83 if (!(tmp_file->f_mode & FMODE_WRITE) || 84 + !(tmp_file->f_mode & FMODE_READ) || 86 85 (tmp_file->f_flags & O_APPEND)) { 87 86 error = XFS_ERROR(EBADF); 88 87 goto out_put_tmp_file;
+82 -60
fs/xfs/xfs_ialloc.c
··· 1203 1203 return error; 1204 1204 } 1205 1205 1206 + STATIC int 1207 + xfs_imap_lookup( 1208 + struct xfs_mount *mp, 1209 + struct xfs_trans *tp, 1210 + xfs_agnumber_t agno, 1211 + xfs_agino_t agino, 1212 + xfs_agblock_t agbno, 1213 + xfs_agblock_t *chunk_agbno, 1214 + xfs_agblock_t *offset_agbno, 1215 + int flags) 1216 + { 1217 + struct xfs_inobt_rec_incore rec; 1218 + struct xfs_btree_cur *cur; 1219 + struct xfs_buf *agbp; 1220 + xfs_agino_t startino; 1221 + int error; 1222 + int i; 1223 + 1224 + error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); 1225 + if (error) { 1226 + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " 1227 + "xfs_ialloc_read_agi() returned " 1228 + "error %d, agno %d", 1229 + error, agno); 1230 + return error; 1231 + } 1232 + 1233 + /* 1234 + * derive and lookup the exact inode record for the given agino. If the 1235 + * record cannot be found, then it's an invalid inode number and we 1236 + * should abort. 1237 + */ 1238 + cur = xfs_inobt_init_cursor(mp, tp, agbp, agno); 1239 + startino = agino & ~(XFS_IALLOC_INODES(mp) - 1); 1240 + error = xfs_inobt_lookup(cur, startino, XFS_LOOKUP_EQ, &i); 1241 + if (!error) { 1242 + if (i) 1243 + error = xfs_inobt_get_rec(cur, &rec, &i); 1244 + if (!error && i == 0) 1245 + error = EINVAL; 1246 + } 1247 + 1248 + xfs_trans_brelse(tp, agbp); 1249 + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1250 + if (error) 1251 + return error; 1252 + 1253 + /* for untrusted inodes check it is allocated first */ 1254 + if ((flags & XFS_IGET_UNTRUSTED) && 1255 + (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))) 1256 + return EINVAL; 1257 + 1258 + *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino); 1259 + *offset_agbno = agbno - *chunk_agbno; 1260 + return 0; 1261 + } 1262 + 1206 1263 /* 1207 1264 * Return the location of the inode in imap, for mapping it into a buffer. 
1208 1265 */ ··· 1292 1235 if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks || 1293 1236 ino != XFS_AGINO_TO_INO(mp, agno, agino)) { 1294 1237 #ifdef DEBUG 1295 - /* no diagnostics for bulkstat, ino comes from userspace */ 1296 - if (flags & XFS_IGET_BULKSTAT) 1238 + /* 1239 + * Don't output diagnostic information for untrusted inodes 1240 + * as they can be invalid without implying corruption. 1241 + */ 1242 + if (flags & XFS_IGET_UNTRUSTED) 1297 1243 return XFS_ERROR(EINVAL); 1298 1244 if (agno >= mp->m_sb.sb_agcount) { 1299 1245 xfs_fs_cmn_err(CE_ALERT, mp, ··· 1323 1263 return XFS_ERROR(EINVAL); 1324 1264 } 1325 1265 1266 + blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog; 1267 + 1268 + /* 1269 + * For bulkstat and handle lookups, we have an untrusted inode number 1270 + * that we have to verify is valid. We cannot do this just by reading 1271 + * the inode buffer as it may have been unlinked and removed leaving 1272 + * inodes in stale state on disk. Hence we have to do a btree lookup 1273 + * in all cases where an untrusted inode number is passed. 1274 + */ 1275 + if (flags & XFS_IGET_UNTRUSTED) { 1276 + error = xfs_imap_lookup(mp, tp, agno, agino, agbno, 1277 + &chunk_agbno, &offset_agbno, flags); 1278 + if (error) 1279 + return error; 1280 + goto out_map; 1281 + } 1282 + 1326 1283 /* 1327 1284 * If the inode cluster size is the same as the blocksize or 1328 1285 * smaller we get to the buffer by simple arithmetics. ··· 1354 1277 return 0; 1355 1278 } 1356 1279 1357 - blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog; 1358 - 1359 - /* 1360 - * If we get a block number passed from bulkstat we can use it to 1361 - * find the buffer easily. 
1362 - */ 1363 - if (imap->im_blkno) { 1364 - offset = XFS_INO_TO_OFFSET(mp, ino); 1365 - ASSERT(offset < mp->m_sb.sb_inopblock); 1366 - 1367 - cluster_agbno = xfs_daddr_to_agbno(mp, imap->im_blkno); 1368 - offset += (agbno - cluster_agbno) * mp->m_sb.sb_inopblock; 1369 - 1370 - imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster); 1371 - imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog); 1372 - return 0; 1373 - } 1374 - 1375 1280 /* 1376 1281 * If the inode chunks are aligned then use simple maths to 1377 1282 * find the location. Otherwise we have to do a btree ··· 1363 1304 offset_agbno = agbno & mp->m_inoalign_mask; 1364 1305 chunk_agbno = agbno - offset_agbno; 1365 1306 } else { 1366 - xfs_btree_cur_t *cur; /* inode btree cursor */ 1367 - xfs_inobt_rec_incore_t chunk_rec; 1368 - xfs_buf_t *agbp; /* agi buffer */ 1369 - int i; /* temp state */ 1370 - 1371 - error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); 1372 - if (error) { 1373 - xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " 1374 - "xfs_ialloc_read_agi() returned " 1375 - "error %d, agno %d", 1376 - error, agno); 1377 - return error; 1378 - } 1379 - 1380 - cur = xfs_inobt_init_cursor(mp, tp, agbp, agno); 1381 - error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i); 1382 - if (error) { 1383 - xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " 1384 - "xfs_inobt_lookup() failed"); 1385 - goto error0; 1386 - } 1387 - 1388 - error = xfs_inobt_get_rec(cur, &chunk_rec, &i); 1389 - if (error) { 1390 - xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " 1391 - "xfs_inobt_get_rec() failed"); 1392 - goto error0; 1393 - } 1394 - if (i == 0) { 1395 - #ifdef DEBUG 1396 - xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " 1397 - "xfs_inobt_get_rec() failed"); 1398 - #endif /* DEBUG */ 1399 - error = XFS_ERROR(EINVAL); 1400 - } 1401 - error0: 1402 - xfs_trans_brelse(tp, agbp); 1403 - xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1307 + error = xfs_imap_lookup(mp, tp, agno, agino, agbno, 1308 + &chunk_agbno, &offset_agbno, flags); 1404 1309 if 
(error) 1405 1310 return error; 1406 - chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_rec.ir_startino); 1407 - offset_agbno = agbno - chunk_agbno; 1408 1311 } 1409 1312 1313 + out_map: 1410 1314 ASSERT(agbno >= chunk_agbno); 1411 1315 cluster_agbno = chunk_agbno + 1412 1316 ((offset_agbno / blks_per_cluster) * blks_per_cluster);
+3 -7
fs/xfs/xfs_iget.c
··· 259 259 xfs_trans_t *tp, 260 260 xfs_ino_t ino, 261 261 struct xfs_inode **ipp, 262 - xfs_daddr_t bno, 263 262 int flags, 264 263 int lock_flags) 265 264 { ··· 271 272 if (!ip) 272 273 return ENOMEM; 273 274 274 - error = xfs_iread(mp, tp, ip, bno, flags); 275 + error = xfs_iread(mp, tp, ip, flags); 275 276 if (error) 276 277 goto out_destroy; 277 278 ··· 357 358 * within the file system for the inode being requested. 358 359 * lock_flags -- flags indicating how to lock the inode. See the comment 359 360 * for xfs_ilock() for a list of valid values. 360 - * bno -- the block number starting the buffer containing the inode, 361 - * if known (as by bulkstat), else 0. 362 361 */ 363 362 int 364 363 xfs_iget( ··· 365 368 xfs_ino_t ino, 366 369 uint flags, 367 370 uint lock_flags, 368 - xfs_inode_t **ipp, 369 - xfs_daddr_t bno) 371 + xfs_inode_t **ipp) 370 372 { 371 373 xfs_inode_t *ip; 372 374 int error; ··· 393 397 read_unlock(&pag->pag_ici_lock); 394 398 XFS_STATS_INC(xs_ig_missed); 395 399 396 - error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno, 400 + error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, 397 401 flags, lock_flags); 398 402 if (error) 399 403 goto out_error_or_again;
+1 -4
fs/xfs/xfs_inode.c
··· 177 177 if (unlikely(XFS_TEST_ERROR(!di_ok, mp, 178 178 XFS_ERRTAG_ITOBP_INOTOBP, 179 179 XFS_RANDOM_ITOBP_INOTOBP))) { 180 - if (iget_flags & XFS_IGET_BULKSTAT) { 180 + if (iget_flags & XFS_IGET_UNTRUSTED) { 181 181 xfs_trans_brelse(tp, bp); 182 182 return XFS_ERROR(EINVAL); 183 183 } ··· 787 787 xfs_mount_t *mp, 788 788 xfs_trans_t *tp, 789 789 xfs_inode_t *ip, 790 - xfs_daddr_t bno, 791 790 uint iget_flags) 792 791 { 793 792 xfs_buf_t *bp; ··· 796 797 /* 797 798 * Fill in the location information in the in-core inode. 798 799 */ 799 - ip->i_imap.im_blkno = bno; 800 800 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags); 801 801 if (error) 802 802 return error; 803 - ASSERT(bno == 0 || bno == ip->i_imap.im_blkno); 804 803 805 804 /* 806 805 * Get pointers to the on-disk inode and the buffer containing it.
+3 -3
fs/xfs/xfs_inode.h
··· 442 442 * xfs_iget.c prototypes. 443 443 */ 444 444 int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, 445 - uint, uint, xfs_inode_t **, xfs_daddr_t); 445 + uint, uint, xfs_inode_t **); 446 446 void xfs_iput(xfs_inode_t *, uint); 447 447 void xfs_iput_new(xfs_inode_t *, uint); 448 448 void xfs_ilock(xfs_inode_t *, uint); ··· 500 500 * Flags for xfs_iget() 501 501 */ 502 502 #define XFS_IGET_CREATE 0x1 503 - #define XFS_IGET_BULKSTAT 0x2 503 + #define XFS_IGET_UNTRUSTED 0x2 504 504 505 505 int xfs_inotobp(struct xfs_mount *, struct xfs_trans *, 506 506 xfs_ino_t, struct xfs_dinode **, ··· 509 509 struct xfs_inode *, struct xfs_dinode **, 510 510 struct xfs_buf **, uint); 511 511 int xfs_iread(struct xfs_mount *, struct xfs_trans *, 512 - struct xfs_inode *, xfs_daddr_t, uint); 512 + struct xfs_inode *, uint); 513 513 void xfs_dinode_to_disk(struct xfs_dinode *, 514 514 struct xfs_icdinode *); 515 515 void xfs_idestroy_fork(struct xfs_inode *, int);
+42 -247
fs/xfs/xfs_itable.c
··· 49 49 (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino))); 50 50 } 51 51 52 - STATIC int 53 - xfs_bulkstat_one_iget( 54 - xfs_mount_t *mp, /* mount point for filesystem */ 55 - xfs_ino_t ino, /* inode number to get data for */ 56 - xfs_daddr_t bno, /* starting bno of inode cluster */ 57 - xfs_bstat_t *buf, /* return buffer */ 58 - int *stat) /* BULKSTAT_RV_... */ 52 + /* 53 + * Return stat information for one inode. 54 + * Return 0 if ok, else errno. 55 + */ 56 + int 57 + xfs_bulkstat_one_int( 58 + struct xfs_mount *mp, /* mount point for filesystem */ 59 + xfs_ino_t ino, /* inode to get data for */ 60 + void __user *buffer, /* buffer to place output in */ 61 + int ubsize, /* size of buffer */ 62 + bulkstat_one_fmt_pf formatter, /* formatter, copy to user */ 63 + int *ubused, /* bytes used by me */ 64 + int *stat) /* BULKSTAT_RV_... */ 59 65 { 60 - xfs_icdinode_t *dic; /* dinode core info pointer */ 61 - xfs_inode_t *ip; /* incore inode pointer */ 62 - struct inode *inode; 63 - int error; 66 + struct xfs_icdinode *dic; /* dinode core info pointer */ 67 + struct xfs_inode *ip; /* incore inode pointer */ 68 + struct inode *inode; 69 + struct xfs_bstat *buf; /* return buffer */ 70 + int error = 0; /* error value */ 71 + 72 + *stat = BULKSTAT_RV_NOTHING; 73 + 74 + if (!buffer || xfs_internal_inum(mp, ino)) 75 + return XFS_ERROR(EINVAL); 76 + 77 + buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL); 78 + if (!buf) 79 + return XFS_ERROR(ENOMEM); 64 80 65 81 error = xfs_iget(mp, NULL, ino, 66 - XFS_IGET_BULKSTAT, XFS_ILOCK_SHARED, &ip, bno); 82 + XFS_IGET_UNTRUSTED, XFS_ILOCK_SHARED, &ip); 67 83 if (error) { 68 84 *stat = BULKSTAT_RV_NOTHING; 69 - return error; 85 + goto out_free; 70 86 } 71 87 72 88 ASSERT(ip != NULL); ··· 143 127 buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks; 144 128 break; 145 129 } 146 - 147 130 xfs_iput(ip, XFS_ILOCK_SHARED); 131 + 132 + error = formatter(buffer, ubsize, ubused, buf); 133 + 134 + if (!error) 135 + *stat = 
BULKSTAT_RV_DIDONE; 136 + 137 + out_free: 138 + kmem_free(buf); 148 139 return error; 149 - } 150 - 151 - STATIC void 152 - xfs_bulkstat_one_dinode( 153 - xfs_mount_t *mp, /* mount point for filesystem */ 154 - xfs_ino_t ino, /* inode number to get data for */ 155 - xfs_dinode_t *dic, /* dinode inode pointer */ 156 - xfs_bstat_t *buf) /* return buffer */ 157 - { 158 - /* 159 - * The inode format changed when we moved the link count and 160 - * made it 32 bits long. If this is an old format inode, 161 - * convert it in memory to look like a new one. If it gets 162 - * flushed to disk we will convert back before flushing or 163 - * logging it. We zero out the new projid field and the old link 164 - * count field. We'll handle clearing the pad field (the remains 165 - * of the old uuid field) when we actually convert the inode to 166 - * the new format. We don't change the version number so that we 167 - * can distinguish this from a real new format inode. 168 - */ 169 - if (dic->di_version == 1) { 170 - buf->bs_nlink = be16_to_cpu(dic->di_onlink); 171 - buf->bs_projid = 0; 172 - } else { 173 - buf->bs_nlink = be32_to_cpu(dic->di_nlink); 174 - buf->bs_projid = be16_to_cpu(dic->di_projid); 175 - } 176 - 177 - buf->bs_ino = ino; 178 - buf->bs_mode = be16_to_cpu(dic->di_mode); 179 - buf->bs_uid = be32_to_cpu(dic->di_uid); 180 - buf->bs_gid = be32_to_cpu(dic->di_gid); 181 - buf->bs_size = be64_to_cpu(dic->di_size); 182 - buf->bs_atime.tv_sec = be32_to_cpu(dic->di_atime.t_sec); 183 - buf->bs_atime.tv_nsec = be32_to_cpu(dic->di_atime.t_nsec); 184 - buf->bs_mtime.tv_sec = be32_to_cpu(dic->di_mtime.t_sec); 185 - buf->bs_mtime.tv_nsec = be32_to_cpu(dic->di_mtime.t_nsec); 186 - buf->bs_ctime.tv_sec = be32_to_cpu(dic->di_ctime.t_sec); 187 - buf->bs_ctime.tv_nsec = be32_to_cpu(dic->di_ctime.t_nsec); 188 - buf->bs_xflags = xfs_dic2xflags(dic); 189 - buf->bs_extsize = be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog; 190 - buf->bs_extents = be32_to_cpu(dic->di_nextents); 191 - 
buf->bs_gen = be32_to_cpu(dic->di_gen); 192 - memset(buf->bs_pad, 0, sizeof(buf->bs_pad)); 193 - buf->bs_dmevmask = be32_to_cpu(dic->di_dmevmask); 194 - buf->bs_dmstate = be16_to_cpu(dic->di_dmstate); 195 - buf->bs_aextents = be16_to_cpu(dic->di_anextents); 196 - buf->bs_forkoff = XFS_DFORK_BOFF(dic); 197 - 198 - switch (dic->di_format) { 199 - case XFS_DINODE_FMT_DEV: 200 - buf->bs_rdev = xfs_dinode_get_rdev(dic); 201 - buf->bs_blksize = BLKDEV_IOSIZE; 202 - buf->bs_blocks = 0; 203 - break; 204 - case XFS_DINODE_FMT_LOCAL: 205 - case XFS_DINODE_FMT_UUID: 206 - buf->bs_rdev = 0; 207 - buf->bs_blksize = mp->m_sb.sb_blocksize; 208 - buf->bs_blocks = 0; 209 - break; 210 - case XFS_DINODE_FMT_EXTENTS: 211 - case XFS_DINODE_FMT_BTREE: 212 - buf->bs_rdev = 0; 213 - buf->bs_blksize = mp->m_sb.sb_blocksize; 214 - buf->bs_blocks = be64_to_cpu(dic->di_nblocks); 215 - break; 216 - } 217 140 } 218 141 219 142 /* Return 0 on success or positive error */ ··· 172 217 return 0; 173 218 } 174 219 175 - /* 176 - * Return stat information for one inode. 177 - * Return 0 if ok, else errno. 178 - */ 179 - int /* error status */ 180 - xfs_bulkstat_one_int( 181 - xfs_mount_t *mp, /* mount point for filesystem */ 182 - xfs_ino_t ino, /* inode number to get data for */ 183 - void __user *buffer, /* buffer to place output in */ 184 - int ubsize, /* size of buffer */ 185 - bulkstat_one_fmt_pf formatter, /* formatter, copy to user */ 186 - xfs_daddr_t bno, /* starting bno of inode cluster */ 187 - int *ubused, /* bytes used by me */ 188 - void *dibuff, /* on-disk inode buffer */ 189 - int *stat) /* BULKSTAT_RV_... 
*/ 190 - { 191 - xfs_bstat_t *buf; /* return buffer */ 192 - int error = 0; /* error value */ 193 - xfs_dinode_t *dip; /* dinode inode pointer */ 194 - 195 - dip = (xfs_dinode_t *)dibuff; 196 - *stat = BULKSTAT_RV_NOTHING; 197 - 198 - if (!buffer || xfs_internal_inum(mp, ino)) 199 - return XFS_ERROR(EINVAL); 200 - 201 - buf = kmem_alloc(sizeof(*buf), KM_SLEEP); 202 - 203 - if (dip == NULL) { 204 - /* We're not being passed a pointer to a dinode. This happens 205 - * if BULKSTAT_FG_IGET is selected. Do the iget. 206 - */ 207 - error = xfs_bulkstat_one_iget(mp, ino, bno, buf, stat); 208 - if (error) 209 - goto out_free; 210 - } else { 211 - xfs_bulkstat_one_dinode(mp, ino, dip, buf); 212 - } 213 - 214 - error = formatter(buffer, ubsize, ubused, buf); 215 - if (error) 216 - goto out_free; 217 - 218 - *stat = BULKSTAT_RV_DIDONE; 219 - 220 - out_free: 221 - kmem_free(buf); 222 - return error; 223 - } 224 - 225 220 int 226 221 xfs_bulkstat_one( 227 222 xfs_mount_t *mp, /* mount point for filesystem */ 228 223 xfs_ino_t ino, /* inode number to get data for */ 229 224 void __user *buffer, /* buffer to place output in */ 230 225 int ubsize, /* size of buffer */ 231 - void *private_data, /* my private data */ 232 - xfs_daddr_t bno, /* starting bno of inode cluster */ 233 226 int *ubused, /* bytes used by me */ 234 - void *dibuff, /* on-disk inode buffer */ 235 227 int *stat) /* BULKSTAT_RV_... */ 236 228 { 237 229 return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, 238 - xfs_bulkstat_one_fmt, bno, 239 - ubused, dibuff, stat); 240 - } 241 - 242 - /* 243 - * Test to see whether we can use the ondisk inode directly, based 244 - * on the given bulkstat flags, filling in dipp accordingly. 245 - * Returns zero if the inode is dodgey. 
246 - */ 247 - STATIC int 248 - xfs_bulkstat_use_dinode( 249 - xfs_mount_t *mp, 250 - int flags, 251 - xfs_buf_t *bp, 252 - int clustidx, 253 - xfs_dinode_t **dipp) 254 - { 255 - xfs_dinode_t *dip; 256 - unsigned int aformat; 257 - 258 - *dipp = NULL; 259 - if (!bp || (flags & BULKSTAT_FG_IGET)) 260 - return 1; 261 - dip = (xfs_dinode_t *) 262 - xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog); 263 - /* 264 - * Check the buffer containing the on-disk inode for di_mode == 0. 265 - * This is to prevent xfs_bulkstat from picking up just reclaimed 266 - * inodes that have their in-core state initialized but not flushed 267 - * to disk yet. This is a temporary hack that would require a proper 268 - * fix in the future. 269 - */ 270 - if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC || 271 - !XFS_DINODE_GOOD_VERSION(dip->di_version) || 272 - !dip->di_mode) 273 - return 0; 274 - if (flags & BULKSTAT_FG_QUICK) { 275 - *dipp = dip; 276 - return 1; 277 - } 278 - /* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */ 279 - aformat = dip->di_aformat; 280 - if ((XFS_DFORK_Q(dip) == 0) || 281 - (aformat == XFS_DINODE_FMT_LOCAL) || 282 - (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_anextents)) { 283 - *dipp = dip; 284 - return 1; 285 - } 286 - return 1; 230 + xfs_bulkstat_one_fmt, ubused, stat); 287 231 } 288 232 289 233 #define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size) ··· 196 342 xfs_ino_t *lastinop, /* last inode returned */ 197 343 int *ubcountp, /* size of buffer/count returned */ 198 344 bulkstat_one_pf formatter, /* func that'd fill a single buf */ 199 - void *private_data,/* private data for formatter */ 200 345 size_t statstruct_size, /* sizeof struct filling */ 201 346 char __user *ubuffer, /* buffer with inode stats */ 202 - int flags, /* defined in xfs_itable.h */ 203 347 int *done) /* 1 if there are more stats to get */ 204 348 { 205 349 xfs_agblock_t agbno=0;/* allocation group block number */ ··· 232 380 int ubelem; /* spaces 
used in user's buffer */ 233 381 int ubused; /* bytes used by formatter */ 234 382 xfs_buf_t *bp; /* ptr to on-disk inode cluster buf */ 235 - xfs_dinode_t *dip; /* ptr into bp for specific inode */ 236 383 237 384 /* 238 385 * Get the last inode value, see if there's nothing to do. 239 386 */ 240 387 ino = (xfs_ino_t)*lastinop; 241 388 lastino = ino; 242 - dip = NULL; 243 389 agno = XFS_INO_TO_AGNO(mp, ino); 244 390 agino = XFS_INO_TO_AGINO(mp, ino); 245 391 if (agno >= mp->m_sb.sb_agcount || ··· 462 612 irbp->ir_startino) + 463 613 ((chunkidx & nimask) >> 464 614 mp->m_sb.sb_inopblog); 465 - 466 - if (flags & (BULKSTAT_FG_QUICK | 467 - BULKSTAT_FG_INLINE)) { 468 - int offset; 469 - 470 - ino = XFS_AGINO_TO_INO(mp, agno, 471 - agino); 472 - bno = XFS_AGB_TO_DADDR(mp, agno, 473 - agbno); 474 - 475 - /* 476 - * Get the inode cluster buffer 477 - */ 478 - if (bp) 479 - xfs_buf_relse(bp); 480 - 481 - error = xfs_inotobp(mp, NULL, ino, &dip, 482 - &bp, &offset, 483 - XFS_IGET_BULKSTAT); 484 - 485 - if (!error) 486 - clustidx = offset / mp->m_sb.sb_inodesize; 487 - if (XFS_TEST_ERROR(error != 0, 488 - mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK, 489 - XFS_RANDOM_BULKSTAT_READ_CHUNK)) { 490 - bp = NULL; 491 - ubleft = 0; 492 - rval = error; 493 - break; 494 - } 495 - } 496 615 } 497 616 ino = XFS_AGINO_TO_INO(mp, agno, agino); 498 617 bno = XFS_AGB_TO_DADDR(mp, agno, agbno); ··· 477 658 * when the chunk is used up. 478 659 */ 479 660 irbp->ir_freecount++; 480 - if (!xfs_bulkstat_use_dinode(mp, flags, bp, 481 - clustidx, &dip)) { 482 - lastino = ino; 483 - continue; 484 - } 485 - /* 486 - * If we need to do an iget, cannot hold bp. 487 - * Drop it, until starting the next cluster. 488 - */ 489 - if ((flags & BULKSTAT_FG_INLINE) && !dip) { 490 - if (bp) 491 - xfs_buf_relse(bp); 492 - bp = NULL; 493 - } 494 661 495 662 /* 496 663 * Get the inode and fill in a single buffer. 497 - * BULKSTAT_FG_QUICK uses dip to fill it in. 498 - * BULKSTAT_FG_IGET uses igets. 
499 - * BULKSTAT_FG_INLINE uses dip if we have an 500 - * inline attr fork, else igets. 501 - * See: xfs_bulkstat_one & xfs_dm_bulkstat_one. 502 - * This is also used to count inodes/blks, etc 503 - * in xfs_qm_quotacheck. 504 664 */ 505 665 ubused = statstruct_size; 506 - error = formatter(mp, ino, ubufp, 507 - ubleft, private_data, 508 - bno, &ubused, dip, &fmterror); 666 + error = formatter(mp, ino, ubufp, ubleft, 667 + &ubused, &fmterror); 509 668 if (fmterror == BULKSTAT_RV_NOTHING) { 510 669 if (error && error != ENOENT && 511 670 error != EINVAL) { ··· 575 778 */ 576 779 577 780 ino = (xfs_ino_t)*lastinop; 578 - error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 579 - NULL, 0, NULL, NULL, &res); 781 + error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 0, &res); 580 782 if (error) { 581 783 /* 582 784 * Special case way failed, do it the "long" way ··· 584 788 (*lastinop)--; 585 789 count = 1; 586 790 if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one, 587 - NULL, sizeof(xfs_bstat_t), buffer, 588 - BULKSTAT_FG_IGET, done)) 791 + sizeof(xfs_bstat_t), buffer, done)) 589 792 return error; 590 793 if (count == 0 || (xfs_ino_t)*lastinop != ino) 591 794 return error == EFSCORRUPTED ?
-17
fs/xfs/xfs_itable.h
··· 27 27 xfs_ino_t ino, 28 28 void __user *buffer, 29 29 int ubsize, 30 - void *private_data, 31 - xfs_daddr_t bno, 32 30 int *ubused, 33 - void *dip, 34 31 int *stat); 35 32 36 33 /* ··· 38 41 #define BULKSTAT_RV_GIVEUP 2 39 42 40 43 /* 41 - * Values for bulkstat flag argument. 42 - */ 43 - #define BULKSTAT_FG_IGET 0x1 /* Go through the buffer cache */ 44 - #define BULKSTAT_FG_QUICK 0x2 /* No iget, walk the dinode cluster */ 45 - #define BULKSTAT_FG_INLINE 0x4 /* No iget if inline attrs */ 46 - 47 - /* 48 44 * Return stat information in bulk (by-inode) for the filesystem. 49 45 */ 50 46 int /* error status */ ··· 46 56 xfs_ino_t *lastino, /* last inode returned */ 47 57 int *count, /* size of buffer/count returned */ 48 58 bulkstat_one_pf formatter, /* func that'd fill a single buf */ 49 - void *private_data, /* private data for formatter */ 50 59 size_t statstruct_size,/* sizeof struct that we're filling */ 51 60 char __user *ubuffer,/* buffer with inode stats */ 52 - int flags, /* flag to control access method */ 53 61 int *done); /* 1 if there are more stats to get */ 54 62 55 63 int ··· 70 82 void __user *buffer, 71 83 int ubsize, 72 84 bulkstat_one_fmt_pf formatter, 73 - xfs_daddr_t bno, 74 85 int *ubused, 75 - void *dibuff, 76 86 int *stat); 77 87 78 88 int ··· 79 93 xfs_ino_t ino, 80 94 void __user *buffer, 81 95 int ubsize, 82 - void *private_data, 83 - xfs_daddr_t bno, 84 96 int *ubused, 85 - void *dibuff, 86 97 int *stat); 87 98 88 99 typedef int (*inumbers_fmt_pf)(
+1 -1
fs/xfs/xfs_log_recover.c
··· 3198 3198 int error; 3199 3199 3200 3200 ino = XFS_AGINO_TO_INO(mp, agno, agino); 3201 - error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0); 3201 + error = xfs_iget(mp, NULL, ino, 0, 0, &ip); 3202 3202 if (error) 3203 3203 goto fail; 3204 3204
+1 -1
fs/xfs/xfs_mount.c
··· 1300 1300 * Get and sanity-check the root inode. 1301 1301 * Save the pointer to it in the mount structure. 1302 1302 */ 1303 - error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0); 1303 + error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip); 1304 1304 if (error) { 1305 1305 cmn_err(CE_WARN, "XFS: failed to read root inode"); 1306 1306 goto out_log_dealloc;
+2 -2
fs/xfs/xfs_rtalloc.c
··· 2277 2277 sbp = &mp->m_sb; 2278 2278 if (sbp->sb_rbmino == NULLFSINO) 2279 2279 return 0; 2280 - error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip, 0); 2280 + error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip); 2281 2281 if (error) 2282 2282 return error; 2283 2283 ASSERT(mp->m_rbmip != NULL); 2284 2284 ASSERT(sbp->sb_rsumino != NULLFSINO); 2285 - error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip, 0); 2285 + error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip); 2286 2286 if (error) { 2287 2287 IRELE(mp->m_rbmip); 2288 2288 return error;
+1 -1
fs/xfs/xfs_trans_inode.c
··· 62 62 { 63 63 int error; 64 64 65 - error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp, 0); 65 + error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp); 66 66 if (!error && tp) 67 67 xfs_trans_ijoin(tp, *ipp, lock_flags); 68 68 return error;
+1 -1
fs/xfs/xfs_vnodeops.c
··· 1269 1269 if (error) 1270 1270 goto out; 1271 1271 1272 - error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp, 0); 1272 + error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp); 1273 1273 if (error) 1274 1274 goto out_free_name; 1275 1275
-1
include/linux/agp_backend.h
··· 79 79 u32 physical; 80 80 bool is_bound; 81 81 bool is_flushed; 82 - bool vmalloc_flag; 83 82 /* list of agp_memory mapped to the aperture */ 84 83 struct list_head mapped_list; 85 84 /* DMA-mapped addresses */
+2 -2
include/linux/backing-dev.h
··· 105 105 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); 106 106 void bdi_unregister(struct backing_dev_info *bdi); 107 107 int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); 108 - void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb, 109 - long nr_pages); 108 + void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages); 109 + void bdi_start_background_writeback(struct backing_dev_info *bdi); 110 110 int bdi_writeback_task(struct bdi_writeback *wb); 111 111 int bdi_has_dirty_io(struct backing_dev_info *bdi); 112 112 void bdi_arm_supers_timer(void);
+9 -1
include/linux/compiler-gcc.h
··· 58 58 * naked functions because then mcount is called without stack and frame pointer 59 59 * being set up and there is no chance to restore the lr register to the value 60 60 * before mcount was called. 61 + * 62 + * The asm() bodies of naked functions often depend on standard calling conventions, 63 + * therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce 64 + * this, so we must do so ourselves. See GCC PR44290. 61 65 */ 62 - #define __naked __attribute__((naked)) notrace 66 + #define __naked __attribute__((naked)) noinline __noclone notrace 63 67 64 68 #define __noreturn __attribute__((noreturn)) 65 69 ··· 89 85 #define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) 90 86 #define gcc_header(x) _gcc_header(x) 91 87 #include gcc_header(__GNUC__) 88 + 89 + #if !defined(__noclone) 90 + #define __noclone /* not needed */ 91 + #endif
+4
include/linux/compiler-gcc4.h
··· 48 48 * unreleased. Really, we need to have autoconf for the kernel. 49 49 */ 50 50 #define unreachable() __builtin_unreachable() 51 + 52 + /* Mark a function definition as prohibited from being cloned. */ 53 + #define __noclone __attribute__((__noclone__)) 54 + 51 55 #endif 52 56 53 57 #endif
+1 -1
include/linux/drbd.h
··· 53 53 54 54 55 55 extern const char *drbd_buildtag(void); 56 - #define REL_VERSION "8.3.8rc2" 56 + #define REL_VERSION "8.3.8" 57 57 #define API_VERSION 88 58 58 #define PRO_VERSION_MIN 86 59 59 #define PRO_VERSION_MAX 94
+2 -2
include/linux/dynamic_debug.h
··· 40 40 const char *modname); 41 41 42 42 #if defined(CONFIG_DYNAMIC_DEBUG) 43 - extern int ddebug_remove_module(char *mod_name); 43 + extern int ddebug_remove_module(const char *mod_name); 44 44 45 45 #define __dynamic_dbg_enabled(dd) ({ \ 46 46 int __ret = 0; \ ··· 73 73 74 74 #else 75 75 76 - static inline int ddebug_remove_module(char *mod) 76 + static inline int ddebug_remove_module(const char *mod) 77 77 { 78 78 return 0; 79 79 }
+2 -2
include/linux/fb.h
··· 786 786 #define FBINFO_MISC_USEREVENT 0x10000 /* event request 787 787 from userspace */ 788 788 #define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ 789 - #define FBINFO_MISC_FIRMWARE 0x40000 /* a replaceable firmware 790 - inited framebuffer */ 791 789 792 790 /* A driver may set this flag to indicate that it does want a set_par to be 793 791 * called every time when fbcon_switch is executed. The advantage is that with ··· 799 801 */ 800 802 #define FBINFO_MISC_ALWAYS_SETPAR 0x40000 801 803 804 + /* where the fb is a firmware driver, and can be replaced with a proper one */ 805 + #define FBINFO_MISC_FIRMWARE 0x80000 802 806 /* 803 807 * Host and GPU endianness differ. 804 808 */
+15
include/linux/list.h
··· 544 544 &pos->member != (head); \ 545 545 pos = n, n = list_entry(n->member.prev, typeof(*n), member)) 546 546 547 + /** 548 + * list_safe_reset_next - reset a stale list_for_each_entry_safe loop 549 + * @pos: the loop cursor used in the list_for_each_entry_safe loop 550 + * @n: temporary storage used in list_for_each_entry_safe 551 + * @member: the name of the list_struct within the struct. 552 + * 553 + * list_safe_reset_next is not safe to use in general if the list may be 554 + * modified concurrently (eg. the lock is dropped in the loop body). An 555 + * exception to this is if the cursor element (pos) is pinned in the list, 556 + * and list_safe_reset_next is called after re-taking the lock and before 557 + * completing the current iteration of the loop body. 558 + */ 559 + #define list_safe_reset_next(pos, n, member) \ 560 + n = list_entry(pos->member.next, typeof(*pos), member) 561 + 547 562 /* 548 563 * Double linked lists with a single pointer list head. 549 564 * Mostly useful for hash tables where the two pointer list head is
+1
include/linux/pci_ids.h
··· 1261 1261 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 1262 1262 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8 1263 1263 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2 1264 + #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85 1264 1265 1265 1266 #define PCI_VENDOR_ID_IMS 0x10e0 1266 1267 #define PCI_DEVICE_ID_IMS_TT128 0x9128
+1 -1
include/linux/sched.h
··· 139 139 extern unsigned long nr_running(void); 140 140 extern unsigned long nr_uninterruptible(void); 141 141 extern unsigned long nr_iowait(void); 142 - extern unsigned long nr_iowait_cpu(void); 142 + extern unsigned long nr_iowait_cpu(int cpu); 143 143 extern unsigned long this_cpu_load(void); 144 144 145 145
+12
init/main.c
··· 422 422 * gcc-3.4 accidentally inlines this function, so use noinline. 423 423 */ 424 424 425 + static __initdata DECLARE_COMPLETION(kthreadd_done); 426 + 425 427 static noinline void __init_refok rest_init(void) 426 428 __releases(kernel_lock) 427 429 { 428 430 int pid; 429 431 430 432 rcu_scheduler_starting(); 433 + /* 434 + * We need to spawn init first so that it obtains pid 1, however 435 + * the init task will end up wanting to create kthreads, which, if 436 + * we schedule it before we create kthreadd, will OOPS. 437 + */ 431 438 kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); 432 439 numa_default_policy(); 433 440 pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); 434 441 rcu_read_lock(); 435 442 kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns); 436 443 rcu_read_unlock(); 444 + complete(&kthreadd_done); 437 445 unlock_kernel(); 438 446 439 447 /* ··· 857 849 858 850 static int __init kernel_init(void * unused) 859 851 { 852 + /* 853 + * Wait until kthreadd is all set-up. 854 + */ 855 + wait_for_completion(&kthreadd_done); 860 856 lock_kernel(); 861 857 862 858 /*
+4 -13
kernel/futex.c
··· 429 429 static struct task_struct * futex_find_get_task(pid_t pid) 430 430 { 431 431 struct task_struct *p; 432 - const struct cred *cred = current_cred(), *pcred; 433 432 434 433 rcu_read_lock(); 435 434 p = find_task_by_vpid(pid); 436 - if (!p) { 437 - p = ERR_PTR(-ESRCH); 438 - } else { 439 - pcred = __task_cred(p); 440 - if (cred->euid != pcred->euid && 441 - cred->euid != pcred->uid) 442 - p = ERR_PTR(-ESRCH); 443 - else 444 - get_task_struct(p); 445 - } 435 + if (p) 436 + get_task_struct(p); 446 437 447 438 rcu_read_unlock(); 448 439 ··· 555 564 if (!pid) 556 565 return -ESRCH; 557 566 p = futex_find_get_task(pid); 558 - if (IS_ERR(p)) 559 - return PTR_ERR(p); 567 + if (!p) 568 + return -ESRCH; 560 569 561 570 /* 562 571 * We need to look at the task state flags to figure out,
+4 -3
kernel/kexec.c
··· 1089 1089 1090 1090 size_t crash_get_memory_size(void) 1091 1091 { 1092 - size_t size; 1092 + size_t size = 0; 1093 1093 mutex_lock(&kexec_mutex); 1094 - size = crashk_res.end - crashk_res.start + 1; 1094 + if (crashk_res.end != crashk_res.start) 1095 + size = crashk_res.end - crashk_res.start + 1; 1095 1096 mutex_unlock(&kexec_mutex); 1096 1097 return size; 1097 1098 } ··· 1135 1134 1136 1135 free_reserved_phys_range(end, crashk_res.end); 1137 1136 1138 - if (start == end) 1137 + if ((start == end) && (crashk_res.parent != NULL)) 1139 1138 release_resource(&crashk_res); 1140 1139 crashk_res.end = end - 1; 1141 1140
+15 -8
kernel/module.c
··· 2062 2062 #endif 2063 2063 } 2064 2064 2065 + static void dynamic_debug_remove(struct _ddebug *debug) 2066 + { 2067 + if (debug) 2068 + ddebug_remove_module(debug->modname); 2069 + } 2070 + 2065 2071 static void *module_alloc_update_bounds(unsigned long size) 2066 2072 { 2067 2073 void *ret = module_alloc(size); ··· 2130 2124 void *ptr = NULL; /* Stops spurious gcc warning */ 2131 2125 unsigned long symoffs, stroffs, *strmap; 2132 2126 void __percpu *percpu; 2127 + struct _ddebug *debug = NULL; 2128 + unsigned int num_debug = 0; 2133 2129 2134 2130 mm_segment_t old_fs; 2135 2131 ··· 2484 2476 kfree(strmap); 2485 2477 strmap = NULL; 2486 2478 2487 - if (!mod->taints) { 2488 - struct _ddebug *debug; 2489 - unsigned int num_debug; 2490 - 2479 + if (!mod->taints) 2491 2480 debug = section_objs(hdr, sechdrs, secstrings, "__verbose", 2492 2481 sizeof(*debug), &num_debug); 2493 - if (debug) 2494 - dynamic_debug_setup(debug, num_debug); 2495 - } 2496 2482 2497 2483 err = module_finalize(hdr, sechdrs, mod); 2498 2484 if (err < 0) ··· 2528 2526 goto unlock; 2529 2527 } 2530 2528 2529 + if (debug) 2530 + dynamic_debug_setup(debug, num_debug); 2531 + 2531 2532 /* Find duplicate symbols */ 2532 2533 err = verify_export_symbols(mod); 2533 2534 if (err < 0) 2534 - goto unlock; 2535 + goto ddebug; 2535 2536 2536 2537 list_add_rcu(&mod->list, &modules); 2537 2538 mutex_unlock(&module_mutex); ··· 2562 2557 mutex_lock(&module_mutex); 2563 2558 /* Unlink carefully: kallsyms could be walking list. */ 2564 2559 list_del_rcu(&mod->list); 2560 + ddebug: 2561 + dynamic_debug_remove(debug); 2565 2562 unlock: 2566 2563 mutex_unlock(&module_mutex); 2567 2564 synchronize_sched();
+2 -2
kernel/sched.c
··· 2873 2873 return sum; 2874 2874 } 2875 2875 2876 - unsigned long nr_iowait_cpu(void) 2876 + unsigned long nr_iowait_cpu(int cpu) 2877 2877 { 2878 - struct rq *this = this_rq(); 2878 + struct rq *this = cpu_rq(cpu); 2879 2879 return atomic_read(&this->nr_iowait); 2880 2880 } 2881 2881
+8 -8
kernel/time/tick-sched.c
··· 154 154 * Updates the per cpu time idle statistics counters 155 155 */ 156 156 static void 157 - update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time) 157 + update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time) 158 158 { 159 159 ktime_t delta; 160 160 161 161 if (ts->idle_active) { 162 162 delta = ktime_sub(now, ts->idle_entrytime); 163 163 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); 164 - if (nr_iowait_cpu() > 0) 164 + if (nr_iowait_cpu(cpu) > 0) 165 165 ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); 166 166 ts->idle_entrytime = now; 167 167 } ··· 175 175 { 176 176 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 177 177 178 - update_ts_time_stats(ts, now, NULL); 178 + update_ts_time_stats(cpu, ts, now, NULL); 179 179 ts->idle_active = 0; 180 180 181 181 sched_clock_idle_wakeup_event(0); 182 182 } 183 183 184 - static ktime_t tick_nohz_start_idle(struct tick_sched *ts) 184 + static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts) 185 185 { 186 186 ktime_t now; 187 187 188 188 now = ktime_get(); 189 189 190 - update_ts_time_stats(ts, now, NULL); 190 + update_ts_time_stats(cpu, ts, now, NULL); 191 191 192 192 ts->idle_entrytime = now; 193 193 ts->idle_active = 1; ··· 216 216 if (!tick_nohz_enabled) 217 217 return -1; 218 218 219 - update_ts_time_stats(ts, ktime_get(), last_update_time); 219 + update_ts_time_stats(cpu, ts, ktime_get(), last_update_time); 220 220 221 221 return ktime_to_us(ts->idle_sleeptime); 222 222 } ··· 242 242 if (!tick_nohz_enabled) 243 243 return -1; 244 244 245 - update_ts_time_stats(ts, ktime_get(), last_update_time); 245 + update_ts_time_stats(cpu, ts, ktime_get(), last_update_time); 246 246 247 247 return ktime_to_us(ts->iowait_sleeptime); 248 248 } ··· 284 284 */ 285 285 ts->inidle = 1; 286 286 287 - now = tick_nohz_start_idle(ts); 287 + now = tick_nohz_start_idle(cpu, ts); 288 288 289 289 /* 290 290 * If this cpu is offline 
and it is the one which updates
+1 -1
lib/dynamic_debug.c
··· 692 692 * Called in response to a module being unloaded. Removes 693 693 * any ddebug_table's which point at the module. 694 694 */ 695 - int ddebug_remove_module(char *mod_name) 695 + int ddebug_remove_module(const char *mod_name) 696 696 { 697 697 struct ddebug_table *dt, *nextdt; 698 698 int ret = -ENOENT;
-1
lib/genalloc.c
··· 128 128 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); 129 129 130 130 end_bit = (chunk->end_addr - chunk->start_addr) >> order; 131 - end_bit -= nbits + 1; 132 131 133 132 spin_lock_irqsave(&chunk->lock, flags); 134 133 start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
+3 -1
mm/memcontrol.c
··· 1370 1370 1371 1371 static void memcg_oom_recover(struct mem_cgroup *mem) 1372 1372 { 1373 - if (mem->oom_kill_disable && atomic_read(&mem->oom_lock)) 1373 + if (atomic_read(&mem->oom_lock)) 1374 1374 memcg_wakeup_oom(mem); 1375 1375 } 1376 1376 ··· 3781 3781 return -EINVAL; 3782 3782 } 3783 3783 mem->oom_kill_disable = val; 3784 + if (!val) 3785 + memcg_oom_recover(mem); 3784 3786 cgroup_unlock(); 3785 3787 return 0; 3786 3788 }
+5 -4
mm/mempolicy.c
··· 2094 2094 NODEMASK_SCRATCH(scratch); 2095 2095 2096 2096 if (!scratch) 2097 - return; 2097 + goto put_mpol; 2098 2098 /* contextualize the tmpfs mount point mempolicy */ 2099 2099 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 2100 2100 if (IS_ERR(new)) ··· 2103 2103 task_lock(current); 2104 2104 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 2105 2105 task_unlock(current); 2106 - mpol_put(mpol); /* drop our ref on sb mpol */ 2107 2106 if (ret) 2108 - goto put_free; 2107 + goto put_new; 2109 2108 2110 2109 /* Create pseudo-vma that contains just the policy */ 2111 2110 memset(&pvma, 0, sizeof(struct vm_area_struct)); 2112 2111 pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 2113 2112 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 2114 2113 2115 - put_free: 2114 + put_new: 2116 2115 mpol_put(new); /* drop initial ref */ 2117 2116 free_scratch: 2118 2117 NODEMASK_SCRATCH_FREE(scratch); 2118 + put_mpol: 2119 + mpol_put(mpol); /* drop our incoming ref on sb mpol */ 2119 2120 } 2120 2121 } 2121 2122
+2 -3
mm/page-writeback.c
··· 597 597 (!laptop_mode && ((global_page_state(NR_FILE_DIRTY) 598 598 + global_page_state(NR_UNSTABLE_NFS)) 599 599 > background_thresh))) 600 - bdi_start_writeback(bdi, NULL, 0); 600 + bdi_start_background_writeback(bdi); 601 601 } 602 602 603 603 void set_page_dirty_balance(struct page *page, int page_mkwrite) ··· 705 705 * We want to write everything out, not just down to the dirty 706 706 * threshold 707 707 */ 708 - 709 708 if (bdi_has_dirty_io(&q->backing_dev_info)) 710 - bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages); 709 + bdi_start_writeback(&q->backing_dev_info, nr_pages); 711 710 } 712 711 713 712 /*
+1 -1
scripts/package/Makefile
··· 44 44 fi 45 45 $(MAKE) clean 46 46 $(PREV) ln -sf $(srctree) $(KERNELPATH) 47 - $(CONFIG_SHELL) $(srctree)/scripts/setlocalversion > $(objtree)/.scmversion 47 + $(CONFIG_SHELL) $(srctree)/scripts/setlocalversion --scm-only > $(objtree)/.scmversion 48 48 $(PREV) tar -cz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(KERNELPATH)/. 49 49 $(PREV) rm $(KERNELPATH) 50 50 rm -f $(objtree)/.scmversion
+133 -48
scripts/setlocalversion
··· 10 10 # 11 11 12 12 usage() { 13 - echo "Usage: $0 [srctree]" >&2 13 + echo "Usage: $0 [--scm-only] [srctree]" >&2 14 14 exit 1 15 15 } 16 16 17 - cd "${1:-.}" || usage 17 + scm_only=false 18 + srctree=. 19 + if test "$1" = "--scm-only"; then 20 + scm_only=true 21 + shift 22 + fi 23 + if test $# -gt 0; then 24 + srctree=$1 25 + shift 26 + fi 27 + if test $# -gt 0 -o ! -d "$srctree"; then 28 + usage 29 + fi 18 30 19 - # Check for git and a git repo. 20 - if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then 31 + scm_version() 32 + { 33 + local short=false 21 34 22 - # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore it, 23 - # because this version is defined in the top level Makefile. 24 - if [ -z "`git describe --exact-match 2>/dev/null`" ]; then 35 + cd "$srctree" 36 + if test -e .scmversion; then 37 + cat "$_" 38 + return 39 + fi 40 + if test "$1" = "--short"; then 41 + short=true 42 + fi 25 43 26 - # If we are past a tagged commit (like "v2.6.30-rc5-302-g72357d5"), 27 - # we pretty print it. 28 - if atag="`git describe 2>/dev/null`"; then 29 - echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}' 44 + # Check for git and a git repo. 45 + if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then 30 46 31 - # If we don't have a tag at all we print -g{commitish}. 32 - else 33 - printf '%s%s' -g $head 47 + # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore 48 + # it, because this version is defined in the top level Makefile. 49 + if [ -z "`git describe --exact-match 2>/dev/null`" ]; then 50 + 51 + # If only the short version is requested, don't bother 52 + # running further git commands 53 + if $short; then 54 + echo "+" 55 + return 56 + fi 57 + # If we are past a tagged commit (like 58 + # "v2.6.30-rc5-302-g72357d5"), we pretty print it. 59 + if atag="`git describe 2>/dev/null`"; then 60 + echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}' 61 + 62 + # If we don't have a tag at all we print -g{commitish}. 
63 + else 64 + printf '%s%s' -g $head 65 + fi 34 66 fi 67 + 68 + # Is this git on svn? 69 + if git config --get svn-remote.svn.url >/dev/null; then 70 + printf -- '-svn%s' "`git svn find-rev $head`" 71 + fi 72 + 73 + # Update index only on r/w media 74 + [ -w . ] && git update-index --refresh --unmerged > /dev/null 75 + 76 + # Check for uncommitted changes 77 + if git diff-index --name-only HEAD | grep -v "^scripts/package" \ 78 + | read dummy; then 79 + printf '%s' -dirty 80 + fi 81 + 82 + # All done with git 83 + return 35 84 fi 36 85 37 - # Is this git on svn? 38 - if git config --get svn-remote.svn.url >/dev/null; then 39 - printf -- '-svn%s' "`git svn find-rev $head`" 86 + # Check for mercurial and a mercurial repo. 87 + if hgid=`hg id 2>/dev/null`; then 88 + tag=`printf '%s' "$hgid" | cut -d' ' -f2` 89 + 90 + # Do we have an untagged version? 91 + if [ -z "$tag" -o "$tag" = tip ]; then 92 + id=`printf '%s' "$hgid" | sed 's/[+ ].*//'` 93 + printf '%s%s' -hg "$id" 94 + fi 95 + 96 + # Are there uncommitted changes? 97 + # These are represented by + after the changeset id. 98 + case "$hgid" in 99 + *+|*+\ *) printf '%s' -dirty ;; 100 + esac 101 + 102 + # All done with mercurial 103 + return 40 104 fi 41 105 42 - # Update index only on r/w media 43 - [ -w . ] && git update-index --refresh --unmerged > /dev/null 106 + # Check for svn and a svn repo. 
107 + if rev=`svn info 2>/dev/null | grep '^Last Changed Rev'`; then 108 + rev=`echo $rev | awk '{print $NF}'` 109 + printf -- '-svn%s' "$rev" 44 110 45 - # Check for uncommitted changes 46 - if git diff-index --name-only HEAD | grep -v "^scripts/package" \ 47 - | read dummy; then 48 - printf '%s' -dirty 111 + # All done with svn 112 + return 49 113 fi 114 + } 50 115 51 - # All done with git 116 + collect_files() 117 + { 118 + local file res 119 + 120 + for file; do 121 + case "$file" in 122 + *\~*) 123 + continue 124 + ;; 125 + esac 126 + if test -e "$file"; then 127 + res="$res$(cat "$file")" 128 + fi 129 + done 130 + echo "$res" 131 + } 132 + 133 + if $scm_only; then 134 + scm_version 52 135 exit 53 136 fi 54 137 55 - # Check for mercurial and a mercurial repo. 56 - if hgid=`hg id 2>/dev/null`; then 57 - tag=`printf '%s' "$hgid" | cut -d' ' -f2` 138 + if test -e include/config/auto.conf; then 139 + source "$_" 140 + else 141 + echo "Error: kernelrelease not valid - run 'make prepare' to update it" 142 + exit 1 143 + fi 58 144 59 - # Do we have an untagged version? 60 - if [ -z "$tag" -o "$tag" = tip ]; then 61 - id=`printf '%s' "$hgid" | sed 's/[+ ].*//'` 62 - printf '%s%s' -hg "$id" 145 + # localversion* files in the build and source directory 146 + res="$(collect_files localversion*)" 147 + if test ! 
"$srctree" -ef .; then
 148 + res="$res$(collect_files "$srctree"/localversion*)"
 149 + fi
 150 + 
 151 + # CONFIG_LOCALVERSION and LOCALVERSION (if set)
 152 + res="${res}${CONFIG_LOCALVERSION}${LOCALVERSION}"
 153 + 
 154 + # scm version string if not at a tagged commit
 155 + if test "$CONFIG_LOCALVERSION_AUTO" = "y"; then
 156 + # full scm version string
 157 + res="$res$(scm_version)"
 158 + else
 159 + # append a plus sign if the repository is not in a clean tagged
 160 + # state and LOCALVERSION= is not specified
 161 + if test "${LOCALVERSION+set}" != "set"; then
 162 + scm=$(scm_version --short)
 163 + res="$res${scm:++}"
 63 164 fi
 64 - 
 65 - # Are there uncommitted changes?
 66 - # These are represented by + after the changeset id.
 67 - case "$hgid" in
 68 - *+|*+\ *) printf '%s' -dirty ;;
 69 - esac
 70 - 
 71 - # All done with mercurial
 72 - exit
 73 165 fi
 74 166 
 75 - # Check for svn and a svn repo.
 76 - if rev=`svn info 2>/dev/null | grep '^Last Changed Rev'`; then
 77 - rev=`echo $rev | awk '{print $NF}'`
 78 - printf -- '-svn%s' "$rev"
 79 - 
 80 - # All done with svn
 81 - exit
 82 - fi
 167 + echo "$res"
+10 -1
tools/perf/util/thread.c
··· 7 7 #include "util.h" 8 8 #include "debug.h" 9 9 10 + /* Skip "." and ".." directories */ 11 + static int filter(const struct dirent *dir) 12 + { 13 + if (dir->d_name[0] == '.') 14 + return 0; 15 + else 16 + return 1; 17 + } 18 + 10 19 int find_all_tid(int pid, pid_t ** all_tid) 11 20 { 12 21 char name[256]; ··· 25 16 int i; 26 17 27 18 sprintf(name, "/proc/%d/task", pid); 28 - items = scandir(name, &namelist, NULL, NULL); 19 + items = scandir(name, &namelist, filter, NULL); 29 20 if (items <= 0) 30 21 return -ENOENT; 31 22 *all_tid = malloc(sizeof(pid_t) * items);