Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'merge'

+3243 -2010
+5
.gitignore
··· 30 30 include/linux/autoconf.h 31 31 include/linux/compile.h 32 32 include/linux/version.h 33 + include/linux/utsrelease.h 33 34 34 35 # stgit generated dirs 35 36 patches-* 37 + 38 + # quilt's files 39 + patches 40 + series
+4 -1
Documentation/cpu-freq/user-guide.txt
··· 153 153 that some governors won't load - they only 154 154 work on some specific architectures or 155 155 processors. 156 - scaling_min_freq and 156 + scaling_min_freq and 157 157 scaling_max_freq show the current "policy limits" (in 158 158 kHz). By echoing new values into these 159 159 files, you can change these limits. 160 + NOTE: when setting a policy you need to 161 + first set scaling_max_freq, then 162 + scaling_min_freq. 160 163 161 164 162 165 If you have selected the "userspace" governor which allows you to
-2
Documentation/infiniband/ipoib.txt
··· 51 51 52 52 References 53 53 54 - IETF IP over InfiniBand (ipoib) Working Group 55 - http://ietf.org/html.charters/ipoib-charter.html 56 54 Transmission of IP over InfiniBand (IPoIB) (RFC 4391) 57 55 http://ietf.org/rfc/rfc4391.txt 58 56 IP over InfiniBand (IPoIB) Architecture (RFC 4392)
+2 -3
Documentation/sysctl/kernel.txt
··· 211 211 212 212 0: try to continue operation 213 213 214 - 1: delay a few seconds (to give klogd time to record the oops output) and 215 - then panic. If the `panic' sysctl is also non-zero then the machine will 216 - be rebooted. 214 + 1: panic immediatly. If the `panic' sysctl is also non-zero then the 215 + machine will be rebooted. 217 216 218 217 ============================================================== 219 218
+1 -1
Documentation/usb/proc_usb_info.txt
··· 59 59 would issue more ioctls to the device to communicate to it using 60 60 control, bulk, or other kinds of USB transfers. The IOCTLs are 61 61 listed in the <linux/usbdevice_fs.h> file, and at this writing the 62 - source code (linux/drivers/usb/devio.c) is the primary reference 62 + source code (linux/drivers/usb/core/devio.c) is the primary reference 63 63 for how to access devices through those files. 64 64 65 65 Note that since by default these BBB/DDD files are writable only by
+1 -2
Documentation/usb/usb-help.txt
··· 5 5 Documentation/usb/*, see the following: 6 6 7 7 Linux-USB project: http://www.linux-usb.org 8 - mirrors at http://www.suse.cz/development/linux-usb/ 9 - and http://usb.in.tum.de/linux-usb/ 8 + mirrors at http://usb.in.tum.de/linux-usb/ 10 9 and http://it.linux-usb.org 11 10 Linux USB Guide: http://linux-usb.sourceforge.net 12 11 Linux-USB device overview (working devices and drivers):
+21
MAINTAINERS
··· 214 214 T: git kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git 215 215 S: Maintained 216 216 217 + ACPI PCI HOTPLUG DRIVER 218 + P: Kristen Carlson Accardi 219 + M: kristen.c.accardi@intel.com 220 + L: pcihpd-discuss@lists.sourceforge.net 221 + S: Maintained 222 + 217 223 AD1816 SOUND DRIVER 218 224 P: Thorsten Knabe 219 225 M: Thorsten Knabe <linux@thorsten-knabe.de> ··· 297 291 L: info-linux@geode.amd.com 298 292 W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html 299 293 S: Supported 294 + 295 + AOA (Apple Onboard Audio) ALSA DRIVER 296 + P: Johannes Berg 297 + M: johannes@sipsolutions.net 298 + L: linuxppc-dev@ozlabs.org 299 + L: alsa-devel@alsa-project.org 300 + S: Maintained 300 301 301 302 APM DRIVER 302 303 P: Stephen Rothwell ··· 2646 2633 P: David Brownell 2647 2634 M: dbrownell@users.sourceforge.net 2648 2635 L: spi-devel-general@lists.sourceforge.net 2636 + S: Maintained 2637 + 2638 + STABLE BRANCH: 2639 + P: Greg Kroah-Hartman 2640 + M: greg@kroah.com 2641 + P: Chris Wright 2642 + M: chrisw@sous-sol.org 2643 + L: stable@kernel.org 2649 2644 S: Maintained 2650 2645 2651 2646 TPM DEVICE DRIVER
+4 -3
Makefile
··· 1 1 VERSION = 2 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 18 4 - EXTRAVERSION = -rc3 4 + EXTRAVERSION = -rc4 5 5 NAME=Crazed Snow-Weasel 6 6 7 7 # *DOCUMENTATION* ··· 310 310 CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ 311 311 -fno-strict-aliasing -fno-common 312 312 # Force gcc to behave correct even for buggy distributions 313 - CFLAGS += $(call cc-option, -fno-stack-protector-all \ 314 - -fno-stack-protector) 313 + CFLAGS += $(call cc-option, -fno-stack-protector) 314 + 315 315 AFLAGS := -D__ASSEMBLY__ 316 316 317 317 # Read KERNELRELEASE from include/config/kernel.release (if it exists) ··· 368 368 369 369 no-dot-config-targets := clean mrproper distclean \ 370 370 cscope TAGS tags help %docs check% \ 371 + include/linux/version.h headers_% \ 371 372 kernelrelease kernelversion 372 373 373 374 config-targets := 0
+2 -1
arch/arm/common/gic.c
··· 95 95 } 96 96 #endif 97 97 98 - static struct irqchip gic_chip = { 98 + static struct irq_chip gic_chip = { 99 + .name = "GIC", 99 100 .ack = gic_ack_irq, 100 101 .mask = gic_mask_irq, 101 102 .unmask = gic_unmask_irq,
+10 -5
arch/arm/common/locomo.c
··· 204 204 locomo_writel(r, mapbase + LOCOMO_ICR); 205 205 } 206 206 207 - static struct irqchip locomo_chip = { 207 + static struct irq_chip locomo_chip = { 208 + .name = "LOCOMO", 208 209 .ack = locomo_ack_irq, 209 210 .mask = locomo_mask_irq, 210 211 .unmask = locomo_unmask_irq, ··· 250 249 locomo_writel(r, mapbase + LOCOMO_KEYBOARD + LOCOMO_KIC); 251 250 } 252 251 253 - static struct irqchip locomo_key_chip = { 252 + static struct irq_chip locomo_key_chip = { 253 + .name = "LOCOMO-key", 254 254 .ack = locomo_key_ack_irq, 255 255 .mask = locomo_key_mask_irq, 256 256 .unmask = locomo_key_unmask_irq, ··· 314 312 locomo_writel(r, mapbase + LOCOMO_GIE); 315 313 } 316 314 317 - static struct irqchip locomo_gpio_chip = { 315 + static struct irq_chip locomo_gpio_chip = { 316 + .name = "LOCOMO-gpio", 318 317 .ack = locomo_gpio_ack_irq, 319 318 .mask = locomo_gpio_mask_irq, 320 319 .unmask = locomo_gpio_unmask_irq, ··· 360 357 locomo_writel(r, mapbase + LOCOMO_LTINT); 361 358 } 362 359 363 - static struct irqchip locomo_lt_chip = { 360 + static struct irq_chip locomo_lt_chip = { 361 + .name = "LOCOMO-lt", 364 362 .ack = locomo_lt_ack_irq, 365 363 .mask = locomo_lt_mask_irq, 366 364 .unmask = locomo_lt_unmask_irq, ··· 422 418 locomo_writel(r, mapbase + LOCOMO_SPIIE); 423 419 } 424 420 425 - static struct irqchip locomo_spi_chip = { 421 + static struct irq_chip locomo_spi_chip = { 422 + .name = "LOCOMO-spi", 426 423 .ack = locomo_spi_ack_irq, 427 424 .mask = locomo_spi_mask_irq, 428 425 .unmask = locomo_spi_unmask_irq,
+4 -2
arch/arm/common/sa1111.c
··· 272 272 return 0; 273 273 } 274 274 275 - static struct irqchip sa1111_low_chip = { 275 + static struct irq_chip sa1111_low_chip = { 276 + .name = "SA1111-l", 276 277 .ack = sa1111_ack_irq, 277 278 .mask = sa1111_mask_lowirq, 278 279 .unmask = sa1111_unmask_lowirq, ··· 369 368 return 0; 370 369 } 371 370 372 - static struct irqchip sa1111_high_chip = { 371 + static struct irq_chip sa1111_high_chip = { 372 + .name = "SA1111-h", 373 373 .ack = sa1111_ack_irq, 374 374 .mask = sa1111_mask_highirq, 375 375 .unmask = sa1111_unmask_highirq,
+2 -1
arch/arm/common/vic.c
··· 39 39 writel(1 << irq, base + VIC_INT_ENABLE); 40 40 } 41 41 42 - static struct irqchip vic_chip = { 42 + static struct irq_chip vic_chip = { 43 + .name = "VIC", 43 44 .ack = vic_mask_irq, 44 45 .mask = vic_mask_irq, 45 46 .unmask = vic_unmask_irq,
+2 -1
arch/arm/kernel/ecard.c
··· 470 470 } 471 471 } 472 472 473 - static struct irqchip ecard_chip = { 473 + static struct irq_chip ecard_chip = { 474 + .name = "ECARD", 474 475 .ack = ecard_irq_mask, 475 476 .mask = ecard_irq_mask, 476 477 .unmask = ecard_irq_unmask,
+1
arch/arm/kernel/irq.c
··· 77 77 seq_printf(p, "%3d: ", i); 78 78 for_each_present_cpu(cpu) 79 79 seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]); 80 + seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-"); 80 81 seq_printf(p, " %s", action->name); 81 82 for (action = action->next; action; action = action->next) 82 83 seq_printf(p, ", %s", action->name);
+2 -1
arch/arm/mach-at91rm9200/gpio.c
··· 327 327 return (type == IRQT_BOTHEDGE) ? 0 : -EINVAL; 328 328 } 329 329 330 - static struct irqchip gpio_irqchip = { 330 + static struct irq_chip gpio_irqchip = { 331 + .name = "GPIO", 331 332 .mask = gpio_irq_mask, 332 333 .unmask = gpio_irq_unmask, 333 334 .set_type = gpio_irq_type,
+2 -1
arch/arm/mach-at91rm9200/irq.c
··· 114 114 #define at91_aic_set_wake NULL 115 115 #endif 116 116 117 - static struct irqchip at91_aic_chip = { 117 + static struct irq_chip at91_aic_chip = { 118 + .name = "AIC", 118 119 .ack = at91_aic_mask_irq, 119 120 .mask = at91_aic_mask_irq, 120 121 .unmask = at91_aic_unmask_irq,
+4 -2
arch/arm/mach-imx/irq.c
··· 204 204 imx_gpio_handler(mask, irq, desc, regs); 205 205 } 206 206 207 - static struct irqchip imx_internal_chip = { 207 + static struct irq_chip imx_internal_chip = { 208 + .name = "MPU", 208 209 .ack = imx_mask_irq, 209 210 .mask = imx_mask_irq, 210 211 .unmask = imx_unmask_irq, 211 212 }; 212 213 213 - static struct irqchip imx_gpio_chip = { 214 + static struct irq_chip imx_gpio_chip = { 215 + .name = "GPIO", 214 216 .ack = imx_gpio_ack_irq, 215 217 .mask = imx_gpio_mask_irq, 216 218 .unmask = imx_gpio_unmask_irq,
+2 -1
arch/arm/mach-integrator/integrator_ap.c
··· 161 161 writel(1 << irq, VA_IC_BASE + IRQ_ENABLE_SET); 162 162 } 163 163 164 - static struct irqchip sc_chip = { 164 + static struct irq_chip sc_chip = { 165 + .name = "SC", 165 166 .ack = sc_mask_irq, 166 167 .mask = sc_mask_irq, 167 168 .unmask = sc_unmask_irq,
+6 -3
arch/arm/mach-integrator/integrator_cp.c
··· 156 156 cic_writel(1 << irq, INTCP_VA_CIC_BASE + IRQ_ENABLE_SET); 157 157 } 158 158 159 - static struct irqchip cic_chip = { 159 + static struct irq_chip cic_chip = { 160 + .name = "CIC", 160 161 .ack = cic_mask_irq, 161 162 .mask = cic_mask_irq, 162 163 .unmask = cic_unmask_irq, ··· 175 174 pic_writel(1 << irq, INTCP_VA_PIC_BASE + IRQ_ENABLE_SET); 176 175 } 177 176 178 - static struct irqchip pic_chip = { 177 + static struct irq_chip pic_chip = { 178 + .name = "PIC", 179 179 .ack = pic_mask_irq, 180 180 .mask = pic_mask_irq, 181 181 .unmask = pic_unmask_irq, ··· 194 192 sic_writel(1 << irq, INTCP_VA_SIC_BASE + IRQ_ENABLE_SET); 195 193 } 196 194 197 - static struct irqchip sic_chip = { 195 + static struct irq_chip sic_chip = { 196 + .name = "SIC", 198 197 .ack = sic_mask_irq, 199 198 .mask = sic_mask_irq, 200 199 .unmask = sic_unmask_irq,
+2 -1
arch/arm/mach-iop3xx/iop321-irq.c
··· 52 52 intctl_write(iop321_mask); 53 53 } 54 54 55 - struct irqchip ext_chip = { 55 + struct irq_chip ext_chip = { 56 + .name = "IOP", 56 57 .ack = iop321_irq_mask, 57 58 .mask = iop321_irq_mask, 58 59 .unmask = iop321_irq_unmask,
+4 -2
arch/arm/mach-iop3xx/iop331-irq.c
··· 77 77 intctl_write1(iop331_mask1); 78 78 } 79 79 80 - struct irqchip iop331_irqchip1 = { 80 + struct irq_chip iop331_irqchip1 = { 81 + .name = "IOP-1", 81 82 .ack = iop331_irq_mask1, 82 83 .mask = iop331_irq_mask1, 83 84 .unmask = iop331_irq_unmask1, 84 85 }; 85 86 86 - struct irqchip iop331_irqchip2 = { 87 + struct irq_chip iop331_irqchip2 = { 88 + .name = "IOP-2", 87 89 .ack = iop331_irq_mask2, 88 90 .mask = iop331_irq_mask2, 89 91 .unmask = iop331_irq_unmask2,
+2 -1
arch/arm/mach-lh7a40x/arch-kev7a400.c
··· 63 63 CPLD_WR_PB_INT_MASK = CPLD_IRQ_mask; 64 64 } 65 65 66 - static struct irqchip kev7a400_cpld_chip = { 66 + static struct irq_chip kev7a400_cpld_chip = { 67 + .name = "CPLD", 67 68 .ack = kev7a400_ack_cpld_irq, 68 69 .mask = kev7a400_mask_cpld_irq, 69 70 .unmask = kev7a400_unmask_cpld_irq,
+2 -1
arch/arm/mach-lh7a40x/arch-lpd7a40x.c
··· 200 200 } 201 201 } 202 202 203 - static struct irqchip lpd7a40x_cpld_chip = { 203 + static struct irq_chip lpd7a40x_cpld_chip = { 204 + .name = "CPLD", 204 205 .ack = lh7a40x_ack_cpld_irq, 205 206 .mask = lh7a40x_mask_cpld_irq, 206 207 .unmask = lh7a40x_unmask_cpld_irq,
+2 -1
arch/arm/mach-lh7a40x/irq-kev7a400.c
··· 43 43 } 44 44 45 45 static struct 46 - irqchip lh7a400_cpld_chip = { 46 + irq_chip lh7a400_cpld_chip = { 47 + .name = "CPLD", 47 48 .ack = lh7a400_ack_cpld_irq, 48 49 .mask = lh7a400_mask_cpld_irq, 49 50 .unmask = lh7a400_unmask_cpld_irq,
+4 -2
arch/arm/mach-lh7a40x/irq-lh7a400.c
··· 38 38 INTC_INTENC = (1 << irq); 39 39 } 40 40 41 - static struct irqchip lh7a400_internal_chip = { 41 + static struct irq_chip lh7a400_internal_chip = { 42 + .name = "MPU", 42 43 .ack = lh7a400_mask_irq, /* Level triggering -> mask is ack */ 43 44 .mask = lh7a400_mask_irq, 44 45 .unmask = lh7a400_unmask_irq, 45 46 }; 46 47 47 - static struct irqchip lh7a400_gpio_chip = { 48 + static struct irq_chip lh7a400_gpio_chip = { 49 + .name = "GPIO", 48 50 .ack = lh7a400_ack_gpio_irq, 49 51 .mask = lh7a400_mask_irq, 50 52 .unmask = lh7a400_unmask_irq,
+8 -4
arch/arm/mach-lh7a40x/irq-lh7a404.c
··· 76 76 VIC2_INTENCLR = (1 << irq); 77 77 } 78 78 79 - static struct irqchip lh7a404_vic1_chip = { 79 + static struct irq_chip lh7a404_vic1_chip = { 80 + .name = "VIC1", 80 81 .ack = lh7a404_vic1_mask_irq, /* Because level-triggered */ 81 82 .mask = lh7a404_vic1_mask_irq, 82 83 .unmask = lh7a404_vic1_unmask_irq, 83 84 }; 84 85 85 - static struct irqchip lh7a404_vic2_chip = { 86 + static struct irq_chip lh7a404_vic2_chip = { 87 + .name = "VIC2", 86 88 .ack = lh7a404_vic2_mask_irq, /* Because level-triggered */ 87 89 .mask = lh7a404_vic2_mask_irq, 88 90 .unmask = lh7a404_vic2_unmask_irq, 89 91 }; 90 92 91 - static struct irqchip lh7a404_gpio_vic1_chip = { 93 + static struct irq_chip lh7a404_gpio_vic1_chip = { 94 + .name = "GPIO-VIC1", 92 95 .ack = lh7a404_vic1_ack_gpio_irq, 93 96 .mask = lh7a404_vic1_mask_irq, 94 97 .unmask = lh7a404_vic1_unmask_irq, 95 98 }; 96 99 97 - static struct irqchip lh7a404_gpio_vic2_chip = { 100 + static struct irq_chip lh7a404_gpio_vic2_chip = { 101 + .name = "GPIO-VIC2", 98 102 .ack = lh7a404_vic2_ack_gpio_irq, 99 103 .mask = lh7a404_vic2_mask_irq, 100 104 .unmask = lh7a404_vic2_unmask_irq,
+2 -1
arch/arm/mach-lh7a40x/irq-lpd7a40x.c
··· 50 50 } 51 51 } 52 52 53 - static struct irqchip lh7a40x_cpld_chip = { 53 + static struct irq_chip lh7a40x_cpld_chip = { 54 + .name = "CPLD", 54 55 .ack = lh7a40x_ack_cpld_irq, 55 56 .mask = lh7a40x_mask_cpld_irq, 56 57 .unmask = lh7a40x_unmask_cpld_irq,
+4 -2
arch/arm/mach-omap1/fpga.c
··· 106 106 } 107 107 } 108 108 109 - static struct irqchip omap_fpga_irq_ack = { 109 + static struct irq_chip omap_fpga_irq_ack = { 110 + .name = "FPGA-ack", 110 111 .ack = fpga_mask_ack_irq, 111 112 .mask = fpga_mask_irq, 112 113 .unmask = fpga_unmask_irq, 113 114 }; 114 115 115 116 116 - static struct irqchip omap_fpga_irq = { 117 + static struct irq_chip omap_fpga_irq = { 118 + .name = "FPGA", 117 119 .ack = fpga_ack_irq, 118 120 .mask = fpga_mask_irq, 119 121 .unmask = fpga_unmask_irq,
+2 -1
arch/arm/mach-omap1/irq.c
··· 168 168 }; 169 169 #endif 170 170 171 - static struct irqchip omap_irq_chip = { 171 + static struct irq_chip omap_irq_chip = { 172 + .name = "MPU", 172 173 .ack = omap_mask_ack_irq, 173 174 .mask = omap_mask_irq, 174 175 .unmask = omap_unmask_irq,
+2 -1
arch/arm/mach-omap2/irq.c
··· 94 94 omap_ack_irq(irq); 95 95 } 96 96 97 - static struct irqchip omap_irq_chip = { 97 + static struct irq_chip omap_irq_chip = { 98 + .name = "INTC", 98 99 .ack = omap_mask_ack_irq, 99 100 .mask = omap_mask_irq, 100 101 .unmask = omap_unmask_irq,
+8 -4
arch/arm/mach-pxa/irq.c
··· 39 39 ICMR |= (1 << (irq + PXA_IRQ_SKIP)); 40 40 } 41 41 42 - static struct irqchip pxa_internal_chip_low = { 42 + static struct irq_chip pxa_internal_chip_low = { 43 + .name = "SC", 43 44 .ack = pxa_mask_low_irq, 44 45 .mask = pxa_mask_low_irq, 45 46 .unmask = pxa_unmask_low_irq, ··· 62 61 ICMR2 |= (1 << (irq - 32 + PXA_IRQ_SKIP)); 63 62 } 64 63 65 - static struct irqchip pxa_internal_chip_high = { 64 + static struct irq_chip pxa_internal_chip_high = { 65 + .name = "SC-hi", 66 66 .ack = pxa_mask_high_irq, 67 67 .mask = pxa_mask_high_irq, 68 68 .unmask = pxa_unmask_high_irq, ··· 131 129 GEDR0 = (1 << (irq - IRQ_GPIO0)); 132 130 } 133 131 134 - static struct irqchip pxa_low_gpio_chip = { 132 + static struct irq_chip pxa_low_gpio_chip = { 133 + .name = "GPIO-l", 135 134 .ack = pxa_ack_low_gpio, 136 135 .mask = pxa_mask_low_irq, 137 136 .unmask = pxa_unmask_low_irq, ··· 240 237 GFER(gpio) = GPIO_IRQ_falling_edge[idx] & GPIO_IRQ_mask[idx]; 241 238 } 242 239 243 - static struct irqchip pxa_muxed_gpio_chip = { 240 + static struct irq_chip pxa_muxed_gpio_chip = { 241 + .name = "GPIO", 244 242 .ack = pxa_ack_muxed_gpio, 245 243 .mask = pxa_mask_muxed_gpio, 246 244 .unmask = pxa_unmask_muxed_gpio,
+2 -1
arch/arm/mach-pxa/lpd270.c
··· 68 68 __raw_writew(lpd270_irq_enabled, LPD270_INT_MASK); 69 69 } 70 70 71 - static struct irqchip lpd270_irq_chip = { 71 + static struct irq_chip lpd270_irq_chip = { 72 + .name = "CPLD", 72 73 .ack = lpd270_mask_irq, 73 74 .mask = lpd270_mask_irq, 74 75 .unmask = lpd270_unmask_irq,
+2 -1
arch/arm/mach-pxa/lubbock.c
··· 78 78 LUB_IRQ_MASK_EN = (lubbock_irq_enabled |= (1 << lubbock_irq)); 79 79 } 80 80 81 - static struct irqchip lubbock_irq_chip = { 81 + static struct irq_chip lubbock_irq_chip = { 82 + .name = "FPGA", 82 83 .ack = lubbock_mask_irq, 83 84 .mask = lubbock_mask_irq, 84 85 .unmask = lubbock_unmask_irq,
+2 -1
arch/arm/mach-pxa/mainstone.c
··· 64 64 MST_INTMSKENA = (mainstone_irq_enabled |= (1 << mainstone_irq)); 65 65 } 66 66 67 - static struct irqchip mainstone_irq_chip = { 67 + static struct irq_chip mainstone_irq_chip = { 68 + .name = "FPGA", 68 69 .ack = mainstone_mask_irq, 69 70 .mask = mainstone_mask_irq, 70 71 .unmask = mainstone_unmask_irq,
+6 -3
arch/arm/mach-sa1100/irq.c
··· 95 95 return 0; 96 96 } 97 97 98 - static struct irqchip sa1100_low_gpio_chip = { 98 + static struct irq_chip sa1100_low_gpio_chip = { 99 + .name = "GPIO-l", 99 100 .ack = sa1100_low_gpio_ack, 100 101 .mask = sa1100_low_gpio_mask, 101 102 .unmask = sa1100_low_gpio_unmask, ··· 179 178 return 0; 180 179 } 181 180 182 - static struct irqchip sa1100_high_gpio_chip = { 181 + static struct irq_chip sa1100_high_gpio_chip = { 182 + .name = "GPIO-h", 183 183 .ack = sa1100_high_gpio_ack, 184 184 .mask = sa1100_high_gpio_mask, 185 185 .unmask = sa1100_high_gpio_unmask, ··· 217 215 return -EINVAL; 218 216 } 219 217 220 - static struct irqchip sa1100_normal_chip = { 218 + static struct irq_chip sa1100_normal_chip = { 219 + .name = "SC", 221 220 .ack = sa1100_mask_irq, 222 221 .mask = sa1100_mask_irq, 223 222 .unmask = sa1100_unmask_irq,
+2 -1
arch/arm/mach-shark/irq.c
··· 69 69 70 70 static struct irqaction cascade; 71 71 72 - static struct irqchip fb_chip = { 72 + static struct irq_chip fb_chip = { 73 + .name = "XT-PIC", 73 74 .ack = shark_ack_8259A_irq, 74 75 .mask = shark_disable_8259A_irq, 75 76 .unmask = shark_enable_8259A_irq,
+2 -1
arch/arm/mach-versatile/core.c
··· 69 69 writel(1 << irq, VA_SIC_BASE + SIC_IRQ_ENABLE_SET); 70 70 } 71 71 72 - static struct irqchip sic_chip = { 72 + static struct irq_chip sic_chip = { 73 + .name = "SIC", 73 74 .ack = sic_mask_irq, 74 75 .mask = sic_mask_irq, 75 76 .unmask = sic_unmask_irq,
+5 -3
arch/arm/plat-omap/gpio.c
··· 944 944 _set_gpio_irqenable(bank, gpio, 1); 945 945 } 946 946 947 - static struct irqchip gpio_irq_chip = { 947 + static struct irq_chip gpio_irq_chip = { 948 + .name = "GPIO", 948 949 .ack = gpio_ack_irq, 949 950 .mask = gpio_mask_irq, 950 951 .unmask = gpio_unmask_irq, ··· 953 952 .set_wake = gpio_wake_enable, 954 953 }; 955 954 956 - static struct irqchip mpuio_irq_chip = { 955 + static struct irq_chip mpuio_irq_chip = { 956 + .name = "MPUIO", 957 957 .ack = mpuio_ack_irq, 958 958 .mask = mpuio_mask_irq, 959 - .unmask = mpuio_unmask_irq 959 + .unmask = mpuio_unmask_irq 960 960 }; 961 961 962 962 static int initialized;
+2 -1
arch/i386/kernel/cpu/cpufreq/Kconfig
··· 96 96 97 97 config X86_GX_SUSPMOD 98 98 tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation" 99 + depends on PCI 99 100 help 100 101 This add the CPUFreq driver for NatSemi Geode processors which 101 102 support suspend modulation. ··· 203 202 config X86_LONGHAUL 204 203 tristate "VIA Cyrix III Longhaul" 205 204 select CPU_FREQ_TABLE 206 - depends on BROKEN 205 + depends on ACPI_PROCESSOR 207 206 help 208 207 This adds the CPUFreq driver for VIA Samuel/CyrixIII, 209 208 VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
+1 -2
arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
··· 384 384 } 385 385 386 386 /* Do initialization in ACPI core */ 387 - acpi_processor_preregister_performance(acpi_perf_data); 388 - return 0; 387 + return acpi_processor_preregister_performance(acpi_perf_data); 389 388 } 390 389 391 390 static int
+129 -92
arch/i386/kernel/cpu/cpufreq/longhaul.c
··· 29 29 #include <linux/cpufreq.h> 30 30 #include <linux/slab.h> 31 31 #include <linux/string.h> 32 - #include <linux/pci.h> 33 32 34 33 #include <asm/msr.h> 35 34 #include <asm/timex.h> 36 35 #include <asm/io.h> 36 + #include <asm/acpi.h> 37 + #include <linux/acpi.h> 38 + #include <acpi/processor.h> 37 39 38 40 #include "longhaul.h" 39 41 ··· 58 56 static unsigned int minmult, maxmult; 59 57 static int can_scale_voltage; 60 58 static int vrmrev; 59 + static struct acpi_processor *pr = NULL; 60 + static struct acpi_processor_cx *cx = NULL; 61 61 62 62 /* Module parameters */ 63 63 static int dont_scale_voltage; ··· 122 118 return eblcr_table[invalue]; 123 119 } 124 120 121 + /* For processor with BCR2 MSR */ 125 122 126 - static void do_powersaver(union msr_longhaul *longhaul, 127 - unsigned int clock_ratio_index) 123 + static void do_longhaul1(int cx_address, unsigned int clock_ratio_index) 128 124 { 129 - struct pci_dev *dev; 130 - unsigned long flags; 131 - unsigned int tmp_mask; 132 - int version; 133 - int i; 134 - u16 pci_cmd; 135 - u16 cmd_state[64]; 125 + union msr_bcr2 bcr2; 126 + u32 t; 136 127 137 - switch (cpu_model) { 138 - case CPU_EZRA_T: 139 - version = 3; 140 - break; 141 - case CPU_NEHEMIAH: 142 - version = 0xf; 143 - break; 144 - default: 145 - return; 146 - } 128 + rdmsrl(MSR_VIA_BCR2, bcr2.val); 129 + /* Enable software clock multiplier */ 130 + bcr2.bits.ESOFTBF = 1; 131 + bcr2.bits.CLOCKMUL = clock_ratio_index; 147 132 148 - rdmsrl(MSR_VIA_LONGHAUL, longhaul->val); 149 - longhaul->bits.SoftBusRatio = clock_ratio_index & 0xf; 150 - longhaul->bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4; 151 - longhaul->bits.EnableSoftBusRatio = 1; 152 - longhaul->bits.RevisionKey = 0; 153 - 154 - preempt_disable(); 155 - local_irq_save(flags); 156 - 157 - /* 158 - * get current pci bus master state for all devices 159 - * and clear bus master bit 160 - */ 161 - dev = NULL; 162 - i = 0; 163 - do { 164 - dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, 
dev); 165 - if (dev != NULL) { 166 - pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); 167 - cmd_state[i++] = pci_cmd; 168 - pci_cmd &= ~PCI_COMMAND_MASTER; 169 - pci_write_config_word(dev, PCI_COMMAND, pci_cmd); 170 - } 171 - } while (dev != NULL); 172 - 173 - tmp_mask=inb(0x21); /* works on C3. save mask. */ 174 - outb(0xFE,0x21); /* TMR0 only */ 175 - outb(0xFF,0x80); /* delay */ 176 - 133 + /* Sync to timer tick */ 177 134 safe_halt(); 178 - wrmsrl(MSR_VIA_LONGHAUL, longhaul->val); 179 - halt(); 135 + ACPI_FLUSH_CPU_CACHE(); 136 + /* Change frequency on next halt or sleep */ 137 + wrmsrl(MSR_VIA_BCR2, bcr2.val); 138 + /* Invoke C3 */ 139 + inb(cx_address); 140 + /* Dummy op - must do something useless after P_LVL3 read */ 141 + t = inl(acpi_fadt.xpm_tmr_blk.address); 180 142 143 + /* Disable software clock multiplier */ 181 144 local_irq_disable(); 145 + rdmsrl(MSR_VIA_BCR2, bcr2.val); 146 + bcr2.bits.ESOFTBF = 0; 147 + wrmsrl(MSR_VIA_BCR2, bcr2.val); 148 + } 182 149 183 - outb(tmp_mask,0x21); /* restore mask */ 150 + /* For processor with Longhaul MSR */ 184 151 185 - /* restore pci bus master state for all devices */ 186 - dev = NULL; 187 - i = 0; 188 - do { 189 - dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); 190 - if (dev != NULL) { 191 - pci_cmd = cmd_state[i++]; 192 - pci_write_config_byte(dev, PCI_COMMAND, pci_cmd); 193 - } 194 - } while (dev != NULL); 195 - local_irq_restore(flags); 196 - preempt_enable(); 152 + static void do_powersaver(int cx_address, unsigned int clock_ratio_index) 153 + { 154 + union msr_longhaul longhaul; 155 + u32 t; 197 156 198 - /* disable bus ratio bit */ 199 - rdmsrl(MSR_VIA_LONGHAUL, longhaul->val); 200 - longhaul->bits.EnableSoftBusRatio = 0; 201 - longhaul->bits.RevisionKey = version; 202 - wrmsrl(MSR_VIA_LONGHAUL, longhaul->val); 157 + rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); 158 + longhaul.bits.RevisionKey = longhaul.bits.RevisionID; 159 + longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf; 160 + 
longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4; 161 + longhaul.bits.EnableSoftBusRatio = 1; 162 + 163 + /* Sync to timer tick */ 164 + safe_halt(); 165 + ACPI_FLUSH_CPU_CACHE(); 166 + /* Change frequency on next halt or sleep */ 167 + wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); 168 + /* Invoke C3 */ 169 + inb(cx_address); 170 + /* Dummy op - must do something useless after P_LVL3 read */ 171 + t = inl(acpi_fadt.xpm_tmr_blk.address); 172 + 173 + /* Disable bus ratio bit */ 174 + local_irq_disable(); 175 + longhaul.bits.RevisionKey = longhaul.bits.RevisionID; 176 + longhaul.bits.EnableSoftBusRatio = 0; 177 + longhaul.bits.EnableSoftBSEL = 0; 178 + longhaul.bits.EnableSoftVID = 0; 179 + wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); 203 180 } 204 181 205 182 /** ··· 194 209 { 195 210 int speed, mult; 196 211 struct cpufreq_freqs freqs; 197 - union msr_longhaul longhaul; 198 - union msr_bcr2 bcr2; 199 212 static unsigned int old_ratio=-1; 213 + unsigned long flags; 214 + unsigned int pic1_mask, pic2_mask; 200 215 201 216 if (old_ratio == clock_ratio_index) 202 217 return; ··· 219 234 dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", 220 235 fsb, mult/10, mult%10, print_speed(speed/1000)); 221 236 237 + preempt_disable(); 238 + local_irq_save(flags); 239 + 240 + pic2_mask = inb(0xA1); 241 + pic1_mask = inb(0x21); /* works on C3. save mask. 
*/ 242 + outb(0xFF,0xA1); /* Overkill */ 243 + outb(0xFE,0x21); /* TMR0 only */ 244 + 245 + /* Disable bus master arbitration */ 246 + if (pr->flags.bm_check) { 247 + acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, 248 + ACPI_MTX_DO_NOT_LOCK); 249 + } 250 + 222 251 switch (longhaul_version) { 223 252 224 253 /* ··· 244 245 */ 245 246 case TYPE_LONGHAUL_V1: 246 247 case TYPE_LONGHAUL_V2: 247 - rdmsrl (MSR_VIA_BCR2, bcr2.val); 248 - /* Enable software clock multiplier */ 249 - bcr2.bits.ESOFTBF = 1; 250 - bcr2.bits.CLOCKMUL = clock_ratio_index; 251 - local_irq_disable(); 252 - wrmsrl (MSR_VIA_BCR2, bcr2.val); 253 - safe_halt(); 254 - 255 - /* Disable software clock multiplier */ 256 - rdmsrl (MSR_VIA_BCR2, bcr2.val); 257 - bcr2.bits.ESOFTBF = 0; 258 - local_irq_disable(); 259 - wrmsrl (MSR_VIA_BCR2, bcr2.val); 260 - local_irq_enable(); 248 + do_longhaul1(cx->address, clock_ratio_index); 261 249 break; 262 250 263 251 /* ··· 259 273 * to work in practice. 260 274 */ 261 275 case TYPE_POWERSAVER: 262 - do_powersaver(&longhaul, clock_ratio_index); 276 + do_powersaver(cx->address, clock_ratio_index); 263 277 break; 264 278 } 279 + 280 + /* Enable bus master arbitration */ 281 + if (pr->flags.bm_check) { 282 + acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, 283 + ACPI_MTX_DO_NOT_LOCK); 284 + } 285 + 286 + outb(pic2_mask,0xA1); /* restore mask */ 287 + outb(pic1_mask,0x21); 288 + 289 + local_irq_restore(flags); 290 + preempt_enable(); 265 291 266 292 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 267 293 } ··· 322 324 static int __init longhaul_get_ranges(void) 323 325 { 324 326 unsigned long invalue; 325 - unsigned int multipliers[32]= { 326 - 50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65, 327 - -1,110,120,-1,135,115,125,105,130,150,160,140,-1,155,-1,145 }; 327 + unsigned int ezra_t_multipliers[32]= { 328 + 90, 30, 40, 100, 55, 35, 45, 95, 329 + 50, 70, 80, 60, 120, 75, 85, 65, 330 + -1, 110, 120, -1, 135, 115, 125, 105, 331 + 130, 150, 160, 140, -1, 155, -1, 145 
}; 328 332 unsigned int j, k = 0; 329 333 union msr_longhaul longhaul; 330 334 unsigned long lo, hi; ··· 355 355 invalue = longhaul.bits.MaxMHzBR; 356 356 if (longhaul.bits.MaxMHzBR4) 357 357 invalue += 16; 358 - maxmult=multipliers[invalue]; 358 + maxmult=ezra_t_multipliers[invalue]; 359 359 360 360 invalue = longhaul.bits.MinMHzBR; 361 361 if (longhaul.bits.MinMHzBR4 == 1) 362 362 minmult = 30; 363 363 else 364 - minmult = multipliers[invalue]; 364 + minmult = ezra_t_multipliers[invalue]; 365 365 fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB]; 366 366 break; 367 367 } ··· 527 527 return calc_speed(longhaul_get_cpu_mult()); 528 528 } 529 529 530 + static acpi_status longhaul_walk_callback(acpi_handle obj_handle, 531 + u32 nesting_level, 532 + void *context, void **return_value) 533 + { 534 + struct acpi_device *d; 535 + 536 + if ( acpi_bus_get_device(obj_handle, &d) ) { 537 + return 0; 538 + } 539 + *return_value = (void *)acpi_driver_data(d); 540 + return 1; 541 + } 530 542 531 543 static int __init longhaul_cpu_init(struct cpufreq_policy *policy) 532 544 { ··· 546 534 char *cpuname=NULL; 547 535 int ret; 548 536 537 + /* Check ACPI support for C3 state */ 538 + acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, 539 + &longhaul_walk_callback, NULL, (void *)&pr); 540 + if (pr == NULL) goto err_acpi; 541 + 542 + cx = &pr->power.states[ACPI_STATE_C3]; 543 + if (cx->address == 0 || cx->latency > 1000) goto err_acpi; 544 + 545 + /* Now check what we have on this motherboard */ 549 546 switch (c->x86_model) { 550 547 case 6: 551 548 cpu_model = CPU_SAMUEL; ··· 655 634 cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu); 656 635 657 636 return 0; 637 + 638 + err_acpi: 639 + printk(KERN_ERR PFX "No ACPI support for CPU frequency changes.\n"); 640 + return -ENODEV; 658 641 } 659 642 660 643 static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy) ··· 691 666 if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6) 692 667 
return -ENODEV; 693 668 669 + #ifdef CONFIG_SMP 670 + if (num_online_cpus() > 1) { 671 + return -ENODEV; 672 + printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n"); 673 + } 674 + #endif 675 + #ifdef CONFIG_X86_IO_APIC 676 + if (cpu_has_apic) { 677 + printk(KERN_ERR PFX "APIC detected. Longhaul is currently broken in this configuration.\n"); 678 + return -ENODEV; 679 + } 680 + #endif 694 681 switch (c->x86_model) { 695 682 case 6 ... 9: 696 683 return cpufreq_register_driver(&longhaul_driver); ··· 736 699 MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); 737 700 MODULE_LICENSE ("GPL"); 738 701 739 - module_init(longhaul_init); 702 + late_initcall(longhaul_init); 740 703 module_exit(longhaul_exit); 741 704
+2 -2
arch/ia64/hp/sim/simscsi.c
··· 151 151 simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset) 152 152 { 153 153 int list_len = sc->use_sg; 154 - struct scatterlist *sl = (struct scatterlist *)sc->buffer; 154 + struct scatterlist *sl = (struct scatterlist *)sc->request_buffer; 155 155 struct disk_stat stat; 156 156 struct disk_req req; 157 157 ··· 244 244 245 245 if (scatterlen == 0) 246 246 memcpy(sc->request_buffer, buf, len); 247 - else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) { 247 + else for (slp = (struct scatterlist *)sc->request_buffer; scatterlen-- > 0 && len > 0; slp++) { 248 248 unsigned thislen = min(len, slp->length); 249 249 250 250 memcpy(page_address(slp->page) + slp->offset, buf, thislen);
+3 -3
arch/ia64/kernel/efi.c
··· 632 632 if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT)) 633 633 return md; 634 634 } 635 - return 0; 635 + return NULL; 636 636 } 637 637 638 638 static efi_memory_desc_t * ··· 652 652 if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) 653 653 return md; 654 654 } 655 - return 0; 655 + return NULL; 656 656 } 657 657 658 658 u32 ··· 923 923 void 924 924 efi_memmap_init(unsigned long *s, unsigned long *e) 925 925 { 926 - struct kern_memdesc *k, *prev = 0; 926 + struct kern_memdesc *k, *prev = NULL; 927 927 u64 contig_low=0, contig_high=0; 928 928 u64 as, ae, lim; 929 929 void *efi_map_start, *efi_map_end, *p, *q;
-2
arch/ia64/kernel/head.S
··· 853 853 */ 854 854 GLOBAL_ENTRY(ia64_switch_mode_phys) 855 855 { 856 - alloc r2=ar.pfs,0,0,0,0 857 856 rsm psr.i | psr.ic // disable interrupts and interrupt collection 858 857 mov r15=ip 859 858 } ··· 901 902 */ 902 903 GLOBAL_ENTRY(ia64_switch_mode_virt) 903 904 { 904 - alloc r2=ar.pfs,0,0,0,0 905 905 rsm psr.i | psr.ic // disable interrupts and interrupt collection 906 906 mov r15=ip 907 907 }
+1 -1
arch/ia64/kernel/ia64_ksyms.c
··· 62 62 EXPORT_SYMBOL(__moddi3); 63 63 EXPORT_SYMBOL(__umoddi3); 64 64 65 - #if defined(CONFIG_MD_RAID5) || defined(CONFIG_MD_RAID5_MODULE) 65 + #if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE) 66 66 extern void xor_ia64_2(void); 67 67 extern void xor_ia64_3(void); 68 68 extern void xor_ia64_4(void);
+9 -9
arch/ia64/kernel/pal.S
··· 217 217 .body 218 218 ;; 219 219 ld8 loc2 = [loc2] // loc2 <- entry point 220 - mov out0 = in0 // first argument 221 - mov out1 = in1 // copy arg2 222 - mov out2 = in2 // copy arg3 223 - mov out3 = in3 // copy arg3 224 - ;; 225 - mov loc3 = psr // save psr 220 + mov loc3 = psr // save psr 226 221 ;; 227 222 mov loc4=ar.rsc // save RSE configuration 228 223 dep.z loc2=loc2,0,61 // convert pal entry point to physical ··· 231 236 ;; 232 237 andcm r16=loc3,r16 // removes bits to clear from psr 233 238 br.call.sptk.many rp=ia64_switch_mode_phys 234 - .ret6: 239 + 240 + mov out0 = in0 // first argument 241 + mov out1 = in1 // copy arg2 242 + mov out2 = in2 // copy arg3 243 + mov out3 = in3 // copy arg3 235 244 mov loc5 = r19 236 245 mov loc6 = r20 246 + 237 247 br.call.sptk.many rp=b7 // now make the call 238 - .ret7: 248 + 239 249 mov ar.rsc=0 // put RSE in enforced lazy, LE mode 240 250 mov r16=loc3 // r16= original psr 241 251 mov r19=loc5 242 252 mov r20=loc6 243 253 br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode 244 254 245 - .ret8: mov psr.l = loc3 // restore init PSR 255 + mov psr.l = loc3 // restore init PSR 246 256 mov ar.pfs = loc1 247 257 mov rp = loc0 248 258 ;;
+14 -20
arch/ia64/kernel/palinfo.c
··· 566 566 pal_version_u_t min_ver, cur_ver; 567 567 char *p = page; 568 568 569 - /* The PAL_VERSION call is advertised as being able to support 570 - * both physical and virtual mode calls. This seems to be a documentation 571 - * bug rather than firmware bug. In fact, it does only support physical mode. 572 - * So now the code reflects this fact and the pal_version() has been updated 573 - * accordingly. 574 - */ 575 - if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0; 569 + if (ia64_pal_version(&min_ver, &cur_ver) != 0) 570 + return 0; 576 571 577 572 p += sprintf(p, 578 573 "PAL_vendor : 0x%02x (min=0x%02x)\n" 579 - "PAL_A : %x.%x.%x (min=%x.%x.%x)\n" 580 - "PAL_B : %x.%x.%x (min=%x.%x.%x)\n", 581 - cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor, 582 - 583 - cur_ver.pal_version_s.pv_pal_a_model>>4, 584 - cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_rev, 585 - min_ver.pal_version_s.pv_pal_a_model>>4, 586 - min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_rev, 587 - 588 - cur_ver.pal_version_s.pv_pal_b_model>>4, 589 - cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_rev, 590 - min_ver.pal_version_s.pv_pal_b_model>>4, 591 - min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_rev); 574 + "PAL_A : %02x.%02x (min=%02x.%02x)\n" 575 + "PAL_B : %02x.%02x (min=%02x.%02x)\n", 576 + cur_ver.pal_version_s.pv_pal_vendor, 577 + min_ver.pal_version_s.pv_pal_vendor, 578 + cur_ver.pal_version_s.pv_pal_a_model, 579 + cur_ver.pal_version_s.pv_pal_a_rev, 580 + min_ver.pal_version_s.pv_pal_a_model, 581 + min_ver.pal_version_s.pv_pal_a_rev, 582 + cur_ver.pal_version_s.pv_pal_b_model, 583 + cur_ver.pal_version_s.pv_pal_b_rev, 584 + min_ver.pal_version_s.pv_pal_b_model, 585 + min_ver.pal_version_s.pv_pal_b_rev); 592 586 return p - page; 593 587 } 594 588
+57 -29
arch/ia64/kernel/uncached.c
··· 32 32 33 33 extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *); 34 34 35 - #define MAX_UNCACHED_GRANULES 5 36 - static int allocated_granules; 35 + struct uncached_pool { 36 + struct gen_pool *pool; 37 + struct mutex add_chunk_mutex; /* serialize adding a converted chunk */ 38 + int nchunks_added; /* #of converted chunks added to pool */ 39 + atomic_t status; /* smp called function's return status*/ 40 + }; 37 41 38 - struct gen_pool *uncached_pool[MAX_NUMNODES]; 42 + #define MAX_CONVERTED_CHUNKS_PER_NODE 2 43 + 44 + struct uncached_pool uncached_pools[MAX_NUMNODES]; 39 45 40 46 41 47 static void uncached_ipi_visibility(void *data) 42 48 { 43 49 int status; 50 + struct uncached_pool *uc_pool = (struct uncached_pool *)data; 44 51 45 52 status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); 46 53 if ((status != PAL_VISIBILITY_OK) && 47 54 (status != PAL_VISIBILITY_OK_REMOTE_NEEDED)) 48 - printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on " 49 - "CPU %i\n", status, raw_smp_processor_id()); 55 + atomic_inc(&uc_pool->status); 50 56 } 51 57 52 58 53 59 static void uncached_ipi_mc_drain(void *data) 54 60 { 55 61 int status; 62 + struct uncached_pool *uc_pool = (struct uncached_pool *)data; 56 63 57 64 status = ia64_pal_mc_drain(); 58 - if (status) 59 - printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on " 60 - "CPU %i\n", status, raw_smp_processor_id()); 65 + if (status != PAL_STATUS_SUCCESS) 66 + atomic_inc(&uc_pool->status); 61 67 } 62 68 63 69 ··· 76 70 * This is accomplished by first allocating a granule of cached memory pages 77 71 * and then converting them to uncached memory pages. 
78 72 */ 79 - static int uncached_add_chunk(struct gen_pool *pool, int nid) 73 + static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) 80 74 { 81 75 struct page *page; 82 - int status, i; 76 + int status, i, nchunks_added = uc_pool->nchunks_added; 83 77 unsigned long c_addr, uc_addr; 84 78 85 - if (allocated_granules >= MAX_UNCACHED_GRANULES) 79 + if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0) 80 + return -1; /* interrupted by a signal */ 81 + 82 + if (uc_pool->nchunks_added > nchunks_added) { 83 + /* someone added a new chunk while we were waiting */ 84 + mutex_unlock(&uc_pool->add_chunk_mutex); 85 + return 0; 86 + } 87 + 88 + if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) { 89 + mutex_unlock(&uc_pool->add_chunk_mutex); 86 90 return -1; 91 + } 87 92 88 93 /* attempt to allocate a granule's worth of cached memory pages */ 89 94 90 95 page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO, 91 96 IA64_GRANULE_SHIFT-PAGE_SHIFT); 92 - if (!page) 97 + if (!page) { 98 + mutex_unlock(&uc_pool->add_chunk_mutex); 93 99 return -1; 100 + } 94 101 95 102 /* convert the memory pages from cached to uncached */ 96 103 ··· 121 102 flush_tlb_kernel_range(uc_addr, uc_adddr + IA64_GRANULE_SIZE); 122 103 123 104 status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); 124 - if (!status) { 125 - status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1); 126 - if (status) 105 + if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) { 106 + atomic_set(&uc_pool->status, 0); 107 + status = smp_call_function(uncached_ipi_visibility, uc_pool, 108 + 0, 1); 109 + if (status || atomic_read(&uc_pool->status)) 127 110 goto failed; 128 - } 111 + } else if (status != PAL_VISIBILITY_OK) 112 + goto failed; 129 113 130 114 preempt_disable(); 131 115 ··· 142 120 143 121 preempt_enable(); 144 122 145 - ia64_pal_mc_drain(); 146 - status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1); 147 - if (status) 123 + status = ia64_pal_mc_drain(); 124 + if 
(status != PAL_STATUS_SUCCESS) 125 + goto failed; 126 + atomic_set(&uc_pool->status, 0); 127 + status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1); 128 + if (status || atomic_read(&uc_pool->status)) 148 129 goto failed; 149 130 150 131 /* 151 132 * The chunk of memory pages has been converted to uncached so now we 152 133 * can add it to the pool. 153 134 */ 154 - status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid); 135 + status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid); 155 136 if (status) 156 137 goto failed; 157 138 158 - allocated_granules++; 139 + uc_pool->nchunks_added++; 140 + mutex_unlock(&uc_pool->add_chunk_mutex); 159 141 return 0; 160 142 161 143 /* failed to convert or add the chunk so give it back to the kernel */ ··· 168 142 ClearPageUncached(&page[i]); 169 143 170 144 free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT); 145 + mutex_unlock(&uc_pool->add_chunk_mutex); 171 146 return -1; 172 147 } 173 148 ··· 185 158 unsigned long uncached_alloc_page(int starting_nid) 186 159 { 187 160 unsigned long uc_addr; 188 - struct gen_pool *pool; 161 + struct uncached_pool *uc_pool; 189 162 int nid; 190 163 191 164 if (unlikely(starting_nid >= MAX_NUMNODES)) ··· 198 171 do { 199 172 if (!node_online(nid)) 200 173 continue; 201 - pool = uncached_pool[nid]; 202 - if (pool == NULL) 174 + uc_pool = &uncached_pools[nid]; 175 + if (uc_pool->pool == NULL) 203 176 continue; 204 177 do { 205 - uc_addr = gen_pool_alloc(pool, PAGE_SIZE); 178 + uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE); 206 179 if (uc_addr != 0) 207 180 return uc_addr; 208 - } while (uncached_add_chunk(pool, nid) == 0); 181 + } while (uncached_add_chunk(uc_pool, nid) == 0); 209 182 210 183 } while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid); 211 184 ··· 224 197 void uncached_free_page(unsigned long uc_addr) 225 198 { 226 199 int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET); 227 - struct gen_pool *pool = uncached_pool[nid]; 200 + struct gen_pool 
*pool = uncached_pools[nid].pool; 228 201 229 202 if (unlikely(pool == NULL)) 230 203 return; ··· 251 224 unsigned long uc_end, void *arg) 252 225 { 253 226 int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET); 254 - struct gen_pool *pool = uncached_pool[nid]; 227 + struct gen_pool *pool = uncached_pools[nid].pool; 255 228 size_t size = uc_end - uc_start; 256 229 257 230 touch_softlockup_watchdog(); ··· 269 242 int nid; 270 243 271 244 for_each_online_node(nid) { 272 - uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid); 245 + uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid); 246 + mutex_init(&uncached_pools[nid].add_chunk_mutex); 273 247 } 274 248 275 249 efi_memmap_walk_uc(uncached_build_memmap, NULL);
+1 -1
arch/ia64/lib/Makefile
··· 14 14 lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o 15 15 lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o 16 16 lib-$(CONFIG_PERFMON) += carta_random.o 17 - lib-$(CONFIG_MD_RAID5) += xor.o 17 + lib-$(CONFIG_MD_RAID456) += xor.o 18 18 19 19 AFLAGS___divdi3.o = 20 20 AFLAGS___udivdi3.o = -DUNSIGNED
+11 -5
arch/ia64/mm/contig.c
··· 27 27 28 28 #ifdef CONFIG_VIRTUAL_MEM_MAP 29 29 static unsigned long num_dma_physpages; 30 + static unsigned long max_gap; 30 31 #endif 31 32 32 33 /** ··· 46 45 47 46 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); 48 47 i = max_mapnr; 49 - while (i-- > 0) { 50 - if (!pfn_valid(i)) 48 + for (i = 0; i < max_mapnr; i++) { 49 + if (!pfn_valid(i)) { 50 + #ifdef CONFIG_VIRTUAL_MEM_MAP 51 + if (max_gap < LARGE_GAP) 52 + continue; 53 + i = vmemmap_find_next_valid_pfn(0, i) - 1; 54 + #endif 51 55 continue; 56 + } 52 57 total++; 53 58 if (PageReserved(mem_map+i)) 54 59 reserved++; ··· 241 234 unsigned long zones_size[MAX_NR_ZONES]; 242 235 #ifdef CONFIG_VIRTUAL_MEM_MAP 243 236 unsigned long zholes_size[MAX_NR_ZONES]; 244 - unsigned long max_gap; 245 237 #endif 246 238 247 239 /* initialize mem_map[] */ ··· 272 266 } 273 267 } 274 268 275 - max_gap = 0; 276 269 efi_memmap_walk(find_largest_hole, (u64 *)&max_gap); 277 270 if (max_gap < LARGE_GAP) { 278 271 vmem_map = (struct page *) 0; ··· 282 277 283 278 /* allocate virtual_mem_map */ 284 279 285 - map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page)); 280 + map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * 281 + sizeof(struct page)); 286 282 vmalloc_end -= map_size; 287 283 vmem_map = (struct page *) vmalloc_end; 288 284 efi_memmap_walk(create_mem_map_page_table, NULL);
+4 -64
arch/ia64/mm/discontig.c
··· 534 534 } 535 535 #endif /* CONFIG_SMP */ 536 536 537 - #ifdef CONFIG_VIRTUAL_MEM_MAP 538 - static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i) 539 - { 540 - unsigned long end_address, hole_next_pfn; 541 - unsigned long stop_address; 542 - 543 - end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i]; 544 - end_address = PAGE_ALIGN(end_address); 545 - 546 - stop_address = (unsigned long) &vmem_map[ 547 - pgdat->node_start_pfn + pgdat->node_spanned_pages]; 548 - 549 - do { 550 - pgd_t *pgd; 551 - pud_t *pud; 552 - pmd_t *pmd; 553 - pte_t *pte; 554 - 555 - pgd = pgd_offset_k(end_address); 556 - if (pgd_none(*pgd)) { 557 - end_address += PGDIR_SIZE; 558 - continue; 559 - } 560 - 561 - pud = pud_offset(pgd, end_address); 562 - if (pud_none(*pud)) { 563 - end_address += PUD_SIZE; 564 - continue; 565 - } 566 - 567 - pmd = pmd_offset(pud, end_address); 568 - if (pmd_none(*pmd)) { 569 - end_address += PMD_SIZE; 570 - continue; 571 - } 572 - 573 - pte = pte_offset_kernel(pmd, end_address); 574 - retry_pte: 575 - if (pte_none(*pte)) { 576 - end_address += PAGE_SIZE; 577 - pte++; 578 - if ((end_address < stop_address) && 579 - (end_address != ALIGN(end_address, 1UL << PMD_SHIFT))) 580 - goto retry_pte; 581 - continue; 582 - } 583 - /* Found next valid vmem_map page */ 584 - break; 585 - } while (end_address < stop_address); 586 - 587 - end_address = min(end_address, stop_address); 588 - end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1; 589 - hole_next_pfn = end_address / sizeof(struct page); 590 - return hole_next_pfn - pgdat->node_start_pfn; 591 - } 592 - #else 593 - static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i) 594 - { 595 - return i + 1; 596 - } 597 - #endif 598 - 599 537 /** 600 538 * show_mem - give short summary of memory stats 601 539 * ··· 563 625 if (pfn_valid(pgdat->node_start_pfn + i)) 564 626 page = pfn_to_page(pgdat->node_start_pfn + i); 565 627 else { 566 - i = 
find_next_valid_pfn_for_pgdat(pgdat, i) - 1; 628 + i = vmemmap_find_next_valid_pfn(pgdat->node_id, 629 + i) - 1; 567 630 continue; 568 631 } 569 632 if (PageReserved(page)) ··· 690 751 efi_memmap_walk(filter_rsvd_memory, count_node_pages); 691 752 692 753 #ifdef CONFIG_VIRTUAL_MEM_MAP 693 - vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page)); 754 + vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * 755 + sizeof(struct page)); 694 756 vmem_map = (struct page *) vmalloc_end; 695 757 efi_memmap_walk(create_mem_map_page_table, NULL); 696 758 printk("Virtual mem_map starts at 0x%p\n", vmem_map);
+55
arch/ia64/mm/init.c
··· 415 415 } 416 416 417 417 #ifdef CONFIG_VIRTUAL_MEM_MAP 418 + int vmemmap_find_next_valid_pfn(int node, int i) 419 + { 420 + unsigned long end_address, hole_next_pfn; 421 + unsigned long stop_address; 422 + pg_data_t *pgdat = NODE_DATA(node); 423 + 424 + end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i]; 425 + end_address = PAGE_ALIGN(end_address); 426 + 427 + stop_address = (unsigned long) &vmem_map[ 428 + pgdat->node_start_pfn + pgdat->node_spanned_pages]; 429 + 430 + do { 431 + pgd_t *pgd; 432 + pud_t *pud; 433 + pmd_t *pmd; 434 + pte_t *pte; 435 + 436 + pgd = pgd_offset_k(end_address); 437 + if (pgd_none(*pgd)) { 438 + end_address += PGDIR_SIZE; 439 + continue; 440 + } 441 + 442 + pud = pud_offset(pgd, end_address); 443 + if (pud_none(*pud)) { 444 + end_address += PUD_SIZE; 445 + continue; 446 + } 447 + 448 + pmd = pmd_offset(pud, end_address); 449 + if (pmd_none(*pmd)) { 450 + end_address += PMD_SIZE; 451 + continue; 452 + } 453 + 454 + pte = pte_offset_kernel(pmd, end_address); 455 + retry_pte: 456 + if (pte_none(*pte)) { 457 + end_address += PAGE_SIZE; 458 + pte++; 459 + if ((end_address < stop_address) && 460 + (end_address != ALIGN(end_address, 1UL << PMD_SHIFT))) 461 + goto retry_pte; 462 + continue; 463 + } 464 + /* Found next valid vmem_map page */ 465 + break; 466 + } while (end_address < stop_address); 467 + 468 + end_address = min(end_address, stop_address); 469 + end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1; 470 + hole_next_pfn = end_address / sizeof(struct page); 471 + return hole_next_pfn - pgdat->node_start_pfn; 472 + } 418 473 419 474 int __init 420 475 create_mem_map_page_table (u64 start, u64 end, void *arg)
+3 -3
arch/ia64/mm/ioremap.c
··· 32 32 */ 33 33 attr = kern_mem_attribute(offset, size); 34 34 if (attr & EFI_MEMORY_WB) 35 - return phys_to_virt(offset); 35 + return (void __iomem *) phys_to_virt(offset); 36 36 else if (attr & EFI_MEMORY_UC) 37 37 return __ioremap(offset, size); 38 38 ··· 43 43 gran_base = GRANULEROUNDDOWN(offset); 44 44 gran_size = GRANULEROUNDUP(offset + size) - gran_base; 45 45 if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB) 46 - return phys_to_virt(offset); 46 + return (void __iomem *) phys_to_virt(offset); 47 47 48 48 return __ioremap(offset, size); 49 49 } ··· 53 53 ioremap_nocache (unsigned long offset, unsigned long size) 54 54 { 55 55 if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB) 56 - return 0; 56 + return NULL; 57 57 58 58 return __ioremap(offset, size); 59 59 }
+1 -1
arch/ia64/sn/kernel/xpc_main.c
··· 480 480 partid_t partid = (u64) __partid; 481 481 struct xpc_partition *part = &xpc_partitions[partid]; 482 482 unsigned long irq_flags; 483 - struct sched_param param = { sched_priority: MAX_RT_PRIO - 1 }; 483 + struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 484 484 int ret; 485 485 486 486
+3 -3
arch/ia64/sn/pci/tioce_provider.c
··· 74 74 else 75 75 mmr_war_offset = 0x158; 76 76 77 - readq_relaxed((void *)(mmr_base + mmr_war_offset)); 77 + readq_relaxed((void __iomem *)(mmr_base + mmr_war_offset)); 78 78 } 79 79 } 80 80 ··· 92 92 93 93 if (mmr_offset < 0x45000) { 94 94 if (mmr_offset == 0x100) 95 - readq_relaxed((void *)(mmr_base + 0x38)); 96 - readq_relaxed((void *)(mmr_base + 0xb050)); 95 + readq_relaxed((void __iomem *)(mmr_base + 0x38)); 96 + readq_relaxed((void __iomem *)(mmr_base + 0xb050)); 97 97 } 98 98 } 99 99
+21
arch/powerpc/kernel/rtas.c
··· 571 571 } 572 572 EXPORT_SYMBOL(rtas_set_indicator); 573 573 574 + /* 575 + * Ignoring RTAS extended delay 576 + */ 577 + int rtas_set_indicator_fast(int indicator, int index, int new_value) 578 + { 579 + int rc; 580 + int token = rtas_token("set-indicator"); 581 + 582 + if (token == RTAS_UNKNOWN_SERVICE) 583 + return -ENOENT; 584 + 585 + rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value); 586 + 587 + WARN_ON(rc == -2 || (rc >= 9900 && rc <= 9905)); 588 + 589 + if (rc < 0) 590 + return rtas_error_rc(rc); 591 + 592 + return rc; 593 + } 594 + 574 595 void rtas_restart(char *cmd) 575 596 { 576 597 if (rtas_flash_term_hook)
-2
arch/powerpc/platforms/pseries/setup.c
··· 213 213 { 214 214 unsigned long set, reset; 215 215 216 - power4_enable_pmcs(); 217 - 218 216 set = 1UL << 63; 219 217 reset = 0; 220 218 plpar_hcall_norets(H_PERFMON, set, reset);
+3 -3
arch/powerpc/platforms/pseries/xics.c
··· 447 447 * 448 448 * XXX: undo of teardown on kexec needs this too, as may hotplug 449 449 */ 450 - rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, 450 + rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, 451 451 (1UL << interrupt_server_size) - 1 - default_distrib_server, 1); 452 452 } 453 453 ··· 776 776 * so leave the master cpu in the group. 777 777 */ 778 778 if (secondary) 779 - rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, 779 + rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, 780 780 (1UL << interrupt_server_size) - 1 - 781 781 default_distrib_server, 0); 782 782 } ··· 793 793 xics_set_cpu_priority(cpu, 0); 794 794 795 795 /* remove ourselves from the global interrupt queue */ 796 - status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, 796 + status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, 797 797 (1UL << interrupt_server_size) - 1 - default_distrib_server, 0); 798 798 WARN_ON(status < 0); 799 799
+7 -3
arch/sh/kernel/cpu/sh4/sq.c
··· 421 421 422 422 static int __init sq_api_init(void) 423 423 { 424 + int ret; 424 425 printk(KERN_NOTICE "sq: Registering store queue API.\n"); 425 426 426 - #ifdef CONFIG_PROC_FS 427 427 create_proc_read_entry("sq_mapping", 0, 0, sq_mapping_read_proc, 0); 428 - #endif 429 428 430 - return misc_register(&sq_dev); 429 + ret = misc_register(&sq_dev); 430 + if (ret) 431 + remove_proc_entry("sq_mapping", NULL); 432 + 433 + return ret; 431 434 } 432 435 433 436 static void __exit sq_api_exit(void) 434 437 { 435 438 misc_deregister(&sq_dev); 439 + remove_proc_entry("sq_mapping", NULL); 436 440 } 437 441 438 442 module_init(sq_api_init);
+11 -7
arch/x86_64/kernel/entry.S
··· 513 513 swapgs 514 514 1: incl %gs:pda_irqcount # RED-PEN should check preempt count 515 515 cmoveq %gs:pda_irqstackptr,%rsp 516 + push %rbp # backlink for old unwinder 516 517 /* 517 518 * We entered an interrupt context - irqs are off: 518 519 */ ··· 1140 1139 END(machine_check) 1141 1140 #endif 1142 1141 1142 + /* Call softirq on interrupt stack. Interrupts are off. */ 1143 1143 ENTRY(call_softirq) 1144 1144 CFI_STARTPROC 1145 - movq %gs:pda_irqstackptr,%rax 1146 - movq %rsp,%rdx 1147 - CFI_DEF_CFA_REGISTER rdx 1145 + push %rbp 1146 + CFI_ADJUST_CFA_OFFSET 8 1147 + CFI_REL_OFFSET rbp,0 1148 + mov %rsp,%rbp 1149 + CFI_DEF_CFA_REGISTER rbp 1148 1150 incl %gs:pda_irqcount 1149 - cmove %rax,%rsp 1150 - pushq %rdx 1151 - /*todo CFI_DEF_CFA_EXPRESSION ...*/ 1151 + cmove %gs:pda_irqstackptr,%rsp 1152 + push %rbp # backlink for old unwinder 1152 1153 call __do_softirq 1153 - popq %rsp 1154 + leaveq 1154 1155 CFI_DEF_CFA_REGISTER rsp 1156 + CFI_ADJUST_CFA_OFFSET -8 1155 1157 decl %gs:pda_irqcount 1156 1158 ret 1157 1159 CFI_ENDPROC
+2
arch/x86_64/kernel/pci-nommu.c
··· 92 92 { 93 93 if (dma_ops) 94 94 return; 95 + 96 + force_iommu = 0; /* no HW IOMMU */ 95 97 dma_ops = &nommu_dma_ops; 96 98 }
+1 -1
arch/x86_64/kernel/smp.c
··· 203 203 { 204 204 int i; 205 205 for_each_cpu_mask(i, cpu_possible_map) { 206 - spin_lock_init(&per_cpu(flush_state.tlbstate_lock, i)); 206 + spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock); 207 207 } 208 208 return 0; 209 209 }
+5 -8
drivers/acpi/acpi_memhotplug.c
··· 129 129 struct acpi_memory_info *info, *n; 130 130 131 131 132 + if (!list_empty(&mem_device->res_list)) 133 + return 0; 134 + 132 135 status = acpi_walk_resources(mem_device->device->handle, METHOD_NAME__CRS, 133 136 acpi_memory_get_resource, mem_device); 134 137 if (ACPI_FAILURE(status)) { 135 138 list_for_each_entry_safe(info, n, &mem_device->res_list, list) 136 139 kfree(info); 140 + INIT_LIST_HEAD(&mem_device->res_list); 137 141 return -EINVAL; 138 142 } 139 143 ··· 234 230 * (i.e. memory-hot-remove function) 235 231 */ 236 232 list_for_each_entry(info, &mem_device->res_list, list) { 237 - u64 start_pfn, end_pfn; 238 - 239 - start_pfn = info->start_addr >> PAGE_SHIFT; 240 - end_pfn = (info->start_addr + info->length - 1) >> PAGE_SHIFT; 241 - 242 - if (pfn_valid(start_pfn) || pfn_valid(end_pfn)) { 243 - /* already enabled. try next area */ 233 + if (info->enabled) { /* just sanity check...*/ 244 234 num_enabled++; 245 235 continue; 246 236 } 247 - 248 237 result = add_memory(node, info->start_addr, info->length); 249 238 if (result) 250 239 continue;
+6 -7
drivers/acpi/dock.c
··· 58 58 }; 59 59 60 60 #define DOCK_DOCKING 0x00000001 61 - #define DOCK_EVENT KOBJ_DOCK 62 - #define UNDOCK_EVENT KOBJ_UNDOCK 61 + #define DOCK_EVENT 3 62 + #define UNDOCK_EVENT 2 63 63 64 64 static struct dock_station *dock_station; 65 65 ··· 322 322 323 323 static void dock_event(struct dock_station *ds, u32 event, int num) 324 324 { 325 - struct acpi_device *device; 326 - 327 - device = dock_create_acpi_device(ds->handle); 328 - if (device) 329 - kobject_uevent(&device->kobj, num); 325 + /* 326 + * we don't do events until someone tells me that 327 + * they would like to have them. 328 + */ 330 329 } 331 330 332 331 /**
+3 -5
drivers/char/hvsi.c
··· 311 311 /* CD went away; no more connection */ 312 312 pr_debug("hvsi%i: CD dropped\n", hp->index); 313 313 hp->mctrl &= TIOCM_CD; 314 - if (!(hp->tty->flags & CLOCAL)) 314 + /* If userland hasn't done an open(2) yet, hp->tty is NULL. */ 315 + if (hp->tty && !(hp->tty->flags & CLOCAL)) 315 316 *to_hangup = hp->tty; 316 317 } 317 318 break; ··· 987 986 start_j = 0; 988 987 #endif /* DEBUG */ 989 988 wake_up_all(&hp->emptyq); 990 - if (test_bit(TTY_DO_WRITE_WAKEUP, &hp->tty->flags) 991 - && hp->tty->ldisc.write_wakeup) 992 - hp->tty->ldisc.write_wakeup(hp->tty); 993 - wake_up_interruptible(&hp->tty->write_wait); 989 + tty_wakeup(hp->tty); 994 990 } 995 991 996 992 out:
+24 -27
drivers/char/hw_random/omap-rng.c
··· 25 25 #include <linux/module.h> 26 26 #include <linux/init.h> 27 27 #include <linux/random.h> 28 + #include <linux/clk.h> 28 29 #include <linux/err.h> 29 - #include <linux/device.h> 30 + #include <linux/platform_device.h> 30 31 #include <linux/hw_random.h> 31 32 32 33 #include <asm/io.h> 33 - #include <asm/hardware/clock.h> 34 34 35 35 #define RNG_OUT_REG 0x00 /* Output register */ 36 36 #define RNG_STAT_REG 0x04 /* Status register ··· 52 52 53 53 static void __iomem *rng_base; 54 54 static struct clk *rng_ick; 55 - static struct device *rng_dev; 55 + static struct platform_device *rng_dev; 56 56 57 57 static u32 omap_rng_read_reg(int reg) 58 58 { ··· 83 83 .data_read = omap_rng_data_read, 84 84 }; 85 85 86 - static int __init omap_rng_probe(struct device *dev) 86 + static int __init omap_rng_probe(struct platform_device *pdev) 87 87 { 88 - struct platform_device *pdev = to_platform_device(dev); 89 88 struct resource *res, *mem; 90 89 int ret; 91 90 ··· 94 95 */ 95 96 BUG_ON(rng_dev); 96 97 97 - if (cpu_is_omap24xx()) { 98 + if (cpu_is_omap24xx()) { 98 99 rng_ick = clk_get(NULL, "rng_ick"); 99 100 if (IS_ERR(rng_ick)) { 100 - dev_err(dev, "Could not get rng_ick\n"); 101 + dev_err(&pdev->dev, "Could not get rng_ick\n"); 101 102 ret = PTR_ERR(rng_ick); 102 103 return ret; 103 - } 104 - else { 105 - clk_use(rng_ick); 106 - } 104 + } else 105 + clk_enable(rng_ick); 107 106 } 108 107 109 108 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ··· 114 117 if (mem == NULL) 115 118 return -EBUSY; 116 119 117 - dev_set_drvdata(dev, mem); 120 + dev_set_drvdata(&pdev->dev, mem); 118 121 rng_base = (u32 __iomem *)io_p2v(res->start); 119 122 120 123 ret = hwrng_register(&omap_rng_ops); ··· 124 127 return ret; 125 128 } 126 129 127 - dev_info(dev, "OMAP Random Number Generator ver. %02x\n", 130 + dev_info(&pdev->dev, "OMAP Random Number Generator ver. 
%02x\n", 128 131 omap_rng_read_reg(RNG_REV_REG)); 129 132 omap_rng_write_reg(RNG_MASK_REG, 0x1); 130 133 131 - rng_dev = dev; 134 + rng_dev = pdev; 132 135 133 136 return 0; 134 137 } 135 138 136 - static int __exit omap_rng_remove(struct device *dev) 139 + static int __exit omap_rng_remove(struct platform_device *pdev) 137 140 { 138 - struct resource *mem = dev_get_drvdata(dev); 141 + struct resource *mem = dev_get_drvdata(&pdev->dev); 139 142 140 143 hwrng_unregister(&omap_rng_ops); 141 144 142 145 omap_rng_write_reg(RNG_MASK_REG, 0x0); 143 146 144 147 if (cpu_is_omap24xx()) { 145 - clk_unuse(rng_ick); 148 + clk_disable(rng_ick); 146 149 clk_put(rng_ick); 147 150 } 148 151 ··· 154 157 155 158 #ifdef CONFIG_PM 156 159 157 - static int omap_rng_suspend(struct device *dev, pm_message_t message, u32 level) 160 + static int omap_rng_suspend(struct platform_device *pdev, pm_message_t message) 158 161 { 159 162 omap_rng_write_reg(RNG_MASK_REG, 0x0); 160 - 161 163 return 0; 162 164 } 163 165 164 - static int omap_rng_resume(struct device *dev, pm_message_t message, u32 level) 166 + static int omap_rng_resume(struct platform_device *pdev) 165 167 { 166 168 omap_rng_write_reg(RNG_MASK_REG, 0x1); 167 - 168 - return 1; 169 + return 0; 169 170 } 170 171 171 172 #else ··· 174 179 #endif 175 180 176 181 177 - static struct device_driver omap_rng_driver = { 178 - .name = "omap_rng", 179 - .bus = &platform_bus_type, 182 + static struct platform_driver omap_rng_driver = { 183 + .driver = { 184 + .name = "omap_rng", 185 + .owner = THIS_MODULE, 186 + }, 180 187 .probe = omap_rng_probe, 181 188 .remove = __exit_p(omap_rng_remove), 182 189 .suspend = omap_rng_suspend, ··· 190 193 if (!cpu_is_omap16xx() && !cpu_is_omap24xx()) 191 194 return -ENODEV; 192 195 193 - return driver_register(&omap_rng_driver); 196 + return platform_driver_register(&omap_rng_driver); 194 197 } 195 198 196 199 static void __exit omap_rng_exit(void) 197 200 { 198 - driver_unregister(&omap_rng_driver); 201 + 
platform_driver_unregister(&omap_rng_driver); 199 202 } 200 203 201 204 module_init(omap_rng_init);
+78 -61
drivers/char/keyboard.c
··· 107 107 108 108 struct kbd_struct kbd_table[MAX_NR_CONSOLES]; 109 109 static struct kbd_struct *kbd = kbd_table; 110 - static struct kbd_struct kbd0; 111 110 112 111 int spawnpid, spawnsig; 113 112 ··· 222 223 { 223 224 struct list_head *node; 224 225 225 - list_for_each(node,&kbd_handler.h_list) { 226 + list_for_each(node, &kbd_handler.h_list) { 226 227 struct input_handle *handle = to_handle_h(node); 227 228 if (test_bit(EV_SND, handle->dev->evbit)) { 228 229 if (test_bit(SND_TONE, handle->dev->sndbit)) 229 - input_event(handle->dev, EV_SND, SND_TONE, 0); 230 + input_inject_event(handle, EV_SND, SND_TONE, 0); 230 231 if (test_bit(SND_BELL, handle->dev->sndbit)) 231 - input_event(handle->dev, EV_SND, SND_BELL, 0); 232 + input_inject_event(handle, EV_SND, SND_BELL, 0); 232 233 } 233 234 } 234 235 } ··· 246 247 struct input_handle *handle = to_handle_h(node); 247 248 if (test_bit(EV_SND, handle->dev->evbit)) { 248 249 if (test_bit(SND_TONE, handle->dev->sndbit)) { 249 - input_event(handle->dev, EV_SND, SND_TONE, hz); 250 + input_inject_event(handle, EV_SND, SND_TONE, hz); 250 251 break; 251 252 } 252 253 if (test_bit(SND_BELL, handle->dev->sndbit)) { 253 - input_event(handle->dev, EV_SND, SND_BELL, 1); 254 + input_inject_event(handle, EV_SND, SND_BELL, 1); 254 255 break; 255 256 } 256 257 } ··· 271 272 unsigned int d = 0; 272 273 unsigned int p = 0; 273 274 274 - list_for_each(node,&kbd_handler.h_list) { 275 + list_for_each(node, &kbd_handler.h_list) { 275 276 struct input_handle *handle = to_handle_h(node); 276 277 struct input_dev *dev = handle->dev; 277 278 278 279 if (test_bit(EV_REP, dev->evbit)) { 279 280 if (rep->delay > 0) 280 - input_event(dev, EV_REP, REP_DELAY, rep->delay); 281 + input_inject_event(handle, EV_REP, REP_DELAY, rep->delay); 281 282 if (rep->period > 0) 282 - input_event(dev, EV_REP, REP_PERIOD, rep->period); 283 + input_inject_event(handle, EV_REP, REP_PERIOD, rep->period); 283 284 d = dev->rep[REP_DELAY]; 284 285 p = 
dev->rep[REP_PERIOD]; 285 286 } ··· 987 988 * interrupt routines for this thing allows us to easily mask 988 989 * this when we don't want any of the above to happen. 989 990 * This allows for easy and efficient race-condition prevention 990 - * for kbd_refresh_leds => input_event(dev, EV_LED, ...) => ... 991 + * for kbd_start => input_inject_event(dev, EV_LED, ...) => ... 991 992 */ 992 993 993 994 static void kbd_bh(unsigned long dummy) ··· 997 998 998 999 if (leds != ledstate) { 999 1000 list_for_each(node, &kbd_handler.h_list) { 1000 - struct input_handle * handle = to_handle_h(node); 1001 - input_event(handle->dev, EV_LED, LED_SCROLLL, !!(leds & 0x01)); 1002 - input_event(handle->dev, EV_LED, LED_NUML, !!(leds & 0x02)); 1003 - input_event(handle->dev, EV_LED, LED_CAPSL, !!(leds & 0x04)); 1004 - input_sync(handle->dev); 1001 + struct input_handle *handle = to_handle_h(node); 1002 + input_inject_event(handle, EV_LED, LED_SCROLLL, !!(leds & 0x01)); 1003 + input_inject_event(handle, EV_LED, LED_NUML, !!(leds & 0x02)); 1004 + input_inject_event(handle, EV_LED, LED_CAPSL, !!(leds & 0x04)); 1005 + input_inject_event(handle, EV_SYN, SYN_REPORT, 0); 1005 1006 } 1006 1007 } 1007 1008 ··· 1009 1010 } 1010 1011 1011 1012 DECLARE_TASKLET_DISABLED(keyboard_tasklet, kbd_bh, 0); 1012 - 1013 - /* 1014 - * This allows a newly plugged keyboard to pick the LED state. 
1015 - */ 1016 - static void kbd_refresh_leds(struct input_handle *handle) 1017 - { 1018 - unsigned char leds = ledstate; 1019 - 1020 - tasklet_disable(&keyboard_tasklet); 1021 - if (leds != 0xff) { 1022 - input_event(handle->dev, EV_LED, LED_SCROLLL, !!(leds & 0x01)); 1023 - input_event(handle->dev, EV_LED, LED_NUML, !!(leds & 0x02)); 1024 - input_event(handle->dev, EV_LED, LED_CAPSL, !!(leds & 0x04)); 1025 - input_sync(handle->dev); 1026 - } 1027 - tasklet_enable(&keyboard_tasklet); 1028 - } 1029 1013 1030 1014 #if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_ALPHA) ||\ 1031 1015 defined(CONFIG_MIPS) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) ||\ ··· 1025 1043 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 1026 1044 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1027 1045 80, 81, 82, 83, 84,118, 86, 87, 88,115,120,119,121,112,123, 92, 1028 - 284,285,309,298,312, 91,327,328,329,331,333,335,336,337,338,339, 1046 + 284,285,309, 0,312, 91,327,328,329,331,333,335,336,337,338,339, 1029 1047 367,288,302,304,350, 89,334,326,267,126,268,269,125,347,348,349, 1030 1048 360,261,262,263,268,376,100,101,321,316,373,286,289,102,351,355, 1031 1049 103,104,105,275,287,279,306,106,274,107,294,364,358,363,362,361, ··· 1047 1065 static int emulate_raw(struct vc_data *vc, unsigned int keycode, 1048 1066 unsigned char up_flag) 1049 1067 { 1050 - if (keycode > 255 || !x86_keycodes[keycode]) 1051 - return -1; 1068 + int code; 1052 1069 1053 1070 switch (keycode) { 1054 1071 case KEY_PAUSE: 1055 1072 put_queue(vc, 0xe1); 1056 1073 put_queue(vc, 0x1d | up_flag); 1057 1074 put_queue(vc, 0x45 | up_flag); 1058 - return 0; 1075 + break; 1076 + 1059 1077 case KEY_HANGEUL: 1060 1078 if (!up_flag) 1061 1079 put_queue(vc, 0xf2); 1062 - return 0; 1080 + break; 1081 + 1063 1082 case KEY_HANJA: 1064 1083 if (!up_flag) 1065 1084 put_queue(vc, 0xf1); 1066 - return 0; 1067 - } 1085 + break; 1068 1086 1069 - if (keycode == KEY_SYSRQ && sysrq_alt) { 
1070 - put_queue(vc, 0x54 | up_flag); 1071 - return 0; 1072 - } 1087 + case KEY_SYSRQ: 1088 + /* 1089 + * Real AT keyboards (that's what we're trying 1090 + * to emulate here emit 0xe0 0x2a 0xe0 0x37 when 1091 + * pressing PrtSc/SysRq alone, but simply 0x54 1092 + * when pressing Alt+PrtSc/SysRq. 1093 + */ 1094 + if (sysrq_alt) { 1095 + put_queue(vc, 0x54 | up_flag); 1096 + } else { 1097 + put_queue(vc, 0xe0); 1098 + put_queue(vc, 0x2a | up_flag); 1099 + put_queue(vc, 0xe0); 1100 + put_queue(vc, 0x37 | up_flag); 1101 + } 1102 + break; 1073 1103 1074 - if (x86_keycodes[keycode] & 0x100) 1075 - put_queue(vc, 0xe0); 1104 + default: 1105 + if (keycode > 255) 1106 + return -1; 1076 1107 1077 - put_queue(vc, (x86_keycodes[keycode] & 0x7f) | up_flag); 1108 + code = x86_keycodes[keycode]; 1109 + if (!code) 1110 + return -1; 1078 1111 1079 - if (keycode == KEY_SYSRQ) { 1080 - put_queue(vc, 0xe0); 1081 - put_queue(vc, 0x37 | up_flag); 1112 + if (code & 0x100) 1113 + put_queue(vc, 0xe0); 1114 + put_queue(vc, (code & 0x7f) | up_flag); 1115 + 1116 + break; 1082 1117 } 1083 1118 1084 1119 return 0; ··· 1297 1298 if (i == BTN_MISC && !test_bit(EV_SND, dev->evbit)) 1298 1299 return NULL; 1299 1300 1300 - if (!(handle = kmalloc(sizeof(struct input_handle), GFP_KERNEL))) 1301 + handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); 1302 + if (!handle) 1301 1303 return NULL; 1302 - memset(handle, 0, sizeof(struct input_handle)); 1303 1304 1304 1305 handle->dev = dev; 1305 1306 handle->handler = handler; 1306 1307 handle->name = "kbd"; 1307 1308 1308 1309 input_open_device(handle); 1309 - kbd_refresh_leds(handle); 1310 1310 1311 1311 return handle; 1312 1312 } ··· 1314 1316 { 1315 1317 input_close_device(handle); 1316 1318 kfree(handle); 1319 + } 1320 + 1321 + /* 1322 + * Start keyboard handler on the new keyboard by refreshing LED state to 1323 + * match the rest of the system. 
1324 + */ 1325 + static void kbd_start(struct input_handle *handle) 1326 + { 1327 + unsigned char leds = ledstate; 1328 + 1329 + tasklet_disable(&keyboard_tasklet); 1330 + if (leds != 0xff) { 1331 + input_inject_event(handle, EV_LED, LED_SCROLLL, !!(leds & 0x01)); 1332 + input_inject_event(handle, EV_LED, LED_NUML, !!(leds & 0x02)); 1333 + input_inject_event(handle, EV_LED, LED_CAPSL, !!(leds & 0x04)); 1334 + input_inject_event(handle, EV_SYN, SYN_REPORT, 0); 1335 + } 1336 + tasklet_enable(&keyboard_tasklet); 1317 1337 } 1318 1338 1319 1339 static struct input_device_id kbd_ids[] = { ··· 1354 1338 .event = kbd_event, 1355 1339 .connect = kbd_connect, 1356 1340 .disconnect = kbd_disconnect, 1341 + .start = kbd_start, 1357 1342 .name = "kbd", 1358 1343 .id_table = kbd_ids, 1359 1344 }; ··· 1363 1346 { 1364 1347 int i; 1365 1348 1366 - kbd0.ledflagstate = kbd0.default_ledflagstate = KBD_DEFLEDS; 1367 - kbd0.ledmode = LED_SHOW_FLAGS; 1368 - kbd0.lockstate = KBD_DEFLOCK; 1369 - kbd0.slockstate = 0; 1370 - kbd0.modeflags = KBD_DEFMODE; 1371 - kbd0.kbdmode = VC_XLATE; 1372 - 1373 - for (i = 0 ; i < MAX_NR_CONSOLES ; i++) 1374 - kbd_table[i] = kbd0; 1349 + for (i = 0; i < MAX_NR_CONSOLES; i++) { 1350 + kbd_table[i].ledflagstate = KBD_DEFLEDS; 1351 + kbd_table[i].default_ledflagstate = KBD_DEFLEDS; 1352 + kbd_table[i].ledmode = LED_SHOW_FLAGS; 1353 + kbd_table[i].lockstate = KBD_DEFLOCK; 1354 + kbd_table[i].slockstate = 0; 1355 + kbd_table[i].modeflags = KBD_DEFMODE; 1356 + kbd_table[i].kbdmode = VC_XLATE; 1357 + } 1375 1358 1376 1359 input_register_handler(&kbd_handler); 1377 1360
+6 -1
drivers/char/snsc.c
··· 374 374 struct sysctl_data_s *scd; 375 375 void *salbuf; 376 376 dev_t first_dev, dev; 377 - nasid_t event_nasid = ia64_sn_get_console_nasid(); 377 + nasid_t event_nasid; 378 + 379 + if (!ia64_platform_is("sn2")) 380 + return -ENODEV; 381 + 382 + event_nasid = ia64_sn_get_console_nasid(); 378 383 379 384 if (alloc_chrdev_region(&first_dev, 0, num_cnodes, 380 385 SYSCTL_BASENAME) < 0) {
+54 -21
drivers/cpufreq/cpufreq.c
··· 284 284 * SYSFS INTERFACE * 285 285 *********************************************************************/ 286 286 287 + static struct cpufreq_governor *__find_governor(const char *str_governor) 288 + { 289 + struct cpufreq_governor *t; 290 + 291 + list_for_each_entry(t, &cpufreq_governor_list, governor_list) 292 + if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) 293 + return t; 294 + 295 + return NULL; 296 + } 297 + 287 298 /** 288 299 * cpufreq_parse_governor - parse a governor string 289 300 */ 290 301 static int cpufreq_parse_governor (char *str_governor, unsigned int *policy, 291 302 struct cpufreq_governor **governor) 292 303 { 304 + int err = -EINVAL; 305 + 293 306 if (!cpufreq_driver) 294 - return -EINVAL; 307 + goto out; 308 + 295 309 if (cpufreq_driver->setpolicy) { 296 310 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { 297 311 *policy = CPUFREQ_POLICY_PERFORMANCE; 298 - return 0; 312 + err = 0; 299 313 } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) { 300 314 *policy = CPUFREQ_POLICY_POWERSAVE; 301 - return 0; 315 + err = 0; 302 316 } 303 - return -EINVAL; 304 - } else { 317 + } else if (cpufreq_driver->target) { 305 318 struct cpufreq_governor *t; 319 + 306 320 mutex_lock(&cpufreq_governor_mutex); 307 - if (!cpufreq_driver || !cpufreq_driver->target) 308 - goto out; 309 - list_for_each_entry(t, &cpufreq_governor_list, governor_list) { 310 - if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) { 311 - *governor = t; 321 + 322 + t = __find_governor(str_governor); 323 + 324 + if (t == NULL) { 325 + char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", str_governor); 326 + 327 + if (name) { 328 + int ret; 329 + 312 330 mutex_unlock(&cpufreq_governor_mutex); 313 - return 0; 331 + ret = request_module(name); 332 + mutex_lock(&cpufreq_governor_mutex); 333 + 334 + if (ret == 0) 335 + t = __find_governor(str_governor); 314 336 } 337 + 338 + kfree(name); 315 339 } 316 - out: 340 + 341 + if (t != NULL) { 342 + *governor = 
t; 343 + err = 0; 344 + } 345 + 317 346 mutex_unlock(&cpufreq_governor_mutex); 318 347 } 319 - return -EINVAL; 348 + out: 349 + return err; 320 350 } 321 351 322 352 ··· 1295 1265 1296 1266 int cpufreq_register_governor(struct cpufreq_governor *governor) 1297 1267 { 1298 - struct cpufreq_governor *t; 1268 + int err; 1299 1269 1300 1270 if (!governor) 1301 1271 return -EINVAL; 1302 1272 1303 1273 mutex_lock(&cpufreq_governor_mutex); 1304 1274 1305 - list_for_each_entry(t, &cpufreq_governor_list, governor_list) { 1306 - if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) { 1307 - mutex_unlock(&cpufreq_governor_mutex); 1308 - return -EBUSY; 1309 - } 1275 + err = -EBUSY; 1276 + if (__find_governor(governor->name) == NULL) { 1277 + err = 0; 1278 + list_add(&governor->governor_list, &cpufreq_governor_list); 1310 1279 } 1311 - list_add(&governor->governor_list, &cpufreq_governor_list); 1312 1280 1313 1281 mutex_unlock(&cpufreq_governor_mutex); 1314 - return 0; 1282 + return err; 1315 1283 } 1316 1284 EXPORT_SYMBOL_GPL(cpufreq_register_governor); 1317 1285 ··· 1370 1342 policy->min, policy->max); 1371 1343 1372 1344 memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo)); 1345 + 1346 + if (policy->min > data->min && policy->min > policy->max) { 1347 + ret = -EINVAL; 1348 + goto error_out; 1349 + } 1373 1350 1374 1351 /* verify the cpu speed can be set within this limit */ 1375 1352 ret = cpufreq_driver->verify(policy);
+1
drivers/edac/edac_mc.h
··· 29 29 #include <linux/rcupdate.h> 30 30 #include <linux/completion.h> 31 31 #include <linux/kobject.h> 32 + #include <linux/platform_device.h> 32 33 33 34 #define EDAC_MC_LABEL_LEN 31 34 35 #define MC_PROC_NAME_MAX_LEN 7
+5 -2
drivers/i2c/busses/scx200_acb.c
··· 232 232 unsigned long timeout; 233 233 234 234 timeout = jiffies + POLL_TIMEOUT; 235 - while (time_before(jiffies, timeout)) { 235 + while (1) { 236 236 status = inb(ACBST); 237 237 238 238 /* Reset the status register to avoid the hang */ ··· 242 242 scx200_acb_machine(iface, status); 243 243 return; 244 244 } 245 - yield(); 245 + if (time_after(jiffies, timeout)) 246 + break; 247 + cpu_relax(); 248 + cond_resched(); 246 249 } 247 250 248 251 dev_err(&iface->adapter.dev, "timeout in state %s\n",
+3
drivers/ieee1394/sbp2.c
··· 2515 2515 sdev->skip_ms_page_8 = 1; 2516 2516 if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) 2517 2517 sdev->fix_capacity = 1; 2518 + if (scsi_id->ne->guid_vendor_id == 0x0010b9 && /* Maxtor's OUI */ 2519 + (sdev->type == TYPE_DISK || sdev->type == TYPE_RBC)) 2520 + sdev->allow_restart = 1; 2518 2521 return 0; 2519 2522 } 2520 2523
+14 -16
drivers/infiniband/core/addr.c
··· 35 35 #include <net/arp.h> 36 36 #include <net/neighbour.h> 37 37 #include <net/route.h> 38 + #include <net/netevent.h> 38 39 #include <rdma/ib_addr.h> 39 40 40 41 MODULE_AUTHOR("Sean Hefty"); ··· 327 326 } 328 327 EXPORT_SYMBOL(rdma_addr_cancel); 329 328 330 - static int addr_arp_recv(struct sk_buff *skb, struct net_device *dev, 331 - struct packet_type *pkt, struct net_device *orig_dev) 329 + static int netevent_callback(struct notifier_block *self, unsigned long event, 330 + void *ctx) 332 331 { 333 - struct arphdr *arp_hdr; 332 + if (event == NETEVENT_NEIGH_UPDATE) { 333 + struct neighbour *neigh = ctx; 334 334 335 - arp_hdr = (struct arphdr *) skb->nh.raw; 336 - 337 - if (arp_hdr->ar_op == htons(ARPOP_REQUEST) || 338 - arp_hdr->ar_op == htons(ARPOP_REPLY)) 339 - set_timeout(jiffies); 340 - 341 - kfree_skb(skb); 335 + if (neigh->dev->type == ARPHRD_INFINIBAND && 336 + (neigh->nud_state & NUD_VALID)) { 337 + set_timeout(jiffies); 338 + } 339 + } 342 340 return 0; 343 341 } 344 342 345 - static struct packet_type addr_arp = { 346 - .type = __constant_htons(ETH_P_ARP), 347 - .func = addr_arp_recv, 348 - .af_packet_priv = (void*) 1, 343 + static struct notifier_block nb = { 344 + .notifier_call = netevent_callback 349 345 }; 350 346 351 347 static int addr_init(void) ··· 351 353 if (!addr_wq) 352 354 return -ENOMEM; 353 355 354 - dev_add_pack(&addr_arp); 356 + register_netevent_notifier(&nb); 355 357 return 0; 356 358 } 357 359 358 360 static void addr_cleanup(void) 359 361 { 360 - dev_remove_pack(&addr_arp); 362 + unregister_netevent_notifier(&nb); 361 363 destroy_workqueue(addr_wq); 362 364 } 363 365
+3 -1
drivers/infiniband/core/cm.c
··· 975 975 976 976 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> 977 977 id.local_id); 978 - if (IS_ERR(cm_id_priv->timewait_info)) 978 + if (IS_ERR(cm_id_priv->timewait_info)) { 979 + ret = PTR_ERR(cm_id_priv->timewait_info); 979 980 goto out; 981 + } 980 982 981 983 ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); 982 984 if (ret)
+2
drivers/infiniband/core/uverbs.h
··· 42 42 #include <linux/kref.h> 43 43 #include <linux/idr.h> 44 44 #include <linux/mutex.h> 45 + #include <linux/completion.h> 45 46 46 47 #include <rdma/ib_verbs.h> 47 48 #include <rdma/ib_user_verbs.h> ··· 70 69 71 70 struct ib_uverbs_device { 72 71 struct kref ref; 72 + struct completion comp; 73 73 int devnum; 74 74 struct cdev *dev; 75 75 struct class_device *class_dev;
+7 -1
drivers/infiniband/core/uverbs_main.c
··· 122 122 struct ib_uverbs_device *dev = 123 123 container_of(ref, struct ib_uverbs_device, ref); 124 124 125 - kfree(dev); 125 + complete(&dev->comp); 126 126 } 127 127 128 128 void ib_uverbs_release_ucq(struct ib_uverbs_file *file, ··· 740 740 return; 741 741 742 742 kref_init(&uverbs_dev->ref); 743 + init_completion(&uverbs_dev->comp); 743 744 744 745 spin_lock(&map_lock); 745 746 uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); ··· 794 793 795 794 err: 796 795 kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); 796 + wait_for_completion(&uverbs_dev->comp); 797 + kfree(uverbs_dev); 797 798 return; 798 799 } 799 800 ··· 815 812 spin_unlock(&map_lock); 816 813 817 814 clear_bit(uverbs_dev->devnum, dev_map); 815 + 818 816 kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); 817 + wait_for_completion(&uverbs_dev->comp); 818 + kfree(uverbs_dev); 819 819 } 820 820 821 821 static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags,
+8 -7
drivers/infiniband/hw/mthca/mthca_allocator.c
··· 108 108 * serialize access to the array. 109 109 */ 110 110 111 + #define MTHCA_ARRAY_MASK (PAGE_SIZE / sizeof (void *) - 1) 112 + 111 113 void *mthca_array_get(struct mthca_array *array, int index) 112 114 { 113 115 int p = (index * sizeof (void *)) >> PAGE_SHIFT; 114 116 115 - if (array->page_list[p].page) { 116 - int i = index & (PAGE_SIZE / sizeof (void *) - 1); 117 - return array->page_list[p].page[i]; 118 - } else 117 + if (array->page_list[p].page) 118 + return array->page_list[p].page[index & MTHCA_ARRAY_MASK]; 119 + else 119 120 return NULL; 120 121 } 121 122 ··· 131 130 if (!array->page_list[p].page) 132 131 return -ENOMEM; 133 132 134 - array->page_list[p].page[index & (PAGE_SIZE / sizeof (void *) - 1)] = 135 - value; 133 + array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value; 136 134 ++array->page_list[p].used; 137 135 138 136 return 0; ··· 144 144 if (--array->page_list[p].used == 0) { 145 145 free_page((unsigned long) array->page_list[p].page); 146 146 array->page_list[p].page = NULL; 147 - } 147 + } else 148 + array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL; 148 149 149 150 if (array->page_list[p].used < 0) 150 151 pr_debug("Array %p index %d page %d with ref count %d < 0\n",
+1 -2
drivers/infiniband/ulp/ipoib/Kconfig
··· 6 6 transports IP packets over InfiniBand so you can use your IB 7 7 device as a fancy NIC. 8 8 9 - The IPoIB protocol is defined by the IETF ipoib working 10 - group: <http://www.ietf.org/html.charters/ipoib-charter.html>. 9 + See Documentation/infiniband/ipoib.txt for more information 11 10 12 11 config INFINIBAND_IPOIB_DEBUG 13 12 bool "IP-over-InfiniBand debugging" if EMBEDDED
+17 -2
drivers/infiniband/ulp/srp/ib_srp.c
··· 77 77 78 78 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad }; 79 79 80 + static int mellanox_workarounds = 1; 81 + 82 + module_param(mellanox_workarounds, int, 0444); 83 + MODULE_PARM_DESC(mellanox_workarounds, 84 + "Enable workarounds for Mellanox SRP target bugs if != 0"); 85 + 86 + static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 }; 87 + 80 88 static void srp_add_one(struct ib_device *device); 81 89 static void srp_remove_one(struct ib_device *device); 82 90 static void srp_completion(struct ib_cq *cq, void *target_ptr); ··· 534 526 while (ib_poll_cq(target->cq, 1, &wc) > 0) 535 527 ; /* nothing */ 536 528 529 + spin_lock_irq(target->scsi_host->host_lock); 537 530 list_for_each_entry_safe(req, tmp, &target->req_queue, list) 538 531 srp_reset_req(target, req); 532 + spin_unlock_irq(target->scsi_host->host_lock); 539 533 540 534 target->rx_head = 0; 541 535 target->tx_head = 0; ··· 577 567 return ret; 578 568 } 579 569 580 - static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat, 570 + static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat, 581 571 int sg_cnt, struct srp_request *req, 582 572 struct srp_direct_buf *buf) 583 573 { ··· 587 577 int page_cnt; 588 578 int i, j; 589 579 int ret; 580 + struct srp_device *dev = target->srp_host->dev; 590 581 591 582 if (!dev->fmr_pool) 592 583 return -ENODEV; 584 + 585 + if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) && 586 + mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3)) 587 + return -EINVAL; 593 588 594 589 len = page_cnt = 0; 595 590 for (i = 0; i < sg_cnt; ++i) { ··· 698 683 buf->va = cpu_to_be64(sg_dma_address(scat)); 699 684 buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey); 700 685 buf->len = cpu_to_be32(sg_dma_len(scat)); 701 - } else if (srp_map_fmr(target->srp_host->dev, scat, count, req, 686 + } else if (srp_map_fmr(target, scat, count, req, 702 687 (void *) cmd->add_data)) { 703 688 /* 704 689 * FMR mapping failed, and the 
scatterlist has more
+3 -7
drivers/input/evdev.c
··· 127 127 { 128 128 struct evdev_list *list; 129 129 int i = iminor(inode) - EVDEV_MINOR_BASE; 130 - int accept_err; 131 130 132 131 if (i >= EVDEV_MINORS || !evdev_table[i] || !evdev_table[i]->exist) 133 132 return -ENODEV; 134 - 135 - if ((accept_err = input_accept_process(&(evdev_table[i]->handle), file))) 136 - return accept_err; 137 133 138 134 if (!(list = kzalloc(sizeof(struct evdev_list), GFP_KERNEL))) 139 135 return -ENOMEM; ··· 256 260 257 261 if (evdev_event_from_user(buffer + retval, &event)) 258 262 return -EFAULT; 259 - input_event(list->evdev->handle.dev, event.type, event.code, event.value); 263 + input_inject_event(&list->evdev->handle, event.type, event.code, event.value); 260 264 retval += evdev_event_size(); 261 265 } 262 266 ··· 424 428 if (get_user(v, ip + 1)) 425 429 return -EFAULT; 426 430 427 - input_event(dev, EV_REP, REP_DELAY, u); 428 - input_event(dev, EV_REP, REP_PERIOD, v); 431 + input_inject_event(&evdev->handle, EV_REP, REP_DELAY, u); 432 + input_inject_event(&evdev->handle, EV_REP, REP_PERIOD, v); 429 433 430 434 return 0; 431 435
+2 -2
drivers/input/gameport/fm801-gp.c
··· 106 106 gp->gameport = port; 107 107 gp->res_port = request_region(port->io, 0x10, "FM801 GP"); 108 108 if (!gp->res_port) { 109 - kfree(gp); 110 - gameport_free_port(port); 111 109 printk(KERN_DEBUG "fm801-gp: unable to grab region 0x%x-0x%x\n", 112 110 port->io, port->io + 0x0f); 111 + gameport_free_port(port); 112 + kfree(gp); 113 113 return -EBUSY; 114 114 } 115 115
+49 -17
drivers/input/gameport/gameport.c
··· 53 53 54 54 static struct bus_type gameport_bus; 55 55 56 + static void gameport_add_driver(struct gameport_driver *drv); 56 57 static void gameport_add_port(struct gameport *gameport); 57 58 static void gameport_destroy_port(struct gameport *gameport); 58 59 static void gameport_reconnect_port(struct gameport *gameport); ··· 212 211 213 212 static void gameport_find_driver(struct gameport *gameport) 214 213 { 214 + int error; 215 + 215 216 down_write(&gameport_bus.subsys.rwsem); 216 - device_attach(&gameport->dev); 217 + error = device_attach(&gameport->dev); 218 + if (error < 0) 219 + printk(KERN_WARNING 220 + "gameport: device_attach() failed for %s (%s), error: %d\n", 221 + gameport->phys, gameport->name, error); 217 222 up_write(&gameport_bus.subsys.rwsem); 218 223 } 219 224 ··· 323 316 spin_unlock_irqrestore(&gameport_event_lock, flags); 324 317 } 325 318 326 - 327 319 static struct gameport_event *gameport_get_event(void) 328 320 { 329 321 struct gameport_event *event; ··· 348 342 static void gameport_handle_event(void) 349 343 { 350 344 struct gameport_event *event; 351 - struct gameport_driver *gameport_drv; 352 345 353 346 mutex_lock(&gameport_mutex); 354 347 ··· 374 369 break; 375 370 376 371 case GAMEPORT_REGISTER_DRIVER: 377 - gameport_drv = event->object; 378 - driver_register(&gameport_drv->driver); 372 + gameport_add_driver(event->object); 379 373 break; 380 374 381 375 default: ··· 536 532 if (gameport->parent) 537 533 gameport->dev.parent = &gameport->parent->dev; 538 534 535 + INIT_LIST_HEAD(&gameport->node); 539 536 spin_lock_init(&gameport->timer_lock); 540 537 init_timer(&gameport->poll_timer); 541 538 gameport->poll_timer.function = gameport_run_poll_handler; ··· 549 544 */ 550 545 static void gameport_add_port(struct gameport *gameport) 551 546 { 547 + int error; 548 + 552 549 if (gameport->parent) 553 550 gameport->parent->child = gameport; 554 551 ··· 565 558 printk(KERN_INFO "gameport: %s is %s, speed %dkHz\n", 566 559 gameport->name, 
gameport->phys, gameport->speed); 567 560 568 - device_add(&gameport->dev); 569 - gameport->registered = 1; 561 + error = device_add(&gameport->dev); 562 + if (error) 563 + printk(KERN_ERR 564 + "gameport: device_add() failed for %s (%s), error: %d\n", 565 + gameport->phys, gameport->name, error); 566 + else 567 + gameport->registered = 1; 570 568 } 571 569 572 570 /* ··· 595 583 596 584 if (gameport->registered) { 597 585 device_del(&gameport->dev); 598 - list_del_init(&gameport->node); 599 586 gameport->registered = 0; 600 587 } 588 + 589 + list_del_init(&gameport->node); 601 590 602 591 gameport_remove_pending_events(gameport); 603 592 put_device(&gameport->dev); ··· 717 704 } 718 705 719 706 static struct bus_type gameport_bus = { 720 - .name = "gameport", 721 - .probe = gameport_driver_probe, 722 - .remove = gameport_driver_remove, 707 + .name = "gameport", 708 + .probe = gameport_driver_probe, 709 + .remove = gameport_driver_remove, 723 710 }; 711 + 712 + static void gameport_add_driver(struct gameport_driver *drv) 713 + { 714 + int error; 715 + 716 + error = driver_register(&drv->driver); 717 + if (error) 718 + printk(KERN_ERR 719 + "gameport: driver_register() failed for %s, error: %d\n", 720 + drv->driver.name, error); 721 + } 724 722 725 723 void __gameport_register_driver(struct gameport_driver *drv, struct module *owner) 726 724 { ··· 802 778 803 779 static int __init gameport_init(void) 804 780 { 805 - gameport_task = kthread_run(gameport_thread, NULL, "kgameportd"); 806 - if (IS_ERR(gameport_task)) { 807 - printk(KERN_ERR "gameport: Failed to start kgameportd\n"); 808 - return PTR_ERR(gameport_task); 809 - } 781 + int error; 810 782 811 783 gameport_bus.dev_attrs = gameport_device_attrs; 812 784 gameport_bus.drv_attrs = gameport_driver_attrs; 813 785 gameport_bus.match = gameport_bus_match; 814 - bus_register(&gameport_bus); 786 + error = bus_register(&gameport_bus); 787 + if (error) { 788 + printk(KERN_ERR "gameport: failed to register gameport bus, 
error: %d\n", error); 789 + return error; 790 + } 791 + 792 + gameport_task = kthread_run(gameport_thread, NULL, "kgameportd"); 793 + if (IS_ERR(gameport_task)) { 794 + bus_unregister(&gameport_bus); 795 + error = PTR_ERR(gameport_task); 796 + printk(KERN_ERR "gameport: Failed to start kgameportd, error: %d\n", error); 797 + return error; 798 + } 815 799 816 800 return 0; 817 801 }
+44 -13
drivers/input/input.c
··· 35 35 36 36 static struct input_handler *input_table[8]; 37 37 38 + /** 39 + * input_event() - report new input event 40 + * @handle: device that generated the event 41 + * @type: type of the event 42 + * @code: event code 43 + * @value: value of the event 44 + * 45 + * This function should be used by drivers implementing various input devices 46 + * See also input_inject_event() 47 + */ 38 48 void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) 39 49 { 40 50 struct input_handle *handle; ··· 193 183 } 194 184 EXPORT_SYMBOL(input_event); 195 185 186 + /** 187 + * input_inject_event() - send input event from input handler 188 + * @handle: input handle to send event through 189 + * @type: type of the event 190 + * @code: event code 191 + * @value: value of the event 192 + * 193 + * Similar to input_event() but will ignore event if device is "grabbed" and handle 194 + * injecting event is not the one that owns the device. 195 + */ 196 + void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) 197 + { 198 + if (!handle->dev->grab || handle->dev->grab == handle) 199 + input_event(handle->dev, type, code, value); 200 + } 201 + EXPORT_SYMBOL(input_inject_event); 202 + 196 203 static void input_repeat_key(unsigned long data) 197 204 { 198 205 struct input_dev *dev = (void *) data; ··· 224 197 mod_timer(&dev->timer, jiffies + msecs_to_jiffies(dev->rep[REP_PERIOD])); 225 198 } 226 199 227 - int input_accept_process(struct input_handle *handle, struct file *file) 228 - { 229 - if (handle->dev->accept) 230 - return handle->dev->accept(handle->dev, file); 231 - 232 - return 0; 233 - } 234 - EXPORT_SYMBOL(input_accept_process); 235 - 236 200 int input_grab_device(struct input_handle *handle) 237 201 { 238 202 if (handle->dev->grab) ··· 236 218 237 219 void input_release_device(struct input_handle *handle) 238 220 { 239 - if (handle->dev->grab == handle) 240 - handle->dev->grab = NULL; 221 + 
struct input_dev *dev = handle->dev; 222 + 223 + if (dev->grab == handle) { 224 + dev->grab = NULL; 225 + 226 + list_for_each_entry(handle, &dev->h_list, d_node) 227 + if (handle->handler->start) 228 + handle->handler->start(handle); 229 + } 241 230 } 242 231 EXPORT_SYMBOL(input_release_device); 243 232 ··· 988 963 list_for_each_entry(handler, &input_handler_list, node) 989 964 if (!handler->blacklist || !input_match_device(handler->blacklist, dev)) 990 965 if ((id = input_match_device(handler->id_table, dev))) 991 - if ((handle = handler->connect(handler, dev, id))) 966 + if ((handle = handler->connect(handler, dev, id))) { 992 967 input_link_handle(handle); 968 + if (handler->start) 969 + handler->start(handle); 970 + } 993 971 994 972 input_wakeup_procfs_readers(); 995 973 ··· 1056 1028 list_for_each_entry(dev, &input_dev_list, node) 1057 1029 if (!handler->blacklist || !input_match_device(handler->blacklist, dev)) 1058 1030 if ((id = input_match_device(handler->id_table, dev))) 1059 - if ((handle = handler->connect(handler, dev, id))) 1031 + if ((handle = handler->connect(handler, dev, id))) { 1060 1032 input_link_handle(handle); 1033 + if (handler->start) 1034 + handler->start(handle); 1035 + } 1061 1036 1062 1037 input_wakeup_procfs_readers(); 1063 1038 }
+10 -9
drivers/input/joystick/iforce/iforce-main.c
··· 79 79 { 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //? 80 80 { 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //? 81 81 { 0x06f8, 0x0004, "Gullemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //? 82 + { 0x06d6, 0x29bc, "Trust Force Feedback Race Master", btn_wheel, abs_wheel, ff_iforce }, 82 83 { 0x0000, 0x0000, "Unknown I-Force Device [%04x:%04x]", btn_joystick, abs_joystick, ff_iforce } 83 84 }; 84 85 ··· 223 222 int err = 0; 224 223 struct iforce_core_effect* core_effect; 225 224 226 - /* Check who is trying to erase this effect */ 227 - if (iforce->core_effects[effect_id].owner != current->pid) { 228 - printk(KERN_WARNING "iforce-main.c: %d tried to erase an effect belonging to %d\n", current->pid, iforce->core_effects[effect_id].owner); 229 - return -EACCES; 230 - } 231 - 232 225 if (effect_id < 0 || effect_id >= FF_EFFECTS_MAX) 233 226 return -EINVAL; 234 227 235 - core_effect = iforce->core_effects + effect_id; 228 + core_effect = &iforce->core_effects[effect_id]; 229 + 230 + /* Check who is trying to erase this effect */ 231 + if (core_effect->owner != current->pid) { 232 + printk(KERN_WARNING "iforce-main.c: %d tried to erase an effect belonging to %d\n", current->pid, core_effect->owner); 233 + return -EACCES; 234 + } 236 235 237 236 if (test_bit(FF_MOD1_IS_USED, core_effect->flags)) 238 - err = release_resource(&(iforce->core_effects[effect_id].mod1_chunk)); 237 + err = release_resource(&core_effect->mod1_chunk); 239 238 240 239 if (!err && test_bit(FF_MOD2_IS_USED, core_effect->flags)) 241 - err = release_resource(&(iforce->core_effects[effect_id].mod2_chunk)); 240 + err = release_resource(&core_effect->mod2_chunk); 242 241 243 242 /*TODO: remember to change that if more FF_MOD* bits are added */ 244 243 core_effect->flags[0] = 0;
+1 -1
drivers/input/joystick/spaceball.c
··· 50 50 */ 51 51 52 52 #define SPACEBALL_MAX_LENGTH 128 53 - #define SPACEBALL_MAX_ID 8 53 + #define SPACEBALL_MAX_ID 9 54 54 55 55 #define SPACEBALL_1003 1 56 56 #define SPACEBALL_2003B 3
+60 -43
drivers/input/keyboard/atkbd.c
··· 482 482 return IRQ_HANDLED; 483 483 } 484 484 485 + static int atkbd_set_repeat_rate(struct atkbd *atkbd) 486 + { 487 + const short period[32] = 488 + { 33, 37, 42, 46, 50, 54, 58, 63, 67, 75, 83, 92, 100, 109, 116, 125, 489 + 133, 149, 167, 182, 200, 217, 232, 250, 270, 303, 333, 370, 400, 435, 470, 500 }; 490 + const short delay[4] = 491 + { 250, 500, 750, 1000 }; 492 + 493 + struct input_dev *dev = atkbd->dev; 494 + unsigned char param; 495 + int i = 0, j = 0; 496 + 497 + while (i < ARRAY_SIZE(period) - 1 && period[i] < dev->rep[REP_PERIOD]) 498 + i++; 499 + dev->rep[REP_PERIOD] = period[i]; 500 + 501 + while (j < ARRAY_SIZE(period) - 1 && delay[j] < dev->rep[REP_DELAY]) 502 + j++; 503 + dev->rep[REP_DELAY] = delay[j]; 504 + 505 + param = i | (j << 5); 506 + return ps2_command(&atkbd->ps2dev, &param, ATKBD_CMD_SETREP); 507 + } 508 + 509 + static int atkbd_set_leds(struct atkbd *atkbd) 510 + { 511 + struct input_dev *dev = atkbd->dev; 512 + unsigned char param[2]; 513 + 514 + param[0] = (test_bit(LED_SCROLLL, dev->led) ? 1 : 0) 515 + | (test_bit(LED_NUML, dev->led) ? 2 : 0) 516 + | (test_bit(LED_CAPSL, dev->led) ? 4 : 0); 517 + if (ps2_command(&atkbd->ps2dev, param, ATKBD_CMD_SETLEDS)) 518 + return -1; 519 + 520 + if (atkbd->extra) { 521 + param[0] = 0; 522 + param[1] = (test_bit(LED_COMPOSE, dev->led) ? 0x01 : 0) 523 + | (test_bit(LED_SLEEP, dev->led) ? 0x02 : 0) 524 + | (test_bit(LED_SUSPEND, dev->led) ? 0x04 : 0) 525 + | (test_bit(LED_MISC, dev->led) ? 0x10 : 0) 526 + | (test_bit(LED_MUTE, dev->led) ? 
0x20 : 0); 527 + if (ps2_command(&atkbd->ps2dev, param, ATKBD_CMD_EX_SETLEDS)) 528 + return -1; 529 + } 530 + 531 + return 0; 532 + } 533 + 485 534 /* 486 535 * atkbd_event_work() is used to complete processing of events that 487 536 * can not be processed by input_event() which is often called from ··· 539 490 540 491 static void atkbd_event_work(void *data) 541 492 { 542 - const short period[32] = 543 - { 33, 37, 42, 46, 50, 54, 58, 63, 67, 75, 83, 92, 100, 109, 116, 125, 544 - 133, 149, 167, 182, 200, 217, 232, 250, 270, 303, 333, 370, 400, 435, 470, 500 }; 545 - const short delay[4] = 546 - { 250, 500, 750, 1000 }; 547 - 548 493 struct atkbd *atkbd = data; 549 - struct input_dev *dev = atkbd->dev; 550 - unsigned char param[2]; 551 - int i, j; 552 494 553 495 mutex_lock(&atkbd->event_mutex); 554 496 555 - if (test_and_clear_bit(ATKBD_LED_EVENT_BIT, &atkbd->event_mask)) { 556 - param[0] = (test_bit(LED_SCROLLL, dev->led) ? 1 : 0) 557 - | (test_bit(LED_NUML, dev->led) ? 2 : 0) 558 - | (test_bit(LED_CAPSL, dev->led) ? 4 : 0); 559 - ps2_command(&atkbd->ps2dev, param, ATKBD_CMD_SETLEDS); 497 + if (test_and_clear_bit(ATKBD_LED_EVENT_BIT, &atkbd->event_mask)) 498 + atkbd_set_leds(atkbd); 560 499 561 - if (atkbd->extra) { 562 - param[0] = 0; 563 - param[1] = (test_bit(LED_COMPOSE, dev->led) ? 0x01 : 0) 564 - | (test_bit(LED_SLEEP, dev->led) ? 0x02 : 0) 565 - | (test_bit(LED_SUSPEND, dev->led) ? 0x04 : 0) 566 - | (test_bit(LED_MISC, dev->led) ? 0x10 : 0) 567 - | (test_bit(LED_MUTE, dev->led) ? 
0x20 : 0); 568 - ps2_command(&atkbd->ps2dev, param, ATKBD_CMD_EX_SETLEDS); 569 - } 570 - } 571 - 572 - if (test_and_clear_bit(ATKBD_REP_EVENT_BIT, &atkbd->event_mask)) { 573 - i = j = 0; 574 - while (i < 31 && period[i] < dev->rep[REP_PERIOD]) 575 - i++; 576 - while (j < 3 && delay[j] < dev->rep[REP_DELAY]) 577 - j++; 578 - dev->rep[REP_PERIOD] = period[i]; 579 - dev->rep[REP_DELAY] = delay[j]; 580 - param[0] = i | (j << 5); 581 - ps2_command(&atkbd->ps2dev, param, ATKBD_CMD_SETREP); 582 - } 500 + if (test_and_clear_bit(ATKBD_REP_EVENT_BIT, &atkbd->event_mask)) 501 + atkbd_set_repeat_rate(atkbd); 583 502 584 503 mutex_unlock(&atkbd->event_mutex); 585 504 } ··· 992 975 { 993 976 struct atkbd *atkbd = serio_get_drvdata(serio); 994 977 struct serio_driver *drv = serio->drv; 995 - unsigned char param[1]; 996 978 997 979 if (!atkbd || !drv) { 998 980 printk(KERN_DEBUG "atkbd: reconnect request, but serio is disconnected, ignoring...\n"); ··· 1001 985 atkbd_disable(atkbd); 1002 986 1003 987 if (atkbd->write) { 1004 - param[0] = (test_bit(LED_SCROLLL, atkbd->dev->led) ? 1 : 0) 1005 - | (test_bit(LED_NUML, atkbd->dev->led) ? 2 : 0) 1006 - | (test_bit(LED_CAPSL, atkbd->dev->led) ? 4 : 0); 1007 - 1008 988 if (atkbd_probe(atkbd)) 1009 989 return -1; 1010 990 if (atkbd->set != atkbd_select_set(atkbd, atkbd->set, atkbd->extra)) ··· 1008 996 1009 997 atkbd_activate(atkbd); 1010 998 1011 - if (ps2_command(&atkbd->ps2dev, param, ATKBD_CMD_SETLEDS)) 1012 - return -1; 999 + /* 1000 + * Restore repeat rate and LEDs (that were reset by atkbd_activate) 1001 + * to pre-resume state 1002 + */ 1003 + if (!atkbd->softrepeat) 1004 + atkbd_set_repeat_rate(atkbd); 1005 + atkbd_set_leds(atkbd); 1013 1006 } 1014 1007 1015 1008 atkbd_enable(atkbd);
+10 -10
drivers/input/misc/wistron_btns.c
··· 94 94 95 95 static ssize_t __init locate_wistron_bios(void __iomem *base) 96 96 { 97 - static const unsigned char __initdata signature[] = 97 + static unsigned char __initdata signature[] = 98 98 { 0x42, 0x21, 0x55, 0x30 }; 99 99 ssize_t offset; 100 100 ··· 259 259 return 1; 260 260 } 261 261 262 - static struct key_entry keymap_empty[] = { 262 + static struct key_entry keymap_empty[] __initdata = { 263 263 { KE_END, 0 } 264 264 }; 265 265 266 - static struct key_entry keymap_fs_amilo_pro_v2000[] = { 266 + static struct key_entry keymap_fs_amilo_pro_v2000[] __initdata = { 267 267 { KE_KEY, 0x01, KEY_HELP }, 268 268 { KE_KEY, 0x11, KEY_PROG1 }, 269 269 { KE_KEY, 0x12, KEY_PROG2 }, ··· 273 273 { KE_END, 0 } 274 274 }; 275 275 276 - static struct key_entry keymap_fujitsu_n3510[] = { 276 + static struct key_entry keymap_fujitsu_n3510[] __initdata = { 277 277 { KE_KEY, 0x11, KEY_PROG1 }, 278 278 { KE_KEY, 0x12, KEY_PROG2 }, 279 279 { KE_KEY, 0x36, KEY_WWW }, ··· 285 285 { KE_END, 0 } 286 286 }; 287 287 288 - static struct key_entry keymap_wistron_ms2111[] = { 288 + static struct key_entry keymap_wistron_ms2111[] __initdata = { 289 289 { KE_KEY, 0x11, KEY_PROG1 }, 290 290 { KE_KEY, 0x12, KEY_PROG2 }, 291 291 { KE_KEY, 0x13, KEY_PROG3 }, ··· 294 294 { KE_END, 0 } 295 295 }; 296 296 297 - static struct key_entry keymap_wistron_ms2141[] = { 297 + static struct key_entry keymap_wistron_ms2141[] __initdata = { 298 298 { KE_KEY, 0x11, KEY_PROG1 }, 299 299 { KE_KEY, 0x12, KEY_PROG2 }, 300 300 { KE_WIFI, 0x30, 0 }, ··· 307 307 { KE_END, 0 } 308 308 }; 309 309 310 - static struct key_entry keymap_acer_aspire_1500[] = { 310 + static struct key_entry keymap_acer_aspire_1500[] __initdata = { 311 311 { KE_KEY, 0x11, KEY_PROG1 }, 312 312 { KE_KEY, 0x12, KEY_PROG2 }, 313 313 { KE_WIFI, 0x30, 0 }, ··· 317 317 { KE_END, 0 } 318 318 }; 319 319 320 - static struct key_entry keymap_acer_travelmate_240[] = { 320 + static struct key_entry keymap_acer_travelmate_240[] __initdata = { 321 
321 { KE_KEY, 0x31, KEY_MAIL }, 322 322 { KE_KEY, 0x36, KEY_WWW }, 323 323 { KE_KEY, 0x11, KEY_PROG1 }, ··· 327 327 { KE_END, 0 } 328 328 }; 329 329 330 - static struct key_entry keymap_aopen_1559as[] = { 330 + static struct key_entry keymap_aopen_1559as[] __initdata = { 331 331 { KE_KEY, 0x01, KEY_HELP }, 332 332 { KE_KEY, 0x06, KEY_PROG3 }, 333 333 { KE_KEY, 0x11, KEY_PROG1 }, ··· 343 343 * a list of buttons and their key codes (reported when loading this module 344 344 * with force=1) and the output of dmidecode to $MODULE_AUTHOR. 345 345 */ 346 - static struct dmi_system_id dmi_ids[] = { 346 + static struct dmi_system_id dmi_ids[] __initdata = { 347 347 { 348 348 .callback = dmi_matched, 349 349 .ident = "Fujitsu-Siemens Amilo Pro V2000",
+1 -2
drivers/input/mouse/logips2pp.c
··· 238 238 { 100, PS2PP_KIND_MX, /* MX510 */ 239 239 PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN | 240 240 PS2PP_EXTRA_BTN | PS2PP_NAV_BTN }, 241 - { 111, PS2PP_KIND_MX, /* MX300 */ 242 - PS2PP_WHEEL | PS2PP_EXTRA_BTN | PS2PP_TASK_BTN }, 241 + { 111, PS2PP_KIND_MX, PS2PP_WHEEL | PS2PP_SIDE_BTN }, /* MX300 reports task button as side */ 243 242 { 112, PS2PP_KIND_MX, /* MX500 */ 244 243 PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN | 245 244 PS2PP_EXTRA_BTN | PS2PP_NAV_BTN },
+34 -18
drivers/input/mouse/trackpoint.c
··· 183 183 .attrs = trackpoint_attrs, 184 184 }; 185 185 186 - static void trackpoint_disconnect(struct psmouse *psmouse) 186 + static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *firmware_id) 187 187 { 188 - sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, &trackpoint_attr_group); 188 + unsigned char param[2] = { 0 }; 189 189 190 - kfree(psmouse->private); 191 - psmouse->private = NULL; 190 + if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) 191 + return -1; 192 + 193 + if (param[0] != TP_MAGIC_IDENT) 194 + return -1; 195 + 196 + if (firmware_id) 197 + *firmware_id = param[1]; 198 + 199 + return 0; 192 200 } 193 201 194 202 static int trackpoint_sync(struct psmouse *psmouse) 195 203 { 196 - unsigned char toggle; 197 204 struct trackpoint_data *tp = psmouse->private; 198 - 199 - if (!tp) 200 - return -1; 205 + unsigned char toggle; 201 206 202 207 /* Disable features that may make device unusable with this driver */ 203 208 trackpoint_read(&psmouse->ps2dev, TP_TOGGLE_TWOHAND, &toggle); ··· 268 263 tp->ext_dev = TP_DEF_EXT_DEV; 269 264 } 270 265 266 + static void trackpoint_disconnect(struct psmouse *psmouse) 267 + { 268 + sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, &trackpoint_attr_group); 269 + 270 + kfree(psmouse->private); 271 + psmouse->private = NULL; 272 + } 273 + 274 + static int trackpoint_reconnect(struct psmouse *psmouse) 275 + { 276 + if (trackpoint_start_protocol(psmouse, NULL)) 277 + return -1; 278 + 279 + if (trackpoint_sync(psmouse)) 280 + return -1; 281 + 282 + return 0; 283 + } 284 + 271 285 int trackpoint_detect(struct psmouse *psmouse, int set_properties) 272 286 { 273 287 struct trackpoint_data *priv; 274 288 struct ps2dev *ps2dev = &psmouse->ps2dev; 275 289 unsigned char firmware_id; 276 290 unsigned char button_info; 277 - unsigned char param[2]; 278 291 279 - param[0] = param[1] = 0; 280 - 281 - if (ps2_command(ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) 282 - return -1; 283 - 
284 - if (param[0] != TP_MAGIC_IDENT) 292 + if (trackpoint_start_protocol(psmouse, &firmware_id)) 285 293 return -1; 286 294 287 295 if (!set_properties) 288 296 return 0; 289 - 290 - firmware_id = param[1]; 291 297 292 298 if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) { 293 299 printk(KERN_WARNING "trackpoint.c: failed to get extended button data\n"); ··· 312 296 psmouse->vendor = "IBM"; 313 297 psmouse->name = "TrackPoint"; 314 298 315 - psmouse->reconnect = trackpoint_sync; 299 + psmouse->reconnect = trackpoint_reconnect; 316 300 psmouse->disconnect = trackpoint_disconnect; 317 301 318 302 trackpoint_defaults(priv);
+5
drivers/input/serio/libps2.c
··· 177 177 return -1; 178 178 } 179 179 180 + if (send && !param) { 181 + WARN_ON(1); 182 + return -1; 183 + } 184 + 180 185 mutex_lock_nested(&ps2dev->cmd_mutex, SINGLE_DEPTH_NESTING); 181 186 182 187 serio_pause_rx(ps2dev->serio);
+51 -14
drivers/input/serio/serio.c
··· 62 62 63 63 static struct bus_type serio_bus; 64 64 65 + static void serio_add_driver(struct serio_driver *drv); 65 66 static void serio_add_port(struct serio *serio); 66 67 static void serio_destroy_port(struct serio *serio); 67 68 static void serio_reconnect_port(struct serio *serio); ··· 141 140 142 141 static void serio_find_driver(struct serio *serio) 143 142 { 143 + int error; 144 + 144 145 down_write(&serio_bus.subsys.rwsem); 145 - device_attach(&serio->dev); 146 + error = device_attach(&serio->dev); 147 + if (error < 0) 148 + printk(KERN_WARNING 149 + "serio: device_attach() failed for %s (%s), error: %d\n", 150 + serio->phys, serio->name, error); 146 151 up_write(&serio_bus.subsys.rwsem); 147 152 } 148 153 ··· 279 272 static void serio_handle_event(void) 280 273 { 281 274 struct serio_event *event; 282 - struct serio_driver *serio_drv; 283 275 284 276 mutex_lock(&serio_mutex); 285 277 ··· 310 304 break; 311 305 312 306 case SERIO_REGISTER_DRIVER: 313 - serio_drv = event->object; 314 - driver_register(&serio_drv->driver); 307 + serio_add_driver(event->object); 315 308 break; 316 309 317 310 default: ··· 530 525 531 526 __module_get(THIS_MODULE); 532 527 528 + INIT_LIST_HEAD(&serio->node); 533 529 spin_lock_init(&serio->lock); 534 530 mutex_init(&serio->drv_mutex); 535 531 device_initialize(&serio->dev); ··· 548 542 */ 549 543 static void serio_add_port(struct serio *serio) 550 544 { 545 + int error; 546 + 551 547 if (serio->parent) { 552 548 serio_pause_rx(serio->parent); 553 549 serio->parent->child = serio; ··· 559 551 list_add_tail(&serio->node, &serio_list); 560 552 if (serio->start) 561 553 serio->start(serio); 562 - device_add(&serio->dev); 563 - sysfs_create_group(&serio->dev.kobj, &serio_id_attr_group); 564 - serio->registered = 1; 554 + error = device_add(&serio->dev); 555 + if (error) 556 + printk(KERN_ERR 557 + "serio: device_add() failed for %s (%s), error: %d\n", 558 + serio->phys, serio->name, error); 559 + else { 560 + serio->registered = 
1; 561 + error = sysfs_create_group(&serio->dev.kobj, &serio_id_attr_group); 562 + if (error) 563 + printk(KERN_ERR 564 + "serio: sysfs_create_group() failed for %s (%s), error: %d\n", 565 + serio->phys, serio->name, error); 566 + } 565 567 } 566 568 567 569 /* ··· 601 583 if (serio->registered) { 602 584 sysfs_remove_group(&serio->dev.kobj, &serio_id_attr_group); 603 585 device_del(&serio->dev); 604 - list_del_init(&serio->node); 605 586 serio->registered = 0; 606 587 } 607 588 589 + list_del_init(&serio->node); 608 590 serio_remove_pending_events(serio); 609 591 put_device(&serio->dev); 610 592 } ··· 774 756 .remove = serio_driver_remove, 775 757 }; 776 758 759 + static void serio_add_driver(struct serio_driver *drv) 760 + { 761 + int error; 762 + 763 + error = driver_register(&drv->driver); 764 + if (error) 765 + printk(KERN_ERR 766 + "serio: driver_register() failed for %s, error: %d\n", 767 + drv->driver.name, error); 768 + } 769 + 777 770 void __serio_register_driver(struct serio_driver *drv, struct module *owner) 778 771 { 779 772 drv->driver.bus = &serio_bus; ··· 932 903 933 904 static int __init serio_init(void) 934 905 { 935 - serio_task = kthread_run(serio_thread, NULL, "kseriod"); 936 - if (IS_ERR(serio_task)) { 937 - printk(KERN_ERR "serio: Failed to start kseriod\n"); 938 - return PTR_ERR(serio_task); 939 - } 906 + int error; 940 907 941 908 serio_bus.dev_attrs = serio_device_attrs; 942 909 serio_bus.drv_attrs = serio_driver_attrs; 943 910 serio_bus.match = serio_bus_match; 944 911 serio_bus.uevent = serio_uevent; 945 912 serio_bus.resume = serio_resume; 946 - bus_register(&serio_bus); 913 + error = bus_register(&serio_bus); 914 + if (error) { 915 + printk(KERN_ERR "serio: failed to register serio bus, error: %d\n", error); 916 + return error; 917 + } 918 + 919 + serio_task = kthread_run(serio_thread, NULL, "kseriod"); 920 + if (IS_ERR(serio_task)) { 921 + bus_unregister(&serio_bus); 922 + error = PTR_ERR(serio_task); 923 + printk(KERN_ERR "serio: 
Failed to start kseriod, error: %d\n", error); 924 + return error; 925 + } 947 926 948 927 return 0; 949 928 }
-1
drivers/isdn/hardware/eicon/divasync.h
··· 256 256 #define NO_ORDER_CHECK_MASK 0x00000010 257 257 #define LOW_CHANNEL_MASK 0x00000020 258 258 #define NO_HSCX30_MASK 0x00000040 259 - #define MODE_MASK 0x00000080 260 259 #define SET_BOARD 0x00001000 261 260 #define SET_CRC4 0x00030000 262 261 #define SET_L1_TRISTATE 0x00040000
+3 -3
drivers/md/linear.c
··· 162 162 goto out; 163 163 } 164 164 165 - min_spacing = mddev->array_size; 165 + min_spacing = conf->array_size; 166 166 sector_div(min_spacing, PAGE_SIZE/sizeof(struct dev_info *)); 167 167 168 168 /* min_spacing is the minimum spacing that will fit the hash ··· 171 171 * that is larger than min_spacing as use the size of that as 172 172 * the actual spacing 173 173 */ 174 - conf->hash_spacing = mddev->array_size; 174 + conf->hash_spacing = conf->array_size; 175 175 for (i=0; i < cnt-1 ; i++) { 176 176 sector_t sz = 0; 177 177 int j; ··· 228 228 curr_offset = 0; 229 229 i = 0; 230 230 for (curr_offset = 0; 231 - curr_offset < mddev->array_size; 231 + curr_offset < conf->array_size; 232 232 curr_offset += conf->hash_spacing) { 233 233 234 234 while (i < mddev->raid_disks-1 &&
+9 -6
drivers/media/dvb/dvb-core/dvb_frontend.c
··· 526 526 fepriv->delay = 3*HZ; 527 527 fepriv->status = 0; 528 528 fepriv->wakeup = 0; 529 - fepriv->reinitialise = 1; 529 + fepriv->reinitialise = 0; 530 + 531 + dvb_frontend_init(fe); 530 532 531 533 while (1) { 532 534 up(&fepriv->sem); /* is locked when we enter the thread... */ ··· 1015 1013 return ret; 1016 1014 1017 1015 if ((file->f_flags & O_ACCMODE) != O_RDONLY) { 1016 + 1017 + /* normal tune mode when opened R/W */ 1018 + fepriv->tune_mode_flags &= ~FE_TUNE_MODE_ONESHOT; 1019 + fepriv->tone = -1; 1020 + fepriv->voltage = -1; 1021 + 1018 1022 ret = dvb_frontend_start (fe); 1019 1023 if (ret) 1020 1024 dvb_generic_release (inode, file); 1021 1025 1022 1026 /* empty event queue */ 1023 1027 fepriv->events.eventr = fepriv->events.eventw = 0; 1024 - 1025 - /* normal tune mode when opened R/W */ 1026 - fepriv->tune_mode_flags &= ~FE_TUNE_MODE_ONESHOT; 1027 - fepriv->tone = -1; 1028 - fepriv->voltage = -1; 1029 1028 } 1030 1029 1031 1030 return ret;
+19 -5
drivers/media/dvb/frontends/dvb-pll.c
··· 194 194 { 253834000, 36249333, 166667, 0xca, 0x62 /* 011 0 0 0 10 */ }, 195 195 { 383834000, 36249333, 166667, 0xca, 0xa2 /* 101 0 0 0 10 */ }, 196 196 { 443834000, 36249333, 166667, 0xca, 0xc2 /* 110 0 0 0 10 */ }, 197 - { 444000000, 36249333, 166667, 0xca, 0xc3 /* 110 0 0 0 11 */ }, 198 - { 583834000, 36249333, 166667, 0xca, 0x63 /* 011 0 0 0 11 */ }, 199 - { 793834000, 36249333, 166667, 0xca, 0xa3 /* 101 0 0 0 11 */ }, 200 - { 444834000, 36249333, 166667, 0xca, 0xc3 /* 110 0 0 0 11 */ }, 201 - { 861000000, 36249333, 166667, 0xca, 0xe3 /* 111 0 0 0 11 */ }, 197 + { 444000000, 36249333, 166667, 0xca, 0xc4 /* 110 0 0 1 00 */ }, 198 + { 583834000, 36249333, 166667, 0xca, 0x64 /* 011 0 0 1 00 */ }, 199 + { 793834000, 36249333, 166667, 0xca, 0xa4 /* 101 0 0 1 00 */ }, 200 + { 444834000, 36249333, 166667, 0xca, 0xc4 /* 110 0 0 1 00 */ }, 201 + { 861000000, 36249333, 166667, 0xca, 0xe4 /* 111 0 0 1 00 */ }, 202 202 } 203 203 }; 204 204 EXPORT_SYMBOL(dvb_pll_tda665x); ··· 613 613 614 614 int dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, struct i2c_adapter *i2c, struct dvb_pll_desc *desc) 615 615 { 616 + u8 b1 [] = { 0 }; 617 + struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .buf = b1, .len = 1 }; 616 618 struct dvb_pll_priv *priv = NULL; 619 + int ret; 620 + 621 + if (i2c != NULL) { 622 + if (fe->ops.i2c_gate_ctrl) 623 + fe->ops.i2c_gate_ctrl(fe, 1); 624 + 625 + ret = i2c_transfer (i2c, &msg, 1); 626 + if (ret != 1) 627 + return -1; 628 + if (fe->ops.i2c_gate_ctrl) 629 + fe->ops.i2c_gate_ctrl(fe, 0); 630 + } 617 631 618 632 priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL); 619 633 if (priv == NULL)
+2 -2
drivers/media/dvb/ttpci/av7110.c
··· 2203 2203 av7110->fe->ops.tuner_ops.set_params = nexusca_stv0297_tuner_set_params; 2204 2204 2205 2205 /* set TDA9819 into DVB mode */ 2206 - saa7146_setgpio(av7110->dev, 1, SAA7146_GPIO_OUTLO); // TDA9198 pin9(STD) 2207 - saa7146_setgpio(av7110->dev, 3, SAA7146_GPIO_OUTLO); // TDA9198 pin30(VIF) 2206 + saa7146_setgpio(av7110->dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD) 2207 + saa7146_setgpio(av7110->dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF) 2208 2208 2209 2209 /* tuner on this needs a slower i2c bus speed */ 2210 2210 av7110->dev->i2c_bitrate = SAA7146_I2C_BUS_BIT_RATE_240;
+6 -6
drivers/media/dvb/ttpci/av7110_v4l.c
··· 272 272 if (ves1820_writereg(dev, 0x09, 0x0f, 0x60)) 273 273 dprintk(1, "setting band in demodulator failed.\n"); 274 274 } else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) { 275 - saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI); // TDA9198 pin9(STD) 276 - saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI); // TDA9198 pin30(VIF) 275 + saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI); // TDA9819 pin9(STD) 276 + saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI); // TDA9819 pin30(VIF) 277 277 } 278 278 if (i2c_writereg(av7110, 0x48, 0x02, 0xd0) != 1) 279 279 dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num); ··· 308 308 if (ves1820_writereg(dev, 0x09, 0x0f, 0x20)) 309 309 dprintk(1, "setting band in demodulator failed.\n"); 310 310 } else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) { 311 - saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTLO); // TDA9198 pin9(STD) 312 - saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO); // TDA9198 pin30(VIF) 311 + saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD) 312 + saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF) 313 313 } 314 314 } 315 315 ··· 750 750 if (ves1820_writereg(av7110->dev, 0x09, 0x0f, 0x20)) 751 751 dprintk(1, "setting band in demodulator failed.\n"); 752 752 } else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) { 753 - saa7146_setgpio(av7110->dev, 1, SAA7146_GPIO_OUTLO); // TDA9198 pin9(STD) 754 - saa7146_setgpio(av7110->dev, 3, SAA7146_GPIO_OUTLO); // TDA9198 pin30(VIF) 753 + saa7146_setgpio(av7110->dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD) 754 + saa7146_setgpio(av7110->dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF) 755 755 } 756 756 757 757 /* init the saa7113 */
+3
drivers/media/dvb/ttpci/budget-av.c
··· 1303 1303 budget_av->budget.dvb_adapter.priv = budget_av; 1304 1304 frontend_init(budget_av); 1305 1305 ciintf_init(budget_av); 1306 + 1307 + ttpci_budget_init_hooks(&budget_av->budget); 1308 + 1306 1309 return 0; 1307 1310 } 1308 1311
+2
drivers/media/dvb/ttpci/budget-ci.c
··· 1101 1101 budget_ci->budget.dvb_adapter.priv = budget_ci; 1102 1102 frontend_init(budget_ci); 1103 1103 1104 + ttpci_budget_init_hooks(&budget_ci->budget); 1105 + 1104 1106 return 0; 1105 1107 } 1106 1108
+44 -13
drivers/media/dvb/ttpci/budget-core.c
··· 63 63 { 64 64 dprintk(2, "budget: %p\n", budget); 65 65 66 - if (--budget->feeding) 67 - return budget->feeding; 68 - 69 66 saa7146_write(budget->dev, MC1, MASK_20); // DMA3 off 70 67 SAA7146_IER_DISABLE(budget->dev, MASK_10); 71 68 return 0; ··· 74 77 75 78 dprintk(2, "budget: %p\n", budget); 76 79 77 - if (budget->feeding) 78 - return ++budget->feeding; 80 + if (!budget->feeding || !budget->fe_synced) 81 + return 0; 79 82 80 83 saa7146_write(dev, MC1, MASK_20); // DMA3 off 81 84 ··· 136 139 SAA7146_IER_ENABLE(budget->dev, MASK_10); /* VPE */ 137 140 saa7146_write(dev, MC1, (MASK_04 | MASK_20)); /* DMA3 on */ 138 141 139 - return ++budget->feeding; 142 + return 0; 143 + } 144 + 145 + static int budget_read_fe_status(struct dvb_frontend *fe, fe_status_t *status) 146 + { 147 + struct budget *budget = (struct budget *) fe->dvb->priv; 148 + int synced; 149 + int ret; 150 + 151 + if (budget->read_fe_status) 152 + ret = budget->read_fe_status(fe, status); 153 + else 154 + ret = -EINVAL; 155 + 156 + if (!ret) { 157 + synced = (*status & FE_HAS_LOCK); 158 + if (synced != budget->fe_synced) { 159 + budget->fe_synced = synced; 160 + spin_lock(&budget->feedlock); 161 + if (synced) 162 + start_ts_capture(budget); 163 + else 164 + stop_ts_capture(budget); 165 + spin_unlock(&budget->feedlock); 166 + } 167 + } 168 + return ret; 140 169 } 141 170 142 171 static void vpeirq(unsigned long data) ··· 290 267 { 291 268 struct dvb_demux *demux = feed->demux; 292 269 struct budget *budget = (struct budget *) demux->priv; 293 - int status; 270 + int status = 0; 294 271 295 272 dprintk(2, "budget: %p\n", budget); 296 273 ··· 299 276 300 277 spin_lock(&budget->feedlock); 301 278 feed->pusi_seen = 0; /* have a clean section start */ 302 - status = start_ts_capture(budget); 279 + if (budget->feeding++ == 0) 280 + status = start_ts_capture(budget); 303 281 spin_unlock(&budget->feedlock); 304 282 return status; 305 283 } ··· 309 285 { 310 286 struct dvb_demux *demux = feed->demux; 311 287 
struct budget *budget = (struct budget *) demux->priv; 312 - int status; 288 + int status = 0; 313 289 314 290 dprintk(2, "budget: %p\n", budget); 315 291 316 292 spin_lock(&budget->feedlock); 317 - status = stop_ts_capture(budget); 293 + if (--budget->feeding == 0) 294 + status = stop_ts_capture(budget); 318 295 spin_unlock(&budget->feedlock); 319 296 return status; 320 297 } ··· 495 470 return ret; 496 471 } 497 472 473 + void ttpci_budget_init_hooks(struct budget *budget) 474 + { 475 + if (budget->dvb_frontend && !budget->read_fe_status) { 476 + budget->read_fe_status = budget->dvb_frontend->ops.read_status; 477 + budget->dvb_frontend->ops.read_status = budget_read_fe_status; 478 + } 479 + } 480 + 498 481 int ttpci_budget_deinit(struct budget *budget) 499 482 { 500 483 struct saa7146_dev *dev = budget->dev; ··· 541 508 spin_lock(&budget->feedlock); 542 509 budget->video_port = video_port; 543 510 if (budget->feeding) { 544 - int oldfeeding = budget->feeding; 545 - budget->feeding = 1; 546 511 stop_ts_capture(budget); 547 512 start_ts_capture(budget); 548 - budget->feeding = oldfeeding; 549 513 } 550 514 spin_unlock(&budget->feedlock); 551 515 } ··· 550 520 EXPORT_SYMBOL_GPL(ttpci_budget_debiread); 551 521 EXPORT_SYMBOL_GPL(ttpci_budget_debiwrite); 552 522 EXPORT_SYMBOL_GPL(ttpci_budget_init); 523 + EXPORT_SYMBOL_GPL(ttpci_budget_init_hooks); 553 524 EXPORT_SYMBOL_GPL(ttpci_budget_deinit); 554 525 EXPORT_SYMBOL_GPL(ttpci_budget_irq10_handler); 555 526 EXPORT_SYMBOL_GPL(ttpci_budget_set_video_port);
+2
drivers/media/dvb/ttpci/budget-patch.c
··· 617 617 budget->dvb_adapter.priv = budget; 618 618 frontend_init(budget); 619 619 620 + ttpci_budget_init_hooks(budget); 621 + 620 622 return 0; 621 623 } 622 624
+2 -3
drivers/media/dvb/ttpci/budget.c
··· 375 375 if (budget->dvb_frontend) { 376 376 budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsru6_tuner_set_params; 377 377 budget->dvb_frontend->tuner_priv = &budget->i2c_adap; 378 - budget->dvb_frontend->ops.diseqc_send_master_cmd = budget_diseqc_send_master_cmd; 379 - budget->dvb_frontend->ops.diseqc_send_burst = budget_diseqc_send_burst; 380 - budget->dvb_frontend->ops.set_tone = budget_set_tone; 381 378 break; 382 379 } 383 380 break; ··· 470 473 471 474 budget->dvb_adapter.priv = budget; 472 475 frontend_init(budget); 476 + 477 + ttpci_budget_init_hooks(budget); 473 478 474 479 return 0; 475 480 }
+4 -3
drivers/media/dvb/ttpci/budget.h
··· 52 52 struct dmx_frontend hw_frontend; 53 53 struct dmx_frontend mem_frontend; 54 54 55 - int fe_synced; 56 - struct mutex pid_mutex; 57 - 58 55 int ci_present; 59 56 int video_port; 60 57 ··· 71 74 72 75 struct dvb_adapter dvb_adapter; 73 76 struct dvb_frontend *dvb_frontend; 77 + int (*read_fe_status)(struct dvb_frontend *fe, fe_status_t *status); 78 + int fe_synced; 79 + 74 80 void *priv; 75 81 }; 76 82 ··· 106 106 extern int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev, 107 107 struct saa7146_pci_extension_data *info, 108 108 struct module *owner); 109 + extern void ttpci_budget_init_hooks(struct budget *budget); 109 110 extern int ttpci_budget_deinit(struct budget *budget); 110 111 extern void ttpci_budget_irq10_handler(struct saa7146_dev *dev, u32 * isr); 111 112 extern void ttpci_budget_set_video_port(struct saa7146_dev *dev, int video_port);
+2 -2
drivers/media/video/Kconfig
··· 145 145 146 146 config VIDEO_SAA5249 147 147 tristate "SAA5249 Teletext processor" 148 - depends on VIDEO_DEV && I2C 148 + depends on VIDEO_DEV && I2C && VIDEO_V4L1 149 149 help 150 150 Support for I2C bus based teletext using the SAA5249 chip. At the 151 151 moment this is only useful on some European WinTV cards. ··· 155 155 156 156 config TUNER_3036 157 157 tristate "SAB3036 tuner" 158 - depends on VIDEO_DEV && I2C 158 + depends on VIDEO_DEV && I2C && VIDEO_V4L1 159 159 help 160 160 Say Y here to include support for Philips SAB3036 compatible tuners. 161 161 If in doubt, say N.
+1 -1
drivers/media/video/bt8xx/Kconfig
··· 1 1 config VIDEO_BT848 2 2 tristate "BT848 Video For Linux" 3 - depends on VIDEO_DEV && PCI && I2C && VIDEO_V4L2 3 + depends on VIDEO_DEV && PCI && I2C && VIDEO_V4L1 4 4 select I2C_ALGOBIT 5 5 select FW_LOADER 6 6 select VIDEO_BTCX
+13 -2
drivers/media/video/bt8xx/bttv-driver.c
··· 3923 3923 goto err; 3924 3924 printk(KERN_INFO "bttv%d: registered device video%d\n", 3925 3925 btv->c.nr,btv->video_dev->minor & 0x1f); 3926 - video_device_create_file(btv->video_dev, &class_device_attr_card); 3926 + if (class_device_create_file(&btv->video_dev->class_dev, 3927 + &class_device_attr_card)<0) { 3928 + printk(KERN_ERR "bttv%d: class_device_create_file 'card' " 3929 + "failed\n", btv->c.nr); 3930 + goto err; 3931 + } 3927 3932 3928 3933 /* vbi */ 3929 3934 btv->vbi_dev = vdev_init(btv, &bttv_vbi_template, "vbi"); ··· 4292 4287 4293 4288 static int bttv_init_module(void) 4294 4289 { 4290 + int ret; 4291 + 4295 4292 bttv_num = 0; 4296 4293 4297 4294 printk(KERN_INFO "bttv: driver version %d.%d.%d loaded\n", ··· 4315 4308 4316 4309 bttv_check_chipset(); 4317 4310 4318 - bus_register(&bttv_sub_bus_type); 4311 + ret = bus_register(&bttv_sub_bus_type); 4312 + if (ret < 0) { 4313 + printk(KERN_WARNING "bttv: bus_register error: %d\n", ret); 4314 + return ret; 4315 + } 4319 4316 return pci_register_driver(&bttv_pci_driver); 4320 4317 } 4321 4318
+10 -5
drivers/media/video/bt8xx/bttv-vbi.c
··· 31 31 #include <asm/io.h> 32 32 #include "bttvp.h" 33 33 34 - /* Offset from line sync pulse leading edge (0H) in 1 / sampling_rate: 35 - bt8x8 /HRESET pulse starts at 0H and has length 64 / fCLKx1 (E|O_VTC 36 - HSFMT = 0). VBI_HDELAY (always 0) is an offset from the trailing edge 37 - of /HRESET in 1 / fCLKx1, and the sampling_rate tvnorm->Fsc is fCLKx2. */ 38 - #define VBI_OFFSET ((64 + 0) * 2) 34 + /* Offset from line sync pulse leading edge (0H) to start of VBI capture, 35 + in fCLKx2 pixels. According to the datasheet, VBI capture starts 36 + VBI_HDELAY fCLKx1 pixels from the tailing edgeof /HRESET, and /HRESET 37 + is 64 fCLKx1 pixels wide. VBI_HDELAY is set to 0, so this should be 38 + (64 + 0) * 2 = 128 fCLKx2 pixels. But it's not! The datasheet is 39 + Just Plain Wrong. The real value appears to be different for 40 + different revisions of the bt8x8 chips, and to be affected by the 41 + horizontal scaling factor. Experimentally, the value is measured 42 + to be about 244. */ 43 + #define VBI_OFFSET 244 39 44 40 45 #define VBI_DEFLINES 16 41 46 #define VBI_MAXLINES 32
+24
drivers/media/video/compat_ioctl32.c
··· 490 490 return 0; 491 491 } 492 492 493 + struct video_code32 494 + { 495 + char loadwhat[16]; /* name or tag of file being passed */ 496 + compat_int_t datasize; 497 + unsigned char *data; 498 + }; 499 + 500 + static inline int microcode32(struct video_code *kp, struct video_code32 __user *up) 501 + { 502 + if(!access_ok(VERIFY_READ, up, sizeof(struct video_code32)) || 503 + copy_from_user(kp->loadwhat, up->loadwhat, sizeof (up->loadwhat)) || 504 + get_user(kp->datasize, &up->datasize) || 505 + copy_from_user(kp->data, up->data, up->datasize)) 506 + return -EFAULT; 507 + return 0; 508 + } 509 + 493 510 #define VIDIOCGTUNER32 _IOWR('v',4, struct video_tuner32) 494 511 #define VIDIOCSTUNER32 _IOW('v',5, struct video_tuner32) 495 512 #define VIDIOCGWIN32 _IOR('v',9, struct video_window32) ··· 515 498 #define VIDIOCSFBUF32 _IOW('v',12, struct video_buffer32) 516 499 #define VIDIOCGFREQ32 _IOR('v',14, u32) 517 500 #define VIDIOCSFREQ32 _IOW('v',15, u32) 501 + #define VIDIOCSMICROCODE32 _IOW('v',27, struct video_code32) 518 502 519 503 /* VIDIOC_ENUMINPUT32 is VIDIOC_ENUMINPUT minus 4 bytes of padding alignement */ 520 504 #define VIDIOC_ENUMINPUT32 VIDIOC_ENUMINPUT - _IOC(0, 0, 0, 4) ··· 608 590 struct video_tuner vt; 609 591 struct video_buffer vb; 610 592 struct video_window vw; 593 + struct video_code vc; 611 594 struct v4l2_format v2f; 612 595 struct v4l2_buffer v2b; 613 596 struct v4l2_framebuffer v2fb; ··· 647 628 case VIDIOC_G_INPUT32: cmd = VIDIOC_G_INPUT; break; 648 629 case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break; 649 630 case VIDIOC_TRY_FMT32: cmd = VIDIOC_TRY_FMT; break; 631 + case VIDIOCSMICROCODE32: cmd = VIDIOCSMICROCODE; break; 650 632 }; 651 633 652 634 switch(cmd) { ··· 723 703 case VIDIOC_G_FBUF: 724 704 case VIDIOC_G_INPUT: 725 705 compatible_arg = 0; 706 + case VIDIOCSMICROCODE: 707 + err = microcode32(&karg.vc, up); 708 + compatible_arg = 0; 709 + break; 726 710 }; 727 711 728 712 if(err)
+1 -1
drivers/media/video/cpia2/Kconfig
··· 1 1 config VIDEO_CPIA2 2 2 tristate "CPiA2 Video For Linux" 3 - depends on VIDEO_DEV && USB 3 + depends on VIDEO_DEV && USB && VIDEO_V4L1 4 4 ---help--- 5 5 This is the video4linux driver for cameras based on Vision's CPiA2 6 6 (Colour Processor Interface ASIC), such as the Digital Blue QX5
+1 -1
drivers/media/video/cx88/cx88-input.c
··· 89 89 90 90 auxgpio = cx_read(MO_GP1_IO); 91 91 /* Take out the parity part */ 92 - gpio+=(gpio & 0x7fd) + (auxgpio & 0xef); 92 + gpio=(gpio & 0x7fd) + (auxgpio & 0xef); 93 93 } else 94 94 auxgpio = gpio; 95 95
+2 -3
drivers/media/video/cx88/cx88-video.c
··· 1180 1180 V4L2_CAP_READWRITE | 1181 1181 V4L2_CAP_STREAMING | 1182 1182 V4L2_CAP_VBI_CAPTURE | 1183 - V4L2_CAP_VIDEO_OVERLAY | 1184 1183 0; 1185 1184 if (UNSET != core->tuner_type) 1186 1185 cap->capabilities |= V4L2_CAP_TUNER; ··· 1225 1226 struct v4l2_format *f = arg; 1226 1227 return cx8800_try_fmt(dev,fh,f); 1227 1228 } 1228 - #ifdef HAVE_V4L1 1229 + #ifdef CONFIG_V4L1_COMPAT 1229 1230 /* --- streaming capture ------------------------------------- */ 1230 1231 case VIDIOCGMBUF: 1231 1232 { ··· 1584 1585 *id = 0; 1585 1586 return 0; 1586 1587 } 1587 - #ifdef HAVE_V4L1 1588 + #ifdef CONFIG_V4L1_COMPAT 1588 1589 case VIDIOCSTUNER: 1589 1590 { 1590 1591 struct video_tuner *v = arg;
+9 -1
drivers/media/video/msp3400-driver.c
··· 362 362 } 363 363 364 364 /* ------------------------------------------------------------------------ */ 365 - 365 + #ifdef CONFIG_VIDEO_V4L1 366 366 static int msp_mode_v4l2_to_v4l1(int rxsubchans, int audmode) 367 367 { 368 368 if (rxsubchans == V4L2_TUNER_SUB_MONO) ··· 384 384 return V4L2_TUNER_MODE_LANG1; 385 385 return V4L2_TUNER_MODE_MONO; 386 386 } 387 + #endif 387 388 388 389 static int msp_get_ctrl(struct i2c_client *client, struct v4l2_control *ctrl) 389 390 { ··· 510 509 /* --- v4l ioctls --- */ 511 510 /* take care: bttv does userspace copying, we'll get a 512 511 kernel pointer here... */ 512 + #ifdef CONFIG_VIDEO_V4L1 513 513 case VIDIOCGAUDIO: 514 514 { 515 515 struct video_audio *va = arg; ··· 579 577 } 580 578 581 579 case VIDIOCSFREQ: 580 + { 581 + /* new channel -- kick audio carrier scan */ 582 + msp_wake_thread(client); 583 + break; 584 + } 585 + #endif 582 586 case VIDIOC_S_FREQUENCY: 583 587 { 584 588 /* new channel -- kick audio carrier scan */
+6 -2
drivers/media/video/pvrusb2/pvrusb2-hdw.c
··· 852 852 return hdw->serial_number; 853 853 } 854 854 855 - 856 855 int pvr2_hdw_get_unit_number(struct pvr2_hdw *hdw) 857 856 { 858 857 return hdw->unit_number; ··· 2317 2318 } 2318 2319 } 2319 2320 2320 - 2321 2321 /* Return name for this driver instance */ 2322 2322 const char *pvr2_hdw_get_driver_name(struct pvr2_hdw *hdw) 2323 2323 { ··· 2540 2542 } 2541 2543 2542 2544 2545 + /* Issue a command and get a response from the device. This extended 2546 + version includes a probe flag (which if set means that device errors 2547 + should not be logged or treated as fatal) and a timeout in jiffies. 2548 + This can be used to non-lethally probe the health of endpoint 1. */ 2543 2549 static int pvr2_send_request_ex(struct pvr2_hdw *hdw, 2544 2550 unsigned int timeout,int probe_fl, 2545 2551 void *write_data,unsigned int write_len, ··· 2972 2970 } 2973 2971 2974 2972 2973 + /* Stop / start video stream transport */ 2975 2974 static int pvr2_hdw_cmd_usbstream(struct pvr2_hdw *hdw,int runFl) 2976 2975 { 2977 2976 int status; ··· 3071 3068 } 3072 3069 3073 3070 3071 + /* Find I2C address of eeprom */ 3074 3072 static int pvr2_hdw_get_eeprom_addr(struct pvr2_hdw *hdw) 3075 3073 { 3076 3074 int result;
+6 -3
drivers/media/video/pvrusb2/pvrusb2-io.c
··· 26 26 #include <linux/slab.h> 27 27 #include <linux/mutex.h> 28 28 29 + static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state); 30 + 29 31 #define BUFFER_SIG 0x47653271 30 32 31 33 // #define SANITY_CHECK_BUFFERS ··· 517 515 } 518 516 519 517 /* Query / set the nominal buffer count */ 518 + int pvr2_stream_get_buffer_count(struct pvr2_stream *sp) 519 + { 520 + return sp->buffer_target_count; 521 + } 520 522 521 523 int pvr2_stream_set_buffer_count(struct pvr2_stream *sp,unsigned int cnt) 522 524 { ··· 558 552 { 559 553 return sp->r_count; 560 554 } 561 - 562 555 563 556 void pvr2_stream_kill(struct pvr2_stream *sp) 564 557 { ··· 612 607 return ret; 613 608 } 614 609 615 - 616 610 int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt) 617 611 { 618 612 int ret = 0; ··· 649 645 { 650 646 return bp->status; 651 647 } 652 - 653 648 654 649 int pvr2_buffer_get_id(struct pvr2_buffer *bp) 655 650 {
+2
drivers/media/video/pvrusb2/pvrusb2-io.h
··· 47 47 void *data); 48 48 49 49 /* Query / set the nominal buffer count */ 50 + int pvr2_stream_get_buffer_count(struct pvr2_stream *); 50 51 int pvr2_stream_set_buffer_count(struct pvr2_stream *,unsigned int); 51 52 52 53 /* Get a pointer to a buffer that is either idle, ready, or is specified ··· 58 57 59 58 /* Find out how many buffers are idle or ready */ 60 59 int pvr2_stream_get_ready_count(struct pvr2_stream *); 60 + 61 61 62 62 /* Kill all pending buffers and throw away any ready buffers as well */ 63 63 void pvr2_stream_kill(struct pvr2_stream *);
+3 -2
drivers/media/video/pvrusb2/pvrusb2-ioread.c
··· 213 213 " pvr2_ioread_setup (tear-down) id=%p",cp); 214 214 pvr2_ioread_stop(cp); 215 215 pvr2_stream_kill(cp->stream); 216 - pvr2_stream_set_buffer_count(cp->stream,0); 216 + if (pvr2_stream_get_buffer_count(cp->stream)) { 217 + pvr2_stream_set_buffer_count(cp->stream,0); 218 + } 217 219 cp->stream = NULL; 218 220 } 219 221 if (sp) { ··· 252 250 } while (0); mutex_unlock(&cp->mutex); 253 251 return ret; 254 252 } 255 - 256 253 257 254 static int pvr2_ioread_get_buffer(struct pvr2_ioread *cp) 258 255 {
+73 -12
drivers/media/video/pvrusb2/pvrusb2-sysfs.c
··· 44 44 struct kobj_type ktype; 45 45 struct class_device_attribute attr_v4l_minor_number; 46 46 struct class_device_attribute attr_unit_number; 47 + int v4l_minor_number_created_ok; 48 + int unit_number_created_ok; 47 49 }; 48 50 49 51 #ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC 50 52 struct pvr2_sysfs_debugifc { 51 53 struct class_device_attribute attr_debugcmd; 52 54 struct class_device_attribute attr_debuginfo; 55 + int debugcmd_created_ok; 56 + int debuginfo_created_ok; 53 57 }; 54 58 #endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */ 55 59 ··· 71 67 struct pvr2_sysfs_ctl_item *item_next; 72 68 struct attribute *attr_gen[7]; 73 69 struct attribute_group grp; 70 + int created_ok; 74 71 char name[80]; 75 72 }; 76 73 ··· 492 487 struct pvr2_sysfs_func_set *fp; 493 488 struct pvr2_ctrl *cptr; 494 489 unsigned int cnt,acnt; 490 + int ret; 495 491 496 492 if ((ctl_id < 0) || (ctl_id >= (sizeof(funcs)/sizeof(funcs[0])))) { 497 493 return; ··· 595 589 cip->grp.name = cip->name; 596 590 cip->grp.attrs = cip->attr_gen; 597 591 598 - sysfs_create_group(&sfp->class_dev->kobj,&cip->grp); 592 + ret = sysfs_create_group(&sfp->class_dev->kobj,&cip->grp); 593 + if (ret) { 594 + printk(KERN_WARNING "%s: sysfs_create_group error: %d\n", 595 + __FUNCTION__, ret); 596 + return; 597 + } 598 + cip->created_ok = !0; 599 599 } 600 600 601 601 #ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC ··· 612 600 static void pvr2_sysfs_add_debugifc(struct pvr2_sysfs *sfp) 613 601 { 614 602 struct pvr2_sysfs_debugifc *dip; 603 + int ret; 604 + 615 605 dip = kmalloc(sizeof(*dip),GFP_KERNEL); 616 606 if (!dip) return; 617 607 memset(dip,0,sizeof(*dip)); ··· 627 613 dip->attr_debuginfo.attr.mode = S_IRUGO; 628 614 dip->attr_debuginfo.show = debuginfo_show; 629 615 sfp->debugifc = dip; 630 - class_device_create_file(sfp->class_dev,&dip->attr_debugcmd); 631 - class_device_create_file(sfp->class_dev,&dip->attr_debuginfo); 616 + ret = class_device_create_file(sfp->class_dev,&dip->attr_debugcmd); 617 + if (ret < 0) { 618 + 
printk(KERN_WARNING "%s: class_device_create_file error: %d\n", 619 + __FUNCTION__, ret); 620 + } else { 621 + dip->debugcmd_created_ok = !0; 622 + } 623 + ret = class_device_create_file(sfp->class_dev,&dip->attr_debuginfo); 624 + if (ret < 0) { 625 + printk(KERN_WARNING "%s: class_device_create_file error: %d\n", 626 + __FUNCTION__, ret); 627 + } else { 628 + dip->debuginfo_created_ok = !0; 629 + } 632 630 } 633 631 634 632 635 633 static void pvr2_sysfs_tear_down_debugifc(struct pvr2_sysfs *sfp) 636 634 { 637 635 if (!sfp->debugifc) return; 638 - class_device_remove_file(sfp->class_dev, 639 - &sfp->debugifc->attr_debuginfo); 640 - class_device_remove_file(sfp->class_dev,&sfp->debugifc->attr_debugcmd); 636 + if (sfp->debugifc->debuginfo_created_ok) { 637 + class_device_remove_file(sfp->class_dev, 638 + &sfp->debugifc->attr_debuginfo); 639 + } 640 + if (sfp->debugifc->debugcmd_created_ok) { 641 + class_device_remove_file(sfp->class_dev, 642 + &sfp->debugifc->attr_debugcmd); 643 + } 641 644 kfree(sfp->debugifc); 642 645 sfp->debugifc = NULL; 643 646 } ··· 676 645 struct pvr2_sysfs_ctl_item *cip1,*cip2; 677 646 for (cip1 = sfp->item_first; cip1; cip1 = cip2) { 678 647 cip2 = cip1->item_next; 679 - sysfs_remove_group(&sfp->class_dev->kobj,&cip1->grp); 648 + if (cip1->created_ok) { 649 + sysfs_remove_group(&sfp->class_dev->kobj,&cip1->grp); 650 + } 680 651 pvr2_sysfs_trace("Destroying pvr2_sysfs_ctl_item id=%p",cip1); 681 652 kfree(cip1); 682 653 } ··· 708 675 pvr2_sysfs_tear_down_debugifc(sfp); 709 676 #endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */ 710 677 pvr2_sysfs_tear_down_controls(sfp); 711 - class_device_remove_file(sfp->class_dev,&sfp->attr_v4l_minor_number); 712 - class_device_remove_file(sfp->class_dev,&sfp->attr_unit_number); 678 + if (sfp->v4l_minor_number_created_ok) { 679 + class_device_remove_file(sfp->class_dev, 680 + &sfp->attr_v4l_minor_number); 681 + } 682 + if (sfp->unit_number_created_ok) { 683 + class_device_remove_file(sfp->class_dev, 684 + 
&sfp->attr_unit_number); 685 + } 713 686 pvr2_sysfs_trace("Destroying class_dev id=%p",sfp->class_dev); 714 687 sfp->class_dev->class_data = NULL; 715 688 class_device_unregister(sfp->class_dev); ··· 748 709 { 749 710 struct usb_device *usb_dev; 750 711 struct class_device *class_dev; 712 + int ret; 713 + 751 714 usb_dev = pvr2_hdw_get_dev(sfp->channel.hdw); 752 715 if (!usb_dev) return; 753 716 class_dev = kmalloc(sizeof(*class_dev),GFP_KERNEL); ··· 774 733 775 734 sfp->class_dev = class_dev; 776 735 class_dev->class_data = sfp; 777 - class_device_register(class_dev); 736 + ret = class_device_register(class_dev); 737 + if (ret) { 738 + printk(KERN_ERR "%s: class_device_register failed\n", 739 + __FUNCTION__); 740 + kfree(class_dev); 741 + return; 742 + } 778 743 779 744 sfp->attr_v4l_minor_number.attr.owner = THIS_MODULE; 780 745 sfp->attr_v4l_minor_number.attr.name = "v4l_minor_number"; 781 746 sfp->attr_v4l_minor_number.attr.mode = S_IRUGO; 782 747 sfp->attr_v4l_minor_number.show = v4l_minor_number_show; 783 748 sfp->attr_v4l_minor_number.store = NULL; 784 - class_device_create_file(sfp->class_dev,&sfp->attr_v4l_minor_number); 749 + ret = class_device_create_file(sfp->class_dev, 750 + &sfp->attr_v4l_minor_number); 751 + if (ret < 0) { 752 + printk(KERN_WARNING "%s: class_device_create_file error: %d\n", 753 + __FUNCTION__, ret); 754 + } else { 755 + sfp->v4l_minor_number_created_ok = !0; 756 + } 757 + 785 758 sfp->attr_unit_number.attr.owner = THIS_MODULE; 786 759 sfp->attr_unit_number.attr.name = "unit_number"; 787 760 sfp->attr_unit_number.attr.mode = S_IRUGO; 788 761 sfp->attr_unit_number.show = unit_number_show; 789 762 sfp->attr_unit_number.store = NULL; 790 - class_device_create_file(sfp->class_dev,&sfp->attr_unit_number); 763 + ret = class_device_create_file(sfp->class_dev,&sfp->attr_unit_number); 764 + if (ret < 0) { 765 + printk(KERN_WARNING "%s: class_device_create_file error: %d\n", 766 + __FUNCTION__, ret); 767 + } else { 768 + 
sfp->unit_number_created_ok = !0; 769 + } 791 770 792 771 pvr2_sysfs_add_controls(sfp); 793 772 #ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
+5 -5
drivers/media/video/saa7134/saa7134-alsa.c
··· 997 997 struct saa7134_dev *dev = NULL; 998 998 struct list_head *list; 999 999 1000 - if (!dmasound_init && !dmasound_exit) { 1001 - dmasound_init = alsa_device_init; 1002 - dmasound_exit = alsa_device_exit; 1000 + if (!saa7134_dmasound_init && !saa7134_dmasound_exit) { 1001 + saa7134_dmasound_init = alsa_device_init; 1002 + saa7134_dmasound_exit = alsa_device_exit; 1003 1003 } else { 1004 1004 printk(KERN_WARNING "saa7134 ALSA: can't load, DMA sound handler already assigned (probably to OSS)\n"); 1005 1005 return -EBUSY; ··· 1036 1036 snd_card_free(snd_saa7134_cards[idx]); 1037 1037 } 1038 1038 1039 - dmasound_init = NULL; 1040 - dmasound_exit = NULL; 1039 + saa7134_dmasound_init = NULL; 1040 + saa7134_dmasound_exit = NULL; 1041 1041 printk(KERN_INFO "saa7134 ALSA driver for DMA sound unloaded\n"); 1042 1042 1043 1043 return;
+8 -8
drivers/media/video/saa7134/saa7134-core.c
··· 95 95 static LIST_HEAD(mops_list); 96 96 static unsigned int saa7134_devcount; 97 97 98 - int (*dmasound_init)(struct saa7134_dev *dev); 99 - int (*dmasound_exit)(struct saa7134_dev *dev); 98 + int (*saa7134_dmasound_init)(struct saa7134_dev *dev); 99 + int (*saa7134_dmasound_exit)(struct saa7134_dev *dev); 100 100 101 101 #define dprintk(fmt, arg...) if (core_debug) \ 102 102 printk(KERN_DEBUG "%s/core: " fmt, dev->name , ## arg) ··· 1008 1008 /* check for signal */ 1009 1009 saa7134_irq_video_intl(dev); 1010 1010 1011 - if (dmasound_init && !dev->dmasound.priv_data) { 1012 - dmasound_init(dev); 1011 + if (saa7134_dmasound_init && !dev->dmasound.priv_data) { 1012 + saa7134_dmasound_init(dev); 1013 1013 } 1014 1014 1015 1015 return 0; ··· 1036 1036 struct saa7134_mpeg_ops *mops; 1037 1037 1038 1038 /* Release DMA sound modules if present */ 1039 - if (dmasound_exit && dev->dmasound.priv_data) { 1040 - dmasound_exit(dev); 1039 + if (saa7134_dmasound_exit && dev->dmasound.priv_data) { 1040 + saa7134_dmasound_exit(dev); 1041 1041 } 1042 1042 1043 1043 /* debugging ... */ ··· 1169 1169 1170 1170 /* ----------------- for the DMA sound modules --------------- */ 1171 1171 1172 - EXPORT_SYMBOL(dmasound_init); 1173 - EXPORT_SYMBOL(dmasound_exit); 1172 + EXPORT_SYMBOL(saa7134_dmasound_init); 1173 + EXPORT_SYMBOL(saa7134_dmasound_exit); 1174 1174 EXPORT_SYMBOL(saa7134_pgtable_free); 1175 1175 EXPORT_SYMBOL(saa7134_pgtable_build); 1176 1176 EXPORT_SYMBOL(saa7134_pgtable_alloc);
+5 -5
drivers/media/video/saa7134/saa7134-oss.c
··· 993 993 struct saa7134_dev *dev = NULL; 994 994 struct list_head *list; 995 995 996 - if (!dmasound_init && !dmasound_exit) { 997 - dmasound_init = oss_device_init; 998 - dmasound_exit = oss_device_exit; 996 + if (!saa7134_dmasound_init && !saa7134_dmasound_exit) { 997 + saa7134_dmasound_init = oss_device_init; 998 + saa7134_dmasound_exit = oss_device_exit; 999 999 } else { 1000 1000 printk(KERN_WARNING "saa7134 OSS: can't load, DMA sound handler already assigned (probably to ALSA)\n"); 1001 1001 return -EBUSY; ··· 1037 1037 1038 1038 } 1039 1039 1040 - dmasound_init = NULL; 1041 - dmasound_exit = NULL; 1040 + saa7134_dmasound_init = NULL; 1041 + saa7134_dmasound_exit = NULL; 1042 1042 1043 1043 printk(KERN_INFO "saa7134 OSS driver for DMA sound unloaded\n"); 1044 1044
+3 -3
drivers/media/video/saa7134/saa7134-video.c
··· 40 40 41 41 static unsigned int video_debug = 0; 42 42 static unsigned int gbuffers = 8; 43 - static unsigned int noninterlaced = 0; 43 + static unsigned int noninterlaced = 1; 44 44 static unsigned int gbufsize = 720*576*4; 45 45 static unsigned int gbufsize_max = 720*576*4; 46 46 module_param(video_debug, int, 0644); ··· 48 48 module_param(gbuffers, int, 0444); 49 49 MODULE_PARM_DESC(gbuffers,"number of capture buffers, range 2-32"); 50 50 module_param(noninterlaced, int, 0644); 51 - MODULE_PARM_DESC(noninterlaced,"video input is noninterlaced"); 51 + MODULE_PARM_DESC(noninterlaced,"capture non interlaced video"); 52 52 53 53 #define dprintk(fmt, arg...) if (video_debug) \ 54 54 printk(KERN_DEBUG "%s/video: " fmt, dev->name , ## arg) ··· 2087 2087 struct v4l2_format *f = arg; 2088 2088 return saa7134_try_fmt(dev,fh,f); 2089 2089 } 2090 - #ifdef HAVE_V4L1 2090 + #ifdef CONFIG_V4L1_COMPAT 2091 2091 case VIDIOCGMBUF: 2092 2092 { 2093 2093 struct video_mbuf *mbuf = arg;
+2 -2
drivers/media/video/saa7134/saa7134.h
··· 586 586 587 587 int saa7134_set_dmabits(struct saa7134_dev *dev); 588 588 589 - extern int (*dmasound_init)(struct saa7134_dev *dev); 590 - extern int (*dmasound_exit)(struct saa7134_dev *dev); 589 + extern int (*saa7134_dmasound_init)(struct saa7134_dev *dev); 590 + extern int (*saa7134_dmasound_exit)(struct saa7134_dev *dev); 591 591 592 592 593 593 /* ----------------------------------------------------------- */
-1
drivers/media/video/stradis.c
··· 2181 2181 { 0 } 2182 2182 }; 2183 2183 2184 - MODULE_DEVICE_TABLE(pci, stradis_pci_tbl); 2185 2184 2186 2185 static struct pci_driver stradis_driver = { 2187 2186 .name = "stradis",
+12 -19
drivers/media/video/tuner-core.c
··· 196 196 i2c_master_send(c, buffer, 4); 197 197 default_tuner_init(c); 198 198 break; 199 - case TUNER_LG_TDVS_H06XF: 200 - /* Set the Auxiliary Byte. */ 201 - buffer[2] &= ~0x20; 202 - buffer[2] |= 0x18; 203 - buffer[3] = 0x20; 204 - i2c_master_send(c, buffer, 4); 205 - default_tuner_init(c); 206 - break; 207 199 case TUNER_PHILIPS_TD1316: 208 200 buffer[0] = 0x0b; 209 201 buffer[1] = 0xdc; ··· 590 598 if (t->standby) 591 599 t->standby (client); 592 600 break; 601 + #ifdef CONFIG_VIDEO_V4L1 593 602 case VIDIOCSAUDIO: 594 603 if (check_mode(t, "VIDIOCSAUDIO") == EINVAL) 595 604 return 0; ··· 600 607 /* Should be implemented, since bttv calls it */ 601 608 tuner_dbg("VIDIOCSAUDIO not implemented.\n"); 602 609 break; 603 - case TDA9887_SET_CONFIG: 604 - if (t->type == TUNER_TDA9887) { 605 - int *i = arg; 606 - 607 - t->tda9887_config = *i; 608 - set_freq(client, t->tv_freq); 609 - } 610 - break; 611 - /* --- v4l ioctls --- */ 612 - /* take care: bttv does userspace copying, we'll get a 613 - kernel pointer here... */ 614 610 case VIDIOCSCHAN: 615 611 { 616 612 static const v4l2_std_id map[] = { ··· 683 701 ? VIDEO_SOUND_STEREO : VIDEO_SOUND_MONO; 684 702 return 0; 685 703 } 704 + #endif 705 + case TDA9887_SET_CONFIG: 706 + if (t->type == TUNER_TDA9887) { 707 + int *i = arg; 686 708 709 + t->tda9887_config = *i; 710 + set_freq(client, t->tv_freq); 711 + } 712 + break; 713 + /* --- v4l ioctls --- */ 714 + /* take care: bttv does userspace copying, we'll get a 715 + kernel pointer here... */ 687 716 case VIDIOC_S_STD: 688 717 { 689 718 v4l2_std_id *id = arg;
+17 -2
drivers/media/video/tuner-simple.c
··· 339 339 if (4 != (rc = i2c_master_send(c,buffer,4))) 340 340 tuner_warn("i2c i/o error: rc == %d (should be 4)\n",rc); 341 341 342 - if (t->type == TUNER_MICROTUNE_4042FI5) { 342 + switch (t->type) { 343 + case TUNER_LG_TDVS_H06XF: 344 + /* Set the Auxiliary Byte. */ 345 + buffer[0] = buffer[2]; 346 + buffer[0] &= ~0x20; 347 + buffer[0] |= 0x18; 348 + buffer[1] = 0x20; 349 + tuner_dbg("tv 0x%02x 0x%02x\n",buffer[0],buffer[1]); 350 + 351 + if (2 != (rc = i2c_master_send(c,buffer,2))) 352 + tuner_warn("i2c i/o error: rc == %d (should be 2)\n",rc); 353 + break; 354 + case TUNER_MICROTUNE_4042FI5: 355 + { 343 356 // FIXME - this may also work for other tuners 344 357 unsigned long timeout = jiffies + msecs_to_jiffies(1); 345 358 u8 status_byte = 0; ··· 377 364 buffer[2] = config; 378 365 buffer[3] = cb; 379 366 tuner_dbg("tv 0x%02x 0x%02x 0x%02x 0x%02x\n", 380 - buffer[0],buffer[1],buffer[2],buffer[3]); 367 + buffer[0],buffer[1],buffer[2],buffer[3]); 381 368 382 369 if (4 != (rc = i2c_master_send(c,buffer,4))) 383 370 tuner_warn("i2c i/o error: rc == %d (should be 4)\n",rc); 371 + break; 372 + } 384 373 } 385 374 } 386 375
+4 -4
drivers/media/video/usbvideo/Kconfig
··· 3 3 4 4 config USB_VICAM 5 5 tristate "USB 3com HomeConnect (aka vicam) support (EXPERIMENTAL)" 6 - depends on USB && VIDEO_V4L1 && EXPERIMENTAL 6 + depends on USB && VIDEO_DEV && VIDEO_V4L1 && EXPERIMENTAL 7 7 select VIDEO_USBVIDEO 8 8 ---help--- 9 9 Say Y here if you have 3com homeconnect camera (vicam). ··· 13 13 14 14 config USB_IBMCAM 15 15 tristate "USB IBM (Xirlink) C-it Camera support" 16 - depends on USB && VIDEO_V4L1 16 + depends on USB && VIDEO_DEV && VIDEO_V4L1 17 17 select VIDEO_USBVIDEO 18 18 ---help--- 19 19 Say Y here if you want to connect a IBM "C-It" camera, also known as ··· 28 28 29 29 config USB_KONICAWC 30 30 tristate "USB Konica Webcam support" 31 - depends on USB && VIDEO_V4L1 31 + depends on USB && VIDEO_DEV && VIDEO_V4L1 32 32 select VIDEO_USBVIDEO 33 33 ---help--- 34 34 Say Y here if you want support for webcams based on a Konica ··· 39 39 40 40 config USB_QUICKCAM_MESSENGER 41 41 tristate "USB Logitech Quickcam Messenger" 42 - depends on USB && VIDEO_DEV 42 + depends on USB && VIDEO_DEV && VIDEO_V4L1 43 43 select VIDEO_USBVIDEO 44 44 ---help--- 45 45 Say Y or M here to enable support for the USB Logitech Quickcam
+14 -10
drivers/media/video/v4l2-common.c
··· 202 202 /* ------------------------------------------------------------------ */ 203 203 /* debug help functions */ 204 204 205 - #ifdef HAVE_V4L1 205 + #ifdef CONFIG_V4L1_COMPAT 206 206 static const char *v4l1_ioctls[] = { 207 207 [_IOC_NR(VIDIOCGCAP)] = "VIDIOCGCAP", 208 208 [_IOC_NR(VIDIOCGCHAN)] = "VIDIOCGCHAN", ··· 301 301 #define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls) 302 302 303 303 static const char *v4l2_int_ioctls[] = { 304 - #ifdef HAVE_VIDEO_DECODER 304 + #ifdef CONFIG_V4L1_COMPAT 305 305 [_IOC_NR(DECODER_GET_CAPABILITIES)] = "DECODER_GET_CAPABILITIES", 306 306 [_IOC_NR(DECODER_GET_STATUS)] = "DECODER_GET_STATUS", 307 307 [_IOC_NR(DECODER_SET_NORM)] = "DECODER_SET_NORM", ··· 367 367 (_IOC_NR(cmd) < V4L2_INT_IOCTLS) ? 368 368 v4l2_int_ioctls[_IOC_NR(cmd)] : "UNKNOWN", dir, cmd); 369 369 break; 370 - #ifdef HAVE_V4L1 370 + #ifdef CONFIG_V4L1_COMPAT 371 371 case 'v': 372 372 printk("v4l1 ioctl %s, dir=%s (0x%08x)\n", 373 373 (_IOC_NR(cmd) < V4L1_IOCTLS) ? ··· 414 414 printk ("%s: tuner type=%d\n", s, *p); 415 415 break; 416 416 } 417 + #ifdef CONFIG_VIDEO_V4L1_COMPAT 417 418 case DECODER_SET_VBI_BYPASS: 418 419 case DECODER_ENABLE_OUTPUT: 419 420 case DECODER_GET_STATUS: ··· 425 424 case VIDIOCCAPTURE: 426 425 case VIDIOCSYNC: 427 426 case VIDIOCSWRITEMODE: 427 + #endif 428 428 case TUNER_SET_TYPE_ADDR: 429 429 case TUNER_SET_STANDBY: 430 430 case TDA9887_SET_CONFIG: ··· 757 755 p->afc); 758 756 break; 759 757 } 758 + #ifdef CONFIG_VIDEO_V4L1_COMPAT 760 759 case VIDIOCGVBIFMT: 761 760 case VIDIOCSVBIFMT: 762 761 { ··· 927 924 p->clipcount); 928 925 break; 929 926 } 927 + case VIDIOCGFREQ: 928 + case VIDIOCSFREQ: 929 + { 930 + unsigned long *p=arg; 931 + printk ("%s: value=%lu\n", s, *p); 932 + break; 933 + } 934 + #endif 930 935 case VIDIOC_INT_AUDIO_CLOCK_FREQ: 931 936 case VIDIOC_INT_I2S_CLOCK_FREQ: 932 937 case VIDIOC_INT_S_STANDBY: ··· 942 931 u32 *p=arg; 943 932 944 933 printk ("%s: value=%d\n", s, *p); 945 - break; 946 - } 947 - case VIDIOCGFREQ: 
948 - case VIDIOCSFREQ: 949 - { 950 - unsigned long *p=arg; 951 - printk ("%s: value=%lu\n", s, *p); 952 934 break; 953 935 } 954 936 case VIDIOC_G_STD:
+25 -4
drivers/media/video/videodev.c
··· 760 760 ret=vfd->vidioc_overlay(file, fh, *i); 761 761 break; 762 762 } 763 - #ifdef HAVE_V4L1 763 + #ifdef CONFIG_V4L1_COMPAT 764 764 /* --- streaming capture ------------------------------------- */ 765 765 case VIDIOCGMBUF: 766 766 { ··· 1512 1512 int i=0; 1513 1513 int base; 1514 1514 int end; 1515 + int ret; 1515 1516 char *name_base; 1516 1517 1517 1518 switch(type) ··· 1538 1537 name_base = "radio"; 1539 1538 break; 1540 1539 default: 1540 + printk(KERN_ERR "%s called with unknown type: %d\n", 1541 + __FUNCTION__, type); 1541 1542 return -1; 1542 1543 } 1543 1544 ··· 1574 1571 vfd->class_dev.class = &video_class; 1575 1572 vfd->class_dev.devt = MKDEV(VIDEO_MAJOR, vfd->minor); 1576 1573 sprintf(vfd->class_dev.class_id, "%s%d", name_base, i - base); 1577 - class_device_register(&vfd->class_dev); 1578 - class_device_create_file(&vfd->class_dev, 1579 - &class_device_attr_name); 1574 + ret = class_device_register(&vfd->class_dev); 1575 + if (ret < 0) { 1576 + printk(KERN_ERR "%s: class_device_register failed\n", 1577 + __FUNCTION__); 1578 + goto fail_minor; 1579 + } 1580 + ret = class_device_create_file(&vfd->class_dev, &class_device_attr_name); 1581 + if (ret < 0) { 1582 + printk(KERN_ERR "%s: class_device_create_file 'name' failed\n", 1583 + __FUNCTION__); 1584 + goto fail_classdev; 1585 + } 1580 1586 1581 1587 #if 1 1582 1588 /* needed until all drivers are fixed */ ··· 1595 1583 "http://lwn.net/Articles/36850/\n", vfd->name); 1596 1584 #endif 1597 1585 return 0; 1586 + 1587 + fail_classdev: 1588 + class_device_unregister(&vfd->class_dev); 1589 + fail_minor: 1590 + mutex_lock(&videodev_lock); 1591 + video_device[vfd->minor] = NULL; 1592 + vfd->minor = -1; 1593 + mutex_unlock(&videodev_lock); 1594 + return ret; 1598 1595 } 1599 1596 1600 1597 /**
+2 -2
drivers/media/video/vivi.c
··· 986 986 file->f_flags & O_NONBLOCK)); 987 987 } 988 988 989 - #ifdef HAVE_V4L1 989 + #ifdef CONFIG_V4L1_COMPAT 990 990 static int vidiocgmbuf (struct file *file, void *priv, struct video_mbuf *mbuf) 991 991 { 992 992 struct vivi_fh *fh=priv; ··· 1328 1328 .vidioc_s_ctrl = vidioc_s_ctrl, 1329 1329 .vidioc_streamon = vidioc_streamon, 1330 1330 .vidioc_streamoff = vidioc_streamoff, 1331 - #ifdef HAVE_V4L1 1331 + #ifdef CONFIG_V4L1_COMPAT 1332 1332 .vidiocgmbuf = vidiocgmbuf, 1333 1333 #endif 1334 1334 .tvnorms = tvnorms,
+1 -1
drivers/net/appletalk/Kconfig
··· 29 29 even politically correct people are allowed to say Y here. 30 30 31 31 config DEV_APPLETALK 32 - bool "Appletalk interfaces support" 32 + tristate "Appletalk interfaces support" 33 33 depends on ATALK 34 34 help 35 35 AppleTalk is the protocol that Apple computers can use to communicate
+6 -5
drivers/net/e1000/e1000_main.c
··· 3127 3127 break; 3128 3128 } 3129 3129 3130 - /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3130 + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3131 3131 * means we reserve 2 more, this pushes us to allocate from the next 3132 3132 * larger slab size 3133 3133 * i.e. RXBUFFER_2048 --> size-4096 slab */ ··· 3708 3708 #define E1000_CB_LENGTH 256 3709 3709 if (length < E1000_CB_LENGTH) { 3710 3710 struct sk_buff *new_skb = 3711 - dev_alloc_skb(length + NET_IP_ALIGN); 3711 + netdev_alloc_skb(netdev, length + NET_IP_ALIGN); 3712 3712 if (new_skb) { 3713 3713 skb_reserve(new_skb, NET_IP_ALIGN); 3714 3714 new_skb->dev = netdev; ··· 3979 3979 3980 3980 while (cleaned_count--) { 3981 3981 if (!(skb = buffer_info->skb)) 3982 - skb = dev_alloc_skb(bufsz); 3982 + skb = netdev_alloc_skb(netdev, bufsz); 3983 3983 else { 3984 3984 skb_trim(skb, 0); 3985 3985 goto map_skb; ··· 3997 3997 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " 3998 3998 "at %p\n", bufsz, skb->data); 3999 3999 /* Try again, without freeing the previous */ 4000 - skb = dev_alloc_skb(bufsz); 4000 + skb = netdev_alloc_skb(netdev, bufsz); 4001 4001 /* Failed allocation, critical failure */ 4002 4002 if (!skb) { 4003 4003 dev_kfree_skb(oldskb); ··· 4121 4121 rx_desc->read.buffer_addr[j+1] = ~0; 4122 4122 } 4123 4123 4124 - skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); 4124 + skb = netdev_alloc_skb(netdev, 4125 + adapter->rx_ps_bsize0 + NET_IP_ALIGN); 4125 4126 4126 4127 if (unlikely(!skb)) { 4127 4128 adapter->alloc_rx_buff_failed++;
+11 -13
drivers/net/myri10ge/myri10ge.c
··· 177 177 struct work_struct watchdog_work; 178 178 struct timer_list watchdog_timer; 179 179 int watchdog_tx_done; 180 + int watchdog_tx_req; 180 181 int watchdog_resets; 181 182 int tx_linearized; 182 183 int pause; ··· 449 448 struct mcp_gen_header *hdr; 450 449 size_t hdr_offset; 451 450 int status; 451 + unsigned i; 452 452 453 453 if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) { 454 454 dev_err(dev, "Unable to load %s firmware image via hotplug\n", ··· 481 479 goto abort_with_fw; 482 480 483 481 crc = crc32(~0, fw->data, fw->size); 484 - if (mgp->tx.boundary == 2048) { 485 - /* Avoid PCI burst on chipset with unaligned completions. */ 486 - int i; 487 - __iomem u32 *ptr = (__iomem u32 *) (mgp->sram + 488 - MYRI10GE_FW_OFFSET); 489 - for (i = 0; i < fw->size / 4; i++) { 490 - __raw_writel(((u32 *) fw->data)[i], ptr + i); 491 - wmb(); 492 - } 493 - } else { 494 - myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET, fw->data, 495 - fw->size); 482 + for (i = 0; i < fw->size; i += 256) { 483 + myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i, 484 + fw->data + i, 485 + min(256U, (unsigned)(fw->size - i))); 486 + mb(); 487 + readb(mgp->sram); 496 488 } 497 489 /* corruption checking is good for parity recovery and buggy chipset */ 498 490 memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size); ··· 2543 2547 2544 2548 mgp = (struct myri10ge_priv *)arg; 2545 2549 if (mgp->tx.req != mgp->tx.done && 2546 - mgp->tx.done == mgp->watchdog_tx_done) 2550 + mgp->tx.done == mgp->watchdog_tx_done && 2551 + mgp->watchdog_tx_req != mgp->watchdog_tx_done) 2547 2552 /* nic seems like it might be stuck.. */ 2548 2553 schedule_work(&mgp->watchdog_work); 2549 2554 else ··· 2553 2556 jiffies + myri10ge_watchdog_timeout * HZ); 2554 2557 2555 2558 mgp->watchdog_tx_done = mgp->tx.done; 2559 + mgp->watchdog_tx_req = mgp->tx.req; 2556 2560 } 2557 2561 2558 2562 static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+2 -6
drivers/net/phy/phy.c
··· 419 419 420 420 /* phy_stop_machine 421 421 * 422 - * description: Stops the state machine timer, sets the state to 423 - * UP (unless it wasn't up yet), and then frees the interrupt, 424 - * if it is in use. This function must be called BEFORE 422 + * description: Stops the state machine timer, sets the state to UP 423 + * (unless it wasn't up yet). This function must be called BEFORE 425 424 * phy_detach. 426 425 */ 427 426 void phy_stop_machine(struct phy_device *phydev) ··· 431 432 if (phydev->state > PHY_UP) 432 433 phydev->state = PHY_UP; 433 434 spin_unlock(&phydev->lock); 434 - 435 - if (phydev->irq != PHY_POLL) 436 - phy_stop_interrupts(phydev); 437 435 438 436 phydev->adjust_state = NULL; 439 437 }
+173 -213
drivers/net/s2io.c
··· 76 76 #include "s2io.h" 77 77 #include "s2io-regs.h" 78 78 79 - #define DRV_VERSION "2.0.14.2" 79 + #define DRV_VERSION "2.0.15.2" 80 80 81 81 /* S2io Driver name & version. */ 82 82 static char s2io_driver_name[] = "Neterion"; ··· 370 370 END_SIGN 371 371 }; 372 372 373 + MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>"); 374 + MODULE_LICENSE("GPL"); 375 + MODULE_VERSION(DRV_VERSION); 376 + 377 + 373 378 /* Module Loadable parameters. */ 374 - static unsigned int tx_fifo_num = 1; 379 + S2IO_PARM_INT(tx_fifo_num, 1); 380 + S2IO_PARM_INT(rx_ring_num, 1); 381 + 382 + 383 + S2IO_PARM_INT(rx_ring_mode, 1); 384 + S2IO_PARM_INT(use_continuous_tx_intrs, 1); 385 + S2IO_PARM_INT(rmac_pause_time, 0x100); 386 + S2IO_PARM_INT(mc_pause_threshold_q0q3, 187); 387 + S2IO_PARM_INT(mc_pause_threshold_q4q7, 187); 388 + S2IO_PARM_INT(shared_splits, 0); 389 + S2IO_PARM_INT(tmac_util_period, 5); 390 + S2IO_PARM_INT(rmac_util_period, 5); 391 + S2IO_PARM_INT(bimodal, 0); 392 + S2IO_PARM_INT(l3l4hdr_size, 128); 393 + /* Frequency of Rx desc syncs expressed as power of 2 */ 394 + S2IO_PARM_INT(rxsync_frequency, 3); 395 + /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ 396 + S2IO_PARM_INT(intr_type, 0); 397 + /* Large receive offload feature */ 398 + S2IO_PARM_INT(lro, 0); 399 + /* Max pkts to be aggregated by LRO at one time. 
If not specified, 400 + * aggregation happens until we hit max IP pkt size(64K) 401 + */ 402 + S2IO_PARM_INT(lro_max_pkts, 0xFFFF); 403 + #ifndef CONFIG_S2IO_NAPI 404 + S2IO_PARM_INT(indicate_max_pkts, 0); 405 + #endif 406 + 375 407 static unsigned int tx_fifo_len[MAX_TX_FIFOS] = 376 408 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; 377 - static unsigned int rx_ring_num = 1; 378 409 static unsigned int rx_ring_sz[MAX_RX_RINGS] = 379 410 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; 380 411 static unsigned int rts_frm_len[MAX_RX_RINGS] = 381 412 {[0 ...(MAX_RX_RINGS - 1)] = 0 }; 382 - static unsigned int rx_ring_mode = 1; 383 - static unsigned int use_continuous_tx_intrs = 1; 384 - static unsigned int rmac_pause_time = 0x100; 385 - static unsigned int mc_pause_threshold_q0q3 = 187; 386 - static unsigned int mc_pause_threshold_q4q7 = 187; 387 - static unsigned int shared_splits; 388 - static unsigned int tmac_util_period = 5; 389 - static unsigned int rmac_util_period = 5; 390 - static unsigned int bimodal = 0; 391 - static unsigned int l3l4hdr_size = 128; 392 - #ifndef CONFIG_S2IO_NAPI 393 - static unsigned int indicate_max_pkts; 394 - #endif 395 - /* Frequency of Rx desc syncs expressed as power of 2 */ 396 - static unsigned int rxsync_frequency = 3; 397 - /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ 398 - static unsigned int intr_type = 0; 399 - /* Large receive offload feature */ 400 - static unsigned int lro = 0; 401 - /* Max pkts to be aggregated by LRO at one time. If not specified, 402 - * aggregation happens until we hit max IP pkt size(64K) 403 - */ 404 - static unsigned int lro_max_pkts = 0xFFFF; 413 + 414 + module_param_array(tx_fifo_len, uint, NULL, 0); 415 + module_param_array(rx_ring_sz, uint, NULL, 0); 416 + module_param_array(rts_frm_len, uint, NULL, 0); 405 417 406 418 /* 407 419 * S2IO device table. 
··· 476 464 size += config->tx_cfg[i].fifo_len; 477 465 } 478 466 if (size > MAX_AVAILABLE_TXDS) { 479 - DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ", 480 - __FUNCTION__); 467 + DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, "); 481 468 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size); 482 - return FAILURE; 469 + return -EINVAL; 483 470 } 484 471 485 472 lst_size = (sizeof(TxD_t) * config->max_txds); ··· 558 547 nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL); 559 548 if (!nic->ufo_in_band_v) 560 549 return -ENOMEM; 550 + memset(nic->ufo_in_band_v, 0, size); 561 551 562 552 /* Allocation and initialization of RXDs in Rings */ 563 553 size = 0; ··· 1225 1213 break; 1226 1214 } 1227 1215 1228 - /* Enable Tx FIFO partition 0. */ 1216 + /* Enable all configured Tx FIFO partitions */ 1229 1217 val64 = readq(&bar0->tx_fifo_partition_0); 1230 1218 val64 |= (TX_FIFO_PARTITION_EN); 1231 1219 writeq(val64, &bar0->tx_fifo_partition_0); ··· 1662 1650 writeq(temp64, &bar0->general_int_mask); 1663 1651 /* 1664 1652 * If Hercules adapter enable GPIO otherwise 1665 - * disabled all PCIX, Flash, MDIO, IIC and GPIO 1653 + * disable all PCIX, Flash, MDIO, IIC and GPIO 1666 1654 * interrupts for now. 
1667 1655 * TODO 1668 1656 */ ··· 2131 2119 frag->size, PCI_DMA_TODEVICE); 2132 2120 } 2133 2121 } 2134 - txdlp->Host_Control = 0; 2122 + memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds)); 2135 2123 return(skb); 2136 2124 } 2137 2125 ··· 2383 2371 skb->data = (void *) (unsigned long)tmp; 2384 2372 skb->tail = (void *) (unsigned long)tmp; 2385 2373 2386 - ((RxD3_t*)rxdp)->Buffer0_ptr = 2387 - pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, 2374 + if (!(((RxD3_t*)rxdp)->Buffer0_ptr)) 2375 + ((RxD3_t*)rxdp)->Buffer0_ptr = 2376 + pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, 2388 2377 PCI_DMA_FROMDEVICE); 2378 + else 2379 + pci_dma_sync_single_for_device(nic->pdev, 2380 + (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr, 2381 + BUF0_LEN, PCI_DMA_FROMDEVICE); 2389 2382 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 2390 2383 if (nic->rxd_mode == RXD_MODE_3B) { 2391 2384 /* Two buffer mode */ ··· 2403 2386 (nic->pdev, skb->data, dev->mtu + 4, 2404 2387 PCI_DMA_FROMDEVICE); 2405 2388 2406 - /* Buffer-1 will be dummy buffer not used */ 2407 - ((RxD3_t*)rxdp)->Buffer1_ptr = 2408 - pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN, 2409 - PCI_DMA_FROMDEVICE); 2389 + /* Buffer-1 will be dummy buffer. Not used */ 2390 + if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) { 2391 + ((RxD3_t*)rxdp)->Buffer1_ptr = 2392 + pci_map_single(nic->pdev, 2393 + ba->ba_1, BUF1_LEN, 2394 + PCI_DMA_FROMDEVICE); 2395 + } 2410 2396 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); 2411 2397 rxdp->Control_2 |= SET_BUFFER2_SIZE_3 2412 2398 (dev->mtu + 4); ··· 2634 2614 } 2635 2615 #endif 2636 2616 2617 + #ifdef CONFIG_NET_POLL_CONTROLLER 2637 2618 /** 2638 - * s2io_netpoll - Rx interrupt service handler for netpoll support 2619 + * s2io_netpoll - netpoll event handler entry point 2639 2620 * @dev : pointer to the device structure. 2640 2621 * Description: 2641 - * Polling 'interrupt' - used by things like netconsole to send skbs 2642 - * without having to re-enable interrupts. 
It's not called while 2643 - * the interrupt routine is executing. 2622 + * This function will be called by upper layer to check for events on the 2623 + * interface in situations where interrupts are disabled. It is used for 2624 + * specific in-kernel networking tasks, such as remote consoles and kernel 2625 + * debugging over the network (example netdump in RedHat). 2644 2626 */ 2645 - 2646 - #ifdef CONFIG_NET_POLL_CONTROLLER 2647 2627 static void s2io_netpoll(struct net_device *dev) 2648 2628 { 2649 2629 nic_t *nic = dev->priv; 2650 2630 mac_info_t *mac_control; 2651 2631 struct config_param *config; 2652 2632 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2653 - u64 val64; 2633 + u64 val64 = 0xFFFFFFFFFFFFFFFFULL; 2654 2634 int i; 2655 2635 2656 2636 disable_irq(dev->irq); ··· 2659 2639 mac_control = &nic->mac_control; 2660 2640 config = &nic->config; 2661 2641 2662 - val64 = readq(&bar0->rx_traffic_int); 2663 2642 writeq(val64, &bar0->rx_traffic_int); 2643 + writeq(val64, &bar0->tx_traffic_int); 2664 2644 2645 + /* we need to free up the transmitted skbufs or else netpoll will 2646 + * run out of skbs and will fail and eventually netpoll application such 2647 + * as netdump will fail. 
2648 + */ 2649 + for (i = 0; i < config->tx_fifo_num; i++) 2650 + tx_intr_handler(&mac_control->fifos[i]); 2651 + 2652 + /* check for received packet and indicate up to network */ 2665 2653 for (i = 0; i < config->rx_ring_num; i++) 2666 2654 rx_intr_handler(&mac_control->rings[i]); 2667 2655 ··· 2736 2708 /* If your are next to put index then it's FIFO full condition */ 2737 2709 if ((get_block == put_block) && 2738 2710 (get_info.offset + 1) == put_info.offset) { 2739 - DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name); 2711 + DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name); 2740 2712 break; 2741 2713 } 2742 2714 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); ··· 2756 2728 HEADER_SNAP_SIZE, 2757 2729 PCI_DMA_FROMDEVICE); 2758 2730 } else if (nic->rxd_mode == RXD_MODE_3B) { 2759 - pci_unmap_single(nic->pdev, (dma_addr_t) 2731 + pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) 2760 2732 ((RxD3_t*)rxdp)->Buffer0_ptr, 2761 2733 BUF0_LEN, PCI_DMA_FROMDEVICE); 2762 - pci_unmap_single(nic->pdev, (dma_addr_t) 2763 - ((RxD3_t*)rxdp)->Buffer1_ptr, 2764 - BUF1_LEN, PCI_DMA_FROMDEVICE); 2765 2734 pci_unmap_single(nic->pdev, (dma_addr_t) 2766 2735 ((RxD3_t*)rxdp)->Buffer2_ptr, 2767 2736 dev->mtu + 4, 2768 2737 PCI_DMA_FROMDEVICE); 2769 2738 } else { 2770 - pci_unmap_single(nic->pdev, (dma_addr_t) 2739 + pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) 2771 2740 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, 2772 2741 PCI_DMA_FROMDEVICE); 2773 2742 pci_unmap_single(nic->pdev, (dma_addr_t) ··· 3352 3327 3353 3328 /* Clear certain PCI/PCI-X fields after reset */ 3354 3329 if (sp->device_type == XFRAME_II_DEVICE) { 3355 - /* Clear parity err detect bit */ 3330 + /* Clear "detected parity error" bit */ 3356 3331 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000); 3357 3332 3358 3333 /* Clearing PCIX Ecc status register */ ··· 3553 3528 u64 val64; 3554 3529 int i; 3555 3530 3556 - for (i=0; i< nic->avail_msix_vectors; i++) { 3531 + for (i=0; i < MAX_REQUESTED_MSI_X; 
i++) { 3557 3532 writeq(nic->msix_info[i].addr, &bar0->xmsi_address); 3558 3533 writeq(nic->msix_info[i].data, &bar0->xmsi_data); 3559 3534 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6)); ··· 3572 3547 int i; 3573 3548 3574 3549 /* Store and display */ 3575 - for (i=0; i< nic->avail_msix_vectors; i++) { 3550 + for (i=0; i < MAX_REQUESTED_MSI_X; i++) { 3576 3551 val64 = (BIT(15) | vBIT(i, 26, 6)); 3577 3552 writeq(val64, &bar0->xmsi_access); 3578 3553 if (wait_for_msix_trans(nic, i)) { ··· 3833 3808 TxD_t *txdp; 3834 3809 TxFIFO_element_t __iomem *tx_fifo; 3835 3810 unsigned long flags; 3836 - #ifdef NETIF_F_TSO 3837 - int mss; 3838 - #endif 3839 3811 u16 vlan_tag = 0; 3840 3812 int vlan_priority = 0; 3841 3813 mac_info_t *mac_control; 3842 3814 struct config_param *config; 3815 + int offload_type; 3843 3816 3844 3817 mac_control = &sp->mac_control; 3845 3818 config = &sp->config; ··· 3885 3862 return 0; 3886 3863 } 3887 3864 3888 - txdp->Control_1 = 0; 3889 - txdp->Control_2 = 0; 3865 + offload_type = s2io_offload_type(skb); 3890 3866 #ifdef NETIF_F_TSO 3891 - mss = skb_shinfo(skb)->gso_size; 3892 - if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { 3867 + if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { 3893 3868 txdp->Control_1 |= TXD_TCP_LSO_EN; 3894 - txdp->Control_1 |= TXD_TCP_LSO_MSS(mss); 3869 + txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb)); 3895 3870 } 3896 3871 #endif 3897 3872 if (skb->ip_summed == CHECKSUM_HW) { ··· 3907 3886 } 3908 3887 3909 3888 frg_len = skb->len - skb->data_len; 3910 - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) { 3889 + if (offload_type == SKB_GSO_UDP) { 3911 3890 int ufo_size; 3912 3891 3913 - ufo_size = skb_shinfo(skb)->gso_size; 3892 + ufo_size = s2io_udp_mss(skb); 3914 3893 ufo_size &= ~7; 3915 3894 txdp->Control_1 |= TXD_UFO_EN; 3916 3895 txdp->Control_1 |= TXD_UFO_MSS(ufo_size); ··· 3927 3906 sp->ufo_in_band_v, 3928 3907 sizeof(u64), PCI_DMA_TODEVICE); 3929 3908 txdp++; 3930 - txdp->Control_1 = 0; 
3931 - txdp->Control_2 = 0; 3932 3909 } 3933 3910 3934 3911 txdp->Buffer_Pointer = pci_map_single 3935 3912 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); 3936 3913 txdp->Host_Control = (unsigned long) skb; 3937 3914 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); 3938 - 3939 - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) 3915 + if (offload_type == SKB_GSO_UDP) 3940 3916 txdp->Control_1 |= TXD_UFO_EN; 3941 3917 3942 3918 frg_cnt = skb_shinfo(skb)->nr_frags; ··· 3948 3930 (sp->pdev, frag->page, frag->page_offset, 3949 3931 frag->size, PCI_DMA_TODEVICE); 3950 3932 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size); 3951 - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) 3933 + if (offload_type == SKB_GSO_UDP) 3952 3934 txdp->Control_1 |= TXD_UFO_EN; 3953 3935 } 3954 3936 txdp->Control_1 |= TXD_GATHER_CODE_LAST; 3955 3937 3956 - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) 3938 + if (offload_type == SKB_GSO_UDP) 3957 3939 frg_cnt++; /* as Txd0 was used for inband header */ 3958 3940 3959 3941 tx_fifo = mac_control->tx_FIFO_start[queue]; ··· 3962 3944 3963 3945 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | 3964 3946 TX_FIFO_LAST_LIST); 3947 + if (offload_type) 3948 + val64 |= TX_FIFO_SPECIAL_FUNC; 3965 3949 3966 - #ifdef NETIF_F_TSO 3967 - if (mss) 3968 - val64 |= TX_FIFO_SPECIAL_FUNC; 3969 - #endif 3970 - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) 3971 - val64 |= TX_FIFO_SPECIAL_FUNC; 3972 3950 writeq(val64, &tx_fifo->List_Control); 3973 3951 3974 3952 mmiowb(); ··· 3998 3984 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 3999 3985 } 4000 3986 3987 + static int s2io_chk_rx_buffers(nic_t *sp, int rng_n) 3988 + { 3989 + int rxb_size, level; 3990 + 3991 + if (!sp->lro) { 3992 + rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); 3993 + level = rx_buffer_level(sp, rxb_size, rng_n); 3994 + 3995 + if ((level == PANIC) && (!TASKLET_IN_USE)) { 3996 + int ret; 3997 + DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); 3998 + DBG_PRINT(INTR_DBG, "PANIC levels\n"); 
3999 + if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { 4000 + DBG_PRINT(ERR_DBG, "Out of memory in %s", 4001 + __FUNCTION__); 4002 + clear_bit(0, (&sp->tasklet_status)); 4003 + return -1; 4004 + } 4005 + clear_bit(0, (&sp->tasklet_status)); 4006 + } else if (level == LOW) 4007 + tasklet_schedule(&sp->task); 4008 + 4009 + } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { 4010 + DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name); 4011 + DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); 4012 + } 4013 + return 0; 4014 + } 4015 + 4001 4016 static irqreturn_t 4002 4017 s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) 4003 4018 { 4004 4019 struct net_device *dev = (struct net_device *) dev_id; 4005 4020 nic_t *sp = dev->priv; 4006 4021 int i; 4007 - int ret; 4008 4022 mac_info_t *mac_control; 4009 4023 struct config_param *config; 4010 4024 ··· 4054 4012 * reallocate the buffers from the interrupt handler itself, 4055 4013 * else schedule a tasklet to reallocate the buffers. 4056 4014 */ 4057 - for (i = 0; i < config->rx_ring_num; i++) { 4058 - if (!sp->lro) { 4059 - int rxb_size = atomic_read(&sp->rx_bufs_left[i]); 4060 - int level = rx_buffer_level(sp, rxb_size, i); 4061 - 4062 - if ((level == PANIC) && (!TASKLET_IN_USE)) { 4063 - DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", 4064 - dev->name); 4065 - DBG_PRINT(INTR_DBG, "PANIC levels\n"); 4066 - if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { 4067 - DBG_PRINT(ERR_DBG, "%s:Out of memory", 4068 - dev->name); 4069 - DBG_PRINT(ERR_DBG, " in ISR!!\n"); 4070 - clear_bit(0, (&sp->tasklet_status)); 4071 - atomic_dec(&sp->isr_cnt); 4072 - return IRQ_HANDLED; 4073 - } 4074 - clear_bit(0, (&sp->tasklet_status)); 4075 - } else if (level == LOW) { 4076 - tasklet_schedule(&sp->task); 4077 - } 4078 - } 4079 - else if (fill_rx_buffers(sp, i) == -ENOMEM) { 4080 - DBG_PRINT(ERR_DBG, "%s:Out of memory", 4081 - dev->name); 4082 - DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); 4083 - break; 4084 - } 4085 - } 4015 + for (i = 0; i < 
config->rx_ring_num; i++) 4016 + s2io_chk_rx_buffers(sp, i); 4086 4017 4087 4018 atomic_dec(&sp->isr_cnt); 4088 4019 return IRQ_HANDLED; ··· 4066 4051 { 4067 4052 ring_info_t *ring = (ring_info_t *)dev_id; 4068 4053 nic_t *sp = ring->nic; 4069 - struct net_device *dev = (struct net_device *) dev_id; 4070 - int rxb_size, level, rng_n; 4071 4054 4072 4055 atomic_inc(&sp->isr_cnt); 4056 + 4073 4057 rx_intr_handler(ring); 4074 - 4075 - rng_n = ring->ring_no; 4076 - if (!sp->lro) { 4077 - rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); 4078 - level = rx_buffer_level(sp, rxb_size, rng_n); 4079 - 4080 - if ((level == PANIC) && (!TASKLET_IN_USE)) { 4081 - int ret; 4082 - DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); 4083 - DBG_PRINT(INTR_DBG, "PANIC levels\n"); 4084 - if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { 4085 - DBG_PRINT(ERR_DBG, "Out of memory in %s", 4086 - __FUNCTION__); 4087 - clear_bit(0, (&sp->tasklet_status)); 4088 - return IRQ_HANDLED; 4089 - } 4090 - clear_bit(0, (&sp->tasklet_status)); 4091 - } else if (level == LOW) { 4092 - tasklet_schedule(&sp->task); 4093 - } 4094 - } 4095 - else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { 4096 - DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name); 4097 - DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); 4098 - } 4058 + s2io_chk_rx_buffers(sp, ring->ring_no); 4099 4059 4100 4060 atomic_dec(&sp->isr_cnt); 4101 - 4102 4061 return IRQ_HANDLED; 4103 4062 } 4104 4063 ··· 4237 4248 * else schedule a tasklet to reallocate the buffers. 
4238 4249 */ 4239 4250 #ifndef CONFIG_S2IO_NAPI 4240 - for (i = 0; i < config->rx_ring_num; i++) { 4241 - if (!sp->lro) { 4242 - int ret; 4243 - int rxb_size = atomic_read(&sp->rx_bufs_left[i]); 4244 - int level = rx_buffer_level(sp, rxb_size, i); 4245 - 4246 - if ((level == PANIC) && (!TASKLET_IN_USE)) { 4247 - DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", 4248 - dev->name); 4249 - DBG_PRINT(INTR_DBG, "PANIC levels\n"); 4250 - if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { 4251 - DBG_PRINT(ERR_DBG, "%s:Out of memory", 4252 - dev->name); 4253 - DBG_PRINT(ERR_DBG, " in ISR!!\n"); 4254 - clear_bit(0, (&sp->tasklet_status)); 4255 - atomic_dec(&sp->isr_cnt); 4256 - writeq(org_mask, &bar0->general_int_mask); 4257 - return IRQ_HANDLED; 4258 - } 4259 - clear_bit(0, (&sp->tasklet_status)); 4260 - } else if (level == LOW) { 4261 - tasklet_schedule(&sp->task); 4262 - } 4263 - } 4264 - else if (fill_rx_buffers(sp, i) == -ENOMEM) { 4265 - DBG_PRINT(ERR_DBG, "%s:Out of memory", 4266 - dev->name); 4267 - DBG_PRINT(ERR_DBG, " in Rx intr!!\n"); 4268 - break; 4269 - } 4270 - } 4251 + for (i = 0; i < config->rx_ring_num; i++) 4252 + s2io_chk_rx_buffers(sp, i); 4271 4253 #endif 4272 4254 writeq(org_mask, &bar0->general_int_mask); 4273 4255 atomic_dec(&sp->isr_cnt); ··· 4268 4308 if (cnt == 5) 4269 4309 break; /* Updt failed */ 4270 4310 } while(1); 4311 + } else { 4312 + memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t)); 4271 4313 } 4272 4314 } 4273 4315 ··· 4904 4942 } 4905 4943 static void s2io_vpd_read(nic_t *nic) 4906 4944 { 4907 - u8 vpd_data[256],data; 4945 + u8 *vpd_data; 4946 + u8 data; 4908 4947 int i=0, cnt, fail = 0; 4909 4948 int vpd_addr = 0x80; 4910 4949 ··· 4917 4954 strcpy(nic->product_name, "Xframe I 10GbE network adapter"); 4918 4955 vpd_addr = 0x50; 4919 4956 } 4957 + 4958 + vpd_data = kmalloc(256, GFP_KERNEL); 4959 + if (!vpd_data) 4960 + return; 4920 4961 4921 4962 for (i = 0; i < 256; i +=4 ) { 4922 4963 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); ··· 
4944 4977 memset(nic->product_name, 0, vpd_data[1]); 4945 4978 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); 4946 4979 } 4980 + kfree(vpd_data); 4947 4981 } 4948 4982 4949 4983 /** ··· 5263 5295 else 5264 5296 *data = 0; 5265 5297 5266 - return 0; 5298 + return *data; 5267 5299 } 5268 5300 5269 5301 /** ··· 5721 5753 return 0; 5722 5754 } 5723 5755 5756 + static u32 s2io_ethtool_op_get_tso(struct net_device *dev) 5757 + { 5758 + return (dev->features & NETIF_F_TSO) != 0; 5759 + } 5760 + static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data) 5761 + { 5762 + if (data) 5763 + dev->features |= (NETIF_F_TSO | NETIF_F_TSO6); 5764 + else 5765 + dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); 5766 + 5767 + return 0; 5768 + } 5724 5769 5725 5770 static struct ethtool_ops netdev_ethtool_ops = { 5726 5771 .get_settings = s2io_ethtool_gset, ··· 5754 5773 .get_sg = ethtool_op_get_sg, 5755 5774 .set_sg = ethtool_op_set_sg, 5756 5775 #ifdef NETIF_F_TSO 5757 - .get_tso = ethtool_op_get_tso, 5758 - .set_tso = ethtool_op_set_tso, 5776 + .get_tso = s2io_ethtool_op_get_tso, 5777 + .set_tso = s2io_ethtool_op_set_tso, 5759 5778 #endif 5760 5779 .get_ufo = ethtool_op_get_ufo, 5761 5780 .set_ufo = ethtool_op_set_ufo, ··· 6318 6337 s2io_set_multicast(dev); 6319 6338 6320 6339 if (sp->lro) { 6321 - /* Initialize max aggregatable pkts based on MTU */ 6340 + /* Initialize max aggregatable pkts per session based on MTU */ 6322 6341 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; 6323 6342 /* Check if we can use(if specified) user provided value */ 6324 6343 if (lro_max_pkts < sp->lro_max_aggr_per_sess) ··· 6419 6438 * @cksum : FCS checksum of the frame. 6420 6439 * @ring_no : the ring from which this RxD was extracted. 
6421 6440 * Description: 6422 - * This function is called by the Tx interrupt serivce routine to perform 6441 + * This function is called by the Rx interrupt serivce routine to perform 6423 6442 * some OS related operations on the SKB before passing it to the upper 6424 6443 * layers. It mainly checks if the checksum is OK, if so adds it to the 6425 6444 * SKBs cksum variable, increments the Rx packet count and passes the SKB ··· 6679 6698 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); 6680 6699 } 6681 6700 6682 - MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>"); 6683 - MODULE_LICENSE("GPL"); 6684 - MODULE_VERSION(DRV_VERSION); 6685 - 6686 - module_param(tx_fifo_num, int, 0); 6687 - module_param(rx_ring_num, int, 0); 6688 - module_param(rx_ring_mode, int, 0); 6689 - module_param_array(tx_fifo_len, uint, NULL, 0); 6690 - module_param_array(rx_ring_sz, uint, NULL, 0); 6691 - module_param_array(rts_frm_len, uint, NULL, 0); 6692 - module_param(use_continuous_tx_intrs, int, 1); 6693 - module_param(rmac_pause_time, int, 0); 6694 - module_param(mc_pause_threshold_q0q3, int, 0); 6695 - module_param(mc_pause_threshold_q4q7, int, 0); 6696 - module_param(shared_splits, int, 0); 6697 - module_param(tmac_util_period, int, 0); 6698 - module_param(rmac_util_period, int, 0); 6699 - module_param(bimodal, bool, 0); 6700 - module_param(l3l4hdr_size, int , 0); 6701 - #ifndef CONFIG_S2IO_NAPI 6702 - module_param(indicate_max_pkts, int, 0); 6703 - #endif 6704 - module_param(rxsync_frequency, int, 0); 6705 - module_param(intr_type, int, 0); 6706 - module_param(lro, int, 0); 6707 - module_param(lro_max_pkts, int, 0); 6708 - 6709 6701 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) 6710 6702 { 6711 6703 if ( tx_fifo_num > 8) { ··· 6786 6832 } 6787 6833 if (dev_intr_type != MSI_X) { 6788 6834 if (pci_request_regions(pdev, s2io_driver_name)) { 6789 - DBG_PRINT(ERR_DBG, "Request Regions failed\n"), 6790 - pci_disable_device(pdev); 6835 + 
DBG_PRINT(ERR_DBG, "Request Regions failed\n"); 6836 + pci_disable_device(pdev); 6791 6837 return -ENODEV; 6792 6838 } 6793 6839 } ··· 6911 6957 /* initialize the shared memory used by the NIC and the host */ 6912 6958 if (init_shared_mem(sp)) { 6913 6959 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", 6914 - __FUNCTION__); 6960 + dev->name); 6915 6961 ret = -ENOMEM; 6916 6962 goto mem_alloc_failed; 6917 6963 } ··· 7048 7094 dev->addr_len = ETH_ALEN; 7049 7095 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); 7050 7096 7097 + /* reset Nic and bring it to known state */ 7098 + s2io_reset(sp); 7099 + 7051 7100 /* 7052 7101 * Initialize the tasklet status and link state flags 7053 7102 * and the card state parameter ··· 7088 7131 goto register_failed; 7089 7132 } 7090 7133 s2io_vpd_read(sp); 7091 - DBG_PRINT(ERR_DBG, "%s: Neterion %s",dev->name, sp->product_name); 7092 - DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n", 7093 - get_xena_rev_id(sp->pdev), 7094 - s2io_driver_version); 7095 7134 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n"); 7135 + DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name, 7136 + sp->product_name, get_xena_rev_id(sp->pdev)); 7137 + DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, 7138 + s2io_driver_version); 7096 7139 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " 7097 7140 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, 7098 7141 sp->def_mac_addr[0].mac_addr[0], ··· 7393 7436 if (ip->ihl != 5) /* IP has options */ 7394 7437 return -1; 7395 7438 7439 + /* If we see CE codepoint in IP header, packet is not mergeable */ 7440 + if (INET_ECN_is_ce(ipv4_get_dsfield(ip))) 7441 + return -1; 7442 + 7443 + /* If we see ECE or CWR flags in TCP header, packet is not mergeable */ 7396 7444 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin || 7397 - !tcp->ack) { 7445 + tcp->ece || tcp->cwr || !tcp->ack) { 7398 7446 /* 7399 7447 * Currently recognize only the ack control word and 7400 7448 * any other control field being 
set would result in ··· 7553 7591 static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, 7554 7592 u32 tcp_len) 7555 7593 { 7556 - struct sk_buff *tmp, *first = lro->parent; 7594 + struct sk_buff *first = lro->parent; 7557 7595 7558 7596 first->len += tcp_len; 7559 7597 first->data_len = lro->frags_len; 7560 7598 skb_pull(skb, (skb->len - tcp_len)); 7561 - if ((tmp = skb_shinfo(first)->frag_list)) { 7562 - while (tmp->next) 7563 - tmp = tmp->next; 7564 - tmp->next = skb; 7565 - } 7599 + if (skb_shinfo(first)->frag_list) 7600 + lro->last_frag->next = skb; 7566 7601 else 7567 7602 skb_shinfo(first)->frag_list = skb; 7603 + lro->last_frag = skb; 7568 7604 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; 7569 7605 return; 7570 7606 }
+10
drivers/net/s2io.h
··· 719 719 /* Data structure to represent a LRO session */ 720 720 typedef struct lro { 721 721 struct sk_buff *parent; 722 + struct sk_buff *last_frag; 722 723 u8 *l2h; 723 724 struct iphdr *iph; 724 725 struct tcphdr *tcph; ··· 1012 1011 static void queue_rx_frame(struct sk_buff *skb); 1013 1012 static void update_L3L4_header(nic_t *sp, lro_t *lro); 1014 1013 static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len); 1014 + 1015 + #define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size 1016 + #define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size 1017 + #define s2io_offload_type(skb) skb_shinfo(skb)->gso_type 1018 + 1019 + #define S2IO_PARM_INT(X, def_val) \ 1020 + static unsigned int X = def_val;\ 1021 + module_param(X , uint, 0); 1022 + 1015 1023 #endif /* _S2IO_H */
+5 -5
drivers/net/tg3.c
··· 68 68 69 69 #define DRV_MODULE_NAME "tg3" 70 70 #define PFX DRV_MODULE_NAME ": " 71 - #define DRV_MODULE_VERSION "3.63" 72 - #define DRV_MODULE_RELDATE "July 25, 2006" 71 + #define DRV_MODULE_VERSION "3.64" 72 + #define DRV_MODULE_RELDATE "July 31, 2006" 73 73 74 74 #define TG3_DEF_MAC_MODE 0 75 75 #define TG3_DEF_RX_MODE 0 ··· 3097 3097 * Callers depend upon this behavior and assume that 3098 3098 * we leave everything unchanged if we fail. 3099 3099 */ 3100 - skb = dev_alloc_skb(skb_size); 3100 + skb = netdev_alloc_skb(tp->dev, skb_size); 3101 3101 if (skb == NULL) 3102 3102 return -ENOMEM; 3103 3103 ··· 3270 3270 tg3_recycle_rx(tp, opaque_key, 3271 3271 desc_idx, *post_ptr); 3272 3272 3273 - copy_skb = dev_alloc_skb(len + 2); 3273 + copy_skb = netdev_alloc_skb(tp->dev, len + 2); 3274 3274 if (copy_skb == NULL) 3275 3275 goto drop_it_no_recycle; 3276 3276 ··· 8618 8618 err = -EIO; 8619 8619 8620 8620 tx_len = 1514; 8621 - skb = dev_alloc_skb(tx_len); 8621 + skb = netdev_alloc_skb(tp->dev, tx_len); 8622 8622 if (!skb) 8623 8623 return -ENOMEM; 8624 8624
+2 -2
drivers/net/wireless/zd1211rw/zd_chip.c
··· 797 797 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 }, 798 798 { CR_ZD1211_RETRY_MAX, 0x2 }, 799 799 { CR_SNIFFER_ON, 0 }, 800 - { CR_RX_FILTER, AP_RX_FILTER }, 800 + { CR_RX_FILTER, STA_RX_FILTER }, 801 801 { CR_GROUP_HASH_P1, 0x00 }, 802 802 { CR_GROUP_HASH_P2, 0x80000000 }, 803 803 { CR_REG1, 0xa4 }, ··· 844 844 { CR_ZD1211B_AIFS_CTL2, 0x008C003C }, 845 845 { CR_ZD1211B_TXOP, 0x01800824 }, 846 846 { CR_SNIFFER_ON, 0 }, 847 - { CR_RX_FILTER, AP_RX_FILTER }, 847 + { CR_RX_FILTER, STA_RX_FILTER }, 848 848 { CR_GROUP_HASH_P1, 0x00 }, 849 849 { CR_GROUP_HASH_P2, 0x80000000 }, 850 850 { CR_REG1, 0xa4 },
+6 -4
drivers/net/wireless/zd1211rw/zd_chip.h
··· 461 461 462 462 #define CR_RX_FILTER CTL_REG(0x068c) 463 463 #define RX_FILTER_ASSOC_RESPONSE 0x0002 464 + #define RX_FILTER_REASSOC_RESPONSE 0x0008 464 465 #define RX_FILTER_PROBE_RESPONSE 0x0020 465 466 #define RX_FILTER_BEACON 0x0100 467 + #define RX_FILTER_DISASSOC 0x0400 466 468 #define RX_FILTER_AUTH 0x0800 467 - /* Sniff modus sets filter to 0xfffff */ 469 + #define AP_RX_FILTER 0x0400feff 470 + #define STA_RX_FILTER 0x0000ffff 471 + 472 + /* Monitor mode sets filter to 0xfffff */ 468 473 469 474 #define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690) 470 475 #define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694) ··· 550 545 #define CR_ZD1211B_AIFS_CTL2 CTL_REG(0x0b14) 551 546 #define CR_ZD1211B_TXOP CTL_REG(0x0b20) 552 547 #define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28) 553 - 554 - #define AP_RX_FILTER 0x0400feff 555 - #define STA_RX_FILTER 0x0000ffff 556 548 557 549 #define CWIN_SIZE 0x007f043f 558 550
+8 -8
drivers/net/wireless/zd1211rw/zd_mac.c
··· 108 108 if (r) 109 109 goto disable_int; 110 110 111 - r = zd_set_encryption_type(chip, NO_WEP); 111 + /* We must inform the device that we are doing encryption/decryption in 112 + * software at the moment. */ 113 + r = zd_set_encryption_type(chip, ENC_SNIFFER); 112 114 if (r) 113 115 goto disable_int; 114 116 ··· 138 136 { 139 137 struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); 140 138 struct zd_ioreq32 ioreqs[3] = { 141 - { CR_RX_FILTER, RX_FILTER_BEACON|RX_FILTER_PROBE_RESPONSE| 142 - RX_FILTER_AUTH|RX_FILTER_ASSOC_RESPONSE }, 139 + { CR_RX_FILTER, STA_RX_FILTER }, 143 140 { CR_SNIFFER_ON, 0U }, 144 - { CR_ENCRYPTION_TYPE, NO_WEP }, 145 141 }; 146 142 147 143 if (ieee->iw_mode == IW_MODE_MONITOR) { ··· 713 713 struct zd_rt_hdr { 714 714 struct ieee80211_radiotap_header rt_hdr; 715 715 u8 rt_flags; 716 + u8 rt_rate; 716 717 u16 rt_channel; 717 718 u16 rt_chbitmask; 718 - u16 rt_rate; 719 - }; 719 + } __attribute__((packed)); 720 720 721 721 static void fill_rt_header(void *buffer, struct zd_mac *mac, 722 722 const struct ieee80211_rx_stats *stats, ··· 735 735 if (status->decryption_type & (ZD_RX_WEP64|ZD_RX_WEP128|ZD_RX_WEP256)) 736 736 hdr->rt_flags |= IEEE80211_RADIOTAP_F_WEP; 737 737 738 + hdr->rt_rate = stats->rate / 5; 739 + 738 740 /* FIXME: 802.11a */ 739 741 hdr->rt_channel = cpu_to_le16(ieee80211chan2mhz( 740 742 _zd_chip_get_channel(&mac->chip))); 741 743 hdr->rt_chbitmask = cpu_to_le16(IEEE80211_CHAN_2GHZ | 742 744 ((status->frame_status & ZD_RX_FRAME_MODULATION_MASK) == 743 745 ZD_RX_OFDM ? IEEE80211_CHAN_OFDM : IEEE80211_CHAN_CCK)); 744 - 745 - hdr->rt_rate = stats->rate / 5; 746 746 } 747 747 748 748 /* Returns 1 if the data packet is for us and 0 otherwise. */
+3 -4
drivers/net/wireless/zd1211rw/zd_usb.c
··· 323 323 { 324 324 struct zd_usb_interrupt *intr = &usb->intr; 325 325 326 - ZD_ASSERT(in_interrupt()); 327 326 spin_lock(&intr->lock); 328 327 intr->read_regs_enabled = 0; 329 328 spin_unlock(&intr->lock); ··· 544 545 * be padded. Unaligned access might also happen if the length_info 545 546 * structure is not present. 546 547 */ 547 - if (get_unaligned(&length_info->tag) == RX_LENGTH_INFO_TAG) { 548 + if (get_unaligned(&length_info->tag) == cpu_to_le16(RX_LENGTH_INFO_TAG)) 549 + { 548 550 unsigned int l, k, n; 549 551 for (i = 0, l = 0;; i++) { 550 - k = le16_to_cpu(get_unaligned( 551 - &length_info->length[i])); 552 + k = le16_to_cpu(get_unaligned(&length_info->length[i])); 552 553 n = l+k; 553 554 if (n > length) 554 555 return;
+1 -2
drivers/pci/hotplug/acpiphp_core.c
··· 27 27 * along with this program; if not, write to the Free Software 28 28 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 29 29 * 30 - * Send feedback to <gregkh@us.ibm.com>, 31 - * <t-kochi@bq.jp.nec.com> 30 + * Send feedback to <kristen.c.accardi@intel.com> 32 31 * 33 32 */ 34 33
+1 -1
drivers/pci/hotplug/acpiphp_glue.c
··· 26 26 * along with this program; if not, write to the Free Software 27 27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 28 28 * 29 - * Send feedback to <t-kochi@bq.jp.nec.com> 29 + * Send feedback to <kristen.c.accardi@intel.com> 30 30 * 31 31 */ 32 32
+20 -18
drivers/pci/pcie/portdrv_pci.c
··· 30 30 /* global data */ 31 31 static const char device_name[] = "pcieport-driver"; 32 32 33 - static int pcie_portdrv_save_config(struct pci_dev *dev) 34 - { 35 - return pci_save_state(dev); 36 - } 37 - 38 - static int pcie_portdrv_restore_config(struct pci_dev *dev) 39 - { 40 - int retval; 41 - 42 - pci_restore_state(dev); 43 - retval = pci_enable_device(dev); 44 - if (retval) 45 - return retval; 46 - pci_set_master(dev); 47 - return 0; 48 - } 49 - 50 33 /* 51 34 * pcie_portdrv_probe - Probe PCI-Express port devices 52 35 * @dev: PCI-Express port device being probed ··· 56 73 "%s->Dev[%04x:%04x] has invalid IRQ. Check vendor BIOS\n", 57 74 __FUNCTION__, dev->device, dev->vendor); 58 75 } 59 - if (pcie_port_device_register(dev)) 76 + if (pcie_port_device_register(dev)) { 77 + pci_disable_device(dev); 60 78 return -ENOMEM; 79 + } 61 80 62 81 return 0; 63 82 } ··· 71 86 } 72 87 73 88 #ifdef CONFIG_PM 89 + static int pcie_portdrv_save_config(struct pci_dev *dev) 90 + { 91 + return pci_save_state(dev); 92 + } 93 + 94 + static int pcie_portdrv_restore_config(struct pci_dev *dev) 95 + { 96 + int retval; 97 + 98 + pci_restore_state(dev); 99 + retval = pci_enable_device(dev); 100 + if (retval) 101 + return retval; 102 + pci_set_master(dev); 103 + return 0; 104 + } 105 + 74 106 static int pcie_portdrv_suspend (struct pci_dev *dev, pm_message_t state) 75 107 { 76 108 int ret = pcie_port_device_suspend(dev, state);
+7
drivers/pci/quirks.c
··· 990 990 case 0x8070: /* P4G8X Deluxe */ 991 991 asus_hides_smbus = 1; 992 992 } 993 + if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH) 994 + switch (dev->subsystem_device) { 995 + case 0x80c9: /* PU-DLS */ 996 + asus_hides_smbus = 1; 997 + } 993 998 if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) 994 999 switch (dev->subsystem_device) { 995 1000 case 0x1751: /* M2N notebook */ ··· 1063 1058 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge ); 1064 1059 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge ); 1065 1060 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge ); 1061 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge ); 1066 1062 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge ); 1067 1063 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge ); 1068 1064 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge ); ··· 1087 1081 } 1088 1082 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc ); 1089 1083 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc ); 1084 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc ); 1090 1085 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc ); 1091 1086 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc ); 1092 1087 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc );
+1 -1
drivers/pci/search.c
··· 41 41 * in the global list of PCI buses. If the bus is found, a pointer to its 42 42 * data structure is returned. If no bus is found, %NULL is returned. 43 43 */ 44 - struct pci_bus * __devinit pci_find_bus(int domain, int busnr) 44 + struct pci_bus * pci_find_bus(int domain, int busnr) 45 45 { 46 46 struct pci_bus *bus = NULL; 47 47 struct pci_bus *tmp_bus;
+6 -6
drivers/pnp/interface.c
··· 265 265 pnp_printf(buffer," disabled\n"); 266 266 else 267 267 pnp_printf(buffer," 0x%llx-0x%llx\n", 268 - pnp_port_start(dev, i), 269 - pnp_port_end(dev, i)); 268 + (unsigned long long)pnp_port_start(dev, i), 269 + (unsigned long long)pnp_port_end(dev, i)); 270 270 } 271 271 } 272 272 for (i = 0; i < PNP_MAX_MEM; i++) { ··· 276 276 pnp_printf(buffer," disabled\n"); 277 277 else 278 278 pnp_printf(buffer," 0x%llx-0x%llx\n", 279 - pnp_mem_start(dev, i), 280 - pnp_mem_end(dev, i)); 279 + (unsigned long long)pnp_mem_start(dev, i), 280 + (unsigned long long)pnp_mem_end(dev, i)); 281 281 } 282 282 } 283 283 for (i = 0; i < PNP_MAX_IRQ; i++) { ··· 287 287 pnp_printf(buffer," disabled\n"); 288 288 else 289 289 pnp_printf(buffer," %lld\n", 290 - pnp_irq(dev, i)); 290 + (unsigned long long)pnp_irq(dev, i)); 291 291 } 292 292 } 293 293 for (i = 0; i < PNP_MAX_DMA; i++) { ··· 297 297 pnp_printf(buffer," disabled\n"); 298 298 else 299 299 pnp_printf(buffer," %lld\n", 300 - pnp_dma(dev, i)); 300 + (unsigned long long)pnp_dma(dev, i)); 301 301 } 302 302 } 303 303 ret = (buffer->curr - buf);
+8
drivers/pnp/pnpacpi/rsparser.c
··· 173 173 return; 174 174 } 175 175 176 + if (p->producer_consumer == ACPI_PRODUCER) 177 + return; 178 + 176 179 if (p->resource_type == ACPI_MEMORY_RANGE) 177 180 pnpacpi_parse_allocated_memresource(res_table, 178 181 p->minimum, p->address_length); ··· 255 252 break; 256 253 257 254 case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: 255 + if (res->data.ext_address64.producer_consumer == ACPI_PRODUCER) 256 + return AE_OK; 258 257 break; 259 258 260 259 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: 260 + if (res->data.extended_irq.producer_consumer == ACPI_PRODUCER) 261 + return AE_OK; 262 + 261 263 for (i = 0; i < res->data.extended_irq.interrupt_count; i++) { 262 264 pnpacpi_parse_allocated_irqresource(res_table, 263 265 res->data.extended_irq.interrupts[i],
+2 -8
drivers/scsi/ahci.c
··· 940 940 return; 941 941 942 942 /* ignore interim PIO setup fis interrupts */ 943 - if (ata_tag_valid(ap->active_tag)) { 944 - struct ata_queued_cmd *qc = 945 - ata_qc_from_tag(ap, ap->active_tag); 946 - 947 - if (qc && qc->tf.protocol == ATA_PROT_PIO && 948 - (status & PORT_IRQ_PIOS_FIS)) 949 - return; 950 - } 943 + if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS)) 944 + return; 951 945 952 946 if (ata_ratelimit()) 953 947 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
+2
drivers/scsi/aic7xxx/aicasm/Makefile
··· 14 14 clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG) 15 15 # Override default kernel CFLAGS. This is a userland app. 16 16 AICASM_CFLAGS:= -I/usr/include -I. 17 + LEX= flex 18 + YACC= bison 17 19 YFLAGS= -d 18 20 19 21 NOMAN= noman
+1 -1
drivers/usb/Kconfig
··· 24 24 default y if ARCH_S3C2410 25 25 default y if PXA27x 26 26 default y if ARCH_EP93XX 27 - default y if ARCH_AT91RM9200 27 + default y if (ARCH_AT91RM9200 || ARCH_AT91SAM9261) 28 28 # PPC: 29 29 default y if STB03xxx 30 30 default y if PPC_MPC52xx
+10 -10
drivers/usb/core/devio.c
··· 517 517 518 518 static struct usb_device *usbdev_lookup_minor(int minor) 519 519 { 520 - struct device *device; 521 - struct usb_device *udev = NULL; 520 + struct class_device *class_dev; 521 + struct usb_device *dev = NULL; 522 522 523 523 down(&usb_device_class->sem); 524 - list_for_each_entry(device, &usb_device_class->devices, node) { 525 - if (device->devt == MKDEV(USB_DEVICE_MAJOR, minor)) { 526 - udev = device->platform_data; 524 + list_for_each_entry(class_dev, &usb_device_class->children, node) { 525 + if (class_dev->devt == MKDEV(USB_DEVICE_MAJOR, minor)) { 526 + dev = class_dev->class_data; 527 527 break; 528 528 } 529 529 } 530 530 up(&usb_device_class->sem); 531 531 532 - return udev; 532 + return dev; 533 533 }; 534 534 535 535 /* ··· 1580 1580 { 1581 1581 int minor = ((dev->bus->busnum-1) * 128) + (dev->devnum-1); 1582 1582 1583 - dev->usbfs_dev = device_create(usb_device_class, &dev->dev, 1584 - MKDEV(USB_DEVICE_MAJOR, minor), 1583 + dev->class_dev = class_device_create(usb_device_class, NULL, 1584 + MKDEV(USB_DEVICE_MAJOR, minor), &dev->dev, 1585 1585 "usbdev%d.%d", dev->bus->busnum, dev->devnum); 1586 1586 1587 - dev->usbfs_dev->platform_data = dev; 1587 + dev->class_dev->class_data = dev; 1588 1588 } 1589 1589 1590 1590 static void usbdev_remove(struct usb_device *dev) 1591 1591 { 1592 - device_unregister(dev->usbfs_dev); 1592 + class_device_unregister(dev->class_dev); 1593 1593 } 1594 1594 1595 1595 static int usbdev_notify(struct notifier_block *self, unsigned long action,
+7 -6
drivers/usb/core/file.c
··· 194 194 ++temp; 195 195 else 196 196 temp = name; 197 - intf->usb_dev = device_create(usb_class->class, &intf->dev, 198 - MKDEV(USB_MAJOR, minor), "%s", temp); 199 - if (IS_ERR(intf->usb_dev)) { 197 + intf->class_dev = class_device_create(usb_class->class, NULL, 198 + MKDEV(USB_MAJOR, minor), 199 + &intf->dev, "%s", temp); 200 + if (IS_ERR(intf->class_dev)) { 200 201 spin_lock (&minor_lock); 201 202 usb_minors[intf->minor] = NULL; 202 203 spin_unlock (&minor_lock); 203 - retval = PTR_ERR(intf->usb_dev); 204 + retval = PTR_ERR(intf->class_dev); 204 205 } 205 206 exit: 206 207 return retval; ··· 242 241 spin_unlock (&minor_lock); 243 242 244 243 snprintf(name, BUS_ID_SIZE, class_driver->name, intf->minor - minor_base); 245 - device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor)); 246 - intf->usb_dev = NULL; 244 + class_device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor)); 245 + intf->class_dev = NULL; 247 246 intf->minor = -1; 248 247 destroy_usb_class(); 249 248 }
+1 -1
drivers/usb/gadget/Kconfig
··· 207 207 208 208 config USB_GADGET_DUMMY_HCD 209 209 boolean "Dummy HCD (DEVELOPMENT)" 210 - depends on USB && EXPERIMENTAL 210 + depends on (USB=y || (USB=m && USB_GADGET=m)) && EXPERIMENTAL 211 211 select USB_GADGET_DUALSPEED 212 212 help 213 213 This host controller driver emulates USB, looping all data transfer
+112 -64
drivers/usb/gadget/at91_udc.c
··· 57 57 58 58 /* 59 59 * This controller is simple and PIO-only. It's used in many AT91-series 60 - * ARMv4T controllers, including the at91rm9200 (arm920T, with MMU), 61 - * at91sam9261 (arm926ejs, with MMU), and several no-mmu versions. 60 + * full speed USB controllers, including the at91rm9200 (arm920T, with MMU), 61 + * at91sam926x (arm926ejs, with MMU), and several no-mmu versions. 62 62 * 63 63 * This driver expects the board has been wired with two GPIOs suppporting 64 64 * a VBUS sensing IRQ, and a D+ pullup. (They may be omitted, but the 65 - * testing hasn't covered such cases.) The pullup is most important; it 65 + * testing hasn't covered such cases.) 66 + * 67 + * The pullup is most important (so it's integrated on sam926x parts). It 66 68 * provides software control over whether the host enumerates the device. 69 + * 67 70 * The VBUS sensing helps during enumeration, and allows both USB clocks 68 71 * (and the transceiver) to stay gated off until they're necessary, saving 69 - * power. During USB suspend, the 48 MHz clock is gated off. 72 + * power. During USB suspend, the 48 MHz clock is gated off in hardware; 73 + * it may also be gated off by software during some Linux sleep states. 70 74 */ 71 75 72 - #define DRIVER_VERSION "8 March 2005" 76 + #define DRIVER_VERSION "3 May 2006" 73 77 74 78 static const char driver_name [] = "at91_udc"; 75 79 static const char ep0name[] = "ep0"; ··· 320 316 * 321 317 * There are also state bits like FORCESTALL, EPEDS, DIR, and EPTYPE 322 318 * that shouldn't normally be changed. 319 + * 320 + * NOTE at91sam9260 docs mention synch between UDPCK and MCK clock domains, 321 + * implying a need to wait for one write to complete (test relevant bits) 322 + * before starting the next write. This shouldn't be an issue given how 323 + * infrequently we write, except maybe for write-then-read idioms. 
323 324 */ 324 325 #define SET_FX (AT91_UDP_TXPKTRDY) 325 - #define CLR_FX (RX_DATA_READY | AT91_UDP_RXSETUP | AT91_UDP_STALLSENT | AT91_UDP_TXCOMP) 326 + #define CLR_FX (RX_DATA_READY | AT91_UDP_RXSETUP \ 327 + | AT91_UDP_STALLSENT | AT91_UDP_TXCOMP) 326 328 327 329 /* pull OUT packet data from the endpoint's fifo */ 328 330 static int read_fifo (struct at91_ep *ep, struct at91_request *req) ··· 482 472 483 473 /*-------------------------------------------------------------------------*/ 484 474 485 - static int at91_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) 475 + static int at91_ep_enable(struct usb_ep *_ep, 476 + const struct usb_endpoint_descriptor *desc) 486 477 { 487 478 struct at91_ep *ep = container_of(_ep, struct at91_ep, ep); 488 479 struct at91_udc *dev = ep->udc; ··· 593 582 * interesting for request or buffer allocation. 594 583 */ 595 584 596 - static struct usb_request *at91_ep_alloc_request (struct usb_ep *_ep, unsigned int gfp_flags) 585 + static struct usb_request * 586 + at91_ep_alloc_request(struct usb_ep *_ep, unsigned int gfp_flags) 597 587 { 598 588 struct at91_request *req; 599 589 600 - req = kcalloc(1, sizeof (struct at91_request), SLAB_KERNEL); 590 + req = kcalloc(1, sizeof (struct at91_request), gfp_flags); 601 591 if (!req) 602 592 return NULL; 603 593 ··· 874 862 if (udc->gadget.speed == USB_SPEED_UNKNOWN) 875 863 driver = NULL; 876 864 udc->gadget.speed = USB_SPEED_UNKNOWN; 865 + udc->suspended = 0; 877 866 878 867 for (i = 0; i < NUM_ENDPOINTS; i++) { 879 868 struct at91_ep *ep = &udc->ep[i]; ··· 902 889 return; 903 890 udc->clocked = 0; 904 891 udc->gadget.speed = USB_SPEED_UNKNOWN; 905 - clk_disable(udc->iclk); 906 892 clk_disable(udc->fclk); 893 + clk_disable(udc->iclk); 907 894 } 908 895 909 896 /* ··· 924 911 at91_udp_write(AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS); 925 912 at91_set_gpio_value(udc->board.pullup_pin, 0); 926 913 clk_off(udc); 927 - 928 - // REVISIT: with transceiver disabled, will D- 
float 929 - // so that a host would falsely detect a device? 930 914 } 931 915 } 932 916 ··· 1300 1290 if (udc->wait_for_addr_ack) { 1301 1291 u32 tmp; 1302 1292 1303 - at91_udp_write(AT91_UDP_FADDR, AT91_UDP_FEN | udc->addr); 1293 + at91_udp_write(AT91_UDP_FADDR, 1294 + AT91_UDP_FEN | udc->addr); 1304 1295 tmp = at91_udp_read(AT91_UDP_GLB_STAT); 1305 1296 tmp &= ~AT91_UDP_FADDEN; 1306 1297 if (udc->addr) ··· 1372 1361 u32 rescans = 5; 1373 1362 1374 1363 while (rescans--) { 1375 - u32 status = at91_udp_read(AT91_UDP_ISR); 1364 + u32 status; 1376 1365 1377 - status &= at91_udp_read(AT91_UDP_IMR); 1366 + status = at91_udp_read(AT91_UDP_ISR) 1367 + & at91_udp_read(AT91_UDP_IMR); 1378 1368 if (!status) 1379 1369 break; 1380 1370 ··· 1391 1379 stop_activity(udc); 1392 1380 1393 1381 /* enable ep0 */ 1394 - at91_udp_write(AT91_UDP_CSR(0), AT91_UDP_EPEDS | AT91_UDP_EPTYPE_CTRL); 1382 + at91_udp_write(AT91_UDP_CSR(0), 1383 + AT91_UDP_EPEDS | AT91_UDP_EPTYPE_CTRL); 1395 1384 udc->gadget.speed = USB_SPEED_FULL; 1396 1385 udc->suspended = 0; 1397 1386 at91_udp_write(AT91_UDP_IER, AT91_UDP_EP(0)); 1398 1387 1399 1388 /* 1400 1389 * NOTE: this driver keeps clocks off unless the 1401 - * USB host is present. That saves power, and also 1402 - * eliminates IRQs (reset, resume, suspend) that can 1403 - * otherwise flood from the controller. If your 1404 - * board doesn't support VBUS detection, suspend and 1405 - * resume irq logic may need more attention... 1390 + * USB host is present. That saves power, but for 1391 + * boards that don't support VBUS detection, both 1392 + * clocks need to be active most of the time. 
1406 1393 */ 1407 1394 1408 1395 /* host initiated suspend (3+ms bus idle) */ ··· 1463 1452 1464 1453 /*-------------------------------------------------------------------------*/ 1465 1454 1455 + static void nop_release(struct device *dev) 1456 + { 1457 + /* nothing to free */ 1458 + } 1459 + 1466 1460 static struct at91_udc controller = { 1467 1461 .gadget = { 1468 - .ops = &at91_udc_ops, 1469 - .ep0 = &controller.ep[0].ep, 1470 - .name = driver_name, 1471 - .dev = { 1472 - .bus_id = "gadget" 1462 + .ops = &at91_udc_ops, 1463 + .ep0 = &controller.ep[0].ep, 1464 + .name = driver_name, 1465 + .dev = { 1466 + .bus_id = "gadget", 1467 + .release = nop_release, 1473 1468 } 1474 1469 }, 1475 1470 .ep[0] = { ··· 1485 1468 }, 1486 1469 .udc = &controller, 1487 1470 .maxpacket = 8, 1488 - .creg = (void __iomem *)(AT91_VA_BASE_UDP + AT91_UDP_CSR(0)), 1471 + .creg = (void __iomem *)(AT91_VA_BASE_UDP 1472 + + AT91_UDP_CSR(0)), 1489 1473 .int_mask = 1 << 0, 1490 1474 }, 1491 1475 .ep[1] = { ··· 1497 1479 .udc = &controller, 1498 1480 .is_pingpong = 1, 1499 1481 .maxpacket = 64, 1500 - .creg = (void __iomem *)(AT91_VA_BASE_UDP + AT91_UDP_CSR(1)), 1482 + .creg = (void __iomem *)(AT91_VA_BASE_UDP 1483 + + AT91_UDP_CSR(1)), 1501 1484 .int_mask = 1 << 1, 1502 1485 }, 1503 1486 .ep[2] = { ··· 1509 1490 .udc = &controller, 1510 1491 .is_pingpong = 1, 1511 1492 .maxpacket = 64, 1512 - .creg = (void __iomem *)(AT91_VA_BASE_UDP + AT91_UDP_CSR(2)), 1493 + .creg = (void __iomem *)(AT91_VA_BASE_UDP 1494 + + AT91_UDP_CSR(2)), 1513 1495 .int_mask = 1 << 2, 1514 1496 }, 1515 1497 .ep[3] = { ··· 1521 1501 }, 1522 1502 .udc = &controller, 1523 1503 .maxpacket = 8, 1524 - .creg = (void __iomem *)(AT91_VA_BASE_UDP + AT91_UDP_CSR(3)), 1504 + .creg = (void __iomem *)(AT91_VA_BASE_UDP 1505 + + AT91_UDP_CSR(3)), 1525 1506 .int_mask = 1 << 3, 1526 1507 }, 1527 1508 .ep[4] = { ··· 1533 1512 .udc = &controller, 1534 1513 .is_pingpong = 1, 1535 1514 .maxpacket = 256, 1536 - .creg = (void __iomem 
*)(AT91_VA_BASE_UDP + AT91_UDP_CSR(4)), 1515 + .creg = (void __iomem *)(AT91_VA_BASE_UDP 1516 + + AT91_UDP_CSR(4)), 1537 1517 .int_mask = 1 << 4, 1538 1518 }, 1539 1519 .ep[5] = { ··· 1545 1523 .udc = &controller, 1546 1524 .is_pingpong = 1, 1547 1525 .maxpacket = 256, 1548 - .creg = (void __iomem *)(AT91_VA_BASE_UDP + AT91_UDP_CSR(5)), 1526 + .creg = (void __iomem *)(AT91_VA_BASE_UDP 1527 + + AT91_UDP_CSR(5)), 1549 1528 .int_mask = 1 << 5, 1550 1529 }, 1551 - /* ep6 and ep7 are also reserved */ 1530 + /* ep6 and ep7 are also reserved (custom silicon might use them) */ 1552 1531 }; 1553 1532 1554 1533 static irqreturn_t at91_vbus_irq(int irq, void *_udc, struct pt_regs *r) ··· 1616 1593 1617 1594 local_irq_disable(); 1618 1595 udc->enabled = 0; 1596 + at91_udp_write(AT91_UDP_IDR, ~0); 1619 1597 pullup(udc, 0); 1620 1598 local_irq_enable(); 1621 1599 ··· 1648 1624 return -ENODEV; 1649 1625 } 1650 1626 1627 + if (pdev->num_resources != 2) { 1628 + DBG("invalid num_resources"); 1629 + return -ENODEV; 1630 + } 1631 + if ((pdev->resource[0].flags != IORESOURCE_MEM) 1632 + || (pdev->resource[1].flags != IORESOURCE_IRQ)) { 1633 + DBG("invalid resource type"); 1634 + return -ENODEV; 1635 + } 1636 + 1651 1637 if (!request_mem_region(AT91_BASE_UDP, SZ_16K, driver_name)) { 1652 1638 DBG("someone's using UDC memory\n"); 1653 1639 return -EBUSY; ··· 1683 1649 if (retval < 0) 1684 1650 goto fail0; 1685 1651 1686 - /* disable everything until there's a gadget driver and vbus */ 1687 - pullup(udc, 0); 1652 + /* don't do anything until we have both gadget driver and VBUS */ 1653 + clk_enable(udc->iclk); 1654 + at91_udp_write(AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS); 1655 + at91_udp_write(AT91_UDP_IDR, 0xffffffff); 1656 + clk_disable(udc->iclk); 1688 1657 1689 1658 /* request UDC and maybe VBUS irqs */ 1690 - if (request_irq(AT91_ID_UDP, at91_udc_irq, IRQF_DISABLED, driver_name, udc)) { 1691 - DBG("request irq %d failed\n", AT91_ID_UDP); 1659 + udc->udp_irq = platform_get_irq(pdev, 0); 
1660 + if (request_irq(udc->udp_irq, at91_udc_irq, 1661 + IRQF_DISABLED, driver_name, udc)) { 1662 + DBG("request irq %d failed\n", udc->udp_irq); 1692 1663 retval = -EBUSY; 1693 1664 goto fail1; 1694 1665 } 1695 1666 if (udc->board.vbus_pin > 0) { 1696 - if (request_irq(udc->board.vbus_pin, at91_vbus_irq, IRQF_DISABLED, driver_name, udc)) { 1697 - DBG("request vbus irq %d failed\n", udc->board.vbus_pin); 1698 - free_irq(AT91_ID_UDP, udc); 1667 + if (request_irq(udc->board.vbus_pin, at91_vbus_irq, 1668 + IRQF_DISABLED, driver_name, udc)) { 1669 + DBG("request vbus irq %d failed\n", 1670 + udc->board.vbus_pin); 1671 + free_irq(udc->udp_irq, udc); 1699 1672 retval = -EBUSY; 1700 1673 goto fail1; 1701 1674 } ··· 1711 1670 udc->vbus = 1; 1712 1671 } 1713 1672 dev_set_drvdata(dev, udc); 1673 + device_init_wakeup(dev, 1); 1714 1674 create_debug_file(udc); 1715 1675 1716 1676 INFO("%s version %s\n", driver_name, DRIVER_VERSION); ··· 1720 1678 fail1: 1721 1679 device_unregister(&udc->gadget.dev); 1722 1680 fail0: 1723 - release_mem_region(AT91_VA_BASE_UDP, SZ_16K); 1681 + release_mem_region(AT91_BASE_UDP, SZ_16K); 1724 1682 DBG("%s probe failed, %d\n", driver_name, retval); 1725 1683 return retval; 1726 1684 } 1727 1685 1728 - static int __devexit at91udc_remove(struct platform_device *dev) 1686 + static int __devexit at91udc_remove(struct platform_device *pdev) 1729 1687 { 1730 - struct at91_udc *udc = platform_get_drvdata(dev); 1688 + struct at91_udc *udc = platform_get_drvdata(pdev); 1731 1689 1732 1690 DBG("remove\n"); 1733 1691 ··· 1736 1694 if (udc->driver != 0) 1737 1695 usb_gadget_unregister_driver(udc->driver); 1738 1696 1697 + device_init_wakeup(&pdev->dev, 0); 1739 1698 remove_debug_file(udc); 1740 1699 if (udc->board.vbus_pin > 0) 1741 1700 free_irq(udc->board.vbus_pin, udc); 1742 - free_irq(AT91_ID_UDP, udc); 1701 + free_irq(udc->udp_irq, udc); 1743 1702 device_unregister(&udc->gadget.dev); 1744 1703 release_mem_region(AT91_BASE_UDP, SZ_16K); 1745 1704 ··· 
1751 1708 } 1752 1709 1753 1710 #ifdef CONFIG_PM 1754 - static int at91udc_suspend(struct platform_device *dev, pm_message_t mesg) 1711 + static int at91udc_suspend(struct platform_device *pdev, pm_message_t mesg) 1755 1712 { 1756 - struct at91_udc *udc = platform_get_drvdata(dev); 1713 + struct at91_udc *udc = platform_get_drvdata(pdev); 1714 + int wake = udc->driver && device_may_wakeup(&pdev->dev); 1757 1715 1758 - /* 1759 - * The "safe" suspend transitions are opportunistic ... e.g. when 1760 - * the USB link is suspended (48MHz clock autogated off), or when 1761 - * it's disconnected (programmatically gated off, elsewhere). 1762 - * Then we can suspend, and the chip can enter slow clock mode. 1763 - * 1764 - * The problem case is some component (user mode?) suspending this 1765 - * device while it's active, with the 48 MHz clock in use. There 1766 - * are two basic approaches: (a) veto suspend levels involving slow 1767 - * clock mode, (b) disconnect, so 48 MHz will no longer be in use 1768 - * and we can enter slow clock mode. This uses (b) for now, since 1769 - * it's simplest until AT91 PM exists and supports the other option. 1716 + /* Unless we can act normally to the host (letting it wake us up 1717 + * whenever it has work for us) force disconnect. Wakeup requires 1718 + * PLLB for USB events (signaling for reset, wakeup, or incoming 1719 + * tokens) and VBUS irqs (on systems which support them). 
1770 1720 */ 1771 - if (udc->vbus && !udc->suspended) 1721 + if ((!udc->suspended && udc->addr) 1722 + || !wake 1723 + || at91_suspend_entering_slow_clock()) { 1772 1724 pullup(udc, 0); 1725 + disable_irq_wake(udc->udp_irq); 1726 + } else 1727 + enable_irq_wake(udc->udp_irq); 1728 + 1729 + if (udc->board.vbus_pin > 0) { 1730 + if (wake) 1731 + enable_irq_wake(udc->board.vbus_pin); 1732 + else 1733 + disable_irq_wake(udc->board.vbus_pin); 1734 + } 1773 1735 return 0; 1774 1736 } 1775 1737 1776 - static int at91udc_resume(struct platform_device *dev) 1738 + static int at91udc_resume(struct platform_device *pdev) 1777 1739 { 1778 - struct at91_udc *udc = platform_get_drvdata(dev); 1740 + struct at91_udc *udc = platform_get_drvdata(pdev); 1779 1741 1780 1742 /* maybe reconnect to host; if so, clocks on */ 1781 1743 pullup(udc, 1); ··· 1796 1748 .remove = __devexit_p(at91udc_remove), 1797 1749 .shutdown = at91udc_shutdown, 1798 1750 .suspend = at91udc_suspend, 1799 - .resume = at91udc_resume, 1751 + .resume = at91udc_resume, 1800 1752 .driver = { 1801 1753 .name = (char *) driver_name, 1802 1754 .owner = THIS_MODULE, ··· 1815 1767 } 1816 1768 module_exit(udc_exit_module); 1817 1769 1818 - MODULE_DESCRIPTION("AT91RM9200 udc driver"); 1770 + MODULE_DESCRIPTION("AT91 udc driver"); 1819 1771 MODULE_AUTHOR("Thomas Rathbone, David Brownell"); 1820 1772 MODULE_LICENSE("GPL");
+1
drivers/usb/gadget/at91_udc.h
··· 141 141 struct clk *iclk, *fclk; 142 142 struct platform_device *pdev; 143 143 struct proc_dir_entry *pde; 144 + int udp_irq; 144 145 }; 145 146 146 147 static inline struct at91_udc *to_udc(struct usb_gadget *g)
+4 -2
drivers/usb/gadget/dummy_hcd.c
··· 609 609 if (!dum->driver) 610 610 return -ESHUTDOWN; 611 611 612 - spin_lock_irqsave (&dum->lock, flags); 612 + local_irq_save (flags); 613 + spin_lock (&dum->lock); 613 614 list_for_each_entry (req, &ep->queue, queue) { 614 615 if (&req->req == _req) { 615 616 list_del_init (&req->queue); ··· 619 618 break; 620 619 } 621 620 } 622 - spin_unlock_irqrestore (&dum->lock, flags); 621 + spin_unlock (&dum->lock); 623 622 624 623 if (retval == 0) { 625 624 dev_dbg (udc_dev(dum), ··· 627 626 req, _ep->name, _req->length, _req->buf); 628 627 _req->complete (_ep, _req); 629 628 } 629 + local_irq_restore (flags); 630 630 return retval; 631 631 } 632 632
+1 -1
drivers/usb/host/ehci-hcd.c
··· 892 892 #define PCI_DRIVER ehci_pci_driver 893 893 #endif 894 894 895 - #ifdef CONFIG_PPC_83xx 895 + #ifdef CONFIG_MPC834x 896 896 #include "ehci-fsl.c" 897 897 #define PLATFORM_DRIVER ehci_fsl_driver 898 898 #endif
+59 -29
drivers/usb/host/ohci-at91.c
··· 4 4 * Copyright (C) 2004 SAN People (Pty) Ltd. 5 5 * Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org> 6 6 * 7 - * AT91RM9200 Bus Glue 7 + * AT91 Bus Glue 8 8 * 9 9 * Based on fragments of 2.4 driver by Rick Bronson. 10 10 * Based on ohci-omap.c ··· 19 19 #include <asm/hardware.h> 20 20 #include <asm/arch/board.h> 21 21 22 - #ifndef CONFIG_ARCH_AT91RM9200 23 - #error "CONFIG_ARCH_AT91RM9200 must be defined." 22 + #ifndef CONFIG_ARCH_AT91 23 + #error "CONFIG_ARCH_AT91 must be defined." 24 24 #endif 25 25 26 26 /* interface and function clocks */ 27 27 static struct clk *iclk, *fclk; 28 + static int clocked; 28 29 29 30 extern int usb_disabled(void); 30 31 ··· 36 35 struct usb_hcd *hcd = platform_get_drvdata(pdev); 37 36 struct ohci_regs __iomem *regs = hcd->regs; 38 37 39 - dev_dbg(&pdev->dev, "starting AT91RM9200 OHCI USB Controller\n"); 38 + dev_dbg(&pdev->dev, "start\n"); 40 39 41 40 /* 42 41 * Start the USB clocks. 43 42 */ 44 43 clk_enable(iclk); 45 44 clk_enable(fclk); 45 + clocked = 1; 46 46 47 47 /* 48 48 * The USB host controller must remain in reset. ··· 56 54 struct usb_hcd *hcd = platform_get_drvdata(pdev); 57 55 struct ohci_regs __iomem *regs = hcd->regs; 58 56 59 - dev_dbg(&pdev->dev, "stopping AT91RM9200 OHCI USB Controller\n"); 57 + dev_dbg(&pdev->dev, "stop\n"); 60 58 61 59 /* 62 60 * Put the USB host controller into reset. ··· 68 66 */ 69 67 clk_disable(fclk); 70 68 clk_disable(iclk); 69 + clocked = 0; 71 70 } 72 71 73 72 ··· 81 78 82 79 83 80 /** 84 - * usb_hcd_at91_probe - initialize AT91RM9200-based HCDs 81 + * usb_hcd_at91_probe - initialize AT91-based HCDs 85 82 * Context: !in_interrupt() 86 83 * 87 84 * Allocates basic resources for this USB host controller, and 88 85 * then invokes the start() method for the HCD associated with it 89 86 * through the hotplug entry's driver_data. 
90 87 */ 91 - int usb_hcd_at91_probe (const struct hc_driver *driver, struct platform_device *pdev) 88 + static int usb_hcd_at91_probe(const struct hc_driver *driver, 89 + struct platform_device *pdev) 92 90 { 93 91 int retval; 94 92 struct usb_hcd *hcd = NULL; ··· 99 95 return -ENODEV; 100 96 } 101 97 102 - if ((pdev->resource[0].flags != IORESOURCE_MEM) || (pdev->resource[1].flags != IORESOURCE_IRQ)) { 98 + if ((pdev->resource[0].flags != IORESOURCE_MEM) 99 + || (pdev->resource[1].flags != IORESOURCE_IRQ)) { 103 100 pr_debug("hcd probe: invalid resource type\n"); 104 101 return -ENODEV; 105 102 } 106 103 107 - hcd = usb_create_hcd(driver, &pdev->dev, "at91rm9200"); 104 + hcd = usb_create_hcd(driver, &pdev->dev, "at91"); 108 105 if (!hcd) 109 106 return -ENOMEM; 110 107 hcd->rsrc_start = pdev->resource[0].start; ··· 154 149 /* may be called with controller, bus, and devices active */ 155 150 156 151 /** 157 - * usb_hcd_at91_remove - shutdown processing for AT91RM9200-based HCDs 152 + * usb_hcd_at91_remove - shutdown processing for AT91-based HCDs 158 153 * @dev: USB Host Controller being removed 159 154 * Context: !in_interrupt() 160 155 * 161 156 * Reverses the effect of usb_hcd_at91_probe(), first invoking 162 157 * the HCD's stop() method. It is always called from a thread 163 - * context, normally "rmmod", "apmd", or something similar. 158 + * context, "rmmod" or something similar. 
164 159 * 165 160 */ 166 - static int usb_hcd_at91_remove (struct usb_hcd *hcd, struct platform_device *pdev) 161 + static int usb_hcd_at91_remove(struct usb_hcd *hcd, 162 + struct platform_device *pdev) 167 163 { 168 164 usb_remove_hcd(hcd); 169 165 at91_stop_hc(pdev); 170 166 iounmap(hcd->regs); 171 167 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 168 + disable_irq_wake(hcd->irq); 172 169 173 170 clk_put(fclk); 174 171 clk_put(iclk); ··· 185 178 static int __devinit 186 179 ohci_at91_start (struct usb_hcd *hcd) 187 180 { 188 - // struct at91_ohci_data *board = hcd->self.controller->platform_data; 181 + struct at91_usbh_data *board = hcd->self.controller->platform_data; 189 182 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 183 + struct usb_device *root = hcd->self.root_hub; 190 184 int ret; 191 185 192 186 if ((ret = ohci_init(ohci)) < 0) 193 187 return ret; 188 + 189 + root->maxchild = board->ports; 194 190 195 191 if ((ret = ohci_run(ohci)) < 0) { 196 192 err("can't start %s", hcd->self.bus_name); 197 193 ohci_stop(hcd); 198 194 return ret; 199 195 } 200 - // hcd->self.root_hub->maxchild = board->ports; 201 196 return 0; 202 197 } 203 198 ··· 207 198 208 199 static const struct hc_driver ohci_at91_hc_driver = { 209 200 .description = hcd_name, 210 - .product_desc = "AT91RM9200 OHCI", 201 + .product_desc = "AT91 OHCI", 211 202 .hcd_priv_size = sizeof(struct ohci_hcd), 212 203 213 204 /* ··· 249 240 250 241 /*-------------------------------------------------------------------------*/ 251 242 252 - static int ohci_hcd_at91_drv_probe(struct platform_device *dev) 243 + static int ohci_hcd_at91_drv_probe(struct platform_device *pdev) 253 244 { 254 - return usb_hcd_at91_probe(&ohci_at91_hc_driver, dev); 245 + device_init_wakeup(&pdev->dev, 1); 246 + return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev); 255 247 } 256 248 257 - static int ohci_hcd_at91_drv_remove(struct platform_device *dev) 249 + static int ohci_hcd_at91_drv_remove(struct platform_device *pdev) 258 
250 { 259 - return usb_hcd_at91_remove(platform_get_drvdata(dev), dev); 251 + device_init_wakeup(&pdev->dev, 0); 252 + return usb_hcd_at91_remove(platform_get_drvdata(pdev), pdev); 260 253 } 261 254 262 255 #ifdef CONFIG_PM 263 256 264 - /* REVISIT suspend/resume look "too" simple here */ 265 - 266 257 static int 267 - ohci_hcd_at91_drv_suspend(struct platform_device *dev, pm_message_t mesg) 258 + ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg) 268 259 { 269 - clk_disable(fclk); 270 - clk_disable(iclk); 260 + struct usb_hcd *hcd = platform_get_drvdata(pdev); 261 + struct ohci_hcd *ohci = hcd_to_ohci(hcd); 262 + 263 + if (device_may_wakeup(&pdev->dev)) 264 + enable_irq_wake(hcd->irq); 265 + else 266 + disable_irq_wake(hcd->irq); 267 + 268 + /* 269 + * The integrated transceivers seem unable to notice disconnect, 270 + * reconnect, or wakeup without the 48 MHz clock active. so for 271 + * correctness, always discard connection state (using reset). 272 + * 273 + * REVISIT: some boards will be able to turn VBUS off... 
274 + */ 275 + if (at91_suspend_entering_slow_clock()) { 276 + ohci_usb_reset (ohci); 277 + clk_disable(fclk); 278 + clk_disable(iclk); 279 + clocked = 0; 280 + } 271 281 272 282 return 0; 273 283 } 274 284 275 - static int ohci_hcd_at91_drv_resume(struct platform_device *dev) 285 + static int ohci_hcd_at91_drv_resume(struct platform_device *pdev) 276 286 { 277 - clk_enable(iclk); 278 - clk_enable(fclk); 287 + if (!clocked) { 288 + clk_enable(iclk); 289 + clk_enable(fclk); 290 + } 279 291 280 292 return 0; 281 293 } ··· 305 275 #define ohci_hcd_at91_drv_resume NULL 306 276 #endif 307 277 308 - MODULE_ALIAS("at91rm9200-ohci"); 278 + MODULE_ALIAS("at91_ohci"); 309 279 310 280 static struct platform_driver ohci_hcd_at91_driver = { 311 281 .probe = ohci_hcd_at91_drv_probe, ··· 313 283 .suspend = ohci_hcd_at91_drv_suspend, 314 284 .resume = ohci_hcd_at91_drv_resume, 315 285 .driver = { 316 - .name = "at91rm9200-ohci", 286 + .name = "at91_ohci", 317 287 .owner = THIS_MODULE, 318 288 }, 319 289 };
+2 -1
drivers/usb/host/ohci-hcd.c
··· 913 913 #include "ohci-ppc-soc.c" 914 914 #endif 915 915 916 - #ifdef CONFIG_ARCH_AT91RM9200 916 + #if defined(CONFIG_ARCH_AT91RM9200) || defined(CONFIG_ARCH_AT91SAM9261) 917 917 #include "ohci-at91.c" 918 918 #endif 919 919 ··· 927 927 || defined (CONFIG_SOC_AU1X00) \ 928 928 || defined (CONFIG_USB_OHCI_HCD_PPC_SOC) \ 929 929 || defined (CONFIG_ARCH_AT91RM9200) \ 930 + || defined (CONFIG_ARCH_AT91SAM9261) \ 930 931 ) 931 932 #error "missing bus glue for ohci-hcd" 932 933 #endif
+3 -1
drivers/usb/host/uhci-q.c
··· 943 943 /* We received a short packet */ 944 944 if (urb->transfer_flags & URB_SHORT_NOT_OK) 945 945 ret = -EREMOTEIO; 946 - else if (ctrlstat & TD_CTRL_SPD) 946 + 947 + /* Fixup needed only if this isn't the URB's last TD */ 948 + else if (&td->list != urbp->td_list.prev) 947 949 ret = 1; 948 950 } 949 951
+98 -74
drivers/usb/input/ati_remote.c
··· 111 111 #define NAME_BUFSIZE 80 /* size of product name, path buffers */ 112 112 #define DATA_BUFSIZE 63 /* size of URB data buffers */ 113 113 114 + /* 115 + * Duplicate event filtering time. 116 + * Sequential, identical KIND_FILTERED inputs with less than 117 + * FILTER_TIME milliseconds between them are considered as repeat 118 + * events. The hardware generates 5 events for the first keypress 119 + * and we have to take this into account for an accurate repeat 120 + * behaviour. 121 + */ 122 + #define FILTER_TIME 60 /* msec */ 123 + 114 124 static unsigned long channel_mask; 115 - module_param(channel_mask, ulong, 0444); 125 + module_param(channel_mask, ulong, 0644); 116 126 MODULE_PARM_DESC(channel_mask, "Bitmask of remote control channels to ignore"); 117 127 118 128 static int debug; 119 - module_param(debug, int, 0444); 129 + module_param(debug, int, 0644); 120 130 MODULE_PARM_DESC(debug, "Enable extra debug messages and information"); 131 + 132 + static int repeat_filter = FILTER_TIME; 133 + module_param(repeat_filter, int, 0644); 134 + MODULE_PARM_DESC(repeat_filter, "Repeat filter time, default = 60 msec"); 121 135 122 136 #define dbginfo(dev, format, arg...) do { if (debug) dev_info(dev , format , ## arg); } while (0) 123 137 #undef err ··· 156 142 /* Device initialization strings */ 157 143 static char init1[] = { 0x01, 0x00, 0x20, 0x14 }; 158 144 static char init2[] = { 0x01, 0x00, 0x20, 0x14, 0x20, 0x20, 0x20 }; 159 - 160 - /* Acceleration curve for directional control pad */ 161 - static const char accel[] = { 1, 2, 4, 6, 9, 13, 20 }; 162 - 163 - /* Duplicate event filtering time. 164 - * Sequential, identical KIND_FILTERED inputs with less than 165 - * FILTER_TIME jiffies between them are considered as repeat 166 - * events. The hardware generates 5 events for the first keypress 167 - * and we have to take this into account for an accurate repeat 168 - * behaviour. 169 - * (HZ / 20) == 50 ms and works well for me. 
170 - */ 171 - #define FILTER_TIME (HZ / 20) 172 145 173 146 struct ati_remote { 174 147 struct input_dev *idev; ··· 414 413 } 415 414 416 415 /* 416 + * ati_remote_compute_accel 417 + * 418 + * Implements acceleration curve for directional control pad 419 + * If elapsed time since last event is > 1/4 second, user "stopped", 420 + * so reset acceleration. Otherwise, user is probably holding the control 421 + * pad down, so we increase acceleration, ramping up over two seconds to 422 + * a maximum speed. 423 + */ 424 + static int ati_remote_compute_accel(struct ati_remote *ati_remote) 425 + { 426 + static const char accel[] = { 1, 2, 4, 6, 9, 13, 20 }; 427 + unsigned long now = jiffies; 428 + int acc; 429 + 430 + if (time_after(now, ati_remote->old_jiffies + msecs_to_jiffies(250))) { 431 + acc = 1; 432 + ati_remote->acc_jiffies = now; 433 + } 434 + else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(125))) 435 + acc = accel[0]; 436 + else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(250))) 437 + acc = accel[1]; 438 + else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(500))) 439 + acc = accel[2]; 440 + else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(1000))) 441 + acc = accel[3]; 442 + else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(1500))) 443 + acc = accel[4]; 444 + else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(2000))) 445 + acc = accel[5]; 446 + else 447 + acc = accel[6]; 448 + 449 + return acc; 450 + } 451 + 452 + /* 417 453 * ati_remote_report_input 418 454 */ 419 455 static void ati_remote_input_report(struct urb *urb, struct pt_regs *regs) ··· 503 465 504 466 if (ati_remote_tbl[index].kind == KIND_FILTERED) { 505 467 /* Filter duplicate events which happen "too close" together. 
*/ 506 - if ((ati_remote->old_data[0] == data[1]) && 507 - (ati_remote->old_data[1] == data[2]) && 508 - time_before(jiffies, ati_remote->old_jiffies + FILTER_TIME)) { 468 + if (ati_remote->old_data[0] == data[1] && 469 + ati_remote->old_data[1] == data[2] && 470 + time_before(jiffies, ati_remote->old_jiffies + msecs_to_jiffies(repeat_filter))) { 509 471 ati_remote->repeat_count++; 510 472 } else { 511 473 ati_remote->repeat_count = 0; ··· 515 477 ati_remote->old_data[1] = data[2]; 516 478 ati_remote->old_jiffies = jiffies; 517 479 518 - if ((ati_remote->repeat_count > 0) 519 - && (ati_remote->repeat_count < 5)) 480 + if (ati_remote->repeat_count > 0 && 481 + ati_remote->repeat_count < 5) 520 482 return; 521 483 522 484 523 485 input_regs(dev, regs); 524 486 input_event(dev, ati_remote_tbl[index].type, 525 487 ati_remote_tbl[index].code, 1); 488 + input_sync(dev); 526 489 input_event(dev, ati_remote_tbl[index].type, 527 490 ati_remote_tbl[index].code, 0); 528 491 input_sync(dev); 529 492 530 - return; 531 - } 493 + } else { 532 494 533 - /* 534 - * Other event kinds are from the directional control pad, and have an 535 - * acceleration factor applied to them. Without this acceleration, the 536 - * control pad is mostly unusable. 537 - * 538 - * If elapsed time since last event is > 1/4 second, user "stopped", 539 - * so reset acceleration. Otherwise, user is probably holding the control 540 - * pad down, so we increase acceleration, ramping up over two seconds to 541 - * a maximum speed. The acceleration curve is #defined above. 
542 - */ 543 - if (time_after(jiffies, ati_remote->old_jiffies + (HZ >> 2))) { 544 - acc = 1; 545 - ati_remote->acc_jiffies = jiffies; 546 - } 547 - else if (time_before(jiffies, ati_remote->acc_jiffies + (HZ >> 3))) acc = accel[0]; 548 - else if (time_before(jiffies, ati_remote->acc_jiffies + (HZ >> 2))) acc = accel[1]; 549 - else if (time_before(jiffies, ati_remote->acc_jiffies + (HZ >> 1))) acc = accel[2]; 550 - else if (time_before(jiffies, ati_remote->acc_jiffies + HZ)) acc = accel[3]; 551 - else if (time_before(jiffies, ati_remote->acc_jiffies + HZ+(HZ>>1))) acc = accel[4]; 552 - else if (time_before(jiffies, ati_remote->acc_jiffies + (HZ << 1))) acc = accel[5]; 553 - else acc = accel[6]; 495 + /* 496 + * Other event kinds are from the directional control pad, and have an 497 + * acceleration factor applied to them. Without this acceleration, the 498 + * control pad is mostly unusable. 499 + */ 500 + acc = ati_remote_compute_accel(ati_remote); 554 501 555 - input_regs(dev, regs); 556 - switch (ati_remote_tbl[index].kind) { 557 - case KIND_ACCEL: 558 - input_event(dev, ati_remote_tbl[index].type, 559 - ati_remote_tbl[index].code, 560 - ati_remote_tbl[index].value * acc); 561 - break; 562 - case KIND_LU: 563 - input_report_rel(dev, REL_X, -acc); 564 - input_report_rel(dev, REL_Y, -acc); 565 - break; 566 - case KIND_RU: 567 - input_report_rel(dev, REL_X, acc); 568 - input_report_rel(dev, REL_Y, -acc); 569 - break; 570 - case KIND_LD: 571 - input_report_rel(dev, REL_X, -acc); 572 - input_report_rel(dev, REL_Y, acc); 573 - break; 574 - case KIND_RD: 575 - input_report_rel(dev, REL_X, acc); 576 - input_report_rel(dev, REL_Y, acc); 577 - break; 578 - default: 579 - dev_dbg(&ati_remote->interface->dev, "ati_remote kind=%d\n", 580 - ati_remote_tbl[index].kind); 581 - } 582 - input_sync(dev); 502 + input_regs(dev, regs); 503 + switch (ati_remote_tbl[index].kind) { 504 + case KIND_ACCEL: 505 + input_event(dev, ati_remote_tbl[index].type, 506 + 
ati_remote_tbl[index].code, 507 + ati_remote_tbl[index].value * acc); 508 + break; 509 + case KIND_LU: 510 + input_report_rel(dev, REL_X, -acc); 511 + input_report_rel(dev, REL_Y, -acc); 512 + break; 513 + case KIND_RU: 514 + input_report_rel(dev, REL_X, acc); 515 + input_report_rel(dev, REL_Y, -acc); 516 + break; 517 + case KIND_LD: 518 + input_report_rel(dev, REL_X, -acc); 519 + input_report_rel(dev, REL_Y, acc); 520 + break; 521 + case KIND_RD: 522 + input_report_rel(dev, REL_X, acc); 523 + input_report_rel(dev, REL_Y, acc); 524 + break; 525 + default: 526 + dev_dbg(&ati_remote->interface->dev, "ati_remote kind=%d\n", 527 + ati_remote_tbl[index].kind); 528 + } 529 + input_sync(dev); 583 530 584 - ati_remote->old_jiffies = jiffies; 585 - ati_remote->old_data[0] = data[1]; 586 - ati_remote->old_data[1] = data[2]; 531 + ati_remote->old_jiffies = jiffies; 532 + ati_remote->old_data[0] = data[1]; 533 + ati_remote->old_data[1] = data[2]; 534 + } 587 535 } 588 536 589 537 /*
+2 -1
drivers/usb/input/hid-input.c
··· 607 607 608 608 } 609 609 610 - if (usage->hat_min < usage->hat_max || usage->hat_dir) { 610 + if (usage->type == EV_ABS && 611 + (usage->hat_min < usage->hat_max || usage->hat_dir)) { 611 612 int i; 612 613 for (i = usage->code; i < usage->code + 2 && i <= max; i++) { 613 614 input_set_abs_params(input, i, -1, 1, 0, 0);
+38 -34
drivers/usb/input/hiddev.c
··· 49 49 int open; 50 50 wait_queue_head_t wait; 51 51 struct hid_device *hid; 52 - struct hiddev_list *list; 52 + struct list_head list; 53 53 }; 54 54 55 55 struct hiddev_list { ··· 59 59 unsigned flags; 60 60 struct fasync_struct *fasync; 61 61 struct hiddev *hiddev; 62 - struct hiddev_list *next; 62 + struct list_head node; 63 63 }; 64 64 65 65 static struct hiddev *hiddev_table[HIDDEV_MINORS]; ··· 73 73 static struct hid_report * 74 74 hiddev_lookup_report(struct hid_device *hid, struct hiddev_report_info *rinfo) 75 75 { 76 - unsigned flags = rinfo->report_id & ~HID_REPORT_ID_MASK; 76 + unsigned int flags = rinfo->report_id & ~HID_REPORT_ID_MASK; 77 + unsigned int rid = rinfo->report_id & HID_REPORT_ID_MASK; 77 78 struct hid_report_enum *report_enum; 79 + struct hid_report *report; 78 80 struct list_head *list; 79 81 80 82 if (rinfo->report_type < HID_REPORT_TYPE_MIN || 81 - rinfo->report_type > HID_REPORT_TYPE_MAX) return NULL; 83 + rinfo->report_type > HID_REPORT_TYPE_MAX) 84 + return NULL; 82 85 83 86 report_enum = hid->report_enum + 84 87 (rinfo->report_type - HID_REPORT_TYPE_MIN); ··· 91 88 break; 92 89 93 90 case HID_REPORT_ID_FIRST: 94 - list = report_enum->report_list.next; 95 - if (list == &report_enum->report_list) 91 + if (list_empty(&report_enum->report_list)) 96 92 return NULL; 97 - rinfo->report_id = ((struct hid_report *) list)->id; 93 + 94 + list = report_enum->report_list.next; 95 + report = list_entry(list, struct hid_report, list); 96 + rinfo->report_id = report->id; 98 97 break; 99 98 100 99 case HID_REPORT_ID_NEXT: 101 - list = (struct list_head *) 102 - report_enum->report_id_hash[rinfo->report_id & HID_REPORT_ID_MASK]; 103 - if (list == NULL) 100 + report = report_enum->report_id_hash[rid]; 101 + if (!report) 104 102 return NULL; 105 - list = list->next; 103 + 104 + list = report->list.next; 106 105 if (list == &report_enum->report_list) 107 106 return NULL; 108 - rinfo->report_id = ((struct hid_report *) list)->id; 107 + 108 + report = 
list_entry(list, struct hid_report, list); 109 + rinfo->report_id = report->id; 109 110 break; 110 111 111 112 default: ··· 132 125 struct hid_field *field; 133 126 134 127 if (uref->report_type < HID_REPORT_TYPE_MIN || 135 - uref->report_type > HID_REPORT_TYPE_MAX) return NULL; 128 + uref->report_type > HID_REPORT_TYPE_MAX) 129 + return NULL; 136 130 137 131 report_enum = hid->report_enum + 138 132 (uref->report_type - HID_REPORT_TYPE_MIN); 139 133 140 - list_for_each_entry(report, &report_enum->report_list, list) 134 + list_for_each_entry(report, &report_enum->report_list, list) { 141 135 for (i = 0; i < report->maxfield; i++) { 142 136 field = report->field[i]; 143 137 for (j = 0; j < field->maxusage; j++) { ··· 150 142 } 151 143 } 152 144 } 145 + } 153 146 154 147 return NULL; 155 148 } ··· 159 150 struct hiddev_usage_ref *uref) 160 151 { 161 152 struct hiddev *hiddev = hid->hiddev; 162 - struct hiddev_list *list = hiddev->list; 153 + struct hiddev_list *list; 163 154 164 - while (list) { 155 + list_for_each_entry(list, &hiddev->list, node) { 165 156 if (uref->field_index != HID_FIELD_INDEX_NONE || 166 157 (list->flags & HIDDEV_FLAG_REPORT) != 0) { 167 158 list->buffer[list->head] = *uref; ··· 169 160 (HIDDEV_BUFFER_SIZE - 1); 170 161 kill_fasync(&list->fasync, SIGIO, POLL_IN); 171 162 } 172 - 173 - list = list->next; 174 163 } 175 164 176 165 wake_up_interruptible(&hiddev->wait); ··· 187 180 uref.report_type = 188 181 (type == HID_INPUT_REPORT) ? HID_REPORT_TYPE_INPUT : 189 182 ((type == HID_OUTPUT_REPORT) ? HID_REPORT_TYPE_OUTPUT : 190 - ((type == HID_FEATURE_REPORT) ? HID_REPORT_TYPE_FEATURE:0)); 183 + ((type == HID_FEATURE_REPORT) ? HID_REPORT_TYPE_FEATURE : 0)); 191 184 uref.report_id = field->report->id; 192 185 uref.field_index = field->index; 193 186 uref.usage_index = (usage - field->usage); ··· 207 200 uref.report_type = 208 201 (type == HID_INPUT_REPORT) ? HID_REPORT_TYPE_INPUT : 209 202 ((type == HID_OUTPUT_REPORT) ? 
HID_REPORT_TYPE_OUTPUT : 210 - ((type == HID_FEATURE_REPORT) ? HID_REPORT_TYPE_FEATURE:0)); 203 + ((type == HID_FEATURE_REPORT) ? HID_REPORT_TYPE_FEATURE : 0)); 211 204 uref.report_id = report->id; 212 205 uref.field_index = HID_FIELD_INDEX_NONE; 213 206 ··· 220 213 { 221 214 int retval; 222 215 struct hiddev_list *list = file->private_data; 216 + 223 217 retval = fasync_helper(fd, file, on, &list->fasync); 218 + 224 219 return retval < 0 ? retval : 0; 225 220 } 226 221 ··· 233 224 static int hiddev_release(struct inode * inode, struct file * file) 234 225 { 235 226 struct hiddev_list *list = file->private_data; 236 - struct hiddev_list **listptr; 237 227 238 - listptr = &list->hiddev->list; 239 228 hiddev_fasync(-1, file, 0); 240 - 241 - while (*listptr && (*listptr != list)) 242 - listptr = &((*listptr)->next); 243 - *listptr = (*listptr)->next; 229 + list_del(&list->node); 244 230 245 231 if (!--list->hiddev->open) { 246 232 if (list->hiddev->exist) ··· 252 248 /* 253 249 * open file op 254 250 */ 255 - static int hiddev_open(struct inode * inode, struct file * file) { 251 + static int hiddev_open(struct inode *inode, struct file *file) 252 + { 256 253 struct hiddev_list *list; 257 254 258 255 int i = iminor(inode) - HIDDEV_MINOR_BASE; ··· 265 260 return -ENOMEM; 266 261 267 262 list->hiddev = hiddev_table[i]; 268 - list->next = hiddev_table[i]->list; 269 - hiddev_table[i]->list = list; 270 - 263 + list_add_tail(&list->node, &hiddev_table[i]->list); 271 264 file->private_data = list; 272 265 273 266 if (!list->hiddev->open++) ··· 365 362 static unsigned int hiddev_poll(struct file *file, poll_table *wait) 366 363 { 367 364 struct hiddev_list *list = file->private_data; 365 + 368 366 poll_wait(file, &list->hiddev->wait, wait); 369 367 if (list->head != list->tail) 370 368 return POLLIN | POLLRDNORM; ··· 386 382 struct hiddev_collection_info cinfo; 387 383 struct hiddev_report_info rinfo; 388 384 struct hiddev_field_info finfo; 389 - struct hiddev_usage_ref_multi 
*uref_multi=NULL; 385 + struct hiddev_usage_ref_multi *uref_multi = NULL; 390 386 struct hiddev_usage_ref *uref; 391 387 struct hiddev_devinfo dinfo; 392 388 struct hid_report *report; ··· 768 764 } 769 765 770 766 init_waitqueue_head(&hiddev->wait); 771 - 772 - hiddev_table[hid->intf->minor - HIDDEV_MINOR_BASE] = hiddev; 773 - 767 + INIT_LIST_HEAD(&hiddev->list); 774 768 hiddev->hid = hid; 775 769 hiddev->exist = 1; 776 770 777 771 hid->minor = hid->intf->minor; 778 772 hid->hiddev = hiddev; 773 + 774 + hiddev_table[hid->intf->minor - HIDDEV_MINOR_BASE] = hiddev; 779 775 780 776 return 0; 781 777 }
+7 -2
drivers/usb/misc/cypress_cy7c63.c
··· 12 12 * the single I/O ports of the device. 13 13 * 14 14 * Supported vendors: AK Modul-Bus Computer GmbH 15 - * Supported devices: CY7C63001A-PC (to be continued...) 16 - * Supported functions: Read/Write Ports (to be continued...) 15 + * (Firmware "Port-Chip") 16 + * 17 + * Supported devices: CY7C63001A-PC 18 + * CY7C63001C-PXC 19 + * CY7C63001C-SXC 20 + * 21 + * Supported functions: Read/Write Ports 17 22 * 18 23 * 19 24 * This program is free software; you can redistribute it and/or
+71 -12
drivers/usb/net/rtl8150.c
··· 175 175 static void rtl8150_disconnect(struct usb_interface *intf); 176 176 static int rtl8150_probe(struct usb_interface *intf, 177 177 const struct usb_device_id *id); 178 + static int rtl8150_suspend(struct usb_interface *intf, pm_message_t message); 179 + static int rtl8150_resume(struct usb_interface *intf); 178 180 179 181 static const char driver_name [] = "rtl8150"; 180 182 ··· 185 183 .probe = rtl8150_probe, 186 184 .disconnect = rtl8150_disconnect, 187 185 .id_table = rtl8150_table, 186 + .suspend = rtl8150_suspend, 187 + .resume = rtl8150_resume 188 188 }; 189 189 190 190 /* ··· 242 238 usb_fill_control_urb(dev->ctrl_urb, dev->udev, 243 239 usb_sndctrlpipe(dev->udev, 0), (char *) &dev->dr, 244 240 &dev->rx_creg, size, ctrl_callback, dev); 245 - if ((ret = usb_submit_urb(dev->ctrl_urb, GFP_ATOMIC))) 241 + if ((ret = usb_submit_urb(dev->ctrl_urb, GFP_ATOMIC))) { 242 + if (ret == -ENODEV) 243 + netif_device_detach(dev->netdev); 246 244 err("control request submission failed: %d", ret); 247 - else 245 + } else 248 246 set_bit(RX_REG_SET, &dev->flags); 249 247 250 248 return ret; ··· 422 416 struct sk_buff *skb; 423 417 struct net_device *netdev; 424 418 u16 rx_stat; 419 + int status; 425 420 426 421 dev = urb->context; 427 422 if (!dev) ··· 472 465 goon: 473 466 usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), 474 467 dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev); 475 - if (usb_submit_urb(dev->rx_urb, GFP_ATOMIC)) { 468 + status = usb_submit_urb(dev->rx_urb, GFP_ATOMIC); 469 + if (status == -ENODEV) 470 + netif_device_detach(dev->netdev); 471 + else if (status) { 476 472 set_bit(RX_URB_FAIL, &dev->flags); 477 473 goto resched; 478 474 } else { ··· 491 481 { 492 482 rtl8150_t *dev; 493 483 struct sk_buff *skb; 484 + int status; 494 485 495 486 dev = (rtl8150_t *)data; 496 487 ··· 510 499 usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), 511 500 dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, 
dev); 512 501 try_again: 513 - if (usb_submit_urb(dev->rx_urb, GFP_ATOMIC)) { 502 + status = usb_submit_urb(dev->rx_urb, GFP_ATOMIC); 503 + if (status == -ENODEV) { 504 + netif_device_detach(dev->netdev); 505 + } else if (status) { 514 506 set_bit(RX_URB_FAIL, &dev->flags); 515 507 goto tlsched; 516 - } else { 508 + } else { 517 509 clear_bit(RX_URB_FAIL, &dev->flags); 518 510 } 519 511 ··· 588 574 589 575 resubmit: 590 576 status = usb_submit_urb (urb, SLAB_ATOMIC); 591 - if (status) 577 + if (status == -ENODEV) 578 + netif_device_detach(dev->netdev); 579 + else if (status) 592 580 err ("can't resubmit intr, %s-%s/input0, status %d", 593 581 dev->udev->bus->bus_name, 594 582 dev->udev->devpath, status); 595 583 } 596 584 585 + static int rtl8150_suspend(struct usb_interface *intf, pm_message_t message) 586 + { 587 + rtl8150_t *dev = usb_get_intfdata(intf); 588 + 589 + netif_device_detach(dev->netdev); 590 + 591 + if (netif_running(dev->netdev)) { 592 + usb_kill_urb(dev->rx_urb); 593 + usb_kill_urb(dev->intr_urb); 594 + } 595 + return 0; 596 + } 597 + 598 + static int rtl8150_resume(struct usb_interface *intf) 599 + { 600 + rtl8150_t *dev = usb_get_intfdata(intf); 601 + 602 + netif_device_attach(dev->netdev); 603 + if (netif_running(dev->netdev)) { 604 + dev->rx_urb->status = 0; 605 + dev->rx_urb->actual_length = 0; 606 + read_bulk_callback(dev->rx_urb, NULL); 607 + 608 + dev->intr_urb->status = 0; 609 + dev->intr_urb->actual_length = 0; 610 + intr_callback(dev->intr_urb, NULL); 611 + } 612 + return 0; 613 + } 597 614 598 615 /* 599 616 ** ··· 735 690 usb_fill_bulk_urb(dev->tx_urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), 736 691 skb->data, count, write_bulk_callback, dev); 737 692 if ((res = usb_submit_urb(dev->tx_urb, GFP_ATOMIC))) { 738 - warn("failed tx_urb %d\n", res); 739 - dev->stats.tx_errors++; 740 - netif_start_queue(netdev); 693 + /* Can we get/handle EPIPE here? 
*/ 694 + if (res == -ENODEV) 695 + netif_device_detach(dev->netdev); 696 + else { 697 + warn("failed tx_urb %d\n", res); 698 + dev->stats.tx_errors++; 699 + netif_start_queue(netdev); 700 + } 741 701 } else { 742 702 dev->stats.tx_packets++; 743 703 dev->stats.tx_bytes += skb->len; ··· 779 729 780 730 usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), 781 731 dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev); 782 - if ((res = usb_submit_urb(dev->rx_urb, GFP_KERNEL))) 732 + if ((res = usb_submit_urb(dev->rx_urb, GFP_KERNEL))) { 733 + if (res == -ENODEV) 734 + netif_device_detach(dev->netdev); 783 735 warn("%s: rx_urb submit failed: %d", __FUNCTION__, res); 736 + return res; 737 + } 784 738 usb_fill_int_urb(dev->intr_urb, dev->udev, usb_rcvintpipe(dev->udev, 3), 785 739 dev->intr_buff, INTBUFSIZE, intr_callback, 786 740 dev, dev->intr_interval); 787 - if ((res = usb_submit_urb(dev->intr_urb, GFP_KERNEL))) 741 + if ((res = usb_submit_urb(dev->intr_urb, GFP_KERNEL))) { 742 + if (res == -ENODEV) 743 + netif_device_detach(dev->netdev); 788 744 warn("%s: intr_urb submit failed: %d", __FUNCTION__, res); 789 - netif_start_queue(netdev); 745 + usb_kill_urb(dev->rx_urb); 746 + return res; 747 + } 790 748 enable_net_traffic(dev); 791 749 set_carrier(netdev); 750 + netif_start_queue(netdev); 792 751 793 752 return res; 794 753 }
+9 -15
drivers/usb/serial/Kconfig
··· 62 62 To compile this driver as a module, choose M here: the 63 63 module will be called airprime. 64 64 65 - config USB_SERIAL_ANYDATA 66 - tristate "USB AnyData CDMA Wireless Driver" 67 - depends on USB_SERIAL 68 - help 69 - Say Y here if you want to use a AnyData CDMA device. 70 - 71 - To compile this driver as a module, choose M here: the 72 - module will be called anydata. 73 - 74 65 config USB_SERIAL_ARK3116 75 66 tristate "USB ARK Micro 3116 USB Serial Driver (EXPERIMENTAL)" 76 67 depends on USB_SERIAL && EXPERIMENTAL ··· 493 502 module will be called keyspan_pda. 494 503 495 504 config USB_SERIAL_OPTION 496 - tristate "USB driver for GSM modems" 505 + tristate "USB driver for GSM and CDMA modems" 497 506 depends on USB_SERIAL 498 507 help 499 - Say Y here if you have an "Option" GSM PCMCIA card 500 - (or an OEM version: branded Huawei, Audiovox, or Novatel). 508 + Say Y here if you have a GSM or CDMA modem that's connected to USB. 501 509 502 - These cards feature a built-in OHCI-USB adapter and an 503 - internally-connected GSM modem. The USB bus is not 504 - accessible externally. 510 + This driver also supports several PCMCIA cards which have a 511 + built-in OHCI-USB adapter and an internally-connected GSM modem. 512 + The USB bus on these cards is not accessible externally. 513 + 514 + Supported devices include (some of?) those made by: 515 + Option, Huawei, Audiovox, Sierra Wireless, Novatel Wireless, or 516 + Anydata. 505 517 506 518 To compile this driver as a module, choose M here: the 507 519 module will be called option.
-1
drivers/usb/serial/Makefile
··· 12 12 usbserial-objs := usb-serial.o generic.o bus.o $(usbserial-obj-y) 13 13 14 14 obj-$(CONFIG_USB_SERIAL_AIRPRIME) += airprime.o 15 - obj-$(CONFIG_USB_SERIAL_ANYDATA) += anydata.o 16 15 obj-$(CONFIG_USB_SERIAL_ARK3116) += ark3116.o 17 16 obj-$(CONFIG_USB_SERIAL_BELKIN) += belkin_sa.o 18 17 obj-$(CONFIG_USB_SERIAL_CP2101) += cp2101.o
-123
drivers/usb/serial/anydata.c
··· 1 - /* 2 - * AnyData CDMA Serial USB driver 3 - * 4 - * Copyright (C) 2005 Greg Kroah-Hartman <gregkh@suse.de> 5 - * 6 - * This program is free software; you can redistribute it and/or 7 - * modify it under the terms of the GNU General Public License version 8 - * 2 as published by the Free Software Foundation. 9 - */ 10 - 11 - #include <linux/kernel.h> 12 - #include <linux/init.h> 13 - #include <linux/tty.h> 14 - #include <linux/module.h> 15 - #include <linux/usb.h> 16 - #include <linux/usb/serial.h> 17 - 18 - static struct usb_device_id id_table [] = { 19 - { USB_DEVICE(0x16d5, 0x6501) }, /* AirData CDMA device */ 20 - { }, 21 - }; 22 - MODULE_DEVICE_TABLE(usb, id_table); 23 - 24 - /* if overridden by the user, then use their value for the size of the 25 - * read and write urbs */ 26 - static int buffer_size; 27 - static int debug; 28 - 29 - static struct usb_driver anydata_driver = { 30 - .name = "anydata", 31 - .probe = usb_serial_probe, 32 - .disconnect = usb_serial_disconnect, 33 - .id_table = id_table, 34 - .no_dynamic_id = 1, 35 - }; 36 - 37 - static int anydata_open(struct usb_serial_port *port, struct file *filp) 38 - { 39 - char *buffer; 40 - int result = 0; 41 - 42 - dbg("%s - port %d", __FUNCTION__, port->number); 43 - 44 - if (buffer_size) { 45 - /* override the default buffer sizes */ 46 - buffer = kmalloc(buffer_size, GFP_KERNEL); 47 - if (!buffer) { 48 - dev_err(&port->dev, "%s - out of memory.\n", 49 - __FUNCTION__); 50 - return -ENOMEM; 51 - } 52 - kfree (port->read_urb->transfer_buffer); 53 - port->read_urb->transfer_buffer = buffer; 54 - port->read_urb->transfer_buffer_length = buffer_size; 55 - 56 - buffer = kmalloc(buffer_size, GFP_KERNEL); 57 - if (!buffer) { 58 - dev_err(&port->dev, "%s - out of memory.\n", 59 - __FUNCTION__); 60 - return -ENOMEM; 61 - } 62 - kfree (port->write_urb->transfer_buffer); 63 - port->write_urb->transfer_buffer = buffer; 64 - port->write_urb->transfer_buffer_length = buffer_size; 65 - port->bulk_out_size = 
buffer_size; 66 - } 67 - 68 - /* Start reading from the device */ 69 - usb_fill_bulk_urb(port->read_urb, port->serial->dev, 70 - usb_rcvbulkpipe(port->serial->dev, 71 - port->bulk_in_endpointAddress), 72 - port->read_urb->transfer_buffer, 73 - port->read_urb->transfer_buffer_length, 74 - usb_serial_generic_read_bulk_callback, port); 75 - result = usb_submit_urb(port->read_urb, GFP_KERNEL); 76 - if (result) 77 - dev_err(&port->dev, 78 - "%s - failed submitting read urb, error %d\n", 79 - __FUNCTION__, result); 80 - 81 - return result; 82 - } 83 - 84 - static struct usb_serial_driver anydata_device = { 85 - .driver = { 86 - .owner = THIS_MODULE, 87 - .name = "anydata", 88 - }, 89 - .id_table = id_table, 90 - .num_interrupt_in = NUM_DONT_CARE, 91 - .num_bulk_in = NUM_DONT_CARE, 92 - .num_bulk_out = NUM_DONT_CARE, 93 - .num_ports = 1, 94 - .open = anydata_open, 95 - }; 96 - 97 - static int __init anydata_init(void) 98 - { 99 - int retval; 100 - 101 - retval = usb_serial_register(&anydata_device); 102 - if (retval) 103 - return retval; 104 - retval = usb_register(&anydata_driver); 105 - if (retval) 106 - usb_serial_deregister(&anydata_device); 107 - return retval; 108 - } 109 - 110 - static void __exit anydata_exit(void) 111 - { 112 - usb_deregister(&anydata_driver); 113 - usb_serial_deregister(&anydata_device); 114 - } 115 - 116 - module_init(anydata_init); 117 - module_exit(anydata_exit); 118 - MODULE_LICENSE("GPL"); 119 - 120 - module_param(debug, bool, S_IRUGO | S_IWUSR); 121 - MODULE_PARM_DESC(debug, "Debug enabled or not"); 122 - module_param(buffer_size, int, 0); 123 - MODULE_PARM_DESC(buffer_size, "Size of the transfer buffers");
+1
drivers/usb/serial/ftdi_sio.c
··· 337 337 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, 338 338 { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, 339 339 { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, 340 + { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, 340 341 { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2101_PID) }, 341 342 { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2102_PID) }, 342 343 { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2103_PID) },
+4
drivers/usb/serial/ftdi_sio.h
··· 182 182 /* http://home.earthlink.net/~jrhees/USBUIRT/index.htm */ 183 183 #define FTDI_USB_UIRT_PID 0xF850 /* Product Id */ 184 184 185 + /* TNC-X USB-to-packet-radio adapter, versions prior to 3.0 (DLP module) */ 186 + 187 + #define FTDI_TNC_X_PID 0xEBE0 188 + 185 189 /* 186 190 * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). 187 191 * All of these devices use FTDI's vendor ID (0x0403).
+1
drivers/usb/serial/ipaq.c
··· 250 250 { USB_DEVICE(0x04C5, 0x1058) }, /* FUJITSU USB Sync */ 251 251 { USB_DEVICE(0x04C5, 0x1079) }, /* FUJITSU USB Sync */ 252 252 { USB_DEVICE(0x04DA, 0x2500) }, /* Panasonic USB Sync */ 253 + { USB_DEVICE(0x04DD, 0x9102) }, /* SHARP WS003SH USB Modem */ 253 254 { USB_DEVICE(0x04E8, 0x5F00) }, /* Samsung NEXiO USB Sync */ 254 255 { USB_DEVICE(0x04E8, 0x5F01) }, /* Samsung NEXiO USB Sync */ 255 256 { USB_DEVICE(0x04E8, 0x5F02) }, /* Samsung NEXiO USB Sync */
+7 -69
drivers/usb/serial/option.c
··· 9 9 10 10 Portions copied from the Keyspan driver by Hugh Blemings <hugh@blemings.org> 11 11 12 - History: 13 - 14 - 2005-05-19 v0.1 Initial version, based on incomplete docs 15 - and analysis of misbehavior with the standard driver 16 - 2005-05-20 v0.2 Extended the input buffer to avoid losing 17 - random 64-byte chunks of data 18 - 2005-05-21 v0.3 implemented chars_in_buffer() 19 - turned on low_latency 20 - simplified the code somewhat 21 - 2005-05-24 v0.4 option_write() sometimes deadlocked under heavy load 22 - removed some dead code 23 - added sponsor notice 24 - coding style clean-up 25 - 2005-06-20 v0.4.1 add missing braces :-/ 26 - killed end-of-line whitespace 27 - 2005-07-15 v0.4.2 rename WLAN product to FUSION, add FUSION2 28 - 2005-09-10 v0.4.3 added HUAWEI E600 card and Audiovox AirCard 29 - 2005-09-20 v0.4.4 increased recv buffer size: the card sometimes 30 - wants to send >2000 bytes. 31 - 2006-04-10 v0.5 fixed two array overrun errors :-/ 32 - 2006-04-21 v0.5.1 added support for Sierra Wireless MC8755 33 - 2006-05-15 v0.6 re-enable multi-port support 34 - 2006-06-01 v0.6.1 add COBRA 35 - 2006-06-01 v0.6.2 add backwards-compatibility stuff 36 - 2006-06-01 v0.6.3 add Novatel Wireless 37 - 2006-06-01 v0.7 Option => GSM 38 - 2006-06-01 v0.7.1 add COBRA2 12 + History: see the git log. 39 13 40 14 Work sponsored by: Sigos GmbH, Germany <info@sigos.de> 41 15 42 16 This driver exists because the "normal" serial driver doesn't work too well 43 17 with GSM modems. 
Issues: 44 18 - data loss -- one single Receive URB is not nearly enough 45 - - nonstandard flow (Option devices) and multiplex (Sierra) control 19 + - nonstandard flow (Option devices) control 46 20 - controlling the baud rate doesn't make sense 47 21 48 22 This driver is named "option" because the most common device it's ··· 70 96 #define OPTION_VENDOR_ID 0x0AF0 71 97 #define HUAWEI_VENDOR_ID 0x12D1 72 98 #define AUDIOVOX_VENDOR_ID 0x0F3D 73 - #define SIERRAWIRELESS_VENDOR_ID 0x1199 74 99 #define NOVATELWIRELESS_VENDOR_ID 0x1410 100 + #define ANYDATA_VENDOR_ID 0x16d5 75 101 76 102 #define OPTION_PRODUCT_OLD 0x5000 77 103 #define OPTION_PRODUCT_FUSION 0x6000 ··· 80 106 #define OPTION_PRODUCT_COBRA2 0x6600 81 107 #define HUAWEI_PRODUCT_E600 0x1001 82 108 #define AUDIOVOX_PRODUCT_AIRCARD 0x0112 83 - #define SIERRAWIRELESS_PRODUCT_MC8755 0x6802 84 109 #define NOVATELWIRELESS_PRODUCT_U740 0x1400 110 + #define ANYDATA_PRODUCT_ID 0x6501 85 111 86 112 static struct usb_device_id option_ids[] = { 87 113 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_OLD) }, ··· 91 117 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COBRA2) }, 92 118 { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) }, 93 119 { USB_DEVICE(AUDIOVOX_VENDOR_ID, AUDIOVOX_PRODUCT_AIRCARD) }, 94 - { USB_DEVICE(SIERRAWIRELESS_VENDOR_ID, SIERRAWIRELESS_PRODUCT_MC8755) }, 95 120 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID,NOVATELWIRELESS_PRODUCT_U740) }, 121 + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ID) }, 96 122 { } /* Terminating entry */ 97 123 }; 98 124 ··· 105 131 { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) }, 106 132 { USB_DEVICE(AUDIOVOX_VENDOR_ID, AUDIOVOX_PRODUCT_AIRCARD) }, 107 133 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID,NOVATELWIRELESS_PRODUCT_U740) }, 108 - { } /* Terminating entry */ 109 - }; 110 - static struct usb_device_id option_ids3[] = { 111 - { USB_DEVICE(SIERRAWIRELESS_VENDOR_ID, SIERRAWIRELESS_PRODUCT_MC8755) }, 134 + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ID) }, 112 135 { } 
/* Terminating entry */ 113 136 }; 114 137 ··· 122 151 /* The card has three separate interfaces, which the serial driver 123 152 * recognizes separately, thus num_port=1. 124 153 */ 125 - static struct usb_serial_driver option_3port_device = { 126 - .driver = { 127 - .owner = THIS_MODULE, 128 - .name = "option", 129 - }, 130 - .description = "GSM modem (3-port)", 131 - .id_table = option_ids3, 132 - .num_interrupt_in = NUM_DONT_CARE, 133 - .num_bulk_in = NUM_DONT_CARE, 134 - .num_bulk_out = NUM_DONT_CARE, 135 - .num_ports = 3, 136 - .open = option_open, 137 - .close = option_close, 138 - .write = option_write, 139 - .write_room = option_write_room, 140 - .chars_in_buffer = option_chars_in_buffer, 141 - .throttle = option_rx_throttle, 142 - .unthrottle = option_rx_unthrottle, 143 - .set_termios = option_set_termios, 144 - .break_ctl = option_break_ctl, 145 - .tiocmget = option_tiocmget, 146 - .tiocmset = option_tiocmset, 147 - .attach = option_startup, 148 - .shutdown = option_shutdown, 149 - .read_int_callback = option_instat_callback, 150 - }; 151 154 152 155 static struct usb_serial_driver option_1port_device = { 153 156 .driver = { 154 157 .owner = THIS_MODULE, 155 - .name = "option", 158 + .name = "option1", 156 159 }, 157 160 .description = "GSM modem (1-port)", 158 161 .id_table = option_ids1, ··· 190 245 retval = usb_serial_register(&option_1port_device); 191 246 if (retval) 192 247 goto failed_1port_device_register; 193 - retval = usb_serial_register(&option_3port_device); 194 - if (retval) 195 - goto failed_3port_device_register; 196 248 retval = usb_register(&option_driver); 197 249 if (retval) 198 250 goto failed_driver_register; ··· 199 257 return 0; 200 258 201 259 failed_driver_register: 202 - usb_serial_deregister (&option_3port_device); 203 - failed_3port_device_register: 204 260 usb_serial_deregister (&option_1port_device); 205 261 failed_1port_device_register: 206 262 return retval; ··· 207 267 static void __exit option_exit(void) 208 268 { 209 
269 usb_deregister (&option_driver); 210 - usb_serial_deregister (&option_3port_device); 211 270 usb_serial_deregister (&option_1port_device); 212 271 } 213 272 ··· 594 655 struct option_port_private *portdata; 595 656 596 657 dbg("%s", __FUNCTION__); 597 - 598 658 599 659 for (i = 0; i < serial->num_ports; i++) { 600 660 port = serial->port[i];
+1
drivers/usb/serial/pl2303.c
··· 81 81 { USB_DEVICE(SPEEDDRAGON_VENDOR_ID, SPEEDDRAGON_PRODUCT_ID) }, 82 82 { USB_DEVICE(OTI_VENDOR_ID, OTI_PRODUCT_ID) }, 83 83 { USB_DEVICE(DATAPILOT_U2_VENDOR_ID, DATAPILOT_U2_PRODUCT_ID) }, 84 + { USB_DEVICE(BELKIN_VENDOR_ID, BELKIN_PRODUCT_ID) }, 84 85 { } /* Terminating entry */ 85 86 }; 86 87
+4
drivers/usb/serial/pl2303.h
··· 89 89 /* DATAPILOT Universal-2 Phone Cable */ 90 90 #define DATAPILOT_U2_VENDOR_ID 0x0731 91 91 #define DATAPILOT_U2_PRODUCT_ID 0x2003 92 + 93 + /* Belkin "F5U257" Serial Adapter */ 94 + #define BELKIN_VENDOR_ID 0x050d 95 + #define BELKIN_PRODUCT_ID 0x0257
+16 -13
drivers/usb/storage/unusual_devs.h
··· 145 145 US_SC_DEVICE, US_PR_DEVICE, NULL, 146 146 US_FL_IGNORE_RESIDUE ), 147 147 148 + /* Reported by Mario Rettig <mariorettig@web.de> */ 149 + UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100, 150 + "Nokia", 151 + "Nokia 3250", 152 + US_SC_DEVICE, US_PR_DEVICE, NULL, 153 + US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ), 154 + 148 155 /* Reported by Sumedha Swamy <sumedhaswamy@gmail.com> and 149 156 * Einar Th. Einarsson <einarthered@gmail.com> */ 150 157 UNUSUAL_DEV( 0x0421, 0x0444, 0x0100, 0x0100, ··· 634 627 "Digital Camera EX-20 DSC", 635 628 US_SC_8070, US_PR_DEVICE, NULL, 0 ), 636 629 637 - /* The entry was here before I took over, and had US_SC_RBC. It turns 638 - * out that isn't needed. Additionally, Torsten Eriksson 639 - * <Torsten.Eriksson@bergianska.se> is able to use his device fine 640 - * without this entry at all - but I don't suspect that will be true 641 - * for all users (the protocol is likely needed), so is staying at 642 - * this time. - Phil Dibowitz <phil@ipom.com> 643 - */ 644 - UNUSUAL_DEV( 0x059f, 0xa601, 0x0200, 0x0200, 645 - "LaCie", 646 - "USB Hard Disk", 647 - US_SC_DEVICE, US_PR_CB, NULL, 0 ), 648 - 649 630 /* Submitted by Joel Bourquard <numlock@freesurf.ch> 650 631 * Some versions of this device need the SubClass and Protocol overrides 651 632 * while others don't. ··· 1101 1106 "Optio S/S4", 1102 1107 US_SC_DEVICE, US_PR_DEVICE, NULL, 1103 1108 US_FL_FIX_INQUIRY ), 1104 - 1109 + 1110 + /* This is a virtual windows driver CD, which the zd1211rw driver automatically 1111 + * converts into a WLAN device. */ 1112 + UNUSUAL_DEV( 0x0ace, 0x2011, 0x0101, 0x0101, 1113 + "ZyXEL", 1114 + "G-220F USB-WLAN Install", 1115 + US_SC_DEVICE, US_PR_DEVICE, NULL, 1116 + US_FL_IGNORE_DEVICE ), 1117 + 1105 1118 #ifdef CONFIG_USB_STORAGE_ISD200 1106 1119 UNUSUAL_DEV( 0x0bf6, 0xa001, 0x0100, 0x0110, 1107 1120 "ATI",
+11 -2
drivers/usb/storage/usb.c
··· 483 483 } 484 484 485 485 /* Get the unusual_devs entries and the string descriptors */ 486 - static void get_device_info(struct us_data *us, const struct usb_device_id *id) 486 + static int get_device_info(struct us_data *us, const struct usb_device_id *id) 487 487 { 488 488 struct usb_device *dev = us->pusb_dev; 489 489 struct usb_interface_descriptor *idesc = ··· 499 499 idesc->bInterfaceProtocol : 500 500 unusual_dev->useTransport; 501 501 us->flags = USB_US_ORIG_FLAGS(id->driver_info); 502 + 503 + if (us->flags & US_FL_IGNORE_DEVICE) { 504 + printk(KERN_INFO USB_STORAGE "device ignored\n"); 505 + return -ENODEV; 506 + } 502 507 503 508 /* 504 509 * This flag is only needed when we're in high-speed, so let's ··· 546 541 msgs[msg], 547 542 UTS_RELEASE); 548 543 } 544 + 545 + return 0; 549 546 } 550 547 551 548 /* Get the transport settings */ ··· 976 969 * of the match from the usb_device_id table, so we can find the 977 970 * corresponding entry in the private table. 978 971 */ 979 - get_device_info(us, id); 972 + result = get_device_info(us, id); 973 + if (result) 974 + goto BadDevice; 980 975 981 976 /* Get the transport, protocol, and pipe settings */ 982 977 result = get_transport(us);
+3 -7
drivers/video/aty/aty128fb.c
··· 1913 1913 u8 chip_rev; 1914 1914 u32 dac; 1915 1915 1916 - if (!par->vram_size) /* may have already been probed */ 1917 - par->vram_size = aty_ld_le32(CONFIG_MEMSIZE) & 0x03FFFFFF; 1918 - 1919 1916 /* Get the chip revision */ 1920 1917 chip_rev = (aty_ld_le32(CONFIG_CNTL) >> 16) & 0x1F; 1921 1918 ··· 2025 2028 2026 2029 aty128_init_engine(par); 2027 2030 2028 - if (register_framebuffer(info) < 0) 2029 - return 0; 2030 - 2031 2031 par->pm_reg = pci_find_capability(pdev, PCI_CAP_ID_PM); 2032 2032 par->pdev = pdev; 2033 2033 par->asleep = 0; ··· 2033 2039 #ifdef CONFIG_FB_ATY128_BACKLIGHT 2034 2040 aty128_bl_init(par); 2035 2041 #endif 2042 + 2043 + if (register_framebuffer(info) < 0) 2044 + return 0; 2036 2045 2037 2046 printk(KERN_INFO "fb%d: %s frame buffer device on %s\n", 2038 2047 info->node, info->fix.id, video_card); ··· 2086 2089 par = info->par; 2087 2090 2088 2091 info->pseudo_palette = par->pseudo_palette; 2089 - info->fix = aty128fb_fix; 2090 2092 2091 2093 /* Virtualize mmio region */ 2092 2094 info->fix.mmio_start = reg_addr;
+9 -11
drivers/video/au1100fb.c
··· 156 156 157 157 info->fix.visual = FB_VISUAL_TRUECOLOR; 158 158 info->fix.line_length = info->var.xres_virtual << 1; /* depth=16 */ 159 - } 159 + } 160 160 } else { 161 161 /* mono */ 162 162 info->fix.visual = FB_VISUAL_MONO10; ··· 164 164 } 165 165 166 166 info->screen_size = info->fix.line_length * info->var.yres_virtual; 167 + info->var.rotate = ((fbdev->panel->control_base&LCD_CONTROL_SM_MASK) \ 168 + >> LCD_CONTROL_SM_BIT) * 90; 167 169 168 170 /* Determine BPP mode and format */ 169 - fbdev->regs->lcd_control = fbdev->panel->control_base | 170 - ((info->var.rotate/90) << LCD_CONTROL_SM_BIT); 171 - 171 + fbdev->regs->lcd_control = fbdev->panel->control_base; 172 + fbdev->regs->lcd_horztiming = fbdev->panel->horztiming; 173 + fbdev->regs->lcd_verttiming = fbdev->panel->verttiming; 174 + fbdev->regs->lcd_clkcontrol = fbdev->panel->clkcontrol_base; 172 175 fbdev->regs->lcd_intenable = 0; 173 176 fbdev->regs->lcd_intstatus = 0; 174 - 175 - fbdev->regs->lcd_horztiming = fbdev->panel->horztiming; 176 - 177 - fbdev->regs->lcd_verttiming = fbdev->panel->verttiming; 178 - 179 - fbdev->regs->lcd_clkcontrol = fbdev->panel->clkcontrol_base; 180 - 181 177 fbdev->regs->lcd_dmaaddr0 = LCD_DMA_SA_N(fbdev->fb_phys); 182 178 183 179 if (panel_is_dual(fbdev->panel)) { ··· 202 206 203 207 /* Resume controller */ 204 208 fbdev->regs->lcd_control |= LCD_CONTROL_GO; 209 + mdelay(10); 210 + au1100fb_fb_blank(VESA_NO_BLANKING, info); 205 211 206 212 return 0; 207 213 }
+9 -2
fs/befs/linuxvfs.c
··· 512 512 wchar_t uni; 513 513 int unilen, utflen; 514 514 char *result; 515 - int maxlen = in_len; /* The utf8->nls conversion can't make more chars */ 515 + /* The utf8->nls conversion won't make the final nls string bigger 516 + * than the utf one, but if the string is pure ascii they'll have the 517 + * same width and an extra char is needed to save the additional \0 518 + */ 519 + int maxlen = in_len + 1; 516 520 517 521 befs_debug(sb, "---> utf2nls()"); 518 522 ··· 592 588 wchar_t uni; 593 589 int unilen, utflen; 594 590 char *result; 595 - int maxlen = 3 * in_len; 591 + /* There're nls characters that will translate to 3-chars-wide UTF-8 592 + * characters, a additional byte is needed to save the final \0 593 + * in special cases */ 594 + int maxlen = (3 * in_len) + 1; 596 595 597 596 befs_debug(sb, "---> nls2utf()\n"); 598 597
+3 -9
fs/lockd/svclock.c
··· 638 638 if (task->tk_status < 0) { 639 639 /* RPC error: Re-insert for retransmission */ 640 640 timeout = 10 * HZ; 641 - } else if (block->b_done) { 642 - /* Block already removed, kill it for real */ 643 - timeout = 0; 644 641 } else { 645 642 /* Call was successful, now wait for client callback */ 646 643 timeout = 60 * HZ; ··· 706 709 break; 707 710 if (time_after(block->b_when,jiffies)) 708 711 break; 709 - dprintk("nlmsvc_retry_blocked(%p, when=%ld, done=%d)\n", 710 - block, block->b_when, block->b_done); 712 + dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n", 713 + block, block->b_when); 711 714 kref_get(&block->b_count); 712 - if (block->b_done) 713 - nlmsvc_unlink_block(block); 714 - else 715 - nlmsvc_grant_blocked(block); 715 + nlmsvc_grant_blocked(block); 716 716 nlmsvc_release_block(block); 717 717 } 718 718
+5 -3
fs/namei.c
··· 159 159 #ifdef CONFIG_AUDITSYSCALL 160 160 void putname(const char *name) 161 161 { 162 - if (unlikely(current->audit_context)) 162 + if (unlikely(!audit_dummy_context())) 163 163 audit_putname(name); 164 164 else 165 165 __putname(name); ··· 1125 1125 retval = link_path_walk(name, nd); 1126 1126 out: 1127 1127 if (likely(retval == 0)) { 1128 - if (unlikely(current->audit_context && nd && nd->dentry && 1128 + if (unlikely(!audit_dummy_context() && nd && nd->dentry && 1129 1129 nd->dentry->d_inode)) 1130 1130 audit_inode(name, nd->dentry->d_inode); 1131 1131 } ··· 1357 1357 return -ENOENT; 1358 1358 1359 1359 BUG_ON(victim->d_parent->d_inode != dir); 1360 - audit_inode_child(victim->d_name.name, victim->d_inode, dir->i_ino); 1360 + audit_inode_child(victim->d_name.name, victim->d_inode, dir); 1361 1361 1362 1362 error = permission(dir,MAY_WRITE | MAY_EXEC, NULL); 1363 1363 if (error) ··· 1659 1659 * It already exists. 1660 1660 */ 1661 1661 mutex_unlock(&dir->d_inode->i_mutex); 1662 + audit_inode_update(path.dentry->d_inode); 1662 1663 1663 1664 error = -EEXIST; 1664 1665 if (flag & O_EXCL) ··· 1670 1669 if (flag & O_NOFOLLOW) 1671 1670 goto exit_dput; 1672 1671 } 1672 + 1673 1673 error = -ENOENT; 1674 1674 if (!path.dentry->d_inode) 1675 1675 goto exit_dput;
+3 -1
fs/nfs/namespace.c
··· 51 51 namelen = dentry->d_name.len; 52 52 buflen -= namelen + 1; 53 53 if (buflen < 0) 54 - goto Elong; 54 + goto Elong_unlock; 55 55 end -= namelen; 56 56 memcpy(end, dentry->d_name.name, namelen); 57 57 *--end = '/'; ··· 68 68 end -= namelen; 69 69 memcpy(end, base, namelen); 70 70 return end; 71 + Elong_unlock: 72 + spin_unlock(&dcache_lock); 71 73 Elong: 72 74 return ERR_PTR(-ENAMETOOLONG); 73 75 }
+1 -1
fs/nfs/read.c
··· 63 63 return p; 64 64 } 65 65 66 - void nfs_readdata_free(struct nfs_read_data *p) 66 + static void nfs_readdata_free(struct nfs_read_data *p) 67 67 { 68 68 if (p && (p->pagevec != &p->page_array[0])) 69 69 kfree(p->pagevec);
+1 -1
fs/nfs/write.c
··· 137 137 return p; 138 138 } 139 139 140 - void nfs_writedata_free(struct nfs_write_data *p) 140 + static void nfs_writedata_free(struct nfs_write_data *p) 141 141 { 142 142 if (p && (p->pagevec != &p->page_array[0])) 143 143 kfree(p->pagevec);
+1 -1
fs/reiserfs/file.c
··· 48 48 return 0; 49 49 } 50 50 51 - reiserfs_write_lock(inode->i_sb); 52 51 mutex_lock(&inode->i_mutex); 52 + reiserfs_write_lock(inode->i_sb); 53 53 /* freeing preallocation only involves relogging blocks that 54 54 * are already in the current transaction. preallocation gets 55 55 * freed at the end of each transaction, so it is impossible for
+14 -12
fs/reiserfs/inode.c
··· 39 39 40 40 /* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */ 41 41 if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */ 42 - mutex_lock(&inode->i_mutex); 43 - 44 42 reiserfs_delete_xattrs(inode); 45 43 46 - if (journal_begin(&th, inode->i_sb, jbegin_count)) { 47 - mutex_unlock(&inode->i_mutex); 44 + if (journal_begin(&th, inode->i_sb, jbegin_count)) 48 45 goto out; 49 - } 50 46 reiserfs_update_inode_transaction(inode); 51 47 52 48 err = reiserfs_delete_object(&th, inode); ··· 53 57 if (!err) 54 58 DQUOT_FREE_INODE(inode); 55 59 56 - if (journal_end(&th, inode->i_sb, jbegin_count)) { 57 - mutex_unlock(&inode->i_mutex); 60 + if (journal_end(&th, inode->i_sb, jbegin_count)) 58 61 goto out; 59 - } 60 - 61 - mutex_unlock(&inode->i_mutex); 62 62 63 63 /* check return value from reiserfs_delete_object after 64 64 * ending the transaction ··· 2340 2348 unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT; 2341 2349 int error = 0; 2342 2350 unsigned long block; 2351 + sector_t last_block; 2343 2352 struct buffer_head *head, *bh; 2344 2353 int partial = 0; 2345 2354 int nr = 0; ··· 2388 2395 } 2389 2396 bh = head; 2390 2397 block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits); 2398 + last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; 2391 2399 /* first map all the buffers, logging any direct items we find */ 2392 2400 do { 2393 - if ((checked || buffer_dirty(bh)) && (!buffer_mapped(bh) || 2394 - (buffer_mapped(bh) 2401 + if (block > last_block) { 2402 + /* 2403 + * This can happen when the block size is less than 2404 + * the page size. 
The corresponding bytes in the page 2405 + * were zero filled above 2406 + */ 2407 + clear_buffer_dirty(bh); 2408 + set_buffer_uptodate(bh); 2409 + } else if ((checked || buffer_dirty(bh)) && 2410 + (!buffer_mapped(bh) || (buffer_mapped(bh) 2395 2411 && bh->b_blocknr == 2396 2412 0))) { 2397 2413 /* not mapped yet, or it points to a direct item, search
+1 -1
fs/reiserfs/ioctl.c
··· 116 116 if (REISERFS_I(inode)->i_flags & i_nopack_mask) { 117 117 return 0; 118 118 } 119 - reiserfs_write_lock(inode->i_sb); 120 119 121 120 /* we need to make sure nobody is changing the file size beneath 122 121 ** us 123 122 */ 124 123 mutex_lock(&inode->i_mutex); 124 + reiserfs_write_lock(inode->i_sb); 125 125 126 126 write_from = inode->i_size & (blocksize - 1); 127 127 /* if we are on a block boundary, we are already unpacked. */
+6 -5
fs/udf/ialloc.c
··· 75 75 } 76 76 *err = -ENOSPC; 77 77 78 + UDF_I_UNIQUE(inode) = 0; 79 + UDF_I_LENEXTENTS(inode) = 0; 80 + UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; 81 + UDF_I_NEXT_ALLOC_GOAL(inode) = 0; 82 + UDF_I_STRAT4096(inode) = 0; 83 + 78 84 block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum, 79 85 start, err); 80 86 if (*err) ··· 90 84 } 91 85 92 86 mutex_lock(&sbi->s_alloc_mutex); 93 - UDF_I_UNIQUE(inode) = 0; 94 - UDF_I_LENEXTENTS(inode) = 0; 95 - UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; 96 - UDF_I_NEXT_ALLOC_GOAL(inode) = 0; 97 - UDF_I_STRAT4096(inode) = 0; 98 87 if (UDF_SB_LVIDBH(sb)) 99 88 { 100 89 struct logicalVolHeaderDesc *lvhd;
+1 -1
fs/ufs/balloc.c
··· 248 248 249 249 if (likely(cur_index != index)) { 250 250 page = ufs_get_locked_page(mapping, index); 251 - if (IS_ERR(page)) 251 + if (!page || IS_ERR(page)) /* it was truncated or EIO */ 252 252 continue; 253 253 } else 254 254 page = locked_page;
+9 -8
fs/ufs/util.c
··· 251 251 { 252 252 struct page *page; 253 253 254 - try_again: 255 254 page = find_lock_page(mapping, index); 256 255 if (!page) { 257 256 page = read_cache_page(mapping, index, 258 257 (filler_t*)mapping->a_ops->readpage, 259 258 NULL); 259 + 260 260 if (IS_ERR(page)) { 261 261 printk(KERN_ERR "ufs_change_blocknr: " 262 262 "read_cache_page error: ino %lu, index: %lu\n", ··· 265 265 } 266 266 267 267 lock_page(page); 268 + 269 + if (unlikely(page->mapping == NULL)) { 270 + /* Truncate got there first */ 271 + unlock_page(page); 272 + page_cache_release(page); 273 + page = NULL; 274 + goto out; 275 + } 268 276 269 277 if (!PageUptodate(page) || PageError(page)) { 270 278 unlock_page(page); ··· 283 275 mapping->host->i_ino, index); 284 276 285 277 page = ERR_PTR(-EIO); 286 - goto out; 287 278 } 288 - } 289 - 290 - if (unlikely(!page->mapping || !page_has_buffers(page))) { 291 - unlock_page(page); 292 - page_cache_release(page); 293 - goto try_again;/*we really need these buffers*/ 294 279 } 295 280 out: 296 281 return page;
-2
include/asm-arm/arch-omap/clock.h
··· 48 48 }; 49 49 50 50 extern unsigned int mpurate; 51 - extern struct list_head clocks; 52 - extern spinlock_t clockfw_lock; 53 51 54 52 extern int clk_init(struct clk_functions * custom_clocks); 55 53 extern int clk_register(struct clk *clk);
+6 -1
include/asm-ia64/meminit.h
··· 56 56 extern struct page *vmem_map; 57 57 extern int find_largest_hole (u64 start, u64 end, void *arg); 58 58 extern int create_mem_map_page_table (u64 start, u64 end, void *arg); 59 + extern int vmemmap_find_next_valid_pfn(int, int); 60 + #else 61 + static inline int vmemmap_find_next_valid_pfn(int node, int i) 62 + { 63 + return i + 1; 64 + } 59 65 #endif 60 - 61 66 #endif /* meminit_h */
+6 -1
include/asm-ia64/pal.h
··· 1433 1433 } pal_version_u_t; 1434 1434 1435 1435 1436 - /* Return PAL version information */ 1436 + /* 1437 + * Return PAL version information. While the documentation states that 1438 + * PAL_VERSION can be called in either physical or virtual mode, some 1439 + * implementations only allow physical calls. We don't call it very often, 1440 + * so the overhead isn't worth eliminating. 1441 + */ 1437 1442 static inline s64 1438 1443 ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version) 1439 1444 {
+2 -2
include/asm-ia64/sn/xpc.h
··· 1124 1124 #define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) 1125 1125 #define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8)) 1126 1126 1127 - #define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f) 1128 - #define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010) 1127 + #define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f)) 1128 + #define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x1010101010101010)) 1129 1129 1130 1130 1131 1131 static inline void
+1 -1
include/asm-ia64/system.h
··· 24 24 * 0xa000000000000000+2*PERCPU_PAGE_SIZE 25 25 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page) 26 26 */ 27 - #define KERNEL_START (GATE_ADDR+0x100000000) 27 + #define KERNEL_START (GATE_ADDR+__IA64_UL_CONST(0x100000000)) 28 28 #define PERCPU_ADDR (-PERCPU_PAGE_SIZE) 29 29 30 30 #ifndef __ASSEMBLY__
+1
include/asm-powerpc/rtas.h
··· 170 170 extern int rtas_get_power_level(int powerdomain, int *level); 171 171 extern int rtas_set_power_level(int powerdomain, int level, int *setlevel); 172 172 extern int rtas_set_indicator(int indicator, int index, int new_value); 173 + extern int rtas_set_indicator_fast(int indicator, int index, int new_value); 173 174 extern void rtas_progress(char *s, unsigned short hex); 174 175 extern void rtas_initialize(void); 175 176
+29 -14
include/linux/audit.h
··· 327 327 extern void audit_putname(const char *name); 328 328 extern void __audit_inode(const char *name, const struct inode *inode); 329 329 extern void __audit_inode_child(const char *dname, const struct inode *inode, 330 - unsigned long pino); 330 + const struct inode *parent); 331 + extern void __audit_inode_update(const struct inode *inode); 332 + static inline int audit_dummy_context(void) 333 + { 334 + void *p = current->audit_context; 335 + return !p || *(int *)p; 336 + } 331 337 static inline void audit_getname(const char *name) 332 338 { 333 - if (unlikely(current->audit_context)) 339 + if (unlikely(!audit_dummy_context())) 334 340 __audit_getname(name); 335 341 } 336 342 static inline void audit_inode(const char *name, const struct inode *inode) { 337 - if (unlikely(current->audit_context)) 343 + if (unlikely(!audit_dummy_context())) 338 344 __audit_inode(name, inode); 339 345 } 340 346 static inline void audit_inode_child(const char *dname, 341 - const struct inode *inode, 342 - unsigned long pino) { 343 - if (unlikely(current->audit_context)) 344 - __audit_inode_child(dname, inode, pino); 347 + const struct inode *inode, 348 + const struct inode *parent) { 349 + if (unlikely(!audit_dummy_context())) 350 + __audit_inode_child(dname, inode, parent); 351 + } 352 + static inline void audit_inode_update(const struct inode *inode) { 353 + if (unlikely(!audit_dummy_context())) 354 + __audit_inode_update(inode); 345 355 } 346 356 347 357 /* Private API (for audit.c only) */ ··· 375 365 376 366 static inline int audit_ipc_obj(struct kern_ipc_perm *ipcp) 377 367 { 378 - if (unlikely(current->audit_context)) 368 + if (unlikely(!audit_dummy_context())) 379 369 return __audit_ipc_obj(ipcp); 380 370 return 0; 381 371 } 382 372 static inline int audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode) 383 373 { 384 - if (unlikely(current->audit_context)) 374 + if (unlikely(!audit_dummy_context())) 385 375 return __audit_ipc_set_perm(qbytes, uid, 
gid, mode); 386 376 return 0; 387 377 } 388 378 static inline int audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr) 389 379 { 390 - if (unlikely(current->audit_context)) 380 + if (unlikely(!audit_dummy_context())) 391 381 return __audit_mq_open(oflag, mode, u_attr); 392 382 return 0; 393 383 } 394 384 static inline int audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec __user *u_abs_timeout) 395 385 { 396 - if (unlikely(current->audit_context)) 386 + if (unlikely(!audit_dummy_context())) 397 387 return __audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout); 398 388 return 0; 399 389 } 400 390 static inline int audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout) 401 391 { 402 - if (unlikely(current->audit_context)) 392 + if (unlikely(!audit_dummy_context())) 403 393 return __audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout); 404 394 return 0; 405 395 } 406 396 static inline int audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification) 407 397 { 408 - if (unlikely(current->audit_context)) 398 + if (unlikely(!audit_dummy_context())) 409 399 return __audit_mq_notify(mqdes, u_notification); 410 400 return 0; 411 401 } 412 402 static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) 413 403 { 414 - if (unlikely(current->audit_context)) 404 + if (unlikely(!audit_dummy_context())) 415 405 return __audit_mq_getsetattr(mqdes, mqstat); 416 406 return 0; 417 407 } 408 + extern int audit_n_rules; 418 409 #else 419 410 #define audit_alloc(t) ({ 0; }) 420 411 #define audit_free(t) do { ; } while (0) 421 412 #define audit_syscall_entry(ta,a,b,c,d,e) do { ; } while (0) 422 413 #define audit_syscall_exit(f,r) do { ; } while (0) 414 + #define audit_dummy_context() 1 423 415 #define audit_getname(n) do { ; } while (0) 424 416 #define audit_putname(n) do { ; } while (0) 425 417 #define 
__audit_inode(n,i) do { ; } while (0) 426 418 #define __audit_inode_child(d,i,p) do { ; } while (0) 419 + #define __audit_inode_update(i) do { ; } while (0) 427 420 #define audit_inode(n,i) do { ; } while (0) 428 421 #define audit_inode_child(d,i,p) do { ; } while (0) 422 + #define audit_inode_update(i) do { ; } while (0) 429 423 #define auditsc_get_stamp(c,t,s) do { BUG(); } while (0) 430 424 #define audit_get_loginuid(c) ({ -1; }) 431 425 #define audit_ipc_obj(i) ({ 0; }) ··· 444 430 #define audit_mq_timedreceive(d,l,p,t) ({ 0; }) 445 431 #define audit_mq_notify(d,n) ({ 0; }) 446 432 #define audit_mq_getsetattr(d,s) ({ 0; }) 433 + #define audit_n_rules 0 447 434 #endif 448 435 449 436 #ifdef CONFIG_AUDIT
+2
include/linux/debug_locks.h
··· 1 1 #ifndef __LINUX_DEBUG_LOCKING_H 2 2 #define __LINUX_DEBUG_LOCKING_H 3 3 4 + struct task_struct; 5 + 4 6 extern int debug_locks; 5 7 extern int debug_locks_silent; 6 8
+3 -3
include/linux/fsnotify.h
··· 67 67 if (source) { 68 68 inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL); 69 69 } 70 - audit_inode_child(new_name, source, new_dir->i_ino); 70 + audit_inode_child(new_name, source, new_dir); 71 71 } 72 72 73 73 /* ··· 98 98 inode_dir_notify(inode, DN_CREATE); 99 99 inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name, 100 100 dentry->d_inode); 101 - audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino); 101 + audit_inode_child(dentry->d_name.name, dentry->d_inode, inode); 102 102 } 103 103 104 104 /* ··· 109 109 inode_dir_notify(inode, DN_CREATE); 110 110 inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0, 111 111 dentry->d_name.name, dentry->d_inode); 112 - audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino); 112 + audit_inode_child(dentry->d_name.name, dentry->d_inode, inode); 113 113 } 114 114 115 115 /*
+22 -2
include/linux/input.h
··· 893 893 894 894 int (*open)(struct input_dev *dev); 895 895 void (*close)(struct input_dev *dev); 896 - int (*accept)(struct input_dev *dev, struct file *file); 897 896 int (*flush)(struct input_dev *dev, struct file *file); 898 897 int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value); 899 898 int (*upload_effect)(struct input_dev *dev, struct ff_effect *effect); ··· 960 961 961 962 struct input_handle; 962 963 964 + /** 965 + * struct input_handler - implements one of interfaces for input devices 966 + * @private: driver-specific data 967 + * @event: event handler 968 + * @connect: called when attaching a handler to an input device 969 + * @disconnect: disconnects a handler from input device 970 + * @start: starts handler for given handle. This function is called by 971 + * input core right after connect() method and also when a process 972 + * that "grabbed" a device releases it 973 + * @fops: file operations this driver implements 974 + * @minor: beginning of range of 32 minors for devices this driver 975 + * can provide 976 + * @name: name of the handler, to be shown in /proc/bus/input/handlers 977 + * @id_table: pointer to a table of input_device_ids this driver can 978 + * handle 979 + * @blacklist: prointer to a table of input_device_ids this driver should 980 + * ignore even if they match @id_table 981 + * @h_list: list of input handles associated with the handler 982 + * @node: for placing the driver onto input_handler_list 983 + */ 963 984 struct input_handler { 964 985 965 986 void *private; ··· 987 968 void (*event)(struct input_handle *handle, unsigned int type, unsigned int code, int value); 988 969 struct input_handle* (*connect)(struct input_handler *handler, struct input_dev *dev, struct input_device_id *id); 989 970 void (*disconnect)(struct input_handle *handle); 971 + void (*start)(struct input_handle *handle); 990 972 991 973 const struct file_operations *fops; 992 974 int minor; ··· 1050 1030 int 
input_open_device(struct input_handle *); 1051 1031 void input_close_device(struct input_handle *); 1052 1032 1053 - int input_accept_process(struct input_handle *handle, struct file *file); 1054 1033 int input_flush_device(struct input_handle* handle, struct file* file); 1055 1034 1056 1035 void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); 1036 + void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value); 1057 1037 1058 1038 static inline void input_report_key(struct input_dev *dev, unsigned int code, int value) 1059 1039 {
-2
include/linux/kobject.h
··· 46 46 KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */ 47 47 KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */ 48 48 KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */ 49 - KOBJ_UNDOCK = (__force kobject_action_t) 0x08, /* undocking */ 50 - KOBJ_DOCK = (__force kobject_action_t) 0x09, /* dock */ 51 49 }; 52 50 53 51 struct kobject {
-1
include/linux/lockd/lockd.h
··· 123 123 unsigned int b_id; /* block id */ 124 124 unsigned char b_queued; /* re-queued */ 125 125 unsigned char b_granted; /* VFS granted lock */ 126 - unsigned char b_done; /* callback complete */ 127 126 struct nlm_file * b_file; /* file in question */ 128 127 }; 129 128
-1
include/linux/netfilter_bridge.h
··· 6 6 7 7 #include <linux/netfilter.h> 8 8 #if defined(__KERNEL__) && defined(CONFIG_BRIDGE_NETFILTER) 9 - #include <asm/atomic.h> 10 9 #include <linux/if_ether.h> 11 10 #endif 12 11
+2 -4
include/linux/nfs_fs.h
··· 476 476 } 477 477 478 478 /* 479 - * Allocate and free nfs_write_data structures 479 + * Allocate nfs_write_data structures 480 480 */ 481 481 extern struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount); 482 - extern void nfs_writedata_free(struct nfs_write_data *p); 483 482 484 483 /* 485 484 * linux/fs/nfs/read.c ··· 490 491 extern void nfs_readdata_release(void *data); 491 492 492 493 /* 493 - * Allocate and free nfs_read_data structures 494 + * Allocate nfs_read_data structures 494 495 */ 495 496 extern struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount); 496 - extern void nfs_readdata_free(struct nfs_read_data *p); 497 497 498 498 /* 499 499 * linux/fs/nfs3proc.c
+1
include/linux/pci_ids.h
··· 2142 2142 #define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501 2143 2143 #define PCI_DEVICE_ID_INTEL_82850_HB 0x2530 2144 2144 #define PCI_DEVICE_ID_INTEL_82860_HB 0x2531 2145 + #define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c 2145 2146 #define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560 2146 2147 #define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562 2147 2148 #define PCI_DEVICE_ID_INTEL_82865_HB 0x2570
+8
include/linux/sched.h
··· 1558 1558 } 1559 1559 1560 1560 /* 1561 + * Sometimes we may need to cancel the previous 'freeze' request 1562 + */ 1563 + static inline void do_not_freeze(struct task_struct *p) 1564 + { 1565 + p->flags &= ~PF_FREEZE; 1566 + } 1567 + 1568 + /* 1561 1569 * Wake up a frozen process 1562 1570 */ 1563 1571 static inline int thaw_process(struct task_struct *p)
+34 -6
include/linux/security.h
··· 1109 1109 * @name contains the name of the security module being unstacked. 1110 1110 * @ops contains a pointer to the struct security_operations of the module to unstack. 1111 1111 * 1112 + * @secid_to_secctx: 1113 + * Convert secid to security context. 1114 + * @secid contains the security ID. 1115 + * @secdata contains the pointer that stores the converted security context. 1116 + * 1117 + * @release_secctx: 1118 + * Release the security context. 1119 + * @secdata contains the security context. 1120 + * @seclen contains the length of the security context. 1121 + * 1112 1122 * This is the main security structure. 1113 1123 */ 1114 1124 struct security_operations { ··· 1299 1289 1300 1290 int (*getprocattr)(struct task_struct *p, char *name, void *value, size_t size); 1301 1291 int (*setprocattr)(struct task_struct *p, char *name, void *value, size_t size); 1292 + int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen); 1293 + void (*release_secctx)(char *secdata, u32 seclen); 1302 1294 1303 1295 #ifdef CONFIG_SECURITY_NETWORK 1304 1296 int (*unix_stream_connect) (struct socket * sock, ··· 1329 1317 int (*socket_shutdown) (struct socket * sock, int how); 1330 1318 int (*socket_sock_rcv_skb) (struct sock * sk, struct sk_buff * skb); 1331 1319 int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len); 1332 - int (*socket_getpeersec_dgram) (struct sk_buff *skb, char **secdata, u32 *seclen); 1320 + int (*socket_getpeersec_dgram) (struct socket *sock, struct sk_buff *skb, u32 *secid); 1333 1321 int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority); 1334 1322 void (*sk_free_security) (struct sock *sk); 1335 1323 unsigned int (*sk_getsid) (struct sock *sk, struct flowi *fl, u8 dir); ··· 2071 2059 return security_ops->netlink_recv(skb, cap); 2072 2060 } 2073 2061 2062 + static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) 2063 + { 2064 + return 
security_ops->secid_to_secctx(secid, secdata, seclen); 2065 + } 2066 + 2067 + static inline void security_release_secctx(char *secdata, u32 seclen) 2068 + { 2069 + return security_ops->release_secctx(secdata, seclen); 2070 + } 2071 + 2074 2072 /* prototypes */ 2075 2073 extern int security_init (void); 2076 2074 extern int register_security (struct security_operations *ops); ··· 2747 2725 { 2748 2726 } 2749 2727 2728 + static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) 2729 + { 2730 + return -EOPNOTSUPP; 2731 + } 2732 + 2733 + static inline void security_release_secctx(char *secdata, u32 seclen) 2734 + { 2735 + } 2750 2736 #endif /* CONFIG_SECURITY */ 2751 2737 2752 2738 #ifdef CONFIG_SECURITY_NETWORK ··· 2870 2840 return security_ops->socket_getpeersec_stream(sock, optval, optlen, len); 2871 2841 } 2872 2842 2873 - static inline int security_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, 2874 - u32 *seclen) 2843 + static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) 2875 2844 { 2876 - return security_ops->socket_getpeersec_dgram(skb, secdata, seclen); 2845 + return security_ops->socket_getpeersec_dgram(sock, skb, secid); 2877 2846 } 2878 2847 2879 2848 static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority) ··· 2997 2968 return -ENOPROTOOPT; 2998 2969 } 2999 2970 3000 - static inline int security_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, 3001 - u32 *seclen) 2971 + static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) 3002 2972 { 3003 2973 return -ENOPROTOOPT; 3004 2974 }
+30 -3
include/linux/skbuff.h
··· 604 604 return list_->qlen; 605 605 } 606 606 607 - extern struct lock_class_key skb_queue_lock_key; 608 - 607 + /* 608 + * This function creates a split out lock class for each invocation; 609 + * this is needed for now since a whole lot of users of the skb-queue 610 + * infrastructure in drivers have different locking usage (in hardirq) 611 + * than the networking core (in softirq only). In the long run either the 612 + * network layer or drivers should need annotation to consolidate the 613 + * main types of usage into 3 classes. 614 + */ 609 615 static inline void skb_queue_head_init(struct sk_buff_head *list) 610 616 { 611 617 spin_lock_init(&list->lock); 612 - lockdep_set_class(&list->lock, &skb_queue_lock_key); 613 618 list->prev = list->next = (struct sk_buff *)list; 614 619 list->qlen = 0; 615 620 } ··· 1107 1102 static inline struct sk_buff *dev_alloc_skb(unsigned int length) 1108 1103 { 1109 1104 return __dev_alloc_skb(length, GFP_ATOMIC); 1105 + } 1106 + 1107 + extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev, 1108 + unsigned int length, gfp_t gfp_mask); 1109 + 1110 + /** 1111 + * netdev_alloc_skb - allocate an skbuff for rx on a specific device 1112 + * @dev: network device to receive on 1113 + * @length: length to allocate 1114 + * 1115 + * Allocate a new &sk_buff and assign it a usage count of one. The 1116 + * buffer has unspecified headroom built in. Users should allocate 1117 + * the headroom they think they need without accounting for the 1118 + * built in space. The built in space is used for optimisations. 1119 + * 1120 + * %NULL is returned if there is no free memory. Although this function 1121 + * allocates memory it can be called from an interrupt. 1122 + */ 1123 + static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, 1124 + unsigned int length) 1125 + { 1126 + return __netdev_alloc_skb(dev, length, GFP_ATOMIC); 1110 1127 } 1111 1128 1112 1129 /**
+1 -1
include/linux/sunrpc/xprt.h
··· 229 229 int xprt_reserve_xprt_cong(struct rpc_task *task); 230 230 int xprt_prepare_transmit(struct rpc_task *task); 231 231 void xprt_transmit(struct rpc_task *task); 232 - void xprt_abort_transmit(struct rpc_task *task); 232 + void xprt_end_transmit(struct rpc_task *task); 233 233 int xprt_adjust_timeout(struct rpc_rqst *req); 234 234 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task); 235 235 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
+3 -4
include/linux/usb.h
··· 103 103 * @condition: binding state of the interface: not bound, binding 104 104 * (in probe()), bound to a driver, or unbinding (in disconnect()) 105 105 * @dev: driver model's view of this device 106 - * @usb_dev: if an interface is bound to the USB major, this will point 107 - * to the sysfs representation for that device. 106 + * @class_dev: driver model's class view of this device. 108 107 * 109 108 * USB device drivers attach to interfaces on a physical device. Each 110 109 * interface encapsulates a single high level function, such as feeding ··· 143 144 * bound to */ 144 145 enum usb_interface_condition condition; /* state of binding */ 145 146 struct device dev; /* interface specific device info */ 146 - struct device *usb_dev; /* pointer to the usb class's device, if any */ 147 + struct class_device *class_dev; 147 148 }; 148 149 #define to_usb_interface(d) container_of(d, struct usb_interface, dev) 149 150 #define interface_to_usbdev(intf) \ ··· 360 361 char *serial; /* iSerialNumber string, if present */ 361 362 362 363 struct list_head filelist; 363 - struct device *usbfs_dev; 364 + struct class_device *class_dev; 364 365 struct dentry *usbfs_dentry; /* usbfs dentry entry for the device */ 365 366 366 367 /*
+3 -1
include/linux/usb_usual.h
··· 44 44 US_FLAG(NO_WP_DETECT, 0x00000200) \ 45 45 /* Don't check for write-protect */ \ 46 46 US_FLAG(MAX_SECTORS_64, 0x00000400) \ 47 - /* Sets max_sectors to 64 */ 47 + /* Sets max_sectors to 64 */ \ 48 + US_FLAG(IGNORE_DEVICE, 0x00000800) \ 49 + /* Don't claim device */ 48 50 49 51 #define US_FLAG(name, value) US_FL_##name = value , 50 52 enum { US_DO_ALL_FLAGS };
+5 -2
include/linux/videodev.h
··· 12 12 #ifndef __LINUX_VIDEODEV_H 13 13 #define __LINUX_VIDEODEV_H 14 14 15 - #define HAVE_V4L1 1 16 - 17 15 #include <linux/videodev2.h> 16 + 17 + #ifdef CONFIG_VIDEO_V4L1_COMPAT 18 + #define HAVE_V4L1 1 18 19 19 20 struct video_capability 20 21 { ··· 336 335 #define VID_HARDWARE_SAA7114H 37 337 336 #define VID_HARDWARE_SN9C102 38 338 337 #define VID_HARDWARE_ARV 39 338 + 339 + #endif /* CONFIG_VIDEO_V4L1_COMPAT */ 339 340 340 341 #endif /* __LINUX_VIDEODEV_H */ 341 342
+1 -1
include/linux/videodev2.h
··· 716 716 __s64 value64; 717 717 void *reserved; 718 718 }; 719 - }; 719 + } __attribute__ ((packed)); 720 720 721 721 struct v4l2_ext_controls 722 722 {
+4 -4
include/linux/vmstat.h
··· 41 41 42 42 static inline void __count_vm_event(enum vm_event_item item) 43 43 { 44 - __get_cpu_var(vm_event_states.event[item])++; 44 + __get_cpu_var(vm_event_states).event[item]++; 45 45 } 46 46 47 47 static inline void count_vm_event(enum vm_event_item item) 48 48 { 49 - get_cpu_var(vm_event_states.event[item])++; 49 + get_cpu_var(vm_event_states).event[item]++; 50 50 put_cpu(); 51 51 } 52 52 53 53 static inline void __count_vm_events(enum vm_event_item item, long delta) 54 54 { 55 - __get_cpu_var(vm_event_states.event[item]) += delta; 55 + __get_cpu_var(vm_event_states).event[item] += delta; 56 56 } 57 57 58 58 static inline void count_vm_events(enum vm_event_item item, long delta) 59 59 { 60 - get_cpu_var(vm_event_states.event[item]) += delta; 60 + get_cpu_var(vm_event_states).event[item] += delta; 61 61 put_cpu(); 62 62 } 63 63
+5 -2
include/media/v4l2-dev.h
··· 341 341 extern struct video_device* video_devdata(struct file*); 342 342 343 343 #define to_video_device(cd) container_of(cd, struct video_device, class_dev) 344 - static inline void 344 + static inline int 345 345 video_device_create_file(struct video_device *vfd, 346 346 struct class_device_attribute *attr) 347 347 { 348 - class_device_create_file(&vfd->class_dev, attr); 348 + int ret = class_device_create_file(&vfd->class_dev, attr); 349 + if (ret < 0) 350 + printk(KERN_WARNING "%s error: %d\n", __FUNCTION__, ret); 351 + return ret; 349 352 } 350 353 static inline void 351 354 video_device_remove_file(struct video_device *vfd,
+2 -4
include/net/af_unix.h
··· 54 54 struct ucred creds; /* Skb credentials */ 55 55 struct scm_fp_list *fp; /* Passed files */ 56 56 #ifdef CONFIG_SECURITY_NETWORK 57 - char *secdata; /* Security context */ 58 - u32 seclen; /* Security length */ 57 + u32 secid; /* Security ID */ 59 58 #endif 60 59 }; 61 60 62 61 #define UNIXCB(skb) (*(struct unix_skb_parms*)&((skb)->cb)) 63 62 #define UNIXCREDS(skb) (&UNIXCB((skb)).creds) 64 - #define UNIXSECDATA(skb) (&UNIXCB((skb)).secdata) 65 - #define UNIXSECLEN(skb) (&UNIXCB((skb)).seclen) 63 + #define UNIXSID(skb) (&UNIXCB((skb)).secid) 66 64 67 65 #define unix_state_rlock(s) spin_lock(&unix_sk(s)->lock) 68 66 #define unix_state_runlock(s) spin_unlock(&unix_sk(s)->lock)
+9 -3
include/net/ip6_route.h
··· 139 139 /* 140 140 * Store a destination cache entry in a socket 141 141 */ 142 - static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, 143 - struct in6_addr *daddr) 142 + static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst, 143 + struct in6_addr *daddr) 144 144 { 145 145 struct ipv6_pinfo *np = inet6_sk(sk); 146 146 struct rt6_info *rt = (struct rt6_info *) dst; 147 147 148 - write_lock(&sk->sk_dst_lock); 149 148 sk_setup_caps(sk, dst); 150 149 np->daddr_cache = daddr; 151 150 np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; 151 + } 152 + 153 + static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, 154 + struct in6_addr *daddr) 155 + { 156 + write_lock(&sk->sk_dst_lock); 157 + __ip6_dst_store(sk, dst, daddr); 152 158 write_unlock(&sk->sk_dst_lock); 153 159 } 154 160
+3
include/net/ipv6.h
··· 468 468 extern int ip6_dst_lookup(struct sock *sk, 469 469 struct dst_entry **dst, 470 470 struct flowi *fl); 471 + extern int ip6_sk_dst_lookup(struct sock *sk, 472 + struct dst_entry **dst, 473 + struct flowi *fl); 471 474 472 475 /* 473 476 * skb processing functions
+1 -1
include/net/netdma.h
··· 29 29 { 30 30 struct dma_chan *chan; 31 31 rcu_read_lock(); 32 - chan = rcu_dereference(__get_cpu_var(softnet_data.net_dma)); 32 + chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma); 33 33 if (chan) 34 34 dma_chan_get(chan); 35 35 rcu_read_unlock();
+33
include/net/netevent.h
··· 1 + #ifndef _NET_EVENT_H 2 + #define _NET_EVENT_H 3 + 4 + /* 5 + * Generic netevent notifiers 6 + * 7 + * Authors: 8 + * Tom Tucker <tom@opengridcomputing.com> 9 + * Steve Wise <swise@opengridcomputing.com> 10 + * 11 + * Changes: 12 + */ 13 + #ifdef __KERNEL__ 14 + 15 + #include <net/dst.h> 16 + 17 + struct netevent_redirect { 18 + struct dst_entry *old; 19 + struct dst_entry *new; 20 + }; 21 + 22 + enum netevent_notif_type { 23 + NETEVENT_NEIGH_UPDATE = 1, /* arg is struct neighbour ptr */ 24 + NETEVENT_PMTU_UPDATE, /* arg is struct dst_entry ptr */ 25 + NETEVENT_REDIRECT, /* arg is struct netevent_redirect ptr */ 26 + }; 27 + 28 + extern int register_netevent_notifier(struct notifier_block *nb); 29 + extern int unregister_netevent_notifier(struct notifier_block *nb); 30 + extern int call_netevent_notifiers(unsigned long val, void *v); 31 + 32 + #endif 33 + #endif
+1 -1
include/net/red.h
··· 212 212 * Seems, it is the best solution to 213 213 * problem of too coarse exponent tabulation. 214 214 */ 215 - us_idle = (p->qavg * us_idle) >> p->Scell_log; 215 + us_idle = (p->qavg * (u64)us_idle) >> p->Scell_log; 216 216 217 217 if (us_idle < (p->qavg >> 1)) 218 218 return p->qavg - us_idle;
+25 -4
include/net/scm.h
··· 3 3 4 4 #include <linux/limits.h> 5 5 #include <linux/net.h> 6 + #include <linux/security.h> 6 7 7 8 /* Well, we should have at least one descriptor open 8 9 * to accept passed FDs 8) ··· 21 20 struct ucred creds; /* Skb credentials */ 22 21 struct scm_fp_list *fp; /* Passed files */ 23 22 #ifdef CONFIG_SECURITY_NETWORK 24 - char *secdata; /* Security context */ 25 - u32 seclen; /* Security length */ 23 + u32 secid; /* Passed security ID */ 26 24 #endif 27 25 unsigned long seq; /* Connection seqno */ 28 26 }; ··· 31 31 extern int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm); 32 32 extern void __scm_destroy(struct scm_cookie *scm); 33 33 extern struct scm_fp_list * scm_fp_dup(struct scm_fp_list *fpl); 34 + 35 + #ifdef CONFIG_SECURITY_NETWORK 36 + static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm) 37 + { 38 + security_socket_getpeersec_dgram(sock, NULL, &scm->secid); 39 + } 40 + #else 41 + static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm) 42 + { } 43 + #endif /* CONFIG_SECURITY_NETWORK */ 34 44 35 45 static __inline__ void scm_destroy(struct scm_cookie *scm) 36 46 { ··· 57 47 scm->creds.pid = p->tgid; 58 48 scm->fp = NULL; 59 49 scm->seq = 0; 50 + unix_get_peersec_dgram(sock, scm); 60 51 if (msg->msg_controllen <= 0) 61 52 return 0; 62 53 return __scm_send(sock, msg, scm); ··· 66 55 #ifdef CONFIG_SECURITY_NETWORK 67 56 static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) 68 57 { 69 - if (test_bit(SOCK_PASSSEC, &sock->flags) && scm->secdata != NULL) 70 - put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, scm->seclen, scm->secdata); 58 + char *secdata; 59 + u32 seclen; 60 + int err; 61 + 62 + if (test_bit(SOCK_PASSSEC, &sock->flags)) { 63 + err = security_secid_to_secctx(scm->secid, &secdata, &seclen); 64 + 65 + if (!err) { 66 + put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, seclen, secdata); 67 + security_release_secctx(secdata, 
seclen); 68 + } 69 + } 71 70 } 72 71 #else 73 72 static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
+3
include/net/tcp.h
··· 914 914 915 915 static inline void tcp_done(struct sock *sk) 916 916 { 917 + if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 918 + TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); 919 + 917 920 tcp_set_state(sk, TCP_CLOSE); 918 921 tcp_clear_xmit_timers(sk); 919 922
+1 -3
kernel/audit.c
··· 690 690 /* Initialize audit support at boot time. */ 691 691 static int __init audit_init(void) 692 692 { 693 - #ifdef CONFIG_AUDITSYSCALL 694 693 int i; 695 - #endif 696 694 697 695 printk(KERN_INFO "audit: initializing netlink socket (%s)\n", 698 696 audit_default ? "enabled" : "disabled"); ··· 715 717 audit_ih = inotify_init(&audit_inotify_ops); 716 718 if (IS_ERR(audit_ih)) 717 719 audit_panic("cannot initialize inotify handle"); 720 + #endif 718 721 719 722 for (i = 0; i < AUDIT_INODE_BUCKETS; i++) 720 723 INIT_LIST_HEAD(&audit_inode_hash[i]); 721 - #endif 722 724 723 725 return 0; 724 726 }
+26
kernel/auditfilter.c
··· 442 442 case AUDIT_EQUAL: 443 443 break; 444 444 default: 445 + err = -EINVAL; 445 446 goto exit_free; 446 447 } 447 448 } ··· 580 579 case AUDIT_EQUAL: 581 580 break; 582 581 default: 582 + err = -EINVAL; 583 583 goto exit_free; 584 584 } 585 585 } ··· 1136 1134 struct audit_watch *watch = entry->rule.watch; 1137 1135 struct nameidata *ndp, *ndw; 1138 1136 int h, err, putnd_needed = 0; 1137 + #ifdef CONFIG_AUDITSYSCALL 1138 + int dont_count = 0; 1139 + 1140 + /* If either of these, don't count towards total */ 1141 + if (entry->rule.listnr == AUDIT_FILTER_USER || 1142 + entry->rule.listnr == AUDIT_FILTER_TYPE) 1143 + dont_count = 1; 1144 + #endif 1139 1145 1140 1146 if (inode_f) { 1141 1147 h = audit_hash_ino(inode_f->val); ··· 1184 1174 } else { 1185 1175 list_add_tail_rcu(&entry->list, list); 1186 1176 } 1177 + #ifdef CONFIG_AUDITSYSCALL 1178 + if (!dont_count) 1179 + audit_n_rules++; 1180 + #endif 1187 1181 mutex_unlock(&audit_filter_mutex); 1188 1182 1189 1183 if (putnd_needed) ··· 1212 1198 struct audit_watch *watch, *tmp_watch = entry->rule.watch; 1213 1199 LIST_HEAD(inotify_list); 1214 1200 int h, ret = 0; 1201 + #ifdef CONFIG_AUDITSYSCALL 1202 + int dont_count = 0; 1203 + 1204 + /* If either of these, don't count towards total */ 1205 + if (entry->rule.listnr == AUDIT_FILTER_USER || 1206 + entry->rule.listnr == AUDIT_FILTER_TYPE) 1207 + dont_count = 1; 1208 + #endif 1215 1209 1216 1210 if (inode_f) { 1217 1211 h = audit_hash_ino(inode_f->val); ··· 1257 1235 list_del_rcu(&e->list); 1258 1236 call_rcu(&e->rcu, audit_free_rule_rcu); 1259 1237 1238 + #ifdef CONFIG_AUDITSYSCALL 1239 + if (!dont_count) 1240 + audit_n_rules--; 1241 + #endif 1260 1242 mutex_unlock(&audit_filter_mutex); 1261 1243 1262 1244 if (!list_empty(&inotify_list))
+76 -41
kernel/auditsc.c
··· 85 85 /* Indicates that audit should log the full pathname. */ 86 86 #define AUDIT_NAME_FULL -1 87 87 88 + /* number of audit rules */ 89 + int audit_n_rules; 90 + 88 91 /* When fs/namei.c:getname() is called, we store the pointer in name and 89 92 * we don't let putname() free it (instead we free all of the saved 90 93 * pointers at syscall exit time). ··· 177 174 178 175 /* The per-task audit context. */ 179 176 struct audit_context { 177 + int dummy; /* must be the first element */ 180 178 int in_syscall; /* 1 if task is in a syscall */ 181 179 enum audit_state state; 182 180 unsigned int serial; /* serial number for record */ ··· 518 514 context->return_valid = return_valid; 519 515 context->return_code = return_code; 520 516 521 - if (context->in_syscall && !context->auditable) { 517 + if (context->in_syscall && !context->dummy && !context->auditable) { 522 518 enum audit_state state; 523 519 524 520 state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]); ··· 534 530 } 535 531 536 532 get_context: 537 - context->pid = tsk->pid; 538 - context->ppid = sys_getppid(); /* sic. tsk == current in all cases */ 539 - context->uid = tsk->uid; 540 - context->gid = tsk->gid; 541 - context->euid = tsk->euid; 542 - context->suid = tsk->suid; 543 - context->fsuid = tsk->fsuid; 544 - context->egid = tsk->egid; 545 - context->sgid = tsk->sgid; 546 - context->fsgid = tsk->fsgid; 547 - context->personality = tsk->personality; 533 + 548 534 tsk->audit_context = NULL; 549 535 return context; 550 536 } ··· 743 749 const char *tty; 744 750 745 751 /* tsk == current */ 752 + context->pid = tsk->pid; 753 + context->ppid = sys_getppid(); /* sic. 
tsk == current in all cases */ 754 + context->uid = tsk->uid; 755 + context->gid = tsk->gid; 756 + context->euid = tsk->euid; 757 + context->suid = tsk->suid; 758 + context->fsuid = tsk->fsuid; 759 + context->egid = tsk->egid; 760 + context->sgid = tsk->sgid; 761 + context->fsgid = tsk->fsgid; 762 + context->personality = tsk->personality; 746 763 747 764 ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); 748 765 if (!ab) ··· 1071 1066 context->argv[3] = a4; 1072 1067 1073 1068 state = context->state; 1074 - if (state == AUDIT_SETUP_CONTEXT || state == AUDIT_BUILD_CONTEXT) 1069 + context->dummy = !audit_n_rules; 1070 + if (!context->dummy && (state == AUDIT_SETUP_CONTEXT || state == AUDIT_BUILD_CONTEXT)) 1075 1071 state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_ENTRY]); 1076 1072 if (likely(state == AUDIT_DISABLED)) 1077 1073 return; ··· 1205 1199 #endif 1206 1200 } 1207 1201 1208 - static void audit_inode_context(int idx, const struct inode *inode) 1202 + /* Copy inode data into an audit_names. 
*/ 1203 + static void audit_copy_inode(struct audit_names *name, const struct inode *inode) 1209 1204 { 1210 - struct audit_context *context = current->audit_context; 1211 - 1212 - selinux_get_inode_sid(inode, &context->names[idx].osid); 1205 + name->ino = inode->i_ino; 1206 + name->dev = inode->i_sb->s_dev; 1207 + name->mode = inode->i_mode; 1208 + name->uid = inode->i_uid; 1209 + name->gid = inode->i_gid; 1210 + name->rdev = inode->i_rdev; 1211 + selinux_get_inode_sid(inode, &name->osid); 1213 1212 } 1214 - 1215 1213 1216 1214 /** 1217 1215 * audit_inode - store the inode and device from a lookup ··· 1250 1240 ++context->ino_count; 1251 1241 #endif 1252 1242 } 1253 - context->names[idx].ino = inode->i_ino; 1254 - context->names[idx].dev = inode->i_sb->s_dev; 1255 - context->names[idx].mode = inode->i_mode; 1256 - context->names[idx].uid = inode->i_uid; 1257 - context->names[idx].gid = inode->i_gid; 1258 - context->names[idx].rdev = inode->i_rdev; 1259 - audit_inode_context(idx, inode); 1243 + audit_copy_inode(&context->names[idx], inode); 1260 1244 } 1261 1245 1262 1246 /** 1263 1247 * audit_inode_child - collect inode info for created/removed objects 1264 1248 * @dname: inode's dentry name 1265 1249 * @inode: inode being audited 1266 - * @pino: inode number of dentry parent 1250 + * @parent: inode of dentry parent 1267 1251 * 1268 1252 * For syscalls that create or remove filesystem objects, audit_inode 1269 1253 * can only collect information for the filesystem object's parent. ··· 1268 1264 * unsuccessful attempts. 
1269 1265 */ 1270 1266 void __audit_inode_child(const char *dname, const struct inode *inode, 1271 - unsigned long pino) 1267 + const struct inode *parent) 1272 1268 { 1273 1269 int idx; 1274 1270 struct audit_context *context = current->audit_context; ··· 1282 1278 if (!dname) 1283 1279 goto update_context; 1284 1280 for (idx = 0; idx < context->name_count; idx++) 1285 - if (context->names[idx].ino == pino) { 1281 + if (context->names[idx].ino == parent->i_ino) { 1286 1282 const char *name = context->names[idx].name; 1287 1283 1288 1284 if (!name) ··· 1306 1302 context->names[idx].name_len = AUDIT_NAME_FULL; 1307 1303 context->names[idx].name_put = 0; /* don't call __putname() */ 1308 1304 1309 - if (inode) { 1310 - context->names[idx].ino = inode->i_ino; 1311 - context->names[idx].dev = inode->i_sb->s_dev; 1312 - context->names[idx].mode = inode->i_mode; 1313 - context->names[idx].uid = inode->i_uid; 1314 - context->names[idx].gid = inode->i_gid; 1315 - context->names[idx].rdev = inode->i_rdev; 1316 - audit_inode_context(idx, inode); 1317 - } else 1318 - context->names[idx].ino = (unsigned long)-1; 1305 + if (!inode) 1306 + context->names[idx].ino = (unsigned long)-1; 1307 + else 1308 + audit_copy_inode(&context->names[idx], inode); 1309 + 1310 + /* A parent was not found in audit_names, so copy the inode data for the 1311 + * provided parent. */ 1312 + if (!found_name) { 1313 + idx = context->name_count++; 1314 + #if AUDIT_DEBUG 1315 + context->ino_count++; 1316 + #endif 1317 + audit_copy_inode(&context->names[idx], parent); 1318 + } 1319 + } 1320 + 1321 + /** 1322 + * audit_inode_update - update inode info for last collected name 1323 + * @inode: inode being audited 1324 + * 1325 + * When open() is called on an existing object with the O_CREAT flag, the inode 1326 + * data audit initially collects is incorrect. This additional hook ensures 1327 + * audit has the inode data for the actual object to be opened. 
1328 + */ 1329 + void __audit_inode_update(const struct inode *inode) 1330 + { 1331 + struct audit_context *context = current->audit_context; 1332 + int idx; 1333 + 1334 + if (!context->in_syscall || !inode) 1335 + return; 1336 + 1337 + if (context->name_count == 0) { 1338 + context->name_count++; 1339 + #if AUDIT_DEBUG 1340 + context->ino_count++; 1341 + #endif 1342 + } 1343 + idx = context->name_count - 1; 1344 + 1345 + audit_copy_inode(&context->names[idx], inode); 1319 1346 } 1320 1347 1321 1348 /** ··· 1677 1642 unsigned long p, next; 1678 1643 void *to; 1679 1644 1680 - if (likely(!audit_enabled || !context)) 1645 + if (likely(!audit_enabled || !context || context->dummy)) 1681 1646 return 0; 1682 1647 1683 1648 ax = kmalloc(sizeof(*ax) + PAGE_SIZE * MAX_ARG_PAGES - bprm->p, ··· 1715 1680 struct audit_aux_data_socketcall *ax; 1716 1681 struct audit_context *context = current->audit_context; 1717 1682 1718 - if (likely(!context)) 1683 + if (likely(!context || context->dummy)) 1719 1684 return 0; 1720 1685 1721 1686 ax = kmalloc(sizeof(*ax) + nargs * sizeof(unsigned long), GFP_KERNEL); ··· 1743 1708 struct audit_aux_data_sockaddr *ax; 1744 1709 struct audit_context *context = current->audit_context; 1745 1710 1746 - if (likely(!context)) 1711 + if (likely(!context || context->dummy)) 1747 1712 return 0; 1748 1713 1749 1714 ax = kmalloc(sizeof(*ax) + len, GFP_KERNEL);
+3 -1
kernel/fork.c
··· 1387 1387 1388 1388 if (clone_flags & CLONE_VFORK) { 1389 1389 wait_for_completion(&vfork); 1390 - if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) 1390 + if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) { 1391 + current->ptrace_message = nr; 1391 1392 ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP); 1393 + } 1392 1394 } 1393 1395 } else { 1394 1396 free_pid(pid);
+1
kernel/futex.c
··· 948 948 /* In the common case we don't take the spinlock, which is nice. */ 949 949 retry: 950 950 lock_ptr = q->lock_ptr; 951 + barrier(); 951 952 if (lock_ptr != 0) { 952 953 spin_lock(lock_ptr); 953 954 /*
+3 -3
kernel/futex_compat.c
··· 39 39 { 40 40 struct compat_robust_list_head __user *head = curr->compat_robust_list; 41 41 struct robust_list __user *entry, *pending; 42 - unsigned int limit = ROBUST_LIST_LIMIT, pi; 42 + unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; 43 43 compat_uptr_t uentry, upending; 44 44 compat_long_t futex_offset; 45 45 ··· 59 59 * if it exists: 60 60 */ 61 61 if (fetch_robust_entry(&upending, &pending, 62 - &head->list_op_pending, &pi)) 62 + &head->list_op_pending, &pip)) 63 63 return; 64 64 if (upending) 65 - handle_futex_death((void *)pending + futex_offset, curr, pi); 65 + handle_futex_death((void *)pending + futex_offset, curr, pip); 66 66 67 67 while (compat_ptr(uentry) != &head->list) { 68 68 /*
+18 -8
kernel/power/process.c
··· 66 66 } 67 67 } 68 68 69 + static void cancel_freezing(struct task_struct *p) 70 + { 71 + unsigned long flags; 72 + 73 + if (freezing(p)) { 74 + pr_debug(" clean up: %s\n", p->comm); 75 + do_not_freeze(p); 76 + spin_lock_irqsave(&p->sighand->siglock, flags); 77 + recalc_sigpending_tsk(p); 78 + spin_unlock_irqrestore(&p->sighand->siglock, flags); 79 + } 80 + } 81 + 69 82 /* 0 = success, else # of processes that we failed to stop */ 70 83 int freeze_processes(void) 71 84 { 72 85 int todo, nr_user, user_frozen; 73 86 unsigned long start_time; 74 87 struct task_struct *g, *p; 75 - unsigned long flags; 76 88 77 89 printk( "Stopping tasks: " ); 78 90 start_time = jiffies; ··· 97 85 continue; 98 86 if (frozen(p)) 99 87 continue; 88 + if (p->state == TASK_TRACED && frozen(p->parent)) { 89 + cancel_freezing(p); 90 + continue; 91 + } 100 92 if (p->mm && !(p->flags & PF_BORROWED_MM)) { 101 93 /* The task is a user-space one. 102 94 * Freeze it unless there's a vfork completion ··· 142 126 do_each_thread(g, p) { 143 127 if (freezeable(p) && !frozen(p)) 144 128 printk(KERN_ERR " %s\n", p->comm); 145 - if (freezing(p)) { 146 - pr_debug(" clean up: %s\n", p->comm); 147 - p->flags &= ~PF_FREEZE; 148 - spin_lock_irqsave(&p->sighand->siglock, flags); 149 - recalc_sigpending_tsk(p); 150 - spin_unlock_irqrestore(&p->sighand->siglock, flags); 151 - } 129 + cancel_freezing(p); 152 130 } while_each_thread(g, p); 153 131 read_unlock(&tasklist_lock); 154 132 return todo;
+3 -1
kernel/printk.c
··· 799 799 up(&secondary_console_sem); 800 800 return; 801 801 } 802 + 803 + console_may_schedule = 0; 804 + 802 805 for ( ; ; ) { 803 806 spin_lock_irqsave(&logbuf_lock, flags); 804 807 wake_klogd |= log_start - log_end; ··· 815 812 local_irq_restore(flags); 816 813 } 817 814 console_locked = 0; 818 - console_may_schedule = 0; 819 815 up(&console_sem); 820 816 spin_unlock_irqrestore(&logbuf_lock, flags); 821 817 if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait)) {
+6 -3
kernel/resource.c
··· 244 244 245 245 start = res->start; 246 246 end = res->end; 247 + BUG_ON(start >= end); 247 248 248 249 read_lock(&resource_lock); 249 250 for (p = iomem_resource.child; p ; p = p->sibling) { ··· 255 254 p = NULL; 256 255 break; 257 256 } 258 - if (p->start >= start) 257 + if ((p->end >= start) && (p->start < end)) 259 258 break; 260 259 } 261 260 read_unlock(&resource_lock); 262 261 if (!p) 263 262 return -1; 264 263 /* copy data */ 265 - res->start = p->start; 266 - res->end = p->end; 264 + if (res->start < p->start) 265 + res->start = p->start; 266 + if (res->end > p->end) 267 + res->end = p->end; 267 268 return 0; 268 269 } 269 270 #endif
+17 -8
kernel/signal.c
··· 791 791 /* 792 792 * Force a signal that the process can't ignore: if necessary 793 793 * we unblock the signal and change any SIG_IGN to SIG_DFL. 794 + * 795 + * Note: If we unblock the signal, we always reset it to SIG_DFL, 796 + * since we do not want to have a signal handler that was blocked 797 + * be invoked when user space had explicitly blocked it. 798 + * 799 + * We don't want to have recursive SIGSEGV's etc, for example. 794 800 */ 795 - 796 801 int 797 802 force_sig_info(int sig, struct siginfo *info, struct task_struct *t) 798 803 { 799 804 unsigned long int flags; 800 - int ret; 805 + int ret, blocked, ignored; 806 + struct k_sigaction *action; 801 807 802 808 spin_lock_irqsave(&t->sighand->siglock, flags); 803 - if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) { 804 - t->sighand->action[sig-1].sa.sa_handler = SIG_DFL; 809 + action = &t->sighand->action[sig-1]; 810 + ignored = action->sa.sa_handler == SIG_IGN; 811 + blocked = sigismember(&t->blocked, sig); 812 + if (blocked || ignored) { 813 + action->sa.sa_handler = SIG_DFL; 814 + if (blocked) { 815 + sigdelset(&t->blocked, sig); 816 + recalc_sigpending_tsk(t); 817 + } 805 818 } 806 - if (sigismember(&t->blocked, sig)) { 807 - sigdelset(&t->blocked, sig); 808 - } 809 - recalc_sigpending_tsk(t); 810 819 ret = specific_send_sig_info(sig, info, t); 811 820 spin_unlock_irqrestore(&t->sighand->siglock, flags); 812 821
-4
lib/kobject_uevent.c
··· 50 50 return "offline"; 51 51 case KOBJ_ONLINE: 52 52 return "online"; 53 - case KOBJ_DOCK: 54 - return "dock"; 55 - case KOBJ_UNDOCK: 56 - return "undock"; 57 53 default: 58 54 return NULL; 59 55 }
+6 -4
lib/spinlock_debug.c
··· 162 162 163 163 #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg) 164 164 165 + #if 0 /* __write_lock_debug() can lock up - maybe this can too? */ 165 166 static void __read_lock_debug(rwlock_t *lock) 166 167 { 167 168 int print_once = 1; ··· 185 184 } 186 185 } 187 186 } 187 + #endif 188 188 189 189 void _raw_read_lock(rwlock_t *lock) 190 190 { 191 191 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); 192 - if (unlikely(!__raw_read_trylock(&lock->raw_lock))) 193 - __read_lock_debug(lock); 192 + __raw_read_lock(&lock->raw_lock); 194 193 } 195 194 196 195 int _raw_read_trylock(rwlock_t *lock) ··· 236 235 lock->owner_cpu = -1; 237 236 } 238 237 238 + #if 0 /* This can cause lockups */ 239 239 static void __write_lock_debug(rwlock_t *lock) 240 240 { 241 241 int print_once = 1; ··· 259 257 } 260 258 } 261 259 } 260 + #endif 262 261 263 262 void _raw_write_lock(rwlock_t *lock) 264 263 { 265 264 debug_write_lock_before(lock); 266 - if (unlikely(!__raw_write_trylock(&lock->raw_lock))) 267 - __write_lock_debug(lock); 265 + __raw_write_lock(&lock->raw_lock); 268 266 debug_write_lock_after(lock); 269 267 } 270 268
+2 -1
mm/fadvise.c
··· 73 73 file->f_ra.ra_pages = bdi->ra_pages * 2; 74 74 break; 75 75 case POSIX_FADV_WILLNEED: 76 - case POSIX_FADV_NOREUSE: 77 76 if (!mapping->a_ops->readpage) { 78 77 ret = -EINVAL; 79 78 break; ··· 92 93 max_sane_readahead(nrpages)); 93 94 if (ret > 0) 94 95 ret = 0; 96 + break; 97 + case POSIX_FADV_NOREUSE: 95 98 break; 96 99 case POSIX_FADV_DONTNEED: 97 100 if (!bdi_write_congested(mapping->backing_dev_info))
+34 -10
mm/memory_hotplug.c
··· 52 52 int nr_pages = PAGES_PER_SECTION; 53 53 int ret; 54 54 55 + if (pfn_valid(phys_start_pfn)) 56 + return -EEXIST; 57 + 55 58 ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages); 56 59 57 60 if (ret < 0) ··· 79 76 { 80 77 unsigned long i; 81 78 int err = 0; 79 + int start_sec, end_sec; 80 + /* during initialize mem_map, align hot-added range to section */ 81 + start_sec = pfn_to_section_nr(phys_start_pfn); 82 + end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1); 82 83 83 - for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) { 84 - err = __add_section(zone, phys_start_pfn + i); 84 + for (i = start_sec; i <= end_sec; i++) { 85 + err = __add_section(zone, i << PFN_SECTION_SHIFT); 85 86 86 - /* We want to keep adding the rest of the 87 - * sections if the first ones already exist 87 + /* 88 + * EEXIST is finally dealed with by ioresource collision 89 + * check. see add_memory() => register_memory_resource() 90 + * Warning will be printed if there is collision. 88 91 */ 89 92 if (err && (err != -EEXIST)) 90 93 break; 94 + err = 0; 91 95 } 92 96 93 97 return err; ··· 166 156 res.flags = IORESOURCE_MEM; /* we just need system ram */ 167 157 section_end = res.end; 168 158 169 - while (find_next_system_ram(&res) >= 0) { 159 + while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) { 170 160 start_pfn = (unsigned long)(res.start >> PAGE_SHIFT); 171 161 nr_pages = (unsigned long) 172 162 ((res.end + 1 - res.start) >> PAGE_SHIFT); ··· 223 213 } 224 214 225 215 /* add this memory to iomem resource */ 226 - static void register_memory_resource(u64 start, u64 size) 216 + static struct resource *register_memory_resource(u64 start, u64 size) 227 217 { 228 218 struct resource *res; 229 - 230 219 res = kzalloc(sizeof(struct resource), GFP_KERNEL); 231 220 BUG_ON(!res); 232 221 ··· 237 228 printk("System RAM resource %llx - %llx cannot be added\n", 238 229 (unsigned long long)res->start, (unsigned long long)res->end); 239 230 kfree(res); 231 + res = 
NULL; 240 232 } 233 + return res; 234 + } 235 + 236 + static void release_memory_resource(struct resource *res) 237 + { 238 + if (!res) 239 + return; 240 + release_resource(res); 241 + kfree(res); 242 + return; 241 243 } 242 244 243 245 ··· 257 237 { 258 238 pg_data_t *pgdat = NULL; 259 239 int new_pgdat = 0; 240 + struct resource *res; 260 241 int ret; 242 + 243 + res = register_memory_resource(start, size); 244 + if (!res) 245 + return -EEXIST; 261 246 262 247 if (!node_online(nid)) { 263 248 pgdat = hotadd_new_pgdat(nid, start); ··· 293 268 BUG_ON(ret); 294 269 } 295 270 296 - /* register this memory as resource */ 297 - register_memory_resource(start, size); 298 - 299 271 return ret; 300 272 error: 301 273 /* rollback pgdat allocation and others */ 302 274 if (new_pgdat) 303 275 rollback_node_hotadd(nid, pgdat); 276 + if (res) 277 + release_memory_resource(res); 304 278 305 279 return ret; 306 280 }
+1 -1
net/bridge/br_netlink.c
··· 85 85 goto err_out; 86 86 87 87 err = br_fill_ifinfo(skb, port, current->pid, 0, event, 0); 88 - if (err) 88 + if (err < 0) 89 89 goto err_kfree; 90 90 91 91 NETLINK_CB(skb).dst_group = RTNLGRP_LINK;
+1 -1
net/core/Makefile
··· 7 7 8 8 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o 9 9 10 - obj-y += dev.o ethtool.o dev_mcast.o dst.o \ 10 + obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ 11 11 neighbour.o rtnetlink.o utils.o link_watch.o filter.o 12 12 13 13 obj-$(CONFIG_XFRM) += flow.o
+2 -17
net/core/dev.c
··· 1166 1166 goto out_set_summed; 1167 1167 1168 1168 if (unlikely(skb_shinfo(skb)->gso_size)) { 1169 - static int warned; 1170 - 1171 - WARN_ON(!warned); 1172 - warned = 1; 1173 - 1174 1169 /* Let GSO fix up the checksum. */ 1175 1170 goto out_set_summed; 1176 1171 } ··· 1215 1220 __skb_pull(skb, skb->mac_len); 1216 1221 1217 1222 if (unlikely(skb->ip_summed != CHECKSUM_HW)) { 1218 - static int warned; 1219 - 1220 - WARN_ON(!warned); 1221 - warned = 1; 1222 - 1223 1223 if (skb_header_cloned(skb) && 1224 1224 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 1225 1225 return ERR_PTR(err); ··· 3419 3429 unsigned int cpu, i, n; 3420 3430 struct dma_chan *chan; 3421 3431 3422 - lock_cpu_hotplug(); 3423 - 3424 3432 if (net_dma_count == 0) { 3425 3433 for_each_online_cpu(cpu) 3426 - rcu_assign_pointer(per_cpu(softnet_data.net_dma, cpu), NULL); 3427 - unlock_cpu_hotplug(); 3434 + rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL); 3428 3435 return; 3429 3436 } 3430 3437 ··· 3434 3447 + (i < (num_online_cpus() % net_dma_count) ? 1 : 0)); 3435 3448 3436 3449 while(n) { 3437 - per_cpu(softnet_data.net_dma, cpu) = chan; 3450 + per_cpu(softnet_data, cpu).net_dma = chan; 3438 3451 cpu = next_cpu(cpu, cpu_online_map); 3439 3452 n--; 3440 3453 } 3441 3454 i++; 3442 3455 } 3443 3456 rcu_read_unlock(); 3444 - 3445 - unlock_cpu_hotplug(); 3446 3457 } 3447 3458 3448 3459 /**
+8 -6
net/core/neighbour.c
··· 29 29 #include <net/neighbour.h> 30 30 #include <net/dst.h> 31 31 #include <net/sock.h> 32 + #include <net/netevent.h> 32 33 #include <linux/rtnetlink.h> 33 34 #include <linux/random.h> 34 35 #include <linux/string.h> ··· 755 754 neigh->nud_state = NUD_STALE; 756 755 neigh->updated = jiffies; 757 756 neigh_suspect(neigh); 757 + notify = 1; 758 758 } 759 759 } else if (state & NUD_DELAY) { 760 760 if (time_before_eq(now, ··· 764 762 neigh->nud_state = NUD_REACHABLE; 765 763 neigh->updated = jiffies; 766 764 neigh_connect(neigh); 765 + notify = 1; 767 766 next = neigh->confirmed + neigh->parms->reachable_time; 768 767 } else { 769 768 NEIGH_PRINTK2("neigh %p is probed.\n", neigh); ··· 822 819 out: 823 820 write_unlock(&neigh->lock); 824 821 } 822 + if (notify) 823 + call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); 825 824 826 825 #ifdef CONFIG_ARPD 827 826 if (notify && neigh->parms->app_probes) ··· 931 926 { 932 927 u8 old; 933 928 int err; 934 - #ifdef CONFIG_ARPD 935 929 int notify = 0; 936 - #endif 937 930 struct net_device *dev; 938 931 int update_isrouter = 0; 939 932 ··· 951 948 neigh_suspect(neigh); 952 949 neigh->nud_state = new; 953 950 err = 0; 954 - #ifdef CONFIG_ARPD 955 951 notify = old & NUD_VALID; 956 - #endif 957 952 goto out; 958 953 } 959 954 ··· 1023 1022 if (!(new & NUD_CONNECTED)) 1024 1023 neigh->confirmed = jiffies - 1025 1024 (neigh->parms->base_reachable_time << 1); 1026 - #ifdef CONFIG_ARPD 1027 1025 notify = 1; 1028 - #endif 1029 1026 } 1030 1027 if (new == old) 1031 1028 goto out; ··· 1055 1056 (neigh->flags & ~NTF_ROUTER); 1056 1057 } 1057 1058 write_unlock_bh(&neigh->lock); 1059 + 1060 + if (notify) 1061 + call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); 1058 1062 #ifdef CONFIG_ARPD 1059 1063 if (notify && neigh->parms->app_probes) 1060 1064 neigh_app_notify(neigh);
+69
net/core/netevent.c
··· 1 + /* 2 + * Network event notifiers 3 + * 4 + * Authors: 5 + * Tom Tucker <tom@opengridcomputing.com> 6 + * Steve Wise <swise@opengridcomputing.com> 7 + * 8 + * This program is free software; you can redistribute it and/or 9 + * modify it under the terms of the GNU General Public License 10 + * as published by the Free Software Foundation; either version 11 + * 2 of the License, or (at your option) any later version. 12 + * 13 + * Fixes: 14 + */ 15 + 16 + #include <linux/rtnetlink.h> 17 + #include <linux/notifier.h> 18 + 19 + static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain); 20 + 21 + /** 22 + * register_netevent_notifier - register a netevent notifier block 23 + * @nb: notifier 24 + * 25 + * Register a notifier to be called when a netevent occurs. 26 + * The notifier passed is linked into the kernel structures and must 27 + * not be reused until it has been unregistered. A negative errno code 28 + * is returned on a failure. 29 + */ 30 + int register_netevent_notifier(struct notifier_block *nb) 31 + { 32 + int err; 33 + 34 + err = atomic_notifier_chain_register(&netevent_notif_chain, nb); 35 + return err; 36 + } 37 + 38 + /** 39 + * netevent_unregister_notifier - unregister a netevent notifier block 40 + * @nb: notifier 41 + * 42 + * Unregister a notifier previously registered by 43 + * register_neigh_notifier(). The notifier is unlinked into the 44 + * kernel structures and may then be reused. A negative errno code 45 + * is returned on a failure. 46 + */ 47 + 48 + int unregister_netevent_notifier(struct notifier_block *nb) 49 + { 50 + return atomic_notifier_chain_unregister(&netevent_notif_chain, nb); 51 + } 52 + 53 + /** 54 + * call_netevent_notifiers - call all netevent notifier blocks 55 + * @val: value passed unmodified to notifier function 56 + * @v: pointer passed unmodified to notifier function 57 + * 58 + * Call all neighbour notifier blocks. Parameters and return value 59 + * are as for notifier_call_chain(). 
60 + */ 61 + 62 + int call_netevent_notifiers(unsigned long val, void *v) 63 + { 64 + return atomic_notifier_call_chain(&netevent_notif_chain, val, v); 65 + } 66 + 67 + EXPORT_SYMBOL_GPL(register_netevent_notifier); 68 + EXPORT_SYMBOL_GPL(unregister_netevent_notifier); 69 + EXPORT_SYMBOL_GPL(call_netevent_notifiers);
+34 -11
net/core/skbuff.c
··· 71 71 static kmem_cache_t *skbuff_fclone_cache __read_mostly; 72 72 73 73 /* 74 - * lockdep: lock class key used by skb_queue_head_init(): 75 - */ 76 - struct lock_class_key skb_queue_lock_key; 77 - 78 - EXPORT_SYMBOL(skb_queue_lock_key); 79 - 80 - /* 81 74 * Keep out-of-line to prevent kernel bloat. 82 75 * __builtin_return_address is not used because it is not always 83 76 * reliable. ··· 249 256 goto out; 250 257 } 251 258 259 + /** 260 + * __netdev_alloc_skb - allocate an skbuff for rx on a specific device 261 + * @dev: network device to receive on 262 + * @length: length to allocate 263 + * @gfp_mask: get_free_pages mask, passed to alloc_skb 264 + * 265 + * Allocate a new &sk_buff and assign it a usage count of one. The 266 + * buffer has unspecified headroom built in. Users should allocate 267 + * the headroom they think they need without accounting for the 268 + * built in space. The built in space is used for optimisations. 269 + * 270 + * %NULL is returned if there is no free memory. 
271 + */ 272 + struct sk_buff *__netdev_alloc_skb(struct net_device *dev, 273 + unsigned int length, gfp_t gfp_mask) 274 + { 275 + struct sk_buff *skb; 276 + 277 + skb = alloc_skb(length + NET_SKB_PAD, gfp_mask); 278 + if (likely(skb)) 279 + skb_reserve(skb, NET_SKB_PAD); 280 + return skb; 281 + } 252 282 253 283 static void skb_drop_list(struct sk_buff **listp) 254 284 { ··· 862 846 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 863 847 return err; 864 848 865 - for (i = 0; i < nfrags; i++) { 849 + i = 0; 850 + if (offset >= len) 851 + goto drop_pages; 852 + 853 + for (; i < nfrags; i++) { 866 854 int end = offset + skb_shinfo(skb)->frags[i].size; 867 855 868 856 if (end < len) { ··· 874 854 continue; 875 855 } 876 856 877 - if (len > offset) 878 - skb_shinfo(skb)->frags[i++].size = len - offset; 857 + skb_shinfo(skb)->frags[i++].size = len - offset; 879 858 859 + drop_pages: 880 860 skb_shinfo(skb)->nr_frags = i; 881 861 882 862 for (; i < nfrags; i++) ··· 884 864 885 865 if (skb_shinfo(skb)->frag_list) 886 866 skb_drop_fraglist(skb); 887 - break; 867 + goto done; 888 868 } 889 869 890 870 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); ··· 899 879 return -ENOMEM; 900 880 901 881 nfrag->next = frag->next; 882 + kfree_skb(frag); 902 883 frag = nfrag; 903 884 *fragp = frag; 904 885 } ··· 918 897 break; 919 898 } 920 899 900 + done: 921 901 if (len > skb_headlen(skb)) { 922 902 skb->data_len -= skb->len - len; 923 903 skb->len = len; ··· 2064 2042 EXPORT_SYMBOL(kfree_skb); 2065 2043 EXPORT_SYMBOL(__pskb_pull_tail); 2066 2044 EXPORT_SYMBOL(__alloc_skb); 2045 + EXPORT_SYMBOL(__netdev_alloc_skb); 2067 2046 EXPORT_SYMBOL(pskb_copy); 2068 2047 EXPORT_SYMBOL(pskb_expand_head); 2069 2048 EXPORT_SYMBOL(skb_checksum);
+2 -2
net/dccp/ipv6.c
··· 230 230 ipv6_addr_copy(&np->saddr, saddr); 231 231 inet->rcv_saddr = LOOPBACK4_IPV6; 232 232 233 - ip6_dst_store(sk, dst, NULL); 233 + __ip6_dst_store(sk, dst, NULL); 234 234 235 235 icsk->icsk_ext_hdr_len = 0; 236 236 if (np->opt != NULL) ··· 863 863 * comment in that function for the gory details. -acme 864 864 */ 865 865 866 - ip6_dst_store(newsk, dst, NULL); 866 + __ip6_dst_store(newsk, dst, NULL); 867 867 newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM | 868 868 NETIF_F_TSO); 869 869 newdp6 = (struct dccp6_sock *)newsk;
+7 -2
net/decnet/dn_route.c
··· 925 925 for(dev_out = dev_base; dev_out; dev_out = dev_out->next) { 926 926 if (!dev_out->dn_ptr) 927 927 continue; 928 - if (dn_dev_islocal(dev_out, oldflp->fld_src)) 929 - break; 928 + if (!dn_dev_islocal(dev_out, oldflp->fld_src)) 929 + continue; 930 + if ((dev_out->flags & IFF_LOOPBACK) && 931 + oldflp->fld_dst && 932 + !dn_dev_islocal(dev_out, oldflp->fld_dst)) 933 + continue; 934 + break; 930 935 } 931 936 read_unlock(&dev_base_lock); 932 937 if (dev_out == NULL)
+4 -3
net/ipv4/ip_output.c
··· 526 526 527 527 err = output(skb); 528 528 529 + if (!err) 530 + IP_INC_STATS(IPSTATS_MIB_FRAGCREATES); 529 531 if (err || !frag) 530 532 break; 531 533 ··· 651 649 /* 652 650 * Put this fragment into the sending queue. 653 651 */ 654 - 655 - IP_INC_STATS(IPSTATS_MIB_FRAGCREATES); 656 - 657 652 iph->tot_len = htons(len + hlen); 658 653 659 654 ip_send_check(iph); ··· 658 659 err = output(skb2); 659 660 if (err) 660 661 goto fail; 662 + 663 + IP_INC_STATS(IPSTATS_MIB_FRAGCREATES); 661 664 } 662 665 kfree_skb(skb); 663 666 IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
+7 -2
net/ipv4/ip_sockglue.c
··· 112 112 static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb) 113 113 { 114 114 char *secdata; 115 - u32 seclen; 115 + u32 seclen, secid; 116 116 int err; 117 117 118 - err = security_socket_getpeersec_dgram(skb, &secdata, &seclen); 118 + err = security_socket_getpeersec_dgram(NULL, skb, &secid); 119 + if (err) 120 + return; 121 + 122 + err = security_secid_to_secctx(secid, &secdata, &seclen); 119 123 if (err) 120 124 return; 121 125 122 126 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata); 127 + security_release_secctx(secdata, seclen); 123 128 } 124 129 125 130
+1 -1
net/ipv4/netfilter/ip_conntrack_sip.c
··· 442 442 sip[i].tuple.src.u.udp.port = htons(ports[i]); 443 443 sip[i].mask.src.u.udp.port = 0xFFFF; 444 444 sip[i].mask.dst.protonum = 0xFF; 445 - sip[i].max_expected = 1; 445 + sip[i].max_expected = 2; 446 446 sip[i].timeout = 3 * 60; /* 3 minutes */ 447 447 sip[i].me = THIS_MODULE; 448 448 sip[i].help = sip_help;
+3
net/ipv4/netfilter/ipt_hashlimit.c
··· 508 508 if (!r->cfg.expire) 509 509 return 0; 510 510 511 + if (r->name[sizeof(r->name) - 1] != '\0') 512 + return 0; 513 + 511 514 /* This is the best we've got: We cannot release and re-grab lock, 512 515 * since checkentry() is called before ip_tables.c grabs ipt_mutex. 513 516 * We also cannot grab the hashtable spinlock, since htable_create will
+8
net/ipv4/route.c
··· 104 104 #include <net/icmp.h> 105 105 #include <net/xfrm.h> 106 106 #include <net/ip_mp_alg.h> 107 + #include <net/netevent.h> 107 108 #ifdef CONFIG_SYSCTL 108 109 #include <linux/sysctl.h> 109 110 #endif ··· 1126 1125 struct rtable *rth, **rthp; 1127 1126 u32 skeys[2] = { saddr, 0 }; 1128 1127 int ikeys[2] = { dev->ifindex, 0 }; 1128 + struct netevent_redirect netevent; 1129 1129 1130 1130 if (!in_dev) 1131 1131 return; ··· 1218 1216 rt_drop(rt); 1219 1217 goto do_next; 1220 1218 } 1219 + 1220 + netevent.old = &rth->u.dst; 1221 + netevent.new = &rt->u.dst; 1222 + call_netevent_notifiers(NETEVENT_REDIRECT, 1223 + &netevent); 1221 1224 1222 1225 rt_del(hash, rth); 1223 1226 if (!rt_intern_hash(hash, rt, &rt)) ··· 1459 1452 } 1460 1453 dst->metrics[RTAX_MTU-1] = mtu; 1461 1454 dst_set_expires(dst, ip_rt_mtu_expires); 1455 + call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst); 1462 1456 } 1463 1457 } 1464 1458
+3 -2
net/ipv4/tcp.c
··· 1132 1132 tp->ucopy.dma_chan = NULL; 1133 1133 preempt_disable(); 1134 1134 if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && 1135 - !sysctl_tcp_low_latency && __get_cpu_var(softnet_data.net_dma)) { 1135 + !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) { 1136 1136 preempt_enable_no_resched(); 1137 1137 tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len); 1138 1138 } else ··· 1659 1659 const int tmo = tcp_fin_time(sk); 1660 1660 1661 1661 if (tmo > TCP_TIMEWAIT_LEN) { 1662 - inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk)); 1662 + inet_csk_reset_keepalive_timer(sk, 1663 + tmo - TCP_TIMEWAIT_LEN); 1663 1664 } else { 1664 1665 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 1665 1666 goto out;
+2 -1
net/ipv4/tcp_input.c
··· 3541 3541 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && 3542 3542 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 3543 3543 /* Limited by application or receiver window. */ 3544 - u32 win_used = max(tp->snd_cwnd_used, 2U); 3544 + u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); 3545 + u32 win_used = max(tp->snd_cwnd_used, init_win); 3545 3546 if (win_used < tp->snd_cwnd) { 3546 3547 tp->snd_ssthresh = tcp_current_ssthresh(sk); 3547 3548 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
-2
net/ipv4/tcp_ipv4.c
··· 438 438 It can f.e. if SYNs crossed. 439 439 */ 440 440 if (!sock_owned_by_user(sk)) { 441 - TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); 442 441 sk->sk_err = err; 443 442 444 443 sk->sk_error_report(sk); ··· 873 874 drop_and_free: 874 875 reqsk_free(req); 875 876 drop: 876 - TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); 877 877 return 0; 878 878 } 879 879
+3 -1
net/ipv4/tcp_minisocks.c
··· 589 589 /* RFC793: "second check the RST bit" and 590 590 * "fourth, check the SYN bit" 591 591 */ 592 - if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) 592 + if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) { 593 + TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); 593 594 goto embryonic_reset; 595 + } 594 596 595 597 /* ACK sequence verified above, just make sure ACK is 596 598 * set. If ACK not set, just silently drop the packet.
+1 -1
net/ipv4/tcp_probe.c
··· 114 114 static ssize_t tcpprobe_read(struct file *file, char __user *buf, 115 115 size_t len, loff_t *ppos) 116 116 { 117 - int error = 0, cnt; 117 + int error = 0, cnt = 0; 118 118 unsigned char *tbuf; 119 119 120 120 if (!buf || len < 0)
+166 -8
net/ipv6/addrconf.c
··· 1869 1869 /* 1870 1870 * Manual configuration of address on an interface 1871 1871 */ 1872 - static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen) 1872 + static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen, 1873 + __u32 prefered_lft, __u32 valid_lft) 1873 1874 { 1874 1875 struct inet6_ifaddr *ifp; 1875 1876 struct inet6_dev *idev; 1876 1877 struct net_device *dev; 1878 + __u8 ifa_flags = 0; 1877 1879 int scope; 1878 1880 1879 1881 ASSERT_RTNL(); 1880 1882 1883 + /* check the lifetime */ 1884 + if (!valid_lft || prefered_lft > valid_lft) 1885 + return -EINVAL; 1886 + 1881 1887 if ((dev = __dev_get_by_index(ifindex)) == NULL) 1882 1888 return -ENODEV; 1883 1889 ··· 1895 1889 1896 1890 scope = ipv6_addr_scope(pfx); 1897 1891 1898 - ifp = ipv6_add_addr(idev, pfx, plen, scope, IFA_F_PERMANENT); 1892 + if (valid_lft == INFINITY_LIFE_TIME) 1893 + ifa_flags |= IFA_F_PERMANENT; 1894 + else if (valid_lft >= 0x7FFFFFFF/HZ) 1895 + valid_lft = 0x7FFFFFFF/HZ; 1896 + 1897 + if (prefered_lft == 0) 1898 + ifa_flags |= IFA_F_DEPRECATED; 1899 + else if ((prefered_lft >= 0x7FFFFFFF/HZ) && 1900 + (prefered_lft != INFINITY_LIFE_TIME)) 1901 + prefered_lft = 0x7FFFFFFF/HZ; 1902 + 1903 + ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags); 1904 + 1899 1905 if (!IS_ERR(ifp)) { 1906 + spin_lock(&ifp->lock); 1907 + ifp->valid_lft = valid_lft; 1908 + ifp->prefered_lft = prefered_lft; 1909 + ifp->tstamp = jiffies; 1910 + spin_unlock(&ifp->lock); 1911 + 1900 1912 addrconf_dad_start(ifp, 0); 1901 1913 in6_ifa_put(ifp); 1914 + addrconf_verify(0); 1902 1915 return 0; 1903 1916 } 1904 1917 ··· 1970 1945 return -EFAULT; 1971 1946 1972 1947 rtnl_lock(); 1973 - err = inet6_addr_add(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen); 1948 + err = inet6_addr_add(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen, 1949 + INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); 1974 1950 rtnl_unlock(); 1975 1951 return err; 1976 1952 } ··· 2797 2771 
ifp->idev->nd_parms->retrans_time / HZ; 2798 2772 #endif 2799 2773 2800 - if (age >= ifp->valid_lft) { 2774 + if (ifp->valid_lft != INFINITY_LIFE_TIME && 2775 + age >= ifp->valid_lft) { 2801 2776 spin_unlock(&ifp->lock); 2802 2777 in6_ifa_hold(ifp); 2803 2778 read_unlock(&addrconf_hash_lock); 2804 2779 ipv6_del_addr(ifp); 2805 2780 goto restart; 2781 + } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) { 2782 + spin_unlock(&ifp->lock); 2783 + continue; 2806 2784 } else if (age >= ifp->prefered_lft) { 2807 2785 /* jiffies - ifp->tsamp > age >= ifp->prefered_lft */ 2808 2786 int deprecate = 0; ··· 2883 2853 pfx = RTA_DATA(rta[IFA_ADDRESS-1]); 2884 2854 } 2885 2855 if (rta[IFA_LOCAL-1]) { 2886 - if (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx))) 2856 + if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*pfx) || 2857 + (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx)))) 2887 2858 return -EINVAL; 2888 2859 pfx = RTA_DATA(rta[IFA_LOCAL-1]); 2889 2860 } ··· 2895 2864 } 2896 2865 2897 2866 static int 2867 + inet6_addr_modify(int ifindex, struct in6_addr *pfx, 2868 + __u32 prefered_lft, __u32 valid_lft) 2869 + { 2870 + struct inet6_ifaddr *ifp = NULL; 2871 + struct net_device *dev; 2872 + int ifa_flags = 0; 2873 + 2874 + if ((dev = __dev_get_by_index(ifindex)) == NULL) 2875 + return -ENODEV; 2876 + 2877 + if (!(dev->flags&IFF_UP)) 2878 + return -ENETDOWN; 2879 + 2880 + if (!valid_lft || (prefered_lft > valid_lft)) 2881 + return -EINVAL; 2882 + 2883 + ifp = ipv6_get_ifaddr(pfx, dev, 1); 2884 + if (ifp == NULL) 2885 + return -ENOENT; 2886 + 2887 + if (valid_lft == INFINITY_LIFE_TIME) 2888 + ifa_flags = IFA_F_PERMANENT; 2889 + else if (valid_lft >= 0x7FFFFFFF/HZ) 2890 + valid_lft = 0x7FFFFFFF/HZ; 2891 + 2892 + if (prefered_lft == 0) 2893 + ifa_flags = IFA_F_DEPRECATED; 2894 + else if ((prefered_lft >= 0x7FFFFFFF/HZ) && 2895 + (prefered_lft != INFINITY_LIFE_TIME)) 2896 + prefered_lft = 0x7FFFFFFF/HZ; 2897 + 2898 + spin_lock_bh(&ifp->lock); 2899 + ifp->flags 
= (ifp->flags & ~(IFA_F_DEPRECATED|IFA_F_PERMANENT)) | ifa_flags; 2900 + 2901 + ifp->tstamp = jiffies; 2902 + ifp->valid_lft = valid_lft; 2903 + ifp->prefered_lft = prefered_lft; 2904 + 2905 + spin_unlock_bh(&ifp->lock); 2906 + if (!(ifp->flags&IFA_F_TENTATIVE)) 2907 + ipv6_ifa_notify(0, ifp); 2908 + in6_ifa_put(ifp); 2909 + 2910 + addrconf_verify(0); 2911 + 2912 + return 0; 2913 + } 2914 + 2915 + static int 2898 2916 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 2899 2917 { 2900 2918 struct rtattr **rta = arg; 2901 2919 struct ifaddrmsg *ifm = NLMSG_DATA(nlh); 2902 2920 struct in6_addr *pfx; 2921 + __u32 valid_lft = INFINITY_LIFE_TIME, prefered_lft = INFINITY_LIFE_TIME; 2903 2922 2904 2923 pfx = NULL; 2905 2924 if (rta[IFA_ADDRESS-1]) { ··· 2958 2877 pfx = RTA_DATA(rta[IFA_ADDRESS-1]); 2959 2878 } 2960 2879 if (rta[IFA_LOCAL-1]) { 2961 - if (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx))) 2880 + if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*pfx) || 2881 + (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx)))) 2962 2882 return -EINVAL; 2963 2883 pfx = RTA_DATA(rta[IFA_LOCAL-1]); 2964 2884 } 2965 2885 if (pfx == NULL) 2966 2886 return -EINVAL; 2967 2887 2968 - return inet6_addr_add(ifm->ifa_index, pfx, ifm->ifa_prefixlen); 2888 + if (rta[IFA_CACHEINFO-1]) { 2889 + struct ifa_cacheinfo *ci; 2890 + if (RTA_PAYLOAD(rta[IFA_CACHEINFO-1]) < sizeof(*ci)) 2891 + return -EINVAL; 2892 + ci = RTA_DATA(rta[IFA_CACHEINFO-1]); 2893 + valid_lft = ci->ifa_valid; 2894 + prefered_lft = ci->ifa_prefered; 2895 + } 2896 + 2897 + if (nlh->nlmsg_flags & NLM_F_REPLACE) { 2898 + int ret; 2899 + ret = inet6_addr_modify(ifm->ifa_index, pfx, 2900 + prefered_lft, valid_lft); 2901 + if (ret == 0 || !(nlh->nlmsg_flags & NLM_F_CREATE)) 2902 + return ret; 2903 + } 2904 + 2905 + return inet6_addr_add(ifm->ifa_index, pfx, ifm->ifa_prefixlen, 2906 + prefered_lft, valid_lft); 2907 + 2969 2908 } 2970 2909 2971 2910 /* Maximum length of ifa_cacheinfo 
attributes */ ··· 3220 3119 { 3221 3120 enum addr_type_t type = ANYCAST_ADDR; 3222 3121 return inet6_dump_addr(skb, cb, type); 3122 + } 3123 + 3124 + static int inet6_rtm_getaddr(struct sk_buff *in_skb, 3125 + struct nlmsghdr* nlh, void *arg) 3126 + { 3127 + struct rtattr **rta = arg; 3128 + struct ifaddrmsg *ifm = NLMSG_DATA(nlh); 3129 + struct in6_addr *addr = NULL; 3130 + struct net_device *dev = NULL; 3131 + struct inet6_ifaddr *ifa; 3132 + struct sk_buff *skb; 3133 + int size = NLMSG_SPACE(sizeof(struct ifaddrmsg) + INET6_IFADDR_RTA_SPACE); 3134 + int err; 3135 + 3136 + if (rta[IFA_ADDRESS-1]) { 3137 + if (RTA_PAYLOAD(rta[IFA_ADDRESS-1]) < sizeof(*addr)) 3138 + return -EINVAL; 3139 + addr = RTA_DATA(rta[IFA_ADDRESS-1]); 3140 + } 3141 + if (rta[IFA_LOCAL-1]) { 3142 + if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*addr) || 3143 + (addr && memcmp(addr, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*addr)))) 3144 + return -EINVAL; 3145 + addr = RTA_DATA(rta[IFA_LOCAL-1]); 3146 + } 3147 + if (addr == NULL) 3148 + return -EINVAL; 3149 + 3150 + if (ifm->ifa_index) 3151 + dev = __dev_get_by_index(ifm->ifa_index); 3152 + 3153 + if ((ifa = ipv6_get_ifaddr(addr, dev, 1)) == NULL) 3154 + return -EADDRNOTAVAIL; 3155 + 3156 + if ((skb = alloc_skb(size, GFP_KERNEL)) == NULL) { 3157 + err = -ENOBUFS; 3158 + goto out; 3159 + } 3160 + 3161 + NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid; 3162 + err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid, 3163 + nlh->nlmsg_seq, RTM_NEWADDR, 0); 3164 + if (err < 0) { 3165 + err = -EMSGSIZE; 3166 + goto out_free; 3167 + } 3168 + 3169 + err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT); 3170 + if (err > 0) 3171 + err = 0; 3172 + out: 3173 + in6_ifa_put(ifa); 3174 + return err; 3175 + out_free: 3176 + kfree_skb(skb); 3177 + goto out; 3223 3178 } 3224 3179 3225 3180 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) ··· 3520 3363 [RTM_GETLINK - RTM_BASE] = { .dumpit = inet6_dump_ifinfo, }, 3521 3364 [RTM_NEWADDR 
- RTM_BASE] = { .doit = inet6_rtm_newaddr, }, 3522 3365 [RTM_DELADDR - RTM_BASE] = { .doit = inet6_rtm_deladdr, }, 3523 - [RTM_GETADDR - RTM_BASE] = { .dumpit = inet6_dump_ifaddr, }, 3366 + [RTM_GETADDR - RTM_BASE] = { .doit = inet6_rtm_getaddr, 3367 + .dumpit = inet6_dump_ifaddr, }, 3524 3368 [RTM_GETMULTICAST - RTM_BASE] = { .dumpit = inet6_dump_ifmcaddr, }, 3525 3369 [RTM_GETANYCAST - RTM_BASE] = { .dumpit = inet6_dump_ifacaddr, }, 3526 3370 [RTM_NEWROUTE - RTM_BASE] = { .doit = inet6_rtm_newroute, },
+1 -1
net/ipv6/af_inet6.c
··· 658 658 return err; 659 659 } 660 660 661 - ip6_dst_store(sk, dst, NULL); 661 + __ip6_dst_store(sk, dst, NULL); 662 662 } 663 663 664 664 return 0;
+1 -1
net/ipv6/inet6_connection_sock.c
··· 185 185 return err; 186 186 } 187 187 188 - ip6_dst_store(sk, dst, NULL); 188 + __ip6_dst_store(sk, dst, NULL); 189 189 } 190 190 191 191 skb->dst = dst_clone(dst);
+87 -42
net/ipv6/ip6_output.c
··· 356 356 skb->dev = dst->dev; 357 357 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 358 358 0, skb->dev); 359 + IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); 359 360 360 361 kfree_skb(skb); 361 362 return -ETIMEDOUT; ··· 596 595 } 597 596 598 597 err = output(skb); 598 + if(!err) 599 + IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES); 600 + 599 601 if (err || !frag) 600 602 break; 601 603 ··· 710 706 /* 711 707 * Put this fragment into the sending queue. 712 708 */ 713 - 714 - IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES); 715 - 716 709 err = output(frag); 717 710 if (err) 718 711 goto fail; 712 + 713 + IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES); 719 714 } 720 715 kfree_skb(skb); 721 716 IP6_INC_STATS(IPSTATS_MIB_FRAGOKS); ··· 726 723 return err; 727 724 } 728 725 729 - int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) 726 + static struct dst_entry *ip6_sk_dst_check(struct sock *sk, 727 + struct dst_entry *dst, 728 + struct flowi *fl) 730 729 { 731 - int err = 0; 730 + struct ipv6_pinfo *np = inet6_sk(sk); 731 + struct rt6_info *rt = (struct rt6_info *)dst; 732 732 733 - *dst = NULL; 734 - if (sk) { 735 - struct ipv6_pinfo *np = inet6_sk(sk); 736 - 737 - *dst = sk_dst_check(sk, np->dst_cookie); 738 - if (*dst) { 739 - struct rt6_info *rt = (struct rt6_info*)*dst; 740 - 741 - /* Yes, checking route validity in not connected 742 - * case is not very simple. Take into account, 743 - * that we do not support routing by source, TOS, 744 - * and MSG_DONTROUTE --ANK (980726) 745 - * 746 - * 1. If route was host route, check that 747 - * cached destination is current. 748 - * If it is network route, we still may 749 - * check its validity using saved pointer 750 - * to the last used address: daddr_cache. 751 - * We do not want to save whole address now, 752 - * (because main consumer of this service 753 - * is tcp, which has not this problem), 754 - * so that the last trick works only on connected 755 - * sockets. 756 - * 2. oif also should be the same. 
757 - */ 758 - if (((rt->rt6i_dst.plen != 128 || 759 - !ipv6_addr_equal(&fl->fl6_dst, 760 - &rt->rt6i_dst.addr)) 761 - && (np->daddr_cache == NULL || 762 - !ipv6_addr_equal(&fl->fl6_dst, 763 - np->daddr_cache))) 764 - || (fl->oif && fl->oif != (*dst)->dev->ifindex)) { 765 - dst_release(*dst); 766 - *dst = NULL; 767 - } 768 - } 733 + if (!dst) 734 + goto out; 735 + 736 + /* Yes, checking route validity in not connected 737 + * case is not very simple. Take into account, 738 + * that we do not support routing by source, TOS, 739 + * and MSG_DONTROUTE --ANK (980726) 740 + * 741 + * 1. If route was host route, check that 742 + * cached destination is current. 743 + * If it is network route, we still may 744 + * check its validity using saved pointer 745 + * to the last used address: daddr_cache. 746 + * We do not want to save whole address now, 747 + * (because main consumer of this service 748 + * is tcp, which has not this problem), 749 + * so that the last trick works only on connected 750 + * sockets. 751 + * 2. oif also should be the same. 
752 + */ 753 + if (((rt->rt6i_dst.plen != 128 || 754 + !ipv6_addr_equal(&fl->fl6_dst, &rt->rt6i_dst.addr)) 755 + && (np->daddr_cache == NULL || 756 + !ipv6_addr_equal(&fl->fl6_dst, np->daddr_cache))) 757 + || (fl->oif && fl->oif != dst->dev->ifindex)) { 758 + dst_release(dst); 759 + dst = NULL; 769 760 } 761 + 762 + out: 763 + return dst; 764 + } 765 + 766 + static int ip6_dst_lookup_tail(struct sock *sk, 767 + struct dst_entry **dst, struct flowi *fl) 768 + { 769 + int err; 770 770 771 771 if (*dst == NULL) 772 772 *dst = ip6_route_output(sk, fl); ··· 779 773 780 774 if (ipv6_addr_any(&fl->fl6_src)) { 781 775 err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src); 782 - 783 776 if (err) 784 777 goto out_err_release; 785 778 } ··· 791 786 return err; 792 787 } 793 788 789 + /** 790 + * ip6_dst_lookup - perform route lookup on flow 791 + * @sk: socket which provides route info 792 + * @dst: pointer to dst_entry * for result 793 + * @fl: flow to lookup 794 + * 795 + * This function performs a route lookup on the given flow. 796 + * 797 + * It returns zero on success, or a standard errno code on error. 798 + */ 799 + int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) 800 + { 801 + *dst = NULL; 802 + return ip6_dst_lookup_tail(sk, dst, fl); 803 + } 794 804 EXPORT_SYMBOL_GPL(ip6_dst_lookup); 805 + 806 + /** 807 + * ip6_sk_dst_lookup - perform socket cached route lookup on flow 808 + * @sk: socket which provides the dst cache and route info 809 + * @dst: pointer to dst_entry * for result 810 + * @fl: flow to lookup 811 + * 812 + * This function performs a route lookup on the given flow with the 813 + * possibility of using the cached route in the socket if it is valid. 814 + * It will take the socket dst lock when operating on the dst cache. 815 + * As a result, this function can only be used in process context. 816 + * 817 + * It returns zero on success, or a standard errno code on error. 
818 + */ 819 + int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) 820 + { 821 + *dst = NULL; 822 + if (sk) { 823 + *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); 824 + *dst = ip6_sk_dst_check(sk, *dst, fl); 825 + } 826 + 827 + return ip6_dst_lookup_tail(sk, dst, fl); 828 + } 829 + EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup); 795 830 796 831 static inline int ip6_ufo_append_data(struct sock *sk, 797 832 int getfrag(void *from, char *to, int offset, int len,
+7
net/ipv6/route.c
··· 53 53 #include <linux/rtnetlink.h> 54 54 #include <net/dst.h> 55 55 #include <net/xfrm.h> 56 + #include <net/netevent.h> 56 57 57 58 #include <asm/uaccess.h> 58 59 ··· 743 742 dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; 744 743 } 745 744 dst->metrics[RTAX_MTU-1] = mtu; 745 + call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst); 746 746 } 747 747 } 748 748 ··· 1157 1155 struct rt6_info *rt, *nrt = NULL; 1158 1156 int strict; 1159 1157 struct fib6_node *fn; 1158 + struct netevent_redirect netevent; 1160 1159 1161 1160 /* 1162 1161 * Get the "current" route for this destination and ··· 1254 1251 1255 1252 if (ip6_ins_rt(nrt, NULL, NULL, NULL)) 1256 1253 goto out; 1254 + 1255 + netevent.old = &rt->u.dst; 1256 + netevent.new = &nrt->u.dst; 1257 + call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); 1257 1258 1258 1259 if (rt->rt6i_flags&RTF_CACHE) { 1259 1260 ip6_del_rt(rt, NULL, NULL, NULL);
+2 -4
net/ipv6/tcp_ipv6.c
··· 270 270 inet->rcv_saddr = LOOPBACK4_IPV6; 271 271 272 272 sk->sk_gso_type = SKB_GSO_TCPV6; 273 - ip6_dst_store(sk, dst, NULL); 273 + __ip6_dst_store(sk, dst, NULL); 274 274 275 275 icsk->icsk_ext_hdr_len = 0; 276 276 if (np->opt) ··· 427 427 case TCP_SYN_RECV: /* Cannot happen. 428 428 It can, it SYNs are crossed. --ANK */ 429 429 if (!sock_owned_by_user(sk)) { 430 - TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); 431 430 sk->sk_err = err; 432 431 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ 433 432 ··· 830 831 if (req) 831 832 reqsk_free(req); 832 833 833 - TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); 834 834 return 0; /* don't send reset */ 835 835 } 836 836 ··· 945 947 */ 946 948 947 949 sk->sk_gso_type = SKB_GSO_TCPV6; 948 - ip6_dst_store(newsk, dst, NULL); 950 + __ip6_dst_store(newsk, dst, NULL); 949 951 950 952 newtcp6sk = (struct tcp6_sock *)newsk; 951 953 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
+1 -1
net/ipv6/udp.c
··· 782 782 connected = 0; 783 783 } 784 784 785 - err = ip6_dst_lookup(sk, &dst, fl); 785 + err = ip6_sk_dst_lookup(sk, &dst, fl); 786 786 if (err) 787 787 goto out; 788 788 if (final_p)
+1 -1
net/ipv6/xfrm6_output.c
··· 125 125 if (!skb_is_gso(skb)) 126 126 return xfrm6_output_finish2(skb); 127 127 128 - skb->protocol = htons(ETH_P_IP); 128 + skb->protocol = htons(ETH_P_IPV6); 129 129 segs = skb_gso_segment(skb, 0); 130 130 kfree_skb(skb); 131 131 if (unlikely(IS_ERR(segs)))
+7 -5
net/lapb/lapb_iface.c
··· 238 238 goto out_put; 239 239 240 240 if (lapb->state == LAPB_STATE_0) { 241 - if (((parms->mode & LAPB_EXTENDED) && 242 - (parms->window < 1 || parms->window > 127)) || 243 - (parms->window < 1 || parms->window > 7)) 244 - goto out_put; 245 - 241 + if (parms->mode & LAPB_EXTENDED) { 242 + if (parms->window < 1 || parms->window > 127) 243 + goto out_put; 244 + } else { 245 + if (parms->window < 1 || parms->window > 7) 246 + goto out_put; 247 + } 246 248 lapb->mode = parms->mode; 247 249 lapb->window = parms->window; 248 250 }
+8 -12
net/llc/af_llc.c
··· 784 784 copied += used; 785 785 len -= used; 786 786 787 - if (used + offset < skb->len) 788 - continue; 789 - 790 787 if (!(flags & MSG_PEEK)) { 791 788 sk_eat_skb(sk, skb, 0); 792 789 *seq = 0; 793 790 } 791 + 792 + /* For non stream protocols we get one packet per recvmsg call */ 793 + if (sk->sk_type != SOCK_STREAM) 794 + goto copy_uaddr; 795 + 796 + /* Partial read */ 797 + if (used + offset < skb->len) 798 + continue; 794 799 } while (len > 0); 795 800 796 - /* 797 - * According to UNIX98, msg_name/msg_namelen are ignored 798 - * on connected socket. -ANK 799 - * But... af_llc still doesn't have separate sets of methods for 800 - * SOCK_DGRAM and SOCK_STREAM :-( So we have to do this test, will 801 - * eventually fix this tho :-) -acme 802 - */ 803 - if (sk->sk_type == SOCK_DGRAM) 804 - goto copy_uaddr; 805 801 out: 806 802 release_sock(sk); 807 803 return copied;
+2 -2
net/llc/llc_sap.c
··· 51 51 { 52 52 struct sockaddr_llc *addr; 53 53 54 - if (skb->sk->sk_type == SOCK_STREAM) /* See UNIX98 */ 55 - return; 56 54 /* save primitive for use by the user. */ 57 55 addr = llc_ui_skb_cb(skb); 56 + 57 + memset(addr, 0, sizeof(*addr)); 58 58 addr->sllc_family = sk->sk_family; 59 59 addr->sllc_arphrd = skb->dev->type; 60 60 addr->sllc_test = prim == LLC_TEST_PRIM;
+2
net/netfilter/xt_SECMARK.c
··· 57 57 { 58 58 int err; 59 59 struct xt_secmark_target_selinux_info *sel = &info->u.sel; 60 + 61 + sel->selctx[SECMARK_SELCTX_MAX - 1] = '\0'; 60 62 61 63 err = selinux_string_to_sid(sel->selctx, &sel->selsid); 62 64 if (err) {
+4 -1
net/netfilter/xt_string.c
··· 55 55 /* Damn, can't handle this case properly with iptables... */ 56 56 if (conf->from_offset > conf->to_offset) 57 57 return 0; 58 - 58 + if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0') 59 + return 0; 60 + if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE) 61 + return 0; 59 62 ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen, 60 63 GFP_KERNEL, TS_AUTOLOAD); 61 64 if (IS_ERR(ts_conf))
+1 -1
net/sched/sch_api.c
··· 430 430 } 431 431 #endif 432 432 433 - err = -EINVAL; 433 + err = -ENOENT; 434 434 if (ops == NULL) 435 435 goto err_out; 436 436
+5 -1
net/sunrpc/cache.c
··· 71 71 new = detail->alloc(); 72 72 if (!new) 73 73 return NULL; 74 + /* must fully initialise 'new', else 75 + * we might lose it if we need to 76 + * cache_put it soon. 77 + */ 74 78 cache_init(new); 79 + detail->init(new, key); 75 80 76 81 write_lock(&detail->hash_lock); 77 82 ··· 90 85 return tmp; 91 86 } 92 87 } 93 - detail->init(new, key); 94 88 new->next = *head; 95 89 *head = new; 96 90 detail->entries++;
+29 -23
net/sunrpc/clnt.c
··· 921 921 task->tk_status = xprt_prepare_transmit(task); 922 922 if (task->tk_status != 0) 923 923 return; 924 + task->tk_action = call_transmit_status; 924 925 /* Encode here so that rpcsec_gss can use correct sequence number. */ 925 926 if (rpc_task_need_encode(task)) { 926 - task->tk_rqstp->rq_bytes_sent = 0; 927 + BUG_ON(task->tk_rqstp->rq_bytes_sent != 0); 927 928 call_encode(task); 928 929 /* Did the encode result in an error condition? */ 929 930 if (task->tk_status != 0) 930 - goto out_nosend; 931 + return; 931 932 } 932 - task->tk_action = call_transmit_status; 933 933 xprt_transmit(task); 934 934 if (task->tk_status < 0) 935 935 return; 936 - if (!task->tk_msg.rpc_proc->p_decode) { 937 - task->tk_action = rpc_exit_task; 938 - rpc_wake_up_task(task); 939 - } 940 - return; 941 - out_nosend: 942 - /* release socket write lock before attempting to handle error */ 943 - xprt_abort_transmit(task); 936 + /* 937 + * On success, ensure that we call xprt_end_transmit() before sleeping 938 + * in order to allow access to the socket to other RPC requests. 939 + */ 940 + call_transmit_status(task); 941 + if (task->tk_msg.rpc_proc->p_decode != NULL) 942 + return; 943 + task->tk_action = rpc_exit_task; 944 + rpc_wake_up_task(task); 945 + } 946 + 947 + /* 948 + * 5a. Handle cleanup after a transmission 949 + */ 950 + static void 951 + call_transmit_status(struct rpc_task *task) 952 + { 953 + task->tk_action = call_status; 954 + /* 955 + * Special case: if we've been waiting on the socket's write_space() 956 + * callback, then don't call xprt_end_transmit(). 957 + */ 958 + if (task->tk_status == -EAGAIN) 959 + return; 960 + xprt_end_transmit(task); 944 961 rpc_task_force_reencode(task); 945 962 } 946 963 ··· 1009 992 } 1010 993 1011 994 /* 1012 - * 6a. Handle transmission errors. 
1013 - */ 1014 - static void 1015 - call_transmit_status(struct rpc_task *task) 1016 - { 1017 - if (task->tk_status != -EAGAIN) 1018 - rpc_task_force_reencode(task); 1019 - call_status(task); 1020 - } 1021 - 1022 - /* 1023 - * 6b. Handle RPC timeout 995 + * 6a. Handle RPC timeout 1024 996 * We do not release the request slot, so we keep using the 1025 997 * same XID for all retransmits. 1026 998 */
+4 -2
net/sunrpc/rpc_pipe.c
··· 667 667 RPCAUTH_info, RPCAUTH_EOF); 668 668 if (error) 669 669 goto err_depopulate; 670 + dget(dentry); 670 671 out: 671 672 mutex_unlock(&dir->i_mutex); 672 673 rpc_release_path(&nd); 673 - return dget(dentry); 674 + return dentry; 674 675 err_depopulate: 675 676 rpc_depopulate(dentry); 676 677 __rpc_rmdir(dir, dentry); ··· 732 731 rpci->flags = flags; 733 732 rpci->ops = ops; 734 733 inode_dir_notify(dir, DN_CREATE); 734 + dget(dentry); 735 735 out: 736 736 mutex_unlock(&dir->i_mutex); 737 737 rpc_release_path(&nd); 738 - return dget(dentry); 738 + return dentry; 739 739 err_dput: 740 740 dput(dentry); 741 741 dentry = ERR_PTR(-ENOMEM);
+3 -18
net/sunrpc/xprt.c
··· 707 707 return err; 708 708 } 709 709 710 - void 711 - xprt_abort_transmit(struct rpc_task *task) 710 + void xprt_end_transmit(struct rpc_task *task) 712 711 { 713 - struct rpc_xprt *xprt = task->tk_xprt; 714 - 715 - xprt_release_write(xprt, task); 712 + xprt_release_write(task->tk_xprt, task); 716 713 } 717 714 718 715 /** ··· 758 761 task->tk_status = -ENOTCONN; 759 762 else if (!req->rq_received) 760 763 rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); 761 - 762 - xprt->ops->release_xprt(xprt, task); 763 764 spin_unlock_bh(&xprt->transport_lock); 764 765 return; 765 766 } ··· 767 772 * schedq, and being picked up by a parallel run of rpciod(). 768 773 */ 769 774 task->tk_status = status; 770 - 771 - switch (status) { 772 - case -ECONNREFUSED: 775 + if (status == -ECONNREFUSED) 773 776 rpc_sleep_on(&xprt->sending, task, NULL, NULL); 774 - case -EAGAIN: 775 - case -ENOTCONN: 776 - return; 777 - default: 778 - break; 779 - } 780 - xprt_release_write(xprt, task); 781 - return; 782 777 } 783 778 784 779 static inline void do_xprt_reserve(struct rpc_task *task)
+28 -1
net/sunrpc/xprtsock.c
··· 414 414 } 415 415 416 416 /** 417 + * xs_tcp_release_xprt - clean up after a tcp transmission 418 + * @xprt: transport 419 + * @task: rpc task 420 + * 421 + * This cleans up if an error causes us to abort the transmission of a request. 422 + * In this case, the socket may need to be reset in order to avoid confusing 423 + * the server. 424 + */ 425 + static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) 426 + { 427 + struct rpc_rqst *req; 428 + 429 + if (task != xprt->snd_task) 430 + return; 431 + if (task == NULL) 432 + goto out_release; 433 + req = task->tk_rqstp; 434 + if (req->rq_bytes_sent == 0) 435 + goto out_release; 436 + if (req->rq_bytes_sent == req->rq_snd_buf.len) 437 + goto out_release; 438 + set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state); 439 + out_release: 440 + xprt_release_xprt(xprt, task); 441 + } 442 + 443 + /** 417 444 * xs_close - close a socket 418 445 * @xprt: transport 419 446 * ··· 1277 1250 1278 1251 static struct rpc_xprt_ops xs_tcp_ops = { 1279 1252 .reserve_xprt = xprt_reserve_xprt, 1280 - .release_xprt = xprt_release_xprt, 1253 + .release_xprt = xs_tcp_release_xprt, 1281 1254 .set_port = xs_set_port, 1282 1255 .connect = xs_connect, 1283 1256 .buf_alloc = rpc_malloc,
+5 -12
net/unix/af_unix.c
··· 128 128 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE) 129 129 130 130 #ifdef CONFIG_SECURITY_NETWORK 131 - static void unix_get_peersec_dgram(struct sk_buff *skb) 131 + static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) 132 132 { 133 - int err; 134 - 135 - err = security_socket_getpeersec_dgram(skb, UNIXSECDATA(skb), 136 - UNIXSECLEN(skb)); 137 - if (err) 138 - *(UNIXSECDATA(skb)) = NULL; 133 + memcpy(UNIXSID(skb), &scm->secid, sizeof(u32)); 139 134 } 140 135 141 136 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) 142 137 { 143 - scm->secdata = *UNIXSECDATA(skb); 144 - scm->seclen = *UNIXSECLEN(skb); 138 + scm->secid = *UNIXSID(skb); 145 139 } 146 140 #else 147 - static inline void unix_get_peersec_dgram(struct sk_buff *skb) 141 + static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) 148 142 { } 149 143 150 144 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) ··· 1316 1322 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); 1317 1323 if (siocb->scm->fp) 1318 1324 unix_attach_fds(siocb->scm, skb); 1319 - 1320 - unix_get_peersec_dgram(skb); 1325 + unix_get_secdata(siocb->scm, skb); 1321 1326 1322 1327 skb->h.raw = skb->data; 1323 1328 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
+1 -2
scripts/Kbuild.include
··· 77 77 78 78 # cc-version 79 79 # Usage gcc-ver := $(call cc-version, $(CC)) 80 - cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh \ 81 - $(if $(1), $(1), $(CC))) 80 + cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC)) 82 81 83 82 # cc-ifversion 84 83 # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
+1 -1
scripts/Makefile.modpost
··· 40 40 include scripts/Makefile.lib 41 41 42 42 kernelsymfile := $(objtree)/Module.symvers 43 - modulesymfile := $(KBUILD_EXTMOD)/Modules.symvers 43 + modulesymfile := $(KBUILD_EXTMOD)/Module.symvers 44 44 45 45 # Step 1), find all modules listed in $(MODVERDIR)/ 46 46 __modules := $(sort $(shell grep -h '\.ko' /dev/null $(wildcard $(MODVERDIR)/*.mod)))
+1 -1
scripts/kconfig/confdata.c
··· 357 357 for (e = prop->expr; e; e = e->left.expr) 358 358 if (e->right.sym->visible != no) 359 359 flags &= e->right.sym->flags; 360 - sym->flags |= flags & SYMBOL_DEF_USER; 360 + sym->flags &= flags | ~SYMBOL_DEF_USER; 361 361 } 362 362 363 363 sym_change_count += conf_warnings || conf_unsaved;
+43 -19
scripts/mod/file2alias.c
··· 52 52 sprintf(str + strlen(str), "*"); \ 53 53 } while(0) 54 54 55 + /** 56 + * Check that sizeof(device_id type) are consistent with size of section 57 + * in .o file. If in-consistent then userspace and kernel does not agree 58 + * on actual size which is a bug. 59 + **/ 60 + static void device_id_size_check(const char *modname, const char *device_id, 61 + unsigned long size, unsigned long id_size) 62 + { 63 + if (size % id_size || size < id_size) { 64 + fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo " 65 + "of the size of section __mod_%s_device_table=%lu.\n" 66 + "Fix definition of struct %s_device_id " 67 + "in mod_devicetable.h\n", 68 + modname, device_id, id_size, device_id, size, device_id); 69 + } 70 + } 71 + 55 72 /* USB is special because the bcdDevice can be matched against a numeric range */ 56 73 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */ 57 74 static void do_usb_entry(struct usb_device_id *id, ··· 169 152 unsigned int i; 170 153 const unsigned long id_size = sizeof(struct usb_device_id); 171 154 172 - if (size % id_size || size < id_size) { 173 - warn("%s ids %lu bad size " 174 - "(each on %lu)\n", mod->name, size, id_size); 175 - } 155 + device_id_size_check(mod->name, "usb", size, id_size); 156 + 176 157 /* Leave last one: it's the terminator. */ 177 158 size -= id_size; 178 159 ··· 449 434 450 435 static void do_table(void *symval, unsigned long size, 451 436 unsigned long id_size, 437 + const char *device_id, 452 438 void *function, 453 439 struct module *mod) 454 440 { ··· 457 441 char alias[500]; 458 442 int (*do_entry)(const char *, void *entry, char *alias) = function; 459 443 460 - if (size % id_size || size < id_size) { 461 - warn("%s ids %lu bad size " 462 - "(each on %lu)\n", mod->name, size, id_size); 463 - } 444 + device_id_size_check(mod->name, device_id, size, id_size); 464 445 /* Leave last one: it's the terminator. 
*/ 465 446 size -= id_size; 466 447 ··· 489 476 + sym->st_value; 490 477 491 478 if (sym_is(symname, "__mod_pci_device_table")) 492 - do_table(symval, sym->st_size, sizeof(struct pci_device_id), 479 + do_table(symval, sym->st_size, 480 + sizeof(struct pci_device_id), "pci", 493 481 do_pci_entry, mod); 494 482 else if (sym_is(symname, "__mod_usb_device_table")) 495 483 /* special case to handle bcdDevice ranges */ 496 484 do_usb_table(symval, sym->st_size, mod); 497 485 else if (sym_is(symname, "__mod_ieee1394_device_table")) 498 - do_table(symval, sym->st_size, sizeof(struct ieee1394_device_id), 486 + do_table(symval, sym->st_size, 487 + sizeof(struct ieee1394_device_id), "ieee1394", 499 488 do_ieee1394_entry, mod); 500 489 else if (sym_is(symname, "__mod_ccw_device_table")) 501 - do_table(symval, sym->st_size, sizeof(struct ccw_device_id), 490 + do_table(symval, sym->st_size, 491 + sizeof(struct ccw_device_id), "ccw", 502 492 do_ccw_entry, mod); 503 493 else if (sym_is(symname, "__mod_serio_device_table")) 504 - do_table(symval, sym->st_size, sizeof(struct serio_device_id), 494 + do_table(symval, sym->st_size, 495 + sizeof(struct serio_device_id), "serio", 505 496 do_serio_entry, mod); 506 497 else if (sym_is(symname, "__mod_pnp_device_table")) 507 - do_table(symval, sym->st_size, sizeof(struct pnp_device_id), 498 + do_table(symval, sym->st_size, 499 + sizeof(struct pnp_device_id), "pnp", 508 500 do_pnp_entry, mod); 509 501 else if (sym_is(symname, "__mod_pnp_card_device_table")) 510 - do_table(symval, sym->st_size, sizeof(struct pnp_card_device_id), 502 + do_table(symval, sym->st_size, 503 + sizeof(struct pnp_card_device_id), "pnp_card", 511 504 do_pnp_card_entry, mod); 512 505 else if (sym_is(symname, "__mod_pcmcia_device_table")) 513 - do_table(symval, sym->st_size, sizeof(struct pcmcia_device_id), 506 + do_table(symval, sym->st_size, 507 + sizeof(struct pcmcia_device_id), "pcmcia", 514 508 do_pcmcia_entry, mod); 515 509 else if (sym_is(symname, 
"__mod_of_device_table")) 516 - do_table(symval, sym->st_size, sizeof(struct of_device_id), 510 + do_table(symval, sym->st_size, 511 + sizeof(struct of_device_id), "of", 517 512 do_of_entry, mod); 518 513 else if (sym_is(symname, "__mod_vio_device_table")) 519 - do_table(symval, sym->st_size, sizeof(struct vio_device_id), 514 + do_table(symval, sym->st_size, 515 + sizeof(struct vio_device_id), "vio", 520 516 do_vio_entry, mod); 521 517 else if (sym_is(symname, "__mod_i2c_device_table")) 522 - do_table(symval, sym->st_size, sizeof(struct i2c_device_id), 518 + do_table(symval, sym->st_size, 519 + sizeof(struct i2c_device_id), "i2c", 523 520 do_i2c_entry, mod); 524 521 else if (sym_is(symname, "__mod_input_device_table")) 525 - do_table(symval, sym->st_size, sizeof(struct input_device_id), 522 + do_table(symval, sym->st_size, 523 + sizeof(struct input_device_id), "input", 526 524 do_input_entry, mod); 527 525 } 528 526
+12 -2
security/dummy.c
··· 791 791 return -ENOPROTOOPT; 792 792 } 793 793 794 - static int dummy_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, 795 - u32 *seclen) 794 + static int dummy_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) 796 795 { 797 796 return -ENOPROTOOPT; 798 797 } ··· 873 874 static int dummy_setprocattr(struct task_struct *p, char *name, void *value, size_t size) 874 875 { 875 876 return -EINVAL; 877 + } 878 + 879 + static int dummy_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) 880 + { 881 + return -EOPNOTSUPP; 882 + } 883 + 884 + static void dummy_release_secctx(char *secdata, u32 seclen) 885 + { 876 886 } 877 887 878 888 #ifdef CONFIG_KEYS ··· 1036 1028 set_to_dummy_if_null(ops, d_instantiate); 1037 1029 set_to_dummy_if_null(ops, getprocattr); 1038 1030 set_to_dummy_if_null(ops, setprocattr); 1031 + set_to_dummy_if_null(ops, secid_to_secctx); 1032 + set_to_dummy_if_null(ops, release_secctx); 1039 1033 #ifdef CONFIG_SECURITY_NETWORK 1040 1034 set_to_dummy_if_null(ops, unix_stream_connect); 1041 1035 set_to_dummy_if_null(ops, unix_may_send);
+24 -14
security/selinux/hooks.c
··· 3524 3524 return err; 3525 3525 } 3526 3526 3527 - static int selinux_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, u32 *seclen) 3527 + static int selinux_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) 3528 3528 { 3529 + u32 peer_secid = SECSID_NULL; 3529 3530 int err = 0; 3530 - u32 peer_sid; 3531 3531 3532 - if (skb->sk->sk_family == PF_UNIX) 3533 - selinux_get_inode_sid(SOCK_INODE(skb->sk->sk_socket), 3534 - &peer_sid); 3535 - else 3536 - peer_sid = selinux_socket_getpeer_dgram(skb); 3532 + if (sock && (sock->sk->sk_family == PF_UNIX)) 3533 + selinux_get_inode_sid(SOCK_INODE(sock), &peer_secid); 3534 + else if (skb) 3535 + peer_secid = selinux_socket_getpeer_dgram(skb); 3537 3536 3538 - if (peer_sid == SECSID_NULL) 3539 - return -EINVAL; 3537 + if (peer_secid == SECSID_NULL) 3538 + err = -EINVAL; 3539 + *secid = peer_secid; 3540 3540 3541 - err = security_sid_to_context(peer_sid, secdata, seclen); 3542 - if (err) 3543 - return err; 3544 - 3545 - return 0; 3541 + return err; 3546 3542 } 3547 3543 3548 3544 static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority) ··· 4403 4407 return size; 4404 4408 } 4405 4409 4410 + static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) 4411 + { 4412 + return security_sid_to_context(secid, secdata, seclen); 4413 + } 4414 + 4415 + static void selinux_release_secctx(char *secdata, u32 seclen) 4416 + { 4417 + if (secdata) 4418 + kfree(secdata); 4419 + } 4420 + 4406 4421 #ifdef CONFIG_KEYS 4407 4422 4408 4423 static int selinux_key_alloc(struct key *k, struct task_struct *tsk, ··· 4593 4586 4594 4587 .getprocattr = selinux_getprocattr, 4595 4588 .setprocattr = selinux_setprocattr, 4589 + 4590 + .secid_to_secctx = selinux_secid_to_secctx, 4591 + .release_secctx = selinux_release_secctx, 4596 4592 4597 4593 .unix_stream_connect = selinux_socket_unix_stream_connect, 4598 4594 .unix_may_send = selinux_socket_unix_may_send,
+13 -4
sound/aoa/codecs/snd-aoa-codec-toonie.c
··· 51 51 {} 52 52 }; 53 53 54 + static int toonie_usable(struct codec_info_item *cii, 55 + struct transfer_info *ti, 56 + struct transfer_info *out) 57 + { 58 + return 1; 59 + } 60 + 54 61 #ifdef CONFIG_PM 55 62 static int toonie_suspend(struct codec_info_item *cii, pm_message_t state) 56 63 { ··· 76 69 .sysclock_factor = 256, 77 70 .bus_factor = 64, 78 71 .owner = THIS_MODULE, 72 + .usable = toonie_usable, 79 73 #ifdef CONFIG_PM 80 74 .suspend = toonie_suspend, 81 75 .resume = toonie_resume, ··· 87 79 { 88 80 struct toonie *toonie = codec_to_toonie(codec); 89 81 82 + /* nothing connected? what a joke! */ 83 + if (toonie->codec.connected != 1) 84 + return -ENOTCONN; 85 + 90 86 if (aoa_snd_device_new(SNDRV_DEV_LOWLEVEL, toonie, &ops)) { 91 87 printk(KERN_ERR PFX "failed to create toonie snd device!\n"); 92 88 return -ENODEV; 93 89 } 94 90 95 - /* nothing connected? what a joke! */ 96 - if (toonie->codec.connected != 1) 97 - return -ENOTCONN; 98 - 99 91 if (toonie->codec.soundbus_dev->attach_codec(toonie->codec.soundbus_dev, 100 92 aoa_get_card(), 101 93 &toonie_codec_info, toonie)) { 102 94 printk(KERN_ERR PFX "error creating toonie pcm\n"); 95 + snd_device_free(aoa_get_card(), toonie); 103 96 return -ENODEV; 104 97 } 105 98
+5 -2
sound/aoa/core/snd-aoa-gpio-feature.c
··· 112 112 113 113 static void get_irq(struct device_node * np, int *irqptr) 114 114 { 115 - *irqptr = irq_of_parse_and_map(np, 0); 115 + if (np) 116 + *irqptr = irq_of_parse_and_map(np, 0); 117 + else 118 + *irqptr = NO_IRQ; 116 119 } 117 120 118 121 /* 0x4 is outenable, 0x1 is out, thus 4 or 5 */ ··· 325 322 return -EINVAL; 326 323 } 327 324 328 - if (irq == -1) 325 + if (irq == NO_IRQ) 329 326 return -ENODEV; 330 327 331 328 mutex_lock(&notif->mutex);
+1 -1
sound/aoa/core/snd-aoa-gpio-pmf.c
··· 18 18 \ 19 19 if (unlikely(!rt)) return; \ 20 20 rc = pmf_call_function(rt->node, #name "-mute", &args); \ 21 - if (rc) \ 21 + if (rc && rc != -ENODEV) \ 22 22 printk(KERN_WARNING "pmf_gpio_set_" #name \ 23 23 " failed, rc: %d\n", rc); \ 24 24 rt->implementation_private &= ~(1<<bit); \
+1 -2
sound/core/oss/mixer_oss.c
··· 988 988 if (ptr->index == 0 && (kctl = snd_mixer_oss_test_id(mixer, "Capture Source", 0)) != NULL) { 989 989 struct snd_ctl_elem_info *uinfo; 990 990 991 - uinfo = kmalloc(sizeof(*uinfo), GFP_KERNEL); 991 + uinfo = kzalloc(sizeof(*uinfo), GFP_KERNEL); 992 992 if (! uinfo) { 993 993 up_read(&mixer->card->controls_rwsem); 994 994 return -ENOMEM; 995 995 } 996 996 997 - memset(uinfo, 0, sizeof(*uinfo)); 998 997 if (kctl->info(kctl, uinfo)) { 999 998 up_read(&mixer->card->controls_rwsem); 1000 999 return 0;
+2
sound/core/oss/pcm_oss.c
··· 2228 2228 for (idx = 0; idx < 2; idx++) { 2229 2229 if (setup[idx].disable) 2230 2230 continue; 2231 + if (! pcm->streams[idx].substream_count) 2232 + continue; /* no matching substream */ 2231 2233 if (idx == SNDRV_PCM_STREAM_PLAYBACK) { 2232 2234 if (! (f_mode & FMODE_WRITE)) 2233 2235 continue;
+1 -2
sound/core/seq/seq_device.c
··· 372 372 { 373 373 struct ops_list *ops; 374 374 375 - ops = kmalloc(sizeof(*ops), GFP_KERNEL); 375 + ops = kzalloc(sizeof(*ops), GFP_KERNEL); 376 376 if (ops == NULL) 377 377 return ops; 378 - memset(ops, 0, sizeof(*ops)); 379 378 380 379 /* set up driver entry */ 381 380 strlcpy(ops->id, id, sizeof(ops->id));
+3 -6
sound/core/sgbuf.c
··· 68 68 69 69 dmab->area = NULL; 70 70 dmab->addr = 0; 71 - dmab->private_data = sgbuf = kmalloc(sizeof(*sgbuf), GFP_KERNEL); 71 + dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL); 72 72 if (! sgbuf) 73 73 return NULL; 74 - memset(sgbuf, 0, sizeof(*sgbuf)); 75 74 sgbuf->dev = device; 76 75 pages = snd_sgbuf_aligned_pages(size); 77 76 sgbuf->tblsize = sgbuf_align_table(pages); 78 - sgbuf->table = kmalloc(sizeof(*sgbuf->table) * sgbuf->tblsize, GFP_KERNEL); 77 + sgbuf->table = kcalloc(sgbuf->tblsize, sizeof(*sgbuf->table), GFP_KERNEL); 79 78 if (! sgbuf->table) 80 79 goto _failed; 81 - memset(sgbuf->table, 0, sizeof(*sgbuf->table) * sgbuf->tblsize); 82 - sgbuf->page_table = kmalloc(sizeof(*sgbuf->page_table) * sgbuf->tblsize, GFP_KERNEL); 80 + sgbuf->page_table = kcalloc(sgbuf->tblsize, sizeof(*sgbuf->page_table), GFP_KERNEL); 83 81 if (! sgbuf->page_table) 84 82 goto _failed; 85 - memset(sgbuf->page_table, 0, sizeof(*sgbuf->page_table) * sgbuf->tblsize); 86 83 87 84 /* allocate each page */ 88 85 for (i = 0; i < pages; i++) {
+2 -5
sound/drivers/vx/vx_pcm.c
··· 1252 1252 chip->audio_info = rmh.Stat[1]; 1253 1253 1254 1254 /* allocate pipes */ 1255 - chip->playback_pipes = kmalloc(sizeof(struct vx_pipe *) * chip->audio_outs, GFP_KERNEL); 1255 + chip->playback_pipes = kcalloc(chip->audio_outs, sizeof(struct vx_pipe *), GFP_KERNEL); 1256 1256 if (!chip->playback_pipes) 1257 1257 return -ENOMEM; 1258 - chip->capture_pipes = kmalloc(sizeof(struct vx_pipe *) * chip->audio_ins, GFP_KERNEL); 1258 + chip->capture_pipes = kcalloc(chip->audio_ins, sizeof(struct vx_pipe *), GFP_KERNEL); 1259 1259 if (!chip->capture_pipes) { 1260 1260 kfree(chip->playback_pipes); 1261 1261 return -ENOMEM; 1262 1262 } 1263 - 1264 - memset(chip->playback_pipes, 0, sizeof(struct vx_pipe *) * chip->audio_outs); 1265 - memset(chip->capture_pipes, 0, sizeof(struct vx_pipe *) * chip->audio_ins); 1266 1263 1267 1264 preferred = chip->ibl.size; 1268 1265 chip->ibl.size = 0;
+2 -2
sound/pci/echoaudio/echoaudio.c
··· 236 236 chip = snd_pcm_substream_chip(substream); 237 237 runtime = substream->runtime; 238 238 239 - if (!(pipe = kmalloc(sizeof(struct audiopipe), GFP_KERNEL))) 239 + pipe = kzalloc(sizeof(struct audiopipe), GFP_KERNEL); 240 + if (!pipe) 240 241 return -ENOMEM; 241 - memset(pipe, 0, sizeof(struct audiopipe)); 242 242 pipe->index = -1; /* Not configured yet */ 243 243 244 244 /* Set up hw capabilities and contraints */
+11
sound/pci/emu10k1/emu10k1_main.c
··· 936 936 .ca0151_chip = 1, 937 937 .spk71 = 1, 938 938 .spdif_bug = 1} , 939 + /* Dell OEM/Creative Labs Audigy 2 ZS */ 940 + /* See ALSA bug#1365 */ 941 + {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10031102, 942 + .driver = "Audigy2", .name = "Audigy 2 ZS [SB0353]", 943 + .id = "Audigy2", 944 + .emu10k2_chip = 1, 945 + .ca0102_chip = 1, 946 + .ca0151_chip = 1, 947 + .spk71 = 1, 948 + .spdif_bug = 1, 949 + .ac97_chip = 1} , 939 950 {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10021102, 940 951 .driver = "Audigy2", .name = "Audigy 2 Platinum [SB0240P]", 941 952 .id = "Audigy2",
+5 -1
sound/pci/emu10k1/irq.c
··· 37 37 int handled = 0; 38 38 39 39 while ((status = inl(emu->port + IPR)) != 0) { 40 - //printk("emu10k1 irq - status = 0x%x\n", status); 40 + //snd_printk(KERN_INFO "emu10k1 irq - status = 0x%x\n", status); 41 41 orig_status = status; 42 42 handled = 1; 43 + if ((status & 0xffffffff) == 0xffffffff) { 44 + snd_printk(KERN_INFO "snd-emu10k1: Suspected sound card removal\n"); 45 + break; 46 + } 43 47 if (status & IPR_PCIERROR) { 44 48 snd_printk(KERN_ERR "interrupt: PCI error\n"); 45 49 snd_emu10k1_intr_disable(emu, INTE_PCIERRORENABLE);
+1 -2
sound/ppc/awacs.c
··· 801 801 chip->revision = (in_le32(&chip->awacs->codec_stat) >> 12) & 0xf; 802 802 #ifdef PMAC_AMP_AVAIL 803 803 if (chip->revision == 3 && chip->has_iic && CHECK_CUDA_AMP()) { 804 - struct awacs_amp *amp = kmalloc(sizeof(*amp), GFP_KERNEL); 804 + struct awacs_amp *amp = kzalloc(sizeof(*amp), GFP_KERNEL); 805 805 if (! amp) 806 806 return -ENOMEM; 807 807 chip->mixer_data = amp; 808 - memset(amp, 0, sizeof(*amp)); 809 808 chip->mixer_free = awacs_amp_free; 810 809 awacs_amp_set_vol(amp, 0, 63, 63, 0); /* mute and zero vol */ 811 810 awacs_amp_set_vol(amp, 1, 63, 63, 0);
+1 -2
sound/ppc/daca.c
··· 258 258 request_module("i2c-powermac"); 259 259 #endif /* CONFIG_KMOD */ 260 260 261 - mix = kmalloc(sizeof(*mix), GFP_KERNEL); 261 + mix = kzalloc(sizeof(*mix), GFP_KERNEL); 262 262 if (! mix) 263 263 return -ENOMEM; 264 - memset(mix, 0, sizeof(*mix)); 265 264 chip->mixer_data = mix; 266 265 chip->mixer_free = daca_cleanup; 267 266 mix->amp_on = 1; /* default on */
+1 -2
sound/ppc/keywest.c
··· 64 64 if (strncmp(i2c_device_name(adapter), "mac-io", 6)) 65 65 return 0; /* ignored */ 66 66 67 - new_client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL); 67 + new_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL); 68 68 if (! new_client) 69 69 return -ENOMEM; 70 70 71 - memset(new_client, 0, sizeof(*new_client)); 72 71 new_client->addr = keywest_ctx->addr; 73 72 i2c_set_clientdata(new_client, keywest_ctx); 74 73 new_client->adapter = adapter;
+3 -10
sound/ppc/powermac.c
··· 181 181 if ((err = platform_driver_register(&snd_pmac_driver)) < 0) 182 182 return err; 183 183 device = platform_device_register_simple(SND_PMAC_DRIVER, -1, NULL, 0); 184 - if (!IS_ERR(device)) { 185 - if (platform_get_drvdata(device)) 186 - return 0; 187 - platform_device_unregister(device); 188 - err = -ENODEV; 189 - } else 190 - err = PTR_ERR(device); 191 - platform_driver_unregister(&snd_pmac_driver); 192 - return err; 184 + return 0; 193 185 194 186 } 195 187 196 188 static void __exit alsa_card_pmac_exit(void) 197 189 { 198 - platform_device_unregister(device); 190 + if (!IS_ERR(device)) 191 + platform_device_unregister(device); 199 192 platform_driver_unregister(&snd_pmac_driver); 200 193 } 201 194
+1 -2
sound/ppc/tumbler.c
··· 1317 1317 request_module("i2c-powermac"); 1318 1318 #endif /* CONFIG_KMOD */ 1319 1319 1320 - mix = kmalloc(sizeof(*mix), GFP_KERNEL); 1320 + mix = kzalloc(sizeof(*mix), GFP_KERNEL); 1321 1321 if (! mix) 1322 1322 return -ENOMEM; 1323 - memset(mix, 0, sizeof(*mix)); 1324 1323 mix->headphone_irq = -1; 1325 1324 1326 1325 chip->mixer_data = mix;
+2 -4
sound/usb/usbaudio.c
··· 2260 2260 } 2261 2261 2262 2262 /* create a new pcm */ 2263 - as = kmalloc(sizeof(*as), GFP_KERNEL); 2263 + as = kzalloc(sizeof(*as), GFP_KERNEL); 2264 2264 if (! as) 2265 2265 return -ENOMEM; 2266 - memset(as, 0, sizeof(*as)); 2267 2266 as->pcm_index = chip->pcm_devs; 2268 2267 as->chip = chip; 2269 2268 as->fmt_type = fp->fmt_type; ··· 2632 2633 csep = NULL; 2633 2634 } 2634 2635 2635 - fp = kmalloc(sizeof(*fp), GFP_KERNEL); 2636 + fp = kzalloc(sizeof(*fp), GFP_KERNEL); 2636 2637 if (! fp) { 2637 2638 snd_printk(KERN_ERR "cannot malloc\n"); 2638 2639 return -ENOMEM; 2639 2640 } 2640 2641 2641 - memset(fp, 0, sizeof(*fp)); 2642 2642 fp->iface = iface_no; 2643 2643 fp->altsetting = altno; 2644 2644 fp->altset_idx = i;