Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master'

+6810 -2000
+2 -7
CREDITS
··· 3241 3241 S: Beaverton, Oregon 97005 3242 3242 S: USA 3243 3243 3244 - N: Marcelo W. Tosatti 3245 - E: marcelo.tosatti@cyclades.com 3246 - D: Miscellaneous kernel hacker 3244 + N: Marcelo Tosatti 3245 + E: marcelo@kvack.org 3247 3246 D: v2.4 kernel maintainer 3248 - D: Current pc300/cyclades maintainer 3249 - S: Cyclades Corporation 3250 - S: Av Cristovao Colombo, 462. Floresta. 3251 - S: Porto Alegre 3252 3247 S: Brazil 3253 3248 3254 3249 N: Stefan Traby
-5
Documentation/devices.txt
··· 1721 1721 These devices support the same API as the generic SCSI 1722 1722 devices. 1723 1723 1724 - 97 block Packet writing for CD/DVD devices 1725 - 0 = /dev/pktcdvd0 First packet-writing module 1726 - 1 = /dev/pktcdvd1 Second packet-writing module 1727 - ... 1728 - 1729 1724 98 char Control and Measurement Device (comedi) 1730 1725 0 = /dev/comedi0 First comedi device 1731 1726 1 = /dev/comedi1 Second comedi device
+4 -4
Documentation/dvb/get_dvb_firmware
··· 259 259 } 260 260 261 261 sub nxt2002 { 262 - my $sourcefile = "Broadband4PC_4_2_11.zip"; 262 + my $sourcefile = "Technisat_DVB-PC_4_4_COMPACT.zip"; 263 263 my $url = "http://www.bbti.us/download/windows/$sourcefile"; 264 - my $hash = "c6d2ea47a8f456d887ada0cfb718ff2a"; 264 + my $hash = "476befae8c7c1bb9648954060b1eec1f"; 265 265 my $outfile = "dvb-fe-nxt2002.fw"; 266 266 my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1); 267 267 ··· 269 269 270 270 wgetfile($sourcefile, $url); 271 271 unzip($sourcefile, $tmpdir); 272 - verify("$tmpdir/SkyNETU.sys", $hash); 273 - extract("$tmpdir/SkyNETU.sys", 375832, 5908, $outfile); 272 + verify("$tmpdir/SkyNET.sys", $hash); 273 + extract("$tmpdir/SkyNET.sys", 331624, 5908, $outfile); 274 274 275 275 $outfile; 276 276 }
+9
Documentation/feature-removal-schedule.txt
··· 57 57 58 58 --------------------------- 59 59 60 + What: sbp2: module parameter "force_inquiry_hack" 61 + When: July 2006 62 + Why: Superceded by parameter "workarounds". Both parameters are meant to be 63 + used ad-hoc and for single devices only, i.e. not in modprobe.conf, 64 + therefore the impact of this feature replacement should be low. 65 + Who: Stefan Richter <stefanr@s5r6.in-berlin.de> 66 + 67 + --------------------------- 68 + 60 69 What: Video4Linux API 1 ioctls and video_decoder.h from Video devices. 61 70 When: July 2006 62 71 Why: V4L1 AP1 was replaced by V4L2 API. during migration from 2.4 to 2.6
-17
Documentation/firmware_class/README
··· 105 105 on the setup, so I think that the choice on what firmware to make 106 106 persistent should be left to userspace. 107 107 108 - - Why register_firmware()+__init can be useful: 109 - - For boot devices needing firmware. 110 - - To make the transition easier: 111 - The firmware can be declared __init and register_firmware() 112 - called on module_init. Then the firmware is warranted to be 113 - there even if "firmware hotplug userspace" is not there yet or 114 - it doesn't yet provide the needed firmware. 115 - Once the firmware is widely available in userspace, it can be 116 - removed from the kernel. Or made optional (CONFIG_.*_FIRMWARE). 117 - 118 - In either case, if firmware hotplug support is there, it can move the 119 - firmware out of kernel memory into the real filesystem for later 120 - usage. 121 - 122 - Note: If persistence is implemented on top of initramfs, 123 - register_firmware() may not be appropriate. 124 -
-11
Documentation/firmware_class/firmware_sample_driver.c
··· 5 5 * 6 6 * Sample code on how to use request_firmware() from drivers. 7 7 * 8 - * Note that register_firmware() is currently useless. 9 - * 10 8 */ 11 9 12 10 #include <linux/module.h> ··· 14 16 #include <linux/string.h> 15 17 16 18 #include "linux/firmware.h" 17 - 18 - #define WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE 19 - #ifdef WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE 20 - char __init inkernel_firmware[] = "let's say that this is firmware\n"; 21 - #endif 22 19 23 20 static struct device ghost_device = { 24 21 .bus_id = "ghost0", ··· 97 104 98 105 static int sample_init(void) 99 106 { 100 - #ifdef WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE 101 - register_firmware("sample_driver_fw", inkernel_firmware, 102 - sizeof(inkernel_firmware)); 103 - #endif 104 107 device_initialize(&ghost_device); 105 108 /* since there is no real hardware insertion I just call the 106 109 * sample probe functions here */
+2 -2
Documentation/memory-barriers.txt
··· 1031 1031 LOCKS VS MEMORY ACCESSES 1032 1032 ------------------------ 1033 1033 1034 - Consider the following: the system has a pair of spinlocks (N) and (Q), and 1034 + Consider the following: the system has a pair of spinlocks (M) and (Q), and 1035 1035 three CPUs; then should the following sequence of events occur: 1036 1036 1037 1037 CPU 1 CPU 2 ··· 1678 1678 smp_wmb(); 1679 1679 <A:modify v=2> <C:busy> 1680 1680 <C:queue v=2> 1681 - p = &b; q = p; 1681 + p = &v; q = p; 1682 1682 <D:request p> 1683 1683 <B:modify p=&v> <D:commit p=&v> 1684 1684 <D:read p>
+234
Documentation/spi/pxa2xx
··· 1 + PXA2xx SPI on SSP driver HOWTO 2 + =================================================== 3 + This a mini howto on the pxa2xx_spi driver. The driver turns a PXA2xx 4 + synchronous serial port into a SPI master controller 5 + (see Documentation/spi/spi_summary). The driver has the following features 6 + 7 + - Support for any PXA2xx SSP 8 + - SSP PIO and SSP DMA data transfers. 9 + - External and Internal (SSPFRM) chip selects. 10 + - Per slave device (chip) configuration. 11 + - Full suspend, freeze, resume support. 12 + 13 + The driver is built around a "spi_message" fifo serviced by workqueue and a 14 + tasklet. The workqueue, "pump_messages", drives message fifo and the tasklet 15 + (pump_transfer) is responsible for queuing SPI transactions and setting up and 16 + launching the dma/interrupt driven transfers. 17 + 18 + Declaring PXA2xx Master Controllers 19 + ----------------------------------- 20 + Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a 21 + "platform device". The master configuration is passed to the driver via a table 22 + found in include/asm-arm/arch-pxa/pxa2xx_spi.h: 23 + 24 + struct pxa2xx_spi_master { 25 + enum pxa_ssp_type ssp_type; 26 + u32 clock_enable; 27 + u16 num_chipselect; 28 + u8 enable_dma; 29 + }; 30 + 31 + The "pxa2xx_spi_master.ssp_type" field must have a value between 1 and 3 and 32 + informs the driver which features a particular SSP supports. 33 + 34 + The "pxa2xx_spi_master.clock_enable" field is used to enable/disable the 35 + corresponding SSP peripheral block in the "Clock Enable Register (CKEN"). See 36 + the "PXA2xx Developer Manual" section "Clocks and Power Management". 37 + 38 + The "pxa2xx_spi_master.num_chipselect" field is used to determine the number of 39 + slave device (chips) attached to this SPI master. 40 + 41 + The "pxa2xx_spi_master.enable_dma" field informs the driver that SSP DMA should 42 + be used. 
This caused the driver to acquire two DMA channels: rx_channel and 43 + tx_channel. The rx_channel has a higher DMA service priority the tx_channel. 44 + See the "PXA2xx Developer Manual" section "DMA Controller". 45 + 46 + NSSP MASTER SAMPLE 47 + ------------------ 48 + Below is a sample configuration using the PXA255 NSSP. 49 + 50 + static struct resource pxa_spi_nssp_resources[] = { 51 + [0] = { 52 + .start = __PREG(SSCR0_P(2)), /* Start address of NSSP */ 53 + .end = __PREG(SSCR0_P(2)) + 0x2c, /* Range of registers */ 54 + .flags = IORESOURCE_MEM, 55 + }, 56 + [1] = { 57 + .start = IRQ_NSSP, /* NSSP IRQ */ 58 + .end = IRQ_NSSP, 59 + .flags = IORESOURCE_IRQ, 60 + }, 61 + }; 62 + 63 + static struct pxa2xx_spi_master pxa_nssp_master_info = { 64 + .ssp_type = PXA25x_NSSP, /* Type of SSP */ 65 + .clock_enable = CKEN9_NSSP, /* NSSP Peripheral clock */ 66 + .num_chipselect = 1, /* Matches the number of chips attached to NSSP */ 67 + .enable_dma = 1, /* Enables NSSP DMA */ 68 + }; 69 + 70 + static struct platform_device pxa_spi_nssp = { 71 + .name = "pxa2xx-spi", /* MUST BE THIS VALUE, so device match driver */ 72 + .id = 2, /* Bus number, MUST MATCH SSP number 1..n */ 73 + .resource = pxa_spi_nssp_resources, 74 + .num_resources = ARRAY_SIZE(pxa_spi_nssp_resources), 75 + .dev = { 76 + .platform_data = &pxa_nssp_master_info, /* Passed to driver */ 77 + }, 78 + }; 79 + 80 + static struct platform_device *devices[] __initdata = { 81 + &pxa_spi_nssp, 82 + }; 83 + 84 + static void __init board_init(void) 85 + { 86 + (void)platform_add_device(devices, ARRAY_SIZE(devices)); 87 + } 88 + 89 + Declaring Slave Devices 90 + ----------------------- 91 + Typically each SPI slave (chip) is defined in the arch/.../mach-*/board-*.c 92 + using the "spi_board_info" structure found in "linux/spi/spi.h". See 93 + "Documentation/spi/spi_summary" for additional information. 
94 + 95 + Each slave device attached to the PXA must provide slave specific configuration 96 + information via the structure "pxa2xx_spi_chip" found in 97 + "include/asm-arm/arch-pxa/pxa2xx_spi.h". The pxa2xx_spi master controller driver 98 + will uses the configuration whenever the driver communicates with the slave 99 + device. 100 + 101 + struct pxa2xx_spi_chip { 102 + u8 tx_threshold; 103 + u8 rx_threshold; 104 + u8 dma_burst_size; 105 + u32 timeout_microsecs; 106 + u8 enable_loopback; 107 + void (*cs_control)(u32 command); 108 + }; 109 + 110 + The "pxa2xx_spi_chip.tx_threshold" and "pxa2xx_spi_chip.rx_threshold" fields are 111 + used to configure the SSP hardware fifo. These fields are critical to the 112 + performance of pxa2xx_spi driver and misconfiguration will result in rx 113 + fifo overruns (especially in PIO mode transfers). Good default values are 114 + 115 + .tx_threshold = 12, 116 + .rx_threshold = 4, 117 + 118 + The "pxa2xx_spi_chip.dma_burst_size" field is used to configure PXA2xx DMA 119 + engine and is related the "spi_device.bits_per_word" field. Read and understand 120 + the PXA2xx "Developer Manual" sections on the DMA controller and SSP Controllers 121 + to determine the correct value. An SSP configured for byte-wide transfers would 122 + use a value of 8. 123 + 124 + The "pxa2xx_spi_chip.timeout_microsecs" fields is used to efficiently handle 125 + trailing bytes in the SSP receiver fifo. The correct value for this field is 126 + dependent on the SPI bus speed ("spi_board_info.max_speed_hz") and the specific 127 + slave device. Please note the the PXA2xx SSP 1 does not support trailing byte 128 + timeouts and must busy-wait any trailing bytes. 129 + 130 + The "pxa2xx_spi_chip.enable_loopback" field is used to place the SSP porting 131 + into internal loopback mode. In this mode the SSP controller internally 132 + connects the SSPTX pin the the SSPRX pin. This is useful for initial setup 133 + testing. 
134 + 135 + The "pxa2xx_spi_chip.cs_control" field is used to point to a board specific 136 + function for asserting/deasserting a slave device chip select. If the field is 137 + NULL, the pxa2xx_spi master controller driver assumes that the SSP port is 138 + configured to use SSPFRM instead. 139 + 140 + NSSP SALVE SAMPLE 141 + ----------------- 142 + The pxa2xx_spi_chip structure is passed to the pxa2xx_spi driver in the 143 + "spi_board_info.controller_data" field. Below is a sample configuration using 144 + the PXA255 NSSP. 145 + 146 + /* Chip Select control for the CS8415A SPI slave device */ 147 + static void cs8415a_cs_control(u32 command) 148 + { 149 + if (command & PXA2XX_CS_ASSERT) 150 + GPCR(2) = GPIO_bit(2); 151 + else 152 + GPSR(2) = GPIO_bit(2); 153 + } 154 + 155 + /* Chip Select control for the CS8405A SPI slave device */ 156 + static void cs8405a_cs_control(u32 command) 157 + { 158 + if (command & PXA2XX_CS_ASSERT) 159 + GPCR(3) = GPIO_bit(3); 160 + else 161 + GPSR(3) = GPIO_bit(3); 162 + } 163 + 164 + static struct pxa2xx_spi_chip cs8415a_chip_info = { 165 + .tx_threshold = 12, /* SSP hardward FIFO threshold */ 166 + .rx_threshold = 4, /* SSP hardward FIFO threshold */ 167 + .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */ 168 + .timeout_microsecs = 64, /* Wait at least 64usec to handle trailing */ 169 + .cs_control = cs8415a_cs_control, /* Use external chip select */ 170 + }; 171 + 172 + static struct pxa2xx_spi_chip cs8405a_chip_info = { 173 + .tx_threshold = 12, /* SSP hardward FIFO threshold */ 174 + .rx_threshold = 4, /* SSP hardward FIFO threshold */ 175 + .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */ 176 + .timeout_microsecs = 64, /* Wait at least 64usec to handle trailing */ 177 + .cs_control = cs8405a_cs_control, /* Use external chip select */ 178 + }; 179 + 180 + static struct spi_board_info streetracer_spi_board_info[] __initdata = { 181 + { 182 + .modalias = "cs8415a", /* Name of spi_driver for 
this device */ 183 + .max_speed_hz = 3686400, /* Run SSP as fast a possbile */ 184 + .bus_num = 2, /* Framework bus number */ 185 + .chip_select = 0, /* Framework chip select */ 186 + .platform_data = NULL; /* No spi_driver specific config */ 187 + .controller_data = &cs8415a_chip_info, /* Master chip config */ 188 + .irq = STREETRACER_APCI_IRQ, /* Slave device interrupt */ 189 + }, 190 + { 191 + .modalias = "cs8405a", /* Name of spi_driver for this device */ 192 + .max_speed_hz = 3686400, /* Run SSP as fast a possbile */ 193 + .bus_num = 2, /* Framework bus number */ 194 + .chip_select = 1, /* Framework chip select */ 195 + .controller_data = &cs8405a_chip_info, /* Master chip config */ 196 + .irq = STREETRACER_APCI_IRQ, /* Slave device interrupt */ 197 + }, 198 + }; 199 + 200 + static void __init streetracer_init(void) 201 + { 202 + spi_register_board_info(streetracer_spi_board_info, 203 + ARRAY_SIZE(streetracer_spi_board_info)); 204 + } 205 + 206 + 207 + DMA and PIO I/O Support 208 + ----------------------- 209 + The pxa2xx_spi driver support both DMA and interrupt driven PIO message 210 + transfers. The driver defaults to PIO mode and DMA transfers must enabled by 211 + setting the "enable_dma" flag in the "pxa2xx_spi_master" structure and and 212 + ensuring that the "pxa2xx_spi_chip.dma_burst_size" field is non-zero. The DMA 213 + mode support both coherent and stream based DMA mappings. 214 + 215 + The following logic is used to determine the type of I/O to be used on 216 + a per "spi_transfer" basis: 217 + 218 + if !enable_dma or dma_burst_size == 0 then 219 + always use PIO transfers 220 + 221 + if spi_message.is_dma_mapped and rx_dma_buf != 0 and tx_dma_buf != 0 then 222 + use coherent DMA mode 223 + 224 + if rx_buf and tx_buf are aligned on 8 byte boundary then 225 + use streaming DMA mode 226 + 227 + otherwise 228 + use PIO transfer 229 + 230 + THANKS TO 231 + --------- 232 + 233 + David Brownell and others for mentoring the development of this driver. 
234 +
+33 -1
Documentation/spi/spi-summary
··· 414 414 The driver will initialize the fields of that spi_master, including the 415 415 bus number (maybe the same as the platform device ID) and three methods 416 416 used to interact with the SPI core and SPI protocol drivers. It will 417 - also initialize its own internal state. 417 + also initialize its own internal state. (See below about bus numbering 418 + and those methods.) 419 + 420 + After you initialize the spi_master, then use spi_register_master() to 421 + publish it to the rest of the system. At that time, device nodes for 422 + the controller and any predeclared spi devices will be made available, 423 + and the driver model core will take care of binding them to drivers. 424 + 425 + If you need to remove your SPI controller driver, spi_unregister_master() 426 + will reverse the effect of spi_register_master(). 427 + 428 + 429 + BUS NUMBERING 430 + 431 + Bus numbering is important, since that's how Linux identifies a given 432 + SPI bus (shared SCK, MOSI, MISO). Valid bus numbers start at zero. On 433 + SOC systems, the bus numbers should match the numbers defined by the chip 434 + manufacturer. For example, hardware controller SPI2 would be bus number 2, 435 + and spi_board_info for devices connected to it would use that number. 436 + 437 + If you don't have such hardware-assigned bus number, and for some reason 438 + you can't just assign them, then provide a negative bus number. That will 439 + then be replaced by a dynamically assigned number. You'd then need to treat 440 + this as a non-static configuration (see above). 441 + 442 + 443 + SPI MASTER METHODS 418 444 419 445 master->setup(struct spi_device *spi) 420 446 This sets up the device clock rate, SPI mode, and word sizes. ··· 457 431 state it dynamically associates with that device. If you do that, 458 432 be sure to provide the cleanup() method to free that state. 
459 433 434 + 435 + SPI MESSAGE QUEUE 436 + 460 437 The bulk of the driver will be managing the I/O queue fed by transfer(). 461 438 462 439 That queue could be purely conceptual. For example, a driver used only ··· 469 440 often DMA (especially if the root filesystem is in SPI flash), and 470 441 execution contexts like IRQ handlers, tasklets, or workqueues (such 471 442 as keventd). Your driver can be as fancy, or as simple, as you need. 443 + Such a transfer() method would normally just add the message to a 444 + queue, and then start some asynchronous transfer engine (unless it's 445 + already running). 472 446 473 447 474 448 THANKS TO
+3
Documentation/watchdog/watchdog-api.txt
··· 36 36 some data to the device. So a very simple watchdog daemon would look 37 37 like this: 38 38 39 + #include <stdlib.h> 40 + #include <fcntl.h> 41 + 39 42 int main(int argc, const char *argv[]) { 40 43 int fd=open("/dev/watchdog",O_WRONLY); 41 44 if (fd==-1) {
+39 -3
MAINTAINERS
··· 40 40 PLEASE document known bugs. If it doesn't work for everything 41 41 or does something very odd once a month document it. 42 42 43 + PLEASE remember that submissions must be made under the terms 44 + of the OSDL certificate of contribution 45 + (http://www.osdl.org/newsroom/press_releases/2004/2004_05_24_dco.html) 46 + and should include a Signed-off-by: line. 47 + 43 48 6. Make sure you have the right to send any changes you make. If you 44 49 do changes at work you may find your employer owns the patch 45 50 not you. 46 51 47 - 7. Happy hacking. 52 + 7. When sending security related changes or reports to a maintainer 53 + please Cc: security@kernel.org, especially if the maintainer 54 + does not respond. 55 + 56 + 8. Happy hacking. 48 57 49 58 ----------------------------------- 50 59 ··· 988 979 EXT3 FILE SYSTEM 989 980 P: Stephen Tweedie, Andrew Morton 990 981 M: sct@redhat.com, akpm@osdl.org, adilger@clusterfs.com 991 - L: ext3-users@redhat.com 982 + L: ext2-devel@lists.sourceforge.net 992 983 S: Maintained 993 984 994 985 F71805F HARDWARE MONITORING DRIVER ··· 1557 1548 T: git kernel.org:/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git 1558 1549 S: Supported 1559 1550 1551 + JOURNALLING LAYER FOR BLOCK DEVICS (JBD) 1552 + P: Stephen Tweedie, Andrew Morton 1553 + M: sct@redhat.com, akpm@osdl.org 1554 + L: ext2-devel@lists.sourceforge.net 1555 + S: Maintained 1556 + 1560 1557 KCONFIG 1561 1558 P: Roman Zippel 1562 1559 M: zippel@linux-m68k.org 1563 1560 L: kbuild-devel@lists.sourceforge.net 1561 + S: Maintained 1562 + 1563 + KDUMP 1564 + P: Vivek Goyal 1565 + M: vgoyal@in.ibm.com 1566 + P: Haren Myneni 1567 + M: hbabu@us.ibm.com 1568 + L: fastboot@lists.osdl.org 1569 + L: linux-kernel@vger.kernel.org 1570 + W: http://lse.sourceforge.net/kdump/ 1564 1571 S: Maintained 1565 1572 1566 1573 KERNEL AUTOMOUNTER (AUTOFS) ··· 1646 1621 L: linux-scsi@vger.kernel.org 1647 1622 S: Maintained 1648 1623 1624 + LED SUBSYSTEM 1625 + P: Richard Purdie 1626 + M: 
rpurdie@rpsys.net 1627 + S: Maintained 1628 + 1649 1629 LEGO USB Tower driver 1650 1630 P: Juergen Stuber 1651 1631 M: starblue@users.sourceforge.net ··· 1710 1680 1711 1681 LINUX FOR POWERPC EMBEDDED PPC8XX 1712 1682 P: Marcelo Tosatti 1713 - M: marcelo.tosatti@cyclades.com 1683 + M: marcelo@kvack.org 1714 1684 W: http://www.penguinppc.org/ 1715 1685 L: linuxppc-embedded@ozlabs.org 1716 1686 S: Maintained ··· 2559 2529 P: Jaroslav Kysela 2560 2530 M: perex@suse.cz 2561 2531 L: alsa-devel@alsa-project.org 2532 + S: Maintained 2533 + 2534 + SPI SUBSYSTEM 2535 + P: David Brownell 2536 + M: dbrownell@users.sourceforge.net 2537 + L: spi-devel-general@lists.sourceforge.net 2562 2538 S: Maintained 2563 2539 2564 2540 TPM DEVICE DRIVER
+2 -2
Makefile
··· 1 1 VERSION = 2 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 17 4 - EXTRAVERSION =-rc4 5 - NAME=Sliding Snow Leopard 4 + EXTRAVERSION =-rc5 5 + NAME=Lordi Rules 6 6 7 7 # *DOCUMENTATION* 8 8 # To see a list of typical targets execute "make help"
+2
arch/arm/kernel/asm-offsets.c
··· 99 99 DEFINE(MACHINFO_NAME, offsetof(struct machine_desc, name)); 100 100 DEFINE(MACHINFO_PHYSIO, offsetof(struct machine_desc, phys_io)); 101 101 DEFINE(MACHINFO_PGOFFIO, offsetof(struct machine_desc, io_pg_offst)); 102 + BLANK(); 103 + DEFINE(PROC_INFO_SZ, sizeof(struct proc_info_list)); 102 104 DEFINE(PROCINFO_INITFUNC, offsetof(struct proc_info_list, __cpu_flush)); 103 105 DEFINE(PROCINFO_MMUFLAGS, offsetof(struct proc_info_list, __cpu_mmu_flags)); 104 106 return 0;
+17 -6
arch/arm/kernel/dma-isa.c
··· 143 143 .residue = isa_get_dma_residue, 144 144 }; 145 145 146 - static struct resource dma_resources[] = { 147 - { "dma1", 0x0000, 0x000f }, 148 - { "dma low page", 0x0080, 0x008f }, 149 - { "dma2", 0x00c0, 0x00df }, 150 - { "dma high page", 0x0480, 0x048f } 151 - }; 146 + static struct resource dma_resources[] = { { 147 + .name = "dma1", 148 + .start = 0x0000, 149 + .end = 0x000f 150 + }, { 151 + .name = "dma low page", 152 + .start = 0x0080, 153 + .end = 0x008f 154 + }, { 155 + .name = "dma2", 156 + .start = 0x00c0, 157 + .end = 0x00df 158 + }, { 159 + .name = "dma high page", 160 + .start = 0x0480, 161 + .end = 0x048f 162 + } }; 152 163 153 164 void __init isa_init_dma(dma_t *dma) 154 165 {
+1 -1
arch/arm/kernel/process.c
··· 311 311 struct thread_info_list *th = &get_cpu_var(thread_info_list); 312 312 if (th->nr < EXTRA_TASK_STRUCT) { 313 313 unsigned long *p = (unsigned long *)thread; 314 - p[0] = th->head; 314 + p[0] = (unsigned long)th->head; 315 315 th->head = p; 316 316 th->nr += 1; 317 317 put_cpu_var(thread_info_list);
+2 -2
arch/arm/lib/backtrace.S
··· 122 122 #define reg r5 123 123 #define stack r6 124 124 125 - .Ldumpstm: stmfd sp!, {instr, reg, stack, r7, lr} 125 + .Ldumpstm: stmfd sp!, {instr, reg, stack, r7, r8, lr} 126 126 mov stack, r0 127 127 mov instr, r1 128 128 mov reg, #9 ··· 145 145 adrne r0, .Lcr 146 146 blne printk 147 147 mov r0, stack 148 - LOADREGS(fd, sp!, {instr, reg, stack, r7, pc}) 148 + LOADREGS(fd, sp!, {instr, reg, stack, r7, r8, pc}) 149 149 150 150 .Lfp: .asciz " r%d = %08X%c" 151 151 .Lcr: .asciz "\n"
+2 -2
arch/arm/lib/div64.S
··· 189 189 moveq pc, lr 190 190 191 191 @ Division by 0: 192 - str lr, [sp, #-4]! 192 + str lr, [sp, #-8]! 193 193 bl __div0 194 194 195 195 @ as wrong as it could be... 196 196 mov yl, #0 197 197 mov yh, #0 198 198 mov xh, #0 199 - ldr pc, [sp], #4 199 + ldr pc, [sp], #8 200 200
+4 -1
arch/arm/mach-pxa/mainstone.c
··· 95 95 for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) { 96 96 set_irq_chip(irq, &mainstone_irq_chip); 97 97 set_irq_handler(irq, do_level_IRQ); 98 - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 98 + if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14)) 99 + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN); 100 + else 101 + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 99 102 } 100 103 set_irq_flags(MAINSTONE_IRQ(8), 0); 101 104 set_irq_flags(MAINSTONE_IRQ(12), 0);
+4 -1
arch/arm/mach-realview/realview_eb.c
··· 137 137 static void __init gic_init_irq(void) 138 138 { 139 139 #ifdef CONFIG_REALVIEW_MPCORE 140 + unsigned int pldctrl; 140 141 writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK)); 141 - writel(0x008003c0, __io_address(REALVIEW_SYS_BASE) + 0xd8); 142 + pldctrl = readl(__io_address(REALVIEW_SYS_BASE) + 0xd8); 143 + pldctrl |= 0x00800000; /* New irq mode */ 144 + writel(pldctrl, __io_address(REALVIEW_SYS_BASE) + 0xd8); 142 145 writel(0x00000000, __io_address(REALVIEW_SYS_LOCK)); 143 146 #endif 144 147 gic_dist_init(__io_address(REALVIEW_GIC_DIST_BASE));
+2 -4
arch/arm/mach-s3c2410/sleep.S
··· 59 59 mrc p15, 0, r5, c13, c0, 0 @ PID 60 60 mrc p15, 0, r6, c3, c0, 0 @ Domain ID 61 61 mrc p15, 0, r7, c2, c0, 0 @ translation table base address 62 - mrc p15, 0, r8, c2, c0, 0 @ auxiliary control register 63 - mrc p15, 0, r9, c1, c0, 0 @ control register 62 + mrc p15, 0, r8, c1, c0, 0 @ control register 64 63 65 64 stmia r0, { r4 - r13 } 66 65 ··· 164 165 mcr p15, 0, r5, c13, c0, 0 @ PID 165 166 mcr p15, 0, r6, c3, c0, 0 @ Domain ID 166 167 mcr p15, 0, r7, c2, c0, 0 @ translation table base 167 - mcr p15, 0, r8, c1, c1, 0 @ auxilliary control 168 168 169 169 #ifdef CONFIG_DEBUG_RESUME 170 170 mov r3, #'R' ··· 171 173 #endif 172 174 173 175 ldr r2, =resume_with_mmu 174 - mcr p15, 0, r9, c1, c0, 0 @ turn on MMU, etc 176 + mcr p15, 0, r8, c1, c0, 0 @ turn on MMU, etc 175 177 nop @ second-to-last before mmu 176 178 mov pc, r2 @ go back to virtual address 177 179
+2 -2
arch/arm/mm/ioremap.c
··· 141 141 return NULL; 142 142 addr = (unsigned long)area->addr; 143 143 if (remap_area_pages(addr, pfn, size, flags)) { 144 - vfree((void *)addr); 144 + vunmap((void *)addr); 145 145 return NULL; 146 146 } 147 147 return (void __iomem *) (offset + (char *)addr); ··· 173 173 174 174 void __iounmap(void __iomem *addr) 175 175 { 176 - vfree((void *) (PAGE_MASK & (unsigned long) addr)); 176 + vunmap((void *)(PAGE_MASK & (unsigned long)addr)); 177 177 } 178 178 EXPORT_SYMBOL(__iounmap); 179 179
+3 -3
arch/i386/Kconfig
··· 758 758 bool "Support for hot-pluggable CPUs (EXPERIMENTAL)" 759 759 depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER 760 760 ---help--- 761 - Say Y here to experiment with turning CPUs off and on. CPUs 762 - can be controlled through /sys/devices/system/cpu. 761 + Say Y here to experiment with turning CPUs off and on, and to 762 + enable suspend on SMP systems. CPUs can be controlled through 763 + /sys/devices/system/cpu. 763 764 764 - Say N. 765 765 766 766 endmenu 767 767
+8
arch/i386/kernel/acpi/boot.c
··· 1066 1066 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), 1067 1067 }, 1068 1068 }, 1069 + { 1070 + .callback = disable_acpi_pci, 1071 + .ident = "HP xw9300", 1072 + .matches = { 1073 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 1074 + DMI_MATCH(DMI_PRODUCT_NAME, "HP xw9300 Workstation"), 1075 + }, 1076 + }, 1069 1077 {} 1070 1078 }; 1071 1079
+8
arch/i386/kernel/apic.c
··· 1341 1341 1342 1342 connect_bsp_APIC(); 1343 1343 1344 + /* 1345 + * Hack: In case of kdump, after a crash, kernel might be booting 1346 + * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid 1347 + * might be zero if read from MP tables. Get it from LAPIC. 1348 + */ 1349 + #ifdef CONFIG_CRASH_DUMP 1350 + boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); 1351 + #endif 1344 1352 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); 1345 1353 1346 1354 setup_local_APIC();
+2
arch/i386/kernel/setup.c
··· 1320 1320 probe_roms(); 1321 1321 for (i = 0; i < e820.nr_map; i++) { 1322 1322 struct resource *res; 1323 + if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL) 1324 + continue; 1323 1325 res = kzalloc(sizeof(struct resource), GFP_ATOMIC); 1324 1326 switch (e820.map[i].type) { 1325 1327 case E820_RAM: res->name = "System RAM"; break;
+1 -3
arch/i386/kernel/traps.c
··· 130 130 print_symbol("%s", addr); 131 131 132 132 printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS; 133 - 134 133 if (printed) 135 - printk(" "); 134 + printk(" "); 136 135 else 137 136 printk("\n"); 138 137 ··· 211 212 } 212 213 213 214 stack = esp; 214 - printk(log_lvl); 215 215 for(i = 0; i < kstack_depth_to_print; i++) { 216 216 if (kstack_end(stack)) 217 217 break;
+1 -1
arch/i386/mm/init.c
··· 651 651 * Specifically, in the case of x86, we will always add 652 652 * memory to the highmem for now. 653 653 */ 654 - #ifdef CONFIG_HOTPLUG_MEMORY 654 + #ifdef CONFIG_MEMORY_HOTPLUG 655 655 #ifndef CONFIG_NEED_MULTIPLE_NODES 656 656 int add_memory(u64 start, u64 size) 657 657 {
+4 -3
arch/i386/oprofile/nmi_int.c
··· 332 332 { 333 333 __u8 cpu_model = boot_cpu_data.x86_model; 334 334 335 - if (cpu_model > 0xd) 335 + if (cpu_model == 14) 336 + *cpu_type = "i386/core"; 337 + else if (cpu_model > 0xd) 336 338 return 0; 337 - 338 - if (cpu_model == 9) { 339 + else if (cpu_model == 9) { 339 340 *cpu_type = "i386/p6_mobile"; 340 341 } else if (cpu_model > 5) { 341 342 *cpu_type = "i386/piii";
+1 -1
arch/i386/power/cpu.c
··· 92 92 write_cr4(ctxt->cr4); 93 93 write_cr3(ctxt->cr3); 94 94 write_cr2(ctxt->cr2); 95 - write_cr2(ctxt->cr0); 95 + write_cr0(ctxt->cr0); 96 96 97 97 /* 98 98 * now restore the descriptor tables to their proper values
+2 -2
arch/ia64/configs/sn2_defconfig
··· 134 134 CONFIG_ARCH_SPARSEMEM_ENABLE=y 135 135 CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y 136 136 CONFIG_NUMA=y 137 - CONFIG_NODES_SHIFT=8 137 + CONFIG_NODES_SHIFT=10 138 138 CONFIG_VIRTUAL_MEM_MAP=y 139 139 CONFIG_HOLES_IN_ZONE=y 140 140 CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y ··· 1159 1159 # CONFIG_SCHEDSTATS is not set 1160 1160 # CONFIG_DEBUG_SLAB is not set 1161 1161 CONFIG_DEBUG_PREEMPT=y 1162 - CONFIG_DEBUG_MUTEXES=y 1162 + # CONFIG_DEBUG_MUTEXES is not set 1163 1163 # CONFIG_DEBUG_SPINLOCK is not set 1164 1164 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1165 1165 # CONFIG_DEBUG_KOBJECT is not set
+2 -2
arch/ia64/kernel/iosapic.c
··· 416 416 ia64_vector vec = irq_to_vector(irq); 417 417 struct iosapic_rte_info *rte; 418 418 419 - move_irq(irq); 419 + move_native_irq(irq); 420 420 list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) 421 421 iosapic_eoi(rte->addr, vec); 422 422 } ··· 458 458 { 459 459 irq_desc_t *idesc = irq_descp(irq); 460 460 461 - move_irq(irq); 461 + move_native_irq(irq); 462 462 /* 463 463 * Once we have recorded IRQ_PENDING already, we can mask the 464 464 * interrupt for real. This prevents IRQ storms from unhandled
-1
arch/ia64/kernel/irq.c
··· 101 101 102 102 if (irq < NR_IRQS) { 103 103 irq_affinity[irq] = mask; 104 - set_irq_info(irq, mask); 105 104 irq_redir[irq] = (char) (redir & 0xff); 106 105 } 107 106 }
+1 -1
arch/powerpc/kernel/prom_init.c
··· 1636 1636 compat, sizeof(compat)-1); 1637 1637 if (len <= 0) 1638 1638 return PLATFORM_GENERIC; 1639 - if (strncmp(compat, RELOC("chrp"), 4)) 1639 + if (strcmp(compat, RELOC("chrp"))) 1640 1640 return PLATFORM_GENERIC; 1641 1641 1642 1642 /* Default to pSeries. We need to know if we are running LPAR */
+2
arch/powerpc/kernel/systbl.S
··· 338 338 SYSCALL(readlinkat) 339 339 SYSCALL(fchmodat) 340 340 SYSCALL(faccessat) 341 + COMPAT_SYS(get_robust_list) 342 + COMPAT_SYS(set_robust_list) 341 343 342 344 /* 343 345 * please add new calls to arch/powerpc/platforms/cell/spu_callbacks.c
+5 -2
arch/powerpc/platforms/cell/spu_callbacks.c
··· 258 258 [__NR_futex] sys_futex, 259 259 [__NR_sched_setaffinity] sys_sched_setaffinity, 260 260 [__NR_sched_getaffinity] sys_sched_getaffinity, 261 + [224] sys_ni_syscall, 261 262 [__NR_tuxcall] sys_ni_syscall, 262 263 [226] sys_ni_syscall, 263 264 [__NR_io_setup] sys_io_setup, ··· 333 332 [__NR_readlinkat] sys_readlinkat, 334 333 [__NR_fchmodat] sys_fchmodat, 335 334 [__NR_faccessat] sys_faccessat, 335 + [__NR_get_robust_list] sys_get_robust_list, 336 + [__NR_set_robust_list] sys_set_robust_list, 336 337 }; 337 338 338 339 long spu_sys_callback(struct spu_syscall_block *s) 339 340 { 340 341 long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6); 341 342 342 - syscall = spu_syscall_table[s->nr_ret]; 343 - 344 343 if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) { 345 344 pr_debug("%s: invalid syscall #%ld", __FUNCTION__, s->nr_ret); 346 345 return -ENOSYS; 347 346 } 347 + 348 + syscall = spu_syscall_table[s->nr_ret]; 348 349 349 350 #ifdef DEBUG 350 351 print_symbol(KERN_DEBUG "SPU-syscall %s:", (unsigned long)syscall);
+1 -1
arch/powerpc/platforms/pseries/setup.c
··· 255 255 { 256 256 /* Manually leave the kernel version on the panel. */ 257 257 ppc_md.progress("Linux ppc64\n", 0); 258 - ppc_md.progress(system_utsname.version, 0); 258 + ppc_md.progress(system_utsname.release, 0); 259 259 260 260 return 0; 261 261 }
+8
arch/s390/kernel/compat_wrapper.S
··· 1650 1650 llgfr %r4,%r4 # size_t 1651 1651 llgfr %r5,%r5 # unsigned int 1652 1652 jg sys_tee 1653 + 1654 + .globl compat_sys_vmsplice_wrapper 1655 + compat_sys_vmsplice_wrapper: 1656 + lgfr %r2,%r2 # int 1657 + llgtr %r3,%r3 # compat_iovec * 1658 + llgfr %r4,%r4 # unsigned int 1659 + llgfr %r5,%r5 # unsigned int 1660 + jg compat_sys_vmsplice
+1
arch/s390/kernel/syscalls.S
··· 317 317 SYSCALL(sys_splice,sys_splice,sys_splice_wrapper) 318 318 SYSCALL(sys_sync_file_range,sys_sync_file_range,sys_sync_file_range_wrapper) 319 319 SYSCALL(sys_tee,sys_tee,sys_tee_wrapper) 320 + SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice_wrapper)
+5 -4
arch/s390/kernel/time.c
··· 249 249 unsigned long flags; 250 250 unsigned long seq, next; 251 251 __u64 timer, todval; 252 + int cpu = smp_processor_id(); 252 253 253 254 if (sysctl_hz_timer != 0) 254 255 return; 255 256 256 - cpu_set(smp_processor_id(), nohz_cpu_mask); 257 + cpu_set(cpu, nohz_cpu_mask); 257 258 258 259 /* 259 260 * Leave the clock comparator set up for the next timer 260 261 * tick if either rcu or a softirq is pending. 261 262 */ 262 - if (rcu_pending(smp_processor_id()) || local_softirq_pending()) { 263 - cpu_clear(smp_processor_id(), nohz_cpu_mask); 263 + if (rcu_needs_cpu(cpu) || local_softirq_pending()) { 264 + cpu_clear(cpu, nohz_cpu_mask); 264 265 return; 265 266 } 266 267 ··· 272 271 next = next_timer_interrupt(); 273 272 do { 274 273 seq = read_seqbegin_irqsave(&xtime_lock, flags); 275 - timer = (__u64)(next - jiffies) + jiffies_64; 274 + timer = (__u64 next) - (__u64 jiffies) + jiffies_64; 276 275 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); 277 276 todval = -1ULL; 278 277 /* Be careful about overflows. */
+5
arch/sparc/kernel/ioport.c
··· 274 274 if (mmu_map_dma_area(dma_addrp, va, res->start, len_total) != 0) 275 275 goto err_noiommu; 276 276 277 + /* Set the resource name, if known. */ 278 + if (sdev) { 279 + res->name = sdev->prom_name; 280 + } 281 + 277 282 return (void *)res->start; 278 283 279 284 err_noiommu:
+1
arch/sparc/kernel/module.c
··· 113 113 114 114 switch (ELF32_R_TYPE(rel[i].r_info)) { 115 115 case R_SPARC_32: 116 + case R_SPARC_UA32: 116 117 location[0] = v >> 24; 117 118 location[1] = v >> 16; 118 119 location[2] = v >> 8;
-13
arch/sparc/kernel/sparc_ksyms.c
··· 251 251 EXPORT_SYMBOL(__prom_getsibling); 252 252 253 253 /* sparc library symbols */ 254 - EXPORT_SYMBOL(memchr); 255 254 EXPORT_SYMBOL(memscan); 256 255 EXPORT_SYMBOL(strlen); 257 - EXPORT_SYMBOL(strnlen); 258 - EXPORT_SYMBOL(strcpy); 259 - EXPORT_SYMBOL(strncpy); 260 - EXPORT_SYMBOL(strcat); 261 - EXPORT_SYMBOL(strncat); 262 - EXPORT_SYMBOL(strcmp); 263 256 EXPORT_SYMBOL(strncmp); 264 - EXPORT_SYMBOL(strchr); 265 - EXPORT_SYMBOL(strrchr); 266 - EXPORT_SYMBOL(strstr); 267 257 EXPORT_SYMBOL(page_kernel); 268 258 269 259 /* Special internal versions of library functions. */ ··· 307 317 308 318 /* Sun Power Management Idle Handler */ 309 319 EXPORT_SYMBOL(pm_idle); 310 - 311 - /* Binfmt_misc needs this */ 312 - EXPORT_SYMBOL(sys_close);
+2 -1
arch/sparc/kernel/systbls.S
··· 79 79 /*285*/ .long sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64 80 80 /*290*/ .long sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat 81 81 /*295*/ .long sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare 82 + /*300*/ .long sys_set_robust_list, sys_get_robust_list 82 83 83 84 #ifdef CONFIG_SUNOS_EMUL 84 85 /* Now the SunOS syscall table. */ ··· 191 190 /*290*/ .long sunos_nosys, sunos_nosys, sunos_nosys 192 191 .long sunos_nosys, sunos_nosys, sunos_nosys 193 192 .long sunos_nosys, sunos_nosys, sunos_nosys 194 - .long sunos_nosys 193 + .long sunos_nosys, sunos_nosys, sunos_nosys 195 194 196 195 #endif
+13 -6
arch/sparc64/defconfig
··· 1 1 # 2 2 # Automatically generated make config: don't edit 3 - # Linux kernel version: 2.6.16 4 - # Sun Apr 2 19:31:04 2006 3 + # Linux kernel version: 2.6.17-rc3 4 + # Fri May 12 12:43:49 2006 5 5 # 6 6 CONFIG_SPARC=y 7 7 CONFIG_SPARC64=y ··· 114 114 CONFIG_HUGETLB_PAGE_SIZE_4MB=y 115 115 # CONFIG_HUGETLB_PAGE_SIZE_512K is not set 116 116 # CONFIG_HUGETLB_PAGE_SIZE_64K is not set 117 + CONFIG_ARCH_SELECT_MEMORY_MODEL=y 117 118 CONFIG_ARCH_SPARSEMEM_ENABLE=y 118 119 CONFIG_ARCH_SPARSEMEM_DEFAULT=y 119 120 CONFIG_LARGE_ALLOCS=y ··· 431 430 # CONFIG_SCSI_INIA100 is not set 432 431 # CONFIG_SCSI_SYM53C8XX_2 is not set 433 432 # CONFIG_SCSI_IPR is not set 434 - # CONFIG_SCSI_QLOGIC_FC is not set 435 433 # CONFIG_SCSI_QLOGIC_1280 is not set 436 434 # CONFIG_SCSI_QLOGICPTI is not set 437 435 # CONFIG_SCSI_QLA_FC is not set ··· 1042 1042 # CONFIG_USB_ACECAD is not set 1043 1043 # CONFIG_USB_KBTAB is not set 1044 1044 # CONFIG_USB_POWERMATE is not set 1045 - # CONFIG_USB_MTOUCH is not set 1046 - # CONFIG_USB_ITMTOUCH is not set 1047 - # CONFIG_USB_EGALAX is not set 1045 + # CONFIG_USB_TOUCHSCREEN is not set 1048 1046 # CONFIG_USB_YEALINK is not set 1049 1047 # CONFIG_USB_XPAD is not set 1050 1048 # CONFIG_USB_ATI_REMOTE is not set ··· 1111 1113 # LED devices 1112 1114 # 1113 1115 # CONFIG_NEW_LEDS is not set 1116 + 1117 + # 1118 + # LED drivers 1119 + # 1120 + 1121 + # 1122 + # LED Triggers 1123 + # 1114 1124 1115 1125 # 1116 1126 # InfiniBand support ··· 1309 1303 # CONFIG_DEBUG_INFO is not set 1310 1304 CONFIG_DEBUG_FS=y 1311 1305 # CONFIG_DEBUG_VM is not set 1306 + # CONFIG_UNWIND_INFO is not set 1312 1307 CONFIG_FORCED_INLINING=y 1313 1308 # CONFIG_RCU_TORTURE_TEST is not set 1314 1309 # CONFIG_DEBUG_STACK_USAGE is not set
+5
arch/sparc64/kernel/module.c
··· 143 143 location[3] = v >> 0; 144 144 break; 145 145 146 + case R_SPARC_DISP32: 147 + v -= (Elf64_Addr) location; 148 + *loc32 = v; 149 + break; 150 + 146 151 case R_SPARC_WDISP30: 147 152 v -= (Elf64_Addr) location; 148 153 *loc32 = (*loc32 & ~0x3fffffff) |
+2 -2
arch/sparc64/kernel/pci_iommu.c
··· 218 218 * DMA for PCI device PDEV. Return non-NULL cpu-side address if 219 219 * successful and set *DMA_ADDRP to the PCI side dma address. 220 220 */ 221 - static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) 221 + static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) 222 222 { 223 223 struct pcidev_cookie *pcp; 224 224 struct pci_iommu *iommu; ··· 232 232 if (order >= 10) 233 233 return NULL; 234 234 235 - first_page = __get_free_pages(GFP_ATOMIC, order); 235 + first_page = __get_free_pages(gfp, order); 236 236 if (first_page == 0UL) 237 237 return NULL; 238 238 memset((char *)first_page, 0, PAGE_SIZE << order);
+2 -2
arch/sparc64/kernel/pci_sun4v.c
··· 154 154 __clear_bit(i, arena->map); 155 155 } 156 156 157 - static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) 157 + static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) 158 158 { 159 159 struct pcidev_cookie *pcp; 160 160 struct pci_iommu *iommu; ··· 169 169 170 170 npages = size >> IO_PAGE_SHIFT; 171 171 172 - first_page = __get_free_pages(GFP_ATOMIC, order); 172 + first_page = __get_free_pages(gfp, order); 173 173 if (unlikely(first_page == 0UL)) 174 174 return NULL; 175 175
+5 -3
arch/sparc64/kernel/systbls.S
··· 78 78 .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid 79 79 /*280*/ .word sys32_tee, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat 80 80 .word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_fstatat64 81 - /*285*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat 81 + /*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat 82 82 .word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare 83 + /*300*/ .word compat_sys_set_robust_list, compat_sys_get_robust_list 83 84 84 85 #endif /* CONFIG_COMPAT */ 85 86 ··· 148 147 .word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid 149 148 /*280*/ .word sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat 150 149 .word sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64 151 - /*285*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat 150 + /*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat 152 151 .word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare 152 + /*300*/ .word sys_set_robust_list, sys_get_robust_list 153 153 154 154 #if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \ 155 155 defined(CONFIG_SOLARIS_EMUL_MODULE) ··· 263 261 /*290*/ .word sunos_nosys, sunos_nosys, sunos_nosys 264 262 .word sunos_nosys, sunos_nosys, sunos_nosys 265 263 .word sunos_nosys, sunos_nosys, sunos_nosys 266 - .word sunos_nosys 264 + .word sunos_nosys, sunos_nosys, sunos_nosys 267 265 #endif
+3 -3
arch/x86_64/kernel/kprobes.c
··· 514 514 *tos = orig_rip + (*tos - copy_rip); 515 515 break; 516 516 case 0xff: 517 - if ((*insn & 0x30) == 0x10) { 517 + if ((insn[1] & 0x30) == 0x10) { 518 518 /* call absolute, indirect */ 519 519 /* Fix return addr; rip is correct. */ 520 520 next_rip = regs->rip; 521 521 *tos = orig_rip + (*tos - copy_rip); 522 - } else if (((*insn & 0x31) == 0x20) || /* jmp near, absolute indirect */ 523 - ((*insn & 0x31) == 0x21)) { /* jmp far, absolute indirect */ 522 + } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */ 523 + ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */ 524 524 /* rip is correct. */ 525 525 next_rip = regs->rip; 526 526 }
+4 -3
arch/x86_64/kernel/pci-nommu.c
··· 12 12 check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) 13 13 { 14 14 if (hwdev && bus + size > *hwdev->dma_mask) { 15 - printk(KERN_ERR 16 - "nommu_%s: overflow %Lx+%lu of device mask %Lx\n", 17 - name, (long long)bus, size, (long long)*hwdev->dma_mask); 15 + if (*hwdev->dma_mask >= 0xffffffffULL) 16 + printk(KERN_ERR 17 + "nommu_%s: overflow %Lx+%lu of device mask %Lx\n", 18 + name, (long long)bus, size, (long long)*hwdev->dma_mask); 18 19 return 0; 19 20 } 20 21 return 1;
+17 -4
arch/x86_64/kernel/traps.c
··· 102 102 { 103 103 if (regs->eflags & X86_EFLAGS_IF) 104 104 local_irq_disable(); 105 + /* Make sure to not schedule here because we could be running 106 + on an exception stack. */ 105 107 preempt_enable_no_resched(); 106 108 } 107 109 ··· 485 483 { 486 484 struct task_struct *tsk = current; 487 485 488 - conditional_sti(regs); 489 - 490 486 tsk->thread.error_code = error_code; 491 487 tsk->thread.trap_no = trapnr; 492 488 ··· 521 521 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ 522 522 == NOTIFY_STOP) \ 523 523 return; \ 524 + conditional_sti(regs); \ 524 525 do_trap(trapnr, signr, str, regs, error_code, NULL); \ 525 526 } 526 527 ··· 536 535 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ 537 536 == NOTIFY_STOP) \ 538 537 return; \ 538 + conditional_sti(regs); \ 539 539 do_trap(trapnr, signr, str, regs, error_code, &info); \ 540 540 } 541 541 ··· 550 548 DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) 551 549 DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) 552 550 DO_ERROR(18, SIGSEGV, "reserved", reserved) 553 - DO_ERROR(12, SIGBUS, "stack segment", stack_segment) 551 + 552 + /* Runs on IST stack */ 553 + asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code) 554 + { 555 + if (notify_die(DIE_TRAP, "stack segment", regs, error_code, 556 + 12, SIGBUS) == NOTIFY_STOP) 557 + return; 558 + preempt_conditional_sti(regs); 559 + do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL); 560 + preempt_conditional_cli(regs); 561 + } 554 562 555 563 asmlinkage void do_double_fault(struct pt_regs * regs, long error_code) 556 564 { ··· 694 682 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) { 695 683 return; 696 684 } 685 + preempt_conditional_sti(regs); 697 686 do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); 698 - return; 687 + preempt_conditional_cli(regs); 699 688 } 700 689 701 690 /* Help handler running on IST stack to switch 
back to user stack
+11 -4
arch/x86_64/mm/srat.c
··· 34 34 static struct bootnode nodes[MAX_NUMNODES] __initdata; 35 35 static struct bootnode nodes_add[MAX_NUMNODES] __initdata; 36 36 static int found_add_area __initdata; 37 - int hotadd_percent __initdata = 10; 37 + int hotadd_percent __initdata = 0; 38 + #ifndef RESERVE_HOTADD 39 + #define hotadd_percent 0 /* Ignore all settings */ 40 + #endif 38 41 static u8 pxm2node[256] = { [0 ... 255] = 0xff }; 39 42 40 43 /* Too small nodes confuse the VM badly. Usually they result ··· 106 103 int i; 107 104 printk(KERN_ERR "SRAT: SRAT not used.\n"); 108 105 acpi_numa = -1; 106 + found_add_area = 0; 109 107 for (i = 0; i < MAX_LOCAL_APIC; i++) 110 108 apicid_to_node[i] = NUMA_NO_NODE; 111 109 for (i = 0; i < MAX_NUMNODES; i++) ··· 158 154 int pxm, node; 159 155 if (srat_disabled()) 160 156 return; 161 - if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { bad_srat(); 157 + if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { 158 + bad_srat(); 162 159 return; 163 160 } 164 161 if (pa->flags.enabled == 0) ··· 196 191 allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE; 197 192 allowed = (allowed / 100) * hotadd_percent; 198 193 if (allocated + mem > allowed) { 194 + unsigned long range; 199 195 /* Give them at least part of their hotadd memory upto hotadd_percent 200 196 It would be better to spread the limit out 201 197 over multiple hotplug areas, but that is too complicated 202 198 right now */ 203 199 if (allocated >= allowed) 204 200 return 0; 205 - pages = (allowed - allocated + mem) / sizeof(struct page); 201 + range = allowed - allocated; 202 + pages = (range / PAGE_SIZE); 206 203 mem = pages * sizeof(struct page); 207 - nd->end = nd->start + pages*PAGE_SIZE; 204 + nd->end = nd->start + range; 208 205 } 209 206 /* Not completely fool proof, but a good sanity check */ 210 207 addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
-2
block/genhd.c
··· 182 182 */ 183 183 void add_disk(struct gendisk *disk) 184 184 { 185 - get_device(disk->driverfs_dev); 186 185 disk->flags |= GENHD_FL_UP; 187 186 blk_register_region(MKDEV(disk->major, disk->first_minor), 188 187 disk->minors, NULL, exact_match, exact_lock, disk); ··· 427 428 static void disk_release(struct kobject * kobj) 428 429 { 429 430 struct gendisk *disk = to_disk(kobj); 430 - put_device(disk->driverfs_dev); 431 431 kfree(disk->random); 432 432 kfree(disk->part); 433 433 free_disk_stats(disk);
+6 -1
block/ll_rw_blk.c
··· 3452 3452 if (unlikely(laptop_mode) && blk_fs_request(req)) 3453 3453 laptop_io_completion(); 3454 3454 3455 - if (disk && blk_fs_request(req)) { 3455 + /* 3456 + * Account IO completion. bar_rq isn't accounted as a normal 3457 + * IO on queueing nor completion. Accounting the containing 3458 + * request is enough. 3459 + */ 3460 + if (disk && blk_fs_request(req) && req != &req->q->bar_rq) { 3456 3461 unsigned long duration = jiffies - req->start_time; 3457 3462 const int rw = rq_data_dir(req); 3458 3463
+8 -31
drivers/base/firmware_class.c
··· 86 86 static CLASS_ATTR(timeout, 0644, firmware_timeout_show, firmware_timeout_store); 87 87 88 88 static void fw_class_dev_release(struct class_device *class_dev); 89 - int firmware_class_uevent(struct class_device *dev, char **envp, 90 - int num_envp, char *buffer, int buffer_size); 91 89 92 - static struct class firmware_class = { 93 - .name = "firmware", 94 - .uevent = firmware_class_uevent, 95 - .release = fw_class_dev_release, 96 - }; 97 - 98 - int 99 - firmware_class_uevent(struct class_device *class_dev, char **envp, 100 - int num_envp, char *buffer, int buffer_size) 90 + static int firmware_class_uevent(struct class_device *class_dev, char **envp, 91 + int num_envp, char *buffer, int buffer_size) 101 92 { 102 93 struct firmware_priv *fw_priv = class_get_devdata(class_dev); 103 94 int i = 0, len = 0; ··· 106 115 107 116 return 0; 108 117 } 118 + 119 + static struct class firmware_class = { 120 + .name = "firmware", 121 + .uevent = firmware_class_uevent, 122 + .release = fw_class_dev_release, 123 + }; 109 124 110 125 static ssize_t 111 126 firmware_loading_show(struct class_device *class_dev, char *buf) ··· 490 493 } 491 494 } 492 495 493 - /** 494 - * register_firmware: - provide a firmware image for later usage 495 - * @name: name of firmware image file 496 - * @data: buffer pointer for the firmware image 497 - * @size: size of the data buffer area 498 - * 499 - * Make sure that @data will be available by requesting firmware @name. 500 - * 501 - * Note: This will not be possible until some kind of persistence 502 - * is available. 
503 - **/ 504 - void 505 - register_firmware(const char *name, const u8 *data, size_t size) 506 - { 507 - /* This is meaningless without firmware caching, so until we 508 - * decide if firmware caching is reasonable just leave it as a 509 - * noop */ 510 - } 511 - 512 496 /* Async support */ 513 497 struct firmware_work { 514 498 struct work_struct work; ··· 608 630 EXPORT_SYMBOL(release_firmware); 609 631 EXPORT_SYMBOL(request_firmware); 610 632 EXPORT_SYMBOL(request_firmware_nowait); 611 - EXPORT_SYMBOL(register_firmware);
+10 -8
drivers/block/ub.c
··· 536 536 kfree(lun); 537 537 } 538 538 539 + usb_set_intfdata(sc->intf, NULL); 540 + usb_put_intf(sc->intf); 541 + usb_put_dev(sc->dev); 539 542 kfree(sc); 540 543 } 541 544 ··· 2224 2221 // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; 2225 2222 usb_set_intfdata(intf, sc); 2226 2223 usb_get_dev(sc->dev); 2227 - // usb_get_intf(sc->intf); /* Do we need this? */ 2224 + /* 2225 + * Since we give the interface struct to the block level through 2226 + * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent 2227 + * oopses on close after a disconnect (kernels 2.6.16 and up). 2228 + */ 2229 + usb_get_intf(sc->intf); 2228 2230 2229 2231 snprintf(sc->name, 12, DRV_NAME "(%d.%d)", 2230 2232 sc->dev->bus->busnum, sc->dev->devnum); ··· 2294 2286 2295 2287 err_dev_desc: 2296 2288 usb_set_intfdata(intf, NULL); 2297 - // usb_put_intf(sc->intf); 2289 + usb_put_intf(sc->intf); 2298 2290 usb_put_dev(sc->dev); 2299 2291 kfree(sc); 2300 2292 err_core: ··· 2468 2460 * At this point there must be no commands coming from anyone 2469 2461 * and no URBs left in transit. 2470 2462 */ 2471 - 2472 - usb_set_intfdata(intf, NULL); 2473 - // usb_put_intf(sc->intf); 2474 - sc->intf = NULL; 2475 - usb_put_dev(sc->dev); 2476 - sc->dev = NULL; 2477 2463 2478 2464 ub_put(sc); 2479 2465 }
+1 -1
drivers/char/Kconfig
··· 291 291 292 292 config RIO 293 293 tristate "Specialix RIO system support" 294 - depends on SERIAL_NONSTANDARD && !64BIT 294 + depends on SERIAL_NONSTANDARD 295 295 help 296 296 This is a driver for the Specialix RIO, a smart serial card which 297 297 drives an outboard box that can support up to 128 ports. Product
-9
drivers/char/rio/host.h
··· 33 33 #ifndef __rio_host_h__ 34 34 #define __rio_host_h__ 35 35 36 - #ifdef SCCS_LABELS 37 - #ifndef lint 38 - static char *_host_h_sccs_ = "@(#)host.h 1.2"; 39 - #endif 40 - #endif 41 - 42 36 /* 43 37 ** the host structure - one per host card in the system. 44 38 */ ··· 71 77 #define RC_STARTUP 1 72 78 #define RC_RUNNING 2 73 79 #define RC_STUFFED 3 74 - #define RC_SOMETHING 4 75 - #define RC_SOMETHING_NEW 5 76 - #define RC_SOMETHING_ELSE 6 77 80 #define RC_READY 7 78 81 #define RUN_STATE 7 79 82 /*
+1
drivers/char/rio/rioboot.c
··· 34 34 #include <linux/slab.h> 35 35 #include <linux/termios.h> 36 36 #include <linux/serial.h> 37 + #include <linux/vmalloc.h> 37 38 #include <asm/semaphore.h> 38 39 #include <linux/generic_serial.h> 39 40 #include <linux/errno.h>
+28 -15
drivers/char/rio/rioctrl.c
··· 1394 1394 return RIO_FAIL; 1395 1395 } 1396 1396 1397 - if (((int) ((char) PortP->InUse) == -1) || !(CmdBlkP = RIOGetCmdBlk())) { 1398 - rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block for command %d on port %d\n", Cmd, PortP->PortNum); 1397 + if ((PortP->InUse == (typeof(PortP->InUse))-1) || 1398 + !(CmdBlkP = RIOGetCmdBlk())) { 1399 + rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block " 1400 + "for command %d on port %d\n", Cmd, PortP->PortNum); 1399 1401 return RIO_FAIL; 1400 1402 } 1401 1403 1402 - rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n", CmdBlkP, PortP->InUse); 1404 + rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n", 1405 + CmdBlkP, PortP->InUse); 1403 1406 1404 - PktCmdP = (struct PktCmd_M *) &CmdBlkP->Packet.data[0]; 1407 + PktCmdP = (struct PktCmd_M *)&CmdBlkP->Packet.data[0]; 1405 1408 1406 1409 CmdBlkP->Packet.src_unit = 0; 1407 1410 if (PortP->SecondBlock) ··· 1428 1425 1429 1426 switch (Cmd) { 1430 1427 case MEMDUMP: 1431 - rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p (addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr); 1428 + rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p " 1429 + "(addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr); 1432 1430 PktCmdP->SubCommand = MEMDUMP; 1433 1431 PktCmdP->SubAddr = SubCmd.Addr; 1434 1432 break; 1435 1433 case FCLOSE: 1436 - rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n", CmdBlkP); 1434 + rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n", 1435 + CmdBlkP); 1437 1436 break; 1438 1437 case READ_REGISTER: 1439 - rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) command blk %p\n", (int) SubCmd.Addr, CmdBlkP); 1438 + rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) " 1439 + "command blk %p\n", (int) SubCmd.Addr, CmdBlkP); 1440 1440 PktCmdP->SubCommand = READ_REGISTER; 1441 1441 PktCmdP->SubAddr = SubCmd.Addr; 1442 1442 break; 1443 1443 case RESUME: 1444 - rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n", 
CmdBlkP); 1444 + rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n", 1445 + CmdBlkP); 1445 1446 break; 1446 1447 case RFLUSH: 1447 - rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n", CmdBlkP); 1448 + rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n", 1449 + CmdBlkP); 1448 1450 CmdBlkP->PostFuncP = RIORFlushEnable; 1449 1451 break; 1450 1452 case SUSPEND: 1451 - rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n", CmdBlkP); 1453 + rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n", 1454 + CmdBlkP); 1452 1455 break; 1453 1456 1454 1457 case MGET: 1455 - rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n", CmdBlkP); 1458 + rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n", 1459 + CmdBlkP); 1456 1460 break; 1457 1461 1458 1462 case MSET: 1459 1463 case MBIC: 1460 1464 case MBIS: 1461 1465 CmdBlkP->Packet.data[4] = (char) PortP->ModemLines; 1462 - rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command blk %p\n", CmdBlkP); 1466 + rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command " 1467 + "blk %p\n", CmdBlkP); 1463 1468 break; 1464 1469 1465 1470 case WFLUSH: ··· 1476 1465 ** allowed then we should not bother sending any more to the 1477 1466 ** RTA. 1478 1467 */ 1479 - if ((int) ((char) PortP->WflushFlag) == (int) -1) { 1480 - rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, WflushFlag about to wrap!"); 1468 + if (PortP->WflushFlag == (typeof(PortP->WflushFlag))-1) { 1469 + rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, " 1470 + "WflushFlag about to wrap!"); 1481 1471 RIOFreeCmdBlk(CmdBlkP); 1482 1472 return (RIO_FAIL); 1483 1473 } else { 1484 - rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command blk %p\n", CmdBlkP); 1474 + rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command " 1475 + "blk %p\n", CmdBlkP); 1485 1476 CmdBlkP->PostFuncP = RIOWFlushMark; 1486 1477 } 1487 1478 break;
+5 -51
drivers/char/rio/rioioctl.h
··· 33 33 #ifndef __rioioctl_h__ 34 34 #define __rioioctl_h__ 35 35 36 - #ifdef SCCS_LABELS 37 - static char *_rioioctl_h_sccs_ = "@(#)rioioctl.h 1.2"; 38 - #endif 39 - 40 36 /* 41 37 ** RIO device driver - user ioctls and associated structures. 42 38 */ ··· 40 44 struct portStats { 41 45 int port; 42 46 int gather; 43 - ulong txchars; 44 - ulong rxchars; 45 - ulong opens; 46 - ulong closes; 47 - ulong ioctls; 47 + unsigned long txchars; 48 + unsigned long rxchars; 49 + unsigned long opens; 50 + unsigned long closes; 51 + unsigned long ioctls; 48 52 }; 49 53 50 - 51 - #define rIOC ('r'<<8) 52 - #define TCRIOSTATE (rIOC | 1) 53 - #define TCRIOXPON (rIOC | 2) 54 - #define TCRIOXPOFF (rIOC | 3) 55 - #define TCRIOXPCPS (rIOC | 4) 56 - #define TCRIOXPRINT (rIOC | 5) 57 - #define TCRIOIXANYON (rIOC | 6) 58 - #define TCRIOIXANYOFF (rIOC | 7) 59 - #define TCRIOIXONON (rIOC | 8) 60 - #define TCRIOIXONOFF (rIOC | 9) 61 - #define TCRIOMBIS (rIOC | 10) 62 - #define TCRIOMBIC (rIOC | 11) 63 - #define TCRIOTRIAD (rIOC | 12) 64 - #define TCRIOTSTATE (rIOC | 13) 65 - 66 - /* 67 - ** 15.10.1998 ARG - ESIL 0761 part fix 68 - ** Add RIO ioctls for manipulating RTS and CTS flow control, (as LynxOS 69 - ** appears to not support hardware flow control). 70 - */ 71 - #define TCRIOCTSFLOWEN (rIOC | 14) /* enable CTS flow control */ 72 - #define TCRIOCTSFLOWDIS (rIOC | 15) /* disable CTS flow control */ 73 - #define TCRIORTSFLOWEN (rIOC | 16) /* enable RTS flow control */ 74 - #define TCRIORTSFLOWDIS (rIOC | 17) /* disable RTS flow control */ 75 - 76 - /* 77 - ** 09.12.1998 ARG - ESIL 0776 part fix 78 - ** Definition for 'RIOC' also appears in daemon.h, so we'd better do a 79 - ** #ifndef here first. 80 - ** 'RIO_QUICK_CHECK' also #define'd here as this ioctl is now 81 - ** allowed to be used by customers. 
82 - ** 83 - ** 05.02.1999 ARG - 84 - ** This is what I've decied to do with ioctls etc., which are intended to be 85 - ** invoked from users applications : 86 - ** Anything that needs to be defined here will be removed from daemon.h, that 87 - ** way it won't end up having to be defined/maintained in two places. The only 88 - ** consequence of this is that this file should now be #include'd by daemon.h 89 - ** 90 - ** 'stats' ioctls now #define'd here as they are to be used by customers. 91 - */ 92 54 #define RIOC ('R'<<8)|('i'<<16)|('o'<<24) 93 55 94 56 #define RIO_QUICK_CHECK (RIOC | 105)
+1 -1
drivers/char/tpm/Kconfig
··· 22 22 23 23 config TCG_TIS 24 24 tristate "TPM Interface Specification 1.2 Interface" 25 - depends on TCG_TPM 25 + depends on TCG_TPM && PNPACPI 26 26 ---help--- 27 27 If you have a TPM security chip that is compliant with the 28 28 TCG TIS 1.2 TPM specification say Yes and it will be accessible
+1 -1
drivers/char/tpm/tpm.h
··· 140 140 extern struct dentry ** tpm_bios_log_setup(char *); 141 141 extern void tpm_bios_log_teardown(struct dentry **); 142 142 #else 143 - static inline struct dentry* tpm_bios_log_setup(char *name) 143 + static inline struct dentry ** tpm_bios_log_setup(char *name) 144 144 { 145 145 return NULL; 146 146 }
+1 -1
drivers/char/tpm/tpm_tis.c
··· 55 55 }; 56 56 57 57 enum tis_defaults { 58 - TIS_MEM_BASE = 0xFED4000, 58 + TIS_MEM_BASE = 0xFED40000, 59 59 TIS_MEM_LEN = 0x5000, 60 60 TIS_SHORT_TIMEOUT = 750, /* ms */ 61 61 TIS_LONG_TIMEOUT = 2000, /* 2 sec */
+1 -1
drivers/char/tty_io.c
··· 398 398 while (unlikely(size > copied)); 399 399 return copied; 400 400 } 401 - EXPORT_SYMBOL_GPL(tty_insert_flip_string_flags); 401 + EXPORT_SYMBOL(tty_insert_flip_string_flags); 402 402 403 403 void tty_schedule_flip(struct tty_struct *tty) 404 404 {
+5 -11
drivers/char/watchdog/i8xx_tco.c
··· 33 33 * 82801E (C-ICH) : document number 273599-001, 273645-002, 34 34 * 82801EB (ICH5) : document number 252516-001, 252517-003, 35 35 * 82801ER (ICH5R) : document number 252516-001, 252517-003, 36 - * 82801FB (ICH6) : document number 301473-002, 301474-007, 37 - * 82801FR (ICH6R) : document number 301473-002, 301474-007, 38 - * 82801FBM (ICH6-M) : document number 301473-002, 301474-007, 39 - * 82801FW (ICH6W) : document number 301473-001, 301474-007, 40 - * 82801FRW (ICH6RW) : document number 301473-001, 301474-007 41 36 * 42 37 * 20000710 Nils Faerber 43 38 * Initial Version 0.01 ··· 61 66 * 20050807 Wim Van Sebroeck <wim@iguana.be> 62 67 * 0.08 Make sure that the watchdog is only "armed" when started. 63 68 * (Kernel Bug 4251) 69 + * 20060416 Wim Van Sebroeck <wim@iguana.be> 70 + * 0.09 Remove support for the ICH6, ICH6R, ICH6-M, ICH6W and ICH6RW and 71 + * ICH7 chipsets. (See Kernel Bug 6031 - other code will support these 72 + * chipsets) 64 73 */ 65 74 66 75 /* ··· 89 90 #include "i8xx_tco.h" 90 91 91 92 /* Module and version information */ 92 - #define TCO_VERSION "0.08" 93 + #define TCO_VERSION "0.09" 93 94 #define TCO_MODULE_NAME "i8xx TCO timer" 94 95 #define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION 95 96 #define PFX TCO_MODULE_NAME ": " ··· 390 391 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, PCI_ANY_ID, PCI_ANY_ID, }, 391 392 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_0, PCI_ANY_ID, PCI_ANY_ID, }, 392 393 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, PCI_ANY_ID, PCI_ANY_ID, }, 393 - { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, PCI_ANY_ID, PCI_ANY_ID, }, 394 - { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, PCI_ANY_ID, PCI_ANY_ID, }, 395 - { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_2, PCI_ANY_ID, PCI_ANY_ID, }, 396 - { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, PCI_ANY_ID, PCI_ANY_ID, }, 397 - { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, PCI_ANY_ID, PCI_ANY_ID, }, 398 394 { 
PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, PCI_ANY_ID, PCI_ANY_ID, }, 399 395 { 0, }, /* End of list */ 400 396 };
+6
drivers/char/watchdog/s3c2410_wdt.c
··· 423 423 if (tmr_atboot && started == 0) { 424 424 printk(KERN_INFO PFX "Starting Watchdog Timer\n"); 425 425 s3c2410wdt_start(); 426 + } else if (!tmr_atboot) { 427 + /* if we're not enabling the watchdog, then ensure it is 428 + * disabled if it has been left running from the bootloader 429 + * or other source */ 430 + 431 + s3c2410wdt_stop(); 426 432 } 427 433 428 434 return 0;
+1 -1
drivers/char/watchdog/sc1200wdt.c
··· 377 377 { 378 378 int ret; 379 379 380 - printk(banner); 380 + printk("%s\n", banner); 381 381 382 382 spin_lock_init(&sc1200wdt_lock); 383 383 sema_init(&open_sem, 1);
+11 -5
drivers/i2c/busses/scx200_acb.c
··· 133 133 134 134 outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); 135 135 outb(ACBST_STASTR | ACBST_NEGACK, ACBST); 136 + 137 + /* Reset the status register */ 138 + outb(0, ACBST); 136 139 return; 137 140 } 138 141 ··· 231 228 timeout = jiffies + POLL_TIMEOUT; 232 229 while (time_before(jiffies, timeout)) { 233 230 status = inb(ACBST); 231 + 232 + /* Reset the status register to avoid the hang */ 233 + outb(0, ACBST); 234 + 234 235 if ((status & (ACBST_SDAST|ACBST_BER|ACBST_NEGACK)) != 0) { 235 236 scx200_acb_machine(iface, status); 236 237 return; ··· 422 415 struct scx200_acb_iface *iface; 423 416 struct i2c_adapter *adapter; 424 417 int rc; 425 - char description[64]; 426 418 427 419 iface = kzalloc(sizeof(*iface), GFP_KERNEL); 428 420 if (!iface) { ··· 440 434 441 435 mutex_init(&iface->mutex); 442 436 443 - snprintf(description, sizeof(description), "%s ACCESS.bus [%s]", 444 - text, adapter->name); 445 - 446 - if (request_region(base, 8, description) == 0) { 437 + if (!request_region(base, 8, adapter->name)) { 447 438 printk(KERN_ERR NAME ": can't allocate io 0x%x-0x%x\n", 448 439 base, base + 8-1); 449 440 rc = -EBUSY; ··· 527 524 } else if (pci_dev_present(divil_pci)) 528 525 rc = scx200_add_cs553x(); 529 526 527 + /* If at least one bus was created, init must succeed */ 528 + if (scx200_acb_list) 529 + return 0; 530 530 return rc; 531 531 } 532 532
+1
drivers/ide/legacy/ide-cs.c
··· 392 392 PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e), 393 393 PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae), 394 394 PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178), 395 + PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), 395 396 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), 396 397 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), 397 398 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
+2
drivers/ide/ppc/pmac.c
··· 553 553 554 554 if (irq != NULL) 555 555 *irq = pmac_ide[ix].irq; 556 + 557 + hw->dev = &pmac_ide[ix].mdev->ofdev.dev; 556 558 } 557 559 558 560 #define PMAC_IDE_REG(x) ((void __iomem *)(IDE_DATA_REG+(x)))
+1 -1
drivers/ieee1394/ohci1394.c
··· 553 553 * register content. 554 554 * To actually enable physical responses is the job of our interrupt 555 555 * handler which programs the physical request filter. */ 556 - reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000); 556 + reg_write(ohci, OHCI1394_PhyUpperBound, 0x01000000); 557 557 558 558 DBGMSG("physUpperBoundOffset=%08x", 559 559 reg_read(ohci, OHCI1394_PhyUpperBound));
+145 -62
drivers/ieee1394/sbp2.c
··· 42 42 #include <linux/kernel.h> 43 43 #include <linux/list.h> 44 44 #include <linux/string.h> 45 + #include <linux/stringify.h> 45 46 #include <linux/slab.h> 46 47 #include <linux/interrupt.h> 47 48 #include <linux/fs.h> ··· 118 117 */ 119 118 static int max_sectors = SBP2_MAX_SECTORS; 120 119 module_param(max_sectors, int, 0444); 121 - MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = 255)"); 120 + MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = " 121 + __stringify(SBP2_MAX_SECTORS) ")"); 122 122 123 123 /* 124 124 * Exclusive login to sbp2 device? In most cases, the sbp2 driver should ··· 137 135 MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)"); 138 136 139 137 /* 140 - * SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on 141 - * if your sbp2 device is not properly handling the SCSI inquiry command. 142 - * This hack makes the inquiry look more like a typical MS Windows inquiry 143 - * by enforcing 36 byte inquiry and avoiding access to mode_sense page 8. 138 + * If any of the following workarounds is required for your device to work, 139 + * please submit the kernel messages logged by sbp2 to the linux1394-devel 140 + * mailing list. 144 141 * 145 - * If force_inquiry_hack=1 is required for your device to work, 146 - * please submit the logged sbp2_firmware_revision value of this device to 147 - * the linux1394-devel mailing list. 142 + * - 128kB max transfer 143 + * Limit transfer size. Necessary for some old bridges. 144 + * 145 + * - 36 byte inquiry 146 + * When scsi_mod probes the device, let the inquiry command look like that 147 + * from MS Windows. 148 + * 149 + * - skip mode page 8 150 + * Suppress sending of mode_sense for mode page 8 if the device pretends to 151 + * support the SCSI Primary Block commands instead of Reduced Block Commands. 
152 + * 153 + * - fix capacity 154 + * Tell sd_mod to correct the last sector number reported by read_capacity. 155 + * Avoids access beyond actual disk limits on devices with an off-by-one bug. 156 + * Don't use this with devices which don't have this bug. 157 + * 158 + * - override internal blacklist 159 + * Instead of adding to the built-in blacklist, use only the workarounds 160 + * specified in the module load parameter. 161 + * Useful if a blacklist entry interfered with a non-broken device. 148 162 */ 163 + static int sbp2_default_workarounds; 164 + module_param_named(workarounds, sbp2_default_workarounds, int, 0644); 165 + MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" 166 + ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS) 167 + ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) 168 + ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) 169 + ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) 170 + ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) 171 + ", or a combination)"); 172 + 173 + /* legacy parameter */ 149 174 static int force_inquiry_hack; 150 175 module_param(force_inquiry_hack, int, 0644); 151 - MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)"); 176 + MODULE_PARM_DESC(force_inquiry_hack, "Deprecated, use 'workarounds'"); 152 177 153 178 /* 154 179 * Export information about protocols/devices supported by this driver. ··· 295 266 }; 296 267 297 268 /* 298 - * List of device firmwares that require the inquiry hack. 299 - * Yields a few false positives but did not break other devices so far. 269 + * List of devices with known bugs. 270 + * 271 + * The firmware_revision field, masked with 0xffff00, is the best indicator 272 + * for the type of bridge chip of a device. It yields a few false positives 273 + * but this did not break correctly behaving devices so far. 
300 274 */ 301 - static u32 sbp2_broken_inquiry_list[] = { 302 - 0x00002800, /* Stefan Richter <stefanr@s5r6.in-berlin.de> */ 303 - /* DViCO Momobay CX-1 */ 304 - 0x00000200 /* Andreas Plesch <plesch@fas.harvard.edu> */ 305 - /* QPS Fire DVDBurner */ 275 + static const struct { 276 + u32 firmware_revision; 277 + u32 model_id; 278 + unsigned workarounds; 279 + } sbp2_workarounds_table[] = { 280 + /* TSB42AA9 */ { 281 + .firmware_revision = 0x002800, 282 + .workarounds = SBP2_WORKAROUND_INQUIRY_36 | 283 + SBP2_WORKAROUND_MODE_SENSE_8, 284 + }, 285 + /* Initio bridges, actually only needed for some older ones */ { 286 + .firmware_revision = 0x000200, 287 + .workarounds = SBP2_WORKAROUND_INQUIRY_36, 288 + }, 289 + /* Symbios bridge */ { 290 + .firmware_revision = 0xa0b800, 291 + .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, 292 + }, 293 + /* 294 + * Note about the following Apple iPod blacklist entries: 295 + * 296 + * There are iPods (2nd gen, 3rd gen) with model_id==0. Since our 297 + * matching logic treats 0 as a wildcard, we cannot match this ID 298 + * without rewriting the matching routine. Fortunately these iPods 299 + * do not feature the read_capacity bug according to one report. 300 + * Read_capacity behaviour as well as model_id could change due to 301 + * Apple-supplied firmware updates though. 302 + */ 303 + /* iPod 4th generation */ { 304 + .firmware_revision = 0x0a2700, 305 + .model_id = 0x000021, 306 + .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, 307 + }, 308 + /* iPod mini */ { 309 + .firmware_revision = 0x0a2700, 310 + .model_id = 0x000023, 311 + .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, 312 + }, 313 + /* iPod Photo */ { 314 + .firmware_revision = 0x0a2700, 315 + .model_id = 0x00007e, 316 + .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, 317 + } 306 318 }; 307 319 308 320 /************************************** ··· 835 765 836 766 /* Register the status FIFO address range. We could use the same FIFO 837 767 * for targets at different nodes. 
However we need different FIFOs per 838 - * target in order to support multi-unit devices. */ 768 + * target in order to support multi-unit devices. 769 + * The FIFO is located out of the local host controller's physical range 770 + * but, if possible, within the posted write area. Status writes will 771 + * then be performed as unified transactions. This slightly reduces 772 + * bandwidth usage, and some Prolific based devices seem to require it. 773 + */ 839 774 scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace( 840 775 &sbp2_highlevel, ud->ne->host, &sbp2_ops, 841 776 sizeof(struct sbp2_status_block), sizeof(quadlet_t), 842 - ~0ULL, ~0ULL); 777 + 0x010000000000ULL, CSR1212_ALL_SPACE_END); 843 778 if (!scsi_id->status_fifo_addr) { 844 779 SBP2_ERR("failed to allocate status FIFO address range"); 845 780 goto failed_alloc; ··· 1525 1450 struct csr1212_dentry *dentry; 1526 1451 u64 management_agent_addr; 1527 1452 u32 command_set_spec_id, command_set, unit_characteristics, 1528 - firmware_revision, workarounds; 1453 + firmware_revision; 1454 + unsigned workarounds; 1529 1455 int i; 1530 1456 1531 1457 SBP2_DEBUG_ENTER(); ··· 1582 1506 case SBP2_FIRMWARE_REVISION_KEY: 1583 1507 /* Firmware revision */ 1584 1508 firmware_revision = kv->value.immediate; 1585 - if (force_inquiry_hack) 1586 - SBP2_INFO("sbp2_firmware_revision = %x", 1587 - (unsigned int)firmware_revision); 1588 - else 1589 - SBP2_DEBUG("sbp2_firmware_revision = %x", 1590 - (unsigned int)firmware_revision); 1509 + SBP2_DEBUG("sbp2_firmware_revision = %x", 1510 + (unsigned int)firmware_revision); 1591 1511 break; 1592 1512 1593 1513 default: ··· 1591 1519 } 1592 1520 } 1593 1521 1594 - /* This is the start of our broken device checking. We try to hack 1595 - * around oddities and known defects. */ 1596 - workarounds = 0x0; 1597 - 1598 - /* If the vendor id is 0xa0b8 (Symbios vendor id), then we have a 1599 - * bridge with 128KB max transfer size limitation. 
For sanity, we 1600 - * only voice this when the current max_sectors setting 1601 - * exceeds the 128k limit. By default, that is not the case. 1602 - * 1603 - * It would be really nice if we could detect this before the scsi 1604 - * host gets initialized. That way we can down-force the 1605 - * max_sectors to account for it. That is not currently 1606 - * possible. */ 1607 - if ((firmware_revision & 0xffff00) == 1608 - SBP2_128KB_BROKEN_FIRMWARE && 1609 - (max_sectors * 512) > (128*1024)) { 1610 - SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.", 1611 - NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid)); 1612 - SBP2_WARN("WARNING: Current max_sectors setting is larger than 128KB (%d sectors)!", 1613 - max_sectors); 1614 - workarounds |= SBP2_BREAKAGE_128K_MAX_TRANSFER; 1522 + workarounds = sbp2_default_workarounds; 1523 + if (force_inquiry_hack) { 1524 + SBP2_WARN("force_inquiry_hack is deprecated. " 1525 + "Use parameter 'workarounds' instead."); 1526 + workarounds |= SBP2_WORKAROUND_INQUIRY_36; 1615 1527 } 1616 1528 1617 - /* Check for a blacklisted set of devices that require us to force 1618 - * a 36 byte host inquiry. This can be overriden as a module param 1619 - * (to force all hosts). */ 1620 - for (i = 0; i < ARRAY_SIZE(sbp2_broken_inquiry_list); i++) { 1621 - if ((firmware_revision & 0xffff00) == 1622 - sbp2_broken_inquiry_list[i]) { 1623 - SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround", 1624 - NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid)); 1625 - workarounds |= SBP2_BREAKAGE_INQUIRY_HACK; 1626 - break; /* No need to continue. 
*/ 1529 + if (!(workarounds & SBP2_WORKAROUND_OVERRIDE)) 1530 + for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) { 1531 + if (sbp2_workarounds_table[i].firmware_revision && 1532 + sbp2_workarounds_table[i].firmware_revision != 1533 + (firmware_revision & 0xffff00)) 1534 + continue; 1535 + if (sbp2_workarounds_table[i].model_id && 1536 + sbp2_workarounds_table[i].model_id != ud->model_id) 1537 + continue; 1538 + workarounds |= sbp2_workarounds_table[i].workarounds; 1539 + break; 1627 1540 } 1628 - } 1541 + 1542 + if (workarounds) 1543 + SBP2_INFO("Workarounds for node " NODE_BUS_FMT ": 0x%x " 1544 + "(firmware_revision 0x%06x, vendor_id 0x%06x," 1545 + " model_id 0x%06x)", 1546 + NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), 1547 + workarounds, firmware_revision, 1548 + ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id, 1549 + ud->model_id); 1550 + 1551 + /* We would need one SCSI host template for each target to adjust 1552 + * max_sectors on the fly, therefore warn only. */ 1553 + if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS && 1554 + (max_sectors * 512) > (128 * 1024)) 1555 + SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB " 1556 + "max transfer size. WARNING: Current max_sectors " 1557 + "setting is larger than 128KB (%d sectors)", 1558 + NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), 1559 + max_sectors); 1629 1560 1630 1561 /* If this is a logical unit directory entry, process the parent 1631 1562 * to get the values. 
*/ ··· 2522 2447 2523 2448 scsi_id->sdev = sdev; 2524 2449 2525 - if (force_inquiry_hack || 2526 - scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) { 2450 + if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36) 2527 2451 sdev->inquiry_len = 36; 2528 - sdev->skip_ms_page_8 = 1; 2529 - } 2530 2452 return 0; 2531 2453 } 2532 2454 2533 2455 static int sbp2scsi_slave_configure(struct scsi_device *sdev) 2534 2456 { 2457 + struct scsi_id_instance_data *scsi_id = 2458 + (struct scsi_id_instance_data *)sdev->host->hostdata[0]; 2459 + 2535 2460 blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); 2536 2461 sdev->use_10_for_rw = 1; 2537 2462 sdev->use_10_for_ms = 1; 2463 + 2464 + if (sdev->type == TYPE_DISK && 2465 + scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) 2466 + sdev->skip_ms_page_8 = 1; 2467 + if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) 2468 + sdev->fix_capacity = 1; 2538 2469 return 0; 2539 2470 } 2540 2471 ··· 2684 2603 scsi_driver_template.cmd_per_lun = 1; 2685 2604 } 2686 2605 2687 - /* Set max sectors (module load option). Default is 255 sectors. */ 2606 + if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS && 2607 + (max_sectors * 512) > (128 * 1024)) 2608 + max_sectors = 128 * 1024 / 512; 2688 2609 scsi_driver_template.max_sectors = max_sectors; 2689 2610 2690 2611 /* Register our high level driver with 1394 stack */
+8 -10
drivers/ieee1394/sbp2.h
··· 227 227 #define SBP2_SW_VERSION_ENTRY 0x00010483 228 228 229 229 /* 230 - * Other misc defines 231 - */ 232 - #define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800 233 - 234 - /* 235 230 * SCSI specific stuff 236 231 */ 237 232 238 233 #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 239 234 #define SBP2_MAX_SECTORS 255 /* Max sectors supported */ 240 235 #define SBP2_MAX_CMDS 8 /* This should be safe */ 236 + 237 + /* Flags for detected oddities and brokeness */ 238 + #define SBP2_WORKAROUND_128K_MAX_TRANS 0x1 239 + #define SBP2_WORKAROUND_INQUIRY_36 0x2 240 + #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 241 + #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 242 + #define SBP2_WORKAROUND_OVERRIDE 0x100 241 243 242 244 /* This is the two dma types we use for cmd_dma below */ 243 245 enum cmd_dma_types { ··· 269 267 int dma_dir; 270 268 271 269 }; 272 - 273 - /* A list of flags for detected oddities and brokeness. */ 274 - #define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1 275 - #define SBP2_BREAKAGE_INQUIRY_HACK 0x2 276 270 277 271 struct sbp2scsi_host_info; 278 272 ··· 343 345 struct Scsi_Host *scsi_host; 344 346 345 347 /* Device specific workarounds/brokeness */ 346 - u32 workarounds; 348 + unsigned workarounds; 347 349 }; 348 350 349 351 /* Sbp2 host data structure (one per IEEE1394 host) */
+7 -5
drivers/infiniband/core/cm.c
··· 34 34 * 35 35 * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $ 36 36 */ 37 + 38 + #include <linux/completion.h> 37 39 #include <linux/dma-mapping.h> 38 40 #include <linux/err.h> 39 41 #include <linux/idr.h> ··· 124 122 struct rb_node service_node; 125 123 struct rb_node sidr_id_node; 126 124 spinlock_t lock; /* Do not acquire inside cm.lock */ 127 - wait_queue_head_t wait; 125 + struct completion comp; 128 126 atomic_t refcount; 129 127 130 128 struct ib_mad_send_buf *msg; ··· 161 159 static inline void cm_deref_id(struct cm_id_private *cm_id_priv) 162 160 { 163 161 if (atomic_dec_and_test(&cm_id_priv->refcount)) 164 - wake_up(&cm_id_priv->wait); 162 + complete(&cm_id_priv->comp); 165 163 } 166 164 167 165 static int cm_alloc_msg(struct cm_id_private *cm_id_priv, ··· 561 559 goto error; 562 560 563 561 spin_lock_init(&cm_id_priv->lock); 564 - init_waitqueue_head(&cm_id_priv->wait); 562 + init_completion(&cm_id_priv->comp); 565 563 INIT_LIST_HEAD(&cm_id_priv->work_list); 566 564 atomic_set(&cm_id_priv->work_count, -1); 567 565 atomic_set(&cm_id_priv->refcount, 1); ··· 726 724 } 727 725 728 726 cm_free_id(cm_id->local_id); 729 - atomic_dec(&cm_id_priv->refcount); 730 - wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount)); 727 + cm_deref_id(cm_id_priv); 728 + wait_for_completion(&cm_id_priv->comp); 731 729 while ((work = cm_dequeue_work(cm_id_priv)) != NULL) 732 730 cm_free_work(work); 733 731 if (cm_id_priv->private_data && cm_id_priv->private_data_len)
+25 -22
drivers/infiniband/core/mad.c
··· 352 352 INIT_WORK(&mad_agent_priv->local_work, local_completions, 353 353 mad_agent_priv); 354 354 atomic_set(&mad_agent_priv->refcount, 1); 355 - init_waitqueue_head(&mad_agent_priv->wait); 355 + init_completion(&mad_agent_priv->comp); 356 356 357 357 return &mad_agent_priv->agent; 358 358 ··· 467 467 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; 468 468 mad_snoop_priv->agent.port_num = port_num; 469 469 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags; 470 - init_waitqueue_head(&mad_snoop_priv->wait); 470 + init_completion(&mad_snoop_priv->comp); 471 471 mad_snoop_priv->snoop_index = register_snoop_agent( 472 472 &port_priv->qp_info[qpn], 473 473 mad_snoop_priv); ··· 485 485 return ret; 486 486 } 487 487 EXPORT_SYMBOL(ib_register_mad_snoop); 488 + 489 + static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv) 490 + { 491 + if (atomic_dec_and_test(&mad_agent_priv->refcount)) 492 + complete(&mad_agent_priv->comp); 493 + } 494 + 495 + static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv) 496 + { 497 + if (atomic_dec_and_test(&mad_snoop_priv->refcount)) 498 + complete(&mad_snoop_priv->comp); 499 + } 488 500 489 501 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) 490 502 { ··· 521 509 flush_workqueue(port_priv->wq); 522 510 ib_cancel_rmpp_recvs(mad_agent_priv); 523 511 524 - atomic_dec(&mad_agent_priv->refcount); 525 - wait_event(mad_agent_priv->wait, 526 - !atomic_read(&mad_agent_priv->refcount)); 512 + deref_mad_agent(mad_agent_priv); 513 + wait_for_completion(&mad_agent_priv->comp); 527 514 528 515 kfree(mad_agent_priv->reg_req); 529 516 ib_dereg_mr(mad_agent_priv->agent.mr); ··· 540 529 atomic_dec(&qp_info->snoop_count); 541 530 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 542 531 543 - atomic_dec(&mad_snoop_priv->refcount); 544 - wait_event(mad_snoop_priv->wait, 545 - !atomic_read(&mad_snoop_priv->refcount)); 532 + deref_snoop_agent(mad_snoop_priv); 533 + 
wait_for_completion(&mad_snoop_priv->comp); 546 534 547 535 kfree(mad_snoop_priv); 548 536 } ··· 610 600 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 611 601 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent, 612 602 send_buf, mad_send_wc); 613 - if (atomic_dec_and_test(&mad_snoop_priv->refcount)) 614 - wake_up(&mad_snoop_priv->wait); 603 + deref_snoop_agent(mad_snoop_priv); 615 604 spin_lock_irqsave(&qp_info->snoop_lock, flags); 616 605 } 617 606 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); ··· 635 626 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 636 627 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, 637 628 mad_recv_wc); 638 - if (atomic_dec_and_test(&mad_snoop_priv->refcount)) 639 - wake_up(&mad_snoop_priv->wait); 629 + deref_snoop_agent(mad_snoop_priv); 640 630 spin_lock_irqsave(&qp_info->snoop_lock, flags); 641 631 } 642 632 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); ··· 976 968 977 969 free_send_rmpp_list(mad_send_wr); 978 970 kfree(send_buf->mad); 979 - if (atomic_dec_and_test(&mad_agent_priv->refcount)) 980 - wake_up(&mad_agent_priv->wait); 971 + deref_mad_agent(mad_agent_priv); 981 972 } 982 973 EXPORT_SYMBOL(ib_free_send_mad); 983 974 ··· 1764 1757 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, 1765 1758 mad_recv_wc); 1766 1759 if (!mad_recv_wc) { 1767 - if (atomic_dec_and_test(&mad_agent_priv->refcount)) 1768 - wake_up(&mad_agent_priv->wait); 1760 + deref_mad_agent(mad_agent_priv); 1769 1761 return; 1770 1762 } 1771 1763 } ··· 1776 1770 if (!mad_send_wr) { 1777 1771 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1778 1772 ib_free_recv_mad(mad_recv_wc); 1779 - if (atomic_dec_and_test(&mad_agent_priv->refcount)) 1780 - wake_up(&mad_agent_priv->wait); 1773 + deref_mad_agent(mad_agent_priv); 1781 1774 return; 1782 1775 } 1783 1776 ib_mark_mad_done(mad_send_wr); ··· 1795 1790 } else { 1796 1791 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, 1797 1792 mad_recv_wc); 1798 - if 
(atomic_dec_and_test(&mad_agent_priv->refcount)) 1799 - wake_up(&mad_agent_priv->wait); 1793 + deref_mad_agent(mad_agent_priv); 1800 1794 } 1801 1795 } 1802 1796 ··· 2025 2021 mad_send_wc); 2026 2022 2027 2023 /* Release reference on agent taken when sending */ 2028 - if (atomic_dec_and_test(&mad_agent_priv->refcount)) 2029 - wake_up(&mad_agent_priv->wait); 2024 + deref_mad_agent(mad_agent_priv); 2030 2025 return; 2031 2026 done: 2032 2027 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+3 -2
drivers/infiniband/core/mad_priv.h
··· 37 37 #ifndef __IB_MAD_PRIV_H__ 38 38 #define __IB_MAD_PRIV_H__ 39 39 40 + #include <linux/completion.h> 40 41 #include <linux/pci.h> 41 42 #include <linux/kthread.h> 42 43 #include <linux/workqueue.h> ··· 109 108 struct list_head rmpp_list; 110 109 111 110 atomic_t refcount; 112 - wait_queue_head_t wait; 111 + struct completion comp; 113 112 }; 114 113 115 114 struct ib_mad_snoop_private { ··· 118 117 int snoop_index; 119 118 int mad_snoop_flags; 120 119 atomic_t refcount; 121 - wait_queue_head_t wait; 120 + struct completion comp; 122 121 }; 123 122 124 123 struct ib_mad_send_wr_private {
+10 -10
drivers/infiniband/core/mad_rmpp.c
··· 49 49 struct list_head list; 50 50 struct work_struct timeout_work; 51 51 struct work_struct cleanup_work; 52 - wait_queue_head_t wait; 52 + struct completion comp; 53 53 enum rmpp_state state; 54 54 spinlock_t lock; 55 55 atomic_t refcount; ··· 69 69 u8 method; 70 70 }; 71 71 72 + static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) 73 + { 74 + if (atomic_dec_and_test(&rmpp_recv->refcount)) 75 + complete(&rmpp_recv->comp); 76 + } 77 + 72 78 static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) 73 79 { 74 - atomic_dec(&rmpp_recv->refcount); 75 - wait_event(rmpp_recv->wait, !atomic_read(&rmpp_recv->refcount)); 80 + deref_rmpp_recv(rmpp_recv); 81 + wait_for_completion(&rmpp_recv->comp); 76 82 ib_destroy_ah(rmpp_recv->ah); 77 83 kfree(rmpp_recv); 78 84 } ··· 259 253 goto error; 260 254 261 255 rmpp_recv->agent = agent; 262 - init_waitqueue_head(&rmpp_recv->wait); 256 + init_completion(&rmpp_recv->comp); 263 257 INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv); 264 258 INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv); 265 259 spin_lock_init(&rmpp_recv->lock); ··· 283 277 284 278 error: kfree(rmpp_recv); 285 279 return NULL; 286 - } 287 - 288 - static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) 289 - { 290 - if (atomic_dec_and_test(&rmpp_recv->refcount)) 291 - wake_up(&rmpp_recv->wait); 292 280 } 293 281 294 282 static struct mad_rmpp_recv *
+7 -5
drivers/infiniband/core/ucm.c
··· 32 32 * 33 33 * $Id: ucm.c 2594 2005-06-13 19:46:02Z libor $ 34 34 */ 35 + 36 + #include <linux/completion.h> 35 37 #include <linux/init.h> 36 38 #include <linux/fs.h> 37 39 #include <linux/module.h> ··· 74 72 75 73 struct ib_ucm_context { 76 74 int id; 77 - wait_queue_head_t wait; 75 + struct completion comp; 78 76 atomic_t ref; 79 77 int events_reported; 80 78 ··· 140 138 static void ib_ucm_ctx_put(struct ib_ucm_context *ctx) 141 139 { 142 140 if (atomic_dec_and_test(&ctx->ref)) 143 - wake_up(&ctx->wait); 141 + complete(&ctx->comp); 144 142 } 145 143 146 144 static inline int ib_ucm_new_cm_id(int event) ··· 180 178 return NULL; 181 179 182 180 atomic_set(&ctx->ref, 1); 183 - init_waitqueue_head(&ctx->wait); 181 + init_completion(&ctx->comp); 184 182 ctx->file = file; 185 183 INIT_LIST_HEAD(&ctx->events); 186 184 ··· 588 586 if (IS_ERR(ctx)) 589 587 return PTR_ERR(ctx); 590 588 591 - atomic_dec(&ctx->ref); 592 - wait_event(ctx->wait, !atomic_read(&ctx->ref)); 589 + ib_ucm_ctx_put(ctx); 590 + wait_for_completion(&ctx->comp); 593 591 594 592 /* No new events will be generated after destroying the cm_id. */ 595 593 ib_destroy_cm_id(ctx->cm_id);
+3 -1
drivers/infiniband/core/uverbs_mem.c
··· 211 211 */ 212 212 213 213 work = kmalloc(sizeof *work, GFP_KERNEL); 214 - if (!work) 214 + if (!work) { 215 + mmput(mm); 215 216 return; 217 + } 216 218 217 219 INIT_WORK(&work->work, ib_umem_account, work); 218 220 work->mm = mm;
+14 -15
drivers/infiniband/hw/ipath/ipath_driver.c
··· 116 116 #define PCI_DEVICE_ID_INFINIPATH_PE800 0x10 117 117 118 118 static const struct pci_device_id ipath_pci_tbl[] = { 119 - {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, 120 - PCI_DEVICE_ID_INFINIPATH_HT)}, 121 - {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, 122 - PCI_DEVICE_ID_INFINIPATH_PE800)}, 119 + { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) }, 120 + { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) }, 121 + { 0, } 123 122 }; 124 123 125 124 MODULE_DEVICE_TABLE(pci, ipath_pci_tbl); ··· 1905 1906 } else 1906 1907 ipath_dbg("irq is 0, not doing free_irq " 1907 1908 "for unit %u\n", dd->ipath_unit); 1909 + 1910 + /* 1911 + * we check for NULL here, because it's outside 1912 + * the kregbase check, and we need to call it 1913 + * after the free_irq. Thus it's possible that 1914 + * the function pointers were never initialized. 1915 + */ 1916 + if (dd->ipath_f_cleanup) 1917 + /* clean up chip-specific stuff */ 1918 + dd->ipath_f_cleanup(dd); 1919 + 1908 1920 dd->pcidev = NULL; 1909 1921 } 1910 - 1911 - /* 1912 - * we check for NULL here, because it's outside the kregbase 1913 - * check, and we need to call it after the free_irq. Thus 1914 - * it's possible that the function pointers were never 1915 - * initialized. 1916 - */ 1917 - if (dd->ipath_f_cleanup) 1918 - /* clean up chip-specific stuff */ 1919 - dd->ipath_f_cleanup(dd); 1920 - 1921 1922 spin_lock_irqsave(&ipath_devs_lock, flags); 1922 1923 } 1923 1924
+3 -4
drivers/infiniband/hw/ipath/ipath_eeprom.c
··· 505 505 * ipath_get_guid - get the GUID from the i2c device 506 506 * @dd: the infinipath device 507 507 * 508 - * When we add the multi-chip support, we will probably have to add 509 - * the ability to use the number of guids field, and get the guid from 510 - * the first chip's flash, to use for all of them. 508 + * We have the capability to use the ipath_nguid field, and get 509 + * the guid from the first chip's flash, to use for all of them. 511 510 */ 512 - void ipath_get_guid(struct ipath_devdata *dd) 511 + void ipath_get_eeprom_info(struct ipath_devdata *dd) 513 512 { 514 513 void *buf; 515 514 struct ipath_flash *ifp;
+5 -1
drivers/infiniband/hw/ipath/ipath_file_ops.c
··· 139 139 kinfo->spi_piosize = dd->ipath_ibmaxlen; 140 140 kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */ 141 141 kinfo->spi_port = pd->port_port; 142 - kinfo->spi_sw_version = IPATH_USER_SWVERSION; 142 + kinfo->spi_sw_version = IPATH_KERN_SWVERSION; 143 143 kinfo->spi_hw_version = dd->ipath_revision; 144 144 145 145 if (copy_to_user(ubase, kinfo, sizeof(*kinfo))) ··· 1224 1224 1225 1225 if (tail == head) { 1226 1226 set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); 1227 + if(dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */ 1228 + (void)ipath_write_ureg(dd, ur_rcvhdrhead, 1229 + dd->ipath_rhdrhead_intr_off 1230 + | head, pd->port_port); 1227 1231 poll_wait(fp, &pd->port_wait, pt); 1228 1232 1229 1233 if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
+19 -2
drivers/infiniband/hw/ipath/ipath_ht400.c
··· 607 607 case 4: /* Ponderosa is one of the bringup boards */ 608 608 n = "Ponderosa"; 609 609 break; 610 - case 5: /* HT-460 original production board */ 610 + case 5: 611 + /* 612 + * HT-460 original production board; two production levels, with 613 + * different serial number ranges. See ipath_ht_early_init() for 614 + * case where we enable IPATH_GPIO_INTR for later serial # range. 615 + */ 611 616 n = "InfiniPath_HT-460"; 612 617 break; 613 618 case 6: ··· 647 642 if (n) 648 643 snprintf(name, namelen, "%s", n); 649 644 650 - if (dd->ipath_majrev != 3 || dd->ipath_minrev != 2) { 645 + if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) { 651 646 /* 652 647 * This version of the driver only supports the HT-400 653 648 * Rev 3.2 ··· 1525 1520 */ 1526 1521 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 1527 1522 INFINIPATH_S_ABORT); 1523 + 1524 + ipath_get_eeprom_info(dd); 1525 + if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' && 1526 + dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') { 1527 + /* 1528 + * Later production HT-460 has same changes as HT-465, so 1529 + * can use GPIO interrupts. They have serial #'s starting 1530 + * with 128, rather than 112. 1531 + */ 1532 + dd->ipath_flags |= IPATH_GPIO_INTR; 1533 + dd->ipath_flags &= ~IPATH_POLL_RX_INTR; 1534 + } 1528 1535 return 0; 1529 1536 } 1530 1537
-1
drivers/infiniband/hw/ipath/ipath_init_chip.c
··· 879 879 880 880 done: 881 881 if (!ret) { 882 - ipath_get_guid(dd); 883 882 *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT; 884 883 if (!dd->ipath_f_intrsetup(dd)) { 885 884 /* now we can enable all interrupts from the chip */
+1 -1
drivers/infiniband/hw/ipath/ipath_kernel.h
··· 650 650 void ipath_init_pe800_funcs(struct ipath_devdata *); 651 651 /* init HT-400-specific func */ 652 652 void ipath_init_ht400_funcs(struct ipath_devdata *); 653 - void ipath_get_guid(struct ipath_devdata *); 653 + void ipath_get_eeprom_info(struct ipath_devdata *); 654 654 u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); 655 655 656 656 /*
-6
drivers/infiniband/hw/ipath/ipath_keys.c
··· 136 136 ret = 1; 137 137 goto bail; 138 138 } 139 - spin_lock(&rkt->lock); 140 139 mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))]; 141 - spin_unlock(&rkt->lock); 142 140 if (unlikely(mr == NULL || mr->lkey != sge->lkey)) { 143 141 ret = 0; 144 142 goto bail; ··· 182 184 * @acc: access flags 183 185 * 184 186 * Return 1 if successful, otherwise 0. 185 - * 186 - * The QP r_rq.lock should be held. 187 187 */ 188 188 int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, 189 189 u32 len, u64 vaddr, u32 rkey, int acc) ··· 192 196 size_t off; 193 197 int ret; 194 198 195 - spin_lock(&rkt->lock); 196 199 mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))]; 197 - spin_unlock(&rkt->lock); 198 200 if (unlikely(mr == NULL || mr->lkey != rkey)) { 199 201 ret = 0; 200 202 goto bail;
+7 -5
drivers/infiniband/hw/ipath/ipath_layer.c
··· 872 872 update_sge(ss, len); 873 873 length -= len; 874 874 } 875 + /* Update address before sending packet. */ 876 + update_sge(ss, length); 875 877 /* must flush early everything before trigger word */ 876 878 ipath_flush_wc(); 877 879 __raw_writel(last, piobuf); 878 880 /* be sure trigger word is written */ 879 881 ipath_flush_wc(); 880 - update_sge(ss, length); 881 882 } 882 883 883 884 /** ··· 944 943 if (likely(ss->num_sge == 1 && len <= ss->sge.length && 945 944 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { 946 945 u32 w; 946 + u32 *addr = (u32 *) ss->sge.vaddr; 947 947 948 + /* Update address before sending packet. */ 949 + update_sge(ss, len); 948 950 /* Need to round up for the last dword in the packet. */ 949 951 w = (len + 3) >> 2; 950 - __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1); 952 + __iowrite32_copy(piobuf, addr, w - 1); 951 953 /* must flush early everything before trigger word */ 952 954 ipath_flush_wc(); 953 - __raw_writel(((u32 *) ss->sge.vaddr)[w - 1], 954 - piobuf + w - 1); 955 + __raw_writel(addr[w - 1], piobuf + w - 1); 955 956 /* be sure trigger word is written */ 956 957 ipath_flush_wc(); 957 - update_sge(ss, len); 958 958 ret = 0; 959 959 goto bail; 960 960 }
+2
drivers/infiniband/hw/ipath/ipath_pe800.c
··· 1180 1180 */ 1181 1181 dd->ipath_rhdrhead_intr_off = 1ULL<<32; 1182 1182 1183 + ipath_get_eeprom_info(dd); 1184 + 1183 1185 return 0; 1184 1186 } 1185 1187
+32 -32
drivers/infiniband/hw/ipath/ipath_qp.c
··· 375 375 376 376 spin_lock(&dev->pending_lock); 377 377 /* XXX What if its already removed by the timeout code? */ 378 - if (qp->timerwait.next != LIST_POISON1) 379 - list_del(&qp->timerwait); 380 - if (qp->piowait.next != LIST_POISON1) 381 - list_del(&qp->piowait); 378 + if (!list_empty(&qp->timerwait)) 379 + list_del_init(&qp->timerwait); 380 + if (!list_empty(&qp->piowait)) 381 + list_del_init(&qp->piowait); 382 382 spin_unlock(&dev->pending_lock); 383 383 384 384 wc.status = IB_WC_WR_FLUSH_ERR; ··· 427 427 int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 428 428 int attr_mask) 429 429 { 430 + struct ipath_ibdev *dev = to_idev(ibqp->device); 430 431 struct ipath_qp *qp = to_iqp(ibqp); 431 432 enum ib_qp_state cur_state, new_state; 432 433 unsigned long flags; ··· 444 443 attr_mask)) 445 444 goto inval; 446 445 446 + if (attr_mask & IB_QP_AV) 447 + if (attr->ah_attr.dlid == 0 || 448 + attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE) 449 + goto inval; 450 + 451 + if (attr_mask & IB_QP_PKEY_INDEX) 452 + if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd)) 453 + goto inval; 454 + 455 + if (attr_mask & IB_QP_MIN_RNR_TIMER) 456 + if (attr->min_rnr_timer > 31) 457 + goto inval; 458 + 447 459 switch (new_state) { 448 460 case IB_QPS_RESET: 449 461 ipath_reset_qp(qp); ··· 471 457 472 458 } 473 459 474 - if (attr_mask & IB_QP_PKEY_INDEX) { 475 - struct ipath_ibdev *dev = to_idev(ibqp->device); 476 - 477 - if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd)) 478 - goto inval; 460 + if (attr_mask & IB_QP_PKEY_INDEX) 479 461 qp->s_pkey_index = attr->pkey_index; 480 - } 481 462 482 463 if (attr_mask & IB_QP_DEST_QPN) 483 464 qp->remote_qpn = attr->dest_qp_num; ··· 488 479 if (attr_mask & IB_QP_ACCESS_FLAGS) 489 480 qp->qp_access_flags = attr->qp_access_flags; 490 481 491 - if (attr_mask & IB_QP_AV) { 492 - if (attr->ah_attr.dlid == 0 || 493 - attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE) 494 - goto inval; 482 + if (attr_mask & IB_QP_AV) 495 483 
qp->remote_ah_attr = attr->ah_attr; 496 - } 497 484 498 485 if (attr_mask & IB_QP_PATH_MTU) 499 486 qp->path_mtu = attr->path_mtu; ··· 504 499 qp->s_rnr_retry_cnt = qp->s_rnr_retry; 505 500 } 506 501 507 - if (attr_mask & IB_QP_MIN_RNR_TIMER) { 508 - if (attr->min_rnr_timer > 31) 509 - goto inval; 502 + if (attr_mask & IB_QP_MIN_RNR_TIMER) 510 503 qp->s_min_rnr_timer = attr->min_rnr_timer; 511 - } 512 504 513 505 if (attr_mask & IB_QP_QKEY) 514 506 qp->qkey = attr->qkey; ··· 712 710 init_attr->qp_type == IB_QPT_RC ? 713 711 ipath_do_rc_send : ipath_do_uc_send, 714 712 (unsigned long)qp); 715 - qp->piowait.next = LIST_POISON1; 716 - qp->piowait.prev = LIST_POISON2; 717 - qp->timerwait.next = LIST_POISON1; 718 - qp->timerwait.prev = LIST_POISON2; 713 + INIT_LIST_HEAD(&qp->piowait); 714 + INIT_LIST_HEAD(&qp->timerwait); 719 715 qp->state = IB_QPS_RESET; 720 716 qp->s_wq = swq; 721 717 qp->s_size = init_attr->cap.max_send_wr + 1; ··· 734 734 ipath_reset_qp(qp); 735 735 736 736 /* Tell the core driver that the kernel SMA is present. */ 737 - if (qp->ibqp.qp_type == IB_QPT_SMI) 737 + if (init_attr->qp_type == IB_QPT_SMI) 738 738 ipath_layer_set_verbs_flags(dev->dd, 739 739 IPATH_VERBS_KERNEL_SMA); 740 740 break; ··· 783 783 784 784 /* Make sure the QP isn't on the timeout list. */ 785 785 spin_lock_irqsave(&dev->pending_lock, flags); 786 - if (qp->timerwait.next != LIST_POISON1) 787 - list_del(&qp->timerwait); 788 - if (qp->piowait.next != LIST_POISON1) 789 - list_del(&qp->piowait); 786 + if (!list_empty(&qp->timerwait)) 787 + list_del_init(&qp->timerwait); 788 + if (!list_empty(&qp->piowait)) 789 + list_del_init(&qp->piowait); 790 790 spin_unlock_irqrestore(&dev->pending_lock, flags); 791 791 792 792 /* ··· 855 855 856 856 spin_lock(&dev->pending_lock); 857 857 /* XXX What if its already removed by the timeout code? 
*/ 858 - if (qp->timerwait.next != LIST_POISON1) 859 - list_del(&qp->timerwait); 860 - if (qp->piowait.next != LIST_POISON1) 861 - list_del(&qp->piowait); 858 + if (!list_empty(&qp->timerwait)) 859 + list_del_init(&qp->timerwait); 860 + if (!list_empty(&qp->piowait)) 861 + list_del_init(&qp->piowait); 862 862 spin_unlock(&dev->pending_lock); 863 863 864 864 ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
+7 -8
drivers/infiniband/hw/ipath/ipath_rc.c
··· 57 57 qp->s_len = wqe->length - len; 58 58 dev = to_idev(qp->ibqp.device); 59 59 spin_lock(&dev->pending_lock); 60 - if (qp->timerwait.next == LIST_POISON1) 60 + if (list_empty(&qp->timerwait)) 61 61 list_add_tail(&qp->timerwait, 62 62 &dev->pending[dev->pending_index]); 63 63 spin_unlock(&dev->pending_lock); ··· 356 356 if ((int)(qp->s_psn - qp->s_next_psn) > 0) 357 357 qp->s_next_psn = qp->s_psn; 358 358 spin_lock(&dev->pending_lock); 359 - if (qp->timerwait.next == LIST_POISON1) 359 + if (list_empty(&qp->timerwait)) 360 360 list_add_tail(&qp->timerwait, 361 361 &dev->pending[dev->pending_index]); 362 362 spin_unlock(&dev->pending_lock); ··· 726 726 */ 727 727 dev = to_idev(qp->ibqp.device); 728 728 spin_lock(&dev->pending_lock); 729 - if (qp->timerwait.next != LIST_POISON1) 730 - list_del(&qp->timerwait); 729 + if (!list_empty(&qp->timerwait)) 730 + list_del_init(&qp->timerwait); 731 731 spin_unlock(&dev->pending_lock); 732 732 733 733 if (wqe->wr.opcode == IB_WR_RDMA_READ) ··· 886 886 * just won't find anything to restart if we ACK everything. 887 887 */ 888 888 spin_lock(&dev->pending_lock); 889 - if (qp->timerwait.next != LIST_POISON1) 890 - list_del(&qp->timerwait); 889 + if (!list_empty(&qp->timerwait)) 890 + list_del_init(&qp->timerwait); 891 891 spin_unlock(&dev->pending_lock); 892 892 893 893 /* ··· 1194 1194 IB_WR_RDMA_READ)) 1195 1195 goto ack_done; 1196 1196 spin_lock(&dev->pending_lock); 1197 - if (qp->s_rnr_timeout == 0 && 1198 - qp->timerwait.next != LIST_POISON1) 1197 + if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait)) 1199 1198 list_move_tail(&qp->timerwait, 1200 1199 &dev->pending[dev->pending_index]); 1201 1200 spin_unlock(&dev->pending_lock);
+1 -1
drivers/infiniband/hw/ipath/ipath_ruc.c
··· 435 435 unsigned long flags; 436 436 437 437 spin_lock_irqsave(&dev->pending_lock, flags); 438 - if (qp->piowait.next == LIST_POISON1) 438 + if (list_empty(&qp->piowait)) 439 439 list_add_tail(&qp->piowait, &dev->piowait); 440 440 spin_unlock_irqrestore(&dev->pending_lock, flags); 441 441 /*
+4 -3
drivers/infiniband/hw/ipath/ipath_verbs.c
··· 464 464 last = &dev->pending[dev->pending_index]; 465 465 while (!list_empty(last)) { 466 466 qp = list_entry(last->next, struct ipath_qp, timerwait); 467 - list_del(&qp->timerwait); 467 + list_del_init(&qp->timerwait); 468 468 qp->timer_next = resend; 469 469 resend = qp; 470 470 atomic_inc(&qp->refcount); ··· 474 474 qp = list_entry(last->next, struct ipath_qp, timerwait); 475 475 if (--qp->s_rnr_timeout == 0) { 476 476 do { 477 - list_del(&qp->timerwait); 477 + list_del_init(&qp->timerwait); 478 478 tasklet_hi_schedule(&qp->s_task); 479 479 if (list_empty(last)) 480 480 break; ··· 554 554 while (!list_empty(&dev->piowait)) { 555 555 qp = list_entry(dev->piowait.next, struct ipath_qp, 556 556 piowait); 557 - list_del(&qp->piowait); 557 + list_del_init(&qp->piowait); 558 558 tasklet_hi_schedule(&qp->s_task); 559 559 } 560 560 spin_unlock_irqrestore(&dev->pending_lock, flags); ··· 951 951 idev->dd = dd; 952 952 953 953 strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX); 954 + dev->owner = THIS_MODULE; 954 955 dev->node_guid = ipath_layer_get_guid(dd); 955 956 dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION; 956 957 dev->uverbs_cmd_mask =
+1 -1
drivers/infiniband/hw/mthca/mthca_cmd.c
··· 182 182 u8 status; 183 183 }; 184 184 185 - static int fw_cmd_doorbell = 1; 185 + static int fw_cmd_doorbell = 0; 186 186 module_param(fw_cmd_doorbell, int, 0644); 187 187 MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero " 188 188 "(and supported by FW)");
+18 -17
drivers/infiniband/hw/mthca/mthca_qp.c
··· 1727 1727 1728 1728 ind = qp->rq.next_ind; 1729 1729 1730 - for (nreq = 0; wr; ++nreq, wr = wr->next) { 1731 - if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { 1732 - nreq = 0; 1733 - 1734 - doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); 1735 - doorbell[1] = cpu_to_be32(qp->qpn << 8); 1736 - 1737 - wmb(); 1738 - 1739 - mthca_write64(doorbell, 1740 - dev->kar + MTHCA_RECEIVE_DOORBELL, 1741 - MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 1742 - 1743 - qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; 1744 - size0 = 0; 1745 - } 1746 - 1730 + for (nreq = 0; wr; wr = wr->next) { 1747 1731 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { 1748 1732 mthca_err(dev, "RQ %06x full (%u head, %u tail," 1749 1733 " %d max, %d nreq)\n", qp->qpn, ··· 1781 1797 ++ind; 1782 1798 if (unlikely(ind >= qp->rq.max)) 1783 1799 ind -= qp->rq.max; 1800 + 1801 + ++nreq; 1802 + if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { 1803 + nreq = 0; 1804 + 1805 + doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); 1806 + doorbell[1] = cpu_to_be32(qp->qpn << 8); 1807 + 1808 + wmb(); 1809 + 1810 + mthca_write64(doorbell, 1811 + dev->kar + MTHCA_RECEIVE_DOORBELL, 1812 + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 1813 + 1814 + qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; 1815 + size0 = 0; 1816 + } 1784 1817 } 1785 1818 1786 1819 out:
+5 -5
drivers/infiniband/ulp/srp/ib_srp.c
··· 340 340 /* XXX should send SRP_I_LOGOUT request */ 341 341 342 342 init_completion(&target->done); 343 - ib_send_cm_dreq(target->cm_id, NULL, 0); 343 + if (ib_send_cm_dreq(target->cm_id, NULL, 0)) { 344 + printk(KERN_DEBUG PFX "Sending CM DREQ failed\n"); 345 + return; 346 + } 344 347 wait_for_completion(&target->done); 345 348 } 346 349 ··· 354 351 spin_lock_irq(target->scsi_host->host_lock); 355 352 if (target->state != SRP_TARGET_DEAD) { 356 353 spin_unlock_irq(target->scsi_host->host_lock); 357 - scsi_host_put(target->scsi_host); 358 354 return; 359 355 } 360 356 target->state = SRP_TARGET_REMOVED; ··· 366 364 scsi_remove_host(target->scsi_host); 367 365 ib_destroy_cm_id(target->cm_id); 368 366 srp_free_target_ib(target); 369 - scsi_host_put(target->scsi_host); 370 - /* And another put to really free the target port... */ 371 367 scsi_host_put(target->scsi_host); 372 368 } 373 369 ··· 1241 1241 list_for_each_entry_safe(req, tmp, &target->req_queue, list) 1242 1242 if (req->scmnd->device == scmnd->device) { 1243 1243 req->scmnd->result = DID_RESET << 16; 1244 - scmnd->scsi_done(scmnd); 1244 + req->scmnd->scsi_done(req->scmnd); 1245 1245 srp_remove_req(target, req); 1246 1246 } 1247 1247
-1
drivers/isdn/capi/capi.c
··· 1499 1499 printk(KERN_ERR "capi20: unable to get major %d\n", capi_major); 1500 1500 return major_ret; 1501 1501 } 1502 - capi_major = major_ret; 1503 1502 capi_class = class_create(THIS_MODULE, "capi"); 1504 1503 if (IS_ERR(capi_class)) { 1505 1504 unregister_chrdev(capi_major, "capi20");
+2 -2
drivers/isdn/gigaset/usb-gigaset.c
··· 710 710 retval = -ENODEV; //FIXME 711 711 712 712 /* See if the device offered us matches what we can accept */ 713 - if ((le16_to_cpu(udev->descriptor.idVendor != USB_M105_VENDOR_ID)) || 714 - (le16_to_cpu(udev->descriptor.idProduct != USB_M105_PRODUCT_ID))) 713 + if ((le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID) || 714 + (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID)) 715 715 return -ENODEV; 716 716 717 717 /* this starts to become ascii art... */
+1 -1
drivers/isdn/i4l/isdn_tty.c
··· 2880 2880 p[0]++; 2881 2881 i = 0; 2882 2882 while (*p[0] && (strchr("0123456789,-*[]?;", *p[0])) && 2883 - (i < ISDN_LMSNLEN)) 2883 + (i < ISDN_LMSNLEN - 1)) 2884 2884 m->lmsn[i++] = *p[0]++; 2885 2885 m->lmsn[i] = '\0'; 2886 2886 break;
+5 -2
drivers/leds/Kconfig
··· 4 4 config NEW_LEDS 5 5 bool "LED Support" 6 6 help 7 - Say Y to enable Linux LED support. This is not related to standard 8 - keyboard LEDs which are controlled via the input system. 7 + Say Y to enable Linux LED support. This allows control of supported 8 + LEDs from both userspace and optionally, by kernel events (triggers). 9 + 10 + This is not related to standard keyboard LEDs which are controlled 11 + via the input system. 9 12 10 13 config LEDS_CLASS 11 14 tristate "LED Class Support"
+7 -2
drivers/leds/led-class.c
··· 19 19 #include <linux/sysdev.h> 20 20 #include <linux/timer.h> 21 21 #include <linux/err.h> 22 + #include <linux/ctype.h> 22 23 #include <linux/leds.h> 23 24 #include "leds.h" 24 25 ··· 44 43 ssize_t ret = -EINVAL; 45 44 char *after; 46 45 unsigned long state = simple_strtoul(buf, &after, 10); 46 + size_t count = after - buf; 47 47 48 - if (after - buf > 0) { 49 - ret = after - buf; 48 + if (*after && isspace(*after)) 49 + count++; 50 + 51 + if (count == size) { 52 + ret = count; 50 53 led_set_brightness(led_cdev, state); 51 54 } 52 55
+13 -4
drivers/leds/ledtrig-timer.c
··· 20 20 #include <linux/device.h> 21 21 #include <linux/sysdev.h> 22 22 #include <linux/timer.h> 23 + #include <linux/ctype.h> 23 24 #include <linux/leds.h> 24 25 #include "leds.h" 25 26 ··· 70 69 int ret = -EINVAL; 71 70 char *after; 72 71 unsigned long state = simple_strtoul(buf, &after, 10); 72 + size_t count = after - buf; 73 73 74 - if (after - buf > 0) { 74 + if (*after && isspace(*after)) 75 + count++; 76 + 77 + if (count == size) { 75 78 timer_data->delay_on = state; 76 79 mod_timer(&timer_data->timer, jiffies + 1); 77 - ret = after - buf; 80 + ret = count; 78 81 } 79 82 80 83 return ret; ··· 102 97 int ret = -EINVAL; 103 98 char *after; 104 99 unsigned long state = simple_strtoul(buf, &after, 10); 100 + size_t count = after - buf; 105 101 106 - if (after - buf > 0) { 102 + if (*after && isspace(*after)) 103 + count++; 104 + 105 + if (count == size) { 107 106 timer_data->delay_off = state; 108 107 mod_timer(&timer_data->timer, jiffies + 1); 109 - ret = after - buf; 108 + ret = count; 110 109 } 111 110 112 111 return ret;
+1 -1
drivers/md/md.c
··· 2252 2252 } else { 2253 2253 if (cmd_match(page, "check")) 2254 2254 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 2255 - else if (cmd_match(page, "repair")) 2255 + else if (!cmd_match(page, "repair")) 2256 2256 return -EINVAL; 2257 2257 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 2258 2258 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+3 -2
drivers/md/raid0.c
··· 331 331 goto out_free_conf; 332 332 size = conf->strip_zone[cur].size; 333 333 334 - for (i=0; i< nb_zone; i++) { 335 - conf->hash_table[i] = conf->strip_zone + cur; 334 + conf->hash_table[0] = conf->strip_zone + cur; 335 + for (i=1; i< nb_zone; i++) { 336 336 while (size <= conf->hash_spacing) { 337 337 cur++; 338 338 size += conf->strip_zone[cur].size; 339 339 } 340 340 size -= conf->hash_spacing; 341 + conf->hash_table[i] = conf->strip_zone + cur; 341 342 } 342 343 if (conf->preshift) { 343 344 conf->hash_spacing >>= conf->preshift;
+38 -7
drivers/media/Kconfig
··· 8 8 tristate "Video For Linux" 9 9 ---help--- 10 10 Support for audio/video capture and overlay devices and FM radio 11 - cards. The exact capabilities of each device vary. User tools for 12 - this are available from 13 - <ftp://ftp.uk.linux.org/pub/linux/video4linux/>. 11 + cards. The exact capabilities of each device vary. 14 12 15 13 This kernel includes support for the new Video for Linux Two API, 16 14 (V4L2) as well as the original system. Drivers and applications 17 15 need to be rewritten to use V4L2, but drivers for popular cards 18 16 and applications for most video capture functions already exist. 19 17 20 - Documentation for the original API is included in the file 21 - <file:Documentation/video4linux/API.html>. Documentation for V4L2 is 22 - available on the web at <http://bytesex.org/v4l/>. 18 + Additional info and docs are available on the web at 19 + <http://linuxtv.org> 20 + 21 + Documentation for V4L2 is also available on the web at 22 + <http://bytesex.org/v4l/>. 23 23 24 24 To compile this driver as a module, choose M here: the 25 25 module will be called videodev. 26 + 27 + config VIDEO_V4L1 28 + boolean "Enable Video For Linux API 1 (DEPRECATED)" 29 + depends on VIDEO_DEV 30 + select VIDEO_V4L1_COMPAT 31 + default y 32 + ---help--- 33 + Enables a compatibility API used by most V4L2 devices to allow 34 + its usage with legacy applications that supports only V4L1 api. 35 + 36 + If you are unsure as to whether this is required, answer Y. 37 + 38 + config VIDEO_V4L1_COMPAT 39 + boolean "Enable Video For Linux API 1 compatible Layer" 40 + depends on VIDEO_DEV 41 + default y 42 + ---help--- 43 + This api were developed to be used at Kernel 2.2 and 2.4, but 44 + lacks support for several video standards. There are several 45 + drivers at kernel that still depends on it. 46 + 47 + Documentation for the original API is included in the file 48 + <Documentation/video4linux/API.html>. 
49 + 50 + User tools for this are available from 51 + <ftp://ftp.uk.linux.org/pub/linux/video4linux/>. 52 + 53 + If you are unsure as to whether this is required, answer Y. 54 + 55 + config VIDEO_V4L2 56 + tristate 57 + default y 26 58 27 59 source "drivers/media/video/Kconfig" 28 60 ··· 97 65 module will be called dabusb. 98 66 99 67 endmenu 100 -
+2 -1
drivers/media/common/Kconfig
··· 1 1 config VIDEO_SAA7146 2 2 tristate 3 - select I2C 3 + depends on I2C 4 4 5 5 config VIDEO_SAA7146_VV 6 6 tristate 7 + select VIDEO_V4L2 7 8 select VIDEO_BUF 8 9 select VIDEO_VIDEOBUF 9 10 select VIDEO_SAA7146
+5 -5
drivers/media/dvb/Kconfig
··· 22 22 source "drivers/media/dvb/dvb-core/Kconfig" 23 23 24 24 comment "Supported SAA7146 based PCI Adapters" 25 - depends on DVB_CORE && PCI 25 + depends on DVB_CORE && PCI && I2C 26 26 source "drivers/media/dvb/ttpci/Kconfig" 27 27 28 28 comment "Supported USB Adapters" 29 - depends on DVB_CORE && USB 29 + depends on DVB_CORE && USB && I2C 30 30 source "drivers/media/dvb/dvb-usb/Kconfig" 31 31 source "drivers/media/dvb/ttusb-budget/Kconfig" 32 32 source "drivers/media/dvb/ttusb-dec/Kconfig" 33 33 source "drivers/media/dvb/cinergyT2/Kconfig" 34 34 35 35 comment "Supported FlexCopII (B2C2) Adapters" 36 - depends on DVB_CORE && (PCI || USB) 36 + depends on DVB_CORE && (PCI || USB) && I2C 37 37 source "drivers/media/dvb/b2c2/Kconfig" 38 38 39 39 comment "Supported BT878 Adapters" 40 - depends on DVB_CORE && PCI 40 + depends on DVB_CORE && PCI && I2C 41 41 source "drivers/media/dvb/bt8xx/Kconfig" 42 42 43 43 comment "Supported Pluto2 Adapters" 44 - depends on DVB_CORE && PCI 44 + depends on DVB_CORE && PCI && I2C 45 45 source "drivers/media/dvb/pluto2/Kconfig" 46 46 47 47 comment "Supported DVB Frontends"
+3 -3
drivers/media/dvb/b2c2/Kconfig
··· 1 1 config DVB_B2C2_FLEXCOP 2 2 tristate "Technisat/B2C2 FlexCopII(b) and FlexCopIII adapters" 3 - depends on DVB_CORE 3 + depends on DVB_CORE && I2C 4 4 select DVB_STV0299 5 5 select DVB_MT352 6 6 select DVB_MT312 ··· 16 16 17 17 config DVB_B2C2_FLEXCOP_PCI 18 18 tristate "Technisat/B2C2 Air/Sky/Cable2PC PCI" 19 - depends on DVB_B2C2_FLEXCOP && PCI 19 + depends on DVB_B2C2_FLEXCOP && PCI && I2C 20 20 help 21 21 Support for the Air/Sky/CableStar2 PCI card (DVB/ATSC) by Technisat/B2C2. 22 22 ··· 24 24 25 25 config DVB_B2C2_FLEXCOP_USB 26 26 tristate "Technisat/B2C2 Air/Sky/Cable2PC USB" 27 - depends on DVB_B2C2_FLEXCOP && USB 27 + depends on DVB_B2C2_FLEXCOP && USB && I2C 28 28 help 29 29 Support for the Air/Sky/Cable2PC USB1.1 box (DVB/ATSC) by Technisat/B2C2, 30 30
+2 -1
drivers/media/dvb/bt8xx/Kconfig
··· 1 1 config DVB_BT8XX 2 2 tristate "BT8xx based PCI cards" 3 - depends on DVB_CORE && PCI && VIDEO_BT848 3 + depends on DVB_CORE && PCI && I2C && VIDEO_BT848 4 4 select DVB_MT352 5 5 select DVB_SP887X 6 6 select DVB_NXT6000 7 7 select DVB_CX24110 8 8 select DVB_OR51211 9 9 select DVB_LGDT330X 10 + select DVB_ZL10353 10 11 select FW_LOADER 11 12 help 12 13 Support for PCI cards based on the Bt8xx PCI bridge. Examples are
+3 -3
drivers/media/dvb/bt8xx/dvb-bt8xx.c
··· 115 115 return 0; 116 116 } 117 117 118 - static struct bt878 __init *dvb_bt8xx_878_match(unsigned int bttv_nr, struct pci_dev* bttv_pci_dev) 118 + static struct bt878 __devinit *dvb_bt8xx_878_match(unsigned int bttv_nr, struct pci_dev* bttv_pci_dev) 119 119 { 120 120 unsigned int card_nr; 121 121 ··· 709 709 } 710 710 } 711 711 712 - static int __init dvb_bt8xx_load_card(struct dvb_bt8xx_card *card, u32 type) 712 + static int __devinit dvb_bt8xx_load_card(struct dvb_bt8xx_card *card, u32 type) 713 713 { 714 714 int result; 715 715 ··· 794 794 return 0; 795 795 } 796 796 797 - static int dvb_bt8xx_probe(struct bttv_sub_device *sub) 797 + static int __devinit dvb_bt8xx_probe(struct bttv_sub_device *sub) 798 798 { 799 799 struct dvb_bt8xx_card *card; 800 800 struct pci_dev* bttv_pci_dev;
+4 -1
drivers/media/dvb/cinergyT2/cinergyT2.c
··· 902 902 return -ENOMEM; 903 903 } 904 904 905 - dvb_register_adapter(&cinergyt2->adapter, DRIVER_NAME, THIS_MODULE); 905 + if ((err = dvb_register_adapter(&cinergyt2->adapter, DRIVER_NAME, THIS_MODULE)) < 0) { 906 + kfree(cinergyt2); 907 + return err; 908 + } 906 909 907 910 cinergyt2->demux.priv = cinergyt2; 908 911 cinergyt2->demux.filternum = 256;
+12
drivers/media/dvb/dvb-core/dvb_frontend.c
··· 106 106 unsigned long tune_mode_flags; 107 107 unsigned int delay; 108 108 unsigned int reinitialise; 109 + int tone; 110 + int voltage; 109 111 110 112 /* swzigzag values */ 111 113 unsigned int state; ··· 539 537 540 538 if (fepriv->reinitialise) { 541 539 dvb_frontend_init(fe); 540 + if (fepriv->tone != -1) { 541 + fe->ops->set_tone(fe, fepriv->tone); 542 + } 543 + if (fepriv->voltage != -1) { 544 + fe->ops->set_voltage(fe, fepriv->voltage); 545 + } 542 546 fepriv->reinitialise = 0; 543 547 } 544 548 ··· 796 788 case FE_SET_TONE: 797 789 if (fe->ops->set_tone) { 798 790 err = fe->ops->set_tone(fe, (fe_sec_tone_mode_t) parg); 791 + fepriv->tone = (fe_sec_tone_mode_t) parg; 799 792 fepriv->state = FESTATE_DISEQC; 800 793 fepriv->status = 0; 801 794 } ··· 805 796 case FE_SET_VOLTAGE: 806 797 if (fe->ops->set_voltage) { 807 798 err = fe->ops->set_voltage(fe, (fe_sec_voltage_t) parg); 799 + fepriv->voltage = (fe_sec_voltage_t) parg; 808 800 fepriv->state = FESTATE_DISEQC; 809 801 fepriv->status = 0; 810 802 } ··· 1005 995 1006 996 /* normal tune mode when opened R/W */ 1007 997 fepriv->tune_mode_flags &= ~FE_TUNE_MODE_ONESHOT; 998 + fepriv->tone = -1; 999 + fepriv->voltage = -1; 1008 1000 } 1009 1001 1010 1002 return ret;
+2 -2
drivers/media/dvb/dvb-core/dvbdev.c
··· 219 219 return -ENOMEM; 220 220 } 221 221 222 - mutex_unlock(&dvbdev_register_lock); 223 - 224 222 memcpy(dvbdev, template, sizeof(struct dvb_device)); 225 223 dvbdev->type = type; 226 224 dvbdev->id = id; ··· 228 230 dvbdev->fops->owner = adap->module; 229 231 230 232 list_add_tail (&dvbdev->list_head, &adap->device_list); 233 + 234 + mutex_unlock(&dvbdev_register_lock); 231 235 232 236 devfs_mk_cdev(MKDEV(DVB_MAJOR, nums2minor(adap->num, type, id)), 233 237 S_IFCHR | S_IRUSR | S_IWUSR,
+1 -1
drivers/media/dvb/dvb-usb/Kconfig
··· 1 1 config DVB_USB 2 2 tristate "Support for various USB DVB devices" 3 - depends on DVB_CORE && USB 3 + depends on DVB_CORE && USB && I2C 4 4 select FW_LOADER 5 5 help 6 6 By enabling this you will be able to choose the various supported
+13 -4
drivers/media/dvb/dvb-usb/cxusb.c
··· 150 150 return cxusb_ctrl_msg(d, CMD_POWER_OFF, &b, 1, NULL, 0); 151 151 } 152 152 153 + static int cxusb_bluebird_power_ctrl(struct dvb_usb_device *d, int onoff) 154 + { 155 + u8 b = 0; 156 + if (onoff) 157 + return cxusb_ctrl_msg(d, CMD_POWER_ON, &b, 1, NULL, 0); 158 + else 159 + return 0; 160 + } 161 + 153 162 static int cxusb_streaming_ctrl(struct dvb_usb_device *d, int onoff) 154 163 { 155 164 u8 buf[2] = { 0x03, 0x00 }; ··· 553 544 .size_of_priv = sizeof(struct cxusb_state), 554 545 555 546 .streaming_ctrl = cxusb_streaming_ctrl, 556 - .power_ctrl = cxusb_power_ctrl, 547 + .power_ctrl = cxusb_bluebird_power_ctrl, 557 548 .frontend_attach = cxusb_lgdt3303_frontend_attach, 558 549 .tuner_attach = cxusb_lgh064f_tuner_attach, 559 550 ··· 598 589 .size_of_priv = sizeof(struct cxusb_state), 599 590 600 591 .streaming_ctrl = cxusb_streaming_ctrl, 601 - .power_ctrl = cxusb_power_ctrl, 592 + .power_ctrl = cxusb_bluebird_power_ctrl, 602 593 .frontend_attach = cxusb_dee1601_frontend_attach, 603 594 .tuner_attach = cxusb_dee1601_tuner_attach, 604 595 ··· 647 638 .size_of_priv = sizeof(struct cxusb_state), 648 639 649 640 .streaming_ctrl = cxusb_streaming_ctrl, 650 - .power_ctrl = cxusb_power_ctrl, 641 + .power_ctrl = cxusb_bluebird_power_ctrl, 651 642 .frontend_attach = cxusb_mt352_frontend_attach, 652 643 .tuner_attach = cxusb_lgz201_tuner_attach, 653 644 ··· 692 683 .size_of_priv = sizeof(struct cxusb_state), 693 684 694 685 .streaming_ctrl = cxusb_streaming_ctrl, 695 - .power_ctrl = cxusb_power_ctrl, 686 + .power_ctrl = cxusb_bluebird_power_ctrl, 696 687 .frontend_attach = cxusb_mt352_frontend_attach, 697 688 .tuner_attach = cxusb_dtt7579_tuner_attach, 698 689
+421 -194
drivers/media/dvb/frontends/cx24123.c
··· 29 29 #include "dvb_frontend.h" 30 30 #include "cx24123.h" 31 31 32 + #define XTAL 10111000 33 + 34 + static int force_band; 32 35 static int debug; 33 36 #define dprintk(args...) \ 34 37 do { \ ··· 55 52 u32 VGAarg; 56 53 u32 bandselectarg; 57 54 u32 pllarg; 55 + u32 FILTune; 58 56 59 57 /* The Demod/Tuner can't easily provide these, we cache them */ 60 58 u32 currentfreq; ··· 67 63 { 68 64 u32 symbolrate_low; 69 65 u32 symbolrate_high; 70 - u32 VCAslope; 71 - u32 VCAoffset; 72 - u32 VGA1offset; 73 - u32 VGA2offset; 74 66 u32 VCAprogdata; 75 67 u32 VGAprogdata; 68 + u32 FILTune; 76 69 } cx24123_AGC_vals[] = 77 70 { 78 71 { 79 72 .symbolrate_low = 1000000, 80 73 .symbolrate_high = 4999999, 81 - .VCAslope = 0x07, 82 - .VCAoffset = 0x0f, 83 - .VGA1offset = 0x1f8, 84 - .VGA2offset = 0x1f8, 85 - .VGAprogdata = (2 << 18) | (0x1f8 << 9) | 0x1f8, 86 - .VCAprogdata = (4 << 18) | (0x07 << 9) | 0x07, 74 + /* the specs recommend other values for VGA offsets, 75 + but tests show they are wrong */ 76 + .VGAprogdata = (1 << 19) | (0x180 << 9) | 0x1e0, 77 + .VCAprogdata = (2 << 19) | (0x07 << 9) | 0x07, 78 + .FILTune = 0x27f /* 0.41 V */ 87 79 }, 88 80 { 89 81 .symbolrate_low = 5000000, 90 82 .symbolrate_high = 14999999, 91 - .VCAslope = 0x1f, 92 - .VCAoffset = 0x1f, 93 - .VGA1offset = 0x1e0, 94 - .VGA2offset = 0x180, 95 - .VGAprogdata = (2 << 18) | (0x180 << 9) | 0x1e0, 96 - .VCAprogdata = (4 << 18) | (0x07 << 9) | 0x1f, 83 + .VGAprogdata = (1 << 19) | (0x180 << 9) | 0x1e0, 84 + .VCAprogdata = (2 << 19) | (0x07 << 9) | 0x1f, 85 + .FILTune = 0x317 /* 0.90 V */ 97 86 }, 98 87 { 99 88 .symbolrate_low = 15000000, 100 89 .symbolrate_high = 45000000, 101 - .VCAslope = 0x3f, 102 - .VCAoffset = 0x3f, 103 - .VGA1offset = 0x180, 104 - .VGA2offset = 0x100, 105 - .VGAprogdata = (2 << 18) | (0x100 << 9) | 0x180, 106 - .VCAprogdata = (4 << 18) | (0x07 << 9) | 0x3f, 90 + .VGAprogdata = (1 << 19) | (0x100 << 9) | 0x180, 91 + .VCAprogdata = (2 << 19) | (0x07 << 9) | 0x3f, 92 + .FILTune = 
0x145 /* 2.70 V */ 107 93 }, 108 94 }; 109 95 ··· 106 112 { 107 113 u32 freq_low; 108 114 u32 freq_high; 109 - u32 bandselect; 110 115 u32 VCOdivider; 111 - u32 VCOnumber; 112 116 u32 progdata; 113 117 } cx24123_bandselect_vals[] = 114 118 { 119 + /* band 1 */ 115 120 { 116 121 .freq_low = 950000, 117 - .freq_high = 1018999, 118 - .bandselect = 0x40, 119 - .VCOdivider = 4, 120 - .VCOnumber = 7, 121 - .progdata = (0 << 18) | (0 << 9) | 0x40, 122 - }, 123 - { 124 - .freq_low = 1019000, 125 122 .freq_high = 1074999, 126 - .bandselect = 0x80, 127 123 .VCOdivider = 4, 128 - .VCOnumber = 8, 129 - .progdata = (0 << 18) | (0 << 9) | 0x80, 124 + .progdata = (0 << 19) | (0 << 9) | 0x40, 130 125 }, 126 + 127 + /* band 2 */ 131 128 { 132 129 .freq_low = 1075000, 133 - .freq_high = 1227999, 134 - .bandselect = 0x01, 135 - .VCOdivider = 2, 136 - .VCOnumber = 1, 137 - .progdata = (0 << 18) | (1 << 9) | 0x01, 130 + .freq_high = 1177999, 131 + .VCOdivider = 4, 132 + .progdata = (0 << 19) | (0 << 9) | 0x80, 138 133 }, 134 + 135 + /* band 3 */ 139 136 { 140 - .freq_low = 1228000, 141 - .freq_high = 1349999, 142 - .bandselect = 0x02, 137 + .freq_low = 1178000, 138 + .freq_high = 1295999, 143 139 .VCOdivider = 2, 144 - .VCOnumber = 2, 145 - .progdata = (0 << 18) | (1 << 9) | 0x02, 140 + .progdata = (0 << 19) | (1 << 9) | 0x01, 146 141 }, 142 + 143 + /* band 4 */ 147 144 { 148 - .freq_low = 1350000, 149 - .freq_high = 1481999, 150 - .bandselect = 0x04, 145 + .freq_low = 1296000, 146 + .freq_high = 1431999, 151 147 .VCOdivider = 2, 152 - .VCOnumber = 3, 153 - .progdata = (0 << 18) | (1 << 9) | 0x04, 148 + .progdata = (0 << 19) | (1 << 9) | 0x02, 154 149 }, 150 + 151 + /* band 5 */ 155 152 { 156 - .freq_low = 1482000, 157 - .freq_high = 1595999, 158 - .bandselect = 0x08, 153 + .freq_low = 1432000, 154 + .freq_high = 1575999, 159 155 .VCOdivider = 2, 160 - .VCOnumber = 4, 161 - .progdata = (0 << 18) | (1 << 9) | 0x08, 156 + .progdata = (0 << 19) | (1 << 9) | 0x04, 162 157 }, 158 + 159 + /* 
band 6 */ 163 160 { 164 - .freq_low = 1596000, 161 + .freq_low = 1576000, 165 162 .freq_high = 1717999, 166 - .bandselect = 0x10, 167 163 .VCOdivider = 2, 168 - .VCOnumber = 5, 169 - .progdata = (0 << 18) | (1 << 9) | 0x10, 164 + .progdata = (0 << 19) | (1 << 9) | 0x08, 170 165 }, 166 + 167 + /* band 7 */ 171 168 { 172 169 .freq_low = 1718000, 173 170 .freq_high = 1855999, 174 - .bandselect = 0x20, 175 171 .VCOdivider = 2, 176 - .VCOnumber = 6, 177 - .progdata = (0 << 18) | (1 << 9) | 0x20, 172 + .progdata = (0 << 19) | (1 << 9) | 0x10, 178 173 }, 174 + 175 + /* band 8 */ 179 176 { 180 177 .freq_low = 1856000, 181 178 .freq_high = 2035999, 182 - .bandselect = 0x40, 183 179 .VCOdivider = 2, 184 - .VCOnumber = 7, 185 - .progdata = (0 << 18) | (1 << 9) | 0x40, 180 + .progdata = (0 << 19) | (1 << 9) | 0x20, 186 181 }, 182 + 183 + /* band 9 */ 187 184 { 188 185 .freq_low = 2036000, 189 - .freq_high = 2149999, 190 - .bandselect = 0x80, 186 + .freq_high = 2150000, 191 187 .VCOdivider = 2, 192 - .VCOnumber = 8, 193 - .progdata = (0 << 18) | (1 << 9) | 0x80, 188 + .progdata = (0 << 19) | (1 << 9) | 0x40, 194 189 }, 195 190 }; 196 191 ··· 190 207 { 191 208 {0x00, 0x03}, /* Reset system */ 192 209 {0x00, 0x00}, /* Clear reset */ 193 - {0x01, 0x3b}, /* Apply sensible defaults, from an i2c sniffer */ 194 - {0x03, 0x07}, 195 - {0x04, 0x10}, 196 - {0x05, 0x04}, 197 - {0x06, 0x31}, 198 - {0x0d, 0x02}, 199 - {0x0e, 0x03}, 200 - {0x0f, 0xfe}, 201 - {0x10, 0x01}, 202 - {0x14, 0x01}, 203 - {0x15, 0x98}, 204 - {0x16, 0x00}, 205 - {0x17, 0x01}, 206 - {0x1b, 0x05}, 207 - {0x1c, 0x80}, 208 - {0x1d, 0x00}, 209 - {0x1e, 0x00}, 210 - {0x20, 0x41}, 211 - {0x21, 0x15}, 212 - {0x27, 0x14}, 213 - {0x28, 0x46}, 214 - {0x29, 0x00}, 215 - {0x2a, 0xb0}, 216 - {0x2b, 0x73}, 217 - {0x2c, 0x00}, 210 + {0x03, 0x07}, /* QPSK, DVB, Auto Acquisition (default) */ 211 + {0x04, 0x10}, /* MPEG */ 212 + {0x05, 0x04}, /* MPEG */ 213 + {0x06, 0x31}, /* MPEG (default) */ 214 + {0x0b, 0x00}, /* Freq search start 
point (default) */ 215 + {0x0c, 0x00}, /* Demodulator sample gain (default) */ 216 + {0x0d, 0x02}, /* Frequency search range = Fsymbol / 4 (default) */ 217 + {0x0e, 0x03}, /* Default non-inverted, FEC 3/4 (default) */ 218 + {0x0f, 0xfe}, /* FEC search mask (all supported codes) */ 219 + {0x10, 0x01}, /* Default search inversion, no repeat (default) */ 220 + {0x16, 0x00}, /* Enable reading of frequency */ 221 + {0x17, 0x01}, /* Enable EsNO Ready Counter */ 222 + {0x1c, 0x80}, /* Enable error counter */ 223 + {0x20, 0x00}, /* Tuner burst clock rate = 500KHz */ 224 + {0x21, 0x15}, /* Tuner burst mode, word length = 0x15 */ 225 + {0x28, 0x00}, /* Enable FILTERV with positive pol., DiSEqC 2.x off */ 226 + {0x29, 0x00}, /* DiSEqC LNB_DC off */ 227 + {0x2a, 0xb0}, /* DiSEqC Parameters (default) */ 228 + {0x2b, 0x73}, /* DiSEqC Tone Frequency (default) */ 229 + {0x2c, 0x00}, /* DiSEqC Message (0x2c - 0x31) */ 218 230 {0x2d, 0x00}, 219 231 {0x2e, 0x00}, 220 232 {0x2f, 0x00}, 221 233 {0x30, 0x00}, 222 234 {0x31, 0x00}, 223 - {0x32, 0x8c}, 224 - {0x33, 0x00}, 235 + {0x32, 0x8c}, /* DiSEqC Parameters (default) */ 236 + {0x33, 0x00}, /* Interrupts off (0x33 - 0x34) */ 225 237 {0x34, 0x00}, 226 - {0x35, 0x03}, 227 - {0x36, 0x02}, 228 - {0x37, 0x3a}, 229 - {0x3a, 0x00}, /* Enable AGC accumulator */ 230 - {0x44, 0x00}, 231 - {0x45, 0x00}, 232 - {0x46, 0x05}, 233 - {0x56, 0x41}, 234 - {0x57, 0xff}, 235 - {0x67, 0x83}, 238 + {0x35, 0x03}, /* DiSEqC Tone Amplitude (default) */ 239 + {0x36, 0x02}, /* DiSEqC Parameters (default) */ 240 + {0x37, 0x3a}, /* DiSEqC Parameters (default) */ 241 + {0x3a, 0x00}, /* Enable AGC accumulator (for signal strength) */ 242 + {0x44, 0x00}, /* Constellation (default) */ 243 + {0x45, 0x00}, /* Symbol count (default) */ 244 + {0x46, 0x0d}, /* Symbol rate estimator on (default) */ 245 + {0x56, 0x41}, /* Various (default) */ 246 + {0x57, 0xff}, /* Error Counter Window (default) */ 247 + {0x67, 0x83}, /* Non-DCII symbol clock */ 236 248 }; 237 249 238 250 
static int cx24123_writereg(struct cx24123_state* state, int reg, int data) ··· 235 257 u8 buf[] = { reg, data }; 236 258 struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 }; 237 259 int err; 260 + 261 + if (debug>1) 262 + printk("cx24123: %s: write reg 0x%02x, value 0x%02x\n", 263 + __FUNCTION__,reg, data); 238 264 239 265 if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) { 240 266 printk("%s: writereg error(err == %i, reg == 0x%02x," ··· 255 273 /* fixme: put the intersil addr int the config */ 256 274 struct i2c_msg msg = { .addr = 0x08, .flags = 0, .buf = buf, .len = 2 }; 257 275 int err; 276 + 277 + if (debug>1) 278 + printk("cx24123: %s: writeln addr=0x08, reg 0x%02x, value 0x%02x\n", 279 + __FUNCTION__,reg, data); 258 280 259 281 if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) { 260 282 printk("%s: writelnbreg error (err == %i, reg == 0x%02x," ··· 289 303 return ret; 290 304 } 291 305 306 + if (debug>1) 307 + printk("cx24123: read reg 0x%02x, value 0x%02x\n",reg, ret); 308 + 292 309 return b1[0]; 293 310 } 294 311 ··· 302 313 303 314 static int cx24123_set_inversion(struct cx24123_state* state, fe_spectral_inversion_t inversion) 304 315 { 316 + u8 nom_reg = cx24123_readreg(state, 0x0e); 317 + u8 auto_reg = cx24123_readreg(state, 0x10); 318 + 305 319 switch (inversion) { 306 320 case INVERSION_OFF: 307 - cx24123_writereg(state, 0x0e, cx24123_readreg(state, 0x0e) & 0x7f); 308 - cx24123_writereg(state, 0x10, cx24123_readreg(state, 0x10) | 0x80); 321 + dprintk("%s: inversion off\n",__FUNCTION__); 322 + cx24123_writereg(state, 0x0e, nom_reg & ~0x80); 323 + cx24123_writereg(state, 0x10, auto_reg | 0x80); 309 324 break; 310 325 case INVERSION_ON: 311 - cx24123_writereg(state, 0x0e, cx24123_readreg(state, 0x0e) | 0x80); 312 - cx24123_writereg(state, 0x10, cx24123_readreg(state, 0x10) | 0x80); 326 + dprintk("%s: inversion on\n",__FUNCTION__); 327 + cx24123_writereg(state, 0x0e, nom_reg | 0x80); 328 + 
cx24123_writereg(state, 0x10, auto_reg | 0x80); 313 329 break; 314 330 case INVERSION_AUTO: 315 - cx24123_writereg(state, 0x10, cx24123_readreg(state, 0x10) & 0x7f); 331 + dprintk("%s: inversion auto\n",__FUNCTION__); 332 + cx24123_writereg(state, 0x10, auto_reg & ~0x80); 316 333 break; 317 334 default: 318 335 return -EINVAL; ··· 333 338 334 339 val = cx24123_readreg(state, 0x1b) >> 7; 335 340 336 - if (val == 0) 341 + if (val == 0) { 342 + dprintk("%s: read inversion off\n",__FUNCTION__); 337 343 *inversion = INVERSION_OFF; 338 - else 344 + } else { 345 + dprintk("%s: read inversion on\n",__FUNCTION__); 339 346 *inversion = INVERSION_ON; 347 + } 340 348 341 349 return 0; 342 350 } 343 351 344 352 static int cx24123_set_fec(struct cx24123_state* state, fe_code_rate_t fec) 345 353 { 354 + u8 nom_reg = cx24123_readreg(state, 0x0e) & ~0x07; 355 + 346 356 if ( (fec < FEC_NONE) || (fec > FEC_AUTO) ) 347 357 fec = FEC_AUTO; 348 358 349 - /* Hardware has 5/11 and 3/5 but are never unused */ 350 359 switch (fec) { 351 - case FEC_NONE: 352 - return cx24123_writereg(state, 0x0f, 0x01); 353 360 case FEC_1_2: 354 - return cx24123_writereg(state, 0x0f, 0x02); 361 + dprintk("%s: set FEC to 1/2\n",__FUNCTION__); 362 + cx24123_writereg(state, 0x0e, nom_reg | 0x01); 363 + cx24123_writereg(state, 0x0f, 0x02); 364 + break; 355 365 case FEC_2_3: 356 - return cx24123_writereg(state, 0x0f, 0x04); 366 + dprintk("%s: set FEC to 2/3\n",__FUNCTION__); 367 + cx24123_writereg(state, 0x0e, nom_reg | 0x02); 368 + cx24123_writereg(state, 0x0f, 0x04); 369 + break; 357 370 case FEC_3_4: 358 - return cx24123_writereg(state, 0x0f, 0x08); 371 + dprintk("%s: set FEC to 3/4\n",__FUNCTION__); 372 + cx24123_writereg(state, 0x0e, nom_reg | 0x03); 373 + cx24123_writereg(state, 0x0f, 0x08); 374 + break; 375 + case FEC_4_5: 376 + dprintk("%s: set FEC to 4/5\n",__FUNCTION__); 377 + cx24123_writereg(state, 0x0e, nom_reg | 0x04); 378 + cx24123_writereg(state, 0x0f, 0x10); 379 + break; 359 380 case FEC_5_6: 360 
- return cx24123_writereg(state, 0x0f, 0x20); 381 + dprintk("%s: set FEC to 5/6\n",__FUNCTION__); 382 + cx24123_writereg(state, 0x0e, nom_reg | 0x05); 383 + cx24123_writereg(state, 0x0f, 0x20); 384 + break; 385 + case FEC_6_7: 386 + dprintk("%s: set FEC to 6/7\n",__FUNCTION__); 387 + cx24123_writereg(state, 0x0e, nom_reg | 0x06); 388 + cx24123_writereg(state, 0x0f, 0x40); 389 + break; 361 390 case FEC_7_8: 362 - return cx24123_writereg(state, 0x0f, 0x80); 391 + dprintk("%s: set FEC to 7/8\n",__FUNCTION__); 392 + cx24123_writereg(state, 0x0e, nom_reg | 0x07); 393 + cx24123_writereg(state, 0x0f, 0x80); 394 + break; 363 395 case FEC_AUTO: 364 - return cx24123_writereg(state, 0x0f, 0xae); 396 + dprintk("%s: set FEC to auto\n",__FUNCTION__); 397 + cx24123_writereg(state, 0x0f, 0xfe); 398 + break; 365 399 default: 366 400 return -EOPNOTSUPP; 367 - } 368 - } 369 - 370 - static int cx24123_get_fec(struct cx24123_state* state, fe_code_rate_t *fec) 371 - { 372 - int ret; 373 - u8 val; 374 - 375 - ret = cx24123_readreg (state, 0x1b); 376 - if (ret < 0) 377 - return ret; 378 - val = ret & 0x07; 379 - switch (val) { 380 - case 1: 381 - *fec = FEC_1_2; 382 - break; 383 - case 3: 384 - *fec = FEC_2_3; 385 - break; 386 - case 4: 387 - *fec = FEC_3_4; 388 - break; 389 - case 5: 390 - *fec = FEC_4_5; 391 - break; 392 - case 6: 393 - *fec = FEC_5_6; 394 - break; 395 - case 7: 396 - *fec = FEC_7_8; 397 - break; 398 - case 2: /* *fec = FEC_3_5; break; */ 399 - case 0: /* *fec = FEC_5_11; break; */ 400 - *fec = FEC_AUTO; 401 - break; 402 - default: 403 - *fec = FEC_NONE; // can't happen 404 401 } 405 402 406 403 return 0; 407 404 } 408 405 409 - /* fixme: Symbol rates < 3MSps may not work because of precision loss */ 406 + static int cx24123_get_fec(struct cx24123_state* state, fe_code_rate_t *fec) 407 + { 408 + int ret; 409 + 410 + ret = cx24123_readreg (state, 0x1b); 411 + if (ret < 0) 412 + return ret; 413 + ret = ret & 0x07; 414 + 415 + switch (ret) { 416 + case 1: 417 + *fec = 
FEC_1_2; 418 + break; 419 + case 2: 420 + *fec = FEC_2_3; 421 + break; 422 + case 3: 423 + *fec = FEC_3_4; 424 + break; 425 + case 4: 426 + *fec = FEC_4_5; 427 + break; 428 + case 5: 429 + *fec = FEC_5_6; 430 + break; 431 + case 6: 432 + *fec = FEC_6_7; 433 + break; 434 + case 7: 435 + *fec = FEC_7_8; 436 + break; 437 + default: 438 + /* this can happen when there's no lock */ 439 + *fec = FEC_NONE; 440 + } 441 + 442 + return 0; 443 + } 444 + 445 + /* Approximation of closest integer of log2(a/b). It actually gives the 446 + lowest integer i such that 2^i >= round(a/b) */ 447 + static u32 cx24123_int_log2(u32 a, u32 b) 448 + { 449 + u32 exp, nearest = 0; 450 + u32 div = a / b; 451 + if(a % b >= b / 2) ++div; 452 + if(div < (1 << 31)) 453 + { 454 + for(exp = 1; div > exp; nearest++) 455 + exp += exp; 456 + } 457 + return nearest; 458 + } 459 + 410 460 static int cx24123_set_symbolrate(struct cx24123_state* state, u32 srate) 411 461 { 412 - u32 val; 462 + u32 tmp, sample_rate, ratio, sample_gain; 463 + u8 pll_mult; 413 464 414 - val = (srate / 1185) * 100; 465 + /* check if symbol rate is within limits */ 466 + if ((srate > state->ops.info.symbol_rate_max) || 467 + (srate < state->ops.info.symbol_rate_min)) 468 + return -EOPNOTSUPP;; 415 469 416 - /* Compensate for scaling up, by removing 17 symbols per 1Msps */ 417 - val = val - (17 * (srate / 1000000)); 470 + /* choose the sampling rate high enough for the required operation, 471 + while optimizing the power consumed by the demodulator */ 472 + if (srate < (XTAL*2)/2) 473 + pll_mult = 2; 474 + else if (srate < (XTAL*3)/2) 475 + pll_mult = 3; 476 + else if (srate < (XTAL*4)/2) 477 + pll_mult = 4; 478 + else if (srate < (XTAL*5)/2) 479 + pll_mult = 5; 480 + else if (srate < (XTAL*6)/2) 481 + pll_mult = 6; 482 + else if (srate < (XTAL*7)/2) 483 + pll_mult = 7; 484 + else if (srate < (XTAL*8)/2) 485 + pll_mult = 8; 486 + else 487 + pll_mult = 9; 418 488 419 - cx24123_writereg(state, 0x08, (val >> 16) & 0xff ); 420 - 
cx24123_writereg(state, 0x09, (val >> 8) & 0xff ); 421 - cx24123_writereg(state, 0x0a, (val ) & 0xff ); 489 + 490 + sample_rate = pll_mult * XTAL; 491 + 492 + /* 493 + SYSSymbolRate[21:0] = (srate << 23) / sample_rate 494 + 495 + We have to use 32 bit unsigned arithmetic without precision loss. 496 + The maximum srate is 45000000 or 0x02AEA540. This number has 497 + only 6 clear bits on top, hence we can shift it left only 6 bits 498 + at a time. Borrowed from cx24110.c 499 + */ 500 + 501 + tmp = srate << 6; 502 + ratio = tmp / sample_rate; 503 + 504 + tmp = (tmp % sample_rate) << 6; 505 + ratio = (ratio << 6) + (tmp / sample_rate); 506 + 507 + tmp = (tmp % sample_rate) << 6; 508 + ratio = (ratio << 6) + (tmp / sample_rate); 509 + 510 + tmp = (tmp % sample_rate) << 5; 511 + ratio = (ratio << 5) + (tmp / sample_rate); 512 + 513 + 514 + cx24123_writereg(state, 0x01, pll_mult * 6); 515 + 516 + cx24123_writereg(state, 0x08, (ratio >> 16) & 0x3f ); 517 + cx24123_writereg(state, 0x09, (ratio >> 8) & 0xff ); 518 + cx24123_writereg(state, 0x0a, (ratio ) & 0xff ); 519 + 520 + /* also set the demodulator sample gain */ 521 + sample_gain = cx24123_int_log2(sample_rate, srate); 522 + tmp = cx24123_readreg(state, 0x0c) & ~0xe0; 523 + cx24123_writereg(state, 0x0c, tmp | sample_gain << 5); 524 + 525 + dprintk("%s: srate=%d, ratio=0x%08x, sample_rate=%i sample_gain=%d\n", __FUNCTION__, srate, ratio, sample_rate, sample_gain); 422 526 423 527 return 0; 424 528 } ··· 531 437 struct cx24123_state *state = fe->demodulator_priv; 532 438 u32 ndiv = 0, adiv = 0, vco_div = 0; 533 439 int i = 0; 440 + int pump = 2; 441 + int band = 0; 442 + int num_bands = sizeof(cx24123_bandselect_vals) / sizeof(cx24123_bandselect_vals[0]); 534 443 535 444 /* Defaults for low freq, low rate */ 536 445 state->VCAarg = cx24123_AGC_vals[0].VCAprogdata; ··· 541 444 state->bandselectarg = cx24123_bandselect_vals[0].progdata; 542 445 vco_div = cx24123_bandselect_vals[0].VCOdivider; 543 446 544 - /* For the 
given symbolerate, determine the VCA and VGA programming bits */ 447 + /* For the given symbol rate, determine the VCA, VGA and FILTUNE programming bits */ 545 448 for (i = 0; i < sizeof(cx24123_AGC_vals) / sizeof(cx24123_AGC_vals[0]); i++) 546 449 { 547 450 if ((cx24123_AGC_vals[i].symbolrate_low <= p->u.qpsk.symbol_rate) && 548 - (cx24123_AGC_vals[i].symbolrate_high >= p->u.qpsk.symbol_rate) ) { 451 + (cx24123_AGC_vals[i].symbolrate_high >= p->u.qpsk.symbol_rate) ) { 549 452 state->VCAarg = cx24123_AGC_vals[i].VCAprogdata; 550 453 state->VGAarg = cx24123_AGC_vals[i].VGAprogdata; 454 + state->FILTune = cx24123_AGC_vals[i].FILTune; 551 455 } 552 456 } 553 457 554 - /* For the given frequency, determine the bandselect programming bits */ 555 - for (i = 0; i < sizeof(cx24123_bandselect_vals) / sizeof(cx24123_bandselect_vals[0]); i++) 458 + /* determine the band to use */ 459 + if(force_band < 1 || force_band > num_bands) 556 460 { 557 - if ((cx24123_bandselect_vals[i].freq_low <= p->frequency) && 558 - (cx24123_bandselect_vals[i].freq_high >= p->frequency) ) { 559 - state->bandselectarg = cx24123_bandselect_vals[i].progdata; 560 - vco_div = cx24123_bandselect_vals[i].VCOdivider; 461 + for (i = 0; i < num_bands; i++) 462 + { 463 + if ((cx24123_bandselect_vals[i].freq_low <= p->frequency) && 464 + (cx24123_bandselect_vals[i].freq_high >= p->frequency) ) 465 + band = i; 561 466 } 562 467 } 468 + else 469 + band = force_band - 1; 470 + 471 + state->bandselectarg = cx24123_bandselect_vals[band].progdata; 472 + vco_div = cx24123_bandselect_vals[band].VCOdivider; 473 + 474 + /* determine the charge pump current */ 475 + if ( p->frequency < (cx24123_bandselect_vals[band].freq_low + cx24123_bandselect_vals[band].freq_high)/2 ) 476 + pump = 0x01; 477 + else 478 + pump = 0x02; 563 479 564 480 /* Determine the N/A dividers for the requested lband freq (in kHz). */ 565 - /* Note: 10111 (kHz) is the Crystal Freq and divider of 10. 
*/ 566 - ndiv = ( ((p->frequency * vco_div) / (10111 / 10) / 2) / 32) & 0x1ff; 567 - adiv = ( ((p->frequency * vco_div) / (10111 / 10) / 2) % 32) & 0x1f; 481 + /* Note: the reference divider R=10, frequency is in KHz, XTAL is in Hz */ 482 + ndiv = ( ((p->frequency * vco_div * 10) / (2 * XTAL / 1000)) / 32) & 0x1ff; 483 + adiv = ( ((p->frequency * vco_div * 10) / (2 * XTAL / 1000)) % 32) & 0x1f; 568 484 569 485 if (adiv == 0) 570 - adiv++; 486 + ndiv++; 571 487 572 - /* determine the correct pll frequency values. */ 573 - /* Command 11, refdiv 11, cpump polarity 1, cpump current 3mA 10. */ 574 - state->pllarg = (3 << 19) | (3 << 17) | (1 << 16) | (2 << 14); 575 - state->pllarg |= (ndiv << 5) | adiv; 488 + /* control bits 11, refdiv 11, charge pump polarity 1, charge pump current, ndiv, adiv */ 489 + state->pllarg = (3 << 19) | (3 << 17) | (1 << 16) | (pump << 14) | (ndiv << 5) | adiv; 576 490 577 491 return 0; 578 492 } ··· 596 488 { 597 489 struct cx24123_state *state = fe->demodulator_priv; 598 490 unsigned long timeout; 491 + 492 + dprintk("%s: pll writereg called, data=0x%08x\n",__FUNCTION__,data); 599 493 600 494 /* align the 21 bytes into to bit23 boundary */ 601 495 data = data << 3; ··· 648 538 static int cx24123_pll_tune(struct dvb_frontend* fe, struct dvb_frontend_parameters *p) 649 539 { 650 540 struct cx24123_state *state = fe->demodulator_priv; 541 + u8 val; 542 + 543 + dprintk("frequency=%i\n", p->frequency); 651 544 652 545 if (cx24123_pll_calculate(fe, p) != 0) { 653 546 printk("%s: cx24123_pll_calcutate failed\n",__FUNCTION__); ··· 665 552 cx24123_pll_writereg(fe, p, state->bandselectarg); 666 553 cx24123_pll_writereg(fe, p, state->pllarg); 667 554 555 + /* set the FILTUNE voltage */ 556 + val = cx24123_readreg(state, 0x28) & ~0x3; 557 + cx24123_writereg(state, 0x27, state->FILTune >> 2); 558 + cx24123_writereg(state, 0x28, val | (state->FILTune & 0x3)); 559 + 560 + dprintk("%s: pll tune VCA=%d, band=%d, pll=%d\n",__FUNCTION__,state->VCAarg, 561 + 
state->bandselectarg,state->pllarg); 562 + 668 563 return 0; 669 564 } 670 565 ··· 680 559 { 681 560 struct cx24123_state *state = fe->demodulator_priv; 682 561 int i; 562 + 563 + dprintk("%s: init frontend\n",__FUNCTION__); 683 564 684 565 /* Configure the demod to a good set of defaults */ 685 566 for (i = 0; i < sizeof(cx24123_regdata) / sizeof(cx24123_regdata[0]); i++) ··· 710 587 711 588 switch (voltage) { 712 589 case SEC_VOLTAGE_13: 590 + dprintk("%s: isl6421 voltage = 13V\n",__FUNCTION__); 713 591 return cx24123_writelnbreg(state, 0x0, val & 0x32); /* V 13v */ 714 592 case SEC_VOLTAGE_18: 593 + dprintk("%s: isl6421 voltage = 18V\n",__FUNCTION__); 715 594 return cx24123_writelnbreg(state, 0x0, val | 0x04); /* H 18v */ 716 595 case SEC_VOLTAGE_OFF: 596 + dprintk("%s: isl5421 voltage off\n",__FUNCTION__); 717 597 return cx24123_writelnbreg(state, 0x0, val & 0x30); 718 598 default: 719 599 return -EINVAL; ··· 750 624 return 0; 751 625 } 752 626 753 - static int cx24123_send_diseqc_msg(struct dvb_frontend* fe, 754 - struct dvb_diseqc_master_cmd *cmd) 627 + /* wait for diseqc queue to become ready (or timeout) */ 628 + static void cx24123_wait_for_diseqc(struct cx24123_state *state) 755 629 { 756 - /* fixme: Implement diseqc */ 757 - printk("%s: No support yet\n",__FUNCTION__); 630 + unsigned long timeout = jiffies + msecs_to_jiffies(200); 631 + while (!(cx24123_readreg(state, 0x29) & 0x40)) { 632 + if(time_after(jiffies, timeout)) { 633 + printk("%s: diseqc queue not ready, command may be lost.\n", __FUNCTION__); 634 + break; 635 + } 636 + msleep(10); 637 + } 638 + } 758 639 759 - return -ENOTSUPP; 640 + static int cx24123_send_diseqc_msg(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd *cmd) 641 + { 642 + struct cx24123_state *state = fe->demodulator_priv; 643 + int i, val; 644 + 645 + dprintk("%s:\n",__FUNCTION__); 646 + 647 + /* check if continuous tone has been stopped */ 648 + if (state->config->use_isl6421) 649 + val = cx24123_readlnbreg(state, 0x00) 
& 0x10; 650 + else 651 + val = cx24123_readreg(state, 0x29) & 0x10; 652 + 653 + 654 + if (val) { 655 + printk("%s: ERROR: attempt to send diseqc command before tone is off\n", __FUNCTION__); 656 + return -ENOTSUPP; 657 + } 658 + 659 + /* wait for diseqc queue ready */ 660 + cx24123_wait_for_diseqc(state); 661 + 662 + /* select tone mode */ 663 + cx24123_writereg(state, 0x2a, cx24123_readreg(state, 0x2a) & 0xf8); 664 + 665 + for (i = 0; i < cmd->msg_len; i++) 666 + cx24123_writereg(state, 0x2C + i, cmd->msg[i]); 667 + 668 + val = cx24123_readreg(state, 0x29); 669 + cx24123_writereg(state, 0x29, ((val & 0x90) | 0x40) | ((cmd->msg_len-3) & 3)); 670 + 671 + /* wait for diseqc message to finish sending */ 672 + cx24123_wait_for_diseqc(state); 673 + 674 + return 0; 675 + } 676 + 677 + static int cx24123_diseqc_send_burst(struct dvb_frontend* fe, fe_sec_mini_cmd_t burst) 678 + { 679 + struct cx24123_state *state = fe->demodulator_priv; 680 + int val; 681 + 682 + dprintk("%s:\n", __FUNCTION__); 683 + 684 + /* check if continuous tone has been stoped */ 685 + if (state->config->use_isl6421) 686 + val = cx24123_readlnbreg(state, 0x00) & 0x10; 687 + else 688 + val = cx24123_readreg(state, 0x29) & 0x10; 689 + 690 + 691 + if (val) { 692 + printk("%s: ERROR: attempt to send diseqc command before tone is off\n", __FUNCTION__); 693 + return -ENOTSUPP; 694 + } 695 + 696 + cx24123_wait_for_diseqc(state); 697 + 698 + /* select tone mode */ 699 + val = cx24123_readreg(state, 0x2a) & 0xf8; 700 + cx24123_writereg(state, 0x2a, val | 0x04); 701 + 702 + val = cx24123_readreg(state, 0x29); 703 + 704 + if (burst == SEC_MINI_A) 705 + cx24123_writereg(state, 0x29, ((val & 0x90) | 0x40 | 0x00)); 706 + else if (burst == SEC_MINI_B) 707 + cx24123_writereg(state, 0x29, ((val & 0x90) | 0x40 | 0x08)); 708 + else 709 + return -EINVAL; 710 + 711 + cx24123_wait_for_diseqc(state); 712 + 713 + return 0; 760 714 } 761 715 762 716 static int cx24123_read_status(struct dvb_frontend* fe, fe_status_t* status) 
··· 848 642 849 643 *status = 0; 850 644 if (lock & 0x01) 851 - *status |= FE_HAS_CARRIER | FE_HAS_SIGNAL; 645 + *status |= FE_HAS_SIGNAL; 646 + if (sync & 0x02) 647 + *status |= FE_HAS_CARRIER; 852 648 if (sync & 0x04) 853 649 *status |= FE_HAS_VITERBI; 854 650 if (sync & 0x08) 855 - *status |= FE_HAS_CARRIER; 651 + *status |= FE_HAS_SYNC; 856 652 if (sync & 0x80) 857 - *status |= FE_HAS_SYNC | FE_HAS_LOCK; 653 + *status |= FE_HAS_LOCK; 858 654 859 655 return 0; 860 656 } ··· 889 681 else 890 682 state->snr = 0; 891 683 684 + dprintk("%s: BER = %d, S/N index = %d\n",__FUNCTION__,state->lastber, state->snr); 685 + 892 686 *ber = state->lastber; 893 687 894 688 return 0; ··· 901 691 struct cx24123_state *state = fe->demodulator_priv; 902 692 *signal_strength = cx24123_readreg(state, 0x3b) << 8; /* larger = better */ 903 693 694 + dprintk("%s: Signal strength = %d\n",__FUNCTION__,*signal_strength); 695 + 904 696 return 0; 905 697 } 906 698 ··· 910 698 { 911 699 struct cx24123_state *state = fe->demodulator_priv; 912 700 *snr = state->snr; 701 + 702 + dprintk("%s: read S/N index = %d\n",__FUNCTION__,*snr); 913 703 914 704 return 0; 915 705 } ··· 921 707 struct cx24123_state *state = fe->demodulator_priv; 922 708 *ucblocks = state->lastber; 923 709 710 + dprintk("%s: ucblocks (ber) = %d\n",__FUNCTION__,*ucblocks); 711 + 924 712 return 0; 925 713 } 926 714 927 715 static int cx24123_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p) 928 716 { 929 717 struct cx24123_state *state = fe->demodulator_priv; 718 + 719 + dprintk("%s: set_frontend\n",__FUNCTION__); 930 720 931 721 if (state->config->set_ts_params) 932 722 state->config->set_ts_params(fe, 0); ··· 954 736 static int cx24123_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p) 955 737 { 956 738 struct cx24123_state *state = fe->demodulator_priv; 739 + 740 + dprintk("%s: get_frontend\n",__FUNCTION__); 957 741 958 742 if (cx24123_get_inversion(state, &p->inversion) != 0) { 
959 743 printk("%s: Failed to get inversion status\n",__FUNCTION__); ··· 983 763 984 764 switch (tone) { 985 765 case SEC_TONE_ON: 766 + dprintk("%s: isl6421 sec tone on\n",__FUNCTION__); 986 767 return cx24123_writelnbreg(state, 0x0, val | 0x10); 987 768 case SEC_TONE_OFF: 769 + dprintk("%s: isl6421 sec tone off\n",__FUNCTION__); 988 770 return cx24123_writelnbreg(state, 0x0, val & 0x2f); 989 771 default: 990 772 printk("%s: CASE reached default with tone=%d\n", __FUNCTION__, tone); ··· 1077 855 .frequency_min = 950000, 1078 856 .frequency_max = 2150000, 1079 857 .frequency_stepsize = 1011, /* kHz for QPSK frontends */ 1080 - .frequency_tolerance = 29500, 858 + .frequency_tolerance = 5000, 1081 859 .symbol_rate_min = 1000000, 1082 860 .symbol_rate_max = 45000000, 1083 861 .caps = FE_CAN_INVERSION_AUTO | 1084 862 FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | 1085 - FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | 863 + FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 | 864 + FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | 1086 865 FE_CAN_QPSK | FE_CAN_RECOVER 1087 866 }, 1088 867 ··· 1098 875 .read_snr = cx24123_read_snr, 1099 876 .read_ucblocks = cx24123_read_ucblocks, 1100 877 .diseqc_send_master_cmd = cx24123_send_diseqc_msg, 878 + .diseqc_send_burst = cx24123_diseqc_send_burst, 1101 879 .set_tone = cx24123_set_tone, 1102 880 .set_voltage = cx24123_set_voltage, 1103 881 }; 1104 882 1105 883 module_param(debug, int, 0644); 1106 - MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); 884 + MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)"); 885 + 886 + module_param(force_band, int, 0644); 887 + MODULE_PARM_DESC(force_band, "Force a specific band select (1-9, default:off)."); 1107 888 1108 889 MODULE_DESCRIPTION("DVB Frontend module for Conexant cx24123/cx24109 hardware"); 1109 890 MODULE_AUTHOR("Steven Toth");
+2 -2
drivers/media/dvb/frontends/dvb-pll.c
··· 235 235 .max = 863000000, 236 236 .count = 3, 237 237 .entries = { 238 - { 160000000, 44000000, 62500, 0xce, 0x01 }, 239 - { 455000000, 44000000, 62500, 0xce, 0x02 }, 238 + { 165000000, 44000000, 62500, 0xce, 0x01 }, 239 + { 450000000, 44000000, 62500, 0xce, 0x02 }, 240 240 { 999999999, 44000000, 62500, 0xce, 0x04 }, 241 241 }, 242 242 };
+1 -2
drivers/media/dvb/pluto2/Kconfig
··· 1 1 config DVB_PLUTO2 2 2 tristate "Pluto2 cards" 3 - depends on DVB_CORE && PCI 4 - select I2C 3 + depends on DVB_CORE && PCI && I2C 5 4 select I2C_ALGOBIT 6 5 select DVB_TDA1004X 7 6 help
+1 -1
drivers/media/dvb/pluto2/Makefile
··· 1 - obj-$(CONFIG_DVB_PLUTO2) = pluto2.o 1 + obj-$(CONFIG_DVB_PLUTO2) += pluto2.o 2 2 3 3 EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
+5 -7
drivers/media/dvb/ttpci/Kconfig
··· 1 1 config DVB_AV7110 2 2 tristate "AV7110 cards" 3 - depends on DVB_CORE && PCI 3 + depends on DVB_CORE && PCI && I2C && VIDEO_V4L1 4 4 select FW_LOADER 5 - select VIDEO_DEV 6 5 select VIDEO_SAA7146_VV 7 6 select DVB_VES1820 8 7 select DVB_VES1X93 ··· 58 59 59 60 config DVB_BUDGET 60 61 tristate "Budget cards" 61 - depends on DVB_CORE && PCI 62 + depends on DVB_CORE && PCI && I2C && VIDEO_V4L1 62 63 select VIDEO_SAA7146 63 64 select DVB_STV0299 64 65 select DVB_VES1X93 ··· 79 80 80 81 config DVB_BUDGET_CI 81 82 tristate "Budget cards with onboard CI connector" 82 - depends on DVB_CORE && PCI 83 + depends on DVB_CORE && PCI && I2C && VIDEO_V4L1 83 84 select VIDEO_SAA7146 84 85 select DVB_STV0297 85 86 select DVB_STV0299 ··· 99 100 100 101 config DVB_BUDGET_AV 101 102 tristate "Budget cards with analog video inputs" 102 - depends on DVB_CORE && PCI 103 - select VIDEO_DEV 103 + depends on DVB_CORE && PCI && I2C && VIDEO_V4L1 104 104 select VIDEO_SAA7146_VV 105 105 select DVB_STV0299 106 106 select DVB_TDA1004X ··· 117 119 118 120 config DVB_BUDGET_PATCH 119 121 tristate "AV7110 cards with Budget Patch" 120 - depends on DVB_CORE && DVB_BUDGET 122 + depends on DVB_CORE && DVB_BUDGET && VIDEO_V4L1 121 123 select DVB_AV7110 122 124 select DVB_STV0299 123 125 select DVB_VES1X93
+4 -2
drivers/media/dvb/ttpci/budget-av.c
··· 1190 1190 SAA7146_HPS_SYNC_PORT_A); 1191 1191 1192 1192 saa7113_setinput(budget_av, 0); 1193 - } else { 1194 - ciintf_init(budget_av); 1195 1193 } 1196 1194 1197 1195 /* fixme: find some sane values here... */ ··· 1208 1210 1209 1211 budget_av->budget.dvb_adapter.priv = budget_av; 1210 1212 frontend_init(budget_av); 1213 + 1214 + if (!budget_av->has_saa7113) { 1215 + ciintf_init(budget_av); 1216 + } 1211 1217 1212 1218 return 0; 1213 1219 }
+85 -20
drivers/media/dvb/ttpci/budget-ci.c
··· 71 71 struct tasklet_struct msp430_irq_tasklet; 72 72 struct tasklet_struct ciintf_irq_tasklet; 73 73 int slot_status; 74 + int ci_irq; 74 75 struct dvb_ca_en50221 ca; 75 76 char ir_dev_name[50]; 76 77 u8 tuner_pll_address; /* used for philips_tdm1316l configs */ ··· 277 276 if (slot != 0) 278 277 return -EINVAL; 279 278 280 - // trigger on RISING edge during reset so we know when READY is re-asserted 281 - saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQHI); 279 + if (budget_ci->ci_irq) { 280 + // trigger on RISING edge during reset so we know when READY is re-asserted 281 + saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQHI); 282 + } 282 283 budget_ci->slot_status = SLOTSTATUS_RESET; 283 284 ttpci_budget_debiwrite(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 0, 1, 0); 284 285 msleep(1); ··· 373 370 } 374 371 } 375 372 373 + static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open) 374 + { 375 + struct budget_ci *budget_ci = (struct budget_ci *) ca->data; 376 + unsigned int flags; 377 + 378 + // ensure we don't get spurious IRQs during initialisation 379 + if (!budget_ci->budget.ci_present) 380 + return -EINVAL; 381 + 382 + // read the CAM status 383 + flags = ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 1, 0); 384 + if (flags & CICONTROL_CAMDETECT) { 385 + // mark it as present if it wasn't before 386 + if (budget_ci->slot_status & SLOTSTATUS_NONE) { 387 + budget_ci->slot_status = SLOTSTATUS_PRESENT; 388 + } 389 + 390 + // during a RESET, we check if we can read from IO memory to see when CAM is ready 391 + if (budget_ci->slot_status & SLOTSTATUS_RESET) { 392 + if (ciintf_read_attribute_mem(ca, slot, 0) == 0x1d) { 393 + budget_ci->slot_status = SLOTSTATUS_READY; 394 + } 395 + } 396 + } else { 397 + budget_ci->slot_status = SLOTSTATUS_NONE; 398 + } 399 + 400 + if (budget_ci->slot_status != SLOTSTATUS_NONE) { 401 + if (budget_ci->slot_status & SLOTSTATUS_READY) { 402 + return DVB_CA_EN50221_POLL_CAM_PRESENT | 
DVB_CA_EN50221_POLL_CAM_READY; 403 + } 404 + return DVB_CA_EN50221_POLL_CAM_PRESENT; 405 + } 406 + 407 + return 0; 408 + } 409 + 376 410 static int ciintf_init(struct budget_ci *budget_ci) 377 411 { 378 412 struct saa7146_dev *saa = budget_ci->budget.dev; 379 413 int flags; 380 414 int result; 415 + int ci_version; 416 + int ca_flags; 381 417 382 418 memset(&budget_ci->ca, 0, sizeof(struct dvb_ca_en50221)); 383 419 ··· 424 382 saa7146_write(saa, MC1, saa7146_read(saa, MC1) | (0x800 << 16) | 0x800); 425 383 426 384 // test if it is there 427 - if ((ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CIVERSION, 1, 1, 0) & 0xa0) != 0xa0) { 385 + ci_version = ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CIVERSION, 1, 1, 0); 386 + if ((ci_version & 0xa0) != 0xa0) { 428 387 result = -ENODEV; 429 388 goto error; 430 389 } 390 + 431 391 // determine whether a CAM is present or not 432 392 flags = ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 1, 0); 433 393 budget_ci->slot_status = SLOTSTATUS_NONE; 434 394 if (flags & CICONTROL_CAMDETECT) 435 395 budget_ci->slot_status = SLOTSTATUS_PRESENT; 396 + 397 + // version 0xa2 of the CI firmware doesn't generate interrupts 398 + if (ci_version == 0xa2) { 399 + ca_flags = 0; 400 + budget_ci->ci_irq = 0; 401 + } else { 402 + ca_flags = DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE | 403 + DVB_CA_EN50221_FLAG_IRQ_FR | 404 + DVB_CA_EN50221_FLAG_IRQ_DA; 405 + budget_ci->ci_irq = 1; 406 + } 436 407 437 408 // register CI interface 438 409 budget_ci->ca.owner = THIS_MODULE; ··· 456 401 budget_ci->ca.slot_reset = ciintf_slot_reset; 457 402 budget_ci->ca.slot_shutdown = ciintf_slot_shutdown; 458 403 budget_ci->ca.slot_ts_enable = ciintf_slot_ts_enable; 404 + budget_ci->ca.poll_slot_status = ciintf_poll_slot_status; 459 405 budget_ci->ca.data = budget_ci; 460 406 if ((result = dvb_ca_en50221_init(&budget_ci->budget.dvb_adapter, 461 407 &budget_ci->ca, 462 - DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE | 463 - 
DVB_CA_EN50221_FLAG_IRQ_FR | 464 - DVB_CA_EN50221_FLAG_IRQ_DA, 1)) != 0) { 408 + ca_flags, 1)) != 0) { 465 409 printk("budget_ci: CI interface detected, but initialisation failed.\n"); 466 410 goto error; 467 411 } 412 + 468 413 // Setup CI slot IRQ 469 - tasklet_init(&budget_ci->ciintf_irq_tasklet, ciintf_interrupt, (unsigned long) budget_ci); 470 - if (budget_ci->slot_status != SLOTSTATUS_NONE) { 471 - saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQLO); 472 - } else { 473 - saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQHI); 414 + if (budget_ci->ci_irq) { 415 + tasklet_init(&budget_ci->ciintf_irq_tasklet, ciintf_interrupt, (unsigned long) budget_ci); 416 + if (budget_ci->slot_status != SLOTSTATUS_NONE) { 417 + saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQLO); 418 + } else { 419 + saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQHI); 420 + } 421 + saa7146_write(saa, IER, saa7146_read(saa, IER) | MASK_03); 474 422 } 475 - saa7146_write(saa, IER, saa7146_read(saa, IER) | MASK_03); 423 + 424 + // enable interface 476 425 ttpci_budget_debiwrite(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 477 426 CICONTROL_RESET, 1, 0); 478 427 ··· 485 426 budget_ci->budget.ci_present = 1; 486 427 487 428 // forge a fake CI IRQ so the CAM state is setup correctly 488 - flags = DVB_CA_EN50221_CAMCHANGE_REMOVED; 489 - if (budget_ci->slot_status != SLOTSTATUS_NONE) 490 - flags = DVB_CA_EN50221_CAMCHANGE_INSERTED; 491 - dvb_ca_en50221_camchange_irq(&budget_ci->ca, 0, flags); 429 + if (budget_ci->ci_irq) { 430 + flags = DVB_CA_EN50221_CAMCHANGE_REMOVED; 431 + if (budget_ci->slot_status != SLOTSTATUS_NONE) 432 + flags = DVB_CA_EN50221_CAMCHANGE_INSERTED; 433 + dvb_ca_en50221_camchange_irq(&budget_ci->ca, 0, flags); 434 + } 492 435 493 436 return 0; 494 437 ··· 504 443 struct saa7146_dev *saa = budget_ci->budget.dev; 505 444 506 445 // disable CI interrupts 507 - saa7146_write(saa, IER, saa7146_read(saa, IER) & ~MASK_03); 508 - saa7146_setgpio(saa, 0, SAA7146_GPIO_INPUT); 509 - 
tasklet_kill(&budget_ci->ciintf_irq_tasklet); 446 + if (budget_ci->ci_irq) { 447 + saa7146_write(saa, IER, saa7146_read(saa, IER) & ~MASK_03); 448 + saa7146_setgpio(saa, 0, SAA7146_GPIO_INPUT); 449 + tasklet_kill(&budget_ci->ciintf_irq_tasklet); 450 + } 451 + 452 + // reset interface 510 453 ttpci_budget_debiwrite(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 0, 1, 0); 511 454 msleep(1); 512 455 ttpci_budget_debiwrite(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, ··· 538 473 if (*isr & MASK_10) 539 474 ttpci_budget_irq10_handler(dev, isr); 540 475 541 - if ((*isr & MASK_03) && (budget_ci->budget.ci_present)) 476 + if ((*isr & MASK_03) && (budget_ci->budget.ci_present) && (budget_ci->ci_irq)) 542 477 tasklet_schedule(&budget_ci->ciintf_irq_tasklet); 543 478 } 544 479
+5 -1
drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
··· 1507 1507 1508 1508 mutex_unlock(&ttusb->semi2c); 1509 1509 1510 - dvb_register_adapter(&ttusb->adapter, "Technotrend/Hauppauge Nova-USB", THIS_MODULE); 1510 + if ((result = dvb_register_adapter(&ttusb->adapter, "Technotrend/Hauppauge Nova-USB", THIS_MODULE)) < 0) { 1511 + ttusb_free_iso_urbs(ttusb); 1512 + kfree(ttusb); 1513 + return result; 1514 + } 1511 1515 ttusb->adapter.priv = ttusb; 1512 1516 1513 1517 /* i2c */
+15 -15
drivers/media/radio/Kconfig
··· 7 7 8 8 config RADIO_CADET 9 9 tristate "ADS Cadet AM/FM Tuner" 10 - depends on ISA && VIDEO_DEV 10 + depends on ISA && VIDEO_V4L1 11 11 ---help--- 12 12 Choose Y here if you have one of these AM/FM radio cards, and then 13 13 fill in the port address below. ··· 25 25 26 26 config RADIO_RTRACK 27 27 tristate "AIMSlab RadioTrack (aka RadioReveal) support" 28 - depends on ISA && VIDEO_DEV 28 + depends on ISA && VIDEO_V4L1 29 29 ---help--- 30 30 Choose Y here if you have one of these FM radio cards, and then fill 31 31 in the port address below. ··· 59 59 60 60 config RADIO_RTRACK2 61 61 tristate "AIMSlab RadioTrack II support" 62 - depends on ISA && VIDEO_DEV 62 + depends on ISA && VIDEO_V4L1 63 63 ---help--- 64 64 Choose Y here if you have this FM radio card, and then fill in the 65 65 port address below. ··· 82 82 83 83 config RADIO_AZTECH 84 84 tristate "Aztech/Packard Bell Radio" 85 - depends on ISA && VIDEO_DEV 85 + depends on ISA && VIDEO_V4L1 86 86 ---help--- 87 87 Choose Y here if you have one of these FM radio cards, and then fill 88 88 in the port address below. ··· 106 106 107 107 config RADIO_GEMTEK 108 108 tristate "GemTek Radio Card support" 109 - depends on ISA && VIDEO_DEV 109 + depends on ISA && VIDEO_V4L1 110 110 ---help--- 111 111 Choose Y here if you have this FM radio card, and then fill in the 112 112 port address below. ··· 131 131 132 132 config RADIO_GEMTEK_PCI 133 133 tristate "GemTek PCI Radio Card support" 134 - depends on VIDEO_DEV && PCI 134 + depends on VIDEO_V4L1 && PCI 135 135 ---help--- 136 136 Choose Y here if you have this PCI FM radio card. 137 137 ··· 145 145 146 146 config RADIO_MAXIRADIO 147 147 tristate "Guillemot MAXI Radio FM 2000 radio" 148 - depends on VIDEO_DEV && PCI 148 + depends on VIDEO_V4L1 && PCI 149 149 ---help--- 150 150 Choose Y here if you have this radio card. This card may also be 151 151 found as Gemtek PCI FM. 
··· 160 160 161 161 config RADIO_MAESTRO 162 162 tristate "Maestro on board radio" 163 - depends on VIDEO_DEV 163 + depends on VIDEO_V4L1 164 164 ---help--- 165 165 Say Y here to directly support the on-board radio tuner on the 166 166 Maestro 2 or 2E sound card. ··· 175 175 176 176 config RADIO_MIROPCM20 177 177 tristate "miroSOUND PCM20 radio" 178 - depends on ISA && VIDEO_DEV && SOUND_ACI_MIXER 178 + depends on ISA && VIDEO_V4L1 && SOUND_ACI_MIXER 179 179 ---help--- 180 180 Choose Y here if you have this FM radio card. You also need to say Y 181 181 to "ACI mixer (miroSOUND PCM1-pro/PCM12/PCM20 radio)" (in "Sound") ··· 208 208 209 209 config RADIO_SF16FMI 210 210 tristate "SF16FMI Radio" 211 - depends on ISA && VIDEO_DEV 211 + depends on ISA && VIDEO_V4L1 212 212 ---help--- 213 213 Choose Y here if you have one of these FM radio cards. If you 214 214 compile the driver into the kernel and your card is not PnP one, you ··· 225 225 226 226 config RADIO_SF16FMR2 227 227 tristate "SF16FMR2 Radio" 228 - depends on ISA && VIDEO_DEV 228 + depends on ISA && VIDEO_V4L1 229 229 ---help--- 230 230 Choose Y here if you have one of these FM radio cards. 231 231 ··· 239 239 240 240 config RADIO_TERRATEC 241 241 tristate "TerraTec ActiveRadio ISA Standalone" 242 - depends on ISA && VIDEO_DEV 242 + depends on ISA && VIDEO_V4L1 243 243 ---help--- 244 244 Choose Y here if you have this FM radio card, and then fill in the 245 245 port address below. (TODO) ··· 268 268 269 269 config RADIO_TRUST 270 270 tristate "Trust FM radio card" 271 - depends on ISA && VIDEO_DEV 271 + depends on ISA && VIDEO_V4L1 272 272 help 273 273 This is a driver for the Trust FM radio cards. Say Y if you have 274 274 such a card and want to use it under Linux. ··· 286 286 287 287 config RADIO_TYPHOON 288 288 tristate "Typhoon Radio (a.k.a. 
EcoRadio)" 289 - depends on ISA && VIDEO_DEV 289 + depends on ISA && VIDEO_V4L1 290 290 ---help--- 291 291 Choose Y here if you have one of these FM radio cards, and then fill 292 292 in the port address and the frequency used for muting below. ··· 330 330 331 331 config RADIO_ZOLTRIX 332 332 tristate "Zoltrix Radio" 333 - depends on ISA && VIDEO_DEV 333 + depends on ISA && VIDEO_V4L1 334 334 ---help--- 335 335 Choose Y here if you have one of these FM radio cards, and then fill 336 336 in the port address below.
+47 -32
drivers/media/video/Kconfig
··· 2 2 # Multimedia Video device configuration 3 3 # 4 4 5 - menu "Video For Linux" 5 + menu "Video Capture Adapters" 6 6 depends on VIDEO_DEV 7 7 8 - comment "Video Adapters" 8 + comment "Video Capture Adapters" 9 9 10 10 config VIDEO_ADV_DEBUG 11 11 bool "Enable advanced debug functionality" ··· 16 16 V4L devices. 17 17 In doubt, say N. 18 18 19 + config VIDEO_VIVI 20 + tristate "Virtual Video Driver" 21 + depends on VIDEO_V4L2 && !SPARC32 && !SPARC64 22 + select VIDEO_BUF 23 + default n 24 + ---help--- 25 + Enables a virtual video driver. This device shows a color bar 26 + and a timestamp, as a real device would generate by using V4L2 27 + api. 28 + Say Y here if you want to test video apps or debug V4L devices. 29 + In doubt, say N. 30 + 19 31 source "drivers/media/video/bt8xx/Kconfig" 20 32 21 33 config VIDEO_SAA6588 22 34 tristate "SAA6588 Radio Chip RDS decoder support on BT848 cards" 23 - depends on VIDEO_DEV && I2C && VIDEO_BT848 35 + depends on I2C && VIDEO_BT848 24 36 25 37 help 26 38 Support for Radio Data System (RDS) decoder. This allows seeing ··· 44 32 45 33 config VIDEO_PMS 46 34 tristate "Mediavision Pro Movie Studio Video For Linux" 47 - depends on VIDEO_DEV && ISA 35 + depends on ISA && VIDEO_V4L1 48 36 help 49 37 Say Y if you have such a thing. 50 38 ··· 53 41 54 42 config VIDEO_PLANB 55 43 tristate "PlanB Video-In on PowerMac" 56 - depends on PPC_PMAC && VIDEO_DEV && BROKEN 44 + depends on PPC_PMAC && VIDEO_V4L1 && BROKEN 57 45 help 58 46 PlanB is the V4L driver for the PowerMac 7x00/8x00 series video 59 47 input hardware. If you want to experiment with this, say Y. ··· 64 52 65 53 config VIDEO_BWQCAM 66 54 tristate "Quickcam BW Video For Linux" 67 - depends on VIDEO_DEV && PARPORT 55 + depends on PARPORT && VIDEO_V4L1 68 56 help 69 57 Say Y have if you the black and white version of the QuickCam 70 58 camera. See the next option for the color version. 
··· 74 62 75 63 config VIDEO_CQCAM 76 64 tristate "QuickCam Colour Video For Linux (EXPERIMENTAL)" 77 - depends on EXPERIMENTAL && VIDEO_DEV && PARPORT 65 + depends on EXPERIMENTAL && PARPORT && VIDEO_V4L1 78 66 help 79 67 This is the video4linux driver for the colour version of the 80 68 Connectix QuickCam. If you have one of these cameras, say Y here, ··· 85 73 86 74 config VIDEO_W9966 87 75 tristate "W9966CF Webcam (FlyCam Supra and others) Video For Linux" 88 - depends on PARPORT_1284 && VIDEO_DEV && PARPORT 76 + depends on PARPORT_1284 && PARPORT && VIDEO_V4L1 89 77 help 90 78 Video4linux driver for Winbond's w9966 based Webcams. 91 79 Currently tested with the LifeView FlyCam Supra. ··· 98 86 99 87 config VIDEO_CPIA 100 88 tristate "CPiA Video For Linux" 101 - depends on VIDEO_DEV 89 + depends on VIDEO_V4L1 102 90 ---help--- 103 91 This is the video4linux driver for cameras based on Vision's CPiA 104 92 (Colour Processor Interface ASIC), such as the Creative Labs Video ··· 135 123 136 124 config VIDEO_SAA5246A 137 125 tristate "SAA5246A, SAA5281 Teletext processor" 138 - depends on VIDEO_DEV && I2C 126 + depends on I2C && VIDEO_V4L1 139 127 help 140 128 Support for I2C bus based teletext using the SAA5246A or SAA5281 141 129 chip. Useful only if you live in Europe. ··· 162 150 163 151 config VIDEO_VINO 164 152 tristate "SGI Vino Video For Linux (EXPERIMENTAL)" 165 - depends on VIDEO_DEV && I2C && SGI_IP22 && EXPERIMENTAL 153 + depends on I2C && SGI_IP22 && EXPERIMENTAL && VIDEO_V4L1 166 154 select I2C_ALGO_SGI 167 155 help 168 156 Say Y here to build in support for the Vino video input system found ··· 170 158 171 159 config VIDEO_STRADIS 172 160 tristate "Stradis 4:2:2 MPEG-2 video driver (EXPERIMENTAL)" 173 - depends on EXPERIMENTAL && VIDEO_DEV && PCI 161 + depends on EXPERIMENTAL && PCI && VIDEO_V4L1 && !PPC64 174 162 help 175 163 Say Y here to enable support for the Stradis 4:2:2 MPEG-2 video 176 164 driver for PCI. 
There is a product page at ··· 178 166 179 167 config VIDEO_ZORAN 180 168 tristate "Zoran ZR36057/36067 Video For Linux" 181 - depends on VIDEO_DEV && PCI && I2C_ALGOBIT 169 + depends on PCI && I2C_ALGOBIT && VIDEO_V4L1 && !PPC64 182 170 help 183 171 Say Y for support for MJPEG capture cards based on the Zoran 184 172 36057/36067 PCI controller chipset. This includes the Iomega ··· 226 214 227 215 config VIDEO_ZR36120 228 216 tristate "Zoran ZR36120/36125 Video For Linux" 229 - depends on VIDEO_DEV && PCI && I2C && BROKEN 217 + depends on PCI && I2C && VIDEO_V4L1 && BROKEN 230 218 help 231 219 Support for ZR36120/ZR36125 based frame grabber/overlay boards. 232 220 This includes the Victor II, WaveWatcher, Video Wonder, Maxi-TV, ··· 238 226 239 227 config VIDEO_MEYE 240 228 tristate "Sony Vaio Picturebook Motion Eye Video For Linux" 241 - depends on VIDEO_DEV && PCI && SONYPI 229 + depends on PCI && SONYPI && VIDEO_V4L1 242 230 ---help--- 243 231 This is the video4linux driver for the Motion Eye camera found 244 232 in the Vaio Picturebook laptops. Please read the material in ··· 254 242 255 243 config VIDEO_MXB 256 244 tristate "Siemens-Nixdorf 'Multimedia eXtension Board'" 257 - depends on VIDEO_DEV && PCI 245 + depends on PCI && VIDEO_V4L1 258 246 select VIDEO_SAA7146_VV 259 247 select VIDEO_TUNER 260 248 ---help--- ··· 266 254 267 255 config VIDEO_DPC 268 256 tristate "Philips-Semiconductors 'dpc7146 demonstration board'" 269 - depends on VIDEO_DEV && PCI 257 + depends on PCI && VIDEO_V4L1 270 258 select VIDEO_SAA7146_VV 259 + select VIDEO_V4L2 271 260 ---help--- 272 261 This is a video4linux driver for the 'dpc7146 demonstration 273 262 board' by Philips-Semiconductors. 
It's the reference design ··· 281 268 282 269 config VIDEO_HEXIUM_ORION 283 270 tristate "Hexium HV-PCI6 and Orion frame grabber" 284 - depends on VIDEO_DEV && PCI 271 + depends on PCI && VIDEO_V4L1 285 272 select VIDEO_SAA7146_VV 273 + select VIDEO_V4L2 286 274 ---help--- 287 275 This is a video4linux driver for the Hexium HV-PCI6 and 288 276 Orion frame grabber cards by Hexium. ··· 293 279 294 280 config VIDEO_HEXIUM_GEMINI 295 281 tristate "Hexium Gemini frame grabber" 296 - depends on VIDEO_DEV && PCI 282 + depends on PCI && VIDEO_V4L1 297 283 select VIDEO_SAA7146_VV 284 + select VIDEO_V4L2 298 285 ---help--- 299 286 This is a video4linux driver for the Hexium Gemini frame 300 287 grabber card by Hexium. Please note that the Gemini Dual ··· 308 293 309 294 config VIDEO_OVCAMCHIP 310 295 tristate "OmniVision Camera Chip support" 311 - depends on VIDEO_DEV && I2C 296 + depends on I2C && VIDEO_V4L1 312 297 ---help--- 313 298 Support for the OmniVision OV6xxx and OV7xxx series of camera chips. 314 299 This driver is intended to be used with the ov511 and w9968cf USB ··· 319 304 320 305 config VIDEO_M32R_AR 321 306 tristate "AR devices" 322 - depends on M32R 307 + depends on M32R && VIDEO_V4L1 323 308 ---help--- 324 309 This is a video4linux driver for the Renesas AR (Artificial Retina) 325 310 camera module. ··· 380 365 source "drivers/media/video/cx25840/Kconfig" 381 366 382 367 config VIDEO_SAA711X 383 - tristate "Philips SAA7113/4/5 video decoders" 384 - depends on VIDEO_DEV && I2C && EXPERIMENTAL 368 + tristate "Philips SAA7113/4/5 video decoders (OBSOLETED)" 369 + depends on VIDEO_V4L1 && I2C && EXPERIMENTAL 385 370 ---help--- 386 - Support for the Philips SAA7113/4/5 video decoders. 371 + Old support for the Philips SAA7113/4 video decoders. 387 372 388 373 To compile this driver as a module, choose M here: the 389 374 module will be called saa7115. 
390 375 391 376 config VIDEO_SAA7127 392 377 tristate "Philips SAA7127/9 digital video encoders" 393 - depends on VIDEO_DEV && I2C && EXPERIMENTAL 378 + depends on VIDEO_V4L2 && I2C && EXPERIMENTAL 394 379 ---help--- 395 380 Support for the Philips SAA7127/9 digital video encoders. 396 381 ··· 399 384 400 385 config VIDEO_UPD64031A 401 386 tristate "NEC Electronics uPD64031A Ghost Reduction" 402 - depends on VIDEO_DEV && I2C && EXPERIMENTAL 387 + depends on VIDEO_V4L2 && I2C && EXPERIMENTAL 403 388 ---help--- 404 389 Support for the NEC Electronics uPD64031A Ghost Reduction 405 390 video chip. It is most often found in NTSC TV cards made for ··· 411 396 412 397 config VIDEO_UPD64083 413 398 tristate "NEC Electronics uPD64083 3-Dimensional Y/C separation" 414 - depends on VIDEO_DEV && I2C && EXPERIMENTAL 399 + depends on VIDEO_V4L2 && I2C && EXPERIMENTAL 415 400 ---help--- 416 401 Support for the NEC Electronics uPD64083 3-Dimensional Y/C 417 402 separation video chip. It is used to improve the quality of ··· 433 418 434 419 config USB_DSBR 435 420 tristate "D-Link USB FM radio support (EXPERIMENTAL)" 436 - depends on USB && VIDEO_DEV && EXPERIMENTAL 421 + depends on USB && VIDEO_V4L1 && EXPERIMENTAL 437 422 ---help--- 438 423 Say Y here if you want to connect this type of radio to your 439 424 computer's USB port. Note that the audio is not digital, and ··· 449 434 450 435 config USB_OV511 451 436 tristate "USB OV511 Camera support" 452 - depends on USB && VIDEO_DEV 437 + depends on USB && VIDEO_V4L1 453 438 ---help--- 454 439 Say Y here if you want to connect this type of camera to your 455 440 computer's USB port. See <file:Documentation/video4linux/ov511.txt> ··· 460 445 461 446 config USB_SE401 462 447 tristate "USB SE401 Camera support" 463 - depends on USB && VIDEO_DEV 448 + depends on USB && VIDEO_V4L1 464 449 ---help--- 465 450 Say Y here if you want to connect this type of camera to your 466 451 computer's USB port. 
See <file:Documentation/video4linux/se401.txt> ··· 473 458 474 459 config USB_STV680 475 460 tristate "USB STV680 (Pencam) Camera support" 476 - depends on USB && VIDEO_DEV 461 + depends on USB && VIDEO_V4L1 477 462 ---help--- 478 463 Say Y here if you want to connect this type of camera to your 479 464 computer's USB port. This includes the Pencam line of cameras. ··· 485 470 486 471 config USB_W9968CF 487 472 tristate "USB W996[87]CF JPEG Dual Mode Camera support" 488 - depends on USB && VIDEO_DEV && I2C 473 + depends on USB && VIDEO_V4L1 && I2C 489 474 select VIDEO_OVCAMCHIP 490 475 ---help--- 491 476 Say Y here if you want support for cameras based on OV681 or
+9 -1
drivers/media/video/Makefile
··· 10 10 11 11 msp3400-objs := msp3400-driver.o msp3400-kthreads.o 12 12 13 - obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o v4l1-compat.o compat_ioctl32.o 13 + obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o compat_ioctl32.o 14 + 15 + ifeq ($(CONFIG_VIDEO_V4L1_COMPAT),y) 16 + obj-$(CONFIG_VIDEO_DEV) += v4l1-compat.o 17 + endif 14 18 15 19 obj-$(CONFIG_VIDEO_BT848) += bt8xx/ 16 20 obj-$(CONFIG_VIDEO_BT848) += tvaudio.o tda7432.o tda9875.o ir-kbd-i2c.o ··· 88 84 obj-$(CONFIG_USB_KONICAWC) += usbvideo/ 89 85 obj-$(CONFIG_USB_VICAM) += usbvideo/ 90 86 87 + obj-$(CONFIG_VIDEO_VIVI) += vivi.o 88 + 91 89 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core 90 + extra-cflags-$(CONFIG_VIDEO_V4L1_COMPAT) += -DCONFIG_VIDEO_V4L1_COMPAT 91 +
+1 -1
drivers/media/video/bt8xx/Kconfig
··· 1 1 config VIDEO_BT848 2 2 tristate "BT848 Video For Linux" 3 - depends on VIDEO_DEV && PCI && I2C 3 + depends on VIDEO_DEV && PCI && I2C && VIDEO_V4L2 4 4 select I2C_ALGOBIT 5 5 select FW_LOADER 6 6 select VIDEO_BTCX
+1 -1
drivers/media/video/bt8xx/Makefile
··· 8 8 9 9 obj-$(CONFIG_VIDEO_BT848) += bttv.o 10 10 11 - EXTRA_CFLAGS += -I$(src)/.. 11 + EXTRA_CFLAGS += -Idrivers/media/video 12 12 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
+2 -2
drivers/media/video/bt8xx/bttv-cards.c
··· 2991 2991 2992 2992 if (UNSET != audiomux[0]) { 2993 2993 gpiobits = 0; 2994 - for (i = 0; i < 5; i++) { 2994 + for (i = 0; i < 4; i++) { 2995 2995 bttv_tvcards[btv->c.type].gpiomux[i] = audiomux[i]; 2996 2996 gpiobits |= audiomux[i]; 2997 2997 } 2998 2998 } else { 2999 2999 gpiobits = audioall; 3000 - for (i = 0; i < 5; i++) { 3000 + for (i = 0; i < 4; i++) { 3001 3001 bttv_tvcards[btv->c.type].gpiomux[i] = audioall; 3002 3002 } 3003 3003 }
+6 -8
drivers/media/video/bt8xx/bttv-risc.c
··· 233 233 const struct bttv_format *fmt, struct bttv_overlay *ov, 234 234 int skip_even, int skip_odd) 235 235 { 236 - int instructions,rc,line,maxy,start,end,skip,nskips; 236 + int dwords,rc,line,maxy,start,end,skip,nskips; 237 237 struct btcx_skiplist *skips; 238 238 u32 *rp,ri,ra; 239 239 u32 addr; ··· 242 242 if (NULL == (skips = kmalloc(sizeof(*skips) * ov->nclips,GFP_KERNEL))) 243 243 return -ENOMEM; 244 244 245 - /* estimate risc mem: worst case is (clip+1) * lines instructions 245 + /* estimate risc mem: worst case is (1.5*clip+1) * lines instructions 246 246 + sync + jump (all 2 dwords) */ 247 - instructions = (ov->nclips + 1) * 248 - ((skip_even || skip_odd) ? ov->w.height>>1 : ov->w.height); 249 - instructions += 2; 250 - if ((rc = btcx_riscmem_alloc(btv->c.pci,risc,instructions*8)) < 0) { 247 + dwords = (3 * ov->nclips + 2) * 248 + ((skip_even || skip_odd) ? (ov->w.height+1)>>1 : ov->w.height); 249 + dwords += 4; 250 + if ((rc = btcx_riscmem_alloc(btv->c.pci,risc,dwords*4)) < 0) { 251 251 kfree(skips); 252 252 return rc; 253 253 } ··· 276 276 if (line > maxy) 277 277 btcx_calc_skips(line, ov->w.width, &maxy, 278 278 skips, &nskips, ov->clips, ov->nclips); 279 - else 280 - nskips = 0; 281 279 282 280 /* write out risc code */ 283 281 for (start = 0, skip = 0; start < ov->w.width; start = end) {
+2 -47
drivers/media/video/cx25840/cx25840-firmware.c
··· 39 39 40 40 #define FWDEV(x) &((x)->adapter->dev) 41 41 42 - static int fastfw = 1; 43 42 static char *firmware = FWFILE; 44 43 45 - module_param(fastfw, bool, 0444); 46 44 module_param(firmware, charp, 0444); 47 45 48 - MODULE_PARM_DESC(fastfw, "Load firmware fast [0=100MHz 1=333MHz (default)]"); 49 46 MODULE_PARM_DESC(firmware, "Firmware image [default: " FWFILE "]"); 50 - 51 - static void set_i2c_delay(struct i2c_client *client, int delay) 52 - { 53 - struct i2c_algo_bit_data *algod = client->adapter->algo_data; 54 - 55 - /* We aren't guaranteed to be using algo_bit, 56 - * so avoid the null pointer dereference 57 - * and disable the 'fast firmware load' */ 58 - if (algod) { 59 - algod->udelay = delay; 60 - } else { 61 - fastfw = 0; 62 - } 63 - } 64 47 65 48 static void start_fw_load(struct i2c_client *client) 66 49 { ··· 54 71 cx25840_write(client, 0x803, 0x0b); 55 72 /* AUTO_INC_DIS=1 */ 56 73 cx25840_write(client, 0x000, 0x20); 57 - 58 - if (fastfw) 59 - set_i2c_delay(client, 3); 60 74 } 61 75 62 76 static void end_fw_load(struct i2c_client *client) 63 77 { 64 - if (fastfw) 65 - set_i2c_delay(client, 10); 66 - 67 78 /* AUTO_INC_DIS=0 */ 68 79 cx25840_write(client, 0x000, 0x00); 69 80 /* DL_ENABLE=0 */ ··· 84 107 int sent; 85 108 86 109 if ((sent = i2c_master_send(client, data, size)) < size) { 87 - 88 - if (fastfw) { 89 - v4l_err(client, "333MHz i2c firmware load failed\n"); 90 - fastfw = 0; 91 - set_i2c_delay(client, 10); 92 - 93 - if (sent > 2) { 94 - u16 dl_addr = cx25840_read(client, 0x801) << 8; 95 - dl_addr |= cx25840_read(client, 0x800); 96 - dl_addr -= sent - 2; 97 - cx25840_write(client, 0x801, dl_addr >> 8); 98 - cx25840_write(client, 0x800, dl_addr & 0xff); 99 - } 100 - 101 - if (i2c_master_send(client, data, size) < size) { 102 - v4l_err(client, "100MHz i2c firmware load failed\n"); 103 - return -ENOSYS; 104 - } 105 - 106 - } else { 107 - v4l_err(client, "firmware load i2c failure\n"); 108 - return -ENOSYS; 109 - } 110 - 110 + v4l_err(client, 
"firmware load i2c failure\n"); 111 + return -ENOSYS; 111 112 } 112 113 113 114 return 0;
+1 -1
drivers/media/video/cx88/cx88-cards.c
··· 564 564 }, 565 565 [CX88_BOARD_PCHDTV_HD3000] = { 566 566 .name = "pcHDTV HD3000 HDTV", 567 - .tuner_type = TUNER_THOMSON_DTT7610, 567 + .tuner_type = TUNER_THOMSON_DTT761X, 568 568 .radio_type = UNSET, 569 569 .tuner_addr = ADDR_UNSET, 570 570 .radio_addr = ADDR_UNSET,
+10 -6
drivers/media/video/cx88/cx88-core.c
··· 146 146 fields++; 147 147 148 148 /* estimate risc mem: worst case is one write per page border + 149 - one write per scan line + syncs + jump (all 2 dwords) */ 150 - instructions = (bpl * lines * fields) / PAGE_SIZE + lines * fields; 151 - instructions += 3 + 4; 149 + one write per scan line + syncs + jump (all 2 dwords). Padding 150 + can cause next bpl to start close to a page border. First DMA 151 + region may be smaller than PAGE_SIZE */ 152 + instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE + lines); 153 + instructions += 2; 152 154 if ((rc = btcx_riscmem_alloc(pci,risc,instructions*8)) < 0) 153 155 return rc; 154 156 ··· 178 176 int rc; 179 177 180 178 /* estimate risc mem: worst case is one write per page border + 181 - one write per scan line + syncs + jump (all 2 dwords) */ 182 - instructions = (bpl * lines) / PAGE_SIZE + lines; 183 - instructions += 3 + 4; 179 + one write per scan line + syncs + jump (all 2 dwords). Here 180 + there is no padding and no sync. First DMA region may be smaller 181 + than PAGE_SIZE */ 182 + instructions = 1 + (bpl * lines) / PAGE_SIZE + lines; 183 + instructions += 1; 184 184 if ((rc = btcx_riscmem_alloc(pci,risc,instructions*8)) < 0) 185 185 return rc; 186 186
+1 -1
drivers/media/video/cx88/cx88-dvb.c
··· 372 372 static struct or51132_config pchdtv_hd3000 = { 373 373 .demod_address = 0x15, 374 374 .pll_address = 0x61, 375 - .pll_desc = &dvb_pll_thomson_dtt7610, 375 + .pll_desc = &dvb_pll_thomson_dtt761x, 376 376 .set_ts_params = or51132_set_ts_param, 377 377 }; 378 378 #endif
+2
drivers/media/video/cx88/cx88-video.c
··· 35 35 #include "cx88.h" 36 36 #include <media/v4l2-common.h> 37 37 38 + #ifdef CONFIG_VIDEO_V4L1_COMPAT 38 39 /* Include V4L1 specific functions. Should be removed soon */ 39 40 #include <linux/videodev.h> 41 + #endif 40 42 41 43 MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards"); 42 44 MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
+1 -1
drivers/media/video/em28xx/Kconfig
··· 1 1 config VIDEO_EM28XX 2 2 tristate "Empia EM2800/2820/2840 USB video capture support" 3 - depends on VIDEO_DEV && USB && I2C 3 + depends on VIDEO_V4L1 && USB && I2C 4 4 select VIDEO_BUF 5 5 select VIDEO_TUNER 6 6 select VIDEO_TVEEPROM
+5 -5
drivers/media/video/em28xx/em28xx-video.c
··· 1576 1576 errCode = em28xx_config(dev); 1577 1577 if (errCode) { 1578 1578 em28xx_errdev("error configuring device\n"); 1579 - kfree(dev); 1580 1579 em28xx_devused&=~(1<<dev->devno); 1580 + kfree(dev); 1581 1581 return -ENOMEM; 1582 1582 } 1583 1583 ··· 1603 1603 dev->vdev = video_device_alloc(); 1604 1604 if (NULL == dev->vdev) { 1605 1605 em28xx_errdev("cannot allocate video_device.\n"); 1606 - kfree(dev); 1607 1606 em28xx_devused&=~(1<<dev->devno); 1607 + kfree(dev); 1608 1608 return -ENOMEM; 1609 1609 } 1610 1610 ··· 1612 1612 if (NULL == dev->vbi_dev) { 1613 1613 em28xx_errdev("cannot allocate video_device.\n"); 1614 1614 kfree(dev->vdev); 1615 - kfree(dev); 1616 1615 em28xx_devused&=~(1<<dev->devno); 1616 + kfree(dev); 1617 1617 return -ENOMEM; 1618 1618 } 1619 1619 ··· 1650 1650 mutex_unlock(&dev->lock); 1651 1651 list_del(&dev->devlist); 1652 1652 video_device_release(dev->vdev); 1653 - kfree(dev); 1654 1653 em28xx_devused&=~(1<<dev->devno); 1654 + kfree(dev); 1655 1655 return -ENODEV; 1656 1656 } 1657 1657 ··· 1662 1662 list_del(&dev->devlist); 1663 1663 video_device_release(dev->vbi_dev); 1664 1664 video_device_release(dev->vdev); 1665 - kfree(dev); 1666 1665 em28xx_devused&=~(1<<dev->devno); 1666 + kfree(dev); 1667 1667 return -ENODEV; 1668 1668 } else { 1669 1669 printk("registered VBI\n");
+1 -1
drivers/media/video/et61x251/Kconfig
··· 1 1 config USB_ET61X251 2 2 tristate "USB ET61X[12]51 PC Camera Controller support" 3 - depends on USB && VIDEO_DEV 3 + depends on USB && VIDEO_V4L1 4 4 ---help--- 5 5 Say Y here if you want support for cameras based on Etoms ET61X151 6 6 or ET61X251 PC Camera Controllers.
+1 -1
drivers/media/video/pwc/Kconfig
··· 1 1 config USB_PWC 2 2 tristate "USB Philips Cameras" 3 - depends on USB && VIDEO_DEV 3 + depends on USB && VIDEO_V4L1 4 4 ---help--- 5 5 Say Y or M here if you want to use one of these Philips & OEM 6 6 webcams:
-17
drivers/media/video/pwc/Makefile
··· 1 - ifneq ($(KERNELRELEASE),) 2 - 3 1 pwc-objs := pwc-if.o pwc-misc.o pwc-ctrl.o pwc-uncompress.o pwc-timon.o pwc-kiara.o 4 2 5 3 obj-$(CONFIG_USB_PWC) += pwc.o 6 - 7 - else 8 - 9 - KDIR := /lib/modules/$(shell uname -r)/build 10 - PWD := $(shell pwd) 11 - 12 - default: 13 - $(MAKE) -C $(KDIR) SUBDIRS=$(PWD) modules 14 - 15 - endif 16 - 17 - clean: 18 - rm -f *.[oas] .*.flags *.ko .*.cmd .*.d .*.tmp *.mod.c 19 - rm -rf .tmp_versions 20 -
+1
drivers/media/video/saa7127.c
··· 142 142 static const struct i2c_reg_value saa7129_init_config_extra[] = { 143 143 { SAA7127_REG_OUTPUT_PORT_CONTROL, 0x38 }, 144 144 { SAA7127_REG_VTRIG, 0xfa }, 145 + { 0, 0 } 145 146 }; 146 147 147 148 static const struct i2c_reg_value saa7127_init_config_common[] = {
+1
drivers/media/video/saa7134/saa7134-cards.c
··· 3504 3504 /* power-up tuner chip */ 3505 3505 saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x00040000, 0x00040000); 3506 3506 saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x00040000, 0x00000000); 3507 + break; 3507 3508 case SAA7134_BOARD_PINNACLE_300I_DVBT_PAL: 3508 3509 /* this turns the remote control chip off to work around a bug in it */ 3509 3510 saa_writeb(SAA7134_GPIO_GPMODE1, 0x80);
+5 -1
drivers/media/video/saa7134/saa7134-core.c
··· 548 548 if (report & SAA7134_IRQ_REPORT_GPIO16) { 549 549 switch (dev->has_remote) { 550 550 case SAA7134_REMOTE_GPIO: 551 + if (!dev->remote) 552 + break; 551 553 if (dev->remote->mask_keydown & 0x10000) { 552 554 saa7134_input_irq(dev); 553 555 } ··· 566 564 if (report & SAA7134_IRQ_REPORT_GPIO18) { 567 565 switch (dev->has_remote) { 568 566 case SAA7134_REMOTE_GPIO: 567 + if (!dev->remote) 568 + break; 569 569 if ((dev->remote->mask_keydown & 0x40000) || 570 570 (dev->remote->mask_keyup & 0x40000)) { 571 571 saa7134_input_irq(dev); ··· 680 676 SAA7134_IRQ2_INTE_PE | 681 677 SAA7134_IRQ2_INTE_AR; 682 678 683 - if (dev->has_remote == SAA7134_REMOTE_GPIO) { 679 + if (dev->has_remote == SAA7134_REMOTE_GPIO && dev->remote) { 684 680 if (dev->remote->mask_keydown & 0x10000) 685 681 irq2_mask |= SAA7134_IRQ2_INTE_GPIO16; 686 682 else if (dev->remote->mask_keydown & 0x40000)
+2
drivers/media/video/saa7134/saa7134-video.c
··· 31 31 #include "saa7134.h" 32 32 #include <media/v4l2-common.h> 33 33 34 + #ifdef CONFIG_VIDEO_V4L1_COMPAT 34 35 /* Include V4L1 specific functions. Should be removed soon */ 35 36 #include <linux/videodev.h> 37 + #endif 36 38 37 39 /* ------------------------------------------------------------------ */ 38 40
+1 -1
drivers/media/video/sn9c102/Kconfig
··· 1 1 config USB_SN9C102 2 2 tristate "USB SN9C10x PC Camera Controller support" 3 - depends on USB && VIDEO_DEV 3 + depends on USB && VIDEO_V4L1 4 4 ---help--- 5 5 Say Y here if you want support for cameras based on SONiX SN9C101, 6 6 SN9C102 or SN9C103 PC Camera Controllers.
+2 -2
drivers/media/video/tuner-types.c
··· 877 877 /* ------------ TUNER_LG_TDVS_H062F - INFINEON ATSC ------------ */ 878 878 879 879 static struct tuner_range tuner_tua6034_ntsc_ranges[] = { 880 - { 16 * 160.00 /*MHz*/, 0x8e, 0x01 }, 881 - { 16 * 455.00 /*MHz*/, 0x8e, 0x02 }, 880 + { 16 * 165.00 /*MHz*/, 0x8e, 0x01 }, 881 + { 16 * 450.00 /*MHz*/, 0x8e, 0x02 }, 882 882 { 16 * 999.99 , 0x8e, 0x04 }, 883 883 }; 884 884
+1 -1
drivers/media/video/tveeprom.c
··· 218 218 /* 110-119 */ 219 219 { TUNER_ABSENT, "Thompson DTT75105"}, 220 220 { TUNER_ABSENT, "Conexant_CX24109"}, 221 - { TUNER_ABSENT, "TCL M2523_5N_E"}, 221 + { TUNER_TCL_2002N, "TCL M2523_5N_E"}, 222 222 { TUNER_ABSENT, "TCL M2523_3DB_E"}, 223 223 { TUNER_ABSENT, "Philips 8275A"}, 224 224 { TUNER_ABSENT, "Microtune MT2060"},
+3 -3
drivers/media/video/usbvideo/Kconfig
··· 3 3 4 4 config USB_VICAM 5 5 tristate "USB 3com HomeConnect (aka vicam) support (EXPERIMENTAL)" 6 - depends on USB && VIDEO_DEV && EXPERIMENTAL 6 + depends on USB && VIDEO_V4L1 && EXPERIMENTAL 7 7 select VIDEO_USBVIDEO 8 8 ---help--- 9 9 Say Y here if you have 3com homeconnect camera (vicam). ··· 13 13 14 14 config USB_IBMCAM 15 15 tristate "USB IBM (Xirlink) C-it Camera support" 16 - depends on USB && VIDEO_DEV 16 + depends on USB && VIDEO_V4L1 17 17 select VIDEO_USBVIDEO 18 18 ---help--- 19 19 Say Y here if you want to connect a IBM "C-It" camera, also known as ··· 28 28 29 29 config USB_KONICAWC 30 30 tristate "USB Konica Webcam support" 31 - depends on USB && VIDEO_DEV 31 + depends on USB && VIDEO_V4L1 32 32 select VIDEO_USBVIDEO 33 33 ---help--- 34 34 Say Y here if you want support for webcams based on a Konica
+5
drivers/media/video/vivi.c
··· 26 26 #include <linux/random.h> 27 27 #include <linux/version.h> 28 28 #include <linux/videodev2.h> 29 + #include <linux/dma-mapping.h> 30 + #ifdef CONFIG_VIDEO_V4L1_COMPAT 31 + /* Include V4L1 specific functions. Should be removed soon */ 32 + #include <linux/videodev.h> 33 + #endif 29 34 #include <linux/interrupt.h> 30 35 #include <media/video-buf.h> 31 36 #include <media/v4l2-common.h>
+1 -1
drivers/media/video/zc0301/Kconfig
··· 1 1 config USB_ZC0301 2 2 tristate "USB ZC0301 Image Processor and Control Chip support" 3 - depends on USB && VIDEO_DEV 3 + depends on USB && VIDEO_V4L1 4 4 ---help--- 5 5 Say Y here if you want support for cameras based on the ZC0301 6 6 Image Processor and Control Chip.
+3 -3
drivers/mmc/au1xmmc.c
··· 310 310 } 311 311 else 312 312 data->bytes_xfered = 313 - (data->blocks * (1 << data->blksz_bits)) - 313 + (data->blocks * data->blksz) - 314 314 host->pio.len; 315 315 } 316 316 ··· 575 575 au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) 576 576 { 577 577 578 - int datalen = data->blocks * (1 << data->blksz_bits); 578 + int datalen = data->blocks * data->blksz; 579 579 580 580 if (dma != 0) 581 581 host->flags |= HOST_F_DMA; ··· 596 596 if (host->dma.len == 0) 597 597 return MMC_ERR_TIMEOUT; 598 598 599 - au_writel((1 << data->blksz_bits) - 1, HOST_BLKSIZE(host)); 599 + au_writel(data->blksz - 1, HOST_BLKSIZE(host)); 600 600 601 601 if (host->flags & HOST_F_DMA) { 602 602 int i;
+14 -10
drivers/mmc/imxmmc.c
··· 218 218 if(!loops) 219 219 return 0; 220 220 221 - dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n", 222 - loops, where, *pstat, stat_mask); 221 + /* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */ 222 + if(!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock>=8000000)) 223 + dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n", 224 + loops, where, *pstat, stat_mask); 223 225 return loops; 224 226 } 225 227 ··· 334 332 335 333 WARN_ON(host->cmd != NULL); 336 334 host->cmd = cmd; 335 + 336 + /* Ensure, that clock are stopped else command programming and start fails */ 337 + imxmci_stop_clock(host); 337 338 338 339 if (cmd->flags & MMC_RSP_BUSY) 339 340 cmdat |= CMD_DAT_CONT_BUSY; ··· 558 553 int trans_done = 0; 559 554 unsigned int stat = *pstat; 560 555 561 - if(host->actual_bus_width == MMC_BUS_WIDTH_4) 556 + if(host->actual_bus_width != MMC_BUS_WIDTH_4) 562 557 burst_len = 16; 563 558 else 564 559 burst_len = 64; ··· 596 591 stat = MMC_STATUS; 597 592 598 593 /* Flush extra bytes from FIFO */ 599 - while(flush_len >= 2){ 600 - flush_len -= 2; 594 + while(flush_len && !(stat & STATUS_DATA_TRANS_DONE)){ 601 595 i = MMC_BUFFER_ACCESS; 602 596 stat = MMC_STATUS; 603 597 stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */ ··· 750 746 data_dir_mask = STATUS_DATA_TRANS_DONE; 751 747 } 752 748 753 - imxmci_busy_wait_for_status(host, &stat, 754 - data_dir_mask, 755 - 50, "imxmci_tasklet_fnc data"); 756 - 757 749 if(stat & data_dir_mask) { 758 750 clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); 759 751 imxmci_data_done(host, stat); ··· 865 865 866 866 imxmci_stop_clock(host); 867 867 MMC_CLK_RATE = (prescaler<<3) | clk; 868 - imxmci_start_clock(host); 868 + /* 869 + * Under my understanding, clock should not be started there, because it would 870 + * initiate SDHC sequencer and send last or random command into card 871 + */ 872 + 
/*imxmci_start_clock(host);*/ 869 873 870 874 dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE); 871 875 } else {
+1
drivers/mmc/mmc.c
··· 951 951 data.timeout_ns = card->csd.tacc_ns * 10; 952 952 data.timeout_clks = card->csd.tacc_clks * 10; 953 953 data.blksz_bits = 3; 954 + data.blksz = 1 << 3; 954 955 data.blocks = 1; 955 956 data.flags = MMC_DATA_READ; 956 957 data.sg = &sg;
+2 -1
drivers/mmc/mmc_block.c
··· 175 175 brq.data.timeout_ns = card->csd.tacc_ns * 10; 176 176 brq.data.timeout_clks = card->csd.tacc_clks * 10; 177 177 brq.data.blksz_bits = md->block_bits; 178 + brq.data.blksz = 1 << md->block_bits; 178 179 brq.data.blocks = req->nr_sectors >> (md->block_bits - 9); 179 180 brq.stop.opcode = MMC_STOP_TRANSMISSION; 180 181 brq.stop.arg = 0; ··· 353 352 */ 354 353 printk(KERN_ERR "%s: unable to select block size for " 355 354 "writing (rb%u wb%u rp%u wp%u)\n", 356 - md->disk->disk_name, 355 + mmc_card_id(card), 357 356 1 << card->csd.read_blkbits, 358 357 1 << card->csd.write_blkbits, 359 358 card->csd.read_partial,
+2 -2
drivers/mmc/pxamci.c
··· 119 119 nob = 0xffff; 120 120 121 121 writel(nob, host->base + MMC_NOB); 122 - writel(1 << data->blksz_bits, host->base + MMC_BLKLEN); 122 + writel(data->blksz, host->base + MMC_BLKLEN); 123 123 124 124 clks = (unsigned long long)data->timeout_ns * CLOCKRATE; 125 125 do_div(clks, 1000000000UL); ··· 283 283 * data blocks as being in error. 284 284 */ 285 285 if (data->error == MMC_ERR_NONE) 286 - data->bytes_xfered = data->blocks << data->blksz_bits; 286 + data->bytes_xfered = data->blocks * data->blksz; 287 287 else 288 288 data->bytes_xfered = 0; 289 289
+4 -4
drivers/mmc/wbsd.c
··· 662 662 unsigned long dmaflags; 663 663 664 664 DBGF("blksz %04x blks %04x flags %08x\n", 665 - 1 << data->blksz_bits, data->blocks, data->flags); 665 + data->blksz, data->blocks, data->flags); 666 666 DBGF("tsac %d ms nsac %d clk\n", 667 667 data->timeout_ns / 1000000, data->timeout_clks); 668 668 669 669 /* 670 670 * Calculate size. 671 671 */ 672 - host->size = data->blocks << data->blksz_bits; 672 + host->size = data->blocks * data->blksz; 673 673 674 674 /* 675 675 * Check timeout values for overflow. ··· 696 696 * Two bytes are needed for each data line. 697 697 */ 698 698 if (host->bus_width == MMC_BUS_WIDTH_1) { 699 - blksize = (1 << data->blksz_bits) + 2; 699 + blksize = data->blksz + 2; 700 700 701 701 wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0); 702 702 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); 703 703 } else if (host->bus_width == MMC_BUS_WIDTH_4) { 704 - blksize = (1 << data->blksz_bits) + 2 * 4; 704 + blksize = data->blksz + 2 * 4; 705 705 706 706 wbsd_write_index(host, WBSD_IDX_PBSMSB, 707 707 ((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
+18 -10
drivers/net/b44.c
··· 650 650 651 651 /* Hardware bug work-around, the chip is unable to do PCI DMA 652 652 to/from anything above 1GB :-( */ 653 - if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) { 653 + if (dma_mapping_error(mapping) || 654 + mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) { 654 655 /* Sigh... */ 655 - pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); 656 + if (!dma_mapping_error(mapping)) 657 + pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); 656 658 dev_kfree_skb_any(skb); 657 659 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA); 658 660 if (skb == NULL) ··· 662 660 mapping = pci_map_single(bp->pdev, skb->data, 663 661 RX_PKT_BUF_SZ, 664 662 PCI_DMA_FROMDEVICE); 665 - if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) { 666 - pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); 663 + if (dma_mapping_error(mapping) || 664 + mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) { 665 + if (!dma_mapping_error(mapping)) 666 + pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); 667 667 dev_kfree_skb_any(skb); 668 668 return -ENOMEM; 669 669 } ··· 971 967 } 972 968 973 969 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); 974 - if (mapping + len > B44_DMA_MASK) { 970 + if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) { 975 971 /* Chip can't handle DMA to/from >1GB, use bounce buffer */ 976 - pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE); 972 + if (!dma_mapping_error(mapping)) 973 + pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE); 977 974 978 975 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ, 979 976 GFP_ATOMIC|GFP_DMA); ··· 983 978 984 979 mapping = pci_map_single(bp->pdev, bounce_skb->data, 985 980 len, PCI_DMA_TODEVICE); 986 - if (mapping + len > B44_DMA_MASK) { 987 - pci_unmap_single(bp->pdev, mapping, 981 + if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) { 982 + if (!dma_mapping_error(mapping)) 983 + pci_unmap_single(bp->pdev, mapping, 988 984 len, 
PCI_DMA_TODEVICE); 989 985 dev_kfree_skb_any(bounce_skb); 990 986 goto err_out; ··· 1209 1203 DMA_TABLE_BYTES, 1210 1204 DMA_BIDIRECTIONAL); 1211 1205 1212 - if (rx_ring_dma + size > B44_DMA_MASK) { 1206 + if (dma_mapping_error(rx_ring_dma) || 1207 + rx_ring_dma + size > B44_DMA_MASK) { 1213 1208 kfree(rx_ring); 1214 1209 goto out_err; 1215 1210 } ··· 1236 1229 DMA_TABLE_BYTES, 1237 1230 DMA_TO_DEVICE); 1238 1231 1239 - if (tx_ring_dma + size > B44_DMA_MASK) { 1232 + if (dma_mapping_error(tx_ring_dma) || 1233 + tx_ring_dma + size > B44_DMA_MASK) { 1240 1234 kfree(tx_ring); 1241 1235 goto out_err; 1242 1236 }
+15 -5
drivers/net/bnx2.c
··· 55 55 56 56 #define DRV_MODULE_NAME "bnx2" 57 57 #define PFX DRV_MODULE_NAME ": " 58 - #define DRV_MODULE_VERSION "1.4.39" 59 - #define DRV_MODULE_RELDATE "March 22, 2006" 58 + #define DRV_MODULE_VERSION "1.4.40" 59 + #define DRV_MODULE_RELDATE "May 22, 2006" 60 60 61 61 #define RUN_AT(x) (jiffies + (x)) 62 62 ··· 2945 2945 int buf_size) 2946 2946 { 2947 2947 u32 written, offset32, len32; 2948 - u8 *buf, start[4], end[4]; 2948 + u8 *buf, start[4], end[4], *flash_buffer = NULL; 2949 2949 int rc = 0; 2950 2950 int align_start, align_end; 2951 2951 ··· 2985 2985 memcpy(buf + align_start, data_buf, buf_size); 2986 2986 } 2987 2987 2988 + if (bp->flash_info->buffered == 0) { 2989 + flash_buffer = kmalloc(264, GFP_KERNEL); 2990 + if (flash_buffer == NULL) { 2991 + rc = -ENOMEM; 2992 + goto nvram_write_end; 2993 + } 2994 + } 2995 + 2988 2996 written = 0; 2989 2997 while ((written < len32) && (rc == 0)) { 2990 2998 u32 page_start, page_end, data_start, data_end; 2991 2999 u32 addr, cmd_flags; 2992 3000 int i; 2993 - u8 flash_buffer[264]; 2994 3001 2995 3002 /* Find the page_start addr */ 2996 3003 page_start = offset32 + written; ··· 3068 3061 } 3069 3062 3070 3063 /* Loop to write the new data from data_start to data_end */ 3071 - for (addr = data_start; addr < data_end; addr += 4, i++) { 3064 + for (addr = data_start; addr < data_end; addr += 4, i += 4) { 3072 3065 if ((addr == page_end - 4) || 3073 3066 ((bp->flash_info->buffered) && 3074 3067 (addr == data_end - 4))) { ··· 3116 3109 } 3117 3110 3118 3111 nvram_write_end: 3112 + if (bp->flash_info->buffered == 0) 3113 + kfree(flash_buffer); 3114 + 3119 3115 if (align_start || align_end) 3120 3116 kfree(buf); 3121 3117 return rc;
+1
drivers/net/dl2k.c
··· 53 53 #define DRV_VERSION "v1.17b" 54 54 #define DRV_RELDATE "2006/03/10" 55 55 #include "dl2k.h" 56 + #include <linux/dma-mapping.h> 56 57 57 58 static char version[] __devinitdata = 58 59 KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
-72
drivers/net/forcedeth.c
··· 2891 2891 goto out_drain; 2892 2892 } 2893 2893 2894 - if (np->msi_flags & NV_MSI_X_CAPABLE) { 2895 - for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 2896 - np->msi_x_entry[i].entry = i; 2897 - } 2898 - if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 2899 - np->msi_flags |= NV_MSI_X_ENABLED; 2900 - if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { 2901 - /* Request irq for rx handling */ 2902 - if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) { 2903 - printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 2904 - pci_disable_msix(np->pci_dev); 2905 - np->msi_flags &= ~NV_MSI_X_ENABLED; 2906 - goto out_drain; 2907 - } 2908 - /* Request irq for tx handling */ 2909 - if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) { 2910 - printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 2911 - pci_disable_msix(np->pci_dev); 2912 - np->msi_flags &= ~NV_MSI_X_ENABLED; 2913 - goto out_drain; 2914 - } 2915 - /* Request irq for link and timer handling */ 2916 - if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) { 2917 - printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 2918 - pci_disable_msix(np->pci_dev); 2919 - np->msi_flags &= ~NV_MSI_X_ENABLED; 2920 - goto out_drain; 2921 - } 2922 - 2923 - /* map interrupts to their respective vector */ 2924 - writel(0, base + NvRegMSIXMap0); 2925 - writel(0, base + NvRegMSIXMap1); 2926 - set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); 2927 - set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); 2928 - set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 2929 - } else { 2930 - /* Request irq for all interrupts */ 2931 - if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, 
SA_SHIRQ, dev->name, dev) != 0) { 2932 - printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 2933 - pci_disable_msix(np->pci_dev); 2934 - np->msi_flags &= ~NV_MSI_X_ENABLED; 2935 - goto out_drain; 2936 - } 2937 - 2938 - /* map interrupts to vector 0 */ 2939 - writel(0, base + NvRegMSIXMap0); 2940 - writel(0, base + NvRegMSIXMap1); 2941 - } 2942 - } 2943 - } 2944 - if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 2945 - if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 2946 - np->msi_flags |= NV_MSI_ENABLED; 2947 - if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { 2948 - printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 2949 - pci_disable_msi(np->pci_dev); 2950 - np->msi_flags &= ~NV_MSI_ENABLED; 2951 - goto out_drain; 2952 - } 2953 - 2954 - /* map interrupts to vector 0 */ 2955 - writel(0, base + NvRegMSIMap0); 2956 - writel(0, base + NvRegMSIMap1); 2957 - /* enable msi vector 0 */ 2958 - writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 2959 - } 2960 - } 2961 - if (ret != 0) { 2962 - if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) 2963 - goto out_drain; 2964 - } 2965 - 2966 2894 /* ask for interrupts */ 2967 2895 nv_enable_hw_interrupts(dev, np->irqmask); 2968 2896
+9 -4
drivers/net/ixp2000/enp2611.c
··· 149 149 int status; 150 150 151 151 dev = nds[i]; 152 + if (dev == NULL) 153 + continue; 152 154 153 155 status = pm3386_is_link_up(i); 154 156 if (status && !netif_carrier_ok(dev)) { ··· 193 191 194 192 static int __init enp2611_init_module(void) 195 193 { 194 + int ports; 196 195 int i; 197 196 198 197 if (!machine_is_enp2611()) ··· 202 199 caleb_reset(); 203 200 pm3386_reset(); 204 201 205 - for (i = 0; i < 3; i++) { 202 + ports = pm3386_port_count(); 203 + for (i = 0; i < ports; i++) { 206 204 nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv)); 207 205 if (nds[i] == NULL) { 208 206 while (--i >= 0) ··· 219 215 220 216 ixp2400_msf_init(&enp2611_msf_parameters); 221 217 222 - if (ixpdev_init(3, nds, enp2611_set_port_admin_status)) { 223 - for (i = 0; i < 3; i++) 224 - free_netdev(nds[i]); 218 + if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) { 219 + for (i = 0; i < ports; i++) 220 + if (nds[i]) 221 + free_netdev(nds[i]); 225 222 return -EINVAL; 226 223 } 227 224
+24 -6
drivers/net/ixp2000/pm3386.c
··· 86 86 pm3386_reg_write(port >> 1, reg, value); 87 87 } 88 88 89 + int pm3386_secondary_present(void) 90 + { 91 + return pm3386_reg_read(1, 0) == 0x3386; 92 + } 89 93 90 94 void pm3386_reset(void) 91 95 { 92 96 u8 mac[3][6]; 97 + int secondary; 98 + 99 + secondary = pm3386_secondary_present(); 93 100 94 101 /* Save programmed MAC addresses. */ 95 102 pm3386_get_mac(0, mac[0]); 96 103 pm3386_get_mac(1, mac[1]); 97 - pm3386_get_mac(2, mac[2]); 104 + if (secondary) 105 + pm3386_get_mac(2, mac[2]); 98 106 99 107 /* Assert analog and digital reset. */ 100 108 pm3386_reg_write(0, 0x002, 0x0060); 101 - pm3386_reg_write(1, 0x002, 0x0060); 109 + if (secondary) 110 + pm3386_reg_write(1, 0x002, 0x0060); 102 111 mdelay(1); 103 112 104 113 /* Deassert analog reset. */ 105 114 pm3386_reg_write(0, 0x002, 0x0062); 106 - pm3386_reg_write(1, 0x002, 0x0062); 115 + if (secondary) 116 + pm3386_reg_write(1, 0x002, 0x0062); 107 117 mdelay(10); 108 118 109 119 /* Deassert digital reset. */ 110 120 pm3386_reg_write(0, 0x002, 0x0063); 111 - pm3386_reg_write(1, 0x002, 0x0063); 121 + if (secondary) 122 + pm3386_reg_write(1, 0x002, 0x0063); 112 123 mdelay(10); 113 124 114 125 /* Restore programmed MAC addresses. */ 115 126 pm3386_set_mac(0, mac[0]); 116 127 pm3386_set_mac(1, mac[1]); 117 - pm3386_set_mac(2, mac[2]); 128 + if (secondary) 129 + pm3386_set_mac(2, mac[2]); 118 130 119 131 /* Disable carrier on all ports. */ 120 132 pm3386_set_carrier(0, 0); 121 133 pm3386_set_carrier(1, 0); 122 - pm3386_set_carrier(2, 0); 134 + if (secondary) 135 + pm3386_set_carrier(2, 0); 123 136 } 124 137 125 138 static u16 swaph(u16 x) 126 139 { 127 140 return ((x << 8) | (x >> 8)) & 0xffff; 141 + } 142 + 143 + int pm3386_port_count(void) 144 + { 145 + return 2 + pm3386_secondary_present(); 128 146 } 129 147 130 148 void pm3386_init_port(int port)
+1
drivers/net/ixp2000/pm3386.h
··· 13 13 #define __PM3386_H 14 14 15 15 void pm3386_reset(void); 16 + int pm3386_port_count(void); 16 17 void pm3386_init_port(int port); 17 18 void pm3386_get_mac(int port, u8 *mac); 18 19 void pm3386_set_mac(int port, u8 *mac);
+2 -11
drivers/net/pcmcia/axnet_cs.c
··· 1691 1691 memset(ei_local->mcfilter, 0xFF, 8); 1692 1692 } 1693 1693 1694 - /* 1695 - * DP8390 manuals don't specify any magic sequence for altering 1696 - * the multicast regs on an already running card. To be safe, we 1697 - * ensure multicast mode is off prior to loading up the new hash 1698 - * table. If this proves to be not enough, we can always resort 1699 - * to stopping the NIC, loading the table and then restarting. 1700 - */ 1701 - 1702 - if (netif_running(dev)) 1703 - outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); 1704 - 1705 1694 outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD); 1706 1695 for(i = 0; i < 8; i++) 1707 1696 { ··· 1704 1715 outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR); 1705 1716 else 1706 1717 outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR); 1718 + 1719 + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD); 1707 1720 } 1708 1721 1709 1722 /*
+3 -5
drivers/net/skge.c
··· 78 78 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) }, 79 79 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) }, 80 80 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), }, 81 - { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, 82 - { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, 81 + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* DGE-530T */ 83 82 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, 84 83 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ 85 84 { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) }, ··· 401 402 int err; 402 403 403 404 if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE || 404 - p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE) 405 + p->tx_pending < MAX_SKB_FRAGS+1 || p->tx_pending > MAX_TX_RING_SIZE) 405 406 return -EINVAL; 406 407 407 408 skge->rx_ring.count = p->rx_pending; ··· 2716 2717 if (control & BMU_OWN) 2717 2718 break; 2718 2719 2719 - skb = skge_rx_get(skge, e, control, rd->status, 2720 - le16_to_cpu(rd->csum2)); 2720 + skb = skge_rx_get(skge, e, control, rd->status, rd->csum2); 2721 2721 if (likely(skb)) { 2722 2722 dev->last_rx = jiffies; 2723 2723 netif_receive_skb(skb);
+51 -17
drivers/net/sky2.c
··· 51 51 #include "sky2.h" 52 52 53 53 #define DRV_NAME "sky2" 54 - #define DRV_VERSION "1.3" 54 + #define DRV_VERSION "1.4" 55 55 #define PFX DRV_NAME " " 56 56 57 57 /* ··· 105 105 static const struct pci_device_id sky2_id_table[] = { 106 106 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, 107 107 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, 108 + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */ 108 109 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, 109 110 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, 110 111 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, ··· 236 235 } 237 236 238 237 if (hw->chip_id == CHIP_ID_YUKON_EC_U) { 238 + sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON); 239 239 sky2_pci_write32(hw, PCI_DEV_REG3, 0); 240 240 reg1 = sky2_pci_read32(hw, PCI_DEV_REG4); 241 241 reg1 &= P_ASPM_CONTROL_MSK; ··· 308 306 u16 ctrl, ct1000, adv, pg, ledctrl, ledover; 309 307 310 308 if (sky2->autoneg == AUTONEG_ENABLE && 311 - (hw->chip_id != CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) { 309 + !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) { 312 310 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); 313 311 314 312 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | ··· 979 977 struct sky2_hw *hw = sky2->hw; 980 978 unsigned rxq = rxqaddr[sky2->port]; 981 979 int i; 980 + unsigned thresh; 982 981 983 982 sky2->rx_put = sky2->rx_next = 0; 984 983 sky2_qset(hw, rxq); ··· 1004 1001 sky2_rx_add(sky2, re->mapaddr); 1005 1002 } 1006 1003 1007 - /* Truncate oversize frames */ 1008 - sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), sky2->rx_bufsize - 8); 1009 - sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON); 1004 + 1005 + /* 1006 + * The receiver hangs if it receives frames larger than the 1007 + * packet buffer. As a workaround, truncate oversize frames, but 1008 + * the register is limited to 9 bits, so if you do frames > 2052 1009 + * you better get the MTU right! 
1010 + */ 1011 + thresh = (sky2->rx_bufsize - 8) / sizeof(u32); 1012 + if (thresh > 0x1ff) 1013 + sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF); 1014 + else { 1015 + sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh); 1016 + sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON); 1017 + } 1018 + 1010 1019 1011 1020 /* Tell chip about available buffers */ 1012 1021 sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put); ··· 1035 1020 struct sky2_hw *hw = sky2->hw; 1036 1021 unsigned port = sky2->port; 1037 1022 u32 ramsize, rxspace, imask; 1038 - int err = -ENOMEM; 1023 + int cap, err = -ENOMEM; 1024 + struct net_device *otherdev = hw->dev[sky2->port^1]; 1025 + 1026 + /* 1027 + * On dual port PCI-X card, there is an problem where status 1028 + * can be received out of order due to split transactions 1029 + */ 1030 + if (otherdev && netif_running(otherdev) && 1031 + (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) { 1032 + struct sky2_port *osky2 = netdev_priv(otherdev); 1033 + u16 cmd; 1034 + 1035 + cmd = sky2_pci_read16(hw, cap + PCI_X_CMD); 1036 + cmd &= ~PCI_X_CMD_MAX_SPLIT; 1037 + sky2_pci_write16(hw, cap + PCI_X_CMD, cmd); 1038 + 1039 + sky2->rx_csum = 0; 1040 + osky2->rx_csum = 0; 1041 + } 1039 1042 1040 1043 if (netif_msg_ifup(sky2)) 1041 1044 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); ··· 1932 1899 } 1933 1900 } 1934 1901 1902 + /* Is status ring empty or is there more to do? 
*/ 1903 + static inline int sky2_more_work(const struct sky2_hw *hw) 1904 + { 1905 + return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX)); 1906 + } 1907 + 1935 1908 /* Process status response ring */ 1936 1909 static int sky2_status_intr(struct sky2_hw *hw, int to_do) 1937 1910 { ··· 2210 2171 if (status & Y2_IS_CHK_TXA2) 2211 2172 sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); 2212 2173 2213 - if (status & Y2_IS_STAT_BMU) 2214 - sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); 2215 - 2216 2174 work_done = sky2_status_intr(hw, work_limit); 2217 2175 *budget -= work_done; 2218 2176 dev0->quota -= work_done; 2219 2177 2220 - if (work_done >= work_limit) 2178 + if (status & Y2_IS_STAT_BMU) 2179 + sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); 2180 + 2181 + if (sky2_more_work(hw)) 2221 2182 return 1; 2222 2183 2223 2184 netif_rx_complete(dev0); 2224 2185 2225 - status = sky2_read32(hw, B0_Y2_SP_LISR); 2186 + sky2_read32(hw, B0_Y2_SP_LISR); 2226 2187 return 0; 2227 2188 } 2228 2189 ··· 3106 3067 sky2->duplex = -1; 3107 3068 sky2->speed = -1; 3108 3069 sky2->advertising = sky2_supported_modes(hw); 3109 - 3110 - /* Receive checksum disabled for Yukon XL 3111 - * because of observed problems with incorrect 3112 - * values when multiple packets are received in one interrupt 3113 - */ 3114 - sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL); 3070 + sky2->rx_csum = 1; 3115 3071 3116 3072 spin_lock_init(&sky2->phy_lock); 3117 3073 sky2->tx_pending = TX_DEF_PENDING;
+2
drivers/net/sky2.h
··· 214 214 enum { 215 215 Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */ 216 216 Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */ 217 + Y2_HW_WOL_ON = 1<<15,/* HW WOL On (Yukon-EC Ultra A1 only) */ 218 + Y2_HW_WOL_OFF = 1<<14,/* HW WOL On (Yukon-EC Ultra A1 only) */ 217 219 Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */ 218 220 Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */ 219 221 Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable (YUKON-2 only) */
+11 -5
drivers/net/tg3.c
··· 69 69 70 70 #define DRV_MODULE_NAME "tg3" 71 71 #define PFX DRV_MODULE_NAME ": " 72 - #define DRV_MODULE_VERSION "3.57" 73 - #define DRV_MODULE_RELDATE "Apr 28, 2006" 72 + #define DRV_MODULE_VERSION "3.58" 73 + #define DRV_MODULE_RELDATE "May 22, 2006" 74 74 75 75 #define TG3_DEF_MAC_MODE 0 76 76 #define TG3_DEF_RX_MODE 0 ··· 6488 6488 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); 6489 6489 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); 6490 6490 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); 6491 + 6492 + TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); 6493 + TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); 6494 + TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); 6491 6495 } 6492 6496 6493 6497 static void tg3_timer(unsigned long __opaque) ··· 7657 7653 cmd->supported |= (SUPPORTED_1000baseT_Half | 7658 7654 SUPPORTED_1000baseT_Full); 7659 7655 7660 - if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) 7656 + if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { 7661 7657 cmd->supported |= (SUPPORTED_100baseT_Half | 7662 7658 SUPPORTED_100baseT_Full | 7663 7659 SUPPORTED_10baseT_Half | 7664 7660 SUPPORTED_10baseT_Full | 7665 7661 SUPPORTED_MII); 7666 - else 7662 + cmd->port = PORT_TP; 7663 + } else { 7667 7664 cmd->supported |= SUPPORTED_FIBRE; 7665 + cmd->port = PORT_FIBRE; 7666 + } 7668 7667 7669 7668 cmd->advertising = tp->link_config.advertising; 7670 7669 if (netif_running(dev)) { 7671 7670 cmd->speed = tp->link_config.active_speed; 7672 7671 cmd->duplex = tp->link_config.active_duplex; 7673 7672 } 7674 - cmd->port = 0; 7675 7673 cmd->phy_address = PHY_ADDR; 7676 7674 cmd->transceiver = 0; 7677 7675 cmd->autoneg = tp->link_config.autoneg;
+2 -2
drivers/net/tulip/winbond-840.c
··· 850 850 break; 851 851 skb->dev = dev; /* Mark as being used by this device. */ 852 852 np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data, 853 - skb->len,PCI_DMA_FROMDEVICE); 853 + np->rx_buf_sz,PCI_DMA_FROMDEVICE); 854 854 855 855 np->rx_ring[i].buffer1 = np->rx_addr[i]; 856 856 np->rx_ring[i].status = DescOwn; ··· 1316 1316 skb->dev = dev; /* Mark as being used by this device. */ 1317 1317 np->rx_addr[entry] = pci_map_single(np->pci_dev, 1318 1318 skb->data, 1319 - skb->len, PCI_DMA_FROMDEVICE); 1319 + np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1320 1320 np->rx_ring[entry].buffer1 = np->rx_addr[entry]; 1321 1321 } 1322 1322 wmb();
+3 -31
drivers/net/via-rhine.c
··· 491 491 u8 tx_thresh, rx_thresh; 492 492 493 493 struct mii_if_info mii_if; 494 - struct work_struct tx_timeout_task; 495 - struct work_struct check_media_task; 496 494 void __iomem *base; 497 495 }; 498 496 ··· 498 500 static void mdio_write(struct net_device *dev, int phy_id, int location, int value); 499 501 static int rhine_open(struct net_device *dev); 500 502 static void rhine_tx_timeout(struct net_device *dev); 501 - static void rhine_tx_timeout_task(struct net_device *dev); 502 - static void rhine_check_media_task(struct net_device *dev); 503 503 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); 504 504 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 505 505 static void rhine_tx(struct net_device *dev); ··· 852 856 if (rp->quirks & rqRhineI) 853 857 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; 854 858 855 - INIT_WORK(&rp->tx_timeout_task, 856 - (void (*)(void *))rhine_tx_timeout_task, dev); 857 - 858 - INIT_WORK(&rp->check_media_task, 859 - (void (*)(void *))rhine_check_media_task, dev); 860 - 861 859 /* dev->name not defined before register_netdev()! */ 862 860 rc = register_netdev(dev); 863 861 if (rc) ··· 1098 1108 netif_carrier_ok(mii->dev)); 1099 1109 } 1100 1110 1101 - static void rhine_check_media_task(struct net_device *dev) 1102 - { 1103 - rhine_check_media(dev, 0); 1104 - } 1105 - 1106 1111 static void init_registers(struct net_device *dev) 1107 1112 { 1108 1113 struct rhine_private *rp = netdev_priv(dev); ··· 1151 1166 if (quirks & rqRhineI) { 1152 1167 iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR 1153 1168 1154 - /* Do not call from ISR! */ 1155 - msleep(1); 1169 + /* Can be called from ISR. Evil. 
*/ 1170 + mdelay(1); 1156 1171 1157 1172 /* 0x80 must be set immediately before turning it off */ 1158 1173 iowrite8(0x80, ioaddr + MIICmd); ··· 1240 1255 } 1241 1256 1242 1257 static void rhine_tx_timeout(struct net_device *dev) 1243 - { 1244 - struct rhine_private *rp = netdev_priv(dev); 1245 - 1246 - /* 1247 - * Move bulk of work outside of interrupt context 1248 - */ 1249 - schedule_work(&rp->tx_timeout_task); 1250 - } 1251 - 1252 - static void rhine_tx_timeout_task(struct net_device *dev) 1253 1258 { 1254 1259 struct rhine_private *rp = netdev_priv(dev); 1255 1260 void __iomem *ioaddr = rp->base; ··· 1652 1677 spin_lock(&rp->lock); 1653 1678 1654 1679 if (intr_status & IntrLinkChange) 1655 - schedule_work(&rp->check_media_task); 1680 + rhine_check_media(dev, 0); 1656 1681 if (intr_status & IntrStatsMax) { 1657 1682 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs); 1658 1683 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed); ··· 1902 1927 spin_unlock_irq(&rp->lock); 1903 1928 1904 1929 free_irq(rp->pdev->irq, dev); 1905 - 1906 - flush_scheduled_work(); 1907 - 1908 1930 free_rbufs(dev); 1909 1931 free_tbufs(dev); 1910 1932 free_ring(dev);
+3 -3
drivers/net/wireless/bcm43xx/bcm43xx_main.c
··· 3271 3271 bcm43xx_sysfs_register(bcm); 3272 3272 //FIXME: check for bcm43xx_sysfs_register failure. This function is a bit messy regarding unwinding, though... 3273 3273 3274 + /*FIXME: This should be handled by softmac instead. */ 3275 + schedule_work(&bcm->softmac->associnfo.work); 3276 + 3274 3277 assert(err == 0); 3275 3278 out: 3276 3279 return err; ··· 3949 3946 3950 3947 netif_device_attach(net_dev); 3951 3948 3952 - /*FIXME: This should be handled by softmac instead. */ 3953 - schedule_work(&bcm->softmac->associnfo.work); 3954 - 3955 3949 dprintk(KERN_INFO PFX "Device resumed.\n"); 3956 3950 3957 3951 return 0;
+1 -3
drivers/net/wireless/orinoco.c
··· 812 812 if (datalen > IEEE80211_DATA_LEN + 12) { 813 813 printk(KERN_DEBUG "%s: oversized monitor frame, " 814 814 "data length = %d\n", dev->name, datalen); 815 - err = -EIO; 816 815 stats->rx_length_errors++; 817 816 goto update_stats; 818 817 } ··· 820 821 if (!skb) { 821 822 printk(KERN_WARNING "%s: Cannot allocate skb for monitor frame\n", 822 823 dev->name); 823 - err = -ENOMEM; 824 - goto drop; 824 + goto update_stats; 825 825 } 826 826 827 827 /* Copy the 802.11 header to the skb */
+35 -25
drivers/pci/pci-acpi.c
··· 33 33 acpi_status status; 34 34 struct acpi_object_list input; 35 35 union acpi_object in_params[4]; 36 - struct acpi_buffer output; 37 - union acpi_object out_obj; 36 + struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; 37 + union acpi_object *out_obj; 38 38 u32 osc_dw0; 39 39 40 - /* Setting up output buffer */ 41 - output.length = sizeof(out_obj) + 3*sizeof(u32); 42 - output.pointer = &out_obj; 43 40 44 41 /* Setting up input parameters */ 45 42 input.count = 4; ··· 58 61 "Evaluate _OSC Set fails. Status = 0x%04x\n", status); 59 62 return status; 60 63 } 61 - if (out_obj.type != ACPI_TYPE_BUFFER) { 64 + out_obj = output.pointer; 65 + 66 + if (out_obj->type != ACPI_TYPE_BUFFER) { 62 67 printk(KERN_DEBUG 63 68 "Evaluate _OSC returns wrong type\n"); 64 - return AE_TYPE; 69 + status = AE_TYPE; 70 + goto query_osc_out; 65 71 } 66 - osc_dw0 = *((u32 *) out_obj.buffer.pointer); 72 + osc_dw0 = *((u32 *) out_obj->buffer.pointer); 67 73 if (osc_dw0) { 68 74 if (osc_dw0 & OSC_REQUEST_ERROR) 69 75 printk(KERN_DEBUG "_OSC request fails\n"); ··· 76 76 printk(KERN_DEBUG "_OSC invalid revision\n"); 77 77 if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { 78 78 /* Update Global Control Set */ 79 - global_ctrlsets = *((u32 *)(out_obj.buffer.pointer+8)); 80 - return AE_OK; 79 + global_ctrlsets = *((u32 *)(out_obj->buffer.pointer+8)); 80 + status = AE_OK; 81 + goto query_osc_out; 81 82 } 82 - return AE_ERROR; 83 + status = AE_ERROR; 84 + goto query_osc_out; 83 85 } 84 86 85 87 /* Update Global Control Set */ 86 - global_ctrlsets = *((u32 *)(out_obj.buffer.pointer + 8)); 87 - return AE_OK; 88 + global_ctrlsets = *((u32 *)(out_obj->buffer.pointer + 8)); 89 + status = AE_OK; 90 + 91 + query_osc_out: 92 + kfree(output.pointer); 93 + return status; 88 94 } 89 95 90 96 ··· 102 96 acpi_status status; 103 97 struct acpi_object_list input; 104 98 union acpi_object in_params[4]; 105 - struct acpi_buffer output; 106 - union acpi_object out_obj; 99 + struct acpi_buffer output = 
{ACPI_ALLOCATE_BUFFER, NULL}; 100 + union acpi_object *out_obj; 107 101 u32 osc_dw0; 108 102 109 - /* Setting up output buffer */ 110 - output.length = sizeof(out_obj) + 3*sizeof(u32); 111 - output.pointer = &out_obj; 112 - 113 103 /* Setting up input parameters */ 114 104 input.count = 4; 115 105 input.pointer = in_params; ··· 126 124 "Evaluate _OSC Set fails. Status = 0x%04x\n", status); 127 125 return status; 128 126 } 129 - if (out_obj.type != ACPI_TYPE_BUFFER) { 127 + out_obj = output.pointer; 128 + if (out_obj->type != ACPI_TYPE_BUFFER) { 130 129 printk(KERN_DEBUG 131 130 "Evaluate _OSC returns wrong type\n"); 132 - return AE_TYPE; 131 + status = AE_TYPE; 132 + goto run_osc_out; 133 133 } 134 - osc_dw0 = *((u32 *) out_obj.buffer.pointer); 134 + osc_dw0 = *((u32 *) out_obj->buffer.pointer); 135 135 if (osc_dw0) { 136 136 if (osc_dw0 & OSC_REQUEST_ERROR) 137 137 printk(KERN_DEBUG "_OSC request fails\n"); ··· 143 139 printk(KERN_DEBUG "_OSC invalid revision\n"); 144 140 if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { 145 141 printk(KERN_DEBUG "_OSC FW not grant req. control\n"); 146 - return AE_SUPPORT; 142 + status = AE_SUPPORT; 143 + goto run_osc_out; 147 144 } 148 - return AE_ERROR; 145 + status = AE_ERROR; 146 + goto run_osc_out; 149 147 } 150 - return AE_OK; 148 + status = AE_OK; 149 + 150 + run_osc_out: 151 + kfree(output.pointer); 152 + return status; 151 153 } 152 154 153 155 /**
+15 -1
drivers/pci/quirks.c
··· 634 634 * non-x86 architectures (yes Via exists on PPC among other places), 635 635 * we must mask the PCI_INTERRUPT_LINE value versus 0xf to get 636 636 * interrupts delivered properly. 637 + * 638 + * Some of the on-chip devices are actually '586 devices' so they are 639 + * listed here. 637 640 */ 638 641 static void quirk_via_irq(struct pci_dev *dev) 639 642 { ··· 651 648 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq); 652 649 } 653 650 } 651 + DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_via_irq); 652 + DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, quirk_via_irq); 653 + DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_2, quirk_via_irq); 654 + DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_irq); 654 655 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_irq); 655 656 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_irq); 656 657 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, quirk_via_irq); ··· 902 895 } 903 896 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge); 904 897 898 + #ifndef CONFIG_ACPI_SLEEP 905 899 /* 906 900 * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge 907 901 * is not activated. The myth is that Asus said that they do not want the ··· 914 906 * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it 915 907 * becomes necessary to do this tweak in two steps -- I've chosen the Host 916 908 * bridge as trigger. 909 + * 910 + * Actually, leaving it unhidden and not redoing the quirk over suspend2ram 911 + * will cause thermal management to break down, and causing machine to 912 + * overheat. 
917 913 */ 918 - static int __initdata asus_hides_smbus = 0; 914 + static int __initdata asus_hides_smbus; 919 915 920 916 static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) 921 917 { ··· 1061 1049 printk(KERN_INFO "PCI: Enabled ICH6/i801 SMBus device\n"); 1062 1050 } 1063 1051 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6 ); 1052 + 1053 + #endif 1064 1054 1065 1055 /* 1066 1056 * SiS 96x south bridge: BIOS typically hides SMBus device...
+16 -7
drivers/pcmcia/pcmcia_ioctl.c
··· 426 426 427 427 if (!warning_printed) { 428 428 printk(KERN_INFO "pcmcia: Detected deprecated PCMCIA ioctl " 429 - "usage.\n"); 429 + "usage from process: %s.\n", current->comm); 430 430 printk(KERN_INFO "pcmcia: This interface will soon be removed from " 431 431 "the kernel; please expect breakage unless you upgrade " 432 432 "to new tools.\n"); ··· 601 601 ret = CS_BAD_ARGS; 602 602 else { 603 603 struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->config.Function); 604 - ret = pccard_get_configuration_info(s, p_dev, &buf->config); 605 - pcmcia_put_dev(p_dev); 604 + if (p_dev == NULL) 605 + ret = CS_BAD_ARGS; 606 + else { 607 + ret = pccard_get_configuration_info(s, p_dev, &buf->config); 608 + pcmcia_put_dev(p_dev); 609 + } 606 610 } 607 611 break; 608 612 case DS_GET_FIRST_TUPLE: ··· 636 632 ret = CS_BAD_ARGS; 637 633 else { 638 634 struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function); 639 - ret = pccard_get_status(s, p_dev, &buf->status); 640 - pcmcia_put_dev(p_dev); 635 + if (p_dev == NULL) 636 + ret = CS_BAD_ARGS; 637 + else { 638 + ret = pccard_get_status(s, p_dev, &buf->status); 639 + pcmcia_put_dev(p_dev); 640 + } 641 641 } 642 642 break; 643 643 case DS_VALIDATE_CIS: ··· 673 665 if (!(buf->conf_reg.Function && 674 666 (buf->conf_reg.Function >= s->functions))) { 675 667 struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->conf_reg.Function); 676 - if (p_dev) 668 + if (p_dev) { 677 669 ret = pcmcia_access_configuration_register(p_dev, &buf->conf_reg); 678 - pcmcia_put_dev(p_dev); 670 + pcmcia_put_dev(p_dev); 671 + } 679 672 } 680 673 break; 681 674 case DS_GET_FIRST_REGION:
+1 -1
drivers/pcmcia/pd6729.c
··· 589 589 return 0; 590 590 } 591 591 592 - static u_int __init pd6729_isa_scan(void) 592 + static u_int __devinit pd6729_isa_scan(void) 593 593 { 594 594 u_int mask0, mask = 0; 595 595 int i;
+3 -3
drivers/rtc/rtc-dev.c
··· 141 141 /* try the driver's ioctl interface */ 142 142 if (ops->ioctl) { 143 143 err = ops->ioctl(class_dev->dev, cmd, arg); 144 - if (err != -EINVAL) 144 + if (err != -ENOIOCTLCMD) 145 145 return err; 146 146 } 147 147 148 148 /* if the driver does not provide the ioctl interface 149 149 * or if that particular ioctl was not implemented 150 - * (-EINVAL), we will try to emulate here. 150 + * (-ENOIOCTLCMD), we will try to emulate here. 151 151 */ 152 152 153 153 switch (cmd) { ··· 233 233 break; 234 234 235 235 default: 236 - err = -EINVAL; 236 + err = -ENOTTY; 237 237 break; 238 238 } 239 239
+1 -1
drivers/rtc/rtc-sa1100.c
··· 247 247 rtc_freq = arg; 248 248 return 0; 249 249 } 250 - return -EINVAL; 250 + return -ENOIOCTLCMD; 251 251 } 252 252 253 253 static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
+1 -1
drivers/rtc/rtc-test.c
··· 71 71 return 0; 72 72 73 73 default: 74 - return -EINVAL; 74 + return -ENOIOCTLCMD; 75 75 } 76 76 } 77 77
+1 -1
drivers/rtc/rtc-vr41xx.c
··· 270 270 epoch = arg; 271 271 break; 272 272 default: 273 - return -EINVAL; 273 + return -ENOIOCTLCMD; 274 274 } 275 275 276 276 return 0;
+1 -1
drivers/s390/net/lcs.c
··· 1348 1348 index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa) 1349 1349 - channel->ccws; 1350 1350 if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) || 1351 - (irb->scsw.cstat | SCHN_STAT_PCI)) 1351 + (irb->scsw.cstat & SCHN_STAT_PCI)) 1352 1352 /* Bloody io subsystem tells us lies about cpa... */ 1353 1353 index = (index - 1) & (LCS_NUM_BUFFS - 1); 1354 1354 while (channel->io_idx != index) {
+9 -6
drivers/sbus/char/openprom.c
··· 392 392 return -ENOMEM; 393 393 } 394 394 395 - prom_getproperty(op.op_nodeid, str, tmp, len); 395 + cnt = prom_getproperty(op.op_nodeid, str, tmp, len); 396 + if (cnt <= 0) { 397 + error = -EINVAL; 398 + } else { 399 + tmp[len] = '\0'; 396 400 397 - tmp[len] = '\0'; 398 - 399 - if (__copy_to_user(argp, &op, sizeof(op)) != 0 400 - || copy_to_user(op.op_buf, tmp, len) != 0) 401 - error = -EFAULT; 401 + if (__copy_to_user(argp, &op, sizeof(op)) != 0 || 402 + copy_to_user(op.op_buf, tmp, len) != 0) 403 + error = -EFAULT; 404 + } 402 405 403 406 kfree(tmp); 404 407 kfree(str);
+11
drivers/scsi/libata-core.c
··· 864 864 /** 865 865 * ata_port_queue_task - Queue port_task 866 866 * @ap: The ata_port to queue port_task for 867 + * @fn: workqueue function to be scheduled 868 + * @data: data value to pass to workqueue function 869 + * @delay: delay time for workqueue function 867 870 * 868 871 * Schedule @fn(@data) for execution after @delay jiffies using 869 872 * port_task. There is one port_task per port and it's the ··· 2742 2739 * ata_dev_init_params - Issue INIT DEV PARAMS command 2743 2740 * @ap: Port associated with device @dev 2744 2741 * @dev: Device to which command will be sent 2742 + * @heads: Number of heads (taskfile parameter) 2743 + * @sectors: Number of sectors (taskfile parameter) 2745 2744 * 2746 2745 * LOCKING: 2747 2746 * Kernel thread context (may sleep) ··· 3643 3638 3644 3639 ata_pio_sector(qc); 3645 3640 } 3641 + 3642 + ata_altstatus(ap); /* flush */ 3646 3643 } 3647 3644 3648 3645 static void ata_pio_error(struct ata_port *ap) ··· 3761 3754 spin_lock_irqsave(&ap->host_set->lock, flags); 3762 3755 ap->flags &= ~ATA_FLAG_NOINTR; 3763 3756 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); 3757 + ata_altstatus(ap); /* flush */ 3758 + 3764 3759 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) 3765 3760 ap->ops->bmdma_start(qc); /* initiate bmdma */ 3766 3761 spin_unlock_irqrestore(&ap->host_set->lock, flags); 3767 3762 } else { 3768 3763 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); 3764 + ata_altstatus(ap); /* flush */ 3769 3765 3770 3766 /* PIO commands are handled by polling */ 3771 3767 ap->hsm_task_state = HSM_ST; ··· 4312 4302 * ata_device_suspend - prepare a device for suspend 4313 4303 * @ap: port the device is connected to 4314 4304 * @dev: the device to suspend 4305 + * @state: target power management state 4315 4306 * 4316 4307 * Flush the cache on the drive, if appropriate, then issue a 4317 4308 * standbynow command.
+79 -59
drivers/scsi/sata_mv.c
··· 37 37 #include <asm/io.h> 38 38 39 39 #define DRV_NAME "sata_mv" 40 - #define DRV_VERSION "0.6" 40 + #define DRV_VERSION "0.7" 41 41 42 42 enum { 43 43 /* BAR's are enumerated in terms of pci_resource_start() terms */ ··· 50 50 51 51 MV_PCI_REG_BASE = 0, 52 52 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */ 53 + MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08), 54 + MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88), 55 + MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c), 56 + MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc), 57 + MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0), 58 + 53 59 MV_SATAHC0_REG_BASE = 0x20000, 54 60 MV_FLASH_CTL = 0x1046c, 55 61 MV_GPIO_PORT_CTL = 0x104f0, ··· 308 302 dma_addr_t crpb_dma; 309 303 struct mv_sg *sg_tbl; 310 304 dma_addr_t sg_tbl_dma; 311 - 312 - unsigned req_producer; /* cp of req_in_ptr */ 313 - unsigned rsp_consumer; /* cp of rsp_out_ptr */ 314 305 u32 pp_flags; 315 306 }; 316 307 ··· 940 937 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, 941 938 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 942 939 943 - pp->req_producer = pp->rsp_consumer = 0; 944 - 945 940 /* Don't turn on EDMA here...do it before DMA commands only. Else 946 941 * we'll be unable to send non-data, PIO, etc due to restricted access 947 942 * to shadow regs. ··· 1023 1022 } 1024 1023 } 1025 1024 1026 - static inline unsigned mv_inc_q_index(unsigned *index) 1025 + static inline unsigned mv_inc_q_index(unsigned index) 1027 1026 { 1028 - *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK; 1029 - return *index; 1027 + return (index + 1) & MV_MAX_Q_DEPTH_MASK; 1030 1028 } 1031 1029 1032 1030 static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last) 1033 1031 { 1034 - *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | 1032 + u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | 1035 1033 (last ? 
CRQB_CMD_LAST : 0); 1034 + *cmdw = cpu_to_le16(tmp); 1036 1035 } 1037 1036 1038 1037 /** ··· 1054 1053 u16 *cw; 1055 1054 struct ata_taskfile *tf; 1056 1055 u16 flags = 0; 1056 + unsigned in_index; 1057 1057 1058 1058 if (ATA_PROT_DMA != qc->tf.protocol) 1059 1059 return; 1060 - 1061 - /* the req producer index should be the same as we remember it */ 1062 - WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> 1063 - EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != 1064 - pp->req_producer); 1065 1060 1066 1061 /* Fill in command request block 1067 1062 */ ··· 1066 1069 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 1067 1070 flags |= qc->tag << CRQB_TAG_SHIFT; 1068 1071 1069 - pp->crqb[pp->req_producer].sg_addr = 1070 - cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); 1071 - pp->crqb[pp->req_producer].sg_addr_hi = 1072 - cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); 1073 - pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags); 1072 + /* get current queue index from hardware */ 1073 + in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) 1074 + >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 1074 1075 1075 - cw = &pp->crqb[pp->req_producer].ata_cmd[0]; 1076 + pp->crqb[in_index].sg_addr = 1077 + cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); 1078 + pp->crqb[in_index].sg_addr_hi = 1079 + cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); 1080 + pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); 1081 + 1082 + cw = &pp->crqb[in_index].ata_cmd[0]; 1076 1083 tf = &qc->tf; 1077 1084 1078 1085 /* Sadly, the CRQB cannot accomodate all registers--there are ··· 1145 1144 struct mv_port_priv *pp = ap->private_data; 1146 1145 struct mv_crqb_iie *crqb; 1147 1146 struct ata_taskfile *tf; 1147 + unsigned in_index; 1148 1148 u32 flags = 0; 1149 1149 1150 1150 if (ATA_PROT_DMA != qc->tf.protocol) 1151 1151 return; 1152 - 1153 - /* the req producer index should be the same as we remember it */ 1154 - WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> 1155 - EDMA_REQ_Q_PTR_SHIFT) & 
MV_MAX_Q_DEPTH_MASK) != 1156 - pp->req_producer); 1157 1152 1158 1153 /* Fill in Gen IIE command request block 1159 1154 */ ··· 1159 1162 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 1160 1163 flags |= qc->tag << CRQB_TAG_SHIFT; 1161 1164 1162 - crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer]; 1165 + /* get current queue index from hardware */ 1166 + in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) 1167 + >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 1168 + 1169 + crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; 1163 1170 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); 1164 1171 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); 1165 1172 crqb->flags = cpu_to_le32(flags); ··· 1211 1210 { 1212 1211 void __iomem *port_mmio = mv_ap_base(qc->ap); 1213 1212 struct mv_port_priv *pp = qc->ap->private_data; 1213 + unsigned in_index; 1214 1214 u32 in_ptr; 1215 1215 1216 1216 if (ATA_PROT_DMA != qc->tf.protocol) { ··· 1223 1221 return ata_qc_issue_prot(qc); 1224 1222 } 1225 1223 1226 - in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 1224 + in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 1225 + in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 1227 1226 1228 - /* the req producer index should be the same as we remember it */ 1229 - WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != 1230 - pp->req_producer); 1231 1227 /* until we do queuing, the queue should be empty at this point */ 1232 - WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != 1233 - ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> 1234 - EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); 1228 + WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) 1229 + >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); 1235 1230 1236 - mv_inc_q_index(&pp->req_producer); /* now incr producer index */ 1231 + in_index = mv_inc_q_index(in_index); /* now incr producer index */ 1237 1232 1238 1233 mv_start_dma(port_mmio, pp); 1239 1234 1240 1235 
/* and write the request in pointer to kick the EDMA to life */ 1241 1236 in_ptr &= EDMA_REQ_Q_BASE_LO_MASK; 1242 - in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT; 1237 + in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT; 1243 1238 writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 1244 1239 1245 1240 return 0; ··· 1259 1260 { 1260 1261 void __iomem *port_mmio = mv_ap_base(ap); 1261 1262 struct mv_port_priv *pp = ap->private_data; 1263 + unsigned out_index; 1262 1264 u32 out_ptr; 1263 1265 u8 ata_status; 1264 1266 1265 - out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 1267 + out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 1268 + out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 1266 1269 1267 - /* the response consumer index should be the same as we remember it */ 1268 - WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != 1269 - pp->rsp_consumer); 1270 - 1271 - ata_status = pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT; 1270 + ata_status = le16_to_cpu(pp->crpb[out_index].flags) 1271 + >> CRPB_FLAG_STATUS_SHIFT; 1272 1272 1273 1273 /* increment our consumer index... 
*/ 1274 - pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); 1274 + out_index = mv_inc_q_index(out_index); 1275 1275 1276 1276 /* and, until we do NCQ, there should only be 1 CRPB waiting */ 1277 - WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> 1278 - EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != 1279 - pp->rsp_consumer); 1277 + WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) 1278 + >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); 1280 1279 1281 1280 /* write out our inc'd consumer index so EDMA knows we're caught up */ 1282 1281 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; 1283 - out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT; 1282 + out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT; 1284 1283 writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 1285 1284 1286 1285 /* Return ATA status register for completed CRPB */ ··· 1288 1291 /** 1289 1292 * mv_err_intr - Handle error interrupts on the port 1290 1293 * @ap: ATA channel to manipulate 1294 + * @reset_allowed: bool: 0 == don't trigger from reset here 1291 1295 * 1292 1296 * In most cases, just clear the interrupt and move on. However, 1293 1297 * some cases require an eDMA reset, which is done right before ··· 1299 1301 * LOCKING: 1300 1302 * Inherited from caller. 
1301 1303 */ 1302 - static void mv_err_intr(struct ata_port *ap) 1304 + static void mv_err_intr(struct ata_port *ap, int reset_allowed) 1303 1305 { 1304 1306 void __iomem *port_mmio = mv_ap_base(ap); 1305 1307 u32 edma_err_cause, serr = 0; ··· 1321 1323 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1322 1324 1323 1325 /* check for fatal here and recover if needed */ 1324 - if (EDMA_ERR_FATAL & edma_err_cause) { 1326 + if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause)) 1325 1327 mv_stop_and_reset(ap); 1326 - } 1327 1328 } 1328 1329 1329 1330 /** ··· 1371 1374 struct ata_port *ap = host_set->ports[port]; 1372 1375 struct mv_port_priv *pp = ap->private_data; 1373 1376 1374 - hard_port = port & MV_PORT_MASK; /* range 0-3 */ 1377 + hard_port = mv_hardport_from_port(port); /* range 0..3 */ 1375 1378 handled = 0; /* ensure ata_status is set if handled++ */ 1376 1379 1377 1380 /* Note that DEV_IRQ might happen spuriously during EDMA, 1378 - * and should be ignored in such cases. We could mask it, 1379 - * but it's pretty rare and may not be worth the overhead. 1381 + * and should be ignored in such cases. 1382 + * The cause of this is still under investigation. 
1380 1383 */ 1381 1384 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 1382 1385 /* EDMA: check for response queue interrupt */ ··· 1390 1393 ata_status = readb((void __iomem *) 1391 1394 ap->ioaddr.status_addr); 1392 1395 handled = 1; 1396 + /* ignore spurious intr if drive still BUSY */ 1397 + if (ata_status & ATA_BUSY) { 1398 + ata_status = 0; 1399 + handled = 0; 1400 + } 1393 1401 } 1394 1402 } 1395 1403 ··· 1408 1406 shift++; /* skip bit 8 in the HC Main IRQ reg */ 1409 1407 } 1410 1408 if ((PORT0_ERR << shift) & relevant) { 1411 - mv_err_intr(ap); 1409 + mv_err_intr(ap, 1); 1412 1410 err_mask |= AC_ERR_OTHER; 1413 1411 handled = 1; 1414 1412 } ··· 1450 1448 struct ata_host_set *host_set = dev_instance; 1451 1449 unsigned int hc, handled = 0, n_hcs; 1452 1450 void __iomem *mmio = host_set->mmio_base; 1451 + struct mv_host_priv *hpriv; 1453 1452 u32 irq_stat; 1454 1453 1455 1454 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); ··· 1472 1469 handled++; 1473 1470 } 1474 1471 } 1472 + 1473 + hpriv = host_set->private_data; 1474 + if (IS_60XX(hpriv)) { 1475 + /* deal with the interrupt coalescing bits */ 1476 + if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) { 1477 + writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO); 1478 + writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI); 1479 + writelfl(0, mmio + MV_IRQ_COAL_CAUSE); 1480 + } 1481 + } 1482 + 1475 1483 if (PCI_ERR & irq_stat) { 1476 1484 printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n", 1477 1485 readl(mmio + PCI_IRQ_CAUSE_OFS)); ··· 1881 1867 1882 1868 if (IS_60XX(hpriv)) { 1883 1869 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); 1884 - ifctl |= (1 << 12) | (1 << 7); 1870 + ifctl |= (1 << 7); /* enable gen2i speed */ 1871 + ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ 1885 1872 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); 1886 1873 } 1887 1874 ··· 2046 2031 ap->host_set->mmio_base, ap, qc, qc->scsicmd, 2047 2032 &qc->scsicmd->cmnd); 2048 2033 2049 - mv_err_intr(ap); 2034 + 
mv_err_intr(ap, 0); 2050 2035 mv_stop_and_reset(ap); 2051 2036 2052 - qc->err_mask |= AC_ERR_TIMEOUT; 2053 - ata_eh_qc_complete(qc); 2037 + WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); 2038 + if (qc->flags & ATA_QCFLAG_ACTIVE) { 2039 + qc->err_mask |= AC_ERR_TIMEOUT; 2040 + ata_eh_qc_complete(qc); 2041 + } 2054 2042 } 2055 2043 2056 2044 /** ··· 2247 2229 void __iomem *port_mmio = mv_port_base(mmio, port); 2248 2230 2249 2231 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); 2250 - ifctl |= (1 << 12); 2232 + ifctl |= (1 << 7); /* enable gen2i speed */ 2233 + ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ 2251 2234 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); 2252 2235 } 2253 2236 ··· 2349 2330 if (rc) { 2350 2331 return rc; 2351 2332 } 2333 + pci_set_master(pdev); 2352 2334 2353 2335 rc = pci_request_regions(pdev, DRV_NAME); 2354 2336 if (rc) {
+1 -1
drivers/scsi/st.c
··· 4054 4054 } 4055 4055 4056 4056 sdev_printk(KERN_WARNING, SDp, 4057 - "Attached scsi tape %s", tape_name(tpnt)); 4057 + "Attached scsi tape %s\n", tape_name(tpnt)); 4058 4058 printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B)\n", 4059 4059 tape_name(tpnt), tpnt->try_dio ? "yes" : "no", 4060 4060 queue_dma_alignment(SDp->request_queue) + 1);
+6 -3
drivers/serial/serial_core.c
··· 1907 1907 static void uart_change_pm(struct uart_state *state, int pm_state) 1908 1908 { 1909 1909 struct uart_port *port = state->port; 1910 - if (port->ops->pm) 1911 - port->ops->pm(port, pm_state, state->pm_state); 1912 - state->pm_state = pm_state; 1910 + 1911 + if (state->pm_state != pm_state) { 1912 + if (port->ops->pm) 1913 + port->ops->pm(port, pm_state, state->pm_state); 1914 + state->pm_state = pm_state; 1915 + } 1913 1916 } 1914 1917 1915 1918 int uart_suspend_port(struct uart_driver *drv, struct uart_port *port)
+1
drivers/serial/sunsu.c
··· 1730 1730 1731 1731 module_init(sunsu_probe); 1732 1732 module_exit(sunsu_exit); 1733 + MODULE_LICENSE("GPL");
+34
drivers/spi/Kconfig
··· 75 75 inexpensive battery powered microcontroller evaluation board. 76 76 This same cable can be used to flash new firmware. 77 77 78 + config SPI_MPC83xx 79 + tristate "Freescale MPC83xx SPI controller" 80 + depends on SPI_MASTER && PPC_83xx && EXPERIMENTAL 81 + select SPI_BITBANG 82 + help 83 + This enables using the Freescale MPC83xx SPI controller in master 84 + mode. 85 + 86 + Note, this driver uniquely supports the SPI controller on the MPC83xx 87 + family of PowerPC processors. The MPC83xx uses a simple set of shift 88 + registers for data (opposed to the CPM based descriptor model). 89 + 90 + config SPI_PXA2XX 91 + tristate "PXA2xx SSP SPI master" 92 + depends on SPI_MASTER && ARCH_PXA && EXPERIMENTAL 93 + help 94 + This enables using a PXA2xx SSP port as a SPI master controller. 95 + The driver can be configured to use any SSP port and additional 96 + documentation can be found a Documentation/spi/pxa2xx. 97 + 98 + config SPI_S3C24XX_GPIO 99 + tristate "Samsung S3C24XX series SPI by GPIO" 100 + depends on SPI_MASTER && ARCH_S3C2410 && SPI_BITBANG && EXPERIMENTAL 101 + help 102 + SPI driver for Samsung S3C24XX series ARM SoCs using 103 + GPIO lines to provide the SPI bus. This can be used where 104 + the inbuilt hardware cannot provide the transfer mode, or 105 + where the board is using non hardware connected pins. 78 106 # 79 107 # Add new SPI master controllers in alphabetical order above this line 80 108 # 81 109 110 + 111 + config SPI_S3C24XX 112 + tristate "Samsung S3C24XX series SPI" 113 + depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL 114 + help 115 + SPI driver for Samsung S3C24XX series ARM SoCs 82 116 83 117 # 84 118 # There are lots of SPI device types, with sensors and memory
+4
drivers/spi/Makefile
··· 13 13 # SPI master controller drivers (bus) 14 14 obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o 15 15 obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o 16 + obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o 17 + obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o 18 + obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o 19 + obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o 16 20 # ... add above this line ... 17 21 18 22 # SPI protocol drivers (device/link on bus)
+1486
drivers/spi/pxa2xx_spi.c
··· 1 + /* 2 + * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write to the Free Software 16 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 17 + */ 18 + 19 + #include <linux/init.h> 20 + #include <linux/module.h> 21 + #include <linux/device.h> 22 + #include <linux/ioport.h> 23 + #include <linux/errno.h> 24 + #include <linux/interrupt.h> 25 + #include <linux/platform_device.h> 26 + #include <linux/dma-mapping.h> 27 + #include <linux/spi/spi.h> 28 + #include <linux/workqueue.h> 29 + #include <linux/errno.h> 30 + #include <linux/delay.h> 31 + 32 + #include <asm/io.h> 33 + #include <asm/irq.h> 34 + #include <asm/hardware.h> 35 + #include <asm/delay.h> 36 + #include <asm/dma.h> 37 + 38 + #include <asm/arch/hardware.h> 39 + #include <asm/arch/pxa-regs.h> 40 + #include <asm/arch/pxa2xx_spi.h> 41 + 42 + MODULE_AUTHOR("Stephen Street"); 43 + MODULE_DESCRIPTION("PXA2xx SSP SPI Contoller"); 44 + MODULE_LICENSE("GPL"); 45 + 46 + #define MAX_BUSES 3 47 + 48 + #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) 49 + #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) 50 + #define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0) 51 + 52 + #define DEFINE_SSP_REG(reg, off) \ 53 + static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \ 54 + static inline void write_##reg(u32 v, void *p) 
{ __raw_writel(v, p + (off)); } 55 + 56 + DEFINE_SSP_REG(SSCR0, 0x00) 57 + DEFINE_SSP_REG(SSCR1, 0x04) 58 + DEFINE_SSP_REG(SSSR, 0x08) 59 + DEFINE_SSP_REG(SSITR, 0x0c) 60 + DEFINE_SSP_REG(SSDR, 0x10) 61 + DEFINE_SSP_REG(SSTO, 0x28) 62 + DEFINE_SSP_REG(SSPSP, 0x2c) 63 + 64 + #define START_STATE ((void*)0) 65 + #define RUNNING_STATE ((void*)1) 66 + #define DONE_STATE ((void*)2) 67 + #define ERROR_STATE ((void*)-1) 68 + 69 + #define QUEUE_RUNNING 0 70 + #define QUEUE_STOPPED 1 71 + 72 + struct driver_data { 73 + /* Driver model hookup */ 74 + struct platform_device *pdev; 75 + 76 + /* SPI framework hookup */ 77 + enum pxa_ssp_type ssp_type; 78 + struct spi_master *master; 79 + 80 + /* PXA hookup */ 81 + struct pxa2xx_spi_master *master_info; 82 + 83 + /* DMA setup stuff */ 84 + int rx_channel; 85 + int tx_channel; 86 + u32 *null_dma_buf; 87 + 88 + /* SSP register addresses */ 89 + void *ioaddr; 90 + u32 ssdr_physical; 91 + 92 + /* SSP masks*/ 93 + u32 dma_cr1; 94 + u32 int_cr1; 95 + u32 clear_sr; 96 + u32 mask_sr; 97 + 98 + /* Driver message queue */ 99 + struct workqueue_struct *workqueue; 100 + struct work_struct pump_messages; 101 + spinlock_t lock; 102 + struct list_head queue; 103 + int busy; 104 + int run; 105 + 106 + /* Message Transfer pump */ 107 + struct tasklet_struct pump_transfers; 108 + 109 + /* Current message transfer state info */ 110 + struct spi_message* cur_msg; 111 + struct spi_transfer* cur_transfer; 112 + struct chip_data *cur_chip; 113 + size_t len; 114 + void *tx; 115 + void *tx_end; 116 + void *rx; 117 + void *rx_end; 118 + int dma_mapped; 119 + dma_addr_t rx_dma; 120 + dma_addr_t tx_dma; 121 + size_t rx_map_len; 122 + size_t tx_map_len; 123 + u8 n_bytes; 124 + u32 dma_width; 125 + int cs_change; 126 + void (*write)(struct driver_data *drv_data); 127 + void (*read)(struct driver_data *drv_data); 128 + irqreturn_t (*transfer_handler)(struct driver_data *drv_data); 129 + void (*cs_control)(u32 command); 130 + }; 131 + 132 + struct chip_data { 
133 + u32 cr0; 134 + u32 cr1; 135 + u32 to; 136 + u32 psp; 137 + u32 timeout; 138 + u8 n_bytes; 139 + u32 dma_width; 140 + u32 dma_burst_size; 141 + u32 threshold; 142 + u32 dma_threshold; 143 + u8 enable_dma; 144 + u8 bits_per_word; 145 + u32 speed_hz; 146 + void (*write)(struct driver_data *drv_data); 147 + void (*read)(struct driver_data *drv_data); 148 + void (*cs_control)(u32 command); 149 + }; 150 + 151 + static void pump_messages(void *data); 152 + 153 + static int flush(struct driver_data *drv_data) 154 + { 155 + unsigned long limit = loops_per_jiffy << 1; 156 + 157 + void *reg = drv_data->ioaddr; 158 + 159 + do { 160 + while (read_SSSR(reg) & SSSR_RNE) { 161 + read_SSDR(reg); 162 + } 163 + } while ((read_SSSR(reg) & SSSR_BSY) && limit--); 164 + write_SSSR(SSSR_ROR, reg); 165 + 166 + return limit; 167 + } 168 + 169 + static void restore_state(struct driver_data *drv_data) 170 + { 171 + void *reg = drv_data->ioaddr; 172 + 173 + /* Clear status and disable clock */ 174 + write_SSSR(drv_data->clear_sr, reg); 175 + write_SSCR0(drv_data->cur_chip->cr0 & ~SSCR0_SSE, reg); 176 + 177 + /* Load the registers */ 178 + write_SSCR1(drv_data->cur_chip->cr1, reg); 179 + write_SSCR0(drv_data->cur_chip->cr0, reg); 180 + if (drv_data->ssp_type != PXA25x_SSP) { 181 + write_SSTO(0, reg); 182 + write_SSPSP(drv_data->cur_chip->psp, reg); 183 + } 184 + } 185 + 186 + static void null_cs_control(u32 command) 187 + { 188 + } 189 + 190 + static void null_writer(struct driver_data *drv_data) 191 + { 192 + void *reg = drv_data->ioaddr; 193 + u8 n_bytes = drv_data->n_bytes; 194 + 195 + while ((read_SSSR(reg) & SSSR_TNF) 196 + && (drv_data->tx < drv_data->tx_end)) { 197 + write_SSDR(0, reg); 198 + drv_data->tx += n_bytes; 199 + } 200 + } 201 + 202 + static void null_reader(struct driver_data *drv_data) 203 + { 204 + void *reg = drv_data->ioaddr; 205 + u8 n_bytes = drv_data->n_bytes; 206 + 207 + while ((read_SSSR(reg) & SSSR_RNE) 208 + && (drv_data->rx < drv_data->rx_end)) { 209 + 
read_SSDR(reg); 210 + drv_data->rx += n_bytes; 211 + } 212 + } 213 + 214 + static void u8_writer(struct driver_data *drv_data) 215 + { 216 + void *reg = drv_data->ioaddr; 217 + 218 + while ((read_SSSR(reg) & SSSR_TNF) 219 + && (drv_data->tx < drv_data->tx_end)) { 220 + write_SSDR(*(u8 *)(drv_data->tx), reg); 221 + ++drv_data->tx; 222 + } 223 + } 224 + 225 + static void u8_reader(struct driver_data *drv_data) 226 + { 227 + void *reg = drv_data->ioaddr; 228 + 229 + while ((read_SSSR(reg) & SSSR_RNE) 230 + && (drv_data->rx < drv_data->rx_end)) { 231 + *(u8 *)(drv_data->rx) = read_SSDR(reg); 232 + ++drv_data->rx; 233 + } 234 + } 235 + 236 + static void u16_writer(struct driver_data *drv_data) 237 + { 238 + void *reg = drv_data->ioaddr; 239 + 240 + while ((read_SSSR(reg) & SSSR_TNF) 241 + && (drv_data->tx < drv_data->tx_end)) { 242 + write_SSDR(*(u16 *)(drv_data->tx), reg); 243 + drv_data->tx += 2; 244 + } 245 + } 246 + 247 + static void u16_reader(struct driver_data *drv_data) 248 + { 249 + void *reg = drv_data->ioaddr; 250 + 251 + while ((read_SSSR(reg) & SSSR_RNE) 252 + && (drv_data->rx < drv_data->rx_end)) { 253 + *(u16 *)(drv_data->rx) = read_SSDR(reg); 254 + drv_data->rx += 2; 255 + } 256 + } 257 + static void u32_writer(struct driver_data *drv_data) 258 + { 259 + void *reg = drv_data->ioaddr; 260 + 261 + while ((read_SSSR(reg) & SSSR_TNF) 262 + && (drv_data->tx < drv_data->tx_end)) { 263 + write_SSDR(*(u32 *)(drv_data->tx), reg); 264 + drv_data->tx += 4; 265 + } 266 + } 267 + 268 + static void u32_reader(struct driver_data *drv_data) 269 + { 270 + void *reg = drv_data->ioaddr; 271 + 272 + while ((read_SSSR(reg) & SSSR_RNE) 273 + && (drv_data->rx < drv_data->rx_end)) { 274 + *(u32 *)(drv_data->rx) = read_SSDR(reg); 275 + drv_data->rx += 4; 276 + } 277 + } 278 + 279 + static void *next_transfer(struct driver_data *drv_data) 280 + { 281 + struct spi_message *msg = drv_data->cur_msg; 282 + struct spi_transfer *trans = drv_data->cur_transfer; 283 + 284 + /* Move to 
next transfer */ 285 + if (trans->transfer_list.next != &msg->transfers) { 286 + drv_data->cur_transfer = 287 + list_entry(trans->transfer_list.next, 288 + struct spi_transfer, 289 + transfer_list); 290 + return RUNNING_STATE; 291 + } else 292 + return DONE_STATE; 293 + } 294 + 295 + static int map_dma_buffers(struct driver_data *drv_data) 296 + { 297 + struct spi_message *msg = drv_data->cur_msg; 298 + struct device *dev = &msg->spi->dev; 299 + 300 + if (!drv_data->cur_chip->enable_dma) 301 + return 0; 302 + 303 + if (msg->is_dma_mapped) 304 + return drv_data->rx_dma && drv_data->tx_dma; 305 + 306 + if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) 307 + return 0; 308 + 309 + /* Modify setup if rx buffer is null */ 310 + if (drv_data->rx == NULL) { 311 + *drv_data->null_dma_buf = 0; 312 + drv_data->rx = drv_data->null_dma_buf; 313 + drv_data->rx_map_len = 4; 314 + } else 315 + drv_data->rx_map_len = drv_data->len; 316 + 317 + 318 + /* Modify setup if tx buffer is null */ 319 + if (drv_data->tx == NULL) { 320 + *drv_data->null_dma_buf = 0; 321 + drv_data->tx = drv_data->null_dma_buf; 322 + drv_data->tx_map_len = 4; 323 + } else 324 + drv_data->tx_map_len = drv_data->len; 325 + 326 + /* Stream map the rx buffer */ 327 + drv_data->rx_dma = dma_map_single(dev, drv_data->rx, 328 + drv_data->rx_map_len, 329 + DMA_FROM_DEVICE); 330 + if (dma_mapping_error(drv_data->rx_dma)) 331 + return 0; 332 + 333 + /* Stream map the tx buffer */ 334 + drv_data->tx_dma = dma_map_single(dev, drv_data->tx, 335 + drv_data->tx_map_len, 336 + DMA_TO_DEVICE); 337 + 338 + if (dma_mapping_error(drv_data->tx_dma)) { 339 + dma_unmap_single(dev, drv_data->rx_dma, 340 + drv_data->rx_map_len, DMA_FROM_DEVICE); 341 + return 0; 342 + } 343 + 344 + return 1; 345 + } 346 + 347 + static void unmap_dma_buffers(struct driver_data *drv_data) 348 + { 349 + struct device *dev; 350 + 351 + if (!drv_data->dma_mapped) 352 + return; 353 + 354 + if (!drv_data->cur_msg->is_dma_mapped) { 355 + dev 
= &drv_data->cur_msg->spi->dev; 356 + dma_unmap_single(dev, drv_data->rx_dma, 357 + drv_data->rx_map_len, DMA_FROM_DEVICE); 358 + dma_unmap_single(dev, drv_data->tx_dma, 359 + drv_data->tx_map_len, DMA_TO_DEVICE); 360 + } 361 + 362 + drv_data->dma_mapped = 0; 363 + } 364 + 365 + /* caller already set message->status; dma and pio irqs are blocked */ 366 + static void giveback(struct driver_data *drv_data) 367 + { 368 + struct spi_transfer* last_transfer; 369 + unsigned long flags; 370 + struct spi_message *msg; 371 + 372 + spin_lock_irqsave(&drv_data->lock, flags); 373 + msg = drv_data->cur_msg; 374 + drv_data->cur_msg = NULL; 375 + drv_data->cur_transfer = NULL; 376 + drv_data->cur_chip = NULL; 377 + queue_work(drv_data->workqueue, &drv_data->pump_messages); 378 + spin_unlock_irqrestore(&drv_data->lock, flags); 379 + 380 + last_transfer = list_entry(msg->transfers.prev, 381 + struct spi_transfer, 382 + transfer_list); 383 + 384 + if (!last_transfer->cs_change) 385 + drv_data->cs_control(PXA2XX_CS_DEASSERT); 386 + 387 + msg->state = NULL; 388 + if (msg->complete) 389 + msg->complete(msg->context); 390 + } 391 + 392 + static int wait_ssp_rx_stall(void *ioaddr) 393 + { 394 + unsigned long limit = loops_per_jiffy << 1; 395 + 396 + while ((read_SSSR(ioaddr) & SSSR_BSY) && limit--) 397 + cpu_relax(); 398 + 399 + return limit; 400 + } 401 + 402 + static int wait_dma_channel_stop(int channel) 403 + { 404 + unsigned long limit = loops_per_jiffy << 1; 405 + 406 + while (!(DCSR(channel) & DCSR_STOPSTATE) && limit--) 407 + cpu_relax(); 408 + 409 + return limit; 410 + } 411 + 412 + static void dma_handler(int channel, void *data, struct pt_regs *regs) 413 + { 414 + struct driver_data *drv_data = data; 415 + struct spi_message *msg = drv_data->cur_msg; 416 + void *reg = drv_data->ioaddr; 417 + u32 irq_status = DCSR(channel) & DMA_INT_MASK; 418 + u32 trailing_sssr = 0; 419 + 420 + if (irq_status & DCSR_BUSERR) { 421 + 422 + /* Disable interrupts, clear status and reset DMA */ 423 
+ write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 424 + write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 425 + if (drv_data->ssp_type != PXA25x_SSP) 426 + write_SSTO(0, reg); 427 + write_SSSR(drv_data->clear_sr, reg); 428 + DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 429 + DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 430 + 431 + if (flush(drv_data) == 0) 432 + dev_err(&drv_data->pdev->dev, 433 + "dma_handler: flush fail\n"); 434 + 435 + unmap_dma_buffers(drv_data); 436 + 437 + if (channel == drv_data->tx_channel) 438 + dev_err(&drv_data->pdev->dev, 439 + "dma_handler: bad bus address on " 440 + "tx channel %d, source %x target = %x\n", 441 + channel, DSADR(channel), DTADR(channel)); 442 + else 443 + dev_err(&drv_data->pdev->dev, 444 + "dma_handler: bad bus address on " 445 + "rx channel %d, source %x target = %x\n", 446 + channel, DSADR(channel), DTADR(channel)); 447 + 448 + msg->state = ERROR_STATE; 449 + tasklet_schedule(&drv_data->pump_transfers); 450 + } 451 + 452 + /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */ 453 + if ((drv_data->ssp_type == PXA25x_SSP) 454 + && (channel == drv_data->tx_channel) 455 + && (irq_status & DCSR_ENDINTR)) { 456 + 457 + /* Wait for rx to stall */ 458 + if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) 459 + dev_err(&drv_data->pdev->dev, 460 + "dma_handler: ssp rx stall failed\n"); 461 + 462 + /* Clear and disable interrupts on SSP and DMA channels*/ 463 + write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 464 + write_SSSR(drv_data->clear_sr, reg); 465 + DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 466 + DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 467 + if (wait_dma_channel_stop(drv_data->rx_channel) == 0) 468 + dev_err(&drv_data->pdev->dev, 469 + "dma_handler: dma rx channel stop failed\n"); 470 + 471 + unmap_dma_buffers(drv_data); 472 + 473 + /* Read trailing bytes */ 474 + /* Calculate number of trailing bytes, read them */ 475 + trailing_sssr = read_SSSR(reg); 476 + if ((trailing_sssr & 
0xf008) != 0xf000) { 477 + drv_data->rx = drv_data->rx_end - 478 + (((trailing_sssr >> 12) & 0x0f) + 1); 479 + drv_data->read(drv_data); 480 + } 481 + msg->actual_length += drv_data->len; 482 + 483 + /* Release chip select if requested, transfer delays are 484 + * handled in pump_transfers */ 485 + if (drv_data->cs_change) 486 + drv_data->cs_control(PXA2XX_CS_DEASSERT); 487 + 488 + /* Move to next transfer */ 489 + msg->state = next_transfer(drv_data); 490 + 491 + /* Schedule transfer tasklet */ 492 + tasklet_schedule(&drv_data->pump_transfers); 493 + } 494 + } 495 + 496 + static irqreturn_t dma_transfer(struct driver_data *drv_data) 497 + { 498 + u32 irq_status; 499 + u32 trailing_sssr = 0; 500 + struct spi_message *msg = drv_data->cur_msg; 501 + void *reg = drv_data->ioaddr; 502 + 503 + irq_status = read_SSSR(reg) & drv_data->mask_sr; 504 + if (irq_status & SSSR_ROR) { 505 + /* Clear and disable interrupts on SSP and DMA channels*/ 506 + write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 507 + write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 508 + if (drv_data->ssp_type != PXA25x_SSP) 509 + write_SSTO(0, reg); 510 + write_SSSR(drv_data->clear_sr, reg); 511 + DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 512 + DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 513 + unmap_dma_buffers(drv_data); 514 + 515 + if (flush(drv_data) == 0) 516 + dev_err(&drv_data->pdev->dev, 517 + "dma_transfer: flush fail\n"); 518 + 519 + dev_warn(&drv_data->pdev->dev, "dma_transfer: fifo overun\n"); 520 + 521 + drv_data->cur_msg->state = ERROR_STATE; 522 + tasklet_schedule(&drv_data->pump_transfers); 523 + 524 + return IRQ_HANDLED; 525 + } 526 + 527 + /* Check for false positive timeout */ 528 + if ((irq_status & SSSR_TINT) && DCSR(drv_data->tx_channel) & DCSR_RUN) { 529 + write_SSSR(SSSR_TINT, reg); 530 + return IRQ_HANDLED; 531 + } 532 + 533 + if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) { 534 + 535 + /* Clear and disable interrupts on SSP and DMA channels*/ 536 
+ write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 537 + if (drv_data->ssp_type != PXA25x_SSP) 538 + write_SSTO(0, reg); 539 + write_SSSR(drv_data->clear_sr, reg); 540 + DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 541 + DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 542 + 543 + if (wait_dma_channel_stop(drv_data->rx_channel) == 0) 544 + dev_err(&drv_data->pdev->dev, 545 + "dma_transfer: dma rx channel stop failed\n"); 546 + 547 + if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) 548 + dev_err(&drv_data->pdev->dev, 549 + "dma_transfer: ssp rx stall failed\n"); 550 + 551 + unmap_dma_buffers(drv_data); 552 + 553 + /* Calculate number of trailing bytes, read them */ 554 + trailing_sssr = read_SSSR(reg); 555 + if ((trailing_sssr & 0xf008) != 0xf000) { 556 + drv_data->rx = drv_data->rx_end - 557 + (((trailing_sssr >> 12) & 0x0f) + 1); 558 + drv_data->read(drv_data); 559 + } 560 + msg->actual_length += drv_data->len; 561 + 562 + /* Release chip select if requested, transfer delays are 563 + * handled in pump_transfers */ 564 + if (drv_data->cs_change) 565 + drv_data->cs_control(PXA2XX_CS_DEASSERT); 566 + 567 + /* Move to next transfer */ 568 + msg->state = next_transfer(drv_data); 569 + 570 + /* Schedule transfer tasklet */ 571 + tasklet_schedule(&drv_data->pump_transfers); 572 + 573 + return IRQ_HANDLED; 574 + } 575 + 576 + /* Opps problem detected */ 577 + return IRQ_NONE; 578 + } 579 + 580 + static irqreturn_t interrupt_transfer(struct driver_data *drv_data) 581 + { 582 + struct spi_message *msg = drv_data->cur_msg; 583 + void *reg = drv_data->ioaddr; 584 + unsigned long limit = loops_per_jiffy << 1; 585 + u32 irq_status; 586 + u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ? 
587 + drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS; 588 + 589 + while ((irq_status = read_SSSR(reg) & irq_mask)) { 590 + 591 + if (irq_status & SSSR_ROR) { 592 + 593 + /* Clear and disable interrupts */ 594 + write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 595 + write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); 596 + if (drv_data->ssp_type != PXA25x_SSP) 597 + write_SSTO(0, reg); 598 + write_SSSR(drv_data->clear_sr, reg); 599 + 600 + if (flush(drv_data) == 0) 601 + dev_err(&drv_data->pdev->dev, 602 + "interrupt_transfer: flush fail\n"); 603 + 604 + /* Stop the SSP */ 605 + 606 + dev_warn(&drv_data->pdev->dev, 607 + "interrupt_transfer: fifo overun\n"); 608 + 609 + msg->state = ERROR_STATE; 610 + tasklet_schedule(&drv_data->pump_transfers); 611 + 612 + return IRQ_HANDLED; 613 + } 614 + 615 + /* Look for false positive timeout */ 616 + if ((irq_status & SSSR_TINT) 617 + && (drv_data->rx < drv_data->rx_end)) 618 + write_SSSR(SSSR_TINT, reg); 619 + 620 + /* Pump data */ 621 + drv_data->read(drv_data); 622 + drv_data->write(drv_data); 623 + 624 + if (drv_data->tx == drv_data->tx_end) { 625 + /* Disable tx interrupt */ 626 + write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg); 627 + irq_mask = drv_data->mask_sr & ~SSSR_TFS; 628 + 629 + /* PXA25x_SSP has no timeout, read trailing bytes */ 630 + if (drv_data->ssp_type == PXA25x_SSP) { 631 + while ((read_SSSR(reg) & SSSR_BSY) && limit--) 632 + drv_data->read(drv_data); 633 + 634 + if (limit == 0) 635 + dev_err(&drv_data->pdev->dev, 636 + "interrupt_transfer: " 637 + "trailing byte read failed\n"); 638 + } 639 + } 640 + 641 + if ((irq_status & SSSR_TINT) 642 + || (drv_data->rx == drv_data->rx_end)) { 643 + 644 + /* Clear timeout */ 645 + write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); 646 + if (drv_data->ssp_type != PXA25x_SSP) 647 + write_SSTO(0, reg); 648 + write_SSSR(drv_data->clear_sr, reg); 649 + 650 + /* Update total byte transfered */ 651 + msg->actual_length += drv_data->len; 652 + 653 + /* Release chip 
select if requested, transfer delays are 654 + * handled in pump_transfers */ 655 + if (drv_data->cs_change) 656 + drv_data->cs_control(PXA2XX_CS_DEASSERT); 657 + 658 + /* Move to next transfer */ 659 + msg->state = next_transfer(drv_data); 660 + 661 + /* Schedule transfer tasklet */ 662 + tasklet_schedule(&drv_data->pump_transfers); 663 + } 664 + } 665 + 666 + /* We did something */ 667 + return IRQ_HANDLED; 668 + } 669 + 670 + static irqreturn_t ssp_int(int irq, void *dev_id, struct pt_regs *regs) 671 + { 672 + struct driver_data *drv_data = (struct driver_data *)dev_id; 673 + void *reg = drv_data->ioaddr; 674 + 675 + if (!drv_data->cur_msg) { 676 + 677 + write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 678 + write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); 679 + if (drv_data->ssp_type != PXA25x_SSP) 680 + write_SSTO(0, reg); 681 + write_SSSR(drv_data->clear_sr, reg); 682 + 683 + dev_err(&drv_data->pdev->dev, "bad message state " 684 + "in interrupt handler"); 685 + 686 + /* Never fail */ 687 + return IRQ_HANDLED; 688 + } 689 + 690 + return drv_data->transfer_handler(drv_data); 691 + } 692 + 693 + static void pump_transfers(unsigned long data) 694 + { 695 + struct driver_data *drv_data = (struct driver_data *)data; 696 + struct spi_message *message = NULL; 697 + struct spi_transfer *transfer = NULL; 698 + struct spi_transfer *previous = NULL; 699 + struct chip_data *chip = NULL; 700 + void *reg = drv_data->ioaddr; 701 + u32 clk_div = 0; 702 + u8 bits = 0; 703 + u32 speed = 0; 704 + u32 cr0; 705 + 706 + /* Get current state information */ 707 + message = drv_data->cur_msg; 708 + transfer = drv_data->cur_transfer; 709 + chip = drv_data->cur_chip; 710 + 711 + /* Handle for abort */ 712 + if (message->state == ERROR_STATE) { 713 + message->status = -EIO; 714 + giveback(drv_data); 715 + return; 716 + } 717 + 718 + /* Handle end of message */ 719 + if (message->state == DONE_STATE) { 720 + message->status = 0; 721 + giveback(drv_data); 722 + return; 723 + } 724 + 
725 + /* Delay if requested at end of transfer*/ 726 + if (message->state == RUNNING_STATE) { 727 + previous = list_entry(transfer->transfer_list.prev, 728 + struct spi_transfer, 729 + transfer_list); 730 + if (previous->delay_usecs) 731 + udelay(previous->delay_usecs); 732 + } 733 + 734 + /* Setup the transfer state based on the type of transfer */ 735 + if (flush(drv_data) == 0) { 736 + dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); 737 + message->status = -EIO; 738 + giveback(drv_data); 739 + return; 740 + } 741 + drv_data->n_bytes = chip->n_bytes; 742 + drv_data->dma_width = chip->dma_width; 743 + drv_data->cs_control = chip->cs_control; 744 + drv_data->tx = (void *)transfer->tx_buf; 745 + drv_data->tx_end = drv_data->tx + transfer->len; 746 + drv_data->rx = transfer->rx_buf; 747 + drv_data->rx_end = drv_data->rx + transfer->len; 748 + drv_data->rx_dma = transfer->rx_dma; 749 + drv_data->tx_dma = transfer->tx_dma; 750 + drv_data->len = transfer->len; 751 + drv_data->write = drv_data->tx ? chip->write : null_writer; 752 + drv_data->read = drv_data->rx ? chip->read : null_reader; 753 + drv_data->cs_change = transfer->cs_change; 754 + 755 + /* Change speed and bit per word on a per transfer */ 756 + if (transfer->speed_hz || transfer->bits_per_word) { 757 + 758 + /* Disable clock */ 759 + write_SSCR0(chip->cr0 & ~SSCR0_SSE, reg); 760 + cr0 = chip->cr0; 761 + bits = chip->bits_per_word; 762 + speed = chip->speed_hz; 763 + 764 + if (transfer->speed_hz) 765 + speed = transfer->speed_hz; 766 + 767 + if (transfer->bits_per_word) 768 + bits = transfer->bits_per_word; 769 + 770 + if (reg == SSP1_VIRT) 771 + clk_div = SSP1_SerClkDiv(speed); 772 + else if (reg == SSP2_VIRT) 773 + clk_div = SSP2_SerClkDiv(speed); 774 + else if (reg == SSP3_VIRT) 775 + clk_div = SSP3_SerClkDiv(speed); 776 + 777 + if (bits <= 8) { 778 + drv_data->n_bytes = 1; 779 + drv_data->dma_width = DCMD_WIDTH1; 780 + drv_data->read = drv_data->read != null_reader ? 
781 + u8_reader : null_reader; 782 + drv_data->write = drv_data->write != null_writer ? 783 + u8_writer : null_writer; 784 + } else if (bits <= 16) { 785 + drv_data->n_bytes = 2; 786 + drv_data->dma_width = DCMD_WIDTH2; 787 + drv_data->read = drv_data->read != null_reader ? 788 + u16_reader : null_reader; 789 + drv_data->write = drv_data->write != null_writer ? 790 + u16_writer : null_writer; 791 + } else if (bits <= 32) { 792 + drv_data->n_bytes = 4; 793 + drv_data->dma_width = DCMD_WIDTH4; 794 + drv_data->read = drv_data->read != null_reader ? 795 + u32_reader : null_reader; 796 + drv_data->write = drv_data->write != null_writer ? 797 + u32_writer : null_writer; 798 + } 799 + 800 + cr0 = clk_div 801 + | SSCR0_Motorola 802 + | SSCR0_DataSize(bits > 16 ? bits - 16 : bits) 803 + | SSCR0_SSE 804 + | (bits > 16 ? SSCR0_EDSS : 0); 805 + 806 + /* Start it back up */ 807 + write_SSCR0(cr0, reg); 808 + } 809 + 810 + message->state = RUNNING_STATE; 811 + 812 + /* Try to map dma buffer and do a dma transfer if successful */ 813 + if ((drv_data->dma_mapped = map_dma_buffers(drv_data))) { 814 + 815 + /* Ensure we have the correct interrupt handler */ 816 + drv_data->transfer_handler = dma_transfer; 817 + 818 + /* Setup rx DMA Channel */ 819 + DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 820 + DSADR(drv_data->rx_channel) = drv_data->ssdr_physical; 821 + DTADR(drv_data->rx_channel) = drv_data->rx_dma; 822 + if (drv_data->rx == drv_data->null_dma_buf) 823 + /* No target address increment */ 824 + DCMD(drv_data->rx_channel) = DCMD_FLOWSRC 825 + | drv_data->dma_width 826 + | chip->dma_burst_size 827 + | drv_data->len; 828 + else 829 + DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR 830 + | DCMD_FLOWSRC 831 + | drv_data->dma_width 832 + | chip->dma_burst_size 833 + | drv_data->len; 834 + 835 + /* Setup tx DMA Channel */ 836 + DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 837 + DSADR(drv_data->tx_channel) = drv_data->tx_dma; 838 + DTADR(drv_data->tx_channel) = 
drv_data->ssdr_physical; 839 + if (drv_data->tx == drv_data->null_dma_buf) 840 + /* No source address increment */ 841 + DCMD(drv_data->tx_channel) = DCMD_FLOWTRG 842 + | drv_data->dma_width 843 + | chip->dma_burst_size 844 + | drv_data->len; 845 + else 846 + DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR 847 + | DCMD_FLOWTRG 848 + | drv_data->dma_width 849 + | chip->dma_burst_size 850 + | drv_data->len; 851 + 852 + /* Enable dma end irqs on SSP to detect end of transfer */ 853 + if (drv_data->ssp_type == PXA25x_SSP) 854 + DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; 855 + 856 + /* Fix me, need to handle cs polarity */ 857 + drv_data->cs_control(PXA2XX_CS_ASSERT); 858 + 859 + /* Go baby, go */ 860 + write_SSSR(drv_data->clear_sr, reg); 861 + DCSR(drv_data->rx_channel) |= DCSR_RUN; 862 + DCSR(drv_data->tx_channel) |= DCSR_RUN; 863 + if (drv_data->ssp_type != PXA25x_SSP) 864 + write_SSTO(chip->timeout, reg); 865 + write_SSCR1(chip->cr1 866 + | chip->dma_threshold 867 + | drv_data->dma_cr1, 868 + reg); 869 + } else { 870 + /* Ensure we have the correct interrupt handler */ 871 + drv_data->transfer_handler = interrupt_transfer; 872 + 873 + /* Fix me, need to handle cs polarity */ 874 + drv_data->cs_control(PXA2XX_CS_ASSERT); 875 + 876 + /* Go baby, go */ 877 + write_SSSR(drv_data->clear_sr, reg); 878 + if (drv_data->ssp_type != PXA25x_SSP) 879 + write_SSTO(chip->timeout, reg); 880 + write_SSCR1(chip->cr1 881 + | chip->threshold 882 + | drv_data->int_cr1, 883 + reg); 884 + } 885 + } 886 + 887 + static void pump_messages(void *data) 888 + { 889 + struct driver_data *drv_data = data; 890 + unsigned long flags; 891 + 892 + /* Lock queue and check for queue work */ 893 + spin_lock_irqsave(&drv_data->lock, flags); 894 + if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { 895 + drv_data->busy = 0; 896 + spin_unlock_irqrestore(&drv_data->lock, flags); 897 + return; 898 + } 899 + 900 + /* Make sure we are not already running a message */ 901 + if 
(drv_data->cur_msg) { 902 + spin_unlock_irqrestore(&drv_data->lock, flags); 903 + return; 904 + } 905 + 906 + /* Extract head of queue */ 907 + drv_data->cur_msg = list_entry(drv_data->queue.next, 908 + struct spi_message, queue); 909 + list_del_init(&drv_data->cur_msg->queue); 910 + 911 + /* Initial message state*/ 912 + drv_data->cur_msg->state = START_STATE; 913 + drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, 914 + struct spi_transfer, 915 + transfer_list); 916 + 917 + /* Setup the SSP using the per chip configuration */ 918 + drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); 919 + restore_state(drv_data); 920 + 921 + /* Mark as busy and launch transfers */ 922 + tasklet_schedule(&drv_data->pump_transfers); 923 + 924 + drv_data->busy = 1; 925 + spin_unlock_irqrestore(&drv_data->lock, flags); 926 + } 927 + 928 + static int transfer(struct spi_device *spi, struct spi_message *msg) 929 + { 930 + struct driver_data *drv_data = spi_master_get_devdata(spi->master); 931 + unsigned long flags; 932 + 933 + spin_lock_irqsave(&drv_data->lock, flags); 934 + 935 + if (drv_data->run == QUEUE_STOPPED) { 936 + spin_unlock_irqrestore(&drv_data->lock, flags); 937 + return -ESHUTDOWN; 938 + } 939 + 940 + msg->actual_length = 0; 941 + msg->status = -EINPROGRESS; 942 + msg->state = START_STATE; 943 + 944 + list_add_tail(&msg->queue, &drv_data->queue); 945 + 946 + if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) 947 + queue_work(drv_data->workqueue, &drv_data->pump_messages); 948 + 949 + spin_unlock_irqrestore(&drv_data->lock, flags); 950 + 951 + return 0; 952 + } 953 + 954 + static int setup(struct spi_device *spi) 955 + { 956 + struct pxa2xx_spi_chip *chip_info = NULL; 957 + struct chip_data *chip; 958 + struct driver_data *drv_data = spi_master_get_devdata(spi->master); 959 + unsigned int clk_div; 960 + 961 + if (!spi->bits_per_word) 962 + spi->bits_per_word = 8; 963 + 964 + if (drv_data->ssp_type != PXA25x_SSP 965 + && 
(spi->bits_per_word < 4 || spi->bits_per_word > 32)) 966 + return -EINVAL; 967 + else if (spi->bits_per_word < 4 || spi->bits_per_word > 16) 968 + return -EINVAL; 969 + 970 + /* Only alloc (or use chip_info) on first setup */ 971 + chip = spi_get_ctldata(spi); 972 + if (chip == NULL) { 973 + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); 974 + if (!chip) 975 + return -ENOMEM; 976 + 977 + chip->cs_control = null_cs_control; 978 + chip->enable_dma = 0; 979 + chip->timeout = SSP_TIMEOUT(1000); 980 + chip->threshold = SSCR1_RxTresh(1) | SSCR1_TxTresh(1); 981 + chip->dma_burst_size = drv_data->master_info->enable_dma ? 982 + DCMD_BURST8 : 0; 983 + 984 + chip_info = spi->controller_data; 985 + } 986 + 987 + /* chip_info isn't always needed */ 988 + if (chip_info) { 989 + if (chip_info->cs_control) 990 + chip->cs_control = chip_info->cs_control; 991 + 992 + chip->timeout = SSP_TIMEOUT(chip_info->timeout_microsecs); 993 + 994 + chip->threshold = SSCR1_RxTresh(chip_info->rx_threshold) 995 + | SSCR1_TxTresh(chip_info->tx_threshold); 996 + 997 + chip->enable_dma = chip_info->dma_burst_size != 0 998 + && drv_data->master_info->enable_dma; 999 + chip->dma_threshold = 0; 1000 + 1001 + if (chip->enable_dma) { 1002 + if (chip_info->dma_burst_size <= 8) { 1003 + chip->dma_threshold = SSCR1_RxTresh(8) 1004 + | SSCR1_TxTresh(8); 1005 + chip->dma_burst_size = DCMD_BURST8; 1006 + } else if (chip_info->dma_burst_size <= 16) { 1007 + chip->dma_threshold = SSCR1_RxTresh(16) 1008 + | SSCR1_TxTresh(16); 1009 + chip->dma_burst_size = DCMD_BURST16; 1010 + } else { 1011 + chip->dma_threshold = SSCR1_RxTresh(32) 1012 + | SSCR1_TxTresh(32); 1013 + chip->dma_burst_size = DCMD_BURST32; 1014 + } 1015 + } 1016 + 1017 + 1018 + if (chip_info->enable_loopback) 1019 + chip->cr1 = SSCR1_LBM; 1020 + } 1021 + 1022 + if (drv_data->ioaddr == SSP1_VIRT) 1023 + clk_div = SSP1_SerClkDiv(spi->max_speed_hz); 1024 + else if (drv_data->ioaddr == SSP2_VIRT) 1025 + clk_div = SSP2_SerClkDiv(spi->max_speed_hz); 
1026 + else if (drv_data->ioaddr == SSP3_VIRT) 1027 + clk_div = SSP3_SerClkDiv(spi->max_speed_hz); 1028 + else 1029 + return -ENODEV; 1030 + chip->speed_hz = spi->max_speed_hz; 1031 + 1032 + chip->cr0 = clk_div 1033 + | SSCR0_Motorola 1034 + | SSCR0_DataSize(spi->bits_per_word > 16 ? 1035 + spi->bits_per_word - 16 : spi->bits_per_word) 1036 + | SSCR0_SSE 1037 + | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0); 1038 + chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) << 4) 1039 + | (((spi->mode & SPI_CPOL) != 0) << 3); 1040 + 1041 + /* NOTE: PXA25x_SSP _could_ use external clocking ... */ 1042 + if (drv_data->ssp_type != PXA25x_SSP) 1043 + dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n", 1044 + spi->bits_per_word, 1045 + (CLOCK_SPEED_HZ) 1046 + / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), 1047 + spi->mode & 0x3); 1048 + else 1049 + dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n", 1050 + spi->bits_per_word, 1051 + (CLOCK_SPEED_HZ/2) 1052 + / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), 1053 + spi->mode & 0x3); 1054 + 1055 + if (spi->bits_per_word <= 8) { 1056 + chip->n_bytes = 1; 1057 + chip->dma_width = DCMD_WIDTH1; 1058 + chip->read = u8_reader; 1059 + chip->write = u8_writer; 1060 + } else if (spi->bits_per_word <= 16) { 1061 + chip->n_bytes = 2; 1062 + chip->dma_width = DCMD_WIDTH2; 1063 + chip->read = u16_reader; 1064 + chip->write = u16_writer; 1065 + } else if (spi->bits_per_word <= 32) { 1066 + chip->cr0 |= SSCR0_EDSS; 1067 + chip->n_bytes = 4; 1068 + chip->dma_width = DCMD_WIDTH4; 1069 + chip->read = u32_reader; 1070 + chip->write = u32_writer; 1071 + } else { 1072 + dev_err(&spi->dev, "invalid wordsize\n"); 1073 + kfree(chip); 1074 + return -ENODEV; 1075 + } 1076 + chip->bits_per_word = spi->bits_per_word; 1077 + 1078 + spi_set_ctldata(spi, chip); 1079 + 1080 + return 0; 1081 + } 1082 + 1083 + static void cleanup(const struct spi_device *spi) 1084 + { 1085 + struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi); 1086 + 1087 + kfree(chip); 1088 + } 1089 + 
1090 + static int init_queue(struct driver_data *drv_data) 1091 + { 1092 + INIT_LIST_HEAD(&drv_data->queue); 1093 + spin_lock_init(&drv_data->lock); 1094 + 1095 + drv_data->run = QUEUE_STOPPED; 1096 + drv_data->busy = 0; 1097 + 1098 + tasklet_init(&drv_data->pump_transfers, 1099 + pump_transfers, (unsigned long)drv_data); 1100 + 1101 + INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data); 1102 + drv_data->workqueue = create_singlethread_workqueue( 1103 + drv_data->master->cdev.dev->bus_id); 1104 + if (drv_data->workqueue == NULL) 1105 + return -EBUSY; 1106 + 1107 + return 0; 1108 + } 1109 + 1110 + static int start_queue(struct driver_data *drv_data) 1111 + { 1112 + unsigned long flags; 1113 + 1114 + spin_lock_irqsave(&drv_data->lock, flags); 1115 + 1116 + if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { 1117 + spin_unlock_irqrestore(&drv_data->lock, flags); 1118 + return -EBUSY; 1119 + } 1120 + 1121 + drv_data->run = QUEUE_RUNNING; 1122 + drv_data->cur_msg = NULL; 1123 + drv_data->cur_transfer = NULL; 1124 + drv_data->cur_chip = NULL; 1125 + spin_unlock_irqrestore(&drv_data->lock, flags); 1126 + 1127 + queue_work(drv_data->workqueue, &drv_data->pump_messages); 1128 + 1129 + return 0; 1130 + } 1131 + 1132 + static int stop_queue(struct driver_data *drv_data) 1133 + { 1134 + unsigned long flags; 1135 + unsigned limit = 500; 1136 + int status = 0; 1137 + 1138 + spin_lock_irqsave(&drv_data->lock, flags); 1139 + 1140 + /* This is a bit lame, but is optimized for the common execution path. 1141 + * A wait_queue on the drv_data->busy could be used, but then the common 1142 + * execution path (pump_messages) would be required to call wake_up or 1143 + * friends on every SPI message. 
Do this instead */ 1144 + drv_data->run = QUEUE_STOPPED; 1145 + while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { 1146 + spin_unlock_irqrestore(&drv_data->lock, flags); 1147 + msleep(10); 1148 + spin_lock_irqsave(&drv_data->lock, flags); 1149 + } 1150 + 1151 + if (!list_empty(&drv_data->queue) || drv_data->busy) 1152 + status = -EBUSY; 1153 + 1154 + spin_unlock_irqrestore(&drv_data->lock, flags); 1155 + 1156 + return status; 1157 + } 1158 + 1159 + static int destroy_queue(struct driver_data *drv_data) 1160 + { 1161 + int status; 1162 + 1163 + status = stop_queue(drv_data); 1164 + if (status != 0) 1165 + return status; 1166 + 1167 + destroy_workqueue(drv_data->workqueue); 1168 + 1169 + return 0; 1170 + } 1171 + 1172 + static int pxa2xx_spi_probe(struct platform_device *pdev) 1173 + { 1174 + struct device *dev = &pdev->dev; 1175 + struct pxa2xx_spi_master *platform_info; 1176 + struct spi_master *master; 1177 + struct driver_data *drv_data = 0; 1178 + struct resource *memory_resource; 1179 + int irq; 1180 + int status = 0; 1181 + 1182 + platform_info = dev->platform_data; 1183 + 1184 + if (platform_info->ssp_type == SSP_UNDEFINED) { 1185 + dev_err(&pdev->dev, "undefined SSP\n"); 1186 + return -ENODEV; 1187 + } 1188 + 1189 + /* Allocate master with space for drv_data and null dma buffer */ 1190 + master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); 1191 + if (!master) { 1192 + dev_err(&pdev->dev, "can not alloc spi_master\n"); 1193 + return -ENOMEM; 1194 + } 1195 + drv_data = spi_master_get_devdata(master); 1196 + drv_data->master = master; 1197 + drv_data->master_info = platform_info; 1198 + drv_data->pdev = pdev; 1199 + 1200 + master->bus_num = pdev->id; 1201 + master->num_chipselect = platform_info->num_chipselect; 1202 + master->cleanup = cleanup; 1203 + master->setup = setup; 1204 + master->transfer = transfer; 1205 + 1206 + drv_data->ssp_type = platform_info->ssp_type; 1207 + drv_data->null_dma_buf = (u32 *)ALIGN((u32)(drv_data + 
1208 + sizeof(struct driver_data)), 8); 1209 + 1210 + /* Setup register addresses */ 1211 + memory_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1212 + if (!memory_resource) { 1213 + dev_err(&pdev->dev, "memory resources not defined\n"); 1214 + status = -ENODEV; 1215 + goto out_error_master_alloc; 1216 + } 1217 + 1218 + drv_data->ioaddr = (void *)io_p2v((unsigned long)(memory_resource->start)); 1219 + drv_data->ssdr_physical = memory_resource->start + 0x00000010; 1220 + if (platform_info->ssp_type == PXA25x_SSP) { 1221 + drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE; 1222 + drv_data->dma_cr1 = 0; 1223 + drv_data->clear_sr = SSSR_ROR; 1224 + drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR; 1225 + } else { 1226 + drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE; 1227 + drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE; 1228 + drv_data->clear_sr = SSSR_ROR | SSSR_TINT; 1229 + drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR; 1230 + } 1231 + 1232 + /* Attach to IRQ */ 1233 + irq = platform_get_irq(pdev, 0); 1234 + if (irq < 0) { 1235 + dev_err(&pdev->dev, "irq resource not defined\n"); 1236 + status = -ENODEV; 1237 + goto out_error_master_alloc; 1238 + } 1239 + 1240 + status = request_irq(irq, ssp_int, 0, dev->bus_id, drv_data); 1241 + if (status < 0) { 1242 + dev_err(&pdev->dev, "can not get IRQ\n"); 1243 + goto out_error_master_alloc; 1244 + } 1245 + 1246 + /* Setup DMA if requested */ 1247 + drv_data->tx_channel = -1; 1248 + drv_data->rx_channel = -1; 1249 + if (platform_info->enable_dma) { 1250 + 1251 + /* Get two DMA channels (rx and tx) */ 1252 + drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx", 1253 + DMA_PRIO_HIGH, 1254 + dma_handler, 1255 + drv_data); 1256 + if (drv_data->rx_channel < 0) { 1257 + dev_err(dev, "problem (%d) requesting rx channel\n", 1258 + drv_data->rx_channel); 1259 + status = -ENODEV; 1260 + goto out_error_irq_alloc; 1261 + } 1262 + drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx", 
1263 + DMA_PRIO_MEDIUM, 1264 + dma_handler, 1265 + drv_data); 1266 + if (drv_data->tx_channel < 0) { 1267 + dev_err(dev, "problem (%d) requesting tx channel\n", 1268 + drv_data->tx_channel); 1269 + status = -ENODEV; 1270 + goto out_error_dma_alloc; 1271 + } 1272 + 1273 + if (drv_data->ioaddr == SSP1_VIRT) { 1274 + DRCMRRXSSDR = DRCMR_MAPVLD 1275 + | drv_data->rx_channel; 1276 + DRCMRTXSSDR = DRCMR_MAPVLD 1277 + | drv_data->tx_channel; 1278 + } else if (drv_data->ioaddr == SSP2_VIRT) { 1279 + DRCMRRXSS2DR = DRCMR_MAPVLD 1280 + | drv_data->rx_channel; 1281 + DRCMRTXSS2DR = DRCMR_MAPVLD 1282 + | drv_data->tx_channel; 1283 + } else if (drv_data->ioaddr == SSP3_VIRT) { 1284 + DRCMRRXSS3DR = DRCMR_MAPVLD 1285 + | drv_data->rx_channel; 1286 + DRCMRTXSS3DR = DRCMR_MAPVLD 1287 + | drv_data->tx_channel; 1288 + } else { 1289 + dev_err(dev, "bad SSP type\n"); 1290 + goto out_error_dma_alloc; 1291 + } 1292 + } 1293 + 1294 + /* Enable SOC clock */ 1295 + pxa_set_cken(platform_info->clock_enable, 1); 1296 + 1297 + /* Load default SSP configuration */ 1298 + write_SSCR0(0, drv_data->ioaddr); 1299 + write_SSCR1(SSCR1_RxTresh(4) | SSCR1_TxTresh(12), drv_data->ioaddr); 1300 + write_SSCR0(SSCR0_SerClkDiv(2) 1301 + | SSCR0_Motorola 1302 + | SSCR0_DataSize(8), 1303 + drv_data->ioaddr); 1304 + if (drv_data->ssp_type != PXA25x_SSP) 1305 + write_SSTO(0, drv_data->ioaddr); 1306 + write_SSPSP(0, drv_data->ioaddr); 1307 + 1308 + /* Initial and start queue */ 1309 + status = init_queue(drv_data); 1310 + if (status != 0) { 1311 + dev_err(&pdev->dev, "problem initializing queue\n"); 1312 + goto out_error_clock_enabled; 1313 + } 1314 + status = start_queue(drv_data); 1315 + if (status != 0) { 1316 + dev_err(&pdev->dev, "problem starting queue\n"); 1317 + goto out_error_clock_enabled; 1318 + } 1319 + 1320 + /* Register with the SPI framework */ 1321 + platform_set_drvdata(pdev, drv_data); 1322 + status = spi_register_master(master); 1323 + if (status != 0) { 1324 + dev_err(&pdev->dev, "problem 
registering spi master\n"); 1325 + goto out_error_queue_alloc; 1326 + } 1327 + 1328 + return status; 1329 + 1330 + out_error_queue_alloc: 1331 + destroy_queue(drv_data); 1332 + 1333 + out_error_clock_enabled: 1334 + pxa_set_cken(platform_info->clock_enable, 0); 1335 + 1336 + out_error_dma_alloc: 1337 + if (drv_data->tx_channel != -1) 1338 + pxa_free_dma(drv_data->tx_channel); 1339 + if (drv_data->rx_channel != -1) 1340 + pxa_free_dma(drv_data->rx_channel); 1341 + 1342 + out_error_irq_alloc: 1343 + free_irq(irq, drv_data); 1344 + 1345 + out_error_master_alloc: 1346 + spi_master_put(master); 1347 + return status; 1348 + } 1349 + 1350 + static int pxa2xx_spi_remove(struct platform_device *pdev) 1351 + { 1352 + struct driver_data *drv_data = platform_get_drvdata(pdev); 1353 + int irq; 1354 + int status = 0; 1355 + 1356 + if (!drv_data) 1357 + return 0; 1358 + 1359 + /* Remove the queue */ 1360 + status = destroy_queue(drv_data); 1361 + if (status != 0) 1362 + return status; 1363 + 1364 + /* Disable the SSP at the peripheral and SOC level */ 1365 + write_SSCR0(0, drv_data->ioaddr); 1366 + pxa_set_cken(drv_data->master_info->clock_enable, 0); 1367 + 1368 + /* Release DMA */ 1369 + if (drv_data->master_info->enable_dma) { 1370 + if (drv_data->ioaddr == SSP1_VIRT) { 1371 + DRCMRRXSSDR = 0; 1372 + DRCMRTXSSDR = 0; 1373 + } else if (drv_data->ioaddr == SSP2_VIRT) { 1374 + DRCMRRXSS2DR = 0; 1375 + DRCMRTXSS2DR = 0; 1376 + } else if (drv_data->ioaddr == SSP3_VIRT) { 1377 + DRCMRRXSS3DR = 0; 1378 + DRCMRTXSS3DR = 0; 1379 + } 1380 + pxa_free_dma(drv_data->tx_channel); 1381 + pxa_free_dma(drv_data->rx_channel); 1382 + } 1383 + 1384 + /* Release IRQ */ 1385 + irq = platform_get_irq(pdev, 0); 1386 + if (irq >= 0) 1387 + free_irq(irq, drv_data); 1388 + 1389 + /* Disconnect from the SPI framework */ 1390 + spi_unregister_master(drv_data->master); 1391 + 1392 + /* Prevent double remove */ 1393 + platform_set_drvdata(pdev, NULL); 1394 + 1395 + return 0; 1396 + } 1397 + 1398 + static 
void pxa2xx_spi_shutdown(struct platform_device *pdev) 1399 + { 1400 + int status = 0; 1401 + 1402 + if ((status = pxa2xx_spi_remove(pdev)) != 0) 1403 + dev_err(&pdev->dev, "shutdown failed with %d\n", status); 1404 + } 1405 + 1406 + #ifdef CONFIG_PM 1407 + static int suspend_devices(struct device *dev, void *pm_message) 1408 + { 1409 + pm_message_t *state = pm_message; 1410 + 1411 + if (dev->power.power_state.event != state->event) { 1412 + dev_warn(dev, "pm state does not match request\n"); 1413 + return -1; 1414 + } 1415 + 1416 + return 0; 1417 + } 1418 + 1419 + static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state) 1420 + { 1421 + struct driver_data *drv_data = platform_get_drvdata(pdev); 1422 + int status = 0; 1423 + 1424 + /* Check all children for current power state */ 1425 + if (device_for_each_child(&pdev->dev, &state, suspend_devices) != 0) { 1426 + dev_warn(&pdev->dev, "suspend aborted\n"); 1427 + return -1; 1428 + } 1429 + 1430 + status = stop_queue(drv_data); 1431 + if (status != 0) 1432 + return status; 1433 + write_SSCR0(0, drv_data->ioaddr); 1434 + pxa_set_cken(drv_data->master_info->clock_enable, 0); 1435 + 1436 + return 0; 1437 + } 1438 + 1439 + static int pxa2xx_spi_resume(struct platform_device *pdev) 1440 + { 1441 + struct driver_data *drv_data = platform_get_drvdata(pdev); 1442 + int status = 0; 1443 + 1444 + /* Enable the SSP clock */ 1445 + pxa_set_cken(drv_data->master_info->clock_enable, 1); 1446 + 1447 + /* Start the queue running */ 1448 + status = start_queue(drv_data); 1449 + if (status != 0) { 1450 + dev_err(&pdev->dev, "problem starting queue (%d)\n", status); 1451 + return status; 1452 + } 1453 + 1454 + return 0; 1455 + } 1456 + #else 1457 + #define pxa2xx_spi_suspend NULL 1458 + #define pxa2xx_spi_resume NULL 1459 + #endif /* CONFIG_PM */ 1460 + 1461 + static struct platform_driver driver = { 1462 + .driver = { 1463 + .name = "pxa2xx-spi", 1464 + .bus = &platform_bus_type, 1465 + .owner = THIS_MODULE, 1466 
+ }, 1467 + .probe = pxa2xx_spi_probe, 1468 + .remove = __devexit_p(pxa2xx_spi_remove), 1469 + .shutdown = pxa2xx_spi_shutdown, 1470 + .suspend = pxa2xx_spi_suspend, 1471 + .resume = pxa2xx_spi_resume, 1472 + }; 1473 + 1474 + static int __init pxa2xx_spi_init(void) 1475 + { 1476 + platform_driver_register(&driver); 1477 + 1478 + return 0; 1479 + } 1480 + module_init(pxa2xx_spi_init); 1481 + 1482 + static void __exit pxa2xx_spi_exit(void) 1483 + { 1484 + platform_driver_unregister(&driver); 1485 + } 1486 + module_exit(pxa2xx_spi_exit);
+7 -6
drivers/spi/spi.c
··· 338 338 * spi_alloc_master - allocate SPI master controller 339 339 * @dev: the controller, possibly using the platform_bus 340 340 * @size: how much driver-private data to preallocate; the pointer to this 341 - * memory is in the class_data field of the returned class_device, 341 + * memory is in the class_data field of the returned class_device, 342 342 * accessible with spi_master_get_devdata(). 343 343 * 344 344 * This call is used only by SPI master controller drivers, which are the 345 345 * only ones directly touching chip registers. It's how they allocate 346 - * an spi_master structure, prior to calling spi_add_master(). 346 + * an spi_master structure, prior to calling spi_register_master(). 347 347 * 348 348 * This must be called from context that can sleep. It returns the SPI 349 349 * master structure on success, else NULL. 350 350 * 351 351 * The caller is responsible for assigning the bus number and initializing 352 - * the master's methods before calling spi_add_master(); and (after errors 352 + * the master's methods before calling spi_register_master(); and (after errors 353 353 * adding the device) calling spi_master_put() to prevent a memory leak. 
354 354 */ 355 355 struct spi_master * __init_or_module ··· 395 395 int __init_or_module 396 396 spi_register_master(struct spi_master *master) 397 397 { 398 - static atomic_t dyn_bus_id = ATOMIC_INIT(0); 398 + static atomic_t dyn_bus_id = ATOMIC_INIT((1<<16) - 1); 399 399 struct device *dev = master->cdev.dev; 400 400 int status = -ENODEV; 401 401 int dynamic = 0; ··· 404 404 return -ENODEV; 405 405 406 406 /* convention: dynamically assigned bus IDs count down from the max */ 407 - if (master->bus_num == 0) { 407 + if (master->bus_num < 0) { 408 408 master->bus_num = atomic_dec_return(&dyn_bus_id); 409 409 dynamic = 1; 410 410 } ··· 522 522 } 523 523 EXPORT_SYMBOL_GPL(spi_sync); 524 524 525 - #define SPI_BUFSIZ (SMP_CACHE_BYTES) 525 + /* portable code must never pass more than 32 bytes */ 526 + #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES) 526 527 527 528 static u8 *buf; 528 529
+84 -20
drivers/spi/spi_bitbang.c
··· 138 138 return t->len - count; 139 139 } 140 140 141 + int spi_bitbang_setup_transfer(struct spi_device *spi, struct spi_transfer *t) 142 + { 143 + struct spi_bitbang_cs *cs = spi->controller_state; 144 + u8 bits_per_word; 145 + u32 hz; 146 + 147 + if (t) { 148 + bits_per_word = t->bits_per_word; 149 + hz = t->speed_hz; 150 + } else { 151 + bits_per_word = 0; 152 + hz = 0; 153 + } 154 + 155 + /* spi_transfer level calls that work per-word */ 156 + if (!bits_per_word) 157 + bits_per_word = spi->bits_per_word; 158 + if (bits_per_word <= 8) 159 + cs->txrx_bufs = bitbang_txrx_8; 160 + else if (bits_per_word <= 16) 161 + cs->txrx_bufs = bitbang_txrx_16; 162 + else if (bits_per_word <= 32) 163 + cs->txrx_bufs = bitbang_txrx_32; 164 + else 165 + return -EINVAL; 166 + 167 + /* nsecs = (clock period)/2 */ 168 + if (!hz) 169 + hz = spi->max_speed_hz; 170 + if (hz) { 171 + cs->nsecs = (1000000000/2) / hz; 172 + if (cs->nsecs > (MAX_UDELAY_MS * 1000 * 1000)) 173 + return -EINVAL; 174 + } 175 + 176 + return 0; 177 + } 178 + EXPORT_SYMBOL_GPL(spi_bitbang_setup_transfer); 179 + 141 180 /** 142 181 * spi_bitbang_setup - default setup for per-word I/O loops 143 182 */ ··· 184 145 { 185 146 struct spi_bitbang_cs *cs = spi->controller_state; 186 147 struct spi_bitbang *bitbang; 148 + int retval; 187 149 188 - if (!spi->max_speed_hz) 150 + bitbang = spi_master_get_devdata(spi->master); 151 + 152 + /* REVISIT: some systems will want to support devices using lsb-first 153 + * bit encodings on the wire. In pure software that would be trivial, 154 + * just bitbang_txrx_le_cphaX() routines shifting the other way, and 155 + * some hardware controllers also have this support. 
156 + */ 157 + if ((spi->mode & SPI_LSB_FIRST) != 0) 189 158 return -EINVAL; 190 159 191 160 if (!cs) { ··· 202 155 return -ENOMEM; 203 156 spi->controller_state = cs; 204 157 } 205 - bitbang = spi_master_get_devdata(spi->master); 206 158 207 159 if (!spi->bits_per_word) 208 160 spi->bits_per_word = 8; 209 - 210 - /* spi_transfer level calls that work per-word */ 211 - if (spi->bits_per_word <= 8) 212 - cs->txrx_bufs = bitbang_txrx_8; 213 - else if (spi->bits_per_word <= 16) 214 - cs->txrx_bufs = bitbang_txrx_16; 215 - else if (spi->bits_per_word <= 32) 216 - cs->txrx_bufs = bitbang_txrx_32; 217 - else 218 - return -EINVAL; 219 161 220 162 /* per-word shift register access, in hardware or bitbanging */ 221 163 cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)]; 222 164 if (!cs->txrx_word) 223 165 return -EINVAL; 224 166 225 - /* nsecs = (clock period)/2 */ 226 - cs->nsecs = (1000000000/2) / (spi->max_speed_hz); 227 - if (cs->nsecs > MAX_UDELAY_MS * 1000) 228 - return -EINVAL; 167 + retval = spi_bitbang_setup_transfer(spi, NULL); 168 + if (retval < 0) 169 + return retval; 229 170 230 - dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec\n", 171 + dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n", 231 172 __FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA), 232 173 spi->bits_per_word, 2 * cs->nsecs); 233 174 ··· 281 246 unsigned tmp; 282 247 unsigned cs_change; 283 248 int status; 249 + int (*setup_transfer)(struct spi_device *, 250 + struct spi_transfer *); 284 251 285 252 m = container_of(bitbang->queue.next, struct spi_message, 286 253 queue); ··· 299 262 tmp = 0; 300 263 cs_change = 1; 301 264 status = 0; 265 + setup_transfer = NULL; 302 266 303 267 list_for_each_entry (t, &m->transfers, transfer_list) { 304 268 if (bitbang->shutdown) { 305 269 status = -ESHUTDOWN; 306 270 break; 271 + } 272 + 273 + /* override or restore speed and wordsize */ 274 + if (t->speed_hz || t->bits_per_word) { 275 + setup_transfer = bitbang->setup_transfer; 276 + 
if (!setup_transfer) { 277 + status = -ENOPROTOOPT; 278 + break; 279 + } 280 + } 281 + if (setup_transfer) { 282 + status = setup_transfer(spi, t); 283 + if (status < 0) 284 + break; 307 285 } 308 286 309 287 /* set up default clock polarity, and activate chip; ··· 377 325 m->status = status; 378 326 m->complete(m->context); 379 327 328 + /* restore speed and wordsize */ 329 + if (setup_transfer) 330 + setup_transfer(spi, NULL); 331 + 380 332 /* normally deactivate chipselect ... unless no error and 381 333 * cs_change has hinted that the next message will probably 382 334 * be for this chip too. ··· 404 348 { 405 349 struct spi_bitbang *bitbang; 406 350 unsigned long flags; 351 + int status = 0; 407 352 408 353 m->actual_length = 0; 409 354 m->status = -EINPROGRESS; ··· 414 357 return -ESHUTDOWN; 415 358 416 359 spin_lock_irqsave(&bitbang->lock, flags); 417 - list_add_tail(&m->queue, &bitbang->queue); 418 - queue_work(bitbang->workqueue, &bitbang->work); 360 + if (!spi->max_speed_hz) 361 + status = -ENETDOWN; 362 + else { 363 + list_add_tail(&m->queue, &bitbang->queue); 364 + queue_work(bitbang->workqueue, &bitbang->work); 365 + } 419 366 spin_unlock_irqrestore(&bitbang->lock, flags); 420 367 421 - return 0; 368 + return status; 422 369 } 423 370 EXPORT_SYMBOL_GPL(spi_bitbang_transfer); 424 371 ··· 467 406 bitbang->use_dma = 0; 468 407 bitbang->txrx_bufs = spi_bitbang_bufs; 469 408 if (!bitbang->master->setup) { 409 + if (!bitbang->setup_transfer) 410 + bitbang->setup_transfer = 411 + spi_bitbang_setup_transfer; 470 412 bitbang->master->setup = spi_bitbang_setup; 471 413 bitbang->master->cleanup = spi_bitbang_cleanup; 472 414 }
+1
drivers/spi/spi_butterfly.c
··· 321 321 * (firmware resets at45, acts as spi slave) or neither (we ignore 322 322 * both, AVR uses AT45). Here we expect firmware for the first option. 323 323 */ 324 + 324 325 pp->info[0].max_speed_hz = 15 * 1000 * 1000; 325 326 strcpy(pp->info[0].modalias, "mtd_dataflash"); 326 327 pp->info[0].platform_data = &flash;
+483
drivers/spi/spi_mpc83xx.c
··· 1 + /* 2 + * MPC83xx SPI controller driver. 3 + * 4 + * Maintainer: Kumar Gala 5 + * 6 + * Copyright (C) 2006 Polycom, Inc. 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms of the GNU General Public License as published by the 10 + * Free Software Foundation; either version 2 of the License, or (at your 11 + * option) any later version. 12 + */ 13 + #include <linux/module.h> 14 + #include <linux/init.h> 15 + #include <linux/types.h> 16 + #include <linux/kernel.h> 17 + #include <linux/completion.h> 18 + #include <linux/interrupt.h> 19 + #include <linux/delay.h> 20 + #include <linux/irq.h> 21 + #include <linux/device.h> 22 + #include <linux/spi/spi.h> 23 + #include <linux/spi/spi_bitbang.h> 24 + #include <linux/platform_device.h> 25 + #include <linux/fsl_devices.h> 26 + 27 + #include <asm/irq.h> 28 + #include <asm/io.h> 29 + 30 + /* SPI Controller registers */ 31 + struct mpc83xx_spi_reg { 32 + u8 res1[0x20]; 33 + __be32 mode; 34 + __be32 event; 35 + __be32 mask; 36 + __be32 command; 37 + __be32 transmit; 38 + __be32 receive; 39 + }; 40 + 41 + /* SPI Controller mode register definitions */ 42 + #define SPMODE_CI_INACTIVEHIGH (1 << 29) 43 + #define SPMODE_CP_BEGIN_EDGECLK (1 << 28) 44 + #define SPMODE_DIV16 (1 << 27) 45 + #define SPMODE_REV (1 << 26) 46 + #define SPMODE_MS (1 << 25) 47 + #define SPMODE_ENABLE (1 << 24) 48 + #define SPMODE_LEN(x) ((x) << 20) 49 + #define SPMODE_PM(x) ((x) << 16) 50 + 51 + /* 52 + * Default for SPI Mode: 53 + * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk 54 + */ 55 + #define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \ 56 + SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf)) 57 + 58 + /* SPIE register values */ 59 + #define SPIE_NE 0x00000200 /* Not empty */ 60 + #define SPIE_NF 0x00000100 /* Not full */ 61 + 62 + /* SPIM register values */ 63 + #define SPIM_NE 0x00000200 /* Not empty */ 64 + #define SPIM_NF 0x00000100 /* Not full */ 
65 + 66 + /* SPI Controller driver's private data. */ 67 + struct mpc83xx_spi { 68 + /* bitbang has to be first */ 69 + struct spi_bitbang bitbang; 70 + struct completion done; 71 + 72 + struct mpc83xx_spi_reg __iomem *base; 73 + 74 + /* rx & tx bufs from the spi_transfer */ 75 + const void *tx; 76 + void *rx; 77 + 78 + /* functions to deal with different sized buffers */ 79 + void (*get_rx) (u32 rx_data, struct mpc83xx_spi *); 80 + u32(*get_tx) (struct mpc83xx_spi *); 81 + 82 + unsigned int count; 83 + u32 irq; 84 + 85 + unsigned nsecs; /* (clock cycle time)/2 */ 86 + 87 + u32 sysclk; 88 + void (*activate_cs) (u8 cs, u8 polarity); 89 + void (*deactivate_cs) (u8 cs, u8 polarity); 90 + }; 91 + 92 + static inline void mpc83xx_spi_write_reg(__be32 __iomem * reg, u32 val) 93 + { 94 + out_be32(reg, val); 95 + } 96 + 97 + static inline u32 mpc83xx_spi_read_reg(__be32 __iomem * reg) 98 + { 99 + return in_be32(reg); 100 + } 101 + 102 + #define MPC83XX_SPI_RX_BUF(type) \ 103 + void mpc83xx_spi_rx_buf_##type(u32 data, struct mpc83xx_spi *mpc83xx_spi) \ 104 + { \ 105 + type * rx = mpc83xx_spi->rx; \ 106 + *rx++ = (type)data; \ 107 + mpc83xx_spi->rx = rx; \ 108 + } 109 + 110 + #define MPC83XX_SPI_TX_BUF(type) \ 111 + u32 mpc83xx_spi_tx_buf_##type(struct mpc83xx_spi *mpc83xx_spi) \ 112 + { \ 113 + u32 data; \ 114 + const type * tx = mpc83xx_spi->tx; \ 115 + data = *tx++; \ 116 + mpc83xx_spi->tx = tx; \ 117 + return data; \ 118 + } 119 + 120 + MPC83XX_SPI_RX_BUF(u8) 121 + MPC83XX_SPI_RX_BUF(u16) 122 + MPC83XX_SPI_RX_BUF(u32) 123 + MPC83XX_SPI_TX_BUF(u8) 124 + MPC83XX_SPI_TX_BUF(u16) 125 + MPC83XX_SPI_TX_BUF(u32) 126 + 127 + static void mpc83xx_spi_chipselect(struct spi_device *spi, int value) 128 + { 129 + struct mpc83xx_spi *mpc83xx_spi; 130 + u8 pol = spi->mode & SPI_CS_HIGH ? 
1 : 0; 131 + 132 + mpc83xx_spi = spi_master_get_devdata(spi->master); 133 + 134 + if (value == BITBANG_CS_INACTIVE) { 135 + if (mpc83xx_spi->deactivate_cs) 136 + mpc83xx_spi->deactivate_cs(spi->chip_select, pol); 137 + } 138 + 139 + if (value == BITBANG_CS_ACTIVE) { 140 + u32 regval = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode); 141 + u32 len = spi->bits_per_word; 142 + if (len == 32) 143 + len = 0; 144 + else 145 + len = len - 1; 146 + 147 + /* mask out bits we are going to set */ 148 + regval &= ~0x38ff0000; 149 + 150 + if (spi->mode & SPI_CPHA) 151 + regval |= SPMODE_CP_BEGIN_EDGECLK; 152 + if (spi->mode & SPI_CPOL) 153 + regval |= SPMODE_CI_INACTIVEHIGH; 154 + 155 + regval |= SPMODE_LEN(len); 156 + 157 + if ((mpc83xx_spi->sysclk / spi->max_speed_hz) >= 64) { 158 + u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 64); 159 + regval |= SPMODE_PM(pm) | SPMODE_DIV16; 160 + } else { 161 + u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 4); 162 + regval |= SPMODE_PM(pm); 163 + } 164 + 165 + mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval); 166 + if (mpc83xx_spi->activate_cs) 167 + mpc83xx_spi->activate_cs(spi->chip_select, pol); 168 + } 169 + } 170 + 171 + static 172 + int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) 173 + { 174 + struct mpc83xx_spi *mpc83xx_spi; 175 + u32 regval; 176 + u8 bits_per_word; 177 + u32 hz; 178 + 179 + mpc83xx_spi = spi_master_get_devdata(spi->master); 180 + 181 + if (t) { 182 + bits_per_word = t->bits_per_word; 183 + hz = t->speed_hz; 184 + } else { 185 + bits_per_word = 0; 186 + hz = 0; 187 + } 188 + 189 + /* spi_transfer level calls that work per-word */ 190 + if (!bits_per_word) 191 + bits_per_word = spi->bits_per_word; 192 + 193 + /* Make sure its a bit width we support [4..16, 32] */ 194 + if ((bits_per_word < 4) 195 + || ((bits_per_word > 16) && (bits_per_word != 32))) 196 + return -EINVAL; 197 + 198 + if (bits_per_word <= 8) { 199 + mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8; 200 + 
mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8; 201 + } else if (bits_per_word <= 16) { 202 + mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u16; 203 + mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u16; 204 + } else if (bits_per_word <= 32) { 205 + mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u32; 206 + mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u32; 207 + } else 208 + return -EINVAL; 209 + 210 + /* nsecs = (clock period)/2 */ 211 + if (!hz) 212 + hz = spi->max_speed_hz; 213 + mpc83xx_spi->nsecs = (1000000000 / 2) / hz; 214 + if (mpc83xx_spi->nsecs > MAX_UDELAY_MS * 1000) 215 + return -EINVAL; 216 + 217 + if (bits_per_word == 32) 218 + bits_per_word = 0; 219 + else 220 + bits_per_word = bits_per_word - 1; 221 + 222 + regval = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode); 223 + 224 + /* Mask out the word length bits */ 225 + regval &= 0xff0fffff; 226 + regval |= SPMODE_LEN(bits_per_word); 227 + 228 + mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval); 229 + 230 + return 0; 231 + } 232 + 233 + static int mpc83xx_spi_setup(struct spi_device *spi) 234 + { 235 + struct spi_bitbang *bitbang; 236 + struct mpc83xx_spi *mpc83xx_spi; 237 + int retval; 238 + 239 + if (!spi->max_speed_hz) 240 + return -EINVAL; 241 + 242 + bitbang = spi_master_get_devdata(spi->master); 243 + mpc83xx_spi = spi_master_get_devdata(spi->master); 244 + 245 + if (!spi->bits_per_word) 246 + spi->bits_per_word = 8; 247 + 248 + retval = mpc83xx_spi_setup_transfer(spi, NULL); 249 + if (retval < 0) 250 + return retval; 251 + 252 + dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec\n", 253 + __FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA), 254 + spi->bits_per_word, 2 * mpc83xx_spi->nsecs); 255 + 256 + /* NOTE we _need_ to call chipselect() early, ideally with adapter 257 + * setup, unless the hardware defaults cooperate to avoid confusion 258 + * between normal (active low) and inverted chipselects. 
259 + */ 260 + 261 + /* deselect chip (low or high) */ 262 + spin_lock(&bitbang->lock); 263 + if (!bitbang->busy) { 264 + bitbang->chipselect(spi, BITBANG_CS_INACTIVE); 265 + ndelay(mpc83xx_spi->nsecs); 266 + } 267 + spin_unlock(&bitbang->lock); 268 + 269 + return 0; 270 + } 271 + 272 + static int mpc83xx_spi_bufs(struct spi_device *spi, struct spi_transfer *t) 273 + { 274 + struct mpc83xx_spi *mpc83xx_spi; 275 + u32 word; 276 + 277 + mpc83xx_spi = spi_master_get_devdata(spi->master); 278 + 279 + mpc83xx_spi->tx = t->tx_buf; 280 + mpc83xx_spi->rx = t->rx_buf; 281 + mpc83xx_spi->count = t->len; 282 + INIT_COMPLETION(mpc83xx_spi->done); 283 + 284 + /* enable rx ints */ 285 + mpc83xx_spi_write_reg(&mpc83xx_spi->base->mask, SPIM_NE); 286 + 287 + /* transmit word */ 288 + word = mpc83xx_spi->get_tx(mpc83xx_spi); 289 + mpc83xx_spi_write_reg(&mpc83xx_spi->base->transmit, word); 290 + 291 + wait_for_completion(&mpc83xx_spi->done); 292 + 293 + /* disable rx ints */ 294 + mpc83xx_spi_write_reg(&mpc83xx_spi->base->mask, 0); 295 + 296 + return t->len - mpc83xx_spi->count; 297 + } 298 + 299 + irqreturn_t mpc83xx_spi_irq(s32 irq, void *context_data, 300 + struct pt_regs * ptregs) 301 + { 302 + struct mpc83xx_spi *mpc83xx_spi = context_data; 303 + u32 event; 304 + irqreturn_t ret = IRQ_NONE; 305 + 306 + /* Get interrupt events(tx/rx) */ 307 + event = mpc83xx_spi_read_reg(&mpc83xx_spi->base->event); 308 + 309 + /* We need handle RX first */ 310 + if (event & SPIE_NE) { 311 + u32 rx_data = mpc83xx_spi_read_reg(&mpc83xx_spi->base->receive); 312 + 313 + if (mpc83xx_spi->rx) 314 + mpc83xx_spi->get_rx(rx_data, mpc83xx_spi); 315 + 316 + ret = IRQ_HANDLED; 317 + } 318 + 319 + if ((event & SPIE_NF) == 0) 320 + /* spin until TX is done */ 321 + while (((event = 322 + mpc83xx_spi_read_reg(&mpc83xx_spi->base->event)) & 323 + SPIE_NF) == 0) 324 + cpu_relax(); 325 + 326 + mpc83xx_spi->count -= 1; 327 + if (mpc83xx_spi->count) { 328 + if (mpc83xx_spi->tx) { 329 + u32 word = 
mpc83xx_spi->get_tx(mpc83xx_spi); 330 + mpc83xx_spi_write_reg(&mpc83xx_spi->base->transmit, 331 + word); 332 + } 333 + } else { 334 + complete(&mpc83xx_spi->done); 335 + } 336 + 337 + /* Clear the events */ 338 + mpc83xx_spi_write_reg(&mpc83xx_spi->base->event, event); 339 + 340 + return ret; 341 + } 342 + 343 + static int __init mpc83xx_spi_probe(struct platform_device *dev) 344 + { 345 + struct spi_master *master; 346 + struct mpc83xx_spi *mpc83xx_spi; 347 + struct fsl_spi_platform_data *pdata; 348 + struct resource *r; 349 + u32 regval; 350 + int ret = 0; 351 + 352 + /* Get resources(memory, IRQ) associated with the device */ 353 + master = spi_alloc_master(&dev->dev, sizeof(struct mpc83xx_spi)); 354 + 355 + if (master == NULL) { 356 + ret = -ENOMEM; 357 + goto err; 358 + } 359 + 360 + platform_set_drvdata(dev, master); 361 + pdata = dev->dev.platform_data; 362 + 363 + if (pdata == NULL) { 364 + ret = -ENODEV; 365 + goto free_master; 366 + } 367 + 368 + r = platform_get_resource(dev, IORESOURCE_MEM, 0); 369 + if (r == NULL) { 370 + ret = -ENODEV; 371 + goto free_master; 372 + } 373 + 374 + mpc83xx_spi = spi_master_get_devdata(master); 375 + mpc83xx_spi->bitbang.master = spi_master_get(master); 376 + mpc83xx_spi->bitbang.chipselect = mpc83xx_spi_chipselect; 377 + mpc83xx_spi->bitbang.setup_transfer = mpc83xx_spi_setup_transfer; 378 + mpc83xx_spi->bitbang.txrx_bufs = mpc83xx_spi_bufs; 379 + mpc83xx_spi->sysclk = pdata->sysclk; 380 + mpc83xx_spi->activate_cs = pdata->activate_cs; 381 + mpc83xx_spi->deactivate_cs = pdata->deactivate_cs; 382 + mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8; 383 + mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8; 384 + 385 + mpc83xx_spi->bitbang.master->setup = mpc83xx_spi_setup; 386 + init_completion(&mpc83xx_spi->done); 387 + 388 + mpc83xx_spi->base = ioremap(r->start, r->end - r->start + 1); 389 + if (mpc83xx_spi->base == NULL) { 390 + ret = -ENOMEM; 391 + goto put_master; 392 + } 393 + 394 + mpc83xx_spi->irq = platform_get_irq(dev, 0); 
395 + 396 + if (mpc83xx_spi->irq < 0) { 397 + ret = -ENXIO; 398 + goto unmap_io; 399 + } 400 + 401 + /* Register for SPI Interrupt */ 402 + ret = request_irq(mpc83xx_spi->irq, mpc83xx_spi_irq, 403 + 0, "mpc83xx_spi", mpc83xx_spi); 404 + 405 + if (ret != 0) 406 + goto unmap_io; 407 + 408 + master->bus_num = pdata->bus_num; 409 + master->num_chipselect = pdata->max_chipselect; 410 + 411 + /* SPI controller initializations */ 412 + mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, 0); 413 + mpc83xx_spi_write_reg(&mpc83xx_spi->base->mask, 0); 414 + mpc83xx_spi_write_reg(&mpc83xx_spi->base->command, 0); 415 + mpc83xx_spi_write_reg(&mpc83xx_spi->base->event, 0xffffffff); 416 + 417 + /* Enable SPI interface */ 418 + regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; 419 + mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval); 420 + 421 + ret = spi_bitbang_start(&mpc83xx_spi->bitbang); 422 + 423 + if (ret != 0) 424 + goto free_irq; 425 + 426 + printk(KERN_INFO 427 + "%s: MPC83xx SPI Controller driver at 0x%p (irq = %d)\n", 428 + dev->dev.bus_id, mpc83xx_spi->base, mpc83xx_spi->irq); 429 + 430 + return ret; 431 + 432 + free_irq: 433 + free_irq(mpc83xx_spi->irq, mpc83xx_spi); 434 + unmap_io: 435 + iounmap(mpc83xx_spi->base); 436 + put_master: 437 + spi_master_put(master); 438 + free_master: 439 + kfree(master); 440 + err: 441 + return ret; 442 + } 443 + 444 + static int __devexit mpc83xx_spi_remove(struct platform_device *dev) 445 + { 446 + struct mpc83xx_spi *mpc83xx_spi; 447 + struct spi_master *master; 448 + 449 + master = platform_get_drvdata(dev); 450 + mpc83xx_spi = spi_master_get_devdata(master); 451 + 452 + spi_bitbang_stop(&mpc83xx_spi->bitbang); 453 + free_irq(mpc83xx_spi->irq, mpc83xx_spi); 454 + iounmap(mpc83xx_spi->base); 455 + spi_master_put(mpc83xx_spi->bitbang.master); 456 + 457 + return 0; 458 + } 459 + 460 + static struct platform_driver mpc83xx_spi_driver = { 461 + .probe = mpc83xx_spi_probe, 462 + .remove = __devexit_p(mpc83xx_spi_remove), 
463 + .driver = { 464 + .name = "mpc83xx_spi", 465 + }, 466 + }; 467 + 468 + static int __init mpc83xx_spi_init(void) 469 + { 470 + return platform_driver_register(&mpc83xx_spi_driver); 471 + } 472 + 473 + static void __exit mpc83xx_spi_exit(void) 474 + { 475 + platform_driver_unregister(&mpc83xx_spi_driver); 476 + } 477 + 478 + module_init(mpc83xx_spi_init); 479 + module_exit(mpc83xx_spi_exit); 480 + 481 + MODULE_AUTHOR("Kumar Gala"); 482 + MODULE_DESCRIPTION("Simple MPC83xx SPI Driver"); 483 + MODULE_LICENSE("GPL");
+453
drivers/spi/spi_s3c24xx.c
··· 1 + /* linux/drivers/spi/spi_s3c24xx.c 2 + * 3 + * Copyright (c) 2006 Ben Dooks 4 + * Copyright (c) 2006 Simtec Electronics 5 + * Ben Dooks <ben@simtec.co.uk> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + * 11 + */ 12 + 13 + 14 + //#define DEBUG 15 + 16 + #include <linux/config.h> 17 + #include <linux/init.h> 18 + #include <linux/spinlock.h> 19 + #include <linux/workqueue.h> 20 + #include <linux/interrupt.h> 21 + #include <linux/delay.h> 22 + #include <linux/errno.h> 23 + #include <linux/err.h> 24 + #include <linux/clk.h> 25 + #include <linux/platform_device.h> 26 + 27 + #include <linux/spi/spi.h> 28 + #include <linux/spi/spi_bitbang.h> 29 + 30 + #include <asm/io.h> 31 + #include <asm/dma.h> 32 + #include <asm/hardware.h> 33 + 34 + #include <asm/arch/regs-gpio.h> 35 + #include <asm/arch/regs-spi.h> 36 + #include <asm/arch/spi.h> 37 + 38 + struct s3c24xx_spi { 39 + /* bitbang has to be first */ 40 + struct spi_bitbang bitbang; 41 + struct completion done; 42 + 43 + void __iomem *regs; 44 + int irq; 45 + int len; 46 + int count; 47 + 48 + /* data buffers */ 49 + const unsigned char *tx; 50 + unsigned char *rx; 51 + 52 + struct clk *clk; 53 + struct resource *ioarea; 54 + struct spi_master *master; 55 + struct spi_device *curdev; 56 + struct device *dev; 57 + struct s3c2410_spi_info *pdata; 58 + }; 59 + 60 + #define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT) 61 + #define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP) 62 + 63 + static inline struct s3c24xx_spi *to_hw(struct spi_device *sdev) 64 + { 65 + return spi_master_get_devdata(sdev->master); 66 + } 67 + 68 + static void s3c24xx_spi_chipsel(struct spi_device *spi, int value) 69 + { 70 + struct s3c24xx_spi *hw = to_hw(spi); 71 + unsigned int cspol = spi->mode & SPI_CS_HIGH ? 
1 : 0; 72 + unsigned int spcon; 73 + 74 + switch (value) { 75 + case BITBANG_CS_INACTIVE: 76 + if (hw->pdata->set_cs) 77 + hw->pdata->set_cs(hw->pdata, value, cspol); 78 + else 79 + s3c2410_gpio_setpin(hw->pdata->pin_cs, cspol ^ 1); 80 + break; 81 + 82 + case BITBANG_CS_ACTIVE: 83 + spcon = readb(hw->regs + S3C2410_SPCON); 84 + 85 + if (spi->mode & SPI_CPHA) 86 + spcon |= S3C2410_SPCON_CPHA_FMTB; 87 + else 88 + spcon &= ~S3C2410_SPCON_CPHA_FMTB; 89 + 90 + if (spi->mode & SPI_CPOL) 91 + spcon |= S3C2410_SPCON_CPOL_HIGH; 92 + else 93 + spcon &= ~S3C2410_SPCON_CPOL_HIGH; 94 + 95 + spcon |= S3C2410_SPCON_ENSCK; 96 + 97 + /* write new configuration */ 98 + 99 + writeb(spcon, hw->regs + S3C2410_SPCON); 100 + 101 + if (hw->pdata->set_cs) 102 + hw->pdata->set_cs(hw->pdata, value, cspol); 103 + else 104 + s3c2410_gpio_setpin(hw->pdata->pin_cs, cspol); 105 + 106 + break; 107 + 108 + } 109 + } 110 + 111 + static int s3c24xx_spi_setupxfer(struct spi_device *spi, 112 + struct spi_transfer *t) 113 + { 114 + struct s3c24xx_spi *hw = to_hw(spi); 115 + unsigned int bpw; 116 + unsigned int hz; 117 + unsigned int div; 118 + 119 + bpw = t ? t->bits_per_word : spi->bits_per_word; 120 + hz = t ? t->speed_hz : spi->max_speed_hz; 121 + 122 + if (bpw != 8) { 123 + dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw); 124 + return -EINVAL; 125 + } 126 + 127 + div = clk_get_rate(hw->clk) / hz; 128 + 129 + /* is clk = pclk / (2 * (pre+1)), or is it 130 + * clk = (pclk * 2) / ( pre + 1) */ 131 + 132 + div = (div / 2) - 1; 133 + 134 + if (div < 0) 135 + div = 1; 136 + 137 + if (div > 255) 138 + div = 255; 139 + 140 + dev_dbg(&spi->dev, "setting pre-scaler to %d (hz %d)\n", div, hz); 141 + writeb(div, hw->regs + S3C2410_SPPRE); 142 + 143 + spin_lock(&hw->bitbang.lock); 144 + if (!hw->bitbang.busy) { 145 + hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE); 146 + /* need to ndelay for 0.5 clocktick ? 
*/ 147 + } 148 + spin_unlock(&hw->bitbang.lock); 149 + 150 + return 0; 151 + } 152 + 153 + static int s3c24xx_spi_setup(struct spi_device *spi) 154 + { 155 + int ret; 156 + 157 + if (!spi->bits_per_word) 158 + spi->bits_per_word = 8; 159 + 160 + if ((spi->mode & SPI_LSB_FIRST) != 0) 161 + return -EINVAL; 162 + 163 + ret = s3c24xx_spi_setupxfer(spi, NULL); 164 + if (ret < 0) { 165 + dev_err(&spi->dev, "setupxfer returned %d\n", ret); 166 + return ret; 167 + } 168 + 169 + dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", 170 + __FUNCTION__, spi->mode, spi->bits_per_word, 171 + spi->max_speed_hz); 172 + 173 + return 0; 174 + } 175 + 176 + static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count) 177 + { 178 + return hw->tx ? hw->tx[count] : 0xff; 179 + } 180 + 181 + static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t) 182 + { 183 + struct s3c24xx_spi *hw = to_hw(spi); 184 + 185 + dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", 186 + t->tx_buf, t->rx_buf, t->len); 187 + 188 + hw->tx = t->tx_buf; 189 + hw->rx = t->rx_buf; 190 + hw->len = t->len; 191 + hw->count = 0; 192 + 193 + /* send the first byte */ 194 + writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT); 195 + wait_for_completion(&hw->done); 196 + 197 + return hw->count; 198 + } 199 + 200 + static irqreturn_t s3c24xx_spi_irq(int irq, void *dev, struct pt_regs *regs) 201 + { 202 + struct s3c24xx_spi *hw = dev; 203 + unsigned int spsta = readb(hw->regs + S3C2410_SPSTA); 204 + unsigned int count = hw->count; 205 + 206 + if (spsta & S3C2410_SPSTA_DCOL) { 207 + dev_dbg(hw->dev, "data-collision\n"); 208 + complete(&hw->done); 209 + goto irq_done; 210 + } 211 + 212 + if (!(spsta & S3C2410_SPSTA_READY)) { 213 + dev_dbg(hw->dev, "spi not ready for tx?\n"); 214 + complete(&hw->done); 215 + goto irq_done; 216 + } 217 + 218 + hw->count++; 219 + 220 + if (hw->rx) 221 + hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT); 222 + 223 + count++; 224 + 225 + if (count < hw->len) 226 + 
writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT); 227 + else 228 + complete(&hw->done); 229 + 230 + irq_done: 231 + return IRQ_HANDLED; 232 + } 233 + 234 + static int s3c24xx_spi_probe(struct platform_device *pdev) 235 + { 236 + struct s3c24xx_spi *hw; 237 + struct spi_master *master; 238 + struct spi_board_info *bi; 239 + struct resource *res; 240 + int err = 0; 241 + int i; 242 + 243 + master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi)); 244 + if (master == NULL) { 245 + dev_err(&pdev->dev, "No memory for spi_master\n"); 246 + err = -ENOMEM; 247 + goto err_nomem; 248 + } 249 + 250 + hw = spi_master_get_devdata(master); 251 + memset(hw, 0, sizeof(struct s3c24xx_spi)); 252 + 253 + hw->master = spi_master_get(master); 254 + hw->pdata = pdev->dev.platform_data; 255 + hw->dev = &pdev->dev; 256 + 257 + if (hw->pdata == NULL) { 258 + dev_err(&pdev->dev, "No platform data supplied\n"); 259 + err = -ENOENT; 260 + goto err_no_pdata; 261 + } 262 + 263 + platform_set_drvdata(pdev, hw); 264 + init_completion(&hw->done); 265 + 266 + /* setup the state for the bitbang driver */ 267 + 268 + hw->bitbang.master = hw->master; 269 + hw->bitbang.setup_transfer = s3c24xx_spi_setupxfer; 270 + hw->bitbang.chipselect = s3c24xx_spi_chipsel; 271 + hw->bitbang.txrx_bufs = s3c24xx_spi_txrx; 272 + hw->bitbang.master->setup = s3c24xx_spi_setup; 273 + 274 + dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang); 275 + 276 + /* find and map our resources */ 277 + 278 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 279 + if (res == NULL) { 280 + dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); 281 + err = -ENOENT; 282 + goto err_no_iores; 283 + } 284 + 285 + hw->ioarea = request_mem_region(res->start, (res->end - res->start)+1, 286 + pdev->name); 287 + 288 + if (hw->ioarea == NULL) { 289 + dev_err(&pdev->dev, "Cannot reserve region\n"); 290 + err = -ENXIO; 291 + goto err_no_iores; 292 + } 293 + 294 + hw->regs = ioremap(res->start, (res->end - res->start)+1); 295 + if 
(hw->regs == NULL) { 296 + dev_err(&pdev->dev, "Cannot map IO\n"); 297 + err = -ENXIO; 298 + goto err_no_iomap; 299 + } 300 + 301 + hw->irq = platform_get_irq(pdev, 0); 302 + if (hw->irq < 0) { 303 + dev_err(&pdev->dev, "No IRQ specified\n"); 304 + err = -ENOENT; 305 + goto err_no_irq; 306 + } 307 + 308 + err = request_irq(hw->irq, s3c24xx_spi_irq, 0, pdev->name, hw); 309 + if (err) { 310 + dev_err(&pdev->dev, "Cannot claim IRQ\n"); 311 + goto err_no_irq; 312 + } 313 + 314 + hw->clk = clk_get(&pdev->dev, "spi"); 315 + if (IS_ERR(hw->clk)) { 316 + dev_err(&pdev->dev, "No clock for device\n"); 317 + err = PTR_ERR(hw->clk); 318 + goto err_no_clk; 319 + } 320 + 321 + /* for the moment, permanently enable the clock */ 322 + 323 + clk_enable(hw->clk); 324 + 325 + /* program defaults into the registers */ 326 + 327 + writeb(0xff, hw->regs + S3C2410_SPPRE); 328 + writeb(SPPIN_DEFAULT, hw->regs + S3C2410_SPPIN); 329 + writeb(SPCON_DEFAULT, hw->regs + S3C2410_SPCON); 330 + 331 + /* setup any gpio we can */ 332 + 333 + if (!hw->pdata->set_cs) { 334 + s3c2410_gpio_setpin(hw->pdata->pin_cs, 1); 335 + s3c2410_gpio_cfgpin(hw->pdata->pin_cs, S3C2410_GPIO_OUTPUT); 336 + } 337 + 338 + /* register our spi controller */ 339 + 340 + err = spi_bitbang_start(&hw->bitbang); 341 + if (err) { 342 + dev_err(&pdev->dev, "Failed to register SPI master\n"); 343 + goto err_register; 344 + } 345 + 346 + dev_dbg(hw->dev, "shutdown=%d\n", hw->bitbang.shutdown); 347 + 348 + /* register all the devices associated */ 349 + 350 + bi = &hw->pdata->board_info[0]; 351 + for (i = 0; i < hw->pdata->board_size; i++, bi++) { 352 + dev_info(hw->dev, "registering %s\n", bi->modalias); 353 + 354 + bi->controller_data = hw; 355 + spi_new_device(master, bi); 356 + } 357 + 358 + return 0; 359 + 360 + err_register: 361 + clk_disable(hw->clk); 362 + clk_put(hw->clk); 363 + 364 + err_no_clk: 365 + free_irq(hw->irq, hw); 366 + 367 + err_no_irq: 368 + iounmap(hw->regs); 369 + 370 + err_no_iomap: 371 + 
release_resource(hw->ioarea); 372 + kfree(hw->ioarea); 373 + 374 + err_no_iores: 375 + err_no_pdata: 376 + spi_master_put(hw->master);; 377 + 378 + err_nomem: 379 + return err; 380 + } 381 + 382 + static int s3c24xx_spi_remove(struct platform_device *dev) 383 + { 384 + struct s3c24xx_spi *hw = platform_get_drvdata(dev); 385 + 386 + platform_set_drvdata(dev, NULL); 387 + 388 + spi_unregister_master(hw->master); 389 + 390 + clk_disable(hw->clk); 391 + clk_put(hw->clk); 392 + 393 + free_irq(hw->irq, hw); 394 + iounmap(hw->regs); 395 + 396 + release_resource(hw->ioarea); 397 + kfree(hw->ioarea); 398 + 399 + spi_master_put(hw->master); 400 + return 0; 401 + } 402 + 403 + 404 + #ifdef CONFIG_PM 405 + 406 + static int s3c24xx_spi_suspend(struct platform_device *pdev, pm_message_t msg) 407 + { 408 + struct s3c24xx_spi *hw = platform_get_drvdata(dev); 409 + 410 + clk_disable(hw->clk); 411 + return 0; 412 + } 413 + 414 + static int s3c24xx_spi_resume(struct platform_device *pdev) 415 + { 416 + struct s3c24xx_spi *hw = platform_get_drvdata(dev); 417 + 418 + clk_enable(hw->clk); 419 + return 0; 420 + } 421 + 422 + #else 423 + #define s3c24xx_spi_suspend NULL 424 + #define s3c24xx_spi_resume NULL 425 + #endif 426 + 427 + static struct platform_driver s3c24xx_spidrv = { 428 + .probe = s3c24xx_spi_probe, 429 + .remove = s3c24xx_spi_remove, 430 + .suspend = s3c24xx_spi_suspend, 431 + .resume = s3c24xx_spi_resume, 432 + .driver = { 433 + .name = "s3c2410-spi", 434 + .owner = THIS_MODULE, 435 + }, 436 + }; 437 + 438 + static int __init s3c24xx_spi_init(void) 439 + { 440 + return platform_driver_register(&s3c24xx_spidrv); 441 + } 442 + 443 + static void __exit s3c24xx_spi_exit(void) 444 + { 445 + platform_driver_unregister(&s3c24xx_spidrv); 446 + } 447 + 448 + module_init(s3c24xx_spi_init); 449 + module_exit(s3c24xx_spi_exit); 450 + 451 + MODULE_DESCRIPTION("S3C24XX SPI Driver"); 452 + MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); 453 + MODULE_LICENSE("GPL");
+188
drivers/spi/spi_s3c24xx_gpio.c
··· 1 + /* linux/drivers/spi/spi_s3c24xx_gpio.c 2 + * 3 + * Copyright (c) 2006 Ben Dooks 4 + * Copyright (c) 2006 Simtec Electronics 5 + * 6 + * S3C24XX GPIO based SPI driver 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + * 12 + */ 13 + 14 + #include <linux/config.h> 15 + #include <linux/kernel.h> 16 + #include <linux/init.h> 17 + #include <linux/delay.h> 18 + #include <linux/spinlock.h> 19 + #include <linux/platform_device.h> 20 + 21 + #include <linux/spi/spi.h> 22 + #include <linux/spi/spi_bitbang.h> 23 + 24 + #include <asm/arch/regs-gpio.h> 25 + #include <asm/arch/spi-gpio.h> 26 + #include <asm/arch/hardware.h> 27 + 28 + struct s3c2410_spigpio { 29 + struct spi_bitbang bitbang; 30 + 31 + struct s3c2410_spigpio_info *info; 32 + struct platform_device *dev; 33 + }; 34 + 35 + static inline struct s3c2410_spigpio *spidev_to_sg(struct spi_device *spi) 36 + { 37 + return spi->controller_data; 38 + } 39 + 40 + static inline void setsck(struct spi_device *dev, int on) 41 + { 42 + struct s3c2410_spigpio *sg = spidev_to_sg(dev); 43 + s3c2410_gpio_setpin(sg->info->pin_clk, on ? 1 : 0); 44 + } 45 + 46 + static inline void setmosi(struct spi_device *dev, int on) 47 + { 48 + struct s3c2410_spigpio *sg = spidev_to_sg(dev); 49 + s3c2410_gpio_setpin(sg->info->pin_mosi, on ? 1 : 0); 50 + } 51 + 52 + static inline u32 getmiso(struct spi_device *dev) 53 + { 54 + struct s3c2410_spigpio *sg = spidev_to_sg(dev); 55 + return s3c2410_gpio_getpin(sg->info->pin_miso) ? 
1 : 0; 56 + } 57 + 58 + #define spidelay(x) ndelay(x) 59 + 60 + #define EXPAND_BITBANG_TXRX 61 + #include <linux/spi/spi_bitbang.h> 62 + 63 + 64 + static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi, 65 + unsigned nsecs, u32 word, u8 bits) 66 + { 67 + return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); 68 + } 69 + 70 + static u32 s3c2410_spigpio_txrx_mode1(struct spi_device *spi, 71 + unsigned nsecs, u32 word, u8 bits) 72 + { 73 + return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits); 74 + } 75 + 76 + static void s3c2410_spigpio_chipselect(struct spi_device *dev, int value) 77 + { 78 + struct s3c2410_spigpio *sg = spidev_to_sg(dev); 79 + 80 + if (sg->info && sg->info->chip_select) 81 + (sg->info->chip_select)(sg->info, value); 82 + } 83 + 84 + static int s3c2410_spigpio_probe(struct platform_device *dev) 85 + { 86 + struct spi_master *master; 87 + struct s3c2410_spigpio *sp; 88 + int ret; 89 + int i; 90 + 91 + master = spi_alloc_master(&dev->dev, sizeof(struct s3c2410_spigpio)); 92 + if (master == NULL) { 93 + dev_err(&dev->dev, "failed to allocate spi master\n"); 94 + ret = -ENOMEM; 95 + goto err; 96 + } 97 + 98 + sp = spi_master_get_devdata(master); 99 + 100 + platform_set_drvdata(dev, sp); 101 + 102 + /* copy in the plkatform data */ 103 + sp->info = dev->dev.platform_data; 104 + 105 + /* setup spi bitbang adaptor */ 106 + sp->bitbang.master = spi_master_get(master); 107 + sp->bitbang.chipselect = s3c2410_spigpio_chipselect; 108 + 109 + sp->bitbang.txrx_word[SPI_MODE_0] = s3c2410_spigpio_txrx_mode0; 110 + sp->bitbang.txrx_word[SPI_MODE_1] = s3c2410_spigpio_txrx_mode1; 111 + 112 + /* set state of spi pins */ 113 + s3c2410_gpio_setpin(sp->info->pin_clk, 0); 114 + s3c2410_gpio_setpin(sp->info->pin_mosi, 0); 115 + 116 + s3c2410_gpio_cfgpin(sp->info->pin_clk, S3C2410_GPIO_OUTPUT); 117 + s3c2410_gpio_cfgpin(sp->info->pin_mosi, S3C2410_GPIO_OUTPUT); 118 + s3c2410_gpio_cfgpin(sp->info->pin_miso, S3C2410_GPIO_INPUT); 119 + 120 + ret = 
spi_bitbang_start(&sp->bitbang); 121 + if (ret) 122 + goto err_no_bitbang; 123 + 124 + /* register the chips to go with the board */ 125 + 126 + for (i = 0; i < sp->info->board_size; i++) { 127 + dev_info(&dev->dev, "registering %p: %s\n", 128 + &sp->info->board_info[i], 129 + sp->info->board_info[i].modalias); 130 + 131 + sp->info->board_info[i].controller_data = sp; 132 + spi_new_device(master, sp->info->board_info + i); 133 + } 134 + 135 + return 0; 136 + 137 + err_no_bitbang: 138 + spi_master_put(sp->bitbang.master); 139 + err: 140 + return ret; 141 + 142 + } 143 + 144 + static int s3c2410_spigpio_remove(struct platform_device *dev) 145 + { 146 + struct s3c2410_spigpio *sp = platform_get_drvdata(dev); 147 + 148 + spi_bitbang_stop(&sp->bitbang); 149 + spi_master_put(sp->bitbang.master); 150 + 151 + return 0; 152 + } 153 + 154 + /* all gpio should be held over suspend/resume, so we should 155 + * not need to deal with this 156 + */ 157 + 158 + #define s3c2410_spigpio_suspend NULL 159 + #define s3c2410_spigpio_resume NULL 160 + 161 + 162 + static struct platform_driver s3c2410_spigpio_drv = { 163 + .probe = s3c2410_spigpio_probe, 164 + .remove = s3c2410_spigpio_remove, 165 + .suspend = s3c2410_spigpio_suspend, 166 + .resume = s3c2410_spigpio_resume, 167 + .driver = { 168 + .name = "s3c24xx-spi-gpio", 169 + .owner = THIS_MODULE, 170 + }, 171 + }; 172 + 173 + static int __init s3c2410_spigpio_init(void) 174 + { 175 + return platform_driver_register(&s3c2410_spigpio_drv); 176 + } 177 + 178 + static void __exit s3c2410_spigpio_exit(void) 179 + { 180 + platform_driver_unregister(&s3c2410_spigpio_drv); 181 + } 182 + 183 + module_init(s3c2410_spigpio_init); 184 + module_exit(s3c2410_spigpio_exit); 185 + 186 + MODULE_DESCRIPTION("S3C24XX SPI Driver"); 187 + MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); 188 + MODULE_LICENSE("GPL");
+1 -1
drivers/usb/atm/speedtch.c
··· 69 69 #define RESUBMIT_DELAY 1000 /* milliseconds */ 70 70 71 71 #define DEFAULT_BULK_ALTSETTING 1 72 - #define DEFAULT_ISOC_ALTSETTING 2 72 + #define DEFAULT_ISOC_ALTSETTING 3 73 73 #define DEFAULT_DL_512_FIRST 0 74 74 #define DEFAULT_ENABLE_ISOC 0 75 75 #define DEFAULT_SW_BUFFERING 0
+4 -4
drivers/usb/atm/usbatm.c
··· 99 99 100 100 #define UDSL_MAX_RCV_URBS 16 101 101 #define UDSL_MAX_SND_URBS 16 102 - #define UDSL_MAX_BUF_SIZE 64 * 1024 /* bytes */ 102 + #define UDSL_MAX_BUF_SIZE 65536 103 103 #define UDSL_DEFAULT_RCV_URBS 4 104 104 #define UDSL_DEFAULT_SND_URBS 4 105 - #define UDSL_DEFAULT_RCV_BUF_SIZE 64 * ATM_CELL_SIZE /* bytes */ 106 - #define UDSL_DEFAULT_SND_BUF_SIZE 64 * ATM_CELL_SIZE /* bytes */ 105 + #define UDSL_DEFAULT_RCV_BUF_SIZE 3392 /* 64 * ATM_CELL_SIZE */ 106 + #define UDSL_DEFAULT_SND_BUF_SIZE 3392 /* 64 * ATM_CELL_SIZE */ 107 107 108 108 #define ATM_CELL_HEADER (ATM_CELL_SIZE - ATM_CELL_PAYLOAD) 109 109 ··· 135 135 module_param(snd_buf_bytes, uint, S_IRUGO); 136 136 MODULE_PARM_DESC(snd_buf_bytes, 137 137 "Size of the buffers used for transmission, in bytes (range: 1-" 138 - __MODULE_STRING(UDSL_MAX_SND_BUF_SIZE) ", default: " 138 + __MODULE_STRING(UDSL_MAX_BUF_SIZE) ", default: " 139 139 __MODULE_STRING(UDSL_DEFAULT_SND_BUF_SIZE) ")"); 140 140 141 141
+6 -7
drivers/usb/core/hcd.c
··· 1805 1805 USB_SPEED_FULL; 1806 1806 hcd->self.root_hub = rhdev; 1807 1807 1808 + /* wakeup flag init defaults to "everything works" for root hubs, 1809 + * but drivers can override it in reset() if needed, along with 1810 + * recording the overall controller's system wakeup capability. 1811 + */ 1812 + device_init_wakeup(&rhdev->dev, 1); 1813 + 1808 1814 /* "reset" is misnamed; its role is now one-time init. the controller 1809 1815 * should already have been reset (and boot firmware kicked off etc). 1810 1816 */ ··· 1818 1812 dev_err(hcd->self.controller, "can't setup\n"); 1819 1813 goto err_hcd_driver_setup; 1820 1814 } 1821 - 1822 - /* wakeup flag init is in transition; for now we can't rely on PCI to 1823 - * initialize these bits properly, so we let reset() override it. 1824 - * This init should _precede_ the reset() once PCI behaves. 1825 - */ 1826 - device_init_wakeup(&rhdev->dev, 1827 - device_can_wakeup(hcd->self.controller)); 1828 1815 1829 1816 /* NOTE: root hub and controller capabilities may not be the same */ 1830 1817 if (device_can_wakeup(hcd->self.controller)
+13 -10
drivers/usb/core/hub.c
··· 1168 1168 static int choose_configuration(struct usb_device *udev) 1169 1169 { 1170 1170 int i; 1171 - u16 devstatus; 1172 - int bus_powered; 1173 1171 int num_configs; 1174 1172 struct usb_host_config *c, *best; 1175 - 1176 - /* If this fails, assume the device is bus-powered */ 1177 - devstatus = 0; 1178 - usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus); 1179 - le16_to_cpus(&devstatus); 1180 - bus_powered = ((devstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0); 1181 - dev_dbg(&udev->dev, "device is %s-powered\n", 1182 - bus_powered ? "bus" : "self"); 1183 1173 1184 1174 best = NULL; 1185 1175 c = udev->config; ··· 1187 1197 * similar errors in their descriptors. If the next test 1188 1198 * were allowed to execute, such configurations would always 1189 1199 * be rejected and the devices would not work as expected. 1200 + * In the meantime, we run the risk of selecting a config 1201 + * that requires external power at a time when that power 1202 + * isn't available. It seems to be the lesser of two evils. 1203 + * 1204 + * Bugzilla #6448 reports a device that appears to crash 1205 + * when it receives a GET_DEVICE_STATUS request! We don't 1206 + * have any other way to tell whether a device is self-powered, 1207 + * but since we don't use that information anywhere but here, 1208 + * the call has been removed. 1209 + * 1210 + * Maybe the GET_DEVICE_STATUS call and the test below can 1211 + * be reinstated when device firmwares become more reliable. 1212 + * Don't hold your breath. 1190 1213 */ 1191 1214 #if 0 1192 1215 /* Rule out self-powered configs for a bus-powered device */
+1 -1
drivers/usb/host/ohci-hcd.c
··· 863 863 i = ohci->num_ports; 864 864 while (i--) 865 865 ohci_writel (ohci, RH_PS_PSS, 866 - &ohci->regs->roothub.portstatus [temp]); 866 + &ohci->regs->roothub.portstatus [i]); 867 867 ohci_dbg (ohci, "restart complete\n"); 868 868 } 869 869 return 0;
+4
drivers/usb/input/hid-core.c
··· 1557 1557 #define USB_VENDOR_ID_HP 0x03f0 1558 1558 #define USB_DEVICE_ID_HP_USBHUB_KB 0x020c 1559 1559 1560 + #define USB_VENDOR_ID_IBM 0x04b3 1561 + #define USB_DEVICE_ID_IBM_USBHUB_KB 0x3005 1562 + 1560 1563 #define USB_VENDOR_ID_CREATIVELABS 0x062a 1561 1564 #define USB_DEVICE_ID_CREATIVELABS_SILVERCREST 0x0201 1562 1565 ··· 1684 1681 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_USBHUB_KB, HID_QUIRK_NOGET}, 1685 1682 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVELABS_SILVERCREST, HID_QUIRK_NOGET }, 1686 1683 { USB_VENDOR_ID_HP, USB_DEVICE_ID_HP_USBHUB_KB, HID_QUIRK_NOGET }, 1684 + { USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_USBHUB_KB, HID_QUIRK_NOGET }, 1687 1685 { USB_VENDOR_ID_TANGTOP, USB_DEVICE_ID_TANGTOP_USBPS2, HID_QUIRK_NOGET }, 1688 1686 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, 1689 1687 { USB_VENDOR_ID_SILVERCREST, USB_DEVICE_ID_SILVERCREST_KB, HID_QUIRK_NOGET },
+1
drivers/usb/input/hiddev.c
··· 317 317 } 318 318 319 319 schedule(); 320 + set_current_state(TASK_INTERRUPTIBLE); 320 321 } 321 322 322 323 set_current_state(TASK_RUNNING);
+4
drivers/usb/misc/emi26.c
··· 15 15 #include <linux/module.h> 16 16 #include <linux/init.h> 17 17 #include <linux/usb.h> 18 + #include <linux/delay.h> 18 19 19 20 #define MAX_INTEL_HEX_RECORD_LENGTH 16 20 21 typedef struct _INTEL_HEX_RECORD ··· 115 114 116 115 /* De-assert reset (let the CPU run) */ 117 116 err = emi26_set_reset(dev,0); 117 + msleep(250); /* let device settle */ 118 118 119 119 /* 2. We upload the FPGA firmware into the EMI 120 120 * Note: collect up to 1023 (yes!) bytes and send them with ··· 152 150 goto wraperr; 153 151 } 154 152 } 153 + msleep(250); /* let device settle */ 155 154 156 155 /* De-assert reset (let the CPU run) */ 157 156 err = emi26_set_reset(dev,0); ··· 195 192 err("%s - error loading firmware: error = %d", __FUNCTION__, err); 196 193 goto wraperr; 197 194 } 195 + msleep(250); /* let device settle */ 198 196 199 197 /* return 1 to fail the driver inialization 200 198 * and give real driver change to load */
+4
drivers/usb/misc/emi62.c
··· 15 15 #include <linux/init.h> 16 16 #include <linux/module.h> 17 17 #include <linux/usb.h> 18 + #include <linux/delay.h> 18 19 19 20 #define MAX_INTEL_HEX_RECORD_LENGTH 16 20 21 typedef struct _INTEL_HEX_RECORD ··· 124 123 125 124 /* De-assert reset (let the CPU run) */ 126 125 err = emi62_set_reset(dev,0); 126 + msleep(250); /* let device settle */ 127 127 128 128 /* 2. We upload the FPGA firmware into the EMI 129 129 * Note: collect up to 1023 (yes!) bytes and send them with ··· 168 166 err("%s - error loading firmware: error = %d", __FUNCTION__, err); 169 167 goto wraperr; 170 168 } 169 + msleep(250); /* let device settle */ 171 170 172 171 /* 4. We put the part of the firmware that lies in the external RAM into the EZ-USB */ 173 172 ··· 231 228 err("%s - error loading firmware: error = %d", __FUNCTION__, err); 232 229 goto wraperr; 233 230 } 231 + msleep(250); /* let device settle */ 234 232 235 233 kfree(buf); 236 234
+15 -5
drivers/usb/net/pegasus.c
··· 318 318 set_register(pegasus, PhyCtrl, (indx | PHY_READ)); 319 319 for (i = 0; i < REG_TIMEOUT; i++) { 320 320 ret = get_registers(pegasus, PhyCtrl, 1, data); 321 + if (ret == -ESHUTDOWN) 322 + goto fail; 321 323 if (data[0] & PHY_DONE) 322 324 break; 323 325 } ··· 328 326 *regd = le16_to_cpu(regdi); 329 327 return ret; 330 328 } 329 + fail: 331 330 if (netif_msg_drv(pegasus)) 332 331 dev_warn(&pegasus->intf->dev, "fail %s\n", __FUNCTION__); 333 332 ··· 357 354 set_register(pegasus, PhyCtrl, (indx | PHY_WRITE)); 358 355 for (i = 0; i < REG_TIMEOUT; i++) { 359 356 ret = get_registers(pegasus, PhyCtrl, 1, data); 357 + if (ret == -ESHUTDOWN) 358 + goto fail; 360 359 if (data[0] & PHY_DONE) 361 360 break; 362 361 } 363 362 if (i < REG_TIMEOUT) 364 363 return ret; 365 364 365 + fail: 366 366 if (netif_msg_drv(pegasus)) 367 367 dev_warn(&pegasus->intf->dev, "fail %s\n", __FUNCTION__); 368 368 return -ETIMEDOUT; ··· 393 387 ret = get_registers(pegasus, EpromCtrl, 1, &tmp); 394 388 if (tmp & EPROM_DONE) 395 389 break; 390 + if (ret == -ESHUTDOWN) 391 + goto fail; 396 392 } 397 393 if (i < REG_TIMEOUT) { 398 394 ret = get_registers(pegasus, EpromData, 2, &retdatai); ··· 402 394 return ret; 403 395 } 404 396 397 + fail: 405 398 if (netif_msg_drv(pegasus)) 406 399 dev_warn(&pegasus->intf->dev, "fail %s\n", __FUNCTION__); 407 400 return -ETIMEDOUT; ··· 442 433 443 434 for (i = 0; i < REG_TIMEOUT; i++) { 444 435 ret = get_registers(pegasus, EpromCtrl, 1, &tmp); 436 + if (ret == -ESHUTDOWN) 437 + goto fail; 445 438 if (tmp & EPROM_DONE) 446 439 break; 447 440 } 448 441 disable_eprom_write(pegasus); 449 442 if (i < REG_TIMEOUT) 450 443 return ret; 444 + fail: 451 445 if (netif_msg_drv(pegasus)) 452 446 dev_warn(&pegasus->intf->dev, "fail %s\n", __FUNCTION__); 453 447 return -ETIMEDOUT; ··· 1390 1378 struct pegasus *pegasus = usb_get_intfdata(intf); 1391 1379 1392 1380 netif_device_detach (pegasus->net); 1381 + cancel_delayed_work(&pegasus->carrier_check); 1393 1382 if 
(netif_running(pegasus->net)) { 1394 - cancel_delayed_work(&pegasus->carrier_check); 1395 - 1396 1383 usb_kill_urb(pegasus->rx_urb); 1397 1384 usb_kill_urb(pegasus->intr_urb); 1398 1385 } ··· 1411 1400 pegasus->intr_urb->status = 0; 1412 1401 pegasus->intr_urb->actual_length = 0; 1413 1402 intr_callback(pegasus->intr_urb, NULL); 1414 - 1415 - queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, 1416 - CARRIER_CHECK_DELAY); 1417 1403 } 1404 + queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, 1405 + CARRIER_CHECK_DELAY); 1418 1406 return 0; 1419 1407 } 1420 1408
+10
drivers/usb/serial/Kconfig
··· 71 71 To compile this driver as a module, choose M here: the 72 72 module will be called anydata. 73 73 74 + config USB_SERIAL_ARK3116 75 + tristate "USB ARK Micro 3116 USB Serial Driver (EXPERIMENTAL)" 76 + depends on USB_SERIAL && EXPERIMENTAL 77 + help 78 + Say Y here if you want to use a ARK Micro 3116 USB to Serial 79 + device. 80 + 81 + To compile this driver as a module, choose M here: the 82 + module will be called ark3116 83 + 74 84 config USB_SERIAL_BELKIN 75 85 tristate "USB Belkin and Peracom Single Port Serial Driver" 76 86 depends on USB_SERIAL
+1
drivers/usb/serial/Makefile
··· 13 13 14 14 obj-$(CONFIG_USB_SERIAL_AIRPRIME) += airprime.o 15 15 obj-$(CONFIG_USB_SERIAL_ANYDATA) += anydata.o 16 + obj-$(CONFIG_USB_SERIAL_ARK3116) += ark3116.o 16 17 obj-$(CONFIG_USB_SERIAL_BELKIN) += belkin_sa.o 17 18 obj-$(CONFIG_USB_SERIAL_CP2101) += cp2101.o 18 19 obj-$(CONFIG_USB_SERIAL_CYBERJACK) += cyberjack.o
+1
drivers/usb/serial/airprime.c
··· 18 18 static struct usb_device_id id_table [] = { 19 19 { USB_DEVICE(0xf3d, 0x0112) }, /* AirPrime CDMA Wireless PC Card */ 20 20 { USB_DEVICE(0x1410, 0x1110) }, /* Novatel Wireless Merlin CDMA */ 21 + { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless Aircard 580 */ 21 22 { }, 22 23 }; 23 24 MODULE_DEVICE_TABLE(usb, id_table);
+465
drivers/usb/serial/ark3116.c
··· 1 + /* 2 + * ark3116 3 + * - implements a driver for the arkmicro ark3116 chipset (vendor=0x6547, 4 + * productid=0x0232) (used in a datacable called KQ-U8A) 5 + * 6 + * - based on code by krisfx -> thanks !! 7 + * (see http://www.linuxquestions.org/questions/showthread.php?p=2184457#post2184457) 8 + * 9 + * - based on logs created by usbsnoopy 10 + * 11 + * Author : Simon Schulz [ark3116_driver<AT>auctionant.de] 12 + * 13 + * This program is free software; you can redistribute it and/or modify it 14 + * under the terms of the GNU General Public License as published by the 15 + * Free Software Foundation; either version 2 of the License, or (at your 16 + * option) any later version. 17 + */ 18 + 19 + #include <linux/kernel.h> 20 + #include <linux/init.h> 21 + #include <linux/tty.h> 22 + #include <linux/module.h> 23 + #include <linux/usb.h> 24 + #include "usb-serial.h" 25 + 26 + 27 + static int debug; 28 + 29 + static struct usb_device_id id_table [] = { 30 + { USB_DEVICE(0x6547, 0x0232) }, 31 + { }, 32 + }; 33 + MODULE_DEVICE_TABLE(usb, id_table); 34 + 35 + struct ark3116_private { 36 + spinlock_t lock; 37 + u8 termios_initialized; 38 + }; 39 + 40 + static inline void ARK3116_SND(struct usb_serial *serial, int seq, 41 + __u8 request, __u8 requesttype, 42 + __u16 value, __u16 index) 43 + { 44 + int result; 45 + result = usb_control_msg(serial->dev, 46 + usb_sndctrlpipe(serial->dev,0), 47 + request, requesttype, value, index, 48 + NULL,0x00, 1000); 49 + dbg("%03d > ok",seq); 50 + } 51 + 52 + static inline void ARK3116_RCV(struct usb_serial *serial, int seq, 53 + __u8 request, __u8 requesttype, 54 + __u16 value, __u16 index, __u8 expected, 55 + char *buf) 56 + { 57 + int result; 58 + result = usb_control_msg(serial->dev, 59 + usb_rcvctrlpipe(serial->dev,0), 60 + request, requesttype, value, index, 61 + buf, 0x0000001, 1000); 62 + if (result) 63 + dbg("%03d < %d bytes [0x%02X]",seq, result, buf[0]); 64 + else 65 + dbg("%03d < 0 bytes", seq); 66 + } 67 + 68 + 69 + 
static inline void ARK3116_RCV_QUIET(struct usb_serial *serial, 70 + __u8 request, __u8 requesttype, 71 + __u16 value, __u16 index, char *buf) 72 + { 73 + usb_control_msg(serial->dev, 74 + usb_rcvctrlpipe(serial->dev,0), 75 + request, requesttype, value, index, 76 + buf, 0x0000001, 1000); 77 + } 78 + 79 + 80 + static int ark3116_attach(struct usb_serial *serial) 81 + { 82 + char *buf; 83 + struct ark3116_private *priv; 84 + int i; 85 + 86 + for (i = 0; i < serial->num_ports; ++i) { 87 + priv = kmalloc (sizeof (struct ark3116_private), GFP_KERNEL); 88 + if (!priv) 89 + goto cleanup; 90 + memset (priv, 0x00, sizeof (struct ark3116_private)); 91 + spin_lock_init(&priv->lock); 92 + 93 + usb_set_serial_port_data(serial->port[i], priv); 94 + } 95 + 96 + buf = kmalloc(1, GFP_KERNEL); 97 + if (!buf) { 98 + dbg("error kmalloc -> out of mem ?"); 99 + goto cleanup; 100 + } 101 + 102 + /* 3 */ 103 + ARK3116_SND(serial, 3,0xFE,0x40,0x0008,0x0002); 104 + ARK3116_SND(serial, 4,0xFE,0x40,0x0008,0x0001); 105 + ARK3116_SND(serial, 5,0xFE,0x40,0x0000,0x0008); 106 + ARK3116_SND(serial, 6,0xFE,0x40,0x0000,0x000B); 107 + 108 + /* <-- seq7 */ 109 + ARK3116_RCV(serial, 7,0xFE,0xC0,0x0000,0x0003, 0x00, buf); 110 + ARK3116_SND(serial, 8,0xFE,0x40,0x0080,0x0003); 111 + ARK3116_SND(serial, 9,0xFE,0x40,0x001A,0x0000); 112 + ARK3116_SND(serial,10,0xFE,0x40,0x0000,0x0001); 113 + ARK3116_SND(serial,11,0xFE,0x40,0x0000,0x0003); 114 + 115 + /* <-- seq12 */ 116 + ARK3116_RCV(serial,12,0xFE,0xC0,0x0000,0x0004, 0x00, buf); 117 + ARK3116_SND(serial,13,0xFE,0x40,0x0000,0x0004); 118 + 119 + /* 14 */ 120 + ARK3116_RCV(serial,14,0xFE,0xC0,0x0000,0x0004, 0x00, buf); 121 + ARK3116_SND(serial,15,0xFE,0x40,0x0000,0x0004); 122 + 123 + /* 16 */ 124 + ARK3116_RCV(serial,16,0xFE,0xC0,0x0000,0x0004, 0x00, buf); 125 + /* --> seq17 */ 126 + ARK3116_SND(serial,17,0xFE,0x40,0x0001,0x0004); 127 + 128 + /* <-- seq18 */ 129 + ARK3116_RCV(serial,18,0xFE,0xC0,0x0000,0x0004, 0x01, buf); 130 + 131 + /* --> seq19 */ 132 + 
ARK3116_SND(serial,19,0xFE,0x40,0x0003,0x0004); 133 + 134 + 135 + /* <-- seq20 */ 136 + /* seems like serial port status info (RTS, CTS,...) */ 137 + /* returns modem control line status ?! */ 138 + ARK3116_RCV(serial,20,0xFE,0xC0,0x0000,0x0006, 0xFF, buf); 139 + 140 + /* set 9600 baud & do some init ?! */ 141 + ARK3116_SND(serial,147,0xFE,0x40,0x0083,0x0003); 142 + ARK3116_SND(serial,148,0xFE,0x40,0x0038,0x0000); 143 + ARK3116_SND(serial,149,0xFE,0x40,0x0001,0x0001); 144 + ARK3116_SND(serial,150,0xFE,0x40,0x0003,0x0003); 145 + ARK3116_RCV(serial,151,0xFE,0xC0,0x0000,0x0004,0x03, buf); 146 + ARK3116_SND(serial,152,0xFE,0x40,0x0000,0x0003); 147 + ARK3116_RCV(serial,153,0xFE,0xC0,0x0000,0x0003,0x00, buf); 148 + ARK3116_SND(serial,154,0xFE,0x40,0x0003,0x0003); 149 + 150 + kfree(buf); 151 + return(0); 152 + 153 + cleanup: 154 + for (--i; i>=0; --i) 155 + usb_set_serial_port_data(serial->port[i], NULL); 156 + return -ENOMEM; 157 + } 158 + 159 + static void ark3116_set_termios(struct usb_serial_port *port, 160 + struct termios *old_termios) 161 + { 162 + struct usb_serial *serial = port->serial; 163 + struct ark3116_private *priv = usb_get_serial_port_data(port); 164 + unsigned int cflag = port->tty->termios->c_cflag; 165 + unsigned long flags; 166 + int baud; 167 + int ark3116_baud; 168 + char *buf; 169 + char config; 170 + 171 + config = 0; 172 + 173 + dbg("%s - port %d", __FUNCTION__, port->number); 174 + 175 + if ((!port->tty) || (!port->tty->termios)) { 176 + dbg("%s - no tty structures", __FUNCTION__); 177 + return; 178 + } 179 + 180 + spin_lock_irqsave(&priv->lock, flags); 181 + if (!priv->termios_initialized) { 182 + *(port->tty->termios) = tty_std_termios; 183 + port->tty->termios->c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; 184 + priv->termios_initialized = 1; 185 + } 186 + spin_unlock_irqrestore(&priv->lock, flags); 187 + 188 + cflag = port->tty->termios->c_cflag; 189 + 190 + /* check that they really want us to change something: */ 191 + if (old_termios) 
{ 192 + if ((cflag == old_termios->c_cflag) && 193 + (RELEVANT_IFLAG(port->tty->termios->c_iflag) == 194 + RELEVANT_IFLAG(old_termios->c_iflag))) { 195 + dbg("%s - nothing to change...", __FUNCTION__); 196 + return; 197 + } 198 + } 199 + 200 + buf = kmalloc(1, GFP_KERNEL); 201 + if (!buf) { 202 + dbg("error kmalloc"); 203 + return; 204 + } 205 + 206 + /* set data bit count (8/7/6/5) */ 207 + if (cflag & CSIZE){ 208 + switch (cflag & CSIZE){ 209 + case CS5: 210 + config |= 0x00; 211 + dbg("setting CS5"); 212 + break; 213 + case CS6: 214 + config |= 0x01; 215 + dbg("setting CS6"); 216 + break; 217 + case CS7: 218 + config |= 0x02; 219 + dbg("setting CS7"); 220 + break; 221 + default: 222 + err ("CSIZE was set but not CS5-CS8, using CS8!"); 223 + case CS8: 224 + config |= 0x03; 225 + dbg("setting CS8"); 226 + break; 227 + } 228 + } 229 + 230 + /* set parity (NONE,EVEN,ODD) */ 231 + if (cflag & PARENB){ 232 + if (cflag & PARODD) { 233 + config |= 0x08; 234 + dbg("setting parity to ODD"); 235 + } else { 236 + config |= 0x18; 237 + dbg("setting parity to EVEN"); 238 + } 239 + } else { 240 + dbg("setting parity to NONE"); 241 + } 242 + 243 + /* SET STOPBIT (1/2) */ 244 + if (cflag & CSTOPB) { 245 + config |= 0x04; 246 + dbg ("setting 2 stop bits"); 247 + } else { 248 + dbg ("setting 1 stop bit"); 249 + } 250 + 251 + 252 + /* set baudrate: */ 253 + baud = 0; 254 + switch (cflag & CBAUD){ 255 + case B0: 256 + err("can't set 0baud, using 9600 instead"); 257 + break; 258 + case B75: baud = 75; break; 259 + case B150: baud = 150; break; 260 + case B300: baud = 300; break; 261 + case B600: baud = 600; break; 262 + case B1200: baud = 1200; break; 263 + case B1800: baud = 1800; break; 264 + case B2400: baud = 2400; break; 265 + case B4800: baud = 4800; break; 266 + case B9600: baud = 9600; break; 267 + case B19200: baud = 19200; break; 268 + case B38400: baud = 38400; break; 269 + case B57600: baud = 57600; break; 270 + case B115200: baud = 115200; break; 271 + case B230400: baud 
= 230400; break; 272 + case B460800: baud = 460800; break; 273 + default: 274 + dbg("does not support the baudrate requested (fix it)"); 275 + break; 276 + } 277 + 278 + /* set 9600 as default (if given baudrate is invalid for example) */ 279 + if (baud == 0) 280 + baud = 9600; 281 + 282 + /* 283 + * found by try'n'error, be careful, maybe there are other options 284 + * for multiplicator etc! 285 + */ 286 + if (baud == 460800) 287 + /* strange, for 460800 the formula is wrong 288 + * (dont use round(), then 9600baud is wrong) */ 289 + ark3116_baud = 7; 290 + else 291 + ark3116_baud = 3000000 / baud; 292 + 293 + /* ? */ 294 + ARK3116_RCV(serial,0,0xFE,0xC0,0x0000,0x0003, 0x03, buf); 295 + /* offset = buf[0]; */ 296 + /* offset = 0x03; */ 297 + /* dbg("using 0x%04X as target for 0x0003:",0x0080+offset); */ 298 + 299 + 300 + /* set baudrate */ 301 + dbg("setting baudrate to %d (->reg=%d)",baud,ark3116_baud); 302 + ARK3116_SND(serial,147,0xFE,0x40,0x0083,0x0003); 303 + ARK3116_SND(serial,148,0xFE,0x40,(ark3116_baud & 0x00FF) ,0x0000); 304 + ARK3116_SND(serial,149,0xFE,0x40,(ark3116_baud & 0xFF00)>>8,0x0001); 305 + ARK3116_SND(serial,150,0xFE,0x40,0x0003,0x0003); 306 + 307 + /* ? 
*/ 308 + ARK3116_RCV(serial,151,0xFE,0xC0,0x0000,0x0004,0x03, buf); 309 + ARK3116_SND(serial,152,0xFE,0x40,0x0000,0x0003); 310 + 311 + /* set data bit count, stop bit count & parity: */ 312 + dbg("updating bit count, stop bit or parity (cfg=0x%02X)", config); 313 + ARK3116_RCV(serial,153,0xFE,0xC0,0x0000,0x0003,0x00, buf); 314 + ARK3116_SND(serial,154,0xFE,0x40,config,0x0003); 315 + 316 + if (cflag & CRTSCTS) 317 + dbg("CRTSCTS not supported by chipset ?!"); 318 + 319 + /* TEST ARK3116_SND(154,0xFE,0x40,0xFFFF, 0x0006); */ 320 + 321 + kfree(buf); 322 + return; 323 + } 324 + 325 + static int ark3116_open(struct usb_serial_port *port, struct file *filp) 326 + { 327 + struct termios tmp_termios; 328 + struct usb_serial *serial = port->serial; 329 + char *buf; 330 + int result = 0; 331 + 332 + dbg("%s - port %d", __FUNCTION__, port->number); 333 + 334 + buf = kmalloc(1, GFP_KERNEL); 335 + if (!buf) { 336 + dbg("error kmalloc -> out of mem ?"); 337 + return -ENOMEM; 338 + } 339 + 340 + result = usb_serial_generic_open(port, filp); 341 + if (result) 342 + return result; 343 + 344 + /* open */ 345 + ARK3116_RCV(serial,111,0xFE,0xC0,0x0000,0x0003, 0x02, buf); 346 + 347 + ARK3116_SND(serial,112,0xFE,0x40,0x0082,0x0003); 348 + ARK3116_SND(serial,113,0xFE,0x40,0x001A,0x0000); 349 + ARK3116_SND(serial,114,0xFE,0x40,0x0000,0x0001); 350 + ARK3116_SND(serial,115,0xFE,0x40,0x0002,0x0003); 351 + 352 + ARK3116_RCV(serial,116,0xFE,0xC0,0x0000,0x0004, 0x03, buf); 353 + ARK3116_SND(serial,117,0xFE,0x40,0x0002,0x0004); 354 + 355 + ARK3116_RCV(serial,118,0xFE,0xC0,0x0000,0x0004, 0x02, buf); 356 + ARK3116_SND(serial,119,0xFE,0x40,0x0000,0x0004); 357 + 358 + ARK3116_RCV(serial,120,0xFE,0xC0,0x0000,0x0004, 0x00, buf); 359 + 360 + ARK3116_SND(serial,121,0xFE,0x40,0x0001,0x0004); 361 + 362 + ARK3116_RCV(serial,122,0xFE,0xC0,0x0000,0x0004, 0x01, buf); 363 + 364 + ARK3116_SND(serial,123,0xFE,0x40,0x0003,0x0004); 365 + 366 + /* returns different values (control lines ?!) 
*/ 367 + ARK3116_RCV(serial,124,0xFE,0xC0,0x0000,0x0006, 0xFF, buf); 368 + 369 + /* initialise termios: */ 370 + if (port->tty) 371 + ark3116_set_termios(port, &tmp_termios); 372 + 373 + kfree(buf); 374 + 375 + return result; 376 + 377 + } 378 + 379 + static int ark3116_ioctl(struct usb_serial_port *port, struct file *file, 380 + unsigned int cmd, unsigned long arg) 381 + { 382 + dbg("ioctl not supported yet..."); 383 + return -ENOIOCTLCMD; 384 + } 385 + 386 + static int ark3116_tiocmget(struct usb_serial_port *port, struct file *file) 387 + { 388 + struct usb_serial *serial = port->serial; 389 + char *buf; 390 + char temp; 391 + 392 + /* seems like serial port status info (RTS, CTS,...) is stored 393 + * in reg(?) 0x0006 394 + * pcb connection point 11 = GND -> sets bit4 of response 395 + * pcb connection point 7 = GND -> sets bit6 of response 396 + */ 397 + 398 + buf = kmalloc(1, GFP_KERNEL); 399 + if (!buf) { 400 + dbg("error kmalloc"); 401 + return -ENOMEM; 402 + } 403 + 404 + /* read register: */ 405 + ARK3116_RCV_QUIET(serial,0xFE,0xC0,0x0000,0x0006,buf); 406 + temp = buf[0]; 407 + kfree(buf); 408 + 409 + /* i do not really know if bit4=CTS and bit6=DSR... was just a 410 + * quick guess !! 411 + */ 412 + return (temp & (1<<4) ? TIOCM_CTS : 0) | 413 + (temp & (1<<6) ? 
TIOCM_DSR : 0); 414 + } 415 + 416 + static struct usb_driver ark3116_driver = { 417 + .name = "ark3116", 418 + .probe = usb_serial_probe, 419 + .disconnect = usb_serial_disconnect, 420 + .id_table = id_table, 421 + }; 422 + 423 + static struct usb_serial_driver ark3116_device = { 424 + .driver = { 425 + .owner = THIS_MODULE, 426 + .name = "ark3116", 427 + }, 428 + .id_table = id_table, 429 + .num_interrupt_in = 1, 430 + .num_bulk_in = 1, 431 + .num_bulk_out = 1, 432 + .num_ports = 1, 433 + .attach = ark3116_attach, 434 + .set_termios = ark3116_set_termios, 435 + .ioctl = ark3116_ioctl, 436 + .tiocmget = ark3116_tiocmget, 437 + .open = ark3116_open, 438 + }; 439 + 440 + static int __init ark3116_init(void) 441 + { 442 + int retval; 443 + 444 + retval = usb_serial_register(&ark3116_device); 445 + if (retval) 446 + return retval; 447 + retval = usb_register(&ark3116_driver); 448 + if (retval) 449 + usb_serial_deregister(&ark3116_device); 450 + return retval; 451 + } 452 + 453 + static void __exit ark3116_exit(void) 454 + { 455 + usb_deregister(&ark3116_driver); 456 + usb_serial_deregister(&ark3116_device); 457 + } 458 + 459 + module_init(ark3116_init); 460 + module_exit(ark3116_exit); 461 + MODULE_LICENSE("GPL"); 462 + 463 + module_param(debug, bool, S_IRUGO | S_IWUSR); 464 + MODULE_PARM_DESC(debug, "Debug enabled or not"); 465 +
+2
drivers/usb/serial/ftdi_sio.c
··· 307 307 308 308 309 309 static struct usb_device_id id_table_combined [] = { 310 + { USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) }, 310 311 { USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) }, 311 312 { USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) }, 312 313 { USB_DEVICE(FTDI_VID, FTDI_SIO_PID) }, ··· 499 498 { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) }, 500 499 { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) }, 501 500 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) }, 501 + { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, 502 502 { }, /* Optional parameter entry */ 503 503 { } /* Terminating entry */ 504 504 };
+9
drivers/usb/serial/ftdi_sio.h
··· 32 32 #define FTDI_NF_RIC_PID 0x0001 /* Product Id */ 33 33 34 34 35 + /* ACT Solutions HomePro ZWave interface (http://www.act-solutions.com/HomePro.htm) */ 36 + #define FTDI_ACTZWAVE_PID 0xF2D0 37 + 38 + 35 39 /* www.irtrans.de device */ 36 40 #define FTDI_IRTRANS_PID 0xFC60 /* Product Id */ 37 41 ··· 430 426 #define PAPOUCH_VID 0x5050 /* Vendor ID */ 431 427 #define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ 432 428 429 + /* 430 + * ACG Identification Technologies GmbH products (http://www.acg.de/). 431 + * Submitted by anton -at- goto10 -dot- org. 432 + */ 433 + #define FTDI_ACG_HFDUAL_PID 0xDD20 /* HF Dual ISO Reader (RFID) */ 433 434 434 435 /* Commands */ 435 436 #define FTDI_SIO_RESET 0 /* Reset the port */
+1
drivers/usb/serial/generic.c
··· 138 138 139 139 return result; 140 140 } 141 + EXPORT_SYMBOL_GPL(usb_serial_generic_open); 141 142 142 143 static void generic_cleanup (struct usb_serial_port *port) 143 144 {
+6 -6
drivers/usb/serial/omninet.c
··· 257 257 return (0); 258 258 } 259 259 260 - spin_lock(&port->lock); 261 - if (port->write_urb_busy) { 262 - spin_unlock(&port->lock); 260 + spin_lock(&wport->lock); 261 + if (wport->write_urb_busy) { 262 + spin_unlock(&wport->lock); 263 263 dbg("%s - already writing", __FUNCTION__); 264 264 return 0; 265 265 } 266 - port->write_urb_busy = 1; 267 - spin_unlock(&port->lock); 266 + wport->write_urb_busy = 1; 267 + spin_unlock(&wport->lock); 268 268 269 269 count = (count > OMNINET_BULKOUTSIZE) ? OMNINET_BULKOUTSIZE : count; 270 270 ··· 283 283 wport->write_urb->dev = serial->dev; 284 284 result = usb_submit_urb(wport->write_urb, GFP_ATOMIC); 285 285 if (result) { 286 - port->write_urb_busy = 0; 286 + wport->write_urb_busy = 0; 287 287 err("%s - failed submitting write urb, error %d", __FUNCTION__, result); 288 288 } else 289 289 result = count;
+12 -7
drivers/usb/serial/usb-serial.c
··· 189 189 190 190 portNumber = tty->index - serial->minor; 191 191 port = serial->port[portNumber]; 192 - if (!port) 193 - return -ENODEV; 192 + if (!port) { 193 + retval = -ENODEV; 194 + goto bailout_kref_put; 195 + } 194 196 195 - if (mutex_lock_interruptible(&port->mutex)) 196 - return -ERESTARTSYS; 197 + if (mutex_lock_interruptible(&port->mutex)) { 198 + retval = -ERESTARTSYS; 199 + goto bailout_kref_put; 200 + } 197 201 198 202 ++port->open_count; 199 203 ··· 213 209 * safe because we are called with BKL held */ 214 210 if (!try_module_get(serial->type->driver.owner)) { 215 211 retval = -ENODEV; 216 - goto bailout_kref_put; 212 + goto bailout_mutex_unlock; 217 213 } 218 214 219 215 /* only call the device specific open if this ··· 228 224 229 225 bailout_module_put: 230 226 module_put(serial->type->driver.owner); 231 - bailout_kref_put: 232 - kref_put(&serial->kref, destroy_serial); 227 + bailout_mutex_unlock: 233 228 port->open_count = 0; 234 229 mutex_unlock(&port->mutex); 230 + bailout_kref_put: 231 + kref_put(&serial->kref, destroy_serial); 235 232 return retval; 236 233 } 237 234
+12 -6
drivers/video/backlight/backlight.c
··· 29 29 30 30 static ssize_t backlight_store_power(struct class_device *cdev, const char *buf, size_t count) 31 31 { 32 - int rc = -ENXIO, power; 32 + int rc = -ENXIO; 33 33 char *endp; 34 34 struct backlight_device *bd = to_backlight_device(cdev); 35 + int power = simple_strtoul(buf, &endp, 0); 36 + size_t size = endp - buf; 35 37 36 - power = simple_strtoul(buf, &endp, 0); 37 - if (*endp && !isspace(*endp)) 38 + if (*endp && isspace(*endp)) 39 + size++; 40 + if (size != count) 38 41 return -EINVAL; 39 42 40 43 down(&bd->sem); ··· 68 65 69 66 static ssize_t backlight_store_brightness(struct class_device *cdev, const char *buf, size_t count) 70 67 { 71 - int rc = -ENXIO, brightness; 68 + int rc = -ENXIO; 72 69 char *endp; 73 70 struct backlight_device *bd = to_backlight_device(cdev); 71 + int brightness = simple_strtoul(buf, &endp, 0); 72 + size_t size = endp - buf; 74 73 75 - brightness = simple_strtoul(buf, &endp, 0); 76 - if (*endp && !isspace(*endp)) 74 + if (*endp && isspace(*endp)) 75 + size++; 76 + if (size != count) 77 77 return -EINVAL; 78 78 79 79 down(&bd->sem);
+16 -16
drivers/video/backlight/lcd.c
··· 31 31 32 32 static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_t count) 33 33 { 34 - int rc, power; 34 + int rc = -ENXIO; 35 35 char *endp; 36 36 struct lcd_device *ld = to_lcd_device(cdev); 37 + int power = simple_strtoul(buf, &endp, 0); 38 + size_t size = endp - buf; 37 39 38 - power = simple_strtoul(buf, &endp, 0); 39 - if (*endp && !isspace(*endp)) 40 + if (*endp && isspace(*endp)) 41 + size++; 42 + if (size != count) 40 43 return -EINVAL; 41 44 42 45 down(&ld->sem); ··· 47 44 pr_debug("lcd: set power to %d\n", power); 48 45 ld->props->set_power(ld, power); 49 46 rc = count; 50 - } else 51 - rc = -ENXIO; 47 + } 52 48 up(&ld->sem); 53 49 54 50 return rc; ··· 55 53 56 54 static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf) 57 55 { 58 - int rc; 56 + int rc = -ENXIO; 59 57 struct lcd_device *ld = to_lcd_device(cdev); 60 58 61 59 down(&ld->sem); 62 60 if (likely(ld->props && ld->props->get_contrast)) 63 61 rc = sprintf(buf, "%d\n", ld->props->get_contrast(ld)); 64 - else 65 - rc = -ENXIO; 66 62 up(&ld->sem); 67 63 68 64 return rc; ··· 68 68 69 69 static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, size_t count) 70 70 { 71 - int rc, contrast; 71 + int rc = -ENXIO; 72 72 char *endp; 73 73 struct lcd_device *ld = to_lcd_device(cdev); 74 + int contrast = simple_strtoul(buf, &endp, 0); 75 + size_t size = endp - buf; 74 76 75 - contrast = simple_strtoul(buf, &endp, 0); 76 - if (*endp && !isspace(*endp)) 77 + if (*endp && isspace(*endp)) 78 + size++; 79 + if (size != count) 77 80 return -EINVAL; 78 81 79 82 down(&ld->sem); ··· 84 81 pr_debug("lcd: set contrast to %d\n", contrast); 85 82 ld->props->set_contrast(ld, contrast); 86 83 rc = count; 87 - } else 88 - rc = -ENXIO; 84 + } 89 85 up(&ld->sem); 90 86 91 87 return rc; ··· 92 90 93 91 static ssize_t lcd_show_max_contrast(struct class_device *cdev, char *buf) 94 92 { 95 - int rc; 93 + int rc = -ENXIO; 96 94 struct lcd_device *ld = 
to_lcd_device(cdev); 97 95 98 96 down(&ld->sem); 99 97 if (likely(ld->props)) 100 98 rc = sprintf(buf, "%d\n", ld->props->max_contrast); 101 - else 102 - rc = -ENXIO; 103 99 up(&ld->sem); 104 100 105 101 return rc;
+2 -2
drivers/video/i810/i810_main.c
··· 76 76 * 77 77 * Experiment with v_offset to find out which works best for you. 78 78 */ 79 - static u32 v_offset_default __initdata; /* For 32 MiB Aper size, 8 should be the default */ 80 - static u32 voffset __initdata = 0; 79 + static u32 v_offset_default __devinitdata; /* For 32 MiB Aper size, 8 should be the default */ 80 + static u32 voffset __devinitdata; 81 81 82 82 static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor); 83 83 static int __devinit i810fb_init_pci (struct pci_dev *dev,
+21 -2
drivers/video/matrox/g450_pll.c
··· 316 316 case M_PIXEL_PLL_B: 317 317 case M_PIXEL_PLL_C: 318 318 { 319 - u_int8_t tmp; 319 + u_int8_t tmp, xpwrctrl; 320 320 unsigned long flags; 321 321 322 322 matroxfb_DAC_lock_irqsave(flags); 323 + 324 + xpwrctrl = matroxfb_DAC_in(PMINFO M1064_XPWRCTRL); 325 + matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, xpwrctrl & ~M1064_XPWRCTRL_PANELPDN); 326 + mga_outb(M_SEQ_INDEX, M_SEQ1); 327 + mga_outb(M_SEQ_DATA, mga_inb(M_SEQ_DATA) | M_SEQ1_SCROFF); 323 328 tmp = matroxfb_DAC_in(PMINFO M1064_XPIXCLKCTRL); 329 + tmp |= M1064_XPIXCLKCTRL_DIS; 324 330 if (!(tmp & M1064_XPIXCLKCTRL_PLL_UP)) { 325 - matroxfb_DAC_out(PMINFO M1064_XPIXCLKCTRL, tmp | M1064_XPIXCLKCTRL_PLL_UP); 331 + tmp |= M1064_XPIXCLKCTRL_PLL_UP; 326 332 } 333 + matroxfb_DAC_out(PMINFO M1064_XPIXCLKCTRL, tmp); 334 + matroxfb_DAC_out(PMINFO M1064_XDVICLKCTRL, 0); 335 + matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, xpwrctrl); 336 + 327 337 matroxfb_DAC_unlock_irqrestore(flags); 328 338 } 329 339 { ··· 428 418 frequency to higher - with <= lowest wins, while 429 419 with < highest one wins */ 430 420 if (delta <= deltaarray[idx-1]) { 421 + /* all else being equal except VCO, 422 + * choose VCO not near (within 1/16th or so) VCOmin 423 + * (freqs near VCOmin aren't as stable) 424 + */ 425 + if (delta == deltaarray[idx-1] 426 + && vco != g450_mnp2vco(PMINFO mnparray[idx-1]) 427 + && vco < (pi->vcomin * 17 / 16)) { 428 + break; 429 + } 431 430 mnparray[idx] = mnparray[idx-1]; 432 431 deltaarray[idx] = deltaarray[idx-1]; 433 432 } else {
+2
drivers/video/matrox/matroxfb_DAC1064.h
··· 40 40 #define M1064_XCURCOL1RED 0x0C 41 41 #define M1064_XCURCOL1GREEN 0x0D 42 42 #define M1064_XCURCOL1BLUE 0x0E 43 + #define M1064_XDVICLKCTRL 0x0F 43 44 #define M1064_XCURCOL2RED 0x10 44 45 #define M1064_XCURCOL2GREEN 0x11 45 46 #define M1064_XCURCOL2BLUE 0x12 ··· 145 144 #define M1064_XVIDPLLN 0x8F 146 145 147 146 #define M1064_XPWRCTRL 0xA0 147 + #define M1064_XPWRCTRL_PANELPDN 0x04 148 148 149 149 #define M1064_XPANMODE 0xA2 150 150
+2
drivers/video/matrox/matroxfb_base.h
··· 672 672 673 673 #define M_SEQ_INDEX 0x1FC4 674 674 #define M_SEQ_DATA 0x1FC5 675 + #define M_SEQ1 0x01 676 + #define M_SEQ1_SCROFF 0x20 675 677 676 678 #define M_MISC_REG_READ 0x1FCC 677 679
+9 -12
fs/9p/fcall.c
··· 98 98 static void v9fs_t_clunk_cb(void *a, struct v9fs_fcall *tc, 99 99 struct v9fs_fcall *rc, int err) 100 100 { 101 - int fid; 101 + int fid, id; 102 102 struct v9fs_session_info *v9ses; 103 103 104 - if (err) 105 - return; 106 - 104 + id = 0; 107 105 fid = tc->params.tclunk.fid; 106 + if (rc) 107 + id = rc->id; 108 + 108 109 kfree(tc); 109 - 110 - if (!rc) 111 - return; 112 - 113 - v9ses = a; 114 - if (rc->id == RCLUNK) 115 - v9fs_put_idpool(fid, &v9ses->fidpool); 116 - 117 110 kfree(rc); 111 + if (id == RCLUNK) { 112 + v9ses = a; 113 + v9fs_put_idpool(fid, &v9ses->fidpool); 114 + } 118 115 } 119 116 120 117 /**
+131 -91
fs/9p/mux.c
··· 50 50 Wpending = 8, /* can write */ 51 51 }; 52 52 53 + enum { 54 + None, 55 + Flushing, 56 + Flushed, 57 + }; 58 + 53 59 struct v9fs_mux_poll_task; 54 60 55 61 struct v9fs_req { 62 + spinlock_t lock; 56 63 int tag; 57 64 struct v9fs_fcall *tcall; 58 65 struct v9fs_fcall *rcall; 59 66 int err; 60 67 v9fs_mux_req_callback cb; 61 68 void *cba; 69 + int flush; 62 70 struct list_head req_list; 63 71 }; 64 72 ··· 104 96 105 97 struct v9fs_mux_rpc { 106 98 struct v9fs_mux_data *m; 107 - struct v9fs_req *req; 108 99 int err; 100 + struct v9fs_fcall *tcall; 109 101 struct v9fs_fcall *rcall; 110 102 wait_queue_head_t wqueue; 111 103 }; ··· 532 524 533 525 static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req) 534 526 { 535 - int ecode, tag; 527 + int ecode; 536 528 struct v9fs_str *ename; 537 529 538 - tag = req->tag; 539 530 if (!req->err && req->rcall->id == RERROR) { 540 531 ecode = req->rcall->params.rerror.errno; 541 532 ename = &req->rcall->params.rerror.error; ··· 560 553 if (!req->err) 561 554 req->err = -EIO; 562 555 } 563 - 564 - if (req->err == ERREQFLUSH) 565 - return; 566 - 567 - if (req->cb) { 568 - dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n", 569 - req->tcall, req->rcall); 570 - 571 - (*req->cb) (req->cba, req->tcall, req->rcall, req->err); 572 - req->cb = NULL; 573 - } else 574 - kfree(req->rcall); 575 - 576 - v9fs_mux_put_tag(m, tag); 577 - 578 - wake_up(&m->equeue); 579 - kfree(req); 580 556 } 581 557 582 558 /** ··· 659 669 list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { 660 670 if (rreq->tag == rcall->tag) { 661 671 req = rreq; 662 - req->rcall = rcall; 663 - list_del(&req->req_list); 664 - spin_unlock(&m->lock); 665 - process_request(m, req); 672 + if (req->flush != Flushing) 673 + list_del(&req->req_list); 666 674 break; 667 675 } 668 - 669 676 } 677 + spin_unlock(&m->lock); 670 678 671 - if (!req) { 672 - spin_unlock(&m->lock); 679 + if (req) { 680 + req->rcall = rcall; 681 + process_request(m, 
req); 682 + 683 + if (req->flush != Flushing) { 684 + if (req->cb) 685 + (*req->cb) (req, req->cba); 686 + else 687 + kfree(req->rcall); 688 + 689 + wake_up(&m->equeue); 690 + } 691 + } else { 673 692 if (err >= 0 && rcall->id != RFLUSH) 674 693 dprintk(DEBUG_ERROR, 675 694 "unexpected response mux %p id %d tag %d\n", ··· 745 746 return ERR_PTR(-ENOMEM); 746 747 747 748 v9fs_set_tag(tc, n); 748 - 749 749 if ((v9fs_debug_level&DEBUG_FCALL) == DEBUG_FCALL) { 750 750 char buf[150]; 751 751 ··· 752 754 printk(KERN_NOTICE "<<< %p %s\n", m, buf); 753 755 } 754 756 757 + spin_lock_init(&req->lock); 755 758 req->tag = n; 756 759 req->tcall = tc; 757 760 req->rcall = NULL; 758 761 req->err = 0; 759 762 req->cb = cb; 760 763 req->cba = cba; 764 + req->flush = None; 761 765 762 766 spin_lock(&m->lock); 763 767 list_add_tail(&req->req_list, &m->unsent_req_list); ··· 776 776 return req; 777 777 } 778 778 779 - static void v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc, 780 - struct v9fs_fcall *rc, int err) 779 + static void v9fs_mux_free_request(struct v9fs_mux_data *m, struct v9fs_req *req) 780 + { 781 + v9fs_mux_put_tag(m, req->tag); 782 + kfree(req); 783 + } 784 + 785 + static void v9fs_mux_flush_cb(struct v9fs_req *freq, void *a) 781 786 { 782 787 v9fs_mux_req_callback cb; 783 788 int tag; 784 789 struct v9fs_mux_data *m; 785 - struct v9fs_req *req, *rptr; 790 + struct v9fs_req *req, *rreq, *rptr; 786 791 787 792 m = a; 788 - dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc, 789 - rc, err, tc->params.tflush.oldtag); 793 + dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, 794 + freq->tcall, freq->rcall, freq->err, 795 + freq->tcall->params.tflush.oldtag); 790 796 791 797 spin_lock(&m->lock); 792 798 cb = NULL; 793 - tag = tc->params.tflush.oldtag; 794 - list_for_each_entry_safe(req, rptr, &m->req_list, req_list) { 795 - if (req->tag == tag) { 799 + tag = freq->tcall->params.tflush.oldtag; 800 + req = NULL; 801 + list_for_each_entry_safe(rreq, 
rptr, &m->req_list, req_list) { 802 + if (rreq->tag == tag) { 803 + req = rreq; 796 804 list_del(&req->req_list); 797 - if (req->cb) { 798 - cb = req->cb; 799 - req->cb = NULL; 800 - spin_unlock(&m->lock); 801 - (*cb) (req->cba, req->tcall, req->rcall, 802 - req->err); 803 - } 804 - kfree(req); 805 - wake_up(&m->equeue); 806 805 break; 807 806 } 808 807 } 808 + spin_unlock(&m->lock); 809 809 810 - if (!cb) 811 - spin_unlock(&m->lock); 810 + if (req) { 811 + spin_lock(&req->lock); 812 + req->flush = Flushed; 813 + spin_unlock(&req->lock); 812 814 813 - v9fs_mux_put_tag(m, tag); 814 - kfree(tc); 815 - kfree(rc); 815 + if (req->cb) 816 + (*req->cb) (req, req->cba); 817 + else 818 + kfree(req->rcall); 819 + 820 + wake_up(&m->equeue); 821 + } 822 + 823 + kfree(freq->tcall); 824 + kfree(freq->rcall); 825 + v9fs_mux_free_request(m, freq); 816 826 } 817 827 818 - static void 828 + static int 819 829 v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req) 820 830 { 821 831 struct v9fs_fcall *fc; 832 + struct v9fs_req *rreq, *rptr; 822 833 823 834 dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag); 824 835 836 + /* if a response was received for a request, do nothing */ 837 + spin_lock(&req->lock); 838 + if (req->rcall || req->err) { 839 + spin_unlock(&req->lock); 840 + dprintk(DEBUG_MUX, "mux %p req %p response already received\n", m, req); 841 + return 0; 842 + } 843 + 844 + req->flush = Flushing; 845 + spin_unlock(&req->lock); 846 + 847 + spin_lock(&m->lock); 848 + /* if the request is not sent yet, just remove it from the list */ 849 + list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) { 850 + if (rreq->tag == req->tag) { 851 + dprintk(DEBUG_MUX, "mux %p req %p request is not sent yet\n", m, req); 852 + list_del(&rreq->req_list); 853 + req->flush = Flushed; 854 + spin_unlock(&m->lock); 855 + if (req->cb) 856 + (*req->cb) (req, req->cba); 857 + return 0; 858 + } 859 + } 860 + spin_unlock(&m->lock); 861 + 862 + 
clear_thread_flag(TIF_SIGPENDING); 825 863 fc = v9fs_create_tflush(req->tag); 826 864 v9fs_send_request(m, fc, v9fs_mux_flush_cb, m); 865 + return 1; 827 866 } 828 867 829 868 static void 830 - v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err) 869 + v9fs_mux_rpc_cb(struct v9fs_req *req, void *a) 831 870 { 832 871 struct v9fs_mux_rpc *r; 833 872 834 - if (err == ERREQFLUSH) { 835 - kfree(rc); 836 - dprintk(DEBUG_MUX, "err req flush\n"); 837 - return; 838 - } 839 - 873 + dprintk(DEBUG_MUX, "req %p r %p\n", req, a); 840 874 r = a; 841 - dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req, 842 - tc, rc, err); 843 - r->rcall = rc; 844 - r->err = err; 875 + r->rcall = req->rcall; 876 + r->err = req->err; 877 + 878 + if (req->flush!=None && !req->err) 879 + r->err = -ERESTARTSYS; 880 + 845 881 wake_up(&r->wqueue); 846 882 } 847 883 ··· 892 856 v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc, 893 857 struct v9fs_fcall **rc) 894 858 { 895 - int err; 859 + int err, sigpending; 896 860 unsigned long flags; 897 861 struct v9fs_req *req; 898 862 struct v9fs_mux_rpc r; 899 863 900 864 r.err = 0; 865 + r.tcall = tc; 901 866 r.rcall = NULL; 902 867 r.m = m; 903 868 init_waitqueue_head(&r.wqueue); ··· 906 869 if (rc) 907 870 *rc = NULL; 908 871 872 + sigpending = 0; 873 + if (signal_pending(current)) { 874 + sigpending = 1; 875 + clear_thread_flag(TIF_SIGPENDING); 876 + } 877 + 909 878 req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r); 910 879 if (IS_ERR(req)) { 911 880 err = PTR_ERR(req); 912 881 dprintk(DEBUG_MUX, "error %d\n", err); 913 - return PTR_ERR(req); 882 + return err; 914 883 } 915 884 916 - r.req = req; 917 - dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc, 918 - req->tag, &r, req); 919 885 err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0); 920 886 if (r.err < 0) 921 887 err = r.err; 922 888 923 889 if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) 
{ 924 - spin_lock(&m->lock); 925 - req->tcall = NULL; 926 - req->err = ERREQFLUSH; 927 - spin_unlock(&m->lock); 890 + if (v9fs_mux_flush_request(m, req)) { 891 + /* wait until we get response of the flush message */ 892 + do { 893 + clear_thread_flag(TIF_SIGPENDING); 894 + err = wait_event_interruptible(r.wqueue, 895 + r.rcall || r.err); 896 + } while (!r.rcall && !r.err && err==-ERESTARTSYS && 897 + m->trans->status==Connected && !m->err); 898 + } 899 + sigpending = 1; 900 + } 928 901 929 - clear_thread_flag(TIF_SIGPENDING); 930 - v9fs_mux_flush_request(m, req); 902 + if (sigpending) { 931 903 spin_lock_irqsave(&current->sighand->siglock, flags); 932 904 recalc_sigpending(); 933 905 spin_unlock_irqrestore(&current->sighand->siglock, flags); 934 906 } 935 907 936 - if (!err) { 937 - if (r.rcall) 938 - dprintk(DEBUG_MUX, "got response id %d tag %d\n", 939 - r.rcall->id, r.rcall->tag); 940 - 941 - if (rc) 942 - *rc = r.rcall; 943 - else 944 - kfree(r.rcall); 945 - } else { 908 + if (rc) 909 + *rc = r.rcall; 910 + else 946 911 kfree(r.rcall); 947 - dprintk(DEBUG_MUX, "got error %d\n", err); 948 - if (err > 0) 949 - err = -EIO; 950 - } 912 + 913 + v9fs_mux_free_request(m, req); 914 + if (err > 0) 915 + err = -EIO; 951 916 952 917 return err; 953 918 } ··· 990 951 struct v9fs_req *req, *rtmp; 991 952 LIST_HEAD(cancel_list); 992 953 993 - dprintk(DEBUG_MUX, "mux %p err %d\n", m, err); 954 + dprintk(DEBUG_ERROR, "mux %p err %d\n", m, err); 994 955 m->err = err; 995 956 spin_lock(&m->lock); 996 957 list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { 958 + list_move(&req->req_list, &cancel_list); 959 + } 960 + list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { 997 961 list_move(&req->req_list, &cancel_list); 998 962 } 999 963 spin_unlock(&m->lock); ··· 1007 965 req->err = err; 1008 966 1009 967 if (req->cb) 1010 - (*req->cb) (req->cba, req->tcall, req->rcall, req->err); 968 + (*req->cb) (req, req->cba); 1011 969 else 1012 970 kfree(req->rcall); 
1013 - 1014 - kfree(req); 1015 971 } 1016 972 1017 973 wake_up(&m->equeue);
+2 -2
fs/9p/mux.h
··· 24 24 */ 25 25 26 26 struct v9fs_mux_data; 27 + struct v9fs_req; 27 28 28 29 /** 29 30 * v9fs_mux_req_callback - callback function that is called when the ··· 37 36 * @rc - response call 38 37 * @err - error code (non-zero if error occured) 39 38 */ 40 - typedef void (*v9fs_mux_req_callback)(void *a, struct v9fs_fcall *tc, 41 - struct v9fs_fcall *rc, int err); 39 + typedef void (*v9fs_mux_req_callback)(struct v9fs_req *req, void *a); 42 40 43 41 int v9fs_mux_global_init(void); 44 42 void v9fs_mux_global_exit(void);
+9 -4
fs/9p/vfs_file.c
··· 72 72 return -ENOSPC; 73 73 } 74 74 75 - err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, NULL); 75 + err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, &fcall); 76 76 if (err < 0) { 77 77 dprintk(DEBUG_ERROR, "rewalk didn't work\n"); 78 - goto put_fid; 78 + if (fcall && fcall->id == RWALK) 79 + goto clunk_fid; 80 + else { 81 + v9fs_put_idpool(fid, &v9ses->fidpool); 82 + goto free_fcall; 83 + } 79 84 } 85 + kfree(fcall); 80 86 81 87 /* TODO: do special things for O_EXCL, O_NOFOLLOW, O_SYNC */ 82 88 /* translate open mode appropriately */ ··· 115 109 clunk_fid: 116 110 v9fs_t_clunk(v9ses, fid); 117 111 118 - put_fid: 119 - v9fs_put_idpool(fid, &v9ses->fidpool); 112 + free_fcall: 120 113 kfree(fcall); 121 114 122 115 return err;
+16 -3
fs/9p/vfs_inode.c
··· 270 270 err = v9fs_t_walk(v9ses, pfid, fid, NULL, &fcall); 271 271 if (err < 0) { 272 272 PRINT_FCALL_ERROR("clone error", fcall); 273 - goto put_fid; 273 + if (fcall && fcall->id == RWALK) 274 + goto clunk_fid; 275 + else 276 + goto put_fid; 274 277 } 275 278 kfree(fcall); 276 279 ··· 325 322 &fcall); 326 323 327 324 if (err < 0) { 325 + if (fcall && fcall->id == RWALK) 326 + goto clunk_fid; 327 + 328 328 PRINT_FCALL_ERROR("walk error", fcall); 329 329 v9fs_put_idpool(nfid, &v9ses->fidpool); 330 330 goto error; ··· 646 640 } 647 641 648 642 result = v9fs_t_walk(v9ses, dirfidnum, newfid, 649 - (char *)dentry->d_name.name, NULL); 643 + (char *)dentry->d_name.name, &fcall); 644 + 650 645 if (result < 0) { 651 - v9fs_put_idpool(newfid, &v9ses->fidpool); 646 + if (fcall && fcall->id == RWALK) 647 + v9fs_t_clunk(v9ses, newfid); 648 + else 649 + v9fs_put_idpool(newfid, &v9ses->fidpool); 650 + 652 651 if (result == -ENOENT) { 653 652 d_add(dentry, NULL); 654 653 dprintk(DEBUG_VFS, 655 654 "Return negative dentry %p count %d\n", 656 655 dentry, atomic_read(&dentry->d_count)); 656 + kfree(fcall); 657 657 return NULL; 658 658 } 659 659 dprintk(DEBUG_ERROR, "walk error:%d\n", result); 660 660 goto FreeFcall; 661 661 } 662 + kfree(fcall); 662 663 663 664 result = v9fs_t_stat(v9ses, newfid, &fcall); 664 665 if (result < 0) {
+1 -1
fs/Makefile
··· 45 45 obj-$(CONFIG_PROC_FS) += proc/ 46 46 obj-y += partitions/ 47 47 obj-$(CONFIG_SYSFS) += sysfs/ 48 + obj-$(CONFIG_CONFIGFS_FS) += configfs/ 48 49 obj-y += devpts/ 49 50 50 51 obj-$(CONFIG_PROFILING) += dcookies.o ··· 102 101 obj-$(CONFIG_HOSTFS) += hostfs/ 103 102 obj-$(CONFIG_HPPFS) += hppfs/ 104 103 obj-$(CONFIG_DEBUG_FS) += debugfs/ 105 - obj-$(CONFIG_CONFIGFS_FS) += configfs/ 106 104 obj-$(CONFIG_OCFS2_FS) += ocfs2/ 107 105 obj-$(CONFIG_GFS2_FS) += gfs2/
+2 -3
fs/autofs4/autofs_i.h
··· 74 74 struct autofs_wait_queue *next; 75 75 autofs_wqt_t wait_queue_token; 76 76 /* We use the following to see what we are waiting for */ 77 - int hash; 78 - int len; 77 + unsigned int hash; 78 + unsigned int len; 79 79 char *name; 80 80 u32 dev; 81 81 u64 ino; ··· 85 85 pid_t tgid; 86 86 /* This is for status reporting upon return */ 87 87 int status; 88 - atomic_t notify; 89 88 atomic_t wait_ctr; 90 89 }; 91 90
+3 -7
fs/autofs4/root.c
··· 327 327 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) 328 328 { 329 329 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); 330 + struct autofs_info *ino = autofs4_dentry_ino(dentry); 330 331 int oz_mode = autofs4_oz_mode(sbi); 331 332 unsigned int lookup_type; 332 333 int status; ··· 341 340 if (oz_mode || !lookup_type) 342 341 goto done; 343 342 344 - /* 345 - * If a request is pending wait for it. 346 - * If it's a mount then it won't be expired till at least 347 - * a liitle later and if it's an expire then we might need 348 - * to mount it again. 349 - */ 350 - if (autofs4_ispending(dentry)) { 343 + /* If an expire request is pending wait for it. */ 344 + if (ino && (ino->flags & AUTOFS_INF_EXPIRING)) { 351 345 DPRINTK("waiting for active request %p name=%.*s", 352 346 dentry, dentry->d_name.len, dentry->d_name.name); 353 347
+55 -26
fs/autofs4/waitq.c
··· 189 189 return len; 190 190 } 191 191 192 + static struct autofs_wait_queue * 193 + autofs4_find_wait(struct autofs_sb_info *sbi, 194 + char *name, unsigned int hash, unsigned int len) 195 + { 196 + struct autofs_wait_queue *wq; 197 + 198 + for (wq = sbi->queues; wq; wq = wq->next) { 199 + if (wq->hash == hash && 200 + wq->len == len && 201 + wq->name && !memcmp(wq->name, name, len)) 202 + break; 203 + } 204 + return wq; 205 + } 206 + 192 207 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, 193 208 enum autofs_notify notify) 194 209 { 210 + struct autofs_info *ino; 195 211 struct autofs_wait_queue *wq; 196 212 char *name; 197 213 unsigned int len = 0; 198 214 unsigned int hash = 0; 199 - int status; 215 + int status, type; 200 216 201 217 /* In catatonic mode, we don't wait for nobody */ 202 218 if (sbi->catatonic) ··· 239 223 return -EINTR; 240 224 } 241 225 242 - for (wq = sbi->queues ; wq ; wq = wq->next) { 243 - if (wq->hash == dentry->d_name.hash && 244 - wq->len == len && 245 - wq->name && !memcmp(wq->name, name, len)) 246 - break; 226 + wq = autofs4_find_wait(sbi, name, hash, len); 227 + ino = autofs4_dentry_ino(dentry); 228 + if (!wq && ino && notify == NFY_NONE) { 229 + /* 230 + * Either we've betean the pending expire to post it's 231 + * wait or it finished while we waited on the mutex. 232 + * So we need to wait till either, the wait appears 233 + * or the expire finishes. 234 + */ 235 + 236 + while (ino->flags & AUTOFS_INF_EXPIRING) { 237 + mutex_unlock(&sbi->wq_mutex); 238 + schedule_timeout_interruptible(HZ/10); 239 + if (mutex_lock_interruptible(&sbi->wq_mutex)) { 240 + kfree(name); 241 + return -EINTR; 242 + } 243 + wq = autofs4_find_wait(sbi, name, hash, len); 244 + if (wq) 245 + break; 246 + } 247 + 248 + /* 249 + * Not ideal but the status has already gone. Of the two 250 + * cases where we wait on NFY_NONE neither depend on the 251 + * return status of the wait. 
252 + */ 253 + if (!wq) { 254 + kfree(name); 255 + mutex_unlock(&sbi->wq_mutex); 256 + return 0; 257 + } 247 258 } 248 259 249 260 if (!wq) { 250 - /* Can't wait for an expire if there's no mount */ 251 - if (notify == NFY_NONE && !d_mountpoint(dentry)) { 252 - kfree(name); 253 - mutex_unlock(&sbi->wq_mutex); 254 - return -ENOENT; 255 - } 256 - 257 261 /* Create a new wait queue */ 258 262 wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL); 259 263 if (!wq) { ··· 299 263 wq->tgid = current->tgid; 300 264 wq->status = -EINTR; /* Status return if interrupted */ 301 265 atomic_set(&wq->wait_ctr, 2); 302 - atomic_set(&wq->notify, 1); 303 266 mutex_unlock(&sbi->wq_mutex); 304 - } else { 305 - atomic_inc(&wq->wait_ctr); 306 - mutex_unlock(&sbi->wq_mutex); 307 - kfree(name); 308 - DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d", 309 - (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify); 310 - } 311 - 312 - if (notify != NFY_NONE && atomic_read(&wq->notify)) { 313 - int type; 314 - 315 - atomic_dec(&wq->notify); 316 267 317 268 if (sbi->version < 5) { 318 269 if (notify == NFY_MOUNT) ··· 322 299 323 300 /* autofs4_notify_daemon() may block */ 324 301 autofs4_notify_daemon(sbi, wq, type); 302 + } else { 303 + atomic_inc(&wq->wait_ctr); 304 + mutex_unlock(&sbi->wq_mutex); 305 + kfree(name); 306 + DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d", 307 + (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify); 325 308 } 326 309 327 310 /* wq->name is NULL if and only if the lock is already released */
+9 -21
fs/binfmt_flat.c
··· 428 428 loff_t fpos; 429 429 unsigned long start_code, end_code; 430 430 int ret; 431 - int exec_fileno; 432 431 433 432 hdr = ((struct flat_hdr *) bprm->buf); /* exec-header */ 434 433 inode = bprm->file->f_dentry->d_inode; ··· 501 502 goto err; 502 503 } 503 504 504 - /* check file descriptor */ 505 - exec_fileno = get_unused_fd(); 506 - if (exec_fileno < 0) { 507 - ret = -EMFILE; 508 - goto err; 509 - } 510 - get_file(bprm->file); 511 - fd_install(exec_fileno, bprm->file); 512 - 513 505 /* Flush all traces of the currently running executable */ 514 506 if (id == 0) { 515 507 result = flush_old_exec(bprm); 516 508 if (result) { 517 509 ret = result; 518 - goto err_close; 510 + goto err; 519 511 } 520 512 521 513 /* OK, This is the point of no return */ ··· 538 548 textpos = (unsigned long) -ENOMEM; 539 549 printk("Unable to mmap process text, errno %d\n", (int)-textpos); 540 550 ret = textpos; 541 - goto err_close; 551 + goto err; 542 552 } 543 553 544 554 down_write(&current->mm->mmap_sem); ··· 554 564 (int)-datapos); 555 565 do_munmap(current->mm, textpos, text_len); 556 566 ret = realdatastart; 557 - goto err_close; 567 + goto err; 558 568 } 559 569 datapos = realdatastart + MAX_SHARED_LIBS * sizeof(unsigned long); 560 570 ··· 577 587 do_munmap(current->mm, textpos, text_len); 578 588 do_munmap(current->mm, realdatastart, data_len + extra); 579 589 ret = result; 580 - goto err_close; 590 + goto err; 581 591 } 582 592 583 593 reloc = (unsigned long *) (datapos+(ntohl(hdr->reloc_start)-text_len)); ··· 596 606 printk("Unable to allocate RAM for process text/data, errno %d\n", 597 607 (int)-textpos); 598 608 ret = textpos; 599 - goto err_close; 609 + goto err; 600 610 } 601 611 602 612 realdatastart = textpos + ntohl(hdr->data_start); ··· 642 652 do_munmap(current->mm, textpos, text_len + data_len + extra + 643 653 MAX_SHARED_LIBS * sizeof(unsigned long)); 644 654 ret = result; 645 - goto err_close; 655 + goto err; 646 656 } 647 657 } 648 658 ··· 707 717 addr 
= calc_reloc(*rp, libinfo, id, 0); 708 718 if (addr == RELOC_FAILED) { 709 719 ret = -ENOEXEC; 710 - goto err_close; 720 + goto err; 711 721 } 712 722 *rp = addr; 713 723 } ··· 737 747 rp = (unsigned long *) calc_reloc(addr, libinfo, id, 1); 738 748 if (rp == (unsigned long *)RELOC_FAILED) { 739 749 ret = -ENOEXEC; 740 - goto err_close; 750 + goto err; 741 751 } 742 752 743 753 /* Get the pointer's value. */ ··· 752 762 addr = calc_reloc(addr, libinfo, id, 0); 753 763 if (addr == RELOC_FAILED) { 754 764 ret = -ENOEXEC; 755 - goto err_close; 765 + goto err; 756 766 } 757 767 758 768 /* Write back the relocated pointer. */ ··· 773 783 stack_len); 774 784 775 785 return 0; 776 - err_close: 777 - sys_close(exec_fileno); 778 786 err: 779 787 return ret; 780 788 }
+3
fs/bio.c
··· 1116 1116 bp->bio1.bi_io_vec = &bp->bv1; 1117 1117 bp->bio2.bi_io_vec = &bp->bv2; 1118 1118 1119 + bp->bio1.bi_max_vecs = 1; 1120 + bp->bio2.bi_max_vecs = 1; 1121 + 1119 1122 bp->bio1.bi_end_io = bio_pair_end_1; 1120 1123 bp->bio2.bi_end_io = bio_pair_end_2; 1121 1124
+89 -82
fs/compat.c
··· 1913 1913 } 1914 1914 1915 1915 if (sigmask) { 1916 - if (sigsetsize |= sizeof(compat_sigset_t)) 1916 + if (sigsetsize != sizeof(compat_sigset_t)) 1917 1917 return -EINVAL; 1918 1918 if (copy_from_user(&ss32, sigmask, sizeof(ss32))) 1919 1919 return -EFAULT; ··· 2030 2030 struct knfsd_fh cr32_getfs; 2031 2031 }; 2032 2032 2033 - static int compat_nfs_svc_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg) 2033 + static int compat_nfs_svc_trans(struct nfsctl_arg *karg, 2034 + struct compat_nfsctl_arg __user *arg) 2034 2035 { 2035 - int err; 2036 - 2037 - err = access_ok(VERIFY_READ, &arg->ca32_svc, sizeof(arg->ca32_svc)); 2038 - err |= get_user(karg->ca_version, &arg->ca32_version); 2039 - err |= __get_user(karg->ca_svc.svc_port, &arg->ca32_svc.svc32_port); 2040 - err |= __get_user(karg->ca_svc.svc_nthreads, &arg->ca32_svc.svc32_nthreads); 2041 - return (err) ? -EFAULT : 0; 2036 + if (!access_ok(VERIFY_READ, &arg->ca32_svc, sizeof(arg->ca32_svc)) || 2037 + get_user(karg->ca_version, &arg->ca32_version) || 2038 + __get_user(karg->ca_svc.svc_port, &arg->ca32_svc.svc32_port) || 2039 + __get_user(karg->ca_svc.svc_nthreads, 2040 + &arg->ca32_svc.svc32_nthreads)) 2041 + return -EFAULT; 2042 + return 0; 2042 2043 } 2043 2044 2044 - static int compat_nfs_clnt_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg) 2045 + static int compat_nfs_clnt_trans(struct nfsctl_arg *karg, 2046 + struct compat_nfsctl_arg __user *arg) 2045 2047 { 2046 - int err; 2048 + if (!access_ok(VERIFY_READ, &arg->ca32_client, 2049 + sizeof(arg->ca32_client)) || 2050 + get_user(karg->ca_version, &arg->ca32_version) || 2051 + __copy_from_user(&karg->ca_client.cl_ident[0], 2052 + &arg->ca32_client.cl32_ident[0], 2053 + NFSCLNT_IDMAX) || 2054 + __get_user(karg->ca_client.cl_naddr, 2055 + &arg->ca32_client.cl32_naddr) || 2056 + __copy_from_user(&karg->ca_client.cl_addrlist[0], 2057 + &arg->ca32_client.cl32_addrlist[0], 2058 + (sizeof(struct in_addr) * NFSCLNT_ADDRMAX)) 
|| 2059 + __get_user(karg->ca_client.cl_fhkeytype, 2060 + &arg->ca32_client.cl32_fhkeytype) || 2061 + __get_user(karg->ca_client.cl_fhkeylen, 2062 + &arg->ca32_client.cl32_fhkeylen) || 2063 + __copy_from_user(&karg->ca_client.cl_fhkey[0], 2064 + &arg->ca32_client.cl32_fhkey[0], 2065 + NFSCLNT_KEYMAX)) 2066 + return -EFAULT; 2047 2067 2048 - err = access_ok(VERIFY_READ, &arg->ca32_client, sizeof(arg->ca32_client)); 2049 - err |= get_user(karg->ca_version, &arg->ca32_version); 2050 - err |= __copy_from_user(&karg->ca_client.cl_ident[0], 2051 - &arg->ca32_client.cl32_ident[0], 2052 - NFSCLNT_IDMAX); 2053 - err |= __get_user(karg->ca_client.cl_naddr, &arg->ca32_client.cl32_naddr); 2054 - err |= __copy_from_user(&karg->ca_client.cl_addrlist[0], 2055 - &arg->ca32_client.cl32_addrlist[0], 2056 - (sizeof(struct in_addr) * NFSCLNT_ADDRMAX)); 2057 - err |= __get_user(karg->ca_client.cl_fhkeytype, 2058 - &arg->ca32_client.cl32_fhkeytype); 2059 - err |= __get_user(karg->ca_client.cl_fhkeylen, 2060 - &arg->ca32_client.cl32_fhkeylen); 2061 - err |= __copy_from_user(&karg->ca_client.cl_fhkey[0], 2062 - &arg->ca32_client.cl32_fhkey[0], 2063 - NFSCLNT_KEYMAX); 2064 - 2065 - return (err) ? 
-EFAULT : 0; 2068 + return 0; 2066 2069 } 2067 2070 2068 - static int compat_nfs_exp_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg) 2071 + static int compat_nfs_exp_trans(struct nfsctl_arg *karg, 2072 + struct compat_nfsctl_arg __user *arg) 2069 2073 { 2070 - int err; 2071 - 2072 - err = access_ok(VERIFY_READ, &arg->ca32_export, sizeof(arg->ca32_export)); 2073 - err |= get_user(karg->ca_version, &arg->ca32_version); 2074 - err |= __copy_from_user(&karg->ca_export.ex_client[0], 2075 - &arg->ca32_export.ex32_client[0], 2076 - NFSCLNT_IDMAX); 2077 - err |= __copy_from_user(&karg->ca_export.ex_path[0], 2078 - &arg->ca32_export.ex32_path[0], 2079 - NFS_MAXPATHLEN); 2080 - err |= __get_user(karg->ca_export.ex_dev, 2081 - &arg->ca32_export.ex32_dev); 2082 - err |= __get_user(karg->ca_export.ex_ino, 2083 - &arg->ca32_export.ex32_ino); 2084 - err |= __get_user(karg->ca_export.ex_flags, 2085 - &arg->ca32_export.ex32_flags); 2086 - err |= __get_user(karg->ca_export.ex_anon_uid, 2087 - &arg->ca32_export.ex32_anon_uid); 2088 - err |= __get_user(karg->ca_export.ex_anon_gid, 2089 - &arg->ca32_export.ex32_anon_gid); 2074 + if (!access_ok(VERIFY_READ, &arg->ca32_export, 2075 + sizeof(arg->ca32_export)) || 2076 + get_user(karg->ca_version, &arg->ca32_version) || 2077 + __copy_from_user(&karg->ca_export.ex_client[0], 2078 + &arg->ca32_export.ex32_client[0], 2079 + NFSCLNT_IDMAX) || 2080 + __copy_from_user(&karg->ca_export.ex_path[0], 2081 + &arg->ca32_export.ex32_path[0], 2082 + NFS_MAXPATHLEN) || 2083 + __get_user(karg->ca_export.ex_dev, 2084 + &arg->ca32_export.ex32_dev) || 2085 + __get_user(karg->ca_export.ex_ino, 2086 + &arg->ca32_export.ex32_ino) || 2087 + __get_user(karg->ca_export.ex_flags, 2088 + &arg->ca32_export.ex32_flags) || 2089 + __get_user(karg->ca_export.ex_anon_uid, 2090 + &arg->ca32_export.ex32_anon_uid) || 2091 + __get_user(karg->ca_export.ex_anon_gid, 2092 + &arg->ca32_export.ex32_anon_gid)) 2093 + return -EFAULT; 2090 2094 
SET_UID(karg->ca_export.ex_anon_uid, karg->ca_export.ex_anon_uid); 2091 2095 SET_GID(karg->ca_export.ex_anon_gid, karg->ca_export.ex_anon_gid); 2092 2096 2093 - return (err) ? -EFAULT : 0; 2097 + return 0; 2094 2098 } 2095 2099 2096 - static int compat_nfs_getfd_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg) 2100 + static int compat_nfs_getfd_trans(struct nfsctl_arg *karg, 2101 + struct compat_nfsctl_arg __user *arg) 2097 2102 { 2098 - int err; 2103 + if (!access_ok(VERIFY_READ, &arg->ca32_getfd, 2104 + sizeof(arg->ca32_getfd)) || 2105 + get_user(karg->ca_version, &arg->ca32_version) || 2106 + __copy_from_user(&karg->ca_getfd.gd_addr, 2107 + &arg->ca32_getfd.gd32_addr, 2108 + (sizeof(struct sockaddr))) || 2109 + __copy_from_user(&karg->ca_getfd.gd_path, 2110 + &arg->ca32_getfd.gd32_path, 2111 + (NFS_MAXPATHLEN+1)) || 2112 + __get_user(karg->ca_getfd.gd_version, 2113 + &arg->ca32_getfd.gd32_version)) 2114 + return -EFAULT; 2099 2115 2100 - err = access_ok(VERIFY_READ, &arg->ca32_getfd, sizeof(arg->ca32_getfd)); 2101 - err |= get_user(karg->ca_version, &arg->ca32_version); 2102 - err |= __copy_from_user(&karg->ca_getfd.gd_addr, 2103 - &arg->ca32_getfd.gd32_addr, 2104 - (sizeof(struct sockaddr))); 2105 - err |= __copy_from_user(&karg->ca_getfd.gd_path, 2106 - &arg->ca32_getfd.gd32_path, 2107 - (NFS_MAXPATHLEN+1)); 2108 - err |= __get_user(karg->ca_getfd.gd_version, 2109 - &arg->ca32_getfd.gd32_version); 2110 - 2111 - return (err) ? 
-EFAULT : 0; 2116 + return 0; 2112 2117 } 2113 2118 2114 - static int compat_nfs_getfs_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg) 2119 + static int compat_nfs_getfs_trans(struct nfsctl_arg *karg, 2120 + struct compat_nfsctl_arg __user *arg) 2115 2121 { 2116 - int err; 2122 + if (!access_ok(VERIFY_READ,&arg->ca32_getfs,sizeof(arg->ca32_getfs)) || 2123 + get_user(karg->ca_version, &arg->ca32_version) || 2124 + __copy_from_user(&karg->ca_getfs.gd_addr, 2125 + &arg->ca32_getfs.gd32_addr, 2126 + (sizeof(struct sockaddr))) || 2127 + __copy_from_user(&karg->ca_getfs.gd_path, 2128 + &arg->ca32_getfs.gd32_path, 2129 + (NFS_MAXPATHLEN+1)) || 2130 + __get_user(karg->ca_getfs.gd_maxlen, 2131 + &arg->ca32_getfs.gd32_maxlen)) 2132 + return -EFAULT; 2117 2133 2118 - err = access_ok(VERIFY_READ, &arg->ca32_getfs, sizeof(arg->ca32_getfs)); 2119 - err |= get_user(karg->ca_version, &arg->ca32_version); 2120 - err |= __copy_from_user(&karg->ca_getfs.gd_addr, 2121 - &arg->ca32_getfs.gd32_addr, 2122 - (sizeof(struct sockaddr))); 2123 - err |= __copy_from_user(&karg->ca_getfs.gd_path, 2124 - &arg->ca32_getfs.gd32_path, 2125 - (NFS_MAXPATHLEN+1)); 2126 - err |= __get_user(karg->ca_getfs.gd_maxlen, 2127 - &arg->ca32_getfs.gd32_maxlen); 2128 - 2129 - return (err) ? -EFAULT : 0; 2134 + return 0; 2130 2135 } 2131 2136 2132 2137 /* This really doesn't need translations, we are only passing 2133 2138 * back a union which contains opaque nfs file handle data. 2134 2139 */ 2135 - static int compat_nfs_getfh_res_trans(union nfsctl_res *kres, union compat_nfsctl_res __user *res) 2140 + static int compat_nfs_getfh_res_trans(union nfsctl_res *kres, 2141 + union compat_nfsctl_res __user *res) 2136 2142 { 2137 2143 int err; 2138 2144 ··· 2147 2141 return (err) ? 
-EFAULT : 0; 2148 2142 } 2149 2143 2150 - asmlinkage long compat_sys_nfsservctl(int cmd, struct compat_nfsctl_arg __user *arg, 2151 - union compat_nfsctl_res __user *res) 2144 + asmlinkage long compat_sys_nfsservctl(int cmd, 2145 + struct compat_nfsctl_arg __user *arg, 2146 + union compat_nfsctl_res __user *res) 2152 2147 { 2153 2148 struct nfsctl_arg *karg; 2154 2149 union nfsctl_res *kres;
+98 -43
fs/configfs/dir.c
··· 505 505 int i; 506 506 507 507 if (group->default_groups) { 508 - /* FYI, we're faking mkdir here 508 + /* 509 + * FYI, we're faking mkdir here 509 510 * I'm not sure we need this semaphore, as we're called 510 511 * from our parent's mkdir. That holds our parent's 511 512 * i_mutex, so afaik lookup cannot continue through our 512 513 * parent to find us, let alone mess with our tree. 513 514 * That said, taking our i_mutex is closer to mkdir 514 - * emulation, and shouldn't hurt. */ 515 + * emulation, and shouldn't hurt. 516 + */ 515 517 mutex_lock(&dentry->d_inode->i_mutex); 516 518 517 519 for (i = 0; group->default_groups[i]; i++) { ··· 548 546 549 547 item->ci_group = NULL; 550 548 item->ci_parent = NULL; 549 + 550 + /* Drop the reference for ci_entry */ 551 551 config_item_put(item); 552 552 553 + /* Drop the reference for ci_parent */ 553 554 config_group_put(group); 554 555 } 555 556 } 556 557 557 558 static void link_obj(struct config_item *parent_item, struct config_item *item) 558 559 { 559 - /* Parent seems redundant with group, but it makes certain 560 - * traversals much nicer. */ 560 + /* 561 + * Parent seems redundant with group, but it makes certain 562 + * traversals much nicer. 563 + */ 561 564 item->ci_parent = parent_item; 565 + 566 + /* 567 + * We hold a reference on the parent for the child's ci_parent 568 + * link. 569 + */ 562 570 item->ci_group = config_group_get(to_config_group(parent_item)); 563 571 list_add_tail(&item->ci_entry, &item->ci_group->cg_children); 564 572 573 + /* 574 + * We hold a reference on the child for ci_entry on the parent's 575 + * cg_children 576 + */ 565 577 config_item_get(item); 566 578 } 567 579 ··· 700 684 type = parent_item->ci_type; 701 685 BUG_ON(!type); 702 686 687 + /* 688 + * If ->drop_item() exists, it is responsible for the 689 + * config_item_put(). 
690 + */ 703 691 if (type->ct_group_ops && type->ct_group_ops->drop_item) 704 692 type->ct_group_ops->drop_item(to_config_group(parent_item), 705 693 item); ··· 714 694 715 695 static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) 716 696 { 717 - int ret; 697 + int ret, module_got = 0; 718 698 struct config_group *group; 719 699 struct config_item *item; 720 700 struct config_item *parent_item; 721 701 struct configfs_subsystem *subsys; 722 702 struct configfs_dirent *sd; 723 703 struct config_item_type *type; 724 - struct module *owner; 704 + struct module *owner = NULL; 725 705 char *name; 726 706 727 - if (dentry->d_parent == configfs_sb->s_root) 728 - return -EPERM; 707 + if (dentry->d_parent == configfs_sb->s_root) { 708 + ret = -EPERM; 709 + goto out; 710 + } 729 711 730 712 sd = dentry->d_parent->d_fsdata; 731 - if (!(sd->s_type & CONFIGFS_USET_DIR)) 732 - return -EPERM; 713 + if (!(sd->s_type & CONFIGFS_USET_DIR)) { 714 + ret = -EPERM; 715 + goto out; 716 + } 733 717 718 + /* Get a working ref for the duration of this function */ 734 719 parent_item = configfs_get_config_item(dentry->d_parent); 735 720 type = parent_item->ci_type; 736 721 subsys = to_config_group(parent_item)->cg_subsys; ··· 744 719 if (!type || !type->ct_group_ops || 745 720 (!type->ct_group_ops->make_group && 746 721 !type->ct_group_ops->make_item)) { 747 - config_item_put(parent_item); 748 - return -EPERM; /* What lack-of-mkdir returns */ 722 + ret = -EPERM; /* Lack-of-mkdir returns -EPERM */ 723 + goto out_put; 749 724 } 750 725 751 726 name = kmalloc(dentry->d_name.len + 1, GFP_KERNEL); 752 727 if (!name) { 753 - config_item_put(parent_item); 754 - return -ENOMEM; 728 + ret = -ENOMEM; 729 + goto out_put; 755 730 } 731 + 756 732 snprintf(name, dentry->d_name.len + 1, "%s", dentry->d_name.name); 757 733 758 734 down(&subsys->su_sem); ··· 774 748 775 749 kfree(name); 776 750 if (!item) { 777 - config_item_put(parent_item); 778 - return -ENOMEM; 751 + /* 752 + * If 
item == NULL, then link_obj() was never called. 753 + * There are no extra references to clean up. 754 + */ 755 + ret = -ENOMEM; 756 + goto out_put; 779 757 } 780 758 781 - ret = -EINVAL; 759 + /* 760 + * link_obj() has been called (via link_group() for groups). 761 + * From here on out, errors must clean that up. 762 + */ 763 + 782 764 type = item->ci_type; 783 - if (type) { 784 - owner = type->ct_owner; 785 - if (try_module_get(owner)) { 786 - if (group) { 787 - ret = configfs_attach_group(parent_item, 788 - item, 789 - dentry); 790 - } else { 791 - ret = configfs_attach_item(parent_item, 792 - item, 793 - dentry); 794 - } 795 - 796 - if (ret) { 797 - down(&subsys->su_sem); 798 - if (group) 799 - unlink_group(group); 800 - else 801 - unlink_obj(item); 802 - client_drop_item(parent_item, item); 803 - up(&subsys->su_sem); 804 - 805 - config_item_put(parent_item); 806 - module_put(owner); 807 - } 808 - } 765 + if (!type) { 766 + ret = -EINVAL; 767 + goto out_unlink; 809 768 } 810 769 770 + owner = type->ct_owner; 771 + if (!try_module_get(owner)) { 772 + ret = -EINVAL; 773 + goto out_unlink; 774 + } 775 + 776 + /* 777 + * I hate doing it this way, but if there is 778 + * an error, module_put() probably should 779 + * happen after any cleanup. 780 + */ 781 + module_got = 1; 782 + 783 + if (group) 784 + ret = configfs_attach_group(parent_item, item, dentry); 785 + else 786 + ret = configfs_attach_item(parent_item, item, dentry); 787 + 788 + out_unlink: 789 + if (ret) { 790 + /* Tear down everything we built up */ 791 + down(&subsys->su_sem); 792 + if (group) 793 + unlink_group(group); 794 + else 795 + unlink_obj(item); 796 + client_drop_item(parent_item, item); 797 + up(&subsys->su_sem); 798 + 799 + if (module_got) 800 + module_put(owner); 801 + } 802 + 803 + out_put: 804 + /* 805 + * link_obj()/link_group() took a reference from child->parent, 806 + * so the parent is safely pinned. We can drop our working 807 + * reference. 
808 + */ 809 + config_item_put(parent_item); 810 + 811 + out: 811 812 return ret; 812 813 } 813 814 ··· 854 801 if (sd->s_type & CONFIGFS_USET_DEFAULT) 855 802 return -EPERM; 856 803 804 + /* Get a working ref until we have the child */ 857 805 parent_item = configfs_get_config_item(dentry->d_parent); 858 806 subsys = to_config_group(parent_item)->cg_subsys; 859 807 BUG_ON(!subsys); ··· 871 817 return ret; 872 818 } 873 819 820 + /* Get a working ref for the duration of this function */ 874 821 item = configfs_get_config_item(dentry); 875 822 876 823 /* Drop reference from above, item already holds one. */
+1 -1
fs/exportfs/expfs.c
··· 102 102 if (acceptable(context, result)) 103 103 return result; 104 104 if (S_ISDIR(result->d_inode->i_mode)) { 105 - /* there is no other dentry, so fail */ 105 + err = -EACCES; 106 106 goto err_result; 107 107 } 108 108
+6 -3
fs/inotify.c
··· 848 848 inode = watch->inode; 849 849 mutex_lock(&inode->inotify_mutex); 850 850 mutex_lock(&dev->mutex); 851 - remove_watch_no_event(watch, dev); 851 + 852 + /* make sure we didn't race with another list removal */ 853 + if (likely(idr_find(&dev->idr, watch->wd))) 854 + remove_watch_no_event(watch, dev); 855 + 852 856 mutex_unlock(&dev->mutex); 853 857 mutex_unlock(&inode->inotify_mutex); 854 858 put_inotify_watch(watch); ··· 894 890 mutex_lock(&dev->mutex); 895 891 896 892 /* make sure that we did not race */ 897 - watch = idr_find(&dev->idr, wd); 898 - if (likely(watch)) 893 + if (likely(idr_find(&dev->idr, wd) == watch)) 899 894 remove_watch(watch, dev); 900 895 901 896 mutex_unlock(&dev->mutex);
+4 -2
fs/jffs2/nodelist.c
··· 438 438 if (c->mtd->point) { 439 439 err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); 440 440 if (!err && retlen < tn->csize) { 441 - JFFS2_WARNING("MTD point returned len too short: %u instead of %u.\n", retlen, tn->csize); 441 + JFFS2_WARNING("MTD point returned len too short: %zu " 442 + "instead of %u.\n", retlen, tn->csize); 442 443 c->mtd->unpoint(c->mtd, buffer, ofs, len); 443 444 } else if (err) 444 445 JFFS2_WARNING("MTD point failed: error code %d.\n", err); ··· 462 461 } 463 462 464 463 if (retlen != len) { 465 - JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ofs, retlen, len); 464 + JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", 465 + ofs, retlen, len); 466 466 err = -EIO; 467 467 goto free_out; 468 468 }
+5 -15
fs/jfs/jfs_metapage.c
··· 542 542 static int metapage_releasepage(struct page *page, gfp_t gfp_mask) 543 543 { 544 544 struct metapage *mp; 545 - int busy = 0; 545 + int ret = 1; 546 546 unsigned int offset; 547 547 548 548 for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { ··· 552 552 continue; 553 553 554 554 jfs_info("metapage_releasepage: mp = 0x%p", mp); 555 - if (mp->count || mp->nohomeok) { 555 + if (mp->count || mp->nohomeok || 556 + test_bit(META_dirty, &mp->flag)) { 556 557 jfs_info("count = %ld, nohomeok = %d", mp->count, 557 558 mp->nohomeok); 558 - busy = 1; 559 + ret = 0; 559 560 continue; 560 - } 561 - wait_on_page_writeback(page); 562 - //WARN_ON(test_bit(META_dirty, &mp->flag)); 563 - if (test_bit(META_dirty, &mp->flag)) { 564 - dump_mem("dirty mp in metapage_releasepage", mp, 565 - sizeof(struct metapage)); 566 - dump_mem("page", page, sizeof(struct page)); 567 - dump_stack(); 568 561 } 569 562 if (mp->lsn) 570 563 remove_from_logsync(mp); ··· 565 572 INCREMENT(mpStat.pagefree); 566 573 free_metapage(mp); 567 574 } 568 - if (busy) 569 - return -1; 570 - 571 - return 0; 575 + return ret; 572 576 } 573 577 574 578 static void metapage_invalidatepage(struct page *page, unsigned long offset)
+2 -5
fs/namespace.c
··· 899 899 /* 900 900 * do loopback mount. 901 901 */ 902 - static int do_loopback(struct nameidata *nd, char *old_name, unsigned long flags, int mnt_flags) 902 + static int do_loopback(struct nameidata *nd, char *old_name, int recurse) 903 903 { 904 904 struct nameidata old_nd; 905 905 struct vfsmount *mnt = NULL; 906 - int recurse = flags & MS_REC; 907 906 int err = mount_is_safe(nd); 908 - 909 907 if (err) 910 908 return err; 911 909 if (!old_name || !*old_name) ··· 937 939 spin_unlock(&vfsmount_lock); 938 940 release_mounts(&umount_list); 939 941 } 940 - mnt->mnt_flags = mnt_flags; 941 942 942 943 out: 943 944 up_write(&namespace_sem); ··· 1350 1353 retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags, 1351 1354 data_page); 1352 1355 else if (flags & MS_BIND) 1353 - retval = do_loopback(&nd, dev_name, flags, mnt_flags); 1356 + retval = do_loopback(&nd, dev_name, flags & MS_REC); 1354 1357 else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) 1355 1358 retval = do_change_type(&nd, flags); 1356 1359 else if (flags & MS_MOVE)
+3 -1
fs/nfsd/export.c
··· 1066 1066 rv = nfserr_perm; 1067 1067 else if (IS_ERR(exp)) 1068 1068 rv = nfserrno(PTR_ERR(exp)); 1069 - else 1069 + else { 1070 1070 rv = fh_compose(fhp, exp, 1071 1071 fsid_key->ek_dentry, NULL); 1072 + exp_put(exp); 1073 + } 1072 1074 cache_put(&fsid_key->h, &svc_expkey_cache); 1073 1075 return rv; 1074 1076 }
+3 -4
fs/nfsd/vfs.c
··· 1922 1922 value = kmalloc(size, GFP_KERNEL); 1923 1923 if (!value) 1924 1924 return -ENOMEM; 1925 - size = posix_acl_to_xattr(acl, value, size); 1926 - if (size < 0) { 1927 - error = size; 1925 + error = posix_acl_to_xattr(acl, value, size); 1926 + if (error < 0) 1928 1927 goto getout; 1929 - } 1928 + size = error; 1930 1929 } else 1931 1930 size = 0; 1932 1931
+39 -7
fs/ocfs2/aops.c
··· 276 276 return ret; 277 277 } 278 278 279 + /* This can also be called from ocfs2_write_zero_page() which has done 280 + * it's own cluster locking. */ 281 + int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page, 282 + unsigned from, unsigned to) 283 + { 284 + int ret; 285 + 286 + down_read(&OCFS2_I(inode)->ip_alloc_sem); 287 + 288 + ret = block_prepare_write(page, from, to, ocfs2_get_block); 289 + 290 + up_read(&OCFS2_I(inode)->ip_alloc_sem); 291 + 292 + return ret; 293 + } 294 + 279 295 /* 280 296 * ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called 281 297 * from loopback. It must be able to perform its own locking around 282 298 * ocfs2_get_block(). 283 299 */ 284 - int ocfs2_prepare_write(struct file *file, struct page *page, 285 - unsigned from, unsigned to) 300 + static int ocfs2_prepare_write(struct file *file, struct page *page, 301 + unsigned from, unsigned to) 286 302 { 287 303 struct inode *inode = page->mapping->host; 288 304 int ret; ··· 311 295 goto out; 312 296 } 313 297 314 - down_read(&OCFS2_I(inode)->ip_alloc_sem); 315 - 316 - ret = block_prepare_write(page, from, to, ocfs2_get_block); 317 - 318 - up_read(&OCFS2_I(inode)->ip_alloc_sem); 298 + ret = ocfs2_prepare_write_nolock(inode, page, from, to); 319 299 320 300 ocfs2_meta_unlock(inode, 0); 321 301 out: ··· 637 625 int ret; 638 626 639 627 mlog_entry_void(); 628 + 629 + /* 630 + * We get PR data locks even for O_DIRECT. This allows 631 + * concurrent O_DIRECT I/O but doesn't let O_DIRECT with 632 + * extending and buffered zeroing writes race. If they did 633 + * race then the buffered zeroing could be written back after 634 + * the O_DIRECT I/O. It's one thing to tell people not to mix 635 + * buffered and O_DIRECT writes, but expecting them to 636 + * understand that file extension is also an implicit buffered 637 + * write is too much. By getting the PR we force writeback of 638 + * the buffered zeroing before proceeding. 
639 + */ 640 + ret = ocfs2_data_lock(inode, 0); 641 + if (ret < 0) { 642 + mlog_errno(ret); 643 + goto out; 644 + } 645 + ocfs2_data_unlock(inode, 0); 646 + 640 647 ret = blockdev_direct_IO_no_locking(rw, iocb, inode, 641 648 inode->i_sb->s_bdev, iov, offset, 642 649 nr_segs, 643 650 ocfs2_direct_IO_get_blocks, 644 651 ocfs2_dio_end_io); 652 + out: 645 653 mlog_exit(ret); 646 654 return ret; 647 655 }
+2 -2
fs/ocfs2/aops.h
··· 22 22 #ifndef OCFS2_AOPS_H 23 23 #define OCFS2_AOPS_H 24 24 25 - int ocfs2_prepare_write(struct file *file, struct page *page, 26 - unsigned from, unsigned to); 25 + int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page, 26 + unsigned from, unsigned to); 27 27 28 28 struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode, 29 29 struct page *page,
+3 -3
fs/ocfs2/extent_map.c
··· 569 569 570 570 ret = -ENOMEM; 571 571 ctxt.new_ent = kmem_cache_alloc(ocfs2_em_ent_cachep, 572 - GFP_KERNEL); 572 + GFP_NOFS); 573 573 if (!ctxt.new_ent) { 574 574 mlog_errno(ret); 575 575 return ret; ··· 583 583 if (ctxt.need_left && !ctxt.left_ent) { 584 584 ctxt.left_ent = 585 585 kmem_cache_alloc(ocfs2_em_ent_cachep, 586 - GFP_KERNEL); 586 + GFP_NOFS); 587 587 if (!ctxt.left_ent) 588 588 break; 589 589 } 590 590 if (ctxt.need_right && !ctxt.right_ent) { 591 591 ctxt.right_ent = 592 592 kmem_cache_alloc(ocfs2_em_ent_cachep, 593 - GFP_KERNEL); 593 + GFP_NOFS); 594 594 if (!ctxt.right_ent) 595 595 break; 596 596 }
+63 -25
fs/ocfs2/file.c
··· 613 613 614 614 /* Some parts of this taken from generic_cont_expand, which turned out 615 615 * to be too fragile to do exactly what we need without us having to 616 - * worry about recursive locking in ->commit_write(). */ 616 + * worry about recursive locking in ->prepare_write() and 617 + * ->commit_write(). */ 617 618 static int ocfs2_write_zero_page(struct inode *inode, 618 619 u64 size) 619 620 { ··· 642 641 goto out; 643 642 } 644 643 645 - ret = ocfs2_prepare_write(NULL, page, offset, offset); 644 + ret = ocfs2_prepare_write_nolock(inode, page, offset, offset); 646 645 if (ret < 0) { 647 646 mlog_errno(ret); 648 647 goto out_unlock; ··· 696 695 return ret; 697 696 } 698 697 698 + /* 699 + * A tail_to_skip value > 0 indicates that we're being called from 700 + * ocfs2_file_aio_write(). This has the following implications: 701 + * 702 + * - we don't want to update i_size 703 + * - di_bh will be NULL, which is fine because it's only used in the 704 + * case where we want to update i_size. 705 + * - ocfs2_zero_extend() will then only be filling the hole created 706 + * between i_size and the start of the write. 707 + */ 699 708 static int ocfs2_extend_file(struct inode *inode, 700 709 struct buffer_head *di_bh, 701 - u64 new_i_size) 710 + u64 new_i_size, 711 + size_t tail_to_skip) 702 712 { 703 713 int ret = 0; 704 714 u32 clusters_to_add; 715 + 716 + BUG_ON(!tail_to_skip && !di_bh); 705 717 706 718 /* setattr sometimes calls us like this. */ 707 719 if (new_i_size == 0) ··· 728 714 OCFS2_I(inode)->ip_clusters; 729 715 730 716 if (clusters_to_add) { 717 + /* 718 + * protect the pages that ocfs2_zero_extend is going to 719 + * be pulling into the page cache.. we do this before the 720 + * metadata extend so that we don't get into the situation 721 + * where we've extended the metadata but can't get the data 722 + * lock to zero. 
723 + */ 724 + ret = ocfs2_data_lock(inode, 1); 725 + if (ret < 0) { 726 + mlog_errno(ret); 727 + goto out; 728 + } 729 + 731 730 ret = ocfs2_extend_allocation(inode, clusters_to_add); 732 731 if (ret < 0) { 733 732 mlog_errno(ret); 734 - goto out; 733 + goto out_unlock; 735 734 } 736 735 737 - ret = ocfs2_zero_extend(inode, new_i_size); 736 + ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip); 738 737 if (ret < 0) { 739 738 mlog_errno(ret); 740 - goto out; 739 + goto out_unlock; 741 740 } 742 - } 743 - 744 - /* No allocation required, we just use this helper to 745 - * do a trivial update of i_size. */ 746 - ret = ocfs2_simple_size_update(inode, di_bh, new_i_size); 747 - if (ret < 0) { 748 - mlog_errno(ret); 749 - goto out; 750 741 } 742 + 743 + if (!tail_to_skip) { 744 + /* We're being called from ocfs2_setattr() which wants 745 + * us to update i_size */ 746 + ret = ocfs2_simple_size_update(inode, di_bh, new_i_size); 747 + if (ret < 0) 748 + mlog_errno(ret); 749 + } 750 + 751 + out_unlock: 752 + if (clusters_to_add) /* this is the only case in which we lock */ 753 + ocfs2_data_unlock(inode, 1); 751 754 752 755 out: 753 756 return ret; ··· 824 793 if (i_size_read(inode) > attr->ia_size) 825 794 status = ocfs2_truncate_file(inode, bh, attr->ia_size); 826 795 else 827 - status = ocfs2_extend_file(inode, bh, attr->ia_size); 796 + status = ocfs2_extend_file(inode, bh, attr->ia_size, 0); 828 797 if (status < 0) { 829 798 if (status != -ENOSPC) 830 799 mlog_errno(status); ··· 1080 1049 if (!clusters) 1081 1050 break; 1082 1051 1083 - ret = ocfs2_extend_allocation(inode, clusters); 1052 + ret = ocfs2_extend_file(inode, NULL, newsize, count); 1084 1053 if (ret < 0) { 1085 1054 if (ret != -ENOSPC) 1086 1055 mlog_errno(ret); 1087 - goto out; 1088 - } 1089 - 1090 - /* Fill any holes which would've been created by this 1091 - * write. If we're O_APPEND, this will wind up 1092 - * (correctly) being a noop. 
*/ 1093 - ret = ocfs2_zero_extend(inode, (u64) newsize - count); 1094 - if (ret < 0) { 1095 - mlog_errno(ret); 1096 1056 goto out; 1097 1057 } 1098 1058 break; ··· 1167 1145 /* communicate with ocfs2_dio_end_io */ 1168 1146 ocfs2_iocb_set_rw_locked(iocb); 1169 1147 } 1148 + 1149 + /* 1150 + * We're fine letting folks race truncates and extending 1151 + * writes with read across the cluster, just like they can 1152 + * locally. Hence no rw_lock during read. 1153 + * 1154 + * Take and drop the meta data lock to update inode fields 1155 + * like i_size. This allows the checks down below 1156 + * generic_file_aio_read() a chance of actually working. 1157 + */ 1158 + ret = ocfs2_meta_lock(inode, NULL, NULL, 0); 1159 + if (ret < 0) { 1160 + mlog_errno(ret); 1161 + goto bail; 1162 + } 1163 + ocfs2_meta_unlock(inode, 0); 1170 1164 1171 1165 ret = generic_file_aio_read(iocb, buf, count, iocb->ki_pos); 1172 1166 if (ret == -EINVAL)
+5 -3
fs/ocfs2/journal.c
··· 117 117 { 118 118 struct ocfs2_journal_handle *retval = NULL; 119 119 120 - retval = kcalloc(1, sizeof(*retval), GFP_KERNEL); 120 + retval = kcalloc(1, sizeof(*retval), GFP_NOFS); 121 121 if (!retval) { 122 122 mlog(ML_ERROR, "Failed to allocate memory for journal " 123 123 "handle!\n"); ··· 870 870 if (p_blocks > CONCURRENT_JOURNAL_FILL) 871 871 p_blocks = CONCURRENT_JOURNAL_FILL; 872 872 873 + /* We are reading journal data which should not 874 + * be put in the uptodate cache */ 873 875 status = ocfs2_read_blocks(OCFS2_SB(inode->i_sb), 874 876 p_blkno, p_blocks, bhs, 0, 875 - inode); 877 + NULL); 876 878 if (status < 0) { 877 879 mlog_errno(status); 878 880 goto bail; ··· 984 982 { 985 983 struct ocfs2_la_recovery_item *item; 986 984 987 - item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_KERNEL); 985 + item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS); 988 986 if (!item) { 989 987 /* Though we wish to avoid it, we are in fact safe in 990 988 * skipping local alloc cleanup as fsck.ocfs2 is more
+2 -2
fs/ocfs2/uptodate.c
··· 337 337 (unsigned long long)oi->ip_blkno, 338 338 (unsigned long long)block, expand_tree); 339 339 340 - new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_KERNEL); 340 + new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS); 341 341 if (!new) { 342 342 mlog_errno(-ENOMEM); 343 343 return; ··· 349 349 * has no way of tracking that. */ 350 350 for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) { 351 351 tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep, 352 - GFP_KERNEL); 352 + GFP_NOFS); 353 353 if (!tree[i]) { 354 354 mlog_errno(-ENOMEM); 355 355 goto out_free;
+3 -3
fs/ocfs2/vote.c
··· 586 586 { 587 587 struct ocfs2_net_wait_ctxt *w; 588 588 589 - w = kcalloc(1, sizeof(*w), GFP_KERNEL); 589 + w = kcalloc(1, sizeof(*w), GFP_NOFS); 590 590 if (!w) { 591 591 mlog_errno(-ENOMEM); 592 592 goto bail; ··· 749 749 750 750 BUG_ON(!ocfs2_is_valid_vote_request(type)); 751 751 752 - request = kcalloc(1, sizeof(*request), GFP_KERNEL); 752 + request = kcalloc(1, sizeof(*request), GFP_NOFS); 753 753 if (!request) { 754 754 mlog_errno(-ENOMEM); 755 755 } else { ··· 1129 1129 struct ocfs2_super *osb = data; 1130 1130 struct ocfs2_vote_work *work; 1131 1131 1132 - work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_KERNEL); 1132 + work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_NOFS); 1133 1133 if (!work) { 1134 1134 status = -ENOMEM; 1135 1135 mlog_errno(status);
-1
fs/open.c
··· 1124 1124 prevent_tail_call(ret); 1125 1125 return ret; 1126 1126 } 1127 - EXPORT_SYMBOL_GPL(sys_openat); 1128 1127 1129 1128 #ifndef __alpha__ 1130 1129
+2 -1
fs/partitions/check.c
··· 533 533 534 534 devfs_remove_disk(disk); 535 535 536 + kobject_uevent(&disk->kobj, KOBJ_REMOVE); 536 537 if (disk->holder_dir) 537 538 kobject_unregister(disk->holder_dir); 538 539 if (disk->slave_dir) ··· 546 545 kfree(disk_name); 547 546 } 548 547 put_device(disk->driverfs_dev); 548 + disk->driverfs_dev = NULL; 549 549 } 550 - kobject_uevent(&disk->kobj, KOBJ_REMOVE); 551 550 kobject_del(&disk->kobj); 552 551 }
+5
fs/smbfs/dir.c
··· 434 434 if (dentry->d_name.len > SMB_MAXNAMELEN) 435 435 goto out; 436 436 437 + /* Do not allow lookup of names with backslashes in */ 438 + error = -EINVAL; 439 + if (memchr(dentry->d_name.name, '\\', dentry->d_name.len)) 440 + goto out; 441 + 437 442 lock_kernel(); 438 443 error = smb_proc_getattr(dentry, &finfo); 439 444 #ifdef SMBFS_PARANOIA
+3 -1
fs/smbfs/request.c
··· 339 339 /* 340 340 * On timeout or on interrupt we want to try and remove the 341 341 * request from the recvq/xmitq. 342 + * First check if the request is still part of a queue. (May 343 + * have been removed by some error condition) 342 344 */ 343 345 smb_lock_server(server); 344 - if (!(req->rq_flags & SMB_REQ_RECEIVED)) { 346 + if (!list_empty(&req->rq_queue)) { 345 347 list_del_init(&req->rq_queue); 346 348 smb_rput(req); 347 349 }
+71
include/asm-arm/arch-pxa/pxa2xx_spi.h
··· 1 + /* 2 + * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write to the Free Software 16 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 17 + */ 18 + 19 + #ifndef PXA2XX_SPI_H_ 20 + #define PXA2XX_SPI_H_ 21 + 22 + #define PXA2XX_CS_ASSERT (0x01) 23 + #define PXA2XX_CS_DEASSERT (0x02) 24 + 25 + #if defined(CONFIG_PXA25x) 26 + #define CLOCK_SPEED_HZ 3686400 27 + #define SSP1_SerClkDiv(x) (((CLOCK_SPEED_HZ/2/(x+1))<<8)&0x0000ff00) 28 + #define SSP2_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) 29 + #define SSP3_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) 30 + #define SSP_TIMEOUT_SCALE (2712) 31 + #elif defined(CONFIG_PXA27x) 32 + #define CLOCK_SPEED_HZ 13000000 33 + #define SSP1_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) 34 + #define SSP2_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) 35 + #define SSP3_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) 36 + #define SSP_TIMEOUT_SCALE (769) 37 + #endif 38 + 39 + #define SSP_TIMEOUT(x) ((x*10000)/SSP_TIMEOUT_SCALE) 40 + #define SSP1_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(1))))) 41 + #define SSP2_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(2))))) 42 + #define SSP3_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(3))))) 43 + 44 + enum pxa_ssp_type { 45 + SSP_UNDEFINED = 0, 46 + PXA25x_SSP, /* pxa 210, 250, 255, 26x */ 47 + 
PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */ 48 + PXA27x_SSP, 49 + }; 50 + 51 + /* device.platform_data for SSP controller devices */ 52 + struct pxa2xx_spi_master { 53 + enum pxa_ssp_type ssp_type; 54 + u32 clock_enable; 55 + u16 num_chipselect; 56 + u8 enable_dma; 57 + }; 58 + 59 + /* spi_board_info.controller_data for SPI slave devices, 60 + * copied to spi_device.platform_data ... mostly for dma tuning 61 + */ 62 + struct pxa2xx_spi_chip { 63 + u8 tx_threshold; 64 + u8 rx_threshold; 65 + u8 dma_burst_size; 66 + u32 timeout_microsecs; 67 + u8 enable_loopback; 68 + void (*cs_control)(u32 command); 69 + }; 70 + 71 + #endif /*PXA2XX_SPI_H_*/
+31
include/asm-arm/arch-s3c2410/spi-gpio.h
··· 1 + /* linux/include/asm-arm/arch-s3c2410/spi-gpio.h 2 + * 3 + * Copyright (c) 2006 Simtec Electronics 4 + * Ben Dooks <ben@simtec.co.uk> 5 + * 6 + * S3C2410 - SPI Controller platform_device info 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + */ 12 + 13 + #ifndef __ASM_ARCH_SPIGPIO_H 14 + #define __ASM_ARCH_SPIGPIO_H __FILE__ 15 + 16 + struct s3c2410_spigpio_info; 17 + struct spi_board_info; 18 + 19 + struct s3c2410_spigpio_info { 20 + unsigned long pin_clk; 21 + unsigned long pin_mosi; 22 + unsigned long pin_miso; 23 + 24 + unsigned long board_size; 25 + struct spi_board_info *board_info; 26 + 27 + void (*chip_select)(struct s3c2410_spigpio_info *spi, int cs); 28 + }; 29 + 30 + 31 + #endif /* __ASM_ARCH_SPIGPIO_H */
+29
include/asm-arm/arch-s3c2410/spi.h
··· 1 + /* linux/include/asm-arm/arch-s3c2410/spi.h 2 + * 3 + * Copyright (c) 2006 Simtec Electronics 4 + * Ben Dooks <ben@simtec.co.uk> 5 + * 6 + * S3C2410 - SPI Controller platform_device info 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + */ 12 + 13 + #ifndef __ASM_ARCH_SPI_H 14 + #define __ASM_ARCH_SPI_H __FILE__ 15 + 16 + struct s3c2410_spi_info; 17 + struct spi_board_info; 18 + 19 + struct s3c2410_spi_info { 20 + unsigned long pin_cs; /* simple gpio cs */ 21 + 22 + unsigned long board_size; 23 + struct spi_board_info *board_info; 24 + 25 + void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol); 26 + }; 27 + 28 + 29 + #endif /* __ASM_ARCH_SPI_H */
-2
include/asm-arm/procinfo.h
··· 45 45 46 46 #endif /* __ASSEMBLY__ */ 47 47 48 - #define PROC_INFO_SZ 48 49 - 50 48 #define HWCAP_SWP 1 51 49 #define HWCAP_HALF 2 52 50 #define HWCAP_THUMB 4
+6
include/asm-arm/spinlock.h
··· 142 142 : "cc"); 143 143 } 144 144 145 + /* write_can_lock - would write_trylock() succeed? */ 146 + #define __raw_write_can_lock(x) ((x)->lock == 0x80000000) 147 + 145 148 /* 146 149 * Read locks are a bit more hairy: 147 150 * - Exclusively load the lock value. ··· 200 197 } 201 198 202 199 #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) 200 + 201 + /* read_can_lock - would read_trylock() succeed? */ 202 + #define __raw_read_can_lock(x) ((x)->lock < 0x80000000) 203 203 204 204 #endif /* __ASM_SPINLOCK_H */
+3 -1
include/asm-powerpc/unistd.h
··· 321 321 #define __NR_readlinkat 296 322 322 #define __NR_fchmodat 297 323 323 #define __NR_faccessat 298 324 + #define __NR_get_robust_list 299 325 + #define __NR_set_robust_list 300 324 326 325 - #define __NR_syscalls 299 327 + #define __NR_syscalls 301 326 328 327 329 #ifdef __KERNEL__ 328 330 #define __NR__exit __NR_exit
+7 -1
include/asm-s390/unistd.h
··· 296 296 #define __NR_pselect6 301 297 297 #define __NR_ppoll 302 298 298 #define __NR_unshare 303 299 + #define __NR_set_robust_list 304 300 + #define __NR_get_robust_list 305 301 + #define __NR_splice 306 302 + #define __NR_sync_file_range 307 303 + #define __NR_tee 308 304 + #define __NR_vmsplice 309 299 305 300 - #define NR_syscalls 304 306 + #define NR_syscalls 310 301 307 302 308 /* 303 309 * There are some system calls that are not present on 64 bit, some
+5 -3
include/asm-sparc/unistd.h
··· 316 316 #define __NR_pselect6 297 317 317 #define __NR_ppoll 298 318 318 #define __NR_unshare 299 319 + #define __NR_set_robust_list 300 320 + #define __NR_get_robust_list 301 319 321 320 - /* WARNING: You MAY NOT add syscall numbers larger than 299, since 322 + /* WARNING: You MAY NOT add syscall numbers larger than 301, since 321 323 * all of the syscall tables in the Sparc kernel are 322 - * sized to have 299 entries (starting at zero). Therefore 323 - * find a free slot in the 0-299 range. 324 + * sized to have 301 entries (starting at zero). Therefore 325 + * find a free slot in the 0-301 range. 324 326 */ 325 327 326 328 #define _syscall0(type,name) \
+140 -1
include/asm-sparc64/dma-mapping.h
··· 4 4 #include <linux/config.h> 5 5 6 6 #ifdef CONFIG_PCI 7 - #include <asm-generic/dma-mapping.h> 7 + 8 + /* we implement the API below in terms of the existing PCI one, 9 + * so include it */ 10 + #include <linux/pci.h> 11 + /* need struct page definitions */ 12 + #include <linux/mm.h> 13 + 14 + static inline int 15 + dma_supported(struct device *dev, u64 mask) 16 + { 17 + BUG_ON(dev->bus != &pci_bus_type); 18 + 19 + return pci_dma_supported(to_pci_dev(dev), mask); 20 + } 21 + 22 + static inline int 23 + dma_set_mask(struct device *dev, u64 dma_mask) 24 + { 25 + BUG_ON(dev->bus != &pci_bus_type); 26 + 27 + return pci_set_dma_mask(to_pci_dev(dev), dma_mask); 28 + } 29 + 30 + static inline void * 31 + dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 32 + gfp_t flag) 33 + { 34 + BUG_ON(dev->bus != &pci_bus_type); 35 + 36 + return pci_iommu_ops->alloc_consistent(to_pci_dev(dev), size, dma_handle, flag); 37 + } 38 + 39 + static inline void 40 + dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 41 + dma_addr_t dma_handle) 42 + { 43 + BUG_ON(dev->bus != &pci_bus_type); 44 + 45 + pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle); 46 + } 47 + 48 + static inline dma_addr_t 49 + dma_map_single(struct device *dev, void *cpu_addr, size_t size, 50 + enum dma_data_direction direction) 51 + { 52 + BUG_ON(dev->bus != &pci_bus_type); 53 + 54 + return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction); 55 + } 56 + 57 + static inline void 58 + dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, 59 + enum dma_data_direction direction) 60 + { 61 + BUG_ON(dev->bus != &pci_bus_type); 62 + 63 + pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction); 64 + } 65 + 66 + static inline dma_addr_t 67 + dma_map_page(struct device *dev, struct page *page, 68 + unsigned long offset, size_t size, 69 + enum dma_data_direction direction) 70 + { 71 + BUG_ON(dev->bus != &pci_bus_type); 72 + 73 + 
return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction); 74 + } 75 + 76 + static inline void 77 + dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, 78 + enum dma_data_direction direction) 79 + { 80 + BUG_ON(dev->bus != &pci_bus_type); 81 + 82 + pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction); 83 + } 84 + 85 + static inline int 86 + dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 87 + enum dma_data_direction direction) 88 + { 89 + BUG_ON(dev->bus != &pci_bus_type); 90 + 91 + return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction); 92 + } 93 + 94 + static inline void 95 + dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, 96 + enum dma_data_direction direction) 97 + { 98 + BUG_ON(dev->bus != &pci_bus_type); 99 + 100 + pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction); 101 + } 102 + 103 + static inline void 104 + dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, 105 + enum dma_data_direction direction) 106 + { 107 + BUG_ON(dev->bus != &pci_bus_type); 108 + 109 + pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle, 110 + size, (int)direction); 111 + } 112 + 113 + static inline void 114 + dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, 115 + enum dma_data_direction direction) 116 + { 117 + BUG_ON(dev->bus != &pci_bus_type); 118 + 119 + pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle, 120 + size, (int)direction); 121 + } 122 + 123 + static inline void 124 + dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, 125 + enum dma_data_direction direction) 126 + { 127 + BUG_ON(dev->bus != &pci_bus_type); 128 + 129 + pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction); 130 + } 131 + 132 + static inline void 133 + dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, 134 + enum dma_data_direction direction) 135 + { 136 + 
BUG_ON(dev->bus != &pci_bus_type); 137 + 138 + pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction); 139 + } 140 + 141 + static inline int 142 + dma_mapping_error(dma_addr_t dma_addr) 143 + { 144 + return pci_dma_mapping_error(dma_addr); 145 + } 146 + 8 147 #else 9 148 10 149 struct device;
+2 -2
include/asm-sparc64/pci.h
··· 42 42 struct pci_dev; 43 43 44 44 struct pci_iommu_ops { 45 - void *(*alloc_consistent)(struct pci_dev *, size_t, dma_addr_t *); 45 + void *(*alloc_consistent)(struct pci_dev *, size_t, dma_addr_t *, gfp_t); 46 46 void (*free_consistent)(struct pci_dev *, size_t, void *, dma_addr_t); 47 47 dma_addr_t (*map_single)(struct pci_dev *, void *, size_t, int); 48 48 void (*unmap_single)(struct pci_dev *, dma_addr_t, size_t, int); ··· 59 59 */ 60 60 static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) 61 61 { 62 - return pci_iommu_ops->alloc_consistent(hwdev, size, dma_handle); 62 + return pci_iommu_ops->alloc_consistent(hwdev, size, dma_handle, GFP_ATOMIC); 63 63 } 64 64 65 65 /* Free and unmap a consistent DMA buffer.
+5 -3
include/asm-sparc64/unistd.h
··· 318 318 #define __NR_pselect6 297 319 319 #define __NR_ppoll 298 320 320 #define __NR_unshare 299 321 + #define __NR_set_robust_list 300 322 + #define __NR_get_robust_list 301 321 323 322 - /* WARNING: You MAY NOT add syscall numbers larger than 299, since 324 + /* WARNING: You MAY NOT add syscall numbers larger than 301, since 323 325 * all of the syscall tables in the Sparc kernel are 324 - * sized to have 299 entries (starting at zero). Therefore 325 - * find a free slot in the 0-299 range. 326 + * sized to have 301 entries (starting at zero). Therefore 327 + * find a free slot in the 0-301 range. 326 328 */ 327 329 328 330 #define _syscall0(type,name) \
-1
include/linux/firmware.h
··· 19 19 void (*cont)(const struct firmware *fw, void *context)); 20 20 21 21 void release_firmware(const struct firmware *fw); 22 - void register_firmware(const char *name, const u8 *data, size_t size); 23 22 #endif
+4 -3
include/linux/fs.h
··· 213 213 #define FIBMAP _IO(0x00,1) /* bmap access */ 214 214 #define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */ 215 215 216 + #define SYNC_FILE_RANGE_WAIT_BEFORE 1 217 + #define SYNC_FILE_RANGE_WRITE 2 218 + #define SYNC_FILE_RANGE_WAIT_AFTER 4 219 + 216 220 #ifdef __KERNEL__ 217 221 218 222 #include <linux/linkage.h> ··· 762 758 extern int fcntl_getlease(struct file *filp); 763 759 764 760 /* fs/sync.c */ 765 - #define SYNC_FILE_RANGE_WAIT_BEFORE 1 766 - #define SYNC_FILE_RANGE_WRITE 2 767 - #define SYNC_FILE_RANGE_WAIT_AFTER 4 768 761 extern int do_sync_file_range(struct file *file, loff_t offset, loff_t endbyte, 769 762 unsigned int flags); 770 763
+11
include/linux/fsl_devices.h
··· 110 110 #define FSL_USB2_PORT0_ENABLED 0x00000001 111 111 #define FSL_USB2_PORT1_ENABLED 0x00000002 112 112 113 + struct fsl_spi_platform_data { 114 + u32 initial_spmode; /* initial SPMODE value */ 115 + u16 bus_num; 116 + 117 + /* board specific information */ 118 + u16 max_chipselect; 119 + void (*activate_cs)(u8 cs, u8 polarity); 120 + void (*deactivate_cs)(u8 cs, u8 polarity); 121 + u32 sysclk; 122 + }; 123 + 113 124 #endif /* _FSL_DEVICE_H_ */ 114 125 #endif /* __KERNEL__ */
+1
include/linux/kernel.h
··· 125 125 extern char *get_options(const char *str, int nints, int *ints); 126 126 extern unsigned long long memparse(char *ptr, char **retptr); 127 127 128 + extern int core_kernel_text(unsigned long addr); 128 129 extern int __kernel_text_address(unsigned long addr); 129 130 extern int kernel_text_address(unsigned long addr); 130 131 extern int session_of_pgrp(int pgrp);
+1
include/linux/mmc/mmc.h
··· 69 69 unsigned int timeout_ns; /* data timeout (in ns, max 80ms) */ 70 70 unsigned int timeout_clks; /* data timeout (in clocks) */ 71 71 unsigned int blksz_bits; /* data block size */ 72 + unsigned int blksz; /* data block size */ 72 73 unsigned int blocks; /* number of blocks */ 73 74 unsigned int error; /* data error */ 74 75 unsigned int flags;
+1
include/linux/mmzone.h
··· 22 22 #else 23 23 #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER 24 24 #endif 25 + #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) 25 26 26 27 struct free_area { 27 28 struct list_head free_list;
+1
include/linux/rcupdate.h
··· 132 132 } 133 133 134 134 extern int rcu_pending(int cpu); 135 + extern int rcu_needs_cpu(int cpu); 135 136 136 137 /** 137 138 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
+1
include/linux/slab.h
··· 150 150 151 151 extern void kfree(const void *); 152 152 extern unsigned int ksize(const void *); 153 + extern int slab_is_available(void); 153 154 154 155 #ifdef CONFIG_NUMA 155 156 extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
+28 -17
include/linux/spi/spi.h
··· 31 31 * @master: SPI controller used with the device. 32 32 * @max_speed_hz: Maximum clock rate to be used with this chip 33 33 * (on this board); may be changed by the device's driver. 34 + * The spi_transfer.speed_hz can override this for each transfer. 34 35 * @chip-select: Chipselect, distinguishing chips handled by "master". 35 36 * @mode: The spi mode defines how data is clocked out and in. 36 37 * This may be changed by the device's driver. 38 + * The "active low" default for chipselect mode can be overridden, 39 + * as can the "MSB first" default for each word in a transfer. 37 40 * @bits_per_word: Data transfers involve one or more words; word sizes 38 - * like eight or 12 bits are common. In-memory wordsizes are 41 + * like eight or 12 bits are common. In-memory wordsizes are 39 42 * powers of two bytes (e.g. 20 bit samples use 32 bits). 40 - * This may be changed by the device's driver. 43 + * This may be changed by the device's driver, or left at the 44 + * default (0) indicating protocol words are eight bit bytes. 45 + * The spi_transfer.bits_per_word can override this for each transfer. 41 46 * @irq: Negative, or the number passed to request_irq() to receive 42 - * interrupts from this device. 47 + * interrupts from this device. 43 48 * @controller_state: Controller's runtime state 44 49 * @controller_data: Board-specific definitions for controller, such as 45 - * FIFO initialization parameters; from board_info.controller_data 50 + * FIFO initialization parameters; from board_info.controller_data 46 51 * 47 52 * An spi_device is used to interchange data between an SPI slave 48 53 * (usually a discrete chip) and CPU memory. ··· 70 65 #define SPI_MODE_2 (SPI_CPOL|0) 71 66 #define SPI_MODE_3 (SPI_CPOL|SPI_CPHA) 72 67 #define SPI_CS_HIGH 0x04 /* chipselect active high? 
*/ 68 + #define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */ 73 69 u8 bits_per_word; 74 70 int irq; 75 71 void *controller_state; ··· 79 73 80 74 // likely need more hooks for more protocol options affecting how 81 75 // the controller talks to each chip, like: 82 - // - bit order (default is wordwise msb-first) 83 76 // - memory packing (12 bit samples into low bits, others zeroed) 84 77 // - priority 85 78 // - drop chipselect after each word ··· 148 143 * struct spi_master - interface to SPI master controller 149 144 * @cdev: class interface to this driver 150 145 * @bus_num: board-specific (and often SOC-specific) identifier for a 151 - * given SPI controller. 146 + * given SPI controller. 152 147 * @num_chipselect: chipselects are used to distinguish individual 153 - * SPI slaves, and are numbered from zero to num_chipselects. 154 - * each slave has a chipselect signal, but it's common that not 155 - * every chipselect is connected to a slave. 148 + * SPI slaves, and are numbered from zero to num_chipselects. 149 + * each slave has a chipselect signal, but it's common that not 150 + * every chipselect is connected to a slave. 156 151 * @setup: updates the device mode and clocking records used by a 157 - * device's SPI controller; protocol code may call this. 152 + * device's SPI controller; protocol code may call this. 158 153 * @transfer: adds a message to the controller's transfer queue. 159 154 * @cleanup: frees controller-specific state 160 155 * ··· 172 167 struct spi_master { 173 168 struct class_device cdev; 174 169 175 - /* other than zero (== assign one dynamically), bus_num is fully 170 + /* other than negative (== assign one dynamically), bus_num is fully 176 171 * board-specific. usually that simplifies to being SOC-specific. 177 - * example: one SOC has three SPI controllers, numbered 1..3, 172 + * example: one SOC has three SPI controllers, numbered 0..2, 178 173 * and one board's schematics might show it using SPI-2. 
software 179 174 * would normally use bus_num=2 for that controller. 180 175 */ 181 - u16 bus_num; 176 + s16 bus_num; 182 177 183 178 /* chipselects will be integral to many controllers; some others 184 179 * might use board-specific GPIOs. ··· 273 268 * @tx_dma: DMA address of tx_buf, if spi_message.is_dma_mapped 274 269 * @rx_dma: DMA address of rx_buf, if spi_message.is_dma_mapped 275 270 * @len: size of rx and tx buffers (in bytes) 271 + * @speed_hz: Select a speed other then the device default for this 272 + * transfer. If 0 the default (from spi_device) is used. 273 + * @bits_per_word: select a bits_per_word other then the device default 274 + * for this transfer. If 0 the default (from spi_device) is used. 276 275 * @cs_change: affects chipselect after this transfer completes 277 276 * @delay_usecs: microseconds to delay after this transfer before 278 - * (optionally) changing the chipselect status, then starting 279 - * the next transfer or completing this spi_message. 277 + * (optionally) changing the chipselect status, then starting 278 + * the next transfer or completing this spi_message. 280 279 * @transfer_list: transfers are sequenced through spi_message.transfers 281 280 * 282 281 * SPI transfers always write the same number of bytes as they read. ··· 331 322 dma_addr_t rx_dma; 332 323 333 324 unsigned cs_change:1; 325 + u8 bits_per_word; 334 326 u16 delay_usecs; 327 + u32 speed_hz; 335 328 336 329 struct list_head transfer_list; 337 330 }; ··· 367 356 * and its transfers, ignore them until its completion callback. 368 357 */ 369 358 struct spi_message { 370 - struct list_head transfers; 359 + struct list_head transfers; 371 360 372 361 struct spi_device *spi; 373 362 ··· 385 374 */ 386 375 387 376 /* completion is reported through a callback */ 388 - void (*complete)(void *context); 377 + void (*complete)(void *context); 389 378 void *context; 390 379 unsigned actual_length; 391 380 int status;
+8
include/linux/spi/spi_bitbang.h
··· 30 30 31 31 struct spi_master *master; 32 32 33 + /* setup_transfer() changes clock and/or wordsize to match settings 34 + * for this transfer; zeroes restore defaults from spi_device. 35 + */ 36 + int (*setup_transfer)(struct spi_device *spi, 37 + struct spi_transfer *t); 38 + 33 39 void (*chipselect)(struct spi_device *spi, int is_on); 34 40 #define BITBANG_CS_ACTIVE 1 /* normally nCS, active low */ 35 41 #define BITBANG_CS_INACTIVE 0 ··· 57 51 extern int spi_bitbang_setup(struct spi_device *spi); 58 52 extern void spi_bitbang_cleanup(const struct spi_device *spi); 59 53 extern int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m); 54 + extern int spi_bitbang_setup_transfer(struct spi_device *spi, 55 + struct spi_transfer *t); 60 56 61 57 /* start or stop queue processing */ 62 58 extern int spi_bitbang_start(struct spi_bitbang *spi);
+1 -1
include/linux/swap.h
··· 296 296 #define read_swap_cache_async(swp,vma,addr) NULL 297 297 #define lookup_swap_cache(swp) NULL 298 298 #define valid_swaphandles(swp, off) 0 299 - #define can_share_swap_page(p) 0 299 + #define can_share_swap_page(p) (page_mapcount(p) == 1) 300 300 #define move_to_swap_cache(p, swp) 1 301 301 #define move_from_swap_cache(p, i, m) 1 302 302 #define __delete_from_swap_cache(p) /*NOTHING*/
+6
include/linux/syscalls.h
··· 52 52 struct mq_attr; 53 53 struct compat_stat; 54 54 struct compat_timeval; 55 + struct robust_list_head; 55 56 56 57 #include <linux/config.h> 57 58 #include <linux/types.h> ··· 582 581 583 582 asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes, 584 583 unsigned int flags); 584 + asmlinkage long sys_get_robust_list(int pid, 585 + struct robust_list_head __user **head_ptr, 586 + size_t __user *len_ptr); 587 + asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, 588 + size_t len); 585 589 586 590 #endif
+5
include/linux/videodev2.h
··· 1141 1141 /* Compatibility layer interface -- v4l1-compat module */ 1142 1142 typedef int (*v4l2_kioctl)(struct inode *inode, struct file *file, 1143 1143 unsigned int cmd, void *arg); 1144 + 1145 + #ifdef CONFIG_VIDEO_V4L1_COMPAT 1144 1146 int v4l_compat_translate_ioctl(struct inode *inode, struct file *file, 1145 1147 int cmd, void *arg, v4l2_kioctl driver_ioctl); 1148 + #else 1149 + #define v4l_compat_translate_ioctl(inode,file,cmd,arg,ioctl) -EINVAL 1150 + #endif 1146 1151 1147 1152 /* 32 Bits compatibility layer for 64 bits processors */ 1148 1153 extern long v4l_compat_ioctl32(struct file *file, unsigned int cmd,
+1 -1
include/net/irda/irlmp.h
··· 112 112 113 113 struct timer_list watchdog_timer; 114 114 115 - IRLMP_STATE lsap_state; /* Connection state */ 115 + LSAP_STATE lsap_state; /* Connection state */ 116 116 notify_t notify; /* Indication/Confirm entry points */ 117 117 struct qos_info qos; /* QoS for this connection */ 118 118
+1
include/net/neighbour.h
··· 211 211 #define NEIGH_UPDATE_F_ADMIN 0x80000000 212 212 213 213 extern void neigh_table_init(struct neigh_table *tbl); 214 + extern void neigh_table_init_no_netlink(struct neigh_table *tbl); 214 215 extern int neigh_table_clear(struct neigh_table *tbl); 215 216 extern struct neighbour * neigh_lookup(struct neigh_table *tbl, 216 217 const void *pkey,
+1
include/net/sctp/command.h
··· 99 99 SCTP_CMD_DEL_NON_PRIMARY, /* Removes non-primary peer transports. */ 100 100 SCTP_CMD_T3_RTX_TIMERS_STOP, /* Stops T3-rtx pending timers */ 101 101 SCTP_CMD_FORCE_PRIM_RETRAN, /* Forces retrans. over primary path. */ 102 + SCTP_CMD_SET_SK_ERR, /* Set sk_err */ 102 103 SCTP_CMD_LAST 103 104 } sctp_verb_t; 104 105
+3 -3
include/net/sctp/sctp.h
··· 461 461 * there is room for a param header too. 462 462 */ 463 463 #define sctp_walk_params(pos, chunk, member)\ 464 - _sctp_walk_params((pos), (chunk), WORD_ROUND(ntohs((chunk)->chunk_hdr.length)), member) 464 + _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member) 465 465 466 466 #define _sctp_walk_params(pos, chunk, end, member)\ 467 467 for (pos.v = chunk->member;\ 468 468 pos.v <= (void *)chunk + end - sizeof(sctp_paramhdr_t) &&\ 469 - pos.v <= (void *)chunk + end - WORD_ROUND(ntohs(pos.p->length)) &&\ 469 + pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\ 470 470 ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\ 471 471 pos.v += WORD_ROUND(ntohs(pos.p->length))) 472 472 ··· 477 477 for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \ 478 478 sizeof(sctp_chunkhdr_t));\ 479 479 (void *)err <= (void *)chunk_hdr + end - sizeof(sctp_errhdr_t) &&\ 480 - (void *)err <= (void *)chunk_hdr + end - WORD_ROUND(ntohs(err->length)) &&\ 480 + (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\ 481 481 ntohs(err->length) >= sizeof(sctp_errhdr_t); \ 482 482 err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length)))) 483 483
+5
init/do_mounts.c
··· 310 310 311 311 panic("VFS: Unable to mount root fs on %s", b); 312 312 } 313 + 314 + printk("No filesystem could mount root, tried: "); 315 + for (p = fs_names; *p; p += strlen(p)+1) 316 + printk(" %s", p); 317 + printk("\n"); 313 318 panic("VFS: Unable to mount root fs on %s", __bdevname(ROOT_DEV, b)); 314 319 out: 315 320 putname(fs_names);
+4 -4
init/initramfs.c
··· 26 26 27 27 /* link hash */ 28 28 29 + #define N_ALIGN(len) ((((len) + 1) & ~3) + 2) 30 + 29 31 static __initdata struct hash { 30 32 int ino, minor, major; 31 33 struct hash *next; 32 - char *name; 34 + char name[N_ALIGN(PATH_MAX)]; 33 35 } *head[32]; 34 36 35 37 static inline int hash(int major, int minor, int ino) ··· 59 57 q->ino = ino; 60 58 q->minor = minor; 61 59 q->major = major; 62 - q->name = name; 60 + strcpy(q->name, name); 63 61 q->next = NULL; 64 62 *p = q; 65 63 return NULL; ··· 134 132 this_header += n; 135 133 count -= n; 136 134 } 137 - 138 - #define N_ALIGN(len) ((((len) + 1) & ~3) + 2) 139 135 140 136 static __initdata char *collected; 141 137 static __initdata int remains;
+15 -8
kernel/cpuset.c
··· 2231 2231 * So only GFP_KERNEL allocations, if all nodes in the cpuset are 2232 2232 * short of memory, might require taking the callback_mutex mutex. 2233 2233 * 2234 - * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages() 2235 - * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing 2236 - * hardwall cpusets - no allocation on a node outside the cpuset is 2237 - * allowed (unless in interrupt, of course). 2234 + * The first call here from mm/page_alloc:get_page_from_freelist() 2235 + * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, so 2236 + * no allocation on a node outside the cpuset is allowed (unless in 2237 + * interrupt, of course). 2238 2238 * 2239 - * The second loop doesn't even call here for GFP_ATOMIC requests 2240 - * (if the __alloc_pages() local variable 'wait' is set). That check 2241 - * and the checks below have the combined affect in the second loop of 2242 - * the __alloc_pages() routine that: 2239 + * The second pass through get_page_from_freelist() doesn't even call 2240 + * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() 2241 + * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set 2242 + * in alloc_flags. That logic and the checks below have the combined 2243 + * affect that: 2243 2244 * in_interrupt - any node ok (current task context irrelevant) 2244 2245 * GFP_ATOMIC - any node ok 2245 2246 * GFP_KERNEL - any node in enclosing mem_exclusive cpuset ok 2246 2247 * GFP_USER - only nodes in current tasks mems allowed ok. 2248 + * 2249 + * Rule: 2250 + * Don't call cpuset_zone_allowed() if you can't sleep, unless you 2251 + * pass in the __GFP_HARDWALL flag set in gfp_flag, which disables 2252 + * the code that might scan up ancestor cpusets and sleep. 
2247 2253 **/ 2248 2254 2249 2255 int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) ··· 2261 2255 if (in_interrupt()) 2262 2256 return 1; 2263 2257 node = z->zone_pgdat->node_id; 2258 + might_sleep_if(!(gfp_mask & __GFP_HARDWALL)); 2264 2259 if (node_isset(node, current->mems_allowed)) 2265 2260 return 1; 2266 2261 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
+1 -1
kernel/extable.c
··· 40 40 return e; 41 41 } 42 42 43 - static int core_kernel_text(unsigned long addr) 43 + int core_kernel_text(unsigned long addr) 44 44 { 45 45 if (addr >= (unsigned long)_stext && 46 46 addr <= (unsigned long)_etext)
+6 -6
kernel/module.c
··· 705 705 706 706 void symbol_put_addr(void *addr) 707 707 { 708 - unsigned long flags; 708 + struct module *modaddr; 709 709 710 - spin_lock_irqsave(&modlist_lock, flags); 711 - if (!kernel_text_address((unsigned long)addr)) 710 + if (core_kernel_text((unsigned long)addr)) 711 + return; 712 + 713 + if (!(modaddr = module_text_address((unsigned long)addr))) 712 714 BUG(); 713 - 714 - module_put(module_text_address((unsigned long)addr)); 715 - spin_unlock_irqrestore(&modlist_lock, flags); 715 + module_put(modaddr); 716 716 } 717 717 EXPORT_SYMBOL_GPL(symbol_put_addr); 718 718
+19
kernel/rcupdate.c
··· 479 479 return 0; 480 480 } 481 481 482 + /* 483 + * Check to see if there is any immediate RCU-related work to be done 484 + * by the current CPU, returning 1 if so. This function is part of the 485 + * RCU implementation; it is -not- an exported member of the RCU API. 486 + */ 482 487 int rcu_pending(int cpu) 483 488 { 484 489 return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) || 485 490 __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu)); 491 + } 492 + 493 + /* 494 + * Check to see if any future RCU-related work will need to be done 495 + * by the current CPU, even if none need be done immediately, returning 496 + * 1 if so. This function is part of the RCU implementation; it is -not- 497 + * an exported member of the RCU API. 498 + */ 499 + int rcu_needs_cpu(int cpu) 500 + { 501 + struct rcu_data *rdp = &per_cpu(rcu_data, cpu); 502 + struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu); 503 + 504 + return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu)); 486 505 } 487 506 488 507 void rcu_check_callbacks(int cpu, int user)
+18 -44
kernel/sched.c
··· 665 665 } 666 666 667 667 /* 668 - * We place interactive tasks back into the active array, if possible. 669 - * 670 - * To guarantee that this does not starve expired tasks we ignore the 671 - * interactivity of a task if the first expired task had to wait more 672 - * than a 'reasonable' amount of time. This deadline timeout is 673 - * load-dependent, as the frequency of array switched decreases with 674 - * increasing number of running tasks. We also ignore the interactivity 675 - * if a better static_prio task has expired, and switch periodically 676 - * regardless, to ensure that highly interactive tasks do not starve 677 - * the less fortunate for unreasonably long periods. 678 - */ 679 - static inline int expired_starving(runqueue_t *rq) 680 - { 681 - int limit; 682 - 683 - /* 684 - * Arrays were recently switched, all is well 685 - */ 686 - if (!rq->expired_timestamp) 687 - return 0; 688 - 689 - limit = STARVATION_LIMIT * rq->nr_running; 690 - 691 - /* 692 - * It's time to switch arrays 693 - */ 694 - if (jiffies - rq->expired_timestamp >= limit) 695 - return 1; 696 - 697 - /* 698 - * There's a better selection in the expired array 699 - */ 700 - if (rq->curr->static_prio > rq->best_expired_prio) 701 - return 1; 702 - 703 - /* 704 - * All is well 705 - */ 706 - return 0; 707 - } 708 - 709 - /* 710 668 * __activate_task - move a task to the runqueue. 711 669 */ 712 670 static void __activate_task(task_t *p, runqueue_t *rq) 713 671 { 714 672 prio_array_t *target = rq->active; 715 673 716 - if (unlikely(batch_task(p) || (expired_starving(rq) && !rt_task(p)))) 674 + if (batch_task(p)) 717 675 target = rq->expired; 718 676 enqueue_task(p, target); 719 677 rq->nr_running++; ··· 2490 2532 } 2491 2533 2492 2534 /* 2535 + * We place interactive tasks back into the active array, if possible. 
2536 + * 2537 + * To guarantee that this does not starve expired tasks we ignore the 2538 + * interactivity of a task if the first expired task had to wait more 2539 + * than a 'reasonable' amount of time. This deadline timeout is 2540 + * load-dependent, as the frequency of array switched decreases with 2541 + * increasing number of running tasks. We also ignore the interactivity 2542 + * if a better static_prio task has expired: 2543 + */ 2544 + #define EXPIRED_STARVING(rq) \ 2545 + ((STARVATION_LIMIT && ((rq)->expired_timestamp && \ 2546 + (jiffies - (rq)->expired_timestamp >= \ 2547 + STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \ 2548 + ((rq)->curr->static_prio > (rq)->best_expired_prio)) 2549 + 2550 + /* 2493 2551 * Account user cpu time to a process. 2494 2552 * @p: the process that the cpu time gets accounted to 2495 2553 * @hardirq_offset: the offset to subtract from hardirq_count() ··· 2640 2666 2641 2667 if (!rq->expired_timestamp) 2642 2668 rq->expired_timestamp = jiffies; 2643 - if (!TASK_INTERACTIVE(p) || expired_starving(rq)) { 2669 + if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) { 2644 2670 enqueue_task(p, rq->expired); 2645 2671 if (p->static_prio < rq->best_expired_prio) 2646 2672 rq->best_expired_prio = p->static_prio;
+16
kernel/timer.c
··· 541 541 } 542 542 spin_unlock(&base->lock); 543 543 544 + /* 545 + * It can happen that other CPUs service timer IRQs and increment 546 + * jiffies, but we have not yet got a local timer tick to process 547 + * the timer wheels. In that case, the expiry time can be before 548 + * jiffies, but since the high-resolution timer here is relative to 549 + * jiffies, the default expression when high-resolution timers are 550 + * not active, 551 + * 552 + * time_before(MAX_JIFFY_OFFSET + jiffies, expires) 553 + * 554 + * would falsely evaluate to true. If that is the case, just 555 + * return jiffies so that we can immediately fire the local timer 556 + */ 557 + if (time_before(expires, jiffies)) 558 + return jiffies; 559 + 544 560 if (time_before(hr_expires, expires)) 545 561 return hr_expires; 546 562
+1 -1
lib/Kconfig.debug
··· 189 189 config UNWIND_INFO 190 190 bool "Compile the kernel with frame unwind information" 191 191 depends on !IA64 192 - depends on !MODULES || !(MIPS || PARISC || PPC || SUPERH || SPARC64 || V850) 192 + depends on !MODULES || !(MIPS || PARISC || PPC || SUPERH || V850) 193 193 help 194 194 If you say Y here the resulting kernel image will be slightly larger 195 195 but not slower, and it will give very useful debugging information.
+3 -3
lib/kobject.c
··· 198 198 199 199 /* be noisy on error issues */ 200 200 if (error == -EEXIST) 201 - printk("kobject_add failed for %s with -EEXIST, " 201 + pr_debug("kobject_add failed for %s with -EEXIST, " 202 202 "don't try to register things with the " 203 203 "same name in the same directory.\n", 204 204 kobject_name(kobj)); 205 205 else 206 - printk("kobject_add failed for %s (%d)\n", 206 + pr_debug("kobject_add failed for %s (%d)\n", 207 207 kobject_name(kobj), error); 208 - dump_stack(); 208 + /* dump_stack(); */ 209 209 } 210 210 211 211 return error;
+21 -9
mm/page_alloc.c
··· 39 39 #include <linux/mempolicy.h> 40 40 41 41 #include <asm/tlbflush.h> 42 + #include <asm/div64.h> 42 43 #include "internal.h" 43 44 44 45 /* ··· 951 950 goto got_pg; 952 951 953 952 do { 954 - if (cpuset_zone_allowed(*z, gfp_mask)) 953 + if (cpuset_zone_allowed(*z, gfp_mask|__GFP_HARDWALL)) 955 954 wakeup_kswapd(*z, order); 956 955 } while (*(++z)); 957 956 ··· 970 969 alloc_flags |= ALLOC_HARDER; 971 970 if (gfp_mask & __GFP_HIGH) 972 971 alloc_flags |= ALLOC_HIGH; 973 - alloc_flags |= ALLOC_CPUSET; 972 + if (wait) 973 + alloc_flags |= ALLOC_CPUSET; 974 974 975 975 /* 976 976 * Go through the zonelist again. Let __GFP_HIGH and allocations ··· 2125 2123 #ifdef CONFIG_FLAT_NODE_MEM_MAP 2126 2124 /* ia64 gets its own node_mem_map, before this, without bootmem */ 2127 2125 if (!pgdat->node_mem_map) { 2128 - unsigned long size; 2126 + unsigned long size, start, end; 2129 2127 struct page *map; 2130 2128 2131 - size = (pgdat->node_spanned_pages + 1) * sizeof(struct page); 2129 + /* 2130 + * The zone's endpoints aren't required to be MAX_ORDER 2131 + * aligned but the node_mem_map endpoints must be in order 2132 + * for the buddy allocator to function correctly. 
2133 + */ 2134 + start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 2135 + end = pgdat->node_start_pfn + pgdat->node_spanned_pages; 2136 + end = ALIGN(end, MAX_ORDER_NR_PAGES); 2137 + size = (end - start) * sizeof(struct page); 2132 2138 map = alloc_remap(pgdat->node_id, size); 2133 2139 if (!map) 2134 2140 map = alloc_bootmem_node(pgdat, size); 2135 - pgdat->node_mem_map = map; 2141 + pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 2136 2142 } 2137 2143 #ifdef CONFIG_FLATMEM 2138 2144 /* ··· 2576 2566 } 2577 2567 2578 2568 for_each_zone(zone) { 2579 - unsigned long tmp; 2569 + u64 tmp; 2570 + 2580 2571 spin_lock_irqsave(&zone->lru_lock, flags); 2581 - tmp = (pages_min * zone->present_pages) / lowmem_pages; 2572 + tmp = (u64)pages_min * zone->present_pages; 2573 + do_div(tmp, lowmem_pages); 2582 2574 if (is_highmem(zone)) { 2583 2575 /* 2584 2576 * __GFP_HIGH and PF_MEMALLOC allocations usually don't ··· 2607 2595 zone->pages_min = tmp; 2608 2596 } 2609 2597 2610 - zone->pages_low = zone->pages_min + tmp / 4; 2611 - zone->pages_high = zone->pages_min + tmp / 2; 2598 + zone->pages_low = zone->pages_min + (tmp >> 2); 2599 + zone->pages_high = zone->pages_min + (tmp >> 1); 2612 2600 spin_unlock_irqrestore(&zone->lru_lock, flags); 2613 2601 } 2614 2602
+15 -4
mm/slab.c
··· 700 700 FULL 701 701 } g_cpucache_up; 702 702 703 + /* 704 + * used by boot code to determine if it can use slab based allocator 705 + */ 706 + int slab_is_available(void) 707 + { 708 + return g_cpucache_up == FULL; 709 + } 710 + 703 711 static DEFINE_PER_CPU(struct work_struct, reap_work); 704 712 705 713 static void free_block(struct kmem_cache *cachep, void **objpp, int len, ··· 2200 2192 check_irq_on(); 2201 2193 for_each_online_node(node) { 2202 2194 l3 = cachep->nodelists[node]; 2203 - if (l3) { 2195 + if (l3 && l3->alien) 2196 + drain_alien_cache(cachep, l3->alien); 2197 + } 2198 + 2199 + for_each_online_node(node) { 2200 + l3 = cachep->nodelists[node]; 2201 + if (l3) 2204 2202 drain_array(cachep, l3, l3->shared, 1, node); 2205 - if (l3->alien) 2206 - drain_alien_cache(cachep, l3->alien); 2207 - } 2208 2203 } 2209 2204 } 2210 2205
+3 -6
mm/sparse.c
··· 32 32 unsigned long array_size = SECTIONS_PER_ROOT * 33 33 sizeof(struct mem_section); 34 34 35 - if (system_state == SYSTEM_RUNNING) 35 + if (slab_is_available()) 36 36 section = kmalloc_node(array_size, GFP_KERNEL, nid); 37 37 else 38 38 section = alloc_bootmem_node(NODE_DATA(nid), array_size); ··· 87 87 unsigned long root_nr; 88 88 struct mem_section* root; 89 89 90 - for (root_nr = 0; 91 - root_nr < NR_MEM_SECTIONS; 92 - root_nr += SECTIONS_PER_ROOT) { 93 - root = __nr_to_section(root_nr); 94 - 90 + for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) { 91 + root = __nr_to_section(root_nr * SECTIONS_PER_ROOT); 95 92 if (!root) 96 93 continue; 97 94
-1
net/802/tr.c
··· 643 643 644 644 module_init(rif_init); 645 645 646 - EXPORT_SYMBOL(tr_source_route); 647 646 EXPORT_SYMBOL(tr_type_trans); 648 647 EXPORT_SYMBOL(alloc_trdev);
+1 -1
net/atm/clip.c
··· 963 963 static int __init atm_clip_init(void) 964 964 { 965 965 struct proc_dir_entry *p; 966 - neigh_table_init(&clip_tbl); 966 + neigh_table_init_no_netlink(&clip_tbl); 967 967 968 968 clip_tbl_hook = &clip_tbl; 969 969 register_atm_ioctl(&clip_ioctl_ops);
+2 -1
net/bridge/br.c
··· 55 55 56 56 static void __exit br_deinit(void) 57 57 { 58 - llc_sap_close(br_stp_sap); 58 + rcu_assign_pointer(br_stp_sap->rcv_func, NULL); 59 59 60 60 #ifdef CONFIG_BRIDGE_NETFILTER 61 61 br_netfilter_fini(); ··· 67 67 68 68 synchronize_net(); 69 69 70 + llc_sap_put(br_stp_sap); 70 71 br_fdb_get_hook = NULL; 71 72 br_fdb_put_hook = NULL; 72 73
+1 -1
net/bridge/netfilter/ebt_log.c
··· 168 168 169 169 if (info->bitmask & EBT_LOG_NFLOG) 170 170 nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li, 171 - info->prefix); 171 + "%s", info->prefix); 172 172 else 173 173 ebt_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li, 174 174 info->prefix);
+19 -2
net/core/neighbour.c
··· 1326 1326 kfree(parms); 1327 1327 } 1328 1328 1329 - 1330 - void neigh_table_init(struct neigh_table *tbl) 1329 + void neigh_table_init_no_netlink(struct neigh_table *tbl) 1331 1330 { 1332 1331 unsigned long now = jiffies; 1333 1332 unsigned long phsize; ··· 1382 1383 1383 1384 tbl->last_flush = now; 1384 1385 tbl->last_rand = now + tbl->parms.reachable_time * 20; 1386 + } 1387 + 1388 + void neigh_table_init(struct neigh_table *tbl) 1389 + { 1390 + struct neigh_table *tmp; 1391 + 1392 + neigh_table_init_no_netlink(tbl); 1385 1393 write_lock(&neigh_tbl_lock); 1394 + for (tmp = neigh_tables; tmp; tmp = tmp->next) { 1395 + if (tmp->family == tbl->family) 1396 + break; 1397 + } 1386 1398 tbl->next = neigh_tables; 1387 1399 neigh_tables = tbl; 1388 1400 write_unlock(&neigh_tbl_lock); 1401 + 1402 + if (unlikely(tmp)) { 1403 + printk(KERN_ERR "NEIGH: Registering multiple tables for " 1404 + "family %d\n", tbl->family); 1405 + dump_stack(); 1406 + } 1389 1407 } 1390 1408 1391 1409 int neigh_table_clear(struct neigh_table *tbl) ··· 2673 2657 EXPORT_SYMBOL(neigh_resolve_output); 2674 2658 EXPORT_SYMBOL(neigh_table_clear); 2675 2659 EXPORT_SYMBOL(neigh_table_init); 2660 + EXPORT_SYMBOL(neigh_table_init_no_netlink); 2676 2661 EXPORT_SYMBOL(neigh_update); 2677 2662 EXPORT_SYMBOL(neigh_update_hhs); 2678 2663 EXPORT_SYMBOL(pneigh_enqueue);
+1 -1
net/ipv4/ipcomp.c
··· 210 210 skb->h.icmph->code != ICMP_FRAG_NEEDED) 211 211 return; 212 212 213 - spi = ntohl(ntohs(ipch->cpi)); 213 + spi = htonl(ntohs(ipch->cpi)); 214 214 x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, 215 215 spi, IPPROTO_COMP, AF_INET); 216 216 if (!x)
+1 -1
net/ipv4/netfilter/arp_tables.c
··· 948 948 949 949 write_lock_bh(&t->lock); 950 950 private = t->private; 951 - if (private->number != paddc->num_counters) { 951 + if (private->number != tmp.num_counters) { 952 952 ret = -EINVAL; 953 953 goto unlock_up_free; 954 954 }
+20 -16
net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c
··· 528 528 529 529 /* Decode */ 530 530 if ((err = (Decoders[son->type]) (bs, son, base, 531 - level + 1)) > 532 - H323_ERROR_STOP) 531 + level + 1)) < 532 + H323_ERROR_NONE) 533 533 return err; 534 534 535 535 bs->cur = beg + len; 536 536 bs->bit = 0; 537 537 } else if ((err = (Decoders[son->type]) (bs, son, base, 538 - level + 1))) 538 + level + 1)) < 539 + H323_ERROR_NONE) 539 540 return err; 540 541 } 541 542 ··· 555 554 556 555 /* Decode the extension components */ 557 556 for (opt = 0; opt < bmp2_len; opt++, i++, son++) { 558 - if (son->attr & STOP) { 557 + if (i < f->ub && son->attr & STOP) { 559 558 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", 560 559 son->name); 561 560 return H323_ERROR_STOP; ··· 585 584 beg = bs->cur; 586 585 587 586 if ((err = (Decoders[son->type]) (bs, son, base, 588 - level + 1)) > 589 - H323_ERROR_STOP) 587 + level + 1)) < 588 + H323_ERROR_NONE) 590 589 return err; 591 590 592 591 bs->cur = beg + len; ··· 661 660 i < 662 661 effective_count ? 663 662 base : NULL, 664 - level + 1)) > 665 - H323_ERROR_STOP) 663 + level + 1)) < 664 + H323_ERROR_NONE) 666 665 return err; 667 666 668 667 bs->cur = beg + len; 669 668 bs->bit = 0; 670 669 } else 671 - if ((err = (Decoders[son->type]) (bs, son, 672 - i < effective_count ? 673 - base : NULL, 674 - level + 1))) 675 - return err; 670 + if ((err = (Decoders[son->type]) (bs, son, 671 + i < 672 + effective_count ? 
673 + base : NULL, 674 + level + 1)) < 675 + H323_ERROR_NONE) 676 + return err; 676 677 677 678 if (base) 678 679 base += son->offset; ··· 738 735 } 739 736 beg = bs->cur; 740 737 741 - if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) > 742 - H323_ERROR_STOP) 738 + if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) < 739 + H323_ERROR_NONE) 743 740 return err; 744 741 745 742 bs->cur = beg + len; 746 743 bs->bit = 0; 747 - } else if ((err = (Decoders[son->type]) (bs, son, base, level + 1))) 744 + } else if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) < 745 + H323_ERROR_NONE) 748 746 return err; 749 747 750 748 return H323_ERROR_NONE;
+6 -6
net/ipv4/netfilter/ip_nat_proto_gre.c
··· 49 49 const union ip_conntrack_manip_proto *min, 50 50 const union ip_conntrack_manip_proto *max) 51 51 { 52 - u_int32_t key; 52 + __be16 key; 53 53 54 54 if (maniptype == IP_NAT_MANIP_SRC) 55 55 key = tuple->src.u.gre.key; 56 56 else 57 57 key = tuple->dst.u.gre.key; 58 58 59 - return ntohl(key) >= ntohl(min->gre.key) 60 - && ntohl(key) <= ntohl(max->gre.key); 59 + return ntohs(key) >= ntohs(min->gre.key) 60 + && ntohs(key) <= ntohs(max->gre.key); 61 61 } 62 62 63 63 /* generate unique tuple ... */ ··· 81 81 min = 1; 82 82 range_size = 0xffff; 83 83 } else { 84 - min = ntohl(range->min.gre.key); 85 - range_size = ntohl(range->max.gre.key) - min + 1; 84 + min = ntohs(range->min.gre.key); 85 + range_size = ntohs(range->max.gre.key) - min + 1; 86 86 } 87 87 88 88 DEBUGP("min = %u, range_size = %u\n", min, range_size); 89 89 90 90 for (i = 0; i < range_size; i++, key++) { 91 - *keyptr = htonl(min + key % range_size); 91 + *keyptr = htons(min + key % range_size); 92 92 if (!ip_nat_used_tuple(tuple, conntrack)) 93 93 return 1; 94 94 }
+8 -8
net/ipv4/netfilter/ip_nat_snmp_basic.c
··· 768 768 len *= sizeof(unsigned long); 769 769 *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); 770 770 if (*obj == NULL) { 771 + kfree(lp); 771 772 kfree(id); 772 773 if (net_ratelimit()) 773 774 printk("OOM in bsalg (%d)\n", __LINE__); ··· 1004 1003 1005 1004 return 1; 1006 1005 1006 + err_addr_free: 1007 + kfree((unsigned long *)trap->ip_address); 1008 + 1007 1009 err_id_free: 1008 1010 kfree(trap->id); 1009 1011 1010 - err_addr_free: 1011 - kfree((unsigned long *)trap->ip_address); 1012 - 1013 1012 return 0; 1014 1013 } 1015 1014 ··· 1127 1126 struct snmp_v1_trap trap; 1128 1127 unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check); 1129 1128 1130 - /* Discard trap allocations regardless */ 1131 - kfree(trap.id); 1132 - kfree((unsigned long *)trap.ip_address); 1133 - 1134 - if (!ret) 1129 + if (ret) { 1130 + kfree(trap.id); 1131 + kfree((unsigned long *)trap.ip_address); 1132 + } else 1135 1133 return ret; 1136 1134 1137 1135 } else {
+1 -1
net/ipv4/netfilter/ipt_LOG.c
··· 428 428 429 429 if (loginfo->logflags & IPT_LOG_NFLOG) 430 430 nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li, 431 - loginfo->prefix); 431 + "%s", loginfo->prefix); 432 432 else 433 433 ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li, 434 434 loginfo->prefix);
+1 -1
net/ipv4/netfilter/ipt_recent.c
··· 821 821 /* Create our proc 'status' entry. */ 822 822 curr_table->status_proc = create_proc_entry(curr_table->name, ip_list_perms, proc_net_ipt_recent); 823 823 if (!curr_table->status_proc) { 824 + vfree(hold); 824 825 printk(KERN_INFO RECENT_NAME ": checkentry: unable to allocate for /proc entry.\n"); 825 826 /* Destroy the created table */ 826 827 spin_lock_bh(&recent_lock); ··· 846 845 spin_unlock_bh(&recent_lock); 847 846 vfree(curr_table->time_info); 848 847 vfree(curr_table->hash_table); 849 - vfree(hold); 850 848 vfree(curr_table->table); 851 849 vfree(curr_table); 852 850 return 0;
+2
net/ipv4/tcp_input.c
··· 1662 1662 if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) { 1663 1663 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1664 1664 tp->lost_out += tcp_skb_pcount(skb); 1665 + if (IsReno(tp)) 1666 + tcp_remove_reno_sacks(sk, tp, tcp_skb_pcount(skb) + 1); 1665 1667 1666 1668 /* clear xmit_retrans hint */ 1667 1669 if (tp->retransmit_skb_hint &&
+1 -1
net/ipv4/xfrm4_policy.c
··· 221 221 if (pskb_may_pull(skb, xprth + 4 - skb->data)) { 222 222 u16 *ipcomp_hdr = (u16 *)xprth; 223 223 224 - fl->fl_ipsec_spi = ntohl(ntohs(ipcomp_hdr[1])); 224 + fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1])); 225 225 } 226 226 break; 227 227 default:
+1 -1
net/ipv6/ipcomp6.c
··· 208 208 if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG) 209 209 return; 210 210 211 - spi = ntohl(ntohs(ipcomph->cpi)); 211 + spi = htonl(ntohs(ipcomph->cpi)); 212 212 x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6); 213 213 if (!x) 214 214 return;
+1 -1
net/ipv6/netfilter/ip6_tables.c
··· 1103 1103 1104 1104 write_lock_bh(&t->lock); 1105 1105 private = t->private; 1106 - if (private->number != paddc->num_counters) { 1106 + if (private->number != tmp.num_counters) { 1107 1107 ret = -EINVAL; 1108 1108 goto unlock_up_free; 1109 1109 }
+1 -1
net/ipv6/netfilter/ip6t_LOG.c
··· 439 439 440 440 if (loginfo->logflags & IP6T_LOG_NFLOG) 441 441 nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, 442 - loginfo->prefix); 442 + "%s", loginfo->prefix); 443 443 else 444 444 ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, 445 445 loginfo->prefix);
+1 -1
net/ipv6/netfilter/ip6t_eui64.c
··· 40 40 41 41 memset(eui64, 0, sizeof(eui64)); 42 42 43 - if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) { 43 + if (eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) { 44 44 if (skb->nh.ipv6h->version == 0x6) { 45 45 memcpy(eui64, eth_hdr(skb)->h_source, 3); 46 46 memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
+2 -2
net/ipx/af_ipx.c
··· 944 944 return rc; 945 945 } 946 946 947 - static int ipx_map_frame_type(unsigned char type) 947 + static __be16 ipx_map_frame_type(unsigned char type) 948 948 { 949 - int rc = 0; 949 + __be16 rc = 0; 950 950 951 951 switch (type) { 952 952 case IPX_FRAME_ETHERII: rc = htons(ETH_P_IPX); break;
+1 -1
net/ipx/ipx_route.c
··· 119 119 return rc; 120 120 } 121 121 122 - static int ipxrtr_delete(long net) 122 + static int ipxrtr_delete(__u32 net) 123 123 { 124 124 struct ipx_route *r, *tmp; 125 125 int rc;
+2 -1
net/irda/iriap.c
··· 544 544 { 545 545 struct sk_buff *tx_skb; 546 546 int n; 547 - __u32 tmp_be32, tmp_be16; 547 + __u32 tmp_be32; 548 + __be16 tmp_be16; 548 549 __u8 *fp; 549 550 550 551 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
+4 -2
net/sched/sch_generic.c
··· 193 193 netif_running(dev) && 194 194 netif_carrier_ok(dev)) { 195 195 if (netif_queue_stopped(dev) && 196 - (jiffies - dev->trans_start) > dev->watchdog_timeo) { 197 - printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name); 196 + time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) { 197 + 198 + printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", 199 + dev->name); 198 200 dev->tx_timeout(dev); 199 201 } 200 202 if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
+78 -60
net/sctp/input.c
··· 73 73 const union sctp_addr *peer, 74 74 struct sctp_transport **pt); 75 75 76 + static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb); 77 + 76 78 77 79 /* Calculate the SCTP checksum of an SCTP packet. */ 78 80 static inline int sctp_rcv_checksum(struct sk_buff *skb) ··· 188 186 */ 189 187 if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) 190 188 { 191 - sock_put(sk); 192 189 if (asoc) { 193 190 sctp_association_put(asoc); 194 191 asoc = NULL; ··· 198 197 sk = sctp_get_ctl_sock(); 199 198 ep = sctp_sk(sk)->ep; 200 199 sctp_endpoint_hold(ep); 201 - sock_hold(sk); 202 200 rcvr = &ep->base; 203 201 } 204 202 ··· 253 253 */ 254 254 sctp_bh_lock_sock(sk); 255 255 256 - /* It is possible that the association could have moved to a different 257 - * socket if it is peeled off. If so, update the sk. 258 - */ 259 - if (sk != rcvr->sk) { 260 - sctp_bh_lock_sock(rcvr->sk); 261 - sctp_bh_unlock_sock(sk); 262 - sk = rcvr->sk; 263 - } 264 - 265 256 if (sock_owned_by_user(sk)) 266 - sk_add_backlog(sk, skb); 257 + sctp_add_backlog(sk, skb); 267 258 else 268 - sctp_backlog_rcv(sk, skb); 259 + sctp_inq_push(&chunk->rcvr->inqueue, chunk); 269 260 270 - /* Release the sock and the sock ref we took in the lookup calls. 271 - * The asoc/ep ref will be released in sctp_backlog_rcv. 272 - */ 273 261 sctp_bh_unlock_sock(sk); 274 - sock_put(sk); 262 + 263 + /* Release the asoc/ep ref we took in the lookup calls. */ 264 + if (asoc) 265 + sctp_association_put(asoc); 266 + else 267 + sctp_endpoint_put(ep); 275 268 276 269 return 0; 277 270 ··· 273 280 return 0; 274 281 275 282 discard_release: 276 - /* Release any structures we may be holding. */ 277 - sock_put(sk); 283 + /* Release the asoc/ep ref we took in the lookup calls. */ 278 284 if (asoc) 279 285 sctp_association_put(asoc); 280 286 else ··· 282 290 goto discard_it; 283 291 } 284 292 285 - /* Handle second half of inbound skb processing. 
If the sock was busy, 286 - * we may have need to delay processing until later when the sock is 287 - * released (on the backlog). If not busy, we call this routine 288 - * directly from the bottom half. 293 + /* Process the backlog queue of the socket. Every skb on 294 + * the backlog holds a ref on an association or endpoint. 295 + * We hold this ref throughout the state machine to make 296 + * sure that the structure we need is still around. 289 297 */ 290 298 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) 291 299 { 292 300 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; 293 - struct sctp_inq *inqueue = NULL; 301 + struct sctp_inq *inqueue = &chunk->rcvr->inqueue; 294 302 struct sctp_ep_common *rcvr = NULL; 303 + int backloged = 0; 295 304 296 305 rcvr = chunk->rcvr; 297 306 298 - BUG_TRAP(rcvr->sk == sk); 307 + /* If the rcvr is dead then the association or endpoint 308 + * has been deleted and we can safely drop the chunk 309 + * and refs that we are holding. 310 + */ 311 + if (rcvr->dead) { 312 + sctp_chunk_free(chunk); 313 + goto done; 314 + } 299 315 300 - if (rcvr->dead) { 301 - sctp_chunk_free(chunk); 302 - } else { 303 - inqueue = &chunk->rcvr->inqueue; 304 - sctp_inq_push(inqueue, chunk); 305 - } 316 + if (unlikely(rcvr->sk != sk)) { 317 + /* In this case, the association moved from one socket to 318 + * another. We are currently sitting on the backlog of the 319 + * old socket, so we need to move. 320 + * However, since we are here in the process context we 321 + * need to take make sure that the user doesn't own 322 + * the new socket when we process the packet. 323 + * If the new socket is user-owned, queue the chunk to the 324 + * backlog of the new socket without dropping any refs. 325 + * Otherwise, we can safely push the chunk on the inqueue. 326 + */ 306 327 307 - /* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. 
*/ 308 - if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) 309 - sctp_association_put(sctp_assoc(rcvr)); 310 - else 311 - sctp_endpoint_put(sctp_ep(rcvr)); 312 - 328 + sk = rcvr->sk; 329 + sctp_bh_lock_sock(sk); 330 + 331 + if (sock_owned_by_user(sk)) { 332 + sk_add_backlog(sk, skb); 333 + backloged = 1; 334 + } else 335 + sctp_inq_push(inqueue, chunk); 336 + 337 + sctp_bh_unlock_sock(sk); 338 + 339 + /* If the chunk was backloged again, don't drop refs */ 340 + if (backloged) 341 + return 0; 342 + } else { 343 + sctp_inq_push(inqueue, chunk); 344 + } 345 + 346 + done: 347 + /* Release the refs we took in sctp_add_backlog */ 348 + if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) 349 + sctp_association_put(sctp_assoc(rcvr)); 350 + else if (SCTP_EP_TYPE_SOCKET == rcvr->type) 351 + sctp_endpoint_put(sctp_ep(rcvr)); 352 + else 353 + BUG(); 354 + 313 355 return 0; 314 356 } 315 357 316 - void sctp_backlog_migrate(struct sctp_association *assoc, 317 - struct sock *oldsk, struct sock *newsk) 358 + static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb) 318 359 { 319 - struct sk_buff *skb; 320 - struct sctp_chunk *chunk; 360 + struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; 361 + struct sctp_ep_common *rcvr = chunk->rcvr; 321 362 322 - skb = oldsk->sk_backlog.head; 323 - oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL; 324 - while (skb != NULL) { 325 - struct sk_buff *next = skb->next; 363 + /* Hold the assoc/ep while hanging on the backlog queue. 
364 + * This way, we know structures we need will not disappear from us 365 + */ 366 + if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) 367 + sctp_association_hold(sctp_assoc(rcvr)); 368 + else if (SCTP_EP_TYPE_SOCKET == rcvr->type) 369 + sctp_endpoint_hold(sctp_ep(rcvr)); 370 + else 371 + BUG(); 326 372 327 - chunk = SCTP_INPUT_CB(skb)->chunk; 328 - skb->next = NULL; 329 - if (&assoc->base == chunk->rcvr) 330 - sk_add_backlog(newsk, skb); 331 - else 332 - sk_add_backlog(oldsk, skb); 333 - skb = next; 334 - } 373 + sk_add_backlog(sk, skb); 335 374 } 336 375 337 376 /* Handle icmp frag needed error. */ ··· 435 412 union sctp_addr daddr; 436 413 struct sctp_af *af; 437 414 struct sock *sk = NULL; 438 - struct sctp_association *asoc = NULL; 415 + struct sctp_association *asoc; 439 416 struct sctp_transport *transport = NULL; 440 417 441 418 *app = NULL; *tpp = NULL; ··· 476 453 return sk; 477 454 478 455 out: 479 - sock_put(sk); 480 456 if (asoc) 481 457 sctp_association_put(asoc); 482 458 return NULL; ··· 485 463 void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) 486 464 { 487 465 sctp_bh_unlock_sock(sk); 488 - sock_put(sk); 489 466 if (asoc) 490 467 sctp_association_put(asoc); 491 468 } ··· 511 490 int type = skb->h.icmph->type; 512 491 int code = skb->h.icmph->code; 513 492 struct sock *sk; 514 - struct sctp_association *asoc; 493 + struct sctp_association *asoc = NULL; 515 494 struct sctp_transport *transport; 516 495 struct inet_sock *inet; 517 496 char *saveip, *savesctp; ··· 737 716 738 717 hit: 739 718 sctp_endpoint_hold(ep); 740 - sock_hold(epb->sk); 741 719 read_unlock(&head->lock); 742 720 return ep; 743 721 } ··· 838 818 hit: 839 819 *pt = transport; 840 820 sctp_association_hold(asoc); 841 - sock_hold(epb->sk); 842 821 read_unlock(&head->lock); 843 822 return asoc; 844 823 } ··· 865 846 struct sctp_transport *transport; 866 847 867 848 if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) { 868 - sock_put(asoc->base.sk); 869 849 
sctp_association_put(asoc); 870 850 return 1; 871 851 }
+12 -4
net/sctp/sm_sideeffect.c
··· 498 498 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 499 499 SCTP_STATE(SCTP_STATE_CLOSED)); 500 500 501 - /* Set sk_err to ECONNRESET on a 1-1 style socket. */ 502 - if (!sctp_style(asoc->base.sk, UDP)) 503 - asoc->base.sk->sk_err = ECONNRESET; 504 - 505 501 /* SEND_FAILED sent later when cleaning up the association. */ 506 502 asoc->outqueue.error = error; 507 503 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); ··· 832 836 } 833 837 834 838 return; 839 + } 840 + 841 + /* Helper function to set sk_err on a 1-1 style socket. */ 842 + static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error) 843 + { 844 + struct sock *sk = asoc->base.sk; 845 + 846 + if (!sctp_style(sk, UDP)) 847 + sk->sk_err = error; 835 848 } 836 849 837 850 /* These three macros allow us to pull the debugging code out of the ··· 1462 1457 error = sctp_outq_uncork(&asoc->outqueue); 1463 1458 local_cork = 0; 1464 1459 asoc->peer.retran_path = t; 1460 + break; 1461 + case SCTP_CMD_SET_SK_ERR: 1462 + sctp_cmd_set_sk_err(asoc, cmd->obj.error); 1465 1463 break; 1466 1464 default: 1467 1465 printk(KERN_WARNING "Impossible command: %u, %p\n",
+54 -27
net/sctp/sm_statefuns.c
··· 93 93 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); 94 94 95 95 static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, 96 - __u16 error, 96 + __u16 error, int sk_err, 97 97 const struct sctp_association *asoc, 98 98 struct sctp_transport *transport); 99 99 ··· 448 448 __u32 init_tag; 449 449 struct sctp_chunk *err_chunk; 450 450 struct sctp_packet *packet; 451 - sctp_disposition_t ret; 451 + __u16 error; 452 452 453 453 if (!sctp_vtag_verify(chunk, asoc)) 454 454 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); ··· 480 480 goto nomem; 481 481 482 482 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); 483 - sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 484 - SCTP_STATE(SCTP_STATE_CLOSED)); 485 - SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 486 - sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); 487 - return SCTP_DISPOSITION_DELETE_TCB; 483 + return sctp_stop_t1_and_abort(commands, SCTP_ERROR_INV_PARAM, 484 + ECONNREFUSED, asoc, 485 + chunk->transport); 488 486 } 489 487 490 488 /* Verify the INIT chunk before processing it. 
*/ ··· 509 511 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, 510 512 SCTP_PACKET(packet)); 511 513 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 512 - sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 513 - SCTP_STATE(SCTP_STATE_CLOSED)); 514 - sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, 515 - SCTP_NULL()); 516 - return SCTP_DISPOSITION_CONSUME; 514 + error = SCTP_ERROR_INV_PARAM; 517 515 } else { 518 - sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 519 - SCTP_STATE(SCTP_STATE_CLOSED)); 520 - sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, 521 - SCTP_NULL()); 522 - return SCTP_DISPOSITION_NOMEM; 516 + error = SCTP_ERROR_NO_RESOURCE; 523 517 } 524 518 } else { 525 - ret = sctp_sf_tabort_8_4_8(ep, asoc, type, arg, 526 - commands); 527 - sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 528 - SCTP_STATE(SCTP_STATE_CLOSED)); 529 - sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, 530 - SCTP_NULL()); 531 - return ret; 519 + sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); 520 + error = SCTP_ERROR_INV_PARAM; 532 521 } 522 + return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, 523 + asoc, chunk->transport); 533 524 } 534 525 535 526 /* Tag the variable length parameters. Note that we never ··· 873 886 struct sctp_transport *transport = (struct sctp_transport *) arg; 874 887 875 888 if (asoc->overall_error_count >= asoc->max_retrans) { 889 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 890 + SCTP_ERROR(ETIMEDOUT)); 876 891 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. 
*/ 877 892 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 878 893 SCTP_U32(SCTP_ERROR_NO_ERROR)); ··· 1019 1030 commands); 1020 1031 1021 1032 hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; 1033 + /* Make sure that the length of the parameter is what we expect */ 1034 + if (ntohs(hbinfo->param_hdr.length) != 1035 + sizeof(sctp_sender_hb_info_t)) { 1036 + return SCTP_DISPOSITION_DISCARD; 1037 + } 1038 + 1022 1039 from_addr = hbinfo->daddr; 1023 1040 link = sctp_assoc_lookup_paddr(asoc, &from_addr); 1024 1041 ··· 2121 2126 int attempts = asoc->init_err_counter + 1; 2122 2127 2123 2128 if (attempts > asoc->max_init_attempts) { 2129 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 2130 + SCTP_ERROR(ETIMEDOUT)); 2124 2131 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 2125 2132 SCTP_U32(SCTP_ERROR_STALE_COOKIE)); 2126 2133 return SCTP_DISPOSITION_DELETE_TCB; ··· 2259 2262 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) 2260 2263 error = ((sctp_errhdr_t *)chunk->skb->data)->cause; 2261 2264 2265 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET)); 2262 2266 /* ASSOC_FAILED will DELETE_TCB. 
*/ 2263 2267 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error)); 2264 2268 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); ··· 2304 2306 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) 2305 2307 error = ((sctp_errhdr_t *)chunk->skb->data)->cause; 2306 2308 2307 - return sctp_stop_t1_and_abort(commands, error, asoc, chunk->transport); 2309 + return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc, 2310 + chunk->transport); 2308 2311 } 2309 2312 2310 2313 /* ··· 2317 2318 void *arg, 2318 2319 sctp_cmd_seq_t *commands) 2319 2320 { 2320 - return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR, asoc, 2321 + return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR, 2322 + ENOPROTOOPT, asoc, 2321 2323 (struct sctp_transport *)arg); 2322 2324 } 2323 2325 ··· 2343 2343 * This is common code called by several sctp_sf_*_abort() functions above. 2344 2344 */ 2345 2345 static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, 2346 - __u16 error, 2346 + __u16 error, int sk_err, 2347 2347 const struct sctp_association *asoc, 2348 2348 struct sctp_transport *transport) 2349 2349 { ··· 2353 2353 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 2354 2354 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 2355 2355 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); 2356 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err)); 2356 2357 /* CMD_INIT_FAILED will DELETE_TCB. */ 2357 2358 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 2358 2359 SCTP_U32(error)); ··· 3337 3336 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 3338 3337 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); 3339 3338 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); 3339 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 3340 + SCTP_ERROR(ECONNABORTED)); 3340 3341 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 3341 3342 SCTP_U32(SCTP_ERROR_ASCONF_ACK)); 3342 3343 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); ··· 3365 3362 * processing the rest of the chunks in the packet. 
3366 3363 */ 3367 3364 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); 3365 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 3366 + SCTP_ERROR(ECONNABORTED)); 3368 3367 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 3369 3368 SCTP_U32(SCTP_ERROR_ASCONF_ACK)); 3370 3369 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); ··· 3719 3714 if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) { 3720 3715 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 3721 3716 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); 3717 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 3718 + SCTP_ERROR(ECONNREFUSED)); 3722 3719 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 3723 3720 SCTP_U32(SCTP_ERROR_PROTO_VIOLATION)); 3724 3721 } else { 3722 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 3723 + SCTP_ERROR(ECONNABORTED)); 3725 3724 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 3726 3725 SCTP_U32(SCTP_ERROR_PROTO_VIOLATION)); 3727 3726 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); ··· 4043 4034 * TCB. This is a departure from our typical NOMEM handling. 4044 4035 */ 4045 4036 4037 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 4038 + SCTP_ERROR(ECONNABORTED)); 4046 4039 /* Delete the established association. */ 4047 4040 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 4048 4041 SCTP_U32(SCTP_ERROR_USER_ABORT)); ··· 4186 4175 * TCB. This is a departure from our typical NOMEM handling. 4187 4176 */ 4188 4177 4178 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 4179 + SCTP_ERROR(ECONNREFUSED)); 4189 4180 /* Delete the established association. */ 4190 4181 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 4191 4182 SCTP_U32(SCTP_ERROR_USER_ABORT)); ··· 4556 4543 struct sctp_transport *transport = arg; 4557 4544 4558 4545 if (asoc->overall_error_count >= asoc->max_retrans) { 4546 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 4547 + SCTP_ERROR(ETIMEDOUT)); 4559 4548 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. 
*/ 4560 4549 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 4561 4550 SCTP_U32(SCTP_ERROR_NO_ERROR)); ··· 4677 4662 SCTP_DEBUG_PRINTK("Giving up on INIT, attempts: %d" 4678 4663 " max_init_attempts: %d\n", 4679 4664 attempts, asoc->max_init_attempts); 4665 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 4666 + SCTP_ERROR(ETIMEDOUT)); 4680 4667 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 4681 4668 SCTP_U32(SCTP_ERROR_NO_ERROR)); 4682 4669 return SCTP_DISPOSITION_DELETE_TCB; ··· 4728 4711 4729 4712 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); 4730 4713 } else { 4714 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 4715 + SCTP_ERROR(ETIMEDOUT)); 4731 4716 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 4732 4717 SCTP_U32(SCTP_ERROR_NO_ERROR)); 4733 4718 return SCTP_DISPOSITION_DELETE_TCB; ··· 4761 4742 4762 4743 SCTP_DEBUG_PRINTK("Timer T2 expired.\n"); 4763 4744 if (asoc->overall_error_count >= asoc->max_retrans) { 4745 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 4746 + SCTP_ERROR(ETIMEDOUT)); 4764 4747 /* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ 4765 4748 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 4766 4749 SCTP_U32(SCTP_ERROR_NO_ERROR)); ··· 4838 4817 if (asoc->overall_error_count >= asoc->max_retrans) { 4839 4818 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 4840 4819 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); 4820 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 4821 + SCTP_ERROR(ETIMEDOUT)); 4841 4822 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 4842 4823 SCTP_U32(SCTP_ERROR_NO_ERROR)); 4843 4824 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); ··· 4893 4870 goto nomem; 4894 4871 4895 4872 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); 4873 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 4874 + SCTP_ERROR(ETIMEDOUT)); 4896 4875 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 4897 4876 SCTP_U32(SCTP_ERROR_NO_ERROR)); 4898 4877 ··· 5334 5309 * processing the rest of the chunks in the packet. 
5335 5310 */ 5336 5311 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); 5312 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 5313 + SCTP_ERROR(ECONNABORTED)); 5337 5314 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 5338 5315 SCTP_U32(SCTP_ERROR_NO_DATA)); 5339 5316 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+17 -12
net/sctp/socket.c
··· 1057 1057 inet_sk(sk)->dport = htons(asoc->peer.port); 1058 1058 af = sctp_get_af_specific(to.sa.sa_family); 1059 1059 af->to_sk_daddr(&to, sk); 1060 + sk->sk_err = 0; 1060 1061 1061 1062 timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK); 1062 1063 err = sctp_wait_for_connect(asoc, &timeo); ··· 1229 1228 1230 1229 ep = sctp_sk(sk)->ep; 1231 1230 1232 - /* Walk all associations on a socket, not on an endpoint. */ 1231 + /* Walk all associations on an endpoint. */ 1233 1232 list_for_each_safe(pos, temp, &ep->asocs) { 1234 1233 asoc = list_entry(pos, struct sctp_association, asocs); 1235 1234 ··· 1242 1241 if (sctp_state(asoc, CLOSED)) { 1243 1242 sctp_unhash_established(asoc); 1244 1243 sctp_association_free(asoc); 1244 + continue; 1245 + } 1246 + } 1245 1247 1246 - } else if (sock_flag(sk, SOCK_LINGER) && 1247 - !sk->sk_lingertime) 1248 - sctp_primitive_ABORT(asoc, NULL); 1249 - else 1250 - sctp_primitive_SHUTDOWN(asoc, NULL); 1251 - } else 1248 + if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) 1249 + sctp_primitive_ABORT(asoc, NULL); 1250 + else 1252 1251 sctp_primitive_SHUTDOWN(asoc, NULL); 1253 1252 } 1254 1253 ··· 5318 5317 */ 5319 5318 sctp_release_sock(sk); 5320 5319 current_timeo = schedule_timeout(current_timeo); 5320 + BUG_ON(sk != asoc->base.sk); 5321 5321 sctp_lock_sock(sk); 5322 5322 5323 5323 *timeo_p = current_timeo; ··· 5606 5604 */ 5607 5605 newsp->type = type; 5608 5606 5609 - spin_lock_bh(&oldsk->sk_lock.slock); 5610 - /* Migrate the backlog from oldsk to newsk. */ 5611 - sctp_backlog_migrate(assoc, oldsk, newsk); 5612 - /* Migrate the association to the new socket. */ 5607 + /* Mark the new socket "in-use" by the user so that any packets 5608 + * that may arrive on the association after we've moved it are 5609 + * queued to the backlog. This prevents a potential race between 5610 + * backlog processing on the old socket and new-packet processing 5611 + * on the new socket. 
5612 + */ 5613 + sctp_lock_sock(newsk); 5613 5614 sctp_assoc_migrate(assoc, newsk); 5614 - spin_unlock_bh(&oldsk->sk_lock.slock); 5615 5615 5616 5616 /* If the association on the newsk is already closed before accept() 5617 5617 * is called, set RCV_SHUTDOWN flag. ··· 5622 5618 newsk->sk_shutdown |= RCV_SHUTDOWN; 5623 5619 5624 5620 newsk->sk_state = SCTP_SS_ESTABLISHED; 5621 + sctp_release_sock(newsk); 5625 5622 } 5626 5623 5627 5624 /* This proto struct describes the ULP interface for SCTP. */
+1
net/sunrpc/cache.c
··· 159 159 detail->update(tmp, new); 160 160 tmp->next = *head; 161 161 *head = tmp; 162 + detail->entries++; 162 163 cache_get(tmp); 163 164 is_new = cache_fresh_locked(tmp, new->expiry_time); 164 165 cache_fresh_locked(old, 0);
+1 -1
net/xfrm/xfrm_input.c
··· 62 62 case IPPROTO_COMP: 63 63 if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr))) 64 64 return -EINVAL; 65 - *spi = ntohl(ntohs(*(u16*)(skb->h.raw + 2))); 65 + *spi = htonl(ntohs(*(u16*)(skb->h.raw + 2))); 66 66 *seq = 0; 67 67 return 0; 68 68 default:
+70 -20
scripts/mod/modpost.c
··· 697 697 698 698 /* Walk through all sections */ 699 699 for (i = 0; i < hdr->e_shnum; i++) { 700 - Elf_Rela *rela; 701 - Elf_Rela *start = (void *)hdr + sechdrs[i].sh_offset; 702 - Elf_Rela *stop = (void*)start + sechdrs[i].sh_size; 703 - const char *name = secstrings + sechdrs[i].sh_name + 704 - strlen(".rela"); 700 + const char *name = secstrings + sechdrs[i].sh_name; 701 + const char *secname; 702 + Elf_Rela r; 703 + unsigned int r_sym; 705 704 /* We want to process only relocation sections and not .init */ 706 - if (section_ref_ok(name) || (sechdrs[i].sh_type != SHT_RELA)) 707 - continue; 708 - 709 - for (rela = start; rela < stop; rela++) { 710 - Elf_Rela r; 711 - const char *secname; 712 - r.r_offset = TO_NATIVE(rela->r_offset); 713 - r.r_info = TO_NATIVE(rela->r_info); 714 - r.r_addend = TO_NATIVE(rela->r_addend); 715 - sym = elf->symtab_start + ELF_R_SYM(r.r_info); 716 - /* Skip special sections */ 717 - if (sym->st_shndx >= SHN_LORESERVE) 705 + if (sechdrs[i].sh_type == SHT_RELA) { 706 + Elf_Rela *rela; 707 + Elf_Rela *start = (void *)hdr + sechdrs[i].sh_offset; 708 + Elf_Rela *stop = (void*)start + sechdrs[i].sh_size; 709 + name += strlen(".rela"); 710 + if (section_ref_ok(name)) 718 711 continue; 719 712 720 - secname = secstrings + sechdrs[sym->st_shndx].sh_name; 721 - if (section(secname)) 722 - warn_sec_mismatch(modname, name, elf, sym, r); 713 + for (rela = start; rela < stop; rela++) { 714 + r.r_offset = TO_NATIVE(rela->r_offset); 715 + #if KERNEL_ELFCLASS == ELFCLASS64 716 + if (hdr->e_machine == EM_MIPS) { 717 + r_sym = ELF64_MIPS_R_SYM(rela->r_info); 718 + r_sym = TO_NATIVE(r_sym); 719 + } else { 720 + r.r_info = TO_NATIVE(rela->r_info); 721 + r_sym = ELF_R_SYM(r.r_info); 722 + } 723 + #else 724 + r.r_info = TO_NATIVE(rela->r_info); 725 + r_sym = ELF_R_SYM(r.r_info); 726 + #endif 727 + r.r_addend = TO_NATIVE(rela->r_addend); 728 + sym = elf->symtab_start + r_sym; 729 + /* Skip special sections */ 730 + if (sym->st_shndx >= SHN_LORESERVE) 731 
+ continue; 732 + 733 + secname = secstrings + 734 + sechdrs[sym->st_shndx].sh_name; 735 + if (section(secname)) 736 + warn_sec_mismatch(modname, name, 737 + elf, sym, r); 738 + } 739 + } else if (sechdrs[i].sh_type == SHT_REL) { 740 + Elf_Rel *rel; 741 + Elf_Rel *start = (void *)hdr + sechdrs[i].sh_offset; 742 + Elf_Rel *stop = (void*)start + sechdrs[i].sh_size; 743 + name += strlen(".rel"); 744 + if (section_ref_ok(name)) 745 + continue; 746 + 747 + for (rel = start; rel < stop; rel++) { 748 + r.r_offset = TO_NATIVE(rel->r_offset); 749 + #if KERNEL_ELFCLASS == ELFCLASS64 750 + if (hdr->e_machine == EM_MIPS) { 751 + r_sym = ELF64_MIPS_R_SYM(rel->r_info); 752 + r_sym = TO_NATIVE(r_sym); 753 + } else { 754 + r.r_info = TO_NATIVE(rel->r_info); 755 + r_sym = ELF_R_SYM(r.r_info); 756 + } 757 + #else 758 + r.r_info = TO_NATIVE(rel->r_info); 759 + r_sym = ELF_R_SYM(r.r_info); 760 + #endif 761 + r.r_addend = 0; 762 + sym = elf->symtab_start + r_sym; 763 + /* Skip special sections */ 764 + if (sym->st_shndx >= SHN_LORESERVE) 765 + continue; 766 + 767 + secname = secstrings + 768 + sechdrs[sym->st_shndx].sh_name; 769 + if (section(secname)) 770 + warn_sec_mismatch(modname, name, 771 + elf, sym, r); 772 + } 723 773 } 724 774 } 725 775 }
+21 -2
scripts/mod/modpost.h
··· 21 21 #define ELF_ST_BIND ELF32_ST_BIND 22 22 #define ELF_ST_TYPE ELF32_ST_TYPE 23 23 24 + #define Elf_Rel Elf32_Rel 24 25 #define Elf_Rela Elf32_Rela 25 26 #define ELF_R_SYM ELF32_R_SYM 26 27 #define ELF_R_TYPE ELF32_R_TYPE ··· 35 34 #define ELF_ST_BIND ELF64_ST_BIND 36 35 #define ELF_ST_TYPE ELF64_ST_TYPE 37 36 37 + #define Elf_Rel Elf64_Rel 38 38 #define Elf_Rela Elf64_Rela 39 39 #define ELF_R_SYM ELF64_R_SYM 40 40 #define ELF_R_TYPE ELF64_R_TYPE 41 41 #endif 42 + 43 + /* The 64-bit MIPS ELF ABI uses an unusual reloc format. */ 44 + typedef struct 45 + { 46 + Elf32_Word r_sym; /* Symbol index */ 47 + unsigned char r_ssym; /* Special symbol for 2nd relocation */ 48 + unsigned char r_type3; /* 3rd relocation type */ 49 + unsigned char r_type2; /* 2nd relocation type */ 50 + unsigned char r_type1; /* 1st relocation type */ 51 + } _Elf64_Mips_R_Info; 52 + 53 + typedef union 54 + { 55 + Elf64_Xword r_info_number; 56 + _Elf64_Mips_R_Info r_info_fields; 57 + } _Elf64_Mips_R_Info_union; 58 + 59 + #define ELF64_MIPS_R_SYM(i) \ 60 + ((__extension__ (_Elf64_Mips_R_Info_union)(i)).r_info_fields.r_sym) 42 61 43 62 #if KERNEL_ELFDATA != HOST_ELFDATA 44 63 ··· 68 47 for (i = 0; i < size; i++) 69 48 ((unsigned char*)dest)[i] = ((unsigned char*)src)[size - i-1]; 70 49 } 71 - 72 - 73 50 74 51 #define TO_NATIVE(x) \ 75 52 ({ \
+1 -1
security/selinux/hooks.c
··· 3231 3231 goto out; 3232 3232 3233 3233 /* Handle mapped IPv4 packets arriving via IPv6 sockets */ 3234 - if (family == PF_INET6 && skb->protocol == ntohs(ETH_P_IP)) 3234 + if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP)) 3235 3235 family = PF_INET; 3236 3236 3237 3237 read_lock_bh(&sk->sk_callback_lock);
+4
security/selinux/ss/services.c
··· 594 594 595 595 *scontext_len = strlen(initial_sid_to_string[sid]) + 1; 596 596 scontextp = kmalloc(*scontext_len,GFP_ATOMIC); 597 + if (!scontextp) { 598 + rc = -ENOMEM; 599 + goto out; 600 + } 597 601 strcpy(scontextp, initial_sid_to_string[sid]); 598 602 *scontext = scontextp; 599 603 goto out;
+1 -1
sound/drivers/mpu401/mpu401.c
··· 151 151 152 152 MODULE_DEVICE_TABLE(pnp, snd_mpu401_pnpids); 153 153 154 - static int __init snd_mpu401_pnp(int dev, struct pnp_dev *device, 154 + static int __devinit snd_mpu401_pnp(int dev, struct pnp_dev *device, 155 155 const struct pnp_device_id *id) 156 156 { 157 157 if (!pnp_port_valid(device, 0) ||
+2
sound/isa/es18xx.c
··· 85 85 #include <linux/pnp.h> 86 86 #include <linux/isapnp.h> 87 87 #include <linux/moduleparam.h> 88 + #include <linux/delay.h> 89 + 88 90 #include <asm/io.h> 89 91 #include <asm/dma.h> 90 92 #include <sound/core.h>
+6 -4
sound/oss/ad1848.c
··· 2026 2026 if (irq > 0) 2027 2027 { 2028 2028 devc->dev_no = my_dev; 2029 - if (request_irq(devc->irq, adintr, 0, devc->name, (void *)my_dev) < 0) 2029 + if (request_irq(devc->irq, adintr, 0, devc->name, 2030 + (void *)(long)my_dev) < 0) 2030 2031 { 2031 2032 printk(KERN_WARNING "ad1848: Unable to allocate IRQ\n"); 2032 2033 /* Don't free it either then.. */ ··· 2176 2175 if (!share_dma) 2177 2176 { 2178 2177 if (devc->irq > 0) /* There is no point in freeing irq, if it wasn't allocated */ 2179 - free_irq(devc->irq, (void *)devc->dev_no); 2178 + free_irq(devc->irq, (void *)(long)devc->dev_no); 2180 2179 2181 2180 sound_free_dma(dma_playback); 2182 2181 ··· 2205 2204 unsigned char c930_stat = 0; 2206 2205 int cnt = 0; 2207 2206 2208 - dev = (int)dev_id; 2207 + dev = (long)dev_id; 2209 2208 devc = (ad1848_info *) audio_devs[dev]->devc; 2210 2209 2211 2210 interrupt_again: /* Jump back here if int status doesn't reset */ ··· 2901 2900 return(dev); 2902 2901 } 2903 2902 2904 - static struct pnp_dev *ad1848_init_generic(struct pnp_card *bus, struct address_info *hw_config, int slot) 2903 + static struct pnp_dev __init *ad1848_init_generic(struct pnp_card *bus, 2904 + struct address_info *hw_config, int slot) 2905 2905 { 2906 2906 2907 2907 /* Configure Audio device */
+3 -3
sound/oss/nm256_audio.c
··· 960 960 961 961 962 962 /* Installs the AC97 mixer into CARD. */ 963 - static int __init 963 + static int __devinit 964 964 nm256_install_mixer (struct nm256_info *card) 965 965 { 966 966 int mixer; ··· 995 995 * RAM. 996 996 */ 997 997 998 - static void __init 998 + static void __devinit 999 999 nm256_peek_for_sig (struct nm256_info *card) 1000 1000 { 1001 1001 u32 port1offset ··· 1056 1056 card->playing = 0; 1057 1057 card->recording = 0; 1058 1058 card->rev = rev; 1059 - spin_lock_init(&card->lock); 1059 + spin_lock_init(&card->lock); 1060 1060 1061 1061 /* Init the memory port info. */ 1062 1062 for (x = 0; x < 2; x++) {