Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'topic/hda-jack-rework' into for-next

This is a merge of rework of HD-audio jack event handling code.
It extends the jack table to allow multiple callbacks.

+2943 -1988
+107
Documentation/devicetree/bindings/mfd/tc3589x.txt
··· 1 + * Toshiba TC3589x multi-purpose expander 2 + 3 + The Toshiba TC3589x series are I2C-based MFD devices which may expose the 4 + following built-in devices: gpio, keypad, rotator (vibrator), PWM (for 5 + e.g. LEDs or vibrators) The included models are: 6 + 7 + - TC35890 8 + - TC35892 9 + - TC35893 10 + - TC35894 11 + - TC35895 12 + - TC35896 13 + 14 + Required properties: 15 + - compatible : must be "toshiba,tc35890", "toshiba,tc35892", "toshiba,tc35893", 16 + "toshiba,tc35894", "toshiba,tc35895" or "toshiba,tc35896" 17 + - reg : I2C address of the device 18 + - interrupt-parent : specifies which IRQ controller we're connected to 19 + - interrupts : the interrupt on the parent the controller is connected to 20 + - interrupt-controller : marks the device node as an interrupt controller 21 + - #interrupt-cells : should be <1>, the first cell is the IRQ offset on this 22 + TC3589x interrupt controller. 23 + 24 + Optional nodes: 25 + 26 + - GPIO 27 + This GPIO module inside the TC3589x has 24 (TC35890, TC35892) or 20 28 + (other models) GPIO lines. 29 + - compatible : must be "toshiba,tc3589x-gpio" 30 + - interrupts : interrupt on the parent, which must be the tc3589x MFD device 31 + - interrupt-controller : marks the device node as an interrupt controller 32 + - #interrupt-cells : should be <2>, the first cell is the IRQ offset on this 33 + TC3589x GPIO interrupt controller, the second cell is the interrupt flags 34 + in accordance with <dt-bindings/interrupt-controller/irq.h>. The following 35 + flags are valid: 36 + - IRQ_TYPE_LEVEL_LOW 37 + - IRQ_TYPE_LEVEL_HIGH 38 + - IRQ_TYPE_EDGE_RISING 39 + - IRQ_TYPE_EDGE_FALLING 40 + - IRQ_TYPE_EDGE_BOTH 41 + - gpio-controller : marks the device node as a GPIO controller 42 + - #gpio-cells : should be <2>, the first cell is the GPIO offset on this 43 + GPIO controller, the second cell is the flags. 44 + 45 + - Keypad 46 + This keypad is the same on all variants, supporting up to 96 different 47 + keys. 
The linux-specific properties are modeled on those already existing 48 + in other input drivers. 49 + - compatible : must be "toshiba,tc3589x-keypad" 50 + - debounce-delay-ms : debounce interval in milliseconds 51 + - keypad,num-rows : number of rows in the matrix, see 52 + bindings/input/matrix-keymap.txt 53 + - keypad,num-columns : number of columns in the matrix, see 54 + bindings/input/matrix-keymap.txt 55 + - linux,keymap: the definition can be found in 56 + bindings/input/matrix-keymap.txt 57 + - linux,no-autorepeat: do no enable autorepeat feature. 58 + - linux,wakeup: use any event on keypad as wakeup event. 59 + 60 + Example: 61 + 62 + tc35893@44 { 63 + compatible = "toshiba,tc35893"; 64 + reg = <0x44>; 65 + interrupt-parent = <&gpio6>; 66 + interrupts = <26 IRQ_TYPE_EDGE_RISING>; 67 + 68 + interrupt-controller; 69 + #interrupt-cells = <1>; 70 + 71 + tc3589x_gpio { 72 + compatible = "toshiba,tc3589x-gpio"; 73 + interrupts = <0>; 74 + 75 + interrupt-controller; 76 + #interrupt-cells = <2>; 77 + gpio-controller; 78 + #gpio-cells = <2>; 79 + }; 80 + tc3589x_keypad { 81 + compatible = "toshiba,tc3589x-keypad"; 82 + interrupts = <6>; 83 + debounce-delay-ms = <4>; 84 + keypad,num-columns = <8>; 85 + keypad,num-rows = <8>; 86 + linux,no-autorepeat; 87 + linux,wakeup; 88 + linux,keymap = <0x0301006b 89 + 0x04010066 90 + 0x06040072 91 + 0x040200d7 92 + 0x0303006a 93 + 0x0205000e 94 + 0x0607008b 95 + 0x0500001c 96 + 0x0403000b 97 + 0x03040034 98 + 0x05020067 99 + 0x0305006c 100 + 0x040500e7 101 + 0x0005009e 102 + 0x06020073 103 + 0x01030039 104 + 0x07060069 105 + 0x050500d9>; 106 + }; 107 + };
+1 -1
Documentation/devicetree/bindings/mtd/gpmc-nand.txt
··· 22 22 width of 8 is assumed. 23 23 24 24 - ti,nand-ecc-opt: A string setting the ECC layout to use. One of: 25 - "sw" <deprecated> use "ham1" instead 25 + "sw" 1-bit Hamming ecc code via software 26 26 "hw" <deprecated> use "ham1" instead 27 27 "hw-romcode" <deprecated> use "ham1" instead 28 28 "ham1" 1-bit Hamming ecc code
+1 -1
Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
··· 62 62 #gpio-cells = <2>; 63 63 interrupt-controller; 64 64 #interrupt-cells = <2>; 65 - interrupts = <0 32 0x4>; 65 + interrupts = <0 16 0x4>; 66 66 67 67 pinctrl-names = "default"; 68 68 pinctrl-0 = <&gsbi5_uart_default>;
+1 -1
Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
··· 1 1 ADI AXI-SPDIF controller 2 2 3 3 Required properties: 4 - - compatible : Must be "adi,axi-spdif-1.00.a" 4 + - compatible : Must be "adi,axi-spdif-tx-1.00.a" 5 5 - reg : Must contain SPDIF core's registers location and length 6 6 - clocks : Pairs of phandle and specifier referencing the controller's clocks. 7 7 The controller expects two clocks, the clock used for the AXI interface and
+8 -6
Documentation/dma-buf-sharing.txt
··· 56 56 size_t size, int flags, 57 57 const char *exp_name) 58 58 59 - If this succeeds, dma_buf_export allocates a dma_buf structure, and returns a 60 - pointer to the same. It also associates an anonymous file with this buffer, 61 - so it can be exported. On failure to allocate the dma_buf object, it returns 62 - NULL. 59 + If this succeeds, dma_buf_export_named allocates a dma_buf structure, and 60 + returns a pointer to the same. It also associates an anonymous file with this 61 + buffer, so it can be exported. On failure to allocate the dma_buf object, 62 + it returns NULL. 63 63 64 64 'exp_name' is the name of exporter - to facilitate information while 65 65 debugging. ··· 76 76 drivers and/or processes. 77 77 78 78 Interface: 79 - int dma_buf_fd(struct dma_buf *dmabuf) 79 + int dma_buf_fd(struct dma_buf *dmabuf, int flags) 80 80 81 81 This API installs an fd for the anonymous file associated with this buffer; 82 82 returns either 'fd', or error. ··· 157 157 "dma_buf->ops->" indirection from the users of this interface. 158 158 159 159 In struct dma_buf_ops, unmap_dma_buf is defined as 160 - void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *); 160 + void (*unmap_dma_buf)(struct dma_buf_attachment *, 161 + struct sg_table *, 162 + enum dma_data_direction); 161 163 162 164 unmap_dma_buf signifies the end-of-DMA for the attachment provided. Like 163 165 map_dma_buf, this API also must be implemented by the exporter.
+33 -3
Documentation/kdump/kdump.txt
··· 18 18 a remote system. 19 19 20 20 Kdump and kexec are currently supported on the x86, x86_64, ppc64, ia64, 21 - and s390x architectures. 21 + s390x and arm architectures. 22 22 23 23 When the system kernel boots, it reserves a small section of memory for 24 24 the dump-capture kernel. This ensures that ongoing Direct Memory Access ··· 112 112 2) Or use the system kernel binary itself as dump-capture kernel and there is 113 113 no need to build a separate dump-capture kernel. This is possible 114 114 only with the architectures which support a relocatable kernel. As 115 - of today, i386, x86_64, ppc64 and ia64 architectures support relocatable 115 + of today, i386, x86_64, ppc64, ia64 and arm architectures support relocatable 116 116 kernel. 117 117 118 118 Building a relocatable kernel is advantageous from the point of view that ··· 241 241 kernel will be aligned to 64Mb, so if the start address is not then 242 242 any space below the alignment point will be wasted. 243 243 244 + Dump-capture kernel config options (Arch Dependent, arm) 245 + ---------------------------------------------------------- 246 + 247 + - To use a relocatable kernel, 248 + Enable "AUTO_ZRELADDR" support under "Boot" options: 249 + 250 + AUTO_ZRELADDR=y 244 251 245 252 Extended crashkernel syntax 246 253 =========================== ··· 261 254 The syntax is: 262 255 263 256 crashkernel=<range1>:<size1>[,<range2>:<size2>,...][@offset] 257 + range=start-[end] 258 + 259 + Please note, on arm, the offset is required. 260 + crashkernel=<range1>:<size1>[,<range2>:<size2>,...]@offset 264 261 range=start-[end] 265 262 266 263 'start' is inclusive and 'end' is exclusive. ··· 307 296 on the memory consumption of the kdump system. In general this is not 308 297 dependent on the memory size of the production system. 309 298 299 + On arm, use "crashkernel=Y@X". 
Note that the start address of the kernel 300 + will be aligned to 128MiB (0x08000000), so if the start address is not then 301 + any space below the alignment point may be overwritten by the dump-capture kernel, 302 + which means it is possible that the vmcore is not that precise as expected. 303 + 304 + 310 305 Load the Dump-capture Kernel 311 306 ============================ 312 307 ··· 332 315 - Use vmlinux or vmlinuz.gz 333 316 For s390x: 334 317 - Use image or bzImage 335 - 318 + For arm: 319 + - Use zImage 336 320 337 321 If you are using a uncompressed vmlinux image then use following command 338 322 to load dump-capture kernel. ··· 348 330 kexec -p <dump-capture-kernel-bzImage> \ 349 331 --initrd=<initrd-for-dump-capture-kernel> \ 350 332 --append="root=<root-dev> <arch-specific-options>" 333 + 334 + If you are using a compressed zImage, then use following command 335 + to load dump-capture kernel. 336 + 337 + kexec --type zImage -p <dump-capture-kernel-bzImage> \ 338 + --initrd=<initrd-for-dump-capture-kernel> \ 339 + --dtb=<dtb-for-dump-capture-kernel> \ 340 + --append="root=<root-dev> <arch-specific-options>" 341 + 351 342 352 343 Please note, that --args-linux does not need to be specified for ia64. 353 344 It is planned to make this a no-op on that architecture, but for now ··· 373 346 374 347 For s390x: 375 348 "1 maxcpus=1 cgroup_disable=memory" 349 + 350 + For arm: 351 + "1 maxcpus=1 reset_devices" 376 352 377 353 Notes on loading the dump-capture kernel: 378 354
+169 -40
Documentation/this_cpu_ops.txt
··· 2 2 ------------------- 3 3 4 4 this_cpu operations are a way of optimizing access to per cpu 5 - variables associated with the *currently* executing processor through 6 - the use of segment registers (or a dedicated register where the cpu 7 - permanently stored the beginning of the per cpu area for a specific 8 - processor). 5 + variables associated with the *currently* executing processor. This is 6 + done through the use of segment registers (or a dedicated register where 7 + the cpu permanently stored the beginning of the per cpu area for a 8 + specific processor). 9 9 10 - The this_cpu operations add a per cpu variable offset to the processor 11 - specific percpu base and encode that operation in the instruction 10 + this_cpu operations add a per cpu variable offset to the processor 11 + specific per cpu base and encode that operation in the instruction 12 12 operating on the per cpu variable. 13 13 14 - This means there are no atomicity issues between the calculation of 14 + This means that there are no atomicity issues between the calculation of 15 15 the offset and the operation on the data. Therefore it is not 16 - necessary to disable preempt or interrupts to ensure that the 16 + necessary to disable preemption or interrupts to ensure that the 17 17 processor is not changed between the calculation of the address and 18 18 the operation on the data. 19 19 20 20 Read-modify-write operations are of particular interest. Frequently 21 21 processors have special lower latency instructions that can operate 22 - without the typical synchronization overhead but still provide some 23 - sort of relaxed atomicity guarantee. The x86 for example can execute 24 - RMV (Read Modify Write) instructions like inc/dec/cmpxchg without the 22 + without the typical synchronization overhead, but still provide some 23 + sort of relaxed atomicity guarantees. 
The x86, for example, can execute 24 + RMW (Read Modify Write) instructions like inc/dec/cmpxchg without the 25 25 lock prefix and the associated latency penalty. 26 26 27 27 Access to the variable without the lock prefix is not synchronized but ··· 29 29 data specific to the currently executing processor. Only the current 30 30 processor should be accessing that variable and therefore there are no 31 31 concurrency issues with other processors in the system. 32 + 33 + Please note that accesses by remote processors to a per cpu area are 34 + exceptional situations and may impact performance and/or correctness 35 + (remote write operations) of local RMW operations via this_cpu_*. 36 + 37 + The main use of the this_cpu operations has been to optimize counter 38 + operations. 39 + 40 + The following this_cpu() operations with implied preemption protection 41 + are defined. These operations can be used without worrying about 42 + preemption and interrupts. 43 + 44 + this_cpu_add() 45 + this_cpu_read(pcp) 46 + this_cpu_write(pcp, val) 47 + this_cpu_add(pcp, val) 48 + this_cpu_and(pcp, val) 49 + this_cpu_or(pcp, val) 50 + this_cpu_add_return(pcp, val) 51 + this_cpu_xchg(pcp, nval) 52 + this_cpu_cmpxchg(pcp, oval, nval) 53 + this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) 54 + this_cpu_sub(pcp, val) 55 + this_cpu_inc(pcp) 56 + this_cpu_dec(pcp) 57 + this_cpu_sub_return(pcp, val) 58 + this_cpu_inc_return(pcp) 59 + this_cpu_dec_return(pcp) 60 + 61 + 62 + Inner working of this_cpu operations 63 + ------------------------------------ 32 64 33 65 On x86 the fs: or the gs: segment registers contain the base of the 34 66 per cpu area. It is then possible to simply use the segment override ··· 80 48 mov ax, gs:[x] 81 49 82 50 instead of a sequence of calculation of the address and then a fetch 83 - from that address which occurs with the percpu operations. Before 51 + from that address which occurs with the per cpu operations. 
Before 84 52 this_cpu_ops such sequence also required preempt disable/enable to 85 53 prevent the kernel from moving the thread to a different processor 86 54 while the calculation is performed. 87 55 88 - The main use of the this_cpu operations has been to optimize counter 89 - operations. 56 + Consider the following this_cpu operation: 90 57 91 58 this_cpu_inc(x) 92 59 93 - results in the following single instruction (no lock prefix!) 60 + The above results in the following single instruction (no lock prefix!) 94 61 95 62 inc gs:[x] 96 63 97 64 instead of the following operations required if there is no segment 98 - register. 65 + register: 99 66 100 67 int *y; 101 68 int cpu; ··· 104 73 (*y)++; 105 74 put_cpu(); 106 75 107 - Note that these operations can only be used on percpu data that is 76 + Note that these operations can only be used on per cpu data that is 108 77 reserved for a specific processor. Without disabling preemption in the 109 78 surrounding code this_cpu_inc() will only guarantee that one of the 110 - percpu counters is correctly incremented. However, there is no 79 + per cpu counters is correctly incremented. However, there is no 111 80 guarantee that the OS will not move the process directly before or 112 81 after the this_cpu instruction is executed. In general this means that 113 82 the value of the individual counters for each processor are ··· 117 86 Per cpu variables are used for performance reasons. Bouncing cache 118 87 lines can be avoided if multiple processors concurrently go through 119 88 the same code paths. Since each processor has its own per cpu 120 - variables no concurrent cacheline updates take place. The price that 89 + variables no concurrent cache line updates take place. The price that 121 90 has to be paid for this optimization is the need to add up the per cpu 122 - counters when the value of the counter is needed. 91 + counters when the value of a counter is needed. 
123 92 124 93 125 94 Special operations: ··· 131 100 of the per cpu variable that belongs to the currently executing 132 101 processor. this_cpu_ptr avoids multiple steps that the common 133 102 get_cpu/put_cpu sequence requires. No processor number is 134 - available. Instead the offset of the local per cpu area is simply 135 - added to the percpu offset. 103 + available. Instead, the offset of the local per cpu area is simply 104 + added to the per cpu offset. 136 105 106 + Note that this operation is usually used in a code segment when 107 + preemption has been disabled. The pointer is then used to 108 + access local per cpu data in a critical section. When preemption 109 + is re-enabled this pointer is usually no longer useful since it may 110 + no longer point to per cpu data of the current processor. 137 111 138 112 139 113 Per cpu variables and offsets 140 114 ----------------------------- 141 115 142 - Per cpu variables have *offsets* to the beginning of the percpu 116 + Per cpu variables have *offsets* to the beginning of the per cpu 143 117 area. They do not have addresses although they look like that in the 144 118 code. Offsets cannot be directly dereferenced. The offset must be 145 - added to a base pointer of a percpu area of a processor in order to 119 + added to a base pointer of a per cpu area of a processor in order to 146 120 form a valid address. 147 121 148 122 Therefore the use of x or &x outside of the context of per cpu 149 123 operations is invalid and will generally be treated like a NULL 150 124 pointer dereference. 151 125 152 - In the context of per cpu operations 126 + DEFINE_PER_CPU(int, x); 153 127 154 - x is a per cpu variable. Most this_cpu operations take a cpu 155 - variable. 128 + In the context of per cpu operations the above implies that x is a per 129 + cpu variable. Most this_cpu operations take a cpu variable. 156 130 157 - &x is the *offset* a per cpu variable. 
this_cpu_ptr() takes 158 - the offset of a per cpu variable which makes this look a bit 159 - strange. 131 + int __percpu *p = &x; 160 132 133 + &x and hence p is the *offset* of a per cpu variable. this_cpu_ptr() 134 + takes the offset of a per cpu variable which makes this look a bit 135 + strange. 161 136 162 137 163 138 Operations on a field of a per cpu structure ··· 189 152 190 153 struct s __percpu *ps = &p; 191 154 192 - z = this_cpu_dec(ps->m); 155 + this_cpu_dec(ps->m); 193 156 194 157 z = this_cpu_inc_return(ps->n); 195 158 ··· 209 172 Variants of this_cpu ops 210 173 ------------------------- 211 174 212 - this_cpu ops are interrupt safe. Some architecture do not support 175 + this_cpu ops are interrupt safe. Some architectures do not support 213 176 these per cpu local operations. In that case the operation must be 214 177 replaced by code that disables interrupts, then does the operations 215 - that are guaranteed to be atomic and then reenable interrupts. Doing 178 + that are guaranteed to be atomic and then re-enable interrupts. Doing 216 179 so is expensive. If there are other reasons why the scheduler cannot 217 180 change the processor we are executing on then there is no reason to 218 - disable interrupts. For that purpose the __this_cpu operations are 219 - provided. For example. 181 + disable interrupts. For that purpose the following __this_cpu operations 182 + are provided. 220 183 221 - __this_cpu_inc(x); 184 + These operations have no guarantee against concurrent interrupts or 185 + preemption. If a per cpu variable is not used in an interrupt context 186 + and the scheduler cannot preempt, then they are safe. If any interrupts 187 + still occur while an operation is in progress and if the interrupt too 188 + modifies the variable, then RMW actions can not be guaranteed to be 189 + safe. 
222 190 223 - Will increment x and will not fallback to code that disables 191 + __this_cpu_add() 192 + __this_cpu_read(pcp) 193 + __this_cpu_write(pcp, val) 194 + __this_cpu_add(pcp, val) 195 + __this_cpu_and(pcp, val) 196 + __this_cpu_or(pcp, val) 197 + __this_cpu_add_return(pcp, val) 198 + __this_cpu_xchg(pcp, nval) 199 + __this_cpu_cmpxchg(pcp, oval, nval) 200 + __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) 201 + __this_cpu_sub(pcp, val) 202 + __this_cpu_inc(pcp) 203 + __this_cpu_dec(pcp) 204 + __this_cpu_sub_return(pcp, val) 205 + __this_cpu_inc_return(pcp) 206 + __this_cpu_dec_return(pcp) 207 + 208 + 209 + Will increment x and will not fall-back to code that disables 224 210 interrupts on platforms that cannot accomplish atomicity through 225 211 address relocation and a Read-Modify-Write operation in the same 226 212 instruction. 227 - 228 213 229 214 230 215 &this_cpu_ptr(pp)->n vs this_cpu_ptr(&pp->n) 231 216 -------------------------------------------- 232 217 233 218 The first operation takes the offset and forms an address and then 234 - adds the offset of the n field. 219 + adds the offset of the n field. This may result in two add 220 + instructions emitted by the compiler. 235 221 236 222 The second one first adds the two offsets and then does the 237 223 relocation. IMHO the second form looks cleaner and has an easier time ··· 262 202 this_cpu_read() and friends are used. 263 203 264 204 265 - Christoph Lameter, April 3rd, 2013 205 + Remote access to per cpu data 206 + ------------------------------ 207 + 208 + Per cpu data structures are designed to be used by one cpu exclusively. 209 + If you use the variables as intended, this_cpu_ops() are guaranteed to 210 + be "atomic" as no other CPU has access to these data structures. 211 + 212 + There are special cases where you might need to access per cpu data 213 + structures remotely. 
It is usually safe to do a remote read access 214 + and that is frequently done to summarize counters. Remote write access 215 + something which could be problematic because this_cpu ops do not 216 + have lock semantics. A remote write may interfere with a this_cpu 217 + RMW operation. 218 + 219 + Remote write accesses to percpu data structures are highly discouraged 220 + unless absolutely necessary. Please consider using an IPI to wake up 221 + the remote CPU and perform the update to its per cpu area. 222 + 223 + To access per-cpu data structure remotely, typically the per_cpu_ptr() 224 + function is used: 225 + 226 + 227 + DEFINE_PER_CPU(struct data, datap); 228 + 229 + struct data *p = per_cpu_ptr(&datap, cpu); 230 + 231 + This makes it explicit that we are getting ready to access a percpu 232 + area remotely. 233 + 234 + You can also do the following to convert the datap offset to an address 235 + 236 + struct data *p = this_cpu_ptr(&datap); 237 + 238 + but, passing of pointers calculated via this_cpu_ptr to other cpus is 239 + unusual and should be avoided. 240 + 241 + Remote access are typically only for reading the status of another cpus 242 + per cpu data. Write accesses can cause unique problems due to the 243 + relaxed synchronization requirements for this_cpu operations. 244 + 245 + One example that illustrates some concerns with write operations is 246 + the following scenario that occurs because two per cpu variables 247 + share a cache-line but the relaxed synchronization is applied to 248 + only one process updating the cache-line. 249 + 250 + Consider the following example 251 + 252 + 253 + struct test { 254 + atomic_t a; 255 + int b; 256 + }; 257 + 258 + DEFINE_PER_CPU(struct test, onecacheline); 259 + 260 + There is some concern about what would happen if the field 'a' is updated 261 + remotely from one processor and the local processor would use this_cpu ops 262 + to update field b. 
Care should be taken that such simultaneous accesses to 263 + data within the same cache line are avoided. Also costly synchronization 264 + may be necessary. IPIs are generally recommended in such scenarios instead 265 + of a remote write to the per cpu area of another processor. 266 + 267 + Even in cases where the remote writes are rare, please bear in 268 + mind that a remote write will evict the cache line from the processor 269 + that most likely will access it. If the processor wakes up and finds a 270 + missing local cache line of a per cpu area, its performance and hence 271 + the wake up times will be affected. 272 + 273 + Christoph Lameter, August 4th, 2014 274 + Pranith Kumar, Aug 2nd, 2014
+13
MAINTAINERS
··· 1279 1279 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1280 1280 L: linux-rockchip@lists.infradead.org 1281 1281 S: Maintained 1282 + F: arch/arm/boot/dts/rk3* 1282 1283 F: arch/arm/mach-rockchip/ 1284 + F: drivers/clk/rockchip/ 1285 + F: drivers/i2c/busses/i2c-rk3x.c 1283 1286 F: drivers/*/*rockchip* 1287 + F: drivers/*/*/*rockchip* 1288 + F: sound/soc/rockchip/ 1284 1289 1285 1290 ARM/SAMSUNG ARM ARCHITECTURES 1286 1291 M: Ben Dooks <ben-linux@fluff.org> ··· 9561 9556 S: Maintained 9562 9557 F: Documentation/usb/ohci.txt 9563 9558 F: drivers/usb/host/ohci* 9559 + 9560 + USB OVER IP DRIVER 9561 + M: Valentina Manea <valentina.manea.m@gmail.com> 9562 + M: Shuah Khan <shuah.kh@samsung.com> 9563 + L: linux-usb@vger.kernel.org 9564 + S: Maintained 9565 + F: drivers/usb/usbip/ 9566 + F: tools/usb/usbip/ 9564 9567 9565 9568 USB PEGASUS DRIVER 9566 9569 M: Petko Manolov <petkan@nucleusys.com>
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 17 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc2 4 + EXTRAVERSION = -rc3 5 5 NAME = Shuffling Zombie Juror 6 6 7 7 # *DOCUMENTATION*
+8 -4
arch/alpha/include/asm/io.h
··· 500 500 #define outb_p outb 501 501 #define outw_p outw 502 502 #define outl_p outl 503 - #define readb_relaxed(addr) __raw_readb(addr) 504 - #define readw_relaxed(addr) __raw_readw(addr) 505 - #define readl_relaxed(addr) __raw_readl(addr) 506 - #define readq_relaxed(addr) __raw_readq(addr) 503 + #define readb_relaxed(addr) __raw_readb(addr) 504 + #define readw_relaxed(addr) __raw_readw(addr) 505 + #define readl_relaxed(addr) __raw_readl(addr) 506 + #define readq_relaxed(addr) __raw_readq(addr) 507 + #define writeb_relaxed(b, addr) __raw_writeb(b, addr) 508 + #define writew_relaxed(b, addr) __raw_writew(b, addr) 509 + #define writel_relaxed(b, addr) __raw_writel(b, addr) 510 + #define writeq_relaxed(b, addr) __raw_writeq(b, addr) 507 511 508 512 #define mmiowb() 509 513
+1 -1
arch/alpha/include/asm/unistd.h
··· 3 3 4 4 #include <uapi/asm/unistd.h> 5 5 6 - #define NR_SYSCALLS 508 6 + #define NR_SYSCALLS 511 7 7 8 8 #define __ARCH_WANT_OLD_READDIR 9 9 #define __ARCH_WANT_STAT64
+3
arch/alpha/include/uapi/asm/unistd.h
··· 469 469 #define __NR_process_vm_writev 505 470 470 #define __NR_kcmp 506 471 471 #define __NR_finit_module 507 472 + #define __NR_sched_setattr 508 473 + #define __NR_sched_getattr 509 474 + #define __NR_renameat2 510 472 475 473 476 #endif /* _UAPI_ALPHA_UNISTD_H */
+3
arch/alpha/kernel/systbls.S
··· 526 526 .quad sys_process_vm_writev /* 505 */ 527 527 .quad sys_kcmp 528 528 .quad sys_finit_module 529 + .quad sys_sched_setattr 530 + .quad sys_sched_getattr 531 + .quad sys_renameat2 /* 510 */ 529 532 530 533 .size sys_call_table, . - sys_call_table 531 534 .type sys_call_table, @object
+1
arch/arc/mm/cache_arc700.c
··· 581 581 tot_sz -= sz; 582 582 } 583 583 } 584 + EXPORT_SYMBOL(flush_icache_range); 584 585 585 586 /* 586 587 * General purpose helper to make I and D cache lines consistent.
-2
arch/arm/Kconfig
··· 1983 1983 config KEXEC 1984 1984 bool "Kexec system call (EXPERIMENTAL)" 1985 1985 depends on (!SMP || PM_SLEEP_SMP) 1986 - select CRYPTO 1987 - select CRYPTO_SHA256 1988 1986 help 1989 1987 kexec is a system call that implements the ability to shutdown your 1990 1988 current kernel, and to start another kernel. It is like a reboot
+8 -8
arch/arm/boot/dts/dra7.dtsi
··· 245 245 gpio-controller; 246 246 #gpio-cells = <2>; 247 247 interrupt-controller; 248 - #interrupt-cells = <1>; 248 + #interrupt-cells = <2>; 249 249 }; 250 250 251 251 gpio2: gpio@48055000 { ··· 256 256 gpio-controller; 257 257 #gpio-cells = <2>; 258 258 interrupt-controller; 259 - #interrupt-cells = <1>; 259 + #interrupt-cells = <2>; 260 260 }; 261 261 262 262 gpio3: gpio@48057000 { ··· 267 267 gpio-controller; 268 268 #gpio-cells = <2>; 269 269 interrupt-controller; 270 - #interrupt-cells = <1>; 270 + #interrupt-cells = <2>; 271 271 }; 272 272 273 273 gpio4: gpio@48059000 { ··· 278 278 gpio-controller; 279 279 #gpio-cells = <2>; 280 280 interrupt-controller; 281 - #interrupt-cells = <1>; 281 + #interrupt-cells = <2>; 282 282 }; 283 283 284 284 gpio5: gpio@4805b000 { ··· 289 289 gpio-controller; 290 290 #gpio-cells = <2>; 291 291 interrupt-controller; 292 - #interrupt-cells = <1>; 292 + #interrupt-cells = <2>; 293 293 }; 294 294 295 295 gpio6: gpio@4805d000 { ··· 300 300 gpio-controller; 301 301 #gpio-cells = <2>; 302 302 interrupt-controller; 303 - #interrupt-cells = <1>; 303 + #interrupt-cells = <2>; 304 304 }; 305 305 306 306 gpio7: gpio@48051000 { ··· 311 311 gpio-controller; 312 312 #gpio-cells = <2>; 313 313 interrupt-controller; 314 - #interrupt-cells = <1>; 314 + #interrupt-cells = <2>; 315 315 }; 316 316 317 317 gpio8: gpio@48053000 { ··· 322 322 gpio-controller; 323 323 #gpio-cells = <2>; 324 324 interrupt-controller; 325 - #interrupt-cells = <1>; 325 + #interrupt-cells = <2>; 326 326 }; 327 327 328 328 uart1: serial@4806a000 {
+8
arch/arm/boot/dts/imx53-qsrb.dts
··· 28 28 MX53_PAD_CSI0_DAT9__I2C1_SCL 0x400001ec 29 29 >; 30 30 }; 31 + 32 + pinctrl_pmic: pmicgrp { 33 + fsl,pins = < 34 + MX53_PAD_CSI0_DAT5__GPIO5_23 0x1e4 /* IRQ */ 35 + >; 36 + }; 31 37 }; 32 38 }; 33 39 ··· 44 38 45 39 pmic: mc34708@8 { 46 40 compatible = "fsl,mc34708"; 41 + pinctrl-names = "default"; 42 + pinctrl-0 = <&pinctrl_pmic>; 47 43 reg = <0x08>; 48 44 interrupt-parent = <&gpio5>; 49 45 interrupts = <23 0x8>;
+3 -1
arch/arm/boot/dts/imx6dl-hummingboard.dts
··· 58 58 59 59 sound-spdif { 60 60 compatible = "fsl,imx-audio-spdif"; 61 - model = "imx-spdif"; 61 + model = "On-board SPDIF"; 62 62 /* IMX6 doesn't implement this yet */ 63 63 spdif-controller = <&spdif>; 64 64 spdif-out; ··· 181 181 }; 182 182 183 183 &usbh1 { 184 + disable-over-current; 184 185 vbus-supply = <&reg_usbh1_vbus>; 185 186 status = "okay"; 186 187 }; 187 188 188 189 &usbotg { 190 + disable-over-current; 189 191 pinctrl-names = "default"; 190 192 pinctrl-0 = <&pinctrl_hummingboard_usbotg_id>; 191 193 vbus-supply = <&reg_usbotg_vbus>;
+14 -5
arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
··· 61 61 62 62 sound-spdif { 63 63 compatible = "fsl,imx-audio-spdif"; 64 - model = "imx-spdif"; 64 + model = "Integrated SPDIF"; 65 65 /* IMX6 doesn't implement this yet */ 66 66 spdif-controller = <&spdif>; 67 67 spdif-out; ··· 130 130 fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>; 131 131 }; 132 132 133 + pinctrl_cubox_i_usbh1: cubox-i-usbh1 { 134 + fsl,pins = <MX6QDL_PAD_GPIO_3__USB_H1_OC 0x1b0b0>; 135 + }; 136 + 133 137 pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus { 134 138 fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x4001b0b0>; 135 139 }; 136 140 137 - pinctrl_cubox_i_usbotg_id: cubox-i-usbotg-id { 141 + pinctrl_cubox_i_usbotg: cubox-i-usbotg { 138 142 /* 139 - * The Cubox-i pulls this low, but as it's pointless 143 + * The Cubox-i pulls ID low, but as it's pointless 140 144 * leaving it as a pull-up, even if it is just 10uA. 141 145 */ 142 - fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>; 146 + fsl,pins = < 147 + MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059 148 + MX6QDL_PAD_KEY_COL4__USB_OTG_OC 0x1b0b0 149 + >; 143 150 }; 144 151 145 152 pinctrl_cubox_i_usbotg_vbus: cubox-i-usbotg-vbus { ··· 180 173 }; 181 174 182 175 &usbh1 { 176 + pinctrl-names = "default"; 177 + pinctrl-0 = <&pinctrl_cubox_i_usbh1>; 183 178 vbus-supply = <&reg_usbh1_vbus>; 184 179 status = "okay"; 185 180 }; 186 181 187 182 &usbotg { 188 183 pinctrl-names = "default"; 189 - pinctrl-0 = <&pinctrl_cubox_i_usbotg_id>; 184 + pinctrl-0 = <&pinctrl_cubox_i_usbotg>; 190 185 vbus-supply = <&reg_usbotg_vbus>; 191 186 status = "okay"; 192 187 };
+1 -1
arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi
··· 17 17 enet { 18 18 pinctrl_microsom_enet_ar8035: microsom-enet-ar8035 { 19 19 fsl,pins = < 20 - MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0 20 + MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b8b0 21 21 MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0 22 22 /* AR8035 reset */ 23 23 MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x130b0
+1
arch/arm/boot/dts/omap3-beagle.dts
··· 292 292 &uart3 { 293 293 pinctrl-names = "default"; 294 294 pinctrl-0 = <&uart3_pins>; 295 + interrupts-extended = <&intc 74 &omap3_pmx_core OMAP3_UART3_RX>; 295 296 }; 296 297 297 298 &gpio1 {
+1 -1
arch/arm/boot/dts/omap3-n900.dts
··· 353 353 }; 354 354 355 355 twl_power: power { 356 - compatible = "ti,twl4030-power-n900"; 356 + compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off"; 357 357 ti,use_poweroff; 358 358 }; 359 359 };
+1 -1
arch/arm/boot/dts/omap3430-sdp.dts
··· 107 107 #address-cells = <1>; 108 108 #size-cells = <1>; 109 109 reg = <1 0 0x08000000>; 110 - ti,nand-ecc-opt = "ham1"; 110 + ti,nand-ecc-opt = "sw"; 111 111 nand-bus-width = <8>; 112 112 gpmc,cs-on-ns = <0>; 113 113 gpmc,cs-rd-off-ns = <36>;
+10 -6
arch/arm/boot/dts/omap54xx-clocks.dtsi
··· 367 367 368 368 l3_iclk_div: l3_iclk_div { 369 369 #clock-cells = <0>; 370 - compatible = "fixed-factor-clock"; 370 + compatible = "ti,divider-clock"; 371 + ti,max-div = <2>; 372 + ti,bit-shift = <4>; 373 + reg = <0x100>; 371 374 clocks = <&dpll_core_h12x2_ck>; 372 - clock-mult = <1>; 373 - clock-div = <1>; 375 + ti,index-power-of-two; 374 376 }; 375 377 376 378 gpu_l3_iclk: gpu_l3_iclk { ··· 385 383 386 384 l4_root_clk_div: l4_root_clk_div { 387 385 #clock-cells = <0>; 388 - compatible = "fixed-factor-clock"; 386 + compatible = "ti,divider-clock"; 387 + ti,max-div = <2>; 388 + ti,bit-shift = <8>; 389 + reg = <0x100>; 389 390 clocks = <&l3_iclk_div>; 390 - clock-mult = <1>; 391 - clock-div = <1>; 391 + ti,index-power-of-two; 392 392 }; 393 393 394 394 slimbus1_slimbus_clk: slimbus1_slimbus_clk {
-4
arch/arm/boot/dts/twl6030.dtsi
··· 83 83 regulator-always-on; 84 84 }; 85 85 86 - clk32kg: regulator-clk32kg { 87 - compatible = "ti,twl6030-clk32kg"; 88 - }; 89 - 90 86 twl_usb_comparator: usb-comparator { 91 87 compatible = "ti,twl6030-usb"; 92 88 interrupts = <4>, <10>;
-1
arch/arm/include/asm/cacheflush.h
··· 472 472 "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \ 473 473 "isb \n\t" \ 474 474 "bl v7_flush_dcache_"__stringify(level)" \n\t" \ 475 - "clrex \n\t" \ 476 475 "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \ 477 476 "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \ 478 477 "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
+2 -1
arch/arm/include/asm/cputype.h
··· 74 74 #define ARM_CPU_PART_CORTEX_A12 0x4100c0d0 75 75 #define ARM_CPU_PART_CORTEX_A17 0x4100c0e0 76 76 #define ARM_CPU_PART_CORTEX_A15 0x4100c0f0 77 + #define ARM_CPU_PART_MASK 0xff00fff0 77 78 78 79 #define ARM_CPU_XSCALE_ARCH_MASK 0xe000 79 80 #define ARM_CPU_XSCALE_ARCH_V1 0x2000 ··· 180 179 */ 181 180 static inline unsigned int __attribute_const__ read_cpuid_part(void) 182 181 { 183 - return read_cpuid_id() & 0xff00fff0; 182 + return read_cpuid_id() & ARM_CPU_PART_MASK; 184 183 } 185 184 186 185 static inline unsigned int __attribute_const__ __deprecated read_cpuid_part_number(void)
+1
arch/arm/include/asm/elf.h
··· 50 50 #define R_ARM_ABS32 2 51 51 #define R_ARM_CALL 28 52 52 #define R_ARM_JUMP24 29 53 + #define R_ARM_TARGET1 38 53 54 #define R_ARM_V4BX 40 54 55 #define R_ARM_PREL31 42 55 56 #define R_ARM_MOVW_ABS_NC 43
+15
arch/arm/include/asm/smp_plat.h
··· 8 8 #include <linux/cpumask.h> 9 9 #include <linux/err.h> 10 10 11 + #include <asm/cpu.h> 11 12 #include <asm/cputype.h> 12 13 13 14 /* ··· 24 23 #else 25 24 return true; 26 25 #endif 26 + } 27 + 28 + /** 29 + * smp_cpuid_part() - return part id for a given cpu 30 + * @cpu: logical cpu id. 31 + * 32 + * Return: part id of logical cpu passed as argument. 33 + */ 34 + static inline unsigned int smp_cpuid_part(int cpu) 35 + { 36 + struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpu); 37 + 38 + return is_smp() ? cpu_info->cpuid & ARM_CPU_PART_MASK : 39 + read_cpuid_part(); 27 40 } 28 41 29 42 /* all SMP configurations have the extended CPUID registers */
+15 -14
arch/arm/kernel/entry-header.S
··· 208 208 #endif 209 209 .endif 210 210 msr spsr_cxsf, \rpsr 211 - #if defined(CONFIG_CPU_V6) 212 - ldr r0, [sp] 213 - strex r1, r2, [sp] @ clear the exclusive monitor 214 - ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr 215 - #elif defined(CONFIG_CPU_32v6K) 216 - clrex @ clear the exclusive monitor 217 - ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr 218 - #else 219 - ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr 211 + #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) 212 + @ We must avoid clrex due to Cortex-A15 erratum #830321 213 + sub r0, sp, #4 @ uninhabited address 214 + strex r1, r2, [r0] @ clear the exclusive monitor 220 215 #endif 216 + ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr 221 217 .endm 222 218 223 219 .macro restore_user_regs, fast = 0, offset = 0 224 220 ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr 225 221 ldr lr, [sp, #\offset + S_PC]! @ get pc 226 222 msr spsr_cxsf, r1 @ save in spsr_svc 227 - #if defined(CONFIG_CPU_V6) 223 + #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) 224 + @ We must avoid clrex due to Cortex-A15 erratum #830321 228 225 strex r1, r2, [sp] @ clear the exclusive monitor 229 - #elif defined(CONFIG_CPU_32v6K) 230 - clrex @ clear the exclusive monitor 231 226 #endif 232 227 .if \fast 233 228 ldmdb sp, {r1 - lr}^ @ get calling r1 - lr ··· 256 261 .endif 257 262 ldr lr, [sp, #S_SP] @ top of the stack 258 263 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc 259 - clrex @ clear the exclusive monitor 264 + 265 + @ We must avoid clrex due to Cortex-A15 erratum #830321 266 + strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor 267 + 260 268 stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context 261 269 ldmia sp, {r0 - r12} 262 270 mov sp, lr ··· 280 282 .endm 281 283 #else /* ifdef CONFIG_CPU_V7M */ 282 284 .macro restore_user_regs, fast = 0, offset = 0 283 - clrex @ clear the exclusive monitor 284 285 mov r2, sp 285 286 load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr 286 287 ldr r1, [sp, #\offset + S_PSR] @ get calling 
cpsr 287 288 ldr lr, [sp, #\offset + S_PC] @ get pc 288 289 add sp, sp, #\offset + S_SP 289 290 msr spsr_cxsf, r1 @ save in spsr_svc 291 + 292 + @ We must avoid clrex due to Cortex-A15 erratum #830321 293 + strex r1, r2, [sp] @ clear the exclusive monitor 294 + 290 295 .if \fast 291 296 ldmdb sp, {r1 - r12} @ get calling r1 - r12 292 297 .else
+1
arch/arm/kernel/module.c
··· 91 91 break; 92 92 93 93 case R_ARM_ABS32: 94 + case R_ARM_TARGET1: 94 95 *(u32 *)loc += sym->st_value; 95 96 break; 96 97
-1
arch/arm/mach-bcm/Makefile
··· 36 36 37 37 ifeq ($(CONFIG_ARCH_BRCMSTB),y) 38 38 obj-y += brcmstb.o 39 - obj-$(CONFIG_SMP) += headsmp-brcmstb.o platsmp-brcmstb.o 40 39 endif
-19
arch/arm/mach-bcm/brcmstb.h
··· 1 - /* 2 - * Copyright (C) 2013-2014 Broadcom Corporation 3 - * 4 - * This program is free software; you can redistribute it and/or 5 - * modify it under the terms of the GNU General Public License as 6 - * published by the Free Software Foundation version 2. 7 - * 8 - * This program is distributed "as is" WITHOUT ANY WARRANTY of any 9 - * kind, whether express or implied; without even the implied warranty 10 - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 - * GNU General Public License for more details. 12 - */ 13 - 14 - #ifndef __BRCMSTB_H__ 15 - #define __BRCMSTB_H__ 16 - 17 - void brcmstb_secondary_startup(void); 18 - 19 - #endif /* __BRCMSTB_H__ */
-33
arch/arm/mach-bcm/headsmp-brcmstb.S
··· 1 - /* 2 - * SMP boot code for secondary CPUs 3 - * Based on arch/arm/mach-tegra/headsmp.S 4 - * 5 - * Copyright (C) 2010 NVIDIA, Inc. 6 - * Copyright (C) 2013-2014 Broadcom Corporation 7 - * 8 - * This program is free software; you can redistribute it and/or 9 - * modify it under the terms of the GNU General Public License as 10 - * published by the Free Software Foundation version 2. 11 - * 12 - * This program is distributed "as is" WITHOUT ANY WARRANTY of any 13 - * kind, whether express or implied; without even the implied warranty 14 - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - */ 17 - 18 - #include <asm/assembler.h> 19 - #include <linux/linkage.h> 20 - #include <linux/init.h> 21 - 22 - .section ".text.head", "ax" 23 - 24 - ENTRY(brcmstb_secondary_startup) 25 - /* 26 - * Ensure CPU is in a sane state by disabling all IRQs and switching 27 - * into SVC mode. 28 - */ 29 - setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r0 30 - 31 - bl v7_invalidate_l1 32 - b secondary_startup 33 - ENDPROC(brcmstb_secondary_startup)
-363
arch/arm/mach-bcm/platsmp-brcmstb.c
··· 1 - /* 2 - * Broadcom STB CPU SMP and hotplug support for ARM 3 - * 4 - * Copyright (C) 2013-2014 Broadcom Corporation 5 - * 6 - * This program is free software; you can redistribute it and/or 7 - * modify it under the terms of the GNU General Public License as 8 - * published by the Free Software Foundation version 2. 9 - * 10 - * This program is distributed "as is" WITHOUT ANY WARRANTY of any 11 - * kind, whether express or implied; without even the implied warranty 12 - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 - * GNU General Public License for more details. 14 - */ 15 - 16 - #include <linux/delay.h> 17 - #include <linux/errno.h> 18 - #include <linux/init.h> 19 - #include <linux/io.h> 20 - #include <linux/of_address.h> 21 - #include <linux/of_platform.h> 22 - #include <linux/printk.h> 23 - #include <linux/regmap.h> 24 - #include <linux/smp.h> 25 - #include <linux/mfd/syscon.h> 26 - #include <linux/spinlock.h> 27 - 28 - #include <asm/cacheflush.h> 29 - #include <asm/cp15.h> 30 - #include <asm/mach-types.h> 31 - #include <asm/smp_plat.h> 32 - 33 - #include "brcmstb.h" 34 - 35 - enum { 36 - ZONE_MAN_CLKEN_MASK = BIT(0), 37 - ZONE_MAN_RESET_CNTL_MASK = BIT(1), 38 - ZONE_MAN_MEM_PWR_MASK = BIT(4), 39 - ZONE_RESERVED_1_MASK = BIT(5), 40 - ZONE_MAN_ISO_CNTL_MASK = BIT(6), 41 - ZONE_MANUAL_CONTROL_MASK = BIT(7), 42 - ZONE_PWR_DN_REQ_MASK = BIT(9), 43 - ZONE_PWR_UP_REQ_MASK = BIT(10), 44 - ZONE_BLK_RST_ASSERT_MASK = BIT(12), 45 - ZONE_PWR_OFF_STATE_MASK = BIT(25), 46 - ZONE_PWR_ON_STATE_MASK = BIT(26), 47 - ZONE_DPG_PWR_STATE_MASK = BIT(28), 48 - ZONE_MEM_PWR_STATE_MASK = BIT(29), 49 - ZONE_RESET_STATE_MASK = BIT(31), 50 - CPU0_PWR_ZONE_CTRL_REG = 1, 51 - CPU_RESET_CONFIG_REG = 2, 52 - }; 53 - 54 - static void __iomem *cpubiuctrl_block; 55 - static void __iomem *hif_cont_block; 56 - static u32 cpu0_pwr_zone_ctrl_reg; 57 - static u32 cpu_rst_cfg_reg; 58 - static u32 hif_cont_reg; 59 - 60 - #ifdef CONFIG_HOTPLUG_CPU 61 - static 
DEFINE_PER_CPU_ALIGNED(int, per_cpu_sw_state); 62 - 63 - static int per_cpu_sw_state_rd(u32 cpu) 64 - { 65 - sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu))); 66 - return per_cpu(per_cpu_sw_state, cpu); 67 - } 68 - 69 - static void per_cpu_sw_state_wr(u32 cpu, int val) 70 - { 71 - per_cpu(per_cpu_sw_state, cpu) = val; 72 - dmb(); 73 - sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu))); 74 - dsb_sev(); 75 - } 76 - #else 77 - static inline void per_cpu_sw_state_wr(u32 cpu, int val) { } 78 - #endif 79 - 80 - static void __iomem *pwr_ctrl_get_base(u32 cpu) 81 - { 82 - void __iomem *base = cpubiuctrl_block + cpu0_pwr_zone_ctrl_reg; 83 - base += (cpu_logical_map(cpu) * 4); 84 - return base; 85 - } 86 - 87 - static u32 pwr_ctrl_rd(u32 cpu) 88 - { 89 - void __iomem *base = pwr_ctrl_get_base(cpu); 90 - return readl_relaxed(base); 91 - } 92 - 93 - static void pwr_ctrl_wr(u32 cpu, u32 val) 94 - { 95 - void __iomem *base = pwr_ctrl_get_base(cpu); 96 - writel(val, base); 97 - } 98 - 99 - static void cpu_rst_cfg_set(u32 cpu, int set) 100 - { 101 - u32 val; 102 - val = readl_relaxed(cpubiuctrl_block + cpu_rst_cfg_reg); 103 - if (set) 104 - val |= BIT(cpu_logical_map(cpu)); 105 - else 106 - val &= ~BIT(cpu_logical_map(cpu)); 107 - writel_relaxed(val, cpubiuctrl_block + cpu_rst_cfg_reg); 108 - } 109 - 110 - static void cpu_set_boot_addr(u32 cpu, unsigned long boot_addr) 111 - { 112 - const int reg_ofs = cpu_logical_map(cpu) * 8; 113 - writel_relaxed(0, hif_cont_block + hif_cont_reg + reg_ofs); 114 - writel_relaxed(boot_addr, hif_cont_block + hif_cont_reg + 4 + reg_ofs); 115 - } 116 - 117 - static void brcmstb_cpu_boot(u32 cpu) 118 - { 119 - pr_info("SMP: Booting CPU%d...\n", cpu); 120 - 121 - /* 122 - * set the reset vector to point to the secondary_startup 123 - * routine 124 - */ 125 - cpu_set_boot_addr(cpu, virt_to_phys(brcmstb_secondary_startup)); 126 - 127 - /* unhalt the cpu */ 128 - cpu_rst_cfg_set(cpu, 0); 129 - } 130 - 131 - static 
void brcmstb_cpu_power_on(u32 cpu) 132 - { 133 - /* 134 - * The secondary cores power was cut, so we must go through 135 - * power-on initialization. 136 - */ 137 - u32 tmp; 138 - 139 - pr_info("SMP: Powering up CPU%d...\n", cpu); 140 - 141 - /* Request zone power up */ 142 - pwr_ctrl_wr(cpu, ZONE_PWR_UP_REQ_MASK); 143 - 144 - /* Wait for the power up FSM to complete */ 145 - do { 146 - tmp = pwr_ctrl_rd(cpu); 147 - } while (!(tmp & ZONE_PWR_ON_STATE_MASK)); 148 - 149 - per_cpu_sw_state_wr(cpu, 1); 150 - } 151 - 152 - static int brcmstb_cpu_get_power_state(u32 cpu) 153 - { 154 - int tmp = pwr_ctrl_rd(cpu); 155 - return (tmp & ZONE_RESET_STATE_MASK) ? 0 : 1; 156 - } 157 - 158 - #ifdef CONFIG_HOTPLUG_CPU 159 - 160 - static void brcmstb_cpu_die(u32 cpu) 161 - { 162 - v7_exit_coherency_flush(all); 163 - 164 - /* Prevent all interrupts from reaching this CPU. */ 165 - arch_local_irq_disable(); 166 - 167 - /* 168 - * Final full barrier to ensure everything before this instruction has 169 - * quiesced. 170 - */ 171 - isb(); 172 - dsb(); 173 - 174 - per_cpu_sw_state_wr(cpu, 0); 175 - 176 - /* Sit and wait to die */ 177 - wfi(); 178 - 179 - /* We should never get here... 
*/ 180 - panic("Spurious interrupt on CPU %d received!\n", cpu); 181 - } 182 - 183 - static int brcmstb_cpu_kill(u32 cpu) 184 - { 185 - u32 tmp; 186 - 187 - pr_info("SMP: Powering down CPU%d...\n", cpu); 188 - 189 - while (per_cpu_sw_state_rd(cpu)) 190 - ; 191 - 192 - /* Program zone reset */ 193 - pwr_ctrl_wr(cpu, ZONE_RESET_STATE_MASK | ZONE_BLK_RST_ASSERT_MASK | 194 - ZONE_PWR_DN_REQ_MASK); 195 - 196 - /* Verify zone reset */ 197 - tmp = pwr_ctrl_rd(cpu); 198 - if (!(tmp & ZONE_RESET_STATE_MASK)) 199 - pr_err("%s: Zone reset bit for CPU %d not asserted!\n", 200 - __func__, cpu); 201 - 202 - /* Wait for power down */ 203 - do { 204 - tmp = pwr_ctrl_rd(cpu); 205 - } while (!(tmp & ZONE_PWR_OFF_STATE_MASK)); 206 - 207 - /* Settle-time from Broadcom-internal DVT reference code */ 208 - udelay(7); 209 - 210 - /* Assert reset on the CPU */ 211 - cpu_rst_cfg_set(cpu, 1); 212 - 213 - return 1; 214 - } 215 - 216 - #endif /* CONFIG_HOTPLUG_CPU */ 217 - 218 - static int __init setup_hifcpubiuctrl_regs(struct device_node *np) 219 - { 220 - int rc = 0; 221 - char *name; 222 - struct device_node *syscon_np = NULL; 223 - 224 - name = "syscon-cpu"; 225 - 226 - syscon_np = of_parse_phandle(np, name, 0); 227 - if (!syscon_np) { 228 - pr_err("can't find phandle %s\n", name); 229 - rc = -EINVAL; 230 - goto cleanup; 231 - } 232 - 233 - cpubiuctrl_block = of_iomap(syscon_np, 0); 234 - if (!cpubiuctrl_block) { 235 - pr_err("iomap failed for cpubiuctrl_block\n"); 236 - rc = -EINVAL; 237 - goto cleanup; 238 - } 239 - 240 - rc = of_property_read_u32_index(np, name, CPU0_PWR_ZONE_CTRL_REG, 241 - &cpu0_pwr_zone_ctrl_reg); 242 - if (rc) { 243 - pr_err("failed to read 1st entry from %s property (%d)\n", name, 244 - rc); 245 - rc = -EINVAL; 246 - goto cleanup; 247 - } 248 - 249 - rc = of_property_read_u32_index(np, name, CPU_RESET_CONFIG_REG, 250 - &cpu_rst_cfg_reg); 251 - if (rc) { 252 - pr_err("failed to read 2nd entry from %s property (%d)\n", name, 253 - rc); 254 - rc = -EINVAL; 255 - 
goto cleanup; 256 - } 257 - 258 - cleanup: 259 - if (syscon_np) 260 - of_node_put(syscon_np); 261 - 262 - return rc; 263 - } 264 - 265 - static int __init setup_hifcont_regs(struct device_node *np) 266 - { 267 - int rc = 0; 268 - char *name; 269 - struct device_node *syscon_np = NULL; 270 - 271 - name = "syscon-cont"; 272 - 273 - syscon_np = of_parse_phandle(np, name, 0); 274 - if (!syscon_np) { 275 - pr_err("can't find phandle %s\n", name); 276 - rc = -EINVAL; 277 - goto cleanup; 278 - } 279 - 280 - hif_cont_block = of_iomap(syscon_np, 0); 281 - if (!hif_cont_block) { 282 - pr_err("iomap failed for hif_cont_block\n"); 283 - rc = -EINVAL; 284 - goto cleanup; 285 - } 286 - 287 - /* offset is at top of hif_cont_block */ 288 - hif_cont_reg = 0; 289 - 290 - cleanup: 291 - if (syscon_np) 292 - of_node_put(syscon_np); 293 - 294 - return rc; 295 - } 296 - 297 - static void __init brcmstb_cpu_ctrl_setup(unsigned int max_cpus) 298 - { 299 - int rc; 300 - struct device_node *np; 301 - char *name; 302 - 303 - name = "brcm,brcmstb-smpboot"; 304 - np = of_find_compatible_node(NULL, NULL, name); 305 - if (!np) { 306 - pr_err("can't find compatible node %s\n", name); 307 - return; 308 - } 309 - 310 - rc = setup_hifcpubiuctrl_regs(np); 311 - if (rc) 312 - return; 313 - 314 - rc = setup_hifcont_regs(np); 315 - if (rc) 316 - return; 317 - } 318 - 319 - static DEFINE_SPINLOCK(boot_lock); 320 - 321 - static void brcmstb_secondary_init(unsigned int cpu) 322 - { 323 - /* 324 - * Synchronise with the boot thread. 
325 - */ 326 - spin_lock(&boot_lock); 327 - spin_unlock(&boot_lock); 328 - } 329 - 330 - static int brcmstb_boot_secondary(unsigned int cpu, struct task_struct *idle) 331 - { 332 - /* 333 - * set synchronisation state between this boot processor 334 - * and the secondary one 335 - */ 336 - spin_lock(&boot_lock); 337 - 338 - /* Bring up power to the core if necessary */ 339 - if (brcmstb_cpu_get_power_state(cpu) == 0) 340 - brcmstb_cpu_power_on(cpu); 341 - 342 - brcmstb_cpu_boot(cpu); 343 - 344 - /* 345 - * now the secondary core is starting up let it run its 346 - * calibrations, then wait for it to finish 347 - */ 348 - spin_unlock(&boot_lock); 349 - 350 - return 0; 351 - } 352 - 353 - static struct smp_operations brcmstb_smp_ops __initdata = { 354 - .smp_prepare_cpus = brcmstb_cpu_ctrl_setup, 355 - .smp_secondary_init = brcmstb_secondary_init, 356 - .smp_boot_secondary = brcmstb_boot_secondary, 357 - #ifdef CONFIG_HOTPLUG_CPU 358 - .cpu_kill = brcmstb_cpu_kill, 359 - .cpu_die = brcmstb_cpu_die, 360 - #endif 361 - }; 362 - 363 - CPU_METHOD_OF_DECLARE(brcmstb_smp, "brcm,brahma-b15", &brcmstb_smp_ops);
-1
arch/arm/mach-exynos/mcpm-exynos.c
··· 43 43 "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR\n\t" \ 44 44 "isb\n\t"\ 45 45 "bl v7_flush_dcache_"__stringify(level)"\n\t" \ 46 - "clrex\n\t"\ 47 46 "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR\n\t" \ 48 47 "bic r0, r0, #(1 << 6) @ disable local coherency\n\t" \ 49 48 /* Dummy Load of a device register to avoid Erratum 799270 */ \
+1 -1
arch/arm/mach-omap2/board-flash.c
··· 142 142 board_nand_data.nr_parts = nr_parts; 143 143 board_nand_data.devsize = nand_type; 144 144 145 - board_nand_data.ecc_opt = OMAP_ECC_HAM1_CODE_HW; 145 + board_nand_data.ecc_opt = OMAP_ECC_HAM1_CODE_SW; 146 146 gpmc_nand_init(&board_nand_data, gpmc_t); 147 147 } 148 148 #endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */
+2 -1
arch/arm/mach-omap2/gpmc-nand.c
··· 49 49 return 0; 50 50 51 51 /* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */ 52 - if (ecc_opt == OMAP_ECC_HAM1_CODE_HW) 52 + if (ecc_opt == OMAP_ECC_HAM1_CODE_HW || 53 + ecc_opt == OMAP_ECC_HAM1_CODE_SW) 53 54 return 1; 54 55 else 55 56 return 0;
+5 -2
arch/arm/mach-omap2/gpmc.c
··· 1403 1403 pr_err("%s: ti,nand-ecc-opt not found\n", __func__); 1404 1404 return -ENODEV; 1405 1405 } 1406 - if (!strcmp(s, "ham1") || !strcmp(s, "sw") || 1407 - !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) 1406 + 1407 + if (!strcmp(s, "sw")) 1408 + gpmc_nand_data->ecc_opt = OMAP_ECC_HAM1_CODE_SW; 1409 + else if (!strcmp(s, "ham1") || 1410 + !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) 1408 1411 gpmc_nand_data->ecc_opt = 1409 1412 OMAP_ECC_HAM1_CODE_HW; 1410 1413 else if (!strcmp(s, "bch4"))
+1 -1
arch/arm/mach-omap2/id.c
··· 663 663 664 664 default: 665 665 /* Unknown default to latest silicon rev as default*/ 666 - pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%d)\n", 666 + pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%x)\n", 667 667 __func__, idcode, hawkeye, rev); 668 668 omap_revision = DRA752_REV_ES1_1; 669 669 }
+1 -1
arch/arm/mach-omap2/omap_device.c
··· 56 56 57 57 r = clk_get_sys(dev_name(&od->pdev->dev), clk_alias); 58 58 if (!IS_ERR(r)) { 59 - dev_warn(&od->pdev->dev, 59 + dev_dbg(&od->pdev->dev, 60 60 "alias %s already exists\n", clk_alias); 61 61 clk_put(r); 62 62 return;
+7
arch/arm/mach-omap2/omap_hwmod.c
··· 2185 2185 oh->mux->pads_dynamic))) { 2186 2186 omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED); 2187 2187 _reconfigure_io_chain(); 2188 + } else if (oh->flags & HWMOD_FORCE_MSTANDBY) { 2189 + _reconfigure_io_chain(); 2188 2190 } 2189 2191 2190 2192 _add_initiator_dep(oh, mpu_oh); ··· 2292 2290 /* Mux pins for device idle if populated */ 2293 2291 if (oh->mux && oh->mux->pads_dynamic) { 2294 2292 omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE); 2293 + _reconfigure_io_chain(); 2294 + } else if (oh->flags & HWMOD_FORCE_MSTANDBY) { 2295 2295 _reconfigure_io_chain(); 2296 2296 } 2297 2297 ··· 3347 3343 return -EINVAL; 3348 3344 3349 3345 if (!ois) 3346 + return 0; 3347 + 3348 + if (ois[0] == NULL) /* Empty list */ 3350 3349 return 0; 3351 3350 3352 3351 if (!linkspace) {
+20 -2
arch/arm/mach-omap2/omap_hwmod_7xx_data.c
··· 35 35 #include "i2c.h" 36 36 #include "mmc.h" 37 37 #include "wd_timer.h" 38 + #include "soc.h" 38 39 39 40 /* Base offset for all DRA7XX interrupts external to MPUSS */ 40 41 #define DRA7XX_IRQ_GIC_START 32 ··· 3262 3261 &dra7xx_l4_per3__usb_otg_ss1, 3263 3262 &dra7xx_l4_per3__usb_otg_ss2, 3264 3263 &dra7xx_l4_per3__usb_otg_ss3, 3265 - &dra7xx_l4_per3__usb_otg_ss4, 3266 3264 &dra7xx_l3_main_1__vcp1, 3267 3265 &dra7xx_l4_per2__vcp1, 3268 3266 &dra7xx_l3_main_1__vcp2, ··· 3270 3270 NULL, 3271 3271 }; 3272 3272 3273 + static struct omap_hwmod_ocp_if *dra74x_hwmod_ocp_ifs[] __initdata = { 3274 + &dra7xx_l4_per3__usb_otg_ss4, 3275 + NULL, 3276 + }; 3277 + 3278 + static struct omap_hwmod_ocp_if *dra72x_hwmod_ocp_ifs[] __initdata = { 3279 + NULL, 3280 + }; 3281 + 3273 3282 int __init dra7xx_hwmod_init(void) 3274 3283 { 3284 + int ret; 3285 + 3275 3286 omap_hwmod_init(); 3276 - return omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs); 3287 + ret = omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs); 3288 + 3289 + if (!ret && soc_is_dra74x()) 3290 + return omap_hwmod_register_links(dra74x_hwmod_ocp_ifs); 3291 + else if (!ret && soc_is_dra72x()) 3292 + return omap_hwmod_register_links(dra72x_hwmod_ocp_ifs); 3293 + 3294 + return ret; 3277 3295 }
+6
arch/arm/mach-omap2/soc.h
··· 245 245 #define soc_is_omap54xx() 0 246 246 #define soc_is_omap543x() 0 247 247 #define soc_is_dra7xx() 0 248 + #define soc_is_dra74x() 0 249 + #define soc_is_dra72x() 0 248 250 249 251 #if defined(MULTI_OMAP2) 250 252 # if defined(CONFIG_ARCH_OMAP2) ··· 395 393 396 394 #if defined(CONFIG_SOC_DRA7XX) 397 395 #undef soc_is_dra7xx 396 + #undef soc_is_dra74x 397 + #undef soc_is_dra72x 398 398 #define soc_is_dra7xx() (of_machine_is_compatible("ti,dra7")) 399 + #define soc_is_dra74x() (of_machine_is_compatible("ti,dra74")) 400 + #define soc_is_dra72x() (of_machine_is_compatible("ti,dra72")) 399 401 #endif 400 402 401 403 /* Various silicon revisions for omap2 */
+2 -2
arch/arm/mach-shmobile/clock-r8a7790.c
··· 183 183 184 184 static struct clk div4_clks[DIV4_NR] = { 185 185 [DIV4_SDH] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 8, 0x0dff, CLK_ENABLE_ON_INIT), 186 - [DIV4_SD0] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 4, 0x1de0, CLK_ENABLE_ON_INIT), 187 - [DIV4_SD1] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 0, 0x1de0, CLK_ENABLE_ON_INIT), 186 + [DIV4_SD0] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 4, 0x1df0, CLK_ENABLE_ON_INIT), 187 + [DIV4_SD1] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 0, 0x1df0, CLK_ENABLE_ON_INIT), 188 188 }; 189 189 190 190 /* DIV6 clocks */
+1 -1
arch/arm/mach-shmobile/clock-r8a7791.c
··· 152 152 153 153 static struct clk div4_clks[DIV4_NR] = { 154 154 [DIV4_SDH] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 8, 0x0dff, CLK_ENABLE_ON_INIT), 155 - [DIV4_SD0] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 4, 0x1de0, CLK_ENABLE_ON_INIT), 155 + [DIV4_SD0] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 4, 0x1df0, CLK_ENABLE_ON_INIT), 156 156 }; 157 157 158 158 /* DIV6 clocks */
+1 -1
arch/arm/mach-shmobile/clock-sh73a0.c
··· 644 644 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ 645 645 CLKDEV_DEV_ID("e6cb0000.serial", &mstp_clks[MSTP207]), /* SCIFA5 */ 646 646 CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */ 647 - CLKDEV_DEV_ID("0xe6c3000.serial", &mstp_clks[MSTP206]), /* SCIFB */ 647 + CLKDEV_DEV_ID("e6c3000.serial", &mstp_clks[MSTP206]), /* SCIFB */ 648 648 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */ 649 649 CLKDEV_DEV_ID("e6c40000.serial", &mstp_clks[MSTP204]), /* SCIFA0 */ 650 650 CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */
+11 -3
arch/arm/mach-vexpress/spc.c
··· 426 426 427 427 static int ve_init_opp_table(struct device *cpu_dev) 428 428 { 429 - int cluster = topology_physical_package_id(cpu_dev->id); 430 - int idx, ret = 0, max_opp = info->num_opps[cluster]; 431 - struct ve_spc_opp *opps = info->opps[cluster]; 429 + int cluster; 430 + int idx, ret = 0, max_opp; 431 + struct ve_spc_opp *opps; 432 + 433 + cluster = topology_physical_package_id(cpu_dev->id); 434 + cluster = cluster < 0 ? 0 : cluster; 435 + 436 + max_opp = info->num_opps[cluster]; 437 + opps = info->opps[cluster]; 432 438 433 439 for (idx = 0; idx < max_opp; idx++, opps++) { 434 440 ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt); ··· 542 536 543 537 spc->hw.init = &init; 544 538 spc->cluster = topology_physical_package_id(cpu_dev->id); 539 + 540 + spc->cluster = spc->cluster < 0 ? 0 : spc->cluster; 545 541 546 542 init.name = dev_name(cpu_dev); 547 543 init.ops = &clk_spc_ops;
-6
arch/arm/mm/abort-ev6.S
··· 17 17 */ 18 18 .align 5 19 19 ENTRY(v6_early_abort) 20 - #ifdef CONFIG_CPU_V6 21 - sub r1, sp, #4 @ Get unused stack location 22 - strex r0, r1, [r1] @ Clear the exclusive monitor 23 - #elif defined(CONFIG_CPU_32v6K) 24 - clrex 25 - #endif 26 20 mrc p15, 0, r1, c5, c0, 0 @ get FSR 27 21 mrc p15, 0, r0, c6, c0, 0 @ get FAR 28 22 /*
-6
arch/arm/mm/abort-ev7.S
··· 13 13 */ 14 14 .align 5 15 15 ENTRY(v7_early_abort) 16 - /* 17 - * The effect of data aborts on on the exclusive access monitor are 18 - * UNPREDICTABLE. Do a CLREX to clear the state 19 - */ 20 - clrex 21 - 22 16 mrc p15, 0, r1, c5, c0, 0 @ get FSR 23 17 mrc p15, 0, r0, c6, c0, 0 @ get FAR 24 18
+1
arch/hexagon/mm/cache.c
··· 68 68 ); 69 69 local_irq_restore(flags); 70 70 } 71 + EXPORT_SYMBOL(flush_icache_range); 71 72 72 73 void hexagon_clean_dcache_range(unsigned long start, unsigned long end) 73 74 {
-2
arch/ia64/Kconfig
··· 549 549 config KEXEC 550 550 bool "kexec system call" 551 551 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU) 552 - select CRYPTO 553 - select CRYPTO_SHA256 554 552 help 555 553 kexec is a system call that implements the ability to shutdown your 556 554 current kernel, and to start another kernel. It is like a reboot
-2
arch/m68k/Kconfig
··· 91 91 config KEXEC 92 92 bool "kexec system call" 93 93 depends on M68KCLASSIC 94 - select CRYPTO 95 - select CRYPTO_SHA256 96 94 help 97 95 kexec is a system call that implements the ability to shutdown your 98 96 current kernel, and to start another kernel. It is like a reboot
-2
arch/mips/Kconfig
··· 2396 2396 2397 2397 config KEXEC 2398 2398 bool "Kexec system call" 2399 - select CRYPTO 2400 - select CRYPTO_SHA256 2401 2399 help 2402 2400 kexec is a system call that implements the ability to shutdown your 2403 2401 current kernel, and to start another kernel. It is like a reboot
-2
arch/powerpc/Kconfig
··· 399 399 config KEXEC 400 400 bool "kexec system call" 401 401 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) 402 - select CRYPTO 403 - select CRYPTO_SHA256 404 402 help 405 403 kexec is a system call that implements the ability to shutdown your 406 404 current kernel, and to start another kernel. It is like a reboot
-2
arch/s390/Kconfig
··· 48 48 49 49 config KEXEC 50 50 def_bool y 51 - select CRYPTO 52 - select CRYPTO_SHA256 53 51 54 52 config AUDIT_ARCH 55 53 def_bool y
+4 -1
arch/s390/include/uapi/asm/unistd.h
··· 283 283 #define __NR_sched_setattr 345 284 284 #define __NR_sched_getattr 346 285 285 #define __NR_renameat2 347 286 - #define NR_syscalls 348 286 + #define __NR_seccomp 348 287 + #define __NR_getrandom 349 288 + #define __NR_memfd_create 350 289 + #define NR_syscalls 351 287 290 288 291 /* 289 292 * There are some system calls that are not present on 64 bit, some
+3
arch/s390/kernel/compat_wrapper.c
··· 214 214 COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags); 215 215 COMPAT_SYSCALL_WRAP4(sched_getattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, size, unsigned int, flags); 216 216 COMPAT_SYSCALL_WRAP5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags); 217 + COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, const char __user *, uargs) 218 + COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags) 219 + COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, flags)
+7
arch/s390/kernel/ipl.c
··· 2060 2060 S390_lowcore.program_new_psw.addr = 2061 2061 PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; 2062 2062 2063 + /* 2064 + * Clear subchannel ID and number to signal new kernel that no CCW or 2065 + * SCSI IPL has been done (for kexec and kdump) 2066 + */ 2067 + S390_lowcore.subchannel_id = 0; 2068 + S390_lowcore.subchannel_nr = 0; 2069 + 2063 2070 /* Store status at absolute zero */ 2064 2071 store_status(); 2065 2072
+19
arch/s390/kernel/setup.c
··· 24 24 #include <linux/stddef.h> 25 25 #include <linux/unistd.h> 26 26 #include <linux/ptrace.h> 27 + #include <linux/random.h> 27 28 #include <linux/user.h> 28 29 #include <linux/tty.h> 29 30 #include <linux/ioport.h> ··· 62 61 #include <asm/diag.h> 63 62 #include <asm/os_info.h> 64 63 #include <asm/sclp.h> 64 + #include <asm/sysinfo.h> 65 65 #include "entry.h" 66 66 67 67 /* ··· 768 766 #endif 769 767 770 768 get_cpu_id(&cpu_id); 769 + add_device_randomness(&cpu_id, sizeof(cpu_id)); 771 770 switch (cpu_id.machine) { 772 771 case 0x9672: 773 772 #if !defined(CONFIG_64BIT) ··· 804 801 strcpy(elf_platform, "zEC12"); 805 802 break; 806 803 } 804 + } 805 + 806 + /* 807 + * Add system information as device randomness 808 + */ 809 + static void __init setup_randomness(void) 810 + { 811 + struct sysinfo_3_2_2 *vmms; 812 + 813 + vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL); 814 + if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count) 815 + add_device_randomness(&vmms, vmms->count); 816 + free_page((unsigned long) vmms); 807 817 } 808 818 809 819 /* ··· 917 901 918 902 /* Setup zfcpdump support */ 919 903 setup_zfcpdump(); 904 + 905 + /* Add system specific data to the random pool */ 906 + setup_randomness(); 920 907 } 921 908 922 909 #ifdef CONFIG_32BIT
+3
arch/s390/kernel/syscalls.S
··· 356 356 SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr) /* 345 */ 357 357 SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr) 358 358 SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2) 359 + SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp) 360 + SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom) 361 + SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
-2
arch/sh/Kconfig
··· 598 598 config KEXEC 599 599 bool "kexec system call (EXPERIMENTAL)" 600 600 depends on SUPERH32 && MMU 601 - select CRYPTO 602 - select CRYPTO_SHA256 603 601 help 604 602 kexec is a system call that implements the ability to shutdown your 605 603 current kernel, and to start another kernel. It is like a reboot
+1
arch/sh/mm/cache.c
··· 229 229 230 230 cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1); 231 231 } 232 + EXPORT_SYMBOL(flush_icache_range); 232 233 233 234 void flush_icache_page(struct vm_area_struct *vma, struct page *page) 234 235 {
-2
arch/tile/Kconfig
··· 191 191 192 192 config KEXEC 193 193 bool "kexec system call" 194 - select CRYPTO 195 - select CRYPTO_SHA256 196 194 ---help--- 197 195 kexec is a system call that implements the ability to shutdown your 198 196 current kernel, and to start another kernel. It is like a reboot
+1
arch/tile/kernel/smp.c
··· 183 183 preempt_enable(); 184 184 } 185 185 } 186 + EXPORT_SYMBOL(flush_icache_range); 186 187 187 188 188 189 /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
+5 -4
arch/unicore32/kernel/signal.c
··· 254 254 255 255 err |= setup_sigframe(frame, regs, set); 256 256 if (err == 0) 257 - err |= setup_return(regs, &ksig->ka, frame->retcode, frame, usig); 257 + err |= setup_return(regs, &ksig->ka, frame->retcode, frame, 258 + ksig->sig); 258 259 259 260 return err; 260 261 } ··· 277 276 err |= __save_altstack(&frame->sig.uc.uc_stack, regs->UCreg_sp); 278 277 err |= setup_sigframe(&frame->sig, regs, set); 279 278 if (err == 0) 280 - err |= setup_return(regs, &ksig->ka, frame->sig.retcode, frame, usig); 279 + err |= setup_return(regs, &ksig->ka, frame->sig.retcode, frame, 280 + ksig->sig); 281 281 282 282 if (err == 0) { 283 283 /* ··· 305 303 int syscall) 306 304 { 307 305 struct thread_info *thread = current_thread_info(); 308 - struct task_struct *tsk = current; 309 306 sigset_t *oldset = sigmask_to_save(); 310 307 int usig = ksig->sig; 311 308 int ret; ··· 374 373 if (!user_mode(regs)) 375 374 return; 376 375 377 - if (get_signsl(&ksig)) { 376 + if (get_signal(&ksig)) { 378 377 handle_signal(&ksig, regs, syscall); 379 378 return; 380 379 }
+1 -3
arch/x86/Kbuild
··· 17 17 obj-y += platform/ 18 18 obj-y += net/ 19 19 20 - ifeq ($(CONFIG_X86_64),y) 21 - obj-$(CONFIG_KEXEC) += purgatory/ 22 - endif 20 + obj-$(CONFIG_KEXEC_FILE) += purgatory/
+14 -4
arch/x86/Kconfig
··· 1585 1585 1586 1586 config KEXEC 1587 1587 bool "kexec system call" 1588 - select BUILD_BIN2C 1589 - select CRYPTO 1590 - select CRYPTO_SHA256 1591 1588 ---help--- 1592 1589 kexec is a system call that implements the ability to shutdown your 1593 1590 current kernel, and to start another kernel. It is like a reboot ··· 1599 1602 interface is strongly in flux, so no good recommendation can be 1600 1603 made. 1601 1604 1605 + config KEXEC_FILE 1606 + bool "kexec file based system call" 1607 + select BUILD_BIN2C 1608 + depends on KEXEC 1609 + depends on X86_64 1610 + depends on CRYPTO=y 1611 + depends on CRYPTO_SHA256=y 1612 + ---help--- 1613 + This is new version of kexec system call. This system call is 1614 + file based and takes file descriptors as system call argument 1615 + for kernel and initramfs as opposed to list of segments as 1616 + accepted by previous system call. 1617 + 1602 1618 config KEXEC_VERIFY_SIG 1603 1619 bool "Verify kernel signature during kexec_file_load() syscall" 1604 - depends on KEXEC 1620 + depends on KEXEC_FILE 1605 1621 ---help--- 1606 1622 This option makes kernel signature verification mandatory for 1607 1623 kexec_file_load() syscall. If kernel is signature can not be
+2 -4
arch/x86/Makefile
··· 184 184 $(Q)$(MAKE) $(build)=arch/x86/syscalls all 185 185 186 186 archprepare: 187 - ifeq ($(CONFIG_KEXEC),y) 188 - # Build only for 64bit. No loaders for 32bit yet. 189 - ifeq ($(CONFIG_X86_64),y) 187 + ifeq ($(CONFIG_KEXEC_FILE),y) 190 188 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c 191 - endif 192 189 endif 193 190 194 191 ### ··· 251 254 $(Q)rm -rf $(objtree)/arch/x86_64 252 255 $(Q)$(MAKE) $(clean)=$(boot) 253 256 $(Q)$(MAKE) $(clean)=arch/x86/tools 257 + $(Q)$(MAKE) $(clean)=arch/x86/purgatory 254 258 255 259 PHONY += kvmconfig 256 260 kvmconfig:
+2
arch/x86/include/asm/io_apic.h
··· 227 227 228 228 extern void io_apic_eoi(unsigned int apic, unsigned int vector); 229 229 230 + extern bool mp_should_keep_irq(struct device *dev); 231 + 230 232 #else /* !CONFIG_X86_IO_APIC */ 231 233 232 234 #define io_apic_assign_pci_irqs 0
+7 -2
arch/x86/include/asm/pgtable.h
··· 131 131 132 132 static inline int pte_special(pte_t pte) 133 133 { 134 - return (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_SPECIAL)) == 135 - (_PAGE_PRESENT|_PAGE_SPECIAL); 134 + /* 135 + * See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h. 136 + * On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 == 137 + * __PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL. 138 + */ 139 + return (pte_flags(pte) & _PAGE_SPECIAL) && 140 + (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE)); 136 141 } 137 142 138 143 static inline unsigned long pte_pfn(pte_t pte)
+1 -1
arch/x86/kernel/Makefile
··· 71 71 obj-$(CONFIG_X86_TSC) += trace_clock.o 72 72 obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o 73 73 obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o 74 + obj-$(CONFIG_KEXEC_FILE) += kexec-bzimage64.o 74 75 obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o 75 76 obj-y += kprobes/ 76 77 obj-$(CONFIG_MODULES) += module.o ··· 119 118 120 119 obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o 121 120 obj-y += vsmp_64.o 122 - obj-$(CONFIG_KEXEC) += kexec-bzimage64.o 123 121 endif
+26 -1
arch/x86/kernel/apic/io_apic.c
··· 1070 1070 } 1071 1071 1072 1072 if (flags & IOAPIC_MAP_ALLOC) { 1073 + /* special handling for legacy IRQs */ 1074 + if (irq < nr_legacy_irqs() && info->count == 1 && 1075 + mp_irqdomain_map(domain, irq, pin) != 0) 1076 + irq = -1; 1077 + 1073 1078 if (irq > 0) 1074 1079 info->count++; 1075 1080 else if (info->count == 0) ··· 3901 3896 info->polarity = 1; 3902 3897 } 3903 3898 info->node = NUMA_NO_NODE; 3904 - info->set = 1; 3899 + 3900 + /* 3901 + * setup_IO_APIC_irqs() programs all legacy IRQs with default 3902 + * trigger and polarity attributes. Don't set the flag for that 3903 + * case so the first legacy IRQ user could reprogram the pin 3904 + * with real trigger and polarity attributes. 3905 + */ 3906 + if (virq >= nr_legacy_irqs() || info->count) 3907 + info->set = 1; 3905 3908 } 3906 3909 set_io_apic_irq_attr(&attr, ioapic, hwirq, info->trigger, 3907 3910 info->polarity); ··· 3957 3944 mutex_unlock(&ioapic_mutex); 3958 3945 3959 3946 return ret; 3947 + } 3948 + 3949 + bool mp_should_keep_irq(struct device *dev) 3950 + { 3951 + if (dev->power.is_prepared) 3952 + return true; 3953 + #ifdef CONFIG_PM_RUNTIME 3954 + if (dev->power.runtime_status == RPM_SUSPENDING) 3955 + return true; 3956 + #endif 3957 + 3958 + return false; 3960 3959 } 3961 3960 3962 3961 /* Enable IOAPIC early just for system timer */
+2 -4
arch/x86/kernel/crash.c
··· 182 182 crash_save_cpu(regs, safe_smp_processor_id()); 183 183 } 184 184 185 - #ifdef CONFIG_X86_64 186 - 185 + #ifdef CONFIG_KEXEC_FILE 187 186 static int get_nr_ram_ranges_callback(unsigned long start_pfn, 188 187 unsigned long nr_pfn, void *arg) 189 188 { ··· 695 696 696 697 return ret; 697 698 } 698 - 699 - #endif /* CONFIG_X86_64 */ 699 + #endif /* CONFIG_KEXEC_FILE */
+1 -1
arch/x86/kernel/irqinit.c
··· 203 203 set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]); 204 204 } 205 205 206 - if (!acpi_ioapic && !of_ioapic) 206 + if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs()) 207 207 setup_irq(2, &irq2); 208 208 209 209 #ifdef CONFIG_X86_32
+11
arch/x86/kernel/machine_kexec_64.c
··· 25 25 #include <asm/debugreg.h> 26 26 #include <asm/kexec-bzimage64.h> 27 27 28 + #ifdef CONFIG_KEXEC_FILE 28 29 static struct kexec_file_ops *kexec_file_loaders[] = { 29 30 &kexec_bzImage64_ops, 30 31 }; 32 + #endif 31 33 32 34 static void free_transition_pgtable(struct kimage *image) 33 35 { ··· 180 178 ); 181 179 } 182 180 181 + #ifdef CONFIG_KEXEC_FILE 183 182 /* Update purgatory as needed after various image segments have been prepared */ 184 183 static int arch_update_purgatory(struct kimage *image) 185 184 { ··· 212 209 213 210 return ret; 214 211 } 212 + #else /* !CONFIG_KEXEC_FILE */ 213 + static inline int arch_update_purgatory(struct kimage *image) 214 + { 215 + return 0; 216 + } 217 + #endif /* CONFIG_KEXEC_FILE */ 215 218 216 219 int machine_kexec_prepare(struct kimage *image) 217 220 { ··· 338 329 339 330 /* arch-dependent functionality related to kexec file-based syscall */ 340 331 332 + #ifdef CONFIG_KEXEC_FILE 341 333 int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, 342 334 unsigned long buf_len) 343 335 { ··· 532 522 (int)ELF64_R_TYPE(rel[i].r_info), value); 533 523 return -ENOEXEC; 534 524 } 525 + #endif /* CONFIG_KEXEC_FILE */
+2
arch/x86/kernel/time.c
··· 68 68 69 69 void __init setup_default_timer_irq(void) 70 70 { 71 + if (!nr_legacy_irqs()) 72 + return; 71 73 setup_irq(0, &irq0); 72 74 } 73 75
+1 -1
arch/x86/pci/intel_mid_pci.c
··· 229 229 230 230 static void intel_mid_pci_irq_disable(struct pci_dev *dev) 231 231 { 232 - if (!dev->dev.power.is_prepared && dev->irq > 0) 232 + if (!mp_should_keep_irq(&dev->dev) && dev->irq > 0) 233 233 mp_unmap_irq(dev->irq); 234 234 } 235 235
+1 -1
arch/x86/pci/irq.c
··· 1256 1256 1257 1257 static void pirq_disable_irq(struct pci_dev *dev) 1258 1258 { 1259 - if (io_apic_assign_pci_irqs && !dev->dev.power.is_prepared && 1259 + if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) && 1260 1260 dev->irq) { 1261 1261 mp_unmap_irq(dev->irq); 1262 1262 dev->irq = 0;
+2 -4
arch/x86/purgatory/Makefile
··· 11 11 # sure how to relocate those. Like kexec-tools, use custom flags. 12 12 13 13 KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large 14 + KBUILD_CFLAGS += -m$(BITS) 14 15 15 16 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE 16 17 $(call if_changed,ld) ··· 25 24 $(call if_changed,bin2c) 26 25 27 26 28 - # No loaders for 32bits yet. 29 - ifeq ($(CONFIG_X86_64),y) 30 - obj-$(CONFIG_KEXEC) += kexec-purgatory.o 31 - endif 27 + obj-$(CONFIG_KEXEC_FILE) += kexec-purgatory.o
+75 -17
arch/xtensa/Kconfig
··· 4 4 config XTENSA 5 5 def_bool y 6 6 select ARCH_WANT_FRAME_POINTERS 7 - select HAVE_IDE 8 - select GENERIC_ATOMIC64 9 - select GENERIC_CLOCKEVENTS 10 - select VIRT_TO_BUS 11 - select GENERIC_IRQ_SHOW 12 - select GENERIC_SCHED_CLOCK 13 - select MODULES_USE_ELF_RELA 14 - select GENERIC_PCI_IOMAP 15 7 select ARCH_WANT_IPC_PARSE_VERSION 16 8 select ARCH_WANT_OPTIONAL_GPIOLIB 17 9 select BUILDTIME_EXTABLE_SORT 18 10 select CLONE_BACKWARDS 19 - select IRQ_DOMAIN 20 - select HAVE_OPROFILE 11 + select COMMON_CLK 12 + select GENERIC_ATOMIC64 13 + select GENERIC_CLOCKEVENTS 14 + select GENERIC_IRQ_SHOW 15 + select GENERIC_PCI_IOMAP 16 + select GENERIC_SCHED_CLOCK 21 17 select HAVE_FUNCTION_TRACER 22 18 select HAVE_IRQ_TIME_ACCOUNTING 19 + select HAVE_OPROFILE 23 20 select HAVE_PERF_EVENTS 24 - select COMMON_CLK 21 + select IRQ_DOMAIN 22 + select MODULES_USE_ELF_RELA 23 + select VIRT_TO_BUS 25 24 help 26 25 Xtensa processors are 32-bit RISC machines designed by Tensilica 27 26 primarily for embedded systems. These processors are both ··· 61 62 def_bool y 62 63 63 64 config MMU 64 - def_bool n 65 + bool 66 + default n if !XTENSA_VARIANT_CUSTOM 67 + default XTENSA_VARIANT_MMU if XTENSA_VARIANT_CUSTOM 65 68 66 69 config VARIANT_IRQ_SWITCH 67 70 def_bool n ··· 103 102 select VARIANT_IRQ_SWITCH 104 103 select ARCH_REQUIRE_GPIOLIB 105 104 select XTENSA_CALIBRATE_CCOUNT 105 + 106 + config XTENSA_VARIANT_CUSTOM 107 + bool "Custom Xtensa processor configuration" 108 + select MAY_HAVE_SMP 109 + select HAVE_XTENSA_GPIO32 110 + help 111 + Select this variant to use a custom Xtensa processor configuration. 112 + You will be prompted for a processor variant CORENAME. 106 113 endchoice 114 + 115 + config XTENSA_VARIANT_CUSTOM_NAME 116 + string "Xtensa Processor Custom Core Variant Name" 117 + depends on XTENSA_VARIANT_CUSTOM 118 + help 119 + Provide the name of a custom Xtensa processor variant. 120 + This CORENAME selects arch/xtensa/variant/CORENAME. 
121 + Dont forget you have to select MMU if you have one. 122 + 123 + config XTENSA_VARIANT_NAME 124 + string 125 + default "dc232b" if XTENSA_VARIANT_DC232B 126 + default "dc233c" if XTENSA_VARIANT_DC233C 127 + default "fsf" if XTENSA_VARIANT_FSF 128 + default "s6000" if XTENSA_VARIANT_S6000 129 + default XTENSA_VARIANT_CUSTOM_NAME if XTENSA_VARIANT_CUSTOM 130 + 131 + config XTENSA_VARIANT_MMU 132 + bool "Core variant has a Full MMU (TLB, Pages, Protection, etc)" 133 + depends on XTENSA_VARIANT_CUSTOM 134 + default y 135 + help 136 + Build a Conventional Kernel with full MMU support, 137 + ie: it supports a TLB with auto-loading, page protection. 107 138 108 139 config XTENSA_UNALIGNED_USER 109 140 bool "Unaligned memory access in use space" ··· 189 156 190 157 Say N if you want to disable CPU hotplug. 191 158 192 - config MATH_EMULATION 193 - bool "Math emulation" 194 - help 195 - Can we use information of configuration file? 196 - 197 159 config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX 198 160 bool "Initialize Xtensa MMU inside the Linux kernel code" 161 + depends on MMU 199 162 default y 200 163 help 201 164 Earlier version initialized the MMU in the exception vector ··· 221 192 222 193 config HIGHMEM 223 194 bool "High Memory Support" 195 + depends on MMU 224 196 help 225 197 Linux can use the full amount of RAM in the system by 226 198 default. However, the default MMUv2 setup only maps the ··· 237 207 N here. 238 208 239 209 If unsure, say Y. 210 + 211 + config FAST_SYSCALL_XTENSA 212 + bool "Enable fast atomic syscalls" 213 + default n 214 + help 215 + fast_syscall_xtensa is a syscall that can make atomic operations 216 + on UP kernel when processor has no s32c1i support. 217 + 218 + This syscall is deprecated. It may have issues when called with 219 + invalid arguments. It is provided only for backwards compatibility. 220 + Only enable it if your userspace software requires it. 221 + 222 + If unsure, say N. 
223 + 224 + config FAST_SYSCALL_SPILL_REGISTERS 225 + bool "Enable spill registers syscall" 226 + default n 227 + help 228 + fast_syscall_spill_registers is a syscall that spills all active 229 + register windows of a calling userspace task onto its stack. 230 + 231 + This syscall is deprecated. It may have issues when called with 232 + invalid arguments. It is provided only for backwards compatibility. 233 + Only enable it if your userspace software requires it. 234 + 235 + If unsure, say N. 240 236 241 237 endmenu 242 238 ··· 306 250 307 251 config XTENSA_PLATFORM_XT2000 308 252 bool "XT2000" 253 + select HAVE_IDE 309 254 help 310 255 XT2000 is the name of Tensilica's feature-rich emulation platform. 311 256 This hardware is capable of running a full Linux distribution. 312 257 313 258 config XTENSA_PLATFORM_S6105 314 259 bool "S6105" 260 + select HAVE_IDE 315 261 select SERIAL_CONSOLE 316 262 select NO_IOPORT_MAP 317 263
+2 -5
arch/xtensa/Makefile
··· 4 4 # for more details. 5 5 # 6 6 # Copyright (C) 2001 - 2005 Tensilica Inc. 7 + # Copyright (C) 2014 Cadence Design Systems Inc. 7 8 # 8 9 # This file is included by the global makefile so that you can add your own 9 10 # architecture-specific flags and dependencies. Remember to do have actions ··· 14 13 # Core configuration. 15 14 # (Use VAR=<xtensa_config> to use another default compiler.) 16 15 17 - variant-$(CONFIG_XTENSA_VARIANT_FSF) := fsf 18 - variant-$(CONFIG_XTENSA_VARIANT_DC232B) := dc232b 19 - variant-$(CONFIG_XTENSA_VARIANT_DC233C) := dc233c 20 - variant-$(CONFIG_XTENSA_VARIANT_S6000) := s6000 21 - variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM) := custom 16 + variant-y := $(patsubst "%",%,$(CONFIG_XTENSA_VARIANT_NAME)) 22 17 23 18 VARIANT = $(variant-y) 24 19 export VARIANT
+4 -1
arch/xtensa/boot/dts/kc705.dts
··· 4 4 5 5 / { 6 6 compatible = "cdns,xtensa-kc705"; 7 + chosen { 8 + bootargs = "earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000"; 9 + }; 7 10 memory@0 { 8 11 device_type = "memory"; 9 - reg = <0x00000000 0x08000000>; 12 + reg = <0x00000000 0x38000000>; 10 13 }; 11 14 };
-1
arch/xtensa/configs/common_defconfig
··· 66 66 CONFIG_MMU=y 67 67 # CONFIG_XTENSA_UNALIGNED_USER is not set 68 68 # CONFIG_PREEMPT is not set 69 - # CONFIG_MATH_EMULATION is not set 70 69 # CONFIG_HIGHMEM is not set 71 70 72 71 #
+1 -2
arch/xtensa/configs/iss_defconfig
··· 146 146 # CONFIG_XTENSA_VARIANT_S6000 is not set 147 147 # CONFIG_XTENSA_UNALIGNED_USER is not set 148 148 # CONFIG_PREEMPT is not set 149 - # CONFIG_MATH_EMULATION is not set 150 149 CONFIG_XTENSA_CALIBRATE_CCOUNT=y 151 150 CONFIG_SERIAL_CONSOLE=y 152 151 CONFIG_XTENSA_ISS_NETWORK=y ··· 307 308 # EEPROM support 308 309 # 309 310 # CONFIG_EEPROM_93CX6 is not set 310 - CONFIG_HAVE_IDE=y 311 + # CONFIG_HAVE_IDE is not set 311 312 # CONFIG_IDE is not set 312 313 313 314 #
-1
arch/xtensa/configs/s6105_defconfig
··· 109 109 CONFIG_XTENSA_VARIANT_S6000=y 110 110 # CONFIG_XTENSA_UNALIGNED_USER is not set 111 111 CONFIG_PREEMPT=y 112 - # CONFIG_MATH_EMULATION is not set 113 112 # CONFIG_HIGHMEM is not set 114 113 CONFIG_XTENSA_CALIBRATE_CCOUNT=y 115 114 CONFIG_SERIAL_CONSOLE=y
+2
arch/xtensa/include/asm/cacheflush.h
··· 37 37 * specials for cache aliasing: 38 38 * 39 39 * __flush_invalidate_dcache_page_alias(vaddr,paddr) 40 + * __invalidate_dcache_page_alias(vaddr,paddr) 40 41 * __invalidate_icache_page_alias(vaddr,paddr) 41 42 */ 42 43 ··· 63 62 64 63 #if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE) 65 64 extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long); 65 + extern void __invalidate_dcache_page_alias(unsigned long, unsigned long); 66 66 #else 67 67 static inline void __flush_invalidate_dcache_page_alias(unsigned long virt, 68 68 unsigned long phys) { }
+26 -4
arch/xtensa/include/asm/fixmap.h
··· 23 23 * Here we define all the compile-time 'special' virtual 24 24 * addresses. The point is to have a constant address at 25 25 * compile time, but to set the physical address only 26 - * in the boot process. We allocate these special addresses 27 - * from the end of the consistent memory region backwards. 26 + * in the boot process. We allocate these special addresses 27 + * from the start of the consistent memory region upwards. 28 28 * Also this lets us do fail-safe vmalloc(), we 29 29 * can guarantee that these special addresses and 30 30 * vmalloc()-ed addresses never overlap. ··· 38 38 #ifdef CONFIG_HIGHMEM 39 39 /* reserved pte's for temporary kernel mappings */ 40 40 FIX_KMAP_BEGIN, 41 - FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, 41 + FIX_KMAP_END = FIX_KMAP_BEGIN + 42 + (KM_TYPE_NR * NR_CPUS * DCACHE_N_COLORS) - 1, 42 43 #endif 43 44 __end_of_fixed_addresses 44 45 }; ··· 48 47 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 49 48 #define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) 50 49 51 - #include <asm-generic/fixmap.h> 50 + #define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT)) 51 + #define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT) 52 + 53 + #ifndef __ASSEMBLY__ 54 + /* 55 + * 'index to address' translation. If anyone tries to use the idx 56 + * directly without translation, we catch the bug with a NULL-deference 57 + * kernel oops. Illegal ranges of incoming indices are caught too. 58 + */ 59 + static __always_inline unsigned long fix_to_virt(const unsigned int idx) 60 + { 61 + BUILD_BUG_ON(idx >= __end_of_fixed_addresses); 62 + return __fix_to_virt(idx); 63 + } 64 + 65 + static inline unsigned long virt_to_fix(const unsigned long vaddr) 66 + { 67 + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 68 + return __virt_to_fix(vaddr); 69 + } 70 + 71 + #endif 52 72 53 73 #define kmap_get_fixmap_pte(vaddr) \ 54 74 pte_offset_kernel( \
+38 -2
arch/xtensa/include/asm/highmem.h
··· 12 12 #ifndef _XTENSA_HIGHMEM_H 13 13 #define _XTENSA_HIGHMEM_H 14 14 15 + #include <linux/wait.h> 15 16 #include <asm/cacheflush.h> 16 17 #include <asm/fixmap.h> 17 18 #include <asm/kmap_types.h> 18 19 #include <asm/pgtable.h> 19 20 20 - #define PKMAP_BASE (FIXADDR_START - PMD_SIZE) 21 - #define LAST_PKMAP PTRS_PER_PTE 21 + #define PKMAP_BASE ((FIXADDR_START - \ 22 + (LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK) 23 + #define LAST_PKMAP (PTRS_PER_PTE * DCACHE_N_COLORS) 22 24 #define LAST_PKMAP_MASK (LAST_PKMAP - 1) 23 25 #define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT) 24 26 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) 25 27 26 28 #define kmap_prot PAGE_KERNEL 29 + 30 + #if DCACHE_WAY_SIZE > PAGE_SIZE 31 + #define get_pkmap_color get_pkmap_color 32 + static inline int get_pkmap_color(struct page *page) 33 + { 34 + return DCACHE_ALIAS(page_to_phys(page)); 35 + } 36 + 37 + extern unsigned int last_pkmap_nr_arr[]; 38 + 39 + static inline unsigned int get_next_pkmap_nr(unsigned int color) 40 + { 41 + last_pkmap_nr_arr[color] = 42 + (last_pkmap_nr_arr[color] + DCACHE_N_COLORS) & LAST_PKMAP_MASK; 43 + return last_pkmap_nr_arr[color] + color; 44 + } 45 + 46 + static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color) 47 + { 48 + return pkmap_nr < DCACHE_N_COLORS; 49 + } 50 + 51 + static inline int get_pkmap_entries_count(unsigned int color) 52 + { 53 + return LAST_PKMAP / DCACHE_N_COLORS; 54 + } 55 + 56 + extern wait_queue_head_t pkmap_map_wait_arr[]; 57 + 58 + static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color) 59 + { 60 + return pkmap_map_wait_arr + color; 61 + } 62 + #endif 27 63 28 64 extern pte_t *pkmap_page_table; 29 65
+12 -2
arch/xtensa/include/asm/page.h
··· 78 78 # define DCACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0) 79 79 #else 80 80 # define DCACHE_ALIAS_ORDER 0 81 + # define DCACHE_ALIAS(a) ((void)(a), 0) 81 82 #endif 83 + #define DCACHE_N_COLORS (1 << DCACHE_ALIAS_ORDER) 82 84 83 85 #if ICACHE_WAY_SIZE > PAGE_SIZE 84 86 # define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT) ··· 136 134 #endif 137 135 138 136 struct page; 137 + struct vm_area_struct; 139 138 extern void clear_page(void *page); 140 139 extern void copy_page(void *to, void *from); 141 140 ··· 146 143 */ 147 144 148 145 #if DCACHE_WAY_SIZE > PAGE_SIZE 149 - extern void clear_user_page(void*, unsigned long, struct page*); 150 - extern void copy_user_page(void*, void*, unsigned long, struct page*); 146 + extern void clear_page_alias(void *vaddr, unsigned long paddr); 147 + extern void copy_page_alias(void *to, void *from, 148 + unsigned long to_paddr, unsigned long from_paddr); 149 + 150 + #define clear_user_highpage clear_user_highpage 151 + void clear_user_highpage(struct page *page, unsigned long vaddr); 152 + #define __HAVE_ARCH_COPY_USER_HIGHPAGE 153 + void copy_user_highpage(struct page *to, struct page *from, 154 + unsigned long vaddr, struct vm_area_struct *vma); 151 155 #else 152 156 # define clear_user_page(page, vaddr, pg) clear_page(page) 153 157 # define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+6 -1
arch/xtensa/include/asm/pgtable.h
··· 67 67 #define VMALLOC_START 0xC0000000 68 68 #define VMALLOC_END 0xC7FEFFFF 69 69 #define TLBTEMP_BASE_1 0xC7FF0000 70 - #define TLBTEMP_BASE_2 0xC7FF8000 70 + #define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE) 71 + #if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE 72 + #define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE) 73 + #else 74 + #define TLBTEMP_SIZE ICACHE_WAY_SIZE 75 + #endif 71 76 72 77 /* 73 78 * For the Xtensa architecture, the PTE layout is as follows:
+5
arch/xtensa/include/asm/uaccess.h
··· 52 52 */ 53 53 .macro get_fs ad, sp 54 54 GET_CURRENT(\ad,\sp) 55 + #if THREAD_CURRENT_DS > 1020 56 + addi \ad, \ad, TASK_THREAD 57 + l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD 58 + #else 55 59 l32i \ad, \ad, THREAD_CURRENT_DS 60 + #endif 56 61 .endm 57 62 58 63 /*
+10 -9
arch/xtensa/include/uapi/asm/ioctls.h
··· 28 28 #define TCSETSW 0x5403 29 29 #define TCSETSF 0x5404 30 30 31 - #define TCGETA _IOR('t', 23, struct termio) 32 - #define TCSETA _IOW('t', 24, struct termio) 33 - #define TCSETAW _IOW('t', 25, struct termio) 34 - #define TCSETAF _IOW('t', 28, struct termio) 31 + #define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */ 32 + #define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */ 33 + #define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */ 34 + #define TCSETAF 0x4012741C /* _IOW('t', 28, struct termio) */ 35 35 36 36 #define TCSBRK _IO('t', 29) 37 37 #define TCXONC _IO('t', 30) 38 38 #define TCFLSH _IO('t', 31) 39 39 40 - #define TIOCSWINSZ _IOW('t', 103, struct winsize) 41 - #define TIOCGWINSZ _IOR('t', 104, struct winsize) 40 + #define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */ 41 + #define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */ 42 42 #define TIOCSTART _IO('t', 110) /* start output, like ^Q */ 43 43 #define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ 44 44 #define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ ··· 88 88 #define TIOCSETD _IOW('T', 35, int) 89 89 #define TIOCGETD _IOR('T', 36, int) 90 90 #define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/ 91 - #define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* For debugging only*/ 92 91 #define TIOCSBRK _IO('T', 39) /* BSD compatibility */ 93 92 #define TIOCCBRK _IO('T', 40) /* BSD compatibility */ 94 93 #define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/ ··· 113 114 #define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. 
*/ 114 115 /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ 115 116 # define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ 116 - #define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config */ 117 - #define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */ 117 + #define TIOCSERGETMULTI 0x80a8545a /* Get multiport config */ 118 + /* _IOR('T', 90, struct serial_multiport_struct) */ 119 + #define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */ 120 + /* _IOW('T', 91, struct serial_multiport_struct) */ 118 121 119 122 #define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */ 120 123 #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+4 -1
arch/xtensa/include/uapi/asm/unistd.h
··· 739 739 #define __NR_sched_getattr 335 740 740 __SYSCALL(335, sys_sched_getattr, 3) 741 741 742 - #define __NR_syscall_count 336 742 + #define __NR_renameat2 336 743 + __SYSCALL(336, sys_renameat2, 5) 744 + 745 + #define __NR_syscall_count 337 743 746 744 747 /* 745 748 * sysxtensa syscall handler
+82 -48
arch/xtensa/kernel/align.S
··· 8 8 * this archive for more details. 9 9 * 10 10 * Copyright (C) 2001 - 2005 Tensilica, Inc. 11 + * Copyright (C) 2014 Cadence Design Systems Inc. 11 12 * 12 13 * Rewritten by Chris Zankel <chris@zankel.net> 13 14 * ··· 175 174 s32i a0, a2, PT_AREG2 176 175 s32i a3, a2, PT_AREG3 177 176 177 + rsr a3, excsave1 178 + movi a4, fast_unaligned_fixup 179 + s32i a4, a3, EXC_TABLE_FIXUP 180 + 178 181 /* Keep value of SAR in a0 */ 179 182 180 183 rsr a0, sar ··· 230 225 addx8 a5, a6, a5 231 226 jx a5 # jump into table 232 227 233 - /* Invalid instruction, CRITICAL! */ 234 - .Linvalid_instruction_load: 235 - j .Linvalid_instruction 236 - 237 228 /* Load: Load memory address. */ 238 229 239 230 .Lload: movi a3, ~3 ··· 273 272 /* Set target register. */ 274 273 275 274 1: 276 - 277 - #if XCHAL_HAVE_LOOPS 278 - rsr a5, lend # check if we reached LEND 279 - bne a7, a5, 1f 280 - rsr a5, lcount # and LCOUNT != 0 281 - beqz a5, 1f 282 - addi a5, a5, -1 # decrement LCOUNT and set 283 - rsr a7, lbeg # set PC to LBEGIN 284 - wsr a5, lcount 285 - #endif 286 - 287 - 1: wsr a7, epc1 # skip load instruction 288 275 extui a4, a4, INSN_T, 4 # extract target register 289 276 movi a5, .Lload_table 290 277 addx8 a4, a4, a5 ··· 315 326 mov a3, a14 ; _j 1f; .align 8 316 327 mov a3, a15 ; _j 1f; .align 8 317 328 329 + /* We cannot handle this exception. */ 330 + 331 + .extern _kernel_exception 332 + .Linvalid_instruction_load: 333 + .Linvalid_instruction_store: 334 + 335 + movi a4, 0 336 + rsr a3, excsave1 337 + s32i a4, a3, EXC_TABLE_FIXUP 338 + 339 + /* Restore a4...a8 and SAR, set SP, and jump to default exception. 
*/ 340 + 341 + l32i a8, a2, PT_AREG8 342 + l32i a7, a2, PT_AREG7 343 + l32i a6, a2, PT_AREG6 344 + l32i a5, a2, PT_AREG5 345 + l32i a4, a2, PT_AREG4 346 + wsr a0, sar 347 + mov a1, a2 348 + 349 + rsr a0, ps 350 + bbsi.l a0, PS_UM_BIT, 2f # jump if user mode 351 + 352 + movi a0, _kernel_exception 353 + jx a0 354 + 355 + 2: movi a0, _user_exception 356 + jx a0 357 + 318 358 1: # a7: instruction pointer, a4: instruction, a3: value 319 359 320 360 movi a6, 0 # mask: ffffffff:00000000 ··· 371 353 /* Get memory address */ 372 354 373 355 1: 374 - #if XCHAL_HAVE_LOOPS 375 - rsr a4, lend # check if we reached LEND 376 - bne a7, a4, 1f 377 - rsr a4, lcount # and LCOUNT != 0 378 - beqz a4, 1f 379 - addi a4, a4, -1 # decrement LCOUNT and set 380 - rsr a7, lbeg # set PC to LBEGIN 381 - wsr a4, lcount 382 - #endif 383 - 384 - 1: wsr a7, epc1 # skip store instruction 385 356 movi a4, ~3 386 357 and a4, a4, a8 # align memory address 387 358 ··· 382 375 #endif 383 376 384 377 __ssa8r a8 385 - __src_b a7, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE) 378 + __src_b a8, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE) 386 379 __src_b a6, a6, a5 # hi-mask 0..0F..F (BE) F..F0..0 (LE) 387 380 #ifdef UNALIGNED_USER_EXCEPTION 388 381 l32e a5, a4, -8 389 382 #else 390 383 l32i a5, a4, 0 # load lower address word 391 384 #endif 392 - and a5, a5, a7 # mask 393 - __sh a7, a3 # shift value 394 - or a5, a5, a7 # or with original value 385 + and a5, a5, a8 # mask 386 + __sh a8, a3 # shift value 387 + or a5, a5, a8 # or with original value 395 388 #ifdef UNALIGNED_USER_EXCEPTION 396 389 s32e a5, a4, -8 397 - l32e a7, a4, -4 390 + l32e a8, a4, -4 398 391 #else 399 392 s32i a5, a4, 0 # store 400 - l32i a7, a4, 4 # same for upper address word 393 + l32i a8, a4, 4 # same for upper address word 401 394 #endif 402 395 __sl a5, a3 403 - and a6, a7, a6 396 + and a6, a8, a6 404 397 or a6, a6, a5 405 398 #ifdef UNALIGNED_USER_EXCEPTION 406 399 s32e a6, a4, -4 ··· 408 401 s32i a6, a4, 4 409 402 #endif 410 403 
411 - /* Done. restore stack and return */ 412 - 413 404 .Lexit: 405 + #if XCHAL_HAVE_LOOPS 406 + rsr a4, lend # check if we reached LEND 407 + bne a7, a4, 1f 408 + rsr a4, lcount # and LCOUNT != 0 409 + beqz a4, 1f 410 + addi a4, a4, -1 # decrement LCOUNT and set 411 + rsr a7, lbeg # set PC to LBEGIN 412 + wsr a4, lcount 413 + #endif 414 + 415 + 1: wsr a7, epc1 # skip emulated instruction 416 + 417 + /* Update icount if we're single-stepping in userspace. */ 418 + rsr a4, icountlevel 419 + beqz a4, 1f 420 + bgeui a4, LOCKLEVEL + 1, 1f 421 + rsr a4, icount 422 + addi a4, a4, 1 423 + wsr a4, icount 424 + 1: 414 425 movi a4, 0 415 426 rsr a3, excsave1 416 427 s32i a4, a3, EXC_TABLE_FIXUP ··· 449 424 l32i a2, a2, PT_AREG2 450 425 rfe 451 426 452 - /* We cannot handle this exception. */ 427 + ENDPROC(fast_unaligned) 453 428 454 - .extern _kernel_exception 455 - .Linvalid_instruction_store: 456 - .Linvalid_instruction: 429 + ENTRY(fast_unaligned_fixup) 457 430 458 - /* Restore a4...a8 and SAR, set SP, and jump to default exception. 
*/ 431 + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE 432 + wsr a3, excsave1 459 433 460 434 l32i a8, a2, PT_AREG8 461 435 l32i a7, a2, PT_AREG7 462 436 l32i a6, a2, PT_AREG6 463 437 l32i a5, a2, PT_AREG5 464 438 l32i a4, a2, PT_AREG4 439 + l32i a0, a2, PT_AREG2 440 + xsr a0, depc # restore depc and a0 465 441 wsr a0, sar 466 - mov a1, a2 442 + 443 + rsr a0, exccause 444 + s32i a0, a2, PT_DEPC # mark as a regular exception 467 445 468 446 rsr a0, ps 469 - bbsi.l a2, PS_UM_BIT, 1f # jump if user mode 447 + bbsi.l a0, PS_UM_BIT, 1f # jump if user mode 470 448 471 - movi a0, _kernel_exception 449 + rsr a0, exccause 450 + addx4 a0, a0, a3 # find entry in table 451 + l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler 452 + l32i a3, a2, PT_AREG3 453 + jx a0 454 + 1: 455 + rsr a0, exccause 456 + addx4 a0, a0, a3 # find entry in table 457 + l32i a0, a0, EXC_TABLE_FAST_USER # load handler 458 + l32i a3, a2, PT_AREG3 472 459 jx a0 473 460 474 - 1: movi a0, _user_exception 475 - jx a0 476 - 477 - ENDPROC(fast_unaligned) 461 + ENDPROC(fast_unaligned_fixup) 478 462 479 463 #endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
+42 -12
arch/xtensa/kernel/entry.S
··· 986 986 * j done 987 987 */ 988 988 989 + #ifdef CONFIG_FAST_SYSCALL_XTENSA 990 + 989 991 #define TRY \ 990 992 .section __ex_table, "a"; \ 991 993 .word 66f, 67f; \ ··· 1003 1001 movi a7, 4 # sizeof(unsigned int) 1004 1002 access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp 1005 1003 1006 - addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1 1007 - _bgeui a6, SYS_XTENSA_COUNT - 1, .Lill 1008 - _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp 1004 + _bgeui a6, SYS_XTENSA_COUNT, .Lill 1005 + _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp 1009 1006 1010 1007 /* Fall through for ATOMIC_CMP_SWP. */ 1011 1008 ··· 1016 1015 l32i a7, a2, PT_AREG7 # restore a7 1017 1016 l32i a0, a2, PT_AREG0 # restore a0 1018 1017 movi a2, 1 # and return 1 1019 - addi a6, a6, 1 # restore a6 (really necessary?) 1020 1018 rfe 1021 1019 1022 1020 1: l32i a7, a2, PT_AREG7 # restore a7 1023 1021 l32i a0, a2, PT_AREG0 # restore a0 1024 1022 movi a2, 0 # return 0 (note that we cannot set 1025 - addi a6, a6, 1 # restore a6 (really necessary?) 1026 1023 rfe 1027 1024 1028 1025 .Lnswp: /* Atomic set, add, and exg_add. */ 1029 1026 1030 1027 TRY l32i a7, a3, 0 # orig 1028 + addi a6, a6, -SYS_XTENSA_ATOMIC_SET 1031 1029 add a0, a4, a7 # + arg 1032 1030 moveqz a0, a4, a6 # set 1031 + addi a6, a6, SYS_XTENSA_ATOMIC_SET 1033 1032 TRY s32i a0, a3, 0 # write new value 1034 1033 1035 1034 mov a0, a2 1036 1035 mov a2, a7 1037 1036 l32i a7, a0, PT_AREG7 # restore a7 1038 1037 l32i a0, a0, PT_AREG0 # restore a0 1039 - addi a6, a6, 1 # restore a6 (really necessary?) 
1040 1038 rfe 1041 1039 1042 1040 CATCH ··· 1044 1044 movi a2, -EFAULT 1045 1045 rfe 1046 1046 1047 - .Lill: l32i a7, a2, PT_AREG0 # restore a7 1047 + .Lill: l32i a7, a2, PT_AREG7 # restore a7 1048 1048 l32i a0, a2, PT_AREG0 # restore a0 1049 1049 movi a2, -EINVAL 1050 1050 rfe 1051 1051 1052 1052 ENDPROC(fast_syscall_xtensa) 1053 + 1054 + #else /* CONFIG_FAST_SYSCALL_XTENSA */ 1055 + 1056 + ENTRY(fast_syscall_xtensa) 1057 + 1058 + l32i a0, a2, PT_AREG0 # restore a0 1059 + movi a2, -ENOSYS 1060 + rfe 1061 + 1062 + ENDPROC(fast_syscall_xtensa) 1063 + 1064 + #endif /* CONFIG_FAST_SYSCALL_XTENSA */ 1053 1065 1054 1066 1055 1067 /* fast_syscall_spill_registers. ··· 1077 1065 * 1078 1066 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler. 1079 1067 */ 1068 + 1069 + #ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS 1080 1070 1081 1071 ENTRY(fast_syscall_spill_registers) 1082 1072 ··· 1414 1400 1415 1401 ENDPROC(fast_syscall_spill_registers_fixup_return) 1416 1402 1403 + #else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */ 1404 + 1405 + ENTRY(fast_syscall_spill_registers) 1406 + 1407 + l32i a0, a2, PT_AREG0 # restore a0 1408 + movi a2, -ENOSYS 1409 + rfe 1410 + 1411 + ENDPROC(fast_syscall_spill_registers) 1412 + 1413 + #endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */ 1414 + 1417 1415 #ifdef CONFIG_MMU 1418 1416 /* 1419 1417 * We should never get here. Bail out! ··· 1591 1565 rsr a0, excvaddr 1592 1566 bltu a0, a3, 2f 1593 1567 1594 - addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT)) 1568 + addi a1, a0, -TLBTEMP_SIZE 1595 1569 bgeu a1, a3, 2f 1596 1570 1597 1571 /* Check if we have to restore an ITLB mapping. 
*/ ··· 1846 1820 1847 1821 entry a1, 16 1848 1822 1849 - mov a10, a2 # preserve 'prev' (a2) 1850 1823 mov a11, a3 # and 'next' (a3) 1851 1824 1852 1825 l32i a4, a2, TASK_THREAD_INFO ··· 1853 1828 1854 1829 save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER 1855 1830 1856 - s32i a0, a10, THREAD_RA # save return address 1857 - s32i a1, a10, THREAD_SP # save stack pointer 1831 + #if THREAD_RA > 1020 || THREAD_SP > 1020 1832 + addi a10, a2, TASK_THREAD 1833 + s32i a0, a10, THREAD_RA - TASK_THREAD # save return address 1834 + s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer 1835 + #else 1836 + s32i a0, a2, THREAD_RA # save return address 1837 + s32i a1, a2, THREAD_SP # save stack pointer 1838 + #endif 1858 1839 1859 1840 /* Disable ints while we manipulate the stack pointer. */ 1860 1841 ··· 1901 1870 load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER 1902 1871 1903 1872 wsr a14, ps 1904 - mov a2, a10 # return 'prev' 1905 1873 rsync 1906 1874 1907 1875 retw
+6 -6
arch/xtensa/kernel/pci-dma.c
··· 49 49 50 50 /* We currently don't support coherent memory outside KSEG */ 51 51 52 - if (ret < XCHAL_KSEG_CACHED_VADDR 53 - || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE) 54 - BUG(); 52 + BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR || 53 + ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); 55 54 56 55 57 56 if (ret != 0) { ··· 67 68 void dma_free_coherent(struct device *hwdev, size_t size, 68 69 void *vaddr, dma_addr_t dma_handle) 69 70 { 70 - long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR; 71 + unsigned long addr = (unsigned long)vaddr + 72 + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; 71 73 72 - if (addr < 0 || addr >= XCHAL_KSEG_SIZE) 73 - BUG(); 74 + BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR || 75 + addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); 74 76 75 77 free_pages(addr, get_order(size)); 76 78 }
+1
arch/xtensa/kernel/smp.c
··· 571 571 }; 572 572 on_each_cpu(ipi_flush_icache_range, &fd, 1); 573 573 } 574 + EXPORT_SYMBOL(flush_icache_range); 574 575 575 576 /* ------------------------------------------------------------------------- */ 576 577
+1 -4
arch/xtensa/kernel/traps.c
··· 101 101 #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION 102 102 #ifdef CONFIG_XTENSA_UNALIGNED_USER 103 103 { EXCCAUSE_UNALIGNED, USER, fast_unaligned }, 104 - #else 105 - { EXCCAUSE_UNALIGNED, 0, do_unaligned_user }, 106 104 #endif 105 + { EXCCAUSE_UNALIGNED, 0, do_unaligned_user }, 107 106 { EXCCAUSE_UNALIGNED, KRNL, fast_unaligned }, 108 107 #endif 109 108 #ifdef CONFIG_MMU ··· 263 264 */ 264 265 265 266 #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION 266 - #ifndef CONFIG_XTENSA_UNALIGNED_USER 267 267 void 268 268 do_unaligned_user (struct pt_regs *regs) 269 269 { ··· 283 285 force_sig_info(SIGSEGV, &info, current); 284 286 285 287 } 286 - #endif 287 288 #endif 288 289 289 290 void
+7 -1
arch/xtensa/kernel/vectors.S
··· 454 454 s32i a0, a2, PT_DEPC 455 455 456 456 _DoubleExceptionVector_handle_exception: 457 + addi a0, a0, -EXCCAUSE_UNALIGNED 458 + beqz a0, 2f 457 459 addx4 a0, a0, a3 458 - l32i a0, a0, EXC_TABLE_FAST_USER 460 + l32i a0, a0, EXC_TABLE_FAST_USER + 4 * EXCCAUSE_UNALIGNED 461 + xsr a3, excsave1 462 + jx a0 463 + 2: 464 + movi a0, user_exception 459 465 xsr a3, excsave1 460 466 jx a0 461 467
+2 -2
arch/xtensa/kernel/vmlinux.lds.S
··· 269 269 .UserExceptionVector.literal) 270 270 SECTION_VECTOR (_DoubleExceptionVector_literal, 271 271 .DoubleExceptionVector.literal, 272 - DOUBLEEXC_VECTOR_VADDR - 40, 272 + DOUBLEEXC_VECTOR_VADDR - 48, 273 273 SIZEOF(.UserExceptionVector.text), 274 274 .UserExceptionVector.text) 275 275 SECTION_VECTOR (_DoubleExceptionVector_text, 276 276 .DoubleExceptionVector.text, 277 277 DOUBLEEXC_VECTOR_VADDR, 278 - 40, 278 + 48, 279 279 .DoubleExceptionVector.literal) 280 280 281 281 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
+68 -9
arch/xtensa/mm/cache.c
··· 59 59 * 60 60 */ 61 61 62 - #if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM) 63 - #error "HIGHMEM is not supported on cores with aliasing cache." 64 - #endif 62 + #if (DCACHE_WAY_SIZE > PAGE_SIZE) 63 + static inline void kmap_invalidate_coherent(struct page *page, 64 + unsigned long vaddr) 65 + { 66 + if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { 67 + unsigned long kvaddr; 68 + 69 + if (!PageHighMem(page)) { 70 + kvaddr = (unsigned long)page_to_virt(page); 71 + 72 + __invalidate_dcache_page(kvaddr); 73 + } else { 74 + kvaddr = TLBTEMP_BASE_1 + 75 + (page_to_phys(page) & DCACHE_ALIAS_MASK); 76 + 77 + __invalidate_dcache_page_alias(kvaddr, 78 + page_to_phys(page)); 79 + } 80 + } 81 + } 82 + 83 + static inline void *coherent_kvaddr(struct page *page, unsigned long base, 84 + unsigned long vaddr, unsigned long *paddr) 85 + { 86 + if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { 87 + *paddr = page_to_phys(page); 88 + return (void *)(base + (vaddr & DCACHE_ALIAS_MASK)); 89 + } else { 90 + *paddr = 0; 91 + return page_to_virt(page); 92 + } 93 + } 94 + 95 + void clear_user_highpage(struct page *page, unsigned long vaddr) 96 + { 97 + unsigned long paddr; 98 + void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr); 99 + 100 + pagefault_disable(); 101 + kmap_invalidate_coherent(page, vaddr); 102 + set_bit(PG_arch_1, &page->flags); 103 + clear_page_alias(kvaddr, paddr); 104 + pagefault_enable(); 105 + } 106 + 107 + void copy_user_highpage(struct page *dst, struct page *src, 108 + unsigned long vaddr, struct vm_area_struct *vma) 109 + { 110 + unsigned long dst_paddr, src_paddr; 111 + void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr, 112 + &dst_paddr); 113 + void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr, 114 + &src_paddr); 115 + 116 + pagefault_disable(); 117 + kmap_invalidate_coherent(dst, vaddr); 118 + set_bit(PG_arch_1, &dst->flags); 119 + copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, 
src_paddr); 120 + pagefault_enable(); 121 + } 122 + 123 + #endif /* DCACHE_WAY_SIZE > PAGE_SIZE */ 65 124 66 125 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 67 126 ··· 162 103 if (!alias && !mapping) 163 104 return; 164 105 165 - __flush_invalidate_dcache_page((long)page_address(page)); 106 + virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK); 107 + __flush_invalidate_dcache_page_alias(virt, phys); 166 108 167 109 virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK); 168 110 ··· 228 168 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 229 169 230 170 if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) { 231 - 232 - unsigned long paddr = (unsigned long) page_address(page); 233 171 unsigned long phys = page_to_phys(page); 234 - unsigned long tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK); 172 + unsigned long tmp; 235 173 236 - __flush_invalidate_dcache_page(paddr); 237 - 174 + tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK); 175 + __flush_invalidate_dcache_page_alias(tmp, phys); 176 + tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK); 238 177 __flush_invalidate_dcache_page_alias(tmp, phys); 239 178 __invalidate_icache_page_alias(tmp, phys); 240 179
+31 -10
arch/xtensa/mm/highmem.c
··· 14 14 15 15 static pte_t *kmap_pte; 16 16 17 + #if DCACHE_WAY_SIZE > PAGE_SIZE 18 + unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS]; 19 + wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS]; 20 + 21 + static void __init kmap_waitqueues_init(void) 22 + { 23 + unsigned int i; 24 + 25 + for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i) 26 + init_waitqueue_head(pkmap_map_wait_arr + i); 27 + } 28 + #else 29 + static inline void kmap_waitqueues_init(void) 30 + { 31 + } 32 + #endif 33 + 34 + static inline enum fixed_addresses kmap_idx(int type, unsigned long color) 35 + { 36 + return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS + 37 + color; 38 + } 39 + 17 40 void *kmap_atomic(struct page *page) 18 41 { 19 42 enum fixed_addresses idx; 20 43 unsigned long vaddr; 21 - int type; 22 44 23 45 pagefault_disable(); 24 46 if (!PageHighMem(page)) 25 47 return page_address(page); 26 48 27 - type = kmap_atomic_idx_push(); 28 - idx = type + KM_TYPE_NR * smp_processor_id(); 49 + idx = kmap_idx(kmap_atomic_idx_push(), 50 + DCACHE_ALIAS(page_to_phys(page))); 29 51 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 30 52 #ifdef CONFIG_DEBUG_HIGHMEM 31 - BUG_ON(!pte_none(*(kmap_pte - idx))); 53 + BUG_ON(!pte_none(*(kmap_pte + idx))); 32 54 #endif 33 - set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC)); 55 + set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC)); 34 56 35 57 return (void *)vaddr; 36 58 } ··· 60 38 61 39 void __kunmap_atomic(void *kvaddr) 62 40 { 63 - int idx, type; 64 - 65 41 if (kvaddr >= (void *)FIXADDR_START && 66 42 kvaddr < (void *)FIXADDR_TOP) { 67 - type = kmap_atomic_idx(); 68 - idx = type + KM_TYPE_NR * smp_processor_id(); 43 + int idx = kmap_idx(kmap_atomic_idx(), 44 + DCACHE_ALIAS((unsigned long)kvaddr)); 69 45 70 46 /* 71 47 * Force other mappings to Oops if they'll try to access this ··· 71 51 * is a bad idea also, in case the page changes cacheability 72 52 * attributes or becomes a protected page in a hypervisor. 
73 53 */ 74 - pte_clear(&init_mm, kvaddr, kmap_pte - idx); 54 + pte_clear(&init_mm, kvaddr, kmap_pte + idx); 75 55 local_flush_tlb_kernel_range((unsigned long)kvaddr, 76 56 (unsigned long)kvaddr + PAGE_SIZE); 77 57 ··· 89 69 /* cache the first kmap pte */ 90 70 kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); 91 71 kmap_pte = kmap_get_fixmap_pte(kmap_vstart); 72 + kmap_waitqueues_init(); 92 73 }
+52 -64
arch/xtensa/mm/misc.S
··· 110 110 #if (DCACHE_WAY_SIZE > PAGE_SIZE) 111 111 112 112 /* 113 - * clear_user_page (void *addr, unsigned long vaddr, struct page *page) 114 - * a2 a3 a4 113 + * clear_page_alias(void *addr, unsigned long paddr) 114 + * a2 a3 115 115 */ 116 116 117 - ENTRY(clear_user_page) 117 + ENTRY(clear_page_alias) 118 118 119 119 entry a1, 32 120 120 121 - /* Mark page dirty and determine alias. */ 121 + /* Skip setting up a temporary DTLB if not aliased low page. */ 122 122 123 - movi a7, (1 << PG_ARCH_1) 124 - l32i a5, a4, PAGE_FLAGS 125 - xor a6, a2, a3 126 - extui a3, a3, PAGE_SHIFT, DCACHE_ALIAS_ORDER 127 - extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER 128 - or a5, a5, a7 129 - slli a3, a3, PAGE_SHIFT 130 - s32i a5, a4, PAGE_FLAGS 123 + movi a5, PAGE_OFFSET 124 + movi a6, 0 125 + beqz a3, 1f 131 126 132 - /* Skip setting up a temporary DTLB if not aliased. */ 127 + /* Setup a temporary DTLB for the addr. */ 133 128 134 - beqz a6, 1f 135 - 136 - /* Invalidate kernel page. */ 137 - 138 - mov a10, a2 139 - call8 __invalidate_dcache_page 140 - 141 - /* Setup a temporary DTLB with the color of the VPN */ 142 - 143 - movi a4, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff 144 - movi a5, TLBTEMP_BASE_1 # virt 145 - add a6, a2, a4 # ppn 146 - add a2, a5, a3 # add 'color' 147 - 129 + addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) 130 + mov a4, a2 148 131 wdtlb a6, a2 149 132 dsync 150 133 ··· 148 165 149 166 /* We need to invalidate the temporary idtlb entry, if any. 
*/ 150 167 151 - 1: addi a2, a2, -PAGE_SIZE 152 - idtlb a2 168 + 1: idtlb a4 153 169 dsync 154 170 155 171 retw 156 172 157 - ENDPROC(clear_user_page) 173 + ENDPROC(clear_page_alias) 158 174 159 175 /* 160 - * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page) 161 - * a2 a3 a4 a5 176 + * copy_page_alias(void *to, void *from, 177 + * a2 a3 178 + * unsigned long to_paddr, unsigned long from_paddr) 179 + * a4 a5 162 180 */ 163 181 164 - ENTRY(copy_user_page) 182 + ENTRY(copy_page_alias) 165 183 166 184 entry a1, 32 167 185 168 - /* Mark page dirty and determine alias for destination. */ 186 + /* Skip setting up a temporary DTLB for destination if not aliased. */ 169 187 170 - movi a8, (1 << PG_ARCH_1) 171 - l32i a9, a5, PAGE_FLAGS 172 - xor a6, a2, a4 173 - xor a7, a3, a4 174 - extui a4, a4, PAGE_SHIFT, DCACHE_ALIAS_ORDER 175 - extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER 176 - extui a7, a7, PAGE_SHIFT, DCACHE_ALIAS_ORDER 177 - or a9, a9, a8 178 - slli a4, a4, PAGE_SHIFT 179 - s32i a9, a5, PAGE_FLAGS 180 - movi a5, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff 188 + movi a6, 0 189 + movi a7, 0 190 + beqz a4, 1f 181 191 182 - beqz a6, 1f 192 + /* Setup a temporary DTLB for destination. */ 183 193 184 - /* Invalidate dcache */ 185 - 186 - mov a10, a2 187 - call8 __invalidate_dcache_page 188 - 189 - /* Setup a temporary DTLB with a matching color. */ 190 - 191 - movi a8, TLBTEMP_BASE_1 # base 192 - add a6, a2, a5 # ppn 193 - add a2, a8, a4 # add 'color' 194 - 194 + addi a6, a4, (PAGE_KERNEL | _PAGE_HW_WRITE) 195 195 wdtlb a6, a2 196 196 dsync 197 197 198 - /* Skip setting up a temporary DTLB for destination if not aliased. */ 198 + /* Skip setting up a temporary DTLB for source if not aliased. */ 199 199 200 - 1: beqz a7, 1f 200 + 1: beqz a5, 1f 201 201 202 - /* Setup a temporary DTLB with a matching color. */ 202 + /* Setup a temporary DTLB for source. 
*/ 203 203 204 - movi a8, TLBTEMP_BASE_2 # base 205 - add a7, a3, a5 # ppn 206 - add a3, a8, a4 204 + addi a7, a5, PAGE_KERNEL 207 205 addi a8, a3, 1 # way1 208 206 209 207 wdtlb a7, a8 ··· 235 271 236 272 retw 237 273 238 - ENDPROC(copy_user_page) 274 + ENDPROC(copy_page_alias) 239 275 240 276 #endif 241 277 ··· 264 300 retw 265 301 266 302 ENDPROC(__flush_invalidate_dcache_page_alias) 303 + 304 + /* 305 + * void __invalidate_dcache_page_alias (addr, phys) 306 + * a2 a3 307 + */ 308 + 309 + ENTRY(__invalidate_dcache_page_alias) 310 + 311 + entry sp, 16 312 + 313 + movi a7, 0 # required for exception handler 314 + addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) 315 + mov a4, a2 316 + wdtlb a6, a2 317 + dsync 318 + 319 + ___invalidate_dcache_page a2 a3 320 + 321 + idtlb a4 322 + dsync 323 + 324 + retw 325 + 326 + ENDPROC(__invalidate_dcache_page_alias) 267 327 #endif 268 328 269 329 ENTRY(__tlbtemp_mapping_itlb)
+22 -16
arch/xtensa/mm/mmu.c
··· 18 18 #include <asm/io.h> 19 19 20 20 #if defined(CONFIG_HIGHMEM) 21 - static void * __init init_pmd(unsigned long vaddr) 21 + static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages) 22 22 { 23 23 pgd_t *pgd = pgd_offset_k(vaddr); 24 24 pmd_t *pmd = pmd_offset(pgd, vaddr); 25 + pte_t *pte; 26 + unsigned long i; 25 27 26 - if (pmd_none(*pmd)) { 27 - unsigned i; 28 - pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE); 28 + n_pages = ALIGN(n_pages, PTRS_PER_PTE); 29 29 30 - for (i = 0; i < 1024; i++) 31 - pte_clear(NULL, 0, pte + i); 30 + pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n", 31 + __func__, vaddr, n_pages); 32 32 33 - set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK)); 34 - BUG_ON(pte != pte_offset_kernel(pmd, 0)); 35 - pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n", 36 - __func__, vaddr, pmd, pte); 37 - return pte; 38 - } else { 39 - return pte_offset_kernel(pmd, 0); 33 + pte = alloc_bootmem_low_pages(n_pages * sizeof(pte_t)); 34 + 35 + for (i = 0; i < n_pages; ++i) 36 + pte_clear(NULL, 0, pte + i); 37 + 38 + for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) { 39 + pte_t *cur_pte = pte + i; 40 + 41 + BUG_ON(!pmd_none(*pmd)); 42 + set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK)); 43 + BUG_ON(cur_pte != pte_offset_kernel(pmd, 0)); 44 + pr_debug("%s: pmd: 0x%p, pte: 0x%p\n", 45 + __func__, pmd, cur_pte); 40 46 } 47 + return pte; 41 48 } 42 49 43 50 static void __init fixedrange_init(void) 44 51 { 45 - BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE); 46 - init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK); 52 + init_pmd(__fix_to_virt(0), __end_of_fixed_addresses); 47 53 } 48 54 #endif 49 55 ··· 58 52 memset(swapper_pg_dir, 0, PAGE_SIZE); 59 53 #ifdef CONFIG_HIGHMEM 60 54 fixedrange_init(); 61 - pkmap_page_table = init_pmd(PKMAP_BASE); 55 + pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP); 62 56 kmap_init(); 63 57 #endif 64 58 }
+1 -1
block/bio-integrity.c
··· 520 520 */ 521 521 if (error) { 522 522 bio->bi_end_io = bip->bip_end_io; 523 - bio_endio(bio, error); 523 + bio_endio_nodec(bio, error); 524 524 525 525 return; 526 526 }
-1
block/blk-core.c
··· 1252 1252 rq->__sector = (sector_t) -1; 1253 1253 rq->bio = rq->biotail = NULL; 1254 1254 memset(rq->__cmd, 0, sizeof(rq->__cmd)); 1255 - rq->cmd = rq->__cmd; 1256 1255 } 1257 1256 EXPORT_SYMBOL(blk_rq_set_block_pc); 1258 1257
+22 -14
block/blk-mq.c
··· 112 112 */ 113 113 void blk_mq_freeze_queue(struct request_queue *q) 114 114 { 115 + bool freeze; 116 + 115 117 spin_lock_irq(q->queue_lock); 116 - q->mq_freeze_depth++; 118 + freeze = !q->mq_freeze_depth++; 117 119 spin_unlock_irq(q->queue_lock); 118 120 119 - percpu_ref_kill(&q->mq_usage_counter); 120 - blk_mq_run_queues(q, false); 121 + if (freeze) { 122 + percpu_ref_kill(&q->mq_usage_counter); 123 + blk_mq_run_queues(q, false); 124 + } 121 125 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter)); 122 126 } 123 127 124 128 static void blk_mq_unfreeze_queue(struct request_queue *q) 125 129 { 126 - bool wake = false; 130 + bool wake; 127 131 128 132 spin_lock_irq(q->queue_lock); 129 133 wake = !--q->mq_freeze_depth; ··· 175 171 rq->special = NULL; 176 172 /* tag was already set */ 177 173 rq->errors = 0; 174 + 175 + rq->cmd = rq->__cmd; 178 176 179 177 rq->extra_len = 0; 180 178 rq->sense_len = 0; ··· 1074 1068 blk_account_io_start(rq, 1); 1075 1069 } 1076 1070 1071 + static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx) 1072 + { 1073 + return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) && 1074 + !blk_queue_nomerges(hctx->queue); 1075 + } 1076 + 1077 1077 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx, 1078 1078 struct blk_mq_ctx *ctx, 1079 1079 struct request *rq, struct bio *bio) 1080 1080 { 1081 - struct request_queue *q = hctx->queue; 1082 - 1083 - if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) { 1081 + if (!hctx_allow_merges(hctx)) { 1084 1082 blk_mq_bio_to_request(rq, bio); 1085 1083 spin_lock(&ctx->lock); 1086 1084 insert_rq: ··· 1092 1082 spin_unlock(&ctx->lock); 1093 1083 return false; 1094 1084 } else { 1085 + struct request_queue *q = hctx->queue; 1086 + 1095 1087 spin_lock(&ctx->lock); 1096 1088 if (!blk_mq_attempt_merge(q, ctx, bio)) { 1097 1089 blk_mq_bio_to_request(rq, bio); ··· 1586 1574 hctx->tags = set->tags[i]; 1587 1575 1588 1576 /* 1589 - * Allocate space for all possible cpus to avoid allocation in 
1577 + * Allocate space for all possible cpus to avoid allocation at 1590 1578 * runtime 1591 1579 */ 1592 1580 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *), ··· 1674 1662 1675 1663 queue_for_each_hw_ctx(q, hctx, i) { 1676 1664 /* 1677 - * If not software queues are mapped to this hardware queue, 1678 - * disable it and free the request entries 1665 + * If no software queues are mapped to this hardware queue, 1666 + * disable it and free the request entries. 1679 1667 */ 1680 1668 if (!hctx->nr_ctx) { 1681 1669 struct blk_mq_tag_set *set = q->tag_set; ··· 1725 1713 { 1726 1714 struct blk_mq_tag_set *set = q->tag_set; 1727 1715 1728 - blk_mq_freeze_queue(q); 1729 - 1730 1716 mutex_lock(&set->tag_list_lock); 1731 1717 list_del_init(&q->tag_set_list); 1732 1718 blk_mq_update_tag_set_depth(set); 1733 1719 mutex_unlock(&set->tag_list_lock); 1734 - 1735 - blk_mq_unfreeze_queue(q); 1736 1720 } 1737 1721 1738 1722 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
+16 -3
block/cfq-iosched.c
··· 1272 1272 rb_insert_color(&cfqg->rb_node, &st->rb); 1273 1273 } 1274 1274 1275 + /* 1276 + * This has to be called only on activation of cfqg 1277 + */ 1275 1278 static void 1276 1279 cfq_update_group_weight(struct cfq_group *cfqg) 1277 1280 { 1278 - BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); 1279 - 1280 1281 if (cfqg->new_weight) { 1281 1282 cfqg->weight = cfqg->new_weight; 1282 1283 cfqg->new_weight = 0; 1283 1284 } 1285 + } 1286 + 1287 + static void 1288 + cfq_update_group_leaf_weight(struct cfq_group *cfqg) 1289 + { 1290 + BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); 1284 1291 1285 1292 if (cfqg->new_leaf_weight) { 1286 1293 cfqg->leaf_weight = cfqg->new_leaf_weight; ··· 1306 1299 /* add to the service tree */ 1307 1300 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); 1308 1301 1309 - cfq_update_group_weight(cfqg); 1302 + /* 1303 + * Update leaf_weight. We cannot update weight at this point 1304 + * because cfqg might already have been activated and is 1305 + * contributing its current weight to the parent's child_weight. 1306 + */ 1307 + cfq_update_group_leaf_weight(cfqg); 1310 1308 __cfq_group_service_tree_add(st, cfqg); 1311 1309 1312 1310 /* ··· 1335 1323 */ 1336 1324 while ((parent = cfqg_parent(pos))) { 1337 1325 if (propagate) { 1326 + cfq_update_group_weight(pos); 1338 1327 propagate = !parent->nr_active++; 1339 1328 parent->children_weight += pos->weight; 1340 1329 }
+27 -13
block/scsi_ioctl.c
··· 279 279 r = blk_rq_unmap_user(bio); 280 280 if (!ret) 281 281 ret = r; 282 - blk_put_request(rq); 283 282 284 283 return ret; 285 284 } ··· 295 296 struct bio *bio; 296 297 297 298 if (hdr->interface_id != 'S') 298 - return -EINVAL; 299 - if (hdr->cmd_len > BLK_MAX_CDB) 300 299 return -EINVAL; 301 300 302 301 if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9)) ··· 314 317 if (hdr->flags & SG_FLAG_Q_AT_HEAD) 315 318 at_head = 1; 316 319 320 + ret = -ENOMEM; 317 321 rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL); 318 322 if (!rq) 319 - return -ENOMEM; 323 + goto out; 320 324 blk_rq_set_block_pc(rq); 321 325 322 - if (blk_fill_sghdr_rq(q, rq, hdr, mode)) { 323 - blk_put_request(rq); 324 - return -EFAULT; 326 + if (hdr->cmd_len > BLK_MAX_CDB) { 327 + rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL); 328 + if (!rq->cmd) 329 + goto out_put_request; 325 330 } 326 331 332 + ret = -EFAULT; 333 + if (blk_fill_sghdr_rq(q, rq, hdr, mode)) 334 + goto out_free_cdb; 335 + 336 + ret = 0; 327 337 if (hdr->iovec_count) { 328 338 size_t iov_data_len; 329 339 struct iovec *iov = NULL; ··· 339 335 0, NULL, &iov); 340 336 if (ret < 0) { 341 337 kfree(iov); 342 - goto out; 338 + goto out_free_cdb; 343 339 } 344 340 345 341 iov_data_len = ret; ··· 362 358 GFP_KERNEL); 363 359 364 360 if (ret) 365 - goto out; 361 + goto out_free_cdb; 366 362 367 363 bio = rq->bio; 368 364 memset(sense, 0, sizeof(sense)); ··· 380 376 381 377 hdr->duration = jiffies_to_msecs(jiffies - start_time); 382 378 383 - return blk_complete_sghdr_rq(rq, hdr, bio); 384 - out: 379 + ret = blk_complete_sghdr_rq(rq, hdr, bio); 380 + 381 + out_free_cdb: 382 + if (rq->cmd != rq->__cmd) 383 + kfree(rq->cmd); 384 + out_put_request: 385 385 blk_put_request(rq); 386 + out: 386 387 return ret; 387 388 } 388 389 ··· 457 448 } 458 449 459 450 rq = blk_get_request(q, in_len ? 
WRITE : READ, __GFP_WAIT); 451 + if (!rq) { 452 + err = -ENOMEM; 453 + goto error; 454 + } 455 + blk_rq_set_block_pc(rq); 460 456 461 457 cmdlen = COMMAND_SIZE(opcode); 462 458 ··· 515 501 memset(sense, 0, sizeof(sense)); 516 502 rq->sense = sense; 517 503 rq->sense_len = 0; 518 - blk_rq_set_block_pc(rq); 519 504 520 505 blk_execute_rq(q, disk, rq, 0); 521 506 ··· 534 521 535 522 error: 536 523 kfree(buffer); 537 - blk_put_request(rq); 524 + if (rq) 525 + blk_put_request(rq); 538 526 return err; 539 527 } 540 528 EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
+17
drivers/acpi/acpi_lpss.c
··· 196 196 .setup = lpss_i2c_setup, 197 197 }; 198 198 199 + static struct lpss_shared_clock bsw_pwm_clock = { 200 + .name = "pwm_clk", 201 + .rate = 19200000, 202 + }; 203 + 204 + static struct lpss_device_desc bsw_pwm_dev_desc = { 205 + .clk_required = true, 206 + .save_ctx = true, 207 + .shared_clock = &bsw_pwm_clock, 208 + }; 209 + 199 210 #else 200 211 201 212 #define LPSS_ADDR(desc) (0UL) ··· 235 224 { "80860F41", LPSS_ADDR(byt_i2c_dev_desc) }, 236 225 { "INT33B2", }, 237 226 { "INT33FC", }, 227 + 228 + /* Braswell LPSS devices */ 229 + { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) }, 230 + { "8086228A", LPSS_ADDR(byt_uart_dev_desc) }, 231 + { "8086228E", LPSS_ADDR(byt_spi_dev_desc) }, 232 + { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) }, 238 233 239 234 { "INT3430", LPSS_ADDR(lpt_dev_desc) }, 240 235 { "INT3431", LPSS_ADDR(lpt_dev_desc) },
+18 -3
drivers/acpi/ec.c
··· 197 197 t->rdata[t->ri++] = acpi_ec_read_data(ec); 198 198 if (t->rlen == t->ri) { 199 199 t->flags |= ACPI_EC_COMMAND_COMPLETE; 200 + if (t->command == ACPI_EC_COMMAND_QUERY) 201 + pr_debug("hardware QR_EC completion\n"); 200 202 wakeup = true; 201 203 } 202 204 } else ··· 210 208 } 211 209 return wakeup; 212 210 } else { 213 - if ((status & ACPI_EC_FLAG_IBF) == 0) { 211 + /* 212 + * There is firmware refusing to respond QR_EC when SCI_EVT 213 + * is not set, for which case, we complete the QR_EC 214 + * without issuing it to the firmware. 215 + * https://bugzilla.kernel.org/show_bug.cgi?id=86211 216 + */ 217 + if (!(status & ACPI_EC_FLAG_SCI) && 218 + (t->command == ACPI_EC_COMMAND_QUERY)) { 219 + t->flags |= ACPI_EC_COMMAND_POLL; 220 + t->rdata[t->ri++] = 0x00; 221 + t->flags |= ACPI_EC_COMMAND_COMPLETE; 222 + pr_debug("software QR_EC completion\n"); 223 + wakeup = true; 224 + } else if ((status & ACPI_EC_FLAG_IBF) == 0) { 214 225 acpi_ec_write_cmd(ec, t->command); 215 226 t->flags |= ACPI_EC_COMMAND_POLL; 216 227 } else ··· 303 288 /* following two actions should be kept atomic */ 304 289 ec->curr = t; 305 290 start_transaction(ec); 306 - if (ec->curr->command == ACPI_EC_COMMAND_QUERY) 307 - clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); 308 291 spin_unlock_irqrestore(&ec->lock, tmp); 309 292 ret = ec_poll(ec); 310 293 spin_lock_irqsave(&ec->lock, tmp); 294 + if (ec->curr->command == ACPI_EC_COMMAND_QUERY) 295 + clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); 311 296 ec->curr = NULL; 312 297 spin_unlock_irqrestore(&ec->lock, tmp); 313 298 return ret;
+4
drivers/acpi/pci_irq.c
··· 484 484 /* Keep IOAPIC pin configuration when suspending */ 485 485 if (dev->dev.power.is_prepared) 486 486 return; 487 + #ifdef CONFIG_PM_RUNTIME 488 + if (dev->dev.power.runtime_status == RPM_SUSPENDING) 489 + return; 490 + #endif 487 491 488 492 entry = acpi_pci_irq_lookup(dev, pin); 489 493 if (!entry)
+11 -6
drivers/acpi/scan.c
··· 922 922 device->driver->ops.notify(device, event); 923 923 } 924 924 925 - static acpi_status acpi_device_notify_fixed(void *data) 925 + static void acpi_device_notify_fixed(void *data) 926 926 { 927 927 struct acpi_device *device = data; 928 928 929 929 /* Fixed hardware devices have no handles */ 930 930 acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device); 931 + } 932 + 933 + static acpi_status acpi_device_fixed_event(void *data) 934 + { 935 + acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data); 931 936 return AE_OK; 932 937 } 933 938 ··· 943 938 if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) 944 939 status = 945 940 acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, 946 - acpi_device_notify_fixed, 941 + acpi_device_fixed_event, 947 942 device); 948 943 else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) 949 944 status = 950 945 acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, 951 - acpi_device_notify_fixed, 946 + acpi_device_fixed_event, 952 947 device); 953 948 else 954 949 status = acpi_install_notify_handler(device->handle, ··· 965 960 { 966 961 if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) 967 962 acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, 968 - acpi_device_notify_fixed); 963 + acpi_device_fixed_event); 969 964 else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) 970 965 acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, 971 - acpi_device_notify_fixed); 966 + acpi_device_fixed_event); 972 967 else 973 968 acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, 974 969 acpi_device_notify); ··· 980 975 struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver); 981 976 int ret; 982 977 983 - if (acpi_dev->handler) 978 + if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev)) 984 979 return -EINVAL; 985 980 986 981 if (!acpi_drv->ops.add)
+5 -1
drivers/block/brd.c
··· 442 442 int rd_size = CONFIG_BLK_DEV_RAM_SIZE; 443 443 static int max_part; 444 444 static int part_shift; 445 + static int part_show = 0; 445 446 module_param(rd_nr, int, S_IRUGO); 446 447 MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices"); 447 448 module_param(rd_size, int, S_IRUGO); 448 449 MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes."); 449 450 module_param(max_part, int, S_IRUGO); 450 451 MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk"); 452 + module_param(part_show, int, S_IRUGO); 453 + MODULE_PARM_DESC(part_show, "Control RAM disk visibility in /proc/partitions"); 451 454 MODULE_LICENSE("GPL"); 452 455 MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR); 453 456 MODULE_ALIAS("rd"); ··· 504 501 disk->fops = &brd_fops; 505 502 disk->private_data = brd; 506 503 disk->queue = brd->brd_queue; 507 - disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO; 504 + if (!part_show) 505 + disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO; 508 506 sprintf(disk->disk_name, "ram%d", i); 509 507 set_capacity(disk, rd_size * 2); 510 508
-1
drivers/block/xsysace.c
··· 1203 1203 .probe = ace_probe, 1204 1204 .remove = ace_remove, 1205 1205 .driver = { 1206 - .owner = THIS_MODULE, 1207 1206 .name = "xsysace", 1208 1207 .of_match_table = ace_of_match, 1209 1208 },
+7 -3
drivers/block/zram/zram_drv.c
··· 378 378 /* Should NEVER happen. Return bio error if it does. */ 379 379 if (unlikely(ret)) { 380 380 pr_err("Decompression failed! err=%d, page=%u\n", ret, index); 381 - atomic64_inc(&zram->stats.failed_reads); 382 381 return ret; 383 382 } 384 383 ··· 546 547 zcomp_strm_release(zram->comp, zstrm); 547 548 if (is_partial_io(bvec)) 548 549 kfree(uncmem); 549 - if (ret) 550 - atomic64_inc(&zram->stats.failed_writes); 551 550 return ret; 552 551 } 553 552 ··· 561 564 } else { 562 565 atomic64_inc(&zram->stats.num_writes); 563 566 ret = zram_bvec_write(zram, bvec, index, offset); 567 + } 568 + 569 + if (unlikely(ret)) { 570 + if (rw == READ) 571 + atomic64_inc(&zram->stats.failed_reads); 572 + else 573 + atomic64_inc(&zram->stats.failed_writes); 564 574 } 565 575 566 576 return ret;
+1 -1
drivers/block/zram/zram_drv.h
··· 84 84 atomic64_t compr_data_size; /* compressed size of pages stored */ 85 85 atomic64_t num_reads; /* failed + successful */ 86 86 atomic64_t num_writes; /* --do-- */ 87 - atomic64_t failed_reads; /* should NEVER! happen */ 87 + atomic64_t failed_reads; /* can happen when memory is too low */ 88 88 atomic64_t failed_writes; /* can happen when memory is too low */ 89 89 atomic64_t invalid_io; /* non-page-aligned I/O requests */ 90 90 atomic64_t notify_free; /* no. of swap slot free notifications */
+2 -1
drivers/cpufreq/intel_pstate.c
··· 660 660 ICPU(0x3f, core_params), 661 661 ICPU(0x45, core_params), 662 662 ICPU(0x46, core_params), 663 + ICPU(0x4c, byt_params), 663 664 ICPU(0x4f, core_params), 664 665 ICPU(0x56, core_params), 665 666 {} ··· 689 688 690 689 add_timer_on(&cpu->timer, cpunum); 691 690 692 - pr_info("Intel pstate controlling: cpu %d\n", cpunum); 691 + pr_debug("Intel pstate controlling: cpu %d\n", cpunum); 693 692 694 693 return 0; 695 694 }
+1 -1
drivers/cpufreq/s5pv210-cpufreq.c
··· 501 501 return val >> 8; 502 502 } 503 503 504 - static int __init s5pv210_cpu_init(struct cpufreq_policy *policy) 504 + static int s5pv210_cpu_init(struct cpufreq_policy *policy) 505 505 { 506 506 unsigned long mem_type; 507 507 int ret;
+3 -10
drivers/cpuidle/cpuidle-big_little.c
··· 138 138 return idx; 139 139 } 140 140 141 - static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int cpu_id) 141 + static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id) 142 142 { 143 - struct cpuinfo_arm *cpu_info; 144 143 struct cpumask *cpumask; 145 - unsigned long cpuid; 146 144 int cpu; 147 145 148 146 cpumask = kzalloc(cpumask_size(), GFP_KERNEL); 149 147 if (!cpumask) 150 148 return -ENOMEM; 151 149 152 - for_each_possible_cpu(cpu) { 153 - cpu_info = &per_cpu(cpu_data, cpu); 154 - cpuid = is_smp() ? cpu_info->cpuid : read_cpuid_id(); 155 - 156 - /* read cpu id part number */ 157 - if ((cpuid & 0xFFF0) == cpu_id) 150 + for_each_possible_cpu(cpu) 151 + if (smp_cpuid_part(cpu) == part_id) 158 152 cpumask_set_cpu(cpu, cpumask); 159 - } 160 153 161 154 drv->cpumask = cpumask; 162 155
+1 -1
drivers/dma-buf/fence.c
··· 29 29 EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on); 30 30 EXPORT_TRACEPOINT_SYMBOL(fence_emit); 31 31 32 - /** 32 + /* 33 33 * fence context counter: each execution context should have its own 34 34 * fence context, this allows checking if fences belong to the same 35 35 * context or not. One device can have multiple separate contexts,
+1
drivers/gpu/drm/ast/ast_tables.h
··· 99 99 {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ 100 100 {0x77, 0x58, 0x80}, /* 17: VCLK119 */ 101 101 {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ 102 + {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ 102 103 }; 103 104 104 105 static struct ast_vbios_stdtable vbios_stdtable[] = {
+2 -1
drivers/gpu/drm/drm_crtc.c
··· 4696 4696 return -EINVAL; 4697 4697 4698 4698 /* overflow checks for 32bit size calculations */ 4699 + /* NOTE: DIV_ROUND_UP() can overflow */ 4699 4700 cpp = DIV_ROUND_UP(args->bpp, 8); 4700 - if (cpp > 0xffffffffU / args->width) 4701 + if (!cpp || cpp > 0xffffffffU / args->width) 4701 4702 return -EINVAL; 4702 4703 stride = cpp * args->width; 4703 4704 if (args->height > 0xffffffffU / stride)
+2
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
··· 397 397 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 398 398 DBG("%s", mdp4_crtc->name); 399 399 /* make sure we hold a ref to mdp clks while setting up mode: */ 400 + drm_crtc_vblank_get(crtc); 400 401 mdp4_enable(get_kms(crtc)); 401 402 mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 402 403 } ··· 408 407 crtc_flush(crtc); 409 408 /* drop the ref to mdp clk's that we got in prepare: */ 410 409 mdp4_disable(get_kms(crtc)); 410 + drm_crtc_vblank_put(crtc); 411 411 } 412 412 413 413 static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+1 -2
drivers/gpu/drm/msm/msm_drv.c
··· 974 974 975 975 for (i = 0; i < ARRAY_SIZE(devnames); i++) { 976 976 struct device *dev; 977 - int ret; 978 977 979 978 dev = bus_find_device_by_name(&platform_bus_type, 980 979 NULL, devnames[i]); 981 980 if (!dev) { 982 - dev_info(master, "still waiting for %s\n", devnames[i]); 981 + dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]); 983 982 return -EPROBE_DEFER; 984 983 } 985 984
+1 -1
drivers/gpu/drm/msm/msm_fbdev.c
··· 143 143 ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr); 144 144 if (ret) { 145 145 dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret); 146 - goto fail; 146 + goto fail_unlock; 147 147 } 148 148 149 149 fbi = framebuffer_alloc(0, dev->dev);
+2 -2
drivers/gpu/drm/msm/msm_iommu.c
··· 27 27 static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev, 28 28 unsigned long iova, int flags, void *arg) 29 29 { 30 - DBG("*** fault: iova=%08lx, flags=%d", iova, flags); 31 - return -ENOSYS; 30 + pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags); 31 + return 0; 32 32 } 33 33 34 34 static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+19 -7
drivers/gpu/drm/radeon/cik.c
··· 5749 5749 WREG32(0x15D8, 0); 5750 5750 WREG32(0x15DC, 0); 5751 5751 5752 - /* empty context1-15 */ 5753 - /* FIXME start with 4G, once using 2 level pt switch to full 5754 - * vm size space 5755 - */ 5752 + /* restore context1-15 */ 5756 5753 /* set vm size, must be a multiple of 4 */ 5757 5754 WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); 5758 5755 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); 5759 5756 for (i = 1; i < 16; i++) { 5760 5757 if (i < 8) 5761 5758 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), 5762 - rdev->gart.table_addr >> 12); 5759 + rdev->vm_manager.saved_table_addr[i]); 5763 5760 else 5764 5761 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2), 5765 - rdev->gart.table_addr >> 12); 5762 + rdev->vm_manager.saved_table_addr[i]); 5766 5763 } 5767 5764 5768 5765 /* enable context1-15 */ ··· 5824 5827 */ 5825 5828 static void cik_pcie_gart_disable(struct radeon_device *rdev) 5826 5829 { 5830 + unsigned i; 5831 + 5832 + for (i = 1; i < 16; ++i) { 5833 + uint32_t reg; 5834 + if (i < 8) 5835 + reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2); 5836 + else 5837 + reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2); 5838 + rdev->vm_manager.saved_table_addr[i] = RREG32(reg); 5839 + } 5840 + 5827 5841 /* Disable all tables */ 5828 5842 WREG32(VM_CONTEXT0_CNTL, 0); 5829 5843 WREG32(VM_CONTEXT1_CNTL, 0); ··· 9563 9555 int ret, i; 9564 9556 u16 tmp16; 9565 9557 9558 + if (pci_is_root_bus(rdev->pdev->bus)) 9559 + return; 9560 + 9566 9561 if (radeon_pcie_gen2 == 0) 9567 9562 return; 9568 9563 ··· 9792 9781 if (orig != data) 9793 9782 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data); 9794 9783 9795 - if (!disable_clkreq) { 9784 + if (!disable_clkreq && 9785 + !pci_is_root_bus(rdev->pdev->bus)) { 9796 9786 struct pci_dev *root = rdev->pdev->bus->self; 9797 9787 u32 lnkcap; 9798 9788
+8 -1
drivers/gpu/drm/radeon/ni.c
··· 1271 1271 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0); 1272 1272 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn); 1273 1273 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), 1274 - rdev->gart.table_addr >> 12); 1274 + rdev->vm_manager.saved_table_addr[i]); 1275 1275 } 1276 1276 1277 1277 /* enable context1-7 */ ··· 1303 1303 1304 1304 static void cayman_pcie_gart_disable(struct radeon_device *rdev) 1305 1305 { 1306 + unsigned i; 1307 + 1308 + for (i = 1; i < 8; ++i) { 1309 + rdev->vm_manager.saved_table_addr[i] = RREG32( 1310 + VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2)); 1311 + } 1312 + 1306 1313 /* Disable all tables */ 1307 1314 WREG32(VM_CONTEXT0_CNTL, 0); 1308 1315 WREG32(VM_CONTEXT1_CNTL, 0);
+8 -18
drivers/gpu/drm/radeon/r600.c
··· 1812 1812 { 1813 1813 u32 tiling_config; 1814 1814 u32 ramcfg; 1815 - u32 cc_rb_backend_disable; 1816 1815 u32 cc_gc_shader_pipe_config; 1817 1816 u32 tmp; 1818 1817 int i, j; ··· 1938 1939 } 1939 1940 tiling_config |= BANK_SWAPS(1); 1940 1941 1941 - cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; 1942 - tmp = R6XX_MAX_BACKENDS - 1943 - r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK); 1944 - if (tmp < rdev->config.r600.max_backends) { 1945 - rdev->config.r600.max_backends = tmp; 1946 - } 1947 - 1948 1942 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00; 1949 - tmp = R6XX_MAX_PIPES - 1950 - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK); 1951 - if (tmp < rdev->config.r600.max_pipes) { 1952 - rdev->config.r600.max_pipes = tmp; 1953 - } 1954 - tmp = R6XX_MAX_SIMDS - 1955 - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK); 1956 - if (tmp < rdev->config.r600.max_simds) { 1957 - rdev->config.r600.max_simds = tmp; 1958 - } 1959 1943 tmp = rdev->config.r600.max_simds - 1960 1944 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK); 1961 1945 rdev->config.r600.active_simds = tmp; 1962 1946 1963 1947 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK; 1948 + tmp = 0; 1949 + for (i = 0; i < rdev->config.r600.max_backends; i++) 1950 + tmp |= (1 << i); 1951 + /* if all the backends are disabled, fix it up here */ 1952 + if ((disabled_rb_mask & tmp) == tmp) { 1953 + for (i = 0; i < rdev->config.r600.max_backends; i++) 1954 + disabled_rb_mask &= ~(1 << i); 1955 + } 1964 1956 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; 1965 1957 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends, 1966 1958 R6XX_MAX_BACKENDS, disabled_rb_mask);
+2
drivers/gpu/drm/radeon/radeon.h
··· 915 915 u64 vram_base_offset; 916 916 /* is vm enabled? */ 917 917 bool enabled; 918 + /* for hw to save the PD addr on suspend/resume */ 919 + uint32_t saved_table_addr[RADEON_NUM_VM]; 918 920 }; 919 921 920 922 /*
+8 -15
drivers/gpu/drm/radeon/rv770.c
··· 1177 1177 u32 hdp_host_path_cntl; 1178 1178 u32 sq_dyn_gpr_size_simd_ab_0; 1179 1179 u32 gb_tiling_config = 0; 1180 - u32 cc_rb_backend_disable = 0; 1181 1180 u32 cc_gc_shader_pipe_config = 0; 1182 1181 u32 mc_arb_ramcfg; 1183 1182 u32 db_debug4, tmp; ··· 1310 1311 WREG32(SPI_CONFIG_CNTL, 0); 1311 1312 } 1312 1313 1313 - cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; 1314 - tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16); 1315 - if (tmp < rdev->config.rv770.max_backends) { 1316 - rdev->config.rv770.max_backends = tmp; 1317 - } 1318 - 1319 1314 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; 1320 - tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK); 1321 - if (tmp < rdev->config.rv770.max_pipes) { 1322 - rdev->config.rv770.max_pipes = tmp; 1323 - } 1324 - tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK); 1325 - if (tmp < rdev->config.rv770.max_simds) { 1326 - rdev->config.rv770.max_simds = tmp; 1327 - } 1328 1315 tmp = rdev->config.rv770.max_simds - 1329 1316 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK); 1330 1317 rdev->config.rv770.active_simds = tmp; ··· 1333 1348 rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes; 1334 1349 1335 1350 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK; 1351 + tmp = 0; 1352 + for (i = 0; i < rdev->config.rv770.max_backends; i++) 1353 + tmp |= (1 << i); 1354 + /* if all the backends are disabled, fix it up here */ 1355 + if ((disabled_rb_mask & tmp) == tmp) { 1356 + for (i = 0; i < rdev->config.rv770.max_backends; i++) 1357 + disabled_rb_mask &= ~(1 << i); 1358 + } 1336 1359 tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; 1337 1360 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends, 1338 1361 R7XX_MAX_BACKENDS, disabled_rb_mask);
+18 -3
drivers/gpu/drm/radeon/si.c
··· 4290 4290 for (i = 1; i < 16; i++) { 4291 4291 if (i < 8) 4292 4292 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), 4293 - rdev->gart.table_addr >> 12); 4293 + rdev->vm_manager.saved_table_addr[i]); 4294 4294 else 4295 4295 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2), 4296 - rdev->gart.table_addr >> 12); 4296 + rdev->vm_manager.saved_table_addr[i]); 4297 4297 } 4298 4298 4299 4299 /* enable context1-15 */ ··· 4325 4325 4326 4326 static void si_pcie_gart_disable(struct radeon_device *rdev) 4327 4327 { 4328 + unsigned i; 4329 + 4330 + for (i = 1; i < 16; ++i) { 4331 + uint32_t reg; 4332 + if (i < 8) 4333 + reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2); 4334 + else 4335 + reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2); 4336 + rdev->vm_manager.saved_table_addr[i] = RREG32(reg); 4337 + } 4338 + 4328 4339 /* Disable all tables */ 4329 4340 WREG32(VM_CONTEXT0_CNTL, 0); 4330 4341 WREG32(VM_CONTEXT1_CNTL, 0); ··· 7188 7177 int ret, i; 7189 7178 u16 tmp16; 7190 7179 7180 + if (pci_is_root_bus(rdev->pdev->bus)) 7181 + return; 7182 + 7191 7183 if (radeon_pcie_gen2 == 0) 7192 7184 return; 7193 7185 ··· 7468 7454 if (orig != data) 7469 7455 WREG32_PIF_PHY1(PB1_PIF_CNTL, data); 7470 7456 7471 - if (!disable_clkreq) { 7457 + if (!disable_clkreq && 7458 + !pci_is_root_bus(rdev->pdev->bus)) { 7472 7459 struct pci_dev *root = rdev->pdev->bus->self; 7473 7460 u32 lnkcap; 7474 7461
+1
drivers/gpu/drm/sti/Kconfig
··· 1 1 config DRM_STI 2 2 tristate "DRM Support for STMicroelectronics SoC stiH41x Series" 3 3 depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) 4 + select RESET_CONTROLLER 4 5 select DRM_KMS_HELPER 5 6 select DRM_GEM_CMA_HELPER 6 7 select DRM_KMS_CMA_HELPER
+2 -2
drivers/gpu/drm/sti/sti_drm_drv.c
··· 201 201 master = platform_device_register_resndata(dev, 202 202 DRIVER_NAME "__master", -1, 203 203 NULL, 0, NULL, 0); 204 - if (!master) 205 - return -EINVAL; 204 + if (IS_ERR(master)) 205 + return PTR_ERR(master); 206 206 207 207 platform_set_drvdata(pdev, master); 208 208 return 0;
+5 -5
drivers/gpu/drm/sti/sti_hda.c
··· 730 730 return -ENOMEM; 731 731 } 732 732 hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); 733 - if (IS_ERR(hda->regs)) 734 - return PTR_ERR(hda->regs); 733 + if (!hda->regs) 734 + return -ENOMEM; 735 735 736 736 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 737 737 "video-dacs-ctrl"); 738 738 if (res) { 739 739 hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start, 740 740 resource_size(res)); 741 - if (IS_ERR(hda->video_dacs_ctrl)) 742 - return PTR_ERR(hda->video_dacs_ctrl); 741 + if (!hda->video_dacs_ctrl) 742 + return -ENOMEM; 743 743 } else { 744 744 /* If no existing video-dacs-ctrl resource continue the probe */ 745 745 DRM_DEBUG_DRIVER("No video-dacs-ctrl resource\n"); ··· 770 770 return 0; 771 771 } 772 772 773 - static struct of_device_id hda_of_match[] = { 773 + static const struct of_device_id hda_of_match[] = { 774 774 { .compatible = "st,stih416-hda", }, 775 775 { .compatible = "st,stih407-hda", }, 776 776 { /* end node */ }
+5 -5
drivers/gpu/drm/sti/sti_hdmi.c
··· 677 677 .unbind = sti_hdmi_unbind, 678 678 }; 679 679 680 - static struct of_device_id hdmi_of_match[] = { 680 + static const struct of_device_id hdmi_of_match[] = { 681 681 { 682 682 .compatible = "st,stih416-hdmi", 683 683 .data = &tx3g0c55phy_ops, ··· 713 713 return -ENOMEM; 714 714 } 715 715 hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); 716 - if (IS_ERR(hdmi->regs)) 717 - return PTR_ERR(hdmi->regs); 716 + if (!hdmi->regs) 717 + return -ENOMEM; 718 718 719 719 if (of_device_is_compatible(np, "st,stih416-hdmi")) { 720 720 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, ··· 725 725 } 726 726 hdmi->syscfg = devm_ioremap_nocache(dev, res->start, 727 727 resource_size(res)); 728 - if (IS_ERR(hdmi->syscfg)) 729 - return PTR_ERR(hdmi->syscfg); 728 + if (!hdmi->syscfg) 729 + return -ENOMEM; 730 730 731 731 } 732 732
+3 -3
drivers/gpu/drm/sti/sti_tvout.c
··· 591 591 return -ENOMEM; 592 592 } 593 593 tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); 594 - if (IS_ERR(tvout->regs)) 595 - return PTR_ERR(tvout->regs); 594 + if (!tvout->regs) 595 + return -ENOMEM; 596 596 597 597 /* get reset resources */ 598 598 tvout->reset = devm_reset_control_get(dev, "tvout"); ··· 624 624 return 0; 625 625 } 626 626 627 - static struct of_device_id tvout_of_match[] = { 627 + static const struct of_device_id tvout_of_match[] = { 628 628 { .compatible = "st,stih416-tvout", }, 629 629 { .compatible = "st,stih407-tvout", }, 630 630 { /* end node */ }
+25 -18
drivers/hid/hid-logitech-dj.c
··· 656 656 struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); 657 657 struct dj_report *dj_report = (struct dj_report *) data; 658 658 unsigned long flags; 659 - bool report_processed = false; 660 659 661 660 dbg_hid("%s, size:%d\n", __func__, size); 662 661 ··· 682 683 * device (via hid_input_report() ) and return 1 so hid-core does not do 683 684 * anything else with it. 684 685 */ 686 + 687 + /* case 1) */ 688 + if (data[0] != REPORT_ID_DJ_SHORT) 689 + return false; 690 + 685 691 if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || 686 692 (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { 687 - dev_err(&hdev->dev, "%s: invalid device index:%d\n", 693 + /* 694 + * Device index is wrong, bail out. 695 + * This driver can ignore safely the receiver notifications, 696 + * so ignore those reports too. 697 + */ 698 + if (dj_report->device_index != DJ_RECEIVER_INDEX) 699 + dev_err(&hdev->dev, "%s: invalid device index:%d\n", 688 700 __func__, dj_report->device_index); 689 701 return false; 690 702 } 691 703 692 704 spin_lock_irqsave(&djrcv_dev->lock, flags); 693 - if (dj_report->report_id == REPORT_ID_DJ_SHORT) { 694 - switch (dj_report->report_type) { 695 - case REPORT_TYPE_NOTIF_DEVICE_PAIRED: 696 - case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: 697 - logi_dj_recv_queue_notification(djrcv_dev, dj_report); 698 - break; 699 - case REPORT_TYPE_NOTIF_CONNECTION_STATUS: 700 - if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] == 701 - STATUS_LINKLOSS) { 702 - logi_dj_recv_forward_null_report(djrcv_dev, dj_report); 703 - } 704 - break; 705 - default: 706 - logi_dj_recv_forward_report(djrcv_dev, dj_report); 705 + switch (dj_report->report_type) { 706 + case REPORT_TYPE_NOTIF_DEVICE_PAIRED: 707 + case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: 708 + logi_dj_recv_queue_notification(djrcv_dev, dj_report); 709 + break; 710 + case REPORT_TYPE_NOTIF_CONNECTION_STATUS: 711 + if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] == 712 + STATUS_LINKLOSS) { 713 + 
logi_dj_recv_forward_null_report(djrcv_dev, dj_report); 707 714 } 708 - report_processed = true; 715 + break; 716 + default: 717 + logi_dj_recv_forward_report(djrcv_dev, dj_report); 709 718 } 710 719 spin_unlock_irqrestore(&djrcv_dev->lock, flags); 711 720 712 - return report_processed; 721 + return true; 713 722 } 714 723 715 724 static int logi_dj_probe(struct hid_device *hdev,
+1
drivers/hid/hid-logitech-dj.h
··· 27 27 28 28 #define DJ_MAX_PAIRED_DEVICES 6 29 29 #define DJ_MAX_NUMBER_NOTIFICATIONS 8 30 + #define DJ_RECEIVER_INDEX 0 30 31 #define DJ_DEVICE_INDEX_MIN 1 31 32 #define DJ_DEVICE_INDEX_MAX 6 32 33
+10
drivers/hid/hid-magicmouse.c
··· 290 290 if (size < 4 || ((size - 4) % 9) != 0) 291 291 return 0; 292 292 npoints = (size - 4) / 9; 293 + if (npoints > 15) { 294 + hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n", 295 + size); 296 + return 0; 297 + } 293 298 msc->ntouches = 0; 294 299 for (ii = 0; ii < npoints; ii++) 295 300 magicmouse_emit_touch(msc, ii, data + ii * 9 + 4); ··· 312 307 if (size < 6 || ((size - 6) % 8) != 0) 313 308 return 0; 314 309 npoints = (size - 6) / 8; 310 + if (npoints > 15) { 311 + hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n", 312 + size); 313 + return 0; 314 + } 315 315 msc->ntouches = 0; 316 316 for (ii = 0; ii < npoints; ii++) 317 317 magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
+6
drivers/hid/hid-picolcd_core.c
··· 350 350 if (!data) 351 351 return 1; 352 352 353 + if (size > 64) { 354 + hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n", 355 + size); 356 + return 0; 357 + } 358 + 353 359 if (report->id == REPORT_KEY_STATE) { 354 360 if (data->input_keys) 355 361 ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
+19 -6
drivers/md/dm-crypt.c
··· 1688 1688 unsigned int key_size, opt_params; 1689 1689 unsigned long long tmpll; 1690 1690 int ret; 1691 + size_t iv_size_padding; 1691 1692 struct dm_arg_set as; 1692 1693 const char *opt_string; 1693 1694 char dummy; ··· 1725 1724 1726 1725 cc->dmreq_start = sizeof(struct ablkcipher_request); 1727 1726 cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc)); 1728 - cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment()); 1729 - cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) & 1730 - ~(crypto_tfm_ctx_alignment() - 1); 1727 + cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request)); 1728 + 1729 + if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) { 1730 + /* Allocate the padding exactly */ 1731 + iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request)) 1732 + & crypto_ablkcipher_alignmask(any_tfm(cc)); 1733 + } else { 1734 + /* 1735 + * If the cipher requires greater alignment than kmalloc 1736 + * alignment, we don't know the exact position of the 1737 + * initialization vector. We must assume worst case. 1738 + */ 1739 + iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc)); 1740 + } 1731 1741 1732 1742 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + 1733 - sizeof(struct dm_crypt_request) + cc->iv_size); 1743 + sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size); 1734 1744 if (!cc->req_pool) { 1735 1745 ti->error = "Cannot allocate crypt request mempool"; 1736 1746 goto bad; 1737 1747 } 1738 1748 1739 1749 cc->per_bio_data_size = ti->per_bio_data_size = 1740 - sizeof(struct dm_crypt_io) + cc->dmreq_start + 1741 - sizeof(struct dm_crypt_request) + cc->iv_size; 1750 + ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + 1751 + sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size, 1752 + ARCH_KMALLOC_MINALIGN); 1742 1753 1743 1754 cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); 1744 1755 if (!cc->page_pool) {
+1 -1
drivers/mfd/ab8500-core.c
··· 1754 1754 if (ret) 1755 1755 return ret; 1756 1756 1757 - #if CONFIG_DEBUG_FS 1757 + #ifdef CONFIG_DEBUG_FS 1758 1758 /* Pass to debugfs */ 1759 1759 ab8500_debug_resources[0].start = ab8500->irq; 1760 1760 ab8500_debug_resources[0].end = ab8500->irq;
+1 -1
drivers/mfd/htc-i2cpld.c
··· 404 404 } 405 405 406 406 i2c_set_clientdata(client, chip); 407 - snprintf(client->name, I2C_NAME_SIZE, "Chip_0x%d", client->addr); 407 + snprintf(client->name, I2C_NAME_SIZE, "Chip_0x%x", client->addr); 408 408 chip->client = client; 409 409 410 410 /* Reset the chip */
+1 -1
drivers/mfd/omap-usb-host.c
··· 647 647 default: 648 648 omap->nports = OMAP3_HS_USB_PORTS; 649 649 dev_dbg(dev, 650 - "USB HOST Rev:0x%d not recognized, assuming %d ports\n", 650 + "USB HOST Rev:0x%x not recognized, assuming %d ports\n", 651 651 omap->usbhs_rev, omap->nports); 652 652 break; 653 653 }
+10 -10
drivers/mfd/twl4030-power.c
··· 724 724 * above. 725 725 */ 726 726 static struct twl4030_resconfig omap3_idle_rconfig[] = { 727 - TWL_REMAP_SLEEP(RES_VAUX1, DEV_GRP_NULL, 0, 0), 728 - TWL_REMAP_SLEEP(RES_VAUX2, DEV_GRP_NULL, 0, 0), 729 - TWL_REMAP_SLEEP(RES_VAUX3, DEV_GRP_NULL, 0, 0), 730 - TWL_REMAP_SLEEP(RES_VAUX4, DEV_GRP_NULL, 0, 0), 731 - TWL_REMAP_SLEEP(RES_VMMC1, DEV_GRP_NULL, 0, 0), 732 - TWL_REMAP_SLEEP(RES_VMMC2, DEV_GRP_NULL, 0, 0), 727 + TWL_REMAP_SLEEP(RES_VAUX1, TWL4030_RESCONFIG_UNDEF, 0, 0), 728 + TWL_REMAP_SLEEP(RES_VAUX2, TWL4030_RESCONFIG_UNDEF, 0, 0), 729 + TWL_REMAP_SLEEP(RES_VAUX3, TWL4030_RESCONFIG_UNDEF, 0, 0), 730 + TWL_REMAP_SLEEP(RES_VAUX4, TWL4030_RESCONFIG_UNDEF, 0, 0), 731 + TWL_REMAP_SLEEP(RES_VMMC1, TWL4030_RESCONFIG_UNDEF, 0, 0), 732 + TWL_REMAP_SLEEP(RES_VMMC2, TWL4030_RESCONFIG_UNDEF, 0, 0), 733 733 TWL_REMAP_OFF(RES_VPLL1, DEV_GRP_P1, 3, 1), 734 734 TWL_REMAP_SLEEP(RES_VPLL2, DEV_GRP_P1, 0, 0), 735 - TWL_REMAP_SLEEP(RES_VSIM, DEV_GRP_NULL, 0, 0), 736 - TWL_REMAP_SLEEP(RES_VDAC, DEV_GRP_NULL, 0, 0), 735 + TWL_REMAP_SLEEP(RES_VSIM, TWL4030_RESCONFIG_UNDEF, 0, 0), 736 + TWL_REMAP_SLEEP(RES_VDAC, TWL4030_RESCONFIG_UNDEF, 0, 0), 737 737 TWL_REMAP_SLEEP(RES_VINTANA1, TWL_DEV_GRP_P123, 1, 2), 738 738 TWL_REMAP_SLEEP(RES_VINTANA2, TWL_DEV_GRP_P123, 0, 2), 739 739 TWL_REMAP_SLEEP(RES_VINTDIG, TWL_DEV_GRP_P123, 1, 2), 740 740 TWL_REMAP_SLEEP(RES_VIO, TWL_DEV_GRP_P123, 2, 2), 741 741 TWL_REMAP_OFF(RES_VDD1, DEV_GRP_P1, 4, 1), 742 742 TWL_REMAP_OFF(RES_VDD2, DEV_GRP_P1, 3, 1), 743 - TWL_REMAP_SLEEP(RES_VUSB_1V5, DEV_GRP_NULL, 0, 0), 744 - TWL_REMAP_SLEEP(RES_VUSB_1V8, DEV_GRP_NULL, 0, 0), 743 + TWL_REMAP_SLEEP(RES_VUSB_1V5, TWL4030_RESCONFIG_UNDEF, 0, 0), 744 + TWL_REMAP_SLEEP(RES_VUSB_1V8, TWL4030_RESCONFIG_UNDEF, 0, 0), 745 745 TWL_REMAP_SLEEP(RES_VUSB_3V1, TWL_DEV_GRP_P123, 0, 0), 746 746 /* Resource #20 USB charge pump skipped */ 747 747 TWL_REMAP_SLEEP(RES_REGEN, TWL_DEV_GRP_P123, 2, 1),
+1
drivers/misc/mei/client.c
··· 601 601 cl->timer_count = MEI_CONNECT_TIMEOUT; 602 602 list_add_tail(&cb->list, &dev->ctrl_rd_list.list); 603 603 } else { 604 + cl->state = MEI_FILE_INITIALIZING; 604 605 list_add_tail(&cb->list, &dev->ctrl_wr_list.list); 605 606 } 606 607
+5 -6
drivers/misc/mei/nfc.c
··· 342 342 ndev = (struct mei_nfc_dev *) cldev->priv_data; 343 343 dev = ndev->cl->dev; 344 344 345 + err = -ENOMEM; 345 346 mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL); 346 347 if (!mei_buf) 347 - return -ENOMEM; 348 + goto out; 348 349 349 350 hdr = (struct mei_nfc_hci_hdr *) mei_buf; 350 351 hdr->cmd = MEI_NFC_CMD_HCI_SEND; ··· 355 354 hdr->data_size = length; 356 355 357 356 memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length); 358 - 359 357 err = __mei_cl_send(ndev->cl, mei_buf, length + MEI_NFC_HEADER_SIZE); 360 358 if (err < 0) 361 - return err; 362 - 363 - kfree(mei_buf); 359 + goto out; 364 360 365 361 if (!wait_event_interruptible_timeout(ndev->send_wq, 366 362 ndev->recv_req_id == ndev->req_id, HZ)) { ··· 366 368 } else { 367 369 ndev->req_id++; 368 370 } 369 - 371 + out: 372 + kfree(mei_buf); 370 373 return err; 371 374 } 372 375
+12 -4
drivers/mtd/nand/omap2.c
··· 931 931 u32 val; 932 932 933 933 val = readl(info->reg.gpmc_ecc_config); 934 - if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs) 934 + if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs) 935 935 return -EINVAL; 936 936 937 937 /* read ecc result */ ··· 1794 1794 } 1795 1795 1796 1796 /* populate MTD interface based on ECC scheme */ 1797 - nand_chip->ecc.layout = &omap_oobinfo; 1798 1797 ecclayout = &omap_oobinfo; 1799 1798 switch (info->ecc_opt) { 1799 + case OMAP_ECC_HAM1_CODE_SW: 1800 + nand_chip->ecc.mode = NAND_ECC_SOFT; 1801 + break; 1802 + 1800 1803 case OMAP_ECC_HAM1_CODE_HW: 1801 1804 pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n"); 1802 1805 nand_chip->ecc.mode = NAND_ECC_HW; ··· 1851 1848 nand_chip->ecc.priv = nand_bch_init(mtd, 1852 1849 nand_chip->ecc.size, 1853 1850 nand_chip->ecc.bytes, 1854 - &nand_chip->ecc.layout); 1851 + &ecclayout); 1855 1852 if (!nand_chip->ecc.priv) { 1856 1853 pr_err("nand: error: unable to use s/w BCH library\n"); 1857 1854 err = -EINVAL; ··· 1926 1923 nand_chip->ecc.priv = nand_bch_init(mtd, 1927 1924 nand_chip->ecc.size, 1928 1925 nand_chip->ecc.bytes, 1929 - &nand_chip->ecc.layout); 1926 + &ecclayout); 1930 1927 if (!nand_chip->ecc.priv) { 1931 1928 pr_err("nand: error: unable to use s/w BCH library\n"); 1932 1929 err = -EINVAL; ··· 2015 2012 goto return_error; 2016 2013 } 2017 2014 2015 + if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) 2016 + goto scan_tail; 2017 + 2018 2018 /* all OOB bytes from oobfree->offset till end off OOB are free */ 2019 2019 ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset; 2020 2020 /* check if NAND device's OOB is enough to store ECC signatures */ ··· 2027 2021 err = -EINVAL; 2028 2022 goto return_error; 2029 2023 } 2024 + nand_chip->ecc.layout = ecclayout; 2030 2025 2026 + scan_tail: 2031 2027 /* second phase scan */ 2032 2028 if (nand_scan_tail(mtd)) { 2033 2029 err = -ENXIO;
+1 -2
drivers/pinctrl/nomadik/pinctrl-abx500.c
··· 620 620 } else 621 621 seq_printf(s, " %-9s", chip->get(chip, offset) ? "hi" : "lo"); 622 622 623 - if (pctldev) 624 - mode = abx500_get_mode(pctldev, chip, offset); 623 + mode = abx500_get_mode(pctldev, chip, offset); 625 624 626 625 seq_printf(s, " %s", (mode < 0) ? "unknown" : modes[mode]); 627 626
+2 -2
drivers/pinctrl/pinctrl-at91.c
··· 497 497 static void at91_pin_dbg(const struct device *dev, const struct at91_pmx_pin *pin) 498 498 { 499 499 if (pin->mux) { 500 - dev_dbg(dev, "pio%c%d configured as periph%c with conf = 0x%lu\n", 500 + dev_dbg(dev, "pio%c%d configured as periph%c with conf = 0x%lx\n", 501 501 pin->bank + 'A', pin->pin, pin->mux - 1 + 'A', pin->conf); 502 502 } else { 503 - dev_dbg(dev, "pio%c%d configured as gpio with conf = 0x%lu\n", 503 + dev_dbg(dev, "pio%c%d configured as gpio with conf = 0x%lx\n", 504 504 pin->bank + 'A', pin->pin, pin->conf); 505 505 } 506 506 }
+9 -6
drivers/pinctrl/pinctrl-rockchip.c
··· 438 438 int reg, ret, mask; 439 439 unsigned long flags; 440 440 u8 bit; 441 - u32 data; 441 + u32 data, rmask; 442 442 443 443 if (iomux_num > 3) 444 444 return -EINVAL; ··· 478 478 spin_lock_irqsave(&bank->slock, flags); 479 479 480 480 data = (mask << (bit + 16)); 481 + rmask = data | (data >> 16); 481 482 data |= (mux & mask) << bit; 482 - ret = regmap_write(regmap, reg, data); 483 + ret = regmap_update_bits(regmap, reg, rmask, data); 483 484 484 485 spin_unlock_irqrestore(&bank->slock, flags); 485 486 ··· 635 634 struct regmap *regmap; 636 635 unsigned long flags; 637 636 int reg, ret, i; 638 - u32 data; 637 + u32 data, rmask; 639 638 u8 bit; 640 639 641 640 rk3288_calc_drv_reg_and_bit(bank, pin_num, &regmap, &reg, &bit); ··· 658 657 659 658 /* enable the write to the equivalent lower bits */ 660 659 data = ((1 << RK3288_DRV_BITS_PER_PIN) - 1) << (bit + 16); 660 + rmask = data | (data >> 16); 661 661 data |= (ret << bit); 662 662 663 - ret = regmap_write(regmap, reg, data); 663 + ret = regmap_update_bits(regmap, reg, rmask, data); 664 664 spin_unlock_irqrestore(&bank->slock, flags); 665 665 666 666 return ret; ··· 724 722 int reg, ret; 725 723 unsigned long flags; 726 724 u8 bit; 727 - u32 data; 725 + u32 data, rmask; 728 726 729 727 dev_dbg(info->dev, "setting pull of GPIO%d-%d to %d\n", 730 728 bank->bank_num, pin_num, pull); ··· 752 750 753 751 /* enable the write to the equivalent lower bits */ 754 752 data = ((1 << RK3188_PULL_BITS_PER_PIN) - 1) << (bit + 16); 753 + rmask = data | (data >> 16); 755 754 756 755 switch (pull) { 757 756 case PIN_CONFIG_BIAS_DISABLE: ··· 773 770 return -EINVAL; 774 771 } 775 772 776 - ret = regmap_write(regmap, reg, data); 773 + ret = regmap_update_bits(regmap, reg, rmask, data); 777 774 778 775 spin_unlock_irqrestore(&bank->slock, flags); 779 776 break;
+3 -2
drivers/pinctrl/pinctrl-tegra-xusb.c
··· 680 680 if (args->args_count <= 0) 681 681 return ERR_PTR(-EINVAL); 682 682 683 - if (index > ARRAY_SIZE(padctl->phys)) 683 + if (index >= ARRAY_SIZE(padctl->phys)) 684 684 return ERR_PTR(-EINVAL); 685 685 686 686 return padctl->phys[index]; ··· 930 930 931 931 padctl->provider = devm_of_phy_provider_register(&pdev->dev, 932 932 tegra_xusb_padctl_xlate); 933 - if (err < 0) { 933 + if (IS_ERR(padctl->provider)) { 934 + err = PTR_ERR(padctl->provider); 934 935 dev_err(&pdev->dev, "failed to register PHYs: %d\n", err); 935 936 goto unregister; 936 937 }
+63 -6
drivers/pinctrl/samsung/pinctrl-exynos.c
··· 127 127 struct irq_chip *chip = irq_data_get_irq_chip(irqd); 128 128 struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); 129 129 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); 130 - struct samsung_pin_bank_type *bank_type = bank->type; 131 130 struct samsung_pinctrl_drv_data *d = bank->drvdata; 132 - unsigned int pin = irqd->hwirq; 133 - unsigned int shift = EXYNOS_EINT_CON_LEN * pin; 131 + unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq; 134 132 unsigned int con, trig_type; 135 133 unsigned long reg_con = our_chip->eint_con + bank->eint_offset; 136 - unsigned long flags; 137 - unsigned int mask; 138 134 139 135 switch (type) { 140 136 case IRQ_TYPE_EDGE_RISING: ··· 163 167 con |= trig_type << shift; 164 168 writel(con, d->virt_base + reg_con); 165 169 170 + return 0; 171 + } 172 + 173 + static int exynos_irq_request_resources(struct irq_data *irqd) 174 + { 175 + struct irq_chip *chip = irq_data_get_irq_chip(irqd); 176 + struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); 177 + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); 178 + struct samsung_pin_bank_type *bank_type = bank->type; 179 + struct samsung_pinctrl_drv_data *d = bank->drvdata; 180 + unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq; 181 + unsigned long reg_con = our_chip->eint_con + bank->eint_offset; 182 + unsigned long flags; 183 + unsigned int mask; 184 + unsigned int con; 185 + int ret; 186 + 187 + ret = gpio_lock_as_irq(&bank->gpio_chip, irqd->hwirq); 188 + if (ret) { 189 + dev_err(bank->gpio_chip.dev, "unable to lock pin %s-%lu IRQ\n", 190 + bank->name, irqd->hwirq); 191 + return ret; 192 + } 193 + 166 194 reg_con = bank->pctl_offset + bank_type->reg_offset[PINCFG_TYPE_FUNC]; 167 - shift = pin * bank_type->fld_width[PINCFG_TYPE_FUNC]; 195 + shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC]; 168 196 mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1; 169 197 170 198 spin_lock_irqsave(&bank->slock, flags); ··· 
200 180 201 181 spin_unlock_irqrestore(&bank->slock, flags); 202 182 183 + exynos_irq_unmask(irqd); 184 + 203 185 return 0; 186 + } 187 + 188 + static void exynos_irq_release_resources(struct irq_data *irqd) 189 + { 190 + struct irq_chip *chip = irq_data_get_irq_chip(irqd); 191 + struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); 192 + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); 193 + struct samsung_pin_bank_type *bank_type = bank->type; 194 + struct samsung_pinctrl_drv_data *d = bank->drvdata; 195 + unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq; 196 + unsigned long reg_con = our_chip->eint_con + bank->eint_offset; 197 + unsigned long flags; 198 + unsigned int mask; 199 + unsigned int con; 200 + 201 + reg_con = bank->pctl_offset + bank_type->reg_offset[PINCFG_TYPE_FUNC]; 202 + shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC]; 203 + mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1; 204 + 205 + exynos_irq_mask(irqd); 206 + 207 + spin_lock_irqsave(&bank->slock, flags); 208 + 209 + con = readl(d->virt_base + reg_con); 210 + con &= ~(mask << shift); 211 + con |= FUNC_INPUT << shift; 212 + writel(con, d->virt_base + reg_con); 213 + 214 + spin_unlock_irqrestore(&bank->slock, flags); 215 + 216 + gpio_unlock_as_irq(&bank->gpio_chip, irqd->hwirq); 204 217 } 205 218 206 219 /* ··· 246 193 .irq_mask = exynos_irq_mask, 247 194 .irq_ack = exynos_irq_ack, 248 195 .irq_set_type = exynos_irq_set_type, 196 + .irq_request_resources = exynos_irq_request_resources, 197 + .irq_release_resources = exynos_irq_release_resources, 249 198 }, 250 199 .eint_con = EXYNOS_GPIO_ECON_OFFSET, 251 200 .eint_mask = EXYNOS_GPIO_EMASK_OFFSET, ··· 391 336 .irq_ack = exynos_irq_ack, 392 337 .irq_set_type = exynos_irq_set_type, 393 338 .irq_set_wake = exynos_wkup_irq_set_wake, 339 + .irq_request_resources = exynos_irq_request_resources, 340 + .irq_release_resources = exynos_irq_release_resources, 394 341 }, 395 342 .eint_con = 
EXYNOS_WKUP_ECON_OFFSET, 396 343 .eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
+1
drivers/pinctrl/samsung/pinctrl-samsung.h
··· 26 26 #include <linux/gpio.h> 27 27 28 28 /* pinmux function number for pin as gpio output line */ 29 + #define FUNC_INPUT 0x0 29 30 #define FUNC_OUTPUT 0x1 30 31 31 32 /**
+4 -4
drivers/pinctrl/sh-pfc/pfc-r8a7791.c
··· 4509 4509 }; 4510 4510 4511 4511 static const char * const can0_groups[] = { 4512 - "can0_data_a", 4512 + "can0_data", 4513 4513 "can0_data_b", 4514 4514 "can0_data_c", 4515 4515 "can0_data_d", 4516 4516 "can0_data_e", 4517 4517 "can0_data_f", 4518 - "can_clk_a", 4518 + "can_clk", 4519 4519 "can_clk_b", 4520 4520 "can_clk_c", 4521 4521 "can_clk_d", 4522 4522 }; 4523 4523 4524 4524 static const char * const can1_groups[] = { 4525 - "can1_data_a", 4525 + "can1_data", 4526 4526 "can1_data_b", 4527 4527 "can1_data_c", 4528 4528 "can1_data_d", 4529 - "can_clk_a", 4529 + "can_clk", 4530 4530 "can_clk_b", 4531 4531 "can_clk_c", 4532 4532 "can_clk_d",
+14 -7
drivers/rtc/rtc-s5m.c
··· 717 717 info->device_type = s5m87xx->device_type; 718 718 info->wtsr_smpl = s5m87xx->wtsr_smpl; 719 719 720 - info->irq = regmap_irq_get_virq(s5m87xx->irq_data, alarm_irq); 721 - if (info->irq <= 0) { 722 - ret = -EINVAL; 723 - dev_err(&pdev->dev, "Failed to get virtual IRQ %d\n", 720 + if (s5m87xx->irq_data) { 721 + info->irq = regmap_irq_get_virq(s5m87xx->irq_data, alarm_irq); 722 + if (info->irq <= 0) { 723 + ret = -EINVAL; 724 + dev_err(&pdev->dev, "Failed to get virtual IRQ %d\n", 724 725 alarm_irq); 725 - goto err; 726 + goto err; 727 + } 726 728 } 727 729 728 730 platform_set_drvdata(pdev, info); ··· 744 742 if (IS_ERR(info->rtc_dev)) { 745 743 ret = PTR_ERR(info->rtc_dev); 746 744 goto err; 745 + } 746 + 747 + if (!info->irq) { 748 + dev_info(&pdev->dev, "Alarm IRQ not available\n"); 749 + return 0; 747 750 } 748 751 749 752 ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL, ··· 809 802 struct s5m_rtc_info *info = dev_get_drvdata(dev); 810 803 int ret = 0; 811 804 812 - if (device_may_wakeup(dev)) 805 + if (info->irq && device_may_wakeup(dev)) 813 806 ret = disable_irq_wake(info->irq); 814 807 815 808 return ret; ··· 820 813 struct s5m_rtc_info *info = dev_get_drvdata(dev); 821 814 int ret = 0; 822 815 823 - if (device_may_wakeup(dev)) 816 + if (info->irq && device_may_wakeup(dev)) 824 817 ret = enable_irq_wake(info->irq); 825 818 826 819 return ret;
+17 -3
drivers/s390/char/con3215.c
··· 1035 1035 const unsigned char *buf, int count) 1036 1036 { 1037 1037 struct raw3215_info *raw; 1038 + int i, written; 1038 1039 1039 1040 if (!tty) 1040 1041 return 0; 1041 1042 raw = (struct raw3215_info *) tty->driver_data; 1042 - raw3215_write(raw, buf, count); 1043 - return count; 1043 + written = count; 1044 + while (count > 0) { 1045 + for (i = 0; i < count; i++) 1046 + if (buf[i] == '\t' || buf[i] == '\n') 1047 + break; 1048 + raw3215_write(raw, buf, i); 1049 + count -= i; 1050 + buf += i; 1051 + if (count > 0) { 1052 + raw3215_putchar(raw, *buf); 1053 + count--; 1054 + buf++; 1055 + } 1056 + } 1057 + return written; 1044 1058 } 1045 1059 1046 1060 /* ··· 1202 1188 driver->subtype = SYSTEM_TYPE_TTY; 1203 1189 driver->init_termios = tty_std_termios; 1204 1190 driver->init_termios.c_iflag = IGNBRK | IGNPAR; 1205 - driver->init_termios.c_oflag = ONLCR | XTABS; 1191 + driver->init_termios.c_oflag = ONLCR; 1206 1192 driver->init_termios.c_lflag = ISIG; 1207 1193 driver->flags = TTY_DRIVER_REAL_RAW; 1208 1194 tty_set_operations(driver, &tty3215_ops);
+1 -1
drivers/s390/char/sclp_tty.c
··· 559 559 driver->subtype = SYSTEM_TYPE_TTY; 560 560 driver->init_termios = tty_std_termios; 561 561 driver->init_termios.c_iflag = IGNBRK | IGNPAR; 562 - driver->init_termios.c_oflag = ONLCR | XTABS; 562 + driver->init_termios.c_oflag = ONLCR; 563 563 driver->init_termios.c_lflag = ISIG | ECHO; 564 564 driver->flags = TTY_DRIVER_REAL_RAW; 565 565 tty_set_operations(driver, &sclp_ops);
-1
drivers/scsi/scsi_lib.c
··· 1808 1808 1809 1809 cmd->tag = req->tag; 1810 1810 1811 - req->cmd = req->__cmd; 1812 1811 cmd->cmnd = req->cmd; 1813 1812 cmd->prot_op = SCSI_PROT_NORMAL; 1814 1813
+1 -1
drivers/spi/spi-au1550.c
··· 945 945 spi_bitbang_stop(&hw->bitbang); 946 946 free_irq(hw->irq, hw); 947 947 iounmap((void __iomem *)hw->regs); 948 - release_mem_region(r->start, sizeof(psc_spi_t)); 948 + release_mem_region(hw->ioarea->start, sizeof(psc_spi_t)); 949 949 950 950 if (hw->usedma) { 951 951 au1550_spi_dma_rxtmp_free(hw);
+8 -8
drivers/spi/spi-davinci.c
··· 417 417 flags, dev_name(&spi->dev)); 418 418 internal_cs = false; 419 419 } 420 - } 421 420 422 - if (retval) { 423 - dev_err(&spi->dev, "GPIO %d setup failed (%d)\n", 424 - spi->cs_gpio, retval); 425 - return retval; 426 - } 421 + if (retval) { 422 + dev_err(&spi->dev, "GPIO %d setup failed (%d)\n", 423 + spi->cs_gpio, retval); 424 + return retval; 425 + } 427 426 428 - if (internal_cs) 429 - set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select); 427 + if (internal_cs) 428 + set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select); 429 + } 430 430 431 431 if (spi->mode & SPI_READY) 432 432 set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);
+2
drivers/spi/spi-dw-pci.c
··· 62 62 if (ret) 63 63 return ret; 64 64 65 + dws->regs = pcim_iomap_table(pdev)[pci_bar]; 66 + 65 67 dws->bus_num = 0; 66 68 dws->num_cs = 4; 67 69 dws->irq = pdev->irq;
+1 -1
drivers/spi/spi-dw.c
··· 271 271 transfer_list); 272 272 273 273 if (!last_transfer->cs_change) 274 - spi_chip_sel(dws, dws->cur_msg->spi, 0); 274 + spi_chip_sel(dws, msg->spi, 0); 275 275 276 276 spi_finalize_current_message(dws->master); 277 277 }
+2 -1
drivers/spi/spi-omap2-mcspi.c
··· 329 329 disable_fifo: 330 330 if (t->rx_buf != NULL) 331 331 chconf &= ~OMAP2_MCSPI_CHCONF_FFER; 332 - else 332 + 333 + if (t->tx_buf != NULL) 333 334 chconf &= ~OMAP2_MCSPI_CHCONF_FFET; 334 335 335 336 mcspi_write_chconf0(spi, chconf);
+1
drivers/spi/spi-pxa2xx.c
··· 1074 1074 { "INT3430", 0 }, 1075 1075 { "INT3431", 0 }, 1076 1076 { "80860F0E", 0 }, 1077 + { "8086228E", 0 }, 1077 1078 { }, 1078 1079 }; 1079 1080 MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
+2 -2
drivers/spi/spi-rockchip.c
··· 499 499 } 500 500 501 501 /* div doesn't support odd number */ 502 - div = rs->max_freq / rs->speed; 502 + div = max_t(u32, rs->max_freq / rs->speed, 1); 503 503 div = (div + 1) & 0xfffe; 504 504 505 505 spi_enable_chip(rs, 0); ··· 678 678 rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR); 679 679 rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR); 680 680 rs->dma_tx.direction = DMA_MEM_TO_DEV; 681 - rs->dma_tx.direction = DMA_DEV_TO_MEM; 681 + rs->dma_rx.direction = DMA_DEV_TO_MEM; 682 682 683 683 master->can_dma = rockchip_spi_can_dma; 684 684 master->dma_tx = rs->dma_tx.ch;
+58 -36
drivers/spi/spi-rspi.c
··· 472 472 dma_cookie_t cookie; 473 473 int ret; 474 474 475 - if (tx) { 476 - desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx, 477 - tx->sgl, tx->nents, DMA_TO_DEVICE, 478 - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 479 - if (!desc_tx) 480 - goto no_dma; 481 - 482 - irq_mask |= SPCR_SPTIE; 483 - } 475 + /* First prepare and submit the DMA request(s), as this may fail */ 484 476 if (rx) { 485 477 desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx, 486 478 rx->sgl, rx->nents, DMA_FROM_DEVICE, 487 479 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 488 - if (!desc_rx) 489 - goto no_dma; 480 + if (!desc_rx) { 481 + ret = -EAGAIN; 482 + goto no_dma_rx; 483 + } 484 + 485 + desc_rx->callback = rspi_dma_complete; 486 + desc_rx->callback_param = rspi; 487 + cookie = dmaengine_submit(desc_rx); 488 + if (dma_submit_error(cookie)) { 489 + ret = cookie; 490 + goto no_dma_rx; 491 + } 490 492 491 493 irq_mask |= SPCR_SPRIE; 494 + } 495 + 496 + if (tx) { 497 + desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx, 498 + tx->sgl, tx->nents, DMA_TO_DEVICE, 499 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 500 + if (!desc_tx) { 501 + ret = -EAGAIN; 502 + goto no_dma_tx; 503 + } 504 + 505 + if (rx) { 506 + /* No callback */ 507 + desc_tx->callback = NULL; 508 + } else { 509 + desc_tx->callback = rspi_dma_complete; 510 + desc_tx->callback_param = rspi; 511 + } 512 + cookie = dmaengine_submit(desc_tx); 513 + if (dma_submit_error(cookie)) { 514 + ret = cookie; 515 + goto no_dma_tx; 516 + } 517 + 518 + irq_mask |= SPCR_SPTIE; 492 519 } 493 520 494 521 /* ··· 530 503 rspi_enable_irq(rspi, irq_mask); 531 504 rspi->dma_callbacked = 0; 532 505 533 - if (rx) { 534 - desc_rx->callback = rspi_dma_complete; 535 - desc_rx->callback_param = rspi; 536 - cookie = dmaengine_submit(desc_rx); 537 - if (dma_submit_error(cookie)) 538 - return cookie; 506 + /* Now start DMA */ 507 + if (rx) 539 508 dma_async_issue_pending(rspi->master->dma_rx); 540 - } 541 - if (tx) { 542 - if (rx) { 543 - /* No callback */ 544 - 
desc_tx->callback = NULL; 545 - } else { 546 - desc_tx->callback = rspi_dma_complete; 547 - desc_tx->callback_param = rspi; 548 - } 549 - cookie = dmaengine_submit(desc_tx); 550 - if (dma_submit_error(cookie)) 551 - return cookie; 509 + if (tx) 552 510 dma_async_issue_pending(rspi->master->dma_tx); 553 - } 554 511 555 512 ret = wait_event_interruptible_timeout(rspi->wait, 556 513 rspi->dma_callbacked, HZ); 557 514 if (ret > 0 && rspi->dma_callbacked) 558 515 ret = 0; 559 - else if (!ret) 516 + else if (!ret) { 517 + dev_err(&rspi->master->dev, "DMA timeout\n"); 560 518 ret = -ETIMEDOUT; 519 + if (tx) 520 + dmaengine_terminate_all(rspi->master->dma_tx); 521 + if (rx) 522 + dmaengine_terminate_all(rspi->master->dma_rx); 523 + } 561 524 562 525 rspi_disable_irq(rspi, irq_mask); 563 526 ··· 558 541 559 542 return ret; 560 543 561 - no_dma: 562 - pr_warn_once("%s %s: DMA not available, falling back to PIO\n", 563 - dev_driver_string(&rspi->master->dev), 564 - dev_name(&rspi->master->dev)); 565 - return -EAGAIN; 544 + no_dma_tx: 545 + if (rx) 546 + dmaengine_terminate_all(rspi->master->dma_rx); 547 + no_dma_rx: 548 + if (ret == -EAGAIN) { 549 + pr_warn_once("%s %s: DMA not available, falling back to PIO\n", 550 + dev_driver_string(&rspi->master->dev), 551 + dev_name(&rspi->master->dev)); 552 + } 553 + return ret; 566 554 } 567 555 568 556 static void rspi_receive_init(const struct rspi_data *rspi)
+43 -38
drivers/spi/spi-sh-msiof.c
··· 636 636 dma_cookie_t cookie; 637 637 int ret; 638 638 639 + /* First prepare and submit the DMA request(s), as this may fail */ 640 + if (rx) { 641 + ier_bits |= IER_RDREQE | IER_RDMAE; 642 + desc_rx = dmaengine_prep_slave_single(p->master->dma_rx, 643 + p->rx_dma_addr, len, DMA_FROM_DEVICE, 644 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 645 + if (!desc_rx) { 646 + ret = -EAGAIN; 647 + goto no_dma_rx; 648 + } 649 + 650 + desc_rx->callback = sh_msiof_dma_complete; 651 + desc_rx->callback_param = p; 652 + cookie = dmaengine_submit(desc_rx); 653 + if (dma_submit_error(cookie)) { 654 + ret = cookie; 655 + goto no_dma_rx; 656 + } 657 + } 658 + 639 659 if (tx) { 640 660 ier_bits |= IER_TDREQE | IER_TDMAE; 641 661 dma_sync_single_for_device(p->master->dma_tx->device->dev, ··· 663 643 desc_tx = dmaengine_prep_slave_single(p->master->dma_tx, 664 644 p->tx_dma_addr, len, DMA_TO_DEVICE, 665 645 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 666 - if (!desc_tx) 667 - return -EAGAIN; 668 - } 646 + if (!desc_tx) { 647 + ret = -EAGAIN; 648 + goto no_dma_tx; 649 + } 669 650 670 - if (rx) { 671 - ier_bits |= IER_RDREQE | IER_RDMAE; 672 - desc_rx = dmaengine_prep_slave_single(p->master->dma_rx, 673 - p->rx_dma_addr, len, DMA_FROM_DEVICE, 674 - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 675 - if (!desc_rx) 676 - return -EAGAIN; 651 + if (rx) { 652 + /* No callback */ 653 + desc_tx->callback = NULL; 654 + } else { 655 + desc_tx->callback = sh_msiof_dma_complete; 656 + desc_tx->callback_param = p; 657 + } 658 + cookie = dmaengine_submit(desc_tx); 659 + if (dma_submit_error(cookie)) { 660 + ret = cookie; 661 + goto no_dma_tx; 662 + } 677 663 } 678 664 679 665 /* 1 stage FIFO watermarks for DMA */ ··· 692 666 693 667 reinit_completion(&p->done); 694 668 695 - if (rx) { 696 - desc_rx->callback = sh_msiof_dma_complete; 697 - desc_rx->callback_param = p; 698 - cookie = dmaengine_submit(desc_rx); 699 - if (dma_submit_error(cookie)) { 700 - ret = cookie; 701 - goto stop_ier; 702 - } 669 + /* Now start DMA */ 
670 + if (rx) 703 671 dma_async_issue_pending(p->master->dma_rx); 704 - } 705 - 706 - if (tx) { 707 - if (rx) { 708 - /* No callback */ 709 - desc_tx->callback = NULL; 710 - } else { 711 - desc_tx->callback = sh_msiof_dma_complete; 712 - desc_tx->callback_param = p; 713 - } 714 - cookie = dmaengine_submit(desc_tx); 715 - if (dma_submit_error(cookie)) { 716 - ret = cookie; 717 - goto stop_rx; 718 - } 672 + if (tx) 719 673 dma_async_issue_pending(p->master->dma_tx); 720 - } 721 674 722 675 ret = sh_msiof_spi_start(p, rx); 723 676 if (ret) { 724 677 dev_err(&p->pdev->dev, "failed to start hardware\n"); 725 - goto stop_tx; 678 + goto stop_dma; 726 679 } 727 680 728 681 /* wait for tx fifo to be emptied / rx fifo to be filled */ ··· 731 726 stop_reset: 732 727 sh_msiof_reset_str(p); 733 728 sh_msiof_spi_stop(p, rx); 734 - stop_tx: 729 + stop_dma: 735 730 if (tx) 736 731 dmaengine_terminate_all(p->master->dma_tx); 737 - stop_rx: 732 + no_dma_tx: 738 733 if (rx) 739 734 dmaengine_terminate_all(p->master->dma_rx); 740 - stop_ier: 741 735 sh_msiof_write(p, IER, 0); 736 + no_dma_rx: 742 737 return ret; 743 738 } 744 739
+1
drivers/spi/spi.c
··· 848 848 849 849 /** 850 850 * spi_finalize_current_transfer - report completion of a transfer 851 + * @master: the master reporting completion 851 852 * 852 853 * Called by SPI drivers using the core transfer_one_message() 853 854 * implementation to notify it that the current interrupt driven
-2
drivers/staging/Kconfig
··· 28 28 29 29 source "drivers/staging/slicoss/Kconfig" 30 30 31 - source "drivers/staging/usbip/Kconfig" 32 - 33 31 source "drivers/staging/wlan-ng/Kconfig" 34 32 35 33 source "drivers/staging/comedi/Kconfig"
-1
drivers/staging/Makefile
··· 6 6 obj-y += media/ 7 7 obj-$(CONFIG_ET131X) += et131x/ 8 8 obj-$(CONFIG_SLICOSS) += slicoss/ 9 - obj-$(CONFIG_USBIP_CORE) += usbip/ 10 9 obj-$(CONFIG_PRISM2_USB) += wlan-ng/ 11 10 obj-$(CONFIG_COMEDI) += comedi/ 12 11 obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
+4 -1
drivers/staging/android/logger.c
··· 790 790 if (unlikely(ret)) { 791 791 pr_err("failed to register misc device for log '%s'!\n", 792 792 log->misc.name); 793 - goto out_free_log; 793 + goto out_free_misc_name; 794 794 } 795 795 796 796 pr_info("created %luK log '%s'\n", 797 797 (unsigned long) log->size >> 10, log->misc.name); 798 798 799 799 return 0; 800 + 801 + out_free_misc_name: 802 + kfree(log->misc.name); 800 803 801 804 out_free_log: 802 805 kfree(log);
+27 -41
drivers/staging/et131x/et131x.c
··· 1421 1421 * @reg: the register to read 1422 1422 * @value: 16-bit value to write 1423 1423 */ 1424 - static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value) 1424 + static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg, 1425 + u16 value) 1425 1426 { 1426 1427 struct mac_regs __iomem *mac = &adapter->regs->mac; 1427 - struct phy_device *phydev = adapter->phydev; 1428 1428 int status = 0; 1429 - u8 addr; 1430 1429 u32 delay = 0; 1431 1430 u32 mii_addr; 1432 1431 u32 mii_cmd; 1433 1432 u32 mii_indicator; 1434 - 1435 - if (!phydev) 1436 - return -EIO; 1437 - 1438 - addr = phydev->addr; 1439 1433 1440 1434 /* Save a local copy of the registers we are dealing with so we can 1441 1435 * set them back ··· 1625 1631 struct net_device *netdev = bus->priv; 1626 1632 struct et131x_adapter *adapter = netdev_priv(netdev); 1627 1633 1628 - return et131x_mii_write(adapter, reg, value); 1629 - } 1630 - 1631 - static int et131x_mdio_reset(struct mii_bus *bus) 1632 - { 1633 - struct net_device *netdev = bus->priv; 1634 - struct et131x_adapter *adapter = netdev_priv(netdev); 1635 - 1636 - et131x_mii_write(adapter, MII_BMCR, BMCR_RESET); 1637 - 1638 - return 0; 1634 + return et131x_mii_write(adapter, phy_addr, reg, value); 1639 1635 } 1640 1636 1641 1637 /* et1310_phy_power_switch - PHY power control ··· 1640 1656 static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down) 1641 1657 { 1642 1658 u16 data; 1659 + struct phy_device *phydev = adapter->phydev; 1643 1660 1644 1661 et131x_mii_read(adapter, MII_BMCR, &data); 1645 1662 data &= ~BMCR_PDOWN; 1646 1663 if (down) 1647 1664 data |= BMCR_PDOWN; 1648 - et131x_mii_write(adapter, MII_BMCR, data); 1665 + et131x_mii_write(adapter, phydev->addr, MII_BMCR, data); 1649 1666 } 1650 1667 1651 1668 /* et131x_xcvr_init - Init the phy if we are setting it into force mode */ 1652 1669 static void et131x_xcvr_init(struct et131x_adapter *adapter) 1653 1670 { 1654 1671 u16 lcr2; 1672 
+ struct phy_device *phydev = adapter->phydev; 1655 1673 1656 1674 /* Set the LED behavior such that LED 1 indicates speed (off = 1657 1675 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates ··· 1674 1688 else 1675 1689 lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT); 1676 1690 1677 - et131x_mii_write(adapter, PHY_LED_2, lcr2); 1691 + et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2); 1678 1692 } 1679 1693 } 1680 1694 ··· 3629 3643 3630 3644 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, 3631 3645 &register18); 3632 - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, 3633 - register18 | 0x4); 3634 - et131x_mii_write(adapter, PHY_INDEX_REG, 3646 + et131x_mii_write(adapter, phydev->addr, 3647 + PHY_MPHY_CONTROL_REG, register18 | 0x4); 3648 + et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG, 3635 3649 register18 | 0x8402); 3636 - et131x_mii_write(adapter, PHY_DATA_REG, 3650 + et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG, 3637 3651 register18 | 511); 3638 - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, 3639 - register18); 3652 + et131x_mii_write(adapter, phydev->addr, 3653 + PHY_MPHY_CONTROL_REG, register18); 3640 3654 } 3641 3655 3642 3656 et1310_config_flow_control(adapter); ··· 3648 3662 et131x_mii_read(adapter, PHY_CONFIG, &reg); 3649 3663 reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; 3650 3664 reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; 3651 - et131x_mii_write(adapter, PHY_CONFIG, reg); 3665 + et131x_mii_write(adapter, phydev->addr, PHY_CONFIG, 3666 + reg); 3652 3667 } 3653 3668 3654 3669 et131x_set_rx_dma_timer(adapter); ··· 3662 3675 3663 3676 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, 3664 3677 &register18); 3665 - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, 3666 - register18 | 0x4); 3667 - et131x_mii_write(adapter, PHY_INDEX_REG, 3668 - register18 | 0x8402); 3669 - et131x_mii_write(adapter, PHY_DATA_REG, 3670 - register18 | 511); 3671 - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, 3672 - register18); 3678 + et131x_mii_write(adapter, 
phydev->addr, 3679 + PHY_MPHY_CONTROL_REG, register18 | 0x4); 3680 + et131x_mii_write(adapter, phydev->addr, 3681 + PHY_INDEX_REG, register18 | 0x8402); 3682 + et131x_mii_write(adapter, phydev->addr, 3683 + PHY_DATA_REG, register18 | 511); 3684 + et131x_mii_write(adapter, phydev->addr, 3685 + PHY_MPHY_CONTROL_REG, register18); 3673 3686 } 3674 3687 3675 3688 /* Free the packets being actively sent & stopped */ ··· 4631 4644 /* Copy address into the net_device struct */ 4632 4645 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); 4633 4646 4634 - /* Init variable for counting how long we do not have link status */ 4635 - adapter->boot_coma = 0; 4636 - et1310_disable_phy_coma(adapter); 4637 - 4638 4647 rc = -ENOMEM; 4639 4648 4640 4649 /* Setup the mii_bus struct */ ··· 4646 4663 adapter->mii_bus->priv = netdev; 4647 4664 adapter->mii_bus->read = et131x_mdio_read; 4648 4665 adapter->mii_bus->write = et131x_mdio_write; 4649 - adapter->mii_bus->reset = et131x_mdio_reset; 4650 4666 adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), 4651 4667 GFP_KERNEL); 4652 4668 if (!adapter->mii_bus->irq) ··· 4668 4686 4669 4687 /* Setup et1310 as per the documentation */ 4670 4688 et131x_adapter_setup(adapter); 4689 + 4690 + /* Init variable for counting how long we do not have link status */ 4691 + adapter->boot_coma = 0; 4692 + et1310_disable_phy_coma(adapter); 4671 4693 4672 4694 /* We can enable interrupts now 4673 4695 *
+1
drivers/staging/lustre/lustre/libcfs/workitem.c
··· 365 365 return -ENOMEM; 366 366 367 367 strncpy(sched->ws_name, name, CFS_WS_NAME_LEN); 368 + sched->ws_name[CFS_WS_NAME_LEN - 1] = '\0'; 368 369 sched->ws_cptab = cptab; 369 370 sched->ws_cpt = cpt; 370 371
+1 -1
drivers/staging/lustre/lustre/obdclass/class_obd.c
··· 35 35 */ 36 36 37 37 #define DEBUG_SUBSYSTEM S_CLASS 38 - # include <asm/atomic.h> 38 + # include <linux/atomic.h> 39 39 40 40 #include "../include/obd_support.h" 41 41 #include "../include/obd_class.h"
+2
drivers/staging/rtl8188eu/os_dep/usb_intf.c
··· 43 43 {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */ 44 44 /*=== Customer ID ===*/ 45 45 /****** 8188EUS ********/ 46 + {USB_DEVICE(0x056e, 0x4008)}, /* Elecom WDC-150SU2M */ 46 47 {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */ 47 48 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ 48 49 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ 50 + {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ 49 51 {} /* Terminating entry */ 50 52 }; 51 53
drivers/staging/usbip/Kconfig drivers/usb/usbip/Kconfig
drivers/staging/usbip/Makefile drivers/usb/usbip/Makefile
drivers/staging/usbip/README drivers/usb/usbip/README
drivers/staging/usbip/stub.h drivers/usb/usbip/stub.h
-27
drivers/staging/usbip/stub_dev.c drivers/usb/usbip/stub_dev.c
··· 26 26 #include "stub.h" 27 27 28 28 /* 29 - * Define device IDs here if you want to explicitly limit exportable devices. 30 - * In most cases, wildcard matching will be okay because driver binding can be 31 - * changed dynamically by a userland program. 32 - */ 33 - static struct usb_device_id stub_table[] = { 34 - #if 0 35 - /* just an example */ 36 - { USB_DEVICE(0x05ac, 0x0301) }, /* Mac 1 button mouse */ 37 - { USB_DEVICE(0x0430, 0x0009) }, /* Plat Home Keyboard */ 38 - { USB_DEVICE(0x059b, 0x0001) }, /* Iomega USB Zip 100 */ 39 - { USB_DEVICE(0x04b3, 0x4427) }, /* IBM USB CD-ROM */ 40 - { USB_DEVICE(0x05a9, 0xa511) }, /* LifeView USB cam */ 41 - { USB_DEVICE(0x55aa, 0x0201) }, /* Imation card reader */ 42 - { USB_DEVICE(0x046d, 0x0870) }, /* Qcam Express(QV-30) */ 43 - { USB_DEVICE(0x04bb, 0x0101) }, /* IO-DATA HD 120GB */ 44 - { USB_DEVICE(0x04bb, 0x0904) }, /* IO-DATA USB-ET/TX */ 45 - { USB_DEVICE(0x04bb, 0x0201) }, /* IO-DATA USB-ET/TX */ 46 - { USB_DEVICE(0x08bb, 0x2702) }, /* ONKYO USB Speaker */ 47 - { USB_DEVICE(0x046d, 0x08b2) }, /* Logicool Qcam 4000 Pro */ 48 - #endif 49 - /* magic for wild card */ 50 - { .driver_info = 1 }, 51 - { 0, } /* Terminating entry */ 52 - }; 53 - MODULE_DEVICE_TABLE(usb, stub_table); 54 - 55 - /* 56 29 * usbip_status shows the status of usbip-host as long as this driver is bound 57 30 * to the target device. 58 31 */
drivers/staging/usbip/stub_main.c drivers/usb/usbip/stub_main.c
drivers/staging/usbip/stub_rx.c drivers/usb/usbip/stub_rx.c
drivers/staging/usbip/stub_tx.c drivers/usb/usbip/stub_tx.c
drivers/staging/usbip/uapi/usbip.h include/uapi/linux/usbip.h
drivers/staging/usbip/usbip_common.c drivers/usb/usbip/usbip_common.c
+1 -1
drivers/staging/usbip/usbip_common.h drivers/usb/usbip/usbip_common.h
··· 29 29 #include <linux/types.h> 30 30 #include <linux/usb.h> 31 31 #include <linux/wait.h> 32 - #include "uapi/usbip.h" 32 + #include <uapi/linux/usbip.h> 33 33 34 34 #define USBIP_VERSION "1.0.0" 35 35
drivers/staging/usbip/usbip_event.c drivers/usb/usbip/usbip_event.c
drivers/staging/usbip/usbip_protocol.txt drivers/usb/usbip/usbip_protocol.txt
drivers/staging/usbip/userspace/.gitignore tools/usb/usbip/.gitignore
drivers/staging/usbip/userspace/AUTHORS tools/usb/usbip/AUTHORS
drivers/staging/usbip/userspace/COPYING tools/usb/usbip/COPYING
drivers/staging/usbip/userspace/INSTALL tools/usb/usbip/INSTALL
drivers/staging/usbip/userspace/Makefile.am tools/usb/usbip/Makefile.am
drivers/staging/usbip/userspace/README tools/usb/usbip/README
drivers/staging/usbip/userspace/autogen.sh tools/usb/usbip/autogen.sh
drivers/staging/usbip/userspace/cleanup.sh tools/usb/usbip/cleanup.sh
drivers/staging/usbip/userspace/configure.ac tools/usb/usbip/configure.ac
drivers/staging/usbip/userspace/doc/usbip.8 tools/usb/usbip/doc/usbip.8
drivers/staging/usbip/userspace/doc/usbipd.8 tools/usb/usbip/doc/usbipd.8
drivers/staging/usbip/userspace/libsrc/Makefile.am tools/usb/usbip/libsrc/Makefile.am
drivers/staging/usbip/userspace/libsrc/list.h tools/usb/usbip/libsrc/list.h
drivers/staging/usbip/userspace/libsrc/names.c tools/usb/usbip/libsrc/names.c
drivers/staging/usbip/userspace/libsrc/names.h tools/usb/usbip/libsrc/names.h
drivers/staging/usbip/userspace/libsrc/sysfs_utils.c tools/usb/usbip/libsrc/sysfs_utils.c
drivers/staging/usbip/userspace/libsrc/sysfs_utils.h tools/usb/usbip/libsrc/sysfs_utils.h
drivers/staging/usbip/userspace/libsrc/usbip_common.c tools/usb/usbip/libsrc/usbip_common.c
drivers/staging/usbip/userspace/libsrc/usbip_common.h tools/usb/usbip/libsrc/usbip_common.h
drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c tools/usb/usbip/libsrc/usbip_host_driver.c
drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h tools/usb/usbip/libsrc/usbip_host_driver.h
drivers/staging/usbip/userspace/libsrc/vhci_driver.c tools/usb/usbip/libsrc/vhci_driver.c
drivers/staging/usbip/userspace/libsrc/vhci_driver.h tools/usb/usbip/libsrc/vhci_driver.h
drivers/staging/usbip/userspace/src/Makefile.am tools/usb/usbip/src/Makefile.am
drivers/staging/usbip/userspace/src/usbip.c tools/usb/usbip/src/usbip.c
drivers/staging/usbip/userspace/src/usbip.h tools/usb/usbip/src/usbip.h
drivers/staging/usbip/userspace/src/usbip_attach.c tools/usb/usbip/src/usbip_attach.c
drivers/staging/usbip/userspace/src/usbip_bind.c tools/usb/usbip/src/usbip_bind.c
drivers/staging/usbip/userspace/src/usbip_detach.c tools/usb/usbip/src/usbip_detach.c
drivers/staging/usbip/userspace/src/usbip_list.c tools/usb/usbip/src/usbip_list.c
drivers/staging/usbip/userspace/src/usbip_network.c tools/usb/usbip/src/usbip_network.c
drivers/staging/usbip/userspace/src/usbip_network.h tools/usb/usbip/src/usbip_network.h
drivers/staging/usbip/userspace/src/usbip_port.c tools/usb/usbip/src/usbip_port.c
drivers/staging/usbip/userspace/src/usbip_unbind.c tools/usb/usbip/src/usbip_unbind.c
drivers/staging/usbip/userspace/src/usbipd.c tools/usb/usbip/src/usbipd.c
drivers/staging/usbip/userspace/src/utils.c tools/usb/usbip/src/utils.c
drivers/staging/usbip/userspace/src/utils.h tools/usb/usbip/src/utils.h
drivers/staging/usbip/vhci.h drivers/usb/usbip/vhci.h
drivers/staging/usbip/vhci_hcd.c drivers/usb/usbip/vhci_hcd.c
drivers/staging/usbip/vhci_rx.c drivers/usb/usbip/vhci_rx.c
drivers/staging/usbip/vhci_sysfs.c drivers/usb/usbip/vhci_sysfs.c
drivers/staging/usbip/vhci_tx.c drivers/usb/usbip/vhci_tx.c
+20 -1
drivers/thunderbolt/path.c
··· 150 150 151 151 /* Activate hops. */ 152 152 for (i = path->path_length - 1; i >= 0; i--) { 153 - struct tb_regs_hop hop; 153 + struct tb_regs_hop hop = { 0 }; 154 + 155 + /* 156 + * We do (currently) not tear down paths set up by the firmware. 157 + * If a firmware device is unplugged and plugged in again then 158 + * it can happen that we reuse some of the hops from the (now 159 + * defunct) firmware path. This causes the hotplug operation to 160 + * fail (the pci device does not show up). Clearing the hop 161 + * before overwriting it fixes the problem. 162 + * 163 + * Should be removed once we discover and tear down firmware 164 + * paths. 165 + */ 166 + res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS, 167 + 2 * path->hops[i].in_hop_index, 2); 168 + if (res) { 169 + __tb_path_deactivate_hops(path, i); 170 + __tb_path_deallocate_nfc(path, 0); 171 + goto err; 172 + } 154 173 155 174 /* dword 0 */ 156 175 hop.next_hop = path->hops[i].next_hop_index;
+2
drivers/usb/Kconfig
··· 92 92 93 93 source "drivers/usb/image/Kconfig" 94 94 95 + source "drivers/usb/usbip/Kconfig" 96 + 95 97 endif 96 98 97 99 source "drivers/usb/musb/Kconfig"
+2
drivers/usb/Makefile
··· 60 60 obj-$(CONFIG_USB_GADGET) += gadget/ 61 61 62 62 obj-$(CONFIG_USB_COMMON) += common/ 63 + 64 + obj-$(CONFIG_USBIP_CORE) += usbip/
+10 -6
drivers/usb/core/hub.c
··· 1728 1728 * - Change autosuspend delay of hub can avoid unnecessary auto 1729 1729 * suspend timer for hub, also may decrease power consumption 1730 1730 * of USB bus. 1731 + * 1732 + * - If user has indicated to prevent autosuspend by passing 1733 + * usbcore.autosuspend = -1 then keep autosuspend disabled. 1731 1734 */ 1732 - pm_runtime_set_autosuspend_delay(&hdev->dev, 0); 1735 + #ifdef CONFIG_PM_RUNTIME 1736 + if (hdev->dev.power.autosuspend_delay >= 0) 1737 + pm_runtime_set_autosuspend_delay(&hdev->dev, 0); 1738 + #endif 1733 1739 1734 1740 /* 1735 1741 * Hubs have proper suspend/resume support, except for root hubs ··· 2113 2107 { 2114 2108 struct usb_port *port_dev = NULL; 2115 2109 struct usb_device *udev = *pdev; 2116 - struct usb_hub *hub; 2117 - int port1; 2110 + struct usb_hub *hub = NULL; 2111 + int port1 = 1; 2118 2112 2119 2113 /* mark the device as inactive, so any further urb submissions for 2120 2114 * this device (and any of its children) will fail immediately. ··· 4637 4631 if (status != -ENODEV && 4638 4632 port1 != unreliable_port && 4639 4633 printk_ratelimit()) 4640 - dev_err(&udev->dev, "connect-debounce failed, port %d disabled\n", 4641 - port1); 4642 - 4634 + dev_err(&port_dev->dev, "connect-debounce failed\n"); 4643 4635 portstatus &= ~USB_PORT_STAT_CONNECTION; 4644 4636 unreliable_port = port1; 4645 4637 } else {
+1 -1
drivers/usb/dwc2/gadget.c
··· 1901 1901 static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg) 1902 1902 { 1903 1903 u32 dsts = readl(hsotg->regs + DSTS); 1904 - int ep0_mps = 0, ep_mps; 1904 + int ep0_mps = 0, ep_mps = 8; 1905 1905 1906 1906 /* 1907 1907 * This should signal the finish of the enumeration phase
+1 -1
drivers/usb/dwc3/dwc3-omap.c
··· 425 425 426 426 static int dwc3_omap_extcon_register(struct dwc3_omap *omap) 427 427 { 428 - u32 ret; 428 + int ret; 429 429 struct device_node *node = omap->dev->of_node; 430 430 struct extcon_dev *edev; 431 431
+1 -1
drivers/usb/gadget/Makefile
··· 3 3 # 4 4 subdir-ccflags-$(CONFIG_USB_GADGET_DEBUG) := -DDEBUG 5 5 subdir-ccflags-$(CONFIG_USB_GADGET_VERBOSE) += -DVERBOSE_DEBUG 6 - ccflags-y += -I$(PWD)/drivers/usb/gadget/udc 6 + ccflags-y += -Idrivers/usb/gadget/udc 7 7 8 8 obj-$(CONFIG_USB_LIBCOMPOSITE) += libcomposite.o 9 9 libcomposite-y := usbstring.o config.o epautoconf.o
+2 -2
drivers/usb/gadget/function/Makefile
··· 2 2 # USB peripheral controller drivers 3 3 # 4 4 5 - ccflags-y := -I$(PWD)/drivers/usb/gadget/ 6 - ccflags-y += -I$(PWD)/drivers/usb/gadget/udc/ 5 + ccflags-y := -Idrivers/usb/gadget/ 6 + ccflags-y += -Idrivers/usb/gadget/udc/ 7 7 8 8 # USB Functions 9 9 usb_f_acm-y := f_acm.o
-3
drivers/usb/gadget/function/u_ether.c
··· 1127 1127 1128 1128 DBG(dev, "%s\n", __func__); 1129 1129 1130 - netif_tx_lock(dev->net); 1131 1130 netif_stop_queue(dev->net); 1132 - netif_tx_unlock(dev->net); 1133 - 1134 1131 netif_carrier_off(dev->net); 1135 1132 1136 1133 /* disable endpoints, forcing (synchronous) completion
+3
drivers/usb/gadget/function/uvc_video.c
··· 195 195 printk(KERN_INFO "Failed to queue request (%d).\n", ret); 196 196 usb_ep_set_halt(ep); 197 197 spin_unlock_irqrestore(&video->queue.irqlock, flags); 198 + uvc_queue_cancel(queue, 0); 198 199 goto requeue; 199 200 } 200 201 spin_unlock_irqrestore(&video->queue.irqlock, flags); ··· 282 281 static int 283 282 uvc_video_pump(struct uvc_video *video) 284 283 { 284 + struct uvc_video_queue *queue = &video->queue; 285 285 struct usb_request *req; 286 286 struct uvc_buffer *buf; 287 287 unsigned long flags; ··· 324 322 printk(KERN_INFO "Failed to queue request (%d)\n", ret); 325 323 usb_ep_set_halt(video->ep); 326 324 spin_unlock_irqrestore(&video->queue.irqlock, flags); 325 + uvc_queue_cancel(queue, 0); 327 326 break; 328 327 } 329 328 spin_unlock_irqrestore(&video->queue.irqlock, flags);
+3 -3
drivers/usb/gadget/legacy/Makefile
··· 2 2 # USB gadget drivers 3 3 # 4 4 5 - ccflags-y := -I$(PWD)/drivers/usb/gadget/ 6 - ccflags-y += -I$(PWD)/drivers/usb/gadget/udc/ 7 - ccflags-y += -I$(PWD)/drivers/usb/gadget/function/ 5 + ccflags-y := -Idrivers/usb/gadget/ 6 + ccflags-y += -Idrivers/usb/gadget/udc/ 7 + ccflags-y += -Idrivers/usb/gadget/function/ 8 8 9 9 g_zero-y := zero.o 10 10 g_audio-y := audio.o
+2
drivers/usb/gadget/legacy/dbgp.c
··· 222 222 { 223 223 #ifdef CONFIG_USB_G_DBGP_SERIAL 224 224 kfree(dbgp.serial); 225 + dbgp.serial = NULL; 225 226 #endif 226 227 if (dbgp.req) { 227 228 kfree(dbgp.req->buf); 228 229 usb_ep_free_request(gadget->ep0, dbgp.req); 230 + dbgp.req = NULL; 229 231 } 230 232 231 233 gadget->ep0->driver_data = NULL;
+1 -1
drivers/usb/gadget/legacy/inode.c
··· 440 440 441 441 value = -ENOMEM; 442 442 kbuf = memdup_user(buf, len); 443 - if (!kbuf) { 443 + if (IS_ERR(kbuf)) { 444 444 value = PTR_ERR(kbuf); 445 445 goto free1; 446 446 }
+2 -1
drivers/usb/gadget/udc/Kconfig
··· 332 332 gadget drivers to also be dynamically linked. 333 333 334 334 config USB_EG20T 335 - tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC" 335 + tristate "Intel QUARK X1000/EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC" 336 336 depends on PCI 337 337 help 338 338 This is a USB device driver for EG20T PCH. ··· 353 353 ML7213/ML7831 is companion chip for Intel Atom E6xx series. 354 354 ML7213/ML7831 is completely compatible for Intel EG20T PCH. 355 355 356 + This driver can be used with Intel's Quark X1000 SOC platform 356 357 # 357 358 # LAST -- dummy/emulated controller 358 359 #
+1 -1
drivers/usb/gadget/udc/atmel_usba_udc.c
··· 1661 1661 if (dma_status) { 1662 1662 int i; 1663 1663 1664 - for (i = 1; i < USBA_NR_DMAS; i++) 1664 + for (i = 1; i <= USBA_NR_DMAS; i++) 1665 1665 if (dma_status & (1 << i)) 1666 1666 usba_dma_irq(udc, &udc->usba_ep[i]); 1667 1667 }
+6 -2
drivers/usb/gadget/udc/fusb300_udc.c
··· 1398 1398 1399 1399 /* initialize udc */ 1400 1400 fusb300 = kzalloc(sizeof(struct fusb300), GFP_KERNEL); 1401 - if (fusb300 == NULL) 1401 + if (fusb300 == NULL) { 1402 + ret = -ENOMEM; 1402 1403 goto clean_up; 1404 + } 1403 1405 1404 1406 for (i = 0; i < FUSB300_MAX_NUM_EP; i++) { 1405 1407 _ep[i] = kzalloc(sizeof(struct fusb300_ep), GFP_KERNEL); 1406 - if (_ep[i] == NULL) 1408 + if (_ep[i] == NULL) { 1409 + ret = -ENOMEM; 1407 1410 goto clean_up; 1411 + } 1408 1412 fusb300->ep[i] = _ep[i]; 1409 1413 } 1410 1414
+19 -3
drivers/usb/gadget/udc/pch_udc.c
··· 343 343 * @setup_data: Received setup data 344 344 * @phys_addr: of device memory 345 345 * @base_addr: for mapped device memory 346 + * @bar: Indicates which PCI BAR for USB regs 346 347 * @irq: IRQ line for the device 347 348 * @cfg_data: current cfg, intf, and alt in use 348 349 * @vbus_gpio: GPIO informaton for detecting VBUS ··· 371 370 struct usb_ctrlrequest setup_data; 372 371 unsigned long phys_addr; 373 372 void __iomem *base_addr; 373 + unsigned bar; 374 374 unsigned irq; 375 375 struct pch_udc_cfg_data cfg_data; 376 376 struct pch_vbus_gpio_data vbus_gpio; 377 377 }; 378 378 #define to_pch_udc(g) (container_of((g), struct pch_udc_dev, gadget)) 379 379 380 + #define PCH_UDC_PCI_BAR_QUARK_X1000 0 380 381 #define PCH_UDC_PCI_BAR 1 381 382 #define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808 383 + #define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939 382 384 #define PCI_VENDOR_ID_ROHM 0x10DB 383 385 #define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D 384 386 #define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808 ··· 3080 3076 iounmap(dev->base_addr); 3081 3077 if (dev->mem_region) 3082 3078 release_mem_region(dev->phys_addr, 3083 - pci_resource_len(pdev, PCH_UDC_PCI_BAR)); 3079 + pci_resource_len(pdev, dev->bar)); 3084 3080 if (dev->active) 3085 3081 pci_disable_device(pdev); 3086 3082 kfree(dev); ··· 3148 3144 dev->active = 1; 3149 3145 pci_set_drvdata(pdev, dev); 3150 3146 3147 + /* Determine BAR based on PCI ID */ 3148 + if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC) 3149 + dev->bar = PCH_UDC_PCI_BAR_QUARK_X1000; 3150 + else 3151 + dev->bar = PCH_UDC_PCI_BAR; 3152 + 3151 3153 /* PCI resource allocation */ 3152 - resource = pci_resource_start(pdev, 1); 3153 - len = pci_resource_len(pdev, 1); 3154 + resource = pci_resource_start(pdev, dev->bar); 3155 + len = pci_resource_len(pdev, dev->bar); 3154 3156 3155 3157 if (!request_mem_region(resource, len, KBUILD_MODNAME)) { 3156 3158 dev_err(&pdev->dev, "%s: pci device used already\n", __func__); ··· 3221 3211 } 3222 3212 3223 
3213 static const struct pci_device_id pch_udc_pcidev_id[] = { 3214 + { 3215 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 3216 + PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC), 3217 + .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, 3218 + .class_mask = 0xffffffff, 3219 + }, 3224 3220 { 3225 3221 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC), 3226 3222 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+2 -2
drivers/usb/gadget/udc/r8a66597-udc.c
··· 1868 1868 1869 1869 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1870 1870 reg = devm_ioremap_resource(&pdev->dev, res); 1871 - if (!reg) 1872 - return -ENODEV; 1871 + if (IS_ERR(reg)) 1872 + return PTR_ERR(reg); 1873 1873 1874 1874 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1875 1875 irq = ires->start;
+1 -1
drivers/usb/host/ehci-hub.c
··· 1230 1230 if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) { 1231 1231 spin_unlock_irqrestore(&ehci->lock, flags); 1232 1232 retval = ehset_single_step_set_feature(hcd, 1233 - wIndex); 1233 + wIndex + 1); 1234 1234 spin_lock_irqsave(&ehci->lock, flags); 1235 1235 break; 1236 1236 }
+9
drivers/usb/host/xhci-pci.c
··· 101 101 /* AMD PLL quirk */ 102 102 if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) 103 103 xhci->quirks |= XHCI_AMD_PLL_FIX; 104 + 105 + if (pdev->vendor == PCI_VENDOR_ID_AMD) 106 + xhci->quirks |= XHCI_TRUST_TX_LENGTH; 107 + 104 108 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 105 109 xhci->quirks |= XHCI_LPM_SUPPORT; 106 110 xhci->quirks |= XHCI_INTEL_HOST; ··· 154 150 xhci->quirks |= XHCI_RESET_ON_RESUME; 155 151 if (pdev->vendor == PCI_VENDOR_ID_VIA) 156 152 xhci->quirks |= XHCI_RESET_ON_RESUME; 153 + 154 + /* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */ 155 + if (pdev->vendor == PCI_VENDOR_ID_VIA && 156 + pdev->device == 0x3432) 157 + xhci->quirks |= XHCI_BROKEN_STREAMS; 157 158 158 159 if (xhci->quirks & XHCI_RESET_ON_RESUME) 159 160 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+41 -63
drivers/usb/host/xhci-ring.c
··· 364 364 } 365 365 } 366 366 367 - /* 368 - * Find the segment that trb is in. Start searching in start_seg. 369 - * If we must move past a segment that has a link TRB with a toggle cycle state 370 - * bit set, then we will toggle the value pointed at by cycle_state. 371 - */ 372 - static struct xhci_segment *find_trb_seg( 373 - struct xhci_segment *start_seg, 374 - union xhci_trb *trb, int *cycle_state) 375 - { 376 - struct xhci_segment *cur_seg = start_seg; 377 - struct xhci_generic_trb *generic_trb; 378 - 379 - while (cur_seg->trbs > trb || 380 - &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) { 381 - generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic; 382 - if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE)) 383 - *cycle_state ^= 0x1; 384 - cur_seg = cur_seg->next; 385 - if (cur_seg == start_seg) 386 - /* Looped over the entire list. Oops! */ 387 - return NULL; 388 - } 389 - return cur_seg; 390 - } 391 - 392 - 393 367 static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, 394 368 unsigned int slot_id, unsigned int ep_index, 395 369 unsigned int stream_id) ··· 433 459 struct xhci_virt_device *dev = xhci->devs[slot_id]; 434 460 struct xhci_virt_ep *ep = &dev->eps[ep_index]; 435 461 struct xhci_ring *ep_ring; 436 - struct xhci_generic_trb *trb; 462 + struct xhci_segment *new_seg; 463 + union xhci_trb *new_deq; 437 464 dma_addr_t addr; 438 465 u64 hw_dequeue; 466 + bool cycle_found = false; 467 + bool td_last_trb_found = false; 439 468 440 469 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, 441 470 ep_index, stream_id); ··· 463 486 hw_dequeue = le64_to_cpu(ep_ctx->deq); 464 487 } 465 488 466 - /* Find virtual address and segment of hardware dequeue pointer */ 467 - state->new_deq_seg = ep_ring->deq_seg; 468 - state->new_deq_ptr = ep_ring->dequeue; 469 - while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr) 470 - != (dma_addr_t)(hw_dequeue & ~0xf)) { 471 - next_trb(xhci, ep_ring, &state->new_deq_seg, 472 - 
&state->new_deq_ptr); 473 - if (state->new_deq_ptr == ep_ring->dequeue) { 474 - WARN_ON(1); 489 + new_seg = ep_ring->deq_seg; 490 + new_deq = ep_ring->dequeue; 491 + state->new_cycle_state = hw_dequeue & 0x1; 492 + 493 + /* 494 + * We want to find the pointer, segment and cycle state of the new trb 495 + * (the one after current TD's last_trb). We know the cycle state at 496 + * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are 497 + * found. 498 + */ 499 + do { 500 + if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq) 501 + == (dma_addr_t)(hw_dequeue & ~0xf)) { 502 + cycle_found = true; 503 + if (td_last_trb_found) 504 + break; 505 + } 506 + if (new_deq == cur_td->last_trb) 507 + td_last_trb_found = true; 508 + 509 + if (cycle_found && 510 + TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) && 511 + new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE)) 512 + state->new_cycle_state ^= 0x1; 513 + 514 + next_trb(xhci, ep_ring, &new_seg, &new_deq); 515 + 516 + /* Search wrapped around, bail out */ 517 + if (new_deq == ep->ring->dequeue) { 518 + xhci_err(xhci, "Error: Failed finding new dequeue state\n"); 519 + state->new_deq_seg = NULL; 520 + state->new_deq_ptr = NULL; 475 521 return; 476 522 } 477 - } 478 - /* 479 - * Find cycle state for last_trb, starting at old cycle state of 480 - * hw_dequeue. If there is only one segment ring, find_trb_seg() will 481 - * return immediately and cannot toggle the cycle state if this search 482 - * wraps around, so add one more toggle manually in that case. 
483 - */ 484 - state->new_cycle_state = hw_dequeue & 0x1; 485 - if (ep_ring->first_seg == ep_ring->first_seg->next && 486 - cur_td->last_trb < state->new_deq_ptr) 487 - state->new_cycle_state ^= 0x1; 488 523 489 - state->new_deq_ptr = cur_td->last_trb; 490 - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 491 - "Finding segment containing last TRB in TD."); 492 - state->new_deq_seg = find_trb_seg(state->new_deq_seg, 493 - state->new_deq_ptr, &state->new_cycle_state); 494 - if (!state->new_deq_seg) { 495 - WARN_ON(1); 496 - return; 497 - } 524 + } while (!cycle_found || !td_last_trb_found); 498 525 499 - /* Increment to find next TRB after last_trb. Cycle if appropriate. */ 500 - trb = &state->new_deq_ptr->generic; 501 - if (TRB_TYPE_LINK_LE32(trb->field[3]) && 502 - (trb->field[3] & cpu_to_le32(LINK_TOGGLE))) 503 - state->new_cycle_state ^= 0x1; 504 - next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); 526 + state->new_deq_seg = new_seg; 527 + state->new_deq_ptr = new_deq; 505 528 506 529 /* Don't update the ring cycle state for the producer (us). */ 507 530 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, ··· 2464 2487 * last TRB of the previous TD. The command completion handle 2465 2488 * will take care the rest. 2466 2489 */ 2467 - if (!event_seg && trb_comp_code == COMP_STOP_INVAL) { 2490 + if (!event_seg && (trb_comp_code == COMP_STOP || 2491 + trb_comp_code == COMP_STOP_INVAL)) { 2468 2492 ret = 0; 2469 2493 goto cleanup; 2470 2494 }
+3
drivers/usb/host/xhci.c
··· 2880 2880 ep_index, ep->stopped_stream, ep->stopped_td, 2881 2881 &deq_state); 2882 2882 2883 + if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg) 2884 + return; 2885 + 2883 2886 /* HW with the reset endpoint quirk will use the saved dequeue state to 2884 2887 * issue a configure endpoint command later. 2885 2888 */
+1
drivers/usb/misc/sisusbvga/sisusb.c
··· 3250 3250 { USB_DEVICE(0x0711, 0x0918) }, 3251 3251 { USB_DEVICE(0x0711, 0x0920) }, 3252 3252 { USB_DEVICE(0x0711, 0x0950) }, 3253 + { USB_DEVICE(0x0711, 0x5200) }, 3253 3254 { USB_DEVICE(0x182d, 0x021c) }, 3254 3255 { USB_DEVICE(0x182d, 0x0269) }, 3255 3256 { }
+1 -1
drivers/usb/musb/ux500_dma.c
··· 96 96 struct musb *musb = ux500_channel->controller->private_data; 97 97 98 98 dev_dbg(musb->controller, 99 - "packet_sz=%d, mode=%d, dma_addr=0x%llu, len=%d is_tx=%d\n", 99 + "packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n", 100 100 packet_sz, mode, (unsigned long long) dma_addr, 101 101 len, ux500_channel->is_tx); 102 102
+1 -3
drivers/usb/phy/phy-gpio-vbus-usb.c
··· 260 260 261 261 gpio_vbus->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg), 262 262 GFP_KERNEL); 263 - if (!gpio_vbus->phy.otg) { 264 - kfree(gpio_vbus); 263 + if (!gpio_vbus->phy.otg) 265 264 return -ENOMEM; 266 - } 267 265 268 266 platform_set_drvdata(pdev, gpio_vbus); 269 267 gpio_vbus->dev = &pdev->dev;
+2 -2
drivers/usb/phy/phy-msm-usb.c
··· 1601 1601 */ 1602 1602 if (motg->phy_number) { 1603 1603 phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4); 1604 - if (IS_ERR(phy_select)) 1605 - return PTR_ERR(phy_select); 1604 + if (!phy_select) 1605 + return -ENOMEM; 1606 1606 /* Enable second PHY with the OTG port */ 1607 1607 writel(0x1, phy_select); 1608 1608 }
+1 -1
drivers/usb/phy/phy-samsung-usb.h
··· 216 216 217 217 #define EXYNOS5_DRD_PHYPARAM1 (0x20) 218 218 219 - #define PHYPARAM1_PCS_TXDEEMPH_MASK (0x1f << 0) 219 + #define PHYPARAM1_PCS_TXDEEMPH_MASK (0x3f << 0) 220 220 #define PHYPARAM1_PCS_TXDEEMPH (0x1c) 221 221 222 222 #define EXYNOS5_DRD_PHYTERM (0x24)
+3
drivers/usb/phy/phy.c
··· 232 232 phy = __usb_find_phy_dev(dev, &phy_bind_list, index); 233 233 if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { 234 234 dev_dbg(dev, "unable to find transceiver\n"); 235 + if (!IS_ERR(phy)) 236 + phy = ERR_PTR(-ENODEV); 237 + 235 238 goto err0; 236 239 } 237 240
+3
drivers/usb/serial/ftdi_sio.c
··· 146 146 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, 147 147 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, 148 148 { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, 149 + { USB_DEVICE(FTDI_VID, FTDI_BM_ATOM_NANO_PID) }, 149 150 { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, 150 151 { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) }, 151 152 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, ··· 935 934 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) }, 936 935 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) }, 937 936 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) }, 937 + /* ekey Devices */ 938 + { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) }, 938 939 /* Infineon Devices */ 939 940 { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, 940 941 { } /* Terminating entry */
+7
drivers/usb/serial/ftdi_sio_ids.h
··· 42 42 /* www.candapter.com Ewert Energy Systems CANdapter device */ 43 43 #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ 44 44 45 + #define FTDI_BM_ATOM_NANO_PID 0xa559 /* Basic Micro ATOM Nano USB2Serial */ 46 + 45 47 /* 46 48 * Texas Instruments XDS100v2 JTAG / BeagleBone A3 47 49 * http://processors.wiki.ti.com/index.php/XDS100 ··· 1380 1378 #define BRAINBOXES_US_160_6_PID 0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */ 1381 1379 #define BRAINBOXES_US_160_7_PID 0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */ 1382 1380 #define BRAINBOXES_US_160_8_PID 0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */ 1381 + 1382 + /* 1383 + * ekey biometric systems GmbH (http://ekey.net/) 1384 + */ 1385 + #define FTDI_EKEY_CONV_USB_PID 0xCB08 /* Converter USB */
+28 -3
drivers/usb/serial/option.c
··· 275 275 #define ZTE_PRODUCT_MF622 0x0001 276 276 #define ZTE_PRODUCT_MF628 0x0015 277 277 #define ZTE_PRODUCT_MF626 0x0031 278 - #define ZTE_PRODUCT_MC2718 0xffe8 279 278 #define ZTE_PRODUCT_AC2726 0xfff1 279 + #define ZTE_PRODUCT_CDMA_TECH 0xfffe 280 + #define ZTE_PRODUCT_AC8710T 0xffff 281 + #define ZTE_PRODUCT_MC2718 0xffe8 282 + #define ZTE_PRODUCT_AD3812 0xffeb 283 + #define ZTE_PRODUCT_MC2716 0xffed 280 284 281 285 #define BENQ_VENDOR_ID 0x04a5 282 286 #define BENQ_PRODUCT_H10 0x4068 ··· 498 494 #define INOVIA_VENDOR_ID 0x20a6 499 495 #define INOVIA_SEW858 0x1105 500 496 497 + /* VIA Telecom */ 498 + #define VIATELECOM_VENDOR_ID 0x15eb 499 + #define VIATELECOM_PRODUCT_CDS7 0x0001 500 + 501 501 /* some devices interfaces need special handling due to a number of reasons */ 502 502 enum option_blacklist_reason { 503 503 OPTION_BLACKLIST_NONE = 0, ··· 535 527 .reserved = BIT(4), 536 528 }; 537 529 530 + static const struct option_blacklist_info zte_ad3812_z_blacklist = { 531 + .sendsetup = BIT(0) | BIT(1) | BIT(2), 532 + }; 533 + 538 534 static const struct option_blacklist_info zte_mc2718_z_blacklist = { 539 535 .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4), 536 + }; 537 + 538 + static const struct option_blacklist_info zte_mc2716_z_blacklist = { 539 + .sendsetup = BIT(1) | BIT(2) | BIT(3), 540 540 }; 541 541 542 542 static const struct option_blacklist_info huawei_cdc12_blacklist = { ··· 1086 1070 { USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) }, 1087 1071 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, 1088 1072 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, 1073 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ 1089 1074 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1090 1075 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1091 1076 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ ··· 1561 1544 { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) }, 1562 1545 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) }, 1563 1546 1564 - /* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */ 1547 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, 1548 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, 1549 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, 1565 1550 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff), 1566 1551 .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist }, 1552 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff), 1553 + .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, 1554 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), 1555 + .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, 1567 1556 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, 1568 1557 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, 1569 1558 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, 1570 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, 1571 1559 1572 1560 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, 1573 1561 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, ··· 1746 1724 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 1747 1725 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 1748 1726 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, 1727 + { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, 1749 1728 { } /* Terminating entry */ 1750 1729 }; 1751 1730 MODULE_DEVICE_TABLE(usb, option_ids); ··· 1939 1916 dev_dbg(dev, "%s: type %x req %x\n", __func__, 1940 1917 
req_pkt->bRequestType, req_pkt->bRequest); 1941 1918 } 1919 + } else if (status == -ENOENT || status == -ESHUTDOWN) { 1920 + dev_dbg(dev, "%s: urb stopped: %d\n", __func__, status); 1942 1921 } else 1943 1922 dev_err(dev, "%s: error %d\n", __func__, status); 1944 1923
+1
drivers/usb/serial/pl2303.c
··· 45 45 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) }, 46 46 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, 47 47 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, 48 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) }, 48 49 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, 49 50 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, 50 51 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
+1
drivers/usb/serial/pl2303.h
··· 22 22 #define PL2303_PRODUCT_ID_GPRS 0x0609 23 23 #define PL2303_PRODUCT_ID_HCR331 0x331a 24 24 #define PL2303_PRODUCT_ID_MOTOROLA 0x0307 25 + #define PL2303_PRODUCT_ID_ZTEK 0xe1f1 25 26 26 27 #define ATEN_VENDOR_ID 0x0557 27 28 #define ATEN_VENDOR_ID2 0x0547
+27 -10
drivers/usb/serial/usb-serial.c
··· 764 764 if (usb_endpoint_is_bulk_in(endpoint)) { 765 765 /* we found a bulk in endpoint */ 766 766 dev_dbg(ddev, "found bulk in on endpoint %d\n", i); 767 - bulk_in_endpoint[num_bulk_in] = endpoint; 768 - ++num_bulk_in; 767 + if (num_bulk_in < MAX_NUM_PORTS) { 768 + bulk_in_endpoint[num_bulk_in] = endpoint; 769 + ++num_bulk_in; 770 + } 769 771 } 770 772 771 773 if (usb_endpoint_is_bulk_out(endpoint)) { 772 774 /* we found a bulk out endpoint */ 773 775 dev_dbg(ddev, "found bulk out on endpoint %d\n", i); 774 - bulk_out_endpoint[num_bulk_out] = endpoint; 775 - ++num_bulk_out; 776 + if (num_bulk_out < MAX_NUM_PORTS) { 777 + bulk_out_endpoint[num_bulk_out] = endpoint; 778 + ++num_bulk_out; 779 + } 776 780 } 777 781 778 782 if (usb_endpoint_is_int_in(endpoint)) { 779 783 /* we found a interrupt in endpoint */ 780 784 dev_dbg(ddev, "found interrupt in on endpoint %d\n", i); 781 - interrupt_in_endpoint[num_interrupt_in] = endpoint; 782 - ++num_interrupt_in; 785 + if (num_interrupt_in < MAX_NUM_PORTS) { 786 + interrupt_in_endpoint[num_interrupt_in] = 787 + endpoint; 788 + ++num_interrupt_in; 789 + } 783 790 } 784 791 785 792 if (usb_endpoint_is_int_out(endpoint)) { 786 793 /* we found an interrupt out endpoint */ 787 794 dev_dbg(ddev, "found interrupt out on endpoint %d\n", i); 788 - interrupt_out_endpoint[num_interrupt_out] = endpoint; 789 - ++num_interrupt_out; 795 + if (num_interrupt_out < MAX_NUM_PORTS) { 796 + interrupt_out_endpoint[num_interrupt_out] = 797 + endpoint; 798 + ++num_interrupt_out; 799 + } 790 800 } 791 801 } 792 802 ··· 819 809 if (usb_endpoint_is_int_in(endpoint)) { 820 810 /* we found a interrupt in endpoint */ 821 811 dev_dbg(ddev, "found interrupt in for Prolific device on separate interface\n"); 822 - interrupt_in_endpoint[num_interrupt_in] = endpoint; 823 - ++num_interrupt_in; 812 + if (num_interrupt_in < MAX_NUM_PORTS) { 813 + interrupt_in_endpoint[num_interrupt_in] = endpoint; 814 + ++num_interrupt_in; 815 + } 824 816 } 825 817 } 826 818 } 
··· 860 848 num_ports = type->calc_num_ports(serial); 861 849 if (!num_ports) 862 850 num_ports = type->num_ports; 851 + } 852 + 853 + if (num_ports > MAX_NUM_PORTS) { 854 + dev_warn(ddev, "too many ports requested: %d\n", num_ports); 855 + num_ports = MAX_NUM_PORTS; 863 856 } 864 857 865 858 serial->num_ports = num_ports;
+6 -1
drivers/usb/serial/whiteheat.c
··· 514 514 dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__); 515 515 return; 516 516 } 517 + if (!urb->actual_length) { 518 + dev_dbg(&urb->dev->dev, "%s - empty response, exiting.\n", __func__); 519 + return; 520 + } 517 521 if (status) { 518 522 dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status); 519 523 if (status != -ENOENT) ··· 538 534 /* These are unsolicited reports from the firmware, hence no 539 535 waiting command to wakeup */ 540 536 dev_dbg(&urb->dev->dev, "%s - event received\n", __func__); 541 - } else if (data[0] == WHITEHEAT_GET_DTR_RTS) { 537 + } else if ((data[0] == WHITEHEAT_GET_DTR_RTS) && 538 + (urb->actual_length - 1 <= sizeof(command_info->result_buffer))) { 542 539 memcpy(command_info->result_buffer, &data[1], 543 540 urb->actual_length - 1); 544 541 command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
-20
drivers/usb/serial/zte_ev.c
··· 272 272 } 273 273 274 274 static const struct usb_device_id id_table[] = { 275 - /* AC8710, AC8710T */ 276 - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) }, 277 - /* AC8700 */ 278 - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) }, 279 275 /* MG880 */ 280 276 { USB_DEVICE(0x19d2, 0xfffd) }, 281 - { USB_DEVICE(0x19d2, 0xfffc) }, 282 - { USB_DEVICE(0x19d2, 0xfffb) }, 283 - /* AC8710_V3 */ 284 - { USB_DEVICE(0x19d2, 0xfff6) }, 285 - { USB_DEVICE(0x19d2, 0xfff7) }, 286 - { USB_DEVICE(0x19d2, 0xfff8) }, 287 - { USB_DEVICE(0x19d2, 0xfff9) }, 288 - { USB_DEVICE(0x19d2, 0xffee) }, 289 - /* AC2716, MC2716 */ 290 - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) }, 291 - /* AD3812 */ 292 - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) }, 293 - { USB_DEVICE(0x19d2, 0xffec) }, 294 - { USB_DEVICE(0x05C6, 0x3197) }, 295 - { USB_DEVICE(0x05C6, 0x6000) }, 296 - { USB_DEVICE(0x05C6, 0x9008) }, 297 277 { }, 298 278 }; 299 279 MODULE_DEVICE_TABLE(usb, id_table);
+6
drivers/usb/storage/unusual_devs.h
··· 922 922 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 923 923 US_FL_FIX_CAPACITY ), 924 924 925 + UNUSUAL_DEV( 0x06ca, 0x2003, 0x0100, 0x0100, 926 + "Newer Technology", 927 + "uSCSI", 928 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, 929 + US_FL_SCM_MULT_TARG ), 930 + 925 931 /* Reported by Adrian Pilchowiec <adi1981@epf.pl> */ 926 932 UNUSUAL_DEV( 0x071b, 0x3203, 0x0000, 0x0000, 927 933 "RockChip",
+1 -2
drivers/usb/wusbcore/wa-xfer.c
··· 2602 2602 dev = &wa->usb_iface->dev; 2603 2603 --(wa->active_buf_in_urbs); 2604 2604 active_buf_in_urbs = wa->active_buf_in_urbs; 2605 + rpipe = xfer->ep->hcpriv; 2605 2606 2606 2607 if (usb_pipeisoc(xfer->urb->pipe)) { 2607 2608 struct usb_iso_packet_descriptor *iso_frame_desc = ··· 2660 2659 resubmit_dti = (isoc_data_frame_count == 2661 2660 urb_frame_count); 2662 2661 } else if (active_buf_in_urbs == 0) { 2663 - rpipe = xfer->ep->hcpriv; 2664 2662 dev_dbg(dev, 2665 2663 "xfer %p 0x%08X#%u: data in done (%zu bytes)\n", 2666 2664 xfer, wa_xfer_id(xfer), seg->index, ··· 2685 2685 */ 2686 2686 resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING; 2687 2687 spin_lock_irqsave(&xfer->lock, flags); 2688 - rpipe = xfer->ep->hcpriv; 2689 2688 if (printk_ratelimit()) 2690 2689 dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n", 2691 2690 xfer, wa_xfer_id(xfer), seg->index,
+1
drivers/video/backlight/pwm_bl.c
··· 173 173 data->max_brightness--; 174 174 } 175 175 176 + data->enable_gpio = -EINVAL; 176 177 return 0; 177 178 } 178 179
+18 -5
drivers/video/fbdev/amba-clcd.c
··· 24 24 #include <linux/list.h> 25 25 #include <linux/amba/bus.h> 26 26 #include <linux/amba/clcd.h> 27 + #include <linux/bitops.h> 27 28 #include <linux/clk.h> 28 29 #include <linux/hardirq.h> 29 30 #include <linux/dma-mapping.h> ··· 651 650 { 652 651 struct device_node *endpoint; 653 652 int err; 653 + unsigned int bpp; 654 654 u32 max_bandwidth; 655 655 u32 tft_r0b0g0[3]; 656 656 ··· 669 667 670 668 err = of_property_read_u32(fb->dev->dev.of_node, "max-memory-bandwidth", 671 669 &max_bandwidth); 672 - if (!err) 673 - fb->panel->bpp = 8 * max_bandwidth / (fb->panel->mode.xres * 674 - fb->panel->mode.yres * fb->panel->mode.refresh); 675 - else 676 - fb->panel->bpp = 32; 670 + if (!err) { 671 + /* 672 + * max_bandwidth is in bytes per second and pixclock in 673 + * pico-seconds, so the maximum allowed bits per pixel is 674 + * 8 * max_bandwidth / (PICOS2KHZ(pixclock) * 1000) 675 + * Rearrange this calculation to avoid overflow and then ensure 676 + * result is a valid format. 677 + */ 678 + bpp = max_bandwidth / (1000 / 8) 679 + / PICOS2KHZ(fb->panel->mode.pixclock); 680 + bpp = rounddown_pow_of_two(bpp); 681 + if (bpp > 32) 682 + bpp = 32; 683 + } else 684 + bpp = 32; 685 + fb->panel->bpp = bpp; 677 686 678 687 #ifdef CONFIG_CPU_BIG_ENDIAN 679 688 fb->panel->cntl |= CNTL_BEBO;
+2
drivers/video/fbdev/atmel_lcdfb.c
··· 1102 1102 timings = of_get_display_timings(display_np); 1103 1103 if (!timings) { 1104 1104 dev_err(dev, "failed to get display timings\n"); 1105 + ret = -EINVAL; 1105 1106 goto put_display_node; 1106 1107 } 1107 1108 1108 1109 timings_np = of_find_node_by_name(display_np, "display-timings"); 1109 1110 if (!timings_np) { 1110 1111 dev_err(dev, "failed to find display-timings node\n"); 1112 + ret = -ENODEV; 1111 1113 goto put_display_node; 1112 1114 } 1113 1115
+1 -1
drivers/video/fbdev/chipsfb.c
··· 273 273 { 0xa8, 0x00 } 274 274 }; 275 275 276 - static void __init chips_hw_init(void) 276 + static void chips_hw_init(void) 277 277 { 278 278 int i; 279 279
+1 -1
drivers/video/fbdev/da8xx-fb.c
··· 419 419 { 420 420 u32 reg; 421 421 422 - reg = lcdc_read(LCD_RASTER_TIMING_0_REG) & 0xf; 422 + reg = lcdc_read(LCD_RASTER_TIMING_0_REG) & 0x3ff; 423 423 reg |= (((back_porch-1) & 0xff) << 24) 424 424 | (((front_porch-1) & 0xff) << 16) 425 425 | (((pulse_width-1) & 0x3f) << 10);
+1
drivers/video/of_display_timing.c
··· 236 236 if (native_mode) 237 237 of_node_put(native_mode); 238 238 display_timings_release(disp); 239 + disp = NULL; 239 240 entryfail: 240 241 kfree(disp); 241 242 dispfail:
+36 -8
fs/btrfs/async-thread.c
··· 22 22 #include <linux/list.h> 23 23 #include <linux/spinlock.h> 24 24 #include <linux/freezer.h> 25 - #include <linux/workqueue.h> 26 25 #include "async-thread.h" 27 26 #include "ctree.h" 28 27 ··· 54 55 struct __btrfs_workqueue *high; 55 56 }; 56 57 57 - static inline struct __btrfs_workqueue 58 - *__btrfs_alloc_workqueue(const char *name, int flags, int max_active, 58 + static void normal_work_helper(struct btrfs_work *work); 59 + 60 + #define BTRFS_WORK_HELPER(name) \ 61 + void btrfs_##name(struct work_struct *arg) \ 62 + { \ 63 + struct btrfs_work *work = container_of(arg, struct btrfs_work, \ 64 + normal_work); \ 65 + normal_work_helper(work); \ 66 + } 67 + 68 + BTRFS_WORK_HELPER(worker_helper); 69 + BTRFS_WORK_HELPER(delalloc_helper); 70 + BTRFS_WORK_HELPER(flush_delalloc_helper); 71 + BTRFS_WORK_HELPER(cache_helper); 72 + BTRFS_WORK_HELPER(submit_helper); 73 + BTRFS_WORK_HELPER(fixup_helper); 74 + BTRFS_WORK_HELPER(endio_helper); 75 + BTRFS_WORK_HELPER(endio_meta_helper); 76 + BTRFS_WORK_HELPER(endio_meta_write_helper); 77 + BTRFS_WORK_HELPER(endio_raid56_helper); 78 + BTRFS_WORK_HELPER(rmw_helper); 79 + BTRFS_WORK_HELPER(endio_write_helper); 80 + BTRFS_WORK_HELPER(freespace_write_helper); 81 + BTRFS_WORK_HELPER(delayed_meta_helper); 82 + BTRFS_WORK_HELPER(readahead_helper); 83 + BTRFS_WORK_HELPER(qgroup_rescan_helper); 84 + BTRFS_WORK_HELPER(extent_refs_helper); 85 + BTRFS_WORK_HELPER(scrub_helper); 86 + BTRFS_WORK_HELPER(scrubwrc_helper); 87 + BTRFS_WORK_HELPER(scrubnc_helper); 88 + 89 + static struct __btrfs_workqueue * 90 + __btrfs_alloc_workqueue(const char *name, int flags, int max_active, 59 91 int thresh) 60 92 { 61 93 struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS); ··· 262 232 spin_unlock_irqrestore(lock, flags); 263 233 } 264 234 265 - static void normal_work_helper(struct work_struct *arg) 235 + static void normal_work_helper(struct btrfs_work *work) 266 236 { 267 - struct btrfs_work *work; 268 237 struct __btrfs_workqueue 
*wq; 269 238 int need_order = 0; 270 239 271 - work = container_of(arg, struct btrfs_work, normal_work); 272 240 /* 273 241 * We should not touch things inside work in the following cases: 274 242 * 1) after work->func() if it has no ordered_free ··· 290 262 trace_btrfs_all_work_done(work); 291 263 } 292 264 293 - void btrfs_init_work(struct btrfs_work *work, 265 + void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func, 294 266 btrfs_func_t func, 295 267 btrfs_func_t ordered_func, 296 268 btrfs_func_t ordered_free) ··· 298 270 work->func = func; 299 271 work->ordered_func = ordered_func; 300 272 work->ordered_free = ordered_free; 301 - INIT_WORK(&work->normal_work, normal_work_helper); 273 + INIT_WORK(&work->normal_work, uniq_func); 302 274 INIT_LIST_HEAD(&work->ordered_list); 303 275 work->flags = 0; 304 276 }
+27 -1
fs/btrfs/async-thread.h
··· 19 19 20 20 #ifndef __BTRFS_ASYNC_THREAD_ 21 21 #define __BTRFS_ASYNC_THREAD_ 22 + #include <linux/workqueue.h> 22 23 23 24 struct btrfs_workqueue; 24 25 /* Internal use only */ 25 26 struct __btrfs_workqueue; 26 27 struct btrfs_work; 27 28 typedef void (*btrfs_func_t)(struct btrfs_work *arg); 29 + typedef void (*btrfs_work_func_t)(struct work_struct *arg); 28 30 29 31 struct btrfs_work { 30 32 btrfs_func_t func; ··· 40 38 unsigned long flags; 41 39 }; 42 40 41 + #define BTRFS_WORK_HELPER_PROTO(name) \ 42 + void btrfs_##name(struct work_struct *arg) 43 + 44 + BTRFS_WORK_HELPER_PROTO(worker_helper); 45 + BTRFS_WORK_HELPER_PROTO(delalloc_helper); 46 + BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper); 47 + BTRFS_WORK_HELPER_PROTO(cache_helper); 48 + BTRFS_WORK_HELPER_PROTO(submit_helper); 49 + BTRFS_WORK_HELPER_PROTO(fixup_helper); 50 + BTRFS_WORK_HELPER_PROTO(endio_helper); 51 + BTRFS_WORK_HELPER_PROTO(endio_meta_helper); 52 + BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper); 53 + BTRFS_WORK_HELPER_PROTO(endio_raid56_helper); 54 + BTRFS_WORK_HELPER_PROTO(rmw_helper); 55 + BTRFS_WORK_HELPER_PROTO(endio_write_helper); 56 + BTRFS_WORK_HELPER_PROTO(freespace_write_helper); 57 + BTRFS_WORK_HELPER_PROTO(delayed_meta_helper); 58 + BTRFS_WORK_HELPER_PROTO(readahead_helper); 59 + BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper); 60 + BTRFS_WORK_HELPER_PROTO(extent_refs_helper); 61 + BTRFS_WORK_HELPER_PROTO(scrub_helper); 62 + BTRFS_WORK_HELPER_PROTO(scrubwrc_helper); 63 + BTRFS_WORK_HELPER_PROTO(scrubnc_helper); 64 + 43 65 struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name, 44 66 int flags, 45 67 int max_active, 46 68 int thresh); 47 - void btrfs_init_work(struct btrfs_work *work, 69 + void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper, 48 70 btrfs_func_t func, 49 71 btrfs_func_t ordered_func, 50 72 btrfs_func_t ordered_free);
+2 -2
fs/btrfs/delayed-inode.c
··· 1395 1395 return -ENOMEM; 1396 1396 1397 1397 async_work->delayed_root = delayed_root; 1398 - btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, 1399 - NULL, NULL); 1398 + btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper, 1399 + btrfs_async_run_delayed_root, NULL, NULL); 1400 1400 async_work->nr = nr; 1401 1401 1402 1402 btrfs_queue_work(root->fs_info->delayed_workers, &async_work->work);
+31 -25
fs/btrfs/disk-io.c
··· 39 39 #include "btrfs_inode.h" 40 40 #include "volumes.h" 41 41 #include "print-tree.h" 42 - #include "async-thread.h" 43 42 #include "locking.h" 44 43 #include "tree-log.h" 45 44 #include "free-space-cache.h" ··· 692 693 { 693 694 struct end_io_wq *end_io_wq = bio->bi_private; 694 695 struct btrfs_fs_info *fs_info; 696 + struct btrfs_workqueue *wq; 697 + btrfs_work_func_t func; 695 698 696 699 fs_info = end_io_wq->info; 697 700 end_io_wq->error = err; 698 - btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL); 699 701 700 702 if (bio->bi_rw & REQ_WRITE) { 701 - if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) 702 - btrfs_queue_work(fs_info->endio_meta_write_workers, 703 - &end_io_wq->work); 704 - else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) 705 - btrfs_queue_work(fs_info->endio_freespace_worker, 706 - &end_io_wq->work); 707 - else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) 708 - btrfs_queue_work(fs_info->endio_raid56_workers, 709 - &end_io_wq->work); 710 - else 711 - btrfs_queue_work(fs_info->endio_write_workers, 712 - &end_io_wq->work); 703 + if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { 704 + wq = fs_info->endio_meta_write_workers; 705 + func = btrfs_endio_meta_write_helper; 706 + } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) { 707 + wq = fs_info->endio_freespace_worker; 708 + func = btrfs_freespace_write_helper; 709 + } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { 710 + wq = fs_info->endio_raid56_workers; 711 + func = btrfs_endio_raid56_helper; 712 + } else { 713 + wq = fs_info->endio_write_workers; 714 + func = btrfs_endio_write_helper; 715 + } 713 716 } else { 714 - if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) 715 - btrfs_queue_work(fs_info->endio_raid56_workers, 716 - &end_io_wq->work); 717 - else if (end_io_wq->metadata) 718 - btrfs_queue_work(fs_info->endio_meta_workers, 719 - &end_io_wq->work); 720 - else 721 - btrfs_queue_work(fs_info->endio_workers, 722 - &end_io_wq->work); 
717 + if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { 718 + wq = fs_info->endio_raid56_workers; 719 + func = btrfs_endio_raid56_helper; 720 + } else if (end_io_wq->metadata) { 721 + wq = fs_info->endio_meta_workers; 722 + func = btrfs_endio_meta_helper; 723 + } else { 724 + wq = fs_info->endio_workers; 725 + func = btrfs_endio_helper; 726 + } 723 727 } 728 + 729 + btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL); 730 + btrfs_queue_work(wq, &end_io_wq->work); 724 731 } 725 732 726 733 /* ··· 833 828 async->submit_bio_start = submit_bio_start; 834 829 async->submit_bio_done = submit_bio_done; 835 830 836 - btrfs_init_work(&async->work, run_one_async_start, 831 + btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start, 837 832 run_one_async_done, run_one_async_free); 838 833 839 834 async->bio_flags = bio_flags; ··· 3455 3450 btrfs_set_stack_device_generation(dev_item, 0); 3456 3451 btrfs_set_stack_device_type(dev_item, dev->type); 3457 3452 btrfs_set_stack_device_id(dev_item, dev->devid); 3458 - btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes); 3453 + btrfs_set_stack_device_total_bytes(dev_item, 3454 + dev->disk_total_bytes); 3459 3455 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used); 3460 3456 btrfs_set_stack_device_io_align(dev_item, dev->io_align); 3461 3457 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
+6 -17
fs/btrfs/extent-tree.c
··· 552 552 caching_ctl->block_group = cache; 553 553 caching_ctl->progress = cache->key.objectid; 554 554 atomic_set(&caching_ctl->count, 1); 555 - btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL); 555 + btrfs_init_work(&caching_ctl->work, btrfs_cache_helper, 556 + caching_thread, NULL, NULL); 556 557 557 558 spin_lock(&cache->lock); 558 559 /* ··· 2750 2749 async->sync = 0; 2751 2750 init_completion(&async->wait); 2752 2751 2753 - btrfs_init_work(&async->work, delayed_ref_async_start, 2754 - NULL, NULL); 2752 + btrfs_init_work(&async->work, btrfs_extent_refs_helper, 2753 + delayed_ref_async_start, NULL, NULL); 2755 2754 2756 2755 btrfs_queue_work(root->fs_info->extent_workers, &async->work); 2757 2756 ··· 3587 3586 */ 3588 3587 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) 3589 3588 { 3590 - /* 3591 - * we add in the count of missing devices because we want 3592 - * to make sure that any RAID levels on a degraded FS 3593 - * continue to be honored. 3594 - */ 3595 - u64 num_devices = root->fs_info->fs_devices->rw_devices + 3596 - root->fs_info->fs_devices->missing_devices; 3589 + u64 num_devices = root->fs_info->fs_devices->rw_devices; 3597 3590 u64 target; 3598 3591 u64 tmp; 3599 3592 ··· 8435 8440 if (stripped) 8436 8441 return extended_to_chunk(stripped); 8437 8442 8438 - /* 8439 - * we add in the count of missing devices because we want 8440 - * to make sure that any RAID levels on a degraded FS 8441 - * continue to be honored. 8442 - */ 8443 - num_devices = root->fs_info->fs_devices->rw_devices + 8444 - root->fs_info->fs_devices->missing_devices; 8443 + num_devices = root->fs_info->fs_devices->rw_devices; 8445 8444 8446 8445 stripped = BTRFS_BLOCK_GROUP_RAID0 | 8447 8446 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
+3 -2
fs/btrfs/extent_io.c
··· 2532 2532 test_bit(BIO_UPTODATE, &bio->bi_flags); 2533 2533 if (err) 2534 2534 uptodate = 0; 2535 + offset += len; 2535 2536 continue; 2536 2537 } 2537 2538 } ··· 4208 4207 return -ENOMEM; 4209 4208 path->leave_spinning = 1; 4210 4209 4211 - start = ALIGN(start, BTRFS_I(inode)->root->sectorsize); 4212 - len = ALIGN(len, BTRFS_I(inode)->root->sectorsize); 4210 + start = round_down(start, BTRFS_I(inode)->root->sectorsize); 4211 + len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start; 4213 4212 4214 4213 /* 4215 4214 * lookup the last file extent. We're not using i_size here
+12 -5
fs/btrfs/file.c
··· 1840 1840 { 1841 1841 if (filp->private_data) 1842 1842 btrfs_ioctl_trans_end(filp); 1843 - filemap_flush(inode->i_mapping); 1843 + /* 1844 + * ordered_data_close is set by settattr when we are about to truncate 1845 + * a file from a non-zero size to a zero size. This tries to 1846 + * flush down new bytes that may have been written if the 1847 + * application were using truncate to replace a file in place. 1848 + */ 1849 + if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE, 1850 + &BTRFS_I(inode)->runtime_flags)) 1851 + filemap_flush(inode->i_mapping); 1844 1852 return 0; 1845 1853 } 1846 1854 ··· 2096 2088 goto out; 2097 2089 } 2098 2090 2099 - if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) { 2091 + if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) { 2100 2092 u64 num_bytes; 2101 2093 2102 - path->slots[0]++; 2103 2094 key.offset = offset; 2104 2095 btrfs_set_item_key_safe(root, path, &key); 2105 2096 fi = btrfs_item_ptr(leaf, path->slots[0], ··· 2223 2216 goto out_only_mutex; 2224 2217 } 2225 2218 2226 - lockstart = round_up(offset , BTRFS_I(inode)->root->sectorsize); 2219 + lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize); 2227 2220 lockend = round_down(offset + len, 2228 2221 BTRFS_I(inode)->root->sectorsize) - 1; 2229 2222 same_page = ((offset >> PAGE_CACHE_SHIFT) == ··· 2284 2277 tail_start + tail_len, 0, 1); 2285 2278 if (ret) 2286 2279 goto out_only_mutex; 2287 - } 2280 + } 2288 2281 } 2289 2282 } 2290 2283
+89 -20
fs/btrfs/inode.c
··· 1096 1096 async_cow->end = cur_end; 1097 1097 INIT_LIST_HEAD(&async_cow->extents); 1098 1098 1099 - btrfs_init_work(&async_cow->work, async_cow_start, 1100 - async_cow_submit, async_cow_free); 1099 + btrfs_init_work(&async_cow->work, 1100 + btrfs_delalloc_helper, 1101 + async_cow_start, async_cow_submit, 1102 + async_cow_free); 1101 1103 1102 1104 nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> 1103 1105 PAGE_CACHE_SHIFT; ··· 1883 1881 1884 1882 SetPageChecked(page); 1885 1883 page_cache_get(page); 1886 - btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); 1884 + btrfs_init_work(&fixup->work, btrfs_fixup_helper, 1885 + btrfs_writepage_fixup_worker, NULL, NULL); 1887 1886 fixup->page = page; 1888 1887 btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work); 1889 1888 return -EBUSY; ··· 2825 2822 struct inode *inode = page->mapping->host; 2826 2823 struct btrfs_root *root = BTRFS_I(inode)->root; 2827 2824 struct btrfs_ordered_extent *ordered_extent = NULL; 2828 - struct btrfs_workqueue *workers; 2825 + struct btrfs_workqueue *wq; 2826 + btrfs_work_func_t func; 2829 2827 2830 2828 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); 2831 2829 ··· 2835 2831 end - start + 1, uptodate)) 2836 2832 return 0; 2837 2833 2838 - btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL); 2834 + if (btrfs_is_free_space_inode(inode)) { 2835 + wq = root->fs_info->endio_freespace_worker; 2836 + func = btrfs_freespace_write_helper; 2837 + } else { 2838 + wq = root->fs_info->endio_write_workers; 2839 + func = btrfs_endio_write_helper; 2840 + } 2839 2841 2840 - if (btrfs_is_free_space_inode(inode)) 2841 - workers = root->fs_info->endio_freespace_worker; 2842 - else 2843 - workers = root->fs_info->endio_write_workers; 2844 - btrfs_queue_work(workers, &ordered_extent->work); 2842 + btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL, 2843 + NULL); 2844 + btrfs_queue_work(wq, &ordered_extent->work); 2845 2845 2846 
2846 return 0; 2847 2847 } ··· 4682 4674 clear_bit(EXTENT_FLAG_LOGGING, &em->flags); 4683 4675 remove_extent_mapping(map_tree, em); 4684 4676 free_extent_map(em); 4677 + if (need_resched()) { 4678 + write_unlock(&map_tree->lock); 4679 + cond_resched(); 4680 + write_lock(&map_tree->lock); 4681 + } 4685 4682 } 4686 4683 write_unlock(&map_tree->lock); 4687 4684 ··· 4709 4696 &cached_state, GFP_NOFS); 4710 4697 free_extent_state(state); 4711 4698 4699 + cond_resched(); 4712 4700 spin_lock(&io_tree->lock); 4713 4701 } 4714 4702 spin_unlock(&io_tree->lock); ··· 5195 5181 iput(inode); 5196 5182 inode = ERR_PTR(ret); 5197 5183 } 5184 + /* 5185 + * If orphan cleanup did remove any orphans, it means the tree 5186 + * was modified and therefore the commit root is not the same as 5187 + * the current root anymore. This is a problem, because send 5188 + * uses the commit root and therefore can see inode items that 5189 + * don't exist in the current root anymore, and for example make 5190 + * calls to btrfs_iget, which will do tree lookups based on the 5191 + * current root and not on the commit root. Those lookups will 5192 + * fail, returning a -ESTALE error, and making send fail with 5193 + * that error. So make sure a send does not see any orphans we 5194 + * have just removed, and that it will see the same inodes 5195 + * regardless of whether a transaction commit happened before 5196 + * it started (meaning that the commit root will be the same as 5197 + * the current root) or not. 5198 + */ 5199 + if (sub_root->node != sub_root->commit_root) { 5200 + u64 sub_flags = btrfs_root_flags(&sub_root->root_item); 5201 + 5202 + if (sub_flags & BTRFS_ROOT_SUBVOL_RDONLY) { 5203 + struct extent_buffer *eb; 5204 + 5205 + /* 5206 + * Assert we can't have races between dentry 5207 + * lookup called through the snapshot creation 5208 + * ioctl and the VFS. 
5209 + */ 5210 + ASSERT(mutex_is_locked(&dir->i_mutex)); 5211 + 5212 + down_write(&root->fs_info->commit_root_sem); 5213 + eb = sub_root->commit_root; 5214 + sub_root->commit_root = 5215 + btrfs_root_node(sub_root); 5216 + up_write(&root->fs_info->commit_root_sem); 5217 + free_extent_buffer(eb); 5218 + } 5219 + } 5198 5220 } 5199 5221 5200 5222 return inode; ··· 5654 5604 btrfs_free_path(path); 5655 5605 return ERR_PTR(-ENOMEM); 5656 5606 } 5607 + 5608 + /* 5609 + * O_TMPFILE, set link count to 0, so that after this point, 5610 + * we fill in an inode item with the correct link count. 5611 + */ 5612 + if (!name) 5613 + set_nlink(inode, 0); 5657 5614 5658 5615 /* 5659 5616 * we have to initialize this early, so we can reclaim the inode ··· 6154 6097 static int merge_extent_mapping(struct extent_map_tree *em_tree, 6155 6098 struct extent_map *existing, 6156 6099 struct extent_map *em, 6157 - u64 map_start, u64 map_len) 6100 + u64 map_start) 6158 6101 { 6159 6102 u64 start_diff; 6160 6103 6161 6104 BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); 6162 6105 start_diff = map_start - em->start; 6163 6106 em->start = map_start; 6164 - em->len = map_len; 6107 + em->len = existing->start - em->start; 6165 6108 if (em->block_start < EXTENT_MAP_LAST_BYTE && 6166 6109 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 6167 6110 em->block_start += start_diff; ··· 6332 6275 goto not_found; 6333 6276 if (start + len <= found_key.offset) 6334 6277 goto not_found; 6278 + if (start > found_key.offset) 6279 + goto next; 6335 6280 em->start = start; 6336 6281 em->orig_start = start; 6337 6282 em->len = found_key.offset - start; ··· 6449 6390 em->len); 6450 6391 if (existing) { 6451 6392 err = merge_extent_mapping(em_tree, existing, 6452 - em, start, 6453 - root->sectorsize); 6393 + em, start); 6454 6394 free_extent_map(existing); 6455 6395 if (err) { 6456 6396 free_extent_map(em); ··· 7216 7158 if (!ret) 7217 7159 goto out_test; 7218 7160 7219 - 
btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, NULL); 7161 + btrfs_init_work(&ordered->work, btrfs_endio_write_helper, 7162 + finish_ordered_fn, NULL, NULL); 7220 7163 btrfs_queue_work(root->fs_info->endio_write_workers, 7221 7164 &ordered->work); 7222 7165 out_test: ··· 7365 7306 map_length = orig_bio->bi_iter.bi_size; 7366 7307 ret = btrfs_map_block(root->fs_info, rw, start_sector << 9, 7367 7308 &map_length, NULL, 0); 7368 - if (ret) { 7369 - bio_put(orig_bio); 7309 + if (ret) 7370 7310 return -EIO; 7371 - } 7372 7311 7373 7312 if (map_length >= orig_bio->bi_iter.bi_size) { 7374 7313 bio = orig_bio; ··· 7383 7326 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); 7384 7327 if (!bio) 7385 7328 return -ENOMEM; 7329 + 7386 7330 bio->bi_private = dip; 7387 7331 bio->bi_end_io = btrfs_end_dio_bio; 7388 7332 atomic_inc(&dip->pending_bios); ··· 7592 7534 count = iov_iter_count(iter); 7593 7535 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 7594 7536 &BTRFS_I(inode)->runtime_flags)) 7595 - filemap_fdatawrite_range(inode->i_mapping, offset, count); 7537 + filemap_fdatawrite_range(inode->i_mapping, offset, 7538 + offset + count - 1); 7596 7539 7597 7540 if (rw & WRITE) { 7598 7541 /* ··· 8554 8495 work->inode = inode; 8555 8496 work->wait = wait; 8556 8497 work->delay_iput = delay_iput; 8557 - btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); 8498 + WARN_ON_ONCE(!inode); 8499 + btrfs_init_work(&work->work, btrfs_flush_delalloc_helper, 8500 + btrfs_run_delalloc_work, NULL, NULL); 8558 8501 8559 8502 return work; 8560 8503 } ··· 9040 8979 if (ret) 9041 8980 goto out; 9042 8981 8982 + /* 8983 + * We set number of links to 0 in btrfs_new_inode(), and here we set 8984 + * it to 1 because d_tmpfile() will issue a warning if the count is 0, 8985 + * through: 8986 + * 8987 + * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() 8988 + */ 8989 + set_nlink(inode, 1); 9043 8990 d_tmpfile(dentry, inode); 9044 8991 mark_inode_dirty(inode); 
9045 8992
+2 -34
fs/btrfs/ioctl.c
··· 711 711 if (ret) 712 712 goto fail; 713 713 714 - ret = btrfs_orphan_cleanup(pending_snapshot->snap); 715 - if (ret) 716 - goto fail; 717 - 718 - /* 719 - * If orphan cleanup did remove any orphans, it means the tree was 720 - * modified and therefore the commit root is not the same as the 721 - * current root anymore. This is a problem, because send uses the 722 - * commit root and therefore can see inode items that don't exist 723 - * in the current root anymore, and for example make calls to 724 - * btrfs_iget, which will do tree lookups based on the current root 725 - * and not on the commit root. Those lookups will fail, returning a 726 - * -ESTALE error, and making send fail with that error. So make sure 727 - * a send does not see any orphans we have just removed, and that it 728 - * will see the same inodes regardless of whether a transaction 729 - * commit happened before it started (meaning that the commit root 730 - * will be the same as the current root) or not. 731 - */ 732 - if (readonly && pending_snapshot->snap->node != 733 - pending_snapshot->snap->commit_root) { 734 - trans = btrfs_join_transaction(pending_snapshot->snap); 735 - if (IS_ERR(trans) && PTR_ERR(trans) != -ENOENT) { 736 - ret = PTR_ERR(trans); 737 - goto fail; 738 - } 739 - if (!IS_ERR(trans)) { 740 - ret = btrfs_commit_transaction(trans, 741 - pending_snapshot->snap); 742 - if (ret) 743 - goto fail; 744 - } 745 - } 746 - 747 714 inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry); 748 715 if (IS_ERR(inode)) { 749 716 ret = PTR_ERR(inode); ··· 3494 3527 btrfs_mark_buffer_dirty(leaf); 3495 3528 btrfs_release_path(path); 3496 3529 3497 - last_dest_end = new_key.offset + datal; 3530 + last_dest_end = ALIGN(new_key.offset + datal, 3531 + root->sectorsize); 3498 3532 ret = clone_finish_inode_update(trans, inode, 3499 3533 last_dest_end, 3500 3534 destoff, olen);
+1
fs/btrfs/ordered-data.c
··· 615 615 spin_unlock(&root->ordered_extent_lock); 616 616 617 617 btrfs_init_work(&ordered->flush_work, 618 + btrfs_flush_delalloc_helper, 618 619 btrfs_run_ordered_extent_work, NULL, NULL); 619 620 list_add_tail(&ordered->work_list, &works); 620 621 btrfs_queue_work(root->fs_info->flush_workers,
+2 -1
fs/btrfs/qgroup.c
··· 1973 1973 elem.seq, &roots); 1974 1974 btrfs_put_tree_mod_seq(fs_info, &elem); 1975 1975 if (ret < 0) 1976 - return ret; 1976 + goto out; 1977 1977 1978 1978 if (roots->nnodes != 1) 1979 1979 goto out; ··· 2720 2720 memset(&fs_info->qgroup_rescan_work, 0, 2721 2721 sizeof(fs_info->qgroup_rescan_work)); 2722 2722 btrfs_init_work(&fs_info->qgroup_rescan_work, 2723 + btrfs_qgroup_rescan_helper, 2723 2724 btrfs_qgroup_rescan_worker, NULL, NULL); 2724 2725 2725 2726 if (ret) {
+6 -3
fs/btrfs/raid56.c
··· 1416 1416 1417 1417 static void async_rmw_stripe(struct btrfs_raid_bio *rbio) 1418 1418 { 1419 - btrfs_init_work(&rbio->work, rmw_work, NULL, NULL); 1419 + btrfs_init_work(&rbio->work, btrfs_rmw_helper, 1420 + rmw_work, NULL, NULL); 1420 1421 1421 1422 btrfs_queue_work(rbio->fs_info->rmw_workers, 1422 1423 &rbio->work); ··· 1425 1424 1426 1425 static void async_read_rebuild(struct btrfs_raid_bio *rbio) 1427 1426 { 1428 - btrfs_init_work(&rbio->work, read_rebuild_work, NULL, NULL); 1427 + btrfs_init_work(&rbio->work, btrfs_rmw_helper, 1428 + read_rebuild_work, NULL, NULL); 1429 1429 1430 1430 btrfs_queue_work(rbio->fs_info->rmw_workers, 1431 1431 &rbio->work); ··· 1667 1665 plug = container_of(cb, struct btrfs_plug_cb, cb); 1668 1666 1669 1667 if (from_schedule) { 1670 - btrfs_init_work(&plug->work, unplug_work, NULL, NULL); 1668 + btrfs_init_work(&plug->work, btrfs_rmw_helper, 1669 + unplug_work, NULL, NULL); 1671 1670 btrfs_queue_work(plug->info->rmw_workers, 1672 1671 &plug->work); 1673 1672 return;
+2 -1
fs/btrfs/reada.c
··· 798 798 /* FIXME we cannot handle this properly right now */ 799 799 BUG(); 800 800 } 801 - btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL); 801 + btrfs_init_work(&rmw->work, btrfs_readahead_helper, 802 + reada_start_machine_worker, NULL, NULL); 802 803 rmw->fs_info = fs_info; 803 804 804 805 btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
+19 -6
fs/btrfs/scrub.c
··· 428 428 sbio->index = i; 429 429 sbio->sctx = sctx; 430 430 sbio->page_count = 0; 431 - btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, 432 - NULL, NULL); 431 + btrfs_init_work(&sbio->work, btrfs_scrub_helper, 432 + scrub_bio_end_io_worker, NULL, NULL); 433 433 434 434 if (i != SCRUB_BIOS_PER_SCTX - 1) 435 435 sctx->bios[i]->next_free = i + 1; ··· 999 999 fixup_nodatasum->root = fs_info->extent_root; 1000 1000 fixup_nodatasum->mirror_num = failed_mirror_index + 1; 1001 1001 scrub_pending_trans_workers_inc(sctx); 1002 - btrfs_init_work(&fixup_nodatasum->work, scrub_fixup_nodatasum, 1003 - NULL, NULL); 1002 + btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper, 1003 + scrub_fixup_nodatasum, NULL, NULL); 1004 1004 btrfs_queue_work(fs_info->scrub_workers, 1005 1005 &fixup_nodatasum->work); 1006 1006 goto out; ··· 1616 1616 sbio->err = err; 1617 1617 sbio->bio = bio; 1618 1618 1619 - btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL); 1619 + btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, 1620 + scrub_wr_bio_end_io_worker, NULL, NULL); 1620 1621 btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); 1621 1622 } 1622 1623 ··· 2905 2904 struct scrub_ctx *sctx; 2906 2905 int ret; 2907 2906 struct btrfs_device *dev; 2907 + struct rcu_string *name; 2908 2908 2909 2909 if (btrfs_fs_closing(fs_info)) 2910 2910 return -EINVAL; ··· 2965 2963 if (!dev || (dev->missing && !is_dev_replace)) { 2966 2964 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2967 2965 return -ENODEV; 2966 + } 2967 + 2968 + if (!is_dev_replace && !readonly && !dev->writeable) { 2969 + mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2970 + rcu_read_lock(); 2971 + name = rcu_dereference(dev->name); 2972 + btrfs_err(fs_info, "scrub: device %s is not writable", 2973 + name->str); 2974 + rcu_read_unlock(); 2975 + return -EROFS; 2968 2976 } 2969 2977 2970 2978 mutex_lock(&fs_info->scrub_lock); ··· 3215 3203 nocow_ctx->len = len; 3216 3204 
nocow_ctx->mirror_num = mirror_num; 3217 3205 nocow_ctx->physical_for_dev_replace = physical_for_dev_replace; 3218 - btrfs_init_work(&nocow_ctx->work, copy_nocow_pages_worker, NULL, NULL); 3206 + btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper, 3207 + copy_nocow_pages_worker, NULL, NULL); 3219 3208 INIT_LIST_HEAD(&nocow_ctx->inodes); 3220 3209 btrfs_queue_work(fs_info->scrub_nocow_workers, 3221 3210 &nocow_ctx->work);
+1 -1
fs/btrfs/sysfs.c
··· 614 614 if (!fs_info->device_dir_kobj) 615 615 return -EINVAL; 616 616 617 - if (one_device) { 617 + if (one_device && one_device->bdev) { 618 618 disk = one_device->bdev->bd_part; 619 619 disk_kobj = &part_to_dev(disk)->kobj; 620 620
+13 -4
fs/btrfs/tree-log.c
··· 3298 3298 struct list_head ordered_sums; 3299 3299 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 3300 3300 bool has_extents = false; 3301 - bool need_find_last_extent = (*last_extent == 0); 3301 + bool need_find_last_extent = true; 3302 3302 bool done = false; 3303 3303 3304 3304 INIT_LIST_HEAD(&ordered_sums); ··· 3352 3352 */ 3353 3353 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) { 3354 3354 has_extents = true; 3355 - if (need_find_last_extent && 3356 - first_key.objectid == (u64)-1) 3355 + if (first_key.objectid == (u64)-1) 3357 3356 first_key = ins_keys[i]; 3358 3357 } else { 3359 3358 need_find_last_extent = false; ··· 3425 3426 3426 3427 if (!has_extents) 3427 3428 return ret; 3429 + 3430 + if (need_find_last_extent && *last_extent == first_key.offset) { 3431 + /* 3432 + * We don't have any leafs between our current one and the one 3433 + * we processed before that can have file extent items for our 3434 + * inode (and have a generation number smaller than our current 3435 + * transaction id). 3436 + */ 3437 + need_find_last_extent = false; 3438 + } 3428 3439 3429 3440 /* 3430 3441 * Because we use btrfs_search_forward we could skip leaves that were ··· 3546 3537 0, 0); 3547 3538 if (ret) 3548 3539 break; 3549 - *last_extent = offset + len; 3540 + *last_extent = extent_end; 3550 3541 } 3551 3542 /* 3552 3543 * Need to let the callers know we dropped the path so they should
+60 -5
fs/btrfs/volumes.c
··· 508 508 ret = 1; 509 509 device->fs_devices = fs_devices; 510 510 } else if (!device->name || strcmp(device->name->str, path)) { 511 + /* 512 + * When FS is already mounted. 513 + * 1. If you are here and if the device->name is NULL that 514 + * means this device was missing at time of FS mount. 515 + * 2. If you are here and if the device->name is different 516 + * from 'path' that means either 517 + * a. The same device disappeared and reappeared with 518 + * different name. or 519 + * b. The missing-disk-which-was-replaced, has 520 + * reappeared now. 521 + * 522 + * We must allow 1 and 2a above. But 2b would be a spurious 523 + * and unintentional. 524 + * 525 + * Further in case of 1 and 2a above, the disk at 'path' 526 + * would have missed some transaction when it was away and 527 + * in case of 2a the stale bdev has to be updated as well. 528 + * 2b must not be allowed at all time. 529 + */ 530 + 531 + /* 532 + * As of now don't allow update to btrfs_fs_device through 533 + * the btrfs dev scan cli, after FS has been mounted. 534 + */ 535 + if (fs_devices->opened) { 536 + return -EBUSY; 537 + } else { 538 + /* 539 + * That is if the FS is _not_ mounted and if you 540 + * are here, that means there is more than one 541 + * disk with same uuid and devid.We keep the one 542 + * with larger generation number or the last-in if 543 + * generation are equal. 544 + */ 545 + if (found_transid < device->generation) 546 + return -EEXIST; 547 + } 548 + 511 549 name = rcu_string_strdup(path, GFP_NOFS); 512 550 if (!name) 513 551 return -ENOMEM; ··· 556 518 device->missing = 0; 557 519 } 558 520 } 521 + 522 + /* 523 + * Unmount does not free the btrfs_device struct but would zero 524 + * generation along with most of the other members. So just update 525 + * it back. We need it to pick the disk with largest generation 526 + * (as above). 
527 + */ 528 + if (!fs_devices->opened) 529 + device->generation = found_transid; 559 530 560 531 if (found_transid > fs_devices->latest_trans) { 561 532 fs_devices->latest_devid = devid; ··· 1483 1436 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1484 1437 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1485 1438 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1486 - btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes); 1439 + btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes); 1487 1440 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used); 1488 1441 btrfs_set_device_group(leaf, dev_item, 0); 1489 1442 btrfs_set_device_seek_speed(leaf, dev_item, 0); ··· 1718 1671 device->fs_devices->total_devices--; 1719 1672 1720 1673 if (device->missing) 1721 - root->fs_info->fs_devices->missing_devices--; 1674 + device->fs_devices->missing_devices--; 1722 1675 1723 1676 next_device = list_entry(root->fs_info->fs_devices->devices.next, 1724 1677 struct btrfs_device, dev_list); ··· 1848 1801 if (srcdev->bdev) { 1849 1802 fs_info->fs_devices->open_devices--; 1850 1803 1851 - /* zero out the old super */ 1852 - btrfs_scratch_superblock(srcdev); 1804 + /* 1805 + * zero out the old super if it is not writable 1806 + * (e.g. 
seed device) 1807 + */ 1808 + if (srcdev->writeable) 1809 + btrfs_scratch_superblock(srcdev); 1853 1810 } 1854 1811 1855 1812 call_rcu(&srcdev->rcu, free_device); ··· 1992 1941 fs_devices->seeding = 0; 1993 1942 fs_devices->num_devices = 0; 1994 1943 fs_devices->open_devices = 0; 1944 + fs_devices->missing_devices = 0; 1945 + fs_devices->num_can_discard = 0; 1946 + fs_devices->rotating = 0; 1995 1947 fs_devices->seed = seed_devices; 1996 1948 1997 1949 generate_random_uuid(fs_devices->fsid); ··· 5854 5800 else 5855 5801 generate_random_uuid(dev->uuid); 5856 5802 5857 - btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL); 5803 + btrfs_init_work(&dev->work, btrfs_submit_helper, 5804 + pending_bios_fn, NULL, NULL); 5858 5805 5859 5806 return dev; 5860 5807 }
+17 -1
fs/ext4/ext4.h
··· 1825 1825 /* 1826 1826 * Special error return code only used by dx_probe() and its callers. 1827 1827 */ 1828 - #define ERR_BAD_DX_DIR -75000 1828 + #define ERR_BAD_DX_DIR (-(MAX_ERRNO - 1)) 1829 1829 1830 1830 /* 1831 1831 * Timeout and state flag for lazy initialization inode thread. ··· 2452 2452 if (newsize > EXT4_I(inode)->i_disksize) 2453 2453 EXT4_I(inode)->i_disksize = newsize; 2454 2454 up_write(&EXT4_I(inode)->i_data_sem); 2455 + } 2456 + 2457 + /* Update i_size, i_disksize. Requires i_mutex to avoid races with truncate */ 2458 + static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize) 2459 + { 2460 + int changed = 0; 2461 + 2462 + if (newsize > inode->i_size) { 2463 + i_size_write(inode, newsize); 2464 + changed = 1; 2465 + } 2466 + if (newsize > EXT4_I(inode)->i_disksize) { 2467 + ext4_update_i_disksize(inode, newsize); 2468 + changed |= 2; 2469 + } 2470 + return changed; 2455 2471 } 2456 2472 2457 2473 struct ext4_group_info {
+44 -44
fs/ext4/extents.c
··· 4665 4665 } 4666 4666 4667 4667 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, 4668 - ext4_lblk_t len, int flags, int mode) 4668 + ext4_lblk_t len, loff_t new_size, 4669 + int flags, int mode) 4669 4670 { 4670 4671 struct inode *inode = file_inode(file); 4671 4672 handle_t *handle; ··· 4675 4674 int retries = 0; 4676 4675 struct ext4_map_blocks map; 4677 4676 unsigned int credits; 4677 + loff_t epos; 4678 4678 4679 4679 map.m_lblk = offset; 4680 + map.m_len = len; 4680 4681 /* 4681 4682 * Don't normalize the request if it can fit in one extent so 4682 4683 * that it doesn't get unnecessarily split into multiple ··· 4693 4690 credits = ext4_chunk_trans_blocks(inode, len); 4694 4691 4695 4692 retry: 4696 - while (ret >= 0 && ret < len) { 4697 - map.m_lblk = map.m_lblk + ret; 4698 - map.m_len = len = len - ret; 4693 + while (ret >= 0 && len) { 4699 4694 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 4700 4695 credits); 4701 4696 if (IS_ERR(handle)) { ··· 4710 4709 ret2 = ext4_journal_stop(handle); 4711 4710 break; 4712 4711 } 4712 + map.m_lblk += ret; 4713 + map.m_len = len = len - ret; 4714 + epos = (loff_t)map.m_lblk << inode->i_blkbits; 4715 + inode->i_ctime = ext4_current_time(inode); 4716 + if (new_size) { 4717 + if (epos > new_size) 4718 + epos = new_size; 4719 + if (ext4_update_inode_size(inode, epos) & 0x1) 4720 + inode->i_mtime = inode->i_ctime; 4721 + } else { 4722 + if (epos > inode->i_size) 4723 + ext4_set_inode_flag(inode, 4724 + EXT4_INODE_EOFBLOCKS); 4725 + } 4726 + ext4_mark_inode_dirty(handle, inode); 4713 4727 ret2 = ext4_journal_stop(handle); 4714 4728 if (ret2) 4715 4729 break; ··· 4747 4731 loff_t new_size = 0; 4748 4732 int ret = 0; 4749 4733 int flags; 4750 - int partial; 4734 + int credits; 4735 + int partial_begin, partial_end; 4751 4736 loff_t start, end; 4752 4737 ext4_lblk_t lblk; 4753 4738 struct address_space *mapping = inode->i_mapping; ··· 4788 4771 4789 4772 if (start < offset || end > offset + 
len) 4790 4773 return -EINVAL; 4791 - partial = (offset + len) & ((1 << blkbits) - 1); 4774 + partial_begin = offset & ((1 << blkbits) - 1); 4775 + partial_end = (offset + len) & ((1 << blkbits) - 1); 4792 4776 4793 4777 lblk = start >> blkbits; 4794 4778 max_blocks = (end >> blkbits); ··· 4823 4805 * If we have a partial block after EOF we have to allocate 4824 4806 * the entire block. 4825 4807 */ 4826 - if (partial) 4808 + if (partial_end) 4827 4809 max_blocks += 1; 4828 4810 } 4829 4811 ··· 4831 4813 4832 4814 /* Now release the pages and zero block aligned part of pages*/ 4833 4815 truncate_pagecache_range(inode, start, end - 1); 4816 + inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 4834 4817 4835 4818 /* Wait all existing dio workers, newcomers will block on i_mutex */ 4836 4819 ext4_inode_block_unlocked_dio(inode); ··· 4844 4825 if (ret) 4845 4826 goto out_dio; 4846 4827 4847 - ret = ext4_alloc_file_blocks(file, lblk, max_blocks, flags, 4848 - mode); 4828 + ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, 4829 + flags, mode); 4849 4830 if (ret) 4850 4831 goto out_dio; 4851 4832 } 4833 + if (!partial_begin && !partial_end) 4834 + goto out_dio; 4852 4835 4853 - handle = ext4_journal_start(inode, EXT4_HT_MISC, 4); 4836 + /* 4837 + * In worst case we have to writeout two nonadjacent unwritten 4838 + * blocks and update the inode 4839 + */ 4840 + credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1; 4841 + if (ext4_should_journal_data(inode)) 4842 + credits += 2; 4843 + handle = ext4_journal_start(inode, EXT4_HT_MISC, credits); 4854 4844 if (IS_ERR(handle)) { 4855 4845 ret = PTR_ERR(handle); 4856 4846 ext4_std_error(inode->i_sb, ret); ··· 4867 4839 } 4868 4840 4869 4841 inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 4870 - 4871 4842 if (new_size) { 4872 - if (new_size > i_size_read(inode)) 4873 - i_size_write(inode, new_size); 4874 - if (new_size > EXT4_I(inode)->i_disksize) 4875 - ext4_update_i_disksize(inode, 
new_size); 4843 + ext4_update_inode_size(inode, new_size); 4876 4844 } else { 4877 4845 /* 4878 4846 * Mark that we allocate beyond EOF so the subsequent truncate ··· 4877 4853 if ((offset + len) > i_size_read(inode)) 4878 4854 ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4879 4855 } 4880 - 4881 4856 ext4_mark_inode_dirty(handle, inode); 4882 4857 4883 4858 /* Zero out partial block at the edges of the range */ ··· 4903 4880 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) 4904 4881 { 4905 4882 struct inode *inode = file_inode(file); 4906 - handle_t *handle; 4907 4883 loff_t new_size = 0; 4908 4884 unsigned int max_blocks; 4909 4885 int ret = 0; 4910 4886 int flags; 4911 4887 ext4_lblk_t lblk; 4912 - struct timespec tv; 4913 4888 unsigned int blkbits = inode->i_blkbits; 4914 4889 4915 4890 /* Return error if mode is not supported */ ··· 4958 4937 goto out; 4959 4938 } 4960 4939 4961 - ret = ext4_alloc_file_blocks(file, lblk, max_blocks, flags, mode); 4940 + ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, 4941 + flags, mode); 4962 4942 if (ret) 4963 4943 goto out; 4964 4944 4965 - handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 4966 - if (IS_ERR(handle)) 4967 - goto out; 4968 - 4969 - tv = inode->i_ctime = ext4_current_time(inode); 4970 - 4971 - if (new_size) { 4972 - if (new_size > i_size_read(inode)) { 4973 - i_size_write(inode, new_size); 4974 - inode->i_mtime = tv; 4975 - } 4976 - if (new_size > EXT4_I(inode)->i_disksize) 4977 - ext4_update_i_disksize(inode, new_size); 4978 - } else { 4979 - /* 4980 - * Mark that we allocate beyond EOF so the subsequent truncate 4981 - * can proceed even if the new size is the same as i_size. 
4982 - */ 4983 - if ((offset + len) > i_size_read(inode)) 4984 - ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4945 + if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) { 4946 + ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal, 4947 + EXT4_I(inode)->i_sync_tid); 4985 4948 } 4986 - ext4_mark_inode_dirty(handle, inode); 4987 - if (file->f_flags & O_SYNC) 4988 - ext4_handle_sync(handle); 4989 - 4990 - ext4_journal_stop(handle); 4991 4949 out: 4992 4950 mutex_unlock(&inode->i_mutex); 4993 4951 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
+16 -28
fs/ext4/inode.c
··· 1055 1055 } else 1056 1056 copied = block_write_end(file, mapping, pos, 1057 1057 len, copied, page, fsdata); 1058 - 1059 1058 /* 1060 - * No need to use i_size_read() here, the i_size 1061 - * cannot change under us because we hole i_mutex. 1062 - * 1063 - * But it's important to update i_size while still holding page lock: 1059 + * it's important to update i_size while still holding page lock: 1064 1060 * page writeout could otherwise come in and zero beyond i_size. 1065 1061 */ 1066 - if (pos + copied > inode->i_size) { 1067 - i_size_write(inode, pos + copied); 1068 - i_size_changed = 1; 1069 - } 1070 - 1071 - if (pos + copied > EXT4_I(inode)->i_disksize) { 1072 - /* We need to mark inode dirty even if 1073 - * new_i_size is less that inode->i_size 1074 - * but greater than i_disksize. (hint delalloc) 1075 - */ 1076 - ext4_update_i_disksize(inode, (pos + copied)); 1077 - i_size_changed = 1; 1078 - } 1062 + i_size_changed = ext4_update_inode_size(inode, pos + copied); 1079 1063 unlock_page(page); 1080 1064 page_cache_release(page); 1081 1065 ··· 1107 1123 int ret = 0, ret2; 1108 1124 int partial = 0; 1109 1125 unsigned from, to; 1110 - loff_t new_i_size; 1126 + int size_changed = 0; 1111 1127 1112 1128 trace_ext4_journalled_write_end(inode, pos, len, copied); 1113 1129 from = pos & (PAGE_CACHE_SIZE - 1); ··· 1130 1146 if (!partial) 1131 1147 SetPageUptodate(page); 1132 1148 } 1133 - new_i_size = pos + copied; 1134 - if (new_i_size > inode->i_size) 1135 - i_size_write(inode, pos+copied); 1149 + size_changed = ext4_update_inode_size(inode, pos + copied); 1136 1150 ext4_set_inode_state(inode, EXT4_STATE_JDATA); 1137 1151 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 1138 - if (new_i_size > EXT4_I(inode)->i_disksize) { 1139 - ext4_update_i_disksize(inode, new_i_size); 1152 + unlock_page(page); 1153 + page_cache_release(page); 1154 + 1155 + if (size_changed) { 1140 1156 ret2 = ext4_mark_inode_dirty(handle, inode); 1141 1157 if (!ret) 1142 1158 ret 
= ret2; 1143 1159 } 1144 1160 1145 - unlock_page(page); 1146 - page_cache_release(page); 1147 1161 if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1148 1162 /* if we have allocated more blocks and copied 1149 1163 * less. We will have blocks allocated outside ··· 2077 2095 struct ext4_map_blocks *map = &mpd->map; 2078 2096 int err; 2079 2097 loff_t disksize; 2098 + int progress = 0; 2080 2099 2081 2100 mpd->io_submit.io_end->offset = 2082 2101 ((loff_t)map->m_lblk) << inode->i_blkbits; ··· 2094 2111 * is non-zero, a commit should free up blocks. 2095 2112 */ 2096 2113 if ((err == -ENOMEM) || 2097 - (err == -ENOSPC && ext4_count_free_clusters(sb))) 2114 + (err == -ENOSPC && ext4_count_free_clusters(sb))) { 2115 + if (progress) 2116 + goto update_disksize; 2098 2117 return err; 2118 + } 2099 2119 ext4_msg(sb, KERN_CRIT, 2100 2120 "Delayed block allocation failed for " 2101 2121 "inode %lu at logical offset %llu with" ··· 2115 2129 *give_up_on_write = true; 2116 2130 return err; 2117 2131 } 2132 + progress = 1; 2118 2133 /* 2119 2134 * Update buffer state, submit mapped pages, and get us new 2120 2135 * extent to map 2121 2136 */ 2122 2137 err = mpage_map_and_submit_buffers(mpd); 2123 2138 if (err < 0) 2124 - return err; 2139 + goto update_disksize; 2125 2140 } while (map->m_len); 2126 2141 2142 + update_disksize: 2127 2143 /* 2128 2144 * Update on-disk size after IO is submitted. Races with 2129 2145 * truncate are avoided by checking i_size under i_data_sem.
+5
fs/ext4/mballoc.c
··· 1412 1412 int last = first + count - 1; 1413 1413 struct super_block *sb = e4b->bd_sb; 1414 1414 1415 + if (WARN_ON(count == 0)) 1416 + return; 1415 1417 BUG_ON(last >= (sb->s_blocksize << 3)); 1416 1418 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 1417 1419 /* Don't bother if the block group is corrupt. */ ··· 3223 3221 int err; 3224 3222 3225 3223 if (pa == NULL) { 3224 + if (ac->ac_f_ex.fe_len == 0) 3225 + return; 3226 3226 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 3227 3227 if (err) { 3228 3228 /* ··· 3239 3235 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 3240 3236 ac->ac_f_ex.fe_len); 3241 3237 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 3238 + ext4_mb_unload_buddy(&e4b); 3242 3239 return; 3243 3240 } 3244 3241 if (pa->pa_type == MB_INODE_PA)
+51 -5
fs/ext4/namei.c
··· 1227 1227 buffer */ 1228 1228 int num = 0; 1229 1229 ext4_lblk_t nblocks; 1230 - int i, err; 1230 + int i, err = 0; 1231 1231 int namelen; 1232 1232 1233 1233 *res_dir = NULL; ··· 1264 1264 * return. Otherwise, fall back to doing a search the 1265 1265 * old fashioned way. 1266 1266 */ 1267 - if (bh || (err != ERR_BAD_DX_DIR)) 1267 + if (err == -ENOENT) 1268 + return NULL; 1269 + if (err && err != ERR_BAD_DX_DIR) 1270 + return ERR_PTR(err); 1271 + if (bh) 1268 1272 return bh; 1269 1273 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, " 1270 1274 "falling back\n")); ··· 1299 1295 } 1300 1296 num++; 1301 1297 bh = ext4_getblk(NULL, dir, b++, 0, &err); 1298 + if (unlikely(err)) { 1299 + if (ra_max == 0) 1300 + return ERR_PTR(err); 1301 + break; 1302 + } 1302 1303 bh_use[ra_max] = bh; 1303 1304 if (bh) 1304 1305 ll_rw_block(READ | REQ_META | REQ_PRIO, ··· 1426 1417 return ERR_PTR(-ENAMETOOLONG); 1427 1418 1428 1419 bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); 1420 + if (IS_ERR(bh)) 1421 + return (struct dentry *) bh; 1429 1422 inode = NULL; 1430 1423 if (bh) { 1431 1424 __u32 ino = le32_to_cpu(de->inode); ··· 1461 1450 struct buffer_head *bh; 1462 1451 1463 1452 bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL); 1453 + if (IS_ERR(bh)) 1454 + return (struct dentry *) bh; 1464 1455 if (!bh) 1465 1456 return ERR_PTR(-ENOENT); 1466 1457 ino = le32_to_cpu(de->inode); ··· 2740 2727 2741 2728 retval = -ENOENT; 2742 2729 bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); 2730 + if (IS_ERR(bh)) 2731 + return PTR_ERR(bh); 2743 2732 if (!bh) 2744 2733 goto end_rmdir; 2745 2734 ··· 2809 2794 2810 2795 retval = -ENOENT; 2811 2796 bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); 2797 + if (IS_ERR(bh)) 2798 + return PTR_ERR(bh); 2812 2799 if (!bh) 2813 2800 goto end_unlink; 2814 2801 ··· 3138 3121 struct ext4_dir_entry_2 *de; 3139 3122 3140 3123 bh = ext4_find_entry(dir, d_name, &de, NULL); 3124 + if (IS_ERR(bh)) 3125 + return PTR_ERR(bh); 3141 
3126 if (bh) { 3142 3127 retval = ext4_delete_entry(handle, dir, de, bh); 3143 3128 brelse(bh); ··· 3147 3128 return retval; 3148 3129 } 3149 3130 3150 - static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent) 3131 + static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent, 3132 + int force_reread) 3151 3133 { 3152 3134 int retval; 3153 3135 /* ··· 3160 3140 if (le32_to_cpu(ent->de->inode) != ent->inode->i_ino || 3161 3141 ent->de->name_len != ent->dentry->d_name.len || 3162 3142 strncmp(ent->de->name, ent->dentry->d_name.name, 3163 - ent->de->name_len)) { 3143 + ent->de->name_len) || 3144 + force_reread) { 3164 3145 retval = ext4_find_delete_entry(handle, ent->dir, 3165 3146 &ent->dentry->d_name); 3166 3147 } else { ··· 3212 3191 .dentry = new_dentry, 3213 3192 .inode = new_dentry->d_inode, 3214 3193 }; 3194 + int force_reread; 3215 3195 int retval; 3216 3196 3217 3197 dquot_initialize(old.dir); ··· 3224 3202 dquot_initialize(new.inode); 3225 3203 3226 3204 old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL); 3205 + if (IS_ERR(old.bh)) 3206 + return PTR_ERR(old.bh); 3227 3207 /* 3228 3208 * Check for inode number is _not_ due to possible IO errors. 
3229 3209 * We might rmdir the source, keep it as pwd of some process ··· 3238 3214 3239 3215 new.bh = ext4_find_entry(new.dir, &new.dentry->d_name, 3240 3216 &new.de, &new.inlined); 3217 + if (IS_ERR(new.bh)) { 3218 + retval = PTR_ERR(new.bh); 3219 + goto end_rename; 3220 + } 3241 3221 if (new.bh) { 3242 3222 if (!new.inode) { 3243 3223 brelse(new.bh); ··· 3274 3246 if (retval) 3275 3247 goto end_rename; 3276 3248 } 3249 + /* 3250 + * If we're renaming a file within an inline_data dir and adding or 3251 + * setting the new dirent causes a conversion from inline_data to 3252 + * extents/blockmap, we need to force the dirent delete code to 3253 + * re-read the directory, or else we end up trying to delete a dirent 3254 + * from what is now the extent tree root (or a block map). 3255 + */ 3256 + force_reread = (new.dir->i_ino == old.dir->i_ino && 3257 + ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA)); 3277 3258 if (!new.bh) { 3278 3259 retval = ext4_add_entry(handle, new.dentry, old.inode); 3279 3260 if (retval) ··· 3293 3256 if (retval) 3294 3257 goto end_rename; 3295 3258 } 3259 + if (force_reread) 3260 + force_reread = !ext4_test_inode_flag(new.dir, 3261 + EXT4_INODE_INLINE_DATA); 3296 3262 3297 3263 /* 3298 3264 * Like most other Unix systems, set the ctime for inodes on a ··· 3307 3267 /* 3308 3268 * ok, that's it 3309 3269 */ 3310 - ext4_rename_delete(handle, &old); 3270 + ext4_rename_delete(handle, &old, force_reread); 3311 3271 3312 3272 if (new.inode) { 3313 3273 ext4_dec_count(handle, new.inode); ··· 3370 3330 3371 3331 old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, 3372 3332 &old.de, &old.inlined); 3333 + if (IS_ERR(old.bh)) 3334 + return PTR_ERR(old.bh); 3373 3335 /* 3374 3336 * Check for inode number is _not_ due to possible IO errors. 
3375 3337 * We might rmdir the source, keep it as pwd of some process ··· 3384 3342 3385 3343 new.bh = ext4_find_entry(new.dir, &new.dentry->d_name, 3386 3344 &new.de, &new.inlined); 3345 + if (IS_ERR(new.bh)) { 3346 + retval = PTR_ERR(new.bh); 3347 + goto end_rename; 3348 + } 3387 3349 3388 3350 /* RENAME_EXCHANGE case: old *and* new must both exist */ 3389 3351 if (!new.bh || le32_to_cpu(new.de->inode) != new.inode->i_ino)
+3 -2
fs/ext4/super.c
··· 3181 3181 3182 3182 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 3183 3183 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { 3184 - /* journal checksum v2 */ 3184 + /* journal checksum v3 */ 3185 3185 compat = 0; 3186 - incompat = JBD2_FEATURE_INCOMPAT_CSUM_V2; 3186 + incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3; 3187 3187 } else { 3188 3188 /* journal checksum v1 */ 3189 3189 compat = JBD2_FEATURE_COMPAT_CHECKSUM; ··· 3205 3205 jbd2_journal_clear_features(sbi->s_journal, 3206 3206 JBD2_FEATURE_COMPAT_CHECKSUM, 0, 3207 3207 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | 3208 + JBD2_FEATURE_INCOMPAT_CSUM_V3 | 3208 3209 JBD2_FEATURE_INCOMPAT_CSUM_V2); 3209 3210 } 3210 3211
+12 -9
fs/jbd2/commit.c
··· 97 97 struct commit_header *h; 98 98 __u32 csum; 99 99 100 - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) 100 + if (!jbd2_journal_has_csum_v2or3(j)) 101 101 return; 102 102 103 103 h = (struct commit_header *)(bh->b_data); ··· 313 313 return checksum; 314 314 } 315 315 316 - static void write_tag_block(int tag_bytes, journal_block_tag_t *tag, 316 + static void write_tag_block(journal_t *j, journal_block_tag_t *tag, 317 317 unsigned long long block) 318 318 { 319 319 tag->t_blocknr = cpu_to_be32(block & (u32)~0); 320 - if (tag_bytes > JBD2_TAG_SIZE32) 320 + if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_64BIT)) 321 321 tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1); 322 322 } 323 323 ··· 327 327 struct jbd2_journal_block_tail *tail; 328 328 __u32 csum; 329 329 330 - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) 330 + if (!jbd2_journal_has_csum_v2or3(j)) 331 331 return; 332 332 333 333 tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize - ··· 340 340 static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag, 341 341 struct buffer_head *bh, __u32 sequence) 342 342 { 343 + journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag; 343 344 struct page *page = bh->b_page; 344 345 __u8 *addr; 345 346 __u32 csum32; 346 347 __be32 seq; 347 348 348 - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) 349 + if (!jbd2_journal_has_csum_v2or3(j)) 349 350 return; 350 351 351 352 seq = cpu_to_be32(sequence); ··· 356 355 bh->b_size); 357 356 kunmap_atomic(addr); 358 357 359 - /* We only have space to store the lower 16 bits of the crc32c. 
*/ 360 - tag->t_checksum = cpu_to_be16(csum32); 358 + if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3)) 359 + tag3->t_checksum = cpu_to_be32(csum32); 360 + else 361 + tag->t_checksum = cpu_to_be16(csum32); 361 362 } 362 363 /* 363 364 * jbd2_journal_commit_transaction ··· 399 396 LIST_HEAD(io_bufs); 400 397 LIST_HEAD(log_bufs); 401 398 402 - if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) 399 + if (jbd2_journal_has_csum_v2or3(journal)) 403 400 csum_size = sizeof(struct jbd2_journal_block_tail); 404 401 405 402 /* ··· 693 690 tag_flag |= JBD2_FLAG_SAME_UUID; 694 691 695 692 tag = (journal_block_tag_t *) tagp; 696 - write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr); 693 + write_tag_block(journal, tag, jh2bh(jh)->b_blocknr); 697 694 tag->t_flags = cpu_to_be16(tag_flag); 698 695 jbd2_block_tag_csum_set(journal, tag, wbuf[bufs], 699 696 commit_transaction->t_tid);
+37 -19
fs/jbd2/journal.c
··· 124 124 /* Checksumming functions */ 125 125 static int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb) 126 126 { 127 - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) 127 + if (!jbd2_journal_has_csum_v2or3(j)) 128 128 return 1; 129 129 130 130 return sb->s_checksum_type == JBD2_CRC32C_CHKSUM; ··· 145 145 146 146 static int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb) 147 147 { 148 - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) 148 + if (!jbd2_journal_has_csum_v2or3(j)) 149 149 return 1; 150 150 151 151 return sb->s_checksum == jbd2_superblock_csum(j, sb); ··· 153 153 154 154 static void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb) 155 155 { 156 - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) 156 + if (!jbd2_journal_has_csum_v2or3(j)) 157 157 return; 158 158 159 159 sb->s_checksum = jbd2_superblock_csum(j, sb); ··· 1522 1522 goto out; 1523 1523 } 1524 1524 1525 - if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) && 1526 - JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { 1525 + if (jbd2_journal_has_csum_v2or3(journal) && 1526 + JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM)) { 1527 1527 /* Can't have checksum v1 and v2 on at the same time! */ 1528 1528 printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 " 1529 + "at the same time!\n"); 1530 + goto out; 1531 + } 1532 + 1533 + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) && 1534 + JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) { 1535 + /* Can't have checksum v2 and v3 at the same time! 
*/ 1536 + printk(KERN_ERR "JBD2: Can't enable checksumming v2 and v3 " 1529 1537 "at the same time!\n"); 1530 1538 goto out; 1531 1539 } ··· 1544 1536 } 1545 1537 1546 1538 /* Load the checksum driver */ 1547 - if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { 1539 + if (jbd2_journal_has_csum_v2or3(journal)) { 1548 1540 journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); 1549 1541 if (IS_ERR(journal->j_chksum_driver)) { 1550 1542 printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n"); ··· 1561 1553 } 1562 1554 1563 1555 /* Precompute checksum seed for all metadata */ 1564 - if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) 1556 + if (jbd2_journal_has_csum_v2or3(journal)) 1565 1557 journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid, 1566 1558 sizeof(sb->s_uuid)); 1567 1559 ··· 1821 1813 if (!jbd2_journal_check_available_features(journal, compat, ro, incompat)) 1822 1814 return 0; 1823 1815 1824 - /* Asking for checksumming v2 and v1? Only give them v2. */ 1825 - if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2 && 1816 + /* If enabling v2 checksums, turn on v3 instead */ 1817 + if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2) { 1818 + incompat &= ~JBD2_FEATURE_INCOMPAT_CSUM_V2; 1819 + incompat |= JBD2_FEATURE_INCOMPAT_CSUM_V3; 1820 + } 1821 + 1822 + /* Asking for checksumming v3 and v1? Only give them v3. 
*/ 1823 + if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V3 && 1826 1824 compat & JBD2_FEATURE_COMPAT_CHECKSUM) 1827 1825 compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM; 1828 1826 ··· 1837 1823 1838 1824 sb = journal->j_superblock; 1839 1825 1840 - /* If enabling v2 checksums, update superblock */ 1841 - if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V2)) { 1826 + /* If enabling v3 checksums, update superblock */ 1827 + if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) { 1842 1828 sb->s_checksum_type = JBD2_CRC32C_CHKSUM; 1843 1829 sb->s_feature_compat &= 1844 1830 ~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM); ··· 1856 1842 } 1857 1843 1858 1844 /* Precompute checksum seed for all metadata */ 1859 - if (JBD2_HAS_INCOMPAT_FEATURE(journal, 1860 - JBD2_FEATURE_INCOMPAT_CSUM_V2)) 1845 + if (jbd2_journal_has_csum_v2or3(journal)) 1861 1846 journal->j_csum_seed = jbd2_chksum(journal, ~0, 1862 1847 sb->s_uuid, 1863 1848 sizeof(sb->s_uuid)); ··· 1865 1852 /* If enabling v1 checksums, downgrade superblock */ 1866 1853 if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM)) 1867 1854 sb->s_feature_incompat &= 1868 - ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2); 1855 + ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2 | 1856 + JBD2_FEATURE_INCOMPAT_CSUM_V3); 1869 1857 1870 1858 sb->s_feature_compat |= cpu_to_be32(compat); 1871 1859 sb->s_feature_ro_compat |= cpu_to_be32(ro); ··· 2179 2165 */ 2180 2166 size_t journal_tag_bytes(journal_t *journal) 2181 2167 { 2182 - journal_block_tag_t tag; 2183 - size_t x = 0; 2168 + size_t sz; 2169 + 2170 + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) 2171 + return sizeof(journal_block_tag3_t); 2172 + 2173 + sz = sizeof(journal_block_tag_t); 2184 2174 2185 2175 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) 2186 - x += sizeof(tag.t_checksum); 2176 + sz += sizeof(__u16); 2187 2177 2188 2178 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) 2189 - return x + JBD2_TAG_SIZE64; 2179 + return sz; 
2190 2180 else 2191 - return x + JBD2_TAG_SIZE32; 2181 + return sz - sizeof(__u32); 2192 2182 } 2193 2183 2194 2184 /*
+20 -13
fs/jbd2/recovery.c
··· 181 181 __be32 provided; 182 182 __u32 calculated; 183 183 184 - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) 184 + if (!jbd2_journal_has_csum_v2or3(j)) 185 185 return 1; 186 186 187 187 tail = (struct jbd2_journal_block_tail *)(buf + j->j_blocksize - ··· 205 205 int nr = 0, size = journal->j_blocksize; 206 206 int tag_bytes = journal_tag_bytes(journal); 207 207 208 - if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) 208 + if (jbd2_journal_has_csum_v2or3(journal)) 209 209 size -= sizeof(struct jbd2_journal_block_tail); 210 210 211 211 tagp = &bh->b_data[sizeof(journal_header_t)]; ··· 338 338 return err; 339 339 } 340 340 341 - static inline unsigned long long read_tag_block(int tag_bytes, journal_block_tag_t *tag) 341 + static inline unsigned long long read_tag_block(journal_t *journal, 342 + journal_block_tag_t *tag) 342 343 { 343 344 unsigned long long block = be32_to_cpu(tag->t_blocknr); 344 - if (tag_bytes > JBD2_TAG_SIZE32) 345 + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) 345 346 block |= (u64)be32_to_cpu(tag->t_blocknr_high) << 32; 346 347 return block; 347 348 } ··· 385 384 __be32 provided; 386 385 __u32 calculated; 387 386 388 - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) 387 + if (!jbd2_journal_has_csum_v2or3(j)) 389 388 return 1; 390 389 391 390 h = buf; ··· 400 399 static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag, 401 400 void *buf, __u32 sequence) 402 401 { 402 + journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag; 403 403 __u32 csum32; 404 404 __be32 seq; 405 405 406 - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) 406 + if (!jbd2_journal_has_csum_v2or3(j)) 407 407 return 1; 408 408 409 409 seq = cpu_to_be32(sequence); 410 410 csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq)); 411 411 csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize); 412 412 413 - return tag->t_checksum == 
cpu_to_be16(csum32); 413 + if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3)) 414 + return tag3->t_checksum == cpu_to_be32(csum32); 415 + else 416 + return tag->t_checksum == cpu_to_be16(csum32); 414 417 } 415 418 416 419 static int do_one_pass(journal_t *journal, ··· 431 426 int tag_bytes = journal_tag_bytes(journal); 432 427 __u32 crc32_sum = ~0; /* Transactional Checksums */ 433 428 int descr_csum_size = 0; 429 + int block_error = 0; 434 430 435 431 /* 436 432 * First thing is to establish what we expect to find in the log ··· 518 512 switch(blocktype) { 519 513 case JBD2_DESCRIPTOR_BLOCK: 520 514 /* Verify checksum first */ 521 - if (JBD2_HAS_INCOMPAT_FEATURE(journal, 522 - JBD2_FEATURE_INCOMPAT_CSUM_V2)) 515 + if (jbd2_journal_has_csum_v2or3(journal)) 523 516 descr_csum_size = 524 517 sizeof(struct jbd2_journal_block_tail); 525 518 if (descr_csum_size > 0 && ··· 579 574 unsigned long long blocknr; 580 575 581 576 J_ASSERT(obh != NULL); 582 - blocknr = read_tag_block(tag_bytes, 577 + blocknr = read_tag_block(journal, 583 578 tag); 584 579 585 580 /* If the block has been ··· 603 598 "checksum recovering " 604 599 "block %llu in log\n", 605 600 blocknr); 606 - continue; 601 + block_error = 1; 602 + goto skip_write; 607 603 } 608 604 609 605 /* Find a buffer for the new ··· 803 797 success = -EIO; 804 798 } 805 799 } 806 - 800 + if (block_error && success == 0) 801 + success = -EIO; 807 802 return success; 808 803 809 804 failed: ··· 818 811 __be32 provided; 819 812 __u32 calculated; 820 813 821 - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) 814 + if (!jbd2_journal_has_csum_v2or3(j)) 822 815 return 1; 823 816 824 817 tail = (struct jbd2_journal_revoke_tail *)(buf + j->j_blocksize -
+3 -3
fs/jbd2/revoke.c
··· 91 91 #include <linux/list.h> 92 92 #include <linux/init.h> 93 93 #include <linux/bio.h> 94 - #endif 95 94 #include <linux/log2.h> 95 + #endif 96 96 97 97 static struct kmem_cache *jbd2_revoke_record_cache; 98 98 static struct kmem_cache *jbd2_revoke_table_cache; ··· 597 597 offset = *offsetp; 598 598 599 599 /* Do we need to leave space at the end for a checksum? */ 600 - if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) 600 + if (jbd2_journal_has_csum_v2or3(journal)) 601 601 csum_size = sizeof(struct jbd2_journal_revoke_tail); 602 602 603 603 /* Make sure we have a descriptor with space left for the record */ ··· 644 644 struct jbd2_journal_revoke_tail *tail; 645 645 __u32 csum; 646 646 647 - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) 647 + if (!jbd2_journal_has_csum_v2or3(j)) 648 648 return; 649 649 650 650 tail = (struct jbd2_journal_revoke_tail *)(bh->b_data + j->j_blocksize -
+1 -1
fs/locks.c
··· 1619 1619 smp_mb(); 1620 1620 error = check_conflicting_open(dentry, arg); 1621 1621 if (error) 1622 - locks_unlink_lock(flp); 1622 + locks_unlink_lock(before); 1623 1623 out: 1624 1624 if (is_deleg) 1625 1625 mutex_unlock(&inode->i_mutex);
+4 -1
fs/nfs/nfs3acl.c
··· 129 129 .rpc_argp = &args, 130 130 .rpc_resp = &fattr, 131 131 }; 132 - int status; 132 + int status = 0; 133 + 134 + if (acl == NULL && (!S_ISDIR(inode->i_mode) || dfacl == NULL)) 135 + goto out; 133 136 134 137 status = -EOPNOTSUPP; 135 138 if (!nfs_server_capable(inode, NFS_CAP_ACLS))
+17 -9
fs/nfs/nfs4proc.c
··· 2560 2560 struct nfs4_closedata *calldata = data; 2561 2561 struct nfs4_state *state = calldata->state; 2562 2562 struct nfs_server *server = NFS_SERVER(calldata->inode); 2563 + nfs4_stateid *res_stateid = NULL; 2563 2564 2564 2565 dprintk("%s: begin!\n", __func__); 2565 2566 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) ··· 2571 2570 */ 2572 2571 switch (task->tk_status) { 2573 2572 case 0: 2574 - if (calldata->roc) 2573 + res_stateid = &calldata->res.stateid; 2574 + if (calldata->arg.fmode == 0 && calldata->roc) 2575 2575 pnfs_roc_set_barrier(state->inode, 2576 2576 calldata->roc_barrier); 2577 - nfs_clear_open_stateid(state, &calldata->res.stateid, 0); 2578 2577 renew_lease(server, calldata->timestamp); 2579 - goto out_release; 2578 + break; 2580 2579 case -NFS4ERR_ADMIN_REVOKED: 2581 2580 case -NFS4ERR_STALE_STATEID: 2582 2581 case -NFS4ERR_OLD_STATEID: ··· 2590 2589 goto out_release; 2591 2590 } 2592 2591 } 2593 - nfs_clear_open_stateid(state, NULL, calldata->arg.fmode); 2592 + nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode); 2594 2593 out_release: 2595 2594 nfs_release_seqid(calldata->arg.seqid); 2596 2595 nfs_refresh_inode(calldata->inode, calldata->res.fattr); ··· 2602 2601 struct nfs4_closedata *calldata = data; 2603 2602 struct nfs4_state *state = calldata->state; 2604 2603 struct inode *inode = calldata->inode; 2604 + bool is_rdonly, is_wronly, is_rdwr; 2605 2605 int call_close = 0; 2606 2606 2607 2607 dprintk("%s: begin!\n", __func__); ··· 2610 2608 goto out_wait; 2611 2609 2612 2610 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 2613 - calldata->arg.fmode = FMODE_READ|FMODE_WRITE; 2614 2611 spin_lock(&state->owner->so_lock); 2612 + is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 2613 + is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 2614 + is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 2615 + /* Calculate the current open share mode */ 2616 + calldata->arg.fmode = 0; 2617 + 
if (is_rdonly || is_rdwr) 2618 + calldata->arg.fmode |= FMODE_READ; 2619 + if (is_wronly || is_rdwr) 2620 + calldata->arg.fmode |= FMODE_WRITE; 2615 2621 /* Calculate the change in open mode */ 2616 2622 if (state->n_rdwr == 0) { 2617 2623 if (state->n_rdonly == 0) { 2618 - call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); 2619 - call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 2624 + call_close |= is_rdonly || is_rdwr; 2620 2625 calldata->arg.fmode &= ~FMODE_READ; 2621 2626 } 2622 2627 if (state->n_wronly == 0) { 2623 - call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags); 2624 - call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 2628 + call_close |= is_wronly || is_rdwr; 2625 2629 calldata->arg.fmode &= ~FMODE_WRITE; 2626 2630 } 2627 2631 }
+11 -2
fs/ocfs2/cluster/quorum.c
··· 160 160 } 161 161 162 162 out: 163 - spin_unlock(&qs->qs_lock); 164 - if (fence) 163 + if (fence) { 164 + spin_unlock(&qs->qs_lock); 165 165 o2quo_fence_self(); 166 + } else { 167 + mlog(ML_NOTICE, "not fencing this node, heartbeating: %d, " 168 + "connected: %d, lowest: %d (%sreachable)\n", 169 + qs->qs_heartbeating, qs->qs_connected, lowest_hb, 170 + lowest_reachable ? "" : "un"); 171 + spin_unlock(&qs->qs_lock); 172 + 173 + } 174 + 166 175 } 167 176 168 177 static void o2quo_set_hold(struct o2quo_state *qs, u8 node)
+39 -6
fs/ocfs2/cluster/tcp.c
··· 1480 1480 return ret; 1481 1481 } 1482 1482 1483 + static int o2net_set_usertimeout(struct socket *sock) 1484 + { 1485 + int user_timeout = O2NET_TCP_USER_TIMEOUT; 1486 + 1487 + return kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT, 1488 + (char *)&user_timeout, sizeof(user_timeout)); 1489 + } 1490 + 1483 1491 static void o2net_initialize_handshake(void) 1484 1492 { 1485 1493 o2net_hand->o2hb_heartbeat_timeout_ms = cpu_to_be32( ··· 1544 1536 #endif 1545 1537 1546 1538 printk(KERN_NOTICE "o2net: Connection to " SC_NODEF_FMT " has been " 1547 - "idle for %lu.%lu secs, shutting it down.\n", SC_NODEF_ARGS(sc), 1548 - msecs / 1000, msecs % 1000); 1539 + "idle for %lu.%lu secs.\n", 1540 + SC_NODEF_ARGS(sc), msecs / 1000, msecs % 1000); 1549 1541 1550 - /* 1551 - * Initialize the nn_timeout so that the next connection attempt 1552 - * will continue in o2net_start_connect. 1542 + /* idle timerout happen, don't shutdown the connection, but 1543 + * make fence decision. Maybe the connection can recover before 1544 + * the decision is made. 
1553 1545 */ 1554 1546 atomic_set(&nn->nn_timeout, 1); 1547 + o2quo_conn_err(o2net_num_from_nn(nn)); 1548 + queue_delayed_work(o2net_wq, &nn->nn_still_up, 1549 + msecs_to_jiffies(O2NET_QUORUM_DELAY_MS)); 1555 1550 1556 - o2net_sc_queue_work(sc, &sc->sc_shutdown_work); 1551 + o2net_sc_reset_idle_timer(sc); 1552 + 1557 1553 } 1558 1554 1559 1555 static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc) ··· 1572 1560 1573 1561 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc) 1574 1562 { 1563 + struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); 1564 + 1565 + /* clear fence decision since the connection recover from timeout*/ 1566 + if (atomic_read(&nn->nn_timeout)) { 1567 + o2quo_conn_up(o2net_num_from_nn(nn)); 1568 + cancel_delayed_work(&nn->nn_still_up); 1569 + atomic_set(&nn->nn_timeout, 0); 1570 + } 1571 + 1575 1572 /* Only push out an existing timer */ 1576 1573 if (timer_pending(&sc->sc_idle_timeout)) 1577 1574 o2net_sc_reset_idle_timer(sc); ··· 1668 1647 ret = o2net_set_nodelay(sc->sc_sock); 1669 1648 if (ret) { 1670 1649 mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret); 1650 + goto out; 1651 + } 1652 + 1653 + ret = o2net_set_usertimeout(sock); 1654 + if (ret) { 1655 + mlog(ML_ERROR, "set TCP_USER_TIMEOUT failed with %d\n", ret); 1671 1656 goto out; 1672 1657 } 1673 1658 ··· 1855 1828 ret = o2net_set_nodelay(new_sock); 1856 1829 if (ret) { 1857 1830 mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret); 1831 + goto out; 1832 + } 1833 + 1834 + ret = o2net_set_usertimeout(new_sock); 1835 + if (ret) { 1836 + mlog(ML_ERROR, "set TCP_USER_TIMEOUT failed with %d\n", ret); 1858 1837 goto out; 1859 1838 } 1860 1839
+1
fs/ocfs2/cluster/tcp.h
··· 63 63 #define O2NET_KEEPALIVE_DELAY_MS_DEFAULT 2000 64 64 #define O2NET_IDLE_TIMEOUT_MS_DEFAULT 30000 65 65 66 + #define O2NET_TCP_USER_TIMEOUT 0x7fffffff 66 67 67 68 /* TODO: figure this out.... */ 68 69 static inline int o2net_link_down(int err, struct socket *sock)
+43 -86
fs/ocfs2/ioctl.c
··· 35 35 copy_to_user((typeof(a) __user *)b, &(a), sizeof(a)) 36 36 37 37 /* 38 - * This call is void because we are already reporting an error that may 39 - * be -EFAULT. The error will be returned from the ioctl(2) call. It's 40 - * just a best-effort to tell userspace that this request caused the error. 38 + * This is just a best-effort to tell userspace that this request 39 + * caused the error. 41 40 */ 42 41 static inline void o2info_set_request_error(struct ocfs2_info_request *kreq, 43 42 struct ocfs2_info_request __user *req) ··· 145 146 static int ocfs2_info_handle_blocksize(struct inode *inode, 146 147 struct ocfs2_info_request __user *req) 147 148 { 148 - int status = -EFAULT; 149 149 struct ocfs2_info_blocksize oib; 150 150 151 151 if (o2info_from_user(oib, req)) 152 - goto bail; 152 + return -EFAULT; 153 153 154 154 oib.ib_blocksize = inode->i_sb->s_blocksize; 155 155 156 156 o2info_set_request_filled(&oib.ib_req); 157 157 158 158 if (o2info_to_user(oib, req)) 159 - goto bail; 159 + return -EFAULT; 160 160 161 - status = 0; 162 - bail: 163 - if (status) 164 - o2info_set_request_error(&oib.ib_req, req); 165 - 166 - return status; 161 + return 0; 167 162 } 168 163 169 164 static int ocfs2_info_handle_clustersize(struct inode *inode, 170 165 struct ocfs2_info_request __user *req) 171 166 { 172 - int status = -EFAULT; 173 167 struct ocfs2_info_clustersize oic; 174 168 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 175 169 176 170 if (o2info_from_user(oic, req)) 177 - goto bail; 171 + return -EFAULT; 178 172 179 173 oic.ic_clustersize = osb->s_clustersize; 180 174 181 175 o2info_set_request_filled(&oic.ic_req); 182 176 183 177 if (o2info_to_user(oic, req)) 184 - goto bail; 178 + return -EFAULT; 185 179 186 - status = 0; 187 - bail: 188 - if (status) 189 - o2info_set_request_error(&oic.ic_req, req); 190 - 191 - return status; 180 + return 0; 192 181 } 193 182 194 183 static int ocfs2_info_handle_maxslots(struct inode *inode, 195 184 struct 
ocfs2_info_request __user *req) 196 185 { 197 - int status = -EFAULT; 198 186 struct ocfs2_info_maxslots oim; 199 187 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 200 188 201 189 if (o2info_from_user(oim, req)) 202 - goto bail; 190 + return -EFAULT; 203 191 204 192 oim.im_max_slots = osb->max_slots; 205 193 206 194 o2info_set_request_filled(&oim.im_req); 207 195 208 196 if (o2info_to_user(oim, req)) 209 - goto bail; 197 + return -EFAULT; 210 198 211 - status = 0; 212 - bail: 213 - if (status) 214 - o2info_set_request_error(&oim.im_req, req); 215 - 216 - return status; 199 + return 0; 217 200 } 218 201 219 202 static int ocfs2_info_handle_label(struct inode *inode, 220 203 struct ocfs2_info_request __user *req) 221 204 { 222 - int status = -EFAULT; 223 205 struct ocfs2_info_label oil; 224 206 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 225 207 226 208 if (o2info_from_user(oil, req)) 227 - goto bail; 209 + return -EFAULT; 228 210 229 211 memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN); 230 212 231 213 o2info_set_request_filled(&oil.il_req); 232 214 233 215 if (o2info_to_user(oil, req)) 234 - goto bail; 216 + return -EFAULT; 235 217 236 - status = 0; 237 - bail: 238 - if (status) 239 - o2info_set_request_error(&oil.il_req, req); 240 - 241 - return status; 218 + return 0; 242 219 } 243 220 244 221 static int ocfs2_info_handle_uuid(struct inode *inode, 245 222 struct ocfs2_info_request __user *req) 246 223 { 247 - int status = -EFAULT; 248 224 struct ocfs2_info_uuid oiu; 249 225 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 250 226 251 227 if (o2info_from_user(oiu, req)) 252 - goto bail; 228 + return -EFAULT; 253 229 254 230 memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1); 255 231 256 232 o2info_set_request_filled(&oiu.iu_req); 257 233 258 234 if (o2info_to_user(oiu, req)) 259 - goto bail; 235 + return -EFAULT; 260 236 261 - status = 0; 262 - bail: 263 - if (status) 264 - o2info_set_request_error(&oiu.iu_req, req); 265 - 266 - 
return status; 237 + return 0; 267 238 } 268 239 269 240 static int ocfs2_info_handle_fs_features(struct inode *inode, 270 241 struct ocfs2_info_request __user *req) 271 242 { 272 - int status = -EFAULT; 273 243 struct ocfs2_info_fs_features oif; 274 244 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 275 245 276 246 if (o2info_from_user(oif, req)) 277 - goto bail; 247 + return -EFAULT; 278 248 279 249 oif.if_compat_features = osb->s_feature_compat; 280 250 oif.if_incompat_features = osb->s_feature_incompat; ··· 252 284 o2info_set_request_filled(&oif.if_req); 253 285 254 286 if (o2info_to_user(oif, req)) 255 - goto bail; 287 + return -EFAULT; 256 288 257 - status = 0; 258 - bail: 259 - if (status) 260 - o2info_set_request_error(&oif.if_req, req); 261 - 262 - return status; 289 + return 0; 263 290 } 264 291 265 292 static int ocfs2_info_handle_journal_size(struct inode *inode, 266 293 struct ocfs2_info_request __user *req) 267 294 { 268 - int status = -EFAULT; 269 295 struct ocfs2_info_journal_size oij; 270 296 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 271 297 272 298 if (o2info_from_user(oij, req)) 273 - goto bail; 299 + return -EFAULT; 274 300 275 301 oij.ij_journal_size = i_size_read(osb->journal->j_inode); 276 302 277 303 o2info_set_request_filled(&oij.ij_req); 278 304 279 305 if (o2info_to_user(oij, req)) 280 - goto bail; 306 + return -EFAULT; 281 307 282 - status = 0; 283 - bail: 284 - if (status) 285 - o2info_set_request_error(&oij.ij_req, req); 286 - 287 - return status; 308 + return 0; 288 309 } 289 310 290 311 static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb, ··· 330 373 u32 i; 331 374 u64 blkno = -1; 332 375 char namebuf[40]; 333 - int status = -EFAULT, type = INODE_ALLOC_SYSTEM_INODE; 376 + int status, type = INODE_ALLOC_SYSTEM_INODE; 334 377 struct ocfs2_info_freeinode *oifi = NULL; 335 378 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 336 379 struct inode *inode_alloc = NULL; ··· 342 385 goto out_err; 343 386 } 344 387 345 - if 
(o2info_from_user(*oifi, req)) 346 - goto bail; 388 + if (o2info_from_user(*oifi, req)) { 389 + status = -EFAULT; 390 + goto out_free; 391 + } 347 392 348 393 oifi->ifi_slotnum = osb->max_slots; 349 394 ··· 383 424 384 425 o2info_set_request_filled(&oifi->ifi_req); 385 426 386 - if (o2info_to_user(*oifi, req)) 387 - goto bail; 427 + if (o2info_to_user(*oifi, req)) { 428 + status = -EFAULT; 429 + goto out_free; 430 + } 388 431 389 432 status = 0; 390 433 bail: 391 434 if (status) 392 435 o2info_set_request_error(&oifi->ifi_req, req); 393 - 436 + out_free: 394 437 kfree(oifi); 395 438 out_err: 396 439 return status; ··· 619 658 { 620 659 u64 blkno = -1; 621 660 char namebuf[40]; 622 - int status = -EFAULT, type = GLOBAL_BITMAP_SYSTEM_INODE; 661 + int status, type = GLOBAL_BITMAP_SYSTEM_INODE; 623 662 624 663 struct ocfs2_info_freefrag *oiff; 625 664 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); ··· 632 671 goto out_err; 633 672 } 634 673 635 - if (o2info_from_user(*oiff, req)) 636 - goto bail; 674 + if (o2info_from_user(*oiff, req)) { 675 + status = -EFAULT; 676 + goto out_free; 677 + } 637 678 /* 638 679 * chunksize from userspace should be power of 2. 
639 680 */ ··· 674 711 675 712 if (o2info_to_user(*oiff, req)) { 676 713 status = -EFAULT; 677 - goto bail; 714 + goto out_free; 678 715 } 679 716 680 717 status = 0; 681 718 bail: 682 719 if (status) 683 720 o2info_set_request_error(&oiff->iff_req, req); 684 - 721 + out_free: 685 722 kfree(oiff); 686 723 out_err: 687 724 return status; ··· 690 727 static int ocfs2_info_handle_unknown(struct inode *inode, 691 728 struct ocfs2_info_request __user *req) 692 729 { 693 - int status = -EFAULT; 694 730 struct ocfs2_info_request oir; 695 731 696 732 if (o2info_from_user(oir, req)) 697 - goto bail; 733 + return -EFAULT; 698 734 699 735 o2info_clear_request_filled(&oir); 700 736 701 737 if (o2info_to_user(oir, req)) 702 - goto bail; 738 + return -EFAULT; 703 739 704 - status = 0; 705 - bail: 706 - if (status) 707 - o2info_set_request_error(&oir, req); 708 - 709 - return status; 740 + return 0; 710 741 } 711 742 712 743 /*
+3 -4
include/linux/blk-mq.h
··· 127 127 BLK_MQ_RQ_QUEUE_ERROR = 2, /* end IO with error */ 128 128 129 129 BLK_MQ_F_SHOULD_MERGE = 1 << 0, 130 - BLK_MQ_F_SHOULD_SORT = 1 << 1, 131 - BLK_MQ_F_TAG_SHARED = 1 << 2, 132 - BLK_MQ_F_SG_MERGE = 1 << 3, 133 - BLK_MQ_F_SYSFS_UP = 1 << 4, 130 + BLK_MQ_F_TAG_SHARED = 1 << 1, 131 + BLK_MQ_F_SG_MERGE = 1 << 2, 132 + BLK_MQ_F_SYSFS_UP = 1 << 3, 134 133 135 134 BLK_MQ_S_STOPPED = 0, 136 135 BLK_MQ_S_TAG_ACTIVE = 1,
+25 -5
include/linux/jbd2.h
··· 159 159 * journal_block_tag (in the descriptor). The other h_chksum* fields are 160 160 * not used. 161 161 * 162 - * Checksum v1 and v2 are mutually exclusive features. 162 + * If FEATURE_INCOMPAT_CSUM_V3 is set, the descriptor block uses 163 + * journal_block_tag3_t to store a full 32-bit checksum. Everything else 164 + * is the same as v2. 165 + * 166 + * Checksum v1, v2, and v3 are mutually exclusive features. 163 167 */ 164 168 struct commit_header { 165 169 __be32 h_magic; ··· 183 179 * raw struct shouldn't be used for pointer math or sizeof() - use 184 180 * journal_tag_bytes(journal) instead to compute this. 185 181 */ 182 + typedef struct journal_block_tag3_s 183 + { 184 + __be32 t_blocknr; /* The on-disk block number */ 185 + __be32 t_flags; /* See below */ 186 + __be32 t_blocknr_high; /* most-significant high 32bits. */ 187 + __be32 t_checksum; /* crc32c(uuid+seq+block) */ 188 + } journal_block_tag3_t; 189 + 186 190 typedef struct journal_block_tag_s 187 191 { 188 192 __be32 t_blocknr; /* The on-disk block number */ ··· 198 186 __be16 t_flags; /* See below */ 199 187 __be32 t_blocknr_high; /* most-significant high 32bits. 
*/ 200 188 } journal_block_tag_t; 201 - 202 - #define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high)) 203 - #define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t)) 204 189 205 190 /* Tail of descriptor block, for checksumming */ 206 191 struct jbd2_journal_block_tail { ··· 293 284 #define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002 294 285 #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004 295 286 #define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008 287 + #define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010 296 288 297 289 /* Features known to this kernel version: */ 298 290 #define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM ··· 301 291 #define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \ 302 292 JBD2_FEATURE_INCOMPAT_64BIT | \ 303 293 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \ 304 - JBD2_FEATURE_INCOMPAT_CSUM_V2) 294 + JBD2_FEATURE_INCOMPAT_CSUM_V2 | \ 295 + JBD2_FEATURE_INCOMPAT_CSUM_V3) 305 296 306 297 #ifdef __KERNEL__ 307 298 ··· 1306 1295 1307 1296 extern int jbd2_journal_blocks_per_page(struct inode *inode); 1308 1297 extern size_t journal_tag_bytes(journal_t *journal); 1298 + 1299 + static inline int jbd2_journal_has_csum_v2or3(journal_t *journal) 1300 + { 1301 + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) || 1302 + JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) 1303 + return 1; 1304 + 1305 + return 0; 1306 + } 1309 1307 1310 1308 /* 1311 1309 * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
+11 -2
include/linux/platform_data/mtd-nand-omap2.h
··· 21 21 }; 22 22 23 23 enum omap_ecc { 24 - /* 1-bit ECC calculation by GPMC, Error detection by Software */ 25 - OMAP_ECC_HAM1_CODE_HW = 0, 24 + /* 25 + * 1-bit ECC: calculation and correction by SW 26 + * ECC stored at end of spare area 27 + */ 28 + OMAP_ECC_HAM1_CODE_SW = 0, 29 + 30 + /* 31 + * 1-bit ECC: calculation by GPMC, Error detection by Software 32 + * ECC layout compatible with ROM code layout 33 + */ 34 + OMAP_ECC_HAM1_CODE_HW, 26 35 /* 4-bit ECC calculation by GPMC, Error detection by Software */ 27 36 OMAP_ECC_BCH4_CODE_HW_DETECTION_SW, 28 37 /* 4-bit ECC calculation by GPMC, Error detection by ELM */
+1
include/linux/seqno-fence.h
··· 62 62 * @context: the execution context this fence is a part of 63 63 * @seqno_ofs: the offset within @sync_buf 64 64 * @seqno: the sequence # to signal on 65 + * @cond: fence wait condition 65 66 * @ops: the fence_ops for operations on this seqno fence 66 67 * 67 68 * This function initializes a struct seqno_fence with passed parameters,
+7
include/linux/spi/spi.h
··· 253 253 * the device whose settings are being modified. 254 254 * @transfer: adds a message to the controller's transfer queue. 255 255 * @cleanup: frees controller-specific state 256 + * @can_dma: determine whether this master supports DMA 256 257 * @queued: whether this master is providing an internal message queue 257 258 * @kworker: thread struct for message pump 258 259 * @kworker_task: pointer to task for message pump kworker thread ··· 263 262 * @cur_msg: the currently in-flight message 264 263 * @cur_msg_prepared: spi_prepare_message was called for the currently 265 264 * in-flight message 265 + * @cur_msg_mapped: message has been mapped for DMA 266 266 * @xfer_completion: used by core transfer_one_message() 267 267 * @busy: message pump is busy 268 268 * @running: message pump is running ··· 301 299 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS 302 300 * number. Any individual value may be -ENOENT for CS lines that 303 301 * are not GPIOs (driven by the SPI controller itself). 302 + * @dma_tx: DMA transmit channel 303 + * @dma_rx: DMA receive channel 304 + * @dummy_rx: dummy receive buffer for full-duplex devices 305 + * @dummy_tx: dummy transmit buffer for full-duplex devices 304 306 * 305 307 * Each SPI master controller can communicate with one or more @spi_device 306 308 * children. These make a small bus, sharing MOSI, MISO and SCK signals ··· 638 632 * addresses for each transfer buffer 639 633 * @complete: called to report transaction completions 640 634 * @context: the argument to complete() when it's called 635 + * @frame_length: the total number of bytes in the message 641 636 * @actual_length: the total number of bytes that were transferred in all 642 637 * successful segments 643 638 * @status: zero for success, else negative errno
+1 -1
include/sound/soc.h
··· 277 277 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE | \ 278 278 SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK, \ 279 279 .tlv.c = (snd_soc_bytes_tlv_callback), \ 280 - .info = snd_soc_info_bytes_ext, \ 280 + .info = snd_soc_bytes_info_ext, \ 281 281 .private_value = (unsigned long)&(struct soc_bytes_ext) \ 282 282 {.max = xcount, .get = xhandler_get, .put = xhandler_put, } } 283 283 #define SOC_SINGLE_XR_SX(xname, xregbase, xregcount, xnbits, \
+1 -1
include/uapi/linux/xattr.h
··· 13 13 #ifndef _UAPI_LINUX_XATTR_H 14 14 #define _UAPI_LINUX_XATTR_H 15 15 16 - #ifdef __UAPI_DEF_XATTR 16 + #if __UAPI_DEF_XATTR 17 17 #define __USE_KERNEL_XATTR_DEFS 18 18 19 19 #define XATTR_CREATE 0x1 /* set value, fail if attr already exists */
+11
kernel/kexec.c
··· 64 64 char __weak kexec_purgatory[0]; 65 65 size_t __weak kexec_purgatory_size = 0; 66 66 67 + #ifdef CONFIG_KEXEC_FILE 67 68 static int kexec_calculate_store_digests(struct kimage *image); 69 + #endif 68 70 69 71 /* Location of the reserved area for the crash kernel */ 70 72 struct resource crashk_res = { ··· 343 341 return ret; 344 342 } 345 343 344 + #ifdef CONFIG_KEXEC_FILE 346 345 static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len) 347 346 { 348 347 struct fd f = fdget(fd); ··· 615 612 kfree(image); 616 613 return ret; 617 614 } 615 + #else /* CONFIG_KEXEC_FILE */ 616 + static inline void kimage_file_post_load_cleanup(struct kimage *image) { } 617 + #endif /* CONFIG_KEXEC_FILE */ 618 618 619 619 static int kimage_is_destination_range(struct kimage *image, 620 620 unsigned long start, ··· 1381 1375 } 1382 1376 #endif 1383 1377 1378 + #ifdef CONFIG_KEXEC_FILE 1384 1379 SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd, 1385 1380 unsigned long, cmdline_len, const char __user *, cmdline_ptr, 1386 1381 unsigned long, flags) ··· 1457 1450 kimage_free(image); 1458 1451 return ret; 1459 1452 } 1453 + 1454 + #endif /* CONFIG_KEXEC_FILE */ 1460 1455 1461 1456 void crash_kexec(struct pt_regs *regs) 1462 1457 { ··· 2015 2006 2016 2007 subsys_initcall(crash_save_vmcoreinfo_init); 2017 2008 2009 + #ifdef CONFIG_KEXEC_FILE 2018 2010 static int __kexec_add_segment(struct kimage *image, char *buf, 2019 2011 unsigned long bufsz, unsigned long mem, 2020 2012 unsigned long memsz) ··· 2692 2682 2693 2683 return 0; 2694 2684 } 2685 + #endif /* CONFIG_KEXEC_FILE */ 2695 2686 2696 2687 /* 2697 2688 * Move into place and start executing a preloaded standalone
+4 -7
kernel/resource.c
··· 351 351 end = res->end; 352 352 BUG_ON(start >= end); 353 353 354 + if (first_level_children_only) 355 + sibling_only = true; 356 + 354 357 read_lock(&resource_lock); 355 358 356 - if (first_level_children_only) { 357 - p = iomem_resource.child; 358 - sibling_only = true; 359 - } else 360 - p = &iomem_resource; 361 - 362 - while ((p = next_resource(p, sibling_only))) { 359 + for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) { 363 360 if (p->flags != res->flags) 364 361 continue; 365 362 if (name && strcmp(p->name, name))
+15 -1
kernel/trace/ring_buffer.c
··· 626 626 work = &cpu_buffer->irq_work; 627 627 } 628 628 629 - work->waiters_pending = true; 630 629 poll_wait(filp, &work->waiters, poll_table); 630 + work->waiters_pending = true; 631 + /* 632 + * There's a tight race between setting the waiters_pending and 633 + * checking if the ring buffer is empty. Once the waiters_pending bit 634 + * is set, the next event will wake the task up, but we can get stuck 635 + * if there's only a single event in. 636 + * 637 + * FIXME: Ideally, we need a memory barrier on the writer side as well, 638 + * but adding a memory barrier to all events will cause too much of a 639 + * performance hit in the fast path. We only need a memory barrier when 640 + * the buffer goes from empty to having content. But as this race is 641 + * extremely small, and it's not a problem if another event comes in, we 642 + * will fix it later. 643 + */ 644 + smp_mb(); 631 645 632 646 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || 633 647 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
+10 -1
lib/Kconfig.debug
··· 892 892 the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this 893 893 will test all possible w/w mutex interface abuse with the 894 894 exception of simply not acquiring all the required locks. 895 + Note that this feature can introduce significant overhead, so 896 + it really should not be enabled in a production or distro kernel, 897 + even a debug kernel. If you are a driver writer, enable it. If 898 + you are a distro, do not. 895 899 896 900 config DEBUG_LOCK_ALLOC 897 901 bool "Lock debugging: detect incorrect freeing of live locks" ··· 1036 1032 either tracing or lock debugging. 1037 1033 1038 1034 config STACKTRACE 1039 - bool 1035 + bool "Stack backtrace support" 1040 1036 depends on STACKTRACE_SUPPORT 1037 + help 1038 + This option causes the kernel to create a /proc/pid/stack for 1039 + every process, showing its current stack trace. 1040 + It is also used by various kernel debugging features that require 1041 + stack trace generation. 1041 1042 1042 1043 config DEBUG_KOBJECT 1043 1044 bool "kobject debugging"
+1 -1
mm/hugetlb_cgroup.c
··· 217 217 218 218 if (hugetlb_cgroup_disabled()) 219 219 return; 220 - VM_BUG_ON(!spin_is_locked(&hugetlb_lock)); 220 + lockdep_assert_held(&hugetlb_lock); 221 221 h_cg = hugetlb_cgroup_from_page(page); 222 222 if (unlikely(!h_cg)) 223 223 return;
+1 -2
mm/memblock.c
··· 192 192 phys_addr_t align, phys_addr_t start, 193 193 phys_addr_t end, int nid) 194 194 { 195 - int ret; 196 - phys_addr_t kernel_end; 195 + phys_addr_t kernel_end, ret; 197 196 198 197 /* pump up @end */ 199 198 if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
+3 -4
mm/memory.c
··· 751 751 unsigned long pfn = pte_pfn(pte); 752 752 753 753 if (HAVE_PTE_SPECIAL) { 754 - if (likely(!pte_special(pte) || pte_numa(pte))) 754 + if (likely(!pte_special(pte))) 755 755 goto check_pfn; 756 756 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) 757 757 return NULL; ··· 777 777 } 778 778 } 779 779 780 + if (is_zero_pfn(pfn)) 781 + return NULL; 780 782 check_pfn: 781 783 if (unlikely(pfn > highest_memmap_pfn)) { 782 784 print_bad_pte(vma, addr, pte, NULL); 783 785 return NULL; 784 786 } 785 - 786 - if (is_zero_pfn(pfn)) 787 - return NULL; 788 787 789 788 /* 790 789 * NOTE! We still have PageReserved() pages in the page tables.
+1 -1
mm/pgtable-generic.c
··· 195 195 pmd_t entry = *pmdp; 196 196 if (pmd_numa(entry)) 197 197 entry = pmd_mknonnuma(entry); 198 - set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp)); 198 + set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry)); 199 199 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 200 200 } 201 201 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+1
mm/zbud.c
··· 195 195 .total_size = zbud_zpool_total_size, 196 196 }; 197 197 198 + MODULE_ALIAS("zpool-zbud"); 198 199 #endif /* CONFIG_ZPOOL */ 199 200 200 201 /*****************
+1 -1
mm/zpool.c
··· 150 150 driver = zpool_get_driver(type); 151 151 152 152 if (!driver) { 153 - request_module(type); 153 + request_module("zpool-%s", type); 154 154 driver = zpool_get_driver(type); 155 155 } 156 156
+1
mm/zsmalloc.c
··· 315 315 .total_size = zs_zpool_total_size, 316 316 }; 317 317 318 + MODULE_ALIAS("zpool-zsmalloc"); 318 319 #endif /* CONFIG_ZPOOL */ 319 320 320 321 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
+2 -2
scripts/checkpatch.pl
··· 2133 2133 # Check for improperly formed commit descriptions 2134 2134 if ($in_commit_log && 2135 2135 $line =~ /\bcommit\s+[0-9a-f]{5,}/i && 2136 - $line !~ /\b[Cc]ommit [0-9a-f]{12,16} \("/) { 2136 + $line !~ /\b[Cc]ommit [0-9a-f]{12,40} \("/) { 2137 2137 $line =~ /\b(c)ommit\s+([0-9a-f]{5,})/i; 2138 2138 my $init_char = $1; 2139 2139 my $orig_commit = lc($2); ··· 2141 2141 my $desc = 'commit description'; 2142 2142 ($id, $desc) = git_commit_info($orig_commit, $id, $desc); 2143 2143 ERROR("GIT_COMMIT_ID", 2144 - "Please use 12 to 16 chars for the git commit ID like: '${init_char}ommit $id (\"$desc\")'\n" . $herecurr); 2144 + "Please use 12 or more chars for the git commit ID like: '${init_char}ommit $id (\"$desc\")'\n" . $herecurr); 2145 2145 } 2146 2146 2147 2147 # Check for added, moved or deleted files
+1
scripts/kernel-doc
··· 2085 2085 $prototype =~ s/^noinline +//; 2086 2086 $prototype =~ s/__init +//; 2087 2087 $prototype =~ s/__init_or_module +//; 2088 + $prototype =~ s/__meminit +//; 2088 2089 $prototype =~ s/__must_check +//; 2089 2090 $prototype =~ s/__weak +//; 2090 2091 my $define = $prototype =~ s/^#\s*define\s+//; #ak added
+3 -2
security/tomoyo/realpath.c
··· 173 173 * Use filesystem name if filesystem does not support rename() 174 174 * operation. 175 175 */ 176 - if (!inode->i_op->rename) 176 + if (!inode->i_op->rename && !inode->i_op->rename2) 177 177 goto prepend_filesystem_name; 178 178 } 179 179 /* Prepend device name. */ ··· 282 282 * Get local name for filesystems without rename() operation 283 283 * or dentry without vfsmount. 284 284 */ 285 - if (!path->mnt || !inode->i_op->rename) 285 + if (!path->mnt || 286 + (!inode->i_op->rename && !inode->i_op->rename2)) 286 287 pos = tomoyo_get_local_path(path->dentry, buf, 287 288 buf_len - 1); 288 289 /* Get absolute name for the rest. */
+2 -2
sound/core/info.c
··· 684 684 * snd_info_get_line - read one line from the procfs buffer 685 685 * @buffer: the procfs buffer 686 686 * @line: the buffer to store 687 - * @len: the max. buffer size - 1 687 + * @len: the max. buffer size 688 688 * 689 689 * Reads one line from the buffer and stores the string. 690 690 * ··· 704 704 buffer->stop = 1; 705 705 if (c == '\n') 706 706 break; 707 - if (len) { 707 + if (len > 1) { 708 708 len--; 709 709 *line++ = c; 710 710 }
+2 -2
sound/core/pcm_misc.c
··· 142 142 }, 143 143 [SNDRV_PCM_FORMAT_DSD_U8] = { 144 144 .width = 8, .phys = 8, .le = 1, .signd = 0, 145 - .silence = {}, 145 + .silence = { 0x69 }, 146 146 }, 147 147 [SNDRV_PCM_FORMAT_DSD_U16_LE] = { 148 148 .width = 16, .phys = 16, .le = 1, .signd = 0, 149 - .silence = {}, 149 + .silence = { 0x69, 0x69 }, 150 150 }, 151 151 [SNDRV_PCM_FORMAT_DSD_U32_LE] = { 152 152 .width = 32, .phys = 32, .le = 1, .signd = 0,
+10 -1
sound/firewire/amdtp.c
··· 507 507 static void update_pcm_pointers(struct amdtp_stream *s, 508 508 struct snd_pcm_substream *pcm, 509 509 unsigned int frames) 510 - { unsigned int ptr; 510 + { 511 + unsigned int ptr; 512 + 513 + /* 514 + * In IEC 61883-6, one data block represents one event. In ALSA, one 515 + * event equals to one PCM frame. But Dice has a quirk to transfer 516 + * two PCM frames in one data block. 517 + */ 518 + if (s->double_pcm_frames) 519 + frames *= 2; 511 520 512 521 ptr = s->pcm_buffer_pointer + frames; 513 522 if (ptr >= pcm->runtime->buffer_size)
+1
sound/firewire/amdtp.h
··· 125 125 unsigned int pcm_buffer_pointer; 126 126 unsigned int pcm_period_pointer; 127 127 bool pointer_flush; 128 + bool double_pcm_frames; 128 129 129 130 struct snd_rawmidi_substream *midi[AMDTP_MAX_CHANNELS_FOR_MIDI * 8]; 130 131
+20 -9
sound/firewire/dice.c
··· 567 567 return err; 568 568 569 569 /* 570 - * At rates above 96 kHz, pretend that the stream runs at half the 571 - * actual sample rate with twice the number of channels; two samples 572 - * of a channel are stored consecutively in the packet. Requires 573 - * blocking mode and PCM buffer size should be aligned to SYT_INTERVAL. 570 + * At 176.4/192.0 kHz, Dice has a quirk to transfer two PCM frames in 571 + * one data block of AMDTP packet. Thus sampling transfer frequency is 572 + * a half of PCM sampling frequency, i.e. PCM frames at 192.0 kHz are 573 + * transferred on AMDTP packets at 96 kHz. Two successive samples of a 574 + * channel are stored consecutively in the packet. This quirk is called 575 + * as 'Dual Wire'. 576 + * For this quirk, blocking mode is required and PCM buffer size should 577 + * be aligned to SYT_INTERVAL. 574 578 */ 575 579 channels = params_channels(hw_params); 576 580 if (rate_index > 4) { ··· 583 579 return err; 584 580 } 585 581 586 - for (i = 0; i < channels; i++) { 587 - dice->stream.pcm_positions[i * 2] = i; 588 - dice->stream.pcm_positions[i * 2 + 1] = i + channels; 589 - } 590 - 591 582 rate /= 2; 592 583 channels *= 2; 584 + dice->stream.double_pcm_frames = true; 585 + } else { 586 + dice->stream.double_pcm_frames = false; 593 587 } 594 588 595 589 mode = rate_index_to_mode(rate_index); 596 590 amdtp_stream_set_parameters(&dice->stream, rate, channels, 597 591 dice->rx_midi_ports[mode]); 592 + if (rate_index > 4) { 593 + channels /= 2; 594 + 595 + for (i = 0; i < channels; i++) { 596 + dice->stream.pcm_positions[i] = i * 2; 597 + dice->stream.pcm_positions[i + channels] = i * 2 + 1; 598 + } 599 + } 600 + 598 601 amdtp_stream_set_pcm_format(&dice->stream, 599 602 params_format(hw_params)); 600 603
+1 -3
sound/pci/ctxfi/ct20k1reg.h
··· 7 7 */ 8 8 9 9 #ifndef CT20K1REG_H 10 - #define CT20k1REG_H 10 + #define CT20K1REG_H 11 11 12 12 /* 20k1 registers */ 13 13 #define DSPXRAM_START 0x000000 ··· 632 632 #define I2SD_R 0x19L 633 633 634 634 #endif /* CT20K1REG_H */ 635 - 636 -
+1 -1
sound/pci/hda/ca0132_regs.h
··· 20 20 */ 21 21 22 22 #ifndef __CA0132_REGS_H 23 - #define __CA0312_REGS_H 23 + #define __CA0132_REGS_H 24 24 25 25 #define DSP_CHIP_OFFSET 0x100000 26 26 #define DSP_DBGCNTL_MODULE_OFFSET 0xE30
+13 -10
sound/pci/hda/hda_generic.c
··· 2032 2032 * independent HP controls 2033 2033 */ 2034 2034 2035 - static void call_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack); 2035 + static void call_hp_automute(struct hda_codec *codec, 2036 + struct hda_jack_callback *jack); 2036 2037 static int indep_hp_info(struct snd_kcontrol *kcontrol, 2037 2038 struct snd_ctl_elem_info *uinfo) 2038 2039 { ··· 3949 3948 } 3950 3949 3951 3950 /* standard HP-automute helper */ 3952 - void snd_hda_gen_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack) 3951 + void snd_hda_gen_hp_automute(struct hda_codec *codec, 3952 + struct hda_jack_callback *jack) 3953 3953 { 3954 3954 struct hda_gen_spec *spec = codec->spec; 3955 3955 hda_nid_t *pins = spec->autocfg.hp_pins; ··· 3970 3968 EXPORT_SYMBOL_GPL(snd_hda_gen_hp_automute); 3971 3969 3972 3970 /* standard line-out-automute helper */ 3973 - void snd_hda_gen_line_automute(struct hda_codec *codec, struct hda_jack_tbl *jack) 3971 + void snd_hda_gen_line_automute(struct hda_codec *codec, 3972 + struct hda_jack_callback *jack) 3974 3973 { 3975 3974 struct hda_gen_spec *spec = codec->spec; 3976 3975 ··· 3991 3988 EXPORT_SYMBOL_GPL(snd_hda_gen_line_automute); 3992 3989 3993 3990 /* standard mic auto-switch helper */ 3994 - void snd_hda_gen_mic_autoswitch(struct hda_codec *codec, struct hda_jack_tbl *jack) 3991 + void snd_hda_gen_mic_autoswitch(struct hda_codec *codec, 3992 + struct hda_jack_callback *jack) 3995 3993 { 3996 3994 struct hda_gen_spec *spec = codec->spec; 3997 3995 int i; ··· 4015 4011 EXPORT_SYMBOL_GPL(snd_hda_gen_mic_autoswitch); 4016 4012 4017 4013 /* call appropriate hooks */ 4018 - static void call_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack) 4014 + static void call_hp_automute(struct hda_codec *codec, 4015 + struct hda_jack_callback *jack) 4019 4016 { 4020 4017 struct hda_gen_spec *spec = codec->spec; 4021 4018 if (spec->hp_automute_hook) ··· 4026 4021 } 4027 4022 4028 4023 static void call_line_automute(struct 
hda_codec *codec, 4029 - struct hda_jack_tbl *jack) 4024 + struct hda_jack_callback *jack) 4030 4025 { 4031 4026 struct hda_gen_spec *spec = codec->spec; 4032 4027 if (spec->line_automute_hook) ··· 4036 4031 } 4037 4032 4038 4033 static void call_mic_autoswitch(struct hda_codec *codec, 4039 - struct hda_jack_tbl *jack) 4034 + struct hda_jack_callback *jack) 4040 4035 { 4041 4036 struct hda_gen_spec *spec = codec->spec; 4042 4037 if (spec->mic_autoswitch_hook) ··· 4185 4180 if (!is_jack_detectable(codec, nid)) 4186 4181 continue; 4187 4182 codec_dbg(codec, "Enable HP auto-muting on NID 0x%x\n", nid); 4188 - snd_hda_jack_detect_enable_callback(codec, nid, HDA_GEN_HP_EVENT, 4183 + snd_hda_jack_detect_enable_callback(codec, nid, 4189 4184 call_hp_automute); 4190 4185 spec->detect_hp = 1; 4191 4186 } ··· 4198 4193 continue; 4199 4194 codec_dbg(codec, "Enable Line-Out auto-muting on NID 0x%x\n", nid); 4200 4195 snd_hda_jack_detect_enable_callback(codec, nid, 4201 - HDA_GEN_FRONT_EVENT, 4202 4196 call_line_automute); 4203 4197 spec->detect_lo = 1; 4204 4198 } ··· 4239 4235 for (i = 1; i < spec->am_num_entries; i++) 4240 4236 snd_hda_jack_detect_enable_callback(codec, 4241 4237 spec->am_entry[i].pin, 4242 - HDA_GEN_MIC_EVENT, 4243 4238 call_mic_autoswitch); 4244 4239 return true; 4245 4240 }
+6 -12
sound/pci/hda/hda_generic.h
··· 12 12 #ifndef __SOUND_HDA_GENERIC_H 13 13 #define __SOUND_HDA_GENERIC_H 14 14 15 - /* unsol event tags */ 16 - enum { 17 - HDA_GEN_HP_EVENT = 1, HDA_GEN_FRONT_EVENT, HDA_GEN_MIC_EVENT, 18 - HDA_GEN_LAST_EVENT = HDA_GEN_MIC_EVENT 19 - }; 20 - 21 15 /* table entry for multi-io paths */ 22 16 struct hda_multi_io { 23 17 hda_nid_t pin; /* multi-io widget pin NID */ ··· 284 290 285 291 /* automute / autoswitch hooks */ 286 292 void (*hp_automute_hook)(struct hda_codec *codec, 287 - struct hda_jack_tbl *tbl); 293 + struct hda_jack_callback *cb); 288 294 void (*line_automute_hook)(struct hda_codec *codec, 289 - struct hda_jack_tbl *tbl); 295 + struct hda_jack_callback *cb); 290 296 void (*mic_autoswitch_hook)(struct hda_codec *codec, 291 - struct hda_jack_tbl *tbl); 297 + struct hda_jack_callback *cb); 292 298 }; 293 299 294 300 int snd_hda_gen_spec_init(struct hda_gen_spec *spec); ··· 320 326 321 327 /* standard jack event callbacks */ 322 328 void snd_hda_gen_hp_automute(struct hda_codec *codec, 323 - struct hda_jack_tbl *jack); 329 + struct hda_jack_callback *jack); 324 330 void snd_hda_gen_line_automute(struct hda_codec *codec, 325 - struct hda_jack_tbl *jack); 331 + struct hda_jack_callback *jack); 326 332 void snd_hda_gen_mic_autoswitch(struct hda_codec *codec, 327 - struct hda_jack_tbl *jack); 333 + struct hda_jack_callback *jack); 328 334 void snd_hda_gen_update_outputs(struct hda_codec *codec); 329 335 330 336 #ifdef CONFIG_PM
+53 -30
sound/pci/hda/hda_jack.c
··· 94 94 /** 95 95 * snd_hda_jack_tbl_new - create a jack-table entry for the given NID 96 96 */ 97 - struct hda_jack_tbl * 97 + static struct hda_jack_tbl * 98 98 snd_hda_jack_tbl_new(struct hda_codec *codec, hda_nid_t nid) 99 99 { 100 100 struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, nid); ··· 108 108 jack->tag = codec->jacktbl.used; 109 109 return jack; 110 110 } 111 - EXPORT_SYMBOL_GPL(snd_hda_jack_tbl_new); 112 111 113 112 void snd_hda_jack_tbl_clear(struct hda_codec *codec) 114 113 { 114 + struct hda_jack_tbl *jack = codec->jacktbl.list; 115 + int i; 116 + 117 + for (i = 0; i < codec->jacktbl.used; i++, jack++) { 118 + struct hda_jack_callback *cb, *next; 115 119 #ifdef CONFIG_SND_HDA_INPUT_JACK 116 - /* free jack instances manually when clearing/reconfiguring */ 117 - if (!codec->bus->shutdown && codec->jacktbl.list) { 118 - struct hda_jack_tbl *jack = codec->jacktbl.list; 119 - int i; 120 - for (i = 0; i < codec->jacktbl.used; i++, jack++) { 121 - if (jack->jack) 122 - snd_device_free(codec->bus->card, jack->jack); 120 + /* free jack instances manually when clearing/reconfiguring */ 121 + if (!codec->bus->shutdown && jack->jack) 122 + snd_device_free(codec->bus->card, jack->jack); 123 + #endif 124 + for (cb = jack->callback; cb; cb = next) { 125 + next = cb->next; 126 + kfree(cb); 123 127 } 124 128 } 125 - #endif 126 129 snd_array_free(&codec->jacktbl); 127 130 } 128 131 ··· 218 215 219 216 /** 220 217 * snd_hda_jack_detect_enable - enable the jack-detection 218 + * 219 + * In the case of error, the return value will be a pointer embedded with 220 + * errno. Check and handle the return value appropriately with standard 221 + * macros such as @IS_ERR() and @PTR_ERR(). 
221 222 */ 222 - int snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid, 223 - unsigned char action, 224 - hda_jack_callback cb) 223 + struct hda_jack_callback * 224 + snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid, 225 + hda_jack_callback_fn func) 225 226 { 226 - struct hda_jack_tbl *jack = snd_hda_jack_tbl_new(codec, nid); 227 + struct hda_jack_tbl *jack; 228 + struct hda_jack_callback *callback = NULL; 229 + int err; 230 + 231 + jack = snd_hda_jack_tbl_new(codec, nid); 227 232 if (!jack) 228 - return -ENOMEM; 233 + return ERR_PTR(-ENOMEM); 234 + if (func) { 235 + callback = kzalloc(sizeof(*callback), GFP_KERNEL); 236 + if (!callback) 237 + return ERR_PTR(-ENOMEM); 238 + callback->func = func; 239 + callback->tbl = jack; 240 + callback->next = jack->callback; 241 + jack->callback = callback; 242 + } 243 + 229 244 if (jack->jack_detect) 230 - return 0; /* already registered */ 245 + return callback; /* already registered */ 231 246 jack->jack_detect = 1; 232 - if (action) 233 - jack->action = action; 234 - if (cb) 235 - jack->callback = cb; 236 247 if (codec->jackpoll_interval > 0) 237 - return 0; /* No unsol if we're polling instead */ 238 - return snd_hda_codec_write_cache(codec, nid, 0, 248 + return callback; /* No unsol if we're polling instead */ 249 + err = snd_hda_codec_write_cache(codec, nid, 0, 239 250 AC_VERB_SET_UNSOLICITED_ENABLE, 240 251 AC_USRSP_EN | jack->tag); 252 + if (err < 0) 253 + return ERR_PTR(err); 254 + return callback; 241 255 } 242 256 EXPORT_SYMBOL_GPL(snd_hda_jack_detect_enable_callback); 243 257 244 - int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid, 245 - unsigned char action) 258 + int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid) 246 259 { 247 - return snd_hda_jack_detect_enable_callback(codec, nid, action, NULL); 260 + return PTR_ERR_OR_ZERO(snd_hda_jack_detect_enable_callback(codec, nid, NULL)); 248 261 } 249 262 
EXPORT_SYMBOL_GPL(snd_hda_jack_detect_enable); 250 263 ··· 450 431 return err; 451 432 452 433 if (!phantom_jack) 453 - return snd_hda_jack_detect_enable(codec, nid, 0); 434 + return snd_hda_jack_detect_enable(codec, nid); 454 435 return 0; 455 436 } 456 437 ··· 517 498 static void call_jack_callback(struct hda_codec *codec, 518 499 struct hda_jack_tbl *jack) 519 500 { 520 - if (jack->callback) 521 - jack->callback(codec, jack); 501 + struct hda_jack_callback *cb; 502 + 503 + for (cb = jack->callback; cb; cb = cb->next) 504 + cb->func(codec, cb); 522 505 if (jack->gated_jack) { 523 506 struct hda_jack_tbl *gated = 524 507 snd_hda_jack_tbl_get(codec, jack->gated_jack); 525 - if (gated && gated->callback) 526 - gated->callback(codec, gated); 508 + if (gated) { 509 + for (cb = gated->callback; cb; cb = cb->next) 510 + cb->func(codec, cb); 511 + } 527 512 } 528 513 } 529 514
+14 -28
sound/pci/hda/hda_jack.h
··· 14 14 15 15 struct auto_pin_cfg; 16 16 struct hda_jack_tbl; 17 + struct hda_jack_callback; 17 18 18 - typedef void (*hda_jack_callback) (struct hda_codec *, struct hda_jack_tbl *); 19 + typedef void (*hda_jack_callback_fn) (struct hda_codec *, struct hda_jack_callback *); 20 + 21 + struct hda_jack_callback { 22 + struct hda_jack_tbl *tbl; 23 + hda_jack_callback_fn func; 24 + unsigned int private_data; /* arbitrary data */ 25 + struct hda_jack_callback *next; 26 + }; 19 27 20 28 struct hda_jack_tbl { 21 29 hda_nid_t nid; 22 - unsigned char action; /* event action (0 = none) */ 23 30 unsigned char tag; /* unsol event tag */ 24 - unsigned int private_data; /* arbitrary data */ 25 - hda_jack_callback callback; 31 + struct hda_jack_callback *callback; 26 32 /* jack-detection stuff */ 27 33 unsigned int pin_sense; /* cached pin-sense value */ 28 34 unsigned int jack_detect:1; /* capable of jack-detection? */ ··· 49 43 struct hda_jack_tbl * 50 44 snd_hda_jack_tbl_get_from_tag(struct hda_codec *codec, unsigned char tag); 51 45 52 - struct hda_jack_tbl * 53 - snd_hda_jack_tbl_new(struct hda_codec *codec, hda_nid_t nid); 54 46 void snd_hda_jack_tbl_clear(struct hda_codec *codec); 55 - 56 - /** 57 - * snd_hda_jack_get_action - get jack-tbl entry for the tag 58 - * 59 - * Call this from the unsol event handler to get the assigned action for the 60 - * event. This will mark the dirty flag for the later reporting, too. 
61 - */ 62 - static inline unsigned char 63 - snd_hda_jack_get_action(struct hda_codec *codec, unsigned int tag) 64 - { 65 - struct hda_jack_tbl *jack = snd_hda_jack_tbl_get_from_tag(codec, tag); 66 - if (jack) { 67 - jack->jack_dirty = 1; 68 - return jack->action; 69 - } 70 - return 0; 71 - } 72 47 73 48 void snd_hda_jack_set_dirty_all(struct hda_codec *codec); 74 49 75 - int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid, 76 - unsigned char action); 77 - int snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid, 78 - unsigned char action, 79 - hda_jack_callback cb); 50 + int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid); 51 + struct hda_jack_callback * 52 + snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid, 53 + hda_jack_callback_fn cb); 80 54 81 55 int snd_hda_jack_set_gating_jack(struct hda_codec *codec, hda_nid_t gated_nid, 82 56 hda_nid_t gating_nid);
+32 -38
sound/pci/hda/patch_ca0132.c
··· 3224 3224 { 3225 3225 struct ca0132_spec *spec = container_of( 3226 3226 to_delayed_work(work), struct ca0132_spec, unsol_hp_work); 3227 + struct hda_jack_tbl *jack; 3228 + 3227 3229 ca0132_select_out(spec->codec); 3228 - snd_hda_jack_report_sync(spec->codec); 3230 + jack = snd_hda_jack_tbl_get(spec->codec, UNSOL_TAG_HP); 3231 + if (jack) { 3232 + jack->block_report = 0; 3233 + snd_hda_jack_report_sync(spec->codec); 3234 + } 3229 3235 } 3230 3236 3231 3237 static void ca0132_set_dmic(struct hda_codec *codec, int enable); ··· 4120 4114 } 4121 4115 } 4122 4116 4123 - static void ca0132_init_unsol(struct hda_codec *codec) 4124 - { 4125 - snd_hda_jack_detect_enable(codec, UNSOL_TAG_HP, UNSOL_TAG_HP); 4126 - snd_hda_jack_detect_enable(codec, UNSOL_TAG_AMIC1, UNSOL_TAG_AMIC1); 4127 - } 4128 - 4129 4117 static void refresh_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir) 4130 4118 { 4131 4119 unsigned int caps; ··· 4390 4390 ca0132_set_dsp_msr(codec, true); 4391 4391 } 4392 4392 4393 - static void ca0132_process_dsp_response(struct hda_codec *codec) 4393 + static void ca0132_process_dsp_response(struct hda_codec *codec, 4394 + struct hda_jack_callback *callback) 4394 4395 { 4395 4396 struct ca0132_spec *spec = codec->spec; 4396 4397 ··· 4404 4403 dspio_clear_response_queue(codec); 4405 4404 } 4406 4405 4407 - static void ca0132_unsol_event(struct hda_codec *codec, unsigned int res) 4406 + static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb) 4408 4407 { 4409 4408 struct ca0132_spec *spec = codec->spec; 4410 4409 4411 - if (((res >> AC_UNSOL_RES_TAG_SHIFT) & 0x3f) == UNSOL_TAG_DSP) { 4412 - ca0132_process_dsp_response(codec); 4413 - } else { 4414 - res = snd_hda_jack_get_action(codec, 4415 - (res >> AC_UNSOL_RES_TAG_SHIFT) & 0x3f); 4410 + /* Delay enabling the HP amp, to let the mic-detection 4411 + * state machine run. 
4412 + */ 4413 + cancel_delayed_work_sync(&spec->unsol_hp_work); 4414 + queue_delayed_work(codec->bus->workq, &spec->unsol_hp_work, 4415 + msecs_to_jiffies(500)); 4416 + cb->tbl->block_report = 1; 4417 + } 4416 4418 4417 - codec_dbg(codec, "snd_hda_jack_get_action: 0x%x\n", res); 4419 + static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb) 4420 + { 4421 + ca0132_select_mic(codec); 4422 + } 4418 4423 4419 - switch (res) { 4420 - case UNSOL_TAG_HP: 4421 - /* Delay enabling the HP amp, to let the mic-detection 4422 - * state machine run. 4423 - */ 4424 - cancel_delayed_work_sync(&spec->unsol_hp_work); 4425 - queue_delayed_work(codec->bus->workq, 4426 - &spec->unsol_hp_work, 4427 - msecs_to_jiffies(500)); 4428 - break; 4429 - case UNSOL_TAG_AMIC1: 4430 - ca0132_select_mic(codec); 4431 - snd_hda_jack_report_sync(codec); 4432 - break; 4433 - default: 4434 - break; 4435 - } 4436 - } 4424 + static void ca0132_init_unsol(struct hda_codec *codec) 4425 + { 4426 + snd_hda_jack_detect_enable_callback(codec, UNSOL_TAG_HP, hp_callback); 4427 + snd_hda_jack_detect_enable_callback(codec, UNSOL_TAG_AMIC1, 4428 + amic_callback); 4429 + snd_hda_jack_detect_enable_callback(codec, UNSOL_TAG_DSP, 4430 + ca0132_process_dsp_response); 4437 4431 } 4438 4432 4439 4433 /* ··· 4439 4443 static struct hda_verb ca0132_base_init_verbs[] = { 4440 4444 /*enable ct extension*/ 4441 4445 {0x15, VENDOR_CHIPIO_CT_EXTENSIONS_ENABLE, 0x1}, 4442 - /*enable DSP node unsol, needed for DSP download*/ 4443 - {0x16, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | UNSOL_TAG_DSP}, 4444 4446 {} 4445 4447 }; 4446 4448 ··· 4555 4561 4556 4562 snd_hda_power_up(codec); 4557 4563 4564 + ca0132_init_unsol(codec); 4565 + 4558 4566 ca0132_init_params(codec); 4559 4567 ca0132_init_flags(codec); 4560 4568 snd_hda_sequence_write(codec, spec->base_init_verbs); ··· 4578 4582 4579 4583 for (i = 0; i < spec->num_init_verbs; i++) 4580 4584 snd_hda_sequence_write(codec, spec->init_verbs[i]); 4581 - 4582 - 
ca0132_init_unsol(codec); 4583 4585 4584 4586 ca0132_select_out(codec); 4585 4587 ca0132_select_mic(codec); ··· 4606 4612 .build_pcms = ca0132_build_pcms, 4607 4613 .init = ca0132_init, 4608 4614 .free = ca0132_free, 4609 - .unsol_event = ca0132_unsol_event, 4615 + .unsol_event = snd_hda_jack_unsol_event, 4610 4616 }; 4611 4617 4612 4618 static void ca0132_config(struct hda_codec *codec)
+1 -4
sound/pci/hda/patch_cirrus.c
··· 135 135 #define CS421X_IDX_DAC_CFG 0x03 136 136 #define CS421X_IDX_SPK_CTL 0x04 137 137 138 - #define SPDIF_EVENT 0x04 139 - 140 138 /* Cirrus Logic CS4213 is like CS4210 but does not have SPDIF input/output */ 141 139 #define CS4213_VENDOR_NID 0x09 142 140 ··· 982 984 } 983 985 984 986 static void cs4210_spdif_automute(struct hda_codec *codec, 985 - struct hda_jack_tbl *tbl) 987 + struct hda_jack_callback *tbl) 986 988 { 987 989 struct cs_spec *spec = codec->spec; 988 990 bool spdif_present = false; ··· 1017 1019 if (get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP) { 1018 1020 spec->spdif_detect = 1; 1019 1021 snd_hda_jack_detect_enable_callback(codec, nid, 1020 - SPDIF_EVENT, 1021 1022 cs4210_spdif_automute); 1022 1023 } 1023 1024 }
+10 -2
sound/pci/hda/patch_conexant.c
··· 216 216 CXT_FIXUP_HEADPHONE_MIC_PIN, 217 217 CXT_FIXUP_HEADPHONE_MIC, 218 218 CXT_FIXUP_GPIO1, 219 + CXT_FIXUP_ASPIRE_DMIC, 219 220 CXT_FIXUP_THINKPAD_ACPI, 220 221 CXT_FIXUP_OLPC_XO, 221 222 CXT_FIXUP_CAP_MIX_AMP, ··· 393 392 } 394 393 395 394 /* mic_autoswitch hook */ 396 - static void olpc_xo_automic(struct hda_codec *codec, struct hda_jack_tbl *jack) 395 + static void olpc_xo_automic(struct hda_codec *codec, 396 + struct hda_jack_callback *jack) 397 397 { 398 398 struct conexant_spec *spec = codec->spec; 399 399 int saved_cached_write = codec->cached_write; ··· 665 663 { } 666 664 }, 667 665 }, 666 + [CXT_FIXUP_ASPIRE_DMIC] = { 667 + .type = HDA_FIXUP_FUNC, 668 + .v.func = cxt_fixup_stereo_dmic, 669 + .chained = true, 670 + .chain_id = CXT_FIXUP_GPIO1, 671 + }, 668 672 [CXT_FIXUP_THINKPAD_ACPI] = { 669 673 .type = HDA_FIXUP_FUNC, 670 674 .v.func = hda_fixup_thinkpad_acpi, ··· 751 743 752 744 static const struct snd_pci_quirk cxt5066_fixups[] = { 753 745 SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), 754 - SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_GPIO1), 746 + SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), 755 747 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), 756 748 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO), 757 749 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
+12 -6
sound/pci/hda/patch_hdmi.c
··· 1163 1163 1164 1164 static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll); 1165 1165 1166 - static void jack_callback(struct hda_codec *codec, struct hda_jack_tbl *jack) 1166 + static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid) 1167 1167 { 1168 1168 struct hdmi_spec *spec = codec->spec; 1169 - int pin_idx = pin_nid_to_pin_index(codec, jack->nid); 1169 + int pin_idx = pin_nid_to_pin_index(codec, nid); 1170 + 1170 1171 if (pin_idx < 0) 1171 1172 return; 1172 - 1173 1173 if (hdmi_present_sense(get_pin(spec, pin_idx), 1)) 1174 1174 snd_hda_jack_report_sync(codec); 1175 + } 1176 + 1177 + static void jack_callback(struct hda_codec *codec, 1178 + struct hda_jack_callback *jack) 1179 + { 1180 + check_presence_and_report(codec, jack->tbl->nid); 1175 1181 } 1176 1182 1177 1183 static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res) ··· 1196 1190 codec->addr, jack->nid, dev_entry, !!(res & AC_UNSOL_RES_IA), 1197 1191 !!(res & AC_UNSOL_RES_PD), !!(res & AC_UNSOL_RES_ELDV)); 1198 1192 1199 - jack_callback(codec, jack); 1193 + check_presence_and_report(codec, jack->nid); 1200 1194 } 1201 1195 1202 1196 static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res) ··· 2171 2165 hda_nid_t pin_nid = per_pin->pin_nid; 2172 2166 2173 2167 hdmi_init_pin(codec, pin_nid); 2174 - snd_hda_jack_detect_enable_callback(codec, pin_nid, pin_nid, 2168 + snd_hda_jack_detect_enable_callback(codec, pin_nid, 2175 2169 codec->jackpoll_interval > 0 ? jack_callback : NULL); 2176 2170 } 2177 2171 return 0; ··· 2434 2428 if (get_wcaps(codec, pin) & AC_WCAP_OUT_AMP) 2435 2429 snd_hda_codec_write(codec, pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, 2436 2430 AMP_OUT_UNMUTE); 2437 - snd_hda_jack_detect_enable(codec, pin, pin); 2431 + snd_hda_jack_detect_enable(codec, pin); 2438 2432 return 0; 2439 2433 } 2440 2434
+30 -11
sound/pci/hda/patch_realtek.c
··· 40 40 /* keep halting ALC5505 DSP, for power saving */ 41 41 #define HALT_REALTEK_ALC5505 42 42 43 - /* unsol event tags */ 44 - #define ALC_DCVOL_EVENT 0x08 45 - 46 43 /* for GPIO Poll */ 47 44 #define GPIO_MASK 0x03 48 45 ··· 264 267 } 265 268 266 269 /* update the master volume per volume-knob's unsol event */ 267 - static void alc_update_knob_master(struct hda_codec *codec, struct hda_jack_tbl *jack) 270 + static void alc_update_knob_master(struct hda_codec *codec, 271 + struct hda_jack_callback *jack) 268 272 { 269 273 unsigned int val; 270 274 struct snd_kcontrol *kctl; ··· 277 279 uctl = kzalloc(sizeof(*uctl), GFP_KERNEL); 278 280 if (!uctl) 279 281 return; 280 - val = snd_hda_codec_read(codec, jack->nid, 0, 282 + val = snd_hda_codec_read(codec, jack->tbl->nid, 0, 281 283 AC_VERB_GET_VOLUME_KNOB_CONTROL, 0); 282 284 val &= HDA_AMP_VOLMASK; 283 285 uctl->value.integer.value[0] = val; ··· 371 373 case 0x10ec0885: 372 374 case 0x10ec0887: 373 375 /*case 0x10ec0889:*/ /* this causes an SPDIF problem */ 376 + case 0x10ec0900: 374 377 alc889_coef_init(codec); 375 378 break; 376 379 case 0x10ec0888: ··· 1128 1129 const struct hda_fixup *fix, int action) 1129 1130 { 1130 1131 if (action == HDA_FIXUP_ACT_PROBE) 1131 - snd_hda_jack_detect_enable_callback(codec, 0x21, ALC_DCVOL_EVENT, alc_update_knob_master); 1132 + snd_hda_jack_detect_enable_callback(codec, 0x21, 1133 + alc_update_knob_master); 1132 1134 } 1133 1135 1134 1136 static const struct hda_fixup alc880_fixups[] = { ··· 1592 1592 spec->gen.detect_hp = 1; 1593 1593 spec->gen.automute_speaker = 1; 1594 1594 spec->gen.autocfg.hp_pins[0] = 0x0f; /* copy it for automute */ 1595 - snd_hda_jack_detect_enable_callback(codec, 0x0f, HDA_GEN_HP_EVENT, 1595 + snd_hda_jack_detect_enable_callback(codec, 0x0f, 1596 1596 snd_hda_gen_hp_automute); 1597 1597 snd_hda_add_verbs(codec, alc_gpio1_init_verbs); 1598 1598 } ··· 2346 2346 switch (codec->vendor_id) { 2347 2347 case 0x10ec0882: 2348 2348 case 0x10ec0885: 2349 + case 
0x10ec0900: 2349 2350 break; 2350 2351 default: 2351 2352 /* ALC883 and variants */ ··· 3273 3272 } 3274 3273 3275 3274 static void alc269_x101_hp_automute_hook(struct hda_codec *codec, 3276 - struct hda_jack_tbl *jack) 3275 + struct hda_jack_callback *jack) 3277 3276 { 3278 3277 struct alc_spec *spec = codec->spec; 3279 3278 int vref; ··· 3927 3926 alc_update_headset_mode(codec); 3928 3927 } 3929 3928 3930 - static void alc_update_headset_jack_cb(struct hda_codec *codec, struct hda_jack_tbl *jack) 3929 + static void alc_update_headset_jack_cb(struct hda_codec *codec, 3930 + struct hda_jack_callback *jack) 3931 3931 { 3932 3932 struct alc_spec *spec = codec->spec; 3933 3933 spec->current_headset_type = ALC_HEADSET_TYPE_UNKNOWN; ··· 4168 4166 } 4169 4167 4170 4168 static void alc283_hp_automute_hook(struct hda_codec *codec, 4171 - struct hda_jack_tbl *jack) 4169 + struct hda_jack_callback *jack) 4172 4170 { 4173 4171 struct alc_spec *spec = codec->spec; 4174 4172 int vref; ··· 4254 4252 spec->gen.auto_mute_via_amp = 1; 4255 4253 spec->gen.automute_hook = asus_tx300_automute; 4256 4254 snd_hda_jack_detect_enable_callback(codec, 0x1b, 4257 - HDA_GEN_HP_EVENT, 4258 4255 snd_hda_gen_hp_automute); 4259 4256 break; 4260 4257 case HDA_FIXUP_ACT_BUILD: ··· 4354 4353 ALC292_FIXUP_TPT440_DOCK, 4355 4354 ALC283_FIXUP_BXBT2807_MIC, 4356 4355 ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED, 4356 + ALC282_FIXUP_ASPIRE_V5_PINS, 4357 4357 }; 4358 4358 4359 4359 static const struct hda_fixup alc269_fixups[] = { ··· 4802 4800 .chained_before = true, 4803 4801 .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE 4804 4802 }, 4803 + [ALC282_FIXUP_ASPIRE_V5_PINS] = { 4804 + .type = HDA_FIXUP_PINS, 4805 + .v.pins = (const struct hda_pintbl[]) { 4806 + { 0x12, 0x90a60130 }, 4807 + { 0x14, 0x90170110 }, 4808 + { 0x17, 0x40000008 }, 4809 + { 0x18, 0x411111f0 }, 4810 + { 0x19, 0x411111f0 }, 4811 + { 0x1a, 0x411111f0 }, 4812 + { 0x1b, 0x411111f0 }, 4813 + { 0x1d, 0x40f89b2d }, 4814 + { 0x1e, 0x411111f0 }, 4815 
+ { 0x21, 0x0321101f }, 4816 + { }, 4817 + }, 4818 + }, 4805 4819 4806 4820 }; 4807 4821 ··· 4829 4811 SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK), 4830 4812 SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), 4831 4813 SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), 4814 + SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), 4832 4815 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), 4833 4816 SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER), 4834 4817 SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+20 -59
sound/pci/hda/patch_sigmatel.c
··· 40 40 #include "hda_generic.h" 41 41 42 42 enum { 43 - STAC_VREF_EVENT = 8, 44 - STAC_PWR_EVENT, 45 - }; 46 - 47 - enum { 48 43 STAC_REF, 49 44 STAC_9200_OQO, 50 45 STAC_9200_DELL_D21, ··· 481 486 482 487 /* update power bit per jack plug/unplug */ 483 488 static void jack_update_power(struct hda_codec *codec, 484 - struct hda_jack_tbl *jack) 489 + struct hda_jack_callback *jack) 485 490 { 486 491 struct sigmatel_spec *spec = codec->spec; 487 492 int i; ··· 489 494 if (!spec->num_pwrs) 490 495 return; 491 496 492 - if (jack && jack->nid) { 493 - stac_toggle_power_map(codec, jack->nid, 494 - snd_hda_jack_detect(codec, jack->nid), 497 + if (jack && jack->tbl->nid) { 498 + stac_toggle_power_map(codec, jack->tbl->nid, 499 + snd_hda_jack_detect(codec, jack->tbl->nid), 495 500 true); 496 501 return; 497 502 } ··· 499 504 /* update all jacks */ 500 505 for (i = 0; i < spec->num_pwrs; i++) { 501 506 hda_nid_t nid = spec->pwr_nids[i]; 502 - jack = snd_hda_jack_tbl_get(codec, nid); 503 - if (!jack || !jack->action) 507 + if (!snd_hda_jack_tbl_get(codec, nid)) 504 508 continue; 505 - if (jack->action == STAC_PWR_EVENT || 506 - jack->action <= HDA_GEN_LAST_EVENT) 507 - stac_toggle_power_map(codec, nid, 508 - snd_hda_jack_detect(codec, nid), 509 - false); 509 + stac_toggle_power_map(codec, nid, 510 + snd_hda_jack_detect(codec, nid), 511 + false); 510 512 } 511 513 512 514 snd_hda_codec_write(codec, codec->afg, 0, AC_VERB_IDT_SET_POWER_MAP, 513 515 spec->power_map_bits); 514 516 } 515 517 516 - static void stac_hp_automute(struct hda_codec *codec, 517 - struct hda_jack_tbl *jack) 518 - { 519 - snd_hda_gen_hp_automute(codec, jack); 520 - jack_update_power(codec, jack); 521 - } 522 - 523 - static void stac_line_automute(struct hda_codec *codec, 524 - struct hda_jack_tbl *jack) 525 - { 526 - snd_hda_gen_line_automute(codec, jack); 527 - jack_update_power(codec, jack); 528 - } 529 - 530 - static void stac_mic_autoswitch(struct hda_codec *codec, 531 - struct hda_jack_tbl *jack) 
532 - { 533 - snd_hda_gen_mic_autoswitch(codec, jack); 534 - jack_update_power(codec, jack); 535 - } 536 - 537 - static void stac_vref_event(struct hda_codec *codec, struct hda_jack_tbl *event) 518 + static void stac_vref_event(struct hda_codec *codec, 519 + struct hda_jack_callback *event) 538 520 { 539 521 unsigned int data; 540 522 ··· 534 562 hda_nid_t nid = spec->pwr_nids[i]; 535 563 unsigned int def_conf = snd_hda_codec_get_pincfg(codec, nid); 536 564 def_conf = get_defcfg_connect(def_conf); 537 - if (snd_hda_jack_tbl_get(codec, nid)) 538 - continue; 539 565 if (def_conf == AC_JACK_PORT_COMPLEX && 540 - !(spec->vref_mute_led_nid == nid || 541 - is_jack_detectable(codec, nid))) { 566 + spec->vref_mute_led_nid != nid && 567 + is_jack_detectable(codec, nid)) { 542 568 snd_hda_jack_detect_enable_callback(codec, nid, 543 - STAC_PWR_EVENT, 544 569 jack_update_power); 545 570 } else { 546 571 if (def_conf == AC_JACK_PORT_NONE) ··· 2988 3019 const struct hda_fixup *fix, int action) 2989 3020 { 2990 3021 struct sigmatel_spec *spec = codec->spec; 2991 - struct hda_jack_tbl *jack; 3022 + struct hda_jack_callback *jack; 2992 3023 2993 3024 if (action != HDA_FIXUP_ACT_PRE_PROBE) 2994 3025 return; ··· 2996 3027 /* Enable VREF power saving on GPIO1 detect */ 2997 3028 snd_hda_codec_write_cache(codec, codec->afg, 0, 2998 3029 AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK, 0x02); 2999 - snd_hda_jack_detect_enable_callback(codec, codec->afg, 3000 - STAC_VREF_EVENT, 3001 - stac_vref_event); 3002 - jack = snd_hda_jack_tbl_get(codec, codec->afg); 3003 - if (jack) 3030 + jack = snd_hda_jack_detect_enable_callback(codec, codec->afg, 3031 + stac_vref_event); 3032 + if (!IS_ERR(jack)) 3004 3033 jack->private_data = 0x02; 3005 3034 3006 3035 spec->gpio_mask |= 0x02; ··· 4010 4043 const struct hda_fixup *fix, int action) 4011 4044 { 4012 4045 struct sigmatel_spec *spec = codec->spec; 4013 - struct hda_jack_tbl *jack; 4046 + struct hda_jack_callback *jack; 4014 4047 4015 4048 if (action == 
HDA_FIXUP_ACT_PRE_PROBE) { 4016 4049 snd_hda_apply_pincfgs(codec, dell_9205_m43_pin_configs); ··· 4018 4051 /* Enable unsol response for GPIO4/Dock HP connection */ 4019 4052 snd_hda_codec_write_cache(codec, codec->afg, 0, 4020 4053 AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK, 0x10); 4021 - snd_hda_jack_detect_enable_callback(codec, codec->afg, 4022 - STAC_VREF_EVENT, 4023 - stac_vref_event); 4024 - jack = snd_hda_jack_tbl_get(codec, codec->afg); 4025 - if (jack) 4054 + jack = snd_hda_jack_detect_enable_callback(codec, codec->afg, 4055 + stac_vref_event); 4056 + if (!IS_ERR(jack)) 4026 4057 jack->private_data = 0x01; 4027 4058 4028 4059 spec->gpio_dir = 0x0b; ··· 4183 4218 spec->gen.pcm_capture_hook = stac_capture_pcm_hook; 4184 4219 4185 4220 spec->gen.automute_hook = stac_update_outputs; 4186 - spec->gen.hp_automute_hook = stac_hp_automute; 4187 - spec->gen.line_automute_hook = stac_line_automute; 4188 - spec->gen.mic_autoswitch_hook = stac_mic_autoswitch; 4189 4221 4190 4222 err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg); 4191 4223 if (err < 0) ··· 4238 4276 4239 4277 return 0; 4240 4278 } 4241 - 4242 4279 4243 4280 static int stac_init(struct hda_codec *codec) 4244 4281 {
+4 -26
sound/pci/hda/patch_via.c
··· 118 118 struct hda_codec *codec, 119 119 struct snd_pcm_substream *substream, 120 120 int action); 121 - static void via_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *tbl); 122 121 123 122 static struct via_spec *via_new_spec(struct hda_codec *codec) 124 123 { ··· 574 575 {} /* terminator */ 575 576 }; 576 577 577 - static void via_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *tbl) 578 - { 579 - set_widgets_power_state(codec); 580 - snd_hda_gen_hp_automute(codec, tbl); 581 - } 582 - 583 - static void via_line_automute(struct hda_codec *codec, struct hda_jack_tbl *tbl) 584 - { 585 - set_widgets_power_state(codec); 586 - snd_hda_gen_line_automute(codec, tbl); 587 - } 588 - 589 - static void via_jack_powerstate_event(struct hda_codec *codec, struct hda_jack_tbl *tbl) 578 + static void via_jack_powerstate_event(struct hda_codec *codec, 579 + struct hda_jack_callback *tbl) 590 580 { 591 581 set_widgets_power_state(codec); 592 582 } 593 - 594 - #define VIA_JACK_EVENT (HDA_GEN_LAST_EVENT + 1) 595 583 596 584 static void via_set_jack_unsol_events(struct hda_codec *codec) 597 585 { ··· 587 601 hda_nid_t pin; 588 602 int i; 589 603 590 - spec->gen.hp_automute_hook = via_hp_automute; 591 - if (cfg->speaker_pins[0]) 592 - spec->gen.line_automute_hook = via_line_automute; 593 - 594 604 for (i = 0; i < cfg->line_outs; i++) { 595 605 pin = cfg->line_out_pins[i]; 596 - if (pin && !snd_hda_jack_tbl_get(codec, pin) && 597 - is_jack_detectable(codec, pin)) 606 + if (pin && is_jack_detectable(codec, pin)) 598 607 snd_hda_jack_detect_enable_callback(codec, pin, 599 - VIA_JACK_EVENT, 600 608 via_jack_powerstate_event); 601 609 } 602 610 603 611 for (i = 0; i < cfg->num_inputs; i++) { 604 612 pin = cfg->line_out_pins[i]; 605 - if (pin && !snd_hda_jack_tbl_get(codec, pin) && 606 - is_jack_detectable(codec, pin)) 613 + if (pin && is_jack_detectable(codec, pin)) 607 614 snd_hda_jack_detect_enable_callback(codec, pin, 608 - VIA_JACK_EVENT, 609 615 
via_jack_powerstate_event); 610 616 } 611 617 }
+6 -6
sound/soc/codecs/cs4265.c
··· 282 282 283 283 /*64k*/ 284 284 {8192000, 64000, 1, 0}, 285 - {1228800, 64000, 1, 1}, 286 - {1693440, 64000, 1, 2}, 287 - {2457600, 64000, 1, 3}, 288 - {3276800, 64000, 1, 4}, 285 + {12288000, 64000, 1, 1}, 286 + {16934400, 64000, 1, 2}, 287 + {24576000, 64000, 1, 3}, 288 + {32768000, 64000, 1, 4}, 289 289 290 290 /* 88.2k */ 291 291 {11289600, 88200, 1, 0}, ··· 435 435 index = cs4265_get_clk_index(cs4265->sysclk, params_rate(params)); 436 436 if (index >= 0) { 437 437 snd_soc_update_bits(codec, CS4265_ADC_CTL, 438 - CS4265_ADC_FM, clk_map_table[index].fm_mode); 438 + CS4265_ADC_FM, clk_map_table[index].fm_mode << 6); 439 439 snd_soc_update_bits(codec, CS4265_MCLK_FREQ, 440 440 CS4265_MCLK_FREQ_MASK, 441 - clk_map_table[index].mclkdiv); 441 + clk_map_table[index].mclkdiv << 4); 442 442 443 443 } else { 444 444 dev_err(codec->dev, "can't get correct mclk\n");
+1 -1
sound/soc/codecs/da732x.h
··· 11 11 */ 12 12 13 13 #ifndef __DA732X_H_ 14 - #define __DA732X_H 14 + #define __DA732X_H_ 15 15 16 16 #include <sound/soc.h> 17 17
+1
sound/soc/codecs/rt5640.c
··· 2059 2059 static const struct regmap_config rt5640_regmap = { 2060 2060 .reg_bits = 8, 2061 2061 .val_bits = 16, 2062 + .use_single_rw = true, 2062 2063 2063 2064 .max_register = RT5640_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5640_ranges) * 2064 2065 RT5640_PR_SPACING),
+4 -4
sound/soc/codecs/rt5677.c
··· 2135 2135 { "BST2", NULL, "IN2P" }, 2136 2136 { "BST2", NULL, "IN2N" }, 2137 2137 2138 - { "IN1P", NULL, "micbias1" }, 2139 - { "IN1N", NULL, "micbias1" }, 2140 - { "IN2P", NULL, "micbias1" }, 2141 - { "IN2N", NULL, "micbias1" }, 2138 + { "IN1P", NULL, "MICBIAS1" }, 2139 + { "IN1N", NULL, "MICBIAS1" }, 2140 + { "IN2P", NULL, "MICBIAS1" }, 2141 + { "IN2N", NULL, "MICBIAS1" }, 2142 2142 2143 2143 { "ADC 1", NULL, "BST1" }, 2144 2144 { "ADC 1", NULL, "ADC 1 power" },
+8
sound/soc/generic/simple-card.c
··· 481 481 snd_soc_card_set_drvdata(&priv->snd_card, priv); 482 482 483 483 ret = devm_snd_soc_register_card(&pdev->dev, &priv->snd_card); 484 + if (ret >= 0) 485 + return ret; 484 486 485 487 err: 486 488 asoc_simple_card_unref(pdev); 487 489 return ret; 490 + } 491 + 492 + static int asoc_simple_card_remove(struct platform_device *pdev) 493 + { 494 + return asoc_simple_card_unref(pdev); 488 495 } 489 496 490 497 static const struct of_device_id asoc_simple_of_match[] = { ··· 507 500 .of_match_table = asoc_simple_of_match, 508 501 }, 509 502 .probe = asoc_simple_card_probe, 503 + .remove = asoc_simple_card_remove, 510 504 }; 511 505 512 506 module_platform_driver(asoc_simple_card);
+1 -1
sound/soc/omap/omap-twl4030.c
··· 260 260 .stream_name = "TWL4030 Voice", 261 261 .cpu_dai_name = "omap-mcbsp.3", 262 262 .codec_dai_name = "twl4030-voice", 263 - .platform_name = "omap-mcbsp.2", 263 + .platform_name = "omap-mcbsp.3", 264 264 .codec_name = "twl4030-codec", 265 265 .dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF | 266 266 SND_SOC_DAIFMT_CBM_CFM,
+1 -1
sound/soc/sh/rcar/gen.c
··· 247 247 }; 248 248 249 249 /* it shouldn't happen */ 250 - if (use_dvc & !use_src) 250 + if (use_dvc && !use_src) 251 251 dev_err(dev, "DVC is selected without SRC\n"); 252 252 253 253 /* use SSIU or SSI ? */
+1 -1
sound/soc/soc-core.c
··· 1325 1325 device_initialize(rtd->dev); 1326 1326 rtd->dev->parent = rtd->card->dev; 1327 1327 rtd->dev->release = rtd_release; 1328 - rtd->dev->init_name = name; 1328 + dev_set_name(rtd->dev, "%s", name); 1329 1329 dev_set_drvdata(rtd->dev, rtd); 1330 1330 mutex_init(&rtd->pcm_mutex); 1331 1331 INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
+1 -1
sound/soc/tegra/tegra_asoc_utils.h
··· 21 21 */ 22 22 23 23 #ifndef __TEGRA_ASOC_UTILS_H__ 24 - #define __TEGRA_ASOC_UTILS_H_ 24 + #define __TEGRA_ASOC_UTILS_H__ 25 25 26 26 struct clk; 27 27 struct device;
+3 -3
tools/testing/selftests/ipc/Makefile
··· 1 1 uname_M := $(shell uname -m 2>/dev/null || echo not) 2 2 ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/) 3 3 ifeq ($(ARCH),i386) 4 - ARCH := X86 4 + ARCH := x86 5 5 CFLAGS := -DCONFIG_X86_32 -D__i386__ 6 6 endif 7 7 ifeq ($(ARCH),x86_64) 8 - ARCH := X86 8 + ARCH := x86 9 9 CFLAGS := -DCONFIG_X86_64 -D__x86_64__ 10 10 endif 11 11 12 12 CFLAGS += -I../../../../usr/include/ 13 13 14 14 all: 15 - ifeq ($(ARCH),X86) 15 + ifeq ($(ARCH),x86) 16 16 gcc $(CFLAGS) msgque.c -o msgque_test 17 17 else 18 18 echo "Not an x86 target, can't build msgque selftest"
+3 -3
tools/testing/selftests/kcmp/Makefile
··· 1 1 uname_M := $(shell uname -m 2>/dev/null || echo not) 2 2 ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/) 3 3 ifeq ($(ARCH),i386) 4 - ARCH := X86 4 + ARCH := x86 5 5 CFLAGS := -DCONFIG_X86_32 -D__i386__ 6 6 endif 7 7 ifeq ($(ARCH),x86_64) 8 - ARCH := X86 8 + ARCH := x86 9 9 CFLAGS := -DCONFIG_X86_64 -D__x86_64__ 10 10 endif 11 11 ··· 15 15 CFLAGS += -I../../../../arch/x86/include/ 16 16 17 17 all: 18 - ifeq ($(ARCH),X86) 18 + ifeq ($(ARCH),x86) 19 19 gcc $(CFLAGS) kcmp_test.c -o kcmp_test 20 20 else 21 21 echo "Not an x86 target, can't build kcmp selftest"
+5 -5
tools/testing/selftests/memfd/Makefile
··· 1 1 uname_M := $(shell uname -m 2>/dev/null || echo not) 2 2 ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/) 3 3 ifeq ($(ARCH),i386) 4 - ARCH := X86 4 + ARCH := x86 5 5 endif 6 6 ifeq ($(ARCH),x86_64) 7 - ARCH := X86 7 + ARCH := x86 8 8 endif 9 9 10 10 CFLAGS += -D_FILE_OFFSET_BITS=64 ··· 14 14 CFLAGS += -I../../../../include/ 15 15 16 16 all: 17 - ifeq ($(ARCH),X86) 17 + ifeq ($(ARCH),x86) 18 18 gcc $(CFLAGS) memfd_test.c -o memfd_test 19 19 else 20 20 echo "Not an x86 target, can't build memfd selftest" 21 21 endif 22 22 23 23 run_tests: all 24 - ifeq ($(ARCH),X86) 24 + ifeq ($(ARCH),x86) 25 25 gcc $(CFLAGS) memfd_test.c -o memfd_test 26 26 endif 27 27 @./memfd_test || echo "memfd_test: [FAIL]" 28 28 29 29 build_fuse: 30 - ifeq ($(ARCH),X86) 30 + ifeq ($(ARCH),x86) 31 31 gcc $(CFLAGS) fuse_mnt.c `pkg-config fuse --cflags --libs` -o fuse_mnt 32 32 gcc $(CFLAGS) fuse_test.c -o fuse_test 33 33 else