Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'linus' into sched/core, to resolve conflict

Conflicts:
arch/sparc/include/asm/topology_64.h

Signed-off-by: Ingo Molnar <mingo@kernel.org>

+3728 -1984
+1 -1
Documentation/ABI/testing/sysfs-devices-system-cpu
··· 162 162 What: /sys/devices/system/cpu/cpu*/cache/index3/cache_disable_{0,1} 163 163 Date: August 2008 164 164 KernelVersion: 2.6.27 165 - Contact: discuss@x86-64.org 165 + Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> 166 166 Description: Disable L3 cache indices 167 167 168 168 These files exist in every CPU's cache/index3 directory. Each
+3 -1
Documentation/devicetree/bindings/clock/silabs,si5351.txt
··· 17 17 - #clock-cells: from common clock binding; shall be set to 1. 18 18 - clocks: from common clock binding; list of parent clock 19 19 handles, shall be xtal reference clock or xtal and clkin for 20 - si5351c only. 20 + si5351c only. Corresponding clock input names are "xtal" and 21 + "clkin" respectively. 21 22 - #address-cells: shall be set to 1. 22 23 - #size-cells: shall be set to 0. 23 24 ··· 72 71 73 72 /* connect xtal input to 25MHz reference */ 74 73 clocks = <&ref25>; 74 + clock-names = "xtal"; 75 75 76 76 /* connect xtal input as source of pll0 and pll1 */ 77 77 silabs,pll-source = <0 0>, <1 0>;
+2 -1
Documentation/devicetree/bindings/net/cdns-emac.txt
··· 3 3 Required properties: 4 4 - compatible: Should be "cdns,[<chip>-]{emac}" 5 5 Use "cdns,at91rm9200-emac" Atmel at91rm9200 SoC. 6 - or the generic form: "cdns,emac". 6 + Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC. 7 + Or the generic form: "cdns,emac". 7 8 - reg: Address and length of the register set for the device 8 9 - interrupts: Should contain macb interrupt 9 10 - phy-mode: see ethernet.txt file in the same directory.
+1 -1
Documentation/hwmon/tmp401
··· 20 20 Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp432.html 21 21 * Texas Instruments TMP435 22 22 Prefix: 'tmp435' 23 - Addresses scanned: I2C 0x37, 0x48 - 0x4f 23 + Addresses scanned: I2C 0x48 - 0x4f 24 24 Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp435.html 25 25 26 26 Authors:
+7 -26
Documentation/target/tcmu-design.txt
··· 15 15 a) Discovering and configuring TCMU uio devices 16 16 b) Waiting for events on the device(s) 17 17 c) Managing the command ring 18 - 3) Command filtering and pass_level 19 - 4) A final note 18 + 3) A final note 20 19 21 20 22 21 TCM Userspace Design ··· 323 324 /* Process events from cmd ring until we catch up with cmd_head */ 324 325 while (ent != (void *)mb + mb->cmdr_off + mb->cmd_head) { 325 326 326 - if (tcmu_hdr_get_op(&ent->hdr) == TCMU_OP_CMD) { 327 + if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) { 327 328 uint8_t *cdb = (void *)mb + ent->req.cdb_off; 328 329 bool success = true; 329 330 ··· 338 339 ent->rsp.scsi_status = SCSI_CHECK_CONDITION; 339 340 } 340 341 } 342 + else if (tcmu_hdr_get_op(ent->hdr.len_op) != TCMU_OP_PAD) { 343 + /* Tell the kernel we didn't handle unknown opcodes */ 344 + ent->hdr.uflags |= TCMU_UFLAG_UNKNOWN_OP; 345 + } 341 346 else { 342 - /* Do nothing for PAD entries */ 347 + /* Do nothing for PAD entries except update cmd_tail */ 343 348 } 344 349 345 350 /* update cmd_tail */ ··· 361 358 362 359 return 0; 363 360 } 364 - 365 - 366 - Command filtering and pass_level 367 - -------------------------------- 368 - 369 - TCMU supports a "pass_level" option with valid values of 0 or 1. When 370 - the value is 0 (the default), nearly all SCSI commands received for 371 - the device are passed through to the handler. This allows maximum 372 - flexibility but increases the amount of code required by the handler, 373 - to support all mandatory SCSI commands. If pass_level is set to 1, 374 - then only IO-related commands are presented, and the rest are handled 375 - by LIO's in-kernel command emulation. The commands presented at level 376 - 1 include all versions of: 377 - 378 - READ 379 - WRITE 380 - WRITE_VERIFY 381 - XDWRITEREAD 382 - WRITE_SAME 383 - COMPARE_AND_WRITE 384 - SYNCHRONIZE_CACHE 385 - UNMAP 386 361 387 362 388 363 A final note
+14 -4
Documentation/virtual/kvm/mmu.txt
··· 169 169 Contains the value of cr4.smep && !cr0.wp for which the page is valid 170 170 (pages for which this is true are different from other pages; see the 171 171 treatment of cr0.wp=0 below). 172 + role.smap_andnot_wp: 173 + Contains the value of cr4.smap && !cr0.wp for which the page is valid 174 + (pages for which this is true are different from other pages; see the 175 + treatment of cr0.wp=0 below). 172 176 gfn: 173 177 Either the guest page table containing the translations shadowed by this 174 178 page, or the base page frame for linear translations. See role.direct. ··· 348 344 349 345 (user write faults generate a #PF) 350 346 351 - In the first case there is an additional complication if CR4.SMEP is 352 - enabled: since we've turned the page into a kernel page, the kernel may now 353 - execute it. We handle this by also setting spte.nx. If we get a user 354 - fetch or read fault, we'll change spte.u=1 and spte.nx=gpte.nx back. 347 + In the first case there are two additional complications: 348 + - if CR4.SMEP is enabled: since we've turned the page into a kernel page, 349 + the kernel may now execute it. We handle this by also setting spte.nx. 350 + If we get a user fetch or read fault, we'll change spte.u=1 and 351 + spte.nx=gpte.nx back. 352 + - if CR4.SMAP is disabled: since the page has been changed to a kernel 353 + page, it can not be reused when CR4.SMAP is enabled. We set 354 + CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note, 355 + here we do not care the case that CR4.SMAP is enabled since KVM will 356 + directly inject #PF to guest due to failed permission check. 355 357 356 358 To prevent an spte that was converted into a kernel page with cr0.wp=0 357 359 from being written by the kernel after cr0.wp has changed to 1, we make
+10 -9
MAINTAINERS
··· 2427 2427 S: Supported 2428 2428 F: include/linux/capability.h 2429 2429 F: include/uapi/linux/capability.h 2430 - F: security/capability.c 2431 2430 F: security/commoncap.c 2432 2431 F: kernel/capability.c 2433 2432 ··· 3824 3825 L: linux-embedded@vger.kernel.org 3825 3826 S: Maintained 3826 3827 3827 - EMULEX LPFC FC SCSI DRIVER 3828 - M: James Smart <james.smart@emulex.com> 3828 + EMULEX/AVAGO LPFC FC/FCOE SCSI DRIVER 3829 + M: James Smart <james.smart@avagotech.com> 3830 + M: Dick Kennedy <dick.kennedy@avagotech.com> 3829 3831 L: linux-scsi@vger.kernel.org 3830 - W: http://sourceforge.net/projects/lpfcxxxx 3832 + W: http://www.avagotech.com 3831 3833 S: Supported 3832 3834 F: drivers/scsi/lpfc/ 3833 3835 ··· 4536 4536 M: Guenter Roeck <linux@roeck-us.net> 4537 4537 L: lm-sensors@lm-sensors.org 4538 4538 W: http://www.lm-sensors.org/ 4539 - T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ 4539 + T: quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/ 4540 4540 T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git 4541 4541 S: Maintained 4542 4542 F: Documentation/hwmon/ ··· 8829 8829 F: include/uapi/linux/phantom.h 8830 8830 8831 8831 SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER 8832 - M: Jayamohan Kallickal <jayamohan.kallickal@emulex.com> 8832 + M: Jayamohan Kallickal <jayamohan.kallickal@avagotech.com> 8833 + M: Minh Tran <minh.tran@avagotech.com> 8834 + M: John Soni Jose <sony.john-n@avagotech.com> 8833 8835 L: linux-scsi@vger.kernel.org 8834 - W: http://www.emulex.com 8836 + W: http://www.avagotech.com 8835 8837 S: Supported 8836 8838 F: drivers/scsi/be2iscsi/ 8837 8839 ··· 10587 10585 F: include/uapi/linux/virtio_input.h 10588 10586 10589 10587 VIA RHINE NETWORK DRIVER 10590 - M: Roger Luethi <rl@hellgate.ch> 10591 - S: Maintained 10588 + S: Orphan 10592 10589 F: drivers/net/ethernet/via/via-rhine.c 10593 10590 10594 10591 VIA SD/MMC CARD CONTROLLER DRIVER
+1 -1
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 1 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc4 4 + EXTRAVERSION = -rc6 5 5 NAME = Hurr durr I'ma sheep 6 6 7 7 # *DOCUMENTATION*
+10 -6
arch/alpha/boot/Makefile
··· 14 14 tools/bootpzh bootloader bootpheader bootpzheader 15 15 OBJSTRIP := $(obj)/tools/objstrip 16 16 17 + HOSTCFLAGS := -Wall -I$(objtree)/usr/include 18 + BOOTCFLAGS += -I$(obj) -I$(srctree)/$(obj) 19 + 17 20 # SRM bootable image. Copy to offset 512 of a partition. 18 21 $(obj)/bootimage: $(addprefix $(obj)/tools/,mkbb lxboot bootlx) $(obj)/vmlinux.nh 19 22 ( cat $(obj)/tools/lxboot $(obj)/tools/bootlx $(obj)/vmlinux.nh ) > $@ ··· 99 96 $(obj)/tools/bootpzh: $(obj)/bootpzheader $(OBJSTRIP) FORCE 100 97 $(call if_changed,objstrip) 101 98 102 - LDFLAGS_bootloader := -static -uvsprintf -T #-N -relax 103 - LDFLAGS_bootpheader := -static -uvsprintf -T #-N -relax 104 - LDFLAGS_bootpzheader := -static -uvsprintf -T #-N -relax 99 + LDFLAGS_bootloader := -static -T # -N -relax 100 + LDFLAGS_bootloader := -static -T # -N -relax 101 + LDFLAGS_bootpheader := -static -T # -N -relax 102 + LDFLAGS_bootpzheader := -static -T # -N -relax 105 103 106 - OBJ_bootlx := $(obj)/head.o $(obj)/main.o 107 - OBJ_bootph := $(obj)/head.o $(obj)/bootp.o 108 - OBJ_bootpzh := $(obj)/head.o $(obj)/bootpz.o $(obj)/misc.o 104 + OBJ_bootlx := $(obj)/head.o $(obj)/stdio.o $(obj)/main.o 105 + OBJ_bootph := $(obj)/head.o $(obj)/stdio.o $(obj)/bootp.o 106 + OBJ_bootpzh := $(obj)/head.o $(obj)/stdio.o $(obj)/bootpz.o $(obj)/misc.o 109 107 110 108 $(obj)/bootloader: $(obj)/bootloader.lds $(OBJ_bootlx) $(LIBS_Y) FORCE 111 109 $(call if_changed,ld)
-1
arch/alpha/boot/main.c
··· 19 19 20 20 #include "ksize.h" 21 21 22 - extern int vsprintf(char *, const char *, va_list); 23 22 extern unsigned long switch_to_osf_pal(unsigned long nr, 24 23 struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa, 25 24 unsigned long *vptb);
+306
arch/alpha/boot/stdio.c
··· 1 + /* 2 + * Copyright (C) Paul Mackerras 1997. 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License 6 + * as published by the Free Software Foundation; either version 7 + * 2 of the License, or (at your option) any later version. 8 + */ 9 + #include <stdarg.h> 10 + #include <stddef.h> 11 + 12 + size_t strnlen(const char * s, size_t count) 13 + { 14 + const char *sc; 15 + 16 + for (sc = s; count-- && *sc != '\0'; ++sc) 17 + /* nothing */; 18 + return sc - s; 19 + } 20 + 21 + # define do_div(n, base) ({ \ 22 + unsigned int __base = (base); \ 23 + unsigned int __rem; \ 24 + __rem = ((unsigned long long)(n)) % __base; \ 25 + (n) = ((unsigned long long)(n)) / __base; \ 26 + __rem; \ 27 + }) 28 + 29 + 30 + static int skip_atoi(const char **s) 31 + { 32 + int i, c; 33 + 34 + for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s) 35 + i = i*10 + c - '0'; 36 + return i; 37 + } 38 + 39 + #define ZEROPAD 1 /* pad with zero */ 40 + #define SIGN 2 /* unsigned/signed long */ 41 + #define PLUS 4 /* show plus */ 42 + #define SPACE 8 /* space if plus */ 43 + #define LEFT 16 /* left justified */ 44 + #define SPECIAL 32 /* 0x */ 45 + #define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ 46 + 47 + static char * number(char * str, unsigned long long num, int base, int size, int precision, int type) 48 + { 49 + char c,sign,tmp[66]; 50 + const char *digits="0123456789abcdefghijklmnopqrstuvwxyz"; 51 + int i; 52 + 53 + if (type & LARGE) 54 + digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; 55 + if (type & LEFT) 56 + type &= ~ZEROPAD; 57 + if (base < 2 || base > 36) 58 + return 0; 59 + c = (type & ZEROPAD) ? 
'0' : ' '; 60 + sign = 0; 61 + if (type & SIGN) { 62 + if ((signed long long)num < 0) { 63 + sign = '-'; 64 + num = - (signed long long)num; 65 + size--; 66 + } else if (type & PLUS) { 67 + sign = '+'; 68 + size--; 69 + } else if (type & SPACE) { 70 + sign = ' '; 71 + size--; 72 + } 73 + } 74 + if (type & SPECIAL) { 75 + if (base == 16) 76 + size -= 2; 77 + else if (base == 8) 78 + size--; 79 + } 80 + i = 0; 81 + if (num == 0) 82 + tmp[i++]='0'; 83 + else while (num != 0) { 84 + tmp[i++] = digits[do_div(num, base)]; 85 + } 86 + if (i > precision) 87 + precision = i; 88 + size -= precision; 89 + if (!(type&(ZEROPAD+LEFT))) 90 + while(size-->0) 91 + *str++ = ' '; 92 + if (sign) 93 + *str++ = sign; 94 + if (type & SPECIAL) { 95 + if (base==8) 96 + *str++ = '0'; 97 + else if (base==16) { 98 + *str++ = '0'; 99 + *str++ = digits[33]; 100 + } 101 + } 102 + if (!(type & LEFT)) 103 + while (size-- > 0) 104 + *str++ = c; 105 + while (i < precision--) 106 + *str++ = '0'; 107 + while (i-- > 0) 108 + *str++ = tmp[i]; 109 + while (size-- > 0) 110 + *str++ = ' '; 111 + return str; 112 + } 113 + 114 + int vsprintf(char *buf, const char *fmt, va_list args) 115 + { 116 + int len; 117 + unsigned long long num; 118 + int i, base; 119 + char * str; 120 + const char *s; 121 + 122 + int flags; /* flags to number() */ 123 + 124 + int field_width; /* width of output field */ 125 + int precision; /* min. # of digits for integers; max 126 + number of chars for from string */ 127 + int qualifier; /* 'h', 'l', or 'L' for integer fields */ 128 + /* 'z' support added 23/7/1999 S.H. 
*/ 129 + /* 'z' changed to 'Z' --davidm 1/25/99 */ 130 + 131 + 132 + for (str=buf ; *fmt ; ++fmt) { 133 + if (*fmt != '%') { 134 + *str++ = *fmt; 135 + continue; 136 + } 137 + 138 + /* process flags */ 139 + flags = 0; 140 + repeat: 141 + ++fmt; /* this also skips first '%' */ 142 + switch (*fmt) { 143 + case '-': flags |= LEFT; goto repeat; 144 + case '+': flags |= PLUS; goto repeat; 145 + case ' ': flags |= SPACE; goto repeat; 146 + case '#': flags |= SPECIAL; goto repeat; 147 + case '0': flags |= ZEROPAD; goto repeat; 148 + } 149 + 150 + /* get field width */ 151 + field_width = -1; 152 + if ('0' <= *fmt && *fmt <= '9') 153 + field_width = skip_atoi(&fmt); 154 + else if (*fmt == '*') { 155 + ++fmt; 156 + /* it's the next argument */ 157 + field_width = va_arg(args, int); 158 + if (field_width < 0) { 159 + field_width = -field_width; 160 + flags |= LEFT; 161 + } 162 + } 163 + 164 + /* get the precision */ 165 + precision = -1; 166 + if (*fmt == '.') { 167 + ++fmt; 168 + if ('0' <= *fmt && *fmt <= '9') 169 + precision = skip_atoi(&fmt); 170 + else if (*fmt == '*') { 171 + ++fmt; 172 + /* it's the next argument */ 173 + precision = va_arg(args, int); 174 + } 175 + if (precision < 0) 176 + precision = 0; 177 + } 178 + 179 + /* get the conversion qualifier */ 180 + qualifier = -1; 181 + if (*fmt == 'l' && *(fmt + 1) == 'l') { 182 + qualifier = 'q'; 183 + fmt += 2; 184 + } else if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' 185 + || *fmt == 'Z') { 186 + qualifier = *fmt; 187 + ++fmt; 188 + } 189 + 190 + /* default base */ 191 + base = 10; 192 + 193 + switch (*fmt) { 194 + case 'c': 195 + if (!(flags & LEFT)) 196 + while (--field_width > 0) 197 + *str++ = ' '; 198 + *str++ = (unsigned char) va_arg(args, int); 199 + while (--field_width > 0) 200 + *str++ = ' '; 201 + continue; 202 + 203 + case 's': 204 + s = va_arg(args, char *); 205 + if (!s) 206 + s = "<NULL>"; 207 + 208 + len = strnlen(s, precision); 209 + 210 + if (!(flags & LEFT)) 211 + while (len < field_width--) 
212 + *str++ = ' '; 213 + for (i = 0; i < len; ++i) 214 + *str++ = *s++; 215 + while (len < field_width--) 216 + *str++ = ' '; 217 + continue; 218 + 219 + case 'p': 220 + if (field_width == -1) { 221 + field_width = 2*sizeof(void *); 222 + flags |= ZEROPAD; 223 + } 224 + str = number(str, 225 + (unsigned long) va_arg(args, void *), 16, 226 + field_width, precision, flags); 227 + continue; 228 + 229 + 230 + case 'n': 231 + if (qualifier == 'l') { 232 + long * ip = va_arg(args, long *); 233 + *ip = (str - buf); 234 + } else if (qualifier == 'Z') { 235 + size_t * ip = va_arg(args, size_t *); 236 + *ip = (str - buf); 237 + } else { 238 + int * ip = va_arg(args, int *); 239 + *ip = (str - buf); 240 + } 241 + continue; 242 + 243 + case '%': 244 + *str++ = '%'; 245 + continue; 246 + 247 + /* integer number formats - set up the flags and "break" */ 248 + case 'o': 249 + base = 8; 250 + break; 251 + 252 + case 'X': 253 + flags |= LARGE; 254 + case 'x': 255 + base = 16; 256 + break; 257 + 258 + case 'd': 259 + case 'i': 260 + flags |= SIGN; 261 + case 'u': 262 + break; 263 + 264 + default: 265 + *str++ = '%'; 266 + if (*fmt) 267 + *str++ = *fmt; 268 + else 269 + --fmt; 270 + continue; 271 + } 272 + if (qualifier == 'l') { 273 + num = va_arg(args, unsigned long); 274 + if (flags & SIGN) 275 + num = (signed long) num; 276 + } else if (qualifier == 'q') { 277 + num = va_arg(args, unsigned long long); 278 + if (flags & SIGN) 279 + num = (signed long long) num; 280 + } else if (qualifier == 'Z') { 281 + num = va_arg(args, size_t); 282 + } else if (qualifier == 'h') { 283 + num = (unsigned short) va_arg(args, int); 284 + if (flags & SIGN) 285 + num = (signed short) num; 286 + } else { 287 + num = va_arg(args, unsigned int); 288 + if (flags & SIGN) 289 + num = (signed int) num; 290 + } 291 + str = number(str, num, base, field_width, precision, flags); 292 + } 293 + *str = '\0'; 294 + return str-buf; 295 + } 296 + 297 + int sprintf(char * buf, const char *fmt, ...) 
298 + { 299 + va_list args; 300 + int i; 301 + 302 + va_start(args, fmt); 303 + i=vsprintf(buf,fmt,args); 304 + va_end(args); 305 + return i; 306 + }
+3
arch/alpha/boot/tools/objstrip.c
··· 27 27 #include <linux/param.h> 28 28 #ifdef __ELF__ 29 29 # include <linux/elf.h> 30 + # define elfhdr elf64_hdr 31 + # define elf_phdr elf64_phdr 32 + # define elf_check_arch(x) ((x)->e_machine == EM_ALPHA) 30 33 #endif 31 34 32 35 /* bootfile size must be multiple of BLOCK_SIZE: */
-1
arch/alpha/include/asm/types.h
··· 2 2 #define _ALPHA_TYPES_H 3 3 4 4 #include <asm-generic/int-ll64.h> 5 - #include <uapi/asm/types.h> 6 5 7 6 #endif /* _ALPHA_TYPES_H */
+1 -1
arch/alpha/include/asm/unistd.h
··· 3 3 4 4 #include <uapi/asm/unistd.h> 5 5 6 - #define NR_SYSCALLS 511 6 + #define NR_SYSCALLS 514 7 7 8 8 #define __ARCH_WANT_OLD_READDIR 9 9 #define __ARCH_WANT_STAT64
+3
arch/alpha/include/uapi/asm/unistd.h
··· 472 472 #define __NR_sched_setattr 508 473 473 #define __NR_sched_getattr 509 474 474 #define __NR_renameat2 510 475 + #define __NR_getrandom 511 476 + #define __NR_memfd_create 512 477 + #define __NR_execveat 513 475 478 476 479 #endif /* _UAPI_ALPHA_UNISTD_H */
-1
arch/alpha/kernel/err_ev6.c
··· 6 6 * Error handling code supporting Alpha systems 7 7 */ 8 8 9 - #include <linux/init.h> 10 9 #include <linux/sched.h> 11 10 12 11 #include <asm/io.h>
-1
arch/alpha/kernel/irq.c
··· 19 19 #include <linux/ptrace.h> 20 20 #include <linux/interrupt.h> 21 21 #include <linux/random.h> 22 - #include <linux/init.h> 23 22 #include <linux/irq.h> 24 23 #include <linux/proc_fs.h> 25 24 #include <linux/seq_file.h>
+1 -2
arch/alpha/kernel/osf_sys.c
··· 1019 1019 if (tv) { 1020 1020 if (get_tv32((struct timeval *)&kts, tv)) 1021 1021 return -EFAULT; 1022 + kts.tv_nsec *= 1000; 1022 1023 } 1023 1024 if (tz) { 1024 1025 if (copy_from_user(&ktz, tz, sizeof(*tz))) 1025 1026 return -EFAULT; 1026 1027 } 1027 - 1028 - kts.tv_nsec *= 1000; 1029 1028 1030 1029 return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); 1031 1030 }
+3 -4
arch/alpha/kernel/process.c
··· 236 236 } 237 237 238 238 /* 239 - * Copy an alpha thread.. 239 + * Copy architecture-specific thread state 240 240 */ 241 - 242 241 int 243 242 copy_thread(unsigned long clone_flags, unsigned long usp, 244 - unsigned long arg, 243 + unsigned long kthread_arg, 245 244 struct task_struct *p) 246 245 { 247 246 extern void ret_from_fork(void); ··· 261 262 sizeof(struct switch_stack) + sizeof(struct pt_regs)); 262 263 childstack->r26 = (unsigned long) ret_from_kernel_thread; 263 264 childstack->r9 = usp; /* function */ 264 - childstack->r10 = arg; 265 + childstack->r10 = kthread_arg; 265 266 childregs->hae = alpha_mv.hae_cache, 266 267 childti->pcb.usp = 0; 267 268 return 0;
+1 -7
arch/alpha/kernel/smp.c
··· 63 63 enum ipi_message_type { 64 64 IPI_RESCHEDULE, 65 65 IPI_CALL_FUNC, 66 - IPI_CALL_FUNC_SINGLE, 67 66 IPI_CPU_STOP, 68 67 }; 69 68 ··· 505 506 return -EINVAL; 506 507 } 507 508 508 - 509 509 static void 510 510 send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) 511 511 { ··· 548 550 549 551 case IPI_CALL_FUNC: 550 552 generic_smp_call_function_interrupt(); 551 - break; 552 - 553 - case IPI_CALL_FUNC_SINGLE: 554 - generic_smp_call_function_single_interrupt(); 555 553 break; 556 554 557 555 case IPI_CPU_STOP: ··· 600 606 601 607 void arch_send_call_function_single_ipi(int cpu) 602 608 { 603 - send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); 609 + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); 604 610 } 605 611 606 612 static void
+1 -2
arch/alpha/kernel/srmcons.c
··· 237 237 238 238 return -ENODEV; 239 239 } 240 - 241 - module_init(srmcons_init); 240 + device_initcall(srmcons_init); 242 241 243 242 244 243 /*
+1 -1
arch/alpha/kernel/sys_marvel.c
··· 331 331 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline); 332 332 irq = intline; 333 333 334 - msi_loc = pci_find_capability(dev, PCI_CAP_ID_MSI); 334 + msi_loc = dev->msi_cap; 335 335 msg_ctl = 0; 336 336 if (msi_loc) 337 337 pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl);
+3
arch/alpha/kernel/systbls.S
··· 529 529 .quad sys_sched_setattr 530 530 .quad sys_sched_getattr 531 531 .quad sys_renameat2 /* 510 */ 532 + .quad sys_getrandom 533 + .quad sys_memfd_create 534 + .quad sys_execveat 532 535 533 536 .size sys_call_table, . - sys_call_table 534 537 .type sys_call_table, @object
-1
arch/alpha/kernel/traps.c
··· 14 14 #include <linux/tty.h> 15 15 #include <linux/delay.h> 16 16 #include <linux/module.h> 17 - #include <linux/init.h> 18 17 #include <linux/kallsyms.h> 19 18 #include <linux/ratelimit.h> 20 19
-1
arch/alpha/oprofile/op_model_ev4.c
··· 8 8 */ 9 9 10 10 #include <linux/oprofile.h> 11 - #include <linux/init.h> 12 11 #include <linux/smp.h> 13 12 #include <asm/ptrace.h> 14 13
-1
arch/alpha/oprofile/op_model_ev5.c
··· 8 8 */ 9 9 10 10 #include <linux/oprofile.h> 11 - #include <linux/init.h> 12 11 #include <linux/smp.h> 13 12 #include <asm/ptrace.h> 14 13
-1
arch/alpha/oprofile/op_model_ev6.c
··· 8 8 */ 9 9 10 10 #include <linux/oprofile.h> 11 - #include <linux/init.h> 12 11 #include <linux/smp.h> 13 12 #include <asm/ptrace.h> 14 13
-1
arch/alpha/oprofile/op_model_ev67.c
··· 9 9 */ 10 10 11 11 #include <linux/oprofile.h> 12 - #include <linux/init.h> 13 12 #include <linux/smp.h> 14 13 #include <asm/ptrace.h> 15 14
+1 -1
arch/arm/boot/dts/Makefile
··· 223 223 imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dtb \ 224 224 imx25-karo-tx25.dtb \ 225 225 imx25-pdk.dtb 226 - dtb-$(CONFIG_SOC_IMX31) += \ 226 + dtb-$(CONFIG_SOC_IMX27) += \ 227 227 imx27-apf27.dtb \ 228 228 imx27-apf27dev.dtb \ 229 229 imx27-eukrea-mbimxsd27-baseboard.dtb \
-4
arch/arm/boot/dts/am335x-boneblack.dts
··· 80 80 status = "okay"; 81 81 }; 82 82 }; 83 - 84 - &rtc { 85 - system-power-controller; 86 - };
+1 -1
arch/arm/boot/dts/am335x-evmsk.dts
··· 654 654 wlcore: wlcore@2 { 655 655 compatible = "ti,wl1271"; 656 656 reg = <2>; 657 - interrupt-parent = <&gpio1>; 657 + interrupt-parent = <&gpio0>; 658 658 interrupts = <31 IRQ_TYPE_LEVEL_HIGH>; /* gpio 31 */ 659 659 ref-clock-frequency = <38400000>; 660 660 };
+1 -1
arch/arm/boot/dts/exynos4412-trats2.dts
··· 736 736 737 737 display-timings { 738 738 timing-0 { 739 - clock-frequency = <0>; 739 + clock-frequency = <57153600>; 740 740 hactive = <720>; 741 741 vactive = <1280>; 742 742 hfront-porch = <5>;
+1 -1
arch/arm/boot/dts/imx27.dtsi
··· 533 533 534 534 fec: ethernet@1002b000 { 535 535 compatible = "fsl,imx27-fec"; 536 - reg = <0x1002b000 0x4000>; 536 + reg = <0x1002b000 0x1000>; 537 537 interrupts = <50>; 538 538 clocks = <&clks IMX27_CLK_FEC_IPG_GATE>, 539 539 <&clks IMX27_CLK_FEC_AHB_GATE>;
+2
arch/arm/boot/dts/omap3-devkit8000.dts
··· 110 110 nand@0,0 { 111 111 reg = <0 0 4>; /* CS0, offset 0, IO size 4 */ 112 112 nand-bus-width = <16>; 113 + gpmc,device-width = <2>; 114 + ti,nand-ecc-opt = "sw"; 113 115 114 116 gpmc,sync-clk-ps = <0>; 115 117 gpmc,cs-on-ns = <0>;
+2 -2
arch/arm/boot/dts/zynq-7000.dtsi
··· 193 193 }; 194 194 195 195 gem0: ethernet@e000b000 { 196 - compatible = "cdns,gem"; 196 + compatible = "cdns,zynq-gem"; 197 197 reg = <0xe000b000 0x1000>; 198 198 status = "disabled"; 199 199 interrupts = <0 22 4>; ··· 204 204 }; 205 205 206 206 gem1: ethernet@e000c000 { 207 - compatible = "cdns,gem"; 207 + compatible = "cdns,zynq-gem"; 208 208 reg = <0xe000c000 0x1000>; 209 209 status = "disabled"; 210 210 interrupts = <0 45 4>;
+1 -1
arch/arm/configs/multi_v7_defconfig
··· 429 429 CONFIG_USB_EHCI_TEGRA=y 430 430 CONFIG_USB_EHCI_HCD_STI=y 431 431 CONFIG_USB_EHCI_HCD_PLATFORM=y 432 - CONFIG_USB_ISP1760_HCD=y 432 + CONFIG_USB_ISP1760=y 433 433 CONFIG_USB_OHCI_HCD=y 434 434 CONFIG_USB_OHCI_HCD_STI=y 435 435 CONFIG_USB_OHCI_HCD_PLATFORM=y
+3 -1
arch/arm/kernel/entry-common.S
··· 33 33 UNWIND(.fnstart ) 34 34 UNWIND(.cantunwind ) 35 35 disable_irq @ disable interrupts 36 - ldr r1, [tsk, #TI_FLAGS] 36 + ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing 37 + tst r1, #_TIF_SYSCALL_WORK 38 + bne __sys_trace_return 37 39 tst r1, #_TIF_WORK_MASK 38 40 bne fast_work_pending 39 41 asm_trace_hardirqs_on
+5 -4
arch/arm/kernel/perf_event_cpu.c
··· 304 304 static int of_pmu_irq_cfg(struct platform_device *pdev) 305 305 { 306 306 int i, irq; 307 - int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); 308 - 309 - if (!irqs) 310 - return -ENOMEM; 307 + int *irqs; 311 308 312 309 /* Don't bother with PPIs; they're already affine */ 313 310 irq = platform_get_irq(pdev, 0); 314 311 if (irq >= 0 && irq_is_percpu(irq)) 315 312 return 0; 313 + 314 + irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); 315 + if (!irqs) 316 + return -ENOMEM; 316 317 317 318 for (i = 0; i < pdev->num_resources; ++i) { 318 319 struct device_node *dn;
+13 -3
arch/arm/mach-imx/gpc.c
··· 280 280 struct device_node *np; 281 281 282 282 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc"); 283 - if (WARN_ON(!np || 284 - !of_find_property(np, "interrupt-controller", NULL))) 285 - pr_warn("Outdated DT detected, system is about to crash!!!\n"); 283 + if (WARN_ON(!np)) 284 + return; 285 + 286 + if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) { 287 + pr_warn("Outdated DT detected, suspend/resume will NOT work\n"); 288 + 289 + /* map GPC, so that at least CPUidle and WARs keep working */ 290 + gpc_base = of_iomap(np, 0); 291 + } 286 292 } 287 293 288 294 #ifdef CONFIG_PM_GENERIC_DOMAINS ··· 448 442 { 449 443 struct regulator *pu_reg; 450 444 int ret; 445 + 446 + /* bail out if DT too old and doesn't provide the necessary info */ 447 + if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells")) 448 + return 0; 451 449 452 450 pu_reg = devm_regulator_get_optional(&pdev->dev, "pu"); 453 451 if (PTR_ERR(pu_reg) == -ENODEV)
+1 -1
arch/arm/mach-pxa/pxa_cplds_irqs.c
··· 107 107 struct resource *res; 108 108 struct cplds *fpga; 109 109 int ret; 110 - unsigned int base_irq = 0; 110 + int base_irq; 111 111 unsigned long irqflags = 0; 112 112 113 113 fpga = devm_kzalloc(&pdev->dev, sizeof(*fpga), GFP_KERNEL);
+10 -10
arch/arm/mm/mmu.c
··· 1112 1112 } 1113 1113 1114 1114 /* 1115 - * Find the first non-section-aligned page, and point 1115 + * Find the first non-pmd-aligned page, and point 1116 1116 * memblock_limit at it. This relies on rounding the 1117 - * limit down to be section-aligned, which happens at 1118 - * the end of this function. 1117 + * limit down to be pmd-aligned, which happens at the 1118 + * end of this function. 1119 1119 * 1120 1120 * With this algorithm, the start or end of almost any 1121 - * bank can be non-section-aligned. The only exception 1122 - * is that the start of the bank 0 must be section- 1121 + * bank can be non-pmd-aligned. The only exception is 1122 + * that the start of the bank 0 must be section- 1123 1123 * aligned, since otherwise memory would need to be 1124 1124 * allocated when mapping the start of bank 0, which 1125 1125 * occurs before any free memory is mapped. 1126 1126 */ 1127 1127 if (!memblock_limit) { 1128 - if (!IS_ALIGNED(block_start, SECTION_SIZE)) 1128 + if (!IS_ALIGNED(block_start, PMD_SIZE)) 1129 1129 memblock_limit = block_start; 1130 - else if (!IS_ALIGNED(block_end, SECTION_SIZE)) 1130 + else if (!IS_ALIGNED(block_end, PMD_SIZE)) 1131 1131 memblock_limit = arm_lowmem_limit; 1132 1132 } 1133 1133 ··· 1137 1137 high_memory = __va(arm_lowmem_limit - 1) + 1; 1138 1138 1139 1139 /* 1140 - * Round the memblock limit down to a section size. This 1140 + * Round the memblock limit down to a pmd size. This 1141 1141 * helps to ensure that we will allocate memory from the 1142 - * last full section, which should be mapped. 1142 + * last full pmd, which should be mapped. 1143 1143 */ 1144 1144 if (memblock_limit) 1145 - memblock_limit = round_down(memblock_limit, SECTION_SIZE); 1145 + memblock_limit = round_down(memblock_limit, PMD_SIZE); 1146 1146 if (!memblock_limit) 1147 1147 memblock_limit = arm_lowmem_limit; 1148 1148
+1
arch/arm/xen/enlighten.c
··· 272 272 void xen_arch_post_suspend(int suspend_cancelled) { } 273 273 void xen_timer_resume(void) { } 274 274 void xen_arch_resume(void) { } 275 + void xen_arch_suspend(void) { } 275 276 276 277 277 278 /* In the hypervisor.S file. */
+10 -3
arch/ia64/pci/pci.c
··· 478 478 479 479 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) 480 480 { 481 - struct pci_controller *controller = bridge->bus->sysdata; 482 - 483 - ACPI_COMPANION_SET(&bridge->dev, controller->companion); 481 + /* 482 + * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL 483 + * here, pci_create_root_bus() has been called by someone else and 484 + * sysdata is likely to be different from what we expect. Let it go in 485 + * that case. 486 + */ 487 + if (!bridge->dev.parent) { 488 + struct pci_controller *controller = bridge->bus->sysdata; 489 + ACPI_COMPANION_SET(&bridge->dev, controller->companion); 490 + } 484 491 return 0; 485 492 } 486 493
+3
arch/mips/ath79/prom.c
··· 1 1 /* 2 2 * Atheros AR71XX/AR724X/AR913X specific prom routines 3 3 * 4 + * Copyright (C) 2015 Laurent Fasnacht <l@libres.ch> 4 5 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> 5 6 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> 6 7 * ··· 26 25 { 27 26 fw_init_cmdline(); 28 27 28 + #ifdef CONFIG_BLK_DEV_INITRD 29 29 /* Read the initrd address from the firmware environment */ 30 30 initrd_start = fw_getenvl("initrd_start"); 31 31 if (initrd_start) { 32 32 initrd_start = KSEG0ADDR(initrd_start); 33 33 initrd_end = initrd_start + fw_getenvl("initrd_size"); 34 34 } 35 + #endif 35 36 } 36 37 37 38 void __init prom_free_prom_memory(void)
+1 -1
arch/mips/configs/fuloong2e_defconfig
··· 194 194 CONFIG_USB_C67X00_HCD=m 195 195 CONFIG_USB_EHCI_HCD=y 196 196 CONFIG_USB_EHCI_ROOT_HUB_TT=y 197 - CONFIG_USB_ISP1760_HCD=m 197 + CONFIG_USB_ISP1760=m 198 198 CONFIG_USB_OHCI_HCD=y 199 199 CONFIG_USB_UHCI_HCD=m 200 200 CONFIG_USB_R8A66597_HCD=m
+1 -1
arch/mips/kernel/irq.c
··· 29 29 int kgdb_early_setup; 30 30 #endif 31 31 32 - static unsigned long irq_map[NR_IRQS / BITS_PER_LONG]; 32 + static DECLARE_BITMAP(irq_map, NR_IRQS); 33 33 34 34 int allocate_irqno(void) 35 35 {
+1 -1
arch/mips/kernel/smp-bmips.c
··· 444 444 static void bmips_wr_vec(unsigned long dst, char *start, char *end) 445 445 { 446 446 memcpy((void *)dst, start, end - start); 447 - dma_cache_wback((unsigned long)start, end - start); 447 + dma_cache_wback(dst, end - start); 448 448 local_flush_icache_range(dst, dst + (end - start)); 449 449 instruction_hazard(); 450 450 }
+13 -2
arch/mips/lib/strnlen_user.S
··· 34 34 FEXPORT(__strnlen_\func\()_nocheck_asm) 35 35 move v0, a0 36 36 PTR_ADDU a1, a0 # stop pointer 37 - 1: beq v0, a1, 1f # limit reached? 37 + 1: 38 + #ifdef CONFIG_CPU_DADDI_WORKAROUNDS 39 + .set noat 40 + li AT, 1 41 + #endif 42 + beq v0, a1, 1f # limit reached? 38 43 .ifeqs "\func", "kernel" 39 44 EX(lb, t0, (v0), .Lfault\@) 40 45 .else ··· 47 42 .endif 48 43 .set noreorder 49 44 bnez t0, 1b 50 - 1: PTR_ADDIU v0, 1 45 + 1: 46 + #ifndef CONFIG_CPU_DADDI_WORKAROUNDS 47 + PTR_ADDIU v0, 1 48 + #else 49 + PTR_ADDU v0, AT 50 + .set at 51 + #endif 51 52 .set reorder 52 53 PTR_SUBU v0, a0 53 54 jr ra
+2 -2
arch/powerpc/kernel/mce.c
··· 73 73 uint64_t nip, uint64_t addr) 74 74 { 75 75 uint64_t srr1; 76 - int index = __this_cpu_inc_return(mce_nest_count); 76 + int index = __this_cpu_inc_return(mce_nest_count) - 1; 77 77 struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]); 78 78 79 79 /* ··· 184 184 if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) 185 185 return; 186 186 187 - index = __this_cpu_inc_return(mce_queue_count); 187 + index = __this_cpu_inc_return(mce_queue_count) - 1; 188 188 /* If queue is full, just return for now. */ 189 189 if (index >= MAX_MC_EVT) { 190 190 __this_cpu_dec(mce_queue_count);
+1
arch/powerpc/kernel/vmlinux.lds.S
··· 213 213 *(.opd) 214 214 } 215 215 216 + . = ALIGN(256); 216 217 .got : AT(ADDR(.got) - LOAD_OFFSET) { 217 218 __toc_start = .; 218 219 #ifndef CONFIG_RELOCATABLE
+3 -2
arch/powerpc/kvm/book3s_hv.c
··· 1952 1952 */ 1953 1953 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) 1954 1954 { 1955 - struct kvm_vcpu *vcpu; 1955 + struct kvm_vcpu *vcpu, *vnext; 1956 1956 int i; 1957 1957 int srcu_idx; 1958 1958 ··· 1982 1982 */ 1983 1983 if ((threads_per_core > 1) && 1984 1984 ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) { 1985 - list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { 1985 + list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, 1986 + arch.run_list) { 1986 1987 vcpu->arch.ret = -EBUSY; 1987 1988 kvmppc_remove_runnable(vc, vcpu); 1988 1989 wake_up(&vcpu->arch.cpu_run);
+16 -9
arch/powerpc/mm/hugetlbpage.c
··· 689 689 struct page * 690 690 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) 691 691 { 692 - pte_t *ptep; 693 - struct page *page; 692 + pte_t *ptep, pte; 694 693 unsigned shift; 695 694 unsigned long mask, flags; 695 + struct page *page = ERR_PTR(-EINVAL); 696 + 697 + local_irq_save(flags); 698 + ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift); 699 + if (!ptep) 700 + goto no_page; 701 + pte = READ_ONCE(*ptep); 696 702 /* 703 + * Verify it is a huge page else bail. 697 704 * Transparent hugepages are handled by generic code. We can skip them 698 705 * here. 699 706 */ 700 - local_irq_save(flags); 701 - ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift); 707 + if (!shift || pmd_trans_huge(__pmd(pte_val(pte)))) 708 + goto no_page; 702 709 703 - /* Verify it is a huge page else bail. */ 704 - if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) { 705 - local_irq_restore(flags); 706 - return ERR_PTR(-EINVAL); 710 + if (!pte_present(pte)) { 711 + page = NULL; 712 + goto no_page; 707 713 } 708 714 mask = (1UL << shift) - 1; 709 - page = pte_page(*ptep); 715 + page = pte_page(pte); 710 716 if (page) 711 717 page += (address & mask) / PAGE_SIZE; 712 718 719 + no_page: 713 720 local_irq_restore(flags); 714 721 return page; 715 722 }
+11
arch/powerpc/mm/pgtable_64.c
··· 839 839 * hash fault look at them. 840 840 */ 841 841 memset(pgtable, 0, PTE_FRAG_SIZE); 842 + /* 843 + * Serialize against find_linux_pte_or_hugepte which does lock-less 844 + * lookup in page tables with local interrupts disabled. For huge pages 845 + * it casts pmd_t to pte_t. Since format of pte_t is different from 846 + * pmd_t we want to prevent transit from pmd pointing to page table 847 + * to pmd pointing to huge page (and back) while interrupts are disabled. 848 + * We clear pmd to possibly replace it with page table pointer in 849 + * different code paths. So make sure we wait for the parallel 850 + * find_linux_pte_or_hugepage to finish. 851 + */ 852 + kick_all_cpus_sync(); 842 853 return old_pmd; 843 854 } 844 855
+13 -12
arch/s390/crypto/ghash_s390.c
··· 16 16 #define GHASH_DIGEST_SIZE 16 17 17 18 18 struct ghash_ctx { 19 - u8 icv[16]; 20 - u8 key[16]; 19 + u8 key[GHASH_BLOCK_SIZE]; 21 20 }; 22 21 23 22 struct ghash_desc_ctx { 23 + u8 icv[GHASH_BLOCK_SIZE]; 24 + u8 key[GHASH_BLOCK_SIZE]; 24 25 u8 buffer[GHASH_BLOCK_SIZE]; 25 26 u32 bytes; 26 27 }; ··· 29 28 static int ghash_init(struct shash_desc *desc) 30 29 { 31 30 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); 31 + struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); 32 32 33 33 memset(dctx, 0, sizeof(*dctx)); 34 + memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE); 34 35 35 36 return 0; 36 37 } ··· 48 45 } 49 46 50 47 memcpy(ctx->key, key, GHASH_BLOCK_SIZE); 51 - memset(ctx->icv, 0, GHASH_BLOCK_SIZE); 52 48 53 49 return 0; 54 50 } ··· 56 54 const u8 *src, unsigned int srclen) 57 55 { 58 56 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); 59 - struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); 60 57 unsigned int n; 61 58 u8 *buf = dctx->buffer; 62 59 int ret; ··· 71 70 src += n; 72 71 73 72 if (!dctx->bytes) { 74 - ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, 73 + ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, 75 74 GHASH_BLOCK_SIZE); 76 75 if (ret != GHASH_BLOCK_SIZE) 77 76 return -EIO; ··· 80 79 81 80 n = srclen & ~(GHASH_BLOCK_SIZE - 1); 82 81 if (n) { 83 - ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n); 82 + ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n); 84 83 if (ret != n) 85 84 return -EIO; 86 85 src += n; ··· 95 94 return 0; 96 95 } 97 96 98 - static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) 97 + static int ghash_flush(struct ghash_desc_ctx *dctx) 99 98 { 100 99 u8 *buf = dctx->buffer; 101 100 int ret; ··· 105 104 106 105 memset(pos, 0, dctx->bytes); 107 106 108 - ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE); 107 + ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE); 109 108 if (ret != GHASH_BLOCK_SIZE) 110 109 return -EIO; 110 + 111 + dctx->bytes = 0; 111 112 } 112 113 113 - dctx->bytes = 0; 
114 114 return 0; 115 115 } 116 116 117 117 static int ghash_final(struct shash_desc *desc, u8 *dst) 118 118 { 119 119 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); 120 - struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); 121 120 int ret; 122 121 123 - ret = ghash_flush(ctx, dctx); 122 + ret = ghash_flush(dctx); 124 123 if (!ret) 125 - memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE); 124 + memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE); 126 125 return ret; 127 126 } 128 127
+1 -1
arch/s390/crypto/prng.c
··· 125 125 /* fill page with urandom bytes */ 126 126 get_random_bytes(pg, PAGE_SIZE); 127 127 /* exor page with stckf values */ 128 - for (n = 0; n < sizeof(PAGE_SIZE/sizeof(u64)); n++) { 128 + for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) { 129 129 u64 *p = ((u64 *)pg) + n; 130 130 *p ^= get_tod_clock_fast(); 131 131 }
+1 -1
arch/s390/include/asm/pgtable.h
··· 494 494 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0; 495 495 } 496 496 497 - static inline int pmd_pfn(pmd_t pmd) 497 + static inline unsigned long pmd_pfn(pmd_t pmd) 498 498 { 499 499 unsigned long origin_mask; 500 500
+10 -9
arch/s390/net/bpf_jit_comp.c
··· 443 443 444 444 /* 445 445 * Compile one eBPF instruction into s390x code 446 + * 447 + * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of 448 + * stack space for the large switch statement. 446 449 */ 447 - static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i) 450 + static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i) 448 451 { 449 452 struct bpf_insn *insn = &fp->insnsi[i]; 450 453 int jmp_off, last, insn_count = 1; ··· 591 588 EMIT4(0xb9160000, dst_reg, rc_reg); 592 589 break; 593 590 } 594 - case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / (u32) src */ 595 - case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % (u32) src */ 591 + case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */ 592 + case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */ 596 593 { 597 594 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0; 598 595 ··· 605 602 EMIT4_IMM(0xa7090000, REG_W0, 0); 606 603 /* lgr %w1,%dst */ 607 604 EMIT4(0xb9040000, REG_W1, dst_reg); 608 - /* llgfr %dst,%src (u32 cast) */ 609 - EMIT4(0xb9160000, dst_reg, src_reg); 610 605 /* dlgr %w0,%dst */ 611 - EMIT4(0xb9870000, REG_W0, dst_reg); 606 + EMIT4(0xb9870000, REG_W0, src_reg); 612 607 /* lgr %dst,%rc */ 613 608 EMIT4(0xb9040000, dst_reg, rc_reg); 614 609 break; ··· 633 632 EMIT4(0xb9160000, dst_reg, rc_reg); 634 633 break; 635 634 } 636 - case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / (u32) imm */ 637 - case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % (u32) imm */ 635 + case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */ 636 + case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */ 638 637 { 639 638 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? 
REG_W1 : REG_W0; 640 639 ··· 650 649 EMIT4(0xb9040000, REG_W1, dst_reg); 651 650 /* dlg %w0,<d(imm)>(%l) */ 652 651 EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L, 653 - EMIT_CONST_U64((u32) imm)); 652 + EMIT_CONST_U64(imm)); 654 653 /* lgr %dst,%rc */ 655 654 EMIT4(0xb9040000, dst_reg, rc_reg); 656 655 break;
+2 -1
arch/sparc/include/asm/cpudata_64.h
··· 24 24 unsigned int icache_line_size; 25 25 unsigned int ecache_size; 26 26 unsigned int ecache_line_size; 27 - int core_id; 27 + unsigned short sock_id; 28 + unsigned short core_id; 28 29 int proc_id; 29 30 } cpuinfo_sparc; 30 31
+21 -1
arch/sparc/include/asm/pgtable_64.h
··· 308 308 " sllx %1, 32, %1\n" 309 309 " or %0, %1, %0\n" 310 310 " .previous\n" 311 + " .section .sun_m7_2insn_patch, \"ax\"\n" 312 + " .word 661b\n" 313 + " sethi %%uhi(%4), %1\n" 314 + " sethi %%hi(%4), %0\n" 315 + " .word 662b\n" 316 + " or %1, %%ulo(%4), %1\n" 317 + " or %0, %%lo(%4), %0\n" 318 + " .word 663b\n" 319 + " sllx %1, 32, %1\n" 320 + " or %0, %1, %0\n" 321 + " .previous\n" 311 322 : "=r" (mask), "=r" (tmp) 312 323 : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U | 313 324 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | 314 325 _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U), 315 326 "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | 316 327 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | 328 + _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V), 329 + "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | 330 + _PAGE_CP_4V | _PAGE_E_4V | 317 331 _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V)); 318 332 319 333 return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask)); ··· 356 342 " andn %0, %4, %0\n" 357 343 " or %0, %5, %0\n" 358 344 " .previous\n" 345 + " .section .sun_m7_2insn_patch, \"ax\"\n" 346 + " .word 661b\n" 347 + " andn %0, %6, %0\n" 348 + " or %0, %5, %0\n" 349 + " .previous\n" 359 350 : "=r" (val) 360 351 : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U), 361 - "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V)); 352 + "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V), 353 + "i" (_PAGE_CP_4V)); 362 354 363 355 return __pgprot(val); 364 356 }
+2 -1
arch/sparc/include/asm/topology_64.h
··· 40 40 #ifdef CONFIG_SMP 41 41 #define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) 42 42 #define topology_core_id(cpu) (cpu_data(cpu).core_id) 43 - #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) 43 + #define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu]) 44 44 #define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) 45 45 #endif /* CONFIG_SMP */ 46 46 47 47 extern cpumask_t cpu_core_map[NR_CPUS]; 48 + extern cpumask_t cpu_core_sib_map[NR_CPUS]; 48 49 static inline const struct cpumask *cpu_coregroup_mask(int cpu) 49 50 { 50 51 return &cpu_core_map[cpu];
+2
arch/sparc/include/asm/trap_block.h
··· 79 79 }; 80 80 extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, 81 81 __sun4v_2insn_patch_end; 82 + extern struct sun4v_2insn_patch_entry __sun_m7_2insn_patch, 83 + __sun_m7_2insn_patch_end; 82 84 83 85 84 86 #endif /* !(__ASSEMBLY__) */
+2
arch/sparc/kernel/entry.h
··· 69 69 struct sun4v_1insn_patch_entry *); 70 70 void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *, 71 71 struct sun4v_2insn_patch_entry *); 72 + void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *, 73 + struct sun4v_2insn_patch_entry *); 72 74 extern unsigned int dcache_parity_tl1_occurred; 73 75 extern unsigned int icache_parity_tl1_occurred; 74 76
-1
arch/sparc/kernel/leon_pci_grpci2.c
··· 723 723 err = -ENOMEM; 724 724 goto err1; 725 725 } 726 - memset(grpci2priv, 0, sizeof(*grpci2priv)); 727 726 priv->regs = regs; 728 727 priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */ 729 728 priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT;
+111 -27
arch/sparc/kernel/mdesc.c
··· 614 614 } 615 615 } 616 616 617 - static void mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id) 617 + static void find_back_node_value(struct mdesc_handle *hp, u64 node, 618 + char *srch_val, 619 + void (*func)(struct mdesc_handle *, u64, int), 620 + u64 val, int depth) 618 621 { 619 - u64 a; 622 + u64 arc; 620 623 621 - mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) { 622 - u64 t = mdesc_arc_target(hp, a); 623 - const char *name; 624 - const u64 *id; 624 + /* Since we have an estimate of recursion depth, do a sanity check. */ 625 + if (depth == 0) 626 + return; 625 627 626 - name = mdesc_node_name(hp, t); 627 - if (!strcmp(name, "cpu")) { 628 - id = mdesc_get_property(hp, t, "id", NULL); 629 - if (*id < NR_CPUS) 630 - cpu_data(*id).core_id = core_id; 631 - } else { 632 - u64 j; 628 + mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) { 629 + u64 n = mdesc_arc_target(hp, arc); 630 + const char *name = mdesc_node_name(hp, n); 633 631 634 - mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) { 635 - u64 n = mdesc_arc_target(hp, j); 636 - const char *n_name; 632 + if (!strcmp(srch_val, name)) 633 + (*func)(hp, n, val); 637 634 638 - n_name = mdesc_node_name(hp, n); 639 - if (strcmp(n_name, "cpu")) 640 - continue; 641 - 642 - id = mdesc_get_property(hp, n, "id", NULL); 643 - if (*id < NR_CPUS) 644 - cpu_data(*id).core_id = core_id; 645 - } 646 - } 635 + find_back_node_value(hp, n, srch_val, func, val, depth-1); 647 636 } 637 + } 638 + 639 + static void __mark_core_id(struct mdesc_handle *hp, u64 node, 640 + int core_id) 641 + { 642 + const u64 *id = mdesc_get_property(hp, node, "id", NULL); 643 + 644 + if (*id < num_possible_cpus()) 645 + cpu_data(*id).core_id = core_id; 646 + } 647 + 648 + static void __mark_sock_id(struct mdesc_handle *hp, u64 node, 649 + int sock_id) 650 + { 651 + const u64 *id = mdesc_get_property(hp, node, "id", NULL); 652 + 653 + if (*id < num_possible_cpus()) 654 + cpu_data(*id).sock_id = sock_id; 655 + } 656 + 657 + static 
void mark_core_ids(struct mdesc_handle *hp, u64 mp, 658 + int core_id) 659 + { 660 + find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10); 661 + } 662 + 663 + static void mark_sock_ids(struct mdesc_handle *hp, u64 mp, 664 + int sock_id) 665 + { 666 + find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10); 648 667 } 649 668 650 669 static void set_core_ids(struct mdesc_handle *hp) ··· 672 653 u64 mp; 673 654 674 655 idx = 1; 656 + 657 + /* Identify unique cores by looking for cpus backpointed to by 658 + * level 1 instruction caches. 659 + */ 675 660 mdesc_for_each_node_by_name(hp, mp, "cache") { 676 661 const u64 *level; 677 662 const char *type; ··· 690 667 continue; 691 668 692 669 mark_core_ids(hp, mp, idx); 693 - 694 670 idx++; 695 671 } 672 + } 673 + 674 + static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level) 675 + { 676 + u64 mp; 677 + int idx = 1; 678 + int fnd = 0; 679 + 680 + /* Identify unique sockets by looking for cpus backpointed to by 681 + * shared level n caches. 
682 + */ 683 + mdesc_for_each_node_by_name(hp, mp, "cache") { 684 + const u64 *cur_lvl; 685 + 686 + cur_lvl = mdesc_get_property(hp, mp, "level", NULL); 687 + if (*cur_lvl != level) 688 + continue; 689 + 690 + mark_sock_ids(hp, mp, idx); 691 + idx++; 692 + fnd = 1; 693 + } 694 + return fnd; 695 + } 696 + 697 + static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp) 698 + { 699 + int idx = 1; 700 + 701 + mdesc_for_each_node_by_name(hp, mp, "socket") { 702 + u64 a; 703 + 704 + mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { 705 + u64 t = mdesc_arc_target(hp, a); 706 + const char *name; 707 + const u64 *id; 708 + 709 + name = mdesc_node_name(hp, t); 710 + if (strcmp(name, "cpu")) 711 + continue; 712 + 713 + id = mdesc_get_property(hp, t, "id", NULL); 714 + if (*id < num_possible_cpus()) 715 + cpu_data(*id).sock_id = idx; 716 + } 717 + idx++; 718 + } 719 + } 720 + 721 + static void set_sock_ids(struct mdesc_handle *hp) 722 + { 723 + u64 mp; 724 + 725 + /* If machine description exposes sockets data use it. 726 + * Otherwise fallback to use shared L3 or L2 caches. 727 + */ 728 + mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets"); 729 + if (mp != MDESC_NODE_NULL) 730 + return set_sock_ids_by_socket(hp, mp); 731 + 732 + if (!set_sock_ids_by_cache(hp, 3)) 733 + set_sock_ids_by_cache(hp, 2); 696 734 } 697 735 698 736 static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) ··· 791 707 continue; 792 708 793 709 mark_proc_ids(hp, mp, idx); 794 - 795 710 idx++; 796 711 } 797 712 } ··· 983 900 984 901 set_core_ids(hp); 985 902 set_proc_ids(hp); 903 + set_sock_ids(hp); 986 904 987 905 mdesc_release(hp); 988 906
+51 -8
arch/sparc/kernel/pci.c
··· 1002 1002 subsys_initcall(pcibios_init); 1003 1003 1004 1004 #ifdef CONFIG_SYSFS 1005 + 1006 + #define SLOT_NAME_SIZE 11 /* Max decimal digits + null in u32 */ 1007 + 1008 + static void pcie_bus_slot_names(struct pci_bus *pbus) 1009 + { 1010 + struct pci_dev *pdev; 1011 + struct pci_bus *bus; 1012 + 1013 + list_for_each_entry(pdev, &pbus->devices, bus_list) { 1014 + char name[SLOT_NAME_SIZE]; 1015 + struct pci_slot *pci_slot; 1016 + const u32 *slot_num; 1017 + int len; 1018 + 1019 + slot_num = of_get_property(pdev->dev.of_node, 1020 + "physical-slot#", &len); 1021 + 1022 + if (slot_num == NULL || len != 4) 1023 + continue; 1024 + 1025 + snprintf(name, sizeof(name), "%u", slot_num[0]); 1026 + pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL); 1027 + 1028 + if (IS_ERR(pci_slot)) 1029 + pr_err("PCI: pci_create_slot returned %ld.\n", 1030 + PTR_ERR(pci_slot)); 1031 + } 1032 + 1033 + list_for_each_entry(bus, &pbus->children, node) 1034 + pcie_bus_slot_names(bus); 1035 + } 1036 + 1005 1037 static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus) 1006 1038 { 1007 1039 const struct pci_slot_names { ··· 1085 1053 1086 1054 while ((pbus = pci_find_next_bus(pbus)) != NULL) { 1087 1055 struct device_node *node; 1056 + struct pci_dev *pdev; 1088 1057 1089 - if (pbus->self) { 1090 - /* PCI->PCI bridge */ 1091 - node = pbus->self->dev.of_node; 1058 + pdev = list_first_entry(&pbus->devices, struct pci_dev, 1059 + bus_list); 1060 + 1061 + if (pdev && pci_is_pcie(pdev)) { 1062 + pcie_bus_slot_names(pbus); 1092 1063 } else { 1093 - struct pci_pbm_info *pbm = pbus->sysdata; 1094 1064 1095 - /* Host PCI controller */ 1096 - node = pbm->op->dev.of_node; 1065 + if (pbus->self) { 1066 + 1067 + /* PCI->PCI bridge */ 1068 + node = pbus->self->dev.of_node; 1069 + 1070 + } else { 1071 + struct pci_pbm_info *pbm = pbus->sysdata; 1072 + 1073 + /* Host PCI controller */ 1074 + node = pbm->op->dev.of_node; 1075 + } 1076 + 1077 + pci_bus_slot_names(node, pbus); 
1097 1078 } 1098 - 1099 - pci_bus_slot_names(node, pbus); 1100 1079 } 1101 1080 1102 1081 return 0;
+21
arch/sparc/kernel/setup_64.c
··· 255 255 } 256 256 } 257 257 258 + void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start, 259 + struct sun4v_2insn_patch_entry *end) 260 + { 261 + while (start < end) { 262 + unsigned long addr = start->addr; 263 + 264 + *(unsigned int *) (addr + 0) = start->insns[0]; 265 + wmb(); 266 + __asm__ __volatile__("flush %0" : : "r" (addr + 0)); 267 + 268 + *(unsigned int *) (addr + 4) = start->insns[1]; 269 + wmb(); 270 + __asm__ __volatile__("flush %0" : : "r" (addr + 4)); 271 + 272 + start++; 273 + } 274 + } 275 + 258 276 static void __init sun4v_patch(void) 259 277 { 260 278 extern void sun4v_hvapi_init(void); ··· 285 267 286 268 sun4v_patch_2insn_range(&__sun4v_2insn_patch, 287 269 &__sun4v_2insn_patch_end); 270 + if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7) 271 + sun_m7_patch_2insn_range(&__sun_m7_2insn_patch, 272 + &__sun_m7_2insn_patch_end); 288 273 289 274 sun4v_hvapi_init(); 290 275 }
+13
arch/sparc/kernel/smp_64.c
··· 60 60 cpumask_t cpu_core_map[NR_CPUS] __read_mostly = 61 61 { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; 62 62 63 + cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = { 64 + [0 ... NR_CPUS-1] = CPU_MASK_NONE }; 65 + 63 66 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 64 67 EXPORT_SYMBOL(cpu_core_map); 68 + EXPORT_SYMBOL(cpu_core_sib_map); 65 69 66 70 static cpumask_t smp_commenced_mask; 67 71 ··· 1244 1240 if (cpu_data(i).core_id == 1245 1241 cpu_data(j).core_id) 1246 1242 cpumask_set_cpu(j, &cpu_core_map[i]); 1243 + } 1244 + } 1245 + 1246 + for_each_present_cpu(i) { 1247 + unsigned int j; 1248 + 1249 + for_each_present_cpu(j) { 1250 + if (cpu_data(i).sock_id == cpu_data(j).sock_id) 1251 + cpumask_set_cpu(j, &cpu_core_sib_map[i]); 1247 1252 } 1248 1253 } 1249 1254
+5
arch/sparc/kernel/vmlinux.lds.S
··· 138 138 *(.pause_3insn_patch) 139 139 __pause_3insn_patch_end = .; 140 140 } 141 + .sun_m7_2insn_patch : { 142 + __sun_m7_2insn_patch = .; 143 + *(.sun_m7_2insn_patch) 144 + __sun_m7_2insn_patch_end = .; 145 + } 141 146 PERCPU_SECTION(SMP_CACHE_BYTES) 142 147 143 148 . = ALIGN(PAGE_SIZE);
+53 -21
arch/sparc/mm/init_64.c
··· 54 54 #include "init_64.h" 55 55 56 56 unsigned long kern_linear_pte_xor[4] __read_mostly; 57 + static unsigned long page_cache4v_flag; 57 58 58 59 /* A bitmap, two bits for every 256MB of physical memory. These two 59 60 * bits determine what page size we use for kernel linear ··· 1910 1909 1911 1910 static void __init sun4v_linear_pte_xor_finalize(void) 1912 1911 { 1912 + unsigned long pagecv_flag; 1913 + 1914 + /* Bit 9 of TTE is no longer CV bit on M7 processor and it instead 1915 + * enables MCD error. Do not set bit 9 on M7 processor. 1916 + */ 1917 + switch (sun4v_chip_type) { 1918 + case SUN4V_CHIP_SPARC_M7: 1919 + pagecv_flag = 0x00; 1920 + break; 1921 + default: 1922 + pagecv_flag = _PAGE_CV_4V; 1923 + break; 1924 + } 1913 1925 #ifndef CONFIG_DEBUG_PAGEALLOC 1914 1926 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) { 1915 1927 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ 1916 1928 PAGE_OFFSET; 1917 - kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | 1929 + kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag | 1918 1930 _PAGE_P_4V | _PAGE_W_4V); 1919 1931 } else { 1920 1932 kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; ··· 1936 1922 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) { 1937 1923 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^ 1938 1924 PAGE_OFFSET; 1939 - kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V | 1925 + kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag | 1940 1926 _PAGE_P_4V | _PAGE_W_4V); 1941 1927 } else { 1942 1928 kern_linear_pte_xor[2] = kern_linear_pte_xor[1]; ··· 1945 1931 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) { 1946 1932 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^ 1947 1933 PAGE_OFFSET; 1948 - kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V | 1934 + kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag | 1949 1935 _PAGE_P_4V | _PAGE_W_4V); 1950 1936 } else { 1951 1937 kern_linear_pte_xor[3] = kern_linear_pte_xor[2]; ··· 1971 1957 1972 1958 return available; 1973 1959 } 1960 + 1961 
+ #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) 1962 + #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) 1963 + #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) 1964 + #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) 1965 + #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) 1966 + #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) 1974 1967 1975 1968 /* We need to exclude reserved regions. This exclusion will include 1976 1969 * vmlinux and initrd. To be more precise the initrd size could be used to ··· 2054 2033 #ifndef CONFIG_DEBUG_PAGEALLOC 2055 2034 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); 2056 2035 #endif 2036 + 2037 + /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde 2038 + * bit on M7 processor. This is a conflicting usage of the same 2039 + * bit. Enabling TTE.cv on M7 would turn on Memory Corruption 2040 + * Detection error on all pages and this will lead to problems 2041 + * later. Kernel does not run with MCD enabled and hence rest 2042 + * of the required steps to fully configure memory corruption 2043 + * detection are not taken. We need to ensure TTE.mcde is not 2044 + * set on M7 processor. Compute the value of cacheability 2045 + * flag for use later taking this into consideration. 
2046 + */ 2047 + switch (sun4v_chip_type) { 2048 + case SUN4V_CHIP_SPARC_M7: 2049 + page_cache4v_flag = _PAGE_CP_4V; 2050 + break; 2051 + default: 2052 + page_cache4v_flag = _PAGE_CACHE_4V; 2053 + break; 2054 + } 2057 2055 2058 2056 if (tlb_type == hypervisor) 2059 2057 sun4v_pgprot_init(); ··· 2314 2274 } 2315 2275 #endif 2316 2276 2317 - #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) 2318 - #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) 2319 - #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) 2320 - #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) 2321 - #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) 2322 - #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) 2323 - 2324 2277 pgprot_t PAGE_KERNEL __read_mostly; 2325 2278 EXPORT_SYMBOL(PAGE_KERNEL); 2326 2279 ··· 2345 2312 _PAGE_P_4U | _PAGE_W_4U); 2346 2313 if (tlb_type == hypervisor) 2347 2314 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | 2348 - _PAGE_CP_4V | _PAGE_CV_4V | 2349 - _PAGE_P_4V | _PAGE_W_4V); 2315 + page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V); 2350 2316 2351 2317 pte_base |= _PAGE_PMD_HUGE; 2352 2318 ··· 2482 2450 int i; 2483 2451 2484 2452 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | 2485 - _PAGE_CACHE_4V | _PAGE_P_4V | 2453 + page_cache4v_flag | _PAGE_P_4V | 2486 2454 __ACCESS_BITS_4V | __DIRTY_BITS_4V | 2487 2455 _PAGE_EXEC_4V); 2488 2456 PAGE_KERNEL_LOCKED = PAGE_KERNEL; 2489 2457 2490 2458 _PAGE_IE = _PAGE_IE_4V; 2491 2459 _PAGE_E = _PAGE_E_4V; 2492 - _PAGE_CACHE = _PAGE_CACHE_4V; 2460 + _PAGE_CACHE = page_cache4v_flag; 2493 2461 2494 2462 #ifdef CONFIG_DEBUG_PAGEALLOC 2495 2463 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET; ··· 2497 2465 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ 2498 2466 PAGE_OFFSET; 2499 2467 #endif 2500 - kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | 2501 - _PAGE_P_4V | _PAGE_W_4V); 2468 + kern_linear_pte_xor[0] |= (page_cache4v_flag | 
_PAGE_P_4V | 2469 + _PAGE_W_4V); 2502 2470 2503 2471 for (i = 1; i < 4; i++) 2504 2472 kern_linear_pte_xor[i] = kern_linear_pte_xor[0]; ··· 2511 2479 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | 2512 2480 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); 2513 2481 2514 - page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V; 2515 - page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | 2482 + page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag; 2483 + page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag | 2516 2484 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); 2517 - page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | 2485 + page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag | 2518 2486 __ACCESS_BITS_4V | _PAGE_EXEC_4V); 2519 - page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | 2487 + page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag | 2520 2488 __ACCESS_BITS_4V | _PAGE_EXEC_4V); 2521 2489 2522 2490 page_exec_bit = _PAGE_EXEC_4V; ··· 2574 2542 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); 2575 2543 if (tlb_type == hypervisor) 2576 2544 val = (_PAGE_VALID | _PAGE_SZ4MB_4V | 2577 - _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | 2545 + page_cache4v_flag | _PAGE_P_4V | 2578 2546 _PAGE_EXEC_4V | _PAGE_W_4V); 2579 2547 2580 2548 return val | paddr;
+3
arch/x86/include/asm/kvm_host.h
··· 207 207 unsigned nxe:1; 208 208 unsigned cr0_wp:1; 209 209 unsigned smep_andnot_wp:1; 210 + unsigned smap_andnot_wp:1; 210 211 }; 211 212 }; 212 213 ··· 401 400 struct kvm_mmu_memory_cache mmu_page_header_cache; 402 401 403 402 struct fpu guest_fpu; 403 + bool eager_fpu; 404 404 u64 xcr0; 405 405 u64 guest_supported_xcr0; 406 406 u32 guest_xstate_size; ··· 745 743 void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); 746 744 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); 747 745 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); 746 + void (*fpu_activate)(struct kvm_vcpu *vcpu); 748 747 void (*fpu_deactivate)(struct kvm_vcpu *vcpu); 749 748 750 749 void (*tlb_flush)(struct kvm_vcpu *vcpu);
+1
arch/x86/include/uapi/asm/msr-index.h
··· 140 140 #define MSR_CORE_C3_RESIDENCY 0x000003fc 141 141 #define MSR_CORE_C6_RESIDENCY 0x000003fd 142 142 #define MSR_CORE_C7_RESIDENCY 0x000003fe 143 + #define MSR_KNL_CORE_C6_RESIDENCY 0x000003ff 143 144 #define MSR_PKG_C2_RESIDENCY 0x0000060d 144 145 #define MSR_PKG_C8_RESIDENCY 0x00000630 145 146 #define MSR_PKG_C9_RESIDENCY 0x00000631
+5 -2
arch/x86/kernel/cpu/mcheck/mce.c
··· 708 708 struct pt_regs *regs) 709 709 { 710 710 int i, ret = 0; 711 + char *tmp; 711 712 712 713 for (i = 0; i < mca_cfg.banks; i++) { 713 714 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); ··· 717 716 if (quirk_no_way_out) 718 717 quirk_no_way_out(i, m, regs); 719 718 } 720 - if (mce_severity(m, mca_cfg.tolerant, msg, true) >= 721 - MCE_PANIC_SEVERITY) 719 + 720 + if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { 721 + *msg = tmp; 722 722 ret = 1; 723 + } 723 724 } 724 725 return ret; 725 726 }
+15
arch/x86/kernel/i387.c
··· 173 173 xstate_size = sizeof(struct i387_fxsave_struct); 174 174 else 175 175 xstate_size = sizeof(struct i387_fsave_struct); 176 + 177 + /* 178 + * Quirk: we don't yet handle the XSAVES* instructions 179 + * correctly, as we don't correctly convert between 180 + * standard and compacted format when interfacing 181 + * with user-space - so disable it for now. 182 + * 183 + * The difference is small: with recent CPUs the 184 + * compacted format is only marginally smaller than 185 + * the standard FPU state format. 186 + * 187 + * ( This is easy to backport while we are fixing 188 + * XSAVES* support. ) 189 + */ 190 + setup_clear_cpu_cap(X86_FEATURE_XSAVES); 176 191 } 177 192 178 193 /*
+4
arch/x86/kvm/cpuid.c
··· 16 16 #include <linux/module.h> 17 17 #include <linux/vmalloc.h> 18 18 #include <linux/uaccess.h> 19 + #include <asm/i387.h> /* For use_eager_fpu. Ugh! */ 20 + #include <asm/fpu-internal.h> /* For use_eager_fpu. Ugh! */ 19 21 #include <asm/user.h> 20 22 #include <asm/xsave.h> 21 23 #include "cpuid.h" ··· 96 94 best = kvm_find_cpuid_entry(vcpu, 0xD, 1); 97 95 if (best && (best->eax & (F(XSAVES) | F(XSAVEC)))) 98 96 best->ebx = xstate_required_size(vcpu->arch.xcr0, true); 97 + 98 + vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu); 99 99 100 100 /* 101 101 * The existing code assumes virtual address is 48-bit in the canonical
+8
arch/x86/kvm/cpuid.h
··· 117 117 best = kvm_find_cpuid_entry(vcpu, 7, 0); 118 118 return best && (best->ebx & bit(X86_FEATURE_RTM)); 119 119 } 120 + 121 + static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu) 122 + { 123 + struct kvm_cpuid_entry2 *best; 124 + 125 + best = kvm_find_cpuid_entry(vcpu, 7, 0); 126 + return best && (best->ebx & bit(X86_FEATURE_MPX)); 127 + } 120 128 #endif
+12 -4
arch/x86/kvm/mmu.c
··· 3736 3736 } 3737 3737 } 3738 3738 3739 - void update_permission_bitmask(struct kvm_vcpu *vcpu, 3740 - struct kvm_mmu *mmu, bool ept) 3739 + static void update_permission_bitmask(struct kvm_vcpu *vcpu, 3740 + struct kvm_mmu *mmu, bool ept) 3741 3741 { 3742 3742 unsigned bit, byte, pfec; 3743 3743 u8 map; ··· 3918 3918 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) 3919 3919 { 3920 3920 bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); 3921 + bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); 3921 3922 struct kvm_mmu *context = &vcpu->arch.mmu; 3922 3923 3923 3924 MMU_WARN_ON(VALID_PAGE(context->root_hpa)); ··· 3937 3936 context->base_role.cr0_wp = is_write_protection(vcpu); 3938 3937 context->base_role.smep_andnot_wp 3939 3938 = smep && !is_write_protection(vcpu); 3939 + context->base_role.smap_andnot_wp 3940 + = smap && !is_write_protection(vcpu); 3940 3941 } 3941 3942 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); 3942 3943 ··· 4210 4207 const u8 *new, int bytes) 4211 4208 { 4212 4209 gfn_t gfn = gpa >> PAGE_SHIFT; 4213 - union kvm_mmu_page_role mask = { .word = 0 }; 4214 4210 struct kvm_mmu_page *sp; 4215 4211 LIST_HEAD(invalid_list); 4216 4212 u64 entry, gentry, *spte; 4217 4213 int npte; 4218 4214 bool remote_flush, local_flush, zap_page; 4215 + union kvm_mmu_page_role mask = (union kvm_mmu_page_role) { 4216 + .cr0_wp = 1, 4217 + .cr4_pae = 1, 4218 + .nxe = 1, 4219 + .smep_andnot_wp = 1, 4220 + .smap_andnot_wp = 1, 4221 + }; 4219 4222 4220 4223 /* 4221 4224 * If we don't have indirect shadow pages, it means no page is ··· 4247 4238 ++vcpu->kvm->stat.mmu_pte_write; 4248 4239 kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); 4249 4240 4250 - mask.cr0_wp = mask.cr4_pae = mask.nxe = 1; 4251 4241 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { 4252 4242 if (detect_write_misaligned(sp, gpa, bytes) || 4253 4243 detect_write_flooding(sp)) {
+2 -2
arch/x86/kvm/mmu.h
··· 71 71 int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct); 72 72 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu); 73 73 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly); 74 - void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, 75 - bool ept); 76 74 77 75 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) 78 76 { ··· 163 165 unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC); 164 166 int index = (pfec >> 1) + 165 167 (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1)); 168 + 169 + WARN_ON(pfec & PFERR_RSVD_MASK); 166 170 167 171 return (mmu->permissions[index] >> pte_access) & 1; 168 172 }
+7
arch/x86/kvm/paging_tmpl.h
··· 718 718 mmu_is_nested(vcpu)); 719 719 if (likely(r != RET_MMIO_PF_INVALID)) 720 720 return r; 721 + 722 + /* 723 + * page fault with PFEC.RSVD = 1 is caused by shadow 724 + * page fault, should not be used to walk guest page 725 + * table. 726 + */ 727 + error_code &= ~PFERR_RSVD_MASK; 721 728 }; 722 729 723 730 r = mmu_topup_memory_caches(vcpu);
+1
arch/x86/kvm/svm.c
··· 4381 4381 .cache_reg = svm_cache_reg, 4382 4382 .get_rflags = svm_get_rflags, 4383 4383 .set_rflags = svm_set_rflags, 4384 + .fpu_activate = svm_fpu_activate, 4384 4385 .fpu_deactivate = svm_fpu_deactivate, 4385 4386 4386 4387 .tlb_flush = svm_flush_tlb,
+1
arch/x86/kvm/vmx.c
··· 10185 10185 .cache_reg = vmx_cache_reg, 10186 10186 .get_rflags = vmx_get_rflags, 10187 10187 .set_rflags = vmx_set_rflags, 10188 + .fpu_activate = vmx_fpu_activate, 10188 10189 .fpu_deactivate = vmx_fpu_deactivate, 10189 10190 10190 10191 .tlb_flush = vmx_flush_tlb,
+19 -7
arch/x86/kvm/x86.c
··· 702 702 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 703 703 { 704 704 unsigned long old_cr4 = kvm_read_cr4(vcpu); 705 - unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | 706 - X86_CR4_PAE | X86_CR4_SMEP; 705 + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | 706 + X86_CR4_SMEP | X86_CR4_SMAP; 707 + 707 708 if (cr4 & CR4_RESERVED_BITS) 708 709 return 1; 709 710 ··· 744 743 if (((cr4 ^ old_cr4) & pdptr_bits) || 745 744 (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) 746 745 kvm_mmu_reset_context(vcpu); 747 - 748 - if ((cr4 ^ old_cr4) & X86_CR4_SMAP) 749 - update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false); 750 746 751 747 if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) 752 748 kvm_update_cpuid(vcpu); ··· 6195 6197 return; 6196 6198 6197 6199 page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 6200 + if (is_error_page(page)) 6201 + return; 6198 6202 kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page)); 6199 6203 6200 6204 /* ··· 7060 7060 fpu_save_init(&vcpu->arch.guest_fpu); 7061 7061 __kernel_fpu_end(); 7062 7062 ++vcpu->stat.fpu_reload; 7063 - kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); 7063 + if (!vcpu->arch.eager_fpu) 7064 + kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); 7065 + 7064 7066 trace_kvm_fpu(0); 7065 7067 } 7066 7068 ··· 7078 7076 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, 7079 7077 unsigned int id) 7080 7078 { 7079 + struct kvm_vcpu *vcpu; 7080 + 7081 7081 if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) 7082 7082 printk_once(KERN_WARNING 7083 7083 "kvm: SMP vm created on host with unstable TSC; " 7084 7084 "guest TSC will not be reliable\n"); 7085 - return kvm_x86_ops->vcpu_create(kvm, id); 7085 + 7086 + vcpu = kvm_x86_ops->vcpu_create(kvm, id); 7087 + 7088 + /* 7089 + * Activate fpu unconditionally in case the guest needs eager FPU. It will be 7090 + * deactivated soon if it doesn't. 
7091 + */ 7092 + kvm_x86_ops->fpu_activate(vcpu); 7093 + return vcpu; 7086 7094 } 7087 7095 7088 7096 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+6 -1
arch/x86/net/bpf_jit_comp.c
··· 966 966 } 967 967 ctx.cleanup_addr = proglen; 968 968 969 - for (pass = 0; pass < 10; pass++) { 969 + /* JITed image shrinks with every pass and the loop iterates 970 + * until the image stops shrinking. Very large bpf programs 971 + * may converge on the last pass. In such case do one more 972 + * pass to emit the final image 973 + */ 974 + for (pass = 0; pass < 10 || image; pass++) { 970 975 proglen = do_jit(prog, addrs, image, oldproglen, &ctx); 971 976 if (proglen <= 0) { 972 977 image = NULL;
+10 -3
arch/x86/pci/acpi.c
··· 482 482 483 483 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) 484 484 { 485 - struct pci_sysdata *sd = bridge->bus->sysdata; 486 - 487 - ACPI_COMPANION_SET(&bridge->dev, sd->companion); 485 + /* 486 + * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL 487 + * here, pci_create_root_bus() has been called by someone else and 488 + * sysdata is likely to be different from what we expect. Let it go in 489 + * that case. 490 + */ 491 + if (!bridge->dev.parent) { 492 + struct pci_sysdata *sd = bridge->bus->sysdata; 493 + ACPI_COMPANION_SET(&bridge->dev, sd->companion); 494 + } 488 495 return 0; 489 496 } 490 497
+13
arch/xtensa/include/asm/dma-mapping.h
··· 185 185 return -EINVAL; 186 186 } 187 187 188 + static inline void *dma_alloc_attrs(struct device *dev, size_t size, 189 + dma_addr_t *dma_handle, gfp_t flag, 190 + struct dma_attrs *attrs) 191 + { 192 + return NULL; 193 + } 194 + 195 + static inline void dma_free_attrs(struct device *dev, size_t size, 196 + void *vaddr, dma_addr_t dma_handle, 197 + struct dma_attrs *attrs) 198 + { 199 + } 200 + 188 201 #endif /* _XTENSA_DMA_MAPPING_H */
+3 -2
block/blk-core.c
··· 734 734 } 735 735 EXPORT_SYMBOL(blk_init_queue_node); 736 736 737 + static void blk_queue_bio(struct request_queue *q, struct bio *bio); 738 + 737 739 struct request_queue * 738 740 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, 739 741 spinlock_t *lock) ··· 1580 1578 blk_rq_bio_prep(req->q, req, bio); 1581 1579 } 1582 1580 1583 - void blk_queue_bio(struct request_queue *q, struct bio *bio) 1581 + static void blk_queue_bio(struct request_queue *q, struct bio *bio) 1584 1582 { 1585 1583 const bool sync = !!(bio->bi_rw & REQ_SYNC); 1586 1584 struct blk_plug *plug; ··· 1688 1686 spin_unlock_irq(q->queue_lock); 1689 1687 } 1690 1688 } 1691 - EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */ 1692 1689 1693 1690 /* 1694 1691 * If bio->bi_dev is a partition, remap the location
-9
crypto/Kconfig
··· 1512 1512 This option enables the user-spaces interface for random 1513 1513 number generator algorithms. 1514 1514 1515 - config CRYPTO_USER_API_AEAD 1516 - tristate "User-space interface for AEAD cipher algorithms" 1517 - depends on NET 1518 - select CRYPTO_AEAD 1519 - select CRYPTO_USER_API 1520 - help 1521 - This option enables the user-spaces interface for AEAD 1522 - cipher algorithms. 1523 - 1524 1515 config CRYPTO_HASH_INFO 1525 1516 bool 1526 1517
+4 -5
crypto/algif_aead.c
··· 33 33 /* 34 34 * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum 35 35 * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES 36 - * bytes 36 + * pages 37 37 */ 38 38 #define RSGL_MAX_ENTRIES ALG_MAX_PAGES 39 39 struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES]; ··· 435 435 if (err < 0) 436 436 goto unlock; 437 437 usedpages += err; 438 - /* chain the new scatterlist with initial list */ 438 + /* chain the new scatterlist with previous one */ 439 439 if (cnt) 440 - scatterwalk_crypto_chain(ctx->rsgl[0].sg, 441 - ctx->rsgl[cnt].sg, 1, 442 - sg_nents(ctx->rsgl[cnt-1].sg)); 440 + af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]); 441 + 443 442 /* we do not need more iovecs as we have sufficient memory */ 444 443 if (outlen <= usedpages) 445 444 break;
+2 -1
drivers/block/nvme-scsi.c
··· 2257 2257 page_code = GET_INQ_PAGE_CODE(cmd); 2258 2258 alloc_len = GET_INQ_ALLOC_LENGTH(cmd); 2259 2259 2260 - inq_response = kmalloc(alloc_len, GFP_KERNEL); 2260 + inq_response = kmalloc(max(alloc_len, STANDARD_INQUIRY_LENGTH), 2261 + GFP_KERNEL); 2261 2262 if (inq_response == NULL) { 2262 2263 res = -ENOMEM; 2263 2264 goto out_mem;
+4
drivers/bluetooth/ath3k.c
··· 88 88 { USB_DEVICE(0x04CA, 0x3007) }, 89 89 { USB_DEVICE(0x04CA, 0x3008) }, 90 90 { USB_DEVICE(0x04CA, 0x300b) }, 91 + { USB_DEVICE(0x04CA, 0x300f) }, 91 92 { USB_DEVICE(0x04CA, 0x3010) }, 92 93 { USB_DEVICE(0x0930, 0x0219) }, 93 94 { USB_DEVICE(0x0930, 0x0220) }, ··· 105 104 { USB_DEVICE(0x0cf3, 0xe003) }, 106 105 { USB_DEVICE(0x0CF3, 0xE004) }, 107 106 { USB_DEVICE(0x0CF3, 0xE005) }, 107 + { USB_DEVICE(0x0CF3, 0xE006) }, 108 108 { USB_DEVICE(0x13d3, 0x3362) }, 109 109 { USB_DEVICE(0x13d3, 0x3375) }, 110 110 { USB_DEVICE(0x13d3, 0x3393) }, ··· 145 143 { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, 146 144 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 147 145 { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, 146 + { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, 148 147 { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, 149 148 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 150 149 { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, ··· 161 158 { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, 162 159 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 163 160 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, 161 + { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 }, 164 162 { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, 165 163 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 166 164 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+3
drivers/bluetooth/btusb.c
··· 186 186 { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, 187 187 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 188 188 { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, 189 + { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, 189 190 { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, 190 191 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 191 192 { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, ··· 203 202 { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, 204 203 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 205 204 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, 205 + { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 }, 206 206 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 207 207 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 208 208 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, ··· 220 218 { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, 221 219 222 220 /* QCA ROME chipset */ 221 + { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME }, 223 222 { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME }, 224 223 { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME }, 225 224
+2 -2
drivers/bus/mips_cdmm.c
··· 453 453 454 454 /* Look for a specific device type */ 455 455 for (; drb < bus->drbs; drb += size + 1) { 456 - acsr = readl(cdmm + drb * CDMM_DRB_SIZE); 456 + acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE); 457 457 type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT; 458 458 if (type == dev_type) 459 459 return cdmm + drb * CDMM_DRB_SIZE; ··· 500 500 bus->discovered = true; 501 501 pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs); 502 502 for (; drb < bus->drbs; drb += size + 1) { 503 - acsr = readl(cdmm + drb * CDMM_DRB_SIZE); 503 + acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE); 504 504 type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT; 505 505 size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT; 506 506 rev = (acsr & CDMM_ACSR_DEVREV) >> CDMM_ACSR_DEVREV_SHIFT;
+45 -18
drivers/clk/clk-si5351.c
··· 1128 1128 if (!pdata) 1129 1129 return -ENOMEM; 1130 1130 1131 - pdata->clk_xtal = of_clk_get(np, 0); 1132 - if (!IS_ERR(pdata->clk_xtal)) 1133 - clk_put(pdata->clk_xtal); 1134 - pdata->clk_clkin = of_clk_get(np, 1); 1135 - if (!IS_ERR(pdata->clk_clkin)) 1136 - clk_put(pdata->clk_clkin); 1137 - 1138 1131 /* 1139 1132 * property silabs,pll-source : <num src>, [<..>] 1140 1133 * allow to selectively set pll source ··· 1321 1328 i2c_set_clientdata(client, drvdata); 1322 1329 drvdata->client = client; 1323 1330 drvdata->variant = variant; 1324 - drvdata->pxtal = pdata->clk_xtal; 1325 - drvdata->pclkin = pdata->clk_clkin; 1331 + drvdata->pxtal = devm_clk_get(&client->dev, "xtal"); 1332 + drvdata->pclkin = devm_clk_get(&client->dev, "clkin"); 1333 + 1334 + if (PTR_ERR(drvdata->pxtal) == -EPROBE_DEFER || 1335 + PTR_ERR(drvdata->pclkin) == -EPROBE_DEFER) 1336 + return -EPROBE_DEFER; 1337 + 1338 + /* 1339 + * Check for valid parent clock: VARIANT_A and VARIANT_B need XTAL, 1340 + * VARIANT_C can have CLKIN instead. 
1341 + */ 1342 + if (IS_ERR(drvdata->pxtal) && 1343 + (drvdata->variant != SI5351_VARIANT_C || IS_ERR(drvdata->pclkin))) { 1344 + dev_err(&client->dev, "missing parent clock\n"); 1345 + return -EINVAL; 1346 + } 1326 1347 1327 1348 drvdata->regmap = devm_regmap_init_i2c(client, &si5351_regmap_config); 1328 1349 if (IS_ERR(drvdata->regmap)) { ··· 1400 1393 } 1401 1394 } 1402 1395 1396 + if (!IS_ERR(drvdata->pxtal)) 1397 + clk_prepare_enable(drvdata->pxtal); 1398 + if (!IS_ERR(drvdata->pclkin)) 1399 + clk_prepare_enable(drvdata->pclkin); 1400 + 1403 1401 /* register xtal input clock gate */ 1404 1402 memset(&init, 0, sizeof(init)); 1405 1403 init.name = si5351_input_names[0]; ··· 1419 1407 clk = devm_clk_register(&client->dev, &drvdata->xtal); 1420 1408 if (IS_ERR(clk)) { 1421 1409 dev_err(&client->dev, "unable to register %s\n", init.name); 1422 - return PTR_ERR(clk); 1410 + ret = PTR_ERR(clk); 1411 + goto err_clk; 1423 1412 } 1424 1413 1425 1414 /* register clkin input clock gate */ ··· 1438 1425 if (IS_ERR(clk)) { 1439 1426 dev_err(&client->dev, "unable to register %s\n", 1440 1427 init.name); 1441 - return PTR_ERR(clk); 1428 + ret = PTR_ERR(clk); 1429 + goto err_clk; 1442 1430 } 1443 1431 } 1444 1432 ··· 1461 1447 clk = devm_clk_register(&client->dev, &drvdata->pll[0].hw); 1462 1448 if (IS_ERR(clk)) { 1463 1449 dev_err(&client->dev, "unable to register %s\n", init.name); 1464 - return -EINVAL; 1450 + ret = PTR_ERR(clk); 1451 + goto err_clk; 1465 1452 } 1466 1453 1467 1454 /* register PLLB or VXCO (Si5351B) */ ··· 1486 1471 clk = devm_clk_register(&client->dev, &drvdata->pll[1].hw); 1487 1472 if (IS_ERR(clk)) { 1488 1473 dev_err(&client->dev, "unable to register %s\n", init.name); 1489 - return -EINVAL; 1474 + ret = PTR_ERR(clk); 1475 + goto err_clk; 1490 1476 } 1491 1477 1492 1478 /* register clk multisync and clk out divider */ ··· 1508 1492 num_clocks * sizeof(*drvdata->onecell.clks), GFP_KERNEL); 1509 1493 1510 1494 if (WARN_ON(!drvdata->msynth || 
!drvdata->clkout || 1511 - !drvdata->onecell.clks)) 1512 - return -ENOMEM; 1495 + !drvdata->onecell.clks)) { 1496 + ret = -ENOMEM; 1497 + goto err_clk; 1498 + } 1513 1499 1514 1500 for (n = 0; n < num_clocks; n++) { 1515 1501 drvdata->msynth[n].num = n; ··· 1529 1511 if (IS_ERR(clk)) { 1530 1512 dev_err(&client->dev, "unable to register %s\n", 1531 1513 init.name); 1532 - return -EINVAL; 1514 + ret = PTR_ERR(clk); 1515 + goto err_clk; 1533 1516 } 1534 1517 } 1535 1518 ··· 1557 1538 if (IS_ERR(clk)) { 1558 1539 dev_err(&client->dev, "unable to register %s\n", 1559 1540 init.name); 1560 - return -EINVAL; 1541 + ret = PTR_ERR(clk); 1542 + goto err_clk; 1561 1543 } 1562 1544 drvdata->onecell.clks[n] = clk; 1563 1545 ··· 1577 1557 &drvdata->onecell); 1578 1558 if (ret) { 1579 1559 dev_err(&client->dev, "unable to add clk provider\n"); 1580 - return ret; 1560 + goto err_clk; 1581 1561 } 1582 1562 1583 1563 return 0; 1564 + 1565 + err_clk: 1566 + if (!IS_ERR(drvdata->pxtal)) 1567 + clk_disable_unprepare(drvdata->pxtal); 1568 + if (!IS_ERR(drvdata->pclkin)) 1569 + clk_disable_unprepare(drvdata->pclkin); 1570 + return ret; 1584 1571 } 1585 1572 1586 1573 static const struct i2c_device_id si5351_i2c_ids[] = {
+8
drivers/clk/clk.c
··· 1475 1475 */ 1476 1476 if (clk->prepare_count) { 1477 1477 clk_core_prepare(parent); 1478 + flags = clk_enable_lock(); 1478 1479 clk_core_enable(parent); 1479 1480 clk_core_enable(clk); 1481 + clk_enable_unlock(flags); 1480 1482 } 1481 1483 1482 1484 /* update the clk tree topology */ ··· 1493 1491 struct clk_core *parent, 1494 1492 struct clk_core *old_parent) 1495 1493 { 1494 + unsigned long flags; 1495 + 1496 1496 /* 1497 1497 * Finish the migration of prepare state and undo the changes done 1498 1498 * for preventing a race with clk_enable(). 1499 1499 */ 1500 1500 if (core->prepare_count) { 1501 + flags = clk_enable_lock(); 1501 1502 clk_core_disable(core); 1502 1503 clk_core_disable(old_parent); 1504 + clk_enable_unlock(flags); 1503 1505 clk_core_unprepare(old_parent); 1504 1506 } 1505 1507 } ··· 1531 1525 clk_enable_unlock(flags); 1532 1526 1533 1527 if (clk->prepare_count) { 1528 + flags = clk_enable_lock(); 1534 1529 clk_core_disable(clk); 1535 1530 clk_core_disable(parent); 1531 + clk_enable_unlock(flags); 1536 1532 clk_core_unprepare(parent); 1537 1533 } 1538 1534 return ret;
+2 -2
drivers/clk/qcom/gcc-msm8916.c
··· 71 71 static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2a_map[] = { 72 72 { P_XO, 0 }, 73 73 { P_GPLL0_AUX, 3 }, 74 - { P_GPLL2_AUX, 2 }, 75 74 { P_GPLL1, 1 }, 75 + { P_GPLL2_AUX, 2 }, 76 76 }; 77 77 78 78 static const char *gcc_xo_gpll0a_gpll1_gpll2a[] = { ··· 1115 1115 static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = { 1116 1116 F(100000000, P_GPLL0, 8, 0, 0), 1117 1117 F(160000000, P_GPLL0, 5, 0, 0), 1118 - F(228570000, P_GPLL0, 5, 0, 0), 1118 + F(228570000, P_GPLL0, 3.5, 0, 0), 1119 1119 { } 1120 1120 }; 1121 1121
+1 -1
drivers/clk/samsung/Makefile
··· 10 10 obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o 11 11 obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o 12 12 obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o 13 - obj-$(CONFIG_ARCH_EXYNOS5433) += clk-exynos5433.o 13 + obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos5433.o 14 14 obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o 15 15 obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o 16 16 obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o
+1
drivers/clk/samsung/clk-exynos5420.c
··· 271 271 { .offset = SRC_MASK_PERIC0, .value = 0x11111110, }, 272 272 { .offset = SRC_MASK_PERIC1, .value = 0x11111100, }, 273 273 { .offset = SRC_MASK_ISP, .value = 0x11111000, }, 274 + { .offset = GATE_BUS_TOP, .value = 0xffffffff, }, 274 275 { .offset = GATE_BUS_DISP1, .value = 0xffffffff, }, 275 276 { .offset = GATE_IP_PERIC, .value = 0xffffffff, }, 276 277 };
+6 -6
drivers/clk/samsung/clk-exynos5433.c
··· 748 748 PLL_35XX_RATE(825000000U, 275, 4, 1), 749 749 PLL_35XX_RATE(800000000U, 400, 6, 1), 750 750 PLL_35XX_RATE(733000000U, 733, 12, 1), 751 - PLL_35XX_RATE(700000000U, 360, 6, 1), 751 + PLL_35XX_RATE(700000000U, 175, 3, 1), 752 752 PLL_35XX_RATE(667000000U, 222, 4, 1), 753 753 PLL_35XX_RATE(633000000U, 211, 4, 1), 754 754 PLL_35XX_RATE(600000000U, 500, 5, 2), ··· 760 760 PLL_35XX_RATE(444000000U, 370, 5, 2), 761 761 PLL_35XX_RATE(420000000U, 350, 5, 2), 762 762 PLL_35XX_RATE(400000000U, 400, 6, 2), 763 - PLL_35XX_RATE(350000000U, 360, 6, 2), 763 + PLL_35XX_RATE(350000000U, 350, 6, 2), 764 764 PLL_35XX_RATE(333000000U, 222, 4, 2), 765 765 PLL_35XX_RATE(300000000U, 500, 5, 3), 766 766 PLL_35XX_RATE(266000000U, 532, 6, 3), 767 767 PLL_35XX_RATE(200000000U, 400, 6, 3), 768 768 PLL_35XX_RATE(166000000U, 332, 6, 3), 769 769 PLL_35XX_RATE(160000000U, 320, 6, 3), 770 - PLL_35XX_RATE(133000000U, 552, 6, 4), 770 + PLL_35XX_RATE(133000000U, 532, 6, 4), 771 771 PLL_35XX_RATE(100000000U, 400, 6, 4), 772 772 { /* sentinel */ } 773 773 }; ··· 1490 1490 1491 1491 /* ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT */ 1492 1492 GATE(CLK_PCLK_MONOTONIC_CNT, "pclk_monotonic_cnt", "div_aclk_mif_133", 1493 - ENABLE_PCLK_MIF_SECURE_RTC, 0, 0, 0), 1493 + ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT, 0, 0, 0), 1494 1494 1495 1495 /* ENABLE_PCLK_MIF_SECURE_RTC */ 1496 1496 GATE(CLK_PCLK_RTC, "pclk_rtc", "div_aclk_mif_133", ··· 3665 3665 ENABLE_SCLK_APOLLO, 3, CLK_IGNORE_UNUSED, 0), 3666 3666 GATE(CLK_SCLK_HPM_APOLLO, "sclk_hpm_apollo", "div_sclk_hpm_apollo", 3667 3667 ENABLE_SCLK_APOLLO, 1, CLK_IGNORE_UNUSED, 0), 3668 - GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo_pll", 3668 + GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo2", 3669 3669 ENABLE_SCLK_APOLLO, 0, CLK_IGNORE_UNUSED, 0), 3670 3670 }; 3671 3671 ··· 3927 3927 #define ENABLE_PCLK_MSCL 0x0900 3928 3928 #define ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER0 0x0904 3929 3929 #define ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER1 0x0908 3930 - #define 
ENABLE_PCLK_MSCL_SECURE_SMMU_JPEG 0x000c 3930 + #define ENABLE_PCLK_MSCL_SECURE_SMMU_JPEG 0x090c 3931 3931 #define ENABLE_SCLK_MSCL 0x0a00 3932 3932 #define ENABLE_IP_MSCL0 0x0b00 3933 3933 #define ENABLE_IP_MSCL1 0x0b04
+1 -1
drivers/gpio/gpio-kempld.c
··· 117 117 = container_of(chip, struct kempld_gpio_data, chip); 118 118 struct kempld_device_data *pld = gpio->pld; 119 119 120 - return kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset); 120 + return !kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset); 121 121 } 122 122 123 123 static int kempld_gpio_pincount(struct kempld_device_data *pld)
+6 -4
drivers/gpio/gpiolib.c
··· 53 53 static LIST_HEAD(gpio_lookup_list); 54 54 LIST_HEAD(gpio_chips); 55 55 56 + 57 + static void gpiochip_free_hogs(struct gpio_chip *chip); 58 + static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip); 59 + 60 + 56 61 static inline void desc_set_label(struct gpio_desc *d, const char *label) 57 62 { 58 63 d->label = label; ··· 302 297 303 298 err_remove_chip: 304 299 acpi_gpiochip_remove(chip); 300 + gpiochip_free_hogs(chip); 305 301 of_gpiochip_remove(chip); 306 302 spin_lock_irqsave(&gpio_lock, flags); 307 303 list_del(&chip->list); ··· 318 312 return status; 319 313 } 320 314 EXPORT_SYMBOL_GPL(gpiochip_add); 321 - 322 - /* Forward-declaration */ 323 - static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip); 324 - static void gpiochip_free_hogs(struct gpio_chip *chip); 325 315 326 316 /** 327 317 * gpiochip_remove() - unregister a gpio_chip
+3
drivers/gpu/drm/drm_plane_helper.c
··· 465 465 if (!crtc[i]) 466 466 continue; 467 467 468 + if (crtc[i]->cursor == plane) 469 + continue; 470 + 468 471 /* There's no other way to figure out whether the crtc is running. */ 469 472 ret = drm_crtc_vblank_get(crtc[i]); 470 473 if (ret == 0) {
+2 -2
drivers/gpu/drm/exynos/exynos7_drm_decon.c
··· 91 91 92 92 static void decon_clear_channel(struct decon_context *ctx) 93 93 { 94 - int win, ch_enabled = 0; 94 + unsigned int win, ch_enabled = 0; 95 95 96 96 DRM_DEBUG_KMS("%s\n", __FILE__); 97 97 ··· 710 710 } 711 711 } 712 712 713 - static struct exynos_drm_crtc_ops decon_crtc_ops = { 713 + static const struct exynos_drm_crtc_ops decon_crtc_ops = { 714 714 .dpms = decon_dpms, 715 715 .mode_fixup = decon_mode_fixup, 716 716 .commit = decon_commit,
+9 -4
drivers/gpu/drm/exynos/exynos_dp_core.c
··· 32 32 #include <drm/bridge/ptn3460.h> 33 33 34 34 #include "exynos_dp_core.h" 35 - #include "exynos_drm_fimd.h" 36 35 37 36 #define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \ 38 37 connector) ··· 195 196 } 196 197 } 197 198 198 - dev_err(dp->dev, "EDID Read success!\n"); 199 + dev_dbg(dp->dev, "EDID Read success!\n"); 199 200 return 0; 200 201 } 201 202 ··· 1065 1066 1066 1067 static void exynos_dp_poweron(struct exynos_dp_device *dp) 1067 1068 { 1069 + struct exynos_drm_crtc *crtc = dp_to_crtc(dp); 1070 + 1068 1071 if (dp->dpms_mode == DRM_MODE_DPMS_ON) 1069 1072 return; 1070 1073 ··· 1077 1076 } 1078 1077 } 1079 1078 1080 - fimd_dp_clock_enable(dp_to_crtc(dp), true); 1079 + if (crtc->ops->clock_enable) 1080 + crtc->ops->clock_enable(dp_to_crtc(dp), true); 1081 1081 1082 1082 clk_prepare_enable(dp->clock); 1083 1083 exynos_dp_phy_init(dp); ··· 1089 1087 1090 1088 static void exynos_dp_poweroff(struct exynos_dp_device *dp) 1091 1089 { 1090 + struct exynos_drm_crtc *crtc = dp_to_crtc(dp); 1091 + 1092 1092 if (dp->dpms_mode != DRM_MODE_DPMS_ON) 1093 1093 return; 1094 1094 ··· 1106 1102 exynos_dp_phy_exit(dp); 1107 1103 clk_disable_unprepare(dp->clock); 1108 1104 1109 - fimd_dp_clock_enable(dp_to_crtc(dp), false); 1105 + if (crtc->ops->clock_enable) 1106 + crtc->ops->clock_enable(dp_to_crtc(dp), false); 1110 1107 1111 1108 if (dp->panel) { 1112 1109 if (drm_panel_unprepare(dp->panel))
+5 -5
drivers/gpu/drm/exynos/exynos_drm_crtc.c
··· 238 238 }; 239 239 240 240 struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, 241 - struct drm_plane *plane, 242 - int pipe, 243 - enum exynos_drm_output_type type, 244 - struct exynos_drm_crtc_ops *ops, 245 - void *ctx) 241 + struct drm_plane *plane, 242 + int pipe, 243 + enum exynos_drm_output_type type, 244 + const struct exynos_drm_crtc_ops *ops, 245 + void *ctx) 246 246 { 247 247 struct exynos_drm_crtc *exynos_crtc; 248 248 struct exynos_drm_private *private = drm_dev->dev_private;
+5 -5
drivers/gpu/drm/exynos/exynos_drm_crtc.h
··· 18 18 #include "exynos_drm_drv.h" 19 19 20 20 struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, 21 - struct drm_plane *plane, 22 - int pipe, 23 - enum exynos_drm_output_type type, 24 - struct exynos_drm_crtc_ops *ops, 25 - void *context); 21 + struct drm_plane *plane, 22 + int pipe, 23 + enum exynos_drm_output_type type, 24 + const struct exynos_drm_crtc_ops *ops, 25 + void *context); 26 26 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe); 27 27 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe); 28 28 void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
+6 -14
drivers/gpu/drm/exynos/exynos_drm_drv.h
··· 71 71 * @dma_addr: array of bus(accessed by dma) address to the memory region 72 72 * allocated for a overlay. 73 73 * @zpos: order of overlay layer(z position). 74 - * @index_color: if using color key feature then this value would be used 75 - * as index color. 76 - * @default_win: a window to be enabled. 77 - * @color_key: color key on or off. 78 - * @local_path: in case of lcd type, local path mode on or off. 79 - * @transparency: transparency on or off. 80 - * @activated: activated or not. 81 74 * @enabled: enabled or not. 82 75 * @resume: to resume or not. 83 76 * ··· 101 108 uint32_t pixel_format; 102 109 dma_addr_t dma_addr[MAX_FB_BUFFER]; 103 110 unsigned int zpos; 104 - unsigned int index_color; 105 111 106 - bool default_win:1; 107 - bool color_key:1; 108 - bool local_path:1; 109 - bool transparency:1; 110 - bool activated:1; 111 112 bool enabled:1; 112 113 bool resume:1; 113 114 }; ··· 168 181 * @win_disable: disable hardware specific overlay. 169 182 * @te_handler: trigger to transfer video image at the tearing effect 170 183 * synchronization signal if there is a page flip request. 184 + * @clock_enable: optional function enabling/disabling display domain clock, 185 + * called from exynos-dp driver before powering up (with 186 + * 'enable' argument as true) and after powering down (with 187 + * 'enable' as false). 171 188 */ 172 189 struct exynos_drm_crtc; 173 190 struct exynos_drm_crtc_ops { ··· 186 195 void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos); 187 196 void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos); 188 197 void (*te_handler)(struct exynos_drm_crtc *crtc); 198 + void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable); 189 199 }; 190 200 191 201 /* ··· 213 221 unsigned int dpms; 214 222 wait_queue_head_t pending_flip_queue; 215 223 struct drm_pending_vblank_event *event; 216 - struct exynos_drm_crtc_ops *ops; 224 + const struct exynos_drm_crtc_ops *ops; 217 225 void *ctx; 218 226 }; 219 227
+1 -38
drivers/gpu/drm/exynos/exynos_drm_fb.c
··· 171 171 return &exynos_fb->fb; 172 172 } 173 173 174 - static u32 exynos_drm_format_num_buffers(struct drm_mode_fb_cmd2 *mode_cmd) 175 - { 176 - unsigned int cnt = 0; 177 - 178 - if (mode_cmd->pixel_format != DRM_FORMAT_NV12) 179 - return drm_format_num_planes(mode_cmd->pixel_format); 180 - 181 - while (cnt != MAX_FB_BUFFER) { 182 - if (!mode_cmd->handles[cnt]) 183 - break; 184 - cnt++; 185 - } 186 - 187 - /* 188 - * check if NV12 or NV12M. 189 - * 190 - * NV12 191 - * handles[0] = base1, offsets[0] = 0 192 - * handles[1] = base1, offsets[1] = Y_size 193 - * 194 - * NV12M 195 - * handles[0] = base1, offsets[0] = 0 196 - * handles[1] = base2, offsets[1] = 0 197 - */ 198 - if (cnt == 2) { 199 - /* 200 - * in case of NV12 format, offsets[1] is not 0 and 201 - * handles[0] is same as handles[1]. 202 - */ 203 - if (mode_cmd->offsets[1] && 204 - mode_cmd->handles[0] == mode_cmd->handles[1]) 205 - cnt = 1; 206 - } 207 - 208 - return cnt; 209 - } 210 - 211 174 static struct drm_framebuffer * 212 175 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, 213 176 struct drm_mode_fb_cmd2 *mode_cmd) ··· 193 230 194 231 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); 195 232 exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj); 196 - exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd); 233 + exynos_fb->buf_cnt = drm_format_num_planes(mode_cmd->pixel_format); 197 234 198 235 DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt); 199 236
+24 -29
drivers/gpu/drm/exynos/exynos_drm_fimd.c
··· 33 33 #include "exynos_drm_crtc.h" 34 34 #include "exynos_drm_plane.h" 35 35 #include "exynos_drm_iommu.h" 36 - #include "exynos_drm_fimd.h" 37 36 38 37 /* 39 38 * FIMD stands for Fully Interactive Mobile Display and ··· 215 216 DRM_DEBUG_KMS("vblank wait timed out.\n"); 216 217 } 217 218 218 - static void fimd_enable_video_output(struct fimd_context *ctx, int win, 219 + static void fimd_enable_video_output(struct fimd_context *ctx, unsigned int win, 219 220 bool enable) 220 221 { 221 222 u32 val = readl(ctx->regs + WINCON(win)); ··· 228 229 writel(val, ctx->regs + WINCON(win)); 229 230 } 230 231 231 - static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win, 232 + static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, 233 + unsigned int win, 232 234 bool enable) 233 235 { 234 236 u32 val = readl(ctx->regs + SHADOWCON); ··· 244 244 245 245 static void fimd_clear_channel(struct fimd_context *ctx) 246 246 { 247 - int win, ch_enabled = 0; 247 + unsigned int win, ch_enabled = 0; 248 248 249 249 DRM_DEBUG_KMS("%s\n", __FILE__); 250 250 ··· 946 946 drm_handle_vblank(ctx->drm_dev, ctx->pipe); 947 947 } 948 948 949 - static struct exynos_drm_crtc_ops fimd_crtc_ops = { 949 + static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable) 950 + { 951 + struct fimd_context *ctx = crtc->ctx; 952 + u32 val; 953 + 954 + /* 955 + * Only Exynos 5250, 5260, 5410 and 542x requires enabling DP/MIE 956 + * clock. On these SoCs the bootloader may enable it but any 957 + * power domain off/on will reset it to disable state. 958 + */ 959 + if (ctx->driver_data != &exynos5_fimd_driver_data) 960 + return; 961 + 962 + val = enable ? 
DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; 963 + writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON); 964 + } 965 + 966 + static const struct exynos_drm_crtc_ops fimd_crtc_ops = { 950 967 .dpms = fimd_dpms, 951 968 .mode_fixup = fimd_mode_fixup, 952 969 .commit = fimd_commit, ··· 973 956 .win_commit = fimd_win_commit, 974 957 .win_disable = fimd_win_disable, 975 958 .te_handler = fimd_te_handler, 959 + .clock_enable = fimd_dp_clock_enable, 976 960 }; 977 961 978 962 static irqreturn_t fimd_irq_handler(int irq, void *dev_id) ··· 1043 1025 if (ctx->display) 1044 1026 exynos_drm_create_enc_conn(drm_dev, ctx->display); 1045 1027 1046 - ret = fimd_iommu_attach_devices(ctx, drm_dev); 1047 - if (ret) 1048 - return ret; 1049 - 1050 - return 0; 1051 - 1028 + return fimd_iommu_attach_devices(ctx, drm_dev); 1052 1029 } 1053 1030 1054 1031 static void fimd_unbind(struct device *dev, struct device *master, ··· 1204 1191 1205 1192 return 0; 1206 1193 } 1207 - 1208 - void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable) 1209 - { 1210 - struct fimd_context *ctx = crtc->ctx; 1211 - u32 val; 1212 - 1213 - /* 1214 - * Only Exynos 5250, 5260, 5410 and 542x requires enabling DP/MIE 1215 - * clock. On these SoCs the bootloader may enable it but any 1216 - * power domain off/on will reset it to disable state. 1217 - */ 1218 - if (ctx->driver_data != &exynos5_fimd_driver_data) 1219 - return; 1220 - 1221 - val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; 1222 - writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON); 1223 - } 1224 - EXPORT_SYMBOL_GPL(fimd_dp_clock_enable); 1225 1194 1226 1195 struct platform_driver fimd_driver = { 1227 1196 .probe = fimd_probe,
-15
drivers/gpu/drm/exynos/exynos_drm_fimd.h
··· 1 - /* 2 - * Copyright (c) 2015 Samsung Electronics Co., Ltd. 3 - * 4 - * This program is free software; you can redistribute it and/or modify it 5 - * under the terms of the GNU General Public License as published by the 6 - * Free Software Foundation; either version 2 of the License, or (at your 7 - * option) any later version. 8 - */ 9 - 10 - #ifndef _EXYNOS_DRM_FIMD_H_ 11 - #define _EXYNOS_DRM_FIMD_H_ 12 - 13 - extern void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable); 14 - 15 - #endif /* _EXYNOS_DRM_FIMD_H_ */
+1 -1
drivers/gpu/drm/exynos/exynos_drm_plane.c
··· 76 76 return -EFAULT; 77 77 } 78 78 79 - exynos_plane->dma_addr[i] = buffer->dma_addr; 79 + exynos_plane->dma_addr[i] = buffer->dma_addr + fb->offsets[i]; 80 80 81 81 DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n", 82 82 i, (unsigned long)exynos_plane->dma_addr[i]);
+1 -1
drivers/gpu/drm/exynos/exynos_drm_vidi.c
··· 217 217 return 0; 218 218 } 219 219 220 - static struct exynos_drm_crtc_ops vidi_crtc_ops = { 220 + static const struct exynos_drm_crtc_ops vidi_crtc_ops = { 221 221 .dpms = vidi_dpms, 222 222 .enable_vblank = vidi_enable_vblank, 223 223 .disable_vblank = vidi_disable_vblank,
+38 -34
drivers/gpu/drm/exynos/exynos_mixer.c
··· 44 44 #define MIXER_WIN_NR 3 45 45 #define MIXER_DEFAULT_WIN 0 46 46 47 + /* The pixelformats that are natively supported by the mixer. */ 48 + #define MXR_FORMAT_RGB565 4 49 + #define MXR_FORMAT_ARGB1555 5 50 + #define MXR_FORMAT_ARGB4444 6 51 + #define MXR_FORMAT_ARGB8888 7 52 + 47 53 struct mixer_resources { 48 54 int irq; 49 55 void __iomem *mixer_regs; ··· 333 327 mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK); 334 328 } 335 329 336 - static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable) 330 + static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win, 331 + bool enable) 337 332 { 338 333 struct mixer_resources *res = &ctx->mixer_res; 339 334 u32 val = enable ? ~0 : 0; ··· 366 359 struct mixer_resources *res = &ctx->mixer_res; 367 360 368 361 mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN); 369 - 370 - mixer_regs_dump(ctx); 371 362 } 372 363 373 364 static void mixer_stop(struct mixer_context *ctx) ··· 378 373 while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) && 379 374 --timeout) 380 375 usleep_range(10000, 12000); 381 - 382 - mixer_regs_dump(ctx); 383 376 } 384 377 385 - static void vp_video_buffer(struct mixer_context *ctx, int win) 378 + static void vp_video_buffer(struct mixer_context *ctx, unsigned int win) 386 379 { 387 380 struct mixer_resources *res = &ctx->mixer_res; 388 381 unsigned long flags; 389 382 struct exynos_drm_plane *plane; 390 - unsigned int buf_num = 1; 391 383 dma_addr_t luma_addr[2], chroma_addr[2]; 392 384 bool tiled_mode = false; 393 385 bool crcb_mode = false; ··· 395 393 switch (plane->pixel_format) { 396 394 case DRM_FORMAT_NV12: 397 395 crcb_mode = false; 398 - buf_num = 2; 399 396 break; 400 - /* TODO: single buffer format NV12, NV21 */ 397 + case DRM_FORMAT_NV21: 398 + crcb_mode = true; 399 + break; 401 400 default: 402 - /* ignore pixel format at disable time */ 403 - if (!plane->dma_addr[0]) 404 - break; 405 - 406 401 DRM_ERROR("pixel format for 
vp is wrong [%d].\n", 407 402 plane->pixel_format); 408 403 return; 409 404 } 410 405 411 - if (buf_num == 2) { 412 - luma_addr[0] = plane->dma_addr[0]; 413 - chroma_addr[0] = plane->dma_addr[1]; 414 - } else { 415 - luma_addr[0] = plane->dma_addr[0]; 416 - chroma_addr[0] = plane->dma_addr[0] 417 - + (plane->pitch * plane->fb_height); 418 - } 406 + luma_addr[0] = plane->dma_addr[0]; 407 + chroma_addr[0] = plane->dma_addr[1]; 419 408 420 409 if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) { 421 410 ctx->interlace = true; ··· 477 484 mixer_vsync_set_update(ctx, true); 478 485 spin_unlock_irqrestore(&res->reg_slock, flags); 479 486 487 + mixer_regs_dump(ctx); 480 488 vp_regs_dump(ctx); 481 489 } 482 490 ··· 512 518 return -ENOTSUPP; 513 519 } 514 520 515 - static void mixer_graph_buffer(struct mixer_context *ctx, int win) 521 + static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win) 516 522 { 517 523 struct mixer_resources *res = &ctx->mixer_res; 518 524 unsigned long flags; ··· 525 531 526 532 plane = &ctx->planes[win]; 527 533 528 - #define RGB565 4 529 - #define ARGB1555 5 530 - #define ARGB4444 6 531 - #define ARGB8888 7 534 + switch (plane->pixel_format) { 535 + case DRM_FORMAT_XRGB4444: 536 + fmt = MXR_FORMAT_ARGB4444; 537 + break; 532 538 533 - switch (plane->bpp) { 534 - case 16: 535 - fmt = ARGB4444; 539 + case DRM_FORMAT_XRGB1555: 540 + fmt = MXR_FORMAT_ARGB1555; 536 541 break; 537 - case 32: 538 - fmt = ARGB8888; 542 + 543 + case DRM_FORMAT_RGB565: 544 + fmt = MXR_FORMAT_RGB565; 539 545 break; 546 + 547 + case DRM_FORMAT_XRGB8888: 548 + case DRM_FORMAT_ARGB8888: 549 + fmt = MXR_FORMAT_ARGB8888; 550 + break; 551 + 540 552 default: 541 - fmt = ARGB8888; 553 + DRM_DEBUG_KMS("pixelformat unsupported by mixer\n"); 554 + return; 542 555 } 543 556 544 557 /* check if mixer supports requested scaling setup */ ··· 618 617 619 618 mixer_vsync_set_update(ctx, true); 620 619 spin_unlock_irqrestore(&res->reg_slock, flags); 620 + 621 + 
mixer_regs_dump(ctx); 621 622 } 622 623 623 624 static void vp_win_reset(struct mixer_context *ctx) ··· 1073 1070 mutex_unlock(&ctx->mixer_mutex); 1074 1071 1075 1072 mixer_stop(ctx); 1073 + mixer_regs_dump(ctx); 1076 1074 mixer_window_suspend(ctx); 1077 1075 1078 1076 ctx->int_en = mixer_reg_read(res, MXR_INT_EN); ··· 1130 1126 return -EINVAL; 1131 1127 } 1132 1128 1133 - static struct exynos_drm_crtc_ops mixer_crtc_ops = { 1129 + static const struct exynos_drm_crtc_ops mixer_crtc_ops = { 1134 1130 .dpms = mixer_dpms, 1135 1131 .enable_vblank = mixer_enable_vblank, 1136 1132 .disable_vblank = mixer_disable_vblank, ··· 1160 1156 .has_sclk = 1, 1161 1157 }; 1162 1158 1163 - static struct platform_device_id mixer_driver_types[] = { 1159 + static const struct platform_device_id mixer_driver_types[] = { 1164 1160 { 1165 1161 .name = "s5p-mixer", 1166 1162 .driver_data = (unsigned long)&exynos4210_mxr_drv_data,
+11 -13
drivers/gpu/drm/i915/intel_pm.c
··· 2045 2045 p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; 2046 2046 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); 2047 2047 2048 - if (crtc->primary->state->fb) { 2049 - p->pri.enabled = true; 2048 + if (crtc->primary->state->fb) 2050 2049 p->pri.bytes_per_pixel = 2051 2050 crtc->primary->state->fb->bits_per_pixel / 8; 2052 - } else { 2053 - p->pri.enabled = false; 2054 - p->pri.bytes_per_pixel = 0; 2055 - } 2051 + else 2052 + p->pri.bytes_per_pixel = 4; 2056 2053 2057 - if (crtc->cursor->state->fb) { 2058 - p->cur.enabled = true; 2059 - p->cur.bytes_per_pixel = 4; 2060 - } else { 2061 - p->cur.enabled = false; 2062 - p->cur.bytes_per_pixel = 0; 2063 - } 2054 + p->cur.bytes_per_pixel = 4; 2055 + /* 2056 + * TODO: for now, assume primary and cursor planes are always enabled. 2057 + * Setting them to false makes the screen flicker. 2058 + */ 2059 + p->pri.enabled = true; 2060 + p->cur.enabled = true; 2061 + 2064 2062 p->pri.horiz_pixels = intel_crtc->config->pipe_src_w; 2065 2063 p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w; 2066 2064
+1 -1
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 384 384 if (gpu->memptrs_bo) { 385 385 if (gpu->memptrs_iova) 386 386 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id); 387 - drm_gem_object_unreference(gpu->memptrs_bo); 387 + drm_gem_object_unreference_unlocked(gpu->memptrs_bo); 388 388 } 389 389 release_firmware(gpu->pm4); 390 390 release_firmware(gpu->pfp);
+5 -5
drivers/gpu/drm/msm/dsi/dsi.c
··· 177 177 goto fail; 178 178 } 179 179 180 + for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { 181 + encoders[i]->bridge = msm_dsi->bridge; 182 + msm_dsi->encoders[i] = encoders[i]; 183 + } 184 + 180 185 msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id); 181 186 if (IS_ERR(msm_dsi->connector)) { 182 187 ret = PTR_ERR(msm_dsi->connector); 183 188 dev_err(dev->dev, "failed to create dsi connector: %d\n", ret); 184 189 msm_dsi->connector = NULL; 185 190 goto fail; 186 - } 187 - 188 - for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { 189 - encoders[i]->bridge = msm_dsi->bridge; 190 - msm_dsi->encoders[i] = encoders[i]; 191 191 } 192 192 193 193 priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
+8 -13
drivers/gpu/drm/msm/dsi/dsi_host.c
··· 1023 1023 *data = buf[1]; /* strip out dcs type */ 1024 1024 return 1; 1025 1025 } else { 1026 - pr_err("%s: read data does not match with rx_buf len %d\n", 1026 + pr_err("%s: read data does not match with rx_buf len %zu\n", 1027 1027 __func__, msg->rx_len); 1028 1028 return -EINVAL; 1029 1029 } ··· 1040 1040 data[1] = buf[2]; 1041 1041 return 2; 1042 1042 } else { 1043 - pr_err("%s: read data does not match with rx_buf len %d\n", 1043 + pr_err("%s: read data does not match with rx_buf len %zu\n", 1044 1044 __func__, msg->rx_len); 1045 1045 return -EINVAL; 1046 1046 } ··· 1093 1093 { 1094 1094 u32 *lp, *temp, data; 1095 1095 int i, j = 0, cnt; 1096 - bool ack_error = false; 1097 1096 u32 read_cnt; 1098 1097 u8 reg[16]; 1099 1098 int repeated_bytes = 0; ··· 1104 1105 if (cnt > 4) 1105 1106 cnt = 4; /* 4 x 32 bits registers only */ 1106 1107 1107 - /* Calculate real read data count */ 1108 - read_cnt = dsi_read(msm_host, 0x1d4) >> 16; 1109 - 1110 - ack_error = (rx_byte == 4) ? 1111 - (read_cnt == 8) : /* short pkt + 4-byte error pkt */ 1112 - (read_cnt == (pkt_size + 6 + 4)); /* long pkt+4-byte error pkt*/ 1113 - 1114 - if (ack_error) 1115 - read_cnt -= 4; /* Remove 4 byte error pkt */ 1108 + if (rx_byte == 4) 1109 + read_cnt = 4; 1110 + else 1111 + read_cnt = pkt_size + 6; 1116 1112 1117 1113 /* 1118 1114 * In case of multiple reads from the panel, after the first read, there ··· 1209 1215 container_of(work, struct msm_dsi_host, err_work); 1210 1216 u32 status = msm_host->err_work_state; 1211 1217 1212 - pr_err("%s: status=%x\n", __func__, status); 1218 + pr_err_ratelimited("%s: status=%x\n", __func__, status); 1213 1219 if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW) 1214 1220 dsi_sw_reset_restore(msm_host); 1215 1221 ··· 1791 1797 case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT: 1792 1798 pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__); 1793 1799 ret = 0; 1800 + break; 1794 1801 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: 1795 1802 case 
MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: 1796 1803 ret = dsi_short_read1_resp(buf, msg);
+5 -1
drivers/gpu/drm/msm/dsi/dsi_manager.c
··· 462 462 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 463 463 struct drm_connector *connector = NULL; 464 464 struct dsi_connector *dsi_connector; 465 - int ret; 465 + int ret, i; 466 466 467 467 dsi_connector = devm_kzalloc(msm_dsi->dev->dev, 468 468 sizeof(*dsi_connector), GFP_KERNEL); ··· 494 494 ret = drm_connector_register(connector); 495 495 if (ret) 496 496 goto fail; 497 + 498 + for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) 499 + drm_mode_connector_attach_encoder(connector, 500 + msm_dsi->encoders[i]); 497 501 498 502 return connector; 499 503
+2 -2
drivers/gpu/drm/msm/edp/edp_aux.c
··· 132 132 /* msg sanity check */ 133 133 if ((native && (msg->size > AUX_CMD_NATIVE_MAX)) || 134 134 (msg->size > AUX_CMD_I2C_MAX)) { 135 - pr_err("%s: invalid msg: size(%d), request(%x)\n", 135 + pr_err("%s: invalid msg: size(%zu), request(%x)\n", 136 136 __func__, msg->size, msg->request); 137 137 return -EINVAL; 138 138 } ··· 155 155 */ 156 156 edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0); 157 157 msm_edp_aux_ctrl(aux, 1); 158 - pr_err("%s: aux timeout, %d\n", __func__, ret); 158 + pr_err("%s: aux timeout, %zd\n", __func__, ret); 159 159 goto unlock_exit; 160 160 } 161 161 DBG("completion");
+2
drivers/gpu/drm/msm/edp/edp_connector.c
··· 151 151 if (ret) 152 152 goto fail; 153 153 154 + drm_mode_connector_attach_encoder(connector, edp->encoder); 155 + 154 156 return connector; 155 157 156 158 fail:
+2 -1
drivers/gpu/drm/msm/edp/edp_ctrl.c
··· 1149 1149 ctrl->aux = msm_edp_aux_init(dev, ctrl->base, &ctrl->drm_aux); 1150 1150 if (!ctrl->aux || !ctrl->drm_aux) { 1151 1151 pr_err("%s:failed to init aux\n", __func__); 1152 - return ret; 1152 + return -ENOMEM; 1153 1153 } 1154 1154 1155 1155 ctrl->phy = msm_edp_phy_init(dev, ctrl->base); 1156 1156 if (!ctrl->phy) { 1157 1157 pr_err("%s:failed to init phy\n", __func__); 1158 + ret = -ENOMEM; 1158 1159 goto err_destory_aux; 1159 1160 } 1160 1161
+17 -17
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
··· 72 72 .base = { 0x12d00, 0x12e00, 0x12f00 }, 73 73 }, 74 74 .intf = { 75 - .count = 4, 76 75 .base = { 0x12500, 0x12700, 0x12900, 0x12b00 }, 77 - }, 78 - .intfs = { 79 - [0] = INTF_eDP, 80 - [1] = INTF_DSI, 81 - [2] = INTF_DSI, 82 - [3] = INTF_HDMI, 76 + .connect = { 77 + [0] = INTF_eDP, 78 + [1] = INTF_DSI, 79 + [2] = INTF_DSI, 80 + [3] = INTF_HDMI, 81 + }, 83 82 }, 84 83 .max_clk = 200000000, 85 84 }; ··· 141 142 .base = { 0x12f00, 0x13000, 0x13100, 0x13200 }, 142 143 }, 143 144 .intf = { 144 - .count = 5, 145 145 .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 }, 146 - }, 147 - .intfs = { 148 - [0] = INTF_eDP, 149 - [1] = INTF_DSI, 150 - [2] = INTF_DSI, 151 - [3] = INTF_HDMI, 146 + .connect = { 147 + [0] = INTF_eDP, 148 + [1] = INTF_DSI, 149 + [2] = INTF_DSI, 150 + [3] = INTF_HDMI, 151 + }, 152 152 }, 153 153 .max_clk = 320000000, 154 154 }; ··· 194 196 195 197 }, 196 198 .intf = { 197 - .count = 1, /* INTF_1 */ 198 - .base = { 0x6B800 }, 199 + .base = { 0x00000, 0x6b800 }, 200 + .connect = { 201 + [0] = INTF_DISABLED, 202 + [1] = INTF_DSI, 203 + }, 199 204 }, 200 - /* TODO enable .intfs[] with [1] = INTF_DSI, once DSI is implemented */ 201 205 .max_clk = 320000000, 202 206 }; 203 207
+6 -3
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
··· 59 59 60 60 #define MDP5_INTF_NUM_MAX 5 61 61 62 + struct mdp5_intf_block { 63 + uint32_t base[MAX_BASES]; 64 + u32 connect[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */ 65 + }; 66 + 62 67 struct mdp5_cfg_hw { 63 68 char *name; 64 69 ··· 77 72 struct mdp5_sub_block dspp; 78 73 struct mdp5_sub_block ad; 79 74 struct mdp5_sub_block pp; 80 - struct mdp5_sub_block intf; 81 - 82 - u32 intfs[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */ 75 + struct mdp5_intf_block intf; 83 76 84 77 uint32_t max_clk; 85 78 };
+6 -6
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
··· 206 206 207 207 static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num) 208 208 { 209 - const int intf_cnt = hw_cfg->intf.count; 210 - const u32 *intfs = hw_cfg->intfs; 209 + const enum mdp5_intf_type *intfs = hw_cfg->intf.connect; 210 + const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect); 211 211 int id = 0, i; 212 212 213 213 for (i = 0; i < intf_cnt; i++) { ··· 228 228 struct msm_drm_private *priv = dev->dev_private; 229 229 const struct mdp5_cfg_hw *hw_cfg = 230 230 mdp5_cfg_get_hw_config(mdp5_kms->cfg); 231 - enum mdp5_intf_type intf_type = hw_cfg->intfs[intf_num]; 231 + enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num]; 232 232 struct drm_encoder *encoder; 233 233 int ret = 0; 234 234 ··· 365 365 /* Construct encoders and modeset initialize connector devices 366 366 * for each external display interface. 367 367 */ 368 - for (i = 0; i < ARRAY_SIZE(hw_cfg->intfs); i++) { 368 + for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) { 369 369 ret = modeset_init_intf(mdp5_kms, i); 370 370 if (ret) 371 371 goto fail; ··· 514 514 */ 515 515 mdp5_enable(mdp5_kms); 516 516 for (i = 0; i < MDP5_INTF_NUM_MAX; i++) { 517 - if (!config->hw->intf.base[i] || 518 - mdp5_cfg_intf_is_virtual(config->hw->intfs[i])) 517 + if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) || 518 + !config->hw->intf.base[i]) 519 519 continue; 520 520 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); 521 521 }
+1 -1
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
··· 273 273 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), 274 274 msm_framebuffer_iova(fb, mdp5_kms->id, 2)); 275 275 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), 276 - msm_framebuffer_iova(fb, mdp5_kms->id, 4)); 276 + msm_framebuffer_iova(fb, mdp5_kms->id, 3)); 277 277 278 278 plane->fb = fb; 279 279 }
+14 -10
drivers/gpu/drm/msm/msm_drv.c
··· 21 21 22 22 static void msm_fb_output_poll_changed(struct drm_device *dev) 23 23 { 24 + #ifdef CONFIG_DRM_MSM_FBDEV 24 25 struct msm_drm_private *priv = dev->dev_private; 25 26 if (priv->fbdev) 26 27 drm_fb_helper_hotplug_event(priv->fbdev); 28 + #endif 27 29 } 28 30 29 31 static const struct drm_mode_config_funcs mode_config_funcs = { ··· 96 94 } 97 95 98 96 if (reglog) 99 - printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size); 97 + printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size); 100 98 101 99 return ptr; 102 100 } ··· 104 102 void msm_writel(u32 data, void __iomem *addr) 105 103 { 106 104 if (reglog) 107 - printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data); 105 + printk(KERN_DEBUG "IO:W %p %08x\n", addr, data); 108 106 writel(data, addr); 109 107 } 110 108 ··· 112 110 { 113 111 u32 val = readl(addr); 114 112 if (reglog) 115 - printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val); 113 + printk(KERN_ERR "IO:R %p %08x\n", addr, val); 116 114 return val; 117 115 } 118 116 ··· 145 143 if (gpu) { 146 144 mutex_lock(&dev->struct_mutex); 147 145 gpu->funcs->pm_suspend(gpu); 148 - gpu->funcs->destroy(gpu); 149 146 mutex_unlock(&dev->struct_mutex); 147 + gpu->funcs->destroy(gpu); 150 148 } 151 149 152 150 if (priv->vram.paddr) { ··· 179 177 const struct of_device_id *match; 180 178 match = of_match_node(match_types, dev->of_node); 181 179 if (match) 182 - return (int)match->data; 180 + return (int)(unsigned long)match->data; 183 181 #endif 184 182 return 4; 185 183 } ··· 218 216 if (ret) 219 217 return ret; 220 218 size = r.end - r.start; 221 - DRM_INFO("using VRAM carveout: %lx@%08x\n", size, r.start); 219 + DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start); 222 220 } else 223 221 #endif 224 222 ··· 285 283 286 284 drm_mode_config_init(dev); 287 285 288 - ret = msm_init_vram(dev); 289 - if (ret) 290 - goto fail; 291 - 292 286 platform_set_drvdata(pdev, dev); 293 287 294 288 /* Bind all our sub-components: */ 295 289 ret 
= component_bind_all(dev->dev, dev); 296 290 if (ret) 297 291 return ret; 292 + 293 + ret = msm_init_vram(dev); 294 + if (ret) 295 + goto fail; 298 296 299 297 switch (get_mdp_ver(pdev)) { 300 298 case 4: ··· 421 419 422 420 static void msm_lastclose(struct drm_device *dev) 423 421 { 422 + #ifdef CONFIG_DRM_MSM_FBDEV 424 423 struct msm_drm_private *priv = dev->dev_private; 425 424 if (priv->fbdev) 426 425 drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev); 426 + #endif 427 427 } 428 428 429 429 static irqreturn_t msm_irq(int irq, void *arg)
+3 -4
drivers/gpu/drm/msm/msm_fb.c
··· 172 172 { 173 173 struct msm_drm_private *priv = dev->dev_private; 174 174 struct msm_kms *kms = priv->kms; 175 - struct msm_framebuffer *msm_fb; 176 - struct drm_framebuffer *fb = NULL; 175 + struct msm_framebuffer *msm_fb = NULL; 176 + struct drm_framebuffer *fb; 177 177 const struct msm_format *format; 178 178 int ret, i, n; 179 179 unsigned int hsub, vsub; ··· 239 239 return fb; 240 240 241 241 fail: 242 - if (fb) 243 - msm_framebuffer_destroy(fb); 242 + kfree(msm_fb); 244 243 245 244 return ERR_PTR(ret); 246 245 }
+1 -1
drivers/gpu/drm/msm/msm_gem.c
··· 483 483 uint64_t off = drm_vma_node_start(&obj->vma_node); 484 484 485 485 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 486 - seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n", 486 + seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n", 487 487 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', 488 488 msm_obj->read_fence, msm_obj->write_fence, 489 489 obj->name, obj->refcount.refcount.counter,
+2 -2
drivers/gpu/drm/msm/msm_iommu.c
··· 60 60 u32 pa = sg_phys(sg) - sg->offset; 61 61 size_t bytes = sg->length + sg->offset; 62 62 63 - VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes); 63 + VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); 64 64 65 65 ret = iommu_map(domain, da, pa, bytes, prot); 66 66 if (ret) ··· 99 99 if (unmapped < bytes) 100 100 return unmapped; 101 101 102 - VERB("unmap[%d]: %08x(%x)", i, iova, bytes); 102 + VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); 103 103 104 104 BUG_ON(!PAGE_ALIGNED(bytes)); 105 105
+1 -1
drivers/gpu/drm/msm/msm_ringbuffer.c
··· 56 56 void msm_ringbuffer_destroy(struct msm_ringbuffer *ring) 57 57 { 58 58 if (ring->bo) 59 - drm_gem_object_unreference(ring->bo); 59 + drm_gem_object_unreference_unlocked(ring->bo); 60 60 kfree(ring); 61 61 }
+1 -1
drivers/gpu/drm/nouveau/include/nvif/class.h
··· 14 14 15 15 #define FERMI_TWOD_A 0x0000902d 16 16 17 - #define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x0000903d 17 + #define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x00009039 18 18 19 19 #define KEPLER_INLINE_TO_MEMORY_A 0x0000a040 20 20 #define KEPLER_INLINE_TO_MEMORY_B 0x0000a140
-1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
··· 329 329 nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008); 330 330 331 331 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 332 - printk(KERN_ERR "ppc %d %d\n", gpc, priv->ppc_nr[gpc]); 333 332 for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) 334 333 nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000); 335 334 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+5 -2
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
··· 90 90 return disable; 91 91 } 92 92 93 - static int 93 + int 94 94 gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 95 95 struct nvkm_oclass *oclass, void *data, u32 size, 96 96 struct nvkm_object **pobject) 97 97 { 98 + struct nvkm_devinit_impl *impl = (void *)oclass; 98 99 struct nv50_devinit_priv *priv; 100 + u64 disable; 99 101 int ret; 100 102 101 103 ret = nvkm_devinit_create(parent, engine, oclass, &priv); ··· 105 103 if (ret) 106 104 return ret; 107 105 108 - if (nv_rd32(priv, 0x022500) & 0x00000001) 106 + disable = impl->disable(&priv->base); 107 + if (disable & (1ULL << NVDEV_ENGINE_DISP)) 109 108 priv->base.post = true; 110 109 111 110 return 0;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
··· 48 48 gm107_devinit_oclass = &(struct nvkm_devinit_impl) { 49 49 .base.handle = NV_SUBDEV(DEVINIT, 0x07), 50 50 .base.ofuncs = &(struct nvkm_ofuncs) { 51 - .ctor = nv50_devinit_ctor, 51 + .ctor = gf100_devinit_ctor, 52 52 .dtor = _nvkm_devinit_dtor, 53 53 .init = nv50_devinit_init, 54 54 .fini = _nvkm_devinit_fini,
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
··· 161 161 gm204_devinit_oclass = &(struct nvkm_devinit_impl) { 162 162 .base.handle = NV_SUBDEV(DEVINIT, 0x07), 163 163 .base.ofuncs = &(struct nvkm_ofuncs) { 164 - .ctor = nv50_devinit_ctor, 164 + .ctor = gf100_devinit_ctor, 165 165 .dtor = _nvkm_devinit_dtor, 166 166 .init = nv50_devinit_init, 167 167 .fini = _nvkm_devinit_fini,
+3
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
··· 15 15 16 16 int gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32); 17 17 18 + int gf100_devinit_ctor(struct nvkm_object *, struct nvkm_object *, 19 + struct nvkm_oclass *, void *, u32, 20 + struct nvkm_object **); 18 21 int gf100_devinit_pll_set(struct nvkm_devinit *, u32, u32); 19 22 20 23 u64 gm107_devinit_disable(struct nvkm_devinit *);
+3 -1
drivers/gpu/drm/radeon/atombios_crtc.c
··· 1798 1798 if ((crtc->mode.clock == test_crtc->mode.clock) && 1799 1799 (adjusted_clock == test_adjusted_clock) && 1800 1800 (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) && 1801 - (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)) 1801 + (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) && 1802 + (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) == 1803 + drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector)))) 1802 1804 return test_radeon_crtc->pll_id; 1803 1805 } 1804 1806 }
+11 -9
drivers/gpu/drm/radeon/atombios_dp.c
··· 421 421 { 422 422 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 423 423 u8 msg[DP_DPCD_SIZE]; 424 - int ret; 424 + int ret, i; 425 425 426 - ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, 427 - DP_DPCD_SIZE); 428 - if (ret > 0) { 429 - memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); 426 + for (i = 0; i < 7; i++) { 427 + ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, 428 + DP_DPCD_SIZE); 429 + if (ret == DP_DPCD_SIZE) { 430 + memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); 430 431 431 - DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), 432 - dig_connector->dpcd); 432 + DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), 433 + dig_connector->dpcd); 433 434 434 - radeon_dp_probe_oui(radeon_connector); 435 + radeon_dp_probe_oui(radeon_connector); 435 436 436 - return true; 437 + return true; 438 + } 437 439 } 438 440 dig_connector->dpcd[0] = 0; 439 441 return false;
+1 -1
drivers/gpu/drm/radeon/cik.c
··· 5822 5822 L2_CACHE_BIGK_FRAGMENT_SIZE(4)); 5823 5823 /* setup context0 */ 5824 5824 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 5825 - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); 5825 + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); 5826 5826 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 5827 5827 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 5828 5828 (u32)(rdev->dummy_page.addr >> 12));
+1 -1
drivers/gpu/drm/radeon/evergreen.c
··· 2485 2485 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 2486 2486 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); 2487 2487 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 2488 - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); 2488 + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); 2489 2489 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 2490 2490 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 2491 2491 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+3 -2
drivers/gpu/drm/radeon/evergreen_hdmi.c
··· 400 400 if (enable) { 401 401 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 402 402 403 - if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { 403 + if (connector && drm_detect_monitor_audio(radeon_connector_edid(connector))) { 404 404 WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 405 405 HDMI_AVI_INFO_SEND | /* enable AVI info frames */ 406 406 HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */ ··· 438 438 if (!dig || !dig->afmt) 439 439 return; 440 440 441 - if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) { 441 + if (enable && connector && 442 + drm_detect_monitor_audio(radeon_connector_edid(connector))) { 442 443 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 443 444 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 444 445 struct radeon_connector_atom_dig *dig_connector;
+1 -1
drivers/gpu/drm/radeon/ni.c
··· 1282 1282 L2_CACHE_BIGK_FRAGMENT_SIZE(6)); 1283 1283 /* setup context0 */ 1284 1284 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 1285 - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); 1285 + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); 1286 1286 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 1287 1287 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 1288 1288 (u32)(rdev->dummy_page.addr >> 12));
+1 -1
drivers/gpu/drm/radeon/r600.c
··· 1112 1112 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 1113 1113 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 1114 1114 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 1115 - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); 1115 + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); 1116 1116 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 1117 1117 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 1118 1118 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+12 -15
drivers/gpu/drm/radeon/radeon_audio.c
··· 460 460 if (!connector || !connector->encoder) 461 461 return; 462 462 463 - if (!radeon_encoder_is_digital(connector->encoder)) 464 - return; 465 - 466 463 rdev = connector->encoder->dev->dev_private; 467 464 468 465 if (!radeon_audio_chipset_supported(rdev)) ··· 468 471 radeon_encoder = to_radeon_encoder(connector->encoder); 469 472 dig = radeon_encoder->enc_priv; 470 473 471 - if (!dig->afmt) 472 - return; 473 - 474 474 if (status == connector_status_connected) { 475 - struct radeon_connector *radeon_connector = to_radeon_connector(connector); 475 + struct radeon_connector *radeon_connector; 476 + int sink_type; 477 + 478 + if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) { 479 + radeon_encoder->audio = NULL; 480 + return; 481 + } 482 + 483 + radeon_connector = to_radeon_connector(connector); 484 + sink_type = radeon_dp_getsinktype(radeon_connector); 476 485 477 486 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && 478 - radeon_dp_getsinktype(radeon_connector) == 479 - CONNECTOR_OBJECT_ID_DISPLAYPORT) 487 + sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) 480 488 radeon_encoder->audio = rdev->audio.dp_funcs; 481 489 else 482 490 radeon_encoder->audio = rdev->audio.hdmi_funcs; 483 491 484 492 dig->afmt->pin = radeon_audio_get_pin(connector->encoder); 485 - if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { 486 - radeon_audio_enable(rdev, dig->afmt->pin, 0xf); 487 - } else { 488 - radeon_audio_enable(rdev, dig->afmt->pin, 0); 489 - dig->afmt->pin = NULL; 490 - } 493 + radeon_audio_enable(rdev, dig->afmt->pin, 0xf); 491 494 } else { 492 495 radeon_audio_enable(rdev, dig->afmt->pin, 0); 493 496 dig->afmt->pin = NULL;
+2 -6
drivers/gpu/drm/radeon/radeon_connectors.c
··· 1379 1379 /* updated in get modes as well since we need to know if it's analog or digital */ 1380 1380 radeon_connector_update_scratch_regs(connector, ret); 1381 1381 1382 - if (radeon_audio != 0) { 1383 - radeon_connector_get_edid(connector); 1382 + if (radeon_audio != 0) 1384 1383 radeon_audio_detect(connector, ret); 1385 - } 1386 1384 1387 1385 exit: 1388 1386 pm_runtime_mark_last_busy(connector->dev->dev); ··· 1717 1719 1718 1720 radeon_connector_update_scratch_regs(connector, ret); 1719 1721 1720 - if (radeon_audio != 0) { 1721 - radeon_connector_get_edid(connector); 1722 + if (radeon_audio != 0) 1722 1723 radeon_audio_detect(connector, ret); 1723 - } 1724 1724 1725 1725 out: 1726 1726 pm_runtime_mark_last_busy(connector->dev->dev);
-2
drivers/gpu/drm/radeon/radeon_dp_auxch.c
··· 30 30 AUX_SW_RX_HPD_DISCON | \ 31 31 AUX_SW_RX_PARTIAL_BYTE | \ 32 32 AUX_SW_NON_AUX_MODE | \ 33 - AUX_SW_RX_MIN_COUNT_VIOL | \ 34 - AUX_SW_RX_INVALID_STOP | \ 35 33 AUX_SW_RX_SYNC_INVALID_L | \ 36 34 AUX_SW_RX_SYNC_INVALID_H | \ 37 35 AUX_SW_RX_INVALID_START | \
+1 -1
drivers/gpu/drm/radeon/rv770.c
··· 921 921 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 922 922 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); 923 923 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 924 - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); 924 + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); 925 925 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 926 926 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 927 927 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+1 -1
drivers/gpu/drm/radeon/si.c
··· 4303 4303 L2_CACHE_BIGK_FRAGMENT_SIZE(4)); 4304 4304 /* setup context0 */ 4305 4305 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 4306 - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); 4306 + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); 4307 4307 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 4308 4308 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 4309 4309 (u32)(rdev->dummy_page.addr >> 12));
+1 -1
drivers/gpu/drm/vgem/Makefile
··· 1 1 ccflags-y := -Iinclude/drm 2 - vgem-y := vgem_drv.o vgem_dma_buf.o 2 + vgem-y := vgem_drv.o 3 3 4 4 obj-$(CONFIG_DRM_VGEM) += vgem.o
-94
drivers/gpu/drm/vgem/vgem_dma_buf.c
··· 1 - /* 2 - * Copyright © 2012 Intel Corporation 3 - * Copyright © 2014 The Chromium OS Authors 4 - * 5 - * Permission is hereby granted, free of charge, to any person obtaining a 6 - * copy of this software and associated documentation files (the "Software"), 7 - * to deal in the Software without restriction, including without limitation 8 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 - * and/or sell copies of the Software, and to permit persons to whom the 10 - * Software is furnished to do so, subject to the following conditions: 11 - * 12 - * The above copyright notice and this permission notice (including the next 13 - * paragraph) shall be included in all copies or substantial portions of the 14 - * Software. 15 - * 16 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 22 - * IN THE SOFTWARE. 
23 - * 24 - * Authors: 25 - * Ben Widawsky <ben@bwidawsk.net> 26 - * 27 - */ 28 - 29 - #include <linux/dma-buf.h> 30 - #include "vgem_drv.h" 31 - 32 - struct sg_table *vgem_gem_prime_get_sg_table(struct drm_gem_object *gobj) 33 - { 34 - struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); 35 - BUG_ON(obj->pages == NULL); 36 - 37 - return drm_prime_pages_to_sg(obj->pages, obj->base.size / PAGE_SIZE); 38 - } 39 - 40 - int vgem_gem_prime_pin(struct drm_gem_object *gobj) 41 - { 42 - struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); 43 - return vgem_gem_get_pages(obj); 44 - } 45 - 46 - void vgem_gem_prime_unpin(struct drm_gem_object *gobj) 47 - { 48 - struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); 49 - vgem_gem_put_pages(obj); 50 - } 51 - 52 - void *vgem_gem_prime_vmap(struct drm_gem_object *gobj) 53 - { 54 - struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); 55 - BUG_ON(obj->pages == NULL); 56 - 57 - return vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL); 58 - } 59 - 60 - void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 61 - { 62 - vunmap(vaddr); 63 - } 64 - 65 - struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev, 66 - struct dma_buf *dma_buf) 67 - { 68 - struct drm_vgem_gem_object *obj = NULL; 69 - int ret; 70 - 71 - obj = kzalloc(sizeof(*obj), GFP_KERNEL); 72 - if (obj == NULL) { 73 - ret = -ENOMEM; 74 - goto fail; 75 - } 76 - 77 - ret = drm_gem_object_init(dev, &obj->base, dma_buf->size); 78 - if (ret) { 79 - ret = -ENOMEM; 80 - goto fail_free; 81 - } 82 - 83 - get_dma_buf(dma_buf); 84 - 85 - obj->base.dma_buf = dma_buf; 86 - obj->use_dma_buf = true; 87 - 88 - return &obj->base; 89 - 90 - fail_free: 91 - kfree(obj); 92 - fail: 93 - return ERR_PTR(ret); 94 - }
+1 -10
drivers/gpu/drm/vgem/vgem_drv.c
··· 302 302 }; 303 303 304 304 static struct drm_driver vgem_driver = { 305 - .driver_features = DRIVER_GEM | DRIVER_PRIME, 305 + .driver_features = DRIVER_GEM, 306 306 .gem_free_object = vgem_gem_free_object, 307 307 .gem_vm_ops = &vgem_gem_vm_ops, 308 308 .ioctls = vgem_ioctls, 309 309 .fops = &vgem_driver_fops, 310 310 .dumb_create = vgem_gem_dumb_create, 311 311 .dumb_map_offset = vgem_gem_dumb_map, 312 - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 313 - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 314 - .gem_prime_export = drm_gem_prime_export, 315 - .gem_prime_import = vgem_gem_prime_import, 316 - .gem_prime_pin = vgem_gem_prime_pin, 317 - .gem_prime_unpin = vgem_gem_prime_unpin, 318 - .gem_prime_get_sg_table = vgem_gem_prime_get_sg_table, 319 - .gem_prime_vmap = vgem_gem_prime_vmap, 320 - .gem_prime_vunmap = vgem_gem_prime_vunmap, 321 312 .name = DRIVER_NAME, 322 313 .desc = DRIVER_DESC, 323 314 .date = DRIVER_DATE,
-11
drivers/gpu/drm/vgem/vgem_drv.h
··· 43 43 extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj); 44 44 extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj); 45 45 46 - /* vgem_dma_buf.c */ 47 - extern struct sg_table *vgem_gem_prime_get_sg_table( 48 - struct drm_gem_object *gobj); 49 - extern int vgem_gem_prime_pin(struct drm_gem_object *gobj); 50 - extern void vgem_gem_prime_unpin(struct drm_gem_object *gobj); 51 - extern void *vgem_gem_prime_vmap(struct drm_gem_object *gobj); 52 - extern void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 53 - extern struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev, 54 - struct dma_buf *dma_buf); 55 - 56 - 57 46 #endif
+1
drivers/hid/hid-ids.h
··· 164 164 #define USB_DEVICE_ID_ATEN_2PORTKVM 0x2204 165 165 #define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205 166 166 #define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208 167 + #define USB_DEVICE_ID_ATEN_CS682 0x2213 167 168 168 169 #define USB_VENDOR_ID_ATMEL 0x03eb 169 170 #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
-20
drivers/hid/hid-logitech-hidpp.c
··· 44 44 /* bits 1..20 are reserved for classes */ 45 45 #define HIDPP_QUIRK_DELAYED_INIT BIT(21) 46 46 #define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22) 47 - #define HIDPP_QUIRK_MULTI_INPUT BIT(23) 48 47 49 48 /* 50 49 * There are two hidpp protocols in use, the first version hidpp10 is known ··· 705 706 struct hid_field *field, struct hid_usage *usage, 706 707 unsigned long **bit, int *max) 707 708 { 708 - struct hidpp_device *hidpp = hid_get_drvdata(hdev); 709 - 710 - if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) && 711 - (field->application == HID_GD_KEYBOARD)) 712 - return 0; 713 - 714 709 return -1; 715 710 } 716 711 ··· 712 719 struct input_dev *input_dev, bool origin_is_hid_core) 713 720 { 714 721 struct wtp_data *wd = hidpp->private_data; 715 - 716 - if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) && origin_is_hid_core) 717 - /* this is the generic hid-input call */ 718 - return; 719 722 720 723 __set_bit(EV_ABS, input_dev->evbit); 721 724 __set_bit(EV_KEY, input_dev->evbit); ··· 1234 1245 if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) 1235 1246 connect_mask &= ~HID_CONNECT_HIDINPUT; 1236 1247 1237 - /* Re-enable hidinput for multi-input devices */ 1238 - if (hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) 1239 - connect_mask |= HID_CONNECT_HIDINPUT; 1240 - 1241 1248 ret = hid_hw_start(hdev, connect_mask); 1242 1249 if (ret) { 1243 1250 hid_err(hdev, "%s:hid_hw_start returned error\n", __func__); ··· 1281 1296 HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 1282 1297 USB_DEVICE_ID_LOGITECH_T651), 1283 1298 .driver_data = HIDPP_QUIRK_CLASS_WTP }, 1284 - { /* Keyboard TK820 */ 1285 - HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, 1286 - USB_VENDOR_ID_LOGITECH, 0x4102), 1287 - .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_MULTI_INPUT | 1288 - HIDPP_QUIRK_CLASS_WTP }, 1289 1299 1290 1300 { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, 1291 1301 USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
+10 -3
drivers/hid/hid-sensor-hub.c
··· 294 294 if (!report) 295 295 return -EINVAL; 296 296 297 - mutex_lock(&hsdev->mutex); 297 + mutex_lock(hsdev->mutex_ptr); 298 298 if (flag == SENSOR_HUB_SYNC) { 299 299 memset(&hsdev->pending, 0, sizeof(hsdev->pending)); 300 300 init_completion(&hsdev->pending.ready); ··· 328 328 kfree(hsdev->pending.raw_data); 329 329 hsdev->pending.status = false; 330 330 } 331 - mutex_unlock(&hsdev->mutex); 331 + mutex_unlock(hsdev->mutex_ptr); 332 332 333 333 return ret_val; 334 334 } ··· 667 667 hsdev->vendor_id = hdev->vendor; 668 668 hsdev->product_id = hdev->product; 669 669 hsdev->usage = collection->usage; 670 - mutex_init(&hsdev->mutex); 670 + hsdev->mutex_ptr = devm_kzalloc(&hdev->dev, 671 + sizeof(struct mutex), 672 + GFP_KERNEL); 673 + if (!hsdev->mutex_ptr) { 674 + ret = -ENOMEM; 675 + goto err_stop_hw; 676 + } 677 + mutex_init(hsdev->mutex_ptr); 671 678 hsdev->start_collection_index = i; 672 679 if (last_hsdev) 673 680 last_hsdev->end_collection_index = i;
+4 -1
drivers/hid/i2c-hid/i2c-hid.c
··· 862 862 union acpi_object *obj; 863 863 struct acpi_device *adev; 864 864 acpi_handle handle; 865 + int ret; 865 866 866 867 handle = ACPI_HANDLE(&client->dev); 867 868 if (!handle || acpi_bus_get_device(handle, &adev)) ··· 878 877 pdata->hid_descriptor_address = obj->integer.value; 879 878 ACPI_FREE(obj); 880 879 881 - return acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios); 880 + /* GPIOs are optional */ 881 + ret = acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios); 882 + return ret < 0 && ret != -ENXIO ? ret : 0; 882 883 } 883 884 884 885 static const struct acpi_device_id i2c_hid_acpi_match[] = {
+1
drivers/hid/usbhid/hid-quirks.c
··· 61 61 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, 62 62 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET }, 63 63 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET }, 64 + { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET }, 64 65 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET }, 65 66 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET }, 66 67 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
+3
drivers/hid/wacom_wac.c
··· 1072 1072 int count = 0; 1073 1073 int i; 1074 1074 1075 + if (!touch_max) 1076 + return 0; 1077 + 1075 1078 /* non-HID_GENERIC single touch input doesn't call this routine */ 1076 1079 if ((touch_max == 1) && (wacom->features.type == HID_GENERIC)) 1077 1080 return wacom->hid_data.tipswitch &&
+2
drivers/hwmon/nct6683.c
··· 439 439 (*t)->dev_attr.attr.name, tg->base + i); 440 440 if ((*t)->s2) { 441 441 a2 = &su->u.a2; 442 + sysfs_attr_init(&a2->dev_attr.attr); 442 443 a2->dev_attr.attr.name = su->name; 443 444 a2->nr = (*t)->u.s.nr + i; 444 445 a2->index = (*t)->u.s.index; ··· 450 449 *attrs = &a2->dev_attr.attr; 451 450 } else { 452 451 a = &su->u.a1; 452 + sysfs_attr_init(&a->dev_attr.attr); 453 453 a->dev_attr.attr.name = su->name; 454 454 a->index = (*t)->u.index + i; 455 455 a->dev_attr.attr.mode =
+2
drivers/hwmon/nct6775.c
··· 995 995 (*t)->dev_attr.attr.name, tg->base + i); 996 996 if ((*t)->s2) { 997 997 a2 = &su->u.a2; 998 + sysfs_attr_init(&a2->dev_attr.attr); 998 999 a2->dev_attr.attr.name = su->name; 999 1000 a2->nr = (*t)->u.s.nr + i; 1000 1001 a2->index = (*t)->u.s.index; ··· 1006 1005 *attrs = &a2->dev_attr.attr; 1007 1006 } else { 1008 1007 a = &su->u.a1; 1008 + sysfs_attr_init(&a->dev_attr.attr); 1009 1009 a->dev_attr.attr.name = su->name; 1010 1010 a->index = (*t)->u.index + i; 1011 1011 a->dev_attr.attr.mode =
+9
drivers/hwmon/ntc_thermistor.c
··· 239 239 ntc_thermistor_parse_dt(struct platform_device *pdev) 240 240 { 241 241 struct iio_channel *chan; 242 + enum iio_chan_type type; 242 243 struct device_node *np = pdev->dev.of_node; 243 244 struct ntc_thermistor_platform_data *pdata; 245 + int ret; 244 246 245 247 if (!np) 246 248 return NULL; ··· 254 252 chan = iio_channel_get(&pdev->dev, NULL); 255 253 if (IS_ERR(chan)) 256 254 return ERR_CAST(chan); 255 + 256 + ret = iio_get_channel_type(chan, &type); 257 + if (ret < 0) 258 + return ERR_PTR(ret); 259 + 260 + if (type != IIO_VOLTAGE) 261 + return ERR_PTR(-EINVAL); 257 262 258 263 if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv)) 259 264 return ERR_PTR(-ENODEV);
+1 -1
drivers/hwmon/tmp401.c
··· 44 44 #include <linux/sysfs.h> 45 45 46 46 /* Addresses to scan */ 47 - static const unsigned short normal_i2c[] = { 0x37, 0x48, 0x49, 0x4a, 0x4c, 0x4d, 47 + static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d, 48 48 0x4e, 0x4f, I2C_CLIENT_END }; 49 49 50 50 enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
+1 -1
drivers/infiniband/core/cm.c
··· 861 861 cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT); 862 862 break; 863 863 case IB_CM_REQ_SENT: 864 + case IB_CM_MRA_REQ_RCVD: 864 865 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 865 866 spin_unlock_irq(&cm_id_priv->lock); 866 867 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT, ··· 880 879 NULL, 0, NULL, 0); 881 880 } 882 881 break; 883 - case IB_CM_MRA_REQ_RCVD: 884 882 case IB_CM_REP_SENT: 885 883 case IB_CM_MRA_REP_RCVD: 886 884 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+21 -11
drivers/infiniband/core/cma.c
··· 845 845 listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr; 846 846 ib = (struct sockaddr_ib *) &id->route.addr.src_addr; 847 847 ib->sib_family = listen_ib->sib_family; 848 - ib->sib_pkey = path->pkey; 849 - ib->sib_flowinfo = path->flow_label; 850 - memcpy(&ib->sib_addr, &path->sgid, 16); 848 + if (path) { 849 + ib->sib_pkey = path->pkey; 850 + ib->sib_flowinfo = path->flow_label; 851 + memcpy(&ib->sib_addr, &path->sgid, 16); 852 + } else { 853 + ib->sib_pkey = listen_ib->sib_pkey; 854 + ib->sib_flowinfo = listen_ib->sib_flowinfo; 855 + ib->sib_addr = listen_ib->sib_addr; 856 + } 851 857 ib->sib_sid = listen_ib->sib_sid; 852 858 ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL); 853 859 ib->sib_scope_id = listen_ib->sib_scope_id; 854 860 855 - ib = (struct sockaddr_ib *) &id->route.addr.dst_addr; 856 - ib->sib_family = listen_ib->sib_family; 857 - ib->sib_pkey = path->pkey; 858 - ib->sib_flowinfo = path->flow_label; 859 - memcpy(&ib->sib_addr, &path->dgid, 16); 861 + if (path) { 862 + ib = (struct sockaddr_ib *) &id->route.addr.dst_addr; 863 + ib->sib_family = listen_ib->sib_family; 864 + ib->sib_pkey = path->pkey; 865 + ib->sib_flowinfo = path->flow_label; 866 + memcpy(&ib->sib_addr, &path->dgid, 16); 867 + } 860 868 } 861 869 862 870 static __be16 ss_get_port(const struct sockaddr_storage *ss) ··· 913 905 { 914 906 struct cma_hdr *hdr; 915 907 916 - if ((listen_id->route.addr.src_addr.ss_family == AF_IB) && 917 - (ib_event->event == IB_CM_REQ_RECEIVED)) { 918 - cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path); 908 + if (listen_id->route.addr.src_addr.ss_family == AF_IB) { 909 + if (ib_event->event == IB_CM_REQ_RECEIVED) 910 + cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path); 911 + else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) 912 + cma_save_ib_info(id, listen_id, NULL); 919 913 return 0; 920 914 } 921 915
+3 -1
drivers/infiniband/hw/ocrdma/ocrdma.h
··· 40 40 #include <be_roce.h> 41 41 #include "ocrdma_sli.h" 42 42 43 - #define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u" 43 + #define OCRDMA_ROCE_DRV_VERSION "10.6.0.0" 44 44 45 45 #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver" 46 46 #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" ··· 515 515 memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6)); 516 516 if (rdma_is_multicast_addr(&in6)) 517 517 rdma_get_mcast_mac(&in6, mac_addr); 518 + else if (rdma_link_local_addr(&in6)) 519 + rdma_get_ll_mac(&in6, mac_addr); 518 520 else 519 521 memcpy(mac_addr, ah_attr->dmac, ETH_ALEN); 520 522 return 0;
+10 -2
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
··· 56 56 vlan_tag = attr->vlan_id; 57 57 if (!vlan_tag || (vlan_tag > 0xFFF)) 58 58 vlan_tag = dev->pvid; 59 - if (vlan_tag && (vlan_tag < 0x1000)) { 59 + if (vlan_tag || dev->pfc_state) { 60 + if (!vlan_tag) { 61 + pr_err("ocrdma%d:Using VLAN with PFC is recommended\n", 62 + dev->id); 63 + pr_err("ocrdma%d:Using VLAN 0 for this connection\n", 64 + dev->id); 65 + } 60 66 eth.eth_type = cpu_to_be16(0x8100); 61 67 eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); 62 68 vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT; ··· 127 121 goto av_conf_err; 128 122 } 129 123 130 - if (pd->uctx) { 124 + if ((pd->uctx) && 125 + (!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) && 126 + (!rdma_link_local_addr((struct in6_addr *)attr->grh.dgid.raw))) { 131 127 status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid, 132 128 attr->dmac, &attr->vlan_id); 133 129 if (status) {
+49 -32
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
··· 933 933 struct ocrdma_eqe eqe; 934 934 struct ocrdma_eqe *ptr; 935 935 u16 cq_id; 936 + u8 mcode; 936 937 int budget = eq->cq_cnt; 937 938 938 939 do { 939 940 ptr = ocrdma_get_eqe(eq); 940 941 eqe = *ptr; 941 942 ocrdma_le32_to_cpu(&eqe, sizeof(eqe)); 943 + mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK) 944 + >> OCRDMA_EQE_MAJOR_CODE_SHIFT; 945 + if (mcode == OCRDMA_MAJOR_CODE_SENTINAL) 946 + pr_err("EQ full on eqid = 0x%x, eqe = 0x%x\n", 947 + eq->q.id, eqe.id_valid); 942 948 if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0) 943 949 break; 944 950 ··· 1440 1434 struct ocrdma_alloc_pd_range_rsp *rsp; 1441 1435 1442 1436 /* Pre allocate the DPP PDs */ 1443 - cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd)); 1444 - if (!cmd) 1445 - return -ENOMEM; 1446 - cmd->pd_count = dev->attr.max_dpp_pds; 1447 - cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP; 1448 - status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 1449 - if (status) 1450 - goto mbx_err; 1451 - rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd; 1437 + if (dev->attr.max_dpp_pds) { 1438 + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, 1439 + sizeof(*cmd)); 1440 + if (!cmd) 1441 + return -ENOMEM; 1442 + cmd->pd_count = dev->attr.max_dpp_pds; 1443 + cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP; 1444 + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 1445 + rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd; 1452 1446 1453 - if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) { 1454 - dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >> 1455 - OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT; 1456 - dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid & 1457 - OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; 1458 - dev->pd_mgr->max_dpp_pd = rsp->pd_count; 1459 - pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long); 1460 - dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size, 1461 - GFP_KERNEL); 1447 + if (!status && (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && 1448 + 
rsp->pd_count) { 1449 + dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >> 1450 + OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT; 1451 + dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid & 1452 + OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; 1453 + dev->pd_mgr->max_dpp_pd = rsp->pd_count; 1454 + pd_bitmap_size = 1455 + BITS_TO_LONGS(rsp->pd_count) * sizeof(long); 1456 + dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size, 1457 + GFP_KERNEL); 1458 + } 1459 + kfree(cmd); 1462 1460 } 1463 - kfree(cmd); 1464 1461 1465 1462 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd)); 1466 1463 if (!cmd) ··· 1471 1462 1472 1463 cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds; 1473 1464 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 1474 - if (status) 1475 - goto mbx_err; 1476 1465 rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd; 1477 - if (rsp->pd_count) { 1466 + if (!status && rsp->pd_count) { 1478 1467 dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid & 1479 1468 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; 1480 1469 dev->pd_mgr->max_normal_pd = rsp->pd_count; ··· 1480 1473 dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size, 1481 1474 GFP_KERNEL); 1482 1475 } 1476 + kfree(cmd); 1483 1477 1484 1478 if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) { 1485 1479 /* Enable PD resource manager */ 1486 1480 dev->pd_mgr->pd_prealloc_valid = true; 1487 - } else { 1488 - return -ENOMEM; 1481 + return 0; 1489 1482 } 1490 - mbx_err: 1491 - kfree(cmd); 1492 1483 return status; 1493 1484 } 1494 1485 ··· 2411 2406 struct ocrdma_query_qp *cmd; 2412 2407 struct ocrdma_query_qp_rsp *rsp; 2413 2408 2414 - cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*cmd)); 2409 + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*rsp)); 2415 2410 if (!cmd) 2416 2411 return status; 2417 2412 cmd->qp_id = qp->id; ··· 2433 2428 int status; 2434 2429 struct ib_ah_attr *ah_attr = &attrs->ah_attr; 2435 2430 union ib_gid sgid, zgid; 2436 - u32 vlan_id; 2431 + u32 vlan_id = 0xFFFF; 
2437 2432 u8 mac_addr[6]; 2438 2433 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 2439 2434 ··· 2473 2468 cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8); 2474 2469 if (attr_mask & IB_QP_VID) { 2475 2470 vlan_id = attrs->vlan_id; 2471 + } else if (dev->pfc_state) { 2472 + vlan_id = 0; 2473 + pr_err("ocrdma%d:Using VLAN with PFC is recommended\n", 2474 + dev->id); 2475 + pr_err("ocrdma%d:Using VLAN 0 for this connection\n", 2476 + dev->id); 2477 + } 2478 + 2479 + if (vlan_id < 0x1000) { 2476 2480 cmd->params.vlan_dmac_b4_to_b5 |= 2477 2481 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; 2478 2482 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID; 2479 2483 cmd->params.rnt_rc_sl_fl |= 2480 2484 (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT; 2481 2485 } 2486 + 2482 2487 return 0; 2483 2488 } 2484 2489 ··· 2534 2519 cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID; 2535 2520 } 2536 2521 if (attr_mask & IB_QP_PATH_MTU) { 2537 - if (attrs->path_mtu < IB_MTU_256 || 2522 + if (attrs->path_mtu < IB_MTU_512 || 2538 2523 attrs->path_mtu > IB_MTU_4096) { 2524 + pr_err("ocrdma%d: IB MTU %d is not supported\n", 2525 + dev->id, ib_mtu_enum_to_int(attrs->path_mtu)); 2539 2526 status = -EINVAL; 2540 2527 goto pmtu_err; 2541 2528 } ··· 3164 3147 ocrdma_free_pd_pool(dev); 3165 3148 ocrdma_mbx_delete_ah_tbl(dev); 3166 3149 3167 - /* cleanup the eqs */ 3168 - ocrdma_destroy_eqs(dev); 3169 - 3170 3150 /* cleanup the control path */ 3171 3151 ocrdma_destroy_mq(dev); 3152 + 3153 + /* cleanup the eqs */ 3154 + ocrdma_destroy_eqs(dev); 3172 3155 }
+9
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
··· 1176 1176 struct ocrdma_mqe_hdr hdr; 1177 1177 struct ocrdma_mbx_rsp rsp; 1178 1178 struct ocrdma_qp_params params; 1179 + u32 dpp_credits_cqid; 1180 + u32 rbq_id; 1179 1181 }; 1180 1182 1181 1183 enum { ··· 1626 1624 enum { 1627 1625 OCRDMA_EQE_VALID_SHIFT = 0, 1628 1626 OCRDMA_EQE_VALID_MASK = BIT(0), 1627 + OCRDMA_EQE_MAJOR_CODE_MASK = 0x0E, 1628 + OCRDMA_EQE_MAJOR_CODE_SHIFT = 0x01, 1629 1629 OCRDMA_EQE_FOR_CQE_MASK = 0xFFFE, 1630 1630 OCRDMA_EQE_RESOURCE_ID_SHIFT = 16, 1631 1631 OCRDMA_EQE_RESOURCE_ID_MASK = 0xFFFF << 1632 1632 OCRDMA_EQE_RESOURCE_ID_SHIFT, 1633 + }; 1634 + 1635 + enum major_code { 1636 + OCRDMA_MAJOR_CODE_COMPLETION = 0x00, 1637 + OCRDMA_MAJOR_CODE_SENTINAL = 0x01 1633 1638 }; 1634 1639 1635 1640 struct ocrdma_eqe {
+7 -5
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
··· 365 365 if (!pd) 366 366 return ERR_PTR(-ENOMEM); 367 367 368 - if (udata && uctx) { 368 + if (udata && uctx && dev->attr.max_dpp_pds) { 369 369 pd->dpp_enabled = 370 370 ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R; 371 371 pd->num_dpp_qp = ··· 1721 1721 struct ocrdma_qp *qp; 1722 1722 struct ocrdma_dev *dev; 1723 1723 struct ib_qp_attr attrs; 1724 - int attr_mask = IB_QP_STATE; 1724 + int attr_mask; 1725 1725 unsigned long flags; 1726 1726 1727 1727 qp = get_ocrdma_qp(ibqp); 1728 1728 dev = get_ocrdma_dev(ibqp->device); 1729 1729 1730 - attrs.qp_state = IB_QPS_ERR; 1731 1730 pd = qp->pd; 1732 1731 1733 1732 /* change the QP state to ERROR */ 1734 - _ocrdma_modify_qp(ibqp, &attrs, attr_mask); 1735 - 1733 + if (qp->state != OCRDMA_QPS_RST) { 1734 + attrs.qp_state = IB_QPS_ERR; 1735 + attr_mask = IB_QP_STATE; 1736 + _ocrdma_modify_qp(ibqp, &attrs, attr_mask); 1737 + } 1736 1738 /* ensure that CQEs for newly created QP (whose id may be same with 1737 1739 * one which just getting destroyed are same), dont get 1738 1740 * discarded until the old CQEs are discarded.
+3 -3
drivers/infiniband/ulp/isert/ib_isert.c
··· 547 547 return 0; 548 548 549 549 err_prot_mr: 550 - ib_dereg_mr(desc->pi_ctx->prot_mr); 550 + ib_dereg_mr(pi_ctx->prot_mr); 551 551 err_prot_frpl: 552 - ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl); 552 + ib_free_fast_reg_page_list(pi_ctx->prot_frpl); 553 553 err_pi_ctx: 554 - kfree(desc->pi_ctx); 554 + kfree(pi_ctx); 555 555 556 556 return ret; 557 557 }
+61
drivers/input/joydev.c
··· 747 747 input_close_device(handle); 748 748 } 749 749 750 + static bool joydev_dev_is_absolute_mouse(struct input_dev *dev) 751 + { 752 + DECLARE_BITMAP(jd_scratch, KEY_CNT); 753 + 754 + BUILD_BUG_ON(ABS_CNT > KEY_CNT || EV_CNT > KEY_CNT); 755 + 756 + /* 757 + * Virtualization (VMware, etc) and remote management (HP 758 + * ILO2) solutions use absolute coordinates for their virtual 759 + * pointing devices so that there is one-to-one relationship 760 + * between pointer position on the host screen and virtual 761 + * guest screen, and so their mice use ABS_X, ABS_Y and 3 762 + * primary button events. This clashes with what joydev 763 + * considers to be joysticks (a device with at minimum ABS_X 764 + * axis). 765 + * 766 + * Here we are trying to separate absolute mice from 767 + * joysticks. A device is, for joystick detection purposes, 768 + * considered to be an absolute mouse if the following is 769 + * true: 770 + * 771 + * 1) Event types are exactly EV_ABS, EV_KEY and EV_SYN. 772 + * 2) Absolute events are exactly ABS_X and ABS_Y. 773 + * 3) Keys are exactly BTN_LEFT, BTN_RIGHT and BTN_MIDDLE. 774 + * 4) Device is not on "Amiga" bus. 775 + */ 776 + 777 + bitmap_zero(jd_scratch, EV_CNT); 778 + __set_bit(EV_ABS, jd_scratch); 779 + __set_bit(EV_KEY, jd_scratch); 780 + __set_bit(EV_SYN, jd_scratch); 781 + if (!bitmap_equal(jd_scratch, dev->evbit, EV_CNT)) 782 + return false; 783 + 784 + bitmap_zero(jd_scratch, ABS_CNT); 785 + __set_bit(ABS_X, jd_scratch); 786 + __set_bit(ABS_Y, jd_scratch); 787 + if (!bitmap_equal(dev->absbit, jd_scratch, ABS_CNT)) 788 + return false; 789 + 790 + bitmap_zero(jd_scratch, KEY_CNT); 791 + __set_bit(BTN_LEFT, jd_scratch); 792 + __set_bit(BTN_RIGHT, jd_scratch); 793 + __set_bit(BTN_MIDDLE, jd_scratch); 794 + 795 + if (!bitmap_equal(dev->keybit, jd_scratch, KEY_CNT)) 796 + return false; 797 + 798 + /* 799 + * Amiga joystick (amijoy) historically uses left/middle/right 800 + * button events. 
801 + */ 802 + if (dev->id.bustype == BUS_AMIGA) 803 + return false; 804 + 805 + return true; 806 + } 750 807 751 808 static bool joydev_match(struct input_handler *handler, struct input_dev *dev) 752 809 { ··· 813 756 814 757 /* Avoid tablets, digitisers and similar devices */ 815 758 if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_DIGI, dev->keybit)) 759 + return false; 760 + 761 + /* Avoid absolute mice */ 762 + if (joydev_dev_is_absolute_mouse(dev)) 816 763 return false; 817 764 818 765 return true;
+1 -1
drivers/input/mouse/Kconfig
··· 156 156 Say Y here if you are running under control of VMware hypervisor 157 157 (ESXi, Workstation or Fusion). Also make sure that when you enable 158 158 this option, you remove the xf86-input-vmmouse user-space driver 159 - or upgrade it to at least xf86-input-vmmouse 13.0.1, which doesn't 159 + or upgrade it to at least xf86-input-vmmouse 13.1.0, which doesn't 160 160 load in the presence of an in-kernel vmmouse driver. 161 161 162 162 If unsure, say N.
+5
drivers/input/mouse/alps.c
··· 941 941 case V7_PACKET_ID_TWO: 942 942 mt[1].x &= ~0x000F; 943 943 mt[1].y |= 0x000F; 944 + /* Detect false-positive touches where x & y report max value */ 945 + if (mt[1].y == 0x7ff && mt[1].x == 0xff0) { 946 + mt[1].x = 0; 947 + /* y gets set to 0 at the end of this function */ 948 + } 944 949 break; 945 950 946 951 case V7_PACKET_ID_MULTI:
+1 -1
drivers/input/mouse/elantech.c
··· 315 315 unsigned int x2, unsigned int y2) 316 316 { 317 317 elantech_set_slot(dev, 0, num_fingers != 0, x1, y1); 318 - elantech_set_slot(dev, 1, num_fingers == 2, x2, y2); 318 + elantech_set_slot(dev, 1, num_fingers >= 2, x2, y2); 319 319 } 320 320 321 321 /*
+1 -1
drivers/input/touchscreen/stmpe-ts.c
··· 164 164 STMPE_TSC_CTRL_TSC_EN, STMPE_TSC_CTRL_TSC_EN); 165 165 166 166 /* start polling for touch_det to detect release */ 167 - schedule_delayed_work(&ts->work, HZ / 50); 167 + schedule_delayed_work(&ts->work, msecs_to_jiffies(50)); 168 168 169 169 return IRQ_HANDLED; 170 170 }
+1 -1
drivers/input/touchscreen/sx8654.c
··· 187 187 return -ENOMEM; 188 188 189 189 input = devm_input_allocate_device(&client->dev); 190 - if (!sx8654) 190 + if (!input) 191 191 return -ENOMEM; 192 192 193 193 input->name = "SX8654 I2C Touchscreen";
+8 -1
drivers/irqchip/irq-gic-v3-its.c
··· 828 828 u64 typer = readq_relaxed(its->base + GITS_TYPER); 829 829 u32 ids = GITS_TYPER_DEVBITS(typer); 830 830 831 - order = get_order((1UL << ids) * entry_size); 831 + /* 832 + * 'order' was initialized earlier to the default page 833 + * granule of the ITS. We can't have an allocation 834 + * smaller than that. If the requested allocation 835 + * is smaller, round up to the default page granule. 836 + */ 837 + order = max(get_order((1UL << ids) * entry_size), 838 + order); 832 839 if (order >= MAX_ORDER) { 833 840 order = MAX_ORDER - 1; 834 841 pr_warn("%s: Device Table too large, reduce its page order to %u\n",
+1 -1
drivers/lguest/core.c
··· 173 173 bool lguest_address_ok(const struct lguest *lg, 174 174 unsigned long addr, unsigned long len) 175 175 { 176 - return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); 176 + return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr); 177 177 } 178 178 179 179 /*
+6 -1
drivers/md/bitmap.c
··· 177 177 * nr_pending is 0 and In_sync is clear, the entries we return will 178 178 * still be in the same position on the list when we re-enter 179 179 * list_for_each_entry_continue_rcu. 180 + * 181 + * Note that if entered with 'rdev == NULL' to start at the 182 + * beginning, we temporarily assign 'rdev' to an address which 183 + * isn't really an rdev, but which can be used by 184 + * list_for_each_entry_continue_rcu() to find the first entry. 180 185 */ 181 186 rcu_read_lock(); 182 187 if (rdev == NULL) 183 188 /* start at the beginning */ 184 - rdev = list_entry_rcu(&mddev->disks, struct md_rdev, same_set); 189 + rdev = list_entry(&mddev->disks, struct md_rdev, same_set); 185 190 else { 186 191 /* release the previous rdev and start from there. */ 187 192 rdev_dec_pending(rdev, mddev);
+3 -1
drivers/md/dm-mpath.c
··· 429 429 /* blk-mq request-based interface */ 430 430 *__clone = blk_get_request(bdev_get_queue(bdev), 431 431 rq_data_dir(rq), GFP_ATOMIC); 432 - if (IS_ERR(*__clone)) 432 + if (IS_ERR(*__clone)) { 433 433 /* ENOMEM, requeue */ 434 + clear_mapinfo(m, map_context); 434 435 return r; 436 + } 435 437 (*__clone)->bio = (*__clone)->biotail = NULL; 436 438 (*__clone)->rq_disk = bdev->bd_disk; 437 439 (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+9 -7
drivers/md/dm-table.c
··· 820 820 } 821 821 EXPORT_SYMBOL(dm_consume_args); 822 822 823 + static bool __table_type_request_based(unsigned table_type) 824 + { 825 + return (table_type == DM_TYPE_REQUEST_BASED || 826 + table_type == DM_TYPE_MQ_REQUEST_BASED); 827 + } 828 + 823 829 static int dm_table_set_type(struct dm_table *t) 824 830 { 825 831 unsigned i; ··· 858 852 * Determine the type from the live device. 859 853 * Default to bio-based if device is new. 860 854 */ 861 - if (live_md_type == DM_TYPE_REQUEST_BASED || 862 - live_md_type == DM_TYPE_MQ_REQUEST_BASED) 855 + if (__table_type_request_based(live_md_type)) 863 856 request_based = 1; 864 857 else 865 858 bio_based = 1; ··· 908 903 } 909 904 t->type = DM_TYPE_MQ_REQUEST_BASED; 910 905 911 - } else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) { 906 + } else if (list_empty(devices) && __table_type_request_based(live_md_type)) { 912 907 /* inherit live MD type */ 913 908 t->type = live_md_type; 914 909 ··· 930 925 931 926 bool dm_table_request_based(struct dm_table *t) 932 927 { 933 - unsigned table_type = dm_table_get_type(t); 934 - 935 - return (table_type == DM_TYPE_REQUEST_BASED || 936 - table_type == DM_TYPE_MQ_REQUEST_BASED); 928 + return __table_type_request_based(dm_table_get_type(t)); 937 929 } 938 930 939 931 bool dm_table_mq_request_based(struct dm_table *t)
+24 -16
drivers/md/dm.c
··· 1082 1082 dm_put(md); 1083 1083 } 1084 1084 1085 - static void free_rq_clone(struct request *clone, bool must_be_mapped) 1085 + static void free_rq_clone(struct request *clone) 1086 1086 { 1087 1087 struct dm_rq_target_io *tio = clone->end_io_data; 1088 1088 struct mapped_device *md = tio->md; 1089 - 1090 - WARN_ON_ONCE(must_be_mapped && !clone->q); 1091 1089 1092 1090 blk_rq_unprep_clone(clone); 1093 1091 ··· 1130 1132 rq->sense_len = clone->sense_len; 1131 1133 } 1132 1134 1133 - free_rq_clone(clone, true); 1135 + free_rq_clone(clone); 1134 1136 if (!rq->q->mq_ops) 1135 1137 blk_end_request_all(rq, error); 1136 1138 else ··· 1149 1151 } 1150 1152 1151 1153 if (clone) 1152 - free_rq_clone(clone, false); 1154 + free_rq_clone(clone); 1153 1155 } 1154 1156 1155 1157 /* ··· 1162 1164 1163 1165 spin_lock_irqsave(q->queue_lock, flags); 1164 1166 blk_requeue_request(q, rq); 1167 + blk_run_queue_async(q); 1165 1168 spin_unlock_irqrestore(q->queue_lock, flags); 1166 1169 } 1167 1170 ··· 1723 1724 struct mapped_device *md = q->queuedata; 1724 1725 struct dm_table *map = dm_get_live_table_fast(md); 1725 1726 struct dm_target *ti; 1726 - sector_t max_sectors; 1727 - int max_size = 0; 1727 + sector_t max_sectors, max_size = 0; 1728 1728 1729 1729 if (unlikely(!map)) 1730 1730 goto out; ··· 1738 1740 max_sectors = min(max_io_len(bvm->bi_sector, ti), 1739 1741 (sector_t) queue_max_sectors(q)); 1740 1742 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; 1741 - if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */ 1742 - max_size = 0; 1743 + 1744 + /* 1745 + * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t 1746 + * to the targets' merge function since it holds sectors not bytes). 1747 + * Just doing this as an interim fix for stable@ because the more 1748 + * comprehensive cleanup of switching to sector_t will impact every 1749 + * DM target that implements a ->merge hook. 
1750 + */ 1751 + if (max_size > INT_MAX) 1752 + max_size = INT_MAX; 1743 1753 1744 1754 /* 1745 1755 * merge_bvec_fn() returns number of bytes ··· 1755 1749 * max is precomputed maximal io size 1756 1750 */ 1757 1751 if (max_size && ti->type->merge) 1758 - max_size = ti->type->merge(ti, bvm, biovec, max_size); 1752 + max_size = ti->type->merge(ti, bvm, biovec, (int) max_size); 1759 1753 /* 1760 1754 * If the target doesn't support merge method and some of the devices 1761 1755 * provided their merge_bvec method (we know this by looking for the ··· 1977 1971 dm_kill_unmapped_request(rq, r); 1978 1972 return r; 1979 1973 } 1980 - if (IS_ERR(clone)) 1981 - return DM_MAPIO_REQUEUE; 1974 + if (r != DM_MAPIO_REMAPPED) 1975 + return r; 1982 1976 if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { 1983 1977 /* -ENOMEM */ 1984 1978 ti->type->release_clone_rq(clone); ··· 2759 2753 if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) { 2760 2754 /* clone request is allocated at the end of the pdu */ 2761 2755 tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io); 2762 - if (!clone_rq(rq, md, tio, GFP_ATOMIC)) 2763 - return BLK_MQ_RQ_QUEUE_BUSY; 2756 + (void) clone_rq(rq, md, tio, GFP_ATOMIC); 2764 2757 queue_kthread_work(&md->kworker, &tio->work); 2765 2758 } else { 2766 2759 /* Direct call is fine since .queue_rq allows allocations */ 2767 - if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) 2768 - dm_requeue_unmapped_original_request(md, rq); 2760 + if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) { 2761 + /* Undo dm_start_request() before requeuing */ 2762 + rq_completed(md, rq_data_dir(rq), false); 2763 + return BLK_MQ_RQ_QUEUE_BUSY; 2764 + } 2769 2765 } 2770 2766 2771 2767 return BLK_MQ_RQ_QUEUE_OK;
+8 -6
drivers/md/md.c
··· 4211 4211 if (!mddev->pers || !mddev->pers->sync_request) 4212 4212 return -EINVAL; 4213 4213 4214 - if (cmd_match(page, "frozen")) 4215 - set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4216 - else 4217 - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4218 4214 4219 4215 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 4216 + if (cmd_match(page, "frozen")) 4217 + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4218 + else 4219 + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4220 4220 flush_workqueue(md_misc_wq); 4221 4221 if (mddev->sync_thread) { 4222 4222 set_bit(MD_RECOVERY_INTR, &mddev->recovery); ··· 4229 4229 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 4230 4230 return -EBUSY; 4231 4231 else if (cmd_match(page, "resync")) 4232 - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4232 + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4233 4233 else if (cmd_match(page, "recover")) { 4234 + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4234 4235 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4235 - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4236 4236 } else if (cmd_match(page, "reshape")) { 4237 4237 int err; 4238 4238 if (mddev->pers->start_reshape == NULL) 4239 4239 return -EINVAL; 4240 4240 err = mddev_lock(mddev); 4241 4241 if (!err) { 4242 + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4242 4243 err = mddev->pers->start_reshape(mddev); 4243 4244 mddev_unlock(mddev); 4244 4245 } ··· 4251 4250 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4252 4251 else if (!cmd_match(page, "repair")) 4253 4252 return -EINVAL; 4253 + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4254 4254 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 4255 4255 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4256 4256 }
+3 -1
drivers/md/raid0.c
··· 524 524 ? (sector & (chunk_sects-1)) 525 525 : sector_div(sector, chunk_sects)); 526 526 527 + /* Restore due to sector_div */ 528 + sector = bio->bi_iter.bi_sector; 529 + 527 530 if (sectors < bio_sectors(bio)) { 528 531 split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set); 529 532 bio_chain(split, bio); ··· 534 531 split = bio; 535 532 } 536 533 537 - sector = bio->bi_iter.bi_sector; 538 534 zone = find_zone(mddev->private, &sector); 539 535 tmp_dev = map_sector(mddev, zone, sector, &sector); 540 536 split->bi_bdev = tmp_dev->bdev;
+87 -61
drivers/md/raid5.c
··· 749 749 static bool stripe_can_batch(struct stripe_head *sh) 750 750 { 751 751 return test_bit(STRIPE_BATCH_READY, &sh->state) && 752 + !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && 752 753 is_full_stripe_write(sh); 753 754 } 754 755 ··· 837 836 if (atomic_dec_return(&conf->preread_active_stripes) 838 837 < IO_THRESHOLD) 839 838 md_wakeup_thread(conf->mddev->thread); 839 + 840 + if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { 841 + int seq = sh->bm_seq; 842 + if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && 843 + sh->batch_head->bm_seq > seq) 844 + seq = sh->batch_head->bm_seq; 845 + set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); 846 + sh->batch_head->bm_seq = seq; 847 + } 840 848 841 849 atomic_inc(&sh->count); 842 850 unlock_out: ··· 1832 1822 } else 1833 1823 init_async_submit(&submit, 0, tx, NULL, NULL, 1834 1824 to_addr_conv(sh, percpu, j)); 1835 - async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1825 + tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1836 1826 if (!last_stripe) { 1837 1827 j++; 1838 1828 sh = list_first_entry(&sh->batch_list, struct stripe_head, ··· 2997 2987 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 2998 2988 (unsigned long long)(*bip)->bi_iter.bi_sector, 2999 2989 (unsigned long long)sh->sector, dd_idx); 3000 - spin_unlock_irq(&sh->stripe_lock); 3001 2990 3002 2991 if (conf->mddev->bitmap && firstwrite) { 2992 + /* Cannot hold spinlock over bitmap_startwrite, 2993 + * but must ensure this isn't added to a batch until 2994 + * we have added to the bitmap and set bm_seq. 2995 + * So set STRIPE_BITMAP_PENDING to prevent 2996 + * batching. 2997 + * If multiple add_stripe_bio() calls race here they 2998 + * much all set STRIPE_BITMAP_PENDING. So only the first one 2999 + * to complete "bitmap_startwrite" gets to set 3000 + * STRIPE_BIT_DELAY. This is important as once a stripe 3001 + * is added to a batch, STRIPE_BIT_DELAY cannot be changed 3002 + * any more. 
3003 + */ 3004 + set_bit(STRIPE_BITMAP_PENDING, &sh->state); 3005 + spin_unlock_irq(&sh->stripe_lock); 3003 3006 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 3004 3007 STRIPE_SECTORS, 0); 3005 - sh->bm_seq = conf->seq_flush+1; 3006 - set_bit(STRIPE_BIT_DELAY, &sh->state); 3008 + spin_lock_irq(&sh->stripe_lock); 3009 + clear_bit(STRIPE_BITMAP_PENDING, &sh->state); 3010 + if (!sh->batch_head) { 3011 + sh->bm_seq = conf->seq_flush+1; 3012 + set_bit(STRIPE_BIT_DELAY, &sh->state); 3013 + } 3007 3014 } 3015 + spin_unlock_irq(&sh->stripe_lock); 3008 3016 3009 3017 if (stripe_can_batch(sh)) 3010 3018 stripe_add_to_batch_list(conf, sh); ··· 3420 3392 set_bit(STRIPE_HANDLE, &sh->state); 3421 3393 } 3422 3394 3395 + static void break_stripe_batch_list(struct stripe_head *head_sh, 3396 + unsigned long handle_flags); 3423 3397 /* handle_stripe_clean_event 3424 3398 * any written block on an uptodate or failed drive can be returned. 3425 3399 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but ··· 3435 3405 int discard_pending = 0; 3436 3406 struct stripe_head *head_sh = sh; 3437 3407 bool do_endio = false; 3438 - int wakeup_nr = 0; 3439 3408 3440 3409 for (i = disks; i--; ) 3441 3410 if (sh->dev[i].written) { ··· 3523 3494 if (atomic_dec_and_test(&conf->pending_full_writes)) 3524 3495 md_wakeup_thread(conf->mddev->thread); 3525 3496 3526 - if (!head_sh->batch_head || !do_endio) 3527 - return; 3528 - for (i = 0; i < head_sh->disks; i++) { 3529 - if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) 3530 - wakeup_nr++; 3531 - } 3532 - while (!list_empty(&head_sh->batch_list)) { 3533 - int i; 3534 - sh = list_first_entry(&head_sh->batch_list, 3535 - struct stripe_head, batch_list); 3536 - list_del_init(&sh->batch_list); 3537 - 3538 - set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG, 3539 - head_sh->state & ~((1 << STRIPE_ACTIVE) | 3540 - (1 << STRIPE_PREREAD_ACTIVE) | 3541 - STRIPE_EXPAND_SYNC_FLAG)); 3542 - sh->check_state = head_sh->check_state; 
3543 - sh->reconstruct_state = head_sh->reconstruct_state; 3544 - for (i = 0; i < sh->disks; i++) { 3545 - if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3546 - wakeup_nr++; 3547 - sh->dev[i].flags = head_sh->dev[i].flags; 3548 - } 3549 - 3550 - spin_lock_irq(&sh->stripe_lock); 3551 - sh->batch_head = NULL; 3552 - spin_unlock_irq(&sh->stripe_lock); 3553 - if (sh->state & STRIPE_EXPAND_SYNC_FLAG) 3554 - set_bit(STRIPE_HANDLE, &sh->state); 3555 - release_stripe(sh); 3556 - } 3557 - 3558 - spin_lock_irq(&head_sh->stripe_lock); 3559 - head_sh->batch_head = NULL; 3560 - spin_unlock_irq(&head_sh->stripe_lock); 3561 - wake_up_nr(&conf->wait_for_overlap, wakeup_nr); 3562 - if (head_sh->state & STRIPE_EXPAND_SYNC_FLAG) 3563 - set_bit(STRIPE_HANDLE, &head_sh->state); 3497 + if (head_sh->batch_head && do_endio) 3498 + break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); 3564 3499 } 3565 3500 3566 3501 static void handle_stripe_dirtying(struct r5conf *conf, ··· 4165 4172 4166 4173 static int clear_batch_ready(struct stripe_head *sh) 4167 4174 { 4175 + /* Return '1' if this is a member of batch, or 4176 + * '0' if it is a lone stripe or a head which can now be 4177 + * handled. 
4178 + */ 4168 4179 struct stripe_head *tmp; 4169 4180 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) 4170 - return 0; 4181 + return (sh->batch_head && sh->batch_head != sh); 4171 4182 spin_lock(&sh->stripe_lock); 4172 4183 if (!sh->batch_head) { 4173 4184 spin_unlock(&sh->stripe_lock); ··· 4199 4202 return 0; 4200 4203 } 4201 4204 4202 - static void check_break_stripe_batch_list(struct stripe_head *sh) 4205 + static void break_stripe_batch_list(struct stripe_head *head_sh, 4206 + unsigned long handle_flags) 4203 4207 { 4204 - struct stripe_head *head_sh, *next; 4208 + struct stripe_head *sh, *next; 4205 4209 int i; 4206 - 4207 - if (!test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) 4208 - return; 4209 - 4210 - head_sh = sh; 4210 + int do_wakeup = 0; 4211 4211 4212 4212 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { 4213 4213 4214 4214 list_del_init(&sh->batch_list); 4215 4215 4216 - set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG, 4217 - head_sh->state & ~((1 << STRIPE_ACTIVE) | 4218 - (1 << STRIPE_PREREAD_ACTIVE) | 4219 - (1 << STRIPE_DEGRADED) | 4220 - STRIPE_EXPAND_SYNC_FLAG)); 4216 + WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | 4217 + (1 << STRIPE_SYNCING) | 4218 + (1 << STRIPE_REPLACED) | 4219 + (1 << STRIPE_PREREAD_ACTIVE) | 4220 + (1 << STRIPE_DELAYED) | 4221 + (1 << STRIPE_BIT_DELAY) | 4222 + (1 << STRIPE_FULL_WRITE) | 4223 + (1 << STRIPE_BIOFILL_RUN) | 4224 + (1 << STRIPE_COMPUTE_RUN) | 4225 + (1 << STRIPE_OPS_REQ_PENDING) | 4226 + (1 << STRIPE_DISCARD) | 4227 + (1 << STRIPE_BATCH_READY) | 4228 + (1 << STRIPE_BATCH_ERR) | 4229 + (1 << STRIPE_BITMAP_PENDING))); 4230 + WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) | 4231 + (1 << STRIPE_REPLACED))); 4232 + 4233 + set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | 4234 + (1 << STRIPE_DEGRADED)), 4235 + head_sh->state & (1 << STRIPE_INSYNC)); 4236 + 4221 4237 sh->check_state = head_sh->check_state; 4222 4238 sh->reconstruct_state = 
head_sh->reconstruct_state; 4223 - for (i = 0; i < sh->disks; i++) 4239 + for (i = 0; i < sh->disks; i++) { 4240 + if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 4241 + do_wakeup = 1; 4224 4242 sh->dev[i].flags = head_sh->dev[i].flags & 4225 4243 (~((1 << R5_WriteError) | (1 << R5_Overlap))); 4226 - 4244 + } 4227 4245 spin_lock_irq(&sh->stripe_lock); 4228 4246 sh->batch_head = NULL; 4229 4247 spin_unlock_irq(&sh->stripe_lock); 4230 - 4231 - set_bit(STRIPE_HANDLE, &sh->state); 4248 + if (handle_flags == 0 || 4249 + sh->state & handle_flags) 4250 + set_bit(STRIPE_HANDLE, &sh->state); 4232 4251 release_stripe(sh); 4233 4252 } 4253 + spin_lock_irq(&head_sh->stripe_lock); 4254 + head_sh->batch_head = NULL; 4255 + spin_unlock_irq(&head_sh->stripe_lock); 4256 + for (i = 0; i < head_sh->disks; i++) 4257 + if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) 4258 + do_wakeup = 1; 4259 + if (head_sh->state & handle_flags) 4260 + set_bit(STRIPE_HANDLE, &head_sh->state); 4261 + 4262 + if (do_wakeup) 4263 + wake_up(&head_sh->raid_conf->wait_for_overlap); 4234 4264 } 4235 4265 4236 4266 static void handle_stripe(struct stripe_head *sh) ··· 4282 4258 return; 4283 4259 } 4284 4260 4285 - check_break_stripe_batch_list(sh); 4261 + if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) 4262 + break_stripe_batch_list(sh, 0); 4286 4263 4287 4264 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { 4288 4265 spin_lock(&sh->stripe_lock); ··· 4337 4312 if (s.failed > conf->max_degraded) { 4338 4313 sh->check_state = 0; 4339 4314 sh->reconstruct_state = 0; 4315 + break_stripe_batch_list(sh, 0); 4340 4316 if (s.to_read+s.to_write+s.written) 4341 4317 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); 4342 4318 if (s.syncing + s.replacing)
+4 -1
drivers/md/raid5.h
··· 337 337 STRIPE_ON_RELEASE_LIST, 338 338 STRIPE_BATCH_READY, 339 339 STRIPE_BATCH_ERR, 340 + STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add 341 + * to batch yet. 342 + */ 340 343 }; 341 344 342 - #define STRIPE_EXPAND_SYNC_FLAG \ 345 + #define STRIPE_EXPAND_SYNC_FLAGS \ 343 346 ((1 << STRIPE_EXPAND_SOURCE) |\ 344 347 (1 << STRIPE_EXPAND_READY) |\ 345 348 (1 << STRIPE_EXPANDING) |\
+4 -4
drivers/mfd/da9052-core.c
··· 433 433 static const struct mfd_cell da9052_subdev_info[] = { 434 434 { 435 435 .name = "da9052-regulator", 436 + .id = 0, 437 + }, 438 + { 439 + .name = "da9052-regulator", 436 440 .id = 1, 437 441 }, 438 442 { ··· 486 482 { 487 483 .name = "da9052-regulator", 488 484 .id = 13, 489 - }, 490 - { 491 - .name = "da9052-regulator", 492 - .id = 14, 493 485 }, 494 486 { 495 487 .name = "da9052-onkey",
+7 -2
drivers/mmc/host/atmel-mci.c
··· 1304 1304 1305 1305 if (ios->clock) { 1306 1306 unsigned int clock_min = ~0U; 1307 - u32 clkdiv; 1307 + int clkdiv; 1308 1308 1309 1309 spin_lock_bh(&host->lock); 1310 1310 if (!host->mode_reg) { ··· 1328 1328 /* Calculate clock divider */ 1329 1329 if (host->caps.has_odd_clk_div) { 1330 1330 clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2; 1331 - if (clkdiv > 511) { 1331 + if (clkdiv < 0) { 1332 + dev_warn(&mmc->class_dev, 1333 + "clock %u too fast; using %lu\n", 1334 + clock_min, host->bus_hz / 2); 1335 + clkdiv = 0; 1336 + } else if (clkdiv > 511) { 1332 1337 dev_warn(&mmc->class_dev, 1333 1338 "clock %u too slow; using %lu\n", 1334 1339 clock_min, host->bus_hz / (511 + 2));
+1 -1
drivers/net/bonding/bond_options.c
··· 624 624 out: 625 625 if (ret) 626 626 bond_opt_error_interpret(bond, opt, ret, val); 627 - else 627 + else if (bond->dev->reg_state == NETREG_REGISTERED) 628 628 call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev); 629 629 630 630 return ret;
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
··· 1774 1774 int stats_state; 1775 1775 1776 1776 /* used for synchronization of concurrent threads statistics handling */ 1777 - struct mutex stats_lock; 1777 + struct semaphore stats_lock; 1778 1778 1779 1779 /* used by dmae command loader */ 1780 1780 struct dmae_command stats_dmae;
+5 -4
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 12054 12054 mutex_init(&bp->port.phy_mutex); 12055 12055 mutex_init(&bp->fw_mb_mutex); 12056 12056 mutex_init(&bp->drv_info_mutex); 12057 - mutex_init(&bp->stats_lock); 12057 + sema_init(&bp->stats_lock, 1); 12058 12058 bp->drv_info_mng_owner = false; 12059 12059 12060 12060 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); ··· 13690 13690 cancel_delayed_work_sync(&bp->sp_task); 13691 13691 cancel_delayed_work_sync(&bp->period_task); 13692 13692 13693 - mutex_lock(&bp->stats_lock); 13694 - bp->stats_state = STATS_STATE_DISABLED; 13695 - mutex_unlock(&bp->stats_lock); 13693 + if (!down_timeout(&bp->stats_lock, HZ / 10)) { 13694 + bp->stats_state = STATS_STATE_DISABLED; 13695 + up(&bp->stats_lock); 13696 + } 13696 13697 13697 13698 bnx2x_save_statistics(bp); 13698 13699
+14 -6
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
··· 1372 1372 * that context in case someone is in the middle of a transition. 1373 1373 * For other events, wait a bit until lock is taken. 1374 1374 */ 1375 - if (!mutex_trylock(&bp->stats_lock)) { 1375 + if (down_trylock(&bp->stats_lock)) { 1376 1376 if (event == STATS_EVENT_UPDATE) 1377 1377 return; 1378 1378 1379 1379 DP(BNX2X_MSG_STATS, 1380 1380 "Unlikely stats' lock contention [event %d]\n", event); 1381 - mutex_lock(&bp->stats_lock); 1381 + if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) { 1382 + BNX2X_ERR("Failed to take stats lock [event %d]\n", 1383 + event); 1384 + return; 1385 + } 1382 1386 } 1383 1387 1384 1388 bnx2x_stats_stm[state][event].action(bp); 1385 1389 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 1386 1390 1387 - mutex_unlock(&bp->stats_lock); 1391 + up(&bp->stats_lock); 1388 1392 1389 1393 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 1390 1394 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", ··· 1974 1970 /* Wait for statistics to end [while blocking further requests], 1975 1971 * then run supplied function 'safely'. 1976 1972 */ 1977 - mutex_lock(&bp->stats_lock); 1973 + rc = down_timeout(&bp->stats_lock, HZ / 10); 1974 + if (unlikely(rc)) { 1975 + BNX2X_ERR("Failed to take statistics lock for safe execution\n"); 1976 + goto out_no_lock; 1977 + } 1978 1978 1979 1979 bnx2x_stats_comp(bp); 1980 1980 while (bp->stats_pending && cnt--) ··· 1996 1988 /* No need to restart statistics - if they're enabled, the timer 1997 1989 * will restart the statistics. 1998 1990 */ 1999 - mutex_unlock(&bp->stats_lock); 2000 - 1991 + up(&bp->stats_lock); 1992 + out_no_lock: 2001 1993 return rc; 2002 1994 }
+2 -2
drivers/net/ethernet/brocade/bna/bfa_ioc.c
··· 2414 2414 if (status == BFA_STATUS_OK) 2415 2415 bfa_ioc_lpu_start(ioc); 2416 2416 else 2417 - bfa_nw_iocpf_timeout(ioc); 2417 + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); 2418 2418 2419 2419 return status; 2420 2420 } ··· 3029 3029 } 3030 3030 3031 3031 if (ioc->iocpf.poll_time >= BFA_IOC_TOV) { 3032 - bfa_nw_iocpf_timeout(ioc); 3032 + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); 3033 3033 } else { 3034 3034 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; 3035 3035 mod_timer(&ioc->iocpf_timer, jiffies +
-4
drivers/net/ethernet/brocade/bna/bnad.c
··· 3701 3701 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, 3702 3702 ((unsigned long)bnad)); 3703 3703 3704 - /* Now start the timer before calling IOC */ 3705 - mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer, 3706 - jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ)); 3707 - 3708 3704 /* 3709 3705 * Start the chip 3710 3706 * If the call back comes with error, we bail out.
+7
drivers/net/ethernet/brocade/bna/cna_fwimg.c
··· 30 30 u32 *bfi_image_size, char *fw_name) 31 31 { 32 32 const struct firmware *fw; 33 + u32 n; 33 34 34 35 if (request_firmware(&fw, fw_name, &pdev->dev)) { 35 36 pr_alert("Can't locate firmware %s\n", fw_name); ··· 40 39 *bfi_image = (u32 *)fw->data; 41 40 *bfi_image_size = fw->size/sizeof(u32); 42 41 bfi_fw = fw; 42 + 43 + /* Convert loaded firmware to host order as it is stored in file 44 + * as sequence of LE32 integers. 45 + */ 46 + for (n = 0; n < *bfi_image_size; n++) 47 + le32_to_cpus(*bfi_image + n); 43 48 44 49 return *bfi_image; 45 50 error:
+18
drivers/net/ethernet/cadence/macb.c
··· 350 350 else 351 351 phydev->supported &= PHY_BASIC_FEATURES; 352 352 353 + if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) 354 + phydev->supported &= ~SUPPORTED_1000baseT_Half; 355 + 353 356 phydev->advertising = phydev->supported; 354 357 355 358 bp->link = 0; ··· 1040 1037 * add that if/when we get our hands on a full-blown MII PHY. 1041 1038 */ 1042 1039 1040 + /* There is a hardware issue under heavy load where DMA can 1041 + * stop, this causes endless "used buffer descriptor read" 1042 + * interrupts but it can be cleared by re-enabling RX. See 1043 + * the at91 manual, section 41.3.1 or the Zynq manual 1044 + * section 16.7.4 for details. 1045 + */ 1043 1046 if (status & MACB_BIT(RXUBR)) { 1044 1047 ctrl = macb_readl(bp, NCR); 1045 1048 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); ··· 2702 2693 .init = at91ether_init, 2703 2694 }; 2704 2695 2696 + static const struct macb_config zynq_config = { 2697 + .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE | 2698 + MACB_CAPS_NO_GIGABIT_HALF, 2699 + .dma_burst_length = 16, 2700 + .clk_init = macb_clk_init, 2701 + .init = macb_init, 2702 + }; 2703 + 2705 2704 static const struct of_device_id macb_dt_ids[] = { 2706 2705 { .compatible = "cdns,at32ap7000-macb" }, 2707 2706 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, ··· 2720 2703 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, 2721 2704 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, 2722 2705 { .compatible = "cdns,emac", .data = &emac_config }, 2706 + { .compatible = "cdns,zynq-gem", .data = &zynq_config }, 2723 2707 { /* sentinel */ } 2724 2708 }; 2725 2709 MODULE_DEVICE_TABLE(of, macb_dt_ids);
+1
drivers/net/ethernet/cadence/macb.h
··· 393 393 #define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001 394 394 #define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 395 395 #define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004 396 + #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 396 397 #define MACB_CAPS_FIFO_MODE 0x10000000 397 398 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 398 399 #define MACB_CAPS_SG_DISABLED 0x40000000
+3 -3
drivers/net/ethernet/emulex/benet/be_main.c
··· 2358 2358 adapter->cfg_num_qs); 2359 2359 2360 2360 for_all_evt_queues(adapter, eqo, i) { 2361 + int numa_node = dev_to_node(&adapter->pdev->dev); 2361 2362 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) 2362 2363 return -ENOMEM; 2363 - cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev), 2364 - eqo->affinity_mask); 2365 - 2364 + cpumask_set_cpu(cpumask_local_spread(i, numa_node), 2365 + eqo->affinity_mask); 2366 2366 netif_napi_add(adapter->netdev, &eqo->napi, be_poll, 2367 2367 BE_NAPI_WEIGHT); 2368 2368 napi_hash_add(&eqo->napi);
+6 -10
drivers/net/ethernet/ibm/emac/core.c
··· 2084 2084 2085 2085 static int emac_get_regs_len(struct emac_instance *dev) 2086 2086 { 2087 - if (emac_has_feature(dev, EMAC_FTR_EMAC4)) 2088 2087 return sizeof(struct emac_ethtool_regs_subhdr) + 2089 - EMAC4_ETHTOOL_REGS_SIZE(dev); 2090 - else 2091 - return sizeof(struct emac_ethtool_regs_subhdr) + 2092 - EMAC_ETHTOOL_REGS_SIZE(dev); 2088 + sizeof(struct emac_regs); 2093 2089 } 2094 2090 2095 2091 static int emac_ethtool_get_regs_len(struct net_device *ndev) ··· 2110 2114 struct emac_ethtool_regs_subhdr *hdr = buf; 2111 2115 2112 2116 hdr->index = dev->cell_index; 2113 - if (emac_has_feature(dev, EMAC_FTR_EMAC4)) { 2117 + if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) { 2118 + hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER; 2119 + } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) { 2114 2120 hdr->version = EMAC4_ETHTOOL_REGS_VER; 2115 - memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev)); 2116 - return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev); 2117 2121 } else { 2118 2122 hdr->version = EMAC_ETHTOOL_REGS_VER; 2119 - memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev)); 2120 - return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev); 2121 2123 } 2124 + memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs)); 2125 + return (void *)(hdr + 1) + sizeof(struct emac_regs); 2122 2126 } 2123 2127 2124 2128 static void emac_ethtool_get_regs(struct net_device *ndev,
+2 -5
drivers/net/ethernet/ibm/emac/core.h
··· 461 461 }; 462 462 463 463 #define EMAC_ETHTOOL_REGS_VER 0 464 - #define EMAC_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \ 465 - (dev)->rsrc_regs.start + 1) 466 - #define EMAC4_ETHTOOL_REGS_VER 1 467 - #define EMAC4_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \ 468 - (dev)->rsrc_regs.start + 1) 464 + #define EMAC4_ETHTOOL_REGS_VER 1 465 + #define EMAC4SYNC_ETHTOOL_REGS_VER 2 469 466 470 467 #endif /* __IBM_NEWEMAC_CORE_H */
+7 -2
drivers/net/ethernet/mellanox/mlx4/cmd.c
··· 714 714 msecs_to_jiffies(timeout))) { 715 715 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", 716 716 op); 717 - err = -EIO; 718 - goto out_reset; 717 + if (op == MLX4_CMD_NOP) { 718 + err = -EBUSY; 719 + goto out; 720 + } else { 721 + err = -EIO; 722 + goto out_reset; 723 + } 719 724 } 720 725 721 726 err = context->result;
+3 -7
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 1501 1501 { 1502 1502 struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; 1503 1503 int numa_node = priv->mdev->dev->numa_node; 1504 - int ret = 0; 1505 1504 1506 1505 if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) 1507 1506 return -ENOMEM; 1508 1507 1509 - ret = cpumask_set_cpu_local_first(ring_idx, numa_node, 1510 - ring->affinity_mask); 1511 - if (ret) 1512 - free_cpumask_var(ring->affinity_mask); 1513 - 1514 - return ret; 1508 + cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node), 1509 + ring->affinity_mask); 1510 + return 0; 1515 1511 } 1516 1512 1517 1513 static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
+3 -3
drivers/net/ethernet/mellanox/mlx4/en_tx.c
··· 144 144 ring->queue_index = queue_index; 145 145 146 146 if (queue_index < priv->num_tx_rings_p_up) 147 - cpumask_set_cpu_local_first(queue_index, 148 - priv->mdev->dev->numa_node, 149 - &ring->affinity_mask); 147 + cpumask_set_cpu(cpumask_local_spread(queue_index, 148 + priv->mdev->dev->numa_node), 149 + &ring->affinity_mask); 150 150 151 151 *pring = ring; 152 152 return 0;
+4 -4
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
··· 3187 3187 int cqn = vhcr->in_modifier; 3188 3188 struct mlx4_cq_context *cqc = inbox->buf; 3189 3189 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; 3190 - struct res_cq *cq; 3190 + struct res_cq *cq = NULL; 3191 3191 struct res_mtt *mtt; 3192 3192 3193 3193 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); ··· 3223 3223 { 3224 3224 int err; 3225 3225 int cqn = vhcr->in_modifier; 3226 - struct res_cq *cq; 3226 + struct res_cq *cq = NULL; 3227 3227 3228 3228 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq); 3229 3229 if (err) ··· 3362 3362 int err; 3363 3363 int srqn = vhcr->in_modifier; 3364 3364 struct res_mtt *mtt; 3365 - struct res_srq *srq; 3365 + struct res_srq *srq = NULL; 3366 3366 struct mlx4_srq_context *srqc = inbox->buf; 3367 3367 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz; 3368 3368 ··· 3406 3406 { 3407 3407 int err; 3408 3408 int srqn = vhcr->in_modifier; 3409 - struct res_srq *srq; 3409 + struct res_srq *srq = NULL; 3410 3410 3411 3411 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq); 3412 3412 if (err)
+3 -3
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
··· 3025 3025 u8 dw, rows, cols, banks, ranks; 3026 3026 u32 val; 3027 3027 3028 - if (size != sizeof(struct netxen_dimm_cfg)) { 3028 + if (size < attr->size) { 3029 3029 netdev_err(netdev, "Invalid size\n"); 3030 - return -1; 3030 + return -EINVAL; 3031 3031 } 3032 3032 3033 3033 memset(&dimm, 0, sizeof(struct netxen_dimm_cfg)); ··· 3137 3137 3138 3138 static struct bin_attribute bin_attr_dimm = { 3139 3139 .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) }, 3140 - .size = 0, 3140 + .size = sizeof(struct netxen_dimm_cfg), 3141 3141 .read = netxen_sysfs_read_dimm, 3142 3142 }; 3143 3143
+5 -3
drivers/net/ethernet/rocker/rocker.c
··· 2921 2921 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr); 2922 2922 int err = 0; 2923 2923 2924 - if (!n) 2924 + if (!n) { 2925 2925 n = neigh_create(&arp_tbl, &ip_addr, dev); 2926 - if (!n) 2927 - return -ENOMEM; 2926 + if (IS_ERR(n)) 2927 + return IS_ERR(n); 2928 + } 2928 2929 2929 2930 /* If the neigh is already resolved, then go ahead and 2930 2931 * install the entry, otherwise start the ARP process to ··· 2937 2936 else 2938 2937 neigh_event_send(n, NULL); 2939 2938 2939 + neigh_release(n); 2940 2940 return err; 2941 2941 } 2942 2942
+25 -17
drivers/net/ethernet/sfc/rx.c
··· 224 224 } 225 225 } 226 226 227 - static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf) 227 + static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, 228 + struct efx_rx_buffer *rx_buf, 229 + unsigned int num_bufs) 228 230 { 229 - if (rx_buf->page) { 230 - put_page(rx_buf->page); 231 - rx_buf->page = NULL; 232 - } 231 + do { 232 + if (rx_buf->page) { 233 + put_page(rx_buf->page); 234 + rx_buf->page = NULL; 235 + } 236 + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); 237 + } while (--num_bufs); 233 238 } 234 239 235 240 /* Attempt to recycle the page if there is an RX recycle ring; the page can ··· 283 278 /* If this is the last buffer in a page, unmap and free it. */ 284 279 if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { 285 280 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); 286 - efx_free_rx_buffer(rx_buf); 281 + efx_free_rx_buffers(rx_queue, rx_buf, 1); 287 282 } 288 283 rx_buf->page = NULL; 289 284 } ··· 309 304 310 305 efx_recycle_rx_pages(channel, rx_buf, n_frags); 311 306 312 - do { 313 - efx_free_rx_buffer(rx_buf); 314 - rx_buf = efx_rx_buf_next(rx_queue, rx_buf); 315 - } while (--n_frags); 307 + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); 316 308 } 317 309 318 310 /** ··· 433 431 434 432 skb = napi_get_frags(napi); 435 433 if (unlikely(!skb)) { 436 - while (n_frags--) { 437 - put_page(rx_buf->page); 438 - rx_buf->page = NULL; 439 - rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); 440 - } 434 + struct efx_rx_queue *rx_queue; 435 + 436 + rx_queue = efx_channel_get_rx_queue(channel); 437 + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); 441 438 return; 442 439 } 443 440 ··· 623 622 624 623 skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len); 625 624 if (unlikely(skb == NULL)) { 626 - efx_free_rx_buffer(rx_buf); 625 + struct efx_rx_queue *rx_queue; 626 + 627 + rx_queue = efx_channel_get_rx_queue(channel); 628 + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); 627 629 return; 628 630 } 629 631 skb_record_rx_queue(skb, 
channel->rx_queue.core_index); ··· 665 661 * loopback layer, and free the rx_buf here 666 662 */ 667 663 if (unlikely(efx->loopback_selftest)) { 664 + struct efx_rx_queue *rx_queue; 665 + 668 666 efx_loopback_rx_packet(efx, eh, rx_buf->len); 669 - efx_free_rx_buffer(rx_buf); 667 + rx_queue = efx_channel_get_rx_queue(channel); 668 + efx_free_rx_buffers(rx_queue, rx_buf, 669 + channel->rx_pkt_n_frags); 670 670 goto out; 671 671 } 672 672
+6
drivers/net/ethernet/stmicro/stmmac/stmmac.h
··· 117 117 int use_riwt; 118 118 int irq_wake; 119 119 spinlock_t ptp_lock; 120 + 121 + #ifdef CONFIG_DEBUG_FS 122 + struct dentry *dbgfs_dir; 123 + struct dentry *dbgfs_rings_status; 124 + struct dentry *dbgfs_dma_cap; 125 + #endif 120 126 }; 121 127 122 128 int stmmac_mdio_unregister(struct net_device *ndev);
+53 -23
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 118 118 119 119 #ifdef CONFIG_DEBUG_FS 120 120 static int stmmac_init_fs(struct net_device *dev); 121 - static void stmmac_exit_fs(void); 121 + static void stmmac_exit_fs(struct net_device *dev); 122 122 #endif 123 123 124 124 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x)) ··· 1916 1916 netif_carrier_off(dev); 1917 1917 1918 1918 #ifdef CONFIG_DEBUG_FS 1919 - stmmac_exit_fs(); 1919 + stmmac_exit_fs(dev); 1920 1920 #endif 1921 1921 1922 1922 stmmac_release_ptp(priv); ··· 2508 2508 2509 2509 #ifdef CONFIG_DEBUG_FS 2510 2510 static struct dentry *stmmac_fs_dir; 2511 - static struct dentry *stmmac_rings_status; 2512 - static struct dentry *stmmac_dma_cap; 2513 2511 2514 2512 static void sysfs_display_ring(void *head, int size, int extend_desc, 2515 2513 struct seq_file *seq) ··· 2646 2648 2647 2649 static int stmmac_init_fs(struct net_device *dev) 2648 2650 { 2649 - /* Create debugfs entries */ 2650 - stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); 2651 + struct stmmac_priv *priv = netdev_priv(dev); 2651 2652 2652 - if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { 2653 - pr_err("ERROR %s, debugfs create directory failed\n", 2654 - STMMAC_RESOURCE_NAME); 2653 + /* Create per netdev entries */ 2654 + priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); 2655 + 2656 + if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) { 2657 + pr_err("ERROR %s/%s, debugfs create directory failed\n", 2658 + STMMAC_RESOURCE_NAME, dev->name); 2655 2659 2656 2660 return -ENOMEM; 2657 2661 } 2658 2662 2659 2663 /* Entry to report DMA RX/TX rings */ 2660 - stmmac_rings_status = debugfs_create_file("descriptors_status", 2661 - S_IRUGO, stmmac_fs_dir, dev, 2662 - &stmmac_rings_status_fops); 2664 + priv->dbgfs_rings_status = 2665 + debugfs_create_file("descriptors_status", S_IRUGO, 2666 + priv->dbgfs_dir, dev, 2667 + &stmmac_rings_status_fops); 2663 2668 2664 - if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) { 2669 + if (!priv->dbgfs_rings_status || 
IS_ERR(priv->dbgfs_rings_status)) { 2665 2670 pr_info("ERROR creating stmmac ring debugfs file\n"); 2666 - debugfs_remove(stmmac_fs_dir); 2671 + debugfs_remove_recursive(priv->dbgfs_dir); 2667 2672 2668 2673 return -ENOMEM; 2669 2674 } 2670 2675 2671 2676 /* Entry to report the DMA HW features */ 2672 - stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir, 2673 - dev, &stmmac_dma_cap_fops); 2677 + priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, 2678 + priv->dbgfs_dir, 2679 + dev, &stmmac_dma_cap_fops); 2674 2680 2675 - if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) { 2681 + if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { 2676 2682 pr_info("ERROR creating stmmac MMC debugfs file\n"); 2677 - debugfs_remove(stmmac_rings_status); 2678 - debugfs_remove(stmmac_fs_dir); 2683 + debugfs_remove_recursive(priv->dbgfs_dir); 2679 2684 2680 2685 return -ENOMEM; 2681 2686 } ··· 2686 2685 return 0; 2687 2686 } 2688 2687 2689 - static void stmmac_exit_fs(void) 2688 + static void stmmac_exit_fs(struct net_device *dev) 2690 2689 { 2691 - debugfs_remove(stmmac_rings_status); 2692 - debugfs_remove(stmmac_dma_cap); 2693 - debugfs_remove(stmmac_fs_dir); 2690 + struct stmmac_priv *priv = netdev_priv(dev); 2691 + 2692 + debugfs_remove_recursive(priv->dbgfs_dir); 2694 2693 } 2695 2694 #endif /* CONFIG_DEBUG_FS */ 2696 2695 ··· 3149 3148 3150 3149 __setup("stmmaceth=", stmmac_cmdline_opt); 3151 3150 #endif /* MODULE */ 3151 + 3152 + static int __init stmmac_init(void) 3153 + { 3154 + #ifdef CONFIG_DEBUG_FS 3155 + /* Create debugfs main directory if it doesn't exist yet */ 3156 + if (!stmmac_fs_dir) { 3157 + stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); 3158 + 3159 + if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { 3160 + pr_err("ERROR %s, debugfs create directory failed\n", 3161 + STMMAC_RESOURCE_NAME); 3162 + 3163 + return -ENOMEM; 3164 + } 3165 + } 3166 + #endif 3167 + 3168 + return 0; 3169 + } 3170 + 3171 + static void __exit 
stmmac_exit(void) 3172 + { 3173 + #ifdef CONFIG_DEBUG_FS 3174 + debugfs_remove_recursive(stmmac_fs_dir); 3175 + #endif 3176 + } 3177 + 3178 + module_init(stmmac_init) 3179 + module_exit(stmmac_exit) 3152 3180 3153 3181 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); 3154 3182 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+42 -3
drivers/net/phy/amd-xgbe-phy.c
··· 755 755 return ret; 756 756 } 757 757 758 + static bool amd_xgbe_phy_use_xgmii_mode(struct phy_device *phydev) 759 + { 760 + if (phydev->autoneg == AUTONEG_ENABLE) { 761 + if (phydev->advertising & ADVERTISED_10000baseKR_Full) 762 + return true; 763 + } else { 764 + if (phydev->speed == SPEED_10000) 765 + return true; 766 + } 767 + 768 + return false; 769 + } 770 + 771 + static bool amd_xgbe_phy_use_gmii_2500_mode(struct phy_device *phydev) 772 + { 773 + if (phydev->autoneg == AUTONEG_ENABLE) { 774 + if (phydev->advertising & ADVERTISED_2500baseX_Full) 775 + return true; 776 + } else { 777 + if (phydev->speed == SPEED_2500) 778 + return true; 779 + } 780 + 781 + return false; 782 + } 783 + 784 + static bool amd_xgbe_phy_use_gmii_mode(struct phy_device *phydev) 785 + { 786 + if (phydev->autoneg == AUTONEG_ENABLE) { 787 + if (phydev->advertising & ADVERTISED_1000baseKX_Full) 788 + return true; 789 + } else { 790 + if (phydev->speed == SPEED_1000) 791 + return true; 792 + } 793 + 794 + return false; 795 + } 796 + 758 797 static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable, 759 798 bool restart) 760 799 { ··· 1274 1235 /* Set initial mode - call the mode setting routines 1275 1236 * directly to insure we are properly configured 1276 1237 */ 1277 - if (phydev->advertising & SUPPORTED_10000baseKR_Full) 1238 + if (amd_xgbe_phy_use_xgmii_mode(phydev)) 1278 1239 ret = amd_xgbe_phy_xgmii_mode(phydev); 1279 - else if (phydev->advertising & SUPPORTED_1000baseKX_Full) 1240 + else if (amd_xgbe_phy_use_gmii_mode(phydev)) 1280 1241 ret = amd_xgbe_phy_gmii_mode(phydev); 1281 - else if (phydev->advertising & SUPPORTED_2500baseX_Full) 1242 + else if (amd_xgbe_phy_use_gmii_2500_mode(phydev)) 1282 1243 ret = amd_xgbe_phy_gmii_2500_mode(phydev); 1283 1244 else 1284 1245 ret = -EINVAL;
+1 -1
drivers/net/phy/bcm7xxx.c
··· 404 404 .name = "Broadcom BCM7425", 405 405 .features = PHY_GBIT_FEATURES | 406 406 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 407 - .flags = 0, 407 + .flags = PHY_IS_INTERNAL, 408 408 .config_init = bcm7xxx_config_init, 409 409 .config_aneg = genphy_config_aneg, 410 410 .read_status = genphy_read_status,
+19 -4
drivers/net/phy/dp83640.c
··· 47 47 #define PSF_TX 0x1000 48 48 #define EXT_EVENT 1 49 49 #define CAL_EVENT 7 50 - #define CAL_TRIGGER 7 50 + #define CAL_TRIGGER 1 51 51 #define DP83640_N_PINS 12 52 52 53 53 #define MII_DP83640_MICR 0x11 ··· 496 496 else 497 497 evnt |= EVNT_RISE; 498 498 } 499 + mutex_lock(&clock->extreg_lock); 499 500 ext_write(0, phydev, PAGE5, PTP_EVNT, evnt); 501 + mutex_unlock(&clock->extreg_lock); 500 502 return 0; 501 503 502 504 case PTP_CLK_REQ_PEROUT: ··· 534 532 535 533 static void enable_status_frames(struct phy_device *phydev, bool on) 536 534 { 535 + struct dp83640_private *dp83640 = phydev->priv; 536 + struct dp83640_clock *clock = dp83640->clock; 537 537 u16 cfg0 = 0, ver; 538 538 539 539 if (on) ··· 543 539 544 540 ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT; 545 541 542 + mutex_lock(&clock->extreg_lock); 543 + 546 544 ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0); 547 545 ext_write(0, phydev, PAGE6, PSF_CFG1, ver); 546 + 547 + mutex_unlock(&clock->extreg_lock); 548 548 549 549 if (!phydev->attached_dev) { 550 550 pr_warn("expected to find an attached netdevice\n"); ··· 846 838 list_del_init(&rxts->list); 847 839 phy2rxts(phy_rxts, rxts); 848 840 849 - spin_lock_irqsave(&dp83640->rx_queue.lock, flags); 841 + spin_lock(&dp83640->rx_queue.lock); 850 842 skb_queue_walk(&dp83640->rx_queue, skb) { 851 843 struct dp83640_skb_info *skb_info; 852 844 ··· 861 853 break; 862 854 } 863 855 } 864 - spin_unlock_irqrestore(&dp83640->rx_queue.lock, flags); 856 + spin_unlock(&dp83640->rx_queue.lock); 865 857 866 858 if (!shhwtstamps) 867 859 list_add_tail(&rxts->list, &dp83640->rxts); ··· 1181 1173 1182 1174 if (clock->chosen && !list_empty(&clock->phylist)) 1183 1175 recalibrate(clock); 1184 - else 1176 + else { 1177 + mutex_lock(&clock->extreg_lock); 1185 1178 enable_broadcast(phydev, clock->page, 1); 1179 + mutex_unlock(&clock->extreg_lock); 1180 + } 1186 1181 1187 1182 enable_status_frames(phydev, true); 1183 + 1184 + mutex_lock(&clock->extreg_lock); 1188 
1185 ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE); 1186 + mutex_unlock(&clock->extreg_lock); 1187 + 1189 1188 return 0; 1190 1189 } 1191 1190
+19 -15
drivers/net/phy/phy.c
··· 742 742 */ 743 743 void phy_start(struct phy_device *phydev) 744 744 { 745 + bool do_resume = false; 746 + int err = 0; 747 + 745 748 mutex_lock(&phydev->lock); 746 749 747 750 switch (phydev->state) { ··· 755 752 phydev->state = PHY_UP; 756 753 break; 757 754 case PHY_HALTED: 755 + /* make sure interrupts are re-enabled for the PHY */ 756 + err = phy_enable_interrupts(phydev); 757 + if (err < 0) 758 + break; 759 + 758 760 phydev->state = PHY_RESUMING; 761 + do_resume = true; 762 + break; 759 763 default: 760 764 break; 761 765 } 762 766 mutex_unlock(&phydev->lock); 767 + 768 + /* if phy was suspended, bring the physical link up again */ 769 + if (do_resume) 770 + phy_resume(phydev); 763 771 } 764 772 EXPORT_SYMBOL(phy_start); 765 773 ··· 783 769 struct delayed_work *dwork = to_delayed_work(work); 784 770 struct phy_device *phydev = 785 771 container_of(dwork, struct phy_device, state_queue); 786 - bool needs_aneg = false, do_suspend = false, do_resume = false; 772 + bool needs_aneg = false, do_suspend = false; 787 773 int err = 0; 788 774 789 775 mutex_lock(&phydev->lock); ··· 902 888 } 903 889 break; 904 890 case PHY_RESUMING: 905 - err = phy_clear_interrupt(phydev); 906 - if (err) 907 - break; 908 - 909 - err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); 910 - if (err) 911 - break; 912 - 913 891 if (AUTONEG_ENABLE == phydev->autoneg) { 914 892 err = phy_aneg_done(phydev); 915 893 if (err < 0) ··· 939 933 } 940 934 phydev->adjust_link(phydev->attached_dev); 941 935 } 942 - do_resume = true; 943 936 break; 944 937 } 945 938 ··· 948 943 err = phy_start_aneg(phydev); 949 944 else if (do_suspend) 950 945 phy_suspend(phydev); 951 - else if (do_resume) 952 - phy_resume(phydev); 953 946 954 947 if (err < 0) 955 948 phy_error(phydev); ··· 1056 1053 { 1057 1054 /* According to 802.3az,the EEE is supported only in full duplex-mode. 1058 1055 * Also EEE feature is active when core is operating with MII, GMII 1059 - * or RGMII. 
Internal PHYs are also allowed to proceed and should 1060 - * return an error if they do not support EEE. 1056 + * or RGMII (all kinds). Internal PHYs are also allowed to proceed and 1057 + * should return an error if they do not support EEE. 1061 1058 */ 1062 1059 if ((phydev->duplex == DUPLEX_FULL) && 1063 1060 ((phydev->interface == PHY_INTERFACE_MODE_MII) || 1064 1061 (phydev->interface == PHY_INTERFACE_MODE_GMII) || 1065 - (phydev->interface == PHY_INTERFACE_MODE_RGMII) || 1062 + (phydev->interface >= PHY_INTERFACE_MODE_RGMII && 1063 + phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) || 1066 1064 phy_is_internal(phydev))) { 1067 1065 int eee_lp, eee_cap, eee_adv; 1068 1066 u32 lp, cap, adv;
+1 -1
drivers/net/usb/cdc_ncm.c
··· 1182 1182 * payload data instead. 1183 1183 */ 1184 1184 usbnet_set_skb_tx_stats(skb_out, n, 1185 - ctx->tx_curr_frame_payload - skb_out->len); 1185 + (long)ctx->tx_curr_frame_payload - skb_out->len); 1186 1186 1187 1187 return skb_out; 1188 1188
+1 -1
drivers/net/vxlan.c
··· 2961 2961 * to the list by the previous loop. 2962 2962 */ 2963 2963 if (!net_eq(dev_net(vxlan->dev), net)) 2964 - unregister_netdevice_queue(dev, &list); 2964 + unregister_netdevice_queue(vxlan->dev, &list); 2965 2965 } 2966 2966 2967 2967 unregister_netdevice_many(&list);
+5 -7
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
··· 511 511 msgbuf->rx_pktids, 512 512 msgbuf->ioctl_resp_pktid); 513 513 if (msgbuf->ioctl_resp_ret_len != 0) { 514 - if (!skb) { 515 - brcmf_err("Invalid packet id idx recv'd %d\n", 516 - msgbuf->ioctl_resp_pktid); 514 + if (!skb) 517 515 return -EBADF; 518 - } 516 + 519 517 memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ? 520 518 len : msgbuf->ioctl_resp_ret_len); 521 519 } ··· 872 874 flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS; 873 875 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 874 876 msgbuf->tx_pktids, idx); 875 - if (!skb) { 876 - brcmf_err("Invalid packet id idx recv'd %d\n", idx); 877 + if (!skb) 877 878 return; 878 - } 879 879 880 880 set_bit(flowid, msgbuf->txstatus_done_map); 881 881 commonring = msgbuf->flowrings[flowid]; ··· 1152 1156 1153 1157 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 1154 1158 msgbuf->rx_pktids, idx); 1159 + if (!skb) 1160 + return; 1155 1161 1156 1162 if (data_offset) 1157 1163 skb_pull(skb, data_offset);
+1
drivers/net/wireless/iwlwifi/Kconfig
··· 21 21 Intel 7260 Wi-Fi Adapter 22 22 Intel 3160 Wi-Fi Adapter 23 23 Intel 7265 Wi-Fi Adapter 24 + Intel 3165 Wi-Fi Adapter 24 25 25 26 26 27 This driver uses the kernel's mac80211 subsystem.
+8 -8
drivers/net/wireless/iwlwifi/iwl-7000.c
··· 70 70 71 71 /* Highest firmware API version supported */ 72 72 #define IWL7260_UCODE_API_MAX 13 73 - #define IWL3160_UCODE_API_MAX 13 74 73 75 74 /* Oldest version we won't warn about */ 76 75 #define IWL7260_UCODE_API_OK 12 77 - #define IWL3160_UCODE_API_OK 12 76 + #define IWL3165_UCODE_API_OK 13 78 77 79 78 /* Lowest firmware API version supported */ 80 79 #define IWL7260_UCODE_API_MIN 10 81 - #define IWL3160_UCODE_API_MIN 10 80 + #define IWL3165_UCODE_API_MIN 13 82 81 83 82 /* NVM versions */ 84 83 #define IWL7260_NVM_VERSION 0x0a1d ··· 102 103 103 104 #define IWL3160_FW_PRE "iwlwifi-3160-" 104 105 #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" 105 - 106 - #define IWL3165_FW_PRE "iwlwifi-3165-" 107 - #define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode" 108 106 109 107 #define IWL7265_FW_PRE "iwlwifi-7265-" 110 108 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" ··· 244 248 245 249 const struct iwl_cfg iwl3165_2ac_cfg = { 246 250 .name = "Intel(R) Dual Band Wireless AC 3165", 247 - .fw_name_pre = IWL3165_FW_PRE, 251 + .fw_name_pre = IWL7265D_FW_PRE, 248 252 IWL_DEVICE_7000, 253 + /* sparse doens't like the re-assignment but it is safe */ 254 + #ifndef __CHECKER__ 255 + .ucode_api_ok = IWL3165_UCODE_API_OK, 256 + .ucode_api_min = IWL3165_UCODE_API_MIN, 257 + #endif 249 258 .ht_params = &iwl7000_ht_params, 250 259 .nvm_ver = IWL3165_NVM_VERSION, 251 260 .nvm_calib_ver = IWL3165_TX_POWER_VERSION, ··· 326 325 327 326 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 328 327 MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); 329 - MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); 330 328 MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 331 329 MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+5
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
··· 6 6 * GPL LICENSE SUMMARY 7 7 * 8 8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 9 + * Copyright(c) 2015 Intel Mobile Communications GmbH 9 10 * 10 11 * This program is free software; you can redistribute it and/or modify 11 12 * it under the terms of version 2 of the GNU General Public License as ··· 32 31 * BSD LICENSE 33 32 * 34 33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 34 + * Copyright(c) 2015 Intel Mobile Communications GmbH 35 35 * All rights reserved. 36 36 * 37 37 * Redistribution and use in source and binary forms, with or without ··· 749 747 ht_info->ht_supported = false; 750 748 return; 751 749 } 750 + 751 + if (data->sku_cap_mimo_disabled) 752 + rx_chains = 1; 752 753 753 754 ht_info->ht_supported = true; 754 755 ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
+3
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
··· 6 6 * GPL LICENSE SUMMARY 7 7 * 8 8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 9 + * Copyright(c) 2015 Intel Mobile Communications GmbH 9 10 * 10 11 * This program is free software; you can redistribute it and/or modify 11 12 * it under the terms of version 2 of the GNU General Public License as ··· 32 31 * BSD LICENSE 33 32 * 34 33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 34 + * Copyright(c) 2015 Intel Mobile Communications GmbH 35 35 * All rights reserved. 36 36 * 37 37 * Redistribution and use in source and binary forms, with or without ··· 86 84 bool sku_cap_11ac_enable; 87 85 bool sku_cap_amt_enable; 88 86 bool sku_cap_ipan_enable; 87 + bool sku_cap_mimo_disabled; 89 88 90 89 u16 radio_cfg_type; 91 90 u8 radio_cfg_step;
+24 -8
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
··· 6 6 * GPL LICENSE SUMMARY 7 7 * 8 8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 9 - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 9 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 10 * 11 11 * This program is free software; you can redistribute it and/or modify 12 12 * it under the terms of version 2 of the GNU General Public License as ··· 32 32 * BSD LICENSE 33 33 * 34 34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 35 - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 35 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 36 36 * All rights reserved. 37 37 * 38 38 * Redistribution and use in source and binary forms, with or without ··· 116 116 117 117 /* SKU Capabilities (actual values from NVM definition) */ 118 118 enum nvm_sku_bits { 119 - NVM_SKU_CAP_BAND_24GHZ = BIT(0), 120 - NVM_SKU_CAP_BAND_52GHZ = BIT(1), 121 - NVM_SKU_CAP_11N_ENABLE = BIT(2), 122 - NVM_SKU_CAP_11AC_ENABLE = BIT(3), 119 + NVM_SKU_CAP_BAND_24GHZ = BIT(0), 120 + NVM_SKU_CAP_BAND_52GHZ = BIT(1), 121 + NVM_SKU_CAP_11N_ENABLE = BIT(2), 122 + NVM_SKU_CAP_11AC_ENABLE = BIT(3), 123 + NVM_SKU_CAP_MIMO_DISABLE = BIT(5), 123 124 }; 124 125 125 126 /* ··· 369 368 if (cfg->ht_params->ldpc) 370 369 vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; 371 370 371 + if (data->sku_cap_mimo_disabled) { 372 + num_rx_ants = 1; 373 + num_tx_ants = 1; 374 + } 375 + 372 376 if (num_tx_ants > 1) 373 377 vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; 374 378 else ··· 471 465 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) 472 466 return le16_to_cpup(nvm_sw + RADIO_CFG); 473 467 474 - return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000)); 468 + return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000)); 475 469 476 470 } 477 471 ··· 533 527 const u8 *hw_addr; 534 528 535 529 if (mac_override) { 530 + static const u8 reserved_mac[] = { 531 + 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 532 + }; 533 + 536 534 hw_addr = 
(const u8 *)(mac_override + 537 535 MAC_ADDRESS_OVERRIDE_FAMILY_8000); 538 536 ··· 548 538 data->hw_addr[4] = hw_addr[5]; 549 539 data->hw_addr[5] = hw_addr[4]; 550 540 551 - if (is_valid_ether_addr(data->hw_addr)) 541 + /* 542 + * Force the use of the OTP MAC address in case of reserved MAC 543 + * address in the NVM, or if address is given but invalid. 544 + */ 545 + if (is_valid_ether_addr(data->hw_addr) && 546 + memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0) 552 547 return; 553 548 554 549 IWL_ERR_DEV(dev, ··· 625 610 data->sku_cap_11n_enable = false; 626 611 data->sku_cap_11ac_enable = data->sku_cap_11n_enable && 627 612 (sku & NVM_SKU_CAP_11AC_ENABLE); 613 + data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE; 628 614 629 615 data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); 630 616
+1 -1
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
··· 776 776 struct iwl_host_cmd cmd = { 777 777 .id = BT_CONFIG, 778 778 .len = { sizeof(*bt_cmd), }, 779 - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, 779 + .dataflags = { IWL_HCMD_DFL_DUP, }, 780 780 .flags = CMD_ASYNC, 781 781 }; 782 782 struct iwl_mvm_sta *mvmsta;
+16 -6
drivers/net/wireless/iwlwifi/mvm/d3.c
··· 1750 1750 int i, j, n_matches, ret; 1751 1751 1752 1752 fw_status = iwl_mvm_get_wakeup_status(mvm, vif); 1753 - if (!IS_ERR_OR_NULL(fw_status)) 1753 + if (!IS_ERR_OR_NULL(fw_status)) { 1754 1754 reasons = le32_to_cpu(fw_status->wakeup_reasons); 1755 + kfree(fw_status); 1756 + } 1755 1757 1756 1758 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) 1757 1759 wakeup.rfkill_release = true; ··· 1870 1868 /* get the BSS vif pointer again */ 1871 1869 vif = iwl_mvm_get_bss_vif(mvm); 1872 1870 if (IS_ERR_OR_NULL(vif)) 1873 - goto out_unlock; 1871 + goto err; 1874 1872 1875 1873 ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test); 1876 1874 if (ret) 1877 - goto out_unlock; 1875 + goto err; 1878 1876 1879 1877 if (d3_status != IWL_D3_STATUS_ALIVE) { 1880 1878 IWL_INFO(mvm, "Device was reset during suspend\n"); 1881 - goto out_unlock; 1879 + goto err; 1882 1880 } 1883 1881 1884 1882 /* query SRAM first in case we want event logging */ ··· 1904 1902 goto out_iterate; 1905 1903 } 1906 1904 1907 - out_unlock: 1905 + err: 1906 + iwl_mvm_free_nd(mvm); 1908 1907 mutex_unlock(&mvm->mutex); 1909 1908 1910 1909 out_iterate: ··· 1918 1915 /* return 1 to reconfigure the device */ 1919 1916 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1920 1917 set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); 1918 + 1919 + /* We always return 1, which causes mac80211 to do a reconfig 1920 + * with IEEE80211_RECONFIG_TYPE_RESTART. This type of 1921 + * reconfig calls iwl_mvm_restart_complete(), where we unref 1922 + * the IWL_MVM_REF_UCODE_DOWN, so we need to take the 1923 + * reference here. 1924 + */ 1925 + iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); 1921 1926 return 1; 1922 1927 } 1923 1928 ··· 2032 2021 __iwl_mvm_resume(mvm, true); 2033 2022 rtnl_unlock(); 2034 2023 iwl_abort_notification_waits(&mvm->notif_wait); 2035 - iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); 2036 2024 ieee80211_restart_hw(mvm->hw); 2037 2025 2038 2026 /* wait for restart and disconnect all interfaces */
-3
drivers/net/wireless/iwlwifi/mvm/mac80211.c
··· 3995 3995 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) 3996 3996 return; 3997 3997 3998 - if (event->u.mlme.status == MLME_SUCCESS) 3999 - return; 4000 - 4001 3998 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); 4002 3999 trig_mlme = (void *)trig->data; 4003 4000 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+4 -2
drivers/net/wireless/iwlwifi/mvm/ops.c
··· 1263 1263 ieee80211_iterate_active_interfaces( 1264 1264 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1265 1265 iwl_mvm_d0i3_disconnect_iter, mvm); 1266 - 1267 - iwl_free_resp(&get_status_cmd); 1268 1266 out: 1269 1267 iwl_mvm_d0i3_enable_tx(mvm, qos_seq); 1268 + 1269 + /* qos_seq might point inside resp_pkt, so free it only now */ 1270 + if (get_status_cmd.resp_pkt) 1271 + iwl_free_resp(&get_status_cmd); 1270 1272 1271 1273 /* the FW might have updated the regdomain */ 1272 1274 iwl_mvm_update_changed_regdom(mvm);
+3
drivers/net/wireless/iwlwifi/mvm/rs.c
··· 180 180 if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p) 181 181 return false; 182 182 183 + if (mvm->nvm_data->sku_cap_mimo_disabled) 184 + return false; 185 + 183 186 return true; 184 187 } 185 188
+3 -3
drivers/net/wireless/iwlwifi/pcie/internal.h
··· 1 1 /****************************************************************************** 2 2 * 3 - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 4 - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 3 + * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. 4 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 5 5 * 6 6 * Portions of this file are derived from the ipw3945 project, as well 7 7 * as portions of the ieee80211 subsystem header files. ··· 320 320 321 321 /*protect hw register */ 322 322 spinlock_t reg_lock; 323 - bool cmd_in_flight; 323 + bool cmd_hold_nic_awake; 324 324 bool ref_cmd_in_flight; 325 325 326 326 /* protect ref counter */
+7 -5
drivers/net/wireless/iwlwifi/pcie/trans.c
··· 1049 1049 iwl_pcie_rx_stop(trans); 1050 1050 1051 1051 /* Power-down device's busmaster DMA clocks */ 1052 - iwl_write_prph(trans, APMG_CLK_DIS_REG, 1053 - APMG_CLK_VAL_DMA_CLK_RQT); 1054 - udelay(5); 1052 + if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { 1053 + iwl_write_prph(trans, APMG_CLK_DIS_REG, 1054 + APMG_CLK_VAL_DMA_CLK_RQT); 1055 + udelay(5); 1056 + } 1055 1057 } 1056 1058 1057 1059 /* Make sure (redundant) we've released our request to stay awake */ ··· 1372 1370 1373 1371 spin_lock_irqsave(&trans_pcie->reg_lock, *flags); 1374 1372 1375 - if (trans_pcie->cmd_in_flight) 1373 + if (trans_pcie->cmd_hold_nic_awake) 1376 1374 goto out; 1377 1375 1378 1376 /* this bit wakes up the NIC */ ··· 1438 1436 */ 1439 1437 __acquire(&trans_pcie->reg_lock); 1440 1438 1441 - if (trans_pcie->cmd_in_flight) 1439 + if (trans_pcie->cmd_hold_nic_awake) 1442 1440 goto out; 1443 1441 1444 1442 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+9 -14
drivers/net/wireless/iwlwifi/pcie/tx.c
··· 1039 1039 iwl_trans_pcie_ref(trans); 1040 1040 } 1041 1041 1042 - if (trans_pcie->cmd_in_flight) 1043 - return 0; 1044 - 1045 - trans_pcie->cmd_in_flight = true; 1046 - 1047 1042 /* 1048 1043 * wake up the NIC to make sure that the firmware will see the host 1049 1044 * command - we will let the NIC sleep once all the host commands 1050 1045 * returned. This needs to be done only on NICs that have 1051 1046 * apmg_wake_up_wa set. 1052 1047 */ 1053 - if (trans->cfg->base_params->apmg_wake_up_wa) { 1048 + if (trans->cfg->base_params->apmg_wake_up_wa && 1049 + !trans_pcie->cmd_hold_nic_awake) { 1054 1050 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 1055 1051 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1056 1052 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) ··· 1060 1064 if (ret < 0) { 1061 1065 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1062 1066 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1063 - trans_pcie->cmd_in_flight = false; 1064 1067 IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); 1065 1068 return -EIO; 1066 1069 } 1070 + trans_pcie->cmd_hold_nic_awake = true; 1067 1071 } 1068 1072 1069 1073 return 0; ··· 1081 1085 iwl_trans_pcie_unref(trans); 1082 1086 } 1083 1087 1084 - if (WARN_ON(!trans_pcie->cmd_in_flight)) 1085 - return 0; 1088 + if (trans->cfg->base_params->apmg_wake_up_wa) { 1089 + if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) 1090 + return 0; 1086 1091 1087 - trans_pcie->cmd_in_flight = false; 1088 - 1089 - if (trans->cfg->base_params->apmg_wake_up_wa) 1092 + trans_pcie->cmd_hold_nic_awake = false; 1090 1093 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1091 - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1092 - 1094 + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1095 + } 1093 1096 return 0; 1094 1097 } 1095 1098
+1 -1
drivers/net/xen-netback/netback.c
··· 1250 1250 netdev_err(queue->vif->dev, 1251 1251 "txreq.offset: %x, size: %u, end: %lu\n", 1252 1252 txreq.offset, txreq.size, 1253 - (txreq.offset&~PAGE_MASK) + txreq.size); 1253 + (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size); 1254 1254 xenvif_fatal_tx_err(queue->vif); 1255 1255 break; 1256 1256 }
+20 -14
drivers/net/xen-netback/xenbus.c
··· 34 34 enum xenbus_state frontend_state; 35 35 struct xenbus_watch hotplug_status_watch; 36 36 u8 have_hotplug_status_watch:1; 37 + 38 + const char *hotplug_script; 37 39 }; 38 40 39 41 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); ··· 240 238 xenvif_free(be->vif); 241 239 be->vif = NULL; 242 240 } 241 + kfree(be->hotplug_script); 243 242 kfree(be); 244 243 dev_set_drvdata(&dev->dev, NULL); 245 244 return 0; ··· 258 255 struct xenbus_transaction xbt; 259 256 int err; 260 257 int sg; 258 + const char *script; 261 259 struct backend_info *be = kzalloc(sizeof(struct backend_info), 262 260 GFP_KERNEL); 263 261 if (!be) { ··· 351 347 if (err) 352 348 pr_debug("Error writing multi-queue-max-queues\n"); 353 349 350 + script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL); 351 + if (IS_ERR(script)) { 352 + err = PTR_ERR(script); 353 + xenbus_dev_fatal(dev, err, "reading script"); 354 + goto fail; 355 + } 356 + 357 + be->hotplug_script = script; 358 + 354 359 err = xenbus_switch_state(dev, XenbusStateInitWait); 355 360 if (err) 356 361 goto fail; ··· 392 379 struct kobj_uevent_env *env) 393 380 { 394 381 struct backend_info *be = dev_get_drvdata(&xdev->dev); 395 - char *val; 396 382 397 - val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL); 398 - if (IS_ERR(val)) { 399 - int err = PTR_ERR(val); 400 - xenbus_dev_fatal(xdev, err, "reading script"); 401 - return err; 402 - } else { 403 - if (add_uevent_var(env, "script=%s", val)) { 404 - kfree(val); 405 - return -ENOMEM; 406 - } 407 - kfree(val); 408 - } 383 + if (!be) 384 + return 0; 409 385 410 - if (!be || !be->vif) 386 + if (add_uevent_var(env, "script=%s", be->hotplug_script)) 387 + return -ENOMEM; 388 + 389 + if (!be->vif) 411 390 return 0; 412 391 413 392 return add_uevent_var(env, "vif=%s", be->vif->dev->name); ··· 798 793 goto err; 799 794 } 800 795 796 + queue->credit_bytes = credit_bytes; 801 797 queue->remaining_credit = credit_bytes; 802 798 queue->credit_usec = 
credit_usec; 803 799
+2 -13
drivers/net/xen-netfront.c
··· 1698 1698 1699 1699 if (netif_running(info->netdev)) 1700 1700 napi_disable(&queue->napi); 1701 + del_timer_sync(&queue->rx_refill_timer); 1701 1702 netif_napi_del(&queue->napi); 1702 1703 } 1703 1704 ··· 2103 2102 static int xennet_remove(struct xenbus_device *dev) 2104 2103 { 2105 2104 struct netfront_info *info = dev_get_drvdata(&dev->dev); 2106 - unsigned int num_queues = info->netdev->real_num_tx_queues; 2107 - struct netfront_queue *queue = NULL; 2108 - unsigned int i = 0; 2109 2105 2110 2106 dev_dbg(&dev->dev, "%s\n", dev->nodename); 2111 2107 ··· 2110 2112 2111 2113 unregister_netdev(info->netdev); 2112 2114 2113 - for (i = 0; i < num_queues; ++i) { 2114 - queue = &info->queues[i]; 2115 - del_timer_sync(&queue->rx_refill_timer); 2116 - } 2117 - 2118 - if (num_queues) { 2119 - kfree(info->queues); 2120 - info->queues = NULL; 2121 - } 2122 - 2115 + xennet_destroy_queues(info); 2123 2116 xennet_free_netdev(info->netdev); 2124 2117 2125 2118 return 0;
+3 -1
drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
··· 643 643 CYGNUS_PINRANGE(87, 104, 12), 644 644 CYGNUS_PINRANGE(99, 102, 2), 645 645 CYGNUS_PINRANGE(101, 90, 4), 646 - CYGNUS_PINRANGE(105, 116, 10), 646 + CYGNUS_PINRANGE(105, 116, 6), 647 + CYGNUS_PINRANGE(111, 100, 2), 648 + CYGNUS_PINRANGE(113, 122, 4), 647 649 CYGNUS_PINRANGE(123, 11, 1), 648 650 CYGNUS_PINRANGE(124, 38, 4), 649 651 CYGNUS_PINRANGE(128, 43, 1),
+44
drivers/pinctrl/intel/pinctrl-cherryview.c
··· 1292 1292 chv_gpio_irq_mask_unmask(d, false); 1293 1293 } 1294 1294 1295 + static unsigned chv_gpio_irq_startup(struct irq_data *d) 1296 + { 1297 + /* 1298 + * Check if the interrupt has been requested with 0 as triggering 1299 + * type. In that case it is assumed that the current values 1300 + * programmed to the hardware are used (e.g BIOS configured 1301 + * defaults). 1302 + * 1303 + * In that case ->irq_set_type() will never be called so we need to 1304 + * read back the values from hardware now, set correct flow handler 1305 + * and update mappings before the interrupt is being used. 1306 + */ 1307 + if (irqd_get_trigger_type(d) == IRQ_TYPE_NONE) { 1308 + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 1309 + struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc); 1310 + unsigned offset = irqd_to_hwirq(d); 1311 + int pin = chv_gpio_offset_to_pin(pctrl, offset); 1312 + irq_flow_handler_t handler; 1313 + unsigned long flags; 1314 + u32 intsel, value; 1315 + 1316 + intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); 1317 + intsel &= CHV_PADCTRL0_INTSEL_MASK; 1318 + intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; 1319 + 1320 + value = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1)); 1321 + if (value & CHV_PADCTRL1_INTWAKECFG_LEVEL) 1322 + handler = handle_level_irq; 1323 + else 1324 + handler = handle_edge_irq; 1325 + 1326 + spin_lock_irqsave(&pctrl->lock, flags); 1327 + if (!pctrl->intr_lines[intsel]) { 1328 + __irq_set_handler_locked(d->irq, handler); 1329 + pctrl->intr_lines[intsel] = offset; 1330 + } 1331 + spin_unlock_irqrestore(&pctrl->lock, flags); 1332 + } 1333 + 1334 + chv_gpio_irq_unmask(d); 1335 + return 0; 1336 + } 1337 + 1295 1338 static int chv_gpio_irq_type(struct irq_data *d, unsigned type) 1296 1339 { 1297 1340 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); ··· 1400 1357 1401 1358 static struct irq_chip chv_gpio_irqchip = { 1402 1359 .name = "chv-gpio", 1360 + .irq_startup = chv_gpio_irq_startup, 1403 1361 .irq_ack = chv_gpio_irq_ack, 1404 1362 
.irq_mask = chv_gpio_irq_mask, 1405 1363 .irq_unmask = chv_gpio_irq_unmask,
+1 -1
drivers/pinctrl/meson/pinctrl-meson.c
··· 569 569 domain->chip.direction_output = meson_gpio_direction_output; 570 570 domain->chip.get = meson_gpio_get; 571 571 domain->chip.set = meson_gpio_set; 572 - domain->chip.base = -1; 572 + domain->chip.base = domain->data->pin_base; 573 573 domain->chip.ngpio = domain->data->num_pins; 574 574 domain->chip.can_sleep = false; 575 575 domain->chip.of_node = domain->of_node;
+2 -2
drivers/pinctrl/meson/pinctrl-meson8b.c
··· 876 876 .banks = meson8b_banks, 877 877 .num_banks = ARRAY_SIZE(meson8b_banks), 878 878 .pin_base = 0, 879 - .num_pins = 83, 879 + .num_pins = 130, 880 880 }, 881 881 { 882 882 .name = "ao-bank", 883 883 .banks = meson8b_ao_banks, 884 884 .num_banks = ARRAY_SIZE(meson8b_ao_banks), 885 - .pin_base = 83, 885 + .pin_base = 130, 886 886 .num_pins = 16, 887 887 }, 888 888 };
+19 -18
drivers/platform/x86/thinkpad_acpi.c
··· 2897 2897 return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason); 2898 2898 } 2899 2899 2900 - static DEVICE_ATTR_RO(hotkey_wakeup_reason); 2900 + static DEVICE_ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL); 2901 2901 2902 2902 static void hotkey_wakeup_reason_notify_change(void) 2903 2903 { ··· 2913 2913 return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack); 2914 2914 } 2915 2915 2916 - static DEVICE_ATTR_RO(hotkey_wakeup_hotunplug_complete); 2916 + static DEVICE_ATTR(wakeup_hotunplug_complete, S_IRUGO, 2917 + hotkey_wakeup_hotunplug_complete_show, NULL); 2917 2918 2918 2919 static void hotkey_wakeup_hotunplug_complete_notify_change(void) 2919 2920 { ··· 2979 2978 &dev_attr_hotkey_enable.attr, 2980 2979 &dev_attr_hotkey_bios_enabled.attr, 2981 2980 &dev_attr_hotkey_bios_mask.attr, 2982 - &dev_attr_hotkey_wakeup_reason.attr, 2983 - &dev_attr_hotkey_wakeup_hotunplug_complete.attr, 2981 + &dev_attr_wakeup_reason.attr, 2982 + &dev_attr_wakeup_hotunplug_complete.attr, 2984 2983 &dev_attr_hotkey_mask.attr, 2985 2984 &dev_attr_hotkey_all_mask.attr, 2986 2985 &dev_attr_hotkey_recommended_mask.attr, ··· 4394 4393 attr, buf, count); 4395 4394 } 4396 4395 4397 - static DEVICE_ATTR_RW(wan_enable); 4396 + static DEVICE_ATTR(wwan_enable, S_IWUSR | S_IRUGO, 4397 + wan_enable_show, wan_enable_store); 4398 4398 4399 4399 /* --------------------------------------------------------------------- */ 4400 4400 4401 4401 static struct attribute *wan_attributes[] = { 4402 - &dev_attr_wan_enable.attr, 4402 + &dev_attr_wwan_enable.attr, 4403 4403 NULL 4404 4404 }; 4405 4405 ··· 8140 8138 return count; 8141 8139 } 8142 8140 8143 - static DEVICE_ATTR_RW(fan_pwm1_enable); 8141 + static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, 8142 + fan_pwm1_enable_show, fan_pwm1_enable_store); 8144 8143 8145 8144 /* sysfs fan pwm1 ------------------------------------------------------ */ 8146 8145 static ssize_t fan_pwm1_show(struct device *dev, ··· 8201 8198 return (rc) 
? rc : count; 8202 8199 } 8203 8200 8204 - static DEVICE_ATTR_RW(fan_pwm1); 8201 + static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, fan_pwm1_show, fan_pwm1_store); 8205 8202 8206 8203 /* sysfs fan fan1_input ------------------------------------------------ */ 8207 8204 static ssize_t fan_fan1_input_show(struct device *dev, ··· 8218 8215 return snprintf(buf, PAGE_SIZE, "%u\n", speed); 8219 8216 } 8220 8217 8221 - static DEVICE_ATTR_RO(fan_fan1_input); 8218 + static DEVICE_ATTR(fan1_input, S_IRUGO, fan_fan1_input_show, NULL); 8222 8219 8223 8220 /* sysfs fan fan2_input ------------------------------------------------ */ 8224 8221 static ssize_t fan_fan2_input_show(struct device *dev, ··· 8235 8232 return snprintf(buf, PAGE_SIZE, "%u\n", speed); 8236 8233 } 8237 8234 8238 - static DEVICE_ATTR_RO(fan_fan2_input); 8235 + static DEVICE_ATTR(fan2_input, S_IRUGO, fan_fan2_input_show, NULL); 8239 8236 8240 8237 /* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */ 8241 8238 static ssize_t fan_fan_watchdog_show(struct device_driver *drv, ··· 8268 8265 8269 8266 /* --------------------------------------------------------------------- */ 8270 8267 static struct attribute *fan_attributes[] = { 8271 - &dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr, 8272 - &dev_attr_fan_fan1_input.attr, 8268 + &dev_attr_pwm1_enable.attr, &dev_attr_pwm1.attr, 8269 + &dev_attr_fan1_input.attr, 8273 8270 NULL, /* for fan2_input */ 8274 8271 NULL 8275 8272 }; ··· 8403 8400 if (tp_features.second_fan) { 8404 8401 /* attach second fan tachometer */ 8405 8402 fan_attributes[ARRAY_SIZE(fan_attributes)-2] = 8406 - &dev_attr_fan_fan2_input.attr; 8403 + &dev_attr_fan2_input.attr; 8407 8404 } 8408 8405 rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, 8409 8406 &fan_attr_group); ··· 8851 8848 return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME); 8852 8849 } 8853 8850 8854 - static DEVICE_ATTR_RO(thinkpad_acpi_pdev_name); 8851 + static DEVICE_ATTR(name, S_IRUGO, 
thinkpad_acpi_pdev_name_show, NULL); 8855 8852 8856 8853 /* --------------------------------------------------------------------- */ 8857 8854 ··· 9393 9390 hwmon_device_unregister(tpacpi_hwmon); 9394 9391 9395 9392 if (tp_features.sensors_pdev_attrs_registered) 9396 - device_remove_file(&tpacpi_sensors_pdev->dev, 9397 - &dev_attr_thinkpad_acpi_pdev_name); 9393 + device_remove_file(&tpacpi_sensors_pdev->dev, &dev_attr_name); 9398 9394 if (tpacpi_sensors_pdev) 9399 9395 platform_device_unregister(tpacpi_sensors_pdev); 9400 9396 if (tpacpi_pdev) ··· 9514 9512 thinkpad_acpi_module_exit(); 9515 9513 return ret; 9516 9514 } 9517 - ret = device_create_file(&tpacpi_sensors_pdev->dev, 9518 - &dev_attr_thinkpad_acpi_pdev_name); 9515 + ret = device_create_file(&tpacpi_sensors_pdev->dev, &dev_attr_name); 9519 9516 if (ret) { 9520 9517 pr_err("unable to create sysfs hwmon device attributes\n"); 9521 9518 thinkpad_acpi_module_exit();
+64 -12
drivers/pwm/pwm-img.c
··· 16 16 #include <linux/mfd/syscon.h> 17 17 #include <linux/module.h> 18 18 #include <linux/of.h> 19 + #include <linux/of_device.h> 19 20 #include <linux/platform_device.h> 20 21 #include <linux/pwm.h> 21 22 #include <linux/regmap.h> ··· 39 38 #define PERIP_PWM_PDM_CONTROL_CH_MASK 0x1 40 39 #define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch) ((ch) * 4) 41 40 42 - #define MAX_TMBASE_STEPS 65536 41 + /* 42 + * PWM period is specified with a timebase register, 43 + * in number of step periods. The PWM duty cycle is also 44 + * specified in step periods, in the [0, $timebase] range. 45 + * In other words, the timebase imposes the duty cycle 46 + * resolution. Therefore, let's constraint the timebase to 47 + * a minimum value to allow a sane range of duty cycle values. 48 + * Imposing a minimum timebase, will impose a maximum PWM frequency. 49 + * 50 + * The value chosen is completely arbitrary. 51 + */ 52 + #define MIN_TMBASE_STEPS 16 53 + 54 + struct img_pwm_soc_data { 55 + u32 max_timebase; 56 + }; 43 57 44 58 struct img_pwm_chip { 45 59 struct device *dev; ··· 63 47 struct clk *sys_clk; 64 48 void __iomem *base; 65 49 struct regmap *periph_regs; 50 + int max_period_ns; 51 + int min_period_ns; 52 + const struct img_pwm_soc_data *data; 66 53 }; 67 54 68 55 static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip) ··· 91 72 u32 val, div, duty, timebase; 92 73 unsigned long mul, output_clk_hz, input_clk_hz; 93 74 struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip); 75 + unsigned int max_timebase = pwm_chip->data->max_timebase; 76 + 77 + if (period_ns < pwm_chip->min_period_ns || 78 + period_ns > pwm_chip->max_period_ns) { 79 + dev_err(chip->dev, "configured period not in range\n"); 80 + return -ERANGE; 81 + } 94 82 95 83 input_clk_hz = clk_get_rate(pwm_chip->pwm_clk); 96 84 output_clk_hz = DIV_ROUND_UP(NSEC_PER_SEC, period_ns); 97 85 98 86 mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz); 99 - if (mul <= MAX_TMBASE_STEPS) { 87 + if (mul <= max_timebase) { 100 
88 div = PWM_CTRL_CFG_NO_SUB_DIV; 101 89 timebase = DIV_ROUND_UP(mul, 1); 102 - } else if (mul <= MAX_TMBASE_STEPS * 8) { 90 + } else if (mul <= max_timebase * 8) { 103 91 div = PWM_CTRL_CFG_SUB_DIV0; 104 92 timebase = DIV_ROUND_UP(mul, 8); 105 - } else if (mul <= MAX_TMBASE_STEPS * 64) { 93 + } else if (mul <= max_timebase * 64) { 106 94 div = PWM_CTRL_CFG_SUB_DIV1; 107 95 timebase = DIV_ROUND_UP(mul, 64); 108 - } else if (mul <= MAX_TMBASE_STEPS * 512) { 96 + } else if (mul <= max_timebase * 512) { 109 97 div = PWM_CTRL_CFG_SUB_DIV0_DIV1; 110 98 timebase = DIV_ROUND_UP(mul, 512); 111 - } else if (mul > MAX_TMBASE_STEPS * 512) { 99 + } else if (mul > max_timebase * 512) { 112 100 dev_err(chip->dev, 113 101 "failed to configure timebase steps/divider value\n"); 114 102 return -EINVAL; ··· 169 143 .owner = THIS_MODULE, 170 144 }; 171 145 146 + static const struct img_pwm_soc_data pistachio_pwm = { 147 + .max_timebase = 255, 148 + }; 149 + 150 + static const struct of_device_id img_pwm_of_match[] = { 151 + { 152 + .compatible = "img,pistachio-pwm", 153 + .data = &pistachio_pwm, 154 + }, 155 + { } 156 + }; 157 + MODULE_DEVICE_TABLE(of, img_pwm_of_match); 158 + 172 159 static int img_pwm_probe(struct platform_device *pdev) 173 160 { 174 161 int ret; 162 + u64 val; 163 + unsigned long clk_rate; 175 164 struct resource *res; 176 165 struct img_pwm_chip *pwm; 166 + const struct of_device_id *of_dev_id; 177 167 178 168 pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL); 179 169 if (!pwm) ··· 201 159 pwm->base = devm_ioremap_resource(&pdev->dev, res); 202 160 if (IS_ERR(pwm->base)) 203 161 return PTR_ERR(pwm->base); 162 + 163 + of_dev_id = of_match_device(img_pwm_of_match, &pdev->dev); 164 + if (!of_dev_id) 165 + return -ENODEV; 166 + pwm->data = of_dev_id->data; 204 167 205 168 pwm->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, 206 169 "img,cr-periph"); ··· 235 188 dev_err(&pdev->dev, "could not prepare or enable pwm clock\n"); 236 189 goto 
disable_sysclk; 237 190 } 191 + 192 + clk_rate = clk_get_rate(pwm->pwm_clk); 193 + 194 + /* The maximum input clock divider is 512 */ 195 + val = (u64)NSEC_PER_SEC * 512 * pwm->data->max_timebase; 196 + do_div(val, clk_rate); 197 + pwm->max_period_ns = val; 198 + 199 + val = (u64)NSEC_PER_SEC * MIN_TMBASE_STEPS; 200 + do_div(val, clk_rate); 201 + pwm->min_period_ns = val; 238 202 239 203 pwm->chip.dev = &pdev->dev; 240 204 pwm->chip.ops = &img_pwm_ops; ··· 285 227 286 228 return pwmchip_remove(&pwm_chip->chip); 287 229 } 288 - 289 - static const struct of_device_id img_pwm_of_match[] = { 290 - { .compatible = "img,pistachio-pwm", }, 291 - { } 292 - }; 293 - MODULE_DEVICE_TABLE(of, img_pwm_of_match); 294 230 295 231 static struct platform_driver img_pwm_driver = { 296 232 .driver = {
+3 -2
drivers/regulator/da9052-regulator.c
··· 394 394 395 395 static int da9052_regulator_probe(struct platform_device *pdev) 396 396 { 397 + const struct mfd_cell *cell = mfd_get_cell(pdev); 397 398 struct regulator_config config = { }; 398 399 struct da9052_regulator *regulator; 399 400 struct da9052 *da9052; ··· 410 409 regulator->da9052 = da9052; 411 410 412 411 regulator->info = find_regulator_info(regulator->da9052->chip_id, 413 - pdev->id); 412 + cell->id); 414 413 if (regulator->info == NULL) { 415 414 dev_err(&pdev->dev, "invalid regulator ID specified\n"); 416 415 return -EINVAL; ··· 420 419 config.driver_data = regulator; 421 420 config.regmap = da9052->regmap; 422 421 if (pdata && pdata->regulators) { 423 - config.init_data = pdata->regulators[pdev->id]; 422 + config.init_data = pdata->regulators[cell->id]; 424 423 } else { 425 424 #ifdef CONFIG_OF 426 425 struct device_node *nproot = da9052->dev->of_node;
+9 -11
drivers/s390/crypto/ap_bus.c
··· 1158 1158 poll_timeout = time; 1159 1159 hr_time = ktime_set(0, poll_timeout); 1160 1160 1161 - if (!hrtimer_is_queued(&ap_poll_timer) || 1162 - !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) { 1163 - hrtimer_set_expires(&ap_poll_timer, hr_time); 1164 - hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS); 1165 - } 1161 + spin_lock_bh(&ap_poll_timer_lock); 1162 + hrtimer_cancel(&ap_poll_timer); 1163 + hrtimer_set_expires(&ap_poll_timer, hr_time); 1164 + hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS); 1165 + spin_unlock_bh(&ap_poll_timer_lock); 1166 + 1166 1167 return count; 1167 1168 } 1168 1169 ··· 1529 1528 ktime_t hr_time; 1530 1529 1531 1530 spin_lock_bh(&ap_poll_timer_lock); 1532 - if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag) 1533 - goto out; 1534 - if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) { 1531 + if (!hrtimer_is_queued(&ap_poll_timer) && !ap_suspend_flag) { 1535 1532 hr_time = ktime_set(0, poll_timeout); 1536 1533 hrtimer_forward_now(&ap_poll_timer, hr_time); 1537 1534 hrtimer_restart(&ap_poll_timer); 1538 1535 } 1539 - out: 1540 1536 spin_unlock_bh(&ap_poll_timer_lock); 1541 1537 } 1542 1538 ··· 1950 1952 { 1951 1953 int i; 1952 1954 1953 - if (ap_domain_index != -1) 1955 + if ((ap_domain_index != -1) && (ap_test_config_domain(ap_domain_index))) 1954 1956 for (i = 0; i < AP_DEVICES; i++) 1955 1957 ap_reset_queue(AP_MKQID(i, ap_domain_index)); 1956 1958 } ··· 2095 2097 hrtimer_cancel(&ap_poll_timer); 2096 2098 destroy_workqueue(ap_work_queue); 2097 2099 tasklet_kill(&ap_tasklet); 2098 - root_device_unregister(ap_root_device); 2099 2100 while ((dev = bus_find_device(&ap_bus_type, NULL, NULL, 2100 2101 __ap_match_all))) 2101 2102 { ··· 2103 2106 } 2104 2107 for (i = 0; ap_bus_attrs[i]; i++) 2105 2108 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); 2109 + root_device_unregister(ap_root_device); 2106 2110 bus_unregister(&ap_bus_type); 2107 2111 
unregister_reset_call(&ap_reset_call); 2108 2112 if (ap_using_interrupts())
+3 -3
drivers/scsi/be2iscsi/be.h
··· 1 1 /** 2 - * Copyright (C) 2005 - 2014 Emulex 2 + * Copyright (C) 2005 - 2015 Avago Technologies 3 3 * All rights reserved. 4 4 * 5 5 * This program is free software; you can redistribute it and/or ··· 8 8 * Public License is included in this distribution in the file called COPYING. 9 9 * 10 10 * Contact Information: 11 - * linux-drivers@emulex.com 11 + * linux-drivers@avagotech.com 12 12 * 13 - * Emulex 13 + * Avago Technologies 14 14 * 3333 Susan Street 15 15 * Costa Mesa, CA 92626 16 16 */
+3 -3
drivers/scsi/be2iscsi/be_cmds.c
··· 1 1 /** 2 - * Copyright (C) 2005 - 2014 Emulex 2 + * Copyright (C) 2005 - 2015 Avago Technologies 3 3 * All rights reserved. 4 4 * 5 5 * This program is free software; you can redistribute it and/or ··· 8 8 * Public License is included in this distribution in the file called COPYING. 9 9 * 10 10 * Contact Information: 11 - * linux-drivers@emulex.com 11 + * linux-drivers@avagotech.com 12 12 * 13 - * Emulex 13 + * Avago Technologies 14 14 * 3333 Susan Street 15 15 * Costa Mesa, CA 92626 16 16 */
+3 -3
drivers/scsi/be2iscsi/be_cmds.h
··· 1 1 /** 2 - * Copyright (C) 2005 - 2014 Emulex 2 + * Copyright (C) 2005 - 2015 Avago Technologies 3 3 * All rights reserved. 4 4 * 5 5 * This program is free software; you can redistribute it and/or ··· 8 8 * Public License is included in this distribution in the file called COPYING. 9 9 * 10 10 * Contact Information: 11 - * linux-drivers@emulex.com 11 + * linux-drivers@avagotech.com 12 12 * 13 - * Emulex 13 + * Avago Technologies 14 14 * 3333 Susan Street 15 15 * Costa Mesa, CA 92626 16 16 */
+4 -4
drivers/scsi/be2iscsi/be_iscsi.c
··· 1 1 /** 2 - * Copyright (C) 2005 - 2014 Emulex 2 + * Copyright (C) 2005 - 2015 Avago Technologies 3 3 * All rights reserved. 4 4 * 5 5 * This program is free software; you can redistribute it and/or ··· 7 7 * as published by the Free Software Foundation. The full GNU General 8 8 * Public License is included in this distribution in the file called COPYING. 9 9 * 10 - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) 10 + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) 11 11 * 12 12 * Contact Information: 13 - * linux-drivers@emulex.com 13 + * linux-drivers@avagotech.com 14 14 * 15 - * Emulex 15 + * Avago Technologies 16 16 * 3333 Susan Street 17 17 * Costa Mesa, CA 92626 18 18 */
+4 -4
drivers/scsi/be2iscsi/be_iscsi.h
··· 1 1 /** 2 - * Copyright (C) 2005 - 2014 Emulex 2 + * Copyright (C) 2005 - 2015 Avago Technologies 3 3 * All rights reserved. 4 4 * 5 5 * This program is free software; you can redistribute it and/or ··· 7 7 * as published by the Free Software Foundation. The full GNU General 8 8 * Public License is included in this distribution in the file called COPYING. 9 9 * 10 - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) 10 + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) 11 11 * 12 12 * Contact Information: 13 - * linux-drivers@emulex.com 13 + * linux-drivers@avagotech.com 14 14 * 15 - * Emulex 15 + * Avago Technologies 16 16 * 3333 Susan Street 17 17 * Costa Mesa, CA 92626 18 18 */
+6 -6
drivers/scsi/be2iscsi/be_main.c
··· 1 1 /** 2 - * Copyright (C) 2005 - 2014 Emulex 2 + * Copyright (C) 2005 - 2015 Avago Technologies 3 3 * All rights reserved. 4 4 * 5 5 * This program is free software; you can redistribute it and/or ··· 7 7 * as published by the Free Software Foundation. The full GNU General 8 8 * Public License is included in this distribution in the file called COPYING. 9 9 * 10 - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) 10 + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) 11 11 * 12 12 * Contact Information: 13 - * linux-drivers@emulex.com 13 + * linux-drivers@avagotech.com 14 14 * 15 - * Emulex 15 + * Avago Technologies 16 16 * 3333 Susan Street 17 17 * Costa Mesa, CA 92626 18 18 */ ··· 50 50 51 51 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); 52 52 MODULE_VERSION(BUILD_STR); 53 - MODULE_AUTHOR("Emulex Corporation"); 53 + MODULE_AUTHOR("Avago Technologies"); 54 54 MODULE_LICENSE("GPL"); 55 55 module_param(be_iopoll_budget, int, 0); 56 56 module_param(enable_msix, int, 0); ··· 552 552 553 553 static struct scsi_host_template beiscsi_sht = { 554 554 .module = THIS_MODULE, 555 - .name = "Emulex 10Gbe open-iscsi Initiator Driver", 555 + .name = "Avago Technologies 10Gbe open-iscsi Initiator Driver", 556 556 .proc_name = DRV_NAME, 557 557 .queuecommand = iscsi_queuecommand, 558 558 .change_queue_depth = scsi_change_queue_depth,
+5 -5
drivers/scsi/be2iscsi/be_main.h
··· 1 1 /** 2 - * Copyright (C) 2005 - 2014 Emulex 2 + * Copyright (C) 2005 - 2015 Avago Technologies 3 3 * All rights reserved. 4 4 * 5 5 * This program is free software; you can redistribute it and/or ··· 7 7 * as published by the Free Software Foundation. The full GNU General 8 8 * Public License is included in this distribution in the file called COPYING. 9 9 * 10 - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) 10 + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) 11 11 * 12 12 * Contact Information: 13 - * linux-drivers@emulex.com 13 + * linux-drivers@avagotech.com 14 14 * 15 - * Emulex 15 + * Avago Technologies 16 16 * 3333 Susan Street 17 17 * Costa Mesa, CA 92626 18 18 */ ··· 37 37 38 38 #define DRV_NAME "be2iscsi" 39 39 #define BUILD_STR "10.4.114.0" 40 - #define BE_NAME "Emulex OneConnect" \ 40 + #define BE_NAME "Avago Technologies OneConnect" \ 41 41 "Open-iSCSI Driver version" BUILD_STR 42 42 #define DRV_DESC BE_NAME " " "Driver" 43 43
+4 -4
drivers/scsi/be2iscsi/be_mgmt.c
··· 1 1 /** 2 - * Copyright (C) 2005 - 2014 Emulex 2 + * Copyright (C) 2005 - 2015 Avago Technologies 3 3 * All rights reserved. 4 4 * 5 5 * This program is free software; you can redistribute it and/or ··· 7 7 * as published by the Free Software Foundation. The full GNU General 8 8 * Public License is included in this distribution in the file called COPYING. 9 9 * 10 - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) 10 + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) 11 11 * 12 12 * Contact Information: 13 - * linux-drivers@emulex.com 13 + * linux-drivers@avagotech.com 14 14 * 15 - * Emulex 15 + * Avago Technologies 16 16 * 3333 Susan Street 17 17 * Costa Mesa, CA 92626 18 18 */
+4 -4
drivers/scsi/be2iscsi/be_mgmt.h
··· 1 1 /** 2 - * Copyright (C) 2005 - 2014 Emulex 2 + * Copyright (C) 2005 - 2015 Avago Technologies 3 3 * All rights reserved. 4 4 * 5 5 * This program is free software; you can redistribute it and/or ··· 7 7 * as published by the Free Software Foundation. The full GNU General 8 8 * Public License is included in this distribution in the file called COPYING. 9 9 * 10 - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) 10 + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) 11 11 * 12 12 * Contact Information: 13 - * linux-drivers@emulex.com 13 + * linux-drivers@avagotech.com 14 14 * 15 - * Emulex 15 + * Avago Technologies 16 16 * 3333 Susan Street 17 17 * Costa Mesa, CA 92626 18 18 */
+21 -20
drivers/scsi/lpfc/lpfc_scsi.c
··· 1130 1130 } 1131 1131 1132 1132 /** 1133 - * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB 1134 - * @data: A pointer to the immediate command data portion of the IOCB. 1135 - * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. 1136 - * 1137 - * The routine copies the entire FCP command from @fcp_cmnd to @data while 1138 - * byte swapping the data to big endian format for transmission on the wire. 1139 - **/ 1140 - static void 1141 - lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) 1142 - { 1143 - int i, j; 1144 - 1145 - for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); 1146 - i += sizeof(uint32_t), j++) { 1147 - ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); 1148 - } 1149 - } 1150 - 1151 - /** 1152 1133 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec 1153 1134 * @phba: The Hba for which this call is being executed. 1154 1135 * @lpfc_cmd: The scsi buffer which is going to be mapped. ··· 1264 1283 * we need to set word 4 of IOCB here 1265 1284 */ 1266 1285 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); 1267 - lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); 1268 1286 return 0; 1269 1287 } 1270 1288 ··· 4127 4147 } 4128 4148 4129 4149 /** 4150 + * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB 4151 + * @data: A pointer to the immediate command data portion of the IOCB. 4152 + * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. 4153 + * 4154 + * The routine copies the entire FCP command from @fcp_cmnd to @data while 4155 + * byte swapping the data to big endian format for transmission on the wire. 
4156 + **/ 4157 + static void 4158 + lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) 4159 + { 4160 + int i, j; 4161 + for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); 4162 + i += sizeof(uint32_t), j++) { 4163 + ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); 4164 + } 4165 + } 4166 + 4167 + /** 4130 4168 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit 4131 4169 * @vport: The virtual port for which this call is being executed. 4132 4170 * @lpfc_cmd: The scsi command which needs to send. ··· 4223 4225 fcp_cmnd->fcpCntl3 = 0; 4224 4226 phba->fc4ControlRequests++; 4225 4227 } 4228 + if (phba->sli_rev == 3 && 4229 + !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) 4230 + lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); 4226 4231 /* 4227 4232 * Finish initializing those IOCB fields that are independent 4228 4233 * of the scsi_cmnd request_buffer
+2 -4
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 1020 1020 struct se_portal_group *se_tpg = &base_tpg->se_tpg; 1021 1021 struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha; 1022 1022 1023 - if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, 1024 - &se_tpg->tpg_group.cg_item)) { 1023 + if (!target_depend_item(&se_tpg->tpg_group.cg_item)) { 1025 1024 atomic_set(&base_tpg->lport_tpg_enabled, 1); 1026 1025 qlt_enable_vha(base_vha); 1027 1026 } ··· 1036 1037 1037 1038 if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) { 1038 1039 atomic_set(&base_tpg->lport_tpg_enabled, 0); 1039 - configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, 1040 - &se_tpg->tpg_group.cg_item); 1040 + target_undepend_item(&se_tpg->tpg_group.cg_item); 1041 1041 } 1042 1042 complete(&base_tpg->tpg_base_comp); 1043 1043 }
+5 -14
drivers/scsi/sd.c
··· 1600 1600 { 1601 1601 u64 start_lba = blk_rq_pos(scmd->request); 1602 1602 u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512); 1603 + u64 factor = scmd->device->sector_size / 512; 1603 1604 u64 bad_lba; 1604 1605 int info_valid; 1605 1606 /* ··· 1622 1621 if (scsi_bufflen(scmd) <= scmd->device->sector_size) 1623 1622 return 0; 1624 1623 1625 - if (scmd->device->sector_size < 512) { 1626 - /* only legitimate sector_size here is 256 */ 1627 - start_lba <<= 1; 1628 - end_lba <<= 1; 1629 - } else { 1630 - /* be careful ... don't want any overflows */ 1631 - unsigned int factor = scmd->device->sector_size / 512; 1632 - do_div(start_lba, factor); 1633 - do_div(end_lba, factor); 1634 - } 1624 + /* be careful ... don't want any overflows */ 1625 + do_div(start_lba, factor); 1626 + do_div(end_lba, factor); 1635 1627 1636 1628 /* The bad lba was reported incorrectly, we have no idea where 1637 1629 * the error is. ··· 2182 2188 if (sector_size != 512 && 2183 2189 sector_size != 1024 && 2184 2190 sector_size != 2048 && 2185 - sector_size != 4096 && 2186 - sector_size != 256) { 2191 + sector_size != 4096) { 2187 2192 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n", 2188 2193 sector_size); 2189 2194 /* ··· 2237 2244 sdkp->capacity <<= 2; 2238 2245 else if (sector_size == 1024) 2239 2246 sdkp->capacity <<= 1; 2240 - else if (sector_size == 256) 2241 - sdkp->capacity >>= 1; 2242 2247 2243 2248 blk_queue_physical_block_size(sdp->request_queue, 2244 2249 sdkp->physical_block_size);
+1 -2
drivers/scsi/storvsc_drv.c
··· 1600 1600 break; 1601 1601 default: 1602 1602 vm_srb->data_in = UNKNOWN_TYPE; 1603 - vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN | 1604 - SRB_FLAGS_DATA_OUT); 1603 + vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER; 1605 1604 break; 1606 1605 } 1607 1606
+4 -3
drivers/ssb/driver_pcicore.c
··· 359 359 360 360 /* 361 361 * Accessing PCI config without a proper delay after devices reset (not 362 - * GPIO reset) was causing reboots on WRT300N v1.0. 362 + * GPIO reset) was causing reboots on WRT300N v1.0 (BCM4704). 363 363 * Tested delay 850 us lowered reboot chance to 50-80%, 1000 us fixed it 364 364 * completely. Flushing all writes was also tested but with no luck. 365 + * The same problem was reported for WRT350N v1 (BCM4705), so we just 366 + * sleep here unconditionally. 365 367 */ 366 - if (pc->dev->bus->chip_id == 0x4704) 367 - usleep_range(1000, 2000); 368 + usleep_range(1000, 2000); 368 369 369 370 /* Enable PCI bridge BAR0 prefetch and burst */ 370 371 val = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
+1 -1
drivers/target/iscsi/iscsi_target.c
··· 230 230 * Here we serialize access across the TIQN+TPG Tuple. 231 231 */ 232 232 ret = down_interruptible(&tpg->np_login_sem); 233 - if ((ret != 0) || signal_pending(current)) 233 + if (ret != 0) 234 234 return -1; 235 235 236 236 spin_lock_bh(&tpg->tpg_state_lock);
+1
drivers/target/iscsi/iscsi_target_login.c
··· 346 346 if (IS_ERR(sess->se_sess)) { 347 347 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 348 348 ISCSI_LOGIN_STATUS_NO_RESOURCES); 349 + kfree(sess->sess_ops); 349 350 kfree(sess); 350 351 return -ENOMEM; 351 352 }
+1 -4
drivers/target/iscsi/iscsi_target_tpg.c
··· 161 161 int iscsit_get_tpg( 162 162 struct iscsi_portal_group *tpg) 163 163 { 164 - int ret; 165 - 166 - ret = mutex_lock_interruptible(&tpg->tpg_access_lock); 167 - return ((ret != 0) || signal_pending(current)) ? -1 : 0; 164 + return mutex_lock_interruptible(&tpg->tpg_access_lock); 168 165 } 169 166 170 167 void iscsit_put_tpg(struct iscsi_portal_group *tpg)
+2 -2
drivers/target/target_core_alua.c
··· 704 704 705 705 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) 706 706 return 0; 707 - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 707 + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 708 708 return 0; 709 709 710 710 if (!port) ··· 2377 2377 2378 2378 int core_setup_alua(struct se_device *dev) 2379 2379 { 2380 - if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV && 2380 + if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && 2381 2381 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { 2382 2382 struct t10_alua_lu_gp_member *lu_gp_mem; 2383 2383
+19 -21
drivers/target/target_core_configfs.c
··· 212 212 213 213 pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" 214 214 " %s\n", tf->tf_group.cg_item.ci_name); 215 - /* 216 - * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item() 217 - */ 218 - tf->tf_ops.tf_subsys = tf->tf_subsys; 219 215 tf->tf_fabric = &tf->tf_group.cg_item; 220 216 pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" 221 217 " for %s\n", name); ··· 287 291 }, 288 292 }; 289 293 290 - struct configfs_subsystem *target_core_subsystem[] = { 291 - &target_core_fabrics, 292 - NULL, 293 - }; 294 + int target_depend_item(struct config_item *item) 295 + { 296 + return configfs_depend_item(&target_core_fabrics, item); 297 + } 298 + EXPORT_SYMBOL(target_depend_item); 299 + 300 + void target_undepend_item(struct config_item *item) 301 + { 302 + return configfs_undepend_item(&target_core_fabrics, item); 303 + } 304 + EXPORT_SYMBOL(target_undepend_item); 294 305 295 306 /*############################################################################## 296 307 // Start functions called by external Target Fabrics Modules ··· 470 467 * struct target_fabric_configfs->tf_cit_tmpl 471 468 */ 472 469 tf->tf_module = fo->module; 473 - tf->tf_subsys = target_core_subsystem[0]; 474 470 snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name); 475 471 476 472 tf->tf_ops = *fo; ··· 811 809 { 812 810 int ret; 813 811 814 - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 812 + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 815 813 return sprintf(page, "Passthrough\n"); 816 814 817 815 spin_lock(&dev->dev_reservation_lock); ··· 962 960 static ssize_t target_core_dev_pr_show_attr_res_type( 963 961 struct se_device *dev, char *page) 964 962 { 965 - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 963 + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 966 964 return sprintf(page, "SPC_PASSTHROUGH\n"); 967 965 else if (dev->dev_reservation_flags & 
DRF_SPC2_RESERVATIONS) 968 966 return sprintf(page, "SPC2_RESERVATIONS\n"); ··· 975 973 static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( 976 974 struct se_device *dev, char *page) 977 975 { 978 - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 976 + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 979 977 return 0; 980 978 981 979 return sprintf(page, "APTPL Bit Status: %s\n", ··· 990 988 static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( 991 989 struct se_device *dev, char *page) 992 990 { 993 - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 991 + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 994 992 return 0; 995 993 996 994 return sprintf(page, "Ready to process PR APTPL metadata..\n"); ··· 1037 1035 u16 port_rpti = 0, tpgt = 0; 1038 1036 u8 type = 0, scope; 1039 1037 1040 - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1038 + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1041 1039 return 0; 1042 1040 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 1043 1041 return 0; ··· 2872 2870 { 2873 2871 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; 2874 2872 struct config_group *lu_gp_cg = NULL; 2875 - struct configfs_subsystem *subsys; 2873 + struct configfs_subsystem *subsys = &target_core_fabrics; 2876 2874 struct t10_alua_lu_gp *lu_gp; 2877 2875 int ret; 2878 2876 ··· 2880 2878 " Engine: %s on %s/%s on "UTS_RELEASE"\n", 2881 2879 TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); 2882 2880 2883 - subsys = target_core_subsystem[0]; 2884 2881 config_group_init(&subsys->su_group); 2885 2882 mutex_init(&subsys->su_mutex); 2886 2883 ··· 3009 3008 3010 3009 static void __exit target_core_exit_configfs(void) 3011 3010 { 3012 - struct configfs_subsystem *subsys; 3013 3011 struct config_group *hba_cg, *alua_cg, *lu_gp_cg; 3014 3012 struct config_item *item; 3015 3013 int i; 3016 - 3017 - subsys = 
target_core_subsystem[0]; 3018 3014 3019 3015 lu_gp_cg = &alua_lu_gps_group; 3020 3016 for (i = 0; lu_gp_cg->default_groups[i]; i++) { ··· 3043 3045 * We expect subsys->su_group.default_groups to be released 3044 3046 * by configfs subsystem provider logic.. 3045 3047 */ 3046 - configfs_unregister_subsystem(subsys); 3047 - kfree(subsys->su_group.default_groups); 3048 + configfs_unregister_subsystem(&target_core_fabrics); 3049 + kfree(target_core_fabrics.su_group.default_groups); 3048 3050 3049 3051 core_alua_free_lu_gp(default_lu_gp); 3050 3052 default_lu_gp = NULL;
+76 -2
drivers/target/target_core_device.c
··· 33 33 #include <linux/kthread.h> 34 34 #include <linux/in.h> 35 35 #include <linux/export.h> 36 + #include <asm/unaligned.h> 36 37 #include <net/sock.h> 37 38 #include <net/tcp.h> 38 39 #include <scsi/scsi.h> ··· 528 527 list_add_tail(&port->sep_list, &dev->dev_sep_list); 529 528 spin_unlock(&dev->se_port_lock); 530 529 531 - if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV && 530 + if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && 532 531 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { 533 532 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); 534 533 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { ··· 1604 1603 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI 1605 1604 * passthrough because this is being provided by the backend LLD. 1606 1605 */ 1607 - if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { 1606 + if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) { 1608 1607 strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8); 1609 1608 strncpy(&dev->t10_wwn.model[0], 1610 1609 dev->transport->inquiry_prod, 16); ··· 1708 1707 target_free_device(g_lun0_dev); 1709 1708 core_delete_hba(hba); 1710 1709 } 1710 + 1711 + /* 1712 + * Common CDB parsing for kernel and user passthrough. 1713 + */ 1714 + sense_reason_t 1715 + passthrough_parse_cdb(struct se_cmd *cmd, 1716 + sense_reason_t (*exec_cmd)(struct se_cmd *cmd)) 1717 + { 1718 + unsigned char *cdb = cmd->t_task_cdb; 1719 + 1720 + /* 1721 + * Clear a lun set in the cdb if the initiator talking to use spoke 1722 + * and old standards version, as we can't assume the underlying device 1723 + * won't choke up on it. 
1724 + */ 1725 + switch (cdb[0]) { 1726 + case READ_10: /* SBC - RDProtect */ 1727 + case READ_12: /* SBC - RDProtect */ 1728 + case READ_16: /* SBC - RDProtect */ 1729 + case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ 1730 + case VERIFY: /* SBC - VRProtect */ 1731 + case VERIFY_16: /* SBC - VRProtect */ 1732 + case WRITE_VERIFY: /* SBC - VRProtect */ 1733 + case WRITE_VERIFY_12: /* SBC - VRProtect */ 1734 + case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ 1735 + break; 1736 + default: 1737 + cdb[1] &= 0x1f; /* clear logical unit number */ 1738 + break; 1739 + } 1740 + 1741 + /* 1742 + * For REPORT LUNS we always need to emulate the response, for everything 1743 + * else, pass it up. 1744 + */ 1745 + if (cdb[0] == REPORT_LUNS) { 1746 + cmd->execute_cmd = spc_emulate_report_luns; 1747 + return TCM_NO_SENSE; 1748 + } 1749 + 1750 + /* Set DATA_CDB flag for ops that should have it */ 1751 + switch (cdb[0]) { 1752 + case READ_6: 1753 + case READ_10: 1754 + case READ_12: 1755 + case READ_16: 1756 + case WRITE_6: 1757 + case WRITE_10: 1758 + case WRITE_12: 1759 + case WRITE_16: 1760 + case WRITE_VERIFY: 1761 + case WRITE_VERIFY_12: 1762 + case 0x8e: /* WRITE_VERIFY_16 */ 1763 + case COMPARE_AND_WRITE: 1764 + case XDWRITEREAD_10: 1765 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 1766 + break; 1767 + case VARIABLE_LENGTH_CMD: 1768 + switch (get_unaligned_be16(&cdb[8])) { 1769 + case READ_32: 1770 + case WRITE_32: 1771 + case 0x0c: /* WRITE_VERIFY_32 */ 1772 + case XDWRITEREAD_32: 1773 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 1774 + break; 1775 + } 1776 + } 1777 + 1778 + cmd->execute_cmd = exec_cmd; 1779 + 1780 + return TCM_NO_SENSE; 1781 + } 1782 + EXPORT_SYMBOL(passthrough_parse_cdb);
-1
drivers/target/target_core_file.c
··· 958 958 .inquiry_prod = "FILEIO", 959 959 .inquiry_rev = FD_VERSION, 960 960 .owner = THIS_MODULE, 961 - .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 962 961 .attach_hba = fd_attach_hba, 963 962 .detach_hba = fd_detach_hba, 964 963 .alloc_device = fd_alloc_device,
-1
drivers/target/target_core_iblock.c
··· 904 904 .inquiry_prod = "IBLOCK", 905 905 .inquiry_rev = IBLOCK_VERSION, 906 906 .owner = THIS_MODULE, 907 - .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 908 907 .attach_hba = iblock_attach_hba, 909 908 .detach_hba = iblock_detach_hba, 910 909 .alloc_device = iblock_alloc_device,
-3
drivers/target/target_core_internal.h
··· 4 4 /* target_core_alua.c */ 5 5 extern struct t10_alua_lu_gp *default_lu_gp; 6 6 7 - /* target_core_configfs.c */ 8 - extern struct configfs_subsystem *target_core_subsystem[]; 9 - 10 7 /* target_core_device.c */ 11 8 extern struct mutex g_device_mutex; 12 9 extern struct list_head g_device_list;
+8 -26
drivers/target/target_core_pr.c
··· 1367 1367 1368 1368 static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) 1369 1369 { 1370 - return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, 1371 - &tpg->tpg_group.cg_item); 1370 + return target_depend_item(&tpg->tpg_group.cg_item); 1372 1371 } 1373 1372 1374 1373 static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) 1375 1374 { 1376 - configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1377 - &tpg->tpg_group.cg_item); 1378 - 1375 + target_undepend_item(&tpg->tpg_group.cg_item); 1379 1376 atomic_dec_mb(&tpg->tpg_pr_ref_count); 1380 1377 } 1381 1378 1382 1379 static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) 1383 1380 { 1384 - struct se_portal_group *tpg = nacl->se_tpg; 1385 - 1386 1381 if (nacl->dynamic_node_acl) 1387 1382 return 0; 1388 - 1389 - return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, 1390 - &nacl->acl_group.cg_item); 1383 + return target_depend_item(&nacl->acl_group.cg_item); 1391 1384 } 1392 1385 1393 1386 static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) 1394 1387 { 1395 - struct se_portal_group *tpg = nacl->se_tpg; 1396 - 1397 - if (nacl->dynamic_node_acl) { 1398 - atomic_dec_mb(&nacl->acl_pr_ref_count); 1399 - return; 1400 - } 1401 - 1402 - configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1403 - &nacl->acl_group.cg_item); 1404 - 1388 + if (!nacl->dynamic_node_acl) 1389 + target_undepend_item(&nacl->acl_group.cg_item); 1405 1390 atomic_dec_mb(&nacl->acl_pr_ref_count); 1406 1391 } 1407 1392 ··· 1404 1419 nacl = lun_acl->se_lun_nacl; 1405 1420 tpg = nacl->se_tpg; 1406 1421 1407 - return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, 1408 - &lun_acl->se_lun_group.cg_item); 1422 + return target_depend_item(&lun_acl->se_lun_group.cg_item); 1409 1423 } 1410 1424 1411 1425 static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) ··· 1422 1438 nacl = lun_acl->se_lun_nacl; 1423 1439 tpg = nacl->se_tpg; 1424 1440 1425 - 
configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1426 - &lun_acl->se_lun_group.cg_item); 1427 - 1441 + target_undepend_item(&lun_acl->se_lun_group.cg_item); 1428 1442 atomic_dec_mb(&se_deve->pr_ref_count); 1429 1443 } 1430 1444 ··· 4093 4111 return 0; 4094 4112 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) 4095 4113 return 0; 4096 - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 4114 + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 4097 4115 return 0; 4098 4116 4099 4117 spin_lock(&dev->dev_reservation_lock);
+5 -53
drivers/target/target_core_pscsi.c
··· 521 521 " pdv_host_id: %d\n", pdv->pdv_host_id); 522 522 return -EINVAL; 523 523 } 524 + pdv->pdv_lld_host = sh; 524 525 } 525 526 } else { 526 527 if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) { ··· 604 603 if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) && 605 604 (phv->phv_lld_host != NULL)) 606 605 scsi_host_put(phv->phv_lld_host); 606 + else if (pdv->pdv_lld_host) 607 + scsi_host_put(pdv->pdv_lld_host); 607 608 608 609 if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) 609 610 scsi_device_put(sd); ··· 973 970 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 974 971 } 975 972 976 - /* 977 - * Clear a lun set in the cdb if the initiator talking to use spoke 978 - * and old standards version, as we can't assume the underlying device 979 - * won't choke up on it. 980 - */ 981 - static inline void pscsi_clear_cdb_lun(unsigned char *cdb) 982 - { 983 - switch (cdb[0]) { 984 - case READ_10: /* SBC - RDProtect */ 985 - case READ_12: /* SBC - RDProtect */ 986 - case READ_16: /* SBC - RDProtect */ 987 - case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ 988 - case VERIFY: /* SBC - VRProtect */ 989 - case VERIFY_16: /* SBC - VRProtect */ 990 - case WRITE_VERIFY: /* SBC - VRProtect */ 991 - case WRITE_VERIFY_12: /* SBC - VRProtect */ 992 - case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ 993 - break; 994 - default: 995 - cdb[1] &= 0x1f; /* clear logical unit number */ 996 - break; 997 - } 998 - } 999 - 1000 973 static sense_reason_t 1001 974 pscsi_parse_cdb(struct se_cmd *cmd) 1002 975 { 1003 - unsigned char *cdb = cmd->t_task_cdb; 1004 - 1005 976 if (cmd->se_cmd_flags & SCF_BIDI) 1006 977 return TCM_UNSUPPORTED_SCSI_OPCODE; 1007 978 1008 - pscsi_clear_cdb_lun(cdb); 1009 - 1010 - /* 1011 - * For REPORT LUNS we always need to emulate the response, for everything 1012 - * else the default for pSCSI is to pass the command to the underlying 1013 - * LLD / physical hardware. 
1014 - */ 1015 - switch (cdb[0]) { 1016 - case REPORT_LUNS: 1017 - cmd->execute_cmd = spc_emulate_report_luns; 1018 - return 0; 1019 - case READ_6: 1020 - case READ_10: 1021 - case READ_12: 1022 - case READ_16: 1023 - case WRITE_6: 1024 - case WRITE_10: 1025 - case WRITE_12: 1026 - case WRITE_16: 1027 - case WRITE_VERIFY: 1028 - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 1029 - /* FALLTHROUGH*/ 1030 - default: 1031 - cmd->execute_cmd = pscsi_execute_cmd; 1032 - return 0; 1033 - } 979 + return passthrough_parse_cdb(cmd, pscsi_execute_cmd); 1034 980 } 1035 981 1036 982 static sense_reason_t ··· 1141 1189 static struct se_subsystem_api pscsi_template = { 1142 1190 .name = "pscsi", 1143 1191 .owner = THIS_MODULE, 1144 - .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, 1192 + .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, 1145 1193 .attach_hba = pscsi_attach_hba, 1146 1194 .detach_hba = pscsi_detach_hba, 1147 1195 .pmode_enable_hba = pscsi_pmode_enable_hba,
+1
drivers/target/target_core_pscsi.h
··· 45 45 int pdv_lun_id; 46 46 struct block_device *pdv_bd; 47 47 struct scsi_device *pdv_sd; 48 + struct Scsi_Host *pdv_lld_host; 48 49 } ____cacheline_aligned; 49 50 50 51 typedef enum phv_modes {
-1
drivers/target/target_core_rd.c
··· 733 733 .name = "rd_mcp", 734 734 .inquiry_prod = "RAMDISK-MCP", 735 735 .inquiry_rev = RD_MCP_VERSION, 736 - .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, 737 736 .attach_hba = rd_attach_hba, 738 737 .detach_hba = rd_detach_hba, 739 738 .alloc_device = rd_alloc_device,
+1 -1
drivers/target/target_core_sbc.c
··· 568 568 * comparision using SGLs at cmd->t_bidi_data_sg.. 569 569 */ 570 570 rc = down_interruptible(&dev->caw_sem); 571 - if ((rc != 0) || signal_pending(current)) { 571 + if (rc != 0) { 572 572 cmd->transport_complete_callback = NULL; 573 573 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 574 574 }
+6 -7
drivers/target/target_core_transport.c
··· 1196 1196 * Check if SAM Task Attribute emulation is enabled for this 1197 1197 * struct se_device storage object 1198 1198 */ 1199 - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1199 + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1200 1200 return 0; 1201 1201 1202 1202 if (cmd->sam_task_attr == TCM_ACA_TAG) { ··· 1770 1770 sectors, 0, NULL, 0); 1771 1771 if (unlikely(cmd->pi_err)) { 1772 1772 spin_lock_irq(&cmd->t_state_lock); 1773 - cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; 1773 + cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); 1774 1774 spin_unlock_irq(&cmd->t_state_lock); 1775 1775 transport_generic_request_failure(cmd, cmd->pi_err); 1776 1776 return -1; ··· 1787 1787 { 1788 1788 struct se_device *dev = cmd->se_dev; 1789 1789 1790 - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1790 + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1791 1791 return false; 1792 1792 1793 1793 /* ··· 1868 1868 1869 1869 if (target_handle_task_attr(cmd)) { 1870 1870 spin_lock_irq(&cmd->t_state_lock); 1871 - cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; 1871 + cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT); 1872 1872 spin_unlock_irq(&cmd->t_state_lock); 1873 1873 return; 1874 1874 } ··· 1912 1912 { 1913 1913 struct se_device *dev = cmd->se_dev; 1914 1914 1915 - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1915 + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1916 1916 return; 1917 1917 1918 1918 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { ··· 1957 1957 case DMA_TO_DEVICE: 1958 1958 if (cmd->se_cmd_flags & SCF_BIDI) { 1959 1959 ret = cmd->se_tfo->queue_data_in(cmd); 1960 - if (ret < 0) 1961 - break; 1960 + break; 1962 1961 } 1963 1962 /* Fall through for DMA_TO_DEVICE */ 1964 1963 case DMA_NONE:
+32 -111
drivers/target/target_core_user.c
··· 71 71 u32 host_id; 72 72 }; 73 73 74 - /* User wants all cmds or just some */ 75 - enum passthru_level { 76 - TCMU_PASS_ALL = 0, 77 - TCMU_PASS_IO, 78 - TCMU_PASS_INVALID, 79 - }; 80 - 81 74 #define TCMU_CONFIG_LEN 256 82 75 83 76 struct tcmu_dev { ··· 82 89 #define TCMU_DEV_BIT_OPEN 0 83 90 #define TCMU_DEV_BIT_BROKEN 1 84 91 unsigned long flags; 85 - enum passthru_level pass_level; 86 92 87 93 struct uio_info uio_info; 88 94 ··· 675 683 setup_timer(&udev->timeout, tcmu_device_timedout, 676 684 (unsigned long)udev); 677 685 678 - udev->pass_level = TCMU_PASS_ALL; 679 - 680 686 return &udev->se_dev; 681 687 } 682 688 ··· 938 948 } 939 949 940 950 enum { 941 - Opt_dev_config, Opt_dev_size, Opt_err, Opt_pass_level, 951 + Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err, 942 952 }; 943 953 944 954 static match_table_t tokens = { 945 955 {Opt_dev_config, "dev_config=%s"}, 946 956 {Opt_dev_size, "dev_size=%u"}, 947 - {Opt_pass_level, "pass_level=%u"}, 957 + {Opt_hw_block_size, "hw_block_size=%u"}, 948 958 {Opt_err, NULL} 949 959 }; 950 960 ··· 955 965 char *orig, *ptr, *opts, *arg_p; 956 966 substring_t args[MAX_OPT_ARGS]; 957 967 int ret = 0, token; 958 - int arg; 968 + unsigned long tmp_ul; 959 969 960 970 opts = kstrdup(page, GFP_KERNEL); 961 971 if (!opts) ··· 988 998 if (ret < 0) 989 999 pr_err("kstrtoul() failed for dev_size=\n"); 990 1000 break; 991 - case Opt_pass_level: 992 - match_int(args, &arg); 993 - if (arg >= TCMU_PASS_INVALID) { 994 - pr_warn("TCMU: Invalid pass_level: %d\n", arg); 1001 + case Opt_hw_block_size: 1002 + arg_p = match_strdup(&args[0]); 1003 + if (!arg_p) { 1004 + ret = -ENOMEM; 995 1005 break; 996 1006 } 997 - 998 - pr_debug("TCMU: Setting pass_level to %d\n", arg); 999 - udev->pass_level = arg; 1007 + ret = kstrtoul(arg_p, 0, &tmp_ul); 1008 + kfree(arg_p); 1009 + if (ret < 0) { 1010 + pr_err("kstrtoul() failed for hw_block_size=\n"); 1011 + break; 1012 + } 1013 + if (!tmp_ul) { 1014 + pr_err("hw_block_size must be nonzero\n"); 
1015 + break; 1016 + } 1017 + dev->dev_attrib.hw_block_size = tmp_ul; 1000 1018 break; 1001 1019 default: 1002 1020 break; ··· 1022 1024 1023 1025 bl = sprintf(b + bl, "Config: %s ", 1024 1026 udev->dev_config[0] ? udev->dev_config : "NULL"); 1025 - bl += sprintf(b + bl, "Size: %zu PassLevel: %u\n", 1026 - udev->dev_size, udev->pass_level); 1027 + bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size); 1027 1028 1028 1029 return bl; 1029 1030 } ··· 1036 1039 } 1037 1040 1038 1041 static sense_reason_t 1039 - tcmu_execute_rw(struct se_cmd *se_cmd, struct scatterlist *sgl, u32 sgl_nents, 1040 - enum dma_data_direction data_direction) 1041 - { 1042 - int ret; 1043 - 1044 - ret = tcmu_queue_cmd(se_cmd); 1045 - 1046 - if (ret != 0) 1047 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1048 - else 1049 - return TCM_NO_SENSE; 1050 - } 1051 - 1052 - static sense_reason_t 1053 1042 tcmu_pass_op(struct se_cmd *se_cmd) 1054 1043 { 1055 1044 int ret = tcmu_queue_cmd(se_cmd); ··· 1046 1063 return TCM_NO_SENSE; 1047 1064 } 1048 1065 1049 - static struct sbc_ops tcmu_sbc_ops = { 1050 - .execute_rw = tcmu_execute_rw, 1051 - .execute_sync_cache = tcmu_pass_op, 1052 - .execute_write_same = tcmu_pass_op, 1053 - .execute_write_same_unmap = tcmu_pass_op, 1054 - .execute_unmap = tcmu_pass_op, 1055 - }; 1056 - 1057 1066 static sense_reason_t 1058 1067 tcmu_parse_cdb(struct se_cmd *cmd) 1059 1068 { 1060 - unsigned char *cdb = cmd->t_task_cdb; 1061 - struct tcmu_dev *udev = TCMU_DEV(cmd->se_dev); 1062 - sense_reason_t ret; 1063 - 1064 - switch (udev->pass_level) { 1065 - case TCMU_PASS_ALL: 1066 - /* We're just like pscsi, then */ 1067 - /* 1068 - * For REPORT LUNS we always need to emulate the response, for everything 1069 - * else, pass it up. 
1070 - */ 1071 - switch (cdb[0]) { 1072 - case REPORT_LUNS: 1073 - cmd->execute_cmd = spc_emulate_report_luns; 1074 - break; 1075 - case READ_6: 1076 - case READ_10: 1077 - case READ_12: 1078 - case READ_16: 1079 - case WRITE_6: 1080 - case WRITE_10: 1081 - case WRITE_12: 1082 - case WRITE_16: 1083 - case WRITE_VERIFY: 1084 - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 1085 - /* FALLTHROUGH */ 1086 - default: 1087 - cmd->execute_cmd = tcmu_pass_op; 1088 - } 1089 - ret = TCM_NO_SENSE; 1090 - break; 1091 - case TCMU_PASS_IO: 1092 - ret = sbc_parse_cdb(cmd, &tcmu_sbc_ops); 1093 - break; 1094 - default: 1095 - pr_err("Unknown tcm-user pass level %d\n", udev->pass_level); 1096 - ret = TCM_CHECK_CONDITION_ABORT_CMD; 1097 - } 1098 - 1099 - return ret; 1069 + return passthrough_parse_cdb(cmd, tcmu_pass_op); 1100 1070 } 1101 1071 1102 - DEF_TB_DEFAULT_ATTRIBS(tcmu); 1072 + DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type); 1073 + TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type); 1074 + 1075 + DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size); 1076 + TB_DEV_ATTR_RO(tcmu, hw_block_size); 1077 + 1078 + DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors); 1079 + TB_DEV_ATTR_RO(tcmu, hw_max_sectors); 1080 + 1081 + DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth); 1082 + TB_DEV_ATTR_RO(tcmu, hw_queue_depth); 1103 1083 1104 1084 static struct configfs_attribute *tcmu_backend_dev_attrs[] = { 1105 - &tcmu_dev_attrib_emulate_model_alias.attr, 1106 - &tcmu_dev_attrib_emulate_dpo.attr, 1107 - &tcmu_dev_attrib_emulate_fua_write.attr, 1108 - &tcmu_dev_attrib_emulate_fua_read.attr, 1109 - &tcmu_dev_attrib_emulate_write_cache.attr, 1110 - &tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr, 1111 - &tcmu_dev_attrib_emulate_tas.attr, 1112 - &tcmu_dev_attrib_emulate_tpu.attr, 1113 - &tcmu_dev_attrib_emulate_tpws.attr, 1114 - &tcmu_dev_attrib_emulate_caw.attr, 1115 - &tcmu_dev_attrib_emulate_3pc.attr, 1116 - &tcmu_dev_attrib_pi_prot_type.attr, 1117 1085 &tcmu_dev_attrib_hw_pi_prot_type.attr, 1118 - &tcmu_dev_attrib_pi_prot_format.attr, 1119 - 
&tcmu_dev_attrib_enforce_pr_isids.attr, 1120 - &tcmu_dev_attrib_is_nonrot.attr, 1121 - &tcmu_dev_attrib_emulate_rest_reord.attr, 1122 - &tcmu_dev_attrib_force_pr_aptpl.attr, 1123 1086 &tcmu_dev_attrib_hw_block_size.attr, 1124 - &tcmu_dev_attrib_block_size.attr, 1125 1087 &tcmu_dev_attrib_hw_max_sectors.attr, 1126 - &tcmu_dev_attrib_optimal_sectors.attr, 1127 1088 &tcmu_dev_attrib_hw_queue_depth.attr, 1128 - &tcmu_dev_attrib_queue_depth.attr, 1129 - &tcmu_dev_attrib_max_unmap_lba_count.attr, 1130 - &tcmu_dev_attrib_max_unmap_block_desc_count.attr, 1131 - &tcmu_dev_attrib_unmap_granularity.attr, 1132 - &tcmu_dev_attrib_unmap_granularity_alignment.attr, 1133 - &tcmu_dev_attrib_max_write_same_len.attr, 1134 1089 NULL, 1135 1090 }; 1136 1091 ··· 1077 1156 .inquiry_prod = "USER", 1078 1157 .inquiry_rev = TCMU_VERSION, 1079 1158 .owner = THIS_MODULE, 1080 - .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 1159 + .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, 1081 1160 .attach_hba = tcmu_attach_hba, 1082 1161 .detach_hba = tcmu_detach_hba, 1083 1162 .alloc_device = tcmu_alloc_device,
+6 -9
drivers/target/target_core_xcopy.c
··· 58 58 bool src) 59 59 { 60 60 struct se_device *se_dev; 61 - struct configfs_subsystem *subsys = target_core_subsystem[0]; 62 61 unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; 63 62 int rc; 64 63 ··· 89 90 " se_dev\n", xop->src_dev); 90 91 } 91 92 92 - rc = configfs_depend_item(subsys, 93 - &se_dev->dev_group.cg_item); 93 + rc = target_depend_item(&se_dev->dev_group.cg_item); 94 94 if (rc != 0) { 95 95 pr_err("configfs_depend_item attempt failed:" 96 96 " %d for se_dev: %p\n", rc, se_dev); ··· 97 99 return rc; 98 100 } 99 101 100 - pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p" 101 - " se_dev->se_dev_group: %p\n", subsys, se_dev, 102 + pr_debug("Called configfs_depend_item for se_dev: %p" 103 + " se_dev->se_dev_group: %p\n", se_dev, 102 104 &se_dev->dev_group); 103 105 104 106 mutex_unlock(&g_device_mutex); ··· 371 373 372 374 static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) 373 375 { 374 - struct configfs_subsystem *subsys = target_core_subsystem[0]; 375 376 struct se_device *remote_dev; 376 377 377 378 if (xop->op_origin == XCOL_SOURCE_RECV_OP) ··· 378 381 else 379 382 remote_dev = xop->src_dev; 380 383 381 - pr_debug("Calling configfs_undepend_item for subsys: %p" 384 + pr_debug("Calling configfs_undepend_item for" 382 385 " remote_dev: %p remote_dev->dev_group: %p\n", 383 - subsys, remote_dev, &remote_dev->dev_group.cg_item); 386 + remote_dev, &remote_dev->dev_group.cg_item); 384 387 385 - configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item); 388 + target_undepend_item(&remote_dev->dev_group.cg_item); 386 389 } 387 390 388 391 static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
+3 -3
drivers/thermal/armada_thermal.c
··· 224 224 .is_valid_shift = 10, 225 225 .temp_shift = 0, 226 226 .temp_mask = 0x3ff, 227 - .coef_b = 1169498786UL, 228 - .coef_m = 2000000UL, 229 - .coef_div = 4289, 227 + .coef_b = 2931108200UL, 228 + .coef_m = 5000000UL, 229 + .coef_div = 10502, 230 230 .inverted = true, 231 231 }; 232 232
+2 -1
drivers/thermal/ti-soc-thermal/dra752-thermal-data.c
··· 420 420 TI_BANDGAP_FEATURE_FREEZE_BIT | 421 421 TI_BANDGAP_FEATURE_TALERT | 422 422 TI_BANDGAP_FEATURE_COUNTER_DELAY | 423 - TI_BANDGAP_FEATURE_HISTORY_BUFFER, 423 + TI_BANDGAP_FEATURE_HISTORY_BUFFER | 424 + TI_BANDGAP_FEATURE_ERRATA_814, 424 425 .fclock_name = "l3instr_ts_gclk_div", 425 426 .div_ck_name = "l3instr_ts_gclk_div", 426 427 .conv_table = dra752_adc_to_temp,
+2 -1
drivers/thermal/ti-soc-thermal/omap5-thermal-data.c
··· 319 319 TI_BANDGAP_FEATURE_FREEZE_BIT | 320 320 TI_BANDGAP_FEATURE_TALERT | 321 321 TI_BANDGAP_FEATURE_COUNTER_DELAY | 322 - TI_BANDGAP_FEATURE_HISTORY_BUFFER, 322 + TI_BANDGAP_FEATURE_HISTORY_BUFFER | 323 + TI_BANDGAP_FEATURE_ERRATA_813, 323 324 .fclock_name = "l3instr_ts_gclk_div", 324 325 .div_ck_name = "l3instr_ts_gclk_div", 325 326 .conv_table = omap5430_adc_to_temp,
+76 -2
drivers/thermal/ti-soc-thermal/ti-bandgap.c
··· 119 119 } 120 120 121 121 /** 122 + * ti_errata814_bandgap_read_temp() - helper function to read dra7 sensor temperature 123 + * @bgp: pointer to ti_bandgap structure 124 + * @reg: desired register (offset) to be read 125 + * 126 + * Function to read dra7 bandgap sensor temperature. This is done separately 127 + * so as to workaround the errata "Bandgap Temperature read Dtemp can be 128 + * corrupted" - Errata ID: i814". 129 + * Read accesses to registers listed below can be corrupted due to incorrect 130 + * resynchronization between clock domains. 131 + * Read access to registers below can be corrupted : 132 + * CTRL_CORE_DTEMP_MPU/GPU/CORE/DSPEVE/IVA_n (n = 0 to 4) 133 + * CTRL_CORE_TEMP_SENSOR_MPU/GPU/CORE/DSPEVE/IVA_n 134 + * 135 + * Return: the register value. 136 + */ 137 + static u32 ti_errata814_bandgap_read_temp(struct ti_bandgap *bgp, u32 reg) 138 + { 139 + u32 val1, val2; 140 + 141 + val1 = ti_bandgap_readl(bgp, reg); 142 + val2 = ti_bandgap_readl(bgp, reg); 143 + 144 + /* If both times we read the same value then that is right */ 145 + if (val1 == val2) 146 + return val1; 147 + 148 + /* if val1 and val2 are different read it third time */ 149 + return ti_bandgap_readl(bgp, reg); 150 + } 151 + 152 + /** 122 153 * ti_bandgap_read_temp() - helper function to read sensor temperature 123 154 * @bgp: pointer to ti_bandgap structure 124 155 * @id: bandgap sensor id ··· 179 148 } 180 149 181 150 /* read temperature */ 182 - temp = ti_bandgap_readl(bgp, reg); 151 + if (TI_BANDGAP_HAS(bgp, ERRATA_814)) 152 + temp = ti_errata814_bandgap_read_temp(bgp, reg); 153 + else 154 + temp = ti_bandgap_readl(bgp, reg); 155 + 183 156 temp &= tsr->bgap_dtemp_mask; 184 157 185 158 if (TI_BANDGAP_HAS(bgp, FREEZE_BIT)) ··· 445 410 { 446 411 struct temp_sensor_data *ts_data = bgp->conf->sensors[id].ts_data; 447 412 struct temp_sensor_registers *tsr; 448 - u32 thresh_val, reg_val, t_hot, t_cold; 413 + u32 thresh_val, reg_val, t_hot, t_cold, ctrl; 449 414 int err = 0; 450 415 
451 416 	tsr = bgp->conf->sensors[id].registers; ··· 477 442 		~(tsr->threshold_thot_mask | tsr->threshold_tcold_mask); 478 443 	reg_val |= (t_hot << __ffs(tsr->threshold_thot_mask)) | 479 444 		(t_cold << __ffs(tsr->threshold_tcold_mask)); 445 + 446 + 	/** 447 + 	 * Errata i813: 448 + 	 * Spurious Thermal Alert: Talert can happen randomly while the device 449 + 	 * remains under the temperature limit defined for this event to trigger. 450 + 	 * This spurious event is caused by an incorrect re-synchronization 451 + 	 * between clock domains. The comparison between configured threshold 452 + 	 * and current temperature value can happen while the value is 453 + 	 * transitioning (metastable), thus causing inappropriate event 454 + 	 * generation. No spurious event occurs as long as the threshold value 455 + 	 * stays unchanged. Spurious event can be generated while a thermal 456 + 	 * alert threshold is modified in 457 + 	 * CONTROL_BANDGAP_THRESHOLD_MPU/GPU/CORE/DSPEVE/IVA_n. 458 + 	 */ 459 + 460 + 	if (TI_BANDGAP_HAS(bgp, ERRATA_813)) { 461 + 		/* Mask t_hot and t_cold events at the IP Level */ 462 + 		ctrl = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl); 463 + 464 + 		if (hot) 465 + 			ctrl &= ~tsr->mask_hot_mask; 466 + 		else 467 + 			ctrl &= ~tsr->mask_cold_mask; 468 + 469 + 		ti_bandgap_writel(bgp, ctrl, tsr->bgap_mask_ctrl); 470 + 	} 471 + 472 + 	/* Write the threshold value */ 480 473 	ti_bandgap_writel(bgp, reg_val, tsr->bgap_threshold); 474 + 475 + 	if (TI_BANDGAP_HAS(bgp, ERRATA_813)) { 476 + 		/* Unmask t_hot and t_cold events at the IP Level */ 477 + 		ctrl = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl); 478 + 		if (hot) 479 + 			ctrl |= tsr->mask_hot_mask; 480 + 		else 481 + 			ctrl |= tsr->mask_cold_mask; 482 + 483 + 		ti_bandgap_writel(bgp, ctrl, tsr->bgap_mask_ctrl); 484 + 	} 481 485 482 486 	if (err) { 483 487 		dev_err(bgp->dev, "failed to reprogram thot threshold\n");
+6
drivers/thermal/ti-soc-thermal/ti-bandgap.h
··· 318 318  * TI_BANDGAP_FEATURE_HISTORY_BUFFER - used when the bandgap device features 319 319  * a history buffer of temperatures. 320 320  * 321 +  * TI_BANDGAP_FEATURE_ERRATA_814 - used to workaround when the bandgap device 322 +  * has Errata 814 323 +  * TI_BANDGAP_FEATURE_ERRATA_813 - used to workaround when the bandgap device 324 +  * has Errata 813 321 325  * TI_BANDGAP_HAS(b, f) - macro to check if a bandgap device is capable of a 322 326  * specific feature (above) or not. Return non-zero, if yes. 323 327  */ ··· 335 331 #define TI_BANDGAP_FEATURE_FREEZE_BIT BIT(7) 336 332 #define TI_BANDGAP_FEATURE_COUNTER_DELAY BIT(8) 337 333 #define TI_BANDGAP_FEATURE_HISTORY_BUFFER BIT(9) 334 + #define TI_BANDGAP_FEATURE_ERRATA_814 BIT(10) 335 + #define TI_BANDGAP_FEATURE_ERRATA_813 BIT(11) 338 336 #define TI_BANDGAP_HAS(b, f) \ 339 337 	((b)->conf->features & TI_BANDGAP_FEATURE_ ## f) 340 338
+1 -1
drivers/tty/hvc/hvc_xen.c
··· 289 289 return -ENOMEM; 290 290 } 291 291 292 - info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0); 292 + info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false); 293 293 info->vtermno = HVC_COOKIE; 294 294 295 295 spin_lock(&xencons_lock);
+9 -8
drivers/tty/mips_ejtag_fdc.c
··· 174 174 static inline void mips_ejtag_fdc_write(struct mips_ejtag_fdc_tty *priv, 175 175 unsigned int offs, unsigned int data) 176 176 { 177 - iowrite32(data, priv->reg + offs); 177 + __raw_writel(data, priv->reg + offs); 178 178 } 179 179 180 180 static inline unsigned int mips_ejtag_fdc_read(struct mips_ejtag_fdc_tty *priv, 181 181 unsigned int offs) 182 182 { 183 - return ioread32(priv->reg + offs); 183 + return __raw_readl(priv->reg + offs); 184 184 } 185 185 186 186 /* Encoding of byte stream in FDC words */ ··· 347 347 s += inc[word.bytes - 1]; 348 348 349 349 /* Busy wait until there's space in fifo */ 350 - while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF) 350 + while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF) 351 351 ; 352 - iowrite32(word.word, regs + REG_FDTX(c->index)); 352 + __raw_writel(word.word, regs + REG_FDTX(c->index)); 353 353 } 354 354 out: 355 355 local_irq_restore(flags); ··· 1227 1227 1228 1228 /* Read next word from KGDB channel */ 1229 1229 do { 1230 - stat = ioread32(regs + REG_FDSTAT); 1230 + stat = __raw_readl(regs + REG_FDSTAT); 1231 1231 1232 1232 /* No data waiting? */ 1233 1233 if (stat & REG_FDSTAT_RXE) ··· 1236 1236 /* Read next word */ 1237 1237 channel = (stat & REG_FDSTAT_RXCHAN) >> 1238 1238 REG_FDSTAT_RXCHAN_SHIFT; 1239 - data = ioread32(regs + REG_FDRX); 1239 + data = __raw_readl(regs + REG_FDRX); 1240 1240 } while (channel != CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN); 1241 1241 1242 1242 /* Decode into rbuf */ ··· 1266 1266 return; 1267 1267 1268 1268 /* Busy wait until there's space in fifo */ 1269 - while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF) 1269 + while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF) 1270 1270 ; 1271 - iowrite32(word.word, regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN)); 1271 + __raw_writel(word.word, 1272 + regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN)); 1272 1273 } 1273 1274 1274 1275 /* flush the whole write buffer to the TX FIFO */
+2 -4
drivers/vhost/scsi.c
··· 1409 1409 * dependency now. 1410 1410 */ 1411 1411 se_tpg = &tpg->se_tpg; 1412 - ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, 1413 - &se_tpg->tpg_group.cg_item); 1412 + ret = target_depend_item(&se_tpg->tpg_group.cg_item); 1414 1413 if (ret) { 1415 1414 pr_warn("configfs_depend_item() failed: %d\n", ret); 1416 1415 kfree(vs_tpg); ··· 1512 1513 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. 1513 1514 */ 1514 1515 se_tpg = &tpg->se_tpg; 1515 - configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, 1516 - &se_tpg->tpg_group.cg_item); 1516 + target_undepend_item(&se_tpg->tpg_group.cg_item); 1517 1517 } 1518 1518 if (match) { 1519 1519 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+4
drivers/video/backlight/pwm_bl.c
··· 274 274 275 275 pb->pwm = devm_pwm_get(&pdev->dev, NULL); 276 276 if (IS_ERR(pb->pwm)) { 277 + ret = PTR_ERR(pb->pwm); 278 + if (ret == -EPROBE_DEFER) 279 + goto err_alloc; 280 + 277 281 dev_err(&pdev->dev, "unable to request PWM, trying legacy API\n"); 278 282 pb->legacy = true; 279 283 pb->pwm = pwm_request(data->pwm_id, "pwm-backlight");
+8 -4
drivers/xen/events/events_base.c
··· 957 957 } 958 958 EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels); 959 959 960 - int bind_virq_to_irq(unsigned int virq, unsigned int cpu) 960 + int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu) 961 961 { 962 962 struct evtchn_bind_virq bind_virq; 963 963 int evtchn, irq, ret; ··· 971 971 if (irq < 0) 972 972 goto out; 973 973 974 - irq_set_chip_and_handler_name(irq, &xen_percpu_chip, 975 - handle_percpu_irq, "virq"); 974 + if (percpu) 975 + irq_set_chip_and_handler_name(irq, &xen_percpu_chip, 976 + handle_percpu_irq, "virq"); 977 + else 978 + irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, 979 + handle_edge_irq, "virq"); 976 980 977 981 bind_virq.virq = virq; 978 982 bind_virq.vcpu = cpu; ··· 1066 1062 { 1067 1063 int irq, retval; 1068 1064 1069 - irq = bind_virq_to_irq(virq, cpu); 1065 + irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU); 1070 1066 if (irq < 0) 1071 1067 return irq; 1072 1068 retval = request_irq(irq, handler, irqflags, devname, dev_id);
+1 -1
fs/binfmt_elf.c
··· 918 918 total_size = total_mapping_size(elf_phdata, 919 919 loc->elf_ex.e_phnum); 920 920 if (!total_size) { 921 - error = -EINVAL; 921 + retval = -EINVAL; 922 922 goto out_free_dentry; 923 923 } 924 924 }
+17
fs/btrfs/backref.c
··· 880 880 * indirect refs to their parent bytenr. 881 881 * When roots are found, they're added to the roots list 882 882 * 883 + * NOTE: This can return values > 0 884 + * 883 885 * FIXME some caching might speed things up 884 886 */ 885 887 static int find_parent_nodes(struct btrfs_trans_handle *trans, ··· 1200 1198 return ret; 1201 1199 } 1202 1200 1201 + /** 1202 + * btrfs_check_shared - tell us whether an extent is shared 1203 + * 1204 + * @trans: optional trans handle 1205 + * 1206 + * btrfs_check_shared uses the backref walking code but will short 1207 + * circuit as soon as it finds a root or inode that doesn't match the 1208 + * one passed in. This provides a significant performance benefit for 1209 + * callers (such as fiemap) which want to know whether the extent is 1210 + * shared but do not need a ref count. 1211 + * 1212 + * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error. 1213 + */ 1203 1214 int btrfs_check_shared(struct btrfs_trans_handle *trans, 1204 1215 struct btrfs_fs_info *fs_info, u64 root_objectid, 1205 1216 u64 inum, u64 bytenr) ··· 1241 1226 ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp, 1242 1227 roots, NULL, root_objectid, inum); 1243 1228 if (ret == BACKREF_FOUND_SHARED) { 1229 + /* this is the only condition under which we return 1 */ 1244 1230 ret = 1; 1245 1231 break; 1246 1232 } 1247 1233 if (ret < 0 && ret != -ENOENT) 1248 1234 break; 1235 + ret = 0; 1249 1236 node = ulist_next(tmp, &uiter); 1250 1237 if (!node) 1251 1238 break;
+20
fs/btrfs/extent-tree.c
··· 8829 8829 goto again; 8830 8830 } 8831 8831 8832 + /* 8833 + * if we are changing raid levels, try to allocate a corresponding 8834 + * block group with the new raid level. 8835 + */ 8836 + alloc_flags = update_block_group_flags(root, cache->flags); 8837 + if (alloc_flags != cache->flags) { 8838 + ret = do_chunk_alloc(trans, root, alloc_flags, 8839 + CHUNK_ALLOC_FORCE); 8840 + /* 8841 + * ENOSPC is allowed here, we may have enough space 8842 + * already allocated at the new raid level to 8843 + * carry on 8844 + */ 8845 + if (ret == -ENOSPC) 8846 + ret = 0; 8847 + if (ret < 0) 8848 + goto out; 8849 + } 8832 8850 8833 8851 ret = set_block_group_ro(cache, 0); 8834 8852 if (!ret) ··· 8860 8842 out: 8861 8843 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { 8862 8844 alloc_flags = update_block_group_flags(root, cache->flags); 8845 + lock_chunks(root->fs_info->chunk_root); 8863 8846 check_system_chunk(trans, root, alloc_flags); 8847 + unlock_chunks(root->fs_info->chunk_root); 8864 8848 } 8865 8849 mutex_unlock(&root->fs_info->ro_block_group_mutex); 8866 8850
+1
fs/btrfs/volumes.c
··· 4625 4625 { 4626 4626 u64 chunk_offset; 4627 4627 4628 + ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex)); 4628 4629 chunk_offset = find_next_chunk(extent_root->fs_info); 4629 4630 return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type); 4630 4631 }
+2 -1
fs/cifs/cifs_dfs_ref.c
··· 24 24 #include "cifsfs.h" 25 25 #include "dns_resolve.h" 26 26 #include "cifs_debug.h" 27 + #include "cifs_unicode.h" 27 28 28 29 static LIST_HEAD(cifs_dfs_automount_list); 29 30 ··· 313 312 xid = get_xid(); 314 313 rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls, 315 314 &num_referrals, &referrals, 316 - cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 315 + cifs_remap(cifs_sb)); 317 316 free_xid(xid); 318 317 319 318 cifs_put_tlink(tlink);
+136 -46
fs/cifs/cifs_unicode.c
··· 27 27 #include "cifsglob.h" 28 28 #include "cifs_debug.h" 29 29 30 - /* 31 - * cifs_utf16_bytes - how long will a string be after conversion? 32 - * @utf16 - pointer to input string 33 - * @maxbytes - don't go past this many bytes of input string 34 - * @codepage - destination codepage 35 - * 36 - * Walk a utf16le string and return the number of bytes that the string will 37 - * be after being converted to the given charset, not including any null 38 - * termination required. Don't walk past maxbytes in the source buffer. 39 - */ 40 - int 41 - cifs_utf16_bytes(const __le16 *from, int maxbytes, 42 - const struct nls_table *codepage) 43 - { 44 - int i; 45 - int charlen, outlen = 0; 46 - int maxwords = maxbytes / 2; 47 - char tmp[NLS_MAX_CHARSET_SIZE]; 48 - __u16 ftmp; 49 - 50 - for (i = 0; i < maxwords; i++) { 51 - ftmp = get_unaligned_le16(&from[i]); 52 - if (ftmp == 0) 53 - break; 54 - 55 - charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE); 56 - if (charlen > 0) 57 - outlen += charlen; 58 - else 59 - outlen++; 60 - } 61 - 62 - return outlen; 63 - } 64 - 65 30 int cifs_remap(struct cifs_sb_info *cifs_sb) 66 31 { 67 32 int map_type; ··· 120 155 * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE). 
121 156 */ 122 157 static int 123 - cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp, 158 + cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp, 124 159 int maptype) 125 160 { 126 161 int len = 1; 162 + __u16 src_char; 163 + 164 + src_char = *from; 127 165 128 166 if ((maptype == SFM_MAP_UNI_RSVD) && convert_sfm_char(src_char, target)) 129 167 return len; ··· 136 168 137 169 /* if character not one of seven in special remap set */ 138 170 len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE); 139 - if (len <= 0) { 140 - *target = '?'; 141 - len = 1; 142 - } 171 + if (len <= 0) 172 + goto surrogate_pair; 173 + 174 + return len; 175 + 176 + surrogate_pair: 177 + /* convert SURROGATE_PAIR and IVS */ 178 + if (strcmp(cp->charset, "utf8")) 179 + goto unknown; 180 + len = utf16s_to_utf8s(from, 3, UTF16_LITTLE_ENDIAN, target, 6); 181 + if (len <= 0) 182 + goto unknown; 183 + return len; 184 + 185 + unknown: 186 + *target = '?'; 187 + len = 1; 143 188 return len; 144 189 } 145 190 ··· 187 206 int nullsize = nls_nullsize(codepage); 188 207 int fromwords = fromlen / 2; 189 208 char tmp[NLS_MAX_CHARSET_SIZE]; 190 - __u16 ftmp; 209 + __u16 ftmp[3]; /* ftmp[3] = 3array x 2bytes = 6bytes UTF-16 */ 191 210 192 211 /* 193 212 * because the chars can be of varying widths, we need to take care ··· 198 217 safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize); 199 218 200 219 for (i = 0; i < fromwords; i++) { 201 - ftmp = get_unaligned_le16(&from[i]); 202 - if (ftmp == 0) 220 + ftmp[0] = get_unaligned_le16(&from[i]); 221 + if (ftmp[0] == 0) 203 222 break; 223 + if (i + 1 < fromwords) 224 + ftmp[1] = get_unaligned_le16(&from[i + 1]); 225 + else 226 + ftmp[1] = 0; 227 + if (i + 2 < fromwords) 228 + ftmp[2] = get_unaligned_le16(&from[i + 2]); 229 + else 230 + ftmp[2] = 0; 204 231 205 232 /* 206 233 * check to see if converting this character might make the ··· 223 234 /* put converted char into 'to' buffer */ 224 235 charlen = 
cifs_mapchar(&to[outlen], ftmp, codepage, map_type); 225 236 outlen += charlen; 237 + 238 + /* charlen (=bytes of UTF-8 for 1 character) 239 + * 4bytes UTF-8(surrogate pair) is charlen=4 240 + * (4bytes UTF-16 code) 241 + * 7-8bytes UTF-8(IVS) is charlen=3+4 or 4+4 242 + * (2 UTF-8 pairs divided to 2 UTF-16 pairs) */ 243 + if (charlen == 4) 244 + i++; 245 + else if (charlen >= 5) 246 + /* 5-6bytes UTF-8 */ 247 + i += 2; 226 248 } 227 249 228 250 /* properly null-terminate string */ ··· 293 293 success: 294 294 put_unaligned_le16(0, &to[i]); 295 295 return i; 296 + } 297 + 298 + /* 299 + * cifs_utf16_bytes - how long will a string be after conversion? 300 + * @utf16 - pointer to input string 301 + * @maxbytes - don't go past this many bytes of input string 302 + * @codepage - destination codepage 303 + * 304 + * Walk a utf16le string and return the number of bytes that the string will 305 + * be after being converted to the given charset, not including any null 306 + * termination required. Don't walk past maxbytes in the source buffer. 
307 + */ 308 + int 309 + cifs_utf16_bytes(const __le16 *from, int maxbytes, 310 + const struct nls_table *codepage) 311 + { 312 + int i; 313 + int charlen, outlen = 0; 314 + int maxwords = maxbytes / 2; 315 + char tmp[NLS_MAX_CHARSET_SIZE]; 316 + __u16 ftmp[3]; 317 + 318 + for (i = 0; i < maxwords; i++) { 319 + ftmp[0] = get_unaligned_le16(&from[i]); 320 + if (ftmp[0] == 0) 321 + break; 322 + if (i + 1 < maxwords) 323 + ftmp[1] = get_unaligned_le16(&from[i + 1]); 324 + else 325 + ftmp[1] = 0; 326 + if (i + 2 < maxwords) 327 + ftmp[2] = get_unaligned_le16(&from[i + 2]); 328 + else 329 + ftmp[2] = 0; 330 + 331 + charlen = cifs_mapchar(tmp, ftmp, codepage, NO_MAP_UNI_RSVD); 332 + outlen += charlen; 333 + } 334 + 335 + return outlen; 296 336 } 297 337 298 338 /* ··· 449 409 char src_char; 450 410 __le16 dst_char; 451 411 wchar_t tmp; 412 + wchar_t *wchar_to; /* UTF-16 */ 413 + int ret; 414 + unicode_t u; 452 415 453 416 if (map_chars == NO_MAP_UNI_RSVD) 454 417 return cifs_strtoUTF16(target, source, PATH_MAX, cp); 418 + 419 + wchar_to = kzalloc(6, GFP_KERNEL); 455 420 456 421 for (i = 0; i < srclen; j++) { 457 422 src_char = source[i]; ··· 486 441 * if no match, use question mark, which at least in 487 442 * some cases serves as wild card 488 443 */ 489 - if (charlen < 1) { 490 - dst_char = cpu_to_le16(0x003f); 491 - charlen = 1; 444 + if (charlen > 0) 445 + goto ctoUTF16; 446 + 447 + /* convert SURROGATE_PAIR */ 448 + if (strcmp(cp->charset, "utf8") || !wchar_to) 449 + goto unknown; 450 + if (*(source + i) & 0x80) { 451 + charlen = utf8_to_utf32(source + i, 6, &u); 452 + if (charlen < 0) 453 + goto unknown; 454 + } else 455 + goto unknown; 456 + ret = utf8s_to_utf16s(source + i, charlen, 457 + UTF16_LITTLE_ENDIAN, 458 + wchar_to, 6); 459 + if (ret < 0) 460 + goto unknown; 461 + 462 + i += charlen; 463 + dst_char = cpu_to_le16(*wchar_to); 464 + if (charlen <= 3) 465 + /* 1-3bytes UTF-8 to 2bytes UTF-16 */ 466 + put_unaligned(dst_char, &target[j]); 467 + else if 
(charlen == 4) { 468 + /* 4bytes UTF-8(surrogate pair) to 4bytes UTF-16 469 + * 7-8bytes UTF-8(IVS) divided to 2 UTF-16 470 + * (charlen=3+4 or 4+4) */ 471 + put_unaligned(dst_char, &target[j]); 472 + dst_char = cpu_to_le16(*(wchar_to + 1)); 473 + j++; 474 + put_unaligned(dst_char, &target[j]); 475 + } else if (charlen >= 5) { 476 + /* 5-6bytes UTF-8 to 6bytes UTF-16 */ 477 + put_unaligned(dst_char, &target[j]); 478 + dst_char = cpu_to_le16(*(wchar_to + 1)); 479 + j++; 480 + put_unaligned(dst_char, &target[j]); 481 + dst_char = cpu_to_le16(*(wchar_to + 2)); 482 + j++; 483 + put_unaligned(dst_char, &target[j]); 492 484 } 485 + continue; 486 + 487 + unknown: 488 + dst_char = cpu_to_le16(0x003f); 489 + charlen = 1; 493 490 } 491 + 492 + ctoUTF16: 494 493 /* 495 494 * character may take more than one byte in the source string, 496 495 * but will take exactly two bytes in the target string ··· 545 456 546 457 ctoUTF16_out: 547 458 put_unaligned(0, &target[j]); /* Null terminate target unicode string */ 459 + kfree(wchar_to); 548 460 return j; 549 461 } 550 462
+2
fs/cifs/cifsfs.c
··· 469 469 seq_puts(s, ",nouser_xattr"); 470 470 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR) 471 471 seq_puts(s, ",mapchars"); 472 + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR) 473 + seq_puts(s, ",mapposix"); 472 474 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) 473 475 seq_puts(s, ",sfu"); 474 476 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+2 -2
fs/cifs/cifsproto.h
··· 361 361 extern int CIFSUnixCreateSymLink(const unsigned int xid, 362 362 struct cifs_tcon *tcon, 363 363 const char *fromName, const char *toName, 364 - const struct nls_table *nls_codepage); 364 + const struct nls_table *nls_codepage, int remap); 365 365 extern int CIFSSMBUnixQuerySymLink(const unsigned int xid, 366 366 struct cifs_tcon *tcon, 367 367 const unsigned char *searchName, char **syminfo, 368 - const struct nls_table *nls_codepage); 368 + const struct nls_table *nls_codepage, int remap); 369 369 extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, 370 370 __u16 fid, char **symlinkinfo, 371 371 const struct nls_table *nls_codepage);
+12 -11
fs/cifs/cifssmb.c
··· 2784 2784 int 2785 2785 CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon, 2786 2786 const char *fromName, const char *toName, 2787 - const struct nls_table *nls_codepage) 2787 + const struct nls_table *nls_codepage, int remap) 2788 2788 { 2789 2789 TRANSACTION2_SPI_REQ *pSMB = NULL; 2790 2790 TRANSACTION2_SPI_RSP *pSMBr = NULL; ··· 2804 2804 2805 2805 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 2806 2806 name_len = 2807 - cifs_strtoUTF16((__le16 *) pSMB->FileName, fromName, 2808 - /* find define for this maxpathcomponent */ 2809 - PATH_MAX, nls_codepage); 2807 + cifsConvertToUTF16((__le16 *) pSMB->FileName, fromName, 2808 + /* find define for this maxpathcomponent */ 2809 + PATH_MAX, nls_codepage, remap); 2810 2810 name_len++; /* trailing null */ 2811 2811 name_len *= 2; 2812 2812 ··· 2828 2828 data_offset = (char *) (&pSMB->hdr.Protocol) + offset; 2829 2829 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 2830 2830 name_len_target = 2831 - cifs_strtoUTF16((__le16 *) data_offset, toName, PATH_MAX 2832 - /* find define for this maxpathcomponent */ 2833 - , nls_codepage); 2831 + cifsConvertToUTF16((__le16 *) data_offset, toName, 2832 + /* find define for this maxpathcomponent */ 2833 + PATH_MAX, nls_codepage, remap); 2834 2834 name_len_target++; /* trailing null */ 2835 2835 name_len_target *= 2; 2836 2836 } else { /* BB improve the check for buffer overruns BB */ ··· 3034 3034 int 3035 3035 CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, 3036 3036 const unsigned char *searchName, char **symlinkinfo, 3037 - const struct nls_table *nls_codepage) 3037 + const struct nls_table *nls_codepage, int remap) 3038 3038 { 3039 3039 /* SMB_QUERY_FILE_UNIX_LINK */ 3040 3040 TRANSACTION2_QPI_REQ *pSMB = NULL; ··· 3055 3055 3056 3056 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 3057 3057 name_len = 3058 - cifs_strtoUTF16((__le16 *) pSMB->FileName, searchName, 3059 - PATH_MAX, nls_codepage); 3058 + cifsConvertToUTF16((__le16 *) pSMB->FileName, 
3059 + searchName, PATH_MAX, nls_codepage, 3060 + remap); 3060 3061 name_len++; /* trailing null */ 3061 3062 name_len *= 2; 3062 3063 } else { /* BB improve the check for buffer overruns BB */ ··· 4918 4917 strncpy(pSMB->RequestFileName, search_name, name_len); 4919 4918 } 4920 4919 4921 - if (ses->server && ses->server->sign) 4920 + if (ses->server->sign) 4922 4921 pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 4923 4922 4924 4923 pSMB->hdr.Uid = ses->Suid;
+2 -1
fs/cifs/connect.c
··· 386 386 rc = generic_ip_connect(server); 387 387 if (rc) { 388 388 cifs_dbg(FYI, "reconnect error %d\n", rc); 389 + mutex_unlock(&server->srv_mutex); 389 390 msleep(3000); 390 391 } else { 391 392 atomic_inc(&tcpSesReconnectCount); ··· 394 393 if (server->tcpStatus != CifsExiting) 395 394 server->tcpStatus = CifsNeedNegotiate; 396 395 spin_unlock(&GlobalMid_Lock); 396 + mutex_unlock(&server->srv_mutex); 397 397 } 398 - mutex_unlock(&server->srv_mutex); 399 398 } while (server->tcpStatus == CifsNeedReconnect); 400 399 401 400 return rc;
+1 -2
fs/cifs/dir.c
··· 620 620 } 621 621 rc = CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, 622 622 cifs_sb->local_nls, 623 - cifs_sb->mnt_cifs_flags & 624 - CIFS_MOUNT_MAP_SPECIAL_CHR); 623 + cifs_remap(cifs_sb)); 625 624 if (rc) 626 625 goto mknod_out; 627 626
+3 -4
fs/cifs/file.c
··· 140 140 posix_flags = cifs_posix_convert_flags(f_flags); 141 141 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data, 142 142 poplock, full_path, cifs_sb->local_nls, 143 - cifs_sb->mnt_cifs_flags & 144 - CIFS_MOUNT_MAP_SPECIAL_CHR); 143 + cifs_remap(cifs_sb)); 145 144 cifs_put_tlink(tlink); 146 145 147 146 if (rc) ··· 1552 1553 rc = server->ops->mand_unlock_range(cfile, flock, xid); 1553 1554 1554 1555 out: 1555 - if (flock->fl_flags & FL_POSIX) 1556 - posix_lock_file_wait(file, flock); 1556 + if (flock->fl_flags & FL_POSIX && !rc) 1557 + rc = posix_lock_file_wait(file, flock); 1557 1558 return rc; 1558 1559 } 1559 1560
+27 -4
fs/cifs/inode.c
··· 373 373 374 374 /* could have done a find first instead but this returns more info */ 375 375 rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data, 376 - cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & 377 - CIFS_MOUNT_MAP_SPECIAL_CHR); 376 + cifs_sb->local_nls, cifs_remap(cifs_sb)); 378 377 cifs_put_tlink(tlink); 379 378 380 379 if (!rc) { ··· 401 402 rc = -ENOMEM; 402 403 } else { 403 404 /* we already have inode, update it */ 405 + 406 + /* if uniqueid is different, return error */ 407 + if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM && 408 + CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) { 409 + rc = -ESTALE; 410 + goto cgiiu_exit; 411 + } 412 + 413 + /* if filetype is different, return error */ 414 + if (unlikely(((*pinode)->i_mode & S_IFMT) != 415 + (fattr.cf_mode & S_IFMT))) { 416 + rc = -ESTALE; 417 + goto cgiiu_exit; 418 + } 419 + 404 420 cifs_fattr_to_inode(*pinode, &fattr); 405 421 } 406 422 423 + cgiiu_exit: 407 424 return rc; 408 425 } 409 426 ··· 854 839 if (!*inode) 855 840 rc = -ENOMEM; 856 841 } else { 842 + /* we already have inode, update it */ 843 + 844 + /* if filetype is different, return error */ 845 + if (unlikely(((*inode)->i_mode & S_IFMT) != 846 + (fattr.cf_mode & S_IFMT))) { 847 + rc = -ESTALE; 848 + goto cgii_exit; 849 + } 850 + 857 851 cifs_fattr_to_inode(*inode, &fattr); 858 852 } 859 853 ··· 2239 2215 pTcon = tlink_tcon(tlink); 2240 2216 rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args, 2241 2217 cifs_sb->local_nls, 2242 - cifs_sb->mnt_cifs_flags & 2243 - CIFS_MOUNT_MAP_SPECIAL_CHR); 2218 + cifs_remap(cifs_sb)); 2244 2219 cifs_put_tlink(tlink); 2245 2220 } 2246 2221
+2 -1
fs/cifs/link.c
··· 717 717 rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname); 718 718 else if (pTcon->unix_ext) 719 719 rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname, 720 - cifs_sb->local_nls); 720 + cifs_sb->local_nls, 721 + cifs_remap(cifs_sb)); 721 722 /* else 722 723 rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName, 723 724 cifs_sb_target->local_nls); */
+2
fs/cifs/readdir.c
··· 90 90 if (dentry) { 91 91 inode = d_inode(dentry); 92 92 if (inode) { 93 + if (d_mountpoint(dentry)) 94 + goto out; 93 95 /* 94 96 * If we're generating inode numbers, then we don't 95 97 * want to clobber the existing one with the one that
+2 -1
fs/cifs/smb1ops.c
··· 960 960 /* Check for unix extensions */ 961 961 if (cap_unix(tcon->ses)) { 962 962 rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path, 963 - cifs_sb->local_nls); 963 + cifs_sb->local_nls, 964 + cifs_remap(cifs_sb)); 964 965 if (rc == -EREMOTE) 965 966 rc = cifs_unix_dfs_readlink(xid, tcon, full_path, 966 967 target_path,
+1 -1
fs/cifs/smb2pdu.c
··· 110 110 111 111 /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */ 112 112 /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */ 113 - if ((tcon->ses) && 113 + if ((tcon->ses) && (tcon->ses->server) && 114 114 (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) 115 115 hdr->CreditCharge = cpu_to_le16(1); 116 116 /* else CreditCharge MBZ */
+4 -4
fs/dcache.c
··· 1239 1239 /* might go back up the wrong parent if we have had a rename. */ 1240 1240 if (need_seqretry(&rename_lock, seq)) 1241 1241 goto rename_retry; 1242 - next = child->d_child.next; 1243 - while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) { 1242 + /* go into the first sibling still alive */ 1243 + do { 1244 + next = child->d_child.next; 1244 1245 if (next == &this_parent->d_subdirs) 1245 1246 goto ascend; 1246 1247 child = list_entry(next, struct dentry, d_child); 1247 - next = next->next; 1248 - } 1248 + } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)); 1249 1249 rcu_read_unlock(); 1250 1250 goto resume; 1251 1251 }
+3
fs/nfs/nfs4proc.c
··· 38 38 #include <linux/mm.h> 39 39 #include <linux/delay.h> 40 40 #include <linux/errno.h> 41 + #include <linux/file.h> 41 42 #include <linux/string.h> 42 43 #include <linux/ratelimit.h> 43 44 #include <linux/printk.h> ··· 5605 5604 p->server = server; 5606 5605 atomic_inc(&lsp->ls_count); 5607 5606 p->ctx = get_nfs_open_context(ctx); 5607 + get_file(fl->fl_file); 5608 5608 memcpy(&p->fl, fl, sizeof(p->fl)); 5609 5609 return p; 5610 5610 out_free_seqid: ··· 5718 5716 nfs_free_seqid(data->arg.lock_seqid); 5719 5717 nfs4_put_lock_state(data->lsp); 5720 5718 put_nfs_open_context(data->ctx); 5719 + fput(data->fl.fl_file); 5721 5720 kfree(data); 5722 5721 dprintk("%s: done!\n", __func__); 5723 5722 }
+8 -5
fs/nfs/write.c
··· 1845 1845 trace_nfs_writeback_inode_enter(inode); 1846 1846 1847 1847 ret = filemap_write_and_wait(inode->i_mapping); 1848 - if (!ret) { 1849 - ret = nfs_commit_inode(inode, FLUSH_SYNC); 1850 - if (!ret) 1851 - pnfs_sync_inode(inode, true); 1852 - } 1848 + if (ret) 1849 + goto out; 1850 + ret = nfs_commit_inode(inode, FLUSH_SYNC); 1851 + if (ret < 0) 1852 + goto out; 1853 + pnfs_sync_inode(inode, true); 1854 + ret = 0; 1853 1855 1856 + out: 1854 1857 trace_nfs_writeback_inode_exit(inode, ret); 1855 1858 return ret; 1856 1859 }
+1 -1
fs/omfs/bitmap.c
··· 159 159 goto out; 160 160 161 161 found: 162 - *return_block = i * bits_per_entry + bit; 162 + *return_block = (u64) i * bits_per_entry + bit; 163 163 *return_size = run; 164 164 ret = set_run(sb, i, bits_per_entry, bit, run, 1); 165 165
+7 -3
fs/omfs/inode.c
··· 306 306 */ 307 307 static int omfs_get_imap(struct super_block *sb) 308 308 { 309 - unsigned int bitmap_size, count, array_size; 309 + unsigned int bitmap_size, array_size; 310 + int count; 310 311 struct omfs_sb_info *sbi = OMFS_SB(sb); 311 312 struct buffer_head *bh; 312 313 unsigned long **ptr; ··· 360 359 } 361 360 362 361 enum { 363 - Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask 362 + Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err 364 363 }; 365 364 366 365 static const match_table_t tokens = { ··· 369 368 {Opt_umask, "umask=%o"}, 370 369 {Opt_dmask, "dmask=%o"}, 371 370 {Opt_fmask, "fmask=%o"}, 371 + {Opt_err, NULL}, 372 372 }; 373 373 374 374 static int parse_options(char *options, struct omfs_sb_info *sbi) ··· 550 548 } 551 549 552 550 sb->s_root = d_make_root(root); 553 - if (!sb->s_root) 551 + if (!sb->s_root) { 552 + ret = -ENOMEM; 554 553 goto out_brelse_bh2; 554 + } 555 555 printk(KERN_DEBUG "omfs: Mounted volume %s\n", omfs_rb->r_name); 556 556 557 557 ret = 0;
+3
fs/overlayfs/copy_up.c
··· 299 299 struct cred *override_cred; 300 300 char *link = NULL; 301 301 302 + if (WARN_ON(!workdir)) 303 + return -EROFS; 304 + 302 305 ovl_path_upper(parent, &parentpath); 303 306 upperdir = parentpath.dentry; 304 307
+28 -5
fs/overlayfs/dir.c
··· 222 222 struct kstat stat; 223 223 int err; 224 224 225 + if (WARN_ON(!workdir)) 226 + return ERR_PTR(-EROFS); 227 + 225 228 err = ovl_lock_rename_workdir(workdir, upperdir); 226 229 if (err) 227 230 goto out; ··· 324 321 struct dentry *upper; 325 322 struct dentry *newdentry; 326 323 int err; 324 + 325 + if (WARN_ON(!workdir)) 326 + return -EROFS; 327 327 328 328 err = ovl_lock_rename_workdir(workdir, upperdir); 329 329 if (err) ··· 512 506 struct dentry *opaquedir = NULL; 513 507 int err; 514 508 515 - if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) { 516 - opaquedir = ovl_check_empty_and_clear(dentry); 517 - err = PTR_ERR(opaquedir); 518 - if (IS_ERR(opaquedir)) 519 - goto out; 509 + if (WARN_ON(!workdir)) 510 + return -EROFS; 511 + 512 + if (is_dir) { 513 + if (OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) { 514 + opaquedir = ovl_check_empty_and_clear(dentry); 515 + err = PTR_ERR(opaquedir); 516 + if (IS_ERR(opaquedir)) 517 + goto out; 518 + } else { 519 + LIST_HEAD(list); 520 + 521 + /* 522 + * When removing an empty opaque directory, then it 523 + * makes no sense to replace it with an exact replica of 524 + * itself. But emptiness still needs to be checked. 525 + */ 526 + err = ovl_check_empty_dir(dentry, &list); 527 + ovl_cache_free(&list); 528 + if (err) 529 + goto out; 530 + } 520 531 } 521 532 522 533 err = ovl_lock_rename_workdir(workdir, upperdir);
+5 -5
fs/overlayfs/super.c
··· 529 529 { 530 530 struct ovl_fs *ufs = sb->s_fs_info; 531 531 532 - if (!(*flags & MS_RDONLY) && !ufs->upper_mnt) 532 + if (!(*flags & MS_RDONLY) && (!ufs->upper_mnt || !ufs->workdir)) 533 533 return -EROFS; 534 534 535 535 return 0; ··· 925 925 ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); 926 926 err = PTR_ERR(ufs->workdir); 927 927 if (IS_ERR(ufs->workdir)) { 928 - pr_err("overlayfs: failed to create directory %s/%s\n", 929 - ufs->config.workdir, OVL_WORKDIR_NAME); 930 - goto out_put_upper_mnt; 928 + pr_warn("overlayfs: failed to create directory %s/%s (errno: %i); mounting read-only\n", 929 + ufs->config.workdir, OVL_WORKDIR_NAME, -err); 930 + sb->s_flags |= MS_RDONLY; 931 + ufs->workdir = NULL; 931 932 } 932 933 } 933 934 ··· 998 997 kfree(ufs->lower_mnt); 999 998 out_put_workdir: 1000 999 dput(ufs->workdir); 1001 - out_put_upper_mnt: 1002 1000 mntput(ufs->upper_mnt); 1003 1001 out_put_lowerpath: 1004 1002 for (i = 0; i < numlower; i++)
+4 -4
fs/xfs/libxfs/xfs_attr_leaf.c
··· 574 574 * After the last attribute is removed revert to original inode format, 575 575 * making all literal area available to the data fork once more. 576 576 */ 577 - STATIC void 578 - xfs_attr_fork_reset( 577 + void 578 + xfs_attr_fork_remove( 579 579 struct xfs_inode *ip, 580 580 struct xfs_trans *tp) 581 581 { ··· 641 641 (mp->m_flags & XFS_MOUNT_ATTR2) && 642 642 (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) && 643 643 !(args->op_flags & XFS_DA_OP_ADDNAME)) { 644 - xfs_attr_fork_reset(dp, args->trans); 644 + xfs_attr_fork_remove(dp, args->trans); 645 645 } else { 646 646 xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); 647 647 dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize); ··· 905 905 if (forkoff == -1) { 906 906 ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2); 907 907 ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE); 908 - xfs_attr_fork_reset(dp, args->trans); 908 + xfs_attr_fork_remove(dp, args->trans); 909 909 goto out; 910 910 } 911 911
+1 -1
fs/xfs/libxfs/xfs_attr_leaf.h
··· 53 53 int xfs_attr_shortform_list(struct xfs_attr_list_context *context); 54 54 int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp); 55 55 int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes); 56 - 56 + void xfs_attr_fork_remove(struct xfs_inode *ip, struct xfs_trans *tp); 57 57 58 58 /* 59 59 * Internal routines when attribute fork size == XFS_LBSIZE(mp).
+20 -13
fs/xfs/libxfs/xfs_bmap.c
··· 3224 3224 align_alen += temp; 3225 3225 align_off -= temp; 3226 3226 } 3227 - /* 3228 - * Same adjustment for the end of the requested area. 3229 - */ 3230 - if ((temp = (align_alen % extsz))) { 3227 + 3228 + /* Same adjustment for the end of the requested area. */ 3229 + temp = (align_alen % extsz); 3230 + if (temp) 3231 3231 align_alen += extsz - temp; 3232 - } 3232 + 3233 + /* 3234 + * For large extent hint sizes, the aligned extent might be larger than 3235 + * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls 3236 + * the length back under MAXEXTLEN. The outer allocation loops handle 3237 + * short allocation just fine, so it is safe to do this. We only want to 3238 + * do it when we are forced to, though, because it means more allocation 3239 + * operations are required. 3240 + */ 3241 + while (align_alen > MAXEXTLEN) 3242 + align_alen -= extsz; 3243 + ASSERT(align_alen <= MAXEXTLEN); 3244 + 3233 3245 /* 3234 3246 * If the previous block overlaps with this proposed allocation 3235 3247 * then move the start forward without adjusting the length. ··· 3330 3318 return -EINVAL; 3331 3319 } else { 3332 3320 ASSERT(orig_off >= align_off); 3333 - ASSERT(orig_end <= align_off + align_alen); 3321 + /* see MAXEXTLEN handling above */ 3322 + ASSERT(orig_end <= align_off + align_alen || 3323 + align_alen + extsz > MAXEXTLEN); 3334 3324 } 3335 3325 3336 3326 #ifdef DEBUG ··· 4113 4099 /* Figure out the extent size, adjust alen */ 4114 4100 extsz = xfs_get_extsz_hint(ip); 4115 4101 if (extsz) { 4116 - /* 4117 - * Make sure we don't exceed a single extent length when we 4118 - * align the extent by reducing length we are going to 4119 - * allocate by the maximum amount extent size aligment may 4120 - * require. 4121 - */ 4122 - alen = XFS_FILBLKS_MIN(len, MAXEXTLEN - (2 * extsz - 1)); 4123 4102 error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof, 4124 4103 1, 0, &aoff, &alen); 4125 4104 ASSERT(!error);
+6 -3
fs/xfs/libxfs/xfs_ialloc.c
··· 376 376 */ 377 377 newlen = args.mp->m_ialloc_inos; 378 378 if (args.mp->m_maxicount && 379 - percpu_counter_read(&args.mp->m_icount) + newlen > 379 + percpu_counter_read_positive(&args.mp->m_icount) + newlen > 380 380 args.mp->m_maxicount) 381 381 return -ENOSPC; 382 382 args.minlen = args.maxlen = args.mp->m_ialloc_blks; ··· 1339 1339 * If we have already hit the ceiling of inode blocks then clear 1340 1340 * okalloc so we scan all available agi structures for a free 1341 1341 * inode. 1342 + * 1343 + * Read rough value of mp->m_icount by percpu_counter_read_positive, 1344 + * which will sacrifice the preciseness but improve the performance. 1342 1345 */ 1343 1346 if (mp->m_maxicount && 1344 - percpu_counter_read(&mp->m_icount) + mp->m_ialloc_inos > 1345 - mp->m_maxicount) { 1347 + percpu_counter_read_positive(&mp->m_icount) + mp->m_ialloc_inos 1348 + > mp->m_maxicount) { 1346 1349 noroom = 1; 1347 1350 okalloc = 0; 1348 1351 }
+50 -35
fs/xfs/xfs_attr_inactive.c
··· 380 380 return error; 381 381 } 382 382 383 + /* 384 + * xfs_attr_inactive kills all traces of an attribute fork on an inode. It 385 + * removes both the on-disk and in-memory inode fork. Note that this also has to 386 + * handle the condition of inodes without attributes but with an attribute fork 387 + * configured, so we can't use xfs_inode_hasattr() here. 388 + * 389 + * The in-memory attribute fork is removed even on error. 390 + */ 383 391 int 384 - xfs_attr_inactive(xfs_inode_t *dp) 392 + xfs_attr_inactive( 393 + struct xfs_inode *dp) 385 394 { 386 - xfs_trans_t *trans; 387 - xfs_mount_t *mp; 388 - int error; 395 + struct xfs_trans *trans; 396 + struct xfs_mount *mp; 397 + int cancel_flags = 0; 398 + int lock_mode = XFS_ILOCK_SHARED; 399 + int error = 0; 389 400 390 401 mp = dp->i_mount; 391 402 ASSERT(! XFS_NOT_DQATTACHED(mp, dp)); 392 403 393 - xfs_ilock(dp, XFS_ILOCK_SHARED); 394 - if (!xfs_inode_hasattr(dp) || 395 - dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { 396 - xfs_iunlock(dp, XFS_ILOCK_SHARED); 397 - return 0; 398 - } 399 - xfs_iunlock(dp, XFS_ILOCK_SHARED); 404 + xfs_ilock(dp, lock_mode); 405 + if (!XFS_IFORK_Q(dp)) 406 + goto out_destroy_fork; 407 + xfs_iunlock(dp, lock_mode); 400 408 401 409 /* 402 410 * Start our first transaction of the day. ··· 416 408 * the inode in every transaction to let it float upward through 417 409 * the log. 418 410 */ 411 + lock_mode = 0; 419 412 trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL); 420 413 error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0); 421 - if (error) { 422 - xfs_trans_cancel(trans, 0); 423 - return error; 424 - } 425 - xfs_ilock(dp, XFS_ILOCK_EXCL); 414 + if (error) 415 + goto out_cancel; 416 + 417 + lock_mode = XFS_ILOCK_EXCL; 418 + cancel_flags = XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT; 419 + xfs_ilock(dp, lock_mode); 420 + 421 + if (!XFS_IFORK_Q(dp)) 422 + goto out_cancel; 426 423 427 424 /* 428 425 * No need to make quota reservations here. 
We expect to release some ··· 435 422 */ 436 423 xfs_trans_ijoin(trans, dp, 0); 437 424 438 - /* 439 - * Decide on what work routines to call based on the inode size. 440 - */ 441 - if (!xfs_inode_hasattr(dp) || 442 - dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { 443 - error = 0; 444 - goto out; 445 - } 446 - error = xfs_attr3_root_inactive(&trans, dp); 447 - if (error) 448 - goto out; 425 + /* invalidate and truncate the attribute fork extents */ 426 + if (dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) { 427 + error = xfs_attr3_root_inactive(&trans, dp); 428 + if (error) 429 + goto out_cancel; 449 430 450 - error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0); 451 - if (error) 452 - goto out; 431 + error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0); 432 + if (error) 433 + goto out_cancel; 434 + } 435 + 436 + /* Reset the attribute fork - this also destroys the in-core fork */ 437 + xfs_attr_fork_remove(dp, trans); 453 438 454 439 error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES); 455 - xfs_iunlock(dp, XFS_ILOCK_EXCL); 456 - 440 + xfs_iunlock(dp, lock_mode); 457 441 return error; 458 442 459 - out: 460 - xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); 461 - xfs_iunlock(dp, XFS_ILOCK_EXCL); 443 + out_cancel: 444 + xfs_trans_cancel(trans, cancel_flags); 445 + out_destroy_fork: 446 + /* kill the in-core attr fork before we drop the inode lock */ 447 + if (dp->i_afp) 448 + xfs_idestroy_fork(dp, XFS_ATTR_FORK); 449 + if (lock_mode) 450 + xfs_iunlock(dp, lock_mode); 462 451 return error; 463 452 }
+1 -1
fs/xfs/xfs_file.c
··· 124 124 status = 0; 125 125 } while (count); 126 126 127 - return (-status); 127 + return status; 128 128 } 129 129 130 130 int
+12 -10
fs/xfs/xfs_inode.c
··· 1946 1946 /* 1947 1947 * If there are attributes associated with the file then blow them away 1948 1948 * now. The code calls a routine that recursively deconstructs the 1949 - * attribute fork. We need to just commit the current transaction 1950 - * because we can't use it for xfs_attr_inactive(). 1949 + * attribute fork. If also blows away the in-core attribute fork. 1951 1950 */ 1952 - if (ip->i_d.di_anextents > 0) { 1953 - ASSERT(ip->i_d.di_forkoff != 0); 1954 - 1951 + if (XFS_IFORK_Q(ip)) { 1955 1952 error = xfs_attr_inactive(ip); 1956 1953 if (error) 1957 1954 return; 1958 1955 } 1959 1956 1960 - if (ip->i_afp) 1961 - xfs_idestroy_fork(ip, XFS_ATTR_FORK); 1962 - 1957 + ASSERT(!ip->i_afp); 1963 1958 ASSERT(ip->i_d.di_anextents == 0); 1959 + ASSERT(ip->i_d.di_forkoff == 0); 1964 1960 1965 1961 /* 1966 1962 * Free the inode. ··· 2879 2883 if (error) 2880 2884 return error; 2881 2885 2882 - /* Satisfy xfs_bumplink that this is a real tmpfile */ 2886 + /* 2887 + * Prepare the tmpfile inode as if it were created through the VFS. 2888 + * Otherwise, the link increment paths will complain about nlink 0->1. 2889 + * Drop the link count as done by d_tmpfile(), complete the inode setup 2890 + * and flag it as linkable. 2891 + */ 2892 + drop_nlink(VFS_I(tmpfile)); 2883 2893 xfs_finish_inode_setup(tmpfile); 2884 2894 VFS_I(tmpfile)->i_state |= I_LINKABLE; 2885 2895 ··· 3153 3151 * intermediate state on disk. 3154 3152 */ 3155 3153 if (wip) { 3156 - ASSERT(wip->i_d.di_nlink == 0); 3154 + ASSERT(VFS_I(wip)->i_nlink == 0 && wip->i_d.di_nlink == 0); 3157 3155 error = xfs_bumplink(tp, wip); 3158 3156 if (error) 3159 3157 goto out_trans_abort;
+20 -14
fs/xfs/xfs_mount.c
··· 1084 1084 return xfs_sync_sb(mp, true); 1085 1085 } 1086 1086 1087 + /* 1088 + * Deltas for the inode count are +/-64, hence we use a large batch size 1089 + * of 128 so we don't need to take the counter lock on every update. 1090 + */ 1091 + #define XFS_ICOUNT_BATCH 128 1087 1092 int 1088 1093 xfs_mod_icount( 1089 1094 struct xfs_mount *mp, 1090 1095 int64_t delta) 1091 1096 { 1092 - /* deltas are +/-64, hence the large batch size of 128. */ 1093 - __percpu_counter_add(&mp->m_icount, delta, 128); 1094 - if (percpu_counter_compare(&mp->m_icount, 0) < 0) { 1097 + __percpu_counter_add(&mp->m_icount, delta, XFS_ICOUNT_BATCH); 1098 + if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) { 1095 1099 ASSERT(0); 1096 1100 percpu_counter_add(&mp->m_icount, -delta); 1097 1101 return -EINVAL; ··· 1117 1113 return 0; 1118 1114 } 1119 1115 1116 + /* 1117 + * Deltas for the block count can vary from 1 to very large, but lock contention 1118 + * only occurs on frequent small block count updates such as in the delayed 1119 + * allocation path for buffered writes (page a time updates). Hence we set 1120 + * a large batch count (1024) to minimise global counter updates except when 1121 + * we get near to ENOSPC and we have to be very accurate with our updates. 1122 + */ 1123 + #define XFS_FDBLOCKS_BATCH 1024 1120 1124 int 1121 1125 xfs_mod_fdblocks( 1122 1126 struct xfs_mount *mp, ··· 1163 1151 * Taking blocks away, need to be more accurate the closer we 1164 1152 * are to zero. 1165 1153 * 1166 - * batch size is set to a maximum of 1024 blocks - if we are 1167 - * allocating of freeing extents larger than this then we aren't 1168 - * going to be hammering the counter lock so a lock per update 1169 - * is not a problem. 1170 - * 1171 1154 * If the counter has a value of less than 2 * max batch size, 1172 1155 * then make everything serialise as we are real close to 1173 1156 * ENOSPC. 
1174 1157 */ 1175 - #define __BATCH 1024 1176 - if (percpu_counter_compare(&mp->m_fdblocks, 2 * __BATCH) < 0) 1158 + if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH, 1159 + XFS_FDBLOCKS_BATCH) < 0) 1177 1160 batch = 1; 1178 1161 else 1179 - batch = __BATCH; 1180 - #undef __BATCH 1162 + batch = XFS_FDBLOCKS_BATCH; 1181 1163 1182 1164 __percpu_counter_add(&mp->m_fdblocks, delta, batch); 1183 - if (percpu_counter_compare(&mp->m_fdblocks, 1184 - XFS_ALLOC_SET_ASIDE(mp)) >= 0) { 1165 + if (__percpu_counter_compare(&mp->m_fdblocks, XFS_ALLOC_SET_ASIDE(mp), 1166 + XFS_FDBLOCKS_BATCH) >= 0) { 1185 1167 /* we had space! */ 1186 1168 return 0; 1187 1169 }
-2
include/linux/blkdev.h
··· 821 821 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, 822 822 struct scsi_ioctl_command __user *); 823 823 824 - extern void blk_queue_bio(struct request_queue *q, struct bio *bio); 825 - 826 824 /* 827 825 * A queue has just exitted congestion. Note this in the global counter of 828 826 * congested queues, and wake up anyone who was waiting for requests to be
+1 -1
include/linux/brcmphy.h
··· 17 17 #define PHY_ID_BCM7250 0xae025280 18 18 #define PHY_ID_BCM7364 0xae025260 19 19 #define PHY_ID_BCM7366 0x600d8490 20 - #define PHY_ID_BCM7425 0x03625e60 20 + #define PHY_ID_BCM7425 0x600d86b0 21 21 #define PHY_ID_BCM7429 0x600d8730 22 22 #define PHY_ID_BCM7439 0x600d8480 23 23 #define PHY_ID_BCM7439_2 0xae025080
+2 -4
include/linux/cpumask.h
··· 151 151 return 1; 152 152 } 153 153 154 - static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp) 154 + static inline unsigned int cpumask_local_spread(unsigned int i, int node) 155 155 { 156 - set_bit(0, cpumask_bits(dstp)); 157 - 158 156 return 0; 159 157 } 160 158 ··· 206 208 207 209 int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); 208 210 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); 209 - int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp); 211 + unsigned int cpumask_local_spread(unsigned int i, int node); 210 212 211 213 /** 212 214 * for_each_cpu - iterate over every cpu in a mask
+2 -2
include/linux/hid-sensor-hub.h
··· 74 74 * @usage: Usage id for this hub device instance. 75 75 * @start_collection_index: Starting index for a phy type collection 76 76 * @end_collection_index: Last index for a phy type collection 77 - * @mutex: synchronizing mutex. 77 + * @mutex_ptr: synchronizing mutex pointer. 78 78 * @pending: Holds information of pending sync read request. 79 79 */ 80 80 struct hid_sensor_hub_device { ··· 84 84 u32 usage; 85 85 int start_collection_index; 86 86 int end_collection_index; 87 - struct mutex mutex; 87 + struct mutex *mutex_ptr; 88 88 struct sensor_hub_pending pending; 89 89 }; 90 90
+21 -6
include/linux/ktime.h
··· 166 166 } 167 167 168 168 #if BITS_PER_LONG < 64 169 - extern u64 __ktime_divns(const ktime_t kt, s64 div); 170 - static inline u64 ktime_divns(const ktime_t kt, s64 div) 169 + extern s64 __ktime_divns(const ktime_t kt, s64 div); 170 + static inline s64 ktime_divns(const ktime_t kt, s64 div) 171 171 { 172 + /* 173 + * Negative divisors could cause an inf loop, 174 + * so bug out here. 175 + */ 176 + BUG_ON(div < 0); 172 177 if (__builtin_constant_p(div) && !(div >> 32)) { 173 - u64 ns = kt.tv64; 174 - do_div(ns, div); 175 - return ns; 178 + s64 ns = kt.tv64; 179 + u64 tmp = ns < 0 ? -ns : ns; 180 + 181 + do_div(tmp, div); 182 + return ns < 0 ? -tmp : tmp; 176 183 } else { 177 184 return __ktime_divns(kt, div); 178 185 } 179 186 } 180 187 #else /* BITS_PER_LONG < 64 */ 181 - # define ktime_divns(kt, div) (u64)((kt).tv64 / (div)) 188 + static inline s64 ktime_divns(const ktime_t kt, s64 div) 189 + { 190 + /* 191 + * 32-bit implementation cannot handle negative divisors, 192 + * so catch them on 64bit as well. 193 + */ 194 + WARN_ON(div < 0); 195 + return kt.tv64 / div; 196 + } 182 197 #endif 183 198 184 199 static inline s64 ktime_to_us(const ktime_t kt)
+12 -1
include/linux/percpu_counter.h
··· 41 41 void percpu_counter_set(struct percpu_counter *fbc, s64 amount); 42 42 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); 43 43 s64 __percpu_counter_sum(struct percpu_counter *fbc); 44 - int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs); 44 + int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); 45 + 46 + static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) 47 + { 48 + return __percpu_counter_compare(fbc, rhs, percpu_counter_batch); 49 + } 45 50 46 51 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) 47 52 { ··· 119 114 return -1; 120 115 else 121 116 return 0; 117 + } 118 + 119 + static inline int 120 + __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) 121 + { 122 + return percpu_counter_compare(fbc, rhs); 122 123 } 123 124 124 125 static inline void
-4
include/linux/platform_data/si5351.h
··· 5 5 #ifndef __LINUX_PLATFORM_DATA_SI5351_H__ 6 6 #define __LINUX_PLATFORM_DATA_SI5351_H__ 7 7 8 - struct clk; 9 - 10 8 /** 11 9 * enum si5351_pll_src - Si5351 pll clock source 12 10 * @SI5351_PLL_SRC_DEFAULT: default, do not change eeprom config ··· 105 107 * @clkout: array of clkout configuration 106 108 */ 107 109 struct si5351_platform_data { 108 - struct clk *clk_xtal; 109 - struct clk *clk_clkin; 110 110 enum si5351_pll_src pll_src[2]; 111 111 struct si5351_clkout_config clkout[8]; 112 112 };
+19
include/linux/rhashtable.h
··· 17 17 #ifndef _LINUX_RHASHTABLE_H 18 18 #define _LINUX_RHASHTABLE_H 19 19 20 + #include <linux/atomic.h> 20 21 #include <linux/compiler.h> 21 22 #include <linux/errno.h> 22 23 #include <linux/jhash.h> ··· 101 100 * @key_len: Length of key 102 101 * @key_offset: Offset of key in struct to be hashed 103 102 * @head_offset: Offset of rhash_head in struct to be hashed 103 + * @insecure_max_entries: Maximum number of entries (may be exceeded) 104 104 * @max_size: Maximum size while expanding 105 105 * @min_size: Minimum size while shrinking 106 106 * @nulls_base: Base value to generate nulls marker ··· 117 115 size_t key_len; 118 116 size_t key_offset; 119 117 size_t head_offset; 118 + unsigned int insecure_max_entries; 120 119 unsigned int max_size; 121 120 unsigned int min_size; 122 121 u32 nulls_base; ··· 287 284 { 288 285 return atomic_read(&ht->nelems) > tbl->size && 289 286 (!ht->p.max_size || tbl->size < ht->p.max_size); 287 + } 288 + 289 + /** 290 + * rht_grow_above_max - returns true if table is above maximum 291 + * @ht: hash table 292 + * @tbl: current table 293 + */ 294 + static inline bool rht_grow_above_max(const struct rhashtable *ht, 295 + const struct bucket_table *tbl) 296 + { 297 + return ht->p.insecure_max_entries && 298 + atomic_read(&ht->nelems) >= ht->p.insecure_max_entries; 290 299 } 291 300 292 301 /* The bucket lock is selected based on the hash and protects mutations ··· 603 588 goto slow_path; 604 589 goto out; 605 590 } 591 + 592 + err = -E2BIG; 593 + if (unlikely(rht_grow_above_max(ht, tbl))) 594 + goto out; 606 595 607 596 if (unlikely(rht_grow_above_100(ht, tbl))) { 608 597 slow_path:
+1
include/linux/skbuff.h
··· 176 176 struct net_device *physindev; 177 177 struct net_device *physoutdev; 178 178 char neigh_header[8]; 179 + __be32 ipv4_daddr; 179 180 }; 180 181 #endif 181 182
+2
include/linux/tcp.h
··· 158 158 * sum(delta(snd_una)), or how many bytes 159 159 * were acked. 160 160 */ 161 + struct u64_stats_sync syncp; /* protects 64bit vars (cf tcp_get_info()) */ 162 + 161 163 u32 snd_una; /* First byte we want an ack for */ 162 164 u32 snd_sml; /* Last byte of the most recently transmitted small packet */ 163 165 u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
+5 -3
include/net/inet_connection_sock.h
··· 98 98 const struct tcp_congestion_ops *icsk_ca_ops; 99 99 const struct inet_connection_sock_af_ops *icsk_af_ops; 100 100 unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu); 101 - __u8 icsk_ca_state:7, 101 + __u8 icsk_ca_state:6, 102 + icsk_ca_setsockopt:1, 102 103 icsk_ca_dst_locked:1; 103 104 __u8 icsk_retransmits; 104 105 __u8 icsk_pending; ··· 130 129 131 130 u32 probe_timestamp; 132 131 } icsk_mtup; 133 - u32 icsk_ca_priv[16]; 134 132 u32 icsk_user_timeout; 135 - #define ICSK_CA_PRIV_SIZE (16 * sizeof(u32)) 133 + 134 + u64 icsk_ca_priv[64 / sizeof(u64)]; 135 + #define ICSK_CA_PRIV_SIZE (8 * sizeof(u64)) 136 136 }; 137 137 138 138 #define ICSK_TIME_RETRANS 1 /* Retransmit timer */
+4 -3
include/net/mac80211.h
··· 354 354 }; 355 355 356 356 /** 357 - * enum ieee80211_rssi_event - data attached to an %RSSI_EVENT 357 + * struct ieee80211_rssi_event - data attached to an %RSSI_EVENT 358 358 * @data: See &enum ieee80211_rssi_event_data 359 359 */ 360 360 struct ieee80211_rssi_event { ··· 388 388 }; 389 389 390 390 /** 391 - * enum ieee80211_mlme_event - data attached to an %MLME_EVENT 391 + * struct ieee80211_mlme_event - data attached to an %MLME_EVENT 392 392 * @data: See &enum ieee80211_mlme_event_data 393 393 * @status: See &enum ieee80211_mlme_event_status 394 394 * @reason: the reason code if applicable ··· 401 401 402 402 /** 403 403 * struct ieee80211_event - event to be sent to the driver 404 - * @type The event itself. See &enum ieee80211_event_type. 404 + * @type: The event itself. See &enum ieee80211_event_type. 405 405 * @rssi: relevant if &type is %RSSI_EVENT 406 406 * @mlme: relevant if &type is %AUTH_EVENT 407 + * @u: union holding the above two fields 407 408 */ 408 409 struct ieee80211_event { 409 410 enum ieee80211_event_type type;
+5 -2
include/net/sctp/sctp.h
··· 574 574 /* Map v4 address to v4-mapped v6 address */ 575 575 static inline void sctp_v4_map_v6(union sctp_addr *addr) 576 576 { 577 + __be16 port; 578 + 579 + port = addr->v4.sin_port; 580 + addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr; 581 + addr->v6.sin6_port = port; 577 582 addr->v6.sin6_family = AF_INET6; 578 583 addr->v6.sin6_flowinfo = 0; 579 584 addr->v6.sin6_scope_id = 0; 580 - addr->v6.sin6_port = addr->v4.sin_port; 581 - addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr; 582 585 addr->v6.sin6_addr.s6_addr32[0] = 0; 583 586 addr->v6.sin6_addr.s6_addr32[1] = 0; 584 587 addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
+4 -4
include/target/target_core_backend.h
··· 1 1 #ifndef TARGET_CORE_BACKEND_H 2 2 #define TARGET_CORE_BACKEND_H 3 3 4 - #define TRANSPORT_PLUGIN_PHBA_PDEV 1 5 - #define TRANSPORT_PLUGIN_VHBA_PDEV 2 6 - #define TRANSPORT_PLUGIN_VHBA_VDEV 3 4 + #define TRANSPORT_FLAG_PASSTHROUGH 1 7 5 8 6 struct target_backend_cits { 9 7 struct config_item_type tb_dev_cit; ··· 20 22 char inquiry_rev[4]; 21 23 struct module *owner; 22 24 23 - u8 transport_type; 25 + u8 transport_flags; 24 26 25 27 int (*attach_hba)(struct se_hba *, u32); 26 28 void (*detach_hba)(struct se_hba *); ··· 136 138 int se_dev_set_max_sectors(struct se_device *, u32); 137 139 int se_dev_set_optimal_sectors(struct se_device *, u32); 138 140 int se_dev_set_block_size(struct se_device *, u32); 141 + sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, 142 + sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); 139 143 140 144 #endif /* TARGET_CORE_BACKEND_H */
-2
include/target/target_core_configfs.h
··· 40 40 struct config_item *tf_fabric; 41 41 /* Passed from fabric modules */ 42 42 struct config_item_type *tf_fabric_cit; 43 - /* Pointer to target core subsystem */ 44 - struct configfs_subsystem *tf_subsys; 45 43 /* Pointer to fabric's struct module */ 46 44 struct module *tf_module; 47 45 struct target_core_fabric_ops tf_ops;
+3 -1
include/target/target_core_fabric.h
··· 4 4 struct target_core_fabric_ops { 5 5 struct module *module; 6 6 const char *name; 7 - struct configfs_subsystem *tf_subsys; 8 7 char *(*get_fabric_name)(void); 9 8 u8 (*get_fabric_proto_ident)(struct se_portal_group *); 10 9 char *(*tpg_get_wwn)(struct se_portal_group *); ··· 107 108 108 109 int target_register_template(const struct target_core_fabric_ops *fo); 109 110 void target_unregister_template(const struct target_core_fabric_ops *fo); 111 + 112 + int target_depend_item(struct config_item *item); 113 + void target_undepend_item(struct config_item *item); 110 114 111 115 struct se_session *transport_init_session(enum target_prot_op); 112 116 int transport_alloc_session_tags(struct se_session *, unsigned int,
+50 -4
include/trace/events/kmem.h
··· 140 140 TP_ARGS(call_site, ptr) 141 141 ); 142 142 143 - DEFINE_EVENT(kmem_free, kmem_cache_free, 143 + DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free, 144 144 145 145 TP_PROTO(unsigned long call_site, const void *ptr), 146 146 147 - TP_ARGS(call_site, ptr) 147 + TP_ARGS(call_site, ptr), 148 + 149 + /* 150 + * This trace can be potentially called from an offlined cpu. 151 + * Since trace points use RCU and RCU should not be used from 152 + * offline cpus, filter such calls out. 153 + * While this trace can be called from a preemptable section, 154 + * it has no impact on the condition since tasks can migrate 155 + * only from online cpus to other online cpus. Thus its safe 156 + * to use raw_smp_processor_id. 157 + */ 158 + TP_CONDITION(cpu_online(raw_smp_processor_id())) 148 159 ); 149 160 150 - TRACE_EVENT(mm_page_free, 161 + TRACE_EVENT_CONDITION(mm_page_free, 151 162 152 163 TP_PROTO(struct page *page, unsigned int order), 153 164 154 165 TP_ARGS(page, order), 166 + 167 + 168 + /* 169 + * This trace can be potentially called from an offlined cpu. 170 + * Since trace points use RCU and RCU should not be used from 171 + * offline cpus, filter such calls out. 172 + * While this trace can be called from a preemptable section, 173 + * it has no impact on the condition since tasks can migrate 174 + * only from online cpus to other online cpus. Thus its safe 175 + * to use raw_smp_processor_id. 176 + */ 177 + TP_CONDITION(cpu_online(raw_smp_processor_id())), 155 178 156 179 TP_STRUCT__entry( 157 180 __field( unsigned long, pfn ) ··· 276 253 TP_ARGS(page, order, migratetype) 277 254 ); 278 255 279 - DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain, 256 + TRACE_EVENT_CONDITION(mm_page_pcpu_drain, 280 257 281 258 TP_PROTO(struct page *page, unsigned int order, int migratetype), 282 259 283 260 TP_ARGS(page, order, migratetype), 261 + 262 + /* 263 + * This trace can be potentially called from an offlined cpu. 
264 + * Since trace points use RCU and RCU should not be used from 265 + * offline cpus, filter such calls out. 266 + * While this trace can be called from a preemptable section, 267 + * it has no impact on the condition since tasks can migrate 268 + * only from online cpus to other online cpus. Thus its safe 269 + * to use raw_smp_processor_id. 270 + */ 271 + TP_CONDITION(cpu_online(raw_smp_processor_id())), 272 + 273 + TP_STRUCT__entry( 274 + __field( unsigned long, pfn ) 275 + __field( unsigned int, order ) 276 + __field( int, migratetype ) 277 + ), 278 + 279 + TP_fast_assign( 280 + __entry->pfn = page ? page_to_pfn(page) : -1UL; 281 + __entry->order = order; 282 + __entry->migratetype = migratetype; 283 + ), 284 284 285 285 TP_printk("page=%p pfn=%lu order=%d migratetype=%d", 286 286 pfn_to_page(__entry->pfn), __entry->pfn,
+3
include/uapi/linux/netfilter/nf_conntrack_tcp.h
··· 42 42 /* The field td_maxack has been set */ 43 43 #define IP_CT_TCP_FLAG_MAXACK_SET 0x20 44 44 45 + /* Marks possibility for expected RFC5961 challenge ACK */ 46 + #define IP_CT_EXP_CHALLENGE_ACK 0x40 47 + 45 48 struct nf_ct_tcp_flags { 46 49 __u8 flags; 47 50 __u8 mask;
+1 -1
include/uapi/linux/rtnetlink.h
··· 337 337 #define RTNH_F_DEAD 1 /* Nexthop is dead (used by multipath) */ 338 338 #define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */ 339 339 #define RTNH_F_ONLINK 4 /* Gateway is forced on link */ 340 - #define RTNH_F_EXTERNAL 8 /* Route installed externally */ 340 + #define RTNH_F_OFFLOAD 8 /* offloaded route */ 341 341 342 342 /* Macros to handle hexthops */ 343 343
+1
include/uapi/linux/virtio_balloon.h
··· 26 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 27 * SUCH DAMAGE. */ 28 28 #include <linux/types.h> 29 + #include <linux/virtio_types.h> 29 30 #include <linux/virtio_ids.h> 30 31 #include <linux/virtio_config.h> 31 32
+1 -1
include/xen/events.h
··· 17 17 irq_handler_t handler, 18 18 unsigned long irqflags, const char *devname, 19 19 void *dev_id); 20 - int bind_virq_to_irq(unsigned int virq, unsigned int cpu); 20 + int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu); 21 21 int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, 22 22 irq_handler_t handler, 23 23 unsigned long irqflags, const char *devname,
+3
kernel/module.c
··· 3370 3370 module_bug_cleanup(mod); 3371 3371 mutex_unlock(&module_mutex); 3372 3372 3373 + blocking_notifier_call_chain(&module_notify_list, 3374 + MODULE_STATE_GOING, mod); 3375 + 3373 3376 /* we can't deallocate the module until we clear memory protection */ 3374 3377 unset_module_init_ro_nx(mod); 3375 3378 unset_module_core_ro_nx(mod);
+1 -4
kernel/sched/core.c
··· 4425 4425 long ret; 4426 4426 4427 4427 current->in_iowait = 1; 4428 - if (old_iowait) 4429 - blk_schedule_flush_plug(current); 4430 - else 4431 - blk_flush_plug(current); 4428 + blk_schedule_flush_plug(current); 4432 4429 4433 4430 delayacct_blkio_start(); 4434 4431 rq = raw_rq();
+8 -6
kernel/time/hrtimer.c
··· 266 266 /* 267 267 * Divide a ktime value by a nanosecond value 268 268 */ 269 - u64 __ktime_divns(const ktime_t kt, s64 div) 269 + s64 __ktime_divns(const ktime_t kt, s64 div) 270 270 { 271 - u64 dclc; 272 271 int sft = 0; 272 + s64 dclc; 273 + u64 tmp; 273 274 274 275 dclc = ktime_to_ns(kt); 276 + tmp = dclc < 0 ? -dclc : dclc; 277 + 275 278 /* Make sure the divisor is less than 2^32: */ 276 279 while (div >> 32) { 277 280 sft++; 278 281 div >>= 1; 279 282 } 280 - dclc >>= sft; 281 - do_div(dclc, (unsigned long) div); 282 - 283 - return dclc; 283 + tmp >>= sft; 284 + do_div(tmp, (unsigned long) div); 285 + return dclc < 0 ? -tmp : tmp; 284 286 } 285 287 EXPORT_SYMBOL_GPL(__ktime_divns); 286 288 #endif /* BITS_PER_LONG >= 64 */
+1 -1
kernel/watchdog.c
··· 621 621 put_online_cpus(); 622 622 623 623 unlock: 624 - mutex_lock(&watchdog_proc_mutex); 624 + mutex_unlock(&watchdog_proc_mutex); 625 625 } 626 626 627 627 void watchdog_nmi_disable_all(void)
+24 -46
lib/cpumask.c
··· 139 139 #endif 140 140 141 141 /** 142 - * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first 143 - * 142 + * cpumask_local_spread - select the i'th cpu with local numa cpu's first 144 143 * @i: index number 145 - * @numa_node: local numa_node 146 - * @dstp: cpumask with the relevant cpu bit set according to the policy 144 + * @node: local numa_node 147 145 * 148 - * This function sets the cpumask according to a numa aware policy. 149 - * cpumask could be used as an affinity hint for the IRQ related to a 150 - * queue. When the policy is to spread queues across cores - local cores 151 - * first. 146 + * This function selects an online CPU according to a numa aware policy; 147 + * local cpus are returned first, followed by non-local ones, then it 148 + * wraps around. 152 149 * 153 - * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set 154 - * the cpu bit and need to re-call the function. 150 + * It's not very efficient, but useful for setup. 155 151 */ 156 - int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp) 152 + unsigned int cpumask_local_spread(unsigned int i, int node) 157 153 { 158 - cpumask_var_t mask; 159 154 int cpu; 160 - int ret = 0; 161 155 162 - if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) 163 - return -ENOMEM; 164 - 156 + /* Wrap: we always want a cpu. */ 165 157 i %= num_online_cpus(); 166 158 167 - if (numa_node == -1 || !cpumask_of_node(numa_node)) { 168 - /* Use all online cpu's for non numa aware system */ 169 - cpumask_copy(mask, cpu_online_mask); 159 + if (node == -1) { 160 + for_each_cpu(cpu, cpu_online_mask) 161 + if (i-- == 0) 162 + return cpu; 170 163 } else { 171 - int n; 164 + /* NUMA first. */ 165 + for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) 166 + if (i-- == 0) 167 + return cpu; 172 168 173 - cpumask_and(mask, 174 - cpumask_of_node(numa_node), cpu_online_mask); 169 + for_each_cpu(cpu, cpu_online_mask) { 170 + /* Skip NUMA nodes, done above. 
*/ 171 + if (cpumask_test_cpu(cpu, cpumask_of_node(node))) 172 + continue; 175 173 176 - n = cpumask_weight(mask); 177 - if (i >= n) { 178 - i -= n; 179 - 180 - /* If index > number of local cpu's, mask out local 181 - * cpu's 182 - */ 183 - cpumask_andnot(mask, cpu_online_mask, mask); 174 + if (i-- == 0) 175 + return cpu; 184 176 } 185 177 } 186 - 187 - for_each_cpu(cpu, mask) { 188 - if (--i < 0) 189 - goto out; 190 - } 191 - 192 - ret = -EAGAIN; 193 - 194 - out: 195 - free_cpumask_var(mask); 196 - 197 - if (!ret) 198 - cpumask_set_cpu(cpu, dstp); 199 - 200 - return ret; 178 + BUG(); 201 179 } 202 - EXPORT_SYMBOL(cpumask_set_cpu_local_first); 180 + EXPORT_SYMBOL(cpumask_local_spread);
+3 -3
lib/percpu_counter.c
··· 197 197 * Compare counter against given value. 198 198 * Return 1 if greater, 0 if equal and -1 if less 199 199 */ 200 - int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) 200 + int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) 201 201 { 202 202 s64 count; 203 203 204 204 count = percpu_counter_read(fbc); 205 205 /* Check to see if rough count will be sufficient for comparison */ 206 - if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) { 206 + if (abs(count - rhs) > (batch * num_online_cpus())) { 207 207 if (count > rhs) 208 208 return 1; 209 209 else ··· 218 218 else 219 219 return 0; 220 220 } 221 - EXPORT_SYMBOL(percpu_counter_compare); 221 + EXPORT_SYMBOL(__percpu_counter_compare); 222 222 223 223 static int __init percpu_counter_startup(void) 224 224 {
+11
lib/rhashtable.c
··· 14 14 * published by the Free Software Foundation. 15 15 */ 16 16 17 + #include <linux/atomic.h> 17 18 #include <linux/kernel.h> 18 19 #include <linux/init.h> 19 20 #include <linux/log2.h> ··· 447 446 if (key && rhashtable_lookup_fast(ht, key, ht->p)) 448 447 goto exit; 449 448 449 + err = -E2BIG; 450 + if (unlikely(rht_grow_above_max(ht, tbl))) 451 + goto exit; 452 + 450 453 err = -EAGAIN; 451 454 if (rhashtable_check_elasticity(ht, tbl, hash) || 452 455 rht_grow_above_100(ht, tbl)) ··· 742 737 743 738 if (params->max_size) 744 739 ht->p.max_size = rounddown_pow_of_two(params->max_size); 740 + 741 + if (params->insecure_max_entries) 742 + ht->p.insecure_max_entries = 743 + rounddown_pow_of_two(params->insecure_max_entries); 744 + else 745 + ht->p.insecure_max_entries = ht->p.max_size * 2; 745 746 746 747 ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); 747 748
+1 -1
net/8021q/vlan.c
··· 443 443 case NETDEV_UP: 444 444 /* Put all VLANs for this dev in the up state too. */ 445 445 vlan_group_for_each_dev(grp, i, vlandev) { 446 - flgs = vlandev->flags; 446 + flgs = dev_get_flags(vlandev); 447 447 if (flgs & IFF_UP) 448 448 continue; 449 449
+4 -2
net/bluetooth/hci_core.c
··· 2854 2854 * state. If we were running both LE and BR/EDR inquiry 2855 2855 * simultaneously, and BR/EDR inquiry is already 2856 2856 * finished, stop discovery, otherwise BR/EDR inquiry 2857 - * will stop discovery when finished. 2857 + * will stop discovery when finished. If we will resolve 2858 + * remote device name, do not change discovery state. 2858 2859 */ 2859 - if (!test_bit(HCI_INQUIRY, &hdev->flags)) 2860 + if (!test_bit(HCI_INQUIRY, &hdev->flags) && 2861 + hdev->discovery.state != DISCOVERY_RESOLVING) 2860 2862 hci_discovery_set_state(hdev, 2861 2863 DISCOVERY_STOPPED); 2862 2864 } else {
+2 -2
net/bridge/br_multicast.c
··· 1072 1072 1073 1073 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca, 1074 1074 vid); 1075 - if (!err) 1075 + if (err) 1076 1076 break; 1077 1077 } 1078 1078 ··· 1822 1822 if (query->startup_sent < br->multicast_startup_query_count) 1823 1823 query->startup_sent++; 1824 1824 1825 - RCU_INIT_POINTER(querier, NULL); 1825 + RCU_INIT_POINTER(querier->port, NULL); 1826 1826 br_multicast_send_query(br, NULL, query); 1827 1827 spin_unlock(&br->multicast_lock); 1828 1828 }
+9 -18
net/bridge/br_netfilter.c
··· 37 37 #include <net/route.h> 38 38 #include <net/netfilter/br_netfilter.h> 39 39 40 - #if IS_ENABLED(CONFIG_NF_CONNTRACK) 41 - #include <net/netfilter/nf_conntrack.h> 42 - #endif 43 - 44 40 #include <asm/uaccess.h> 45 41 #include "br_private.h" 46 42 #ifdef CONFIG_SYSCTL ··· 346 350 return 0; 347 351 } 348 352 349 - static bool dnat_took_place(const struct sk_buff *skb) 353 + static bool daddr_was_changed(const struct sk_buff *skb, 354 + const struct nf_bridge_info *nf_bridge) 350 355 { 351 - #if IS_ENABLED(CONFIG_NF_CONNTRACK) 352 - enum ip_conntrack_info ctinfo; 353 - struct nf_conn *ct; 354 - 355 - ct = nf_ct_get(skb, &ctinfo); 356 - if (!ct || nf_ct_is_untracked(ct)) 357 - return false; 358 - 359 - return test_bit(IPS_DST_NAT_BIT, &ct->status); 360 - #else 361 - return false; 362 - #endif 356 + return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr; 363 357 } 364 358 365 359 /* This requires some explaining. If DNAT has taken place, 366 360 * we will need to fix up the destination Ethernet address. 361 + * This is also true when SNAT takes place (for the reply direction). 367 362 * 368 363 * There are two cases to consider: 369 364 * 1. 
The packet was DNAT'ed to a device in the same bridge ··· 408 421 nf_bridge->pkt_otherhost = false; 409 422 } 410 423 nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; 411 - if (dnat_took_place(skb)) { 424 + if (daddr_was_changed(skb, nf_bridge)) { 412 425 if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) { 413 426 struct in_device *in_dev = __in_dev_get_rcu(dev); 414 427 ··· 619 632 struct sk_buff *skb, 620 633 const struct nf_hook_state *state) 621 634 { 635 + struct nf_bridge_info *nf_bridge; 622 636 struct net_bridge_port *p; 623 637 struct net_bridge *br; 624 638 __u32 len = nf_bridge_encap_header_len(skb); ··· 656 668 return NF_DROP; 657 669 if (!setup_pre_routing(skb)) 658 670 return NF_DROP; 671 + 672 + nf_bridge = nf_bridge_info_get(skb); 673 + nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr; 659 674 660 675 skb->protocol = htons(ETH_P_IP); 661 676
+2
net/bridge/br_stp_timer.c
··· 97 97 netif_carrier_on(br->dev); 98 98 } 99 99 br_log_state(p); 100 + rcu_read_lock(); 100 101 br_ifinfo_notify(RTM_NEWLINK, p); 102 + rcu_read_unlock(); 101 103 spin_unlock(&br->lock); 102 104 } 103 105
+8
net/caif/caif_socket.c
··· 330 330 release_sock(sk); 331 331 timeo = schedule_timeout(timeo); 332 332 lock_sock(sk); 333 + 334 + if (sock_flag(sk, SOCK_DEAD)) 335 + break; 336 + 333 337 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 334 338 } 335 339 ··· 377 373 struct sk_buff *skb; 378 374 379 375 lock_sock(sk); 376 + if (sock_flag(sk, SOCK_DEAD)) { 377 + err = -ECONNRESET; 378 + goto unlock; 379 + } 380 380 skb = skb_dequeue(&sk->sk_receive_queue); 381 381 caif_check_flow_release(sk); 382 382
+20 -13
net/ceph/osd_client.c
··· 1306 1306 if (list_empty(&req->r_osd_item)) 1307 1307 req->r_osd = NULL; 1308 1308 } 1309 - 1310 - list_del_init(&req->r_req_lru_item); /* can be on notarget */ 1311 1309 ceph_osdc_put_request(req); 1312 1310 } 1313 1311 ··· 2015 2017 err = __map_request(osdc, req, 2016 2018 force_resend || force_resend_writes); 2017 2019 dout("__map_request returned %d\n", err); 2018 - if (err == 0) 2019 - continue; /* no change and no osd was specified */ 2020 2020 if (err < 0) 2021 2021 continue; /* hrm! */ 2022 - if (req->r_osd == NULL) { 2023 - dout("tid %llu maps to no valid osd\n", req->r_tid); 2024 - needmap++; /* request a newer map */ 2025 - continue; 2026 - } 2022 + if (req->r_osd == NULL || err > 0) { 2023 + if (req->r_osd == NULL) { 2024 + dout("lingering %p tid %llu maps to no osd\n", 2025 + req, req->r_tid); 2026 + /* 2027 + * A homeless lingering request makes 2028 + * no sense, as it's job is to keep 2029 + * a particular OSD connection open. 2030 + * Request a newer map and kick the 2031 + * request, knowing that it won't be 2032 + * resent until we actually get a map 2033 + * that can tell us where to send it. 2034 + */ 2035 + needmap++; 2036 + } 2027 2037 2028 - dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid, 2029 - req->r_osd ? req->r_osd->o_osd : -1); 2030 - __register_request(osdc, req); 2031 - __unregister_linger_request(osdc, req); 2038 + dout("kicking lingering %p tid %llu osd%d\n", req, 2039 + req->r_tid, req->r_osd ? req->r_osd->o_osd : -1); 2040 + __register_request(osdc, req); 2041 + __unregister_linger_request(osdc, req); 2042 + } 2032 2043 } 2033 2044 reset_changed_osds(osdc); 2034 2045 mutex_unlock(&osdc->request_mutex);
+3
net/core/rtnetlink.c
··· 2416 2416 { 2417 2417 struct sk_buff *skb; 2418 2418 2419 + if (dev->reg_state != NETREG_REGISTERED) 2420 + return; 2421 + 2419 2422 skb = rtmsg_ifinfo_build_skb(type, dev, change, flags); 2420 2423 if (skb) 2421 2424 rtmsg_ifinfo_send(skb, dev, flags);
+2 -2
net/dsa/dsa.c
··· 359 359 */ 360 360 ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL); 361 361 if (ds == NULL) 362 - return NULL; 362 + return ERR_PTR(-ENOMEM); 363 363 364 364 ds->dst = dst; 365 365 ds->index = index; ··· 370 370 371 371 ret = dsa_switch_setup_one(ds, parent); 372 372 if (ret) 373 - return NULL; 373 + return ERR_PTR(ret); 374 374 375 375 return ds; 376 376 }
+2 -1
net/ipv4/esp4.c
··· 256 256 aead_givcrypt_set_crypt(req, sg, sg, clen, iv); 257 257 aead_givcrypt_set_assoc(req, asg, assoclen); 258 258 aead_givcrypt_set_giv(req, esph->enc_data, 259 - XFRM_SKB_CB(skb)->seq.output.low); 259 + XFRM_SKB_CB(skb)->seq.output.low + 260 + ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); 260 261 261 262 ESP_SKB_CB(skb)->tmp = tmp; 262 263 err = crypto_aead_givencrypt(req);
+2 -1
net/ipv4/fib_trie.c
··· 1164 1164 state = fa->fa_state; 1165 1165 new_fa->fa_state = state & ~FA_S_ACCESSED; 1166 1166 new_fa->fa_slen = fa->fa_slen; 1167 + new_fa->tb_id = tb->tb_id; 1167 1168 1168 1169 err = netdev_switch_fib_ipv4_add(key, plen, fi, 1169 1170 new_fa->fa_tos, ··· 1765 1764 /* record local slen */ 1766 1765 slen = fa->fa_slen; 1767 1766 1768 - if (!fi || !(fi->fib_flags & RTNH_F_EXTERNAL)) 1767 + if (!fi || !(fi->fib_flags & RTNH_F_OFFLOAD)) 1769 1768 continue; 1770 1769 1771 1770 netdev_switch_fib_ipv4_del(n->key,
+10 -4
net/ipv4/ip_vti.c
··· 65 65 goto drop; 66 66 67 67 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel; 68 - skb->mark = be32_to_cpu(tunnel->parms.i_key); 69 68 70 69 return xfrm_input(skb, nexthdr, spi, encap_type); 71 70 } ··· 90 91 struct pcpu_sw_netstats *tstats; 91 92 struct xfrm_state *x; 92 93 struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4; 94 + u32 orig_mark = skb->mark; 95 + int ret; 93 96 94 97 if (!tunnel) 95 98 return 1; ··· 108 107 x = xfrm_input_state(skb); 109 108 family = x->inner_mode->afinfo->family; 110 109 111 - if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family)) 110 + skb->mark = be32_to_cpu(tunnel->parms.i_key); 111 + ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family); 112 + skb->mark = orig_mark; 113 + 114 + if (!ret) 112 115 return -EPERM; 113 116 114 117 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev))); ··· 221 216 222 217 memset(&fl, 0, sizeof(fl)); 223 218 224 - skb->mark = be32_to_cpu(tunnel->parms.o_key); 225 - 226 219 switch (skb->protocol) { 227 220 case htons(ETH_P_IP): 228 221 xfrm_decode_session(skb, &fl, AF_INET); ··· 235 232 dev_kfree_skb(skb); 236 233 return NETDEV_TX_OK; 237 234 } 235 + 236 + /* override mark with tunnel output key */ 237 + fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key); 238 238 239 239 return vti_xmit(skb, dev, &fl); 240 240 }
+6
net/ipv4/netfilter/arp_tables.c
··· 1075 1075 /* overflow check */ 1076 1076 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) 1077 1077 return -ENOMEM; 1078 + if (tmp.num_counters == 0) 1079 + return -EINVAL; 1080 + 1078 1081 tmp.name[sizeof(tmp.name)-1] = 0; 1079 1082 1080 1083 newinfo = xt_alloc_table_info(tmp.size); ··· 1502 1499 return -ENOMEM; 1503 1500 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) 1504 1501 return -ENOMEM; 1502 + if (tmp.num_counters == 0) 1503 + return -EINVAL; 1504 + 1505 1505 tmp.name[sizeof(tmp.name)-1] = 0; 1506 1506 1507 1507 newinfo = xt_alloc_table_info(tmp.size);
+6
net/ipv4/netfilter/ip_tables.c
··· 1262 1262 /* overflow check */ 1263 1263 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) 1264 1264 return -ENOMEM; 1265 + if (tmp.num_counters == 0) 1266 + return -EINVAL; 1267 + 1265 1268 tmp.name[sizeof(tmp.name)-1] = 0; 1266 1269 1267 1270 newinfo = xt_alloc_table_info(tmp.size); ··· 1812 1809 return -ENOMEM; 1813 1810 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) 1814 1811 return -ENOMEM; 1812 + if (tmp.num_counters == 0) 1813 + return -EINVAL; 1814 + 1815 1815 tmp.name[sizeof(tmp.name)-1] = 0; 1816 1816 1817 1817 newinfo = xt_alloc_table_info(tmp.size);
+4
net/ipv4/route.c
··· 902 902 bool send; 903 903 int code; 904 904 905 + /* IP on this device is disabled. */ 906 + if (!in_dev) 907 + goto out; 908 + 905 909 net = dev_net(rt->dst.dev); 906 910 if (!IN_DEV_FORWARD(in_dev)) { 907 911 switch (rt->dst.error) {
+7 -4
net/ipv4/tcp.c
··· 402 402 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 403 403 tp->snd_cwnd_clamp = ~0; 404 404 tp->mss_cache = TCP_MSS_DEFAULT; 405 + u64_stats_init(&tp->syncp); 405 406 406 407 tp->reordering = sysctl_tcp_reordering; 407 408 tcp_enable_early_retrans(tp); ··· 2599 2598 const struct tcp_sock *tp = tcp_sk(sk); 2600 2599 const struct inet_connection_sock *icsk = inet_csk(sk); 2601 2600 u32 now = tcp_time_stamp; 2601 + unsigned int start; 2602 2602 u32 rate; 2603 2603 2604 2604 memset(info, 0, sizeof(*info)); ··· 2667 2665 rate = READ_ONCE(sk->sk_max_pacing_rate); 2668 2666 info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL; 2669 2667 2670 - spin_lock_bh(&sk->sk_lock.slock); 2671 - info->tcpi_bytes_acked = tp->bytes_acked; 2672 - info->tcpi_bytes_received = tp->bytes_received; 2673 - spin_unlock_bh(&sk->sk_lock.slock); 2668 + do { 2669 + start = u64_stats_fetch_begin_irq(&tp->syncp); 2670 + info->tcpi_bytes_acked = tp->bytes_acked; 2671 + info->tcpi_bytes_received = tp->bytes_received; 2672 + } while (u64_stats_fetch_retry_irq(&tp->syncp, start)); 2674 2673 } 2675 2674 EXPORT_SYMBOL_GPL(tcp_get_info); 2676 2675
+4 -1
net/ipv4/tcp_cong.c
··· 187 187 188 188 tcp_cleanup_congestion_control(sk); 189 189 icsk->icsk_ca_ops = ca; 190 + icsk->icsk_ca_setsockopt = 1; 190 191 191 192 if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init) 192 193 icsk->icsk_ca_ops->init(sk); ··· 336 335 rcu_read_lock(); 337 336 ca = __tcp_ca_find_autoload(name); 338 337 /* No change asking for existing value */ 339 - if (ca == icsk->icsk_ca_ops) 338 + if (ca == icsk->icsk_ca_ops) { 339 + icsk->icsk_ca_setsockopt = 1; 340 340 goto out; 341 + } 341 342 if (!ca) 342 343 err = -ENOENT; 343 344 else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
+4
net/ipv4/tcp_fastopen.c
··· 206 206 skb_set_owner_r(skb2, child); 207 207 __skb_queue_tail(&child->sk_receive_queue, skb2); 208 208 tp->syn_data_acked = 1; 209 + 210 + /* u64_stats_update_begin(&tp->syncp) not needed here, 211 + * as we certainly are not changing upper 32bit value (0) 212 + */ 209 213 tp->bytes_received = end_seq - TCP_SKB_CB(skb)->seq - 1; 210 214 } else { 211 215 end_seq = TCP_SKB_CB(skb)->seq + 1;
+13 -6
net/ipv4/tcp_input.c
··· 2698 2698 struct tcp_sock *tp = tcp_sk(sk); 2699 2699 bool recovered = !before(tp->snd_una, tp->high_seq); 2700 2700 2701 + if ((flag & FLAG_SND_UNA_ADVANCED) && 2702 + tcp_try_undo_loss(sk, false)) 2703 + return; 2704 + 2701 2705 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ 2702 2706 /* Step 3.b. A timeout is spurious if not all data are 2703 2707 * lost, i.e., never-retransmitted data are (s)acked. 2704 2708 */ 2705 - if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED)) 2709 + if ((flag & FLAG_ORIG_SACK_ACKED) && 2710 + tcp_try_undo_loss(sk, true)) 2706 2711 return; 2707 2712 2708 - if (after(tp->snd_nxt, tp->high_seq) && 2709 - (flag & FLAG_DATA_SACKED || is_dupack)) { 2710 - tp->frto = 0; /* Loss was real: 2nd part of step 3.a */ 2713 + if (after(tp->snd_nxt, tp->high_seq)) { 2714 + if (flag & FLAG_DATA_SACKED || is_dupack) 2715 + tp->frto = 0; /* Step 3.a. loss was real */ 2711 2716 } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) { 2712 2717 tp->high_seq = tp->snd_nxt; 2713 2718 __tcp_push_pending_frames(sk, tcp_current_mss(sk), ··· 2737 2732 else if (flag & FLAG_SND_UNA_ADVANCED) 2738 2733 tcp_reset_reno_sack(tp); 2739 2734 } 2740 - if (tcp_try_undo_loss(sk, false)) 2741 - return; 2742 2735 tcp_xmit_retransmit_queue(sk); 2743 2736 } 2744 2737 ··· 3286 3283 { 3287 3284 u32 delta = ack - tp->snd_una; 3288 3285 3286 + u64_stats_update_begin(&tp->syncp); 3289 3287 tp->bytes_acked += delta; 3288 + u64_stats_update_end(&tp->syncp); 3290 3289 tp->snd_una = ack; 3291 3290 } 3292 3291 ··· 3297 3292 { 3298 3293 u32 delta = seq - tp->rcv_nxt; 3299 3294 3295 + u64_stats_update_begin(&tp->syncp); 3300 3296 tp->bytes_received += delta; 3297 + u64_stats_update_end(&tp->syncp); 3301 3298 tp->rcv_nxt = seq; 3302 3299 } 3303 3300
+5 -2
net/ipv4/tcp_minisocks.c
··· 300 300 tw->tw_v6_daddr = sk->sk_v6_daddr; 301 301 tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr; 302 302 tw->tw_tclass = np->tclass; 303 - tw->tw_flowlabel = np->flow_label >> 12; 303 + tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK); 304 304 tw->tw_ipv6only = sk->sk_ipv6only; 305 305 } 306 306 #endif ··· 420 420 rcu_read_unlock(); 421 421 } 422 422 423 - if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner)) 423 + /* If no valid choice made yet, assign current system default ca. */ 424 + if (!ca_got_dst && 425 + (!icsk->icsk_ca_setsockopt || 426 + !try_module_get(icsk->icsk_ca_ops->owner))) 424 427 tcp_assign_congestion_control(sk); 425 428 426 429 tcp_set_ca_state(sk, TCP_CA_Open);
+2 -4
net/ipv4/udp.c
··· 1345 1345 } 1346 1346 unlock_sock_fast(sk, slow); 1347 1347 1348 - if (noblock) 1349 - return -EAGAIN; 1350 - 1351 - /* starting over for a new packet */ 1348 + /* starting over for a new packet, but check if we need to yield */ 1349 + cond_resched(); 1352 1350 msg->msg_flags &= ~MSG_TRUNC; 1353 1351 goto try_again; 1354 1352 }
+2 -1
net/ipv6/esp6.c
··· 248 248 aead_givcrypt_set_crypt(req, sg, sg, clen, iv); 249 249 aead_givcrypt_set_assoc(req, asg, assoclen); 250 250 aead_givcrypt_set_giv(req, esph->enc_data, 251 - XFRM_SKB_CB(skb)->seq.output.low); 251 + XFRM_SKB_CB(skb)->seq.output.low + 252 + ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); 252 253 253 254 ESP_SKB_CB(skb)->tmp = tmp; 254 255 err = crypto_aead_givencrypt(req);
+37 -2
net/ipv6/ip6_fib.c
··· 693 693 { 694 694 struct rt6_info *iter = NULL; 695 695 struct rt6_info **ins; 696 + struct rt6_info **fallback_ins = NULL; 696 697 int replace = (info->nlh && 697 698 (info->nlh->nlmsg_flags & NLM_F_REPLACE)); 698 699 int add = (!info->nlh || ··· 717 716 (info->nlh->nlmsg_flags & NLM_F_EXCL)) 718 717 return -EEXIST; 719 718 if (replace) { 720 - found++; 721 - break; 719 + if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) { 720 + found++; 721 + break; 722 + } 723 + if (rt_can_ecmp) 724 + fallback_ins = fallback_ins ?: ins; 725 + goto next_iter; 722 726 } 723 727 724 728 if (iter->dst.dev == rt->dst.dev && ··· 759 753 if (iter->rt6i_metric > rt->rt6i_metric) 760 754 break; 761 755 756 + next_iter: 762 757 ins = &iter->dst.rt6_next; 758 + } 759 + 760 + if (fallback_ins && !found) { 761 + /* No ECMP-able route found, replace first non-ECMP one */ 762 + ins = fallback_ins; 763 + iter = *ins; 764 + found++; 763 765 } 764 766 765 767 /* Reset round-robin state, if necessary */ ··· 829 815 } 830 816 831 817 } else { 818 + int nsiblings; 819 + 832 820 if (!found) { 833 821 if (add) 834 822 goto add; ··· 851 835 info->nl_net->ipv6.rt6_stats->fib_route_nodes++; 852 836 fn->fn_flags |= RTN_RTINFO; 853 837 } 838 + nsiblings = iter->rt6i_nsiblings; 854 839 fib6_purge_rt(iter, fn, info->nl_net); 855 840 rt6_release(iter); 841 + 842 + if (nsiblings) { 843 + /* Replacing an ECMP route, remove all siblings */ 844 + ins = &rt->dst.rt6_next; 845 + iter = *ins; 846 + while (iter) { 847 + if (rt6_qualify_for_ecmp(iter)) { 848 + *ins = iter->dst.rt6_next; 849 + fib6_purge_rt(iter, fn, info->nl_net); 850 + rt6_release(iter); 851 + nsiblings--; 852 + } else { 853 + ins = &iter->dst.rt6_next; 854 + } 855 + iter = *ins; 856 + } 857 + WARN_ON(nsiblings != 0); 858 + } 856 859 } 857 860 858 861 return 0;
+3 -1
net/ipv6/ip6_output.c
··· 1300 1300 1301 1301 /* If this is the first and only packet and device 1302 1302 * supports checksum offloading, let's use it. 1303 + * Use transhdrlen, same as IPv4, because partial 1304 + * sums only work when transhdrlen is set. 1303 1305 */ 1304 - if (!skb && sk->sk_protocol == IPPROTO_UDP && 1306 + if (transhdrlen && sk->sk_protocol == IPPROTO_UDP && 1305 1307 length + fragheaderlen < mtu && 1306 1308 rt->dst.dev->features & NETIF_F_V6_CSUM && 1307 1309 !exthdrlen)
+24 -3
net/ipv6/ip6_vti.c
··· 322 322 } 323 323 324 324 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t; 325 - skb->mark = be32_to_cpu(t->parms.i_key); 326 325 327 326 rcu_read_unlock(); 328 327 ··· 341 342 struct pcpu_sw_netstats *tstats; 342 343 struct xfrm_state *x; 343 344 struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6; 345 + u32 orig_mark = skb->mark; 346 + int ret; 344 347 345 348 if (!t) 346 349 return 1; ··· 359 358 x = xfrm_input_state(skb); 360 359 family = x->inner_mode->afinfo->family; 361 360 362 - if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family)) 361 + skb->mark = be32_to_cpu(t->parms.i_key); 362 + ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family); 363 + skb->mark = orig_mark; 364 + 365 + if (!ret) 363 366 return -EPERM; 364 367 365 368 skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev))); ··· 435 430 struct net_device *tdev; 436 431 struct xfrm_state *x; 437 432 int err = -1; 433 + int mtu; 438 434 439 435 if (!dst) 440 436 goto tx_err_link_failure; ··· 469 463 skb_dst_set(skb, dst); 470 464 skb->dev = skb_dst(skb)->dev; 471 465 466 + mtu = dst_mtu(dst); 467 + if (!skb->ignore_df && skb->len > mtu) { 468 + skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu); 469 + 470 + if (skb->protocol == htons(ETH_P_IPV6)) 471 + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 472 + else 473 + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 474 + htonl(mtu)); 475 + 476 + return -EMSGSIZE; 477 + } 478 + 472 479 err = dst_output(skb); 473 480 if (net_xmit_eval(err) == 0) { 474 481 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); ··· 514 495 int ret; 515 496 516 497 memset(&fl, 0, sizeof(fl)); 517 - skb->mark = be32_to_cpu(t->parms.o_key); 518 498 519 499 switch (skb->protocol) { 520 500 case htons(ETH_P_IPV6): ··· 533 515 default: 534 516 goto tx_err; 535 517 } 518 + 519 + /* override mark with tunnel output key */ 520 + fl.flowi_mark = be32_to_cpu(t->parms.o_key); 536 521 537 522 ret = vti6_xmit(skb, dev, &fl); 538 523 if (ret < 0)
+6
net/ipv6/netfilter/ip6_tables.c
··· 1275 1275 /* overflow check */ 1276 1276 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) 1277 1277 return -ENOMEM; 1278 + if (tmp.num_counters == 0) 1279 + return -EINVAL; 1280 + 1278 1281 tmp.name[sizeof(tmp.name)-1] = 0; 1279 1282 1280 1283 newinfo = xt_alloc_table_info(tmp.size); ··· 1825 1822 return -ENOMEM; 1826 1823 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) 1827 1824 return -ENOMEM; 1825 + if (tmp.num_counters == 0) 1826 + return -EINVAL; 1827 + 1828 1828 tmp.name[sizeof(tmp.name)-1] = 0; 1829 1829 1830 1830 newinfo = xt_alloc_table_info(tmp.size);
+9 -5
net/ipv6/route.c
··· 2504 2504 int attrlen; 2505 2505 int err = 0, last_err = 0; 2506 2506 2507 + remaining = cfg->fc_mp_len; 2507 2508 beginning: 2508 2509 rtnh = (struct rtnexthop *)cfg->fc_mp; 2509 - remaining = cfg->fc_mp_len; 2510 2510 2511 2511 /* Parse a Multipath Entry */ 2512 2512 while (rtnh_ok(rtnh, remaining)) { ··· 2536 2536 * next hops that have been already added. 2537 2537 */ 2538 2538 add = 0; 2539 + remaining = cfg->fc_mp_len - remaining; 2539 2540 goto beginning; 2540 2541 } 2541 2542 } 2542 2543 /* Because each route is added like a single route we remove 2543 - * this flag after the first nexthop (if there is a collision, 2544 - * we have already fail to add the first nexthop: 2545 - * fib6_add_rt2node() has reject it). 2544 + * these flags after the first nexthop: if there is a collision, 2545 + * we have already failed to add the first nexthop: 2546 + * fib6_add_rt2node() has rejected it; when replacing, old 2547 + * nexthops have been replaced by first new, the rest should 2548 + * be added to it. 2546 2549 */ 2547 - cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL; 2550 + cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | 2551 + NLM_F_REPLACE); 2548 2552 rtnh = rtnh_next(rtnh, &remaining); 2549 2553 } 2550 2554
+1 -1
net/ipv6/tcp_ipv6.c
··· 914 914 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, 915 915 tcp_time_stamp + tcptw->tw_ts_offset, 916 916 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw), 917 - tw->tw_tclass, (tw->tw_flowlabel << 12)); 917 + tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel)); 918 918 919 919 inet_twsk_put(tw); 920 920 }
+5 -5
net/ipv6/udp.c
··· 525 525 } 526 526 unlock_sock_fast(sk, slow); 527 527 528 - if (noblock) 529 - return -EAGAIN; 530 - 531 - /* starting over for a new packet */ 528 + /* starting over for a new packet, but check if we need to yield */ 529 + cond_resched(); 532 530 msg->msg_flags &= ~MSG_TRUNC; 533 531 goto try_again; 534 532 } ··· 729 731 (inet->inet_dport && inet->inet_dport != rmt_port) || 730 732 (!ipv6_addr_any(&sk->sk_v6_daddr) && 731 733 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) || 732 - (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) 734 + (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) || 735 + (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) && 736 + !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))) 733 737 return false; 734 738 if (!inet6_mc_check(sk, loc_addr, rmt_addr)) 735 739 return false;
+11 -48
net/mac80211/cfg.c
··· 2495 2495 struct ieee80211_roc_work *new_roc, 2496 2496 struct ieee80211_roc_work *cur_roc) 2497 2497 { 2498 - unsigned long j = jiffies; 2499 - unsigned long cur_roc_end = cur_roc->hw_start_time + 2500 - msecs_to_jiffies(cur_roc->duration); 2501 - struct ieee80211_roc_work *next_roc; 2502 - int new_dur; 2498 + unsigned long now = jiffies; 2499 + unsigned long remaining = cur_roc->hw_start_time + 2500 + msecs_to_jiffies(cur_roc->duration) - 2501 + now; 2503 2502 2504 2503 if (WARN_ON(!cur_roc->started || !cur_roc->hw_begun)) 2505 2504 return false; 2506 2505 2507 - if (time_after(j + IEEE80211_ROC_MIN_LEFT, cur_roc_end)) 2506 + /* if it doesn't fit entirely, schedule a new one */ 2507 + if (new_roc->duration > jiffies_to_msecs(remaining)) 2508 2508 return false; 2509 2509 2510 2510 ieee80211_handle_roc_started(new_roc); 2511 2511 2512 - new_dur = new_roc->duration - jiffies_to_msecs(cur_roc_end - j); 2513 - 2514 - /* cur_roc is long enough - add new_roc to the dependents list. */ 2515 - if (new_dur <= 0) { 2516 - list_add_tail(&new_roc->list, &cur_roc->dependents); 2517 - return true; 2518 - } 2519 - 2520 - new_roc->duration = new_dur; 2521 - 2522 - /* 2523 - * if cur_roc was already coalesced before, we might 2524 - * want to extend the next roc instead of adding 2525 - * a new one. 
2526 - */ 2527 - next_roc = list_entry(cur_roc->list.next, 2528 - struct ieee80211_roc_work, list); 2529 - if (&next_roc->list != &local->roc_list && 2530 - next_roc->chan == new_roc->chan && 2531 - next_roc->sdata == new_roc->sdata && 2532 - !WARN_ON(next_roc->started)) { 2533 - list_add_tail(&new_roc->list, &next_roc->dependents); 2534 - next_roc->duration = max(next_roc->duration, 2535 - new_roc->duration); 2536 - next_roc->type = max(next_roc->type, new_roc->type); 2537 - return true; 2538 - } 2539 - 2540 - /* add right after cur_roc */ 2541 - list_add(&new_roc->list, &cur_roc->list); 2542 - 2512 + /* add to dependents so we send the expired event properly */ 2513 + list_add_tail(&new_roc->list, &cur_roc->dependents); 2543 2514 return true; 2544 2515 } 2545 2516 ··· 2623 2652 * In the offloaded ROC case, if it hasn't begun, add 2624 2653 * this new one to the dependent list to be handled 2625 2654 * when the master one begins. If it has begun, 2626 - * check that there's still a minimum time left and 2627 - * if so, start this one, transmitting the frame, but 2628 - * add it to the list directly after this one with 2629 - * a reduced time so we'll ask the driver to execute 2630 - * it right after finishing the previous one, in the 2631 - * hope that it'll also be executed right afterwards, 2632 - * effectively extending the old one. 2633 - * If there's no minimum time left, just add it to the 2634 - * normal list. 2635 - * TODO: the ROC type is ignored here, assuming that it 2636 - * is better to immediately use the current ROC. 2655 + * check if it fits entirely within the existing one, 2656 + * in which case it will just be dependent as well. 2657 + * Otherwise, schedule it by itself. 2637 2658 */ 2638 2659 if (!tmp->hw_begun) { 2639 2660 list_add_tail(&roc->list, &tmp->dependents);
+3 -6
net/mac80211/ieee80211_i.h
··· 205 205 * @IEEE80211_RX_CMNTR: received on cooked monitor already 206 206 * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported 207 207 * to cfg80211_report_obss_beacon(). 208 + * @IEEE80211_RX_REORDER_TIMER: this frame is released by the 209 + * reorder buffer timeout timer, not the normal RX path 208 210 * 209 211 * These flags are used across handling multiple interfaces 210 212 * for a single frame. ··· 214 212 enum ieee80211_rx_flags { 215 213 IEEE80211_RX_CMNTR = BIT(0), 216 214 IEEE80211_RX_BEACON_REPORTED = BIT(1), 215 + IEEE80211_RX_REORDER_TIMER = BIT(2), 217 216 }; 218 217 219 218 struct ieee80211_rx_data { ··· 327 324 u8 dst[ETH_ALEN]; 328 325 u8 flags; 329 326 }; 330 - 331 - #if HZ/100 == 0 332 - #define IEEE80211_ROC_MIN_LEFT 1 333 - #else 334 - #define IEEE80211_ROC_MIN_LEFT (HZ/100) 335 - #endif 336 327 337 328 struct ieee80211_roc_work { 338 329 struct list_head list;
+6
net/mac80211/iface.c
··· 522 522 memcpy(sdata->vif.hw_queue, master->vif.hw_queue, 523 523 sizeof(sdata->vif.hw_queue)); 524 524 sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef; 525 + 526 + mutex_lock(&local->key_mtx); 527 + sdata->crypto_tx_tailroom_needed_cnt += 528 + master->crypto_tx_tailroom_needed_cnt; 529 + mutex_unlock(&local->key_mtx); 530 + 525 531 break; 526 532 } 527 533 case NL80211_IFTYPE_AP:
+73 -9
net/mac80211/key.c
··· 58 58 lockdep_assert_held(&local->key_mtx); 59 59 } 60 60 61 + static void 62 + update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta) 63 + { 64 + struct ieee80211_sub_if_data *vlan; 65 + 66 + if (sdata->vif.type != NL80211_IFTYPE_AP) 67 + return; 68 + 69 + mutex_lock(&sdata->local->mtx); 70 + 71 + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) 72 + vlan->crypto_tx_tailroom_needed_cnt += delta; 73 + 74 + mutex_unlock(&sdata->local->mtx); 75 + } 76 + 61 77 static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata) 62 78 { 63 79 /* ··· 95 79 * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net 96 80 */ 97 81 82 + update_vlan_tailroom_need_count(sdata, 1); 83 + 98 84 if (!sdata->crypto_tx_tailroom_needed_cnt++) { 99 85 /* 100 86 * Flush all XMIT packets currently using HW encryption or no ··· 104 86 */ 105 87 synchronize_net(); 106 88 } 89 + } 90 + 91 + static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata, 92 + int delta) 93 + { 94 + WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta); 95 + 96 + update_vlan_tailroom_need_count(sdata, -delta); 97 + sdata->crypto_tx_tailroom_needed_cnt -= delta; 107 98 } 108 99 109 100 static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) ··· 171 144 172 145 if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || 173 146 (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) 174 - sdata->crypto_tx_tailroom_needed_cnt--; 147 + decrease_tailroom_need_count(sdata, 1); 175 148 176 149 WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && 177 150 (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)); ··· 568 541 schedule_delayed_work(&sdata->dec_tailroom_needed_wk, 569 542 HZ/2); 570 543 } else { 571 - sdata->crypto_tx_tailroom_needed_cnt--; 544 + decrease_tailroom_need_count(sdata, 1); 572 545 } 573 546 } 574 547 ··· 658 631 void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) 659 632 { 660 
633 struct ieee80211_key *key; 634 + struct ieee80211_sub_if_data *vlan; 661 635 662 636 ASSERT_RTNL(); 663 637 ··· 667 639 668 640 mutex_lock(&sdata->local->key_mtx); 669 641 670 - sdata->crypto_tx_tailroom_needed_cnt = 0; 642 + WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || 643 + sdata->crypto_tx_tailroom_pending_dec); 644 + 645 + if (sdata->vif.type == NL80211_IFTYPE_AP) { 646 + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) 647 + WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt || 648 + vlan->crypto_tx_tailroom_pending_dec); 649 + } 671 650 672 651 list_for_each_entry(key, &sdata->key_list, list) { 673 652 increment_tailroom_need_count(sdata); 674 653 ieee80211_key_enable_hw_accel(key); 654 + } 655 + 656 + mutex_unlock(&sdata->local->key_mtx); 657 + } 658 + 659 + void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata) 660 + { 661 + struct ieee80211_sub_if_data *vlan; 662 + 663 + mutex_lock(&sdata->local->key_mtx); 664 + 665 + sdata->crypto_tx_tailroom_needed_cnt = 0; 666 + 667 + if (sdata->vif.type == NL80211_IFTYPE_AP) { 668 + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) 669 + vlan->crypto_tx_tailroom_needed_cnt = 0; 675 670 } 676 671 677 672 mutex_unlock(&sdata->local->key_mtx); ··· 739 688 { 740 689 struct ieee80211_key *key, *tmp; 741 690 742 - sdata->crypto_tx_tailroom_needed_cnt -= 743 - sdata->crypto_tx_tailroom_pending_dec; 691 + decrease_tailroom_need_count(sdata, 692 + sdata->crypto_tx_tailroom_pending_dec); 744 693 sdata->crypto_tx_tailroom_pending_dec = 0; 745 694 746 695 ieee80211_debugfs_key_remove_mgmt_default(sdata); ··· 760 709 { 761 710 struct ieee80211_local *local = sdata->local; 762 711 struct ieee80211_sub_if_data *vlan; 712 + struct ieee80211_sub_if_data *master; 763 713 struct ieee80211_key *key, *tmp; 764 714 LIST_HEAD(keys); 765 715 ··· 780 728 list_for_each_entry_safe(key, tmp, &keys, list) 781 729 __ieee80211_key_destroy(key, false); 782 730 783 - 
WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || 784 - sdata->crypto_tx_tailroom_pending_dec); 731 + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { 732 + if (sdata->bss) { 733 + master = container_of(sdata->bss, 734 + struct ieee80211_sub_if_data, 735 + u.ap); 736 + 737 + WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt != 738 + master->crypto_tx_tailroom_needed_cnt); 739 + } 740 + } else { 741 + WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || 742 + sdata->crypto_tx_tailroom_pending_dec); 743 + } 744 + 785 745 if (sdata->vif.type == NL80211_IFTYPE_AP) { 786 746 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) 787 747 WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt || ··· 857 793 */ 858 794 859 795 mutex_lock(&sdata->local->key_mtx); 860 - sdata->crypto_tx_tailroom_needed_cnt -= 861 - sdata->crypto_tx_tailroom_pending_dec; 796 + decrease_tailroom_need_count(sdata, 797 + sdata->crypto_tx_tailroom_pending_dec); 862 798 sdata->crypto_tx_tailroom_pending_dec = 0; 863 799 mutex_unlock(&sdata->local->key_mtx); 864 800 }
+1
net/mac80211/key.h
··· 161 161 void ieee80211_free_sta_keys(struct ieee80211_local *local, 162 162 struct sta_info *sta); 163 163 void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); 164 + void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata); 164 165 165 166 #define key_mtx_dereference(local, ref) \ 166 167 rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx)))
+3 -2
net/mac80211/rx.c
··· 2121 2121 /* deliver to local stack */ 2122 2122 skb->protocol = eth_type_trans(skb, dev); 2123 2123 memset(skb->cb, 0, sizeof(skb->cb)); 2124 - if (rx->local->napi) 2124 + if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) && 2125 + rx->local->napi) 2125 2126 napi_gro_receive(rx->local->napi, skb); 2126 2127 else 2127 2128 netif_receive_skb(skb); ··· 3232 3231 /* This is OK -- must be QoS data frame */ 3233 3232 .security_idx = tid, 3234 3233 .seqno_idx = tid, 3235 - .flags = 0, 3234 + .flags = IEEE80211_RX_REORDER_TIMER, 3236 3235 }; 3237 3236 struct tid_ampdu_rx *tid_agg_rx; 3238 3237
+3
net/mac80211/util.c
··· 2023 2023 2024 2024 /* add back keys */ 2025 2025 list_for_each_entry(sdata, &local->interfaces, list) 2026 + ieee80211_reset_crypto_tx_tailroom(sdata); 2027 + 2028 + list_for_each_entry(sdata, &local->interfaces, list) 2026 2029 if (ieee80211_sdata_running(sdata)) 2027 2030 ieee80211_enable_keys(sdata); 2028 2031
+4 -2
net/mac80211/wep.c
··· 98 98 99 99 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); 100 100 101 - if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN || 102 - skb_headroom(skb) < IEEE80211_WEP_IV_LEN)) 101 + if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN)) 103 102 return NULL; 104 103 105 104 hdrlen = ieee80211_hdrlen(hdr->frame_control); ··· 165 166 u8 *iv; 166 167 size_t len; 167 168 u8 rc4key[3 + WLAN_KEY_LEN_WEP104]; 169 + 170 + if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN)) 171 + return -1; 168 172 169 173 iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx); 170 174 if (!iv)
+2
net/netfilter/Kconfig
··· 863 863 depends on NETFILTER_XTABLES 864 864 depends on NETFILTER_ADVANCED 865 865 depends on (IPV6 || IPV6=n) 866 + depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n) 866 867 depends on IP_NF_MANGLE 867 868 select NF_DEFRAG_IPV4 868 869 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES ··· 1357 1356 depends on NETFILTER_ADVANCED 1358 1357 depends on !NF_CONNTRACK || NF_CONNTRACK 1359 1358 depends on (IPV6 || IPV6=n) 1359 + depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n) 1360 1360 select NF_DEFRAG_IPV4 1361 1361 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES 1362 1362 help
+3
net/netfilter/ipvs/ip_vs_ctl.c
··· 3823 3823 cancel_work_sync(&ipvs->defense_work.work); 3824 3824 unregister_net_sysctl_table(ipvs->sysctl_hdr); 3825 3825 ip_vs_stop_estimator(net, &ipvs->tot_stats); 3826 + 3827 + if (!net_eq(net, &init_net)) 3828 + kfree(ipvs->sysctl_tbl); 3826 3829 } 3827 3830 3828 3831 #else
+32 -3
net/netfilter/nf_conntrack_proto_tcp.c
··· 202 202 * sES -> sES :-) 203 203 * sFW -> sCW Normal close request answered by ACK. 204 204 * sCW -> sCW 205 - * sLA -> sTW Last ACK detected. 205 + * sLA -> sTW Last ACK detected (RFC5961 challenged) 206 206 * sTW -> sTW Retransmitted last ACK. Remain in the same state. 207 207 * sCL -> sCL 208 208 */ ··· 261 261 * sES -> sES :-) 262 262 * sFW -> sCW Normal close request answered by ACK. 263 263 * sCW -> sCW 264 - * sLA -> sTW Last ACK detected. 264 + * sLA -> sTW Last ACK detected (RFC5961 challenged) 265 265 * sTW -> sTW Retransmitted last ACK. 266 266 * sCL -> sCL 267 267 */ ··· 906 906 1 : ct->proto.tcp.last_win; 907 907 ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale = 908 908 ct->proto.tcp.last_wscale; 909 + ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK; 909 910 ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags = 910 911 ct->proto.tcp.last_flags; 911 912 memset(&ct->proto.tcp.seen[dir], 0, ··· 924 923 * may be in sync but we are not. In that case, we annotate 925 924 * the TCP options and let the packet go through. If it is a 926 925 * valid SYN packet, the server will reply with a SYN/ACK, and 927 - * then we'll get in sync. Otherwise, the server ignores it. */ 926 + * then we'll get in sync. Otherwise, the server potentially 927 + * responds with a challenge ACK if implementing RFC5961. 928 + */ 928 929 if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) { 929 930 struct ip_ct_tcp_state seen = {}; 930 931 ··· 942 939 ct->proto.tcp.last_flags |= 943 940 IP_CT_TCP_FLAG_SACK_PERM; 944 941 } 942 + /* Mark the potential for RFC5961 challenge ACK, 943 + * this pose a special problem for LAST_ACK state 944 + * as ACK is intrepretated as ACKing last FIN. 
945 + */ 946 + if (old_state == TCP_CONNTRACK_LAST_ACK) 947 + ct->proto.tcp.last_flags |= 948 + IP_CT_EXP_CHALLENGE_ACK; 945 949 } 946 950 spin_unlock_bh(&ct->lock); 947 951 if (LOG_INVALID(net, IPPROTO_TCP)) ··· 980 970 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, 981 971 "nf_ct_tcp: invalid state "); 982 972 return -NF_ACCEPT; 973 + case TCP_CONNTRACK_TIME_WAIT: 974 + /* RFC5961 compliance cause stack to send "challenge-ACK" 975 + * e.g. in response to spurious SYNs. Conntrack MUST 976 + * not believe this ACK is acking last FIN. 977 + */ 978 + if (old_state == TCP_CONNTRACK_LAST_ACK && 979 + index == TCP_ACK_SET && 980 + ct->proto.tcp.last_dir != dir && 981 + ct->proto.tcp.last_index == TCP_SYN_SET && 982 + (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) { 983 + /* Detected RFC5961 challenge ACK */ 984 + ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK; 985 + spin_unlock_bh(&ct->lock); 986 + if (LOG_INVALID(net, IPPROTO_TCP)) 987 + nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, 988 + "nf_ct_tcp: challenge-ACK ignored "); 989 + return NF_ACCEPT; /* Don't change state */ 990 + } 991 + break; 983 992 case TCP_CONNTRACK_CLOSE: 984 993 if (index == TCP_RST_SET 985 994 && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
+2 -2
net/netfilter/nf_tables_api.c
··· 4472 4472 */ 4473 4473 void nft_data_uninit(const struct nft_data *data, enum nft_data_types type) 4474 4474 { 4475 - switch (type) { 4476 - case NFT_DATA_VALUE: 4475 + if (type < NFT_DATA_VERDICT) 4477 4476 return; 4477 + switch (type) { 4478 4478 case NFT_DATA_VERDICT: 4479 4479 return nft_verdict_uninit(data); 4480 4480 default:
+2 -1
net/netlink/af_netlink.c
··· 89 89 return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET; 90 90 } 91 91 92 - struct netlink_table *nl_table; 92 + struct netlink_table *nl_table __read_mostly; 93 93 EXPORT_SYMBOL_GPL(nl_table); 94 94 95 95 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); ··· 1081 1081 if (err) { 1082 1082 if (err == -EEXIST) 1083 1083 err = -EADDRINUSE; 1084 + nlk_sk(sk)->portid = 0; 1084 1085 sock_put(sk); 1085 1086 } 1086 1087
+5
net/sched/cls_api.c
··· 81 81 struct tcf_proto_ops *t; 82 82 int rc = -ENOENT; 83 83 84 + /* Wait for outstanding call_rcu()s, if any, from a 85 + * tcf_proto_ops's destroy() handler. 86 + */ 87 + rcu_barrier(); 88 + 84 89 write_lock(&cls_mod_lock); 85 90 list_for_each_entry(t, &tcf_proto_base, head) { 86 91 if (t == ops) {
+6 -4
net/sched/sch_api.c
··· 815 815 if (dev->flags & IFF_UP) 816 816 dev_deactivate(dev); 817 817 818 - if (new && new->ops->attach) { 819 - new->ops->attach(new); 820 - num_q = 0; 821 - } 818 + if (new && new->ops->attach) 819 + goto skip; 822 820 823 821 for (i = 0; i < num_q; i++) { 824 822 struct netdev_queue *dev_queue = dev_ingress_queue(dev); ··· 832 834 qdisc_destroy(old); 833 835 } 834 836 837 + skip: 835 838 if (!ingress) { 836 839 notify_and_destroy(net, skb, n, classid, 837 840 dev->qdisc, new); 838 841 if (new && !new->ops->attach) 839 842 atomic_inc(&new->refcnt); 840 843 dev->qdisc = new ? : &noop_qdisc; 844 + 845 + if (new && new->ops->attach) 846 + new->ops->attach(new); 841 847 } else { 842 848 notify_and_destroy(net, skb, n, classid, old, new); 843 849 }
+3 -3
net/switchdev/switchdev.c
··· 338 338 fi, tos, type, nlflags, 339 339 tb_id); 340 340 if (!err) 341 - fi->fib_flags |= RTNH_F_EXTERNAL; 341 + fi->fib_flags |= RTNH_F_OFFLOAD; 342 342 } 343 343 344 344 return err; ··· 364 364 const struct swdev_ops *ops; 365 365 int err = 0; 366 366 367 - if (!(fi->fib_flags & RTNH_F_EXTERNAL)) 367 + if (!(fi->fib_flags & RTNH_F_OFFLOAD)) 368 368 return 0; 369 369 370 370 dev = netdev_switch_get_dev_by_nhs(fi); ··· 376 376 err = ops->swdev_fib_ipv4_del(dev, htonl(dst), dst_len, 377 377 fi, tos, type, tb_id); 378 378 if (!err) 379 - fi->fib_flags &= ~RTNH_F_EXTERNAL; 379 + fi->fib_flags &= ~RTNH_F_OFFLOAD; 380 380 } 381 381 382 382 return err;
+8
net/unix/af_unix.c
··· 1880 1880 unix_state_unlock(sk); 1881 1881 timeo = freezable_schedule_timeout(timeo); 1882 1882 unix_state_lock(sk); 1883 + 1884 + if (sock_flag(sk, SOCK_DEAD)) 1885 + break; 1886 + 1883 1887 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1884 1888 } 1885 1889 ··· 1943 1939 struct sk_buff *skb, *last; 1944 1940 1945 1941 unix_state_lock(sk); 1942 + if (sock_flag(sk, SOCK_DEAD)) { 1943 + err = -ECONNRESET; 1944 + goto unlock; 1945 + } 1946 1946 last = skb = skb_peek(&sk->sk_receive_queue); 1947 1947 again: 1948 1948 if (skb == NULL) {
+16 -1
net/xfrm/xfrm_input.c
··· 13 13 #include <net/dst.h> 14 14 #include <net/ip.h> 15 15 #include <net/xfrm.h> 16 + #include <net/ip_tunnels.h> 17 + #include <net/ip6_tunnel.h> 16 18 17 19 static struct kmem_cache *secpath_cachep __read_mostly; 18 20 ··· 188 186 struct xfrm_state *x = NULL; 189 187 xfrm_address_t *daddr; 190 188 struct xfrm_mode *inner_mode; 189 + u32 mark = skb->mark; 191 190 unsigned int family; 192 191 int decaps = 0; 193 192 int async = 0; ··· 205 202 daddr = (xfrm_address_t *)(skb_network_header(skb) + 206 203 XFRM_SPI_SKB_CB(skb)->daddroff); 207 204 family = XFRM_SPI_SKB_CB(skb)->family; 205 + 206 + /* if tunnel is present override skb->mark value with tunnel i_key */ 207 + if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) { 208 + switch (family) { 209 + case AF_INET: 210 + mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key); 211 + break; 212 + case AF_INET6: 213 + mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key); 214 + break; 215 + } 216 + } 208 217 209 218 /* Allocate new secpath or COW existing one. */ 210 219 if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { ··· 244 229 goto drop; 245 230 } 246 231 247 - x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family); 232 + x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family); 248 233 if (x == NULL) { 249 234 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES); 250 235 xfrm_audit_state_notfound(skb, family, spi, seq);
+2
net/xfrm/xfrm_replay.c
··· 99 99 100 100 if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { 101 101 XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq; 102 + XFRM_SKB_CB(skb)->seq.output.hi = 0; 102 103 if (unlikely(x->replay.oseq == 0)) { 103 104 x->replay.oseq--; 104 105 xfrm_audit_state_replay_overflow(x, skb); ··· 178 177 179 178 if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { 180 179 XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq; 180 + XFRM_SKB_CB(skb)->seq.output.hi = 0; 181 181 if (unlikely(replay_esn->oseq == 0)) { 182 182 replay_esn->oseq--; 183 183 xfrm_audit_state_replay_overflow(x, skb);
+1 -1
net/xfrm/xfrm_state.c
··· 927 927 x->id.spi != spi) 928 928 continue; 929 929 930 - spin_unlock_bh(&net->xfrm.xfrm_state_lock); 931 930 xfrm_state_hold(x); 931 + spin_unlock_bh(&net->xfrm.xfrm_state_lock); 932 932 return x; 933 933 } 934 934 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+1 -8
scripts/gdb/linux/modules.py
··· 73 73 " " if utils.get_long_type().sizeof == 8 else "")) 74 74 75 75 for module in module_list(): 76 - ref = 0 77 - module_refptr = module['refptr'] 78 - for cpu in cpus.cpu_list("cpu_possible_mask"): 79 - refptr = cpus.per_cpu(module_refptr, cpu) 80 - ref += refptr['incs'] 81 - ref -= refptr['decs'] 82 - 83 76 gdb.write("{address} {name:<19} {size:>8} {ref}".format( 84 77 address=str(module['module_core']).split()[0], 85 78 name=module['name'].string(), 86 79 size=str(module['core_size']), 87 - ref=str(ref))) 80 + ref=str(module['refcnt']['counter']))) 88 81 89 82 source_list = module['source_list'] 90 83 t = self._module_use_type.get_type().pointer()
-1
sound/atmel/ac97c.c
··· 916 916 { 917 917 struct ac97c_platform_data *pdata; 918 918 struct device_node *node = dev->of_node; 919 - const struct of_device_id *match; 920 919 921 920 if (!node) { 922 921 dev_err(dev, "Device does not have associated DT data\n");
+1 -1
sound/core/pcm_lib.c
··· 339 339 if (delta > new_hw_ptr) { 340 340 /* check for double acknowledged interrupts */ 341 341 hdelta = curr_jiffies - runtime->hw_ptr_jiffies; 342 - if (hdelta > runtime->hw_ptr_buffer_jiffies/2) { 342 + if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) { 343 343 hw_base += runtime->buffer_size; 344 344 if (hw_base >= runtime->boundary) { 345 345 hw_base = 0;
+12 -1
sound/pci/hda/hda_generic.c
··· 844 844 snd_hda_codec_write(codec, nid, 0, 845 845 AC_VERB_SET_POWER_STATE, state); 846 846 changed = nid; 847 + /* all known codecs seem to be capable to handl 848 + * widgets state even in D3, so far. 849 + * if any new codecs need to restore the widget 850 + * states after D0 transition, call the function 851 + * below. 852 + */ 853 + #if 0 /* disabled */ 847 854 if (state == AC_PWRST_D0) 848 855 snd_hdac_regmap_sync_node(&codec->core, nid); 856 + #endif 849 857 } 850 858 } 851 859 return changed; ··· 4926 4918 dig_only: 4927 4919 parse_digital(codec); 4928 4920 4929 - if (spec->power_down_unused || codec->power_save_node) 4921 + if (spec->power_down_unused || codec->power_save_node) { 4930 4922 if (!codec->power_filter) 4931 4923 codec->power_filter = snd_hda_gen_path_power_filter; 4924 + if (!codec->patch_ops.stream_pm) 4925 + codec->patch_ops.stream_pm = snd_hda_gen_stream_pm; 4926 + } 4932 4927 4933 4928 if (!spec->no_analog && spec->beep_nid) { 4934 4929 err = snd_hda_attach_beep_device(codec, spec->beep_nid);
+2
sound/pci/hda/hda_intel.c
··· 2089 2089 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2090 2090 { PCI_DEVICE(0x1002, 0xaab0), 2091 2091 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2092 + { PCI_DEVICE(0x1002, 0xaac8), 2093 + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2092 2094 /* VIA VT8251/VT8237A */ 2093 2095 { PCI_DEVICE(0x1106, 0x3288), 2094 2096 .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
+12
sound/pci/hda/patch_conexant.c
··· 968 968 .patch = patch_conexant_auto }, 969 969 { .id = 0x14f150b9, .name = "CX20665", 970 970 .patch = patch_conexant_auto }, 971 + { .id = 0x14f150f1, .name = "CX20721", 972 + .patch = patch_conexant_auto }, 973 + { .id = 0x14f150f2, .name = "CX20722", 974 + .patch = patch_conexant_auto }, 975 + { .id = 0x14f150f3, .name = "CX20723", 976 + .patch = patch_conexant_auto }, 977 + { .id = 0x14f150f4, .name = "CX20724", 978 + .patch = patch_conexant_auto }, 971 979 { .id = 0x14f1510f, .name = "CX20751/2", 972 980 .patch = patch_conexant_auto }, 973 981 { .id = 0x14f15110, .name = "CX20751/2", ··· 1010 1002 MODULE_ALIAS("snd-hda-codec-id:14f150ac"); 1011 1003 MODULE_ALIAS("snd-hda-codec-id:14f150b8"); 1012 1004 MODULE_ALIAS("snd-hda-codec-id:14f150b9"); 1005 + MODULE_ALIAS("snd-hda-codec-id:14f150f1"); 1006 + MODULE_ALIAS("snd-hda-codec-id:14f150f2"); 1007 + MODULE_ALIAS("snd-hda-codec-id:14f150f3"); 1008 + MODULE_ALIAS("snd-hda-codec-id:14f150f4"); 1013 1009 MODULE_ALIAS("snd-hda-codec-id:14f1510f"); 1014 1010 MODULE_ALIAS("snd-hda-codec-id:14f15110"); 1015 1011 MODULE_ALIAS("snd-hda-codec-id:14f15111");
+63 -3
sound/pci/hda/patch_realtek.c
··· 883 883 { 0x10ec0668, 0x1028, 0, "ALC3661" }, 884 884 { 0x10ec0275, 0x1028, 0, "ALC3260" }, 885 885 { 0x10ec0899, 0x1028, 0, "ALC3861" }, 886 + { 0x10ec0298, 0x1028, 0, "ALC3266" }, 887 + { 0x10ec0256, 0x1028, 0, "ALC3246" }, 886 888 { 0x10ec0670, 0x1025, 0, "ALC669X" }, 887 889 { 0x10ec0676, 0x1025, 0, "ALC679X" }, 888 890 { 0x10ec0282, 0x1043, 0, "ALC3229" }, ··· 3675 3673 alc_process_coef_fw(codec, coef0293); 3676 3674 snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); 3677 3675 break; 3676 + case 0x10ec0662: 3677 + snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); 3678 + snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); 3679 + break; 3678 3680 case 0x10ec0668: 3679 3681 alc_write_coef_idx(codec, 0x11, 0x0001); 3680 3682 snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); ··· 3743 3737 case 0x10ec0286: 3744 3738 case 0x10ec0288: 3745 3739 alc_process_coef_fw(codec, coef0288); 3746 - break; 3747 3740 break; 3748 3741 case 0x10ec0292: 3749 3742 alc_process_coef_fw(codec, coef0292); ··· 4017 4012 if (new_headset_mode != ALC_HEADSET_MODE_MIC) { 4018 4013 snd_hda_set_pin_ctl_cache(codec, hp_pin, 4019 4014 AC_PINCTL_OUT_EN | AC_PINCTL_HP_EN); 4020 - if (spec->headphone_mic_pin) 4015 + if (spec->headphone_mic_pin && spec->headphone_mic_pin != hp_pin) 4021 4016 snd_hda_set_pin_ctl_cache(codec, spec->headphone_mic_pin, 4022 4017 PIN_VREFHIZ); 4023 4018 } ··· 4218 4213 } 4219 4214 break; 4220 4215 } 4216 + } 4217 + 4218 + static void alc_fixup_headset_mode_alc662(struct hda_codec *codec, 4219 + const struct hda_fixup *fix, int action) 4220 + { 4221 + struct alc_spec *spec = codec->spec; 4222 + 4223 + if (action == HDA_FIXUP_ACT_PRE_PROBE) { 4224 + spec->parse_flags |= HDA_PINCFG_HEADSET_MIC; 4225 + spec->gen.hp_mic = 1; /* Mic-in is same pin as headphone */ 4226 + 4227 + /* Disable boost for mic-in permanently. (This code is only called 4228 + from quirks that guarantee that the headphone is at NID 0x1b.) 
*/ 4229 + snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000); 4230 + snd_hda_override_wcaps(codec, 0x1b, get_wcaps(codec, 0x1b) & ~AC_WCAP_IN_AMP); 4231 + } else 4232 + alc_fixup_headset_mode(codec, fix, action); 4221 4233 } 4222 4234 4223 4235 static void alc_fixup_headset_mode_alc668(struct hda_codec *codec, ··· 5141 5119 SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX), 5142 5120 SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), 5143 5121 SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), 5122 + SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN), 5144 5123 SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), 5145 5124 SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), 5146 5125 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC), ··· 5171 5148 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5172 5149 SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK), 5173 5150 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), 5151 + SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK), 5174 5152 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5175 5153 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 5176 5154 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), ··· 5369 5345 {0x17, 0x40000000}, 5370 5346 {0x1d, 0x40700001}, 5371 5347 {0x21, 0x02211050}), 5348 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5548", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5349 + ALC255_STANDARD_PINS, 5350 + {0x12, 0x90a60180}, 5351 + {0x14, 0x90170130}, 5352 + {0x17, 0x40000000}, 5353 + {0x1d, 0x40700001}, 5354 + {0x21, 0x02211040}), 5372 5355 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5373 
5356 ALC256_STANDARD_PINS, 5374 5357 {0x13, 0x40000000}), ··· 5629 5598 5630 5599 spec = codec->spec; 5631 5600 spec->gen.shared_mic_vref_pin = 0x18; 5632 - codec->power_save_node = 1; 5601 + if (codec->core.vendor_id != 0x10ec0292) 5602 + codec->power_save_node = 1; 5633 5603 5634 5604 snd_hda_pick_fixup(codec, alc269_fixup_models, 5635 5605 alc269_fixup_tbl, alc269_fixups); ··· 6111 6079 ALC662_FIXUP_NO_JACK_DETECT, 6112 6080 ALC662_FIXUP_ZOTAC_Z68, 6113 6081 ALC662_FIXUP_INV_DMIC, 6082 + ALC662_FIXUP_DELL_MIC_NO_PRESENCE, 6114 6083 ALC668_FIXUP_DELL_MIC_NO_PRESENCE, 6084 + ALC662_FIXUP_HEADSET_MODE, 6115 6085 ALC668_FIXUP_HEADSET_MODE, 6116 6086 ALC662_FIXUP_BASS_MODE4_CHMAP, 6117 6087 ALC662_FIXUP_BASS_16, ··· 6306 6272 .chained = true, 6307 6273 .chain_id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE 6308 6274 }, 6275 + [ALC662_FIXUP_DELL_MIC_NO_PRESENCE] = { 6276 + .type = HDA_FIXUP_PINS, 6277 + .v.pins = (const struct hda_pintbl[]) { 6278 + { 0x19, 0x03a1113c }, /* use as headset mic, without its own jack detect */ 6279 + /* headphone mic by setting pin control of 0x1b (headphone out) to in + vref_50 */ 6280 + { } 6281 + }, 6282 + .chained = true, 6283 + .chain_id = ALC662_FIXUP_HEADSET_MODE 6284 + }, 6285 + [ALC662_FIXUP_HEADSET_MODE] = { 6286 + .type = HDA_FIXUP_FUNC, 6287 + .v.func = alc_fixup_headset_mode_alc662, 6288 + }, 6309 6289 [ALC668_FIXUP_DELL_MIC_NO_PRESENCE] = { 6310 6290 .type = HDA_FIXUP_PINS, 6311 6291 .v.pins = (const struct hda_pintbl[]) { ··· 6471 6423 }; 6472 6424 6473 6425 static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = { 6426 + SND_HDA_PIN_QUIRK(0x10ec0662, 0x1028, "Dell", ALC662_FIXUP_DELL_MIC_NO_PRESENCE, 6427 + {0x12, 0x4004c000}, 6428 + {0x14, 0x01014010}, 6429 + {0x15, 0x411111f0}, 6430 + {0x16, 0x411111f0}, 6431 + {0x18, 0x01a19020}, 6432 + {0x19, 0x411111f0}, 6433 + {0x1a, 0x0181302f}, 6434 + {0x1b, 0x0221401f}, 6435 + {0x1c, 0x411111f0}, 6436 + {0x1d, 0x4054c601}, 6437 + {0x1e, 0x411111f0}), 6474 6438 
SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE, 6475 6439 {0x12, 0x99a30130}, 6476 6440 {0x14, 0x90170110},
+2 -2
sound/pci/hda/patch_sigmatel.c
··· 4403 4403 #ifdef CONFIG_PM 4404 4404 .suspend = stac_suspend, 4405 4405 #endif 4406 - .stream_pm = snd_hda_gen_stream_pm, 4407 4406 .reboot_notify = stac_shutup, 4408 4407 }; 4409 4408 ··· 4696 4697 return err; 4697 4698 4698 4699 spec = codec->spec; 4699 - codec->power_save_node = 1; 4700 + /* disabled power_save_node since it causes noises on a Dell machine */ 4701 + /* codec->power_save_node = 1; */ 4700 4702 spec->linear_tone_beep = 0; 4701 4703 spec->gen.own_eapd_ctl = 1; 4702 4704 spec->gen.power_down_unused = 1;
-1
sound/pci/hda/thinkpad_helper.c
··· 72 72 if (led_set_func(TPACPI_LED_MUTE, false) >= 0) { 73 73 old_vmaster_hook = spec->vmaster_mute.hook; 74 74 spec->vmaster_mute.hook = update_tpacpi_mute_led; 75 - spec->vmaster_mute_enum = 1; 76 75 removefunc = false; 77 76 } 78 77 if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
+2 -2
sound/soc/codecs/mc13783.c
··· 623 623 AUDIO_SSI_SEL, 0); 624 624 else 625 625 mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_CODEC, 626 - 0, AUDIO_SSI_SEL); 626 + AUDIO_SSI_SEL, AUDIO_SSI_SEL); 627 627 628 628 if (priv->dac_ssi_port == MC13783_SSI1_PORT) 629 629 mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC, 630 630 AUDIO_SSI_SEL, 0); 631 631 else 632 632 mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC, 633 - 0, AUDIO_SSI_SEL); 633 + AUDIO_SSI_SEL, AUDIO_SSI_SEL); 634 634 635 635 return 0; 636 636 }
+1 -1
sound/soc/codecs/uda1380.c
··· 437 437 if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) 438 438 return -EINVAL; 439 439 440 - uda1380_write(codec, UDA1380_IFACE, iface); 440 + uda1380_write_reg_cache(codec, UDA1380_IFACE, iface); 441 441 442 442 return 0; 443 443 }
+1 -1
sound/soc/codecs/wm8960.c
··· 395 395 { "Right Input Mixer", "Boost Switch", "Right Boost Mixer", }, 396 396 { "Right Input Mixer", NULL, "RINPUT1", }, /* Really Boost Switch */ 397 397 { "Right Input Mixer", NULL, "RINPUT2" }, 398 - { "Right Input Mixer", NULL, "LINPUT3" }, 398 + { "Right Input Mixer", NULL, "RINPUT3" }, 399 399 400 400 { "Left ADC", NULL, "Left Input Mixer" }, 401 401 { "Right ADC", NULL, "Right Input Mixer" },
+1 -1
sound/soc/codecs/wm8994.c
··· 2754 2754 }; 2755 2755 2756 2756 static int fs_ratios[] = { 2757 - 64, 128, 192, 256, 348, 512, 768, 1024, 1408, 1536 2757 + 64, 128, 192, 256, 384, 512, 768, 1024, 1408, 1536 2758 2758 }; 2759 2759 2760 2760 static int bclk_divs[] = {
+1 -1
sound/soc/davinci/davinci-mcasp.c
··· 1247 1247 u32 reg; 1248 1248 int i; 1249 1249 1250 - context->pm_state = pm_runtime_enabled(mcasp->dev); 1250 + context->pm_state = pm_runtime_active(mcasp->dev); 1251 1251 if (!context->pm_state) 1252 1252 pm_runtime_get_sync(mcasp->dev); 1253 1253
+8 -3
sound/soc/soc-dapm.c
··· 3100 3100 } 3101 3101 3102 3102 prefix = soc_dapm_prefix(dapm); 3103 - if (prefix) 3103 + if (prefix) { 3104 3104 w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name); 3105 - else 3105 + if (widget->sname) 3106 + w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix, 3107 + widget->sname); 3108 + } else { 3106 3109 w->name = kasprintf(GFP_KERNEL, "%s", widget->name); 3107 - 3110 + if (widget->sname) 3111 + w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname); 3112 + } 3108 3113 if (w->name == NULL) { 3109 3114 kfree(w); 3110 3115 return NULL;
+2
sound/usb/quirks.c
··· 1117 1117 switch (chip->usb_id) { 1118 1118 case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */ 1119 1119 case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */ 1120 + case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */ 1121 + case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */ 1120 1122 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ 1121 1123 return true; 1122 1124 }
+2
tools/net/bpf_jit_disasm.c
··· 123 123 assert(ret == 0); 124 124 125 125 ptr = haystack; 126 + memset(pmatch, 0, sizeof(pmatch)); 127 + 126 128 while (1) { 127 129 ret = regexec(&regex, ptr, 1, pmatch, 0); 128 130 if (ret == 0) {
+184 -40
tools/power/x86/turbostat/turbostat.c
··· 52 52 unsigned int skip_c1; 53 53 unsigned int do_nhm_cstates; 54 54 unsigned int do_snb_cstates; 55 + unsigned int do_knl_cstates; 55 56 unsigned int do_pc2; 56 57 unsigned int do_pc3; 57 58 unsigned int do_pc6; ··· 92 91 unsigned int do_ring_perf_limit_reasons; 93 92 unsigned int crystal_hz; 94 93 unsigned long long tsc_hz; 94 + int base_cpu; 95 95 96 96 #define RAPL_PKG (1 << 0) 97 97 /* 0x610 MSR_PKG_POWER_LIMIT */ ··· 318 316 319 317 if (do_nhm_cstates) 320 318 outp += sprintf(outp, " CPU%%c1"); 321 - if (do_nhm_cstates && !do_slm_cstates) 319 + if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) 322 320 outp += sprintf(outp, " CPU%%c3"); 323 321 if (do_nhm_cstates) 324 322 outp += sprintf(outp, " CPU%%c6"); ··· 548 546 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 549 547 goto done; 550 548 551 - if (do_nhm_cstates && !do_slm_cstates) 549 + if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) 552 550 outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc); 553 551 if (do_nhm_cstates) 554 552 outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc); ··· 1020 1018 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 1021 1019 return 0; 1022 1020 1023 - if (do_nhm_cstates && !do_slm_cstates) { 1021 + if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) { 1024 1022 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) 1025 1023 return -6; 1026 1024 } 1027 1025 1028 - if (do_nhm_cstates) { 1026 + if (do_nhm_cstates && !do_knl_cstates) { 1029 1027 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) 1028 + return -7; 1029 + } else if (do_knl_cstates) { 1030 + if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) 1030 1031 return -7; 1031 1032 } 1032 1033 ··· 1155 1150 unsigned long long msr; 1156 1151 unsigned int ratio; 1157 1152 1158 - get_msr(0, MSR_NHM_PLATFORM_INFO, &msr); 1153 + get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); 1159 1154 1160 1155 fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr); 1161 1156 ··· 1167 1162 fprintf(stderr, "%d * %.0f = 
%.0f MHz base frequency\n", 1168 1163 ratio, bclk, ratio * bclk); 1169 1164 1170 - get_msr(0, MSR_IA32_POWER_CTL, &msr); 1165 + get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr); 1171 1166 fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n", 1172 1167 msr, msr & 0x2 ? "EN" : "DIS"); 1173 1168 ··· 1180 1175 unsigned long long msr; 1181 1176 unsigned int ratio; 1182 1177 1183 - get_msr(0, MSR_TURBO_RATIO_LIMIT2, &msr); 1178 + get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr); 1184 1179 1185 1180 fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", msr); 1186 1181 ··· 1202 1197 unsigned long long msr; 1203 1198 unsigned int ratio; 1204 1199 1205 - get_msr(0, MSR_TURBO_RATIO_LIMIT1, &msr); 1200 + get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr); 1206 1201 1207 1202 fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", msr); 1208 1203 ··· 1254 1249 unsigned long long msr; 1255 1250 unsigned int ratio; 1256 1251 1257 - get_msr(0, MSR_TURBO_RATIO_LIMIT, &msr); 1252 + get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr); 1258 1253 1259 1254 fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", msr); 1260 1255 ··· 1301 1296 } 1302 1297 1303 1298 static void 1299 + dump_knl_turbo_ratio_limits(void) 1300 + { 1301 + int cores; 1302 + unsigned int ratio; 1303 + unsigned long long msr; 1304 + int delta_cores; 1305 + int delta_ratio; 1306 + int i; 1307 + 1308 + get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr); 1309 + 1310 + fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n", 1311 + msr); 1312 + 1313 + /** 1314 + * Turbo encoding in KNL is as follows: 1315 + * [7:0] -- Base value of number of active cores of bucket 1. 1316 + * [15:8] -- Base value of freq ratio of bucket 1. 1317 + * [20:16] -- +ve delta of number of active cores of bucket 2. 1318 + * i.e. active cores of bucket 2 = 1319 + * active cores of bucket 1 + delta 1320 + * [23:21] -- Negative delta of freq ratio of bucket 2. 1321 + * i.e. 
freq ratio of bucket 2 = 1322 + * freq ratio of bucket 1 - delta 1323 + * [28:24]-- +ve delta of number of active cores of bucket 3. 1324 + * [31:29]-- -ve delta of freq ratio of bucket 3. 1325 + * [36:32]-- +ve delta of number of active cores of bucket 4. 1326 + * [39:37]-- -ve delta of freq ratio of bucket 4. 1327 + * [44:40]-- +ve delta of number of active cores of bucket 5. 1328 + * [47:45]-- -ve delta of freq ratio of bucket 5. 1329 + * [52:48]-- +ve delta of number of active cores of bucket 6. 1330 + * [55:53]-- -ve delta of freq ratio of bucket 6. 1331 + * [60:56]-- +ve delta of number of active cores of bucket 7. 1332 + * [63:61]-- -ve delta of freq ratio of bucket 7. 1333 + */ 1334 + cores = msr & 0xFF; 1335 + ratio = (msr >> 8) && 0xFF; 1336 + if (ratio > 0) 1337 + fprintf(stderr, 1338 + "%d * %.0f = %.0f MHz max turbo %d active cores\n", 1339 + ratio, bclk, ratio * bclk, cores); 1340 + 1341 + for (i = 16; i < 64; i = i + 8) { 1342 + delta_cores = (msr >> i) & 0x1F; 1343 + delta_ratio = (msr >> (i + 5)) && 0x7; 1344 + if (!delta_cores || !delta_ratio) 1345 + return; 1346 + cores = cores + delta_cores; 1347 + ratio = ratio - delta_ratio; 1348 + 1349 + /** -ve ratios will make successive ratio calculations 1350 + * negative. Hence return instead of carrying on. 
1351 + */ 1352 + if (ratio > 0) 1353 + fprintf(stderr, 1354 + "%d * %.0f = %.0f MHz max turbo %d active cores\n", 1355 + ratio, bclk, ratio * bclk, cores); 1356 + } 1357 + } 1358 + 1359 + static void 1304 1360 dump_nhm_cst_cfg(void) 1305 1361 { 1306 1362 unsigned long long msr; 1307 1363 1308 - get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); 1364 + get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); 1309 1365 1310 1366 #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) 1311 1367 #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) ··· 1447 1381 } 1448 1382 1449 1383 /* 1450 - * cpu_is_first_sibling_in_core(cpu) 1451 - * return 1 if given CPU is 1st HT sibling in the core 1384 + * get_cpu_position_in_core(cpu) 1385 + * return the position of the CPU among its HT siblings in the core 1386 + * return -1 if the sibling is not in list 1452 1387 */ 1453 - int cpu_is_first_sibling_in_core(int cpu) 1388 + int get_cpu_position_in_core(int cpu) 1454 1389 { 1455 - return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); 1390 + char path[64]; 1391 + FILE *filep; 1392 + int this_cpu; 1393 + char character; 1394 + int i; 1395 + 1396 + sprintf(path, 1397 + "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", 1398 + cpu); 1399 + filep = fopen(path, "r"); 1400 + if (filep == NULL) { 1401 + perror(path); 1402 + exit(1); 1403 + } 1404 + 1405 + for (i = 0; i < topo.num_threads_per_core; i++) { 1406 + fscanf(filep, "%d", &this_cpu); 1407 + if (this_cpu == cpu) { 1408 + fclose(filep); 1409 + return i; 1410 + } 1411 + 1412 + /* Account for no separator after last thread*/ 1413 + if (i != (topo.num_threads_per_core - 1)) 1414 + fscanf(filep, "%c", &character); 1415 + } 1416 + 1417 + fclose(filep); 1418 + return -1; 1456 1419 } 1457 1420 1458 1421 /* ··· 1507 1412 { 1508 1413 char path[80]; 1509 1414 FILE *filep; 1510 - int sib1, sib2; 1511 - int matches; 1415 + int sib1; 1416 + int matches = 0; 1512 1417 char character; 1418 + char str[100]; 1419 + char *ch; 
1513 1420 1514 1421 sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); 1515 1422 filep = fopen_or_die(path, "r"); 1423 + 1516 1424 /* 1517 1425 * file format: 1518 - * if a pair of number with a character between: 2 siblings (eg. 1-2, or 1,4) 1519 - * otherwinse 1 sibling (self). 1426 + * A ',' separated or '-' separated set of numbers 1427 + * (eg 1-2 or 1,3,4,5) 1520 1428 */ 1521 - matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2); 1429 + fscanf(filep, "%d%c\n", &sib1, &character); 1430 + fseek(filep, 0, SEEK_SET); 1431 + fgets(str, 100, filep); 1432 + ch = strchr(str, character); 1433 + while (ch != NULL) { 1434 + matches++; 1435 + ch = strchr(ch+1, character); 1436 + } 1522 1437 1523 1438 fclose(filep); 1524 - 1525 - if (matches == 3) 1526 - return 2; 1527 - else 1528 - return 1; 1439 + return matches+1; 1529 1440 } 1530 1441 1531 1442 /* ··· 1695 1594 void check_dev_msr() 1696 1595 { 1697 1596 struct stat sb; 1597 + char pathname[32]; 1698 1598 1699 - if (stat("/dev/cpu/0/msr", &sb)) 1599 + sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); 1600 + if (stat(pathname, &sb)) 1700 1601 if (system("/sbin/modprobe msr > /dev/null 2>&1")) 1701 1602 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); 1702 1603 } ··· 1711 1608 cap_user_data_t cap_data = &cap_data_data; 1712 1609 extern int capget(cap_user_header_t hdrp, cap_user_data_t datap); 1713 1610 int do_exit = 0; 1611 + char pathname[32]; 1714 1612 1715 1613 /* check for CAP_SYS_RAWIO */ 1716 1614 cap_header->pid = getpid(); ··· 1726 1622 } 1727 1623 1728 1624 /* test file permissions */ 1729 - if (euidaccess("/dev/cpu/0/msr", R_OK)) { 1625 + sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); 1626 + if (euidaccess(pathname, R_OK)) { 1730 1627 do_exit++; 1731 1628 warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr"); 1732 1629 } ··· 1809 1704 default: 1810 1705 return 0; 1811 1706 } 1812 - get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); 1707 + 
get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); 1813 1708 1814 1709 pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; 1815 1710 ··· 1858 1753 } 1859 1754 } 1860 1755 1756 + int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model) 1757 + { 1758 + if (!genuine_intel) 1759 + return 0; 1760 + 1761 + if (family != 6) 1762 + return 0; 1763 + 1764 + switch (model) { 1765 + case 0x57: /* Knights Landing */ 1766 + return 1; 1767 + default: 1768 + return 0; 1769 + } 1770 + } 1861 1771 static void 1862 1772 dump_cstate_pstate_config_info(family, model) 1863 1773 { ··· 1889 1769 1890 1770 if (has_nhm_turbo_ratio_limit(family, model)) 1891 1771 dump_nhm_turbo_ratio_limits(); 1772 + 1773 + if (has_knl_turbo_ratio_limit(family, model)) 1774 + dump_knl_turbo_ratio_limits(); 1892 1775 1893 1776 dump_nhm_cst_cfg(); 1894 1777 } ··· 1924 1801 if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr)) 1925 1802 return 0; 1926 1803 1927 - switch (msr & 0x7) { 1804 + switch (msr & 0xF) { 1928 1805 case ENERGY_PERF_BIAS_PERFORMANCE: 1929 1806 epb_string = "performance"; 1930 1807 break; ··· 2048 1925 unsigned long long msr; 2049 1926 2050 1927 if (do_rapl & RAPL_PKG_POWER_INFO) 2051 - if (!get_msr(0, MSR_PKG_POWER_INFO, &msr)) 1928 + if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr)) 2052 1929 return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; 2053 1930 2054 1931 switch (model) { ··· 2073 1950 case 0x3F: /* HSX */ 2074 1951 case 0x4F: /* BDX */ 2075 1952 case 0x56: /* BDX-DE */ 1953 + case 0x57: /* KNL */ 2076 1954 return (rapl_dram_energy_units = 15.3 / 1000000); 2077 1955 default: 2078 1956 return (rapl_energy_units); ··· 2115 1991 case 0x3F: /* HSX */ 2116 1992 case 0x4F: /* BDX */ 2117 1993 case 0x56: /* BDX-DE */ 1994 + case 0x57: /* KNL */ 2118 1995 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; 2119 1996 break; 2120 1997 case 0x2D: ··· 2131 2006 } 2132 2007 2133 2008 /* units on 
package 0, verify later other packages match */ 2134 - if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr)) 2009 + if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr)) 2135 2010 return; 2136 2011 2137 2012 rapl_power_units = 1.0 / (1 << (msr & 0xF)); ··· 2456 2331 return 0; 2457 2332 } 2458 2333 2334 + int is_knl(unsigned int family, unsigned int model) 2335 + { 2336 + if (!genuine_intel) 2337 + return 0; 2338 + switch (model) { 2339 + case 0x57: /* KNL */ 2340 + return 1; 2341 + } 2342 + return 0; 2343 + } 2344 + 2459 2345 #define SLM_BCLK_FREQS 5 2460 2346 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; 2461 2347 ··· 2476 2340 unsigned int i; 2477 2341 double freq; 2478 2342 2479 - if (get_msr(0, MSR_FSB_FREQ, &msr)) 2343 + if (get_msr(base_cpu, MSR_FSB_FREQ, &msr)) 2480 2344 fprintf(stderr, "SLM BCLK: unknown\n"); 2481 2345 2482 2346 i = msr & 0xf; ··· 2544 2408 if (!do_nhm_platform_info) 2545 2409 goto guess; 2546 2410 2547 - if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr)) 2411 + if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr)) 2548 2412 goto guess; 2549 2413 2550 2414 target_c_local = (msr >> 16) & 0xFF; ··· 2677 2541 do_c8_c9_c10 = has_hsw_msrs(family, model); 2678 2542 do_skl_residency = has_skl_msrs(family, model); 2679 2543 do_slm_cstates = is_slm(family, model); 2544 + do_knl_cstates = is_knl(family, model); 2680 2545 bclk = discover_bclk(family, model); 2681 2546 2682 2547 rapl_probe(family, model); ··· 2892 2755 2893 2756 my_package_id = get_physical_package_id(cpu_id); 2894 2757 my_core_id = get_core_id(cpu_id); 2895 - 2896 - if (cpu_is_first_sibling_in_core(cpu_id)) { 2897 - my_thread_id = 0; 2758 + my_thread_id = get_cpu_position_in_core(cpu_id); 2759 + if (!my_thread_id) 2898 2760 topo.num_cores++; 2899 - } else { 2900 - my_thread_id = 1; 2901 - } 2902 2761 2903 2762 init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); 2904 2763 init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, 
cpu_id); ··· 2918 2785 for_all_proc_cpus(initialize_counters); 2919 2786 } 2920 2787 2788 + void set_base_cpu(void) 2789 + { 2790 + base_cpu = sched_getcpu(); 2791 + if (base_cpu < 0) 2792 + err(-ENODEV, "No valid cpus found"); 2793 + 2794 + if (debug > 1) 2795 + fprintf(stderr, "base_cpu = %d\n", base_cpu); 2796 + } 2797 + 2921 2798 void turbostat_init() 2922 2799 { 2800 + setup_all_buffers(); 2801 + set_base_cpu(); 2923 2802 check_dev_msr(); 2924 2803 check_permissions(); 2925 2804 process_cpuid(); 2926 2805 2927 - setup_all_buffers(); 2928 2806 2929 2807 if (debug) 2930 2808 for_all_cpus(print_epb, ODD_COUNTERS); ··· 3014 2870 } 3015 2871 3016 2872 void print_version() { 3017 - fprintf(stderr, "turbostat version 4.5 2 Apr, 2015" 2873 + fprintf(stderr, "turbostat version 4.7 27-May, 2015" 3018 2874 " - Len Brown <lenb@kernel.org>\n"); 3019 2875 } 3020 2876