Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/hyperv/netvsc.c
kernel/bpf/hashtab.c

Almost entirely overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>

+4202 -2274
+2 -16
MAINTAINERS
··· 3223 3223 3224 3224 CISCO VIC ETHERNET NIC DRIVER 3225 3225 M: Christian Benvenuti <benve@cisco.com> 3226 - M: Sujith Sankar <ssujith@cisco.com> 3227 3226 M: Govindarajulu Varadarajan <_govind@gmx.com> 3228 3227 M: Neel Patel <neepatel@cisco.com> 3229 3228 S: Supported ··· 7780 7781 F: net/mac80211/ 7781 7782 F: drivers/net/wireless/mac80211_hwsim.[ch] 7782 7783 7783 - MACVLAN DRIVER 7784 - M: Patrick McHardy <kaber@trash.net> 7785 - L: netdev@vger.kernel.org 7786 - S: Maintained 7787 - F: drivers/net/macvlan.c 7788 - F: include/linux/if_macvlan.h 7789 - 7790 7784 MAILBOX API 7791 7785 M: Jassi Brar <jassisinghbrar@gmail.com> 7792 7786 L: linux-kernel@vger.kernel.org ··· 7852 7860 MARVELL MWIFIEX WIRELESS DRIVER 7853 7861 M: Amitkumar Karwar <akarwar@marvell.com> 7854 7862 M: Nishant Sarmukadam <nishants@marvell.com> 7863 + M: Ganapathi Bhat <gbhat@marvell.com> 7864 + M: Xinming Hu <huxm@marvell.com> 7855 7865 L: linux-wireless@vger.kernel.org 7856 7866 S: Maintained 7857 7867 F: drivers/net/wireless/marvell/mwifiex/ ··· 13389 13395 W: https://linuxtv.org 13390 13396 S: Maintained 13391 13397 F: drivers/media/platform/vivid/* 13392 - 13393 - VLAN (802.1Q) 13394 - M: Patrick McHardy <kaber@trash.net> 13395 - L: netdev@vger.kernel.org 13396 - S: Maintained 13397 - F: drivers/net/macvlan.c 13398 - F: include/linux/if_*vlan.h 13399 - F: net/8021q/ 13400 13398 13401 13399 VLYNQ BUS 13402 13400 M: Florian Fainelli <f.fainelli@gmail.com>
+1 -1
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 11 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc2 4 + EXTRAVERSION = -rc3 5 5 NAME = Fearless Coyote 6 6 7 7 # *DOCUMENTATION*
+1
arch/arm/tools/syscall.tbl
··· 411 411 394 common pkey_mprotect sys_pkey_mprotect 412 412 395 common pkey_alloc sys_pkey_alloc 413 413 396 common pkey_free sys_pkey_free 414 + 397 common statx sys_statx
+4
arch/arm64/Kconfig
··· 1073 1073 def_bool y 1074 1074 depends on COMPAT && SYSVIPC 1075 1075 1076 + config KEYS_COMPAT 1077 + def_bool y 1078 + depends on COMPAT && KEYS 1079 + 1076 1080 endmenu 1077 1081 1078 1082 menu "Power management options"
+1 -1
arch/arm64/include/asm/cpufeature.h
··· 251 251 static inline bool system_uses_ttbr0_pan(void) 252 252 { 253 253 return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) && 254 - !cpus_have_cap(ARM64_HAS_PAN); 254 + !cpus_have_const_cap(ARM64_HAS_PAN); 255 255 } 256 256 257 257 #endif /* __ASSEMBLY__ */
+1 -1
arch/arm64/kernel/cpuidle.c
··· 30 30 } 31 31 32 32 /** 33 - * cpu_suspend() - function to enter a low-power idle state 33 + * arm_cpuidle_suspend() - function to enter a low-power idle state 34 34 * @arg: argument to pass to CPU suspend operations 35 35 * 36 36 * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
-6
arch/arm64/kernel/probes/kprobes.c
··· 372 372 return 0; 373 373 } 374 374 375 - int __kprobes kprobe_exceptions_notify(struct notifier_block *self, 376 - unsigned long val, void *data) 377 - { 378 - return NOTIFY_DONE; 379 - } 380 - 381 375 static void __kprobes kprobe_handler(struct pt_regs *regs) 382 376 { 383 377 struct kprobe *p, *cur_kprobe;
+1 -1
arch/arm64/mm/kasan_init.c
··· 162 162 clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); 163 163 164 164 vmemmap_populate(kimg_shadow_start, kimg_shadow_end, 165 - pfn_to_nid(virt_to_pfn(_text))); 165 + pfn_to_nid(virt_to_pfn(lm_alias(_text)))); 166 166 167 167 /* 168 168 * vmemmap_populate() has populated the shadow region that covers the
+6 -2
arch/openrisc/include/asm/cmpxchg.h
··· 77 77 return val; 78 78 } 79 79 80 - #define xchg(ptr, with) \ 81 - ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr)))) 80 + #define xchg(ptr, with) \ 81 + ({ \ 82 + (__typeof__(*(ptr))) __xchg((unsigned long)(with), \ 83 + (ptr), \ 84 + sizeof(*(ptr))); \ 85 + }) 82 86 83 87 #endif /* __ASM_OPENRISC_CMPXCHG_H */
+1 -1
arch/openrisc/include/asm/uaccess.h
··· 211 211 case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \ 212 212 case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \ 213 213 case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \ 214 - case 8: __get_user_asm2(x, ptr, retval); \ 214 + case 8: __get_user_asm2(x, ptr, retval); break; \ 215 215 default: (x) = __get_user_bad(); \ 216 216 } \ 217 217 } while (0)
+4
arch/openrisc/kernel/or32_ksyms.c
··· 30 30 #include <asm/hardirq.h> 31 31 #include <asm/delay.h> 32 32 #include <asm/pgalloc.h> 33 + #include <asm/pgtable.h> 33 34 34 35 #define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name) 35 36 ··· 43 42 DECLARE_EXPORT(__ashrdi3); 44 43 DECLARE_EXPORT(__ashldi3); 45 44 DECLARE_EXPORT(__lshrdi3); 45 + DECLARE_EXPORT(__ucmpdi2); 46 46 47 + EXPORT_SYMBOL(empty_zero_page); 47 48 EXPORT_SYMBOL(__copy_tofrom_user); 49 + EXPORT_SYMBOL(__clear_user); 48 50 EXPORT_SYMBOL(memset);
+1
arch/openrisc/kernel/process.c
··· 90 90 } 91 91 92 92 void (*pm_power_off) (void) = machine_power_off; 93 + EXPORT_SYMBOL(pm_power_off); 93 94 94 95 /* 95 96 * When a process does an "exec", machine state like FPU and debug
+2 -21
arch/parisc/include/asm/cacheflush.h
··· 43 43 44 44 #define flush_kernel_dcache_range(start,size) \ 45 45 flush_kernel_dcache_range_asm((start), (start)+(size)); 46 - /* vmap range flushes and invalidates. Architecturally, we don't need 47 - * the invalidate, because the CPU should refuse to speculate once an 48 - * area has been flushed, so invalidate is left empty */ 49 - static inline void flush_kernel_vmap_range(void *vaddr, int size) 50 - { 51 - unsigned long start = (unsigned long)vaddr; 52 46 53 - flush_kernel_dcache_range_asm(start, start + size); 54 - } 55 - static inline void invalidate_kernel_vmap_range(void *vaddr, int size) 56 - { 57 - unsigned long start = (unsigned long)vaddr; 58 - void *cursor = vaddr; 59 - 60 - for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) { 61 - struct page *page = vmalloc_to_page(cursor); 62 - 63 - if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) 64 - flush_kernel_dcache_page(page); 65 - } 66 - flush_kernel_dcache_range_asm(start, start + size); 67 - } 47 + void flush_kernel_vmap_range(void *vaddr, int size); 48 + void invalidate_kernel_vmap_range(void *vaddr, int size); 68 49 69 50 #define flush_cache_vmap(start, end) flush_cache_all() 70 51 #define flush_cache_vunmap(start, end) flush_cache_all()
+2 -1
arch/parisc/include/asm/uaccess.h
··· 32 32 * that put_user is the same as __put_user, etc. 33 33 */ 34 34 35 - #define access_ok(type, uaddr, size) (1) 35 + #define access_ok(type, uaddr, size) \ 36 + ( (uaddr) == (uaddr) ) 36 37 37 38 #define put_user __put_user 38 39 #define get_user __get_user
+2 -1
arch/parisc/include/uapi/asm/unistd.h
··· 362 362 #define __NR_copy_file_range (__NR_Linux + 346) 363 363 #define __NR_preadv2 (__NR_Linux + 347) 364 364 #define __NR_pwritev2 (__NR_Linux + 348) 365 + #define __NR_statx (__NR_Linux + 349) 365 366 366 - #define __NR_Linux_syscalls (__NR_pwritev2 + 1) 367 + #define __NR_Linux_syscalls (__NR_statx + 1) 367 368 368 369 369 370 #define __IGNORE_select /* newselect */
+22
arch/parisc/kernel/cache.c
··· 616 616 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); 617 617 } 618 618 } 619 + 620 + void flush_kernel_vmap_range(void *vaddr, int size) 621 + { 622 + unsigned long start = (unsigned long)vaddr; 623 + 624 + if ((unsigned long)size > parisc_cache_flush_threshold) 625 + flush_data_cache(); 626 + else 627 + flush_kernel_dcache_range_asm(start, start + size); 628 + } 629 + EXPORT_SYMBOL(flush_kernel_vmap_range); 630 + 631 + void invalidate_kernel_vmap_range(void *vaddr, int size) 632 + { 633 + unsigned long start = (unsigned long)vaddr; 634 + 635 + if ((unsigned long)size > parisc_cache_flush_threshold) 636 + flush_data_cache(); 637 + else 638 + flush_kernel_dcache_range_asm(start, start + size); 639 + } 640 + EXPORT_SYMBOL(invalidate_kernel_vmap_range);
+8
arch/parisc/kernel/module.c
··· 620 620 */ 621 621 *loc = fsel(val, addend); 622 622 break; 623 + case R_PARISC_SECREL32: 624 + /* 32-bit section relative address. */ 625 + *loc = fsel(val, addend); 626 + break; 623 627 case R_PARISC_DPREL21L: 624 628 /* left 21 bit of relative address */ 625 629 val = lrsel(val - dp, addend); ··· 810 806 * the beginning of this file. 811 807 */ 812 808 *loc = fsel(val, addend); 809 + break; 810 + case R_PARISC_SECREL32: 811 + /* 32-bit section relative address. */ 812 + *loc = fsel(val, addend); 813 813 break; 814 814 case R_PARISC_FPTR64: 815 815 /* 64-bit function address */
+49 -45
arch/parisc/kernel/perf.c
··· 39 39 * the PDC INTRIGUE calls. This is done to eliminate bugs introduced 40 40 * in various PDC revisions. The code is much more maintainable 41 41 * and reliable this way vs having to debug on every version of PDC 42 - * on every box. 42 + * on every box. 43 43 */ 44 44 45 45 #include <linux/capability.h> ··· 195 195 static int perf_release(struct inode *inode, struct file *file); 196 196 static int perf_open(struct inode *inode, struct file *file); 197 197 static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos); 198 - static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, 199 - loff_t *ppos); 198 + static ssize_t perf_write(struct file *file, const char __user *buf, 199 + size_t count, loff_t *ppos); 200 200 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 201 201 static void perf_start_counters(void); 202 202 static int perf_stop_counters(uint32_t *raddr); ··· 222 222 /* 223 223 * configure: 224 224 * 225 - * Configure the cpu with a given data image. First turn off the counters, 225 + * Configure the cpu with a given data image. First turn off the counters, 226 226 * then download the image, then turn the counters back on. 227 227 */ 228 228 static int perf_config(uint32_t *image_ptr) ··· 234 234 error = perf_stop_counters(raddr); 235 235 if (error != 0) { 236 236 printk("perf_config: perf_stop_counters = %ld\n", error); 237 - return -EINVAL; 237 + return -EINVAL; 238 238 } 239 239 240 240 printk("Preparing to write image\n"); ··· 242 242 error = perf_write_image((uint64_t *)image_ptr); 243 243 if (error != 0) { 244 244 printk("perf_config: DOWNLOAD = %ld\n", error); 245 - return -EINVAL; 245 + return -EINVAL; 246 246 } 247 247 248 248 printk("Preparing to start counters\n"); ··· 254 254 } 255 255 256 256 /* 257 - * Open the device and initialize all of its memory. The device is only 257 + * Open the device and initialize all of its memory. 
The device is only 258 258 * opened once, but can be "queried" by multiple processes that know its 259 259 * file descriptor. 260 260 */ ··· 298 298 * called on the processor that the download should happen 299 299 * on. 300 300 */ 301 - static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, 302 - loff_t *ppos) 301 + static ssize_t perf_write(struct file *file, const char __user *buf, 302 + size_t count, loff_t *ppos) 303 303 { 304 304 size_t image_size; 305 305 uint32_t image_type; 306 306 uint32_t interface_type; 307 307 uint32_t test; 308 308 309 - if (perf_processor_interface == ONYX_INTF) 309 + if (perf_processor_interface == ONYX_INTF) 310 310 image_size = PCXU_IMAGE_SIZE; 311 - else if (perf_processor_interface == CUDA_INTF) 311 + else if (perf_processor_interface == CUDA_INTF) 312 312 image_size = PCXW_IMAGE_SIZE; 313 - else 313 + else 314 314 return -EFAULT; 315 315 316 316 if (!capable(CAP_SYS_ADMIN)) ··· 330 330 331 331 /* First check the machine type is correct for 332 332 the requested image */ 333 - if (((perf_processor_interface == CUDA_INTF) && 334 - (interface_type != CUDA_INTF)) || 335 - ((perf_processor_interface == ONYX_INTF) && 336 - (interface_type != ONYX_INTF))) 333 + if (((perf_processor_interface == CUDA_INTF) && 334 + (interface_type != CUDA_INTF)) || 335 + ((perf_processor_interface == ONYX_INTF) && 336 + (interface_type != ONYX_INTF))) 337 337 return -EINVAL; 338 338 339 339 /* Next check to make sure the requested image 340 340 is valid */ 341 - if (((interface_type == CUDA_INTF) && 341 + if (((interface_type == CUDA_INTF) && 342 342 (test >= MAX_CUDA_IMAGES)) || 343 - ((interface_type == ONYX_INTF) && 344 - (test >= MAX_ONYX_IMAGES))) 343 + ((interface_type == ONYX_INTF) && 344 + (test >= MAX_ONYX_IMAGES))) 345 345 return -EINVAL; 346 346 347 347 /* Copy the image into the processor */ 348 - if (interface_type == CUDA_INTF) 348 + if (interface_type == CUDA_INTF) 349 349 return 
perf_config(cuda_images[test]); 350 350 else 351 351 return perf_config(onyx_images[test]); ··· 359 359 static void perf_patch_images(void) 360 360 { 361 361 #if 0 /* FIXME!! */ 362 - /* 362 + /* 363 363 * NOTE: this routine is VERY specific to the current TLB image. 364 364 * If the image is changed, this routine might also need to be changed. 365 365 */ ··· 367 367 extern void $i_dtlb_miss_2_0(); 368 368 extern void PA2_0_iva(); 369 369 370 - /* 370 + /* 371 371 * We can only use the lower 32-bits, the upper 32-bits should be 0 372 - * anyway given this is in the kernel 372 + * anyway given this is in the kernel 373 373 */ 374 374 uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0); 375 375 uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0); ··· 377 377 378 378 if (perf_processor_interface == ONYX_INTF) { 379 379 /* clear last 2 bytes */ 380 - onyx_images[TLBMISS][15] &= 0xffffff00; 380 + onyx_images[TLBMISS][15] &= 0xffffff00; 381 381 /* set 2 bytes */ 382 382 onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24)); 383 383 onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00; 384 384 onyx_images[TLBMISS][17] = itlb_addr; 385 385 386 386 /* clear last 2 bytes */ 387 - onyx_images[TLBHANDMISS][15] &= 0xffffff00; 387 + onyx_images[TLBHANDMISS][15] &= 0xffffff00; 388 388 /* set 2 bytes */ 389 389 onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24)); 390 390 onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00; 391 391 onyx_images[TLBHANDMISS][17] = itlb_addr; 392 392 393 393 /* clear last 2 bytes */ 394 - onyx_images[BIG_CPI][15] &= 0xffffff00; 394 + onyx_images[BIG_CPI][15] &= 0xffffff00; 395 395 /* set 2 bytes */ 396 396 onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24)); 397 397 onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00; ··· 404 404 405 405 } else if (perf_processor_interface == CUDA_INTF) { 406 406 /* Cuda interface */ 407 - cuda_images[TLBMISS][16] = 407 + cuda_images[TLBMISS][16] = 408 408 
(cuda_images[TLBMISS][16]&0xffff0000) | 409 409 ((dtlb_addr >> 8)&0x0000ffff); 410 - cuda_images[TLBMISS][17] = 410 + cuda_images[TLBMISS][17] = 411 411 ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff); 412 412 cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000; 413 413 414 - cuda_images[TLBHANDMISS][16] = 414 + cuda_images[TLBHANDMISS][16] = 415 415 (cuda_images[TLBHANDMISS][16]&0xffff0000) | 416 416 ((dtlb_addr >> 8)&0x0000ffff); 417 - cuda_images[TLBHANDMISS][17] = 417 + cuda_images[TLBHANDMISS][17] = 418 418 ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff); 419 419 cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000; 420 420 421 - cuda_images[BIG_CPI][16] = 421 + cuda_images[BIG_CPI][16] = 422 422 (cuda_images[BIG_CPI][16]&0xffff0000) | 423 423 ((dtlb_addr >> 8)&0x0000ffff); 424 - cuda_images[BIG_CPI][17] = 424 + cuda_images[BIG_CPI][17] = 425 425 ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff); 426 426 cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000; 427 427 } else { ··· 433 433 434 434 /* 435 435 * ioctl routine 436 - * All routines effect the processor that they are executed on. Thus you 436 + * All routines effect the processor that they are executed on. Thus you 437 437 * must be running on the processor that you wish to change. 
438 438 */ 439 439 ··· 459 459 } 460 460 461 461 /* copy out the Counters */ 462 - if (copy_to_user((void __user *)arg, raddr, 462 + if (copy_to_user((void __user *)arg, raddr, 463 463 sizeof (raddr)) != 0) { 464 464 error = -EFAULT; 465 465 break; ··· 487 487 .open = perf_open, 488 488 .release = perf_release 489 489 }; 490 - 490 + 491 491 static struct miscdevice perf_dev = { 492 492 MISC_DYNAMIC_MINOR, 493 493 PA_PERF_DEV, ··· 595 595 /* OR sticky2 (bit 1496) to counter2 bit 32 */ 596 596 tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000; 597 597 raddr[2] = (uint32_t)tmp64; 598 - 598 + 599 599 /* Counter3 is bits 1497 to 1528 */ 600 600 tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff; 601 601 /* OR sticky3 (bit 1529) to counter3 bit 32 */ ··· 617 617 userbuf[22] = 0; 618 618 userbuf[23] = 0; 619 619 620 - /* 620 + /* 621 621 * Write back the zeroed bytes + the image given 622 622 * the read was destructive. 623 623 */ ··· 625 625 } else { 626 626 627 627 /* 628 - * Read RDR-15 which contains the counters and sticky bits 628 + * Read RDR-15 which contains the counters and sticky bits 629 629 */ 630 630 if (!perf_rdr_read_ubuf(15, userbuf)) { 631 631 return -13; 632 632 } 633 633 634 - /* 634 + /* 635 635 * Clear out the counters 636 636 */ 637 637 perf_rdr_clear(15); ··· 644 644 raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL); 645 645 raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL); 646 646 } 647 - 647 + 648 648 return 0; 649 649 } 650 650 ··· 682 682 i = tentry->num_words; 683 683 while (i--) { 684 684 buffer[i] = 0; 685 - } 685 + } 686 686 687 687 /* Check for bits an even number of 64 */ 688 688 if ((xbits = width & 0x03f) != 0) { ··· 808 808 } 809 809 810 810 runway = ioremap_nocache(cpu_device->hpa.start, 4096); 811 + if (!runway) { 812 + pr_err("perf_write_image: ioremap failed!\n"); 813 + return -ENOMEM; 814 + } 811 815 812 816 /* Merge intrigue bits into Runway STATUS 0 */ 813 817 tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 
0xffecfffffffffffful; 814 - __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), 818 + __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), 815 819 runway + RUNWAY_STATUS); 816 - 820 + 817 821 /* Write RUNWAY DEBUG registers */ 818 822 for (i = 0; i < 8; i++) { 819 823 __raw_writeq(*memaddr++, runway + RUNWAY_DEBUG); 820 824 } 821 825 822 - return 0; 826 + return 0; 823 827 } 824 828 825 829 /* ··· 847 843 perf_rdr_shift_out_U(rdr_num, buffer[i]); 848 844 } else { 849 845 perf_rdr_shift_out_W(rdr_num, buffer[i]); 850 - } 846 + } 851 847 } 852 848 printk("perf_rdr_write done\n"); 853 849 }
+2
arch/parisc/kernel/process.c
··· 142 142 143 143 printk(KERN_EMERG "System shut down completed.\n" 144 144 "Please power this system off now."); 145 + 146 + for (;;); 145 147 } 146 148 147 149 void (*pm_power_off)(void) = machine_power_off;
+1
arch/parisc/kernel/syscall_table.S
··· 444 444 ENTRY_SAME(copy_file_range) 445 445 ENTRY_COMP(preadv2) 446 446 ENTRY_COMP(pwritev2) 447 + ENTRY_SAME(statx) 447 448 448 449 449 450 .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
+1
arch/powerpc/include/asm/systbl.h
··· 387 387 COMPAT_SYS_SPU(preadv2) 388 388 COMPAT_SYS_SPU(pwritev2) 389 389 SYSCALL(kexec_file_load) 390 + SYSCALL(statx)
+1 -1
arch/powerpc/include/asm/unistd.h
··· 12 12 #include <uapi/asm/unistd.h> 13 13 14 14 15 - #define NR_syscalls 383 15 + #define NR_syscalls 384 16 16 17 17 #define __NR__exit __NR_exit 18 18
+1
arch/powerpc/include/uapi/asm/unistd.h
··· 393 393 #define __NR_preadv2 380 394 394 #define __NR_pwritev2 381 395 395 #define __NR_kexec_file_load 382 396 + #define __NR_statx 383 396 397 397 398 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
+3 -1
arch/powerpc/platforms/pseries/lpar.c
··· 751 751 mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range; 752 752 mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all; 753 753 mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; 754 - mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt; 754 + 755 + if (firmware_has_feature(FW_FEATURE_HPT_RESIZE)) 756 + mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt; 755 757 } 756 758 757 759 void radix_init_pseries(void)
+14 -2
arch/x86/events/core.c
··· 2101 2101 2102 2102 static void refresh_pce(void *ignored) 2103 2103 { 2104 - if (current->mm) 2105 - load_mm_cr4(current->mm); 2104 + if (current->active_mm) 2105 + load_mm_cr4(current->active_mm); 2106 2106 } 2107 2107 2108 2108 static void x86_pmu_event_mapped(struct perf_event *event) 2109 2109 { 2110 2110 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) 2111 2111 return; 2112 + 2113 + /* 2114 + * This function relies on not being called concurrently in two 2115 + * tasks in the same mm. Otherwise one task could observe 2116 + * perf_rdpmc_allowed > 1 and return all the way back to 2117 + * userspace with CR4.PCE clear while another task is still 2118 + * doing on_each_cpu_mask() to propagate CR4.PCE. 2119 + * 2120 + * For now, this can't happen because all callers hold mmap_sem 2121 + * for write. If this changes, we'll need a different solution. 2122 + */ 2123 + lockdep_assert_held_exclusive(&current->mm->mmap_sem); 2112 2124 2113 2125 if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1) 2114 2126 on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
-3
arch/x86/include/asm/pgtable-3level.h
··· 121 121 *(tmp + 1) = 0; 122 122 } 123 123 124 - #if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \ 125 - defined(CONFIG_PARAVIRT)) 126 124 static inline void native_pud_clear(pud_t *pudp) 127 125 { 128 126 } 129 - #endif 130 127 131 128 static inline void pud_clear(pud_t *pudp) 132 129 {
+1 -1
arch/x86/include/asm/pgtable.h
··· 62 62 # define set_pud(pudp, pud) native_set_pud(pudp, pud) 63 63 #endif 64 64 65 - #ifndef __PAGETABLE_PMD_FOLDED 65 + #ifndef __PAGETABLE_PUD_FOLDED 66 66 #define pud_clear(pud) native_pud_clear(pud) 67 67 #endif 68 68
+7 -2
arch/x86/kernel/acpi/boot.c
··· 179 179 return -EINVAL; 180 180 } 181 181 182 + if (!enabled) { 183 + ++disabled_cpus; 184 + return -EINVAL; 185 + } 186 + 182 187 if (boot_cpu_physical_apicid != -1U) 183 188 ver = boot_cpu_apic_version; 184 189 185 - cpu = __generic_processor_info(id, ver, enabled); 190 + cpu = generic_processor_info(id, ver); 186 191 if (cpu >= 0) 187 192 early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid; 188 193 ··· 715 710 #ifdef CONFIG_ACPI_HOTPLUG_CPU 716 711 #include <acpi/processor.h> 717 712 718 - int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) 713 + static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) 719 714 { 720 715 #ifdef CONFIG_ACPI_NUMA 721 716 int nid;
+7 -19
arch/x86/kernel/apic/apic.c
··· 2063 2063 return nr_logical_cpuids++; 2064 2064 } 2065 2065 2066 - int __generic_processor_info(int apicid, int version, bool enabled) 2066 + int generic_processor_info(int apicid, int version) 2067 2067 { 2068 2068 int cpu, max = nr_cpu_ids; 2069 2069 bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid, ··· 2121 2121 if (num_processors >= nr_cpu_ids) { 2122 2122 int thiscpu = max + disabled_cpus; 2123 2123 2124 - if (enabled) { 2125 - pr_warning("APIC: NR_CPUS/possible_cpus limit of %i " 2126 - "reached. Processor %d/0x%x ignored.\n", 2127 - max, thiscpu, apicid); 2128 - } 2124 + pr_warning("APIC: NR_CPUS/possible_cpus limit of %i " 2125 + "reached. Processor %d/0x%x ignored.\n", 2126 + max, thiscpu, apicid); 2129 2127 2130 2128 disabled_cpus++; 2131 2129 return -EINVAL; ··· 2175 2177 apic->x86_32_early_logical_apicid(cpu); 2176 2178 #endif 2177 2179 set_cpu_possible(cpu, true); 2178 - 2179 - if (enabled) { 2180 - num_processors++; 2181 - physid_set(apicid, phys_cpu_present_map); 2182 - set_cpu_present(cpu, true); 2183 - } else { 2184 - disabled_cpus++; 2185 - } 2180 + physid_set(apicid, phys_cpu_present_map); 2181 + set_cpu_present(cpu, true); 2182 + num_processors++; 2186 2183 2187 2184 return cpu; 2188 - } 2189 - 2190 - int generic_processor_info(int apicid, int version) 2191 - { 2192 - return __generic_processor_info(apicid, version, true); 2193 2185 } 2194 2186 2195 2187 int hard_smp_processor_id(void)
+1 -1
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
··· 727 727 if (atomic_dec_and_test(&rdtgrp->waitcount) && 728 728 (rdtgrp->flags & RDT_DELETED)) { 729 729 kernfs_unbreak_active_protection(kn); 730 - kernfs_put(kn); 730 + kernfs_put(rdtgrp->kn); 731 731 kfree(rdtgrp); 732 732 } else { 733 733 kernfs_unbreak_active_protection(kn);
+1
arch/x86/kernel/head64.c
··· 4 4 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE 5 5 */ 6 6 7 + #define DISABLE_BRANCH_PROFILING 7 8 #include <linux/init.h> 8 9 #include <linux/linkage.h> 9 10 #include <linux/types.h>
+2 -4
arch/x86/kernel/nmi.c
··· 166 166 spin_lock_irqsave(&desc->lock, flags); 167 167 168 168 /* 169 - * most handlers of type NMI_UNKNOWN never return because 170 - * they just assume the NMI is theirs. Just a sanity check 171 - * to manage expectations 169 + * Indicate if there are multiple registrations on the 170 + * internal NMI handler call chains (SERR and IO_CHECK). 172 171 */ 173 - WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head)); 174 172 WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head)); 175 173 WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head)); 176 174
+2
arch/x86/kernel/tsc.c
··· 1333 1333 * the refined calibration and directly register it as a clocksource. 1334 1334 */ 1335 1335 if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) { 1336 + if (boot_cpu_has(X86_FEATURE_ART)) 1337 + art_related_clocksource = &clocksource_tsc; 1336 1338 clocksource_register_khz(&clocksource_tsc, tsc_khz); 1337 1339 return 0; 1338 1340 }
+30 -6
arch/x86/kernel/unwind_frame.c
··· 82 82 return sizeof(*regs); 83 83 } 84 84 85 + #ifdef CONFIG_X86_32 86 + #define GCC_REALIGN_WORDS 3 87 + #else 88 + #define GCC_REALIGN_WORDS 1 89 + #endif 90 + 85 91 static bool is_last_task_frame(struct unwind_state *state) 86 92 { 87 - unsigned long bp = (unsigned long)state->bp; 88 - unsigned long regs = (unsigned long)task_pt_regs(state->task); 93 + unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2; 94 + unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS; 89 95 90 96 /* 91 97 * We have to check for the last task frame at two different locations 92 98 * because gcc can occasionally decide to realign the stack pointer and 93 - * change the offset of the stack frame by a word in the prologue of a 94 - * function called by head/entry code. 99 + * change the offset of the stack frame in the prologue of a function 100 + * called by head/entry code. Examples: 101 + * 102 + * <start_secondary>: 103 + * push %edi 104 + * lea 0x8(%esp),%edi 105 + * and $0xfffffff8,%esp 106 + * pushl -0x4(%edi) 107 + * push %ebp 108 + * mov %esp,%ebp 109 + * 110 + * <x86_64_start_kernel>: 111 + * lea 0x8(%rsp),%r10 112 + * and $0xfffffffffffffff0,%rsp 113 + * pushq -0x8(%r10) 114 + * push %rbp 115 + * mov %rsp,%rbp 116 + * 117 + * Note that after aligning the stack, it pushes a duplicate copy of 118 + * the return address before pushing the frame pointer. 95 119 */ 96 - return bp == regs - FRAME_HEADER_SIZE || 97 - bp == regs - FRAME_HEADER_SIZE - sizeof(long); 120 + return (state->bp == last_bp || 121 + (state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1))); 98 122 } 99 123 100 124 /*
+1
arch/x86/mm/kasan_init_64.c
··· 1 + #define DISABLE_BRANCH_PROFILING 1 2 #define pr_fmt(fmt) "kasan: " fmt 2 3 #include <linux/bootmem.h> 3 4 #include <linux/kasan.h>
+1 -1
arch/x86/mm/mpx.c
··· 590 590 * we might run off the end of the bounds table if we are on 591 591 * a 64-bit kernel and try to get 8 bytes. 592 592 */ 593 - int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret, 593 + static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret, 594 594 long __user *bd_entry_ptr) 595 595 { 596 596 u32 bd_entry_32;
+1
arch/x86/platform/intel-mid/device_libs/Makefile
··· 26 26 obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o 27 27 # MISC Devices 28 28 obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o 29 + obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o 29 30 obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o 30 31 obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
+82
arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c
··· 1 + /* 2 + * Intel Merrifield power button support 3 + * 4 + * (C) Copyright 2017 Intel Corporation 5 + * 6 + * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 7 + * 8 + * This program is free software; you can redistribute it and/or 9 + * modify it under the terms of the GNU General Public License 10 + * as published by the Free Software Foundation; version 2 11 + * of the License. 12 + */ 13 + 14 + #include <linux/init.h> 15 + #include <linux/ioport.h> 16 + #include <linux/platform_device.h> 17 + #include <linux/sfi.h> 18 + 19 + #include <asm/intel-mid.h> 20 + #include <asm/intel_scu_ipc.h> 21 + 22 + static struct resource mrfld_power_btn_resources[] = { 23 + { 24 + .flags = IORESOURCE_IRQ, 25 + }, 26 + }; 27 + 28 + static struct platform_device mrfld_power_btn_dev = { 29 + .name = "msic_power_btn", 30 + .id = PLATFORM_DEVID_NONE, 31 + .num_resources = ARRAY_SIZE(mrfld_power_btn_resources), 32 + .resource = mrfld_power_btn_resources, 33 + }; 34 + 35 + static int mrfld_power_btn_scu_status_change(struct notifier_block *nb, 36 + unsigned long code, void *data) 37 + { 38 + if (code == SCU_DOWN) { 39 + platform_device_unregister(&mrfld_power_btn_dev); 40 + return 0; 41 + } 42 + 43 + return platform_device_register(&mrfld_power_btn_dev); 44 + } 45 + 46 + static struct notifier_block mrfld_power_btn_scu_notifier = { 47 + .notifier_call = mrfld_power_btn_scu_status_change, 48 + }; 49 + 50 + static int __init register_mrfld_power_btn(void) 51 + { 52 + if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) 53 + return -ENODEV; 54 + 55 + /* 56 + * We need to be sure that the SCU IPC is ready before 57 + * PMIC power button device can be registered: 58 + */ 59 + intel_scu_notifier_add(&mrfld_power_btn_scu_notifier); 60 + 61 + return 0; 62 + } 63 + arch_initcall(register_mrfld_power_btn); 64 + 65 + static void __init *mrfld_power_btn_platform_data(void *info) 66 + { 67 + struct resource *res = mrfld_power_btn_resources; 68 + struct sfi_device_table_entry 
*pentry = info; 69 + 70 + res->start = res->end = pentry->irq; 71 + return NULL; 72 + } 73 + 74 + static const struct devs_id mrfld_power_btn_dev_id __initconst = { 75 + .name = "bcove_power_btn", 76 + .type = SFI_DEV_TYPE_IPC, 77 + .delay = 1, 78 + .msic = 1, 79 + .get_platform_data = &mrfld_power_btn_platform_data, 80 + }; 81 + 82 + sfi_device(mrfld_power_btn_dev_id);
+1 -1
arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
··· 19 19 #include <asm/intel_scu_ipc.h> 20 20 #include <asm/io_apic.h> 21 21 22 - #define TANGIER_EXT_TIMER0_MSI 15 22 + #define TANGIER_EXT_TIMER0_MSI 12 23 23 24 24 static struct platform_device wdt_dev = { 25 25 .name = "intel_mid_wdt",
+4 -11
arch/x86/platform/intel-mid/mfld.c
··· 17 17 18 18 #include "intel_mid_weak_decls.h" 19 19 20 - static void penwell_arch_setup(void); 21 - /* penwell arch ops */ 22 - static struct intel_mid_ops penwell_ops = { 23 - .arch_setup = penwell_arch_setup, 24 - }; 25 - 26 - static void mfld_power_off(void) 27 - { 28 - } 29 - 30 20 static unsigned long __init mfld_calibrate_tsc(void) 31 21 { 32 22 unsigned long fast_calibrate; ··· 53 63 static void __init penwell_arch_setup(void) 54 64 { 55 65 x86_platform.calibrate_tsc = mfld_calibrate_tsc; 56 - pm_power_off = mfld_power_off; 57 66 } 67 + 68 + static struct intel_mid_ops penwell_ops = { 69 + .arch_setup = penwell_arch_setup, 70 + }; 58 71 59 72 void *get_penwell_ops(void) 60 73 {
+9 -3
block/bio.c
··· 376 376 bio_list_init(&punt); 377 377 bio_list_init(&nopunt); 378 378 379 - while ((bio = bio_list_pop(current->bio_list))) 379 + while ((bio = bio_list_pop(&current->bio_list[0]))) 380 380 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); 381 + current->bio_list[0] = nopunt; 381 382 382 - *current->bio_list = nopunt; 383 + bio_list_init(&nopunt); 384 + while ((bio = bio_list_pop(&current->bio_list[1]))) 385 + bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); 386 + current->bio_list[1] = nopunt; 383 387 384 388 spin_lock(&bs->rescue_lock); 385 389 bio_list_merge(&bs->rescue_list, &punt); ··· 470 466 * we retry with the original gfp_flags. 471 467 */ 472 468 473 - if (current->bio_list && !bio_list_empty(current->bio_list)) 469 + if (current->bio_list && 470 + (!bio_list_empty(&current->bio_list[0]) || 471 + !bio_list_empty(&current->bio_list[1]))) 474 472 gfp_mask &= ~__GFP_DIRECT_RECLAIM; 475 473 476 474 p = mempool_alloc(bs->bio_pool, gfp_mask);
+18 -12
block/blk-core.c
··· 1973 1973 */ 1974 1974 blk_qc_t generic_make_request(struct bio *bio) 1975 1975 { 1976 - struct bio_list bio_list_on_stack; 1976 + /* 1977 + * bio_list_on_stack[0] contains bios submitted by the current 1978 + * make_request_fn. 1979 + * bio_list_on_stack[1] contains bios that were submitted before 1980 + * the current make_request_fn, but that haven't been processed 1981 + * yet. 1982 + */ 1983 + struct bio_list bio_list_on_stack[2]; 1977 1984 blk_qc_t ret = BLK_QC_T_NONE; 1978 1985 1979 1986 if (!generic_make_request_checks(bio)) ··· 1997 1990 * should be added at the tail 1998 1991 */ 1999 1992 if (current->bio_list) { 2000 - bio_list_add(current->bio_list, bio); 1993 + bio_list_add(&current->bio_list[0], bio); 2001 1994 goto out; 2002 1995 } 2003 1996 ··· 2016 2009 * bio_list, and call into ->make_request() again. 2017 2010 */ 2018 2011 BUG_ON(bio->bi_next); 2019 - bio_list_init(&bio_list_on_stack); 2020 - current->bio_list = &bio_list_on_stack; 2012 + bio_list_init(&bio_list_on_stack[0]); 2013 + current->bio_list = bio_list_on_stack; 2021 2014 do { 2022 2015 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 2023 2016 2024 2017 if (likely(blk_queue_enter(q, false) == 0)) { 2025 - struct bio_list hold; 2026 2018 struct bio_list lower, same; 2027 2019 2028 2020 /* Create a fresh bio_list for all subordinate requests */ 2029 - hold = bio_list_on_stack; 2030 - bio_list_init(&bio_list_on_stack); 2021 + bio_list_on_stack[1] = bio_list_on_stack[0]; 2022 + bio_list_init(&bio_list_on_stack[0]); 2031 2023 ret = q->make_request_fn(q, bio); 2032 2024 2033 2025 blk_queue_exit(q); ··· 2036 2030 */ 2037 2031 bio_list_init(&lower); 2038 2032 bio_list_init(&same); 2039 - while ((bio = bio_list_pop(&bio_list_on_stack)) != NULL) 2033 + while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) 2040 2034 if (q == bdev_get_queue(bio->bi_bdev)) 2041 2035 bio_list_add(&same, bio); 2042 2036 else 2043 2037 bio_list_add(&lower, bio); 2044 2038 /* now assemble so we handle 
the lowest level first */ 2045 - bio_list_merge(&bio_list_on_stack, &lower); 2046 - bio_list_merge(&bio_list_on_stack, &same); 2047 - bio_list_merge(&bio_list_on_stack, &hold); 2039 + bio_list_merge(&bio_list_on_stack[0], &lower); 2040 + bio_list_merge(&bio_list_on_stack[0], &same); 2041 + bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]); 2048 2042 } else { 2049 2043 bio_io_error(bio); 2050 2044 } 2051 - bio = bio_list_pop(current->bio_list); 2045 + bio = bio_list_pop(&bio_list_on_stack[0]); 2052 2046 } while (bio); 2053 2047 current->bio_list = NULL; /* deactivate */ 2054 2048
+3
block/blk-mq-tag.c
··· 295 295 for (i = 0; i < set->nr_hw_queues; i++) { 296 296 struct blk_mq_tags *tags = set->tags[i]; 297 297 298 + if (!tags) 299 + continue; 300 + 298 301 for (j = 0; j < tags->nr_tags; j++) { 299 302 if (!tags->static_rqs[j]) 300 303 continue;
+5 -4
block/blk-mq.c
··· 1434 1434 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true); 1435 1435 } 1436 1436 1437 - static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie) 1437 + static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie, 1438 + bool may_sleep) 1438 1439 { 1439 1440 struct request_queue *q = rq->q; 1440 1441 struct blk_mq_queue_data bd = { ··· 1476 1475 } 1477 1476 1478 1477 insert: 1479 - blk_mq_sched_insert_request(rq, false, true, true, false); 1478 + blk_mq_sched_insert_request(rq, false, true, false, may_sleep); 1480 1479 } 1481 1480 1482 1481 /* ··· 1570 1569 1571 1570 if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) { 1572 1571 rcu_read_lock(); 1573 - blk_mq_try_issue_directly(old_rq, &cookie); 1572 + blk_mq_try_issue_directly(old_rq, &cookie, false); 1574 1573 rcu_read_unlock(); 1575 1574 } else { 1576 1575 srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu); 1577 - blk_mq_try_issue_directly(old_rq, &cookie); 1576 + blk_mq_try_issue_directly(old_rq, &cookie, true); 1578 1577 srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx); 1579 1578 } 1580 1579 goto done;
+42 -15
drivers/acpi/acpi_processor.c
··· 182 182 183 183 void __weak arch_unregister_cpu(int cpu) {} 184 184 185 - int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) 186 - { 187 - return -ENODEV; 188 - } 189 - 190 185 static int acpi_processor_hotadd_init(struct acpi_processor *pr) 191 186 { 192 187 unsigned long long sta; ··· 278 283 } 279 284 device_declaration = 1; 280 285 pr->acpi_id = value; 286 + } 287 + 288 + if (acpi_duplicate_processor_id(pr->acpi_id)) { 289 + dev_err(&device->dev, 290 + "Failed to get unique processor _UID (0x%x)\n", 291 + pr->acpi_id); 292 + return -ENODEV; 281 293 } 282 294 283 295 pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration, ··· 587 585 static int nr_unique_ids __initdata; 588 586 589 587 /* The number of the duplicate processor IDs */ 590 - static int nr_duplicate_ids __initdata; 588 + static int nr_duplicate_ids; 591 589 592 590 /* Used to store the unique processor IDs */ 593 591 static int unique_processor_ids[] __initdata = { ··· 595 593 }; 596 594 597 595 /* Used to store the duplicate processor IDs */ 598 - static int duplicate_processor_ids[] __initdata = { 596 + static int duplicate_processor_ids[] = { 599 597 [0 ... 
NR_CPUS - 1] = -1, 600 598 }; 601 599 ··· 640 638 void **rv) 641 639 { 642 640 acpi_status status; 641 + acpi_object_type acpi_type; 642 + unsigned long long uid; 643 643 union acpi_object object = { 0 }; 644 644 struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; 645 645 646 - status = acpi_evaluate_object(handle, NULL, NULL, &buffer); 646 + status = acpi_get_type(handle, &acpi_type); 647 647 if (ACPI_FAILURE(status)) 648 - acpi_handle_info(handle, "Not get the processor object\n"); 649 - else 650 - processor_validated_ids_update(object.processor.proc_id); 648 + return false; 651 649 652 - return AE_OK; 650 + switch (acpi_type) { 651 + case ACPI_TYPE_PROCESSOR: 652 + status = acpi_evaluate_object(handle, NULL, NULL, &buffer); 653 + if (ACPI_FAILURE(status)) 654 + goto err; 655 + uid = object.processor.proc_id; 656 + break; 657 + 658 + case ACPI_TYPE_DEVICE: 659 + status = acpi_evaluate_integer(handle, "_UID", NULL, &uid); 660 + if (ACPI_FAILURE(status)) 661 + goto err; 662 + break; 663 + default: 664 + goto err; 665 + } 666 + 667 + processor_validated_ids_update(uid); 668 + return true; 669 + 670 + err: 671 + acpi_handle_info(handle, "Invalid processor object\n"); 672 + return false; 673 + 653 674 } 654 675 655 - static void __init acpi_processor_check_duplicates(void) 676 + void __init acpi_processor_check_duplicates(void) 656 677 { 657 - /* Search all processor nodes in ACPI namespace */ 678 + /* check the correctness for all processors in ACPI namespace */ 658 679 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 659 680 ACPI_UINT32_MAX, 660 681 acpi_processor_ids_walk, 661 682 NULL, NULL, NULL); 683 + acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk, 684 + NULL, NULL); 662 685 } 663 686 664 - bool __init acpi_processor_validate_proc_id(int proc_id) 687 + bool acpi_duplicate_processor_id(int proc_id) 665 688 { 666 689 int i; 667 690
-1
drivers/acpi/bus.c
··· 1249 1249 acpi_wakeup_device_init(); 1250 1250 acpi_debugger_init(); 1251 1251 acpi_setup_sb_notify_handler(); 1252 - acpi_set_processor_mapping(); 1253 1252 return 0; 1254 1253 } 1255 1254
+22 -111
drivers/acpi/processor_core.c
··· 32 32 } 33 33 34 34 static int map_lapic_id(struct acpi_subtable_header *entry, 35 - u32 acpi_id, phys_cpuid_t *apic_id, bool ignore_disabled) 35 + u32 acpi_id, phys_cpuid_t *apic_id) 36 36 { 37 37 struct acpi_madt_local_apic *lapic = 38 38 container_of(entry, struct acpi_madt_local_apic, header); 39 39 40 - if (ignore_disabled && !(lapic->lapic_flags & ACPI_MADT_ENABLED)) 40 + if (!(lapic->lapic_flags & ACPI_MADT_ENABLED)) 41 41 return -ENODEV; 42 42 43 43 if (lapic->processor_id != acpi_id) ··· 48 48 } 49 49 50 50 static int map_x2apic_id(struct acpi_subtable_header *entry, 51 - int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id, 52 - bool ignore_disabled) 51 + int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id) 53 52 { 54 53 struct acpi_madt_local_x2apic *apic = 55 54 container_of(entry, struct acpi_madt_local_x2apic, header); 56 55 57 - if (ignore_disabled && !(apic->lapic_flags & ACPI_MADT_ENABLED)) 56 + if (!(apic->lapic_flags & ACPI_MADT_ENABLED)) 58 57 return -ENODEV; 59 58 60 59 if (device_declaration && (apic->uid == acpi_id)) { ··· 65 66 } 66 67 67 68 static int map_lsapic_id(struct acpi_subtable_header *entry, 68 - int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id, 69 - bool ignore_disabled) 69 + int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id) 70 70 { 71 71 struct acpi_madt_local_sapic *lsapic = 72 72 container_of(entry, struct acpi_madt_local_sapic, header); 73 73 74 - if (ignore_disabled && !(lsapic->lapic_flags & ACPI_MADT_ENABLED)) 74 + if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED)) 75 75 return -ENODEV; 76 76 77 77 if (device_declaration) { ··· 87 89 * Retrieve the ARM CPU physical identifier (MPIDR) 88 90 */ 89 91 static int map_gicc_mpidr(struct acpi_subtable_header *entry, 90 - int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr, 91 - bool ignore_disabled) 92 + int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr) 92 93 { 93 94 struct acpi_madt_generic_interrupt *gicc = 94 95 container_of(entry, 
struct acpi_madt_generic_interrupt, header); 95 96 96 - if (ignore_disabled && !(gicc->flags & ACPI_MADT_ENABLED)) 97 + if (!(gicc->flags & ACPI_MADT_ENABLED)) 97 98 return -ENODEV; 98 99 99 100 /* device_declaration means Device object in DSDT, in the ··· 109 112 } 110 113 111 114 static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt, 112 - int type, u32 acpi_id, bool ignore_disabled) 115 + int type, u32 acpi_id) 113 116 { 114 117 unsigned long madt_end, entry; 115 118 phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */ ··· 127 130 struct acpi_subtable_header *header = 128 131 (struct acpi_subtable_header *)entry; 129 132 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) { 130 - if (!map_lapic_id(header, acpi_id, &phys_id, 131 - ignore_disabled)) 133 + if (!map_lapic_id(header, acpi_id, &phys_id)) 132 134 break; 133 135 } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) { 134 - if (!map_x2apic_id(header, type, acpi_id, &phys_id, 135 - ignore_disabled)) 136 + if (!map_x2apic_id(header, type, acpi_id, &phys_id)) 136 137 break; 137 138 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) { 138 - if (!map_lsapic_id(header, type, acpi_id, &phys_id, 139 - ignore_disabled)) 139 + if (!map_lsapic_id(header, type, acpi_id, &phys_id)) 140 140 break; 141 141 } else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) { 142 - if (!map_gicc_mpidr(header, type, acpi_id, &phys_id, 143 - ignore_disabled)) 142 + if (!map_gicc_mpidr(header, type, acpi_id, &phys_id)) 144 143 break; 145 144 } 146 145 entry += header->length; ··· 154 161 if (!madt) 155 162 return PHYS_CPUID_INVALID; 156 163 157 - rv = map_madt_entry(madt, 1, acpi_id, true); 164 + rv = map_madt_entry(madt, 1, acpi_id); 158 165 159 166 acpi_put_table((struct acpi_table_header *)madt); 160 167 161 168 return rv; 162 169 } 163 170 164 - static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id, 165 - bool ignore_disabled) 171 + static phys_cpuid_t map_mat_entry(acpi_handle 
handle, int type, u32 acpi_id) 166 172 { 167 173 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 168 174 union acpi_object *obj; ··· 182 190 183 191 header = (struct acpi_subtable_header *)obj->buffer.pointer; 184 192 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) 185 - map_lapic_id(header, acpi_id, &phys_id, ignore_disabled); 193 + map_lapic_id(header, acpi_id, &phys_id); 186 194 else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) 187 - map_lsapic_id(header, type, acpi_id, &phys_id, ignore_disabled); 195 + map_lsapic_id(header, type, acpi_id, &phys_id); 188 196 else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) 189 - map_x2apic_id(header, type, acpi_id, &phys_id, ignore_disabled); 197 + map_x2apic_id(header, type, acpi_id, &phys_id); 190 198 else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) 191 - map_gicc_mpidr(header, type, acpi_id, &phys_id, 192 - ignore_disabled); 199 + map_gicc_mpidr(header, type, acpi_id, &phys_id); 193 200 194 201 exit: 195 202 kfree(buffer.pointer); 196 203 return phys_id; 197 204 } 198 205 199 - static phys_cpuid_t __acpi_get_phys_id(acpi_handle handle, int type, 200 - u32 acpi_id, bool ignore_disabled) 206 + phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) 201 207 { 202 208 phys_cpuid_t phys_id; 203 209 204 - phys_id = map_mat_entry(handle, type, acpi_id, ignore_disabled); 210 + phys_id = map_mat_entry(handle, type, acpi_id); 205 211 if (invalid_phys_cpuid(phys_id)) 206 - phys_id = map_madt_entry(get_madt_table(), type, acpi_id, 207 - ignore_disabled); 212 + phys_id = map_madt_entry(get_madt_table(), type, acpi_id); 208 213 209 214 return phys_id; 210 - } 211 - 212 - phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) 213 - { 214 - return __acpi_get_phys_id(handle, type, acpi_id, true); 215 215 } 216 216 217 217 int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id) ··· 261 277 return acpi_map_cpuid(phys_id, acpi_id); 262 278 } 263 279 EXPORT_SYMBOL_GPL(acpi_get_cpuid); 
264 - 265 - #ifdef CONFIG_ACPI_HOTPLUG_CPU 266 - static bool __init 267 - map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid) 268 - { 269 - int type, id; 270 - u32 acpi_id; 271 - acpi_status status; 272 - acpi_object_type acpi_type; 273 - unsigned long long tmp; 274 - union acpi_object object = { 0 }; 275 - struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; 276 - 277 - status = acpi_get_type(handle, &acpi_type); 278 - if (ACPI_FAILURE(status)) 279 - return false; 280 - 281 - switch (acpi_type) { 282 - case ACPI_TYPE_PROCESSOR: 283 - status = acpi_evaluate_object(handle, NULL, NULL, &buffer); 284 - if (ACPI_FAILURE(status)) 285 - return false; 286 - acpi_id = object.processor.proc_id; 287 - 288 - /* validate the acpi_id */ 289 - if(acpi_processor_validate_proc_id(acpi_id)) 290 - return false; 291 - break; 292 - case ACPI_TYPE_DEVICE: 293 - status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp); 294 - if (ACPI_FAILURE(status)) 295 - return false; 296 - acpi_id = tmp; 297 - break; 298 - default: 299 - return false; 300 - } 301 - 302 - type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0; 303 - 304 - *phys_id = __acpi_get_phys_id(handle, type, acpi_id, false); 305 - id = acpi_map_cpuid(*phys_id, acpi_id); 306 - 307 - if (id < 0) 308 - return false; 309 - *cpuid = id; 310 - return true; 311 - } 312 - 313 - static acpi_status __init 314 - set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context, 315 - void **rv) 316 - { 317 - phys_cpuid_t phys_id; 318 - int cpu_id; 319 - 320 - if (!map_processor(handle, &phys_id, &cpu_id)) 321 - return AE_ERROR; 322 - 323 - acpi_map_cpu2node(handle, cpu_id, phys_id); 324 - return AE_OK; 325 - } 326 - 327 - void __init acpi_set_processor_mapping(void) 328 - { 329 - /* Set persistent cpu <-> node mapping for all processors. 
*/ 330 - acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 331 - ACPI_UINT32_MAX, set_processor_node_mapping, 332 - NULL, NULL, NULL); 333 - } 334 - #else 335 - void __init acpi_set_processor_mapping(void) {} 336 - #endif /* CONFIG_ACPI_HOTPLUG_CPU */ 337 280 338 281 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC 339 282 static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
-5
drivers/base/core.c
··· 639 639 return restart_syscall(); 640 640 } 641 641 642 - void assert_held_device_hotplug(void) 643 - { 644 - lockdep_assert_held(&device_hotplug_lock); 645 - } 646 - 647 642 #ifdef CONFIG_BLOCK 648 643 static inline int device_is_not_partition(struct device *dev) 649 644 {
+2 -1
drivers/bluetooth/Kconfig
··· 344 344 345 345 config BT_QCOMSMD 346 346 tristate "Qualcomm SMD based HCI support" 347 - depends on (QCOM_SMD && QCOM_WCNSS_CTRL) || COMPILE_TEST 347 + depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) 348 + depends on QCOM_WCNSS_CTRL || (COMPILE_TEST && QCOM_WCNSS_CTRL=n) 348 349 select BT_QCA 349 350 help 350 351 Qualcomm SMD based HCI driver.
+1 -15
drivers/clocksource/tcb_clksrc.c
··· 10 10 #include <linux/io.h> 11 11 #include <linux/platform_device.h> 12 12 #include <linux/atmel_tc.h> 13 - #include <linux/sched_clock.h> 14 13 15 14 16 15 /* ··· 56 57 return (upper << 16) | lower; 57 58 } 58 59 59 - static u32 tc_get_cv32(void) 60 - { 61 - return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV)); 62 - } 63 - 64 60 static u64 tc_get_cycles32(struct clocksource *cs) 65 61 { 66 - return tc_get_cv32(); 62 + return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV)); 67 63 } 68 64 69 65 static struct clocksource clksrc = { ··· 68 74 .mask = CLOCKSOURCE_MASK(32), 69 75 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 70 76 }; 71 - 72 - static u64 notrace tc_read_sched_clock(void) 73 - { 74 - return tc_get_cv32(); 75 - } 76 77 77 78 #ifdef CONFIG_GENERIC_CLOCKEVENTS 78 79 ··· 339 350 clksrc.read = tc_get_cycles32; 340 351 /* setup ony channel 0 */ 341 352 tcb_setup_single_chan(tc, best_divisor_idx); 342 - 343 - /* register sched_clock on chips with single 32 bit counter */ 344 - sched_clock_register(tc_read_sched_clock, 32, divided_rate); 345 353 } else { 346 354 /* tclib will give us three clocks no matter what the 347 355 * underlying platform supports.
+5 -3
drivers/cpufreq/cpufreq.c
··· 680 680 char *buf) 681 681 { 682 682 unsigned int cur_freq = __cpufreq_get(policy); 683 - if (!cur_freq) 684 - return sprintf(buf, "<unknown>"); 685 - return sprintf(buf, "%u\n", cur_freq); 683 + 684 + if (cur_freq) 685 + return sprintf(buf, "%u\n", cur_freq); 686 + 687 + return sprintf(buf, "<unknown>\n"); 686 688 } 687 689 688 690 /**
+31 -33
drivers/cpufreq/intel_pstate.c
··· 84 84 return div64_u64(x << EXT_FRAC_BITS, y); 85 85 } 86 86 87 + static inline int32_t percent_ext_fp(int percent) 88 + { 89 + return div_ext_fp(percent, 100); 90 + } 91 + 87 92 /** 88 93 * struct sample - Store performance sample 89 94 * @core_avg_perf: Ratio of APERF/MPERF which is the actual average ··· 850 845 851 846 static void intel_pstate_hwp_set(struct cpufreq_policy *policy) 852 847 { 853 - int min, hw_min, max, hw_max, cpu, range, adj_range; 848 + int min, hw_min, max, hw_max, cpu; 854 849 struct perf_limits *perf_limits = limits; 855 850 u64 value, cap; 856 851 857 852 for_each_cpu(cpu, policy->cpus) { 858 - int max_perf_pct, min_perf_pct; 859 853 struct cpudata *cpu_data = all_cpu_data[cpu]; 860 854 s16 epp; 861 855 ··· 867 863 hw_max = HWP_GUARANTEED_PERF(cap); 868 864 else 869 865 hw_max = HWP_HIGHEST_PERF(cap); 870 - range = hw_max - hw_min; 871 866 872 - max_perf_pct = perf_limits->max_perf_pct; 873 - min_perf_pct = perf_limits->min_perf_pct; 867 + min = fp_ext_toint(hw_max * perf_limits->min_perf); 874 868 875 869 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); 876 - adj_range = min_perf_pct * range / 100; 877 - min = hw_min + adj_range; 870 + 878 871 value &= ~HWP_MIN_PERF(~0L); 879 872 value |= HWP_MIN_PERF(min); 880 873 881 - adj_range = max_perf_pct * range / 100; 882 - max = hw_min + adj_range; 883 - 874 + max = fp_ext_toint(hw_max * perf_limits->max_perf); 884 875 value &= ~HWP_MAX_PERF(~0L); 885 876 value |= HWP_MAX_PERF(max); 886 877 ··· 988 989 static int pid_param_set(void *data, u64 val) 989 990 { 990 991 *(u32 *)data = val; 992 + pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC; 991 993 intel_pstate_reset_all_pid(); 992 994 return 0; 993 995 } ··· 1225 1225 limits->max_perf_pct); 1226 1226 limits->max_perf_pct = max(limits->min_perf_pct, 1227 1227 limits->max_perf_pct); 1228 - limits->max_perf = div_ext_fp(limits->max_perf_pct, 100); 1228 + limits->max_perf = percent_ext_fp(limits->max_perf_pct); 1229 1229 1230 
1230 intel_pstate_update_policies(); 1231 1231 ··· 1262 1262 limits->min_perf_pct); 1263 1263 limits->min_perf_pct = min(limits->max_perf_pct, 1264 1264 limits->min_perf_pct); 1265 - limits->min_perf = div_ext_fp(limits->min_perf_pct, 100); 1265 + limits->min_perf = percent_ext_fp(limits->min_perf_pct); 1266 1266 1267 1267 intel_pstate_update_policies(); 1268 1268 ··· 2080 2080 static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, 2081 2081 struct perf_limits *limits) 2082 2082 { 2083 + int32_t max_policy_perf, min_policy_perf; 2083 2084 2084 - limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100, 2085 - policy->cpuinfo.max_freq); 2086 - limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100); 2085 + max_policy_perf = div_ext_fp(policy->max, policy->cpuinfo.max_freq); 2086 + max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1)); 2087 2087 if (policy->max == policy->min) { 2088 - limits->min_policy_pct = limits->max_policy_pct; 2088 + min_policy_perf = max_policy_perf; 2089 2089 } else { 2090 - limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100, 2091 - policy->cpuinfo.max_freq); 2092 - limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 2093 - 0, 100); 2090 + min_policy_perf = div_ext_fp(policy->min, 2091 + policy->cpuinfo.max_freq); 2092 + min_policy_perf = clamp_t(int32_t, min_policy_perf, 2093 + 0, max_policy_perf); 2094 2094 } 2095 2095 2096 - /* Normalize user input to [min_policy_pct, max_policy_pct] */ 2097 - limits->min_perf_pct = max(limits->min_policy_pct, 2098 - limits->min_sysfs_pct); 2099 - limits->min_perf_pct = min(limits->max_policy_pct, 2100 - limits->min_perf_pct); 2101 - limits->max_perf_pct = min(limits->max_policy_pct, 2102 - limits->max_sysfs_pct); 2103 - limits->max_perf_pct = max(limits->min_policy_pct, 2104 - limits->max_perf_pct); 2096 + /* Normalize user input to [min_perf, max_perf] */ 2097 + limits->min_perf = max(min_policy_perf, 2098 + 
percent_ext_fp(limits->min_sysfs_pct)); 2099 + limits->min_perf = min(limits->min_perf, max_policy_perf); 2100 + limits->max_perf = min(max_policy_perf, 2101 + percent_ext_fp(limits->max_sysfs_pct)); 2102 + limits->max_perf = max(min_policy_perf, limits->max_perf); 2105 2103 2106 - /* Make sure min_perf_pct <= max_perf_pct */ 2107 - limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); 2104 + /* Make sure min_perf <= max_perf */ 2105 + limits->min_perf = min(limits->min_perf, limits->max_perf); 2108 2106 2109 - limits->min_perf = div_ext_fp(limits->min_perf_pct, 100); 2110 - limits->max_perf = div_ext_fp(limits->max_perf_pct, 100); 2111 2107 limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS); 2112 2108 limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS); 2109 + limits->max_perf_pct = fp_ext_toint(limits->max_perf * 100); 2110 + limits->min_perf_pct = fp_ext_toint(limits->min_perf * 100); 2113 2111 2114 2112 pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu, 2115 2113 limits->max_perf_pct, limits->min_perf_pct);
+30 -3
drivers/dax/dax.c
··· 427 427 int rc = VM_FAULT_SIGBUS; 428 428 phys_addr_t phys; 429 429 pfn_t pfn; 430 + unsigned int fault_size = PAGE_SIZE; 430 431 431 432 if (check_vma(dax_dev, vmf->vma, __func__)) 432 433 return VM_FAULT_SIGBUS; ··· 438 437 return VM_FAULT_SIGBUS; 439 438 } 440 439 440 + if (fault_size != dax_region->align) 441 + return VM_FAULT_SIGBUS; 442 + 441 443 phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE); 442 444 if (phys == -1) { 443 - dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, 445 + dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, 444 446 vmf->pgoff); 445 447 return VM_FAULT_SIGBUS; 446 448 } ··· 468 464 phys_addr_t phys; 469 465 pgoff_t pgoff; 470 466 pfn_t pfn; 467 + unsigned int fault_size = PMD_SIZE; 471 468 472 469 if (check_vma(dax_dev, vmf->vma, __func__)) 473 470 return VM_FAULT_SIGBUS; ··· 485 480 return VM_FAULT_SIGBUS; 486 481 } 487 482 483 + if (fault_size < dax_region->align) 484 + return VM_FAULT_SIGBUS; 485 + else if (fault_size > dax_region->align) 486 + return VM_FAULT_FALLBACK; 487 + 488 + /* if we are outside of the VMA */ 489 + if (pmd_addr < vmf->vma->vm_start || 490 + (pmd_addr + PMD_SIZE) > vmf->vma->vm_end) 491 + return VM_FAULT_SIGBUS; 492 + 488 493 pgoff = linear_page_index(vmf->vma, pmd_addr); 489 494 phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE); 490 495 if (phys == -1) { 491 - dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, 496 + dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, 492 497 pgoff); 493 498 return VM_FAULT_SIGBUS; 494 499 } ··· 518 503 phys_addr_t phys; 519 504 pgoff_t pgoff; 520 505 pfn_t pfn; 506 + unsigned int fault_size = PUD_SIZE; 507 + 521 508 522 509 if (check_vma(dax_dev, vmf->vma, __func__)) 523 510 return VM_FAULT_SIGBUS; ··· 536 519 return VM_FAULT_SIGBUS; 537 520 } 538 521 522 + if (fault_size < dax_region->align) 523 + return VM_FAULT_SIGBUS; 524 + else if (fault_size > dax_region->align) 525 + return VM_FAULT_FALLBACK; 526 + 527 + /* if we are outside of the 
VMA */ 528 + if (pud_addr < vmf->vma->vm_start || 529 + (pud_addr + PUD_SIZE) > vmf->vma->vm_end) 530 + return VM_FAULT_SIGBUS; 531 + 539 532 pgoff = linear_page_index(vmf->vma, pud_addr); 540 533 phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE); 541 534 if (phys == -1) { 542 - dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, 535 + dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, 543 536 pgoff); 544 537 return VM_FAULT_SIGBUS; 545 538 }
+1 -1
drivers/gpio/gpio-altera-a10sr.c
··· 96 96 gpio->regmap = a10sr->regmap; 97 97 98 98 gpio->gp = altr_a10sr_gc; 99 - 99 + gpio->gp.parent = pdev->dev.parent; 100 100 gpio->gp.of_node = pdev->dev.of_node; 101 101 102 102 ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
+11 -15
drivers/gpio/gpio-altera.c
··· 90 90 91 91 altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d)); 92 92 93 - if (type == IRQ_TYPE_NONE) 93 + if (type == IRQ_TYPE_NONE) { 94 + irq_set_handler_locked(d, handle_bad_irq); 94 95 return 0; 95 - if (type == IRQ_TYPE_LEVEL_HIGH && 96 - altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH) 96 + } 97 + if (type == altera_gc->interrupt_trigger) { 98 + if (type == IRQ_TYPE_LEVEL_HIGH) 99 + irq_set_handler_locked(d, handle_level_irq); 100 + else 101 + irq_set_handler_locked(d, handle_simple_irq); 97 102 return 0; 98 - if (type == IRQ_TYPE_EDGE_RISING && 99 - altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING) 100 - return 0; 101 - if (type == IRQ_TYPE_EDGE_FALLING && 102 - altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING) 103 - return 0; 104 - if (type == IRQ_TYPE_EDGE_BOTH && 105 - altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH) 106 - return 0; 107 - 103 + } 104 + irq_set_handler_locked(d, handle_bad_irq); 108 105 return -EINVAL; 109 106 } 110 107 ··· 227 230 chained_irq_exit(chip, desc); 228 231 } 229 232 230 - 231 233 static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc) 232 234 { 233 235 struct altera_gpio_chip *altera_gc; ··· 306 310 altera_gc->interrupt_trigger = reg; 307 311 308 312 ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0, 309 - handle_simple_irq, IRQ_TYPE_NONE); 313 + handle_bad_irq, IRQ_TYPE_NONE); 310 314 311 315 if (ret) { 312 316 dev_err(&pdev->dev, "could not add irqchip\n");
+60 -5
drivers/gpio/gpio-mcp23s08.c
··· 270 270 static irqreturn_t mcp23s08_irq(int irq, void *data) 271 271 { 272 272 struct mcp23s08 *mcp = data; 273 - int intcap, intf, i; 273 + int intcap, intf, i, gpio, gpio_orig, intcap_mask; 274 274 unsigned int child_irq; 275 + bool intf_set, intcap_changed, gpio_bit_changed, 276 + defval_changed, gpio_set; 275 277 276 278 mutex_lock(&mcp->lock); 277 279 if (mcp_read(mcp, MCP_INTF, &intf) < 0) { ··· 289 287 } 290 288 291 289 mcp->cache[MCP_INTCAP] = intcap; 290 + 291 + /* This clears the interrupt(configurable on S18) */ 292 + if (mcp_read(mcp, MCP_GPIO, &gpio) < 0) { 293 + mutex_unlock(&mcp->lock); 294 + return IRQ_HANDLED; 295 + } 296 + gpio_orig = mcp->cache[MCP_GPIO]; 297 + mcp->cache[MCP_GPIO] = gpio; 292 298 mutex_unlock(&mcp->lock); 293 299 300 + if (mcp->cache[MCP_INTF] == 0) { 301 + /* There is no interrupt pending */ 302 + return IRQ_HANDLED; 303 + } 304 + 305 + dev_dbg(mcp->chip.parent, 306 + "intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n", 307 + intcap, intf, gpio_orig, gpio); 294 308 295 309 for (i = 0; i < mcp->chip.ngpio; i++) { 296 - if ((BIT(i) & mcp->cache[MCP_INTF]) && 297 - ((BIT(i) & intcap & mcp->irq_rise) || 298 - (mcp->irq_fall & ~intcap & BIT(i)) || 299 - (BIT(i) & mcp->cache[MCP_INTCON]))) { 310 + /* We must check all of the inputs on the chip, 311 + * otherwise we may not notice a change on >=2 pins. 312 + * 313 + * On at least the mcp23s17, INTCAP is only updated 314 + * one byte at a time(INTCAPA and INTCAPB are 315 + * not written to at the same time - only on a per-bank 316 + * basis). 317 + * 318 + * INTF only contains the single bit that caused the 319 + * interrupt per-bank. On the mcp23s17, there is 320 + * INTFA and INTFB. If two pins are changed on the A 321 + * side at the same time, INTF will only have one bit 322 + * set. If one pin on the A side and one pin on the B 323 + * side are changed at the same time, INTF will have 324 + * two bits set. 
Thus, INTF can't be the only check 325 + * to see if the input has changed. 326 + */ 327 + 328 + intf_set = BIT(i) & mcp->cache[MCP_INTF]; 329 + if (i < 8 && intf_set) 330 + intcap_mask = 0x00FF; 331 + else if (i >= 8 && intf_set) 332 + intcap_mask = 0xFF00; 333 + else 334 + intcap_mask = 0x00; 335 + 336 + intcap_changed = (intcap_mask & 337 + (BIT(i) & mcp->cache[MCP_INTCAP])) != 338 + (intcap_mask & (BIT(i) & gpio_orig)); 339 + gpio_set = BIT(i) & mcp->cache[MCP_GPIO]; 340 + gpio_bit_changed = (BIT(i) & gpio_orig) != 341 + (BIT(i) & mcp->cache[MCP_GPIO]); 342 + defval_changed = (BIT(i) & mcp->cache[MCP_INTCON]) && 343 + ((BIT(i) & mcp->cache[MCP_GPIO]) != 344 + (BIT(i) & mcp->cache[MCP_DEFVAL])); 345 + 346 + if (((gpio_bit_changed || intcap_changed) && 347 + (BIT(i) & mcp->irq_rise) && gpio_set) || 348 + ((gpio_bit_changed || intcap_changed) && 349 + (BIT(i) & mcp->irq_fall) && !gpio_set) || 350 + defval_changed) { 300 351 child_irq = irq_find_mapping(mcp->chip.irqdomain, i); 301 352 handle_nested_irq(child_irq); 302 353 }
+3 -4
drivers/gpio/gpio-mockup.c
··· 197 197 struct seq_file *sfile; 198 198 struct gpio_desc *desc; 199 199 struct gpio_chip *gc; 200 - int status, val; 200 + int val; 201 201 char buf; 202 202 203 203 sfile = file->private_data; ··· 206 206 chip = priv->chip; 207 207 gc = &chip->gc; 208 208 209 - status = copy_from_user(&buf, usr_buf, 1); 210 - if (status) 211 - return status; 209 + if (copy_from_user(&buf, usr_buf, 1)) 210 + return -EFAULT; 212 211 213 212 if (buf == '0') 214 213 val = 0;
+3 -10
drivers/gpio/gpio-xgene.c
··· 42 42 struct gpio_chip chip; 43 43 void __iomem *base; 44 44 spinlock_t lock; 45 - #ifdef CONFIG_PM 46 45 u32 set_dr_val[XGENE_MAX_GPIO_BANKS]; 47 - #endif 48 46 }; 49 47 50 48 static int xgene_gpio_get(struct gpio_chip *gc, unsigned int offset) ··· 136 138 return 0; 137 139 } 138 140 139 - #ifdef CONFIG_PM 140 - static int xgene_gpio_suspend(struct device *dev) 141 + static __maybe_unused int xgene_gpio_suspend(struct device *dev) 141 142 { 142 143 struct xgene_gpio *gpio = dev_get_drvdata(dev); 143 144 unsigned long bank_offset; ··· 149 152 return 0; 150 153 } 151 154 152 - static int xgene_gpio_resume(struct device *dev) 155 + static __maybe_unused int xgene_gpio_resume(struct device *dev) 153 156 { 154 157 struct xgene_gpio *gpio = dev_get_drvdata(dev); 155 158 unsigned long bank_offset; ··· 163 166 } 164 167 165 168 static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume); 166 - #define XGENE_GPIO_PM_OPS (&xgene_gpio_pm) 167 - #else 168 - #define XGENE_GPIO_PM_OPS NULL 169 - #endif 170 169 171 170 static int xgene_gpio_probe(struct platform_device *pdev) 172 171 { ··· 234 241 .name = "xgene-gpio", 235 242 .of_match_table = xgene_gpio_of_match, 236 243 .acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match), 237 - .pm = XGENE_GPIO_PM_OPS, 244 + .pm = &xgene_gpio_pm, 238 245 }, 239 246 .probe = xgene_gpio_probe, 240 247 };
-2
drivers/gpu/drm/amd/acp/Makefile
··· 3 3 # of AMDSOC/AMDGPU drm driver. 4 4 # It provides the HW control for ACP related functionalities. 5 5 6 - subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include 7 - 8 6 AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 240 240 for (; i >= 0; i--) 241 241 drm_free_large(p->chunks[i].kdata); 242 242 kfree(p->chunks); 243 + p->chunks = NULL; 244 + p->nchunks = 0; 243 245 put_ctx: 244 246 amdgpu_ctx_put(p->ctx); 245 247 free_chunk:
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 2590 2590 use_bank = 0; 2591 2591 } 2592 2592 2593 - *pos &= 0x3FFFF; 2593 + *pos &= (1UL << 22) - 1; 2594 2594 2595 2595 if (use_bank) { 2596 2596 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || ··· 2666 2666 use_bank = 0; 2667 2667 } 2668 2668 2669 - *pos &= 0x3FFFF; 2669 + *pos &= (1UL << 22) - 1; 2670 2670 2671 2671 if (use_bank) { 2672 2672 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+6
drivers/gpu/drm/amd/amdgpu/si_dpm.c
··· 3464 3464 (adev->pdev->device == 0x6667)) { 3465 3465 max_sclk = 75000; 3466 3466 } 3467 + } else if (adev->asic_type == CHIP_OLAND) { 3468 + if ((adev->pdev->device == 0x6604) && 3469 + (adev->pdev->subsystem_vendor == 0x1028) && 3470 + (adev->pdev->subsystem_device == 0x066F)) { 3471 + max_sclk = 75000; 3472 + } 3467 3473 } 3468 3474 3469 3475 if (rps->vce_active) {
+1 -1
drivers/gpu/drm/amd/amdgpu/vi.c
··· 1051 1051 /* rev0 hardware requires workarounds to support PG */ 1052 1052 adev->pg_flags = 0; 1053 1053 if (adev->rev_id != 0x00) { 1054 - adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | 1054 + adev->pg_flags |= 1055 1055 AMD_PG_SUPPORT_GFX_SMG | 1056 1056 AMD_PG_SUPPORT_GFX_PIPELINE | 1057 1057 AMD_PG_SUPPORT_CP |
+1 -1
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
··· 178 178 if (bgate) { 179 179 cgs_set_powergating_state(hwmgr->device, 180 180 AMD_IP_BLOCK_TYPE_VCE, 181 - AMD_PG_STATE_UNGATE); 181 + AMD_PG_STATE_GATE); 182 182 cgs_set_clockgating_state(hwmgr->device, 183 183 AMD_IP_BLOCK_TYPE_VCE, 184 184 AMD_CG_STATE_GATE);
+1 -2
drivers/gpu/drm/arm/malidp_crtc.c
··· 63 63 64 64 clk_prepare_enable(hwdev->pxlclk); 65 65 66 - /* mclk needs to be set to the same or higher rate than pxlclk */ 67 - clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000); 66 + /* We rely on firmware to set mclk to a sensible level. */ 68 67 clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000); 69 68 70 69 hwdev->modeset(hwdev, &vm);
+1 -1
drivers/gpu/drm/arm/malidp_hw.c
··· 83 83 { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, 84 84 { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE }, 85 85 { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, 86 - { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 }, 86 + { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE }, 87 87 }; 88 88 89 89 #define MALIDP_DE_DEFAULT_PREFETCH_START 5
+16 -2
drivers/gpu/drm/arm/malidp_planes.c
··· 37 37 #define LAYER_V_VAL(x) (((x) & 0x1fff) << 16) 38 38 #define MALIDP_LAYER_COMP_SIZE 0x010 39 39 #define MALIDP_LAYER_OFFSET 0x014 40 + #define MALIDP550_LS_ENABLE 0x01c 41 + #define MALIDP550_LS_R1_IN_SIZE 0x020 40 42 41 43 /* 42 44 * This 4-entry look-up-table is used to determine the full 8-bit alpha value ··· 244 242 LAYER_V_VAL(plane->state->crtc_y), 245 243 mp->layer->base + MALIDP_LAYER_OFFSET); 246 244 245 + if (mp->layer->id == DE_SMART) 246 + malidp_hw_write(mp->hwdev, 247 + LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h), 248 + mp->layer->base + MALIDP550_LS_R1_IN_SIZE); 249 + 247 250 /* first clear the rotation bits */ 248 251 val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL); 249 252 val &= ~LAYER_ROT_MASK; ··· 337 330 plane->hwdev = malidp->dev; 338 331 plane->layer = &map->layers[i]; 339 332 340 - /* Skip the features which the SMART layer doesn't have */ 341 - if (id == DE_SMART) 333 + if (id == DE_SMART) { 334 + /* 335 + * Enable the first rectangle in the SMART layer to be 336 + * able to use it as a drm plane. 337 + */ 338 + malidp_hw_write(malidp->dev, 1, 339 + plane->layer->base + MALIDP550_LS_ENABLE); 340 + /* Skip the features which the SMART layer doesn't have. */ 342 341 continue; 342 + } 343 343 344 344 drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags); 345 345 malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
+1
drivers/gpu/drm/arm/malidp_regs.h
··· 84 84 /* Stride register offsets relative to Lx_BASE */ 85 85 #define MALIDP_DE_LG_STRIDE 0x18 86 86 #define MALIDP_DE_LV_STRIDE0 0x18 87 + #define MALIDP550_DE_LS_R1_STRIDE 0x28 87 88 88 89 /* macros to set values into registers */ 89 90 #define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0)
+1
drivers/gpu/drm/i915/i915_drv.h
··· 293 293 PLANE_PRIMARY, 294 294 PLANE_SPRITE0, 295 295 PLANE_SPRITE1, 296 + PLANE_SPRITE2, 296 297 PLANE_CURSOR, 297 298 I915_MAX_PLANES, 298 299 };
+94 -3
drivers/gpu/drm/i915/i915_gem.c
··· 1434 1434 1435 1435 trace_i915_gem_object_pwrite(obj, args->offset, args->size); 1436 1436 1437 + ret = -ENODEV; 1438 + if (obj->ops->pwrite) 1439 + ret = obj->ops->pwrite(obj, args); 1440 + if (ret != -ENODEV) 1441 + goto err; 1442 + 1437 1443 ret = i915_gem_object_wait(obj, 1438 1444 I915_WAIT_INTERRUPTIBLE | 1439 1445 I915_WAIT_ALL, ··· 2125 2119 */ 2126 2120 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); 2127 2121 obj->mm.madv = __I915_MADV_PURGED; 2122 + obj->mm.pages = ERR_PTR(-EFAULT); 2128 2123 } 2129 2124 2130 2125 /* Try to discard unwanted pages */ ··· 2225 2218 2226 2219 __i915_gem_object_reset_page_iter(obj); 2227 2220 2228 - obj->ops->put_pages(obj, pages); 2221 + if (!IS_ERR(pages)) 2222 + obj->ops->put_pages(obj, pages); 2223 + 2229 2224 unlock: 2230 2225 mutex_unlock(&obj->mm.lock); 2231 2226 } ··· 2446 2437 if (err) 2447 2438 return err; 2448 2439 2449 - if (unlikely(!obj->mm.pages)) { 2440 + if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { 2450 2441 err = ____i915_gem_object_get_pages(obj); 2451 2442 if (err) 2452 2443 goto unlock; ··· 2524 2515 2525 2516 pinned = true; 2526 2517 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { 2527 - if (unlikely(!obj->mm.pages)) { 2518 + if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { 2528 2519 ret = ____i915_gem_object_get_pages(obj); 2529 2520 if (ret) 2530 2521 goto err_unlock; ··· 2570 2561 err_unlock: 2571 2562 ptr = ERR_PTR(ret); 2572 2563 goto out_unlock; 2564 + } 2565 + 2566 + static int 2567 + i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, 2568 + const struct drm_i915_gem_pwrite *arg) 2569 + { 2570 + struct address_space *mapping = obj->base.filp->f_mapping; 2571 + char __user *user_data = u64_to_user_ptr(arg->data_ptr); 2572 + u64 remain, offset; 2573 + unsigned int pg; 2574 + 2575 + /* Before we instantiate/pin the backing store for our use, we 2576 + * can prepopulate the shmemfs filp efficiently using a write into 2577 + * the pagecache. 
We avoid the penalty of instantiating all the 2578 + * pages, important if the user is just writing to a few and never 2579 + * uses the object on the GPU, and using a direct write into shmemfs 2580 + * allows it to avoid the cost of retrieving a page (either swapin 2581 + * or clearing-before-use) before it is overwritten. 2582 + */ 2583 + if (READ_ONCE(obj->mm.pages)) 2584 + return -ENODEV; 2585 + 2586 + /* Before the pages are instantiated the object is treated as being 2587 + * in the CPU domain. The pages will be clflushed as required before 2588 + * use, and we can freely write into the pages directly. If userspace 2589 + * races pwrite with any other operation; corruption will ensue - 2590 + * that is userspace's prerogative! 2591 + */ 2592 + 2593 + remain = arg->size; 2594 + offset = arg->offset; 2595 + pg = offset_in_page(offset); 2596 + 2597 + do { 2598 + unsigned int len, unwritten; 2599 + struct page *page; 2600 + void *data, *vaddr; 2601 + int err; 2602 + 2603 + len = PAGE_SIZE - pg; 2604 + if (len > remain) 2605 + len = remain; 2606 + 2607 + err = pagecache_write_begin(obj->base.filp, mapping, 2608 + offset, len, 0, 2609 + &page, &data); 2610 + if (err < 0) 2611 + return err; 2612 + 2613 + vaddr = kmap(page); 2614 + unwritten = copy_from_user(vaddr + pg, user_data, len); 2615 + kunmap(page); 2616 + 2617 + err = pagecache_write_end(obj->base.filp, mapping, 2618 + offset, len, len - unwritten, 2619 + page, data); 2620 + if (err < 0) 2621 + return err; 2622 + 2623 + if (unwritten) 2624 + return -EFAULT; 2625 + 2626 + remain -= len; 2627 + user_data += len; 2628 + offset += len; 2629 + pg = 0; 2630 + } while (remain); 2631 + 2632 + return 0; 2573 2633 } 2574 2634 2575 2635 static bool ban_context(const struct i915_gem_context *ctx) ··· 3106 3028 if (args->timeout_ns > 0) { 3107 3029 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); 3108 3030 if (args->timeout_ns < 0) 3031 + args->timeout_ns = 0; 3032 + 3033 + /* 3034 + * Apparently ktime 
isn't accurate enough and occasionally has a 3035 + * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch 3036 + * things up to make the test happy. We allow up to 1 jiffy. 3037 + * 3038 + * This is a regression from the timespec->ktime conversion. 3039 + */ 3040 + if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) 3109 3041 args->timeout_ns = 0; 3110 3042 } 3111 3043 ··· 4062 3974 static const struct drm_i915_gem_object_ops i915_gem_object_ops = { 4063 3975 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | 4064 3976 I915_GEM_OBJECT_IS_SHRINKABLE, 3977 + 4065 3978 .get_pages = i915_gem_object_get_pages_gtt, 4066 3979 .put_pages = i915_gem_object_put_pages_gtt, 3980 + 3981 + .pwrite = i915_gem_object_pwrite_gtt, 4067 3982 }; 4068 3983 4069 3984 struct drm_i915_gem_object *
+4 -4
drivers/gpu/drm/i915/i915_gem_evict.c
··· 293 293 * those as well to make room for our guard pages. 294 294 */ 295 295 if (check_color) { 296 - if (vma->node.start + vma->node.size == node->start) { 297 - if (vma->node.color == node->color) 296 + if (node->start + node->size == target->start) { 297 + if (node->color == target->color) 298 298 continue; 299 299 } 300 - if (vma->node.start == node->start + node->size) { 301 - if (vma->node.color == node->color) 300 + if (node->start == target->start + target->size) { 301 + if (node->color == target->color) 302 302 continue; 303 303 } 304 304 }
+3
drivers/gpu/drm/i915/i915_gem_object.h
··· 54 54 struct sg_table *(*get_pages)(struct drm_i915_gem_object *); 55 55 void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *); 56 56 57 + int (*pwrite)(struct drm_i915_gem_object *, 58 + const struct drm_i915_gem_pwrite *); 59 + 57 60 int (*dmabuf_export)(struct drm_i915_gem_object *); 58 61 void (*release)(struct drm_i915_gem_object *); 59 62 };
+37 -20
drivers/gpu/drm/i915/i915_vma.c
··· 512 512 return ret; 513 513 } 514 514 515 + static void 516 + i915_vma_remove(struct i915_vma *vma) 517 + { 518 + struct drm_i915_gem_object *obj = vma->obj; 519 + 520 + GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 521 + GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); 522 + 523 + drm_mm_remove_node(&vma->node); 524 + list_move_tail(&vma->vm_link, &vma->vm->unbound_list); 525 + 526 + /* Since the unbound list is global, only move to that list if 527 + * no more VMAs exist. 528 + */ 529 + if (--obj->bind_count == 0) 530 + list_move_tail(&obj->global_link, 531 + &to_i915(obj->base.dev)->mm.unbound_list); 532 + 533 + /* And finally now the object is completely decoupled from this vma, 534 + * we can drop its hold on the backing storage and allow it to be 535 + * reaped by the shrinker. 536 + */ 537 + i915_gem_object_unpin_pages(obj); 538 + GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); 539 + } 540 + 515 541 int __i915_vma_do_pin(struct i915_vma *vma, 516 542 u64 size, u64 alignment, u64 flags) 517 543 { 518 - unsigned int bound = vma->flags; 544 + const unsigned int bound = vma->flags; 519 545 int ret; 520 546 521 547 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); ··· 550 524 551 525 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) { 552 526 ret = -EBUSY; 553 - goto err; 527 + goto err_unpin; 554 528 } 555 529 556 530 if ((bound & I915_VMA_BIND_MASK) == 0) { 557 531 ret = i915_vma_insert(vma, size, alignment, flags); 558 532 if (ret) 559 - goto err; 533 + goto err_unpin; 560 534 } 561 535 562 536 ret = i915_vma_bind(vma, vma->obj->cache_level, flags); 563 537 if (ret) 564 - goto err; 538 + goto err_remove; 565 539 566 540 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND) 567 541 __i915_vma_set_map_and_fenceable(vma); ··· 570 544 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); 571 545 return 0; 572 546 573 - err: 547 + err_remove: 548 + if ((bound & I915_VMA_BIND_MASK) == 0) { 549 + 
GEM_BUG_ON(vma->pages); 550 + i915_vma_remove(vma); 551 + } 552 + err_unpin: 574 553 __i915_vma_unpin(vma); 575 554 return ret; 576 555 } ··· 688 657 } 689 658 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); 690 659 691 - drm_mm_remove_node(&vma->node); 692 - list_move_tail(&vma->vm_link, &vma->vm->unbound_list); 693 - 694 660 if (vma->pages != obj->mm.pages) { 695 661 GEM_BUG_ON(!vma->pages); 696 662 sg_free_table(vma->pages); ··· 695 667 } 696 668 vma->pages = NULL; 697 669 698 - /* Since the unbound list is global, only move to that list if 699 - * no more VMAs exist. */ 700 - if (--obj->bind_count == 0) 701 - list_move_tail(&obj->global_link, 702 - &to_i915(obj->base.dev)->mm.unbound_list); 703 - 704 - /* And finally now the object is completely decoupled from this vma, 705 - * we can drop its hold on the backing storage and allow it to be 706 - * reaped by the shrinker. 707 - */ 708 - i915_gem_object_unpin_pages(obj); 709 - GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); 670 + i915_vma_remove(vma); 710 671 711 672 destroy: 712 673 if (unlikely(i915_vma_is_closed(vma)))
+29 -29
drivers/gpu/drm/i915/intel_display.c
··· 3669 3669 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */ 3670 3670 crtc->base.mode = crtc->base.state->mode; 3671 3671 3672 - DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n", 3673 - old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h, 3674 - pipe_config->pipe_src_w, pipe_config->pipe_src_h); 3675 - 3676 3672 /* 3677 3673 * Update pipe size and adjust fitter if needed: the reason for this is 3678 3674 * that in compute_mode_changes we check the native mode (not the pfit ··· 4792 4796 struct intel_crtc_scaler_state *scaler_state = 4793 4797 &crtc->config->scaler_state; 4794 4798 4795 - DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config); 4796 - 4797 4799 if (crtc->config->pch_pfit.enabled) { 4798 4800 int id; 4799 4801 4800 - if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) { 4801 - DRM_ERROR("Requesting pfit without getting a scaler first\n"); 4802 + if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) 4802 4803 return; 4803 - } 4804 4804 4805 4805 id = scaler_state->scaler_id; 4806 4806 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 4807 4807 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); 4808 4808 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); 4809 4809 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); 4810 - 4811 - DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id); 4812 4810 } 4813 4811 } 4814 4812 ··· 14369 14379 } while (progress); 14370 14380 } 14371 14381 14382 + static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) 14383 + { 14384 + struct intel_atomic_state *state, *next; 14385 + struct llist_node *freed; 14386 + 14387 + freed = llist_del_all(&dev_priv->atomic_helper.free_list); 14388 + llist_for_each_entry_safe(state, next, freed, freed) 14389 + drm_atomic_state_put(&state->base); 14390 + } 14391 + 14392 + static void intel_atomic_helper_free_state_worker(struct work_struct *work) 14393 + { 14394 + struct drm_i915_private 
*dev_priv = 14395 + container_of(work, typeof(*dev_priv), atomic_helper.free_work); 14396 + 14397 + intel_atomic_helper_free_state(dev_priv); 14398 + } 14399 + 14372 14400 static void intel_atomic_commit_tail(struct drm_atomic_state *state) 14373 14401 { 14374 14402 struct drm_device *dev = state->dev; ··· 14553 14545 * can happen also when the device is completely off. 14554 14546 */ 14555 14547 intel_uncore_arm_unclaimed_mmio_detection(dev_priv); 14548 + 14549 + intel_atomic_helper_free_state(dev_priv); 14556 14550 } 14557 14551 14558 14552 static void intel_atomic_commit_work(struct work_struct *work) ··· 14956 14946 to_intel_atomic_state(old_crtc_state->state); 14957 14947 bool modeset = needs_modeset(crtc->state); 14958 14948 14949 + if (!modeset && 14950 + (intel_cstate->base.color_mgmt_changed || 14951 + intel_cstate->update_pipe)) { 14952 + intel_color_set_csc(crtc->state); 14953 + intel_color_load_luts(crtc->state); 14954 + } 14955 + 14959 14956 /* Perform vblank evasion around commit operation */ 14960 14957 intel_pipe_update_start(intel_crtc); 14961 14958 14962 14959 if (modeset) 14963 14960 goto out; 14964 - 14965 - if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) { 14966 - intel_color_set_csc(crtc->state); 14967 - intel_color_load_luts(crtc->state); 14968 - } 14969 14961 14970 14962 if (intel_cstate->update_pipe) 14971 14963 intel_update_pipe_config(intel_crtc, old_intel_cstate); ··· 16611 16599 drm_modeset_acquire_fini(&ctx); 16612 16600 } 16613 16601 16614 - static void intel_atomic_helper_free_state(struct work_struct *work) 16615 - { 16616 - struct drm_i915_private *dev_priv = 16617 - container_of(work, typeof(*dev_priv), atomic_helper.free_work); 16618 - struct intel_atomic_state *state, *next; 16619 - struct llist_node *freed; 16620 - 16621 - freed = llist_del_all(&dev_priv->atomic_helper.free_list); 16622 - llist_for_each_entry_safe(state, next, freed, freed) 16623 - drm_atomic_state_put(&state->base); 16624 
- } 16625 - 16626 16602 int intel_modeset_init(struct drm_device *dev) 16627 16603 { 16628 16604 struct drm_i915_private *dev_priv = to_i915(dev); ··· 16631 16631 dev->mode_config.funcs = &intel_mode_funcs; 16632 16632 16633 16633 INIT_WORK(&dev_priv->atomic_helper.free_work, 16634 - intel_atomic_helper_free_state); 16634 + intel_atomic_helper_free_state_worker); 16635 16635 16636 16636 intel_init_quirks(dev); 16637 16637
+4 -6
drivers/gpu/drm/i915/intel_fbdev.c
··· 357 357 bool *enabled, int width, int height) 358 358 { 359 359 struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); 360 - unsigned long conn_configured, mask; 360 + unsigned long conn_configured, conn_seq, mask; 361 361 unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); 362 362 int i, j; 363 363 bool *save_enabled; 364 364 bool fallback = true; 365 365 int num_connectors_enabled = 0; 366 366 int num_connectors_detected = 0; 367 - int pass = 0; 368 367 369 368 save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL); 370 369 if (!save_enabled) ··· 373 374 mask = BIT(count) - 1; 374 375 conn_configured = 0; 375 376 retry: 377 + conn_seq = conn_configured; 376 378 for (i = 0; i < count; i++) { 377 379 struct drm_fb_helper_connector *fb_conn; 378 380 struct drm_connector *connector; ··· 387 387 if (conn_configured & BIT(i)) 388 388 continue; 389 389 390 - if (pass == 0 && !connector->has_tile) 390 + if (conn_seq == 0 && !connector->has_tile) 391 391 continue; 392 392 393 393 if (connector->status == connector_status_connected) ··· 498 498 conn_configured |= BIT(i); 499 499 } 500 500 501 - if ((conn_configured & mask) != mask) { 502 - pass++; 501 + if ((conn_configured & mask) != mask && conn_configured != conn_seq) 503 502 goto retry; 504 - } 505 503 506 504 /* 507 505 * If the BIOS didn't enable everything it could, fall back to have the
+13 -5
drivers/gpu/drm/i915/intel_pm.c
··· 4891 4891 break; 4892 4892 } 4893 4893 4894 + /* When byt can survive without system hang with dynamic 4895 + * sw freq adjustments, this restriction can be lifted. 4896 + */ 4897 + if (IS_VALLEYVIEW(dev_priv)) 4898 + goto skip_hw_write; 4899 + 4894 4900 I915_WRITE(GEN6_RP_UP_EI, 4895 4901 GT_INTERVAL_FROM_US(dev_priv, ei_up)); 4896 4902 I915_WRITE(GEN6_RP_UP_THRESHOLD, ··· 4917 4911 GEN6_RP_UP_BUSY_AVG | 4918 4912 GEN6_RP_DOWN_IDLE_AVG); 4919 4913 4914 + skip_hw_write: 4920 4915 dev_priv->rps.power = new_power; 4921 4916 dev_priv->rps.up_threshold = threshold_up; 4922 4917 dev_priv->rps.down_threshold = threshold_down; ··· 7923 7916 * @timeout_base_ms: timeout for polling with preemption enabled 7924 7917 * 7925 7918 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE 7926 - * reports an error or an overall timeout of @timeout_base_ms+10 ms expires. 7919 + * reports an error or an overall timeout of @timeout_base_ms+50 ms expires. 7927 7920 * The request is acknowledged once the PCODE reply dword equals @reply after 7928 7921 * applying @reply_mask. Polling is first attempted with preemption enabled 7929 - * for @timeout_base_ms and if this times out for another 10 ms with 7922 + * for @timeout_base_ms and if this times out for another 50 ms with 7930 7923 * preemption disabled. 7931 7924 * 7932 7925 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some ··· 7962 7955 * worst case) _and_ PCODE was busy for some reason even after a 7963 7956 * (queued) request and @timeout_base_ms delay. As a workaround retry 7964 7957 * the poll with preemption disabled to maximize the number of 7965 - * requests. Increase the timeout from @timeout_base_ms to 10ms to 7958 + * requests. Increase the timeout from @timeout_base_ms to 50ms to 7966 7959 * account for interrupts that could reduce the number of these 7967 - * requests. 7960 + * requests, and for any quirks of the PCODE firmware that delays 7961 + * the request completion. 
7968 7962 */ 7969 7963 DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n"); 7970 7964 WARN_ON_ONCE(timeout_base_ms > 3); 7971 7965 preempt_disable(); 7972 - ret = wait_for_atomic(COND, 10); 7966 + ret = wait_for_atomic(COND, 50); 7973 7967 preempt_enable(); 7974 7968 7975 7969 out:
-3
drivers/gpu/drm/i915/intel_sprite.c
··· 254 254 int scaler_id = plane_state->scaler_id; 255 255 const struct intel_scaler *scaler; 256 256 257 - DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", 258 - plane_id, PS_PLANE_SEL(plane_id)); 259 - 260 257 scaler = &crtc_state->scaler_state.scalers[scaler_id]; 261 258 262 259 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
+6 -7
drivers/gpu/drm/i915/intel_uncore.c
··· 119 119 120 120 for_each_fw_domain_masked(d, fw_domains, dev_priv) 121 121 fw_domain_wait_ack(d); 122 + 123 + dev_priv->uncore.fw_domains_active |= fw_domains; 122 124 } 123 125 124 126 static void ··· 132 130 fw_domain_put(d); 133 131 fw_domain_posting_read(d); 134 132 } 133 + 134 + dev_priv->uncore.fw_domains_active &= ~fw_domains; 135 135 } 136 136 137 137 static void ··· 244 240 if (WARN_ON(domain->wake_count == 0)) 245 241 domain->wake_count++; 246 242 247 - if (--domain->wake_count == 0) { 243 + if (--domain->wake_count == 0) 248 244 dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask); 249 - dev_priv->uncore.fw_domains_active &= ~domain->mask; 250 - } 251 245 252 246 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 253 247 ··· 456 454 fw_domains &= ~domain->mask; 457 455 } 458 456 459 - if (fw_domains) { 457 + if (fw_domains) 460 458 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); 461 - dev_priv->uncore.fw_domains_active |= fw_domains; 462 - } 463 459 } 464 460 465 461 /** ··· 968 968 fw_domain_arm_timer(domain); 969 969 970 970 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); 971 - dev_priv->uncore.fw_domains_active |= fw_domains; 972 971 } 973 972 974 973 static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
-3
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
··· 147 147 struct drm_gem_object *obj = buffer->priv; 148 148 int ret = 0; 149 149 150 - if (WARN_ON(!obj->filp)) 151 - return -EINVAL; 152 - 153 150 ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma); 154 151 if (ret < 0) 155 152 return ret;
+6
drivers/gpu/drm/radeon/si_dpm.c
··· 2984 2984 (rdev->pdev->device == 0x6667)) { 2985 2985 max_sclk = 75000; 2986 2986 } 2987 + } else if (rdev->family == CHIP_OLAND) { 2988 + if ((rdev->pdev->device == 0x6604) && 2989 + (rdev->pdev->subsystem_vendor == 0x1028) && 2990 + (rdev->pdev->subsystem_device == 0x066F)) { 2991 + max_sclk = 75000; 2992 + } 2987 2993 } 2988 2994 2989 2995 if (rps->vce_active) {
+24 -13
drivers/gpu/drm/tilcdc/tilcdc_crtc.c
··· 464 464 { 465 465 struct drm_device *dev = crtc->dev; 466 466 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 467 + unsigned long flags; 467 468 468 469 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 469 470 mutex_lock(&tilcdc_crtc->enable_lock); ··· 485 484 tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG, 486 485 LCDC_PALETTE_LOAD_MODE(DATA_ONLY), 487 486 LCDC_PALETTE_LOAD_MODE_MASK); 487 + 488 + /* There is no real chance for a race here as the time stamp 489 + * is taken before the raster DMA is started. The spin-lock is 490 + * taken to have a memory barrier after taking the time-stamp 491 + * and to avoid a context switch between taking the stamp and 492 + * enabling the raster. 493 + */ 494 + spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); 495 + tilcdc_crtc->last_vblank = ktime_get(); 488 496 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE); 497 + spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); 489 498 490 499 drm_crtc_vblank_on(crtc); 491 500 ··· 550 539 } 551 540 552 541 drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); 553 - tilcdc_crtc->last_vblank = 0; 554 542 555 543 tilcdc_crtc->enabled = false; 556 544 mutex_unlock(&tilcdc_crtc->enable_lock); ··· 612 602 { 613 603 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 614 604 struct drm_device *dev = crtc->dev; 615 - unsigned long flags; 616 605 617 606 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 618 607 ··· 623 614 drm_framebuffer_reference(fb); 624 615 625 616 crtc->primary->fb = fb; 617 + tilcdc_crtc->event = event; 626 618 627 - spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); 619 + mutex_lock(&tilcdc_crtc->enable_lock); 628 620 629 - if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) { 621 + if (tilcdc_crtc->enabled) { 622 + unsigned long flags; 630 623 ktime_t next_vblank; 631 624 s64 tdiff; 632 625 633 - next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, 634 - 1000000 / crtc->hwmode.vrefresh); 626 + 
spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); 635 627 628 + next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, 629 + 1000000 / crtc->hwmode.vrefresh); 636 630 tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get())); 637 631 638 632 if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US) 639 633 tilcdc_crtc->next_fb = fb; 634 + else 635 + set_scanout(crtc, fb); 636 + 637 + spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); 640 638 } 641 639 642 - if (tilcdc_crtc->next_fb != fb) 643 - set_scanout(crtc, fb); 644 - 645 - tilcdc_crtc->event = event; 646 - 647 - spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); 640 + mutex_unlock(&tilcdc_crtc->enable_lock); 648 641 649 642 return 0; 650 643 } ··· 1047 1036 1048 1037 fail: 1049 1038 tilcdc_crtc_destroy(crtc); 1050 - return -ENOMEM; 1039 + return ret; 1051 1040 }
+3 -2
drivers/hid/Kconfig
··· 175 175 Support for Cherry Cymotion keyboard. 176 176 177 177 config HID_CHICONY 178 - tristate "Chicony Tactical pad" 178 + tristate "Chicony devices" 179 179 depends on HID 180 180 default !EXPERT 181 181 ---help--- 182 - Support for Chicony Tactical pad. 182 + Support for Chicony Tactical pad and special keys on Chicony keyboards. 183 183 184 184 config HID_CORSAIR 185 185 tristate "Corsair devices" ··· 190 190 191 191 Supported devices: 192 192 - Vengeance K90 193 + - Scimitar PRO RGB 193 194 194 195 config HID_PRODIKEYS 195 196 tristate "Prodikeys PC-MIDI Keyboard support"
+1
drivers/hid/hid-chicony.c
··· 86 86 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, 87 87 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, 88 88 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, 89 + { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, 89 90 { } 90 91 }; 91 92 MODULE_DEVICE_TABLE(hid, ch_devices);
+2
drivers/hid/hid-core.c
··· 1870 1870 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, 1871 1871 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, 1872 1872 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) }, 1873 + { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, 1873 1874 { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, 1874 1875 { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, 1875 1876 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, ··· 1911 1910 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, 1912 1911 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, 1913 1912 { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, 1913 + { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, 1914 1914 { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, 1915 1915 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, 1916 1916 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
+47
drivers/hid/hid-corsair.c
··· 3 3 * 4 4 * Supported devices: 5 5 * - Vengeance K90 Keyboard 6 + * - Scimitar PRO RGB Gaming Mouse 6 7 * 7 8 * Copyright (c) 2015 Clement Vuchener 9 + * Copyright (c) 2017 Oscar Campos 8 10 */ 9 11 10 12 /* ··· 672 670 return 0; 673 671 } 674 672 673 + /* 674 + * The report descriptor of the Corsair Scimitar RGB Pro gaming mouse is 675 + * not parseable as it defines two consecutive Logical Minimum items for 676 + * the Usage Page (Consumer) in rdesc bytes 75 and 77, byte 77 being 0x16 677 + * when it should obviously be 0x26 for a Logical Maximum of 16 bits. This 678 + * prevents proper parsing of the report descriptor due to the Logical 679 + * Minimum being larger than the Logical Maximum. 680 + * 681 + * This driver fixes the report descriptor for: 682 + * - USB ID b1c:1b3e, sold as Scimitar RGB Pro Gaming mouse 683 + */ 684 + 685 + static __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, 686 + unsigned int *rsize) 687 + { 688 + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); 689 + 690 + if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { 691 + /* 692 + * Corsair Scimitar RGB Pro report descriptor is broken and 693 + * defines two different Logical Minimum for the Consumer 694 + * Application. 
The byte 77 should be a 0x26 defining a 16 695 + * bits integer for the Logical Maximum but it is a 0x16 696 + * instead (Logical Minimum) 697 + */ 698 + switch (hdev->product) { 699 + case USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB: 700 + if (*rsize >= 172 && rdesc[75] == 0x15 && rdesc[77] == 0x16 701 + && rdesc[78] == 0xff && rdesc[79] == 0x0f) { 702 + hid_info(hdev, "Fixing up report descriptor\n"); 703 + rdesc[77] = 0x26; 704 + } 705 + break; 706 + } 707 + 708 + } 709 + return rdesc; 710 + } 711 + 675 712 static const struct hid_device_id corsair_devices[] = { 676 713 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90), 677 714 .driver_data = CORSAIR_USE_K90_MACRO | 678 715 CORSAIR_USE_K90_BACKLIGHT }, 716 + { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, 717 + USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, 679 718 {} 680 719 }; 681 720 ··· 729 686 .event = corsair_event, 730 687 .remove = corsair_remove, 731 688 .input_mapping = corsair_input_mapping, 689 + .report_fixup = corsair_mouse_report_fixup, 732 690 }; 733 691 734 692 module_hid_driver(corsair_driver); 735 693 736 694 MODULE_LICENSE("GPL"); 695 + /* Original K90 driver author */ 737 696 MODULE_AUTHOR("Clement Vuchener"); 697 + /* Scimitar PRO RGB driver author */ 698 + MODULE_AUTHOR("Oscar Campos"); 738 699 MODULE_DESCRIPTION("HID driver for Corsair devices");
+4
drivers/hid/hid-ids.h
··· 278 278 #define USB_DEVICE_ID_CORSAIR_K70RGB 0x1b13 279 279 #define USB_DEVICE_ID_CORSAIR_STRAFE 0x1b15 280 280 #define USB_DEVICE_ID_CORSAIR_K65RGB 0x1b17 281 + #define USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE 0x1b38 282 + #define USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE 0x1b39 283 + #define USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB 0x1b3e 281 284 282 285 #define USB_VENDOR_ID_CREATIVELABS 0x041e 283 286 #define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c ··· 560 557 561 558 #define USB_VENDOR_ID_JESS 0x0c45 562 559 #define USB_DEVICE_ID_JESS_YUREX 0x1010 560 + #define USB_DEVICE_ID_JESS_ZEN_AIO_KBD 0x5112 563 561 564 562 #define USB_VENDOR_ID_JESS2 0x0f30 565 563 #define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111
+2
drivers/hid/hid-sony.c
··· 2632 2632 sony_leds_remove(sc); 2633 2633 if (sc->quirks & SONY_BATTERY_SUPPORT) 2634 2634 sony_battery_remove(sc); 2635 + if (sc->touchpad) 2636 + sony_unregister_touchpad(sc); 2635 2637 sony_cancel_work_sync(sc); 2636 2638 kfree(sc->output_report_dmabuf); 2637 2639 sony_remove_dev_list(sc);
+3
drivers/hid/usbhid/hid-quirks.c
··· 80 80 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS }, 81 81 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS }, 82 82 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 83 + { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 84 + { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 85 + { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 83 86 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, 84 87 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 85 88 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
+3 -1
drivers/hid/wacom_sys.c
··· 2579 2579 2580 2580 /* make sure we don't trigger the LEDs */ 2581 2581 wacom_led_groups_release(wacom); 2582 - wacom_release_resources(wacom); 2582 + 2583 + if (wacom->wacom_wac.features.type != REMOTE) 2584 + wacom_release_resources(wacom); 2583 2585 2584 2586 hid_set_drvdata(hdev, NULL); 2585 2587 }
+6 -4
drivers/hid/wacom_wac.c
··· 1959 1959 input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH); 1960 1960 input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL); 1961 1961 input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH); 1962 - input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE); 1963 - input_set_capability(input, EV_KEY, BTN_TOOL_LENS); 1962 + if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) { 1963 + input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE); 1964 + input_set_capability(input, EV_KEY, BTN_TOOL_LENS); 1965 + } 1964 1966 break; 1965 1967 case WACOM_HID_WD_FINGERWHEEL: 1966 1968 wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0); ··· 4199 4197 WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; 4200 4198 static const struct wacom_features wacom_features_0x360 = 4201 4199 { "Wacom Intuos Pro M", 44800, 29600, 8191, 63, 4202 - INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 }; 4200 + INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 }; 4203 4201 static const struct wacom_features wacom_features_0x361 = 4204 4202 { "Wacom Intuos Pro L", 62200, 43200, 8191, 63, 4205 - INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 }; 4203 + INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 }; 4206 4204 4207 4205 static const struct wacom_features wacom_features_HID_ANY_ID = 4208 4206 { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
+15 -12
drivers/md/dm.c
··· 989 989 struct dm_offload *o = container_of(cb, struct dm_offload, cb); 990 990 struct bio_list list; 991 991 struct bio *bio; 992 + int i; 992 993 993 994 INIT_LIST_HEAD(&o->cb.list); 994 995 995 996 if (unlikely(!current->bio_list)) 996 997 return; 997 998 998 - list = *current->bio_list; 999 - bio_list_init(current->bio_list); 999 + for (i = 0; i < 2; i++) { 1000 + list = current->bio_list[i]; 1001 + bio_list_init(&current->bio_list[i]); 1000 1002 1001 - while ((bio = bio_list_pop(&list))) { 1002 - struct bio_set *bs = bio->bi_pool; 1003 - if (unlikely(!bs) || bs == fs_bio_set) { 1004 - bio_list_add(current->bio_list, bio); 1005 - continue; 1003 + while ((bio = bio_list_pop(&list))) { 1004 + struct bio_set *bs = bio->bi_pool; 1005 + if (unlikely(!bs) || bs == fs_bio_set) { 1006 + bio_list_add(&current->bio_list[i], bio); 1007 + continue; 1008 + } 1009 + 1010 + spin_lock(&bs->rescue_lock); 1011 + bio_list_add(&bs->rescue_list, bio); 1012 + queue_work(bs->rescue_workqueue, &bs->rescue_work); 1013 + spin_unlock(&bs->rescue_lock); 1006 1014 } 1007 - 1008 - spin_lock(&bs->rescue_lock); 1009 - bio_list_add(&bs->rescue_list, bio); 1010 - queue_work(bs->rescue_workqueue, &bs->rescue_work); 1011 - spin_unlock(&bs->rescue_lock); 1012 1015 } 1013 1016 } 1014 1017
+1 -1
drivers/md/md-cluster.c
··· 777 777 bm_lockres->flags |= DLM_LKF_NOQUEUE; 778 778 ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW); 779 779 if (ret == -EAGAIN) { 780 - memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE); 781 780 s = read_resync_info(mddev, bm_lockres); 782 781 if (s) { 783 782 pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n", ··· 973 974 lockres_free(cinfo->bitmap_lockres); 974 975 unlock_all_bitmaps(mddev); 975 976 dlm_release_lockspace(cinfo->lockspace, 2); 977 + kfree(cinfo); 976 978 return 0; 977 979 } 978 980
+11 -16
drivers/md/md.c
··· 440 440 } 441 441 EXPORT_SYMBOL(md_flush_request); 442 442 443 - void md_unplug(struct blk_plug_cb *cb, bool from_schedule) 444 - { 445 - struct mddev *mddev = cb->data; 446 - md_wakeup_thread(mddev->thread); 447 - kfree(cb); 448 - } 449 - EXPORT_SYMBOL(md_unplug); 450 - 451 443 static inline struct mddev *mddev_get(struct mddev *mddev) 452 444 { 453 445 atomic_inc(&mddev->active); ··· 1879 1887 } 1880 1888 sb = page_address(rdev->sb_page); 1881 1889 sb->data_size = cpu_to_le64(num_sectors); 1882 - sb->super_offset = rdev->sb_start; 1890 + sb->super_offset = cpu_to_le64(rdev->sb_start); 1883 1891 sb->sb_csum = calc_sb_1_csum(sb); 1884 1892 do { 1885 1893 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, ··· 2287 2295 /* Check if any mddev parameters have changed */ 2288 2296 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || 2289 2297 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || 2290 - (mddev->layout != le64_to_cpu(sb->layout)) || 2298 + (mddev->layout != le32_to_cpu(sb->layout)) || 2291 2299 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || 2292 2300 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) 2293 2301 return true; ··· 6450 6458 mddev->layout = info->layout; 6451 6459 mddev->chunk_sectors = info->chunk_size >> 9; 6452 6460 6453 - mddev->max_disks = MD_SB_DISKS; 6454 - 6455 6461 if (mddev->persistent) { 6456 - mddev->flags = 0; 6457 - mddev->sb_flags = 0; 6462 + mddev->max_disks = MD_SB_DISKS; 6463 + mddev->flags = 0; 6464 + mddev->sb_flags = 0; 6458 6465 } 6459 6466 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6460 6467 ··· 6524 6533 return -ENOSPC; 6525 6534 } 6526 6535 rv = mddev->pers->resize(mddev, num_sectors); 6527 - if (!rv) 6528 - revalidate_disk(mddev->gendisk); 6536 + if (!rv) { 6537 + if (mddev->queue) { 6538 + set_capacity(mddev->gendisk, mddev->array_sectors); 6539 + revalidate_disk(mddev->gendisk); 6540 + } 6541 + } 6529 6542 return rv; 6530 6543 } 6531 6544
-6
drivers/md/md.h
··· 676 676 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, 677 677 struct mddev *mddev); 678 678 679 - extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule); 680 679 extern void md_reload_sb(struct mddev *mddev, int raid_disk); 681 680 extern void md_update_sb(struct mddev *mddev, int force); 682 681 extern void md_kick_rdev_from_array(struct md_rdev * rdev); 683 682 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr); 684 - static inline int mddev_check_plugged(struct mddev *mddev) 685 - { 686 - return !!blk_check_plugged(md_unplug, mddev, 687 - sizeof(struct blk_plug_cb)); 688 - } 689 683 690 684 static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev) 691 685 {
+24 -5
drivers/md/raid1.c
··· 1027 1027 static void freeze_array(struct r1conf *conf, int extra) 1028 1028 { 1029 1029 /* Stop sync I/O and normal I/O and wait for everything to 1030 - * go quite. 1030 + * go quiet. 1031 1031 * This is called in two situations: 1032 1032 * 1) management command handlers (reshape, remove disk, quiesce). 1033 1033 * 2) one normal I/O request failed. ··· 1587 1587 split = bio; 1588 1588 } 1589 1589 1590 - if (bio_data_dir(split) == READ) 1590 + if (bio_data_dir(split) == READ) { 1591 1591 raid1_read_request(mddev, split); 1592 - else 1592 + 1593 + /* 1594 + * If a bio is splitted, the first part of bio will 1595 + * pass barrier but the bio is queued in 1596 + * current->bio_list (see generic_make_request). If 1597 + * there is a raise_barrier() called here, the second 1598 + * part of bio can't pass barrier. But since the first 1599 + * part bio isn't dispatched to underlaying disks yet, 1600 + * the barrier is never released, hence raise_barrier 1601 + * will alays wait. We have a deadlock. 1602 + * Note, this only happens in read path. For write 1603 + * path, the first part of bio is dispatched in a 1604 + * schedule() call (because of blk plug) or offloaded 1605 + * to raid10d. 1606 + * Quitting from the function immediately can change 1607 + * the bio order queued in bio_list and avoid the deadlock. 1608 + */ 1609 + if (split != bio) { 1610 + generic_make_request(bio); 1611 + break; 1612 + } 1613 + } else 1593 1614 raid1_write_request(mddev, split); 1594 1615 } while (split != bio); 1595 1616 } ··· 3267 3246 return ret; 3268 3247 } 3269 3248 md_set_array_sectors(mddev, newsize); 3270 - set_capacity(mddev->gendisk, mddev->array_sectors); 3271 - revalidate_disk(mddev->gendisk); 3272 3249 if (sectors > mddev->dev_sectors && 3273 3250 mddev->recovery_cp > mddev->dev_sectors) { 3274 3251 mddev->recovery_cp = mddev->dev_sectors;
+36 -8
drivers/md/raid10.c
··· 974 974 !conf->barrier || 975 975 (atomic_read(&conf->nr_pending) && 976 976 current->bio_list && 977 - !bio_list_empty(current->bio_list)), 977 + (!bio_list_empty(&current->bio_list[0]) || 978 + !bio_list_empty(&current->bio_list[1]))), 978 979 conf->resync_lock); 979 980 conf->nr_waiting--; 980 981 if (!conf->nr_waiting) ··· 1478 1477 mbio->bi_bdev = (void*)rdev; 1479 1478 1480 1479 atomic_inc(&r10_bio->remaining); 1480 + 1481 + cb = blk_check_plugged(raid10_unplug, mddev, 1482 + sizeof(*plug)); 1483 + if (cb) 1484 + plug = container_of(cb, struct raid10_plug_cb, 1485 + cb); 1486 + else 1487 + plug = NULL; 1481 1488 spin_lock_irqsave(&conf->device_lock, flags); 1482 - bio_list_add(&conf->pending_bio_list, mbio); 1483 - conf->pending_count++; 1489 + if (plug) { 1490 + bio_list_add(&plug->pending, mbio); 1491 + plug->pending_cnt++; 1492 + } else { 1493 + bio_list_add(&conf->pending_bio_list, mbio); 1494 + conf->pending_count++; 1495 + } 1484 1496 spin_unlock_irqrestore(&conf->device_lock, flags); 1485 - if (!mddev_check_plugged(mddev)) 1497 + if (!plug) 1486 1498 md_wakeup_thread(mddev->thread); 1487 1499 } 1488 1500 } ··· 1585 1571 split = bio; 1586 1572 } 1587 1573 1574 + /* 1575 + * If a bio is splitted, the first part of bio will pass 1576 + * barrier but the bio is queued in current->bio_list (see 1577 + * generic_make_request). If there is a raise_barrier() called 1578 + * here, the second part of bio can't pass barrier. But since 1579 + * the first part bio isn't dispatched to underlaying disks 1580 + * yet, the barrier is never released, hence raise_barrier will 1581 + * alays wait. We have a deadlock. 1582 + * Note, this only happens in read path. For write path, the 1583 + * first part of bio is dispatched in a schedule() call 1584 + * (because of blk plug) or offloaded to raid10d. 1585 + * Quitting from the function immediately can change the bio 1586 + * order queued in bio_list and avoid the deadlock. 
1587 + */ 1588 1588 __make_request(mddev, split); 1589 + if (split != bio && bio_data_dir(bio) == READ) { 1590 + generic_make_request(bio); 1591 + break; 1592 + } 1589 1593 } while (split != bio); 1590 1594 1591 1595 /* In case raid10d snuck in to freeze_array */ ··· 3975 3943 return ret; 3976 3944 } 3977 3945 md_set_array_sectors(mddev, size); 3978 - if (mddev->queue) { 3979 - set_capacity(mddev->gendisk, mddev->array_sectors); 3980 - revalidate_disk(mddev->gendisk); 3981 - } 3982 3946 if (sectors > mddev->dev_sectors && 3983 3947 mddev->recovery_cp > oldsize) { 3984 3948 mddev->recovery_cp = oldsize;
+2 -3
drivers/md/raid5.c
··· 1401 1401 (test_bit(R5_Wantdrain, &dev->flags) || 1402 1402 test_bit(R5_InJournal, &dev->flags))) || 1403 1403 (srctype == SYNDROME_SRC_WRITTEN && 1404 - dev->written)) { 1404 + (dev->written || 1405 + test_bit(R5_InJournal, &dev->flags)))) { 1405 1406 if (test_bit(R5_InJournal, &dev->flags)) 1406 1407 srcs[slot] = sh->dev[i].orig_page; 1407 1408 else ··· 7606 7605 return ret; 7607 7606 } 7608 7607 md_set_array_sectors(mddev, newsize); 7609 - set_capacity(mddev->gendisk, mddev->array_sectors); 7610 - revalidate_disk(mddev->gendisk); 7611 7608 if (sectors > mddev->dev_sectors && 7612 7609 mddev->recovery_cp > mddev->dev_sectors) { 7613 7610 mddev->recovery_cp = mddev->dev_sectors;
+16 -14
drivers/net/ethernet/amd/xgbe/xgbe-common.h
··· 984 984 #define XP_ECC_CNT1_DESC_DED_WIDTH 8 985 985 #define XP_ECC_CNT1_DESC_SEC_INDEX 0 986 986 #define XP_ECC_CNT1_DESC_SEC_WIDTH 8 987 - #define XP_ECC_IER_DESC_DED_INDEX 0 987 + #define XP_ECC_IER_DESC_DED_INDEX 5 988 988 #define XP_ECC_IER_DESC_DED_WIDTH 1 989 - #define XP_ECC_IER_DESC_SEC_INDEX 1 989 + #define XP_ECC_IER_DESC_SEC_INDEX 4 990 990 #define XP_ECC_IER_DESC_SEC_WIDTH 1 991 - #define XP_ECC_IER_RX_DED_INDEX 2 991 + #define XP_ECC_IER_RX_DED_INDEX 3 992 992 #define XP_ECC_IER_RX_DED_WIDTH 1 993 - #define XP_ECC_IER_RX_SEC_INDEX 3 993 + #define XP_ECC_IER_RX_SEC_INDEX 2 994 994 #define XP_ECC_IER_RX_SEC_WIDTH 1 995 - #define XP_ECC_IER_TX_DED_INDEX 4 995 + #define XP_ECC_IER_TX_DED_INDEX 1 996 996 #define XP_ECC_IER_TX_DED_WIDTH 1 997 - #define XP_ECC_IER_TX_SEC_INDEX 5 997 + #define XP_ECC_IER_TX_SEC_INDEX 0 998 998 #define XP_ECC_IER_TX_SEC_WIDTH 1 999 - #define XP_ECC_ISR_DESC_DED_INDEX 0 999 + #define XP_ECC_ISR_DESC_DED_INDEX 5 1000 1000 #define XP_ECC_ISR_DESC_DED_WIDTH 1 1001 - #define XP_ECC_ISR_DESC_SEC_INDEX 1 1001 + #define XP_ECC_ISR_DESC_SEC_INDEX 4 1002 1002 #define XP_ECC_ISR_DESC_SEC_WIDTH 1 1003 - #define XP_ECC_ISR_RX_DED_INDEX 2 1003 + #define XP_ECC_ISR_RX_DED_INDEX 3 1004 1004 #define XP_ECC_ISR_RX_DED_WIDTH 1 1005 - #define XP_ECC_ISR_RX_SEC_INDEX 3 1005 + #define XP_ECC_ISR_RX_SEC_INDEX 2 1006 1006 #define XP_ECC_ISR_RX_SEC_WIDTH 1 1007 - #define XP_ECC_ISR_TX_DED_INDEX 4 1007 + #define XP_ECC_ISR_TX_DED_INDEX 1 1008 1008 #define XP_ECC_ISR_TX_DED_WIDTH 1 1009 - #define XP_ECC_ISR_TX_SEC_INDEX 5 1009 + #define XP_ECC_ISR_TX_SEC_INDEX 0 1010 1010 #define XP_ECC_ISR_TX_SEC_WIDTH 1 1011 1011 #define XP_I2C_MUTEX_BUSY_INDEX 31 1012 1012 #define XP_I2C_MUTEX_BUSY_WIDTH 1 ··· 1148 1148 #define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1 1149 1149 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1 1150 1150 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 1151 - #define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2 1152 - #define 
RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1 1151 + #define RX_PACKET_ATTRIBUTES_LAST_INDEX 2 1152 + #define RX_PACKET_ATTRIBUTES_LAST_WIDTH 1 1153 1153 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 1154 1154 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 1155 1155 #define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 ··· 1158 1158 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 1159 1159 #define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 1160 1160 #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 1161 + #define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7 1162 + #define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1 1161 1163 1162 1164 #define RX_NORMAL_DESC0_OVT_INDEX 0 1163 1165 #define RX_NORMAL_DESC0_OVT_WIDTH 16
+11 -9
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
··· 1896 1896 1897 1897 /* Get the header length */ 1898 1898 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) { 1899 + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1900 + FIRST, 1); 1899 1901 rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, 1900 1902 RX_NORMAL_DESC2, HL); 1901 1903 if (rdata->rx.hdr_len) 1902 1904 pdata->ext_stats.rx_split_header_packets++; 1905 + } else { 1906 + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1907 + FIRST, 0); 1903 1908 } 1904 1909 1905 1910 /* Get the RSS hash */ ··· 1927 1922 } 1928 1923 } 1929 1924 1930 - /* Get the packet length */ 1931 - rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); 1932 - 1933 - if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) { 1934 - /* Not all the data has been transferred for this packet */ 1935 - XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1936 - INCOMPLETE, 1); 1925 + /* Not all the data has been transferred for this packet */ 1926 + if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) 1937 1927 return 0; 1938 - } 1939 1928 1940 1929 /* This is the last of the data for this packet */ 1941 1930 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1942 - INCOMPLETE, 0); 1931 + LAST, 1); 1932 + 1933 + /* Get the packet length */ 1934 + rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); 1943 1935 1944 1936 /* Set checksum done indicator as appropriate */ 1945 1937 if (netdev->features & NETIF_F_RXCSUM)
+63 -39
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
··· 1972 1972 { 1973 1973 struct sk_buff *skb; 1974 1974 u8 *packet; 1975 - unsigned int copy_len; 1976 1975 1977 1976 skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len); 1978 1977 if (!skb) 1979 1978 return NULL; 1980 1979 1981 - /* Start with the header buffer which may contain just the header 1980 + /* Pull in the header buffer which may contain just the header 1982 1981 * or the header plus data 1983 1982 */ 1984 1983 dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base, ··· 1986 1987 1987 1988 packet = page_address(rdata->rx.hdr.pa.pages) + 1988 1989 rdata->rx.hdr.pa.pages_offset; 1989 - copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len; 1990 - copy_len = min(rdata->rx.hdr.dma_len, copy_len); 1991 - skb_copy_to_linear_data(skb, packet, copy_len); 1992 - skb_put(skb, copy_len); 1993 - 1994 - len -= copy_len; 1995 - if (len) { 1996 - /* Add the remaining data as a frag */ 1997 - dma_sync_single_range_for_cpu(pdata->dev, 1998 - rdata->rx.buf.dma_base, 1999 - rdata->rx.buf.dma_off, 2000 - rdata->rx.buf.dma_len, 2001 - DMA_FROM_DEVICE); 2002 - 2003 - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 2004 - rdata->rx.buf.pa.pages, 2005 - rdata->rx.buf.pa.pages_offset, 2006 - len, rdata->rx.buf.dma_len); 2007 - rdata->rx.buf.pa.pages = NULL; 2008 - } 1990 + skb_copy_to_linear_data(skb, packet, len); 1991 + skb_put(skb, len); 2009 1992 2010 1993 return skb; 1994 + } 1995 + 1996 + static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata, 1997 + struct xgbe_packet_data *packet) 1998 + { 1999 + /* Always zero if not the first descriptor */ 2000 + if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) 2001 + return 0; 2002 + 2003 + /* First descriptor with split header, return header length */ 2004 + if (rdata->rx.hdr_len) 2005 + return rdata->rx.hdr_len; 2006 + 2007 + /* First descriptor but not the last descriptor and no split header, 2008 + * so the full buffer was used 2009 + */ 2010 + if (!XGMAC_GET_BITS(packet->attributes, 
RX_PACKET_ATTRIBUTES, LAST)) 2011 + return rdata->rx.hdr.dma_len; 2012 + 2013 + /* First descriptor and last descriptor and no split header, so 2014 + * calculate how much of the buffer was used 2015 + */ 2016 + return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len); 2017 + } 2018 + 2019 + static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata, 2020 + struct xgbe_packet_data *packet, 2021 + unsigned int len) 2022 + { 2023 + /* Always the full buffer if not the last descriptor */ 2024 + if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) 2025 + return rdata->rx.buf.dma_len; 2026 + 2027 + /* Last descriptor so calculate how much of the buffer was used 2028 + * for the last bit of data 2029 + */ 2030 + return rdata->rx.len - len; 2011 2031 } 2012 2032 2013 2033 static int xgbe_tx_poll(struct xgbe_channel *channel) ··· 2111 2093 struct napi_struct *napi; 2112 2094 struct sk_buff *skb; 2113 2095 struct skb_shared_hwtstamps *hwtstamps; 2114 - unsigned int incomplete, error, context_next, context; 2115 - unsigned int len, rdesc_len, max_len; 2096 + unsigned int last, error, context_next, context; 2097 + unsigned int len, buf1_len, buf2_len, max_len; 2116 2098 unsigned int received = 0; 2117 2099 int packet_count = 0; 2118 2100 ··· 2122 2104 if (!ring) 2123 2105 return 0; 2124 2106 2125 - incomplete = 0; 2107 + last = 0; 2126 2108 context_next = 0; 2127 2109 2128 2110 napi = (pdata->per_channel_irq) ? 
&channel->napi : &pdata->napi; ··· 2156 2138 received++; 2157 2139 ring->cur++; 2158 2140 2159 - incomplete = XGMAC_GET_BITS(packet->attributes, 2160 - RX_PACKET_ATTRIBUTES, 2161 - INCOMPLETE); 2141 + last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 2142 + LAST); 2162 2143 context_next = XGMAC_GET_BITS(packet->attributes, 2163 2144 RX_PACKET_ATTRIBUTES, 2164 2145 CONTEXT_NEXT); ··· 2166 2149 CONTEXT); 2167 2150 2168 2151 /* Earlier error, just drain the remaining data */ 2169 - if ((incomplete || context_next) && error) 2152 + if ((!last || context_next) && error) 2170 2153 goto read_again; 2171 2154 2172 2155 if (error || packet->errors) { ··· 2178 2161 } 2179 2162 2180 2163 if (!context) { 2181 - /* Length is cumulative, get this descriptor's length */ 2182 - rdesc_len = rdata->rx.len - len; 2183 - len += rdesc_len; 2164 + /* Get the data length in the descriptor buffers */ 2165 + buf1_len = xgbe_rx_buf1_len(rdata, packet); 2166 + len += buf1_len; 2167 + buf2_len = xgbe_rx_buf2_len(rdata, packet, len); 2168 + len += buf2_len; 2184 2169 2185 - if (rdesc_len && !skb) { 2170 + if (!skb) { 2186 2171 skb = xgbe_create_skb(pdata, napi, rdata, 2187 - rdesc_len); 2188 - if (!skb) 2172 + buf1_len); 2173 + if (!skb) { 2189 2174 error = 1; 2190 - } else if (rdesc_len) { 2175 + goto skip_data; 2176 + } 2177 + } 2178 + 2179 + if (buf2_len) { 2191 2180 dma_sync_single_range_for_cpu(pdata->dev, 2192 2181 rdata->rx.buf.dma_base, 2193 2182 rdata->rx.buf.dma_off, ··· 2203 2180 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 2204 2181 rdata->rx.buf.pa.pages, 2205 2182 rdata->rx.buf.pa.pages_offset, 2206 - rdesc_len, 2183 + buf2_len, 2207 2184 rdata->rx.buf.dma_len); 2208 2185 rdata->rx.buf.pa.pages = NULL; 2209 2186 } 2210 2187 } 2211 2188 2212 - if (incomplete || context_next) 2189 + skip_data: 2190 + if (!last || context_next) 2213 2191 goto read_again; 2214 2192 2215 2193 if (!skb) ··· 2268 2244 } 2269 2245 2270 2246 /* Check if we need to save state before 
leaving */ 2271 - if (received && (incomplete || context_next)) { 2247 + if (received && (!last || context_next)) { 2272 2248 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 2273 2249 rdata->state_saved = 1; 2274 2250 rdata->state.skb = skb;
+1
drivers/net/ethernet/aquantia/atlantic/aq_main.c
··· 98 98 99 99 if (err < 0) 100 100 goto err_exit; 101 + ndev->mtu = new_mtu; 101 102 102 103 if (netif_running(ndev)) { 103 104 aq_ndev_close(ndev);
+1
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
··· 137 137 .tx_rings = HW_ATL_A0_TX_RINGS, 138 138 .rx_rings = HW_ATL_A0_RX_RINGS, 139 139 .hw_features = NETIF_F_HW_CSUM | 140 + NETIF_F_RXCSUM | 140 141 NETIF_F_RXHASH | 141 142 NETIF_F_SG | 142 143 NETIF_F_TSO,
+1
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
··· 188 188 .tx_rings = HW_ATL_B0_TX_RINGS, 189 189 .rx_rings = HW_ATL_B0_RX_RINGS, 190 190 .hw_features = NETIF_F_HW_CSUM | 191 + NETIF_F_RXCSUM | 191 192 NETIF_F_RXHASH | 192 193 NETIF_F_SG | 193 194 NETIF_F_TSO |
+4 -2
drivers/net/ethernet/broadcom/genet/bcmgenet.c
··· 3599 3599 3600 3600 bcmgenet_netif_stop(dev); 3601 3601 3602 - phy_suspend(priv->phydev); 3602 + if (!device_may_wakeup(d)) 3603 + phy_suspend(priv->phydev); 3603 3604 3604 3605 netif_device_detach(dev); 3605 3606 ··· 3697 3696 3698 3697 netif_device_attach(dev); 3699 3698 3700 - phy_resume(priv->phydev); 3699 + if (!device_may_wakeup(d)) 3700 + phy_resume(priv->phydev); 3701 3701 3702 3702 if (priv->eee.eee_enabled) 3703 3703 bcmgenet_eee_enable_set(dev, true);
-17
drivers/net/ethernet/broadcom/genet/bcmmii.c
··· 222 222 } 223 223 } 224 224 225 - static void bcmgenet_internal_phy_setup(struct net_device *dev) 226 - { 227 - struct bcmgenet_priv *priv = netdev_priv(dev); 228 - u32 reg; 229 - 230 - /* Power up PHY */ 231 - bcmgenet_phy_power_set(dev, true); 232 - if (!GENET_IS_V5(priv)) { 233 - /* enable APD */ 234 - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); 235 - reg |= EXT_PWR_DN_EN_LD; 236 - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); 237 - } 238 - bcmgenet_mii_reset(dev); 239 - } 240 - 241 225 static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) 242 226 { 243 227 u32 reg; ··· 271 287 272 288 if (priv->internal_phy) { 273 289 phy_name = "internal PHY"; 274 - bcmgenet_internal_phy_setup(dev); 275 290 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { 276 291 phy_name = "MoCA"; 277 292 bcmgenet_moca_phy_setup(priv);
+1 -1
drivers/net/ethernet/brocade/bna/bnad_debugfs.c
··· 325 325 return PTR_ERR(kern_buf); 326 326 327 327 rc = sscanf(kern_buf, "%x:%x", &addr, &len); 328 - if (rc < 2) { 328 + if (rc < 2 || len > UINT_MAX >> 2) { 329 329 netdev_warn(bnad->netdev, "failed to read user buffer\n"); 330 330 kfree(kern_buf); 331 331 return -EINVAL;
+2
drivers/net/ethernet/ibm/ibmvnic.c
··· 1347 1347 release_sub_crq_queue(adapter, 1348 1348 adapter->tx_scrq[i]); 1349 1349 } 1350 + kfree(adapter->tx_scrq); 1350 1351 adapter->tx_scrq = NULL; 1351 1352 } 1352 1353 ··· 1360 1359 release_sub_crq_queue(adapter, 1361 1360 adapter->rx_scrq[i]); 1362 1361 } 1362 + kfree(adapter->rx_scrq); 1363 1363 adapter->rx_scrq = NULL; 1364 1364 } 1365 1365 }
+11
drivers/net/ethernet/mellanox/mlx4/cmd.c
··· 2305 2305 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)); 2306 2306 if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) { 2307 2307 /* PCI might be offline */ 2308 + 2309 + /* If device removal has been requested, 2310 + * do not continue retrying. 2311 + */ 2312 + if (dev->persist->interface_state & 2313 + MLX4_INTERFACE_STATE_NOWAIT) { 2314 + mlx4_warn(dev, 2315 + "communication channel is offline\n"); 2316 + return -EIO; 2317 + } 2318 + 2308 2319 msleep(100); 2309 2320 wr_toggle = swab32(readl(&priv->mfunc.comm-> 2310 2321 slave_write));
+11
drivers/net/ethernet/mellanox/mlx4/main.c
··· 1940 1940 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); 1941 1941 if (!offline_bit) 1942 1942 return 0; 1943 + 1944 + /* If device removal has been requested, 1945 + * do not continue retrying. 1946 + */ 1947 + if (dev->persist->interface_state & 1948 + MLX4_INTERFACE_STATE_NOWAIT) 1949 + break; 1950 + 1943 1951 /* There are cases as part of AER/Reset flow that PF needs 1944 1952 * around 100 msec to load. We therefore sleep for 100 msec 1945 1953 * to allow other tasks to make use of that CPU during this ··· 3962 3954 struct mlx4_priv *priv = mlx4_priv(dev); 3963 3955 struct devlink *devlink = priv_to_devlink(priv); 3964 3956 int active_vfs = 0; 3957 + 3958 + if (mlx4_is_slave(dev)) 3959 + persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT; 3965 3960 3966 3961 mutex_lock(&persist->interface_state_mutex); 3967 3962 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
+4
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 361 361 case MLX5_CMD_OP_QUERY_VPORT_COUNTER: 362 362 case MLX5_CMD_OP_ALLOC_Q_COUNTER: 363 363 case MLX5_CMD_OP_QUERY_Q_COUNTER: 364 + case MLX5_CMD_OP_SET_RATE_LIMIT: 365 + case MLX5_CMD_OP_QUERY_RATE_LIMIT: 364 366 case MLX5_CMD_OP_ALLOC_PD: 365 367 case MLX5_CMD_OP_ALLOC_UAR: 366 368 case MLX5_CMD_OP_CONFIG_INT_MODERATION: ··· 499 497 MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); 500 498 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); 501 499 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); 500 + MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT); 501 + MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT); 502 502 MLX5_COMMAND_STR_CASE(ALLOC_PD); 503 503 MLX5_COMMAND_STR_CASE(DEALLOC_PD); 504 504 MLX5_COMMAND_STR_CASE(ALLOC_UAR);
-4
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 928 928 int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); 929 929 void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); 930 930 u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout); 931 - void mlx5e_add_vxlan_port(struct net_device *netdev, 932 - struct udp_tunnel_info *ti); 933 - void mlx5e_del_vxlan_port(struct net_device *netdev, 934 - struct udp_tunnel_info *ti); 935 931 936 932 int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, 937 933 void *sp);
+4 -4
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 3102 3102 vf_stats); 3103 3103 } 3104 3104 3105 - void mlx5e_add_vxlan_port(struct net_device *netdev, 3106 - struct udp_tunnel_info *ti) 3105 + static void mlx5e_add_vxlan_port(struct net_device *netdev, 3106 + struct udp_tunnel_info *ti) 3107 3107 { 3108 3108 struct mlx5e_priv *priv = netdev_priv(netdev); 3109 3109 ··· 3116 3116 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); 3117 3117 } 3118 3118 3119 - void mlx5e_del_vxlan_port(struct net_device *netdev, 3120 - struct udp_tunnel_info *ti) 3119 + static void mlx5e_del_vxlan_port(struct net_device *netdev, 3120 + struct udp_tunnel_info *ti) 3121 3121 { 3122 3122 struct mlx5e_priv *priv = netdev_priv(netdev); 3123 3123
-2
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 393 393 .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name, 394 394 .ndo_setup_tc = mlx5e_rep_ndo_setup_tc, 395 395 .ndo_get_stats64 = mlx5e_rep_get_stats, 396 - .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, 397 - .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, 398 396 .ndo_has_offload_stats = mlx5e_has_offload_stats, 399 397 .ndo_get_offload_stats = mlx5e_get_offload_stats, 400 398 };
+4
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 601 601 if (lro_num_seg > 1) { 602 602 mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); 603 603 skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); 604 + /* Subtract one since we already counted this as one 605 + * "regular" packet in mlx5e_complete_rx_cqe() 606 + */ 607 + rq->stats.packets += lro_num_seg - 1; 604 608 rq->stats.lro_packets++; 605 609 rq->stats.lro_bytes += cqe_bcnt; 606 610 }
+50 -24
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 133 133 return rule; 134 134 } 135 135 136 + static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, 137 + struct mlx5e_tc_flow *flow) 138 + { 139 + struct mlx5_fc *counter = NULL; 140 + 141 + if (!IS_ERR(flow->rule)) { 142 + counter = mlx5_flow_rule_counter(flow->rule); 143 + mlx5_del_flow_rules(flow->rule); 144 + mlx5_fc_destroy(priv->mdev, counter); 145 + } 146 + 147 + if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { 148 + mlx5_destroy_flow_table(priv->fs.tc.t); 149 + priv->fs.tc.t = NULL; 150 + } 151 + } 152 + 136 153 static struct mlx5_flow_handle * 137 154 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, 138 155 struct mlx5_flow_spec *spec, ··· 166 149 } 167 150 168 151 static void mlx5e_detach_encap(struct mlx5e_priv *priv, 169 - struct mlx5e_tc_flow *flow) { 152 + struct mlx5e_tc_flow *flow); 153 + 154 + static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, 155 + struct mlx5e_tc_flow *flow) 156 + { 157 + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 158 + 159 + mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr); 160 + 161 + mlx5_eswitch_del_vlan_action(esw, flow->attr); 162 + 163 + if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) 164 + mlx5e_detach_encap(priv, flow); 165 + } 166 + 167 + static void mlx5e_detach_encap(struct mlx5e_priv *priv, 168 + struct mlx5e_tc_flow *flow) 169 + { 170 170 struct list_head *next = flow->encap.next; 171 171 172 172 list_del(&flow->encap); ··· 207 173 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, 208 174 struct mlx5e_tc_flow *flow) 209 175 { 210 - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 211 - struct mlx5_fc *counter = NULL; 212 - 213 - if (!IS_ERR(flow->rule)) { 214 - counter = mlx5_flow_rule_counter(flow->rule); 215 - mlx5_del_flow_rules(flow->rule); 216 - mlx5_fc_destroy(priv->mdev, counter); 217 - } 218 - 219 - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { 220 - mlx5_eswitch_del_vlan_action(esw, flow->attr); 221 - if (flow->attr->action & 
MLX5_FLOW_CONTEXT_ACTION_ENCAP) 222 - mlx5e_detach_encap(priv, flow); 223 - } 224 - 225 - if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { 226 - mlx5_destroy_flow_table(priv->fs.tc.t); 227 - priv->fs.tc.t = NULL; 228 - } 176 + if (flow->flags & MLX5E_TC_FLOW_ESWITCH) 177 + mlx5e_tc_del_fdb_flow(priv, flow); 178 + else 179 + mlx5e_tc_del_nic_flow(priv, flow); 229 180 } 230 181 231 182 static void parse_vxlan_attr(struct mlx5_flow_spec *spec, ··· 267 248 skb_flow_dissector_target(f->dissector, 268 249 FLOW_DISSECTOR_KEY_ENC_PORTS, 269 250 f->mask); 251 + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 252 + struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw); 253 + struct mlx5e_priv *up_priv = netdev_priv(up_dev); 270 254 271 255 /* Full udp dst port must be given */ 272 256 if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) 273 257 goto vxlan_match_offload_err; 274 258 275 - if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) && 259 + if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) && 276 260 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) 277 261 parse_vxlan_attr(spec, f); 278 262 else { ··· 998 976 struct mlx5_esw_flow_attr *attr) 999 977 { 1000 978 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 979 + struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw); 980 + struct mlx5e_priv *up_priv = netdev_priv(up_dev); 1001 981 unsigned short family = ip_tunnel_info_af(tun_info); 1002 982 struct ip_tunnel_key *key = &tun_info->key; 1003 983 struct mlx5_encap_entry *e; ··· 1020 996 return -EOPNOTSUPP; 1021 997 } 1022 998 1023 - if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) && 999 + if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) && 1024 1000 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { 1025 1001 tunnel_type = MLX5_HEADER_TYPE_VXLAN; 1026 1002 } else { ··· 1136 1112 } 1137 1113 1138 1114 if (is_tcf_vlan(a)) { 1139 - if (tcf_vlan_action(a) == VLAN_F_POP) { 1115 + if (tcf_vlan_action(a) == 
TCA_VLAN_ACT_POP) { 1140 1116 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; 1141 - } else if (tcf_vlan_action(a) == VLAN_F_PUSH) { 1117 + } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { 1142 1118 if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q)) 1143 1119 return -EOPNOTSUPP; 1144 1120 1145 1121 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; 1146 1122 attr->vlan = tcf_vlan_push_vid(a); 1123 + } else { /* action is TCA_VLAN_ACT_MODIFY */ 1124 + return -EOPNOTSUPP; 1147 1125 } 1148 1126 continue; 1149 1127 }
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
··· 274 274 sq->stats.tso_bytes += skb->len - ihs; 275 275 } 276 276 277 + sq->stats.packets += skb_shinfo(skb)->gso_segs; 277 278 num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; 278 279 } else { 279 280 bf = sq->bf_budget && 280 281 !skb->xmit_more && 281 282 !skb_shinfo(skb)->nr_frags; 282 283 ihs = mlx5e_get_inline_hdr_size(sq, skb, bf); 284 + sq->stats.packets++; 283 285 num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); 284 286 } 285 287 288 + sq->stats.bytes += num_bytes; 286 289 wi->num_bytes = num_bytes; 287 290 288 291 ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; ··· 384 381 if (bf) 385 382 sq->bf_budget--; 386 383 387 - sq->stats.packets++; 388 - sq->stats.bytes += num_bytes; 389 384 return NETDEV_TX_OK; 390 385 391 386 dma_unmap_wqe_err:
+6
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 209 209 struct mlx5_eswitch_rep *vport_reps; 210 210 DECLARE_HASHTABLE(encap_tbl, 8); 211 211 u8 inline_mode; 212 + u64 num_flows; 212 213 }; 213 214 214 215 struct mlx5_eswitch { ··· 272 271 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, 273 272 struct mlx5_flow_spec *spec, 274 273 struct mlx5_esw_flow_attr *attr); 274 + void 275 + mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, 276 + struct mlx5_flow_handle *rule, 277 + struct mlx5_esw_flow_attr *attr); 278 + 275 279 struct mlx5_flow_handle * 276 280 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn); 277 281
+22
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 93 93 spec, &flow_act, dest, i); 94 94 if (IS_ERR(rule)) 95 95 mlx5_fc_destroy(esw->dev, counter); 96 + else 97 + esw->offloads.num_flows++; 96 98 97 99 return rule; 100 + } 101 + 102 + void 103 + mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, 104 + struct mlx5_flow_handle *rule, 105 + struct mlx5_esw_flow_attr *attr) 106 + { 107 + struct mlx5_fc *counter = NULL; 108 + 109 + if (!IS_ERR(rule)) { 110 + counter = mlx5_flow_rule_counter(rule); 111 + mlx5_del_flow_rules(rule); 112 + mlx5_fc_destroy(esw->dev, counter); 113 + esw->offloads.num_flows--; 114 + } 98 115 } 99 116 100 117 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) ··· 924 907 if (MLX5_CAP_ETH(dev, wqe_inline_mode) != 925 908 MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) 926 909 return -EOPNOTSUPP; 910 + 911 + if (esw->offloads.num_flows > 0) { 912 + esw_warn(dev, "Can't set inline mode when flows are configured\n"); 913 + return -EOPNOTSUPP; 914 + } 927 915 928 916 err = esw_inline_mode_from_devlink(mode, &mlx5_mode); 929 917 if (err)
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 87 87 [2] = { 88 88 .mask = MLX5_PROF_MASK_QP_SIZE | 89 89 MLX5_PROF_MASK_MR_CACHE, 90 - .log_max_qp = 17, 90 + .log_max_qp = 18, 91 91 .mr_cache[0] = { 92 92 .size = 500, 93 93 .limit = 250
+1 -1
drivers/net/ethernet/sfc/efx.c
··· 2404 2404 tnl.type = (u16)efx_tunnel_type; 2405 2405 tnl.port = ti->port; 2406 2406 2407 - if (efx->type->udp_tnl_add_port) 2407 + if (efx->type->udp_tnl_del_port) 2408 2408 (void)efx->type->udp_tnl_del_port(efx, tnl); 2409 2409 } 2410 2410
+8 -2
drivers/net/ethernet/ti/Kconfig
··· 74 74 will be called cpsw. 75 75 76 76 config TI_CPTS 77 - tristate "TI Common Platform Time Sync (CPTS) Support" 77 + bool "TI Common Platform Time Sync (CPTS) Support" 78 78 depends on TI_CPSW || TI_KEYSTONE_NETCP 79 - imply PTP_1588_CLOCK 79 + depends on PTP_1588_CLOCK 80 80 ---help--- 81 81 This driver supports the Common Platform Time Sync unit of 82 82 the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem. 83 83 The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the 84 84 driver offers a PTP Hardware Clock. 85 + 86 + config TI_CPTS_MOD 87 + tristate 88 + depends on TI_CPTS 89 + default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y 90 + default m 85 91 86 92 config TI_KEYSTONE_NETCP 87 93 tristate "TI Keystone NETCP Core Support"
+1 -1
drivers/net/ethernet/ti/Makefile
··· 12 12 obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o 13 13 obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o 14 14 obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o 15 - obj-$(CONFIG_TI_CPTS) += cpts.o 15 + obj-$(CONFIG_TI_CPTS_MOD) += cpts.o 16 16 obj-$(CONFIG_TI_CPSW) += ti_cpsw.o 17 17 ti_cpsw-y := cpsw.o 18 18
+72 -6
drivers/net/fjes/fjes_main.c
··· 45 45 MODULE_LICENSE("GPL"); 46 46 MODULE_VERSION(DRV_VERSION); 47 47 48 + #define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02" 49 + 48 50 static int fjes_request_irq(struct fjes_adapter *); 49 51 static void fjes_free_irq(struct fjes_adapter *); 50 52 ··· 80 78 static int fjes_poll(struct napi_struct *, int); 81 79 82 80 static const struct acpi_device_id fjes_acpi_ids[] = { 83 - {"PNP0C02", 0}, 81 + {ACPI_MOTHERBOARD_RESOURCE_HID, 0}, 84 82 {"", 0}, 85 83 }; 86 84 MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids); ··· 117 115 }, 118 116 }; 119 117 120 - static int fjes_acpi_add(struct acpi_device *device) 118 + static bool is_extended_socket_device(struct acpi_device *device) 121 119 { 122 120 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; 123 121 char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1]; 124 - struct platform_device *plat_dev; 125 122 union acpi_object *str; 126 123 acpi_status status; 127 124 int result; 128 125 129 126 status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer); 130 127 if (ACPI_FAILURE(status)) 131 - return -ENODEV; 128 + return false; 132 129 133 130 str = buffer.pointer; 134 131 result = utf16s_to_utf8s((wchar_t *)str->string.pointer, ··· 137 136 138 137 if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) { 139 138 kfree(buffer.pointer); 140 - return -ENODEV; 139 + return false; 141 140 } 142 141 kfree(buffer.pointer); 142 + 143 + return true; 144 + } 145 + 146 + static int acpi_check_extended_socket_status(struct acpi_device *device) 147 + { 148 + unsigned long long sta; 149 + acpi_status status; 150 + 151 + status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta); 152 + if (ACPI_FAILURE(status)) 153 + return -ENODEV; 154 + 155 + if (!((sta & ACPI_STA_DEVICE_PRESENT) && 156 + (sta & ACPI_STA_DEVICE_ENABLED) && 157 + (sta & ACPI_STA_DEVICE_UI) && 158 + (sta & ACPI_STA_DEVICE_FUNCTIONING))) 159 + return -ENODEV; 160 + 161 + return 0; 162 + } 163 + 164 + static int fjes_acpi_add(struct acpi_device 
*device) 165 + { 166 + struct platform_device *plat_dev; 167 + acpi_status status; 168 + 169 + if (!is_extended_socket_device(device)) 170 + return -ENODEV; 171 + 172 + if (acpi_check_extended_socket_status(device)) 173 + return -ENODEV; 143 174 144 175 status = acpi_walk_resources(device->handle, METHOD_NAME__CRS, 145 176 fjes_get_acpi_resource, fjes_resource); ··· 1349 1316 netdev->min_mtu = fjes_support_mtu[0]; 1350 1317 netdev->max_mtu = fjes_support_mtu[3]; 1351 1318 netdev->flags |= IFF_BROADCAST; 1352 - netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER; 1319 + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1353 1320 } 1354 1321 1355 1322 static void fjes_irq_watch_task(struct work_struct *work) ··· 1506 1473 } 1507 1474 } 1508 1475 1476 + static acpi_status 1477 + acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level, 1478 + void *context, void **return_value) 1479 + { 1480 + struct acpi_device *device; 1481 + bool *found = context; 1482 + int result; 1483 + 1484 + result = acpi_bus_get_device(obj_handle, &device); 1485 + if (result) 1486 + return AE_OK; 1487 + 1488 + if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID)) 1489 + return AE_OK; 1490 + 1491 + if (!is_extended_socket_device(device)) 1492 + return AE_OK; 1493 + 1494 + if (acpi_check_extended_socket_status(device)) 1495 + return AE_OK; 1496 + 1497 + *found = true; 1498 + return AE_CTRL_TERMINATE; 1499 + } 1500 + 1509 1501 /* fjes_init_module - Driver Registration Routine */ 1510 1502 static int __init fjes_init_module(void) 1511 1503 { 1504 + bool found = false; 1512 1505 int result; 1506 + 1507 + acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, 1508 + acpi_find_extended_socket_device, NULL, &found, 1509 + NULL); 1510 + 1511 + if (!found) 1512 + return -ENODEV; 1513 1513 1514 1514 pr_info("%s - version %s - %s\n", 1515 1515 fjes_driver_string, fjes_driver_version, fjes_copyright);
+2
drivers/net/tun.c
··· 1931 1931 return -EINVAL; 1932 1932 1933 1933 tun->set_features = features; 1934 + tun->dev->wanted_features &= ~TUN_USER_FEATURES; 1935 + tun->dev->wanted_features |= features; 1934 1936 netdev_update_features(tun->dev); 1935 1937 1936 1938 return 0;
+6
drivers/net/usb/qmi_wwan.c
··· 580 580 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69), 581 581 .driver_info = (unsigned long)&qmi_wwan_info, 582 582 }, 583 + { /* Motorola Mapphone devices with MDM6600 */ 584 + USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff), 585 + .driver_info = (unsigned long)&qmi_wwan_info, 586 + }, 583 587 584 588 /* 2. Combined interface devices matching on class+protocol */ 585 589 { /* Huawei E367 and possibly others in "Windows mode" */ ··· 929 925 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 930 926 {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ 931 927 {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ 928 + {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ 929 + {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ 932 930 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ 933 931 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ 934 932 {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
+17 -7
drivers/net/usb/r8152.c
··· 32 32 #define NETNEXT_VERSION "08" 33 33 34 34 /* Information for net */ 35 - #define NET_VERSION "8" 35 + #define NET_VERSION "9" 36 36 37 37 #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION 38 38 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" ··· 501 501 #define RTL8153_RMS RTL8153_MAX_PACKET 502 502 #define RTL8152_TX_TIMEOUT (5 * HZ) 503 503 #define RTL8152_NAPI_WEIGHT 64 504 + #define rx_reserved_size(x) ((x) + VLAN_ETH_HLEN + CRC_SIZE + \ 505 + sizeof(struct rx_desc) + RX_ALIGN) 504 506 505 507 /* rtl8152 flags */ 506 508 enum rtl8152_flags { ··· 1364 1362 spin_lock_init(&tp->rx_lock); 1365 1363 spin_lock_init(&tp->tx_lock); 1366 1364 INIT_LIST_HEAD(&tp->tx_free); 1365 + INIT_LIST_HEAD(&tp->rx_done); 1367 1366 skb_queue_head_init(&tp->tx_queue); 1368 1367 skb_queue_head_init(&tp->rx_queue); 1369 1368 ··· 2256 2253 2257 2254 static void r8153_set_rx_early_size(struct r8152 *tp) 2258 2255 { 2259 - u32 mtu = tp->netdev->mtu; 2260 - u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8; 2256 + u32 ocp_data = (agg_buf_sz - rx_reserved_size(tp->netdev->mtu)) / 4; 2261 2257 2262 2258 ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data); 2263 2259 } ··· 2901 2899 2902 2900 rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); 2903 2901 2904 - ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS); 2902 + ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE; 2903 + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data); 2905 2904 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); 2906 2905 2907 2906 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0); ··· 2954 2951 usleep_range(1000, 2000); 2955 2952 } 2956 2953 2957 - ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS); 2954 + ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE; 2955 + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data); 2958 2956 2959 2957 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG); 2960 2958 
ocp_data &= ~TEREDO_WAKE_MASK; ··· 4216 4212 4217 4213 dev->mtu = new_mtu; 4218 4214 4219 - if (netif_running(dev) && netif_carrier_ok(dev)) 4220 - r8153_set_rx_early_size(tp); 4215 + if (netif_running(dev)) { 4216 + u32 rms = new_mtu + VLAN_ETH_HLEN + CRC_SIZE; 4217 + 4218 + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rms); 4219 + 4220 + if (netif_carrier_ok(dev)) 4221 + r8153_set_rx_early_size(tp); 4222 + } 4221 4223 4222 4224 mutex_unlock(&tp->control); 4223 4225
+3 -1
drivers/net/vrf.c
··· 546 546 } 547 547 548 548 if (rt6_local) { 549 - if (rt6_local->rt6i_idev) 549 + if (rt6_local->rt6i_idev) { 550 550 in6_dev_put(rt6_local->rt6i_idev); 551 + rt6_local->rt6i_idev = NULL; 552 + } 551 553 552 554 dst = &rt6_local->dst; 553 555 dev_put(dst->dev);
+1 -1
drivers/net/wireless/ath/ath10k/hw.c
··· 51 51 .rtc_soc_base_address = 0x00000800, 52 52 .rtc_wmac_base_address = 0x00001000, 53 53 .soc_core_base_address = 0x0003a000, 54 - .wlan_mac_base_address = 0x00020000, 54 + .wlan_mac_base_address = 0x00010000, 55 55 .ce_wrapper_base_address = 0x00034000, 56 56 .ce0_base_address = 0x00034400, 57 57 .ce1_base_address = 0x00034800,
+3 -2
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
··· 2319 2319 { 2320 2320 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2321 2321 2322 - /* Called when we need to transmit (a) frame(s) from agg queue */ 2322 + /* Called when we need to transmit (a) frame(s) from agg or dqa queue */ 2323 2323 2324 2324 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, 2325 2325 tids, more_data, true); ··· 2338 2338 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { 2339 2339 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2340 2340 2341 - if (tid_data->state != IWL_AGG_ON && 2341 + if (!iwl_mvm_is_dqa_supported(mvm) && 2342 + tid_data->state != IWL_AGG_ON && 2342 2343 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) 2343 2344 continue; 2344 2345
+6 -5
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
··· 3135 3135 struct ieee80211_sta *sta, 3136 3136 enum ieee80211_frame_release_type reason, 3137 3137 u16 cnt, u16 tids, bool more_data, 3138 - bool agg) 3138 + bool single_sta_queue) 3139 3139 { 3140 3140 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 3141 3141 struct iwl_mvm_add_sta_cmd cmd = { ··· 3155 3155 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) 3156 3156 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]); 3157 3157 3158 - /* If we're releasing frames from aggregation queues then check if the 3159 - * all queues combined that we're releasing frames from have 3158 + /* If we're releasing frames from aggregation or dqa queues then check 3159 + * if all the queues that we're releasing frames from, combined, have: 3160 3160 * - more frames than the service period, in which case more_data 3161 3161 * needs to be set 3162 3162 * - fewer than 'cnt' frames, in which case we need to adjust the 3163 3163 * firmware command (but do that unconditionally) 3164 3164 */ 3165 - if (agg) { 3165 + if (single_sta_queue) { 3166 3166 int remaining = cnt; 3167 3167 int sleep_tx_count; 3168 3168 ··· 3172 3172 u16 n_queued; 3173 3173 3174 3174 tid_data = &mvmsta->tid_data[tid]; 3175 - if (WARN(tid_data->state != IWL_AGG_ON && 3175 + if (WARN(!iwl_mvm_is_dqa_supported(mvm) && 3176 + tid_data->state != IWL_AGG_ON && 3176 3177 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA, 3177 3178 "TID %d state is %d\n", 3178 3179 tid, tid_data->state)) {
+1 -1
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
··· 547 547 struct ieee80211_sta *sta, 548 548 enum ieee80211_frame_release_type reason, 549 549 u16 cnt, u16 tids, bool more_data, 550 - bool agg); 550 + bool single_sta_queue); 551 551 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, 552 552 bool drain); 553 553 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
+18 -23
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
··· 7 7 * 8 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 - * Copyright(c) 2016 Intel Deutschland GmbH 10 + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 11 11 * 12 12 * This program is free software; you can redistribute it and/or modify 13 13 * it under the terms of version 2 of the GNU General Public License as ··· 34 34 * 35 35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 37 + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 37 38 * All rights reserved. 38 39 * 39 40 * Redistribution and use in source and binary forms, with or without ··· 629 628 * values. 630 629 * Note that we don't need to make sure it isn't agg'd, since we're 631 630 * TXing non-sta 631 + * For DQA mode - we shouldn't increase it though 632 632 */ 633 - atomic_inc(&mvm->pending_frames[sta_id]); 633 + if (!iwl_mvm_is_dqa_supported(mvm)) 634 + atomic_inc(&mvm->pending_frames[sta_id]); 634 635 635 636 return 0; 636 637 } ··· 1008 1005 1009 1006 spin_unlock(&mvmsta->lock); 1010 1007 1011 - /* Increase pending frames count if this isn't AMPDU */ 1012 - if ((iwl_mvm_is_dqa_supported(mvm) && 1013 - mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON && 1014 - mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) || 1015 - (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)) 1008 + /* Increase pending frames count if this isn't AMPDU or DQA queue */ 1009 + if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu) 1016 1010 atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); 1017 1011 1018 1012 return 0; ··· 1079 1079 lockdep_assert_held(&mvmsta->lock); 1080 1080 1081 1081 if ((tid_data->state == IWL_AGG_ON || 1082 - tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && 1082 + tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA || 1083 + iwl_mvm_is_dqa_supported(mvm)) && 1083 1084 iwl_mvm_tid_queued(tid_data) == 0) { 1084 1085 /* 
1085 - * Now that this aggregation queue is empty tell mac80211 so it 1086 - * knows we no longer have frames buffered for the station on 1087 - * this TID (for the TIM bitmap calculation.) 1086 + * Now that this aggregation or DQA queue is empty tell 1087 + * mac80211 so it knows we no longer have frames buffered for 1088 + * the station on this TID (for the TIM bitmap calculation.) 1088 1089 */ 1089 1090 ieee80211_sta_set_buffered(sta, tid, false); 1090 1091 } ··· 1258 1257 u8 skb_freed = 0; 1259 1258 u16 next_reclaimed, seq_ctl; 1260 1259 bool is_ndp = false; 1261 - bool txq_agg = false; /* Is this TXQ aggregated */ 1262 1260 1263 1261 __skb_queue_head_init(&skbs); 1264 1262 ··· 1283 1283 info->flags |= IEEE80211_TX_STAT_ACK; 1284 1284 break; 1285 1285 case TX_STATUS_FAIL_DEST_PS: 1286 + /* In DQA, the FW should have stopped the queue and not 1287 + * return this status 1288 + */ 1289 + WARN_ON(iwl_mvm_is_dqa_supported(mvm)); 1286 1290 info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 1287 1291 break; 1288 1292 default: ··· 1391 1387 bool send_eosp_ndp = false; 1392 1388 1393 1389 spin_lock_bh(&mvmsta->lock); 1394 - if (iwl_mvm_is_dqa_supported(mvm)) { 1395 - enum iwl_mvm_agg_state state; 1396 - 1397 - state = mvmsta->tid_data[tid].state; 1398 - txq_agg = (state == IWL_AGG_ON || 1399 - state == IWL_EMPTYING_HW_QUEUE_DELBA); 1400 - } else { 1401 - txq_agg = txq_id >= mvm->first_agg_queue; 1402 - } 1403 1390 1404 1391 if (!is_ndp) { 1405 1392 tid_data->next_reclaimed = next_reclaimed; ··· 1447 1452 * If the txq is not an AMPDU queue, there is no chance we freed 1448 1453 * several skbs. Check that out... 
1449 1454 */ 1450 - if (txq_agg) 1455 + if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) 1451 1456 goto out; 1452 1457 1453 1458 /* We can't free more than one frame at once on a shared queue */ 1454 - WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1)); 1459 + WARN_ON(skb_freed > 1); 1455 1460 1456 1461 /* If we have still frames for this STA nothing to do here */ 1457 1462 if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
+7 -4
drivers/net/wireless/marvell/mwifiex/main.c
··· 57 57 * In case of any errors during inittialization, this function also ensures 58 58 * proper cleanup before exiting. 59 59 */ 60 - static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops, 61 - void **padapter) 60 + static int mwifiex_register(void *card, struct device *dev, 61 + struct mwifiex_if_ops *if_ops, void **padapter) 62 62 { 63 63 struct mwifiex_adapter *adapter; 64 64 int i; ··· 68 68 return -ENOMEM; 69 69 70 70 *padapter = adapter; 71 + adapter->dev = dev; 71 72 adapter->card = card; 72 73 73 74 /* Save interface specific operations in adapter */ ··· 1569 1568 { 1570 1569 struct mwifiex_adapter *adapter; 1571 1570 1572 - if (mwifiex_register(card, if_ops, (void **)&adapter)) { 1571 + if (mwifiex_register(card, dev, if_ops, (void **)&adapter)) { 1573 1572 pr_err("%s: software init failed\n", __func__); 1574 1573 goto err_init_sw; 1575 1574 } 1576 1575 1577 - adapter->dev = dev; 1578 1576 mwifiex_probe_of(adapter); 1579 1577 1580 1578 adapter->iface_type = iface_type; ··· 1717 1717 1718 1718 wiphy_unregister(adapter->wiphy); 1719 1719 wiphy_free(adapter->wiphy); 1720 + 1721 + if (adapter->irq_wakeup >= 0) 1722 + device_init_wakeup(adapter->dev, false); 1720 1723 1721 1724 /* Unregister device */ 1722 1725 mwifiex_dbg(adapter, INFO,
+19 -19
drivers/net/wireless/marvell/mwifiex/pcie.c
··· 2739 2739 schedule_work(&card->work); 2740 2740 } 2741 2741 2742 + static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter) 2743 + { 2744 + struct pcie_service_card *card = adapter->card; 2745 + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; 2746 + 2747 + if (reg->sleep_cookie) 2748 + mwifiex_pcie_delete_sleep_cookie_buf(adapter); 2749 + 2750 + mwifiex_pcie_delete_cmdrsp_buf(adapter); 2751 + mwifiex_pcie_delete_evtbd_ring(adapter); 2752 + mwifiex_pcie_delete_rxbd_ring(adapter); 2753 + mwifiex_pcie_delete_txbd_ring(adapter); 2754 + card->cmdrsp_buf = NULL; 2755 + } 2756 + 2742 2757 /* 2743 2758 * This function initializes the PCI-E host memory space, WCB rings, etc. 2744 2759 * ··· 2865 2850 2866 2851 /* 2867 2852 * This function cleans up the allocated card buffers. 2868 - * 2869 - * The following are freed by this function - 2870 - * - TXBD ring buffers 2871 - * - RXBD ring buffers 2872 - * - Event BD ring buffers 2873 - * - Command response ring buffer 2874 - * - Sleep cookie buffer 2875 2853 */ 2876 2854 static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) 2877 2855 { ··· 2882 2874 mwifiex_dbg(adapter, ERROR, 2883 2875 "Failed to write driver not-ready signature\n"); 2884 2876 } 2877 + 2878 + mwifiex_pcie_free_buffers(adapter); 2885 2879 2886 2880 if (pdev) { 2887 2881 pci_iounmap(pdev, card->pci_mmap); ··· 3136 3126 pci_iounmap(pdev, card->pci_mmap1); 3137 3127 } 3138 3128 3139 - /* This function cleans up the PCI-E host memory space. 3140 - * Some code is extracted from mwifiex_unregister_dev() 3141 - * 3142 - */ 3129 + /* This function cleans up the PCI-E host memory space. 
*/ 3143 3130 static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) 3144 3131 { 3145 3132 struct pcie_service_card *card = adapter->card; ··· 3147 3140 3148 3141 adapter->seq_num = 0; 3149 3142 3150 - if (reg->sleep_cookie) 3151 - mwifiex_pcie_delete_sleep_cookie_buf(adapter); 3152 - 3153 - mwifiex_pcie_delete_cmdrsp_buf(adapter); 3154 - mwifiex_pcie_delete_evtbd_ring(adapter); 3155 - mwifiex_pcie_delete_rxbd_ring(adapter); 3156 - mwifiex_pcie_delete_txbd_ring(adapter); 3157 - card->cmdrsp_buf = NULL; 3143 + mwifiex_pcie_free_buffers(adapter); 3158 3144 } 3159 3145 3160 3146 static struct mwifiex_if_ops pcie_ops = {
+3 -3
drivers/remoteproc/Kconfig
··· 76 76 depends on OF && ARCH_QCOM 77 77 depends on REMOTEPROC 78 78 depends on QCOM_SMEM 79 - depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) 79 + depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) 80 80 select MFD_SYSCON 81 81 select QCOM_MDT_LOADER 82 82 select QCOM_RPROC_COMMON ··· 93 93 depends on OF && ARCH_QCOM 94 94 depends on QCOM_SMEM 95 95 depends on REMOTEPROC 96 - depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) 96 + depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) 97 97 select MFD_SYSCON 98 98 select QCOM_RPROC_COMMON 99 99 select QCOM_SCM ··· 104 104 config QCOM_WCNSS_PIL 105 105 tristate "Qualcomm WCNSS Peripheral Image Loader" 106 106 depends on OF && ARCH_QCOM 107 - depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) 107 + depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) 108 108 depends on QCOM_SMEM 109 109 depends on REMOTEPROC 110 110 select QCOM_MDT_LOADER
-14
drivers/scsi/Kconfig
··· 1253 1253 This makes debugging information from the lpfc driver 1254 1254 available via the debugfs filesystem. 1255 1255 1256 - config LPFC_NVME_INITIATOR 1257 - bool "Emulex LightPulse Fibre Channel NVME Initiator Support" 1258 - depends on SCSI_LPFC && NVME_FC 1259 - ---help--- 1260 - This enables NVME Initiator support in the Emulex lpfc driver. 1261 - 1262 - config LPFC_NVME_TARGET 1263 - bool "Emulex LightPulse Fibre Channel NVME Initiator Support" 1264 - depends on SCSI_LPFC && NVME_TARGET_FC 1265 - ---help--- 1266 - This enables NVME Target support in the Emulex lpfc driver. 1267 - Target enablement must still be enabled on a per adapter 1268 - basis by module parameters. 1269 - 1270 1256 config SCSI_SIM710 1271 1257 tristate "Simple 53c710 SCSI support (Compaq, NCR machines)" 1272 1258 depends on (EISA || MCA) && SCSI
+32 -21
drivers/scsi/hpsa.c
··· 2956 2956 /* fill_cmd can't fail here, no data buffer to map. */ 2957 2957 (void) fill_cmd(c, reset_type, h, NULL, 0, 0, 2958 2958 scsi3addr, TYPE_MSG); 2959 - rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); 2959 + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); 2960 2960 if (rc) { 2961 2961 dev_warn(&h->pdev->dev, "Failed to send reset command\n"); 2962 2962 goto out; ··· 3714 3714 * # (integer code indicating one of several NOT READY states 3715 3715 * describing why a volume is to be kept offline) 3716 3716 */ 3717 - static int hpsa_volume_offline(struct ctlr_info *h, 3717 + static unsigned char hpsa_volume_offline(struct ctlr_info *h, 3718 3718 unsigned char scsi3addr[]) 3719 3719 { 3720 3720 struct CommandList *c; ··· 3735 3735 DEFAULT_TIMEOUT); 3736 3736 if (rc) { 3737 3737 cmd_free(h, c); 3738 - return 0; 3738 + return HPSA_VPD_LV_STATUS_UNSUPPORTED; 3739 3739 } 3740 3740 sense = c->err_info->SenseInfo; 3741 3741 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) ··· 3746 3746 cmd_status = c->err_info->CommandStatus; 3747 3747 scsi_status = c->err_info->ScsiStatus; 3748 3748 cmd_free(h, c); 3749 - /* Is the volume 'not ready'? */ 3750 - if (cmd_status != CMD_TARGET_STATUS || 3751 - scsi_status != SAM_STAT_CHECK_CONDITION || 3752 - sense_key != NOT_READY || 3753 - asc != ASC_LUN_NOT_READY) { 3754 - return 0; 3755 - } 3756 3749 3757 3750 /* Determine the reason for not ready state */ 3758 3751 ldstat = hpsa_get_volume_status(h, scsi3addr); 3759 3752 3760 3753 /* Keep volume offline in certain cases: */ 3761 3754 switch (ldstat) { 3755 + case HPSA_LV_FAILED: 3762 3756 case HPSA_LV_UNDERGOING_ERASE: 3763 3757 case HPSA_LV_NOT_AVAILABLE: 3764 3758 case HPSA_LV_UNDERGOING_RPI: ··· 3774 3780 default: 3775 3781 break; 3776 3782 } 3777 - return 0; 3783 + return HPSA_LV_OK; 3778 3784 } 3779 3785 3780 3786 /* ··· 3847 3853 /* Do an inquiry to the device to see what it is. 
*/ 3848 3854 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, 3849 3855 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { 3850 - /* Inquiry failed (msg printed already) */ 3851 3856 dev_err(&h->pdev->dev, 3852 - "hpsa_update_device_info: inquiry failed\n"); 3853 - rc = -EIO; 3857 + "%s: inquiry failed, device will be skipped.\n", 3858 + __func__); 3859 + rc = HPSA_INQUIRY_FAILED; 3854 3860 goto bail_out; 3855 3861 } 3856 3862 ··· 3879 3885 if ((this_device->devtype == TYPE_DISK || 3880 3886 this_device->devtype == TYPE_ZBC) && 3881 3887 is_logical_dev_addr_mode(scsi3addr)) { 3882 - int volume_offline; 3888 + unsigned char volume_offline; 3883 3889 3884 3890 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); 3885 3891 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) 3886 3892 hpsa_get_ioaccel_status(h, scsi3addr, this_device); 3887 3893 volume_offline = hpsa_volume_offline(h, scsi3addr); 3888 - if (volume_offline < 0 || volume_offline > 0xff) 3889 - volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED; 3890 - this_device->volume_offline = volume_offline & 0xff; 3894 + if (volume_offline == HPSA_LV_FAILED) { 3895 + rc = HPSA_LV_FAILED; 3896 + dev_err(&h->pdev->dev, 3897 + "%s: LV failed, device will be skipped.\n", 3898 + __func__); 3899 + goto bail_out; 3900 + } 3891 3901 } else { 3892 3902 this_device->raid_level = RAID_UNKNOWN; 3893 3903 this_device->offload_config = 0; ··· 4377 4379 goto out; 4378 4380 } 4379 4381 if (rc) { 4380 - dev_warn(&h->pdev->dev, 4381 - "Inquiry failed, skipping device.\n"); 4382 + h->drv_req_rescan = 1; 4382 4383 continue; 4383 4384 } 4384 4385 ··· 5555 5558 5556 5559 spin_lock_irqsave(&h->scan_lock, flags); 5557 5560 h->scan_finished = 1; 5558 - wake_up_all(&h->scan_wait_queue); 5561 + wake_up(&h->scan_wait_queue); 5559 5562 spin_unlock_irqrestore(&h->scan_lock, flags); 5560 5563 } 5561 5564 ··· 5573 5576 if (unlikely(lockup_detected(h))) 5574 5577 return hpsa_scan_complete(h); 5575 5578 5579 + /* 5580 + * If a scan is already waiting 
to run, no need to add another 5581 + */ 5582 + spin_lock_irqsave(&h->scan_lock, flags); 5583 + if (h->scan_waiting) { 5584 + spin_unlock_irqrestore(&h->scan_lock, flags); 5585 + return; 5586 + } 5587 + 5588 + spin_unlock_irqrestore(&h->scan_lock, flags); 5589 + 5576 5590 /* wait until any scan already in progress is finished. */ 5577 5591 while (1) { 5578 5592 spin_lock_irqsave(&h->scan_lock, flags); 5579 5593 if (h->scan_finished) 5580 5594 break; 5595 + h->scan_waiting = 1; 5581 5596 spin_unlock_irqrestore(&h->scan_lock, flags); 5582 5597 wait_event(h->scan_wait_queue, h->scan_finished); 5583 5598 /* Note: We don't need to worry about a race between this ··· 5599 5590 */ 5600 5591 } 5601 5592 h->scan_finished = 0; /* mark scan as in progress */ 5593 + h->scan_waiting = 0; 5602 5594 spin_unlock_irqrestore(&h->scan_lock, flags); 5603 5595 5604 5596 if (unlikely(lockup_detected(h))) ··· 8802 8792 init_waitqueue_head(&h->event_sync_wait_queue); 8803 8793 mutex_init(&h->reset_mutex); 8804 8794 h->scan_finished = 1; /* no scan currently in progress */ 8795 + h->scan_waiting = 0; 8805 8796 8806 8797 pci_set_drvdata(pdev, h); 8807 8798 h->ndevices = 0;
+1
drivers/scsi/hpsa.h
··· 201 201 dma_addr_t errinfo_pool_dhandle; 202 202 unsigned long *cmd_pool_bits; 203 203 int scan_finished; 204 + u8 scan_waiting : 1; 204 205 spinlock_t scan_lock; 205 206 wait_queue_head_t scan_wait_queue; 206 207
+2
drivers/scsi/hpsa_cmd.h
··· 156 156 #define CFGTBL_BusType_Fibre2G 0x00000200l 157 157 158 158 /* VPD Inquiry types */ 159 + #define HPSA_INQUIRY_FAILED 0x02 159 160 #define HPSA_VPD_SUPPORTED_PAGES 0x00 160 161 #define HPSA_VPD_LV_DEVICE_ID 0x83 161 162 #define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1 ··· 167 166 /* Logical volume states */ 168 167 #define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff 169 168 #define HPSA_LV_OK 0x0 169 + #define HPSA_LV_FAILED 0x01 170 170 #define HPSA_LV_NOT_AVAILABLE 0x0b 171 171 #define HPSA_LV_UNDERGOING_ERASE 0x0F 172 172 #define HPSA_LV_UNDERGOING_RPI 0x12
+2 -2
drivers/scsi/lpfc/lpfc_attr.c
··· 3315 3315 * lpfc_enable_fc4_type: Defines what FC4 types are supported. 3316 3316 * Supported Values: 1 - register just FCP 3317 3317 * 3 - register both FCP and NVME 3318 - * Supported values are [1,3]. Default value is 3 3318 + * Supported values are [1,3]. Default value is 1 3319 3319 */ 3320 - LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH, 3320 + LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP, 3321 3321 LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH, 3322 3322 "Define fc4 type to register with fabric."); 3323 3323
+7
drivers/scsi/lpfc/lpfc_init.c
··· 5891 5891 /* Check to see if it matches any module parameter */ 5892 5892 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 5893 5893 if (wwn == lpfc_enable_nvmet[i]) { 5894 + #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 5894 5895 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5895 5896 "6017 NVME Target %016llx\n", 5896 5897 wwn); 5897 5898 phba->nvmet_support = 1; /* a match */ 5899 + #else 5900 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5901 + "6021 Can't enable NVME Target." 5902 + " NVME_TARGET_FC infrastructure" 5903 + " is not in kernel\n"); 5904 + #endif 5898 5905 } 5899 5906 } 5900 5907 }
+4 -4
drivers/scsi/lpfc/lpfc_nvme.c
··· 2149 2149 /* localport is allocated from the stack, but the registration 2150 2150 * call allocates heap memory as well as the private area. 2151 2151 */ 2152 - #ifdef CONFIG_LPFC_NVME_INITIATOR 2152 + #if (IS_ENABLED(CONFIG_NVME_FC)) 2153 2153 ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, 2154 2154 &vport->phba->pcidev->dev, &localport); 2155 2155 #else ··· 2190 2190 void 2191 2191 lpfc_nvme_destroy_localport(struct lpfc_vport *vport) 2192 2192 { 2193 - #ifdef CONFIG_LPFC_NVME_INITIATOR 2193 + #if (IS_ENABLED(CONFIG_NVME_FC)) 2194 2194 struct nvme_fc_local_port *localport; 2195 2195 struct lpfc_nvme_lport *lport; 2196 2196 struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL; ··· 2274 2274 int 2275 2275 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2276 2276 { 2277 - #ifdef CONFIG_LPFC_NVME_INITIATOR 2277 + #if (IS_ENABLED(CONFIG_NVME_FC)) 2278 2278 int ret = 0; 2279 2279 struct nvme_fc_local_port *localport; 2280 2280 struct lpfc_nvme_lport *lport; ··· 2403 2403 void 2404 2404 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2405 2405 { 2406 - #ifdef CONFIG_LPFC_NVME_INITIATOR 2406 + #if (IS_ENABLED(CONFIG_NVME_FC)) 2407 2407 int ret; 2408 2408 struct nvme_fc_local_port *localport; 2409 2409 struct lpfc_nvme_lport *lport;
+4 -4
drivers/scsi/lpfc/lpfc_nvmet.c
··· 671 671 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | 672 672 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED; 673 673 674 - #ifdef CONFIG_LPFC_NVME_TARGET 674 + #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 675 675 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate, 676 676 &phba->pcidev->dev, 677 677 &phba->targetport); ··· 756 756 void 757 757 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) 758 758 { 759 - #ifdef CONFIG_LPFC_NVME_TARGET 759 + #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 760 760 struct lpfc_nvmet_tgtport *tgtp; 761 761 762 762 if (phba->nvmet_support == 0) ··· 788 788 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 789 789 struct hbq_dmabuf *nvmebuf) 790 790 { 791 - #ifdef CONFIG_LPFC_NVME_TARGET 791 + #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 792 792 struct lpfc_nvmet_tgtport *tgtp; 793 793 struct fc_frame_header *fc_hdr; 794 794 struct lpfc_nvmet_rcv_ctx *ctxp; ··· 891 891 struct rqb_dmabuf *nvmebuf, 892 892 uint64_t isr_timestamp) 893 893 { 894 - #ifdef CONFIG_LPFC_NVME_TARGET 894 + #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 895 895 struct lpfc_nvmet_rcv_ctx *ctxp; 896 896 struct lpfc_nvmet_tgtport *tgtp; 897 897 struct fc_frame_header *fc_hdr;
+2 -2
drivers/scsi/megaraid/megaraid_sas.h
··· 35 35 /* 36 36 * MegaRAID SAS Driver meta data 37 37 */ 38 - #define MEGASAS_VERSION "07.701.16.00-rc1" 39 - #define MEGASAS_RELDATE "February 2, 2017" 38 + #define MEGASAS_VERSION "07.701.17.00-rc1" 39 + #define MEGASAS_RELDATE "March 2, 2017" 40 40 41 41 /* 42 42 * Device IDs
+12 -5
drivers/scsi/megaraid/megaraid_sas_base.c
··· 1963 1963 if (!mr_device_priv_data) 1964 1964 return -ENOMEM; 1965 1965 sdev->hostdata = mr_device_priv_data; 1966 + 1967 + atomic_set(&mr_device_priv_data->r1_ldio_hint, 1968 + instance->r1_ldio_hint_default); 1966 1969 return 0; 1967 1970 } 1968 1971 ··· 5037 5034 &instance->irq_context[j]); 5038 5035 /* Retry irq register for IO_APIC*/ 5039 5036 instance->msix_vectors = 0; 5040 - if (is_probe) 5037 + if (is_probe) { 5038 + pci_free_irq_vectors(instance->pdev); 5041 5039 return megasas_setup_irqs_ioapic(instance); 5042 - else 5040 + } else { 5043 5041 return -1; 5042 + } 5044 5043 } 5045 5044 } 5046 5045 return 0; ··· 5282 5277 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 5283 5278 } 5284 5279 5285 - i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); 5286 - if (i < 0) 5287 - goto fail_setup_irqs; 5280 + if (!instance->msix_vectors) { 5281 + i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); 5282 + if (i < 0) 5283 + goto fail_setup_irqs; 5284 + } 5288 5285 5289 5286 dev_info(&instance->pdev->dev, 5290 5287 "firmware supports msix\t: (%d)", fw_msix_count);
+2 -2
drivers/scsi/megaraid/megaraid_sas_fusion.c
··· 2159 2159 cpu_sel = MR_RAID_CTX_CPUSEL_1; 2160 2160 2161 2161 if (is_stream_detected(rctx_g35) && 2162 - (raid->level == 5) && 2162 + ((raid->level == 5) || (raid->level == 6)) && 2163 2163 (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) && 2164 2164 (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS)) 2165 2165 cpu_sel = MR_RAID_CTX_CPUSEL_0; ··· 2338 2338 fp_possible = false; 2339 2339 atomic_dec(&instance->fw_outstanding); 2340 2340 } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) || 2341 - atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) { 2341 + (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) { 2342 2342 fp_possible = false; 2343 2343 atomic_dec(&instance->fw_outstanding); 2344 2344 if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
+1
drivers/scsi/qla2xxx/Kconfig
··· 3 3 depends on PCI && SCSI 4 4 depends on SCSI_FC_ATTRS 5 5 select FW_LOADER 6 + select BTREE 6 7 ---help--- 7 8 This qla2xxx driver supports all QLogic Fibre Channel 8 9 PCI and PCIe host adapters.
+1 -3
drivers/scsi/qla2xxx/qla_attr.c
··· 2154 2154 "Timer for the VP[%d] has stopped\n", vha->vp_idx); 2155 2155 } 2156 2156 2157 - BUG_ON(atomic_read(&vha->vref_count)); 2158 - 2159 2157 qla2x00_free_fcports(vha); 2160 2158 2161 2159 mutex_lock(&ha->vport_lock); ··· 2164 2166 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, 2165 2167 vha->gnl.ldma); 2166 2168 2167 - if (vha->qpair->vp_idx == vha->vp_idx) { 2169 + if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) { 2168 2170 if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS) 2169 2171 ql_log(ql_log_warn, vha, 0x7087, 2170 2172 "Queue Pair delete failed.\n");
+1
drivers/scsi/qla2xxx/qla_dbg.h
··· 348 348 #define ql_dbg_tgt 0x00004000 /* Target mode */ 349 349 #define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */ 350 350 #define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */ 351 + #define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */ 351 352 352 353 extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *, 353 354 uint32_t, void **);
+50 -6
drivers/scsi/qla2xxx/qla_def.h
··· 25 25 #include <linux/firmware.h> 26 26 #include <linux/aer.h> 27 27 #include <linux/mutex.h> 28 + #include <linux/btree.h> 28 29 29 30 #include <scsi/scsi.h> 30 31 #include <scsi/scsi_host.h> ··· 396 395 struct completion comp; 397 396 } abt; 398 397 struct ct_arg ctarg; 398 + #define MAX_IOCB_MB_REG 28 399 + #define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t)) 399 400 struct { 400 - __le16 in_mb[28]; /* fr fw */ 401 - __le16 out_mb[28]; /* to fw */ 401 + __le16 in_mb[MAX_IOCB_MB_REG]; /* from FW */ 402 + __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */ 402 403 void *out, *in; 403 404 dma_addr_t out_dma, in_dma; 405 + struct completion comp; 406 + int rc; 404 407 } mbx; 405 408 struct { 406 409 struct imm_ntfy_from_isp *ntfy; ··· 442 437 uint32_t handle; 443 438 uint16_t flags; 444 439 uint16_t type; 445 - char *name; 440 + const char *name; 446 441 int iocbs; 447 442 struct qla_qpair *qpair; 448 443 u32 gen1; /* scratch */ ··· 2305 2300 struct ct_sns_desc ct_desc; 2306 2301 enum discovery_state disc_state; 2307 2302 enum login_state fw_login_state; 2303 + unsigned long plogi_nack_done_deadline; 2304 + 2308 2305 u32 login_gen, last_login_gen; 2309 2306 u32 rscn_gen, last_rscn_gen; 2310 2307 u32 chip_reset; ··· 3113 3106 uint32_t gold_fw_version; 3114 3107 }; 3115 3108 3109 + struct qla_dif_statistics { 3110 + uint64_t dif_input_bytes; 3111 + uint64_t dif_output_bytes; 3112 + uint64_t dif_input_requests; 3113 + uint64_t dif_output_requests; 3114 + uint32_t dif_guard_err; 3115 + uint32_t dif_ref_tag_err; 3116 + uint32_t dif_app_tag_err; 3117 + }; 3118 + 3116 3119 struct qla_statistics { 3117 3120 uint32_t total_isp_aborts; 3118 3121 uint64_t input_bytes; ··· 3135 3118 uint32_t stat_max_pend_cmds; 3136 3119 uint32_t stat_max_qfull_cmds_alloc; 3137 3120 uint32_t stat_max_qfull_cmds_dropped; 3121 + 3122 + struct qla_dif_statistics qla_dif_stats; 3138 3123 }; 3139 3124 3140 3125 struct bidi_statistics { 3141 3126 unsigned long long io_count; 3142 3127 unsigned 
long long transfer_bytes; 3128 + }; 3129 + 3130 + struct qla_tc_param { 3131 + struct scsi_qla_host *vha; 3132 + uint32_t blk_sz; 3133 + uint32_t bufflen; 3134 + struct scatterlist *sg; 3135 + struct scatterlist *prot_sg; 3136 + struct crc_context *ctx; 3137 + uint8_t *ctx_dsd_alloced; 3143 3138 }; 3144 3139 3145 3140 /* Multi queue support */ ··· 3301 3272 uint8_t tgt_node_name[WWN_SIZE]; 3302 3273 3303 3274 struct dentry *dfs_tgt_sess; 3275 + struct dentry *dfs_tgt_port_database; 3276 + 3304 3277 struct list_head q_full_list; 3305 3278 uint32_t num_pend_cmds; 3306 3279 uint32_t num_qfull_cmds_alloc; ··· 3312 3281 spinlock_t sess_lock; 3313 3282 int rspq_vector_cpuid; 3314 3283 spinlock_t atio_lock ____cacheline_aligned; 3284 + struct btree_head32 host_map; 3315 3285 }; 3316 3286 3317 3287 #define MAX_QFULL_CMDS_ALLOC 8192 ··· 3321 3289 ((ha->cur_fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT) 3322 3290 3323 3291 #define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */ 3292 + 3293 + #define QLA_EARLY_LINKUP(_ha) \ 3294 + ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \ 3295 + _ha->flags.fw_started && !_ha->flags.fw_init_done) 3324 3296 3325 3297 /* 3326 3298 * Qlogic host adapter specific data structure. 
··· 3375 3339 uint32_t fawwpn_enabled:1; 3376 3340 uint32_t exlogins_enabled:1; 3377 3341 uint32_t exchoffld_enabled:1; 3378 - /* 35 bits */ 3342 + 3343 + uint32_t lip_ae:1; 3344 + uint32_t n2n_ae:1; 3345 + uint32_t fw_started:1; 3346 + uint32_t fw_init_done:1; 3379 3347 } flags; 3380 3348 3381 3349 /* This spinlock is used to protect "io transactions", you must ··· 3472 3432 #define P2P_LOOP 3 3473 3433 uint8_t interrupts_on; 3474 3434 uint32_t isp_abort_cnt; 3475 - 3476 3435 #define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532 3477 3436 #define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432 3478 3437 #define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001 ··· 3952 3913 struct list_head vp_fcports; /* list of fcports */ 3953 3914 struct list_head work_list; 3954 3915 spinlock_t work_lock; 3916 + struct work_struct iocb_work; 3955 3917 3956 3918 /* Commonly used flags and state information. */ 3957 3919 struct Scsi_Host *host; ··· 4116 4076 /* Count of active session/fcport */ 4117 4077 int fcport_count; 4118 4078 wait_queue_head_t fcport_waitQ; 4079 + wait_queue_head_t vref_waitq; 4119 4080 } scsi_qla_host_t; 4120 4081 4121 4082 struct qla27xx_image_status { ··· 4172 4131 mb(); \ 4173 4132 if (__vha->flags.delete_progress) { \ 4174 4133 atomic_dec(&__vha->vref_count); \ 4134 + wake_up(&__vha->vref_waitq); \ 4175 4135 __bail = 1; \ 4176 4136 } else { \ 4177 4137 __bail = 0; \ 4178 4138 } \ 4179 4139 } while (0) 4180 4140 4181 - #define QLA_VHA_MARK_NOT_BUSY(__vha) \ 4141 + #define QLA_VHA_MARK_NOT_BUSY(__vha) do { \ 4182 4142 atomic_dec(&__vha->vref_count); \ 4143 + wake_up(&__vha->vref_waitq); \ 4144 + } while (0) \ 4183 4145 4184 4146 #define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \ 4185 4147 atomic_inc(&__qpair->ref_count); \
+103 -4
drivers/scsi/qla2xxx/qla_dfs.c
··· 19 19 struct qla_hw_data *ha = vha->hw; 20 20 unsigned long flags; 21 21 struct fc_port *sess = NULL; 22 - struct qla_tgt *tgt= vha->vha_tgt.qla_tgt; 22 + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 23 23 24 - seq_printf(s, "%s\n",vha->host_str); 24 + seq_printf(s, "%s\n", vha->host_str); 25 25 if (tgt) { 26 - seq_printf(s, "Port ID Port Name Handle\n"); 26 + seq_puts(s, "Port ID Port Name Handle\n"); 27 27 28 28 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 29 29 list_for_each_entry(sess, &vha->vp_fcports, list) ··· 44 44 return single_open(file, qla2x00_dfs_tgt_sess_show, vha); 45 45 } 46 46 47 - 48 47 static const struct file_operations dfs_tgt_sess_ops = { 49 48 .open = qla2x00_dfs_tgt_sess_open, 49 + .read = seq_read, 50 + .llseek = seq_lseek, 51 + .release = single_release, 52 + }; 53 + 54 + static int 55 + qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused) 56 + { 57 + scsi_qla_host_t *vha = s->private; 58 + struct qla_hw_data *ha = vha->hw; 59 + struct gid_list_info *gid_list; 60 + dma_addr_t gid_list_dma; 61 + fc_port_t fc_port; 62 + char *id_iter; 63 + int rc, i; 64 + uint16_t entries, loop_id; 65 + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 66 + 67 + seq_printf(s, "%s\n", vha->host_str); 68 + if (tgt) { 69 + gid_list = dma_alloc_coherent(&ha->pdev->dev, 70 + qla2x00_gid_list_size(ha), 71 + &gid_list_dma, GFP_KERNEL); 72 + if (!gid_list) { 73 + ql_dbg(ql_dbg_user, vha, 0x705c, 74 + "DMA allocation failed for %u\n", 75 + qla2x00_gid_list_size(ha)); 76 + return 0; 77 + } 78 + 79 + rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, 80 + &entries); 81 + if (rc != QLA_SUCCESS) 82 + goto out_free_id_list; 83 + 84 + id_iter = (char *)gid_list; 85 + 86 + seq_puts(s, "Port Name Port ID Loop ID\n"); 87 + 88 + for (i = 0; i < entries; i++) { 89 + struct gid_list_info *gid = 90 + (struct gid_list_info *)id_iter; 91 + loop_id = le16_to_cpu(gid->loop_id); 92 + memset(&fc_port, 0, sizeof(fc_port_t)); 93 + 94 + fc_port.loop_id = loop_id; 95 + 96 
+ rc = qla24xx_gpdb_wait(vha, &fc_port, 0); 97 + seq_printf(s, "%8phC %02x%02x%02x %d\n", 98 + fc_port.port_name, fc_port.d_id.b.domain, 99 + fc_port.d_id.b.area, fc_port.d_id.b.al_pa, 100 + fc_port.loop_id); 101 + id_iter += ha->gid_list_info_size; 102 + } 103 + out_free_id_list: 104 + dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 105 + gid_list, gid_list_dma); 106 + } 107 + 108 + return 0; 109 + } 110 + 111 + static int 112 + qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file) 113 + { 114 + scsi_qla_host_t *vha = inode->i_private; 115 + 116 + return single_open(file, qla2x00_dfs_tgt_port_database_show, vha); 117 + } 118 + 119 + static const struct file_operations dfs_tgt_port_database_ops = { 120 + .open = qla2x00_dfs_tgt_port_database_open, 50 121 .read = seq_read, 51 122 .llseek = seq_lseek, 52 123 .release = single_release, ··· 185 114 seq_printf(s, "num Q full sent = %lld\n", 186 115 vha->tgt_counters.num_q_full_sent); 187 116 117 + /* DIF stats */ 118 + seq_printf(s, "DIF Inp Bytes = %lld\n", 119 + vha->qla_stats.qla_dif_stats.dif_input_bytes); 120 + seq_printf(s, "DIF Outp Bytes = %lld\n", 121 + vha->qla_stats.qla_dif_stats.dif_output_bytes); 122 + seq_printf(s, "DIF Inp Req = %lld\n", 123 + vha->qla_stats.qla_dif_stats.dif_input_requests); 124 + seq_printf(s, "DIF Outp Req = %lld\n", 125 + vha->qla_stats.qla_dif_stats.dif_output_requests); 126 + seq_printf(s, "DIF Guard err = %d\n", 127 + vha->qla_stats.qla_dif_stats.dif_guard_err); 128 + seq_printf(s, "DIF Ref tag err = %d\n", 129 + vha->qla_stats.qla_dif_stats.dif_ref_tag_err); 130 + seq_printf(s, "DIF App tag err = %d\n", 131 + vha->qla_stats.qla_dif_stats.dif_app_tag_err); 188 132 return 0; 189 133 } 190 134 ··· 367 281 goto out; 368 282 } 369 283 284 + ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database", 285 + S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops); 286 + if (!ha->tgt.dfs_tgt_port_database) { 287 + ql_log(ql_log_warn, vha, 
0xffff, 288 + "Unable to create debugFS tgt_port_database node.\n"); 289 + goto out; 290 + } 291 + 370 292 ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, 371 293 &dfs_fce_ops); 372 294 if (!ha->dfs_fce) { ··· 403 309 if (ha->tgt.dfs_tgt_sess) { 404 310 debugfs_remove(ha->tgt.dfs_tgt_sess); 405 311 ha->tgt.dfs_tgt_sess = NULL; 312 + } 313 + 314 + if (ha->tgt.dfs_tgt_port_database) { 315 + debugfs_remove(ha->tgt.dfs_tgt_port_database); 316 + ha->tgt.dfs_tgt_port_database = NULL; 406 317 } 407 318 408 319 if (ha->dfs_fw_resource_cnt) {
+14 -4
drivers/scsi/qla2xxx/qla_gbl.h
··· 193 193 void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, 194 194 uint16_t *); 195 195 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); 196 + int qla24xx_async_abort_cmd(srb_t *); 196 197 197 198 /* 198 199 * Global Functions in qla_mid.c source file. ··· 257 256 extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); 258 257 extern int qla2x00_issue_marker(scsi_qla_host_t *, int); 259 258 extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *, 260 - uint32_t *, uint16_t, struct qla_tgt_cmd *); 259 + uint32_t *, uint16_t, struct qla_tc_param *); 261 260 extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *, 262 - uint32_t *, uint16_t, struct qla_tgt_cmd *); 261 + uint32_t *, uint16_t, struct qla_tc_param *); 263 262 extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *, 264 - uint32_t *, uint16_t, struct qla_tgt_cmd *); 263 + uint32_t *, uint16_t, struct qla_tc_param *); 265 264 extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *); 266 265 extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *); 267 266 extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *, ··· 369 368 370 369 extern int 371 370 qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, 372 - dma_addr_t, uint); 371 + dma_addr_t, uint16_t); 373 372 374 373 extern int qla24xx_abort_command(srb_t *); 375 374 extern int qla24xx_async_abort_command(srb_t *); ··· 472 471 473 472 extern int 474 473 qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint); 474 + 475 + int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *); 476 + int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8); 477 + int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t, 478 + uint16_t *); 479 + int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *, 480 + struct port_database_24xx *); 475 481 476 482 /* 477 483 * Global Function Prototypes in 
qla_isr.c source file. ··· 854 846 uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **); 855 847 void qla24xx_delete_sess_fn(struct work_struct *); 856 848 void qlt_unknown_atio_work_fn(struct work_struct *); 849 + void qlt_update_host_map(struct scsi_qla_host *, port_id_t); 850 + void qlt_remove_target_resources(struct qla_hw_data *); 857 851 858 852 #endif /* _QLA_GBL_H */
+31 -54
drivers/scsi/qla2xxx/qla_init.c
··· 629 629 struct srb *sp = s; 630 630 struct scsi_qla_host *vha = sp->vha; 631 631 struct qla_hw_data *ha = vha->hw; 632 - uint64_t zero = 0; 633 632 struct port_database_24xx *pd; 634 633 fc_port_t *fcport = sp->fcport; 635 634 u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb; ··· 648 649 649 650 pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; 650 651 651 - /* Check for logged in state. */ 652 - if (pd->current_login_state != PDS_PRLI_COMPLETE && 653 - pd->last_login_state != PDS_PRLI_COMPLETE) { 654 - ql_dbg(ql_dbg_mbx, vha, 0xffff, 655 - "Unable to verify login-state (%x/%x) for " 656 - "loop_id %x.\n", pd->current_login_state, 657 - pd->last_login_state, fcport->loop_id); 658 - rval = QLA_FUNCTION_FAILED; 659 - goto gpd_error_out; 660 - } 661 - 662 - if (fcport->loop_id == FC_NO_LOOP_ID || 663 - (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 664 - memcmp(fcport->port_name, pd->port_name, 8))) { 665 - /* We lost the device mid way. */ 666 - rval = QLA_NOT_LOGGED_IN; 667 - goto gpd_error_out; 668 - } 669 - 670 - /* Names are little-endian. */ 671 - memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 672 - 673 - /* Get port_id of device. */ 674 - fcport->d_id.b.domain = pd->port_id[0]; 675 - fcport->d_id.b.area = pd->port_id[1]; 676 - fcport->d_id.b.al_pa = pd->port_id[2]; 677 - fcport->d_id.b.rsvd_1 = 0; 678 - 679 - /* If not target must be initiator or unknown type. */ 680 - if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 681 - fcport->port_type = FCT_INITIATOR; 682 - else 683 - fcport->port_type = FCT_TARGET; 684 - 685 - /* Passback COS information. */ 686 - fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? 
687 - FC_COS_CLASS2 : FC_COS_CLASS3; 688 - 689 - if (pd->prli_svc_param_word_3[0] & BIT_7) { 690 - fcport->flags |= FCF_CONF_COMP_SUPPORTED; 691 - fcport->conf_compl_supported = 1; 692 - } 652 + rval = __qla24xx_parse_gpdb(vha, fcport, pd); 693 653 694 654 gpd_error_out: 695 655 memset(&ea, 0, sizeof(ea)); ··· 834 876 fcport->login_retry--; 835 877 836 878 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || 837 - (fcport->fw_login_state == DSC_LS_PLOGI_COMP) || 838 879 (fcport->fw_login_state == DSC_LS_PRLI_PEND)) 839 880 return 0; 881 + 882 + if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { 883 + if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) 884 + return 0; 885 + } 840 886 841 887 /* for pure Target Mode. Login will not be initiated */ 842 888 if (vha->host->active_mode == MODE_TARGET) ··· 1003 1041 fcport->flags); 1004 1042 1005 1043 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || 1006 - (fcport->fw_login_state == DSC_LS_PLOGI_COMP) || 1007 1044 (fcport->fw_login_state == DSC_LS_PRLI_PEND)) 1008 1045 return; 1046 + 1047 + if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { 1048 + if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) 1049 + return; 1050 + } 1009 1051 1010 1052 if (fcport->flags & FCF_ASYNC_SENT) { 1011 1053 fcport->login_retry++; ··· 1224 1258 complete(&abt->u.abt.comp); 1225 1259 } 1226 1260 1227 - static int 1261 + int 1228 1262 qla24xx_async_abort_cmd(srb_t *cmd_sp) 1229 1263 { 1230 1264 scsi_qla_host_t *vha = cmd_sp->vha; ··· 3178 3212 } else { 3179 3213 ql_dbg(ql_dbg_init, vha, 0x00d3, 3180 3214 "Init Firmware -- success.\n"); 3215 + ha->flags.fw_started = 1; 3181 3216 } 3182 3217 3183 3218 return (rval); ··· 3341 3374 uint8_t domain; 3342 3375 char connect_type[22]; 3343 3376 struct qla_hw_data *ha = vha->hw; 3344 - unsigned long flags; 3345 3377 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 3378 + port_id_t id; 3346 3379 3347 3380 /* Get host addresses. 
*/ 3348 3381 rval = qla2x00_get_adapter_id(vha, ··· 3420 3453 3421 3454 /* Save Host port and loop ID. */ 3422 3455 /* byte order - Big Endian */ 3423 - vha->d_id.b.domain = domain; 3424 - vha->d_id.b.area = area; 3425 - vha->d_id.b.al_pa = al_pa; 3426 - 3427 - spin_lock_irqsave(&ha->vport_slock, flags); 3428 - qlt_update_vp_map(vha, SET_AL_PA); 3429 - spin_unlock_irqrestore(&ha->vport_slock, flags); 3456 + id.b.domain = domain; 3457 + id.b.area = area; 3458 + id.b.al_pa = al_pa; 3459 + id.b.rsvd_1 = 0; 3460 + qlt_update_host_map(vha, id); 3430 3461 3431 3462 if (!vha->flags.init_done) 3432 3463 ql_log(ql_log_info, vha, 0x2010, ··· 4001 4036 atomic_set(&vha->loop_state, LOOP_READY); 4002 4037 ql_dbg(ql_dbg_disc, vha, 0x2069, 4003 4038 "LOOP READY.\n"); 4039 + ha->flags.fw_init_done = 1; 4004 4040 4005 4041 /* 4006 4042 * Process any ATIO queue entries that came in ··· 5114 5148 } 5115 5149 } 5116 5150 atomic_dec(&vha->vref_count); 5151 + wake_up(&vha->vref_waitq); 5117 5152 } 5118 5153 spin_unlock_irqrestore(&ha->vport_slock, flags); 5119 5154 } ··· 5493 5526 if (!(IS_P3P_TYPE(ha))) 5494 5527 ha->isp_ops->reset_chip(vha); 5495 5528 5529 + ha->flags.n2n_ae = 0; 5530 + ha->flags.lip_ae = 0; 5531 + ha->current_topology = 0; 5532 + ha->flags.fw_started = 0; 5533 + ha->flags.fw_init_done = 0; 5496 5534 ha->chip_reset++; 5497 5535 5498 5536 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); ··· 6774 6802 return; 6775 6803 if (!ha->fw_major_version) 6776 6804 return; 6805 + if (!ha->flags.fw_started) 6806 + return; 6777 6807 6778 6808 ret = qla2x00_stop_firmware(vha); 6779 6809 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && ··· 6789 6815 "Attempting retry of stop-firmware command.\n"); 6790 6816 ret = qla2x00_stop_firmware(vha); 6791 6817 } 6818 + 6819 + ha->flags.fw_started = 0; 6820 + ha->flags.fw_init_done = 0; 6792 6821 } 6793 6822 6794 6823 int
+6 -7
drivers/scsi/qla2xxx/qla_iocb.c
··· 889 889 890 890 int 891 891 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, 892 - uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) 892 + uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) 893 893 { 894 894 void *next_dsd; 895 895 uint8_t avail_dsds = 0; ··· 898 898 struct scatterlist *sg_prot; 899 899 uint32_t *cur_dsd = dsd; 900 900 uint16_t used_dsds = tot_dsds; 901 - 902 901 uint32_t prot_int; /* protection interval */ 903 902 uint32_t partial; 904 903 struct qla2_sgx sgx; ··· 965 966 } else { 966 967 list_add_tail(&dsd_ptr->list, 967 968 &(tc->ctx->dsd_list)); 968 - tc->ctx_dsd_alloced = 1; 969 + *tc->ctx_dsd_alloced = 1; 969 970 } 970 971 971 972 ··· 1004 1005 1005 1006 int 1006 1007 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, 1007 - uint16_t tot_dsds, struct qla_tgt_cmd *tc) 1008 + uint16_t tot_dsds, struct qla_tc_param *tc) 1008 1009 { 1009 1010 void *next_dsd; 1010 1011 uint8_t avail_dsds = 0; ··· 1065 1066 } else { 1066 1067 list_add_tail(&dsd_ptr->list, 1067 1068 &(tc->ctx->dsd_list)); 1068 - tc->ctx_dsd_alloced = 1; 1069 + *tc->ctx_dsd_alloced = 1; 1069 1070 } 1070 1071 1071 1072 /* add new list to cmd iocb or last list */ ··· 1091 1092 1092 1093 int 1093 1094 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, 1094 - uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) 1095 + uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) 1095 1096 { 1096 1097 void *next_dsd; 1097 1098 uint8_t avail_dsds = 0; ··· 1157 1158 } else { 1158 1159 list_add_tail(&dsd_ptr->list, 1159 1160 &(tc->ctx->dsd_list)); 1160 - tc->ctx_dsd_alloced = 1; 1161 + *tc->ctx_dsd_alloced = 1; 1161 1162 } 1162 1163 1163 1164 /* add new list to cmd iocb or last list */
+33 -8
drivers/scsi/qla2xxx/qla_isr.c
··· 708 708 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); 709 709 710 710 ha->isp_ops->fw_dump(vha, 1); 711 + ha->flags.fw_init_done = 0; 712 + ha->flags.fw_started = 0; 711 713 712 714 if (IS_FWI2_CAPABLE(ha)) { 713 715 if (mb[1] == 0 && mb[2] == 0) { ··· 763 761 break; 764 762 765 763 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 764 + ha->flags.lip_ae = 1; 765 + ha->flags.n2n_ae = 0; 766 + 766 767 ql_dbg(ql_dbg_async, vha, 0x5009, 767 768 "LIP occurred (%x).\n", mb[1]); 768 769 ··· 802 797 break; 803 798 804 799 case MBA_LOOP_DOWN: /* Loop Down Event */ 800 + ha->flags.n2n_ae = 0; 801 + ha->flags.lip_ae = 0; 802 + ha->current_topology = 0; 803 + 805 804 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) 806 805 ? RD_REG_WORD(&reg24->mailbox4) : 0; 807 806 mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4]) ··· 875 866 876 867 /* case MBA_DCBX_COMPLETE: */ 877 868 case MBA_POINT_TO_POINT: /* Point-to-Point */ 869 + ha->flags.lip_ae = 0; 870 + ha->flags.n2n_ae = 1; 871 + 878 872 if (IS_QLA2100(ha)) 879 873 break; 880 874 ··· 1632 1620 QLA_LOGIO_LOGIN_RETRIED : 0; 1633 1621 if (logio->entry_status) { 1634 1622 ql_log(ql_log_warn, fcport->vha, 0x5034, 1635 - "Async-%s error entry - hdl=%x" 1623 + "Async-%s error entry - %8phC hdl=%x" 1636 1624 "portid=%02x%02x%02x entry-status=%x.\n", 1637 - type, sp->handle, fcport->d_id.b.domain, 1625 + type, fcport->port_name, sp->handle, fcport->d_id.b.domain, 1638 1626 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1639 1627 logio->entry_status); 1640 1628 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, ··· 1645 1633 1646 1634 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1647 1635 ql_dbg(ql_dbg_async, fcport->vha, 0x5036, 1648 - "Async-%s complete - hdl=%x portid=%02x%02x%02x " 1649 - "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain, 1636 + "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x " 1637 + "iop0=%x.\n", type, fcport->port_name, sp->handle, 1638 + fcport->d_id.b.domain, 1650 1639 
fcport->d_id.b.area, fcport->d_id.b.al_pa, 1651 1640 le32_to_cpu(logio->io_parameter[0])); 1652 1641 ··· 1687 1674 case LSC_SCODE_NPORT_USED: 1688 1675 data[0] = MBS_LOOP_ID_USED; 1689 1676 break; 1677 + case LSC_SCODE_CMD_FAILED: 1678 + if (iop[1] == 0x0606) { 1679 + /* 1680 + * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI, 1681 + * Target side acked. 1682 + */ 1683 + data[0] = MBS_COMMAND_COMPLETE; 1684 + goto logio_done; 1685 + } 1686 + data[0] = MBS_COMMAND_ERROR; 1687 + break; 1690 1688 case LSC_SCODE_NOXCB: 1691 1689 vha->hw->exch_starvation++; 1692 1690 if (vha->hw->exch_starvation > 5) { ··· 1719 1695 } 1720 1696 1721 1697 ql_dbg(ql_dbg_async, fcport->vha, 0x5037, 1722 - "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x " 1723 - "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain, 1698 + "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x " 1699 + "iop0=%x iop1=%x.\n", type, fcport->port_name, 1700 + sp->handle, fcport->d_id.b.domain, 1724 1701 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1725 1702 le16_to_cpu(logio->comp_status), 1726 1703 le32_to_cpu(logio->io_parameter[0]), ··· 2704 2679 return; 2705 2680 2706 2681 abt = &sp->u.iocb_cmd; 2707 - abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle); 2682 + abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle); 2708 2683 sp->done(sp, 0); 2709 2684 } 2710 2685 ··· 2718 2693 struct sts_entry_24xx *pkt; 2719 2694 struct qla_hw_data *ha = vha->hw; 2720 2695 2721 - if (!vha->flags.online) 2696 + if (!ha->flags.fw_started) 2722 2697 return; 2723 2698 2724 2699 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
+268 -36
drivers/scsi/qla2xxx/qla_mbx.c
··· 10 10 #include <linux/delay.h> 11 11 #include <linux/gfp.h> 12 12 13 + static struct mb_cmd_name { 14 + uint16_t cmd; 15 + const char *str; 16 + } mb_str[] = { 17 + {MBC_GET_PORT_DATABASE, "GPDB"}, 18 + {MBC_GET_ID_LIST, "GIDList"}, 19 + {MBC_GET_LINK_PRIV_STATS, "Stats"}, 20 + }; 21 + 22 + static const char *mb_to_str(uint16_t cmd) 23 + { 24 + int i; 25 + struct mb_cmd_name *e; 26 + 27 + for (i = 0; i < ARRAY_SIZE(mb_str); i++) { 28 + e = mb_str + i; 29 + if (cmd == e->cmd) 30 + return e->str; 31 + } 32 + return "unknown"; 33 + } 34 + 13 35 static struct rom_cmd { 14 36 uint16_t cmd; 15 37 } rom_cmds[] = { ··· 2840 2818 2841 2819 int 2842 2820 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, 2843 - dma_addr_t stats_dma, uint options) 2821 + dma_addr_t stats_dma, uint16_t options) 2844 2822 { 2845 2823 int rval; 2846 2824 mbx_cmd_t mc; ··· 2850 2828 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, 2851 2829 "Entered %s.\n", __func__); 2852 2830 2853 - mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; 2854 - mcp->mb[2] = MSW(stats_dma); 2855 - mcp->mb[3] = LSW(stats_dma); 2856 - mcp->mb[6] = MSW(MSD(stats_dma)); 2857 - mcp->mb[7] = LSW(MSD(stats_dma)); 2858 - mcp->mb[8] = sizeof(struct link_statistics) / 4; 2859 - mcp->mb[9] = vha->vp_idx; 2860 - mcp->mb[10] = options; 2861 - mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2862 - mcp->in_mb = MBX_2|MBX_1|MBX_0; 2863 - mcp->tov = MBX_TOV_SECONDS; 2864 - mcp->flags = IOCTL_CMD; 2865 - rval = qla2x00_mailbox_command(vha, mcp); 2831 + memset(&mc, 0, sizeof(mc)); 2832 + mc.mb[0] = MBC_GET_LINK_PRIV_STATS; 2833 + mc.mb[2] = MSW(stats_dma); 2834 + mc.mb[3] = LSW(stats_dma); 2835 + mc.mb[6] = MSW(MSD(stats_dma)); 2836 + mc.mb[7] = LSW(MSD(stats_dma)); 2837 + mc.mb[8] = sizeof(struct link_statistics) / 4; 2838 + mc.mb[9] = cpu_to_le16(vha->vp_idx); 2839 + mc.mb[10] = cpu_to_le16(options); 2840 + 2841 + rval = qla24xx_send_mb_cmd(vha, &mc); 2866 2842 2867 2843 if (rval == QLA_SUCCESS) { 2868 
2844 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { ··· 3623 3603 scsi_qla_host_t *vp = NULL; 3624 3604 unsigned long flags; 3625 3605 int found; 3606 + port_id_t id; 3626 3607 3627 3608 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, 3628 3609 "Entered %s.\n", __func__); ··· 3631 3610 if (rptid_entry->entry_status != 0) 3632 3611 return; 3633 3612 3613 + id.b.domain = rptid_entry->port_id[2]; 3614 + id.b.area = rptid_entry->port_id[1]; 3615 + id.b.al_pa = rptid_entry->port_id[0]; 3616 + id.b.rsvd_1 = 0; 3617 + 3634 3618 if (rptid_entry->format == 0) { 3635 3619 /* loop */ 3636 - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7, 3620 + ql_dbg(ql_dbg_async, vha, 0x10b7, 3637 3621 "Format 0 : Number of VPs setup %d, number of " 3638 3622 "VPs acquired %d.\n", rptid_entry->vp_setup, 3639 3623 rptid_entry->vp_acquired); 3640 - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8, 3624 + ql_dbg(ql_dbg_async, vha, 0x10b8, 3641 3625 "Primary port id %02x%02x%02x.\n", 3642 3626 rptid_entry->port_id[2], rptid_entry->port_id[1], 3643 3627 rptid_entry->port_id[0]); 3644 3628 3645 - vha->d_id.b.domain = rptid_entry->port_id[2]; 3646 - vha->d_id.b.area = rptid_entry->port_id[1]; 3647 - vha->d_id.b.al_pa = rptid_entry->port_id[0]; 3648 - 3649 - spin_lock_irqsave(&ha->vport_slock, flags); 3650 - qlt_update_vp_map(vha, SET_AL_PA); 3651 - spin_unlock_irqrestore(&ha->vport_slock, flags); 3629 + qlt_update_host_map(vha, id); 3652 3630 3653 3631 } else if (rptid_entry->format == 1) { 3654 3632 /* fabric */ 3655 - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9, 3633 + ql_dbg(ql_dbg_async, vha, 0x10b9, 3656 3634 "Format 1: VP[%d] enabled - status %d - with " 3657 3635 "port id %02x%02x%02x.\n", rptid_entry->vp_idx, 3658 3636 rptid_entry->vp_status, ··· 3673 3653 WWN_SIZE); 3674 3654 } 3675 3655 3676 - vha->d_id.b.domain = rptid_entry->port_id[2]; 3677 - vha->d_id.b.area = rptid_entry->port_id[1]; 3678 - vha->d_id.b.al_pa = rptid_entry->port_id[0]; 3679 - spin_lock_irqsave(&ha->vport_slock, 
flags); 3680 - qlt_update_vp_map(vha, SET_AL_PA); 3681 - spin_unlock_irqrestore(&ha->vport_slock, flags); 3656 + qlt_update_host_map(vha, id); 3682 3657 } 3683 3658 3684 3659 fc_host_port_name(vha->host) = ··· 3709 3694 if (!found) 3710 3695 return; 3711 3696 3712 - vp->d_id.b.domain = rptid_entry->port_id[2]; 3713 - vp->d_id.b.area = rptid_entry->port_id[1]; 3714 - vp->d_id.b.al_pa = rptid_entry->port_id[0]; 3715 - spin_lock_irqsave(&ha->vport_slock, flags); 3716 - qlt_update_vp_map(vp, SET_AL_PA); 3717 - spin_unlock_irqrestore(&ha->vport_slock, flags); 3697 + qlt_update_host_map(vp, id); 3718 3698 3719 3699 /* 3720 3700 * Cannot configure here as we are still sitting on the ··· 5835 5825 dma_unmap_single(&vha->hw->pdev->dev, dd_dma, 5836 5826 size, DMA_FROM_DEVICE); 5837 5827 5828 + return rval; 5829 + } 5830 + 5831 + static void qla2x00_async_mb_sp_done(void *s, int res) 5832 + { 5833 + struct srb *sp = s; 5834 + 5835 + sp->u.iocb_cmd.u.mbx.rc = res; 5836 + 5837 + complete(&sp->u.iocb_cmd.u.mbx.comp); 5838 + /* don't free sp here. Let the caller do the free */ 5839 + } 5840 + 5841 + /* 5842 + * This mailbox uses the iocb interface to send MB command. 5843 + * This allows non-critial (non chip setup) command to go 5844 + * out in parrallel. 
5845 + */ 5846 + int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) 5847 + { 5848 + int rval = QLA_FUNCTION_FAILED; 5849 + srb_t *sp; 5850 + struct srb_iocb *c; 5851 + 5852 + if (!vha->hw->flags.fw_started) 5853 + goto done; 5854 + 5855 + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 5856 + if (!sp) 5857 + goto done; 5858 + 5859 + sp->type = SRB_MB_IOCB; 5860 + sp->name = mb_to_str(mcp->mb[0]); 5861 + 5862 + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 5863 + 5864 + memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG); 5865 + 5866 + c = &sp->u.iocb_cmd; 5867 + c->timeout = qla2x00_async_iocb_timeout; 5868 + init_completion(&c->u.mbx.comp); 5869 + 5870 + sp->done = qla2x00_async_mb_sp_done; 5871 + 5872 + rval = qla2x00_start_sp(sp); 5873 + if (rval != QLA_SUCCESS) { 5874 + ql_dbg(ql_dbg_mbx, vha, 0xffff, 5875 + "%s: %s Failed submission. %x.\n", 5876 + __func__, sp->name, rval); 5877 + goto done_free_sp; 5878 + } 5879 + 5880 + ql_dbg(ql_dbg_mbx, vha, 0xffff, "MB:%s hndl %x submitted\n", 5881 + sp->name, sp->handle); 5882 + 5883 + wait_for_completion(&c->u.mbx.comp); 5884 + memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG); 5885 + 5886 + rval = c->u.mbx.rc; 5887 + switch (rval) { 5888 + case QLA_FUNCTION_TIMEOUT: 5889 + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Timeout. %x.\n", 5890 + __func__, sp->name, rval); 5891 + break; 5892 + case QLA_SUCCESS: 5893 + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s done.\n", 5894 + __func__, sp->name); 5895 + sp->free(sp); 5896 + break; 5897 + default: 5898 + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Failed. 
%x.\n", 5899 + __func__, sp->name, rval); 5900 + sp->free(sp); 5901 + break; 5902 + } 5903 + 5904 + return rval; 5905 + 5906 + done_free_sp: 5907 + sp->free(sp); 5908 + done: 5909 + return rval; 5910 + } 5911 + 5912 + /* 5913 + * qla24xx_gpdb_wait 5914 + * NOTE: Do not call this routine from DPC thread 5915 + */ 5916 + int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) 5917 + { 5918 + int rval = QLA_FUNCTION_FAILED; 5919 + dma_addr_t pd_dma; 5920 + struct port_database_24xx *pd; 5921 + struct qla_hw_data *ha = vha->hw; 5922 + mbx_cmd_t mc; 5923 + 5924 + if (!vha->hw->flags.fw_started) 5925 + goto done; 5926 + 5927 + pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 5928 + if (pd == NULL) { 5929 + ql_log(ql_log_warn, vha, 0xffff, 5930 + "Failed to allocate port database structure.\n"); 5931 + goto done_free_sp; 5932 + } 5933 + memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); 5934 + 5935 + memset(&mc, 0, sizeof(mc)); 5936 + mc.mb[0] = MBC_GET_PORT_DATABASE; 5937 + mc.mb[1] = cpu_to_le16(fcport->loop_id); 5938 + mc.mb[2] = MSW(pd_dma); 5939 + mc.mb[3] = LSW(pd_dma); 5940 + mc.mb[6] = MSW(MSD(pd_dma)); 5941 + mc.mb[7] = LSW(MSD(pd_dma)); 5942 + mc.mb[9] = cpu_to_le16(vha->vp_idx); 5943 + mc.mb[10] = cpu_to_le16((uint16_t)opt); 5944 + 5945 + rval = qla24xx_send_mb_cmd(vha, &mc); 5946 + if (rval != QLA_SUCCESS) { 5947 + ql_dbg(ql_dbg_mbx, vha, 0xffff, 5948 + "%s: %8phC fail\n", __func__, fcport->port_name); 5949 + goto done_free_sp; 5950 + } 5951 + 5952 + rval = __qla24xx_parse_gpdb(vha, fcport, pd); 5953 + 5954 + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %8phC done\n", 5955 + __func__, fcport->port_name); 5956 + 5957 + done_free_sp: 5958 + if (pd) 5959 + dma_pool_free(ha->s_dma_pool, pd, pd_dma); 5960 + done: 5961 + return rval; 5962 + } 5963 + 5964 + int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, 5965 + struct port_database_24xx *pd) 5966 + { 5967 + int rval = QLA_SUCCESS; 5968 + uint64_t zero = 0; 
5969 + 5970 + /* Check for logged in state. */ 5971 + if (pd->current_login_state != PDS_PRLI_COMPLETE && 5972 + pd->last_login_state != PDS_PRLI_COMPLETE) { 5973 + ql_dbg(ql_dbg_mbx, vha, 0xffff, 5974 + "Unable to verify login-state (%x/%x) for " 5975 + "loop_id %x.\n", pd->current_login_state, 5976 + pd->last_login_state, fcport->loop_id); 5977 + rval = QLA_FUNCTION_FAILED; 5978 + goto gpd_error_out; 5979 + } 5980 + 5981 + if (fcport->loop_id == FC_NO_LOOP_ID || 5982 + (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 5983 + memcmp(fcport->port_name, pd->port_name, 8))) { 5984 + /* We lost the device mid way. */ 5985 + rval = QLA_NOT_LOGGED_IN; 5986 + goto gpd_error_out; 5987 + } 5988 + 5989 + /* Names are little-endian. */ 5990 + memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 5991 + memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 5992 + 5993 + /* Get port_id of device. */ 5994 + fcport->d_id.b.domain = pd->port_id[0]; 5995 + fcport->d_id.b.area = pd->port_id[1]; 5996 + fcport->d_id.b.al_pa = pd->port_id[2]; 5997 + fcport->d_id.b.rsvd_1 = 0; 5998 + 5999 + /* If not target must be initiator or unknown type. */ 6000 + if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 6001 + fcport->port_type = FCT_INITIATOR; 6002 + else 6003 + fcport->port_type = FCT_TARGET; 6004 + 6005 + /* Passback COS information. */ 6006 + fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? 6007 + FC_COS_CLASS2 : FC_COS_CLASS3; 6008 + 6009 + if (pd->prli_svc_param_word_3[0] & BIT_7) { 6010 + fcport->flags |= FCF_CONF_COMP_SUPPORTED; 6011 + fcport->conf_compl_supported = 1; 6012 + } 6013 + 6014 + gpd_error_out: 6015 + return rval; 6016 + } 6017 + 6018 + /* 6019 + * qla24xx_gidlist__wait 6020 + * NOTE: don't call this routine from DPC thread. 
6021 + */ 6022 + int qla24xx_gidlist_wait(struct scsi_qla_host *vha, 6023 + void *id_list, dma_addr_t id_list_dma, uint16_t *entries) 6024 + { 6025 + int rval = QLA_FUNCTION_FAILED; 6026 + mbx_cmd_t mc; 6027 + 6028 + if (!vha->hw->flags.fw_started) 6029 + goto done; 6030 + 6031 + memset(&mc, 0, sizeof(mc)); 6032 + mc.mb[0] = MBC_GET_ID_LIST; 6033 + mc.mb[2] = MSW(id_list_dma); 6034 + mc.mb[3] = LSW(id_list_dma); 6035 + mc.mb[6] = MSW(MSD(id_list_dma)); 6036 + mc.mb[7] = LSW(MSD(id_list_dma)); 6037 + mc.mb[8] = 0; 6038 + mc.mb[9] = cpu_to_le16(vha->vp_idx); 6039 + 6040 + rval = qla24xx_send_mb_cmd(vha, &mc); 6041 + if (rval != QLA_SUCCESS) { 6042 + ql_dbg(ql_dbg_mbx, vha, 0xffff, 6043 + "%s: fail\n", __func__); 6044 + } else { 6045 + *entries = mc.mb[1]; 6046 + ql_dbg(ql_dbg_mbx, vha, 0xffff, 6047 + "%s: done\n", __func__); 6048 + } 6049 + done: 5838 6050 return rval; 5839 6051 }
+8 -6
drivers/scsi/qla2xxx/qla_mid.c
··· 74 74 * ensures no active vp_list traversal while the vport is removed 75 75 * from the queue) 76 76 */ 77 + wait_event_timeout(vha->vref_waitq, atomic_read(&vha->vref_count), 78 + 10*HZ); 79 + 77 80 spin_lock_irqsave(&ha->vport_slock, flags); 78 - while (atomic_read(&vha->vref_count)) { 79 - spin_unlock_irqrestore(&ha->vport_slock, flags); 80 - 81 - msleep(500); 82 - 83 - spin_lock_irqsave(&ha->vport_slock, flags); 81 + if (atomic_read(&vha->vref_count)) { 82 + ql_dbg(ql_dbg_vport, vha, 0xfffa, 83 + "vha->vref_count=%u timeout\n", vha->vref_count.counter); 84 + vha->vref_count = (atomic_t)ATOMIC_INIT(0); 84 85 } 85 86 list_del(&vha->list); 86 87 qlt_update_vp_map(vha, RESET_VP_IDX); ··· 270 269 271 270 spin_lock_irqsave(&ha->vport_slock, flags); 272 271 atomic_dec(&vha->vref_count); 272 + wake_up(&vha->vref_waitq); 273 273 } 274 274 i++; 275 275 }
+22 -1
drivers/scsi/qla2xxx/qla_os.c
··· 2560 2560 return atomic_read(&vha->loop_state) == LOOP_READY; 2561 2561 } 2562 2562 2563 + static void qla2x00_iocb_work_fn(struct work_struct *work) 2564 + { 2565 + struct scsi_qla_host *vha = container_of(work, 2566 + struct scsi_qla_host, iocb_work); 2567 + int cnt = 0; 2568 + 2569 + while (!list_empty(&vha->work_list)) { 2570 + qla2x00_do_work(vha); 2571 + cnt++; 2572 + if (cnt > 10) 2573 + break; 2574 + } 2575 + } 2576 + 2563 2577 /* 2564 2578 * PCI driver interface 2565 2579 */ ··· 3092 3078 */ 3093 3079 qla2xxx_wake_dpc(base_vha); 3094 3080 3081 + INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); 3095 3082 INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); 3096 3083 3097 3084 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { ··· 3484 3469 qla2x00_free_sysfs_attr(base_vha, true); 3485 3470 3486 3471 fc_remove_host(base_vha->host); 3472 + qlt_remove_target_resources(ha); 3487 3473 3488 3474 scsi_remove_host(base_vha->host); 3489 3475 ··· 4284 4268 spin_lock_init(&vha->work_lock); 4285 4269 spin_lock_init(&vha->cmd_list_lock); 4286 4270 init_waitqueue_head(&vha->fcport_waitQ); 4271 + init_waitqueue_head(&vha->vref_waitq); 4287 4272 4288 4273 vha->gnl.size = sizeof(struct get_name_list_extended) * 4289 4274 (ha->max_loop_id + 1); ··· 4336 4319 spin_lock_irqsave(&vha->work_lock, flags); 4337 4320 list_add_tail(&e->list, &vha->work_list); 4338 4321 spin_unlock_irqrestore(&vha->work_lock, flags); 4339 - qla2xxx_wake_dpc(vha); 4322 + 4323 + if (QLA_EARLY_LINKUP(vha->hw)) 4324 + schedule_work(&vha->iocb_work); 4325 + else 4326 + qla2xxx_wake_dpc(vha); 4340 4327 4341 4328 return QLA_SUCCESS; 4342 4329 }
+472 -280
drivers/scsi/qla2xxx/qla_target.c
··· 130 130 static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha, 131 131 fc_port_t *fcport, bool local); 132 132 void qlt_unreg_sess(struct fc_port *sess); 133 + static void qlt_24xx_handle_abts(struct scsi_qla_host *, 134 + struct abts_recv_from_24xx *); 135 + 133 136 /* 134 137 * Global Variables 135 138 */ ··· 142 139 static struct workqueue_struct *qla_tgt_wq; 143 140 static DEFINE_MUTEX(qla_tgt_mutex); 144 141 static LIST_HEAD(qla_tgt_glist); 142 + 143 + static const char *prot_op_str(u32 prot_op) 144 + { 145 + switch (prot_op) { 146 + case TARGET_PROT_NORMAL: return "NORMAL"; 147 + case TARGET_PROT_DIN_INSERT: return "DIN_INSERT"; 148 + case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT"; 149 + case TARGET_PROT_DIN_STRIP: return "DIN_STRIP"; 150 + case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP"; 151 + case TARGET_PROT_DIN_PASS: return "DIN_PASS"; 152 + case TARGET_PROT_DOUT_PASS: return "DOUT_PASS"; 153 + default: return "UNKNOWN"; 154 + } 155 + } 145 156 146 157 /* This API intentionally takes dest as a parameter, rather than returning 147 158 * int value to avoid caller forgetting to issue wmb() after the store */ ··· 187 170 struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, 188 171 uint8_t *d_id) 189 172 { 190 - struct qla_hw_data *ha = vha->hw; 191 - uint8_t vp_idx; 173 + struct scsi_qla_host *host; 174 + uint32_t key = 0; 192 175 193 - if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0])) 194 - return NULL; 195 - 196 - if (vha->d_id.b.al_pa == d_id[2]) 176 + if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) && 177 + (vha->d_id.b.al_pa == d_id[2])) 197 178 return vha; 198 179 199 - BUG_ON(ha->tgt.tgt_vp_map == NULL); 200 - vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx; 201 - if (likely(test_bit(vp_idx, ha->vp_idx_map))) 202 - return ha->tgt.tgt_vp_map[vp_idx].vha; 180 + key = (uint32_t)d_id[0] << 16; 181 + key |= (uint32_t)d_id[1] << 8; 182 + key |= (uint32_t)d_id[2]; 203 183 204 - return 
NULL; 184 + host = btree_lookup32(&vha->hw->tgt.host_map, key); 185 + if (!host) 186 + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, 187 + "Unable to find host %06x\n", key); 188 + 189 + return host; 205 190 } 206 191 207 192 static inline ··· 408 389 (struct abts_recv_from_24xx *)atio; 409 390 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, 410 391 entry->vp_index); 392 + unsigned long flags; 393 + 411 394 if (unlikely(!host)) { 412 395 ql_dbg(ql_dbg_tgt, vha, 0xffff, 413 396 "qla_target(%d): Response pkt (ABTS_RECV_24XX) " ··· 417 396 vha->vp_idx, entry->vp_index); 418 397 break; 419 398 } 420 - qlt_response_pkt(host, (response_t *)atio); 399 + if (!ha_locked) 400 + spin_lock_irqsave(&host->hw->hardware_lock, flags); 401 + qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio); 402 + if (!ha_locked) 403 + spin_unlock_irqrestore(&host->hw->hardware_lock, flags); 421 404 break; 422 - 423 405 } 424 406 425 407 /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */ ··· 578 554 sp->fcport->login_gen++; 579 555 sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP; 580 556 sp->fcport->logout_on_delete = 1; 557 + sp->fcport->plogi_nack_done_deadline = jiffies + HZ; 581 558 break; 582 559 583 560 case SRB_NACK_PRLI: ··· 638 613 break; 639 614 case SRB_NACK_PRLI: 640 615 fcport->fw_login_state = DSC_LS_PRLI_PEND; 616 + fcport->deleted = 0; 641 617 c = "PRLI"; 642 618 break; 643 619 case SRB_NACK_LOGO: ··· 1241 1215 } 1242 1216 1243 1217 /* Get list of logged in devices */ 1244 - rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries); 1218 + rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries); 1245 1219 if (rc != QLA_SUCCESS) { 1246 1220 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, 1247 1221 "qla_target(%d): get_id_list() failed: %x\n", ··· 1576 1550 struct qla_hw_data *ha = vha->hw; 1577 1551 request_t *pkt; 1578 1552 struct nack_to_isp *nack; 1553 + 1554 + if (!ha->flags.fw_started) 1555 + return; 1579 1556 1580 1557 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending 
NOTIFY_ACK (ha=%p)\n", ha); 1581 1558 ··· 2042 2013 } 2043 2014 EXPORT_SYMBOL(qlt_free_mcmd); 2044 2015 2016 + /* 2017 + * ha->hardware_lock supposed to be held on entry. Might drop it, then 2018 + * reacquire 2019 + */ 2020 + void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, 2021 + uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq) 2022 + { 2023 + struct atio_from_isp *atio = &cmd->atio; 2024 + struct ctio7_to_24xx *ctio; 2025 + uint16_t temp; 2026 + 2027 + ql_dbg(ql_dbg_tgt_dif, vha, 0x3066, 2028 + "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, " 2029 + "sense_key=%02x, asc=%02x, ascq=%02x", 2030 + vha, atio, scsi_status, sense_key, asc, ascq); 2031 + 2032 + ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); 2033 + if (!ctio) { 2034 + ql_dbg(ql_dbg_async, vha, 0x3067, 2035 + "qla2x00t(%ld): %s failed: unable to allocate request packet", 2036 + vha->host_no, __func__); 2037 + goto out; 2038 + } 2039 + 2040 + ctio->entry_type = CTIO_TYPE7; 2041 + ctio->entry_count = 1; 2042 + ctio->handle = QLA_TGT_SKIP_HANDLE; 2043 + ctio->nport_handle = cmd->sess->loop_id; 2044 + ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2045 + ctio->vp_index = vha->vp_idx; 2046 + ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2047 + ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2048 + ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2049 + ctio->exchange_addr = atio->u.isp24.exchange_addr; 2050 + ctio->u.status1.flags = (atio->u.isp24.attr << 9) | 2051 + cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS); 2052 + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2053 + ctio->u.status1.ox_id = cpu_to_le16(temp); 2054 + ctio->u.status1.scsi_status = 2055 + cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status); 2056 + ctio->u.status1.response_len = cpu_to_le16(18); 2057 + ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); 2058 + 2059 + if (ctio->u.status1.residual != 0) 2060 + 
ctio->u.status1.scsi_status |= 2061 + cpu_to_le16(SS_RESIDUAL_UNDER); 2062 + 2063 + /* Response code and sense key */ 2064 + put_unaligned_le32(((0x70 << 24) | (sense_key << 8)), 2065 + (&ctio->u.status1.sense_data)[0]); 2066 + /* Additional sense length */ 2067 + put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]); 2068 + /* ASC and ASCQ */ 2069 + put_unaligned_le32(((asc << 24) | (ascq << 16)), 2070 + (&ctio->u.status1.sense_data)[3]); 2071 + 2072 + /* Memory Barrier */ 2073 + wmb(); 2074 + 2075 + qla2x00_start_iocbs(vha, vha->req); 2076 + out: 2077 + return; 2078 + } 2079 + 2045 2080 /* callback from target fabric module code */ 2046 2081 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) 2047 2082 { ··· 2354 2261 */ 2355 2262 return -EAGAIN; 2356 2263 } else 2357 - ha->tgt.cmds[h-1] = prm->cmd; 2264 + ha->tgt.cmds[h - 1] = prm->cmd; 2358 2265 2359 2266 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; 2360 2267 pkt->nport_handle = prm->cmd->loop_id; ··· 2484 2391 return cmd->bufflen > 0; 2485 2392 } 2486 2393 2394 + static void qlt_print_dif_err(struct qla_tgt_prm *prm) 2395 + { 2396 + struct qla_tgt_cmd *cmd; 2397 + struct scsi_qla_host *vha; 2398 + 2399 + /* asc 0x10=dif error */ 2400 + if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) { 2401 + cmd = prm->cmd; 2402 + vha = cmd->vha; 2403 + /* ASCQ */ 2404 + switch (prm->sense_buffer[13]) { 2405 + case 1: 2406 + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, 2407 + "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2408 + "se_cmd=%p tag[%x]", 2409 + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2410 + cmd->atio.u.isp24.exchange_addr); 2411 + break; 2412 + case 2: 2413 + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, 2414 + "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2415 + "se_cmd=%p tag[%x]", 2416 + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2417 + cmd->atio.u.isp24.exchange_addr); 2418 + break; 2419 + case 3: 2420 + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, 2421 + "BE detected REF TAG ERR: 
lba[0x%llx|%lld] len[0x%x] " 2422 + "se_cmd=%p tag[%x]", 2423 + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2424 + cmd->atio.u.isp24.exchange_addr); 2425 + break; 2426 + default: 2427 + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, 2428 + "BE detected Dif ERR: lba[%llx|%lld] len[%x] " 2429 + "se_cmd=%p tag[%x]", 2430 + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2431 + cmd->atio.u.isp24.exchange_addr); 2432 + break; 2433 + } 2434 + ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16); 2435 + } 2436 + } 2437 + 2487 2438 /* 2488 2439 * Called without ha->hardware_lock held 2489 2440 */ ··· 2649 2512 for (i = 0; i < prm->sense_buffer_len/4; i++) 2650 2513 ((uint32_t *)ctio->u.status1.sense_data)[i] = 2651 2514 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); 2652 - #if 0 2653 - if (unlikely((prm->sense_buffer_len % 4) != 0)) { 2654 - static int q; 2655 - if (q < 10) { 2656 - ql_dbg(ql_dbg_tgt, vha, 0xe04f, 2657 - "qla_target(%d): %d bytes of sense " 2658 - "lost", prm->tgt->ha->vp_idx, 2659 - prm->sense_buffer_len % 4); 2660 - q++; 2661 - } 2662 - } 2663 - #endif 2515 + 2516 + qlt_print_dif_err(prm); 2517 + 2664 2518 } else { 2665 2519 ctio->u.status1.flags &= 2666 2520 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); ··· 2665 2537 /* Sense with len > 24, is it possible ??? */ 2666 2538 } 2667 2539 2668 - 2669 - 2670 - /* diff */ 2671 2540 static inline int 2672 2541 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) 2673 2542 { 2674 - /* 2675 - * Uncomment when corresponding SCSI changes are done. 
2676 - * 2677 - if (!sp->cmd->prot_chk) 2678 - return 0; 2679 - * 2680 - */ 2681 2543 switch (se_cmd->prot_op) { 2682 2544 case TARGET_PROT_DOUT_INSERT: 2683 2545 case TARGET_PROT_DIN_STRIP: ··· 2688 2570 return 0; 2689 2571 } 2690 2572 2691 - /* 2692 - * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command 2693 - * 2694 - */ 2695 - static inline void 2696 - qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx) 2573 + static inline int 2574 + qla_tgt_ref_mask_check(struct se_cmd *se_cmd) 2697 2575 { 2698 - uint32_t lba = 0xffffffff & se_cmd->t_task_lba; 2576 + switch (se_cmd->prot_op) { 2577 + case TARGET_PROT_DIN_INSERT: 2578 + case TARGET_PROT_DOUT_INSERT: 2579 + case TARGET_PROT_DIN_STRIP: 2580 + case TARGET_PROT_DOUT_STRIP: 2581 + case TARGET_PROT_DIN_PASS: 2582 + case TARGET_PROT_DOUT_PASS: 2583 + return 1; 2584 + default: 2585 + return 0; 2586 + } 2587 + return 0; 2588 + } 2699 2589 2700 - /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2 2590 + /* 2591 + * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command 2592 + */ 2593 + static void 2594 + qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx, 2595 + uint16_t *pfw_prot_opts) 2596 + { 2597 + struct se_cmd *se_cmd = &cmd->se_cmd; 2598 + uint32_t lba = 0xffffffff & se_cmd->t_task_lba; 2599 + scsi_qla_host_t *vha = cmd->tgt->vha; 2600 + struct qla_hw_data *ha = vha->hw; 2601 + uint32_t t32 = 0; 2602 + 2603 + /* 2604 + * wait till Mode Sense/Select cmd, modepage Ah, subpage 2 2701 2605 * have been immplemented by TCM, before AppTag is avail. 
2702 2606 * Look for modesense_handlers[] 2703 2607 */ ··· 2727 2587 ctx->app_tag_mask[0] = 0x0; 2728 2588 ctx->app_tag_mask[1] = 0x0; 2729 2589 2590 + if (IS_PI_UNINIT_CAPABLE(ha)) { 2591 + if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || 2592 + (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) 2593 + *pfw_prot_opts |= PO_DIS_VALD_APP_ESC; 2594 + else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) 2595 + *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; 2596 + } 2597 + 2598 + t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts); 2599 + 2730 2600 switch (se_cmd->prot_type) { 2731 2601 case TARGET_DIF_TYPE0_PROT: 2732 2602 /* 2733 - * No check for ql2xenablehba_err_chk, as it would be an 2734 - * I/O error if hba tag generation is not done. 2603 + * No check for ql2xenablehba_err_chk, as it 2604 + * would be an I/O error if hba tag generation 2605 + * is not done. 2735 2606 */ 2736 2607 ctx->ref_tag = cpu_to_le32(lba); 2737 - 2738 - if (!qlt_hba_err_chk_enabled(se_cmd)) 2739 - break; 2740 - 2741 2608 /* enable ALL bytes of the ref tag */ 2742 2609 ctx->ref_tag_mask[0] = 0xff; 2743 2610 ctx->ref_tag_mask[1] = 0xff; 2744 2611 ctx->ref_tag_mask[2] = 0xff; 2745 2612 ctx->ref_tag_mask[3] = 0xff; 2746 2613 break; 2747 - /* 2748 - * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and 2749 - * 16 bit app tag. 2750 - */ 2751 2614 case TARGET_DIF_TYPE1_PROT: 2752 - ctx->ref_tag = cpu_to_le32(lba); 2753 - 2754 - if (!qlt_hba_err_chk_enabled(se_cmd)) 2755 - break; 2756 - 2757 - /* enable ALL bytes of the ref tag */ 2758 - ctx->ref_tag_mask[0] = 0xff; 2759 - ctx->ref_tag_mask[1] = 0xff; 2760 - ctx->ref_tag_mask[2] = 0xff; 2761 - ctx->ref_tag_mask[3] = 0xff; 2762 - break; 2763 - /* 2764 - * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to 2765 - * match LBA in CDB + N 2766 - */ 2615 + /* 2616 + * For TYPE 1 protection: 16 bit GUARD tag, 32 bit 2617 + * REF tag, and 16 bit app tag. 
2618 + */ 2619 + ctx->ref_tag = cpu_to_le32(lba); 2620 + if (!qla_tgt_ref_mask_check(se_cmd) || 2621 + !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { 2622 + *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2623 + break; 2624 + } 2625 + /* enable ALL bytes of the ref tag */ 2626 + ctx->ref_tag_mask[0] = 0xff; 2627 + ctx->ref_tag_mask[1] = 0xff; 2628 + ctx->ref_tag_mask[2] = 0xff; 2629 + ctx->ref_tag_mask[3] = 0xff; 2630 + break; 2767 2631 case TARGET_DIF_TYPE2_PROT: 2768 - ctx->ref_tag = cpu_to_le32(lba); 2769 - 2770 - if (!qlt_hba_err_chk_enabled(se_cmd)) 2771 - break; 2772 - 2773 - /* enable ALL bytes of the ref tag */ 2774 - ctx->ref_tag_mask[0] = 0xff; 2775 - ctx->ref_tag_mask[1] = 0xff; 2776 - ctx->ref_tag_mask[2] = 0xff; 2777 - ctx->ref_tag_mask[3] = 0xff; 2778 - break; 2779 - 2780 - /* For Type 3 protection: 16 bit GUARD only */ 2632 + /* 2633 + * For TYPE 2 protection: 16 bit GUARD + 32 bit REF 2634 + * tag has to match LBA in CDB + N 2635 + */ 2636 + ctx->ref_tag = cpu_to_le32(lba); 2637 + if (!qla_tgt_ref_mask_check(se_cmd) || 2638 + !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { 2639 + *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2640 + break; 2641 + } 2642 + /* enable ALL bytes of the ref tag */ 2643 + ctx->ref_tag_mask[0] = 0xff; 2644 + ctx->ref_tag_mask[1] = 0xff; 2645 + ctx->ref_tag_mask[2] = 0xff; 2646 + ctx->ref_tag_mask[3] = 0xff; 2647 + break; 2781 2648 case TARGET_DIF_TYPE3_PROT: 2782 - ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = 2783 - ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; 2784 - break; 2649 + /* For TYPE 3 protection: 16 bit GUARD only */ 2650 + *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2651 + ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = 2652 + ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; 2653 + break; 2785 2654 } 2786 2655 } 2787 - 2788 2656 2789 2657 static inline int 2790 2658 qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) ··· 2812 2664 struct se_cmd *se_cmd = &cmd->se_cmd; 2813 2665 uint32_t h; 2814 2666 struct 
atio_from_isp *atio = &prm->cmd->atio; 2667 + struct qla_tc_param tc; 2815 2668 uint16_t t16; 2816 2669 2817 2670 ha = vha->hw; ··· 2838 2689 case TARGET_PROT_DIN_INSERT: 2839 2690 case TARGET_PROT_DOUT_STRIP: 2840 2691 transfer_length = data_bytes; 2841 - data_bytes += dif_bytes; 2692 + if (cmd->prot_sg_cnt) 2693 + data_bytes += dif_bytes; 2842 2694 break; 2843 - 2844 2695 case TARGET_PROT_DIN_STRIP: 2845 2696 case TARGET_PROT_DOUT_INSERT: 2846 2697 case TARGET_PROT_DIN_PASS: 2847 2698 case TARGET_PROT_DOUT_PASS: 2848 2699 transfer_length = data_bytes + dif_bytes; 2849 2700 break; 2850 - 2851 2701 default: 2852 2702 BUG(); 2853 2703 break; ··· 2882 2734 break; 2883 2735 } 2884 2736 2885 - 2886 2737 /* ---- PKT ---- */ 2887 2738 /* Update entry type to indicate Command Type CRC_2 IOCB */ 2888 2739 pkt->entry_type = CTIO_CRC2; ··· 2899 2752 } else 2900 2753 ha->tgt.cmds[h-1] = prm->cmd; 2901 2754 2902 - 2903 2755 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; 2904 - pkt->nport_handle = prm->cmd->loop_id; 2756 + pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); 2905 2757 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2906 2758 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2907 2759 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; ··· 2921 2775 else if (cmd->dma_data_direction == DMA_FROM_DEVICE) 2922 2776 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); 2923 2777 2924 - 2925 2778 pkt->dseg_count = prm->tot_dsds; 2926 2779 /* Fibre channel byte count */ 2927 2780 pkt->transfer_length = cpu_to_le32(transfer_length); 2928 - 2929 2781 2930 2782 /* ----- CRC context -------- */ 2931 2783 ··· 2944 2800 /* Set handle */ 2945 2801 crc_ctx_pkt->handle = pkt->handle; 2946 2802 2947 - qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt); 2803 + qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts); 2948 2804 2949 2805 pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); 2950 2806 pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); 2951 2807 pkt->crc_context_len 
= CRC_CONTEXT_LEN_FW; 2952 - 2953 2808 2954 2809 if (!bundling) { 2955 2810 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; ··· 2970 2827 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); 2971 2828 crc_ctx_pkt->guard_seed = cpu_to_le16(0); 2972 2829 2830 + memset((uint8_t *)&tc, 0 , sizeof(tc)); 2831 + tc.vha = vha; 2832 + tc.blk_sz = cmd->blk_sz; 2833 + tc.bufflen = cmd->bufflen; 2834 + tc.sg = cmd->sg; 2835 + tc.prot_sg = cmd->prot_sg; 2836 + tc.ctx = crc_ctx_pkt; 2837 + tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced; 2973 2838 2974 2839 /* Walks data segments */ 2975 2840 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); 2976 2841 2977 2842 if (!bundling && prm->prot_seg_cnt) { 2978 2843 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, 2979 - prm->tot_dsds, cmd)) 2844 + prm->tot_dsds, &tc)) 2980 2845 goto crc_queuing_error; 2981 2846 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, 2982 - (prm->tot_dsds - prm->prot_seg_cnt), cmd)) 2847 + (prm->tot_dsds - prm->prot_seg_cnt), &tc)) 2983 2848 goto crc_queuing_error; 2984 2849 2985 2850 if (bundling && prm->prot_seg_cnt) { ··· 2996 2845 2997 2846 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; 2998 2847 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, 2999 - prm->prot_seg_cnt, cmd)) 2848 + prm->prot_seg_cnt, &tc)) 3000 2849 goto crc_queuing_error; 3001 2850 } 3002 2851 return QLA_SUCCESS; 3003 2852 3004 2853 crc_queuing_error: 3005 2854 /* Cleanup will be performed by the caller */ 2855 + vha->hw->tgt.cmds[h - 1] = NULL; 3006 2856 3007 2857 return QLA_FUNCTION_FAILED; 3008 2858 } 3009 - 3010 2859 3011 2860 /* 3012 2861 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * ··· 3057 2906 else 3058 2907 vha->tgt_counters.core_qla_que_buf++; 3059 2908 3060 - if (!vha->flags.online || cmd->reset_count != ha->chip_reset) { 2909 + if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) { 3061 2910 /* 3062 2911 * Either the port is not online or 
this request was from 3063 2912 * previous life, just abort the processing. ··· 3198 3047 3199 3048 spin_lock_irqsave(&ha->hardware_lock, flags); 3200 3049 3201 - if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) || 3050 + if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) || 3202 3051 (cmd->sess && cmd->sess->deleted)) { 3203 3052 /* 3204 3053 * Either the port is not online or this request was from ··· 3255 3104 3256 3105 3257 3106 /* 3258 - * Checks the guard or meta-data for the type of error 3259 - * detected by the HBA. 3107 + * it is assumed either hardware_lock or qpair lock is held. 3260 3108 */ 3261 - static inline int 3109 + static void 3262 3110 qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, 3263 - struct ctio_crc_from_fw *sts) 3111 + struct ctio_crc_from_fw *sts) 3264 3112 { 3265 3113 uint8_t *ap = &sts->actual_dif[0]; 3266 3114 uint8_t *ep = &sts->expected_dif[0]; 3267 - uint32_t e_ref_tag, a_ref_tag; 3268 - uint16_t e_app_tag, a_app_tag; 3269 - uint16_t e_guard, a_guard; 3270 3115 uint64_t lba = cmd->se_cmd.t_task_lba; 3116 + uint8_t scsi_status, sense_key, asc, ascq; 3117 + unsigned long flags; 3271 3118 3272 - a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); 3273 - a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); 3274 - a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); 3119 + cmd->trc_flags |= TRC_DIF_ERR; 3275 3120 3276 - e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); 3277 - e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); 3278 - e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); 3121 + cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); 3122 + cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); 3123 + cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); 3279 3124 3280 - ql_dbg(ql_dbg_tgt, vha, 0xe075, 3281 - "iocb(s) %p Returned STATUS.\n", sts); 3125 + cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); 3126 + cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); 3127 + cmd->e_ref_tag = 
be32_to_cpu(*(uint32_t *)(ep + 4)); 3282 3128 3283 - ql_dbg(ql_dbg_tgt, vha, 0xf075, 3284 - "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", 3285 - cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 3286 - a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); 3129 + ql_dbg(ql_dbg_tgt_dif, vha, 0xf075, 3130 + "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state); 3287 3131 3288 - /* 3289 - * Ignore sector if: 3290 - * For type 3: ref & app tag is all 'f's 3291 - * For type 0,1,2: app tag is all 'f's 3292 - */ 3293 - if ((a_app_tag == 0xffff) && 3294 - ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) || 3295 - (a_ref_tag == 0xffffffff))) { 3296 - uint32_t blocks_done; 3132 + scsi_status = sense_key = asc = ascq = 0; 3297 3133 3298 - /* 2TB boundary case covered automatically with this */ 3299 - blocks_done = e_ref_tag - (uint32_t)lba + 1; 3300 - cmd->se_cmd.bad_sector = e_ref_tag; 3301 - cmd->se_cmd.pi_err = 0; 3302 - ql_dbg(ql_dbg_tgt, vha, 0xf074, 3303 - "need to return scsi good\n"); 3134 + /* check appl tag */ 3135 + if (cmd->e_app_tag != cmd->a_app_tag) { 3136 + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, 3137 + "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " 3138 + "Ref[%x|%x], App[%x|%x], " 3139 + "Guard [%x|%x] cmd=%p ox_id[%04x]", 3140 + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3141 + cmd->a_ref_tag, cmd->e_ref_tag, 3142 + cmd->a_app_tag, cmd->e_app_tag, 3143 + cmd->a_guard, cmd->e_guard, 3144 + cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); 3304 3145 3305 - /* Update protection tag */ 3306 - if (cmd->prot_sg_cnt) { 3307 - uint32_t i, k = 0, num_ent; 3308 - struct scatterlist *sg, *sgl; 3309 - 3310 - 3311 - sgl = cmd->prot_sg; 3312 - 3313 - /* Patch the corresponding protection tags */ 3314 - for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) { 3315 - num_ent = sg_dma_len(sg) / 8; 3316 - if (k + num_ent < blocks_done) { 3317 - k += num_ent; 3318 - continue; 3319 - } 3320 - k 
= blocks_done; 3321 - break; 3322 - } 3323 - 3324 - if (k != blocks_done) { 3325 - ql_log(ql_log_warn, vha, 0xf076, 3326 - "unexpected tag values tag:lba=%u:%llu)\n", 3327 - e_ref_tag, (unsigned long long)lba); 3328 - goto out; 3329 - } 3330 - 3331 - #if 0 3332 - struct sd_dif_tuple *spt; 3333 - /* TODO: 3334 - * This section came from initiator. Is it valid here? 3335 - * should ulp be override with actual val??? 3336 - */ 3337 - spt = page_address(sg_page(sg)) + sg->offset; 3338 - spt += j; 3339 - 3340 - spt->app_tag = 0xffff; 3341 - if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3) 3342 - spt->ref_tag = 0xffffffff; 3343 - #endif 3344 - } 3345 - 3346 - return 0; 3347 - } 3348 - 3349 - /* check guard */ 3350 - if (e_guard != a_guard) { 3351 - cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 3352 - cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; 3353 - 3354 - ql_log(ql_log_warn, vha, 0xe076, 3355 - "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", 3356 - cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 3357 - a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, 3358 - a_guard, e_guard, cmd); 3359 - goto out; 3146 + cmd->dif_err_code = DIF_ERR_APP; 3147 + scsi_status = SAM_STAT_CHECK_CONDITION; 3148 + sense_key = ABORTED_COMMAND; 3149 + asc = 0x10; 3150 + ascq = 0x2; 3360 3151 } 3361 3152 3362 3153 /* check ref tag */ 3363 - if (e_ref_tag != a_ref_tag) { 3364 - cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 3365 - cmd->se_cmd.bad_sector = e_ref_tag; 3154 + if (cmd->e_ref_tag != cmd->a_ref_tag) { 3155 + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, 3156 + "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " 3157 + "Ref[%x|%x], App[%x|%x], " 3158 + "Guard[%x|%x] cmd=%p ox_id[%04x] ", 3159 + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3160 + cmd->a_ref_tag, cmd->e_ref_tag, 3161 + cmd->a_app_tag, cmd->e_app_tag, 3162 + cmd->a_guard, cmd->e_guard, 3163 + cmd, 
cmd->atio.u.isp24.fcp_hdr.ox_id); 3366 3164 3367 - ql_log(ql_log_warn, vha, 0xe077, 3368 - "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", 3369 - cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 3370 - a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, 3371 - a_guard, e_guard, cmd); 3165 + cmd->dif_err_code = DIF_ERR_REF; 3166 + scsi_status = SAM_STAT_CHECK_CONDITION; 3167 + sense_key = ABORTED_COMMAND; 3168 + asc = 0x10; 3169 + ascq = 0x3; 3372 3170 goto out; 3373 3171 } 3374 3172 3375 - /* check appl tag */ 3376 - if (e_app_tag != a_app_tag) { 3377 - cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; 3378 - cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; 3379 - 3380 - ql_log(ql_log_warn, vha, 0xe078, 3381 - "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", 3382 - cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 3383 - a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, 3384 - a_guard, e_guard, cmd); 3385 - goto out; 3173 + /* check guard */ 3174 + if (cmd->e_guard != cmd->a_guard) { 3175 + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, 3176 + "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " 3177 + "Ref[%x|%x], App[%x|%x], " 3178 + "Guard [%x|%x] cmd=%p ox_id[%04x]", 3179 + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3180 + cmd->a_ref_tag, cmd->e_ref_tag, 3181 + cmd->a_app_tag, cmd->e_app_tag, 3182 + cmd->a_guard, cmd->e_guard, 3183 + cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); 3184 + cmd->dif_err_code = DIF_ERR_GRD; 3185 + scsi_status = SAM_STAT_CHECK_CONDITION; 3186 + sense_key = ABORTED_COMMAND; 3187 + asc = 0x10; 3188 + ascq = 0x1; 3386 3189 } 3387 3190 out: 3388 - return 1; 3389 - } 3191 + switch (cmd->state) { 3192 + case QLA_TGT_STATE_NEED_DATA: 3193 + /* handle_data will load DIF error code */ 3194 + cmd->state = QLA_TGT_STATE_DATA_IN; 3195 + vha->hw->tgt.tgt_ops->handle_data(cmd); 3196 + break; 3197 + default: 3198 + 
spin_lock_irqsave(&cmd->cmd_lock, flags); 3199 + if (cmd->aborted) { 3200 + spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3201 + vha->hw->tgt.tgt_ops->free_cmd(cmd); 3202 + break; 3203 + } 3204 + spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3390 3205 3206 + qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq); 3207 + /* assume scsi status gets out on the wire. 3208 + * Will not wait for completion. 3209 + */ 3210 + vha->hw->tgt.tgt_ops->free_cmd(cmd); 3211 + break; 3212 + } 3213 + } 3391 3214 3392 3215 /* If hardware_lock held on entry, might drop it, then reaquire */ 3393 3216 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ ··· 3376 3251 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, 3377 3252 "Sending TERM ELS CTIO (ha=%p)\n", ha); 3378 3253 3379 - pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL); 3254 + pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 3380 3255 if (pkt == NULL) { 3381 3256 ql_dbg(ql_dbg_tgt, vha, 0xe080, 3382 3257 "qla_target(%d): %s failed: unable to allocate " ··· 3668 3543 { 3669 3544 int term = 0; 3670 3545 3546 + if (cmd->se_cmd.prot_op) 3547 + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, 3548 + "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] " 3549 + "se_cmd=%p tag[%x] op %#x/%s", 3550 + cmd->lba, cmd->lba, 3551 + cmd->num_blks, &cmd->se_cmd, 3552 + cmd->atio.u.isp24.exchange_addr, 3553 + cmd->se_cmd.prot_op, 3554 + prot_op_str(cmd->se_cmd.prot_op)); 3555 + 3671 3556 if (ctio != NULL) { 3672 3557 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; 3673 3558 term = !(c->flags & ··· 3895 3760 struct ctio_crc_from_fw *crc = 3896 3761 (struct ctio_crc_from_fw *)ctio; 3897 3762 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, 3898 - "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n", 3763 + "qla_target(%d): CTIO with DIF_ERROR status %x " 3764 + "received (state %x, ulp_cmd %p) actual_dif[0x%llx] " 3765 + "expect_dif[0x%llx]\n", 3899 3766 vha->vp_idx, status, 
cmd->state, se_cmd, 3900 3767 *((u64 *)&crc->actual_dif[0]), 3901 3768 *((u64 *)&crc->expected_dif[0])); 3902 3769 3903 - if (qlt_handle_dif_error(vha, cmd, ctio)) { 3904 - if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3905 - /* scsi Write/xfer rdy complete */ 3906 - goto skip_term; 3907 - } else { 3908 - /* scsi read/xmit respond complete 3909 - * call handle dif to send scsi status 3910 - * rather than terminate exchange. 3911 - */ 3912 - cmd->state = QLA_TGT_STATE_PROCESSED; 3913 - ha->tgt.tgt_ops->handle_dif_err(cmd); 3914 - return; 3915 - } 3916 - } else { 3917 - /* Need to generate a SCSI good completion. 3918 - * because FW did not send scsi status. 3919 - */ 3920 - status = 0; 3921 - goto skip_term; 3922 - } 3923 - break; 3770 + qlt_handle_dif_error(vha, cmd, ctio); 3771 + return; 3924 3772 } 3925 3773 default: 3926 3774 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, ··· 3926 3808 return; 3927 3809 } 3928 3810 } 3929 - skip_term: 3930 3811 3931 3812 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3932 3813 cmd->trc_flags |= TRC_CTIO_DONE; ··· 4701 4584 } 4702 4585 4703 4586 if (sess != NULL) { 4704 - if (sess->fw_login_state == DSC_LS_PLOGI_PEND) { 4587 + if (sess->fw_login_state != DSC_LS_PLOGI_PEND && 4588 + sess->fw_login_state != DSC_LS_PLOGI_COMP) { 4705 4589 /* 4706 4590 * Impatient initiator sent PRLI before last 4707 4591 * PLOGI could finish. 
Will force him to re-try, ··· 4741 4623 4742 4624 /* Make session global (not used in fabric mode) */ 4743 4625 if (ha->current_topology != ISP_CFG_F) { 4744 - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 4745 - set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 4746 - qla2xxx_wake_dpc(vha); 4626 + if (sess) { 4627 + ql_dbg(ql_dbg_disc, vha, 0xffff, 4628 + "%s %d %8phC post nack\n", 4629 + __func__, __LINE__, sess->port_name); 4630 + qla24xx_post_nack_work(vha, sess, iocb, 4631 + SRB_NACK_PRLI); 4632 + res = 0; 4633 + } else { 4634 + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 4635 + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 4636 + qla2xxx_wake_dpc(vha); 4637 + } 4747 4638 } else { 4748 4639 if (sess) { 4749 4640 ql_dbg(ql_dbg_disc, vha, 0xffff, 4750 - "%s %d %8phC post nack\n", 4751 - __func__, __LINE__, sess->port_name); 4752 - 4641 + "%s %d %8phC post nack\n", 4642 + __func__, __LINE__, sess->port_name); 4753 4643 qla24xx_post_nack_work(vha, sess, iocb, 4754 4644 SRB_NACK_PRLI); 4755 4645 res = 0; 4756 4646 } 4757 4647 } 4758 4648 break; 4759 - 4760 4649 4761 4650 case ELS_TPRLO: 4762 4651 if (le16_to_cpu(iocb->u.isp24.flags) & ··· 5204 5079 5205 5080 static int 5206 5081 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, 5207 - struct atio_from_isp *atio) 5082 + struct atio_from_isp *atio, bool ha_locked) 5208 5083 { 5209 5084 struct qla_hw_data *ha = vha->hw; 5210 5085 uint16_t status; 5086 + unsigned long flags; 5211 5087 5212 5088 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) 5213 5089 return 0; 5214 5090 5091 + if (!ha_locked) 5092 + spin_lock_irqsave(&ha->hardware_lock, flags); 5215 5093 status = temp_sam_status; 5216 5094 qlt_send_busy(vha, atio, status); 5095 + if (!ha_locked) 5096 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 5097 + 5217 5098 return 1; 5218 5099 } 5219 5100 ··· 5234 5103 unsigned long flags; 5235 5104 5236 5105 if (unlikely(tgt == NULL)) { 5237 - ql_dbg(ql_dbg_io, vha, 0x3064, 5106 + ql_dbg(ql_dbg_tgt, vha, 0x3064, 5238 
5107 "ATIO pkt, but no tgt (ha %p)", ha); 5239 5108 return; 5240 5109 } ··· 5264 5133 5265 5134 5266 5135 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { 5267 - rc = qlt_chk_qfull_thresh_hold(vha, atio); 5136 + rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked); 5268 5137 if (rc != 0) { 5269 5138 tgt->atio_irq_cmd_count--; 5270 5139 return; ··· 5387 5256 break; 5388 5257 } 5389 5258 5390 - rc = qlt_chk_qfull_thresh_hold(vha, atio); 5259 + rc = qlt_chk_qfull_thresh_hold(vha, atio, true); 5391 5260 if (rc != 0) { 5392 5261 tgt->irq_cmd_count--; 5393 5262 return; ··· 5662 5531 5663 5532 fcport->loop_id = loop_id; 5664 5533 5665 - rc = qla2x00_get_port_database(vha, fcport, 0); 5534 + rc = qla24xx_gpdb_wait(vha, fcport, 0); 5666 5535 if (rc != QLA_SUCCESS) { 5667 5536 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, 5668 5537 "qla_target(%d): Failed to retrieve fcport " ··· 5844 5713 } 5845 5714 } 5846 5715 5847 - spin_lock_irqsave(&ha->hardware_lock, flags); 5848 - 5849 - if (tgt->tgt_stop) 5850 - goto out_term; 5851 - 5852 5716 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); 5717 + ha->tgt.tgt_ops->put_sess(sess); 5718 + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 5719 + 5853 5720 if (rc != 0) 5854 5721 goto out_term; 5855 - spin_unlock_irqrestore(&ha->hardware_lock, flags); 5856 - if (sess) 5857 - ha->tgt.tgt_ops->put_sess(sess); 5858 - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 5859 5722 return; 5860 5723 5861 5724 out_term2: 5862 - spin_lock_irqsave(&ha->hardware_lock, flags); 5863 - 5864 - out_term: 5865 - qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); 5866 - spin_unlock_irqrestore(&ha->hardware_lock, flags); 5867 - 5868 5725 if (sess) 5869 5726 ha->tgt.tgt_ops->put_sess(sess); 5870 5727 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 5728 + 5729 + out_term: 5730 + spin_lock_irqsave(&ha->hardware_lock, flags); 5731 + qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); 5732 + 
spin_unlock_irqrestore(&ha->hardware_lock, flags); 5871 5733 } 5872 5734 5873 5735 static void qlt_tmr_work(struct qla_tgt *tgt, ··· 5880 5756 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 5881 5757 5882 5758 if (tgt->tgt_stop) 5883 - goto out_term; 5759 + goto out_term2; 5884 5760 5885 5761 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; 5886 5762 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); ··· 5892 5768 5893 5769 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 5894 5770 if (!sess) 5895 - goto out_term; 5771 + goto out_term2; 5896 5772 } else { 5897 5773 if (sess->deleted) { 5898 5774 sess = NULL; 5899 - goto out_term; 5775 + goto out_term2; 5900 5776 } 5901 5777 5902 5778 if (!kref_get_unless_zero(&sess->sess_kref)) { ··· 5904 5780 "%s: kref_get fail %8phC\n", 5905 5781 __func__, sess->port_name); 5906 5782 sess = NULL; 5907 - goto out_term; 5783 + goto out_term2; 5908 5784 } 5909 5785 } 5910 5786 ··· 5914 5790 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 5915 5791 5916 5792 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 5793 + ha->tgt.tgt_ops->put_sess(sess); 5794 + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5795 + 5917 5796 if (rc != 0) 5918 5797 goto out_term; 5919 - 5920 - ha->tgt.tgt_ops->put_sess(sess); 5921 - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5922 5798 return; 5923 5799 5800 + out_term2: 5801 + if (sess) 5802 + ha->tgt.tgt_ops->put_sess(sess); 5803 + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5924 5804 out_term: 5925 5805 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); 5926 - ha->tgt.tgt_ops->put_sess(sess); 5927 - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5928 5806 } 5929 5807 5930 5808 static void qlt_sess_work_fn(struct work_struct *work) ··· 6019 5893 tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX; 6020 5894 tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX; 6021 5895 6022 - if (base_vha->fc_vport) 6023 - return 0; 6024 - 6025 5896 mutex_lock(&qla_tgt_mutex); 6026 
5897 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); 6027 5898 mutex_unlock(&qla_tgt_mutex); 5899 + 5900 + if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target) 5901 + ha->tgt.tgt_ops->add_target(base_vha); 6028 5902 6029 5903 return 0; 6030 5904 } ··· 6052 5926 qlt_release(vha->vha_tgt.qla_tgt); 6053 5927 6054 5928 return 0; 5929 + } 5930 + 5931 + void qlt_remove_target_resources(struct qla_hw_data *ha) 5932 + { 5933 + struct scsi_qla_host *node; 5934 + u32 key = 0; 5935 + 5936 + btree_for_each_safe32(&ha->tgt.host_map, key, node) 5937 + btree_remove32(&ha->tgt.host_map, key); 5938 + 5939 + btree_destroy32(&ha->tgt.host_map); 6055 5940 } 6056 5941 6057 5942 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, ··· 6371 6234 struct atio_from_isp *pkt; 6372 6235 int cnt, i; 6373 6236 6374 - if (!vha->flags.online) 6237 + if (!ha->flags.fw_started) 6375 6238 return; 6376 6239 6377 6240 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || ··· 6718 6581 void 6719 6582 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) 6720 6583 { 6584 + int rc; 6585 + 6721 6586 if (!QLA_TGT_MODE_ENABLED()) 6722 6587 return; 6723 6588 ··· 6739 6600 qlt_unknown_atio_work_fn); 6740 6601 6741 6602 qlt_clear_mode(base_vha); 6603 + 6604 + rc = btree_init32(&ha->tgt.host_map); 6605 + if (rc) 6606 + ql_log(ql_log_info, base_vha, 0xffff, 6607 + "Unable to initialize ha->host_map btree\n"); 6608 + 6609 + qlt_update_vp_map(base_vha, SET_VP_IDX); 6742 6610 } 6743 6611 6744 6612 irqreturn_t ··· 6788 6642 spin_lock_irqsave(&ha->hardware_lock, flags); 6789 6643 qlt_response_pkt_all_vps(vha, (response_t *)&op->atio); 6790 6644 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6645 + 6646 + kfree(op); 6791 6647 } 6792 6648 6793 6649 void ··· 6854 6706 void 6855 6707 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) 6856 6708 { 6709 + void *slot; 6710 + u32 key; 6711 + int rc; 6712 + 6857 6713 if (!QLA_TGT_MODE_ENABLED()) 6858 6714 return; 6715 + 
6716 + key = vha->d_id.b24; 6859 6717 6860 6718 switch (cmd) { 6861 6719 case SET_VP_IDX: 6862 6720 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; 6863 6721 break; 6864 6722 case SET_AL_PA: 6865 - vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx; 6723 + slot = btree_lookup32(&vha->hw->tgt.host_map, key); 6724 + if (!slot) { 6725 + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, 6726 + "Save vha in host_map %p %06x\n", vha, key); 6727 + rc = btree_insert32(&vha->hw->tgt.host_map, 6728 + key, vha, GFP_ATOMIC); 6729 + if (rc) 6730 + ql_log(ql_log_info, vha, 0xffff, 6731 + "Unable to insert s_id into host_map: %06x\n", 6732 + key); 6733 + return; 6734 + } 6735 + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, 6736 + "replace existing vha in host_map %p %06x\n", vha, key); 6737 + btree_update32(&vha->hw->tgt.host_map, key, vha); 6866 6738 break; 6867 6739 case RESET_VP_IDX: 6868 6740 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; 6869 6741 break; 6870 6742 case RESET_AL_PA: 6871 - vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0; 6743 + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, 6744 + "clear vha in host_map %p %06x\n", vha, key); 6745 + slot = btree_lookup32(&vha->hw->tgt.host_map, key); 6746 + if (slot) 6747 + btree_remove32(&vha->hw->tgt.host_map, key); 6748 + vha->d_id.b24 = 0; 6872 6749 break; 6750 + } 6751 + } 6752 + 6753 + void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id) 6754 + { 6755 + unsigned long flags; 6756 + struct qla_hw_data *ha = vha->hw; 6757 + 6758 + if (!vha->d_id.b24) { 6759 + spin_lock_irqsave(&ha->vport_slock, flags); 6760 + vha->d_id = id; 6761 + qlt_update_vp_map(vha, SET_AL_PA); 6762 + spin_unlock_irqrestore(&ha->vport_slock, flags); 6763 + } else if (vha->d_id.b24 != id.b24) { 6764 + spin_lock_irqsave(&ha->vport_slock, flags); 6765 + qlt_update_vp_map(vha, RESET_AL_PA); 6766 + vha->d_id = id; 6767 + qlt_update_vp_map(vha, SET_AL_PA); 6768 + spin_unlock_irqrestore(&ha->vport_slock, flags); 6873 6769 } 6874 6770 } 6875 6771
+33 -6
drivers/scsi/qla2xxx/qla_target.h
··· 378 378 atio->u.isp24.fcp_cmnd.add_cdb_len = 0; 379 379 } 380 380 381 + static inline int get_datalen_for_atio(struct atio_from_isp *atio) 382 + { 383 + int len = atio->u.isp24.fcp_cmnd.add_cdb_len; 384 + 385 + return (be32_to_cpu(get_unaligned((uint32_t *) 386 + &atio->u.isp24.fcp_cmnd.add_cdb[len * 4]))); 387 + } 388 + 381 389 #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ 382 390 383 391 /* ··· 675 667 int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, 676 668 unsigned char *, uint32_t, int, int, int); 677 669 void (*handle_data)(struct qla_tgt_cmd *); 678 - void (*handle_dif_err)(struct qla_tgt_cmd *); 679 670 int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t, 680 671 uint32_t); 681 672 void (*free_cmd)(struct qla_tgt_cmd *); ··· 691 684 void (*clear_nacl_from_fcport_map)(struct fc_port *); 692 685 void (*put_sess)(struct fc_port *); 693 686 void (*shutdown_sess)(struct fc_port *); 687 + int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts); 688 + int (*chk_dif_tags)(uint32_t tag); 689 + void (*add_target)(struct scsi_qla_host *); 694 690 }; 695 691 696 692 int qla2x00_wait_for_hba_online(struct scsi_qla_host *); ··· 730 720 #define QLA_TGT_ABORT_ALL 0xFFFE 731 721 #define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD 732 722 #define QLA_TGT_NEXUS_LOSS 0xFFFC 733 - #define QLA_TGT_ABTS 0xFFFB 734 - #define QLA_TGT_2G_ABORT_TASK 0xFFFA 723 + #define QLA_TGT_ABTS 0xFFFB 724 + #define QLA_TGT_2G_ABORT_TASK 0xFFFA 735 725 736 726 /* Notify Acknowledge flags */ 737 727 #define NOTIFY_ACK_RES_COUNT BIT_8 ··· 855 845 TRC_CMD_FREE = BIT_17, 856 846 TRC_DATA_IN = BIT_18, 857 847 TRC_ABORT = BIT_19, 848 + TRC_DIF_ERR = BIT_20, 858 849 }; 859 850 860 851 struct qla_tgt_cmd { ··· 873 862 unsigned int sg_mapped:1; 874 863 unsigned int free_sg:1; 875 864 unsigned int write_data_transferred:1; 876 - unsigned int ctx_dsd_alloced:1; 877 865 unsigned int q_full:1; 878 866 unsigned int term_exchg:1; 879 867 unsigned int 
cmd_sent_to_fw:1; ··· 895 885 struct list_head cmd_list; 896 886 897 887 struct atio_from_isp atio; 898 - /* t10dif */ 888 + 889 + uint8_t ctx_dsd_alloced; 890 + 891 + /* T10-DIF */ 892 + #define DIF_ERR_NONE 0 893 + #define DIF_ERR_GRD 1 894 + #define DIF_ERR_REF 2 895 + #define DIF_ERR_APP 3 896 + int8_t dif_err_code; 899 897 struct scatterlist *prot_sg; 900 898 uint32_t prot_sg_cnt; 901 - uint32_t blk_sz; 899 + uint32_t blk_sz, num_blks; 900 + uint8_t scsi_status, sense_key, asc, ascq; 901 + 902 902 struct crc_context *ctx; 903 + uint8_t *cdb; 904 + uint64_t lba; 905 + uint16_t a_guard, e_guard, a_app_tag, e_app_tag; 906 + uint32_t a_ref_tag, e_ref_tag; 903 907 904 908 uint64_t jiffies_at_alloc; 905 909 uint64_t jiffies_at_free; ··· 1076 1052 extern int qlt_free_qfull_cmds(struct scsi_qla_host *); 1077 1053 extern void qlt_logo_completion_handler(fc_port_t *, int); 1078 1054 extern void qlt_do_generation_tick(struct scsi_qla_host *, int *); 1055 + 1056 + void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t, 1057 + uint8_t, uint8_t, uint8_t); 1079 1058 1080 1059 #endif /* __QLA_TARGET_H */
+3 -3
drivers/scsi/qla2xxx/qla_version.h
··· 7 7 /* 8 8 * Driver version 9 9 */ 10 - #define QLA2XXX_VERSION "8.07.00.38-k" 10 + #define QLA2XXX_VERSION "9.00.00.00-k" 11 11 12 - #define QLA_DRIVER_MAJOR_VER 8 13 - #define QLA_DRIVER_MINOR_VER 7 12 + #define QLA_DRIVER_MAJOR_VER 9 13 + #define QLA_DRIVER_MINOR_VER 0 14 14 #define QLA_DRIVER_PATCH_VER 0 15 15 #define QLA_DRIVER_BETA_VER 0
+33 -16
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 531 531 return; 532 532 } 533 533 534 + switch (cmd->dif_err_code) { 535 + case DIF_ERR_GRD: 536 + cmd->se_cmd.pi_err = 537 + TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 538 + break; 539 + case DIF_ERR_REF: 540 + cmd->se_cmd.pi_err = 541 + TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 542 + break; 543 + case DIF_ERR_APP: 544 + cmd->se_cmd.pi_err = 545 + TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; 546 + break; 547 + case DIF_ERR_NONE: 548 + default: 549 + break; 550 + } 551 + 534 552 if (cmd->se_cmd.pi_err) 535 553 transport_generic_request_failure(&cmd->se_cmd, 536 554 cmd->se_cmd.pi_err); ··· 573 555 queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); 574 556 } 575 557 576 - static void tcm_qla2xxx_handle_dif_work(struct work_struct *work) 558 + static int tcm_qla2xxx_chk_dif_tags(uint32_t tag) 577 559 { 578 - struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 579 - 580 - /* take an extra kref to prevent cmd free too early. 581 - * need to wait for SCSI status/check condition to 582 - * finish responding generate by transport_generic_request_failure. 
583 - */ 584 - kref_get(&cmd->se_cmd.cmd_kref); 585 - transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err); 560 + return 0; 586 561 } 587 562 588 - /* 589 - * Called from qla_target.c:qlt_do_ctio_completion() 590 - */ 591 - static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd) 563 + static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd, 564 + uint16_t *pfw_prot_opts) 592 565 { 593 - INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work); 594 - queue_work(tcm_qla2xxx_free_wq, &cmd->work); 566 + struct se_cmd *se_cmd = &cmd->se_cmd; 567 + 568 + if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) 569 + *pfw_prot_opts |= PO_DISABLE_GUARD_CHECK; 570 + 571 + if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)) 572 + *pfw_prot_opts |= PO_DIS_APP_TAG_VALD; 573 + 574 + return 0; 595 575 } 596 576 597 577 /* ··· 1626 1610 static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { 1627 1611 .handle_cmd = tcm_qla2xxx_handle_cmd, 1628 1612 .handle_data = tcm_qla2xxx_handle_data, 1629 - .handle_dif_err = tcm_qla2xxx_handle_dif_err, 1630 1613 .handle_tmr = tcm_qla2xxx_handle_tmr, 1631 1614 .free_cmd = tcm_qla2xxx_free_cmd, 1632 1615 .free_mcmd = tcm_qla2xxx_free_mcmd, ··· 1637 1622 .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, 1638 1623 .put_sess = tcm_qla2xxx_put_sess, 1639 1624 .shutdown_sess = tcm_qla2xxx_shutdown_sess, 1625 + .get_dif_tags = tcm_qla2xxx_dif_tags, 1626 + .chk_dif_tags = tcm_qla2xxx_chk_dif_tags, 1640 1627 }; 1641 1628 1642 1629 static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
+1 -1
drivers/scsi/ufs/ufshcd.c
··· 7642 7642 if (kstrtoul(buf, 0, &value)) 7643 7643 return -EINVAL; 7644 7644 7645 - if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX)) 7645 + if (value >= UFS_PM_LVL_MAX) 7646 7646 return -EINVAL; 7647 7647 7648 7648 spin_lock_irqsave(hba->host->host_lock, flags);
+38 -44
drivers/target/target_core_alua.c
··· 43 43 #include "target_core_ua.h" 44 44 45 45 static sense_reason_t core_alua_check_transition(int state, int valid, 46 - int *primary); 46 + int *primary, int explicit); 47 47 static int core_alua_set_tg_pt_secondary_state( 48 48 struct se_lun *lun, int explicit, int offline); 49 49 ··· 335 335 * the state is a primary or secondary target port asymmetric 336 336 * access state. 337 337 */ 338 - rc = core_alua_check_transition(alua_access_state, 339 - valid_states, &primary); 338 + rc = core_alua_check_transition(alua_access_state, valid_states, 339 + &primary, 1); 340 340 if (rc) { 341 341 /* 342 342 * If the SET TARGET PORT GROUPS attempts to establish ··· 691 691 692 692 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) 693 693 return 0; 694 - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 694 + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) 695 695 return 0; 696 696 697 697 /* ··· 762 762 * Check implicit and explicit ALUA state change request. 763 763 */ 764 764 static sense_reason_t 765 - core_alua_check_transition(int state, int valid, int *primary) 765 + core_alua_check_transition(int state, int valid, int *primary, int explicit) 766 766 { 767 767 /* 768 768 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are ··· 804 804 *primary = 0; 805 805 break; 806 806 case ALUA_ACCESS_STATE_TRANSITION: 807 - /* 808 - * Transitioning is set internally, and 809 - * cannot be selected manually. 810 - */ 811 - goto not_supported; 807 + if (!(valid & ALUA_T_SUP) || explicit) 808 + /* 809 + * Transitioning is set internally and by tcmu daemon, 810 + * and cannot be selected through a STPG. 
811 + */ 812 + goto not_supported; 813 + *primary = 0; 814 + break; 812 815 default: 813 816 pr_err("Unknown ALUA access state: 0x%02x\n", state); 814 817 return TCM_INVALID_PARAMETER_LIST; ··· 1016 1013 static void core_alua_do_transition_tg_pt_work(struct work_struct *work) 1017 1014 { 1018 1015 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, 1019 - struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); 1016 + struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work); 1020 1017 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 1021 1018 bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == 1022 1019 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); ··· 1073 1070 if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) 1074 1071 return 0; 1075 1072 1076 - if (new_state == ALUA_ACCESS_STATE_TRANSITION) 1073 + if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) 1077 1074 return -EAGAIN; 1078 1075 1079 1076 /* 1080 1077 * Flush any pending transitions 1081 1078 */ 1082 - if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs && 1083 - atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == 1084 - ALUA_ACCESS_STATE_TRANSITION) { 1085 - /* Just in case */ 1086 - tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; 1087 - tg_pt_gp->tg_pt_gp_transition_complete = &wait; 1088 - flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); 1089 - wait_for_completion(&wait); 1090 - tg_pt_gp->tg_pt_gp_transition_complete = NULL; 1091 - return 0; 1092 - } 1079 + if (!explicit) 1080 + flush_work(&tg_pt_gp->tg_pt_gp_transition_work); 1093 1081 1094 1082 /* 1095 1083 * Save the old primary ALUA access state, and set the current state 1096 1084 * to ALUA_ACCESS_STATE_TRANSITION. 
1097 1085 */ 1098 - tg_pt_gp->tg_pt_gp_alua_previous_state = 1099 - atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 1100 - tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; 1101 - 1102 1086 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1103 1087 ALUA_ACCESS_STATE_TRANSITION); 1104 1088 tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? ··· 1093 1103 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; 1094 1104 1095 1105 core_alua_queue_state_change_ua(tg_pt_gp); 1106 + 1107 + if (new_state == ALUA_ACCESS_STATE_TRANSITION) 1108 + return 0; 1109 + 1110 + tg_pt_gp->tg_pt_gp_alua_previous_state = 1111 + atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 1112 + tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; 1096 1113 1097 1114 /* 1098 1115 * Check for the optional ALUA primary state transition delay ··· 1114 1117 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 1115 1118 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1116 1119 1117 - if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { 1118 - unsigned long transition_tmo; 1119 - 1120 - transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ; 1121 - queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, 1122 - &tg_pt_gp->tg_pt_gp_transition_work, 1123 - transition_tmo); 1124 - } else { 1120 + schedule_work(&tg_pt_gp->tg_pt_gp_transition_work); 1121 + if (explicit) { 1125 1122 tg_pt_gp->tg_pt_gp_transition_complete = &wait; 1126 - queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, 1127 - &tg_pt_gp->tg_pt_gp_transition_work, 0); 1128 1123 wait_for_completion(&wait); 1129 1124 tg_pt_gp->tg_pt_gp_transition_complete = NULL; 1130 1125 } ··· 1138 1149 struct t10_alua_tg_pt_gp *tg_pt_gp; 1139 1150 int primary, valid_states, rc = 0; 1140 1151 1152 + if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) 1153 + return -ENODEV; 1154 + 1141 1155 valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; 1142 - if (core_alua_check_transition(new_state, valid_states, &primary) != 0) 1156 + if 
(core_alua_check_transition(new_state, valid_states, &primary, 1157 + explicit) != 0) 1143 1158 return -EINVAL; 1144 1159 1145 1160 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; ··· 1688 1695 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); 1689 1696 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); 1690 1697 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); 1691 - INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work, 1692 - core_alua_do_transition_tg_pt_work); 1698 + INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work, 1699 + core_alua_do_transition_tg_pt_work); 1693 1700 tg_pt_gp->tg_pt_gp_dev = dev; 1694 1701 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1695 1702 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); ··· 1797 1804 dev->t10_alua.alua_tg_pt_gps_counter--; 1798 1805 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1799 1806 1800 - flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); 1807 + flush_work(&tg_pt_gp->tg_pt_gp_transition_work); 1801 1808 1802 1809 /* 1803 1810 * Allow a struct t10_alua_tg_pt_gp_member * referenced by ··· 1966 1973 unsigned char buf[TG_PT_GROUP_NAME_BUF]; 1967 1974 int move = 0; 1968 1975 1969 - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH || 1976 + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA || 1970 1977 (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 1971 1978 return -ENODEV; 1972 1979 ··· 2223 2230 unsigned long tmp; 2224 2231 int ret; 2225 2232 2226 - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH || 2233 + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA || 2227 2234 (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 2228 2235 return -ENODEV; 2229 2236 ··· 2309 2316 2310 2317 int core_setup_alua(struct se_device *dev) 2311 2318 { 2312 - if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && 2319 + if (!(dev->transport->transport_flags & 2320 + TRANSPORT_FLAG_PASSTHROUGH_ALUA) && 2313 2321 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { 2314 2322 
struct t10_alua_lu_gp_member *lu_gp_mem; 2315 2323
+4
drivers/target/target_core_configfs.c
··· 421 421 pr_err("Missing tfo->aborted_task()\n"); 422 422 return -EINVAL; 423 423 } 424 + if (!tfo->check_stop_free) { 425 + pr_err("Missing tfo->check_stop_free()\n"); 426 + return -EINVAL; 427 + } 424 428 /* 425 429 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() 426 430 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
+14 -36
drivers/target/target_core_pscsi.c
··· 154 154 155 155 buf = kzalloc(12, GFP_KERNEL); 156 156 if (!buf) 157 - return; 157 + goto out_free; 158 158 159 159 memset(cdb, 0, MAX_COMMAND_SIZE); 160 160 cdb[0] = MODE_SENSE; ··· 169 169 * If MODE_SENSE still returns zero, set the default value to 1024. 170 170 */ 171 171 sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); 172 + out_free: 172 173 if (!sdev->sector_size) 173 174 sdev->sector_size = 1024; 174 - out_free: 175 + 175 176 kfree(buf); 176 177 } 177 178 ··· 315 314 sd->lun, sd->queue_depth); 316 315 } 317 316 318 - dev->dev_attrib.hw_block_size = sd->sector_size; 317 + dev->dev_attrib.hw_block_size = 318 + min_not_zero((int)sd->sector_size, 512); 319 319 dev->dev_attrib.hw_max_sectors = 320 - min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); 320 + min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q)); 321 321 dev->dev_attrib.hw_queue_depth = sd->queue_depth; 322 322 323 323 /* ··· 341 339 /* 342 340 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. 343 341 */ 344 - if (sd->type == TYPE_TAPE) 342 + if (sd->type == TYPE_TAPE) { 345 343 pscsi_tape_read_blocksize(dev, sd); 344 + dev->dev_attrib.hw_block_size = sd->sector_size; 345 + } 346 346 return 0; 347 347 } 348 348 ··· 410 406 /* 411 407 * Called with struct Scsi_Host->host_lock called. 412 408 */ 413 - static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) 409 + static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd) 414 410 __releases(sh->host_lock) 415 411 { 416 412 struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; ··· 434 430 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, 435 431 sd->channel, sd->id, sd->lun); 436 432 437 - return 0; 438 - } 439 - 440 - /* 441 - * Called with struct Scsi_Host->host_lock called. 
442 - */ 443 - static int pscsi_create_type_other(struct se_device *dev, 444 - struct scsi_device *sd) 445 - __releases(sh->host_lock) 446 - { 447 - struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; 448 - struct Scsi_Host *sh = sd->host; 449 - int ret; 450 - 451 - spin_unlock_irq(sh->host_lock); 452 - ret = pscsi_add_device_to_list(dev, sd); 453 - if (ret) 454 - return ret; 455 - 456 - pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n", 457 - phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, 458 - sd->channel, sd->id, sd->lun); 459 433 return 0; 460 434 } 461 435 ··· 524 542 case TYPE_DISK: 525 543 ret = pscsi_create_type_disk(dev, sd); 526 544 break; 527 - case TYPE_ROM: 528 - ret = pscsi_create_type_rom(dev, sd); 529 - break; 530 545 default: 531 - ret = pscsi_create_type_other(dev, sd); 546 + ret = pscsi_create_type_nondisk(dev, sd); 532 547 break; 533 548 } 534 549 ··· 590 611 else if (pdv->pdv_lld_host) 591 612 scsi_host_put(pdv->pdv_lld_host); 592 613 593 - if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) 594 - scsi_device_put(sd); 614 + scsi_device_put(sd); 595 615 596 616 pdv->pdv_sd = NULL; 597 617 } ··· 1042 1064 if (pdv->pdv_bd && pdv->pdv_bd->bd_part) 1043 1065 return pdv->pdv_bd->bd_part->nr_sects; 1044 1066 1045 - dump_stack(); 1046 1067 return 0; 1047 1068 } 1048 1069 ··· 1080 1103 static const struct target_backend_ops pscsi_ops = { 1081 1104 .name = "pscsi", 1082 1105 .owner = THIS_MODULE, 1083 - .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, 1106 + .transport_flags = TRANSPORT_FLAG_PASSTHROUGH | 1107 + TRANSPORT_FLAG_PASSTHROUGH_ALUA, 1084 1108 .attach_hba = pscsi_attach_hba, 1085 1109 .detach_hba = pscsi_detach_hba, 1086 1110 .pmode_enable_hba = pscsi_pmode_enable_hba,
+8 -2
drivers/target/target_core_sbc.c
··· 1105 1105 return ret; 1106 1106 break; 1107 1107 case VERIFY: 1108 + case VERIFY_16: 1108 1109 size = 0; 1109 - sectors = transport_get_sectors_10(cdb); 1110 - cmd->t_task_lba = transport_lba_32(cdb); 1110 + if (cdb[0] == VERIFY) { 1111 + sectors = transport_get_sectors_10(cdb); 1112 + cmd->t_task_lba = transport_lba_32(cdb); 1113 + } else { 1114 + sectors = transport_get_sectors_16(cdb); 1115 + cmd->t_task_lba = transport_lba_64(cdb); 1116 + } 1111 1117 cmd->execute_cmd = sbc_emulate_noop; 1112 1118 goto check_lba; 1113 1119 case REZERO_UNIT:
+2 -1
drivers/target/target_core_tpg.c
··· 602 602 if (ret) 603 603 goto out_kill_ref; 604 604 605 - if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && 605 + if (!(dev->transport->transport_flags & 606 + TRANSPORT_FLAG_PASSTHROUGH_ALUA) && 606 607 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 607 608 target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp); 608 609
+1 -2
drivers/target/target_core_transport.c
··· 636 636 * Fabric modules are expected to return '1' here if the se_cmd being 637 637 * passed is released at this point, or zero if not being released. 638 638 */ 639 - return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd) 640 - : 0; 639 + return cmd->se_tfo->check_stop_free(cmd); 641 640 } 642 641 643 642 static void transport_lun_remove_cmd(struct se_cmd *cmd)
+123 -29
drivers/target/target_core_user.c
··· 28 28 #include <linux/stringify.h> 29 29 #include <linux/bitops.h> 30 30 #include <linux/highmem.h> 31 + #include <linux/configfs.h> 31 32 #include <net/genetlink.h> 32 33 #include <scsi/scsi_common.h> 33 34 #include <scsi/scsi_proto.h> ··· 113 112 spinlock_t commands_lock; 114 113 115 114 struct timer_list timeout; 115 + unsigned int cmd_time_out; 116 116 117 117 char dev_config[TCMU_CONFIG_LEN]; 118 118 }; ··· 174 172 175 173 tcmu_cmd->se_cmd = se_cmd; 176 174 tcmu_cmd->tcmu_dev = udev; 177 - tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT); 175 + if (udev->cmd_time_out) 176 + tcmu_cmd->deadline = jiffies + 177 + msecs_to_jiffies(udev->cmd_time_out); 178 178 179 179 idr_preload(GFP_KERNEL); 180 180 spin_lock_irq(&udev->commands_lock); ··· 455 451 456 452 pr_debug("sleeping for ring space\n"); 457 453 spin_unlock_irq(&udev->cmdr_lock); 458 - ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); 454 + if (udev->cmd_time_out) 455 + ret = schedule_timeout( 456 + msecs_to_jiffies(udev->cmd_time_out)); 457 + else 458 + ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); 459 459 finish_wait(&udev->wait_cmdr, &__wait); 460 460 if (!ret) { 461 461 pr_warn("tcmu: command timed out\n"); ··· 534 526 /* TODO: only if FLUSH and FUA? 
*/ 535 527 uio_event_notify(&udev->uio_info); 536 528 537 - mod_timer(&udev->timeout, 538 - round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT))); 529 + if (udev->cmd_time_out) 530 + mod_timer(&udev->timeout, round_jiffies_up(jiffies + 531 + msecs_to_jiffies(udev->cmd_time_out))); 539 532 540 533 return TCM_NO_SENSE; 541 534 } ··· 751 742 } 752 743 753 744 udev->hba = hba; 745 + udev->cmd_time_out = TCMU_TIME_OUT; 754 746 755 747 init_waitqueue_head(&udev->wait_cmdr); 756 748 spin_lock_init(&udev->cmdr_lock); ··· 970 960 if (dev->dev_attrib.hw_block_size == 0) 971 961 dev->dev_attrib.hw_block_size = 512; 972 962 /* Other attributes can be configured in userspace */ 973 - dev->dev_attrib.hw_max_sectors = 128; 963 + if (!dev->dev_attrib.hw_max_sectors) 964 + dev->dev_attrib.hw_max_sectors = 128; 974 965 dev->dev_attrib.hw_queue_depth = 128; 975 966 976 967 ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, ··· 1008 997 kfree(udev); 1009 998 } 1010 999 1000 + static bool tcmu_dev_configured(struct tcmu_dev *udev) 1001 + { 1002 + return udev->uio_info.uio_dev ? 
true : false; 1003 + } 1004 + 1011 1005 static void tcmu_free_device(struct se_device *dev) 1012 1006 { 1013 1007 struct tcmu_dev *udev = TCMU_DEV(dev); ··· 1034 1018 spin_unlock_irq(&udev->commands_lock); 1035 1019 WARN_ON(!all_expired); 1036 1020 1037 - /* Device was configured */ 1038 - if (udev->uio_info.uio_dev) { 1021 + if (tcmu_dev_configured(udev)) { 1039 1022 tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name, 1040 1023 udev->uio_info.uio_dev->minor); 1041 1024 ··· 1046 1031 } 1047 1032 1048 1033 enum { 1049 - Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err, 1034 + Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, 1035 + Opt_err, 1050 1036 }; 1051 1037 1052 1038 static match_table_t tokens = { 1053 1039 {Opt_dev_config, "dev_config=%s"}, 1054 1040 {Opt_dev_size, "dev_size=%u"}, 1055 1041 {Opt_hw_block_size, "hw_block_size=%u"}, 1042 + {Opt_hw_max_sectors, "hw_max_sectors=%u"}, 1056 1043 {Opt_err, NULL} 1057 1044 }; 1045 + 1046 + static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib) 1047 + { 1048 + unsigned long tmp_ul; 1049 + char *arg_p; 1050 + int ret; 1051 + 1052 + arg_p = match_strdup(arg); 1053 + if (!arg_p) 1054 + return -ENOMEM; 1055 + 1056 + ret = kstrtoul(arg_p, 0, &tmp_ul); 1057 + kfree(arg_p); 1058 + if (ret < 0) { 1059 + pr_err("kstrtoul() failed for dev attrib\n"); 1060 + return ret; 1061 + } 1062 + if (!tmp_ul) { 1063 + pr_err("dev attrib must be nonzero\n"); 1064 + return -EINVAL; 1065 + } 1066 + *dev_attrib = tmp_ul; 1067 + return 0; 1068 + } 1058 1069 1059 1070 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, 1060 1071 const char *page, ssize_t count) ··· 1089 1048 char *orig, *ptr, *opts, *arg_p; 1090 1049 substring_t args[MAX_OPT_ARGS]; 1091 1050 int ret = 0, token; 1092 - unsigned long tmp_ul; 1093 1051 1094 1052 opts = kstrdup(page, GFP_KERNEL); 1095 1053 if (!opts) ··· 1122 1082 pr_err("kstrtoul() failed for dev_size=\n"); 1123 1083 break; 1124 1084 case 
Opt_hw_block_size: 1125 - arg_p = match_strdup(&args[0]); 1126 - if (!arg_p) { 1127 - ret = -ENOMEM; 1128 - break; 1129 - } 1130 - ret = kstrtoul(arg_p, 0, &tmp_ul); 1131 - kfree(arg_p); 1132 - if (ret < 0) { 1133 - pr_err("kstrtoul() failed for hw_block_size=\n"); 1134 - break; 1135 - } 1136 - if (!tmp_ul) { 1137 - pr_err("hw_block_size must be nonzero\n"); 1138 - break; 1139 - } 1140 - dev->dev_attrib.hw_block_size = tmp_ul; 1085 + ret = tcmu_set_dev_attrib(&args[0], 1086 + &(dev->dev_attrib.hw_block_size)); 1087 + break; 1088 + case Opt_hw_max_sectors: 1089 + ret = tcmu_set_dev_attrib(&args[0], 1090 + &(dev->dev_attrib.hw_max_sectors)); 1141 1091 break; 1142 1092 default: 1143 1093 break; 1144 1094 } 1095 + 1096 + if (ret) 1097 + break; 1145 1098 } 1146 1099 1147 1100 kfree(orig); ··· 1167 1134 return passthrough_parse_cdb(cmd, tcmu_queue_cmd); 1168 1135 } 1169 1136 1170 - static const struct target_backend_ops tcmu_ops = { 1137 + static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page) 1138 + { 1139 + struct se_dev_attrib *da = container_of(to_config_group(item), 1140 + struct se_dev_attrib, da_group); 1141 + struct tcmu_dev *udev = container_of(da->da_dev, 1142 + struct tcmu_dev, se_dev); 1143 + 1144 + return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); 1145 + } 1146 + 1147 + static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page, 1148 + size_t count) 1149 + { 1150 + struct se_dev_attrib *da = container_of(to_config_group(item), 1151 + struct se_dev_attrib, da_group); 1152 + struct tcmu_dev *udev = container_of(da->da_dev, 1153 + struct tcmu_dev, se_dev); 1154 + u32 val; 1155 + int ret; 1156 + 1157 + if (da->da_dev->export_count) { 1158 + pr_err("Unable to set tcmu cmd_time_out while exports exist\n"); 1159 + return -EINVAL; 1160 + } 1161 + 1162 + ret = kstrtou32(page, 0, &val); 1163 + if (ret < 0) 1164 + return ret; 1165 + 1166 + if (!val) { 1167 + pr_err("Illegal value for 
cmd_time_out\n"); 1168 + return -EINVAL; 1169 + } 1170 + 1171 + udev->cmd_time_out = val * MSEC_PER_SEC; 1172 + return count; 1173 + } 1174 + CONFIGFS_ATTR(tcmu_, cmd_time_out); 1175 + 1176 + static struct configfs_attribute **tcmu_attrs; 1177 + 1178 + static struct target_backend_ops tcmu_ops = { 1171 1179 .name = "user", 1172 1180 .owner = THIS_MODULE, 1173 1181 .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, ··· 1222 1148 .show_configfs_dev_params = tcmu_show_configfs_dev_params, 1223 1149 .get_device_type = sbc_get_device_type, 1224 1150 .get_blocks = tcmu_get_blocks, 1225 - .tb_dev_attrib_attrs = passthrough_attrib_attrs, 1151 + .tb_dev_attrib_attrs = NULL, 1226 1152 }; 1227 1153 1228 1154 static int __init tcmu_module_init(void) 1229 1155 { 1230 - int ret; 1156 + int ret, i, len = 0; 1231 1157 1232 1158 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); 1233 1159 ··· 1249 1175 goto out_unreg_device; 1250 1176 } 1251 1177 1178 + for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { 1179 + len += sizeof(struct configfs_attribute *); 1180 + } 1181 + len += sizeof(struct configfs_attribute *) * 2; 1182 + 1183 + tcmu_attrs = kzalloc(len, GFP_KERNEL); 1184 + if (!tcmu_attrs) { 1185 + ret = -ENOMEM; 1186 + goto out_unreg_genl; 1187 + } 1188 + 1189 + for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { 1190 + tcmu_attrs[i] = passthrough_attrib_attrs[i]; 1191 + } 1192 + tcmu_attrs[i] = &tcmu_attr_cmd_time_out; 1193 + tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; 1194 + 1252 1195 ret = transport_backend_register(&tcmu_ops); 1253 1196 if (ret) 1254 - goto out_unreg_genl; 1197 + goto out_attrs; 1255 1198 1256 1199 return 0; 1257 1200 1201 + out_attrs: 1202 + kfree(tcmu_attrs); 1258 1203 out_unreg_genl: 1259 1204 genl_unregister_family(&tcmu_genl_family); 1260 1205 out_unreg_device: ··· 1287 1194 static void __exit tcmu_module_exit(void) 1288 1195 { 1289 1196 target_backend_unregister(&tcmu_ops); 1197 + kfree(tcmu_attrs); 1290 1198 
genl_unregister_family(&tcmu_genl_family); 1291 1199 root_device_unregister(tcmu_root_device); 1292 1200 kmem_cache_destroy(tcmu_cmd_cache);
+6 -5
drivers/tty/serial/st-asc.c
··· 575 575 pinctrl_select_state(ascport->pinctrl, 576 576 ascport->states[NO_HW_FLOWCTRL]); 577 577 578 - gpiod = devm_get_gpiod_from_child(port->dev, "rts", 579 - &np->fwnode); 580 - if (!IS_ERR(gpiod)) { 581 - gpiod_direction_output(gpiod, 0); 578 + gpiod = devm_fwnode_get_gpiod_from_child(port->dev, 579 + "rts", 580 + &np->fwnode, 581 + GPIOD_OUT_LOW, 582 + np->name); 583 + if (!IS_ERR(gpiod)) 582 584 ascport->rts = gpiod; 583 - } 584 585 } 585 586 } 586 587
+41
drivers/vhost/vsock.c
··· 223 223 return len; 224 224 } 225 225 226 + static int 227 + vhost_transport_cancel_pkt(struct vsock_sock *vsk) 228 + { 229 + struct vhost_vsock *vsock; 230 + struct virtio_vsock_pkt *pkt, *n; 231 + int cnt = 0; 232 + LIST_HEAD(freeme); 233 + 234 + /* Find the vhost_vsock according to guest context id */ 235 + vsock = vhost_vsock_get(vsk->remote_addr.svm_cid); 236 + if (!vsock) 237 + return -ENODEV; 238 + 239 + spin_lock_bh(&vsock->send_pkt_list_lock); 240 + list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) { 241 + if (pkt->vsk != vsk) 242 + continue; 243 + list_move(&pkt->list, &freeme); 244 + } 245 + spin_unlock_bh(&vsock->send_pkt_list_lock); 246 + 247 + list_for_each_entry_safe(pkt, n, &freeme, list) { 248 + if (pkt->reply) 249 + cnt++; 250 + list_del(&pkt->list); 251 + virtio_transport_free_pkt(pkt); 252 + } 253 + 254 + if (cnt) { 255 + struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; 256 + int new_cnt; 257 + 258 + new_cnt = atomic_sub_return(cnt, &vsock->queued_replies); 259 + if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num) 260 + vhost_poll_queue(&tx_vq->poll); 261 + } 262 + 263 + return 0; 264 + } 265 + 226 266 static struct virtio_vsock_pkt * 227 267 vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq, 228 268 unsigned int out, unsigned int in) ··· 715 675 .release = virtio_transport_release, 716 676 .connect = virtio_transport_connect, 717 677 .shutdown = virtio_transport_shutdown, 678 + .cancel_pkt = vhost_transport_cancel_pkt, 718 679 719 680 .dgram_enqueue = virtio_transport_dgram_enqueue, 720 681 .dgram_dequeue = virtio_transport_dgram_dequeue,
+6 -5
drivers/xen/gntdev.c
··· 36 36 #include <linux/spinlock.h> 37 37 #include <linux/slab.h> 38 38 #include <linux/highmem.h> 39 + #include <linux/refcount.h> 39 40 40 41 #include <xen/xen.h> 41 42 #include <xen/grant_table.h> ··· 87 86 int index; 88 87 int count; 89 88 int flags; 90 - atomic_t users; 89 + refcount_t users; 91 90 struct unmap_notify notify; 92 91 struct ioctl_gntdev_grant_ref *grants; 93 92 struct gnttab_map_grant_ref *map_ops; ··· 167 166 168 167 add->index = 0; 169 168 add->count = count; 170 - atomic_set(&add->users, 1); 169 + refcount_set(&add->users, 1); 171 170 172 171 return add; 173 172 ··· 213 212 if (!map) 214 213 return; 215 214 216 - if (!atomic_dec_and_test(&map->users)) 215 + if (!refcount_dec_and_test(&map->users)) 217 216 return; 218 217 219 218 atomic_sub(map->count, &pages_mapped); ··· 401 400 struct grant_map *map = vma->vm_private_data; 402 401 403 402 pr_debug("gntdev_vma_open %p\n", vma); 404 - atomic_inc(&map->users); 403 + refcount_inc(&map->users); 405 404 } 406 405 407 406 static void gntdev_vma_close(struct vm_area_struct *vma) ··· 1005 1004 goto unlock_out; 1006 1005 } 1007 1006 1008 - atomic_inc(&map->users); 1007 + refcount_inc(&map->users); 1009 1008 1010 1009 vma->vm_ops = &gntdev_vmops; 1011 1010
+4 -3
fs/afs/callback.c
··· 362 362 { 363 363 struct afs_server *server; 364 364 struct afs_vnode *vnode, *xvnode; 365 - time_t now; 365 + time64_t now; 366 366 long timeout; 367 367 int ret; 368 368 ··· 370 370 371 371 _enter(""); 372 372 373 - now = get_seconds(); 373 + now = ktime_get_real_seconds(); 374 374 375 375 /* find the first vnode to update */ 376 376 spin_lock(&server->cb_lock); ··· 424 424 425 425 /* and then reschedule */ 426 426 _debug("reschedule"); 427 - vnode->update_at = get_seconds() + afs_vnode_update_timeout; 427 + vnode->update_at = ktime_get_real_seconds() + 428 + afs_vnode_update_timeout; 428 429 429 430 spin_lock(&server->cb_lock); 430 431
+5 -6
fs/afs/cmservice.c
··· 187 187 struct afs_callback *cb; 188 188 struct afs_server *server; 189 189 __be32 *bp; 190 - u32 tmp; 191 190 int ret, loop; 192 191 193 192 _enter("{%u}", call->unmarshall); ··· 248 249 if (ret < 0) 249 250 return ret; 250 251 251 - tmp = ntohl(call->tmp); 252 - _debug("CB count: %u", tmp); 253 - if (tmp != call->count && tmp != 0) 252 + call->count2 = ntohl(call->tmp); 253 + _debug("CB count: %u", call->count2); 254 + if (call->count2 != call->count && call->count2 != 0) 254 255 return -EBADMSG; 255 256 call->offset = 0; 256 257 call->unmarshall++; ··· 258 259 case 4: 259 260 _debug("extract CB array"); 260 261 ret = afs_extract_data(call, call->buffer, 261 - call->count * 3 * 4, false); 262 + call->count2 * 3 * 4, false); 262 263 if (ret < 0) 263 264 return ret; 264 265 265 266 _debug("unmarshall CB array"); 266 267 cb = call->request; 267 268 bp = call->buffer; 268 - for (loop = call->count; loop > 0; loop--, cb++) { 269 + for (loop = call->count2; loop > 0; loop--, cb++) { 269 270 cb->version = ntohl(*bp++); 270 271 cb->expiry = ntohl(*bp++); 271 272 cb->type = ntohl(*bp++);
+16 -4
fs/afs/file.c
··· 30 30 31 31 const struct file_operations afs_file_operations = { 32 32 .open = afs_open, 33 + .flush = afs_flush, 33 34 .release = afs_release, 34 35 .llseek = generic_file_llseek, 35 36 .read_iter = generic_file_read_iter, ··· 185 184 if (!req) 186 185 goto enomem; 187 186 187 + /* We request a full page. If the page is a partial one at the 188 + * end of the file, the server will return a short read and the 189 + * unmarshalling code will clear the unfilled space. 190 + */ 188 191 atomic_set(&req->usage, 1); 189 192 req->pos = (loff_t)page->index << PAGE_SHIFT; 190 - req->len = min_t(size_t, i_size_read(inode) - req->pos, 191 - PAGE_SIZE); 193 + req->len = PAGE_SIZE; 192 194 req->nr_pages = 1; 193 195 req->pages[0] = page; 194 196 get_page(page); ··· 212 208 fscache_uncache_page(vnode->cache, page); 213 209 #endif 214 210 BUG_ON(PageFsCache(page)); 215 - goto error; 211 + 212 + if (ret == -EINTR || 213 + ret == -ENOMEM || 214 + ret == -ERESTARTSYS || 215 + ret == -EAGAIN) 216 + goto error; 217 + goto io_error; 216 218 } 217 219 218 220 SetPageUptodate(page); ··· 237 227 _leave(" = 0"); 238 228 return 0; 239 229 230 + io_error: 231 + SetPageError(page); 232 + goto error; 240 233 enomem: 241 234 ret = -ENOMEM; 242 235 error: 243 - SetPageError(page); 244 236 unlock_page(page); 245 237 _leave(" = %d", ret); 246 238 return ret;
+50 -27
fs/afs/fsclient.c
··· 17 17 #include "afs_fs.h" 18 18 19 19 /* 20 + * We need somewhere to discard into in case the server helpfully returns more 21 + * than we asked for in FS.FetchData{,64}. 22 + */ 23 + static u8 afs_discard_buffer[64]; 24 + 25 + /* 20 26 * decode an AFSFid block 21 27 */ 22 28 static void xdr_decode_AFSFid(const __be32 **_bp, struct afs_fid *fid) ··· 111 105 vnode->vfs_inode.i_mode = mode; 112 106 } 113 107 114 - vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server; 108 + vnode->vfs_inode.i_ctime.tv_sec = status->mtime_client; 115 109 vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime; 116 110 vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime; 117 111 vnode->vfs_inode.i_version = data_version; ··· 145 139 vnode->cb_version = ntohl(*bp++); 146 140 vnode->cb_expiry = ntohl(*bp++); 147 141 vnode->cb_type = ntohl(*bp++); 148 - vnode->cb_expires = vnode->cb_expiry + get_seconds(); 142 + vnode->cb_expires = vnode->cb_expiry + ktime_get_real_seconds(); 149 143 *_bp = bp; 150 144 } 151 145 ··· 321 315 void *buffer; 322 316 int ret; 323 317 324 - _enter("{%u,%zu/%u;%u/%llu}", 318 + _enter("{%u,%zu/%u;%llu/%llu}", 325 319 call->unmarshall, call->offset, call->count, 326 320 req->remain, req->actual_len); 327 321 ··· 359 353 360 354 req->actual_len |= ntohl(call->tmp); 361 355 _debug("DATA length: %llu", req->actual_len); 362 - /* Check that the server didn't want to send us extra. We 363 - * might want to just discard instead, but that requires 364 - * cooperation from AF_RXRPC. 
365 - */ 366 - if (req->actual_len > req->len) 367 - return -EBADMSG; 368 356 369 357 req->remain = req->actual_len; 370 358 call->offset = req->pos & (PAGE_SIZE - 1); ··· 368 368 call->unmarshall++; 369 369 370 370 begin_page: 371 + ASSERTCMP(req->index, <, req->nr_pages); 371 372 if (req->remain > PAGE_SIZE - call->offset) 372 373 size = PAGE_SIZE - call->offset; 373 374 else ··· 379 378 380 379 /* extract the returned data */ 381 380 case 3: 382 - _debug("extract data %u/%llu %zu/%u", 381 + _debug("extract data %llu/%llu %zu/%u", 383 382 req->remain, req->actual_len, call->offset, call->count); 384 383 385 384 buffer = kmap(req->pages[req->index]); ··· 390 389 if (call->offset == PAGE_SIZE) { 391 390 if (req->page_done) 392 391 req->page_done(call, req); 392 + req->index++; 393 393 if (req->remain > 0) { 394 - req->index++; 395 394 call->offset = 0; 395 + if (req->index >= req->nr_pages) { 396 + call->unmarshall = 4; 397 + goto begin_discard; 398 + } 396 399 goto begin_page; 397 400 } 398 401 } 402 + goto no_more_data; 403 + 404 + /* Discard any excess data the server gave us */ 405 + begin_discard: 406 + case 4: 407 + size = min_t(loff_t, sizeof(afs_discard_buffer), req->remain); 408 + call->count = size; 409 + _debug("extract discard %llu/%llu %zu/%u", 410 + req->remain, req->actual_len, call->offset, call->count); 411 + 412 + call->offset = 0; 413 + ret = afs_extract_data(call, afs_discard_buffer, call->count, true); 414 + req->remain -= call->offset; 415 + if (ret < 0) 416 + return ret; 417 + if (req->remain > 0) 418 + goto begin_discard; 399 419 400 420 no_more_data: 401 421 call->offset = 0; 402 - call->unmarshall++; 422 + call->unmarshall = 5; 403 423 404 424 /* extract the metadata */ 405 - case 4: 425 + case 5: 406 426 ret = afs_extract_data(call, call->buffer, 407 427 (21 + 3 + 6) * 4, false); 408 428 if (ret < 0) ··· 438 416 call->offset = 0; 439 417 call->unmarshall++; 440 418 441 - case 5: 419 + case 6: 442 420 break; 443 421 } 444 422 445 - if 
(call->count < PAGE_SIZE) { 446 - buffer = kmap(req->pages[req->index]); 447 - memset(buffer + call->count, 0, PAGE_SIZE - call->count); 448 - kunmap(req->pages[req->index]); 423 + for (; req->index < req->nr_pages; req->index++) { 424 + if (call->count < PAGE_SIZE) 425 + zero_user_segment(req->pages[req->index], 426 + call->count, PAGE_SIZE); 449 427 if (req->page_done) 450 428 req->page_done(call, req); 429 + call->count = 0; 451 430 } 452 431 453 432 _leave(" = 0 [done]"); ··· 734 711 memset(bp, 0, padsz); 735 712 bp = (void *) bp + padsz; 736 713 } 737 - *bp++ = htonl(AFS_SET_MODE); 738 - *bp++ = 0; /* mtime */ 714 + *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME); 715 + *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */ 739 716 *bp++ = 0; /* owner */ 740 717 *bp++ = 0; /* group */ 741 718 *bp++ = htonl(mode & S_IALLUGO); /* unix mode */ ··· 1003 980 memset(bp, 0, c_padsz); 1004 981 bp = (void *) bp + c_padsz; 1005 982 } 1006 - *bp++ = htonl(AFS_SET_MODE); 1007 - *bp++ = 0; /* mtime */ 983 + *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME); 984 + *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */ 1008 985 *bp++ = 0; /* owner */ 1009 986 *bp++ = 0; /* group */ 1010 987 *bp++ = htonl(S_IRWXUGO); /* unix mode */ ··· 1203 1180 *bp++ = htonl(vnode->fid.vnode); 1204 1181 *bp++ = htonl(vnode->fid.unique); 1205 1182 1206 - *bp++ = 0; /* mask */ 1207 - *bp++ = 0; /* mtime */ 1183 + *bp++ = htonl(AFS_SET_MTIME); /* mask */ 1184 + *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */ 1208 1185 *bp++ = 0; /* owner */ 1209 1186 *bp++ = 0; /* group */ 1210 1187 *bp++ = 0; /* unix mode */ ··· 1236 1213 _enter(",%x,{%x:%u},,", 1237 1214 key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode); 1238 1215 1239 - size = to - offset; 1216 + size = (loff_t)to - (loff_t)offset; 1240 1217 if (first != last) 1241 1218 size += (loff_t)(last - first) << PAGE_SHIFT; 1242 1219 pos = (loff_t)first << PAGE_SHIFT; ··· 1280 1257 *bp++ = htonl(vnode->fid.vnode); 1281 1258 *bp++ = 
htonl(vnode->fid.unique); 1282 1259 1283 - *bp++ = 0; /* mask */ 1284 - *bp++ = 0; /* mtime */ 1260 + *bp++ = htonl(AFS_SET_MTIME); /* mask */ 1261 + *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */ 1285 1262 *bp++ = 0; /* owner */ 1286 1263 *bp++ = 0; /* group */ 1287 1264 *bp++ = 0; /* unix mode */
+22 -20
fs/afs/inode.c
··· 54 54 inode->i_fop = &afs_dir_file_operations; 55 55 break; 56 56 case AFS_FTYPE_SYMLINK: 57 - inode->i_mode = S_IFLNK | vnode->status.mode; 58 - inode->i_op = &page_symlink_inode_operations; 57 + /* Symlinks with a mode of 0644 are actually mountpoints. */ 58 + if ((vnode->status.mode & 0777) == 0644) { 59 + inode->i_flags |= S_AUTOMOUNT; 60 + 61 + spin_lock(&vnode->lock); 62 + set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags); 63 + spin_unlock(&vnode->lock); 64 + 65 + inode->i_mode = S_IFDIR | 0555; 66 + inode->i_op = &afs_mntpt_inode_operations; 67 + inode->i_fop = &afs_mntpt_file_operations; 68 + } else { 69 + inode->i_mode = S_IFLNK | vnode->status.mode; 70 + inode->i_op = &page_symlink_inode_operations; 71 + } 59 72 inode_nohighmem(inode); 60 73 break; 61 74 default: ··· 83 70 84 71 set_nlink(inode, vnode->status.nlink); 85 72 inode->i_uid = vnode->status.owner; 86 - inode->i_gid = GLOBAL_ROOT_GID; 73 + inode->i_gid = vnode->status.group; 87 74 inode->i_size = vnode->status.size; 88 - inode->i_ctime.tv_sec = vnode->status.mtime_server; 75 + inode->i_ctime.tv_sec = vnode->status.mtime_client; 89 76 inode->i_ctime.tv_nsec = 0; 90 77 inode->i_atime = inode->i_mtime = inode->i_ctime; 91 78 inode->i_blocks = 0; 92 79 inode->i_generation = vnode->fid.unique; 93 80 inode->i_version = vnode->status.data_version; 94 81 inode->i_mapping->a_ops = &afs_fs_aops; 95 - 96 - /* check to see whether a symbolic link is really a mountpoint */ 97 - if (vnode->status.type == AFS_FTYPE_SYMLINK) { 98 - afs_mntpt_check_symlink(vnode, key); 99 - 100 - if (test_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags)) { 101 - inode->i_mode = S_IFDIR | vnode->status.mode; 102 - inode->i_op = &afs_mntpt_inode_operations; 103 - inode->i_fop = &afs_mntpt_file_operations; 104 - } 105 - } 106 - 107 82 return 0; 108 83 } 109 84 ··· 246 245 vnode->cb_version = 0; 247 246 vnode->cb_expiry = 0; 248 247 vnode->cb_type = 0; 249 - vnode->cb_expires = get_seconds(); 248 + vnode->cb_expires = 
ktime_get_real_seconds(); 250 249 } else { 251 250 vnode->cb_version = cb->version; 252 251 vnode->cb_expiry = cb->expiry; 253 252 vnode->cb_type = cb->type; 254 - vnode->cb_expires = vnode->cb_expiry + get_seconds(); 253 + vnode->cb_expires = vnode->cb_expiry + 254 + ktime_get_real_seconds(); 255 255 } 256 256 } 257 257 ··· 325 323 !test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) && 326 324 !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) && 327 325 !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) { 328 - if (vnode->cb_expires < get_seconds() + 10) { 326 + if (vnode->cb_expires < ktime_get_real_seconds() + 10) { 329 327 _debug("callback expired"); 330 328 set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags); 331 329 } else { ··· 446 444 447 445 mutex_lock(&vnode->permits_lock); 448 446 permits = vnode->permits; 449 - rcu_assign_pointer(vnode->permits, NULL); 447 + RCU_INIT_POINTER(vnode->permits, NULL); 450 448 mutex_unlock(&vnode->permits_lock); 451 449 if (permits) 452 450 call_rcu(&permits->rcu, afs_zap_permits);
+13 -10
fs/afs/internal.h
··· 11 11 12 12 #include <linux/compiler.h> 13 13 #include <linux/kernel.h> 14 + #include <linux/ktime.h> 14 15 #include <linux/fs.h> 15 16 #include <linux/pagemap.h> 16 17 #include <linux/rxrpc.h> ··· 91 90 unsigned request_size; /* size of request data */ 92 91 unsigned reply_max; /* maximum size of reply */ 93 92 unsigned first_offset; /* offset into mapping[first] */ 94 - unsigned last_to; /* amount of mapping[last] */ 93 + union { 94 + unsigned last_to; /* amount of mapping[last] */ 95 + unsigned count2; /* count used in unmarshalling */ 96 + }; 95 97 unsigned char unmarshall; /* unmarshalling phase */ 96 98 bool incoming; /* T if incoming call */ 97 99 bool send_pages; /* T if data from mapping should be sent */ ··· 131 127 */ 132 128 struct afs_read { 133 129 loff_t pos; /* Where to start reading */ 134 - loff_t len; /* How much to read */ 130 + loff_t len; /* How much we're asking for */ 135 131 loff_t actual_len; /* How much we're actually getting */ 132 + loff_t remain; /* Amount remaining */ 136 133 atomic_t usage; 137 - unsigned int remain; /* Amount remaining */ 138 134 unsigned int index; /* Which page we're reading into */ 139 - unsigned int pg_offset; /* Offset in page we're at */ 140 135 unsigned int nr_pages; 141 136 void (*page_done)(struct afs_call *, struct afs_read *); 142 137 struct page *pages[]; ··· 250 247 */ 251 248 struct afs_vlocation { 252 249 atomic_t usage; 253 - time_t time_of_death; /* time at which put reduced usage to 0 */ 250 + time64_t time_of_death; /* time at which put reduced usage to 0 */ 254 251 struct list_head link; /* link in cell volume location list */ 255 252 struct list_head grave; /* link in master graveyard list */ 256 253 struct list_head update; /* link in master update list */ ··· 261 258 struct afs_cache_vlocation vldb; /* volume information DB record */ 262 259 struct afs_volume *vols[3]; /* volume access record pointer (index by type) */ 263 260 wait_queue_head_t waitq; /* status change waitqueue */ 264 - 
time_t update_at; /* time at which record should be updated */ 261 + time64_t update_at; /* time at which record should be updated */ 265 262 spinlock_t lock; /* access lock */ 266 263 afs_vlocation_state_t state; /* volume location state */ 267 264 unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */ ··· 274 271 */ 275 272 struct afs_server { 276 273 atomic_t usage; 277 - time_t time_of_death; /* time at which put reduced usage to 0 */ 274 + time64_t time_of_death; /* time at which put reduced usage to 0 */ 278 275 struct in_addr addr; /* server address */ 279 276 struct afs_cell *cell; /* cell in which server resides */ 280 277 struct list_head link; /* link in cell's server list */ ··· 377 374 struct rb_node server_rb; /* link in server->fs_vnodes */ 378 375 struct rb_node cb_promise; /* link in server->cb_promises */ 379 376 struct work_struct cb_broken_work; /* work to be done on callback break */ 380 - time_t cb_expires; /* time at which callback expires */ 381 - time_t cb_expires_at; /* time used to order cb_promise */ 377 + time64_t cb_expires; /* time at which callback expires */ 378 + time64_t cb_expires_at; /* time used to order cb_promise */ 382 379 unsigned cb_version; /* callback version */ 383 380 unsigned cb_expiry; /* callback expiry time */ 384 381 afs_callback_type_t cb_type; /* type of callback */ ··· 560 557 extern const struct file_operations afs_mntpt_file_operations; 561 558 562 559 extern struct vfsmount *afs_d_automount(struct path *); 563 - extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *); 564 560 extern void afs_mntpt_kill_timer(void); 565 561 566 562 /* ··· 720 718 extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *); 721 719 extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *); 722 720 extern int afs_writeback_all(struct afs_vnode *); 721 + extern int afs_flush(struct file *, fl_owner_t); 723 722 extern int afs_fsync(struct file *, loff_t, loff_t, int); 724 723 725 724
+2
fs/afs/misc.c
··· 84 84 case RXKADDATALEN: return -EKEYREJECTED; 85 85 case RXKADILLEGALLEVEL: return -EKEYREJECTED; 86 86 87 + case RXGEN_OPCODE: return -ENOTSUPP; 88 + 87 89 default: return -EREMOTEIO; 88 90 } 89 91 }
-53
fs/afs/mntpt.c
··· 47 47 static unsigned long afs_mntpt_expiry_timeout = 10 * 60; 48 48 49 49 /* 50 - * check a symbolic link to see whether it actually encodes a mountpoint 51 - * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately 52 - */ 53 - int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key) 54 - { 55 - struct page *page; 56 - size_t size; 57 - char *buf; 58 - int ret; 59 - 60 - _enter("{%x:%u,%u}", 61 - vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); 62 - 63 - /* read the contents of the symlink into the pagecache */ 64 - page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, 65 - afs_page_filler, key); 66 - if (IS_ERR(page)) { 67 - ret = PTR_ERR(page); 68 - goto out; 69 - } 70 - 71 - ret = -EIO; 72 - if (PageError(page)) 73 - goto out_free; 74 - 75 - buf = kmap(page); 76 - 77 - /* examine the symlink's contents */ 78 - size = vnode->status.size; 79 - _debug("symlink to %*.*s", (int) size, (int) size, buf); 80 - 81 - if (size > 2 && 82 - (buf[0] == '%' || buf[0] == '#') && 83 - buf[size - 1] == '.' 84 - ) { 85 - _debug("symlink is a mountpoint"); 86 - spin_lock(&vnode->lock); 87 - set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags); 88 - vnode->vfs_inode.i_flags |= S_AUTOMOUNT; 89 - spin_unlock(&vnode->lock); 90 - } 91 - 92 - ret = 0; 93 - 94 - kunmap(page); 95 - out_free: 96 - put_page(page); 97 - out: 98 - _leave(" = %d", ret); 99 - return ret; 100 - } 101 - 102 - /* 103 50 * no valid lookup procedure on this sort of dir 104 51 */ 105 52 static struct dentry *afs_mntpt_lookup(struct inode *dir,
+83 -64
fs/afs/rxrpc.c
··· 259 259 call->buffer = NULL; 260 260 } 261 261 262 + #define AFS_BVEC_MAX 8 263 + 264 + /* 265 + * Load the given bvec with the next few pages. 266 + */ 267 + static void afs_load_bvec(struct afs_call *call, struct msghdr *msg, 268 + struct bio_vec *bv, pgoff_t first, pgoff_t last, 269 + unsigned offset) 270 + { 271 + struct page *pages[AFS_BVEC_MAX]; 272 + unsigned int nr, n, i, to, bytes = 0; 273 + 274 + nr = min_t(pgoff_t, last - first + 1, AFS_BVEC_MAX); 275 + n = find_get_pages_contig(call->mapping, first, nr, pages); 276 + ASSERTCMP(n, ==, nr); 277 + 278 + msg->msg_flags |= MSG_MORE; 279 + for (i = 0; i < nr; i++) { 280 + to = PAGE_SIZE; 281 + if (first + i >= last) { 282 + to = call->last_to; 283 + msg->msg_flags &= ~MSG_MORE; 284 + } 285 + bv[i].bv_page = pages[i]; 286 + bv[i].bv_len = to - offset; 287 + bv[i].bv_offset = offset; 288 + bytes += to - offset; 289 + offset = 0; 290 + } 291 + 292 + iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, bv, nr, bytes); 293 + } 294 + 262 295 /* 263 296 * attach the data from a bunch of pages on an inode to a call 264 297 */ 265 298 static int afs_send_pages(struct afs_call *call, struct msghdr *msg) 266 299 { 267 - struct page *pages[8]; 268 - unsigned count, n, loop, offset, to; 300 + struct bio_vec bv[AFS_BVEC_MAX]; 301 + unsigned int bytes, nr, loop, offset; 269 302 pgoff_t first = call->first, last = call->last; 270 303 int ret; 271 - 272 - _enter(""); 273 304 274 305 offset = call->first_offset; 275 306 call->first_offset = 0; 276 307 277 308 do { 278 - _debug("attach %lx-%lx", first, last); 309 + afs_load_bvec(call, msg, bv, first, last, offset); 310 + offset = 0; 311 + bytes = msg->msg_iter.count; 312 + nr = msg->msg_iter.nr_segs; 279 313 280 - count = last - first + 1; 281 - if (count > ARRAY_SIZE(pages)) 282 - count = ARRAY_SIZE(pages); 283 - n = find_get_pages_contig(call->mapping, first, count, pages); 284 - ASSERTCMP(n, ==, count); 285 - 286 - loop = 0; 287 - do { 288 - struct bio_vec bvec = {.bv_page = 
pages[loop], 289 - .bv_offset = offset}; 290 - msg->msg_flags = 0; 291 - to = PAGE_SIZE; 292 - if (first + loop >= last) 293 - to = call->last_to; 294 - else 295 - msg->msg_flags = MSG_MORE; 296 - bvec.bv_len = to - offset; 297 - offset = 0; 298 - 299 - _debug("- range %u-%u%s", 300 - offset, to, msg->msg_flags ? " [more]" : ""); 301 - iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, 302 - &bvec, 1, to - offset); 303 - 304 - /* have to change the state *before* sending the last 305 - * packet as RxRPC might give us the reply before it 306 - * returns from sending the request */ 307 - if (first + loop >= last) 308 - call->state = AFS_CALL_AWAIT_REPLY; 309 - ret = rxrpc_kernel_send_data(afs_socket, call->rxcall, 310 - msg, to - offset); 311 - if (ret < 0) 312 - break; 313 - } while (++loop < count); 314 - first += count; 315 - 316 - for (loop = 0; loop < count; loop++) 317 - put_page(pages[loop]); 314 + /* Have to change the state *before* sending the last 315 + * packet as RxRPC might give us the reply before it 316 + * returns from sending the request. 317 + */ 318 + if (first + nr - 1 >= last) 319 + call->state = AFS_CALL_AWAIT_REPLY; 320 + ret = rxrpc_kernel_send_data(afs_socket, call->rxcall, 321 + msg, bytes); 322 + for (loop = 0; loop < nr; loop++) 323 + put_page(bv[loop].bv_page); 318 324 if (ret < 0) 319 325 break; 326 + 327 + first += nr; 320 328 } while (first <= last); 321 329 322 - _leave(" = %d", ret); 323 330 return ret; 324 331 } 325 332 ··· 340 333 struct rxrpc_call *rxcall; 341 334 struct msghdr msg; 342 335 struct kvec iov[1]; 336 + size_t offset; 337 + u32 abort_code; 343 338 int ret; 344 339 345 340 _enter("%x,{%d},", addr->s_addr, ntohs(call->port)); ··· 390 381 msg.msg_controllen = 0; 391 382 msg.msg_flags = (call->send_pages ? 
MSG_MORE : 0); 392 383 393 - /* have to change the state *before* sending the last packet as RxRPC 394 - * might give us the reply before it returns from sending the 395 - * request */ 384 + /* We have to change the state *before* sending the last packet as 385 + * rxrpc might give us the reply before it returns from sending the 386 + * request. Further, if the send fails, we may already have been given 387 + * a notification and may have collected it. 388 + */ 396 389 if (!call->send_pages) 397 390 call->state = AFS_CALL_AWAIT_REPLY; 398 391 ret = rxrpc_kernel_send_data(afs_socket, rxcall, ··· 416 405 return afs_wait_for_call_to_complete(call); 417 406 418 407 error_do_abort: 419 - rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD"); 408 + call->state = AFS_CALL_COMPLETE; 409 + if (ret != -ECONNABORTED) { 410 + rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, 411 + -ret, "KSD"); 412 + } else { 413 + abort_code = 0; 414 + offset = 0; 415 + rxrpc_kernel_recv_data(afs_socket, rxcall, NULL, 0, &offset, 416 + false, &abort_code); 417 + ret = call->type->abort_to_error(abort_code); 418 + } 420 419 error_kill_call: 421 420 afs_put_call(call); 422 421 _leave(" = %d", ret); ··· 473 452 case -EINPROGRESS: 474 453 case -EAGAIN: 475 454 goto out; 455 + case -ECONNABORTED: 456 + goto call_complete; 476 457 case -ENOTCONN: 477 458 abort_code = RX_CALL_DEAD; 478 459 rxrpc_kernel_abort_call(afs_socket, call->rxcall, 479 460 abort_code, -ret, "KNC"); 480 - goto do_abort; 461 + goto save_error; 481 462 case -ENOTSUPP: 482 - abort_code = RX_INVALID_OPERATION; 463 + abort_code = RXGEN_OPCODE; 483 464 rxrpc_kernel_abort_call(afs_socket, call->rxcall, 484 465 abort_code, -ret, "KIV"); 485 - goto do_abort; 466 + goto save_error; 486 467 case -ENODATA: 487 468 case -EBADMSG: 488 469 case -EMSGSIZE: ··· 494 471 abort_code = RXGEN_SS_UNMARSHAL; 495 472 rxrpc_kernel_abort_call(afs_socket, call->rxcall, 496 473 abort_code, EBADMSG, "KUM"); 497 - goto do_abort; 
474 + goto save_error; 498 475 } 499 476 } 500 477 ··· 505 482 _leave(""); 506 483 return; 507 484 508 - do_abort: 485 + save_error: 509 486 call->error = ret; 487 + call_complete: 510 488 call->state = AFS_CALL_COMPLETE; 511 489 goto done; 512 490 } ··· 517 493 */ 518 494 static int afs_wait_for_call_to_complete(struct afs_call *call) 519 495 { 520 - const char *abort_why; 521 496 int ret; 522 497 523 498 DECLARE_WAITQUEUE(myself, current); ··· 535 512 continue; 536 513 } 537 514 538 - abort_why = "KWC"; 539 - ret = call->error; 540 - if (call->state == AFS_CALL_COMPLETE) 541 - break; 542 - abort_why = "KWI"; 543 - ret = -EINTR; 544 - if (signal_pending(current)) 515 + if (call->state == AFS_CALL_COMPLETE || 516 + signal_pending(current)) 545 517 break; 546 518 schedule(); 547 519 } ··· 544 526 remove_wait_queue(&call->waitq, &myself); 545 527 __set_current_state(TASK_RUNNING); 546 528 547 - /* kill the call */ 529 + /* Kill off the call if it's still live. */ 548 530 if (call->state < AFS_CALL_COMPLETE) { 549 - _debug("call incomplete"); 531 + _debug("call interrupted"); 550 532 rxrpc_kernel_abort_call(afs_socket, call->rxcall, 551 - RX_CALL_DEAD, -ret, abort_why); 533 + RX_USER_ABORT, -EINTR, "KWI"); 552 534 } 553 535 536 + ret = call->error; 554 537 _debug("call complete"); 555 538 afs_put_call(call); 556 539 _leave(" = %d", ret);
+7 -2
fs/afs/security.c
··· 114 114 115 115 mutex_lock(&vnode->permits_lock); 116 116 permits = vnode->permits; 117 - rcu_assign_pointer(vnode->permits, NULL); 117 + RCU_INIT_POINTER(vnode->permits, NULL); 118 118 mutex_unlock(&vnode->permits_lock); 119 119 120 120 if (permits) ··· 340 340 } else { 341 341 if (!(access & AFS_ACE_LOOKUP)) 342 342 goto permission_denied; 343 + if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR)) 344 + goto permission_denied; 343 345 if (mask & (MAY_EXEC | MAY_READ)) { 344 346 if (!(access & AFS_ACE_READ)) 345 347 goto permission_denied; 348 + if (!(inode->i_mode & S_IRUSR)) 349 + goto permission_denied; 346 350 } else if (mask & MAY_WRITE) { 347 351 if (!(access & AFS_ACE_WRITE)) 352 + goto permission_denied; 353 + if (!(inode->i_mode & S_IWUSR)) 348 354 goto permission_denied; 349 355 } 350 356 } 351 357 352 358 key_put(key); 353 - ret = generic_permission(inode, mask); 354 359 _leave(" = %d", ret); 355 360 return ret; 356 361
+3 -3
fs/afs/server.c
··· 242 242 spin_lock(&afs_server_graveyard_lock); 243 243 if (atomic_read(&server->usage) == 0) { 244 244 list_move_tail(&server->grave, &afs_server_graveyard); 245 - server->time_of_death = get_seconds(); 245 + server->time_of_death = ktime_get_real_seconds(); 246 246 queue_delayed_work(afs_wq, &afs_server_reaper, 247 247 afs_server_timeout * HZ); 248 248 } ··· 277 277 LIST_HEAD(corpses); 278 278 struct afs_server *server; 279 279 unsigned long delay, expiry; 280 - time_t now; 280 + time64_t now; 281 281 282 - now = get_seconds(); 282 + now = ktime_get_real_seconds(); 283 283 spin_lock(&afs_server_graveyard_lock); 284 284 285 285 while (!list_empty(&afs_server_graveyard)) {
+9 -7
fs/afs/vlocation.c
··· 340 340 struct afs_vlocation *xvl; 341 341 342 342 /* wait at least 10 minutes before updating... */ 343 - vl->update_at = get_seconds() + afs_vlocation_update_timeout; 343 + vl->update_at = ktime_get_real_seconds() + 344 + afs_vlocation_update_timeout; 344 345 345 346 spin_lock(&afs_vlocation_updates_lock); 346 347 ··· 507 506 if (atomic_read(&vl->usage) == 0) { 508 507 _debug("buried"); 509 508 list_move_tail(&vl->grave, &afs_vlocation_graveyard); 510 - vl->time_of_death = get_seconds(); 509 + vl->time_of_death = ktime_get_real_seconds(); 511 510 queue_delayed_work(afs_wq, &afs_vlocation_reap, 512 511 afs_vlocation_timeout * HZ); 513 512 ··· 544 543 LIST_HEAD(corpses); 545 544 struct afs_vlocation *vl; 546 545 unsigned long delay, expiry; 547 - time_t now; 546 + time64_t now; 548 547 549 548 _enter(""); 550 549 551 - now = get_seconds(); 550 + now = ktime_get_real_seconds(); 552 551 spin_lock(&afs_vlocation_graveyard_lock); 553 552 554 553 while (!list_empty(&afs_vlocation_graveyard)) { ··· 623 622 { 624 623 struct afs_cache_vlocation vldb; 625 624 struct afs_vlocation *vl, *xvl; 626 - time_t now; 625 + time64_t now; 627 626 long timeout; 628 627 int ret; 629 628 630 629 _enter(""); 631 630 632 - now = get_seconds(); 631 + now = ktime_get_real_seconds(); 633 632 634 633 /* find a record to update */ 635 634 spin_lock(&afs_vlocation_updates_lock); ··· 685 684 686 685 /* and then reschedule */ 687 686 _debug("reschedule"); 688 - vl->update_at = get_seconds() + afs_vlocation_update_timeout; 687 + vl->update_at = ktime_get_real_seconds() + 688 + afs_vlocation_update_timeout; 689 689 690 690 spin_lock(&afs_vlocation_updates_lock); 691 691
+54 -22
fs/afs/write.c
··· 84 84 * partly or wholly fill a page that's under preparation for writing 85 85 */ 86 86 static int afs_fill_page(struct afs_vnode *vnode, struct key *key, 87 - loff_t pos, struct page *page) 87 + loff_t pos, unsigned int len, struct page *page) 88 88 { 89 89 struct afs_read *req; 90 - loff_t i_size; 91 90 int ret; 92 91 93 92 _enter(",,%llu", (unsigned long long)pos); ··· 98 99 99 100 atomic_set(&req->usage, 1); 100 101 req->pos = pos; 102 + req->len = len; 101 103 req->nr_pages = 1; 102 104 req->pages[0] = page; 103 - 104 - i_size = i_size_read(&vnode->vfs_inode); 105 - if (pos + PAGE_SIZE > i_size) 106 - req->len = i_size - pos; 107 - else 108 - req->len = PAGE_SIZE; 105 + get_page(page); 109 106 110 107 ret = afs_vnode_fetch_data(vnode, key, req); 111 108 afs_put_read(req); ··· 154 159 kfree(candidate); 155 160 return -ENOMEM; 156 161 } 157 - *pagep = page; 158 - /* page won't leak in error case: it eventually gets cleaned off LRU */ 159 162 160 163 if (!PageUptodate(page) && len != PAGE_SIZE) { 161 - ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page); 164 + ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page); 162 165 if (ret < 0) { 166 + unlock_page(page); 167 + put_page(page); 163 168 kfree(candidate); 164 169 _leave(" = %d [prep]", ret); 165 170 return ret; 166 171 } 167 172 SetPageUptodate(page); 168 173 } 174 + 175 + /* page won't leak in error case: it eventually gets cleaned off LRU */ 176 + *pagep = page; 169 177 170 178 try_again: 171 179 spin_lock(&vnode->writeback_lock); ··· 231 233 if (wb->state == AFS_WBACK_PENDING) 232 234 wb->state = AFS_WBACK_CONFLICTING; 233 235 spin_unlock(&vnode->writeback_lock); 234 - if (PageDirty(page)) { 236 + if (clear_page_dirty_for_io(page)) { 235 237 ret = afs_write_back_from_locked_page(wb, page); 236 238 if (ret < 0) { 237 239 afs_put_writeback(candidate); ··· 255 257 struct page *page, void *fsdata) 256 258 { 257 259 struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); 260 + struct key 
*key = file->private_data; 258 261 loff_t i_size, maybe_i_size; 262 + int ret; 259 263 260 264 _enter("{%x:%u},{%lx}", 261 265 vnode->fid.vid, vnode->fid.vnode, page->index); ··· 271 271 if (maybe_i_size > i_size) 272 272 i_size_write(&vnode->vfs_inode, maybe_i_size); 273 273 spin_unlock(&vnode->writeback_lock); 274 + } 275 + 276 + if (!PageUptodate(page)) { 277 + if (copied < len) { 278 + /* Try and load any missing data from the server. The 279 + * unmarshalling routine will take care of clearing any 280 + * bits that are beyond the EOF. 281 + */ 282 + ret = afs_fill_page(vnode, key, pos + copied, 283 + len - copied, page); 284 + if (ret < 0) 285 + return ret; 286 + } 287 + SetPageUptodate(page); 274 288 } 275 289 276 290 set_page_dirty(page); ··· 321 307 ASSERTCMP(pv.nr, ==, count); 322 308 323 309 for (loop = 0; loop < count; loop++) { 324 - ClearPageUptodate(pv.pages[loop]); 310 + struct page *page = pv.pages[loop]; 311 + ClearPageUptodate(page); 325 312 if (error) 326 - SetPageError(pv.pages[loop]); 327 - end_page_writeback(pv.pages[loop]); 313 + SetPageError(page); 314 + if (PageWriteback(page)) 315 + end_page_writeback(page); 316 + if (page->index >= first) 317 + first = page->index + 1; 328 318 } 329 319 330 320 __pagevec_release(&pv); ··· 353 335 _enter(",%lx", primary_page->index); 354 336 355 337 count = 1; 356 - if (!clear_page_dirty_for_io(primary_page)) 357 - BUG(); 358 338 if (test_set_page_writeback(primary_page)) 359 339 BUG(); 360 340 ··· 518 502 */ 519 503 lock_page(page); 520 504 521 - if (page->mapping != mapping) { 505 + if (page->mapping != mapping || !PageDirty(page)) { 522 506 unlock_page(page); 523 507 put_page(page); 524 508 continue; 525 509 } 526 510 527 - if (wbc->sync_mode != WB_SYNC_NONE) 528 - wait_on_page_writeback(page); 529 - 530 - if (PageWriteback(page) || !PageDirty(page)) { 511 + if (PageWriteback(page)) { 531 512 unlock_page(page); 513 + if (wbc->sync_mode != WB_SYNC_NONE) 514 + wait_on_page_writeback(page); 515 + 
put_page(page); 532 516 continue; 533 517 } 534 518 ··· 539 523 wb->state = AFS_WBACK_WRITING; 540 524 spin_unlock(&wb->vnode->writeback_lock); 541 525 526 + if (!clear_page_dirty_for_io(page)) 527 + BUG(); 542 528 ret = afs_write_back_from_locked_page(wb, page); 543 529 unlock_page(page); 544 530 put_page(page); ··· 761 743 out: 762 744 inode_unlock(inode); 763 745 return ret; 746 + } 747 + 748 + /* 749 + * Flush out all outstanding writes on a file opened for writing when it is 750 + * closed. 751 + */ 752 + int afs_flush(struct file *file, fl_owner_t id) 753 + { 754 + _enter(""); 755 + 756 + if ((file->f_mode & FMODE_WRITE) == 0) 757 + return 0; 758 + 759 + return vfs_fsync(file, 0); 764 760 } 765 761 766 762 /*
+2 -1
fs/btrfs/extent_io.c
··· 1714 1714 * can we find nothing at @index. 1715 1715 */ 1716 1716 ASSERT(page_ops & PAGE_LOCK); 1717 - return ret; 1717 + err = -EAGAIN; 1718 + goto out; 1718 1719 } 1719 1720 1720 1721 for (i = 0; i < ret; i++) {
+14
fs/btrfs/inode.c
··· 6709 6709 max_size = min_t(unsigned long, PAGE_SIZE, max_size); 6710 6710 ret = btrfs_decompress(compress_type, tmp, page, 6711 6711 extent_offset, inline_size, max_size); 6712 + 6713 + /* 6714 + * decompression code contains a memset to fill in any space between the end 6715 + * of the uncompressed data and the end of max_size in case the decompressed 6716 + * data ends up shorter than ram_bytes. That doesn't cover the hole between 6717 + * the end of an inline extent and the beginning of the next block, so we 6718 + * cover that region here. 6719 + */ 6720 + 6721 + if (max_size + pg_offset < PAGE_SIZE) { 6722 + char *map = kmap(page); 6723 + memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset); 6724 + kunmap(page); 6725 + } 6712 6726 kfree(tmp); 6713 6727 return ret; 6714 6728 }
+1
fs/f2fs/debug.c
··· 196 196 si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS); 197 197 si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE; 198 198 si->base_mem += NM_I(sbi)->nat_blocks / 8; 199 + si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short); 199 200 200 201 get_cache: 201 202 si->cache_mem = 0;
+1 -1
fs/f2fs/dir.c
··· 750 750 dentry_blk = page_address(page); 751 751 bit_pos = dentry - dentry_blk->dentry; 752 752 for (i = 0; i < slots; i++) 753 - clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap); 753 + __clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap); 754 754 755 755 /* Let's check and deallocate this dentry page */ 756 756 bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
+2
fs/f2fs/f2fs.h
··· 561 561 struct mutex build_lock; /* lock for build free nids */ 562 562 unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE]; 563 563 unsigned char *nat_block_bitmap; 564 + unsigned short *free_nid_count; /* free nid count of NAT block */ 565 + spinlock_t free_nid_lock; /* protect updating of nid count */ 564 566 565 567 /* for checkpoint */ 566 568 char *nat_bitmap; /* NAT bitmap pointer */
+77 -86
fs/f2fs/node.c
··· 338 338 set_nat_flag(e, IS_CHECKPOINTED, false); 339 339 __set_nat_cache_dirty(nm_i, e); 340 340 341 - if (enabled_nat_bits(sbi, NULL) && new_blkaddr == NEW_ADDR) 342 - clear_bit_le(NAT_BLOCK_OFFSET(ni->nid), nm_i->empty_nat_bits); 343 - 344 341 /* update fsync_mark if its inode nat entry is still alive */ 345 342 if (ni->nid != ni->ino) 346 343 e = __lookup_nat_cache(nm_i, ni->ino); ··· 1820 1823 kmem_cache_free(free_nid_slab, i); 1821 1824 } 1822 1825 1823 - void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set) 1826 + static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, 1827 + bool set, bool build, bool locked) 1824 1828 { 1825 1829 struct f2fs_nm_info *nm_i = NM_I(sbi); 1826 1830 unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid); ··· 1831 1833 return; 1832 1834 1833 1835 if (set) 1834 - set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); 1836 + __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); 1835 1837 else 1836 - clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); 1838 + __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); 1839 + 1840 + if (!locked) 1841 + spin_lock(&nm_i->free_nid_lock); 1842 + if (set) 1843 + nm_i->free_nid_count[nat_ofs]++; 1844 + else if (!build) 1845 + nm_i->free_nid_count[nat_ofs]--; 1846 + if (!locked) 1847 + spin_unlock(&nm_i->free_nid_lock); 1837 1848 } 1838 1849 1839 1850 static void scan_nat_page(struct f2fs_sb_info *sbi, ··· 1854 1847 unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid); 1855 1848 int i; 1856 1849 1857 - set_bit_le(nat_ofs, nm_i->nat_block_bitmap); 1850 + if (test_bit_le(nat_ofs, nm_i->nat_block_bitmap)) 1851 + return; 1852 + 1853 + __set_bit_le(nat_ofs, nm_i->nat_block_bitmap); 1858 1854 1859 1855 i = start_nid % NAT_ENTRY_PER_BLOCK; 1860 1856 ··· 1871 1861 f2fs_bug_on(sbi, blk_addr == NEW_ADDR); 1872 1862 if (blk_addr == NULL_ADDR) 1873 1863 freed = add_free_nid(sbi, start_nid, true); 1874 - update_free_nid_bitmap(sbi, start_nid, freed); 1864 + 
update_free_nid_bitmap(sbi, start_nid, freed, true, false); 1875 1865 } 1876 1866 } 1877 1867 ··· 1886 1876 1887 1877 for (i = 0; i < nm_i->nat_blocks; i++) { 1888 1878 if (!test_bit_le(i, nm_i->nat_block_bitmap)) 1879 + continue; 1880 + if (!nm_i->free_nid_count[i]) 1889 1881 continue; 1890 1882 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) { 1891 1883 nid_t nid; ··· 1919 1907 up_read(&nm_i->nat_tree_lock); 1920 1908 } 1921 1909 1922 - static int scan_nat_bits(struct f2fs_sb_info *sbi) 1923 - { 1924 - struct f2fs_nm_info *nm_i = NM_I(sbi); 1925 - struct page *page; 1926 - unsigned int i = 0; 1927 - nid_t nid; 1928 - 1929 - if (!enabled_nat_bits(sbi, NULL)) 1930 - return -EAGAIN; 1931 - 1932 - down_read(&nm_i->nat_tree_lock); 1933 - check_empty: 1934 - i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i); 1935 - if (i >= nm_i->nat_blocks) { 1936 - i = 0; 1937 - goto check_partial; 1938 - } 1939 - 1940 - for (nid = i * NAT_ENTRY_PER_BLOCK; nid < (i + 1) * NAT_ENTRY_PER_BLOCK; 1941 - nid++) { 1942 - if (unlikely(nid >= nm_i->max_nid)) 1943 - break; 1944 - add_free_nid(sbi, nid, true); 1945 - } 1946 - 1947 - if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS) 1948 - goto out; 1949 - i++; 1950 - goto check_empty; 1951 - 1952 - check_partial: 1953 - i = find_next_zero_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i); 1954 - if (i >= nm_i->nat_blocks) { 1955 - disable_nat_bits(sbi, true); 1956 - up_read(&nm_i->nat_tree_lock); 1957 - return -EINVAL; 1958 - } 1959 - 1960 - nid = i * NAT_ENTRY_PER_BLOCK; 1961 - page = get_current_nat_page(sbi, nid); 1962 - scan_nat_page(sbi, page, nid); 1963 - f2fs_put_page(page, 1); 1964 - 1965 - if (nm_i->nid_cnt[FREE_NID_LIST] < MAX_FREE_NIDS) { 1966 - i++; 1967 - goto check_partial; 1968 - } 1969 - out: 1970 - up_read(&nm_i->nat_tree_lock); 1971 - return 0; 1972 - } 1973 - 1974 1910 static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) 1975 1911 { 1976 1912 struct f2fs_nm_info *nm_i = NM_I(sbi); ··· 
1940 1980 1941 1981 if (nm_i->nid_cnt[FREE_NID_LIST]) 1942 1982 return; 1943 - 1944 - /* try to find free nids with nat_bits */ 1945 - if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST]) 1946 - return; 1947 - } 1948 - 1949 - /* find next valid candidate */ 1950 - if (enabled_nat_bits(sbi, NULL)) { 1951 - int idx = find_next_zero_bit_le(nm_i->full_nat_bits, 1952 - nm_i->nat_blocks, 0); 1953 - 1954 - if (idx >= nm_i->nat_blocks) 1955 - set_sbi_flag(sbi, SBI_NEED_FSCK); 1956 - else 1957 - nid = idx * NAT_ENTRY_PER_BLOCK; 1958 1983 } 1959 1984 1960 1985 /* readahead nat pages to be scanned */ ··· 2026 2081 __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false); 2027 2082 nm_i->available_nids--; 2028 2083 2029 - update_free_nid_bitmap(sbi, *nid, false); 2084 + update_free_nid_bitmap(sbi, *nid, false, false, false); 2030 2085 2031 2086 spin_unlock(&nm_i->nid_list_lock); 2032 2087 return true; ··· 2082 2137 2083 2138 nm_i->available_nids++; 2084 2139 2085 - update_free_nid_bitmap(sbi, nid, true); 2140 + update_free_nid_bitmap(sbi, nid, true, false, false); 2086 2141 2087 2142 spin_unlock(&nm_i->nid_list_lock); 2088 2143 ··· 2328 2383 list_add_tail(&nes->set_list, head); 2329 2384 } 2330 2385 2331 - void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, 2386 + static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, 2332 2387 struct page *page) 2333 2388 { 2334 2389 struct f2fs_nm_info *nm_i = NM_I(sbi); ··· 2347 2402 valid++; 2348 2403 } 2349 2404 if (valid == 0) { 2350 - set_bit_le(nat_index, nm_i->empty_nat_bits); 2351 - clear_bit_le(nat_index, nm_i->full_nat_bits); 2405 + __set_bit_le(nat_index, nm_i->empty_nat_bits); 2406 + __clear_bit_le(nat_index, nm_i->full_nat_bits); 2352 2407 return; 2353 2408 } 2354 2409 2355 - clear_bit_le(nat_index, nm_i->empty_nat_bits); 2410 + __clear_bit_le(nat_index, nm_i->empty_nat_bits); 2356 2411 if (valid == NAT_ENTRY_PER_BLOCK) 2357 - set_bit_le(nat_index, nm_i->full_nat_bits); 2412 + 
__set_bit_le(nat_index, nm_i->full_nat_bits); 2358 2413 else 2359 - clear_bit_le(nat_index, nm_i->full_nat_bits); 2414 + __clear_bit_le(nat_index, nm_i->full_nat_bits); 2360 2415 } 2361 2416 2362 2417 static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, ··· 2412 2467 add_free_nid(sbi, nid, false); 2413 2468 spin_lock(&NM_I(sbi)->nid_list_lock); 2414 2469 NM_I(sbi)->available_nids++; 2415 - update_free_nid_bitmap(sbi, nid, true); 2470 + update_free_nid_bitmap(sbi, nid, true, false, false); 2416 2471 spin_unlock(&NM_I(sbi)->nid_list_lock); 2417 2472 } else { 2418 2473 spin_lock(&NM_I(sbi)->nid_list_lock); 2419 - update_free_nid_bitmap(sbi, nid, false); 2474 + update_free_nid_bitmap(sbi, nid, false, false, false); 2420 2475 spin_unlock(&NM_I(sbi)->nid_list_lock); 2421 2476 } 2422 2477 } ··· 2522 2577 return 0; 2523 2578 } 2524 2579 2580 + inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi) 2581 + { 2582 + struct f2fs_nm_info *nm_i = NM_I(sbi); 2583 + unsigned int i = 0; 2584 + nid_t nid, last_nid; 2585 + 2586 + if (!enabled_nat_bits(sbi, NULL)) 2587 + return; 2588 + 2589 + for (i = 0; i < nm_i->nat_blocks; i++) { 2590 + i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i); 2591 + if (i >= nm_i->nat_blocks) 2592 + break; 2593 + 2594 + __set_bit_le(i, nm_i->nat_block_bitmap); 2595 + 2596 + nid = i * NAT_ENTRY_PER_BLOCK; 2597 + last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK; 2598 + 2599 + spin_lock(&nm_i->free_nid_lock); 2600 + for (; nid < last_nid; nid++) 2601 + update_free_nid_bitmap(sbi, nid, true, true, true); 2602 + spin_unlock(&nm_i->free_nid_lock); 2603 + } 2604 + 2605 + for (i = 0; i < nm_i->nat_blocks; i++) { 2606 + i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i); 2607 + if (i >= nm_i->nat_blocks) 2608 + break; 2609 + 2610 + __set_bit_le(i, nm_i->nat_block_bitmap); 2611 + } 2612 + } 2613 + 2525 2614 static int init_node_manager(struct f2fs_sb_info *sbi) 2526 2615 { 2527 2616 struct f2fs_super_block *sb_raw = 
F2FS_RAW_SUPER(sbi); ··· 2617 2638 return 0; 2618 2639 } 2619 2640 2620 - int init_free_nid_cache(struct f2fs_sb_info *sbi) 2641 + static int init_free_nid_cache(struct f2fs_sb_info *sbi) 2621 2642 { 2622 2643 struct f2fs_nm_info *nm_i = NM_I(sbi); 2623 2644 ··· 2630 2651 GFP_KERNEL); 2631 2652 if (!nm_i->nat_block_bitmap) 2632 2653 return -ENOMEM; 2654 + 2655 + nm_i->free_nid_count = f2fs_kvzalloc(nm_i->nat_blocks * 2656 + sizeof(unsigned short), GFP_KERNEL); 2657 + if (!nm_i->free_nid_count) 2658 + return -ENOMEM; 2659 + 2660 + spin_lock_init(&nm_i->free_nid_lock); 2661 + 2633 2662 return 0; 2634 2663 } 2635 2664 ··· 2656 2669 err = init_free_nid_cache(sbi); 2657 2670 if (err) 2658 2671 return err; 2672 + 2673 + /* load free nid status from nat_bits table */ 2674 + load_free_nid_bitmap(sbi); 2659 2675 2660 2676 build_free_nids(sbi, true, true); 2661 2677 return 0; ··· 2720 2730 2721 2731 kvfree(nm_i->nat_block_bitmap); 2722 2732 kvfree(nm_i->free_nid_bitmap); 2733 + kvfree(nm_i->free_nid_count); 2723 2734 2724 2735 kfree(nm_i->nat_bitmap); 2725 2736 kfree(nm_i->nat_bits);
+6
fs/f2fs/segment.c
··· 1163 1163 if (f2fs_discard_en(sbi) && 1164 1164 !f2fs_test_and_set_bit(offset, se->discard_map)) 1165 1165 sbi->discard_blks--; 1166 + 1167 + /* don't overwrite by SSR to keep node chain */ 1168 + if (se->type == CURSEG_WARM_NODE) { 1169 + if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) 1170 + se->ckpt_valid_blocks++; 1171 + } 1166 1172 } else { 1167 1173 if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) { 1168 1174 #ifdef CONFIG_F2FS_CHECK_FS
+21 -14
fs/fs-writeback.c
··· 173 173 spin_unlock_bh(&wb->work_lock); 174 174 } 175 175 176 + static void finish_writeback_work(struct bdi_writeback *wb, 177 + struct wb_writeback_work *work) 178 + { 179 + struct wb_completion *done = work->done; 180 + 181 + if (work->auto_free) 182 + kfree(work); 183 + if (done && atomic_dec_and_test(&done->cnt)) 184 + wake_up_all(&wb->bdi->wb_waitq); 185 + } 186 + 176 187 static void wb_queue_work(struct bdi_writeback *wb, 177 188 struct wb_writeback_work *work) 178 189 { 179 190 trace_writeback_queue(wb, work); 180 191 181 - spin_lock_bh(&wb->work_lock); 182 - if (!test_bit(WB_registered, &wb->state)) 183 - goto out_unlock; 184 192 if (work->done) 185 193 atomic_inc(&work->done->cnt); 186 - list_add_tail(&work->list, &wb->work_list); 187 - mod_delayed_work(bdi_wq, &wb->dwork, 0); 188 - out_unlock: 194 + 195 + spin_lock_bh(&wb->work_lock); 196 + 197 + if (test_bit(WB_registered, &wb->state)) { 198 + list_add_tail(&work->list, &wb->work_list); 199 + mod_delayed_work(bdi_wq, &wb->dwork, 0); 200 + } else 201 + finish_writeback_work(wb, work); 202 + 189 203 spin_unlock_bh(&wb->work_lock); 190 204 } 191 205 ··· 1887 1873 1888 1874 set_bit(WB_writeback_running, &wb->state); 1889 1875 while ((work = get_next_work_item(wb)) != NULL) { 1890 - struct wb_completion *done = work->done; 1891 - 1892 1876 trace_writeback_exec(wb, work); 1893 - 1894 1877 wrote += wb_writeback(wb, work); 1895 - 1896 - if (work->auto_free) 1897 - kfree(work); 1898 - if (done && atomic_dec_and_test(&done->cnt)) 1899 - wake_up_all(&wb->bdi->wb_waitq); 1878 + finish_writeback_work(wb, work); 1900 1879 } 1901 1880 1902 1881 /*
+2 -2
fs/nfs/callback.c
··· 232 232 .svo_module = THIS_MODULE, 233 233 }; 234 234 235 - struct svc_serv_ops *nfs4_cb_sv_ops[] = { 235 + static struct svc_serv_ops *nfs4_cb_sv_ops[] = { 236 236 [0] = &nfs40_cb_sv_ops, 237 237 [1] = &nfs41_cb_sv_ops, 238 238 }; 239 239 #else 240 - struct svc_serv_ops *nfs4_cb_sv_ops[] = { 240 + static struct svc_serv_ops *nfs4_cb_sv_ops[] = { 241 241 [0] = &nfs40_cb_sv_ops, 242 242 [1] = NULL, 243 243 };
+24 -1
fs/nfs/client.c
··· 325 325 return NULL; 326 326 } 327 327 328 - static bool nfs_client_init_is_complete(const struct nfs_client *clp) 328 + /* 329 + * Return true if @clp is done initializing, false if still working on it. 330 + * 331 + * Use nfs_client_init_status to check if it was successful. 332 + */ 333 + bool nfs_client_init_is_complete(const struct nfs_client *clp) 329 334 { 330 335 return clp->cl_cons_state <= NFS_CS_READY; 331 336 } 337 + EXPORT_SYMBOL_GPL(nfs_client_init_is_complete); 338 + 339 + /* 340 + * Return 0 if @clp was successfully initialized, -errno otherwise. 341 + * 342 + * This must be called *after* nfs_client_init_is_complete() returns true, 343 + * otherwise it will pop WARN_ON_ONCE and return -EINVAL 344 + */ 345 + int nfs_client_init_status(const struct nfs_client *clp) 346 + { 347 + /* called without checking nfs_client_init_is_complete */ 348 + if (clp->cl_cons_state > NFS_CS_READY) { 349 + WARN_ON_ONCE(1); 350 + return -EINVAL; 351 + } 352 + return clp->cl_cons_state; 353 + } 354 + EXPORT_SYMBOL_GPL(nfs_client_init_status); 332 355 333 356 int nfs_wait_client_init_complete(const struct nfs_client *clp) 334 357 {
+7 -1
fs/nfs/filelayout/filelayoutdev.c
··· 266 266 struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg); 267 267 struct nfs4_pnfs_ds *ret = ds; 268 268 struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode); 269 + int status; 269 270 270 271 if (ds == NULL) { 271 272 printk(KERN_ERR "NFS: %s: No data server for offset index %d\n", ··· 278 277 if (ds->ds_clp) 279 278 goto out_test_devid; 280 279 281 - nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, 280 + status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, 282 281 dataserver_retrans, 4, 283 282 s->nfs_client->cl_minorversion); 283 + if (status) { 284 + nfs4_mark_deviceid_unavailable(devid); 285 + ret = NULL; 286 + goto out; 287 + } 284 288 285 289 out_test_devid: 286 290 if (ret->ds_clp == NULL ||
+13 -1
fs/nfs/flexfilelayout/flexfilelayout.h
··· 175 175 static inline bool 176 176 ff_layout_test_devid_unavailable(struct nfs4_deviceid_node *node) 177 177 { 178 - return nfs4_test_deviceid_unavailable(node); 178 + /* 179 + * Flexfiles should never mark a DS unavailable, but if it does 180 + * print a (ratelimited) warning as this can affect performance. 181 + */ 182 + if (nfs4_test_deviceid_unavailable(node)) { 183 + u32 *p = (u32 *)node->deviceid.data; 184 + 185 + pr_warn_ratelimited("NFS: flexfiles layout referencing an " 186 + "unavailable device [%x%x%x%x]\n", 187 + p[0], p[1], p[2], p[3]); 188 + return true; 189 + } 190 + return false; 179 191 } 180 192 181 193 static inline int
+3 -2
fs/nfs/flexfilelayout/flexfilelayoutdev.c
··· 384 384 struct inode *ino = lseg->pls_layout->plh_inode; 385 385 struct nfs_server *s = NFS_SERVER(ino); 386 386 unsigned int max_payload; 387 + int status; 387 388 388 389 if (!ff_layout_mirror_valid(lseg, mirror, true)) { 389 390 pr_err_ratelimited("NFS: %s: No data server for offset index %d\n", ··· 405 404 /* FIXME: For now we assume the server sent only one version of NFS 406 405 * to use for the DS. 407 406 */ 408 - nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, 407 + status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, 409 408 dataserver_retrans, 410 409 mirror->mirror_ds->ds_versions[0].version, 411 410 mirror->mirror_ds->ds_versions[0].minor_version); ··· 421 420 mirror->mirror_ds->ds_versions[0].wsize = max_payload; 422 421 goto out; 423 422 } 423 + out_fail: 424 424 ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout), 425 425 mirror, lseg->pls_range.offset, 426 426 lseg->pls_range.length, NFS4ERR_NXIO, 427 427 OP_ILLEGAL, GFP_NOIO); 428 - out_fail: 429 428 if (fail_return || !ff_layout_has_available_ds(lseg)) 430 429 pnfs_error_mark_layout_for_return(ino, lseg); 431 430 ds = NULL;
+2
fs/nfs/internal.h
··· 186 186 struct nfs_fh *, 187 187 struct nfs_fattr *, 188 188 rpc_authflavor_t); 189 + extern bool nfs_client_init_is_complete(const struct nfs_client *clp); 190 + extern int nfs_client_init_status(const struct nfs_client *clp); 189 191 extern int nfs_wait_client_init_complete(const struct nfs_client *clp); 190 192 extern void nfs_mark_client_ready(struct nfs_client *clp, int state); 191 193 extern struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
+2 -2
fs/nfs/nfs4client.c
··· 1023 1023 server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead; 1024 1024 server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead; 1025 1025 1026 - if (server->rsize > server_resp_sz) 1026 + if (!server->rsize || server->rsize > server_resp_sz) 1027 1027 server->rsize = server_resp_sz; 1028 - if (server->wsize > server_rqst_sz) 1028 + if (!server->wsize || server->wsize > server_rqst_sz) 1029 1029 server->wsize = server_rqst_sz; 1030 1030 #endif /* CONFIG_NFS_V4_1 */ 1031 1031 }
+4 -7
fs/nfs/nfs4proc.c
··· 2258 2258 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) 2259 2259 return 0; 2260 2260 2261 - /* even though OPEN succeeded, access is denied. Close the file */ 2262 - nfs4_close_state(state, fmode); 2263 2261 return -EACCES; 2264 2262 } 2265 2263 ··· 7425 7427 struct nfs41_exchange_id_data *cdata = 7426 7428 (struct nfs41_exchange_id_data *)data; 7427 7429 7428 - nfs_put_client(cdata->args.client); 7429 7430 if (cdata->xprt) { 7430 7431 xprt_put(cdata->xprt); 7431 7432 rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient); 7432 7433 } 7434 + nfs_put_client(cdata->args.client); 7433 7435 kfree(cdata->res.impl_id); 7434 7436 kfree(cdata->res.server_scope); 7435 7437 kfree(cdata->res.server_owner); ··· 7536 7538 task_setup_data.callback_data = calldata; 7537 7539 7538 7540 task = rpc_run_task(&task_setup_data); 7539 - if (IS_ERR(task)) { 7540 - status = PTR_ERR(task); 7541 - goto out_impl_id; 7542 - } 7541 + if (IS_ERR(task)) 7542 + return PTR_ERR(task); 7543 7543 7544 7544 if (!xprt) { 7545 7545 status = rpc_wait_for_completion_task(task); ··· 7565 7569 kfree(calldata->res.server_owner); 7566 7570 out_calldata: 7567 7571 kfree(calldata); 7572 + nfs_put_client(clp); 7568 7573 goto out; 7569 7574 } 7570 7575
+1 -1
fs/nfs/nfs4xdr.c
··· 3942 3942 if (len <= 0) 3943 3943 goto out; 3944 3944 dprintk("%s: name=%s\n", __func__, group_name->data); 3945 - return NFS_ATTR_FATTR_OWNER_NAME; 3945 + return NFS_ATTR_FATTR_GROUP_NAME; 3946 3946 } else { 3947 3947 len = xdr_stream_decode_opaque_inline(xdr, (void **)&p, 3948 3948 XDR_MAX_NETOBJ);
+1 -1
fs/nfs/pnfs.h
··· 367 367 struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs, 368 368 gfp_t gfp_flags); 369 369 void nfs4_pnfs_v3_ds_connect_unload(void); 370 - void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, 370 + int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, 371 371 struct nfs4_deviceid_node *devid, unsigned int timeo, 372 372 unsigned int retrans, u32 version, u32 minor_version); 373 373 struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net,
+25 -6
fs/nfs/pnfs_nfs.c
··· 745 745 /* 746 746 * Create an rpc connection to the nfs4_pnfs_ds data server. 747 747 * Currently only supports IPv4 and IPv6 addresses. 748 - * If connection fails, make devid unavailable. 748 + * If connection fails, make devid unavailable and return a -errno. 749 749 */ 750 - void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, 750 + int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, 751 751 struct nfs4_deviceid_node *devid, unsigned int timeo, 752 752 unsigned int retrans, u32 version, u32 minor_version) 753 753 { 754 - if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) { 755 - int err = 0; 754 + int err; 756 755 756 + again: 757 + err = 0; 758 + if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) { 757 759 if (version == 3) { 758 760 err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo, 759 761 retrans); ··· 768 766 err = -EPROTONOSUPPORT; 769 767 } 770 768 771 - if (err) 772 - nfs4_mark_deviceid_unavailable(devid); 773 769 nfs4_clear_ds_conn_bit(ds); 774 770 } else { 775 771 nfs4_wait_ds_connect(ds); 772 + 773 + /* what was waited on didn't connect AND didn't mark unavail */ 774 + if (!ds->ds_clp && !nfs4_test_deviceid_unavailable(devid)) 775 + goto again; 776 776 } 777 + 778 + /* 779 + * At this point the ds->ds_clp should be ready, but it might have 780 + * hit an error. 781 + */ 782 + if (!err) { 783 + if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) { 784 + WARN_ON_ONCE(ds->ds_clp || 785 + !nfs4_test_deviceid_unavailable(devid)); 786 + return -EINVAL; 787 + } 788 + err = nfs_client_init_status(ds->ds_clp); 789 + } 790 + 791 + return err; 777 792 } 778 793 EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect); 779 794
+4 -2
fs/nfs/write.c
··· 1784 1784 (long long)req_offset(req)); 1785 1785 if (status < 0) { 1786 1786 nfs_context_set_write_error(req->wb_context, status); 1787 - nfs_inode_remove_request(req); 1787 + if (req->wb_page) 1788 + nfs_inode_remove_request(req); 1788 1789 dprintk_cont(", error = %d\n", status); 1789 1790 goto next; 1790 1791 } ··· 1794 1793 * returned by the server against all stored verfs. */ 1795 1794 if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) { 1796 1795 /* We have a match */ 1797 - nfs_inode_remove_request(req); 1796 + if (req->wb_page) 1797 + nfs_inode_remove_request(req); 1798 1798 dprintk_cont(" OK\n"); 1799 1799 goto next; 1800 1800 }
+2
fs/xfs/libxfs/xfs_dir2_priv.h
··· 125 125 extern int xfs_dir2_sf_lookup(struct xfs_da_args *args); 126 126 extern int xfs_dir2_sf_removename(struct xfs_da_args *args); 127 127 extern int xfs_dir2_sf_replace(struct xfs_da_args *args); 128 + extern int xfs_dir2_sf_verify(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *sfp, 129 + int size); 128 130 129 131 /* xfs_dir2_readdir.c */ 130 132 extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
+87
fs/xfs/libxfs/xfs_dir2_sf.c
··· 629 629 } 630 630 #endif /* DEBUG */ 631 631 632 + /* Verify the consistency of an inline directory. */ 633 + int 634 + xfs_dir2_sf_verify( 635 + struct xfs_mount *mp, 636 + struct xfs_dir2_sf_hdr *sfp, 637 + int size) 638 + { 639 + struct xfs_dir2_sf_entry *sfep; 640 + struct xfs_dir2_sf_entry *next_sfep; 641 + char *endp; 642 + const struct xfs_dir_ops *dops; 643 + xfs_ino_t ino; 644 + int i; 645 + int i8count; 646 + int offset; 647 + __uint8_t filetype; 648 + 649 + dops = xfs_dir_get_ops(mp, NULL); 650 + 651 + /* 652 + * Give up if the directory is way too short. 653 + */ 654 + XFS_WANT_CORRUPTED_RETURN(mp, size > 655 + offsetof(struct xfs_dir2_sf_hdr, parent)); 656 + XFS_WANT_CORRUPTED_RETURN(mp, size >= 657 + xfs_dir2_sf_hdr_size(sfp->i8count)); 658 + 659 + endp = (char *)sfp + size; 660 + 661 + /* Check .. entry */ 662 + ino = dops->sf_get_parent_ino(sfp); 663 + i8count = ino > XFS_DIR2_MAX_SHORT_INUM; 664 + XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino)); 665 + offset = dops->data_first_offset; 666 + 667 + /* Check all reported entries */ 668 + sfep = xfs_dir2_sf_firstentry(sfp); 669 + for (i = 0; i < sfp->count; i++) { 670 + /* 671 + * struct xfs_dir2_sf_entry has a variable length. 672 + * Check the fixed-offset parts of the structure are 673 + * within the data buffer. 674 + */ 675 + XFS_WANT_CORRUPTED_RETURN(mp, 676 + ((char *)sfep + sizeof(*sfep)) < endp); 677 + 678 + /* Don't allow names with known bad length. */ 679 + XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen > 0); 680 + XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen < MAXNAMELEN); 681 + 682 + /* 683 + * Check that the variable-length part of the structure is 684 + * within the data buffer. The next entry starts after the 685 + * name component, so nextentry is an acceptable test. 686 + */ 687 + next_sfep = dops->sf_nextentry(sfp, sfep); 688 + XFS_WANT_CORRUPTED_RETURN(mp, endp >= (char *)next_sfep); 689 + 690 + /* Check that the offsets always increase. 
*/ 691 + XFS_WANT_CORRUPTED_RETURN(mp, 692 + xfs_dir2_sf_get_offset(sfep) >= offset); 693 + 694 + /* Check the inode number. */ 695 + ino = dops->sf_get_ino(sfp, sfep); 696 + i8count += ino > XFS_DIR2_MAX_SHORT_INUM; 697 + XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino)); 698 + 699 + /* Check the file type. */ 700 + filetype = dops->sf_get_ftype(sfep); 701 + XFS_WANT_CORRUPTED_RETURN(mp, filetype < XFS_DIR3_FT_MAX); 702 + 703 + offset = xfs_dir2_sf_get_offset(sfep) + 704 + dops->data_entsize(sfep->namelen); 705 + 706 + sfep = next_sfep; 707 + } 708 + XFS_WANT_CORRUPTED_RETURN(mp, i8count == sfp->i8count); 709 + XFS_WANT_CORRUPTED_RETURN(mp, (void *)sfep == (void *)endp); 710 + 711 + /* Make sure this whole thing ought to be in local format. */ 712 + XFS_WANT_CORRUPTED_RETURN(mp, offset + 713 + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) + 714 + (uint)sizeof(xfs_dir2_block_tail_t) <= mp->m_dir_geo->blksize); 715 + 716 + return 0; 717 + } 718 + 632 719 /* 633 720 * Create a new (shortform) directory. 634 721 */
+23 -3
fs/xfs/libxfs/xfs_inode_fork.c
··· 33 33 #include "xfs_trace.h" 34 34 #include "xfs_attr_sf.h" 35 35 #include "xfs_da_format.h" 36 + #include "xfs_da_btree.h" 37 + #include "xfs_dir2_priv.h" 36 38 37 39 kmem_zone_t *xfs_ifork_zone; 38 40 ··· 322 320 int whichfork, 323 321 int size) 324 322 { 323 + int error; 325 324 326 325 /* 327 326 * If the size is unreasonable, then something ··· 337 334 XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW, 338 335 ip->i_mount, dip); 339 336 return -EFSCORRUPTED; 337 + } 338 + 339 + if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) { 340 + error = xfs_dir2_sf_verify(ip->i_mount, 341 + (struct xfs_dir2_sf_hdr *)XFS_DFORK_DPTR(dip), 342 + size); 343 + if (error) 344 + return error; 340 345 } 341 346 342 347 xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size); ··· 867 856 * In these cases, the format always takes precedence, because the 868 857 * format indicates the current state of the fork. 869 858 */ 870 - void 859 + int 871 860 xfs_iflush_fork( 872 861 xfs_inode_t *ip, 873 862 xfs_dinode_t *dip, ··· 877 866 char *cp; 878 867 xfs_ifork_t *ifp; 879 868 xfs_mount_t *mp; 869 + int error; 880 870 static const short brootflag[2] = 881 871 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; 882 872 static const short dataflag[2] = ··· 886 874 { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; 887 875 888 876 if (!iip) 889 - return; 877 + return 0; 890 878 ifp = XFS_IFORK_PTR(ip, whichfork); 891 879 /* 892 880 * This can happen if we gave up in iformat in an error path, ··· 894 882 */ 895 883 if (!ifp) { 896 884 ASSERT(whichfork == XFS_ATTR_FORK); 897 - return; 885 + return 0; 898 886 } 899 887 cp = XFS_DFORK_PTR(dip, whichfork); 900 888 mp = ip->i_mount; 901 889 switch (XFS_IFORK_FORMAT(ip, whichfork)) { 902 890 case XFS_DINODE_FMT_LOCAL: 891 + if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) { 892 + error = xfs_dir2_sf_verify(mp, 893 + (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data, 894 + ifp->if_bytes); 895 + if (error) 896 + return error; 897 
+ } 903 898 if ((iip->ili_fields & dataflag[whichfork]) && 904 899 (ifp->if_bytes > 0)) { 905 900 ASSERT(ifp->if_u1.if_data != NULL); ··· 959 940 ASSERT(0); 960 941 break; 961 942 } 943 + return 0; 962 944 } 963 945 964 946 /*
+1 -1
fs/xfs/libxfs/xfs_inode_fork.h
··· 140 140 struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state); 141 141 142 142 int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *); 143 - void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *, 143 + int xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *, 144 144 struct xfs_inode_log_item *, int); 145 145 void xfs_idestroy_fork(struct xfs_inode *, int); 146 146 void xfs_idata_realloc(struct xfs_inode *, int, int);
-11
fs/xfs/xfs_dir2_readdir.c
··· 71 71 struct xfs_da_geometry *geo = args->geo; 72 72 73 73 ASSERT(dp->i_df.if_flags & XFS_IFINLINE); 74 - /* 75 - * Give up if the directory is way too short. 76 - */ 77 - if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { 78 - ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); 79 - return -EIO; 80 - } 81 - 82 74 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); 83 75 ASSERT(dp->i_df.if_u1.if_data != NULL); 84 76 85 77 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; 86 - 87 - if (dp->i_d.di_size < xfs_dir2_sf_hdr_size(sfp->i8count)) 88 - return -EFSCORRUPTED; 89 78 90 79 /* 91 80 * If the block number in the offset is out of range, we're done.
+9 -3
fs/xfs/xfs_inode.c
··· 3475 3475 struct xfs_inode_log_item *iip = ip->i_itemp; 3476 3476 struct xfs_dinode *dip; 3477 3477 struct xfs_mount *mp = ip->i_mount; 3478 + int error; 3478 3479 3479 3480 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 3480 3481 ASSERT(xfs_isiflocked(ip)); ··· 3558 3557 if (ip->i_d.di_flushiter == DI_MAX_FLUSH) 3559 3558 ip->i_d.di_flushiter = 0; 3560 3559 3561 - xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); 3562 - if (XFS_IFORK_Q(ip)) 3563 - xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); 3560 + error = xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); 3561 + if (error) 3562 + return error; 3563 + if (XFS_IFORK_Q(ip)) { 3564 + error = xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); 3565 + if (error) 3566 + return error; 3567 + } 3564 3568 xfs_inobp_check(mp, bp); 3565 3569 3566 3570 /*
+1 -4
include/linux/acpi.h
··· 287 287 } 288 288 289 289 /* Validate the processor object's proc_id */ 290 - bool acpi_processor_validate_proc_id(int proc_id); 290 + bool acpi_duplicate_processor_id(int proc_id); 291 291 292 292 #ifdef CONFIG_ACPI_HOTPLUG_CPU 293 293 /* Arch dependent functions for cpu hotplug support */ 294 294 int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, 295 295 int *pcpu); 296 296 int acpi_unmap_cpu(int cpu); 297 - int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid); 298 297 #endif /* CONFIG_ACPI_HOTPLUG_CPU */ 299 - 300 - void acpi_set_processor_mapping(void); 301 298 302 299 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC 303 300 int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
-1
include/linux/device.h
··· 1140 1140 extern void lock_device_hotplug(void); 1141 1141 extern void unlock_device_hotplug(void); 1142 1142 extern int lock_device_hotplug_sysfs(void); 1143 - void assert_held_device_hotplug(void); 1144 1143 extern int device_offline(struct device *dev); 1145 1144 extern int device_online(struct device *dev); 1146 1145 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+2
include/linux/errqueue.h
··· 20 20 struct sock_extended_err ee; 21 21 u16 addr_offset; 22 22 __be16 port; 23 + u8 opt_stats:1, 24 + unused:7; 23 25 }; 24 26 25 27 #endif
-16
include/linux/gpio/consumer.h
··· 143 143 struct fwnode_handle *child, 144 144 enum gpiod_flags flags, 145 145 const char *label); 146 - /* FIXME: delete this helper when users are switched over */ 147 - static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, 148 - const char *con_id, struct fwnode_handle *child) 149 - { 150 - return devm_fwnode_get_index_gpiod_from_child(dev, con_id, 151 - 0, child, 152 - GPIOD_ASIS, 153 - "?"); 154 - } 155 146 156 147 #else /* CONFIG_GPIOLIB */ 157 148 ··· 431 440 struct fwnode_handle *child, 432 441 enum gpiod_flags flags, 433 442 const char *label) 434 - { 435 - return ERR_PTR(-ENOSYS); 436 - } 437 - 438 - /* FIXME: delete this when all users are switched over */ 439 - static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, 440 - const char *con_id, struct fwnode_handle *child) 441 443 { 442 444 return ERR_PTR(-ENOSYS); 443 445 }
+1
include/linux/kasan.h
··· 6 6 struct kmem_cache; 7 7 struct page; 8 8 struct vm_struct; 9 + struct task_struct; 9 10 10 11 #ifdef CONFIG_KASAN 11 12
+1
include/linux/mlx4/device.h
··· 476 476 enum { 477 477 MLX4_INTERFACE_STATE_UP = 1 << 0, 478 478 MLX4_INTERFACE_STATE_DELETION = 1 << 1, 479 + MLX4_INTERFACE_STATE_NOWAIT = 1 << 2, 479 480 }; 480 481 481 482 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
+3
include/linux/virtio_vsock.h
··· 48 48 struct virtio_vsock_hdr hdr; 49 49 struct work_struct work; 50 50 struct list_head list; 51 + /* socket refcnt not held, only use for cancellation */ 52 + struct vsock_sock *vsk; 51 53 void *buf; 52 54 u32 len; 53 55 u32 off; ··· 58 56 59 57 struct virtio_vsock_pkt_info { 60 58 u32 remote_cid, remote_port; 59 + struct vsock_sock *vsk; 61 60 struct msghdr *msg; 62 61 u32 pkt_len; 63 62 u16 type;
+3
include/net/af_vsock.h
··· 100 100 void (*destruct)(struct vsock_sock *); 101 101 void (*release)(struct vsock_sock *); 102 102 103 + /* Cancel all pending packets sent on vsock. */ 104 + int (*cancel_pkt)(struct vsock_sock *vsk); 105 + 103 106 /* Connections. */ 104 107 int (*connect)(struct vsock_sock *); 105 108
+1 -1
include/net/netfilter/nf_conntrack.h
··· 244 244 u32 seq); 245 245 246 246 /* Fake conntrack entry for untracked connections */ 247 - DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked); 247 + DECLARE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked); 248 248 static inline struct nf_conn *nf_ct_untracked_get(void) 249 249 { 250 250 return raw_cpu_ptr(&nf_conntrack_untracked);
+29 -1
include/net/netfilter/nf_tables.h
··· 103 103 }; 104 104 }; 105 105 106 + /* Store/load an u16 or u8 integer to/from the u32 data register. 107 + * 108 + * Note, when using concatenations, register allocation happens at 32-bit 109 + * level. So for store instruction, pad the rest part with zero to avoid 110 + * garbage values. 111 + */ 112 + 113 + static inline void nft_reg_store16(u32 *dreg, u16 val) 114 + { 115 + *dreg = 0; 116 + *(u16 *)dreg = val; 117 + } 118 + 119 + static inline void nft_reg_store8(u32 *dreg, u8 val) 120 + { 121 + *dreg = 0; 122 + *(u8 *)dreg = val; 123 + } 124 + 125 + static inline u16 nft_reg_load16(u32 *sreg) 126 + { 127 + return *(u16 *)sreg; 128 + } 129 + 130 + static inline u8 nft_reg_load8(u32 *sreg) 131 + { 132 + return *(u8 *)sreg; 133 + } 134 + 106 135 static inline void nft_data_copy(u32 *dst, const struct nft_data *src, 107 136 unsigned int len) 108 137 { ··· 232 203 struct nft_set; 233 204 struct nft_set_iter { 234 205 u8 genmask; 235 - bool flush; 236 206 unsigned int count; 237 207 unsigned int skip; 238 208 int err;
+4 -2
include/net/netfilter/nf_tables_ipv6.h
··· 9 9 struct sk_buff *skb, 10 10 const struct nf_hook_state *state) 11 11 { 12 + unsigned int flags = IP6_FH_F_AUTH; 12 13 int protohdr, thoff = 0; 13 14 unsigned short frag_off; 14 15 15 16 nft_set_pktinfo(pkt, skb, state); 16 17 17 - protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); 18 + protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags); 18 19 if (protohdr < 0) { 19 20 nft_set_pktinfo_proto_unspec(pkt, skb); 20 21 return; ··· 33 32 const struct nf_hook_state *state) 34 33 { 35 34 #if IS_ENABLED(CONFIG_IPV6) 35 + unsigned int flags = IP6_FH_F_AUTH; 36 36 struct ipv6hdr *ip6h, _ip6h; 37 37 unsigned int thoff = 0; 38 38 unsigned short frag_off; ··· 52 50 if (pkt_len + sizeof(*ip6h) > skb->len) 53 51 return -1; 54 52 55 - protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); 53 + protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags); 56 54 if (protohdr < 0) 57 55 return -1; 58 56
+3 -2
include/net/sctp/structs.h
··· 83 83 struct sctp_ulpq; 84 84 struct sctp_ep_common; 85 85 struct crypto_shash; 86 + struct sctp_stream; 86 87 87 88 88 89 #include <net/sctp/tsnmap.h> ··· 754 753 /* Is the Path MTU update pending on this tranport */ 755 754 pmtu_pending:1, 756 755 756 + dst_pending_confirm:1, /* need to confirm neighbour */ 757 + 757 758 /* Has this transport moved the ctsn since we last sacked */ 758 759 sack_generation:1; 759 760 u32 dst_cookie; ··· 808 805 __u32 flight_size; 809 806 810 807 __u32 burst_limited; /* Holds old cwnd when max.burst is applied */ 811 - 812 - __u32 dst_pending_confirm; /* need to confirm neighbour */ 813 808 814 809 /* Destination */ 815 810 struct dst_entry *dst;
+6 -1
include/target/target_core_backend.h
··· 4 4 #include <linux/types.h> 5 5 #include <target/target_core_base.h> 6 6 7 - #define TRANSPORT_FLAG_PASSTHROUGH 1 7 + #define TRANSPORT_FLAG_PASSTHROUGH 0x1 8 + /* 9 + * ALUA commands, state checks and setup operations are handled by the 10 + * backend module. 11 + */ 12 + #define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2 8 13 9 14 struct request_queue; 10 15 struct scatterlist;
+1 -1
include/target/target_core_base.h
··· 299 299 struct list_head tg_pt_gp_lun_list; 300 300 struct se_lun *tg_pt_gp_alua_lun; 301 301 struct se_node_acl *tg_pt_gp_alua_nacl; 302 - struct delayed_work tg_pt_gp_transition_work; 302 + struct work_struct tg_pt_gp_transition_work; 303 303 struct completion *tg_pt_gp_transition_complete; 304 304 }; 305 305
+19 -19
include/uapi/drm/omap_drm.h
··· 33 33 #define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */ 34 34 35 35 struct drm_omap_param { 36 - uint64_t param; /* in */ 37 - uint64_t value; /* in (set_param), out (get_param) */ 36 + __u64 param; /* in */ 37 + __u64 value; /* in (set_param), out (get_param) */ 38 38 }; 39 39 40 40 #define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */ ··· 53 53 #define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32) 54 54 55 55 union omap_gem_size { 56 - uint32_t bytes; /* (for non-tiled formats) */ 56 + __u32 bytes; /* (for non-tiled formats) */ 57 57 struct { 58 - uint16_t width; 59 - uint16_t height; 58 + __u16 width; 59 + __u16 height; 60 60 } tiled; /* (for tiled formats) */ 61 61 }; 62 62 63 63 struct drm_omap_gem_new { 64 64 union omap_gem_size size; /* in */ 65 - uint32_t flags; /* in */ 66 - uint32_t handle; /* out */ 67 - uint32_t __pad; 65 + __u32 flags; /* in */ 66 + __u32 handle; /* out */ 67 + __u32 __pad; 68 68 }; 69 69 70 70 /* mask of operations: */ ··· 74 74 }; 75 75 76 76 struct drm_omap_gem_cpu_prep { 77 - uint32_t handle; /* buffer handle (in) */ 78 - uint32_t op; /* mask of omap_gem_op (in) */ 77 + __u32 handle; /* buffer handle (in) */ 78 + __u32 op; /* mask of omap_gem_op (in) */ 79 79 }; 80 80 81 81 struct drm_omap_gem_cpu_fini { 82 - uint32_t handle; /* buffer handle (in) */ 83 - uint32_t op; /* mask of omap_gem_op (in) */ 82 + __u32 handle; /* buffer handle (in) */ 83 + __u32 op; /* mask of omap_gem_op (in) */ 84 84 /* TODO maybe here we pass down info about what regions are touched 85 85 * by sw so we can be clever about cache ops? For now a placeholder, 86 86 * set to zero and we just do full buffer flush.. 
87 87 */ 88 - uint32_t nregions; 89 - uint32_t __pad; 88 + __u32 nregions; 89 + __u32 __pad; 90 90 }; 91 91 92 92 struct drm_omap_gem_info { 93 - uint32_t handle; /* buffer handle (in) */ 94 - uint32_t pad; 95 - uint64_t offset; /* mmap offset (out) */ 93 + __u32 handle; /* buffer handle (in) */ 94 + __u32 pad; 95 + __u64 offset; /* mmap offset (out) */ 96 96 /* note: in case of tiled buffers, the user virtual size can be 97 97 * different from the physical size (ie. how many pages are needed 98 98 * to back the object) which is returned in DRM_IOCTL_GEM_OPEN.. 99 99 * This size here is the one that should be used if you want to 100 100 * mmap() the buffer: 101 101 */ 102 - uint32_t size; /* virtual size for mmap'ing (out) */ 103 - uint32_t __pad; 102 + __u32 size; /* virtual size for mmap'ing (out) */ 103 + __u32 __pad; 104 104 }; 105 105 106 106 #define DRM_OMAP_GET_PARAM 0x00
-27
include/uapi/linux/btrfs.h
··· 713 713 BTRFS_ERROR_DEV_ONLY_WRITABLE, 714 714 BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS 715 715 }; 716 - /* An error code to error string mapping for the kernel 717 - * error codes 718 - */ 719 - static inline char *btrfs_err_str(enum btrfs_err_code err_code) 720 - { 721 - switch (err_code) { 722 - case BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET: 723 - return "unable to go below two devices on raid1"; 724 - case BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET: 725 - return "unable to go below four devices on raid10"; 726 - case BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET: 727 - return "unable to go below two devices on raid5"; 728 - case BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET: 729 - return "unable to go below three devices on raid6"; 730 - case BTRFS_ERROR_DEV_TGT_REPLACE: 731 - return "unable to remove the dev_replace target dev"; 732 - case BTRFS_ERROR_DEV_MISSING_NOT_FOUND: 733 - return "no missing devices found to remove"; 734 - case BTRFS_ERROR_DEV_ONLY_WRITABLE: 735 - return "unable to remove the only writeable device"; 736 - case BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS: 737 - return "add/delete/balance/replace/resize operation "\ 738 - "in progress"; 739 - default: 740 - return NULL; 741 - } 742 - } 743 716 744 717 #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \ 745 718 struct btrfs_ioctl_vol_args)
+72 -74
kernel/bpf/hashtab.c
··· 31 31 struct pcpu_freelist freelist; 32 32 struct bpf_lru lru; 33 33 }; 34 - void __percpu *extra_elems; 34 + struct htab_elem *__percpu *extra_elems; 35 35 atomic_t count; /* number of elements in this hashtable */ 36 36 u32 n_buckets; /* number of hash buckets */ 37 37 u32 elem_size; /* size of each element in bytes */ 38 - }; 39 - 40 - enum extra_elem_state { 41 - HTAB_NOT_AN_EXTRA_ELEM = 0, 42 - HTAB_EXTRA_ELEM_FREE, 43 - HTAB_EXTRA_ELEM_USED 44 38 }; 45 39 46 40 /* each htab element is struct htab_elem + key + value */ ··· 51 57 }; 52 58 union { 53 59 struct rcu_head rcu; 54 - enum extra_elem_state state; 55 60 struct bpf_lru_node lru_node; 56 61 }; 57 62 u32 hash; ··· 69 76 { 70 77 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || 71 78 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; 79 + } 80 + 81 + static bool htab_is_prealloc(const struct bpf_htab *htab) 82 + { 83 + return !(htab->map.map_flags & BPF_F_NO_PREALLOC); 72 84 } 73 85 74 86 static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size, ··· 132 134 133 135 static int prealloc_init(struct bpf_htab *htab) 134 136 { 137 + u32 num_entries = htab->map.max_entries; 135 138 int err = -ENOMEM, i; 136 139 137 - htab->elems = bpf_map_area_alloc(htab->elem_size * 138 - htab->map.max_entries); 140 + if (!htab_is_percpu(htab) && !htab_is_lru(htab)) 141 + num_entries += num_possible_cpus(); 142 + 143 + htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries); 139 144 if (!htab->elems) 140 145 return -ENOMEM; 141 146 142 147 if (!htab_is_percpu(htab)) 143 148 goto skip_percpu_elems; 144 149 145 - for (i = 0; i < htab->map.max_entries; i++) { 150 + for (i = 0; i < num_entries; i++) { 146 151 u32 size = round_up(htab->map.value_size, 8); 147 152 void __percpu *pptr; 148 153 ··· 173 172 if (htab_is_lru(htab)) 174 173 bpf_lru_populate(&htab->lru, htab->elems, 175 174 offsetof(struct htab_elem, lru_node), 176 - htab->elem_size, htab->map.max_entries); 175 + htab->elem_size, 
num_entries); 177 176 else 178 177 pcpu_freelist_populate(&htab->freelist, 179 178 htab->elems + offsetof(struct htab_elem, fnode), 180 - htab->elem_size, htab->map.max_entries); 179 + htab->elem_size, num_entries); 181 180 182 181 return 0; 183 182 ··· 198 197 199 198 static int alloc_extra_elems(struct bpf_htab *htab) 200 199 { 201 - void __percpu *pptr; 200 + struct htab_elem *__percpu *pptr, *l_new; 201 + struct pcpu_freelist_node *l; 202 202 int cpu; 203 203 204 - pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN); 204 + pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8, 205 + GFP_USER | __GFP_NOWARN); 205 206 if (!pptr) 206 207 return -ENOMEM; 207 208 208 209 for_each_possible_cpu(cpu) { 209 - ((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state = 210 - HTAB_EXTRA_ELEM_FREE; 210 + l = pcpu_freelist_pop(&htab->freelist); 211 + /* pop will succeed, since prealloc_init() 212 + * preallocated extra num_possible_cpus elements 213 + */ 214 + l_new = container_of(l, struct htab_elem, fnode); 215 + *per_cpu_ptr(pptr, cpu) = l_new; 211 216 } 212 217 htab->extra_elems = pptr; 213 218 return 0; ··· 355 348 raw_spin_lock_init(&htab->buckets[i].lock); 356 349 } 357 350 358 - if (!percpu && !lru) { 359 - /* lru itself can remove the least used element, so 360 - * there is no need for an extra elem during map_update. 361 - */ 362 - err = alloc_extra_elems(htab); 363 - if (err) 364 - goto free_buckets; 365 - } 366 - 367 351 if (prealloc) { 368 352 err = prealloc_init(htab); 369 353 if (err) 370 - goto free_extra_elems; 354 + goto free_buckets; 355 + 356 + if (!percpu && !lru) { 357 + /* lru itself can remove the least used element, so 358 + * there is no need for an extra elem during map_update. 
359 + */ 360 + err = alloc_extra_elems(htab); 361 + if (err) 362 + goto free_prealloc; 363 + } 371 364 } 372 365 373 366 return &htab->map; 374 367 375 - free_extra_elems: 376 - free_percpu(htab->extra_elems); 368 + free_prealloc: 369 + prealloc_destroy(htab); 377 370 free_buckets: 378 371 bpf_map_area_free(htab->buckets); 379 372 free_htab: ··· 624 617 map->ops->map_fd_put_ptr(ptr); 625 618 } 626 619 627 - if (l->state == HTAB_EXTRA_ELEM_USED) { 628 - l->state = HTAB_EXTRA_ELEM_FREE; 629 - return; 630 - } 631 - 632 - if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) { 620 + if (htab_is_prealloc(htab)) { 633 621 pcpu_freelist_push(&htab->freelist, &l->fnode); 634 622 } else { 635 623 atomic_dec(&htab->count); ··· 654 652 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, 655 653 void *value, u32 key_size, u32 hash, 656 654 bool percpu, bool onallcpus, 657 - bool old_elem_exists) 655 + struct htab_elem *old_elem) 658 656 { 659 657 u32 size = htab->map.value_size; 660 - bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC); 661 - struct htab_elem *l_new; 658 + bool prealloc = htab_is_prealloc(htab); 659 + struct htab_elem *l_new, **pl_new; 662 660 void __percpu *pptr; 663 - int err = 0; 664 661 665 662 if (prealloc) { 666 - struct pcpu_freelist_node *l; 667 - 668 - l = pcpu_freelist_pop(&htab->freelist); 669 - if (!l) 670 - err = -E2BIG; 671 - else 672 - l_new = container_of(l, struct htab_elem, fnode); 673 - } else { 674 - if (atomic_inc_return(&htab->count) > htab->map.max_entries) { 675 - atomic_dec(&htab->count); 676 - err = -E2BIG; 663 + if (old_elem) { 664 + /* if we're updating the existing element, 665 + * use per-cpu extra elems to avoid freelist_pop/push 666 + */ 667 + pl_new = this_cpu_ptr(htab->extra_elems); 668 + l_new = *pl_new; 669 + *pl_new = old_elem; 677 670 } else { 678 - l_new = kmalloc(htab->elem_size, 679 - GFP_ATOMIC | __GFP_NOWARN); 680 - if (!l_new) 681 - return ERR_PTR(-ENOMEM); 671 + struct pcpu_freelist_node *l; 
672 + 673 + l = pcpu_freelist_pop(&htab->freelist); 674 + if (!l) 675 + return ERR_PTR(-E2BIG); 676 + l_new = container_of(l, struct htab_elem, fnode); 682 677 } 683 - } 684 - 685 - if (err) { 686 - if (!old_elem_exists) 687 - return ERR_PTR(err); 688 - 689 - /* if we're updating the existing element and the hash table 690 - * is full, use per-cpu extra elems 691 - */ 692 - l_new = this_cpu_ptr(htab->extra_elems); 693 - if (l_new->state != HTAB_EXTRA_ELEM_FREE) 694 - return ERR_PTR(-E2BIG); 695 - l_new->state = HTAB_EXTRA_ELEM_USED; 696 678 } else { 697 - l_new->state = HTAB_NOT_AN_EXTRA_ELEM; 679 + if (atomic_inc_return(&htab->count) > htab->map.max_entries) 680 + if (!old_elem) { 681 + /* when map is full and update() is replacing 682 + * old element, it's ok to allocate, since 683 + * old element will be freed immediately. 684 + * Otherwise return an error 685 + */ 686 + atomic_dec(&htab->count); 687 + return ERR_PTR(-E2BIG); 688 + } 689 + l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN); 690 + if (!l_new) 691 + return ERR_PTR(-ENOMEM); 698 692 } 699 693 700 694 memcpy(l_new->key, key, key_size); ··· 771 773 goto err; 772 774 773 775 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, 774 - !!l_old); 776 + l_old); 775 777 if (IS_ERR(l_new)) { 776 778 /* all pre-allocated elements are in use or memory exhausted */ 777 779 ret = PTR_ERR(l_new); ··· 784 786 hlist_nulls_add_head_rcu(&l_new->hash_node, head); 785 787 if (l_old) { 786 788 hlist_nulls_del_rcu(&l_old->hash_node); 787 - free_htab_elem(htab, l_old); 789 + if (!htab_is_prealloc(htab)) 790 + free_htab_elem(htab, l_old); 788 791 } 789 792 ret = 0; 790 793 err: ··· 897 898 value, onallcpus); 898 899 } else { 899 900 l_new = alloc_htab_elem(htab, key, value, key_size, 900 - hash, true, onallcpus, false); 901 + hash, true, onallcpus, NULL); 901 902 if (IS_ERR(l_new)) { 902 903 ret = PTR_ERR(l_new); 903 904 goto err; ··· 1065 1066 1066 1067 hlist_nulls_for_each_entry_safe(l, n, 
head, hash_node) { 1067 1068 hlist_nulls_del_rcu(&l->hash_node); 1068 - if (l->state != HTAB_EXTRA_ELEM_USED) 1069 - htab_elem_free(htab, l); 1069 + htab_elem_free(htab, l); 1070 1070 } 1071 1071 } 1072 1072 } ··· 1086 1088 * not have executed. Wait for them. 1087 1089 */ 1088 1090 rcu_barrier(); 1089 - if (htab->map.map_flags & BPF_F_NO_PREALLOC) 1091 + if (!htab_is_prealloc(htab)) 1090 1092 delete_all_elements(htab); 1091 1093 else 1092 1094 prealloc_destroy(htab);
+14 -14
kernel/cpu.c
··· 1335 1335 struct cpuhp_step *sp; 1336 1336 int ret = 0; 1337 1337 1338 - mutex_lock(&cpuhp_state_mutex); 1339 - 1340 1338 if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) { 1341 1339 ret = cpuhp_reserve_state(state); 1342 1340 if (ret < 0) 1343 - goto out; 1341 + return ret; 1344 1342 state = ret; 1345 1343 } 1346 1344 sp = cpuhp_get_step(state); 1347 - if (name && sp->name) { 1348 - ret = -EBUSY; 1349 - goto out; 1350 - } 1345 + if (name && sp->name) 1346 + return -EBUSY; 1347 + 1351 1348 sp->startup.single = startup; 1352 1349 sp->teardown.single = teardown; 1353 1350 sp->name = name; 1354 1351 sp->multi_instance = multi_instance; 1355 1352 INIT_HLIST_HEAD(&sp->list); 1356 - out: 1357 - mutex_unlock(&cpuhp_state_mutex); 1358 1353 return ret; 1359 1354 } 1360 1355 ··· 1423 1428 return -EINVAL; 1424 1429 1425 1430 get_online_cpus(); 1431 + mutex_lock(&cpuhp_state_mutex); 1426 1432 1427 1433 if (!invoke || !sp->startup.multi) 1428 1434 goto add_node; ··· 1443 1447 if (ret) { 1444 1448 if (sp->teardown.multi) 1445 1449 cpuhp_rollback_install(cpu, state, node); 1446 - goto err; 1450 + goto unlock; 1447 1451 } 1448 1452 } 1449 1453 add_node: 1450 1454 ret = 0; 1451 - mutex_lock(&cpuhp_state_mutex); 1452 1455 hlist_add_head(node, &sp->list); 1456 + unlock: 1453 1457 mutex_unlock(&cpuhp_state_mutex); 1454 - 1455 - err: 1456 1458 put_online_cpus(); 1457 1459 return ret; 1458 1460 } ··· 1485 1491 return -EINVAL; 1486 1492 1487 1493 get_online_cpus(); 1494 + mutex_lock(&cpuhp_state_mutex); 1488 1495 1489 1496 ret = cpuhp_store_callbacks(state, name, startup, teardown, 1490 1497 multi_instance); ··· 1519 1524 } 1520 1525 } 1521 1526 out: 1527 + mutex_unlock(&cpuhp_state_mutex); 1522 1528 put_online_cpus(); 1523 1529 /* 1524 1530 * If the requested state is CPUHP_AP_ONLINE_DYN, return the ··· 1543 1547 return -EINVAL; 1544 1548 1545 1549 get_online_cpus(); 1550 + mutex_lock(&cpuhp_state_mutex); 1551 + 1546 1552 if (!invoke || 
!cpuhp_get_teardown_cb(state)) 1547 1553 goto remove; 1548 1554 /* ··· 1561 1563 } 1562 1564 1563 1565 remove: 1564 - mutex_lock(&cpuhp_state_mutex); 1565 1566 hlist_del(node); 1566 1567 mutex_unlock(&cpuhp_state_mutex); 1567 1568 put_online_cpus(); ··· 1568 1571 return 0; 1569 1572 } 1570 1573 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance); 1574 + 1571 1575 /** 1572 1576 * __cpuhp_remove_state - Remove the callbacks for an hotplug machine state 1573 1577 * @state: The state to remove ··· 1587 1589 1588 1590 get_online_cpus(); 1589 1591 1592 + mutex_lock(&cpuhp_state_mutex); 1590 1593 if (sp->multi_instance) { 1591 1594 WARN(!hlist_empty(&sp->list), 1592 1595 "Error: Removing state %d which has instances left.\n", ··· 1612 1613 } 1613 1614 remove: 1614 1615 cpuhp_store_callbacks(state, NULL, NULL, NULL, false); 1616 + mutex_unlock(&cpuhp_state_mutex); 1615 1617 put_online_cpus(); 1616 1618 } 1617 1619 EXPORT_SYMBOL(__cpuhp_remove_state);
+48 -16
kernel/events/core.c
··· 4256 4256 4257 4257 raw_spin_lock_irq(&ctx->lock); 4258 4258 /* 4259 - * Mark this even as STATE_DEAD, there is no external reference to it 4259 + * Mark this event as STATE_DEAD, there is no external reference to it 4260 4260 * anymore. 4261 4261 * 4262 4262 * Anybody acquiring event->child_mutex after the below loop _must_ ··· 10417 10417 continue; 10418 10418 10419 10419 mutex_lock(&ctx->mutex); 10420 - again: 10421 - list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, 10422 - group_entry) 10423 - perf_free_event(event, ctx); 10420 + raw_spin_lock_irq(&ctx->lock); 10421 + /* 10422 + * Destroy the task <-> ctx relation and mark the context dead. 10423 + * 10424 + * This is important because even though the task hasn't been 10425 + * exposed yet the context has been (through child_list). 10426 + */ 10427 + RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL); 10428 + WRITE_ONCE(ctx->task, TASK_TOMBSTONE); 10429 + put_task_struct(task); /* cannot be last */ 10430 + raw_spin_unlock_irq(&ctx->lock); 10424 10431 10425 - list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, 10426 - group_entry) 10432 + list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) 10427 10433 perf_free_event(event, ctx); 10428 - 10429 - if (!list_empty(&ctx->pinned_groups) || 10430 - !list_empty(&ctx->flexible_groups)) 10431 - goto again; 10432 10434 10433 10435 mutex_unlock(&ctx->mutex); 10434 - 10435 10436 put_ctx(ctx); 10436 10437 } 10437 10438 } ··· 10470 10469 } 10471 10470 10472 10471 /* 10473 - * inherit a event from parent task to child task: 10472 + * Inherit a event from parent task to child task. 10473 + * 10474 + * Returns: 10475 + * - valid pointer on success 10476 + * - NULL for orphaned events 10477 + * - IS_ERR() on error 10474 10478 */ 10475 10479 static struct perf_event * 10476 10480 inherit_event(struct perf_event *parent_event, ··· 10569 10563 return child_event; 10570 10564 } 10571 10565 10566 + /* 10567 + * Inherits an event group. 
10568 + * 10569 + * This will quietly suppress orphaned events; !inherit_event() is not an error. 10570 + * This matches with perf_event_release_kernel() removing all child events. 10571 + * 10572 + * Returns: 10573 + * - 0 on success 10574 + * - <0 on error 10575 + */ 10572 10576 static int inherit_group(struct perf_event *parent_event, 10573 10577 struct task_struct *parent, 10574 10578 struct perf_event_context *parent_ctx, ··· 10593 10577 child, NULL, child_ctx); 10594 10578 if (IS_ERR(leader)) 10595 10579 return PTR_ERR(leader); 10580 + /* 10581 + * @leader can be NULL here because of is_orphaned_event(). In this 10582 + * case inherit_event() will create individual events, similar to what 10583 + * perf_group_detach() would do anyway. 10584 + */ 10596 10585 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { 10597 10586 child_ctr = inherit_event(sub, parent, parent_ctx, 10598 10587 child, leader, child_ctx); ··· 10607 10586 return 0; 10608 10587 } 10609 10588 10589 + /* 10590 + * Creates the child task context and tries to inherit the event-group. 10591 + * 10592 + * Clears @inherited_all on !attr.inherited or error. Note that we'll leave 10593 + * inherited_all set when we 'fail' to inherit an orphaned event; this is 10594 + * consistent with perf_event_release_kernel() removing all child events. 10595 + * 10596 + * Returns: 10597 + * - 0 on success 10598 + * - <0 on error 10599 + */ 10610 10600 static int 10611 10601 inherit_task_group(struct perf_event *event, struct task_struct *parent, 10612 10602 struct perf_event_context *parent_ctx, ··· 10640 10608 * First allocate and initialize a context for the 10641 10609 * child. 
10642 10610 */ 10643 - 10644 10611 child_ctx = alloc_perf_context(parent_ctx->pmu, child); 10645 10612 if (!child_ctx) 10646 10613 return -ENOMEM; ··· 10701 10670 ret = inherit_task_group(event, parent, parent_ctx, 10702 10671 child, ctxn, &inherited_all); 10703 10672 if (ret) 10704 - break; 10673 + goto out_unlock; 10705 10674 } 10706 10675 10707 10676 /* ··· 10717 10686 ret = inherit_task_group(event, parent, parent_ctx, 10718 10687 child, ctxn, &inherited_all); 10719 10688 if (ret) 10720 - break; 10689 + goto out_unlock; 10721 10690 } 10722 10691 10723 10692 raw_spin_lock_irqsave(&parent_ctx->lock, flags); ··· 10745 10714 } 10746 10715 10747 10716 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); 10717 + out_unlock: 10748 10718 mutex_unlock(&parent_ctx->mutex); 10749 10719 10750 10720 perf_unpin_context(parent_ctx);
+13 -9
kernel/futex.c
··· 2815 2815 { 2816 2816 struct hrtimer_sleeper timeout, *to = NULL; 2817 2817 struct rt_mutex_waiter rt_waiter; 2818 - struct rt_mutex *pi_mutex = NULL; 2819 2818 struct futex_hash_bucket *hb; 2820 2819 union futex_key key2 = FUTEX_KEY_INIT; 2821 2820 struct futex_q q = futex_q_init; ··· 2898 2899 if (q.pi_state && (q.pi_state->owner != current)) { 2899 2900 spin_lock(q.lock_ptr); 2900 2901 ret = fixup_pi_state_owner(uaddr2, &q, current); 2902 + if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) 2903 + rt_mutex_unlock(&q.pi_state->pi_mutex); 2901 2904 /* 2902 2905 * Drop the reference to the pi state which 2903 2906 * the requeue_pi() code acquired for us. ··· 2908 2907 spin_unlock(q.lock_ptr); 2909 2908 } 2910 2909 } else { 2910 + struct rt_mutex *pi_mutex; 2911 + 2911 2912 /* 2912 2913 * We have been woken up by futex_unlock_pi(), a timeout, or a 2913 2914 * signal. futex_unlock_pi() will not destroy the lock_ptr nor ··· 2933 2930 if (res) 2934 2931 ret = (res < 0) ? res : 0; 2935 2932 2933 + /* 2934 + * If fixup_pi_state_owner() faulted and was unable to handle 2935 + * the fault, unlock the rt_mutex and return the fault to 2936 + * userspace. 2937 + */ 2938 + if (ret && rt_mutex_owner(pi_mutex) == current) 2939 + rt_mutex_unlock(pi_mutex); 2940 + 2936 2941 /* Unqueue and drop the lock. */ 2937 2942 unqueue_me_pi(&q); 2938 2943 } 2939 2944 2940 - /* 2941 - * If fixup_pi_state_owner() faulted and was unable to handle the 2942 - * fault, unlock the rt_mutex and return the fault to userspace. 2943 - */ 2944 - if (ret == -EFAULT) { 2945 - if (pi_mutex && rt_mutex_owner(pi_mutex) == current) 2946 - rt_mutex_unlock(pi_mutex); 2947 - } else if (ret == -EINTR) { 2945 + if (ret == -EINTR) { 2948 2946 /* 2949 2947 * We've already been requeued, but cannot restart by calling 2950 2948 * futex_lock_pi() directly. We could restart this syscall, but
+11 -5
kernel/locking/rwsem-spinlock.c
··· 213 213 */ 214 214 if (sem->count == 0) 215 215 break; 216 - if (signal_pending_state(state, current)) { 217 - ret = -EINTR; 218 - goto out; 219 - } 216 + if (signal_pending_state(state, current)) 217 + goto out_nolock; 218 + 220 219 set_current_state(state); 221 220 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 222 221 schedule(); ··· 223 224 } 224 225 /* got the lock */ 225 226 sem->count = -1; 226 - out: 227 227 list_del(&waiter.list); 228 228 229 229 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 230 230 231 231 return ret; 232 + 233 + out_nolock: 234 + list_del(&waiter.list); 235 + if (!list_empty(&sem->wait_list)) 236 + __rwsem_do_wake(sem, 1); 237 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 238 + 239 + return -EINTR; 232 240 } 233 241 234 242 void __sched __down_write(struct rw_semaphore *sem)
-4
kernel/memremap.c
··· 247 247 align_start = res->start & ~(SECTION_SIZE - 1); 248 248 align_size = ALIGN(resource_size(res), SECTION_SIZE); 249 249 250 - lock_device_hotplug(); 251 250 mem_hotplug_begin(); 252 251 arch_remove_memory(align_start, align_size); 253 252 mem_hotplug_done(); 254 - unlock_device_hotplug(); 255 253 256 254 untrack_pfn(NULL, PHYS_PFN(align_start), align_size); 257 255 pgmap_radix_release(res); ··· 362 364 if (error) 363 365 goto err_pfn_remap; 364 366 365 - lock_device_hotplug(); 366 367 mem_hotplug_begin(); 367 368 error = arch_add_memory(nid, align_start, align_size, true); 368 369 mem_hotplug_done(); 369 - unlock_device_hotplug(); 370 370 if (error) 371 371 goto err_add_memory; 372 372
+57 -6
kernel/sched/deadline.c
··· 445 445 * 446 446 * This function returns true if: 447 447 * 448 - * runtime / (deadline - t) > dl_runtime / dl_period , 448 + * runtime / (deadline - t) > dl_runtime / dl_deadline , 449 449 * 450 450 * IOW we can't recycle current parameters. 451 451 * 452 - * Notice that the bandwidth check is done against the period. For 452 + * Notice that the bandwidth check is done against the deadline. For 453 453 * task with deadline equal to period this is the same of using 454 - * dl_deadline instead of dl_period in the equation above. 454 + * dl_period instead of dl_deadline in the equation above. 455 455 */ 456 456 static bool dl_entity_overflow(struct sched_dl_entity *dl_se, 457 457 struct sched_dl_entity *pi_se, u64 t) ··· 476 476 * of anything below microseconds resolution is actually fiction 477 477 * (but still we want to give the user that illusion >;). 478 478 */ 479 - left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE); 479 + left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE); 480 480 right = ((dl_se->deadline - t) >> DL_SCALE) * 481 481 (pi_se->dl_runtime >> DL_SCALE); 482 482 ··· 505 505 } 506 506 } 507 507 508 + static inline u64 dl_next_period(struct sched_dl_entity *dl_se) 509 + { 510 + return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period; 511 + } 512 + 508 513 /* 509 514 * If the entity depleted all its runtime, and if we want it to sleep 510 515 * while waiting for some new execution time to become available, we 511 - * set the bandwidth enforcement timer to the replenishment instant 516 + * set the bandwidth replenishment timer to the replenishment instant 512 517 * and try to activate it. 513 518 * 514 519 * Notice that it is important for the caller to know if the timer ··· 535 530 * that it is actually coming from rq->clock and not from 536 531 * hrtimer's time base reading. 
537 532 */ 538 - act = ns_to_ktime(dl_se->deadline); 533 + act = ns_to_ktime(dl_next_period(dl_se)); 539 534 now = hrtimer_cb_get_time(timer); 540 535 delta = ktime_to_ns(now) - rq_clock(rq); 541 536 act = ktime_add_ns(act, delta); ··· 643 638 lockdep_unpin_lock(&rq->lock, rf.cookie); 644 639 rq = dl_task_offline_migration(rq, p); 645 640 rf.cookie = lockdep_pin_lock(&rq->lock); 641 + update_rq_clock(rq); 646 642 647 643 /* 648 644 * Now that the task has been migrated to the new RQ and we ··· 693 687 694 688 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 695 689 timer->function = dl_task_timer; 690 + } 691 + 692 + /* 693 + * During the activation, CBS checks if it can reuse the current task's 694 + * runtime and period. If the deadline of the task is in the past, CBS 695 + * cannot use the runtime, and so it replenishes the task. This rule 696 + * works fine for implicit deadline tasks (deadline == period), and the 697 + * CBS was designed for implicit deadline tasks. However, a task with 698 + * constrained deadline (deadine < period) might be awakened after the 699 + * deadline, but before the next period. In this case, replenishing the 700 + * task would allow it to run for runtime / deadline. As in this case 701 + * deadline < period, CBS enables a task to run for more than the 702 + * runtime / period. In a very loaded system, this can cause a domino 703 + * effect, making other tasks miss their deadlines. 704 + * 705 + * To avoid this problem, in the activation of a constrained deadline 706 + * task after the deadline but before the next period, throttle the 707 + * task and set the replenishing timer to the begin of the next period, 708 + * unless it is boosted. 
709 + */ 710 + static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se) 711 + { 712 + struct task_struct *p = dl_task_of(dl_se); 713 + struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se)); 714 + 715 + if (dl_time_before(dl_se->deadline, rq_clock(rq)) && 716 + dl_time_before(rq_clock(rq), dl_next_period(dl_se))) { 717 + if (unlikely(dl_se->dl_boosted || !start_dl_timer(p))) 718 + return; 719 + dl_se->dl_throttled = 1; 720 + } 696 721 } 697 722 698 723 static ··· 959 922 __dequeue_dl_entity(dl_se); 960 923 } 961 924 925 + static inline bool dl_is_constrained(struct sched_dl_entity *dl_se) 926 + { 927 + return dl_se->dl_deadline < dl_se->dl_period; 928 + } 929 + 962 930 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) 963 931 { 964 932 struct task_struct *pi_task = rt_mutex_get_top_task(p); ··· 988 946 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH); 989 947 return; 990 948 } 949 + 950 + /* 951 + * Check if a constrained deadline task was activated 952 + * after the deadline but before the next period. 953 + * If that is the case, the task will be throttled and 954 + * the replenishment timer will be set to the next period. 955 + */ 956 + if (!p->dl.dl_throttled && dl_is_constrained(&p->dl)) 957 + dl_check_constrained_dl(&p->dl); 991 958 992 959 /* 993 960 * If p is throttled, we do nothing. In fact, if it exhausted
+12 -8
kernel/sched/loadavg.c
··· 169 169 * If the folding window started, make sure we start writing in the 170 170 * next idle-delta. 171 171 */ 172 - if (!time_before(jiffies, calc_load_update)) 172 + if (!time_before(jiffies, READ_ONCE(calc_load_update))) 173 173 idx++; 174 174 175 175 return idx & 1; ··· 202 202 struct rq *this_rq = this_rq(); 203 203 204 204 /* 205 - * If we're still before the sample window, we're done. 205 + * If we're still before the pending sample window, we're done. 206 206 */ 207 + this_rq->calc_load_update = READ_ONCE(calc_load_update); 207 208 if (time_before(jiffies, this_rq->calc_load_update)) 208 209 return; 209 210 ··· 213 212 * accounted through the nohz accounting, so skip the entire deal and 214 213 * sync up for the next window. 215 214 */ 216 - this_rq->calc_load_update = calc_load_update; 217 215 if (time_before(jiffies, this_rq->calc_load_update + 10)) 218 216 this_rq->calc_load_update += LOAD_FREQ; 219 217 } ··· 308 308 */ 309 309 static void calc_global_nohz(void) 310 310 { 311 + unsigned long sample_window; 311 312 long delta, active, n; 312 313 313 - if (!time_before(jiffies, calc_load_update + 10)) { 314 + sample_window = READ_ONCE(calc_load_update); 315 + if (!time_before(jiffies, sample_window + 10)) { 314 316 /* 315 317 * Catch-up, fold however many we are behind still 316 318 */ 317 - delta = jiffies - calc_load_update - 10; 319 + delta = jiffies - sample_window - 10; 318 320 n = 1 + (delta / LOAD_FREQ); 319 321 320 322 active = atomic_long_read(&calc_load_tasks); ··· 326 324 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); 327 325 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); 328 326 329 - calc_load_update += n * LOAD_FREQ; 327 + WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ); 330 328 } 331 329 332 330 /* ··· 354 352 */ 355 353 void calc_global_load(unsigned long ticks) 356 354 { 355 + unsigned long sample_window; 357 356 long active, delta; 358 357 359 - if (time_before(jiffies, calc_load_update + 10)) 358 + 
sample_window = READ_ONCE(calc_load_update); 359 + if (time_before(jiffies, sample_window + 10)) 360 360 return; 361 361 362 362 /* ··· 375 371 avenrun[1] = calc_load(avenrun[1], EXP_5, active); 376 372 avenrun[2] = calc_load(avenrun[2], EXP_15, active); 377 373 378 - calc_load_update += LOAD_FREQ; 374 + WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ); 379 375 380 376 /* 381 377 * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
+5 -1
mm/memory_hotplug.c
··· 125 125 126 126 } 127 127 128 + /* Serializes write accesses to mem_hotplug.active_writer. */ 129 + static DEFINE_MUTEX(memory_add_remove_lock); 130 + 128 131 void mem_hotplug_begin(void) 129 132 { 130 - assert_held_device_hotplug(); 133 + mutex_lock(&memory_add_remove_lock); 131 134 132 135 mem_hotplug.active_writer = current; 133 136 ··· 150 147 mem_hotplug.active_writer = NULL; 151 148 mutex_unlock(&mem_hotplug.lock); 152 149 memhp_lock_release(); 150 + mutex_unlock(&memory_add_remove_lock); 153 151 } 154 152 155 153 /* add this memory to iomem resource */
-2
mm/swap_slots.c
··· 267 267 { 268 268 struct swap_slots_cache *cache; 269 269 270 - BUG_ON(!swap_slot_cache_initialized); 271 - 272 270 cache = &get_cpu_var(swp_slots); 273 271 if (use_swap_slot_cache && cache->slots_ret) { 274 272 spin_lock_irq(&cache->free_lock);
+2 -1
mm/vmalloc.c
··· 1683 1683 1684 1684 if (fatal_signal_pending(current)) { 1685 1685 area->nr_pages = i; 1686 - goto fail; 1686 + goto fail_no_warn; 1687 1687 } 1688 1688 1689 1689 if (node == NUMA_NO_NODE) ··· 1709 1709 warn_alloc(gfp_mask, NULL, 1710 1710 "vmalloc: allocation failure, allocated %ld of %ld bytes", 1711 1711 (area->nr_pages*PAGE_SIZE), area->size); 1712 + fail_no_warn: 1712 1713 vfree(area->addr); 1713 1714 return NULL; 1714 1715 }
+1
mm/z3fold.c
··· 667 667 z3fold_page_unlock(zhdr); 668 668 spin_lock(&pool->lock); 669 669 if (kref_put(&zhdr->refcount, release_z3fold_page)) { 670 + spin_unlock(&pool->lock); 670 671 atomic64_dec(&pool->pages_nr); 671 672 return 0; 672 673 }
+11
net/batman-adv/bat_iv_ogm.c
··· 2477 2477 batadv_iv_ogm_schedule(hard_iface); 2478 2478 } 2479 2479 2480 + /** 2481 + * batadv_iv_init_sel_class - initialize GW selection class 2482 + * @bat_priv: the bat priv with all the soft interface information 2483 + */ 2484 + static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv) 2485 + { 2486 + /* set default TQ difference threshold to 20 */ 2487 + atomic_set(&bat_priv->gw.sel_class, 20); 2488 + } 2489 + 2480 2490 static struct batadv_gw_node * 2481 2491 batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv) 2482 2492 { ··· 2833 2823 .del_if = batadv_iv_ogm_orig_del_if, 2834 2824 }, 2835 2825 .gw = { 2826 + .init_sel_class = batadv_iv_init_sel_class, 2836 2827 .get_best_gw_node = batadv_iv_gw_get_best_gw_node, 2837 2828 .is_eligible = batadv_iv_gw_is_eligible, 2838 2829 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+11 -3
net/batman-adv/bat_v.c
··· 668 668 return ret; 669 669 } 670 670 671 + /** 672 + * batadv_v_init_sel_class - initialize GW selection class 673 + * @bat_priv: the bat priv with all the soft interface information 674 + */ 675 + static void batadv_v_init_sel_class(struct batadv_priv *bat_priv) 676 + { 677 + /* set default throughput difference threshold to 5Mbps */ 678 + atomic_set(&bat_priv->gw.sel_class, 50); 679 + } 680 + 671 681 static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv, 672 682 char *buff, size_t count) 673 683 { ··· 1062 1052 .dump = batadv_v_orig_dump, 1063 1053 }, 1064 1054 .gw = { 1055 + .init_sel_class = batadv_v_init_sel_class, 1065 1056 .store_sel_class = batadv_v_store_sel_class, 1066 1057 .show_sel_class = batadv_v_show_sel_class, 1067 1058 .get_best_gw_node = batadv_v_gw_get_best_gw_node, ··· 1102 1091 ret = batadv_v_ogm_init(bat_priv); 1103 1092 if (ret < 0) 1104 1093 return ret; 1105 - 1106 - /* set default throughput difference threshold to 5Mbps */ 1107 - atomic_set(&bat_priv->gw.sel_class, 50); 1108 1094 1109 1095 return 0; 1110 1096 }
+13 -7
net/batman-adv/fragmentation.c
··· 404 404 * batadv_frag_create - create a fragment from skb 405 405 * @skb: skb to create fragment from 406 406 * @frag_head: header to use in new fragment 407 - * @mtu: size of new fragment 407 + * @fragment_size: size of new fragment 408 408 * 409 409 * Split the passed skb into two fragments: A new one with size matching the 410 410 * passed mtu and the old one with the rest. The new skb contains data from the ··· 414 414 */ 415 415 static struct sk_buff *batadv_frag_create(struct sk_buff *skb, 416 416 struct batadv_frag_packet *frag_head, 417 - unsigned int mtu) 417 + unsigned int fragment_size) 418 418 { 419 419 struct sk_buff *skb_fragment; 420 420 unsigned int header_size = sizeof(*frag_head); 421 - unsigned int fragment_size = mtu - header_size; 421 + unsigned int mtu = fragment_size + header_size; 422 422 423 423 skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN); 424 424 if (!skb_fragment) ··· 456 456 struct sk_buff *skb_fragment; 457 457 unsigned int mtu = neigh_node->if_incoming->net_dev->mtu; 458 458 unsigned int header_size = sizeof(frag_header); 459 - unsigned int max_fragment_size, max_packet_size; 459 + unsigned int max_fragment_size, num_fragments; 460 460 int ret; 461 461 462 462 /* To avoid merge and refragmentation at next-hops we never send ··· 464 464 */ 465 465 mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE); 466 466 max_fragment_size = mtu - header_size; 467 - max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS; 467 + 468 + if (skb->len == 0 || max_fragment_size == 0) 469 + return -EINVAL; 470 + 471 + num_fragments = (skb->len - 1) / max_fragment_size + 1; 472 + max_fragment_size = (skb->len - 1) / num_fragments + 1; 468 473 469 474 /* Don't even try to fragment, if we need more than 16 fragments */ 470 - if (skb->len > max_packet_size) { 475 + if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) { 471 476 ret = -EAGAIN; 472 477 goto free_skb; 473 478 } ··· 512 507 goto put_primary_if; 513 508 } 514 509 515 - 
skb_fragment = batadv_frag_create(skb, &frag_header, mtu); 510 + skb_fragment = batadv_frag_create(skb, &frag_header, 511 + max_fragment_size); 516 512 if (!skb_fragment) { 517 513 ret = -ENOMEM; 518 514 goto put_primary_if;
+5
net/batman-adv/gateway_common.c
··· 253 253 */ 254 254 void batadv_gw_init(struct batadv_priv *bat_priv) 255 255 { 256 + if (bat_priv->algo_ops->gw.init_sel_class) 257 + bat_priv->algo_ops->gw.init_sel_class(bat_priv); 258 + else 259 + atomic_set(&bat_priv->gw.sel_class, 1); 260 + 256 261 batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1, 257 262 NULL, BATADV_TVLV_GW, 1, 258 263 BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
-1
net/batman-adv/soft-interface.c
··· 819 819 atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0); 820 820 #endif 821 821 atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF); 822 - atomic_set(&bat_priv->gw.sel_class, 20); 823 822 atomic_set(&bat_priv->gw.bandwidth_down, 100); 824 823 atomic_set(&bat_priv->gw.bandwidth_up, 20); 825 824 atomic_set(&bat_priv->orig_interval, 1000);
+2
net/batman-adv/types.h
··· 1489 1489 1490 1490 /** 1491 1491 * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific) 1492 + * @init_sel_class: initialize GW selection class (optional) 1492 1493 * @store_sel_class: parse and stores a new GW selection class (optional) 1493 1494 * @show_sel_class: prints the current GW selection class (optional) 1494 1495 * @get_best_gw_node: select the best GW from the list of available nodes ··· 1500 1499 * @dump: dump gateways to a netlink socket (optional) 1501 1500 */ 1502 1501 struct batadv_algo_gw_ops { 1502 + void (*init_sel_class)(struct batadv_priv *bat_priv); 1503 1503 ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff, 1504 1504 size_t count); 1505 1505 ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
+1 -1
net/bridge/br_fdb.c
··· 106 106 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; 107 107 struct net_bridge_fdb_entry *fdb; 108 108 109 - WARN_ON_ONCE(!br_hash_lock_held(br)); 109 + lockdep_assert_held_once(&br->hash_lock); 110 110 111 111 rcu_read_lock(); 112 112 fdb = fdb_find_rcu(head, addr, vid);
+7 -5
net/bridge/br_netfilter_hooks.c
··· 706 706 707 707 static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 708 708 { 709 - struct nf_bridge_info *nf_bridge; 710 - unsigned int mtu_reserved; 709 + struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); 710 + unsigned int mtu, mtu_reserved; 711 711 712 712 mtu_reserved = nf_bridge_mtu_reduction(skb); 713 + mtu = skb->dev->mtu; 713 714 714 - if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) { 715 + if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu) 716 + mtu = nf_bridge->frag_max_size; 717 + 718 + if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) { 715 719 nf_bridge_info_free(skb); 716 720 return br_dev_queue_push_xmit(net, sk, skb); 717 721 } 718 - 719 - nf_bridge = nf_bridge_info_get(skb); 720 722 721 723 /* This is wrong! We should preserve the original fragment 722 724 * boundaries by preserving frag_list rather than refragmenting.
-9
net/bridge/br_private.h
··· 531 531 int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p, 532 532 const unsigned char *addr, u16 vid); 533 533 534 - static inline bool br_hash_lock_held(struct net_bridge *br) 535 - { 536 - #ifdef CONFIG_LOCKDEP 537 - return lockdep_is_held(&br->hash_lock); 538 - #else 539 - return true; 540 - #endif 541 - } 542 - 543 534 /* br_forward.c */ 544 535 enum br_pkt_type { 545 536 BR_PKT_UNICAST,
+18 -18
net/core/netclassid_cgroup.c
··· 71 71 return 0; 72 72 } 73 73 74 - static void update_classid(struct cgroup_subsys_state *css, void *v) 75 - { 76 - struct css_task_iter it; 77 - struct task_struct *p; 78 - 79 - css_task_iter_start(css, &it); 80 - while ((p = css_task_iter_next(&it))) { 81 - task_lock(p); 82 - iterate_fd(p->files, 0, update_classid_sock, v); 83 - task_unlock(p); 84 - } 85 - css_task_iter_end(&it); 86 - } 87 - 88 74 static void cgrp_attach(struct cgroup_taskset *tset) 89 75 { 90 76 struct cgroup_subsys_state *css; 77 + struct task_struct *p; 91 78 92 - cgroup_taskset_first(tset, &css); 93 - update_classid(css, 94 - (void *)(unsigned long)css_cls_state(css)->classid); 79 + cgroup_taskset_for_each(p, css, tset) { 80 + task_lock(p); 81 + iterate_fd(p->files, 0, update_classid_sock, 82 + (void *)(unsigned long)css_cls_state(css)->classid); 83 + task_unlock(p); 84 + } 95 85 } 96 86 97 87 static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) ··· 93 103 u64 value) 94 104 { 95 105 struct cgroup_cls_state *cs = css_cls_state(css); 106 + struct css_task_iter it; 107 + struct task_struct *p; 96 108 97 109 cgroup_sk_alloc_disable(); 98 110 99 111 cs->classid = (u32)value; 100 112 101 - update_classid(css, (void *)(unsigned long)cs->classid); 113 + css_task_iter_start(css, &it); 114 + while ((p = css_task_iter_next(&it))) { 115 + task_lock(p); 116 + iterate_fd(p->files, 0, update_classid_sock, 117 + (void *)(unsigned long)cs->classid); 118 + task_unlock(p); 119 + } 120 + css_task_iter_end(&it); 121 + 102 122 return 0; 103 123 } 104 124
+21 -6
net/core/skbuff.c
··· 3694 3694 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3695 3695 } 3696 3696 3697 + static void skb_set_err_queue(struct sk_buff *skb) 3698 + { 3699 + /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. 3700 + * So, it is safe to (mis)use it to mark skbs on the error queue. 3701 + */ 3702 + skb->pkt_type = PACKET_OUTGOING; 3703 + BUILD_BUG_ON(PACKET_OUTGOING == 0); 3704 + } 3705 + 3697 3706 /* 3698 3707 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 3699 3708 */ ··· 3716 3707 skb->sk = sk; 3717 3708 skb->destructor = sock_rmem_free; 3718 3709 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3710 + skb_set_err_queue(skb); 3719 3711 3720 3712 /* before exiting rcu section, make sure dst is refcounted */ 3721 3713 skb_dst_force(skb); ··· 3793 3783 3794 3784 static void __skb_complete_tx_timestamp(struct sk_buff *skb, 3795 3785 struct sock *sk, 3796 - int tstype) 3786 + int tstype, 3787 + bool opt_stats) 3797 3788 { 3798 3789 struct sock_exterr_skb *serr; 3799 3790 int err; 3791 + 3792 + BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); 3800 3793 3801 3794 serr = SKB_EXT_ERR(skb); 3802 3795 memset(serr, 0, sizeof(*serr)); 3803 3796 serr->ee.ee_errno = ENOMSG; 3804 3797 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 3805 3798 serr->ee.ee_info = tstype; 3799 + serr->opt_stats = opt_stats; 3806 3800 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 3807 3801 serr->ee.ee_data = skb_shinfo(skb)->tskey; 3808 3802 if (sk->sk_protocol == IPPROTO_TCP && ··· 3847 3833 */ 3848 3834 if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { 3849 3835 *skb_hwtstamps(skb) = *hwtstamps; 3850 - __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); 3836 + __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 3851 3837 sock_put(sk); 3852 3838 } 3853 3839 } ··· 3858 3844 struct sock *sk, int tstype) 3859 3845 { 3860 3846 struct sk_buff *skb; 3861 - bool tsonly; 3847 + bool tsonly, opt_stats = false; 3862 3848 3863 3849 if (!sk) 
3864 3850 return; ··· 3871 3857 #ifdef CONFIG_INET 3872 3858 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && 3873 3859 sk->sk_protocol == IPPROTO_TCP && 3874 - sk->sk_type == SOCK_STREAM) 3860 + sk->sk_type == SOCK_STREAM) { 3875 3861 skb = tcp_get_timestamping_opt_stats(sk); 3876 - else 3862 + opt_stats = true; 3863 + } else 3877 3864 #endif 3878 3865 skb = alloc_skb(0, GFP_ATOMIC); 3879 3866 } else { ··· 3893 3878 else 3894 3879 skb->tstamp = ktime_get_real(); 3895 3880 3896 - __skb_complete_tx_timestamp(skb, sk, tstype); 3881 + __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 3897 3882 } 3898 3883 EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 3899 3884
+11 -5
net/core/sock.c
··· 1511 1511 pr_debug("%s: optmem leakage (%d bytes) detected\n", 1512 1512 __func__, atomic_read(&sk->sk_omem_alloc)); 1513 1513 1514 + if (sk->sk_frag.page) { 1515 + put_page(sk->sk_frag.page); 1516 + sk->sk_frag.page = NULL; 1517 + } 1518 + 1514 1519 if (sk->sk_peer_cred) 1515 1520 put_cred(sk->sk_peer_cred); 1516 1521 put_pid(sk->sk_peer_pid); ··· 1627 1622 is_charged = sk_filter_charge(newsk, filter); 1628 1623 1629 1624 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { 1625 + /* We need to make sure that we don't uncharge the new 1626 + * socket if we couldn't charge it in the first place 1627 + * as otherwise we uncharge the parent's filter. 1628 + */ 1629 + if (!is_charged) 1630 + RCU_INIT_POINTER(newsk->sk_filter, NULL); 1630 1631 sk_free_unlock_clone(newsk); 1631 1632 newsk = NULL; 1632 1633 goto out; ··· 2877 2866 xfrm_sk_free_policy(sk); 2878 2867 2879 2868 sk_refcnt_debug_release(sk); 2880 - 2881 - if (sk->sk_frag.page) { 2882 - put_page(sk->sk_frag.page); 2883 - sk->sk_frag.page = NULL; 2884 - } 2885 2869 2886 2870 sock_put(sk); 2887 2871 }
+2 -1
net/ipv4/fib_frontend.c
··· 1083 1083 1084 1084 net = sock_net(skb->sk); 1085 1085 nlh = nlmsg_hdr(skb); 1086 - if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len || 1086 + if (skb->len < nlmsg_total_size(sizeof(*frn)) || 1087 + skb->len < nlh->nlmsg_len || 1087 1088 nlmsg_len(nlh) < sizeof(*frn)) 1088 1089 return; 1089 1090
+17 -8
net/ipv4/ip_fragment.c
··· 198 198 qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); 199 199 net = container_of(qp->q.net, struct net, ipv4.frags); 200 200 201 + rcu_read_lock(); 201 202 spin_lock(&qp->q.lock); 202 203 203 204 if (qp->q.flags & INET_FRAG_COMPLETE) ··· 208 207 __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); 209 208 210 209 if (!inet_frag_evicting(&qp->q)) { 211 - struct sk_buff *head = qp->q.fragments; 210 + struct sk_buff *clone, *head = qp->q.fragments; 212 211 const struct iphdr *iph; 213 212 int err; 214 213 ··· 217 216 if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments) 218 217 goto out; 219 218 220 - rcu_read_lock(); 221 219 head->dev = dev_get_by_index_rcu(net, qp->iif); 222 220 if (!head->dev) 223 - goto out_rcu_unlock; 221 + goto out; 222 + 224 223 225 224 /* skb has no dst, perform route lookup again */ 226 225 iph = ip_hdr(head); 227 226 err = ip_route_input_noref(head, iph->daddr, iph->saddr, 228 227 iph->tos, head->dev); 229 228 if (err) 230 - goto out_rcu_unlock; 229 + goto out; 231 230 232 231 /* Only an end host needs to send an ICMP 233 232 * "Fragment Reassembly Timeout" message, per RFC792. 234 233 */ 235 234 if (frag_expire_skip_icmp(qp->user) && 236 235 (skb_rtable(head)->rt_type != RTN_LOCAL)) 237 - goto out_rcu_unlock; 236 + goto out; 237 + 238 + clone = skb_clone(head, GFP_ATOMIC); 238 239 239 240 /* Send an ICMP "Fragment Reassembly Timeout" message. */ 240 - icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); 241 - out_rcu_unlock: 242 - rcu_read_unlock(); 241 + if (clone) { 242 + spin_unlock(&qp->q.lock); 243 + icmp_send(clone, ICMP_TIME_EXCEEDED, 244 + ICMP_EXC_FRAGTIME, 0); 245 + consume_skb(clone); 246 + goto out_rcu_unlock; 247 + } 243 248 } 244 249 out: 245 250 spin_unlock(&qp->q.lock); 251 + out_rcu_unlock: 252 + rcu_read_unlock(); 246 253 ipq_put(qp); 247 254 } 248 255
+4
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
··· 165 165 if (skb->len < sizeof(struct iphdr) || 166 166 ip_hdrlen(skb) < sizeof(struct iphdr)) 167 167 return NF_ACCEPT; 168 + 169 + if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */ 170 + return NF_ACCEPT; 171 + 168 172 return nf_conntrack_in(state->net, PF_INET, state->hook, skb); 169 173 } 170 174
-5
net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
··· 255 255 /* maniptype == SRC for postrouting. */ 256 256 enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook); 257 257 258 - /* We never see fragments: conntrack defrags on pre-routing 259 - * and local-out, and nf_nat_out protects post-routing. 260 - */ 261 - NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb))); 262 - 263 258 ct = nf_ct_get(skb, &ctinfo); 264 259 /* Can't track? It's not due to stress, or conntrack would 265 260 * have dropped it. Hence it's the user's responsibilty to
+4 -4
net/ipv4/netfilter/nft_masq_ipv4.c
··· 26 26 memset(&range, 0, sizeof(range)); 27 27 range.flags = priv->flags; 28 28 if (priv->sreg_proto_min) { 29 - range.min_proto.all = 30 - *(__be16 *)&regs->data[priv->sreg_proto_min]; 31 - range.max_proto.all = 32 - *(__be16 *)&regs->data[priv->sreg_proto_max]; 29 + range.min_proto.all = (__force __be16)nft_reg_load16( 30 + &regs->data[priv->sreg_proto_min]); 31 + range.max_proto.all = (__force __be16)nft_reg_load16( 32 + &regs->data[priv->sreg_proto_max]); 33 33 } 34 34 regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt), 35 35 &range, nft_out(pkt));
+4 -4
net/ipv4/netfilter/nft_redir_ipv4.c
··· 26 26 27 27 memset(&mr, 0, sizeof(mr)); 28 28 if (priv->sreg_proto_min) { 29 - mr.range[0].min.all = 30 - *(__be16 *)&regs->data[priv->sreg_proto_min]; 31 - mr.range[0].max.all = 32 - *(__be16 *)&regs->data[priv->sreg_proto_max]; 29 + mr.range[0].min.all = (__force __be16)nft_reg_load16( 30 + &regs->data[priv->sreg_proto_min]); 31 + mr.range[0].max.all = (__force __be16)nft_reg_load16( 32 + &regs->data[priv->sreg_proto_max]); 33 33 mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 34 34 } 35 35
+2 -1
net/ipv4/tcp.c
··· 2770 2770 { 2771 2771 const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ 2772 2772 const struct inet_connection_sock *icsk = inet_csk(sk); 2773 - u32 now = tcp_time_stamp, intv; 2773 + u32 now, intv; 2774 2774 u64 rate64; 2775 2775 bool slow; 2776 2776 u32 rate; ··· 2839 2839 info->tcpi_retrans = tp->retrans_out; 2840 2840 info->tcpi_fackets = tp->fackets_out; 2841 2841 2842 + now = tcp_time_stamp; 2842 2843 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 2843 2844 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); 2844 2845 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
+1 -1
net/ipv4/tcp_input.c
··· 5541 5541 struct inet_connection_sock *icsk = inet_csk(sk); 5542 5542 5543 5543 tcp_set_state(sk, TCP_ESTABLISHED); 5544 + icsk->icsk_ack.lrcvtime = tcp_time_stamp; 5544 5545 5545 5546 if (skb) { 5546 5547 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); ··· 5760 5759 * to stand against the temptation 8) --ANK 5761 5760 */ 5762 5761 inet_csk_schedule_ack(sk); 5763 - icsk->icsk_ack.lrcvtime = tcp_time_stamp; 5764 5762 tcp_enter_quickack_mode(sk); 5765 5763 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5766 5764 TCP_DELACK_MAX, TCP_RTO_MAX);
+1
net/ipv4/tcp_minisocks.c
··· 446 446 newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); 447 447 minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U); 448 448 newicsk->icsk_rto = TCP_TIMEOUT_INIT; 449 + newicsk->icsk_ack.lrcvtime = tcp_time_stamp; 449 450 450 451 newtp->packets_out = 0; 451 452 newtp->retrans_out = 0;
+4 -4
net/ipv6/netfilter/nft_masq_ipv6.c
··· 27 27 memset(&range, 0, sizeof(range)); 28 28 range.flags = priv->flags; 29 29 if (priv->sreg_proto_min) { 30 - range.min_proto.all = 31 - *(__be16 *)&regs->data[priv->sreg_proto_min]; 32 - range.max_proto.all = 33 - *(__be16 *)&regs->data[priv->sreg_proto_max]; 30 + range.min_proto.all = (__force __be16)nft_reg_load16( 31 + &regs->data[priv->sreg_proto_min]); 32 + range.max_proto.all = (__force __be16)nft_reg_load16( 33 + &regs->data[priv->sreg_proto_max]); 34 34 } 35 35 regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range, 36 36 nft_out(pkt));
+4 -4
net/ipv6/netfilter/nft_redir_ipv6.c
··· 26 26 27 27 memset(&range, 0, sizeof(range)); 28 28 if (priv->sreg_proto_min) { 29 - range.min_proto.all = 30 - *(__be16 *)&regs->data[priv->sreg_proto_min], 31 - range.max_proto.all = 32 - *(__be16 *)&regs->data[priv->sreg_proto_max], 29 + range.min_proto.all = (__force __be16)nft_reg_load16( 30 + &regs->data[priv->sreg_proto_min]); 31 + range.max_proto.all = (__force __be16)nft_reg_load16( 32 + &regs->data[priv->sreg_proto_max]); 33 33 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 34 34 } 35 35
+2
net/ipv6/route.c
··· 3423 3423 } 3424 3424 else if (rt->rt6i_flags & RTF_LOCAL) 3425 3425 rtm->rtm_type = RTN_LOCAL; 3426 + else if (rt->rt6i_flags & RTF_ANYCAST) 3427 + rtm->rtm_type = RTN_ANYCAST; 3426 3428 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK)) 3427 3429 rtm->rtm_type = RTN_LOCAL; 3428 3430 else
+1 -1
net/ipv6/udp.c
··· 1093 1093 ipc6.hlimit = -1; 1094 1094 ipc6.tclass = -1; 1095 1095 ipc6.dontfrag = -1; 1096 + sockc.tsflags = sk->sk_tsflags; 1096 1097 1097 1098 /* destination address check */ 1098 1099 if (sin6) { ··· 1218 1217 1219 1218 fl6.flowi6_mark = sk->sk_mark; 1220 1219 fl6.flowi6_uid = sk->sk_uid; 1221 - sockc.tsflags = sk->sk_tsflags; 1222 1220 1223 1221 if (msg->msg_controllen) { 1224 1222 opt = &opt_space;
+10 -3
net/mpls/af_mpls.c
··· 1298 1298 { 1299 1299 struct mpls_route __rcu **platform_label; 1300 1300 struct net *net = dev_net(dev); 1301 + unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN; 1302 + unsigned int alive; 1301 1303 unsigned index; 1302 1304 1303 1305 platform_label = rtnl_dereference(net->mpls.platform_label); ··· 1309 1307 if (!rt) 1310 1308 continue; 1311 1309 1310 + alive = 0; 1312 1311 change_nexthops(rt) { 1313 1312 if (rtnl_dereference(nh->nh_dev) != dev) 1314 - continue; 1313 + goto next; 1314 + 1315 1315 switch (event) { 1316 1316 case NETDEV_DOWN: 1317 1317 case NETDEV_UNREGISTER: ··· 1321 1317 /* fall through */ 1322 1318 case NETDEV_CHANGE: 1323 1319 nh->nh_flags |= RTNH_F_LINKDOWN; 1324 - if (event != NETDEV_UNREGISTER) 1325 - ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1; 1326 1320 break; 1327 1321 } 1328 1322 if (event == NETDEV_UNREGISTER) 1329 1323 RCU_INIT_POINTER(nh->nh_dev, NULL); 1324 + next: 1325 + if (!(nh->nh_flags & nh_flags)) 1326 + alive++; 1330 1327 } endfor_nexthops(rt); 1328 + 1329 + WRITE_ONCE(rt->rt_nhn_alive, alive); 1331 1330 } 1332 1331 } 1333 1332
+5 -1
net/netfilter/nf_conntrack_core.c
··· 181 181 unsigned int nf_conntrack_max __read_mostly; 182 182 seqcount_t nf_conntrack_generation __read_mostly; 183 183 184 - DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked); 184 + /* nf_conn must be 8 bytes aligned, as the 3 LSB bits are used 185 + * for the nfctinfo. We cheat by (ab)using the PER CPU cache line 186 + * alignment to enforce this. 187 + */ 188 + DEFINE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked); 185 189 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); 186 190 187 191 static unsigned int nf_conntrack_hash_rnd __read_mostly;
+12 -1
net/netfilter/nf_nat_proto_sctp.c
··· 33 33 enum nf_nat_manip_type maniptype) 34 34 { 35 35 sctp_sctphdr_t *hdr; 36 + int hdrsize = 8; 36 37 37 - if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) 38 + /* This could be an inner header returned in imcp packet; in such 39 + * cases we cannot update the checksum field since it is outside 40 + * of the 8 bytes of transport layer headers we are guaranteed. 41 + */ 42 + if (skb->len >= hdroff + sizeof(*hdr)) 43 + hdrsize = sizeof(*hdr); 44 + 45 + if (!skb_make_writable(skb, hdroff + hdrsize)) 38 46 return false; 39 47 40 48 hdr = (struct sctphdr *)(skb->data + hdroff); ··· 54 46 /* Get rid of dst port */ 55 47 hdr->dest = tuple->dst.u.sctp.port; 56 48 } 49 + 50 + if (hdrsize < sizeof(*hdr)) 51 + return true; 57 52 58 53 if (skb->ip_summed != CHECKSUM_PARTIAL) { 59 54 hdr->checksum = sctp_compute_cksum(skb, hdroff);
-4
net/netfilter/nf_tables_api.c
··· 3173 3173 iter.count = 0; 3174 3174 iter.err = 0; 3175 3175 iter.fn = nf_tables_bind_check_setelem; 3176 - iter.flush = false; 3177 3176 3178 3177 set->ops->walk(ctx, set, &iter); 3179 3178 if (iter.err < 0) ··· 3426 3427 args.iter.count = 0; 3427 3428 args.iter.err = 0; 3428 3429 args.iter.fn = nf_tables_dump_setelem; 3429 - args.iter.flush = false; 3430 3430 set->ops->walk(&ctx, set, &args.iter); 3431 3431 3432 3432 nla_nest_end(skb, nest); ··· 3989 3991 struct nft_set_iter iter = { 3990 3992 .genmask = genmask, 3991 3993 .fn = nft_flush_set, 3992 - .flush = true, 3993 3994 }; 3994 3995 set->ops->walk(&ctx, set, &iter); 3995 3996 ··· 5140 5143 iter.count = 0; 5141 5144 iter.err = 0; 5142 5145 iter.fn = nf_tables_loop_check_setelem; 5143 - iter.flush = false; 5144 5146 5145 5147 set->ops->walk(ctx, set, &iter); 5146 5148 if (iter.err < 0)
+12 -9
net/netfilter/nft_ct.c
··· 89 89 90 90 switch (priv->key) { 91 91 case NFT_CT_DIRECTION: 92 - *dest = CTINFO2DIR(ctinfo); 92 + nft_reg_store8(dest, CTINFO2DIR(ctinfo)); 93 93 return; 94 94 case NFT_CT_STATUS: 95 95 *dest = ct->status; ··· 157 157 return; 158 158 } 159 159 case NFT_CT_L3PROTOCOL: 160 - *dest = nf_ct_l3num(ct); 160 + nft_reg_store8(dest, nf_ct_l3num(ct)); 161 161 return; 162 162 case NFT_CT_PROTOCOL: 163 - *dest = nf_ct_protonum(ct); 163 + nft_reg_store8(dest, nf_ct_protonum(ct)); 164 164 return; 165 165 #ifdef CONFIG_NF_CONNTRACK_ZONES 166 166 case NFT_CT_ZONE: { 167 167 const struct nf_conntrack_zone *zone = nf_ct_zone(ct); 168 + u16 zoneid; 168 169 169 170 if (priv->dir < IP_CT_DIR_MAX) 170 - *dest = nf_ct_zone_id(zone, priv->dir); 171 + zoneid = nf_ct_zone_id(zone, priv->dir); 171 172 else 172 - *dest = zone->id; 173 + zoneid = zone->id; 173 174 175 + nft_reg_store16(dest, zoneid); 174 176 return; 175 177 } 176 178 #endif ··· 191 189 nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16); 192 190 return; 193 191 case NFT_CT_PROTO_SRC: 194 - *dest = (__force __u16)tuple->src.u.all; 192 + nft_reg_store16(dest, (__force u16)tuple->src.u.all); 195 193 return; 196 194 case NFT_CT_PROTO_DST: 197 - *dest = (__force __u16)tuple->dst.u.all; 195 + nft_reg_store16(dest, (__force u16)tuple->dst.u.all); 198 196 return; 199 197 default: 200 198 break; ··· 213 211 const struct nft_ct *priv = nft_expr_priv(expr); 214 212 struct sk_buff *skb = pkt->skb; 215 213 enum ip_conntrack_info ctinfo; 216 - u16 value = regs->data[priv->sreg]; 214 + u16 value = nft_reg_load16(&regs->data[priv->sreg]); 217 215 struct nf_conn *ct; 218 216 219 217 ct = nf_ct_get(skb, &ctinfo); ··· 550 548 case IP_CT_DIR_REPLY: 551 549 break; 552 550 default: 553 - return -EINVAL; 551 + err = -EINVAL; 552 + goto err1; 554 553 } 555 554 } 556 555
+21 -19
net/netfilter/nft_meta.c
··· 45 45 *dest = skb->len; 46 46 break; 47 47 case NFT_META_PROTOCOL: 48 - *dest = 0; 49 - *(__be16 *)dest = skb->protocol; 48 + nft_reg_store16(dest, (__force u16)skb->protocol); 50 49 break; 51 50 case NFT_META_NFPROTO: 52 - *dest = nft_pf(pkt); 51 + nft_reg_store8(dest, nft_pf(pkt)); 53 52 break; 54 53 case NFT_META_L4PROTO: 55 54 if (!pkt->tprot_set) 56 55 goto err; 57 - *dest = pkt->tprot; 56 + nft_reg_store8(dest, pkt->tprot); 58 57 break; 59 58 case NFT_META_PRIORITY: 60 59 *dest = skb->priority; ··· 84 85 case NFT_META_IIFTYPE: 85 86 if (in == NULL) 86 87 goto err; 87 - *dest = 0; 88 - *(u16 *)dest = in->type; 88 + nft_reg_store16(dest, in->type); 89 89 break; 90 90 case NFT_META_OIFTYPE: 91 91 if (out == NULL) 92 92 goto err; 93 - *dest = 0; 94 - *(u16 *)dest = out->type; 93 + nft_reg_store16(dest, out->type); 95 94 break; 96 95 case NFT_META_SKUID: 97 96 sk = skb_to_full_sk(skb); ··· 139 142 #endif 140 143 case NFT_META_PKTTYPE: 141 144 if (skb->pkt_type != PACKET_LOOPBACK) { 142 - *dest = skb->pkt_type; 145 + nft_reg_store8(dest, skb->pkt_type); 143 146 break; 144 147 } 145 148 146 149 switch (nft_pf(pkt)) { 147 150 case NFPROTO_IPV4: 148 151 if (ipv4_is_multicast(ip_hdr(skb)->daddr)) 149 - *dest = PACKET_MULTICAST; 152 + nft_reg_store8(dest, PACKET_MULTICAST); 150 153 else 151 - *dest = PACKET_BROADCAST; 154 + nft_reg_store8(dest, PACKET_BROADCAST); 152 155 break; 153 156 case NFPROTO_IPV6: 154 - *dest = PACKET_MULTICAST; 157 + nft_reg_store8(dest, PACKET_MULTICAST); 155 158 break; 156 159 case NFPROTO_NETDEV: 157 160 switch (skb->protocol) { ··· 165 168 goto err; 166 169 167 170 if (ipv4_is_multicast(iph->daddr)) 168 - *dest = PACKET_MULTICAST; 171 + nft_reg_store8(dest, PACKET_MULTICAST); 169 172 else 170 - *dest = PACKET_BROADCAST; 173 + nft_reg_store8(dest, PACKET_BROADCAST); 171 174 172 175 break; 173 176 } 174 177 case htons(ETH_P_IPV6): 175 - *dest = PACKET_MULTICAST; 178 + nft_reg_store8(dest, PACKET_MULTICAST); 176 179 break; 177 180 default: 
178 181 WARN_ON_ONCE(1); ··· 227 230 { 228 231 const struct nft_meta *meta = nft_expr_priv(expr); 229 232 struct sk_buff *skb = pkt->skb; 230 - u32 value = regs->data[meta->sreg]; 233 + u32 *sreg = &regs->data[meta->sreg]; 234 + u32 value = *sreg; 235 + u8 pkt_type; 231 236 232 237 switch (meta->key) { 233 238 case NFT_META_MARK: ··· 239 240 skb->priority = value; 240 241 break; 241 242 case NFT_META_PKTTYPE: 242 - if (skb->pkt_type != value && 243 - skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type)) 244 - skb->pkt_type = value; 243 + pkt_type = nft_reg_load8(sreg); 244 + 245 + if (skb->pkt_type != pkt_type && 246 + skb_pkt_type_ok(pkt_type) && 247 + skb_pkt_type_ok(skb->pkt_type)) 248 + skb->pkt_type = pkt_type; 245 249 break; 246 250 case NFT_META_NFTRACE: 247 251 skb->nf_trace = !!value;
+4 -4
net/netfilter/nft_nat.c
··· 65 65 } 66 66 67 67 if (priv->sreg_proto_min) { 68 - range.min_proto.all = 69 - *(__be16 *)&regs->data[priv->sreg_proto_min]; 70 - range.max_proto.all = 71 - *(__be16 *)&regs->data[priv->sreg_proto_max]; 68 + range.min_proto.all = (__force __be16)nft_reg_load16( 69 + &regs->data[priv->sreg_proto_min]); 70 + range.max_proto.all = (__force __be16)nft_reg_load16( 71 + &regs->data[priv->sreg_proto_max]); 72 72 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 73 73 } 74 74
+75 -82
net/netfilter/nft_set_bitmap.c
··· 15 15 #include <linux/netfilter/nf_tables.h> 16 16 #include <net/netfilter/nf_tables.h> 17 17 18 + struct nft_bitmap_elem { 19 + struct list_head head; 20 + struct nft_set_ext ext; 21 + }; 22 + 18 23 /* This bitmap uses two bits to represent one element. These two bits determine 19 24 * the element state in the current and the future generation. 20 25 * ··· 46 41 * restore its previous state. 47 42 */ 48 43 struct nft_bitmap { 49 - u16 bitmap_size; 50 - u8 bitmap[]; 44 + struct list_head list; 45 + u16 bitmap_size; 46 + u8 bitmap[]; 51 47 }; 52 48 53 - static inline void nft_bitmap_location(u32 key, u32 *idx, u32 *off) 49 + static inline void nft_bitmap_location(const struct nft_set *set, 50 + const void *key, 51 + u32 *idx, u32 *off) 54 52 { 55 - u32 k = (key << 1); 53 + u32 k; 54 + 55 + if (set->klen == 2) 56 + k = *(u16 *)key; 57 + else 58 + k = *(u8 *)key; 59 + k <<= 1; 56 60 57 61 *idx = k / BITS_PER_BYTE; 58 62 *off = k % BITS_PER_BYTE; ··· 83 69 u8 genmask = nft_genmask_cur(net); 84 70 u32 idx, off; 85 71 86 - nft_bitmap_location(*key, &idx, &off); 72 + nft_bitmap_location(set, key, &idx, &off); 87 73 88 74 return nft_bitmap_active(priv->bitmap, idx, off, genmask); 89 75 } 90 76 77 + static struct nft_bitmap_elem * 78 + nft_bitmap_elem_find(const struct nft_set *set, struct nft_bitmap_elem *this, 79 + u8 genmask) 80 + { 81 + const struct nft_bitmap *priv = nft_set_priv(set); 82 + struct nft_bitmap_elem *be; 83 + 84 + list_for_each_entry_rcu(be, &priv->list, head) { 85 + if (memcmp(nft_set_ext_key(&be->ext), 86 + nft_set_ext_key(&this->ext), set->klen) || 87 + !nft_set_elem_active(&be->ext, genmask)) 88 + continue; 89 + 90 + return be; 91 + } 92 + return NULL; 93 + } 94 + 91 95 static int nft_bitmap_insert(const struct net *net, const struct nft_set *set, 92 96 const struct nft_set_elem *elem, 93 - struct nft_set_ext **_ext) 97 + struct nft_set_ext **ext) 94 98 { 95 99 struct nft_bitmap *priv = nft_set_priv(set); 96 - struct nft_set_ext *ext = elem->priv; 
100 + struct nft_bitmap_elem *new = elem->priv, *be; 97 101 u8 genmask = nft_genmask_next(net); 98 102 u32 idx, off; 99 103 100 - nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); 101 - if (nft_bitmap_active(priv->bitmap, idx, off, genmask)) 104 + be = nft_bitmap_elem_find(set, new, genmask); 105 + if (be) { 106 + *ext = &be->ext; 102 107 return -EEXIST; 108 + } 103 109 110 + nft_bitmap_location(set, nft_set_ext_key(&new->ext), &idx, &off); 104 111 /* Enter 01 state. */ 105 112 priv->bitmap[idx] |= (genmask << off); 113 + list_add_tail_rcu(&new->head, &priv->list); 106 114 107 115 return 0; 108 116 } ··· 134 98 const struct nft_set_elem *elem) 135 99 { 136 100 struct nft_bitmap *priv = nft_set_priv(set); 137 - struct nft_set_ext *ext = elem->priv; 101 + struct nft_bitmap_elem *be = elem->priv; 138 102 u8 genmask = nft_genmask_next(net); 139 103 u32 idx, off; 140 104 141 - nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); 105 + nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off); 142 106 /* Enter 00 state. */ 143 107 priv->bitmap[idx] &= ~(genmask << off); 108 + list_del_rcu(&be->head); 144 109 } 145 110 146 111 static void nft_bitmap_activate(const struct net *net, ··· 149 112 const struct nft_set_elem *elem) 150 113 { 151 114 struct nft_bitmap *priv = nft_set_priv(set); 152 - struct nft_set_ext *ext = elem->priv; 115 + struct nft_bitmap_elem *be = elem->priv; 153 116 u8 genmask = nft_genmask_next(net); 154 117 u32 idx, off; 155 118 156 - nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); 119 + nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off); 157 120 /* Enter 11 state. 
*/ 158 121 priv->bitmap[idx] |= (genmask << off); 122 + nft_set_elem_change_active(net, set, &be->ext); 159 123 } 160 124 161 125 static bool nft_bitmap_flush(const struct net *net, 162 - const struct nft_set *set, void *ext) 126 + const struct nft_set *set, void *_be) 163 127 { 164 128 struct nft_bitmap *priv = nft_set_priv(set); 165 129 u8 genmask = nft_genmask_next(net); 130 + struct nft_bitmap_elem *be = _be; 166 131 u32 idx, off; 167 132 168 - nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); 133 + nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off); 169 134 /* Enter 10 state, similar to deactivation. */ 170 135 priv->bitmap[idx] &= ~(genmask << off); 136 + nft_set_elem_change_active(net, set, &be->ext); 171 137 172 138 return true; 173 - } 174 - 175 - static struct nft_set_ext *nft_bitmap_ext_alloc(const struct nft_set *set, 176 - const struct nft_set_elem *elem) 177 - { 178 - struct nft_set_ext_tmpl tmpl; 179 - struct nft_set_ext *ext; 180 - 181 - nft_set_ext_prepare(&tmpl); 182 - nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen); 183 - 184 - ext = kzalloc(tmpl.len, GFP_KERNEL); 185 - if (!ext) 186 - return NULL; 187 - 188 - nft_set_ext_init(ext, &tmpl); 189 - memcpy(nft_set_ext_key(ext), elem->key.val.data, set->klen); 190 - 191 - return ext; 192 139 } 193 140 194 141 static void *nft_bitmap_deactivate(const struct net *net, ··· 180 159 const struct nft_set_elem *elem) 181 160 { 182 161 struct nft_bitmap *priv = nft_set_priv(set); 162 + struct nft_bitmap_elem *this = elem->priv, *be; 183 163 u8 genmask = nft_genmask_next(net); 184 - struct nft_set_ext *ext; 185 - u32 idx, off, key = 0; 164 + u32 idx, off; 186 165 187 - memcpy(&key, elem->key.val.data, set->klen); 188 - nft_bitmap_location(key, &idx, &off); 166 + nft_bitmap_location(set, elem->key.val.data, &idx, &off); 189 167 190 - if (!nft_bitmap_active(priv->bitmap, idx, off, genmask)) 191 - return NULL; 192 - 193 - /* We have no real set extension since this is a 
bitmap, allocate this 194 - * dummy object that is released from the commit/abort path. 195 - */ 196 - ext = nft_bitmap_ext_alloc(set, elem); 197 - if (!ext) 168 + be = nft_bitmap_elem_find(set, this, genmask); 169 + if (!be) 198 170 return NULL; 199 171 200 172 /* Enter 10 state. */ 201 173 priv->bitmap[idx] &= ~(genmask << off); 174 + nft_set_elem_change_active(net, set, &be->ext); 202 175 203 - return ext; 176 + return be; 204 177 } 205 178 206 179 static void nft_bitmap_walk(const struct nft_ctx *ctx, ··· 202 187 struct nft_set_iter *iter) 203 188 { 204 189 const struct nft_bitmap *priv = nft_set_priv(set); 205 - struct nft_set_ext_tmpl tmpl; 190 + struct nft_bitmap_elem *be; 206 191 struct nft_set_elem elem; 207 - struct nft_set_ext *ext; 208 - int idx, off; 209 - u16 key; 210 192 211 - nft_set_ext_prepare(&tmpl); 212 - nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen); 193 + list_for_each_entry_rcu(be, &priv->list, head) { 194 + if (iter->count < iter->skip) 195 + goto cont; 196 + if (!nft_set_elem_active(&be->ext, iter->genmask)) 197 + goto cont; 213 198 214 - for (idx = 0; idx < priv->bitmap_size; idx++) { 215 - for (off = 0; off < BITS_PER_BYTE; off += 2) { 216 - if (iter->count < iter->skip) 217 - goto cont; 199 + elem.priv = be; 218 200 219 - if (!nft_bitmap_active(priv->bitmap, idx, off, 220 - iter->genmask)) 221 - goto cont; 201 + iter->err = iter->fn(ctx, set, iter, &elem); 222 202 223 - ext = kzalloc(tmpl.len, GFP_KERNEL); 224 - if (!ext) { 225 - iter->err = -ENOMEM; 226 - return; 227 - } 228 - nft_set_ext_init(ext, &tmpl); 229 - key = ((idx * BITS_PER_BYTE) + off) >> 1; 230 - memcpy(nft_set_ext_key(ext), &key, set->klen); 231 - 232 - elem.priv = ext; 233 - iter->err = iter->fn(ctx, set, iter, &elem); 234 - 235 - /* On set flush, this dummy extension object is released 236 - * from the commit/abort path. 
237 - */ 238 - if (!iter->flush) 239 - kfree(ext); 240 - 241 - if (iter->err < 0) 242 - return; 203 + if (iter->err < 0) 204 + return; 243 205 cont: 244 - iter->count++; 245 - } 206 + iter->count++; 246 207 } 247 208 } 248 209 ··· 249 258 { 250 259 struct nft_bitmap *priv = nft_set_priv(set); 251 260 261 + INIT_LIST_HEAD(&priv->list); 252 262 priv->bitmap_size = nft_bitmap_size(set->klen); 253 263 254 264 return 0; ··· 275 283 276 284 static struct nft_set_ops nft_bitmap_ops __read_mostly = { 277 285 .privsize = nft_bitmap_privsize, 286 + .elemsize = offsetof(struct nft_bitmap_elem, ext), 278 287 .estimate = nft_bitmap_estimate, 279 288 .init = nft_bitmap_init, 280 289 .destroy = nft_bitmap_destroy,
+41
net/netlink/af_netlink.c
··· 96 96 97 97 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); 98 98 99 + static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS]; 100 + 101 + static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = { 102 + "nlk_cb_mutex-ROUTE", 103 + "nlk_cb_mutex-1", 104 + "nlk_cb_mutex-USERSOCK", 105 + "nlk_cb_mutex-FIREWALL", 106 + "nlk_cb_mutex-SOCK_DIAG", 107 + "nlk_cb_mutex-NFLOG", 108 + "nlk_cb_mutex-XFRM", 109 + "nlk_cb_mutex-SELINUX", 110 + "nlk_cb_mutex-ISCSI", 111 + "nlk_cb_mutex-AUDIT", 112 + "nlk_cb_mutex-FIB_LOOKUP", 113 + "nlk_cb_mutex-CONNECTOR", 114 + "nlk_cb_mutex-NETFILTER", 115 + "nlk_cb_mutex-IP6_FW", 116 + "nlk_cb_mutex-DNRTMSG", 117 + "nlk_cb_mutex-KOBJECT_UEVENT", 118 + "nlk_cb_mutex-GENERIC", 119 + "nlk_cb_mutex-17", 120 + "nlk_cb_mutex-SCSITRANSPORT", 121 + "nlk_cb_mutex-ECRYPTFS", 122 + "nlk_cb_mutex-RDMA", 123 + "nlk_cb_mutex-CRYPTO", 124 + "nlk_cb_mutex-SMC", 125 + "nlk_cb_mutex-23", 126 + "nlk_cb_mutex-24", 127 + "nlk_cb_mutex-25", 128 + "nlk_cb_mutex-26", 129 + "nlk_cb_mutex-27", 130 + "nlk_cb_mutex-28", 131 + "nlk_cb_mutex-29", 132 + "nlk_cb_mutex-30", 133 + "nlk_cb_mutex-31", 134 + "nlk_cb_mutex-MAX_LINKS" 135 + }; 136 + 99 137 static int netlink_dump(struct sock *sk); 100 138 static void netlink_skb_destructor(struct sk_buff *skb); 101 139 ··· 623 585 } else { 624 586 nlk->cb_mutex = &nlk->cb_def_mutex; 625 587 mutex_init(nlk->cb_mutex); 588 + lockdep_set_class_and_name(nlk->cb_mutex, 589 + nlk_cb_mutex_keys + protocol, 590 + nlk_cb_mutex_key_strings[protocol]); 626 591 } 627 592 init_waitqueue_head(&nlk->wait); 628 593
+3 -1
net/netlink/genetlink.c
··· 783 783 784 784 if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid, 785 785 cb->nlh->nlmsg_seq, NLM_F_MULTI, 786 - skb, CTRL_CMD_NEWFAMILY) < 0) 786 + skb, CTRL_CMD_NEWFAMILY) < 0) { 787 + n--; 787 788 break; 789 + } 788 790 } 789 791 790 792 cb->args[0] = n;
+3 -1
net/openvswitch/flow_netlink.c
··· 637 637 ipv4 = true; 638 638 break; 639 639 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: 640 - SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst, 640 + SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src, 641 641 nla_get_in6_addr(a), is_mask); 642 642 ipv6 = true; 643 643 break; ··· 697 697 698 698 tun_flags |= TUNNEL_VXLAN_OPT; 699 699 opts_type = type; 700 + break; 701 + case OVS_TUNNEL_KEY_ATTR_PAD: 700 702 break; 701 703 default: 702 704 OVS_NLERR(log, "Unknown IP tunnel attribute %d",
+4
net/rxrpc/conn_event.c
··· 275 275 rxrpc_conn_retransmit_call(conn, skb); 276 276 return 0; 277 277 278 + case RXRPC_PACKET_TYPE_BUSY: 279 + /* Just ignore BUSY packets for now. */ 280 + return 0; 281 + 278 282 case RXRPC_PACKET_TYPE_ABORT: 279 283 if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), 280 284 &wtmp, sizeof(wtmp)) < 0)
+8 -2
net/sched/sch_dsmark.c
··· 201 201 pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p); 202 202 203 203 if (p->set_tc_index) { 204 + int wlen = skb_network_offset(skb); 205 + 204 206 switch (tc_skb_protocol(skb)) { 205 207 case htons(ETH_P_IP): 206 - if (skb_cow_head(skb, sizeof(struct iphdr))) 208 + wlen += sizeof(struct iphdr); 209 + if (!pskb_may_pull(skb, wlen) || 210 + skb_try_make_writable(skb, wlen)) 207 211 goto drop; 208 212 209 213 skb->tc_index = ipv4_get_dsfield(ip_hdr(skb)) ··· 215 211 break; 216 212 217 213 case htons(ETH_P_IPV6): 218 - if (skb_cow_head(skb, sizeof(struct ipv6hdr))) 214 + wlen += sizeof(struct ipv6hdr); 215 + if (!pskb_may_pull(skb, wlen) || 216 + skb_try_make_writable(skb, wlen)) 219 217 goto drop; 220 218 221 219 skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
+2 -4
net/sctp/associola.c
··· 71 71 { 72 72 struct net *net = sock_net(sk); 73 73 struct sctp_sock *sp; 74 - int i; 75 74 sctp_paramhdr_t *p; 76 - int err; 75 + int i; 77 76 78 77 /* Retrieve the SCTP per socket area. */ 79 78 sp = sctp_sk((struct sock *)sk); ··· 263 264 264 265 /* AUTH related initializations */ 265 266 INIT_LIST_HEAD(&asoc->endpoint_shared_keys); 266 - err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp); 267 - if (err) 267 + if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp)) 268 268 goto fail_init; 269 269 270 270 asoc->active_key_id = ep->active_key_id;
+3 -4
net/sctp/output.c
··· 546 546 struct sctp_association *asoc = tp->asoc; 547 547 struct sctp_chunk *chunk, *tmp; 548 548 int pkt_count, gso = 0; 549 - int confirm; 550 549 struct dst_entry *dst; 551 550 struct sk_buff *head; 552 551 struct sctphdr *sh; ··· 624 625 asoc->peer.last_sent_to = tp; 625 626 } 626 627 head->ignore_df = packet->ipfragok; 627 - confirm = tp->dst_pending_confirm; 628 - if (confirm) 628 + if (tp->dst_pending_confirm) 629 629 skb_set_dst_pending_confirm(head, 1); 630 630 /* neighbour should be confirmed on successful transmission or 631 631 * positive error 632 632 */ 633 - if (tp->af_specific->sctp_xmit(head, tp) >= 0 && confirm) 633 + if (tp->af_specific->sctp_xmit(head, tp) >= 0 && 634 + tp->dst_pending_confirm) 634 635 tp->dst_pending_confirm = 0; 635 636 636 637 out:
+5 -6
net/sctp/outqueue.c
··· 382 382 } 383 383 384 384 static int sctp_prsctp_prune_unsent(struct sctp_association *asoc, 385 - struct sctp_sndrcvinfo *sinfo, 386 - struct list_head *queue, int msg_len) 385 + struct sctp_sndrcvinfo *sinfo, int msg_len) 387 386 { 387 + struct sctp_outq *q = &asoc->outqueue; 388 388 struct sctp_chunk *chk, *temp; 389 389 390 - list_for_each_entry_safe(chk, temp, queue, list) { 390 + list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) { 391 391 if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || 392 392 chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive) 393 393 continue; 394 394 395 395 list_del_init(&chk->list); 396 + q->out_qlen -= chk->skb->len; 396 397 asoc->sent_cnt_removable--; 397 398 asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; 398 399 ··· 432 431 return; 433 432 } 434 433 435 - sctp_prsctp_prune_unsent(asoc, sinfo, 436 - &asoc->outqueue.out_chunk_list, 437 - msg_len); 434 + sctp_prsctp_prune_unsent(asoc, sinfo, msg_len); 438 435 } 439 436 440 437 /* Mark all the eligible packets on a transport for retransmission. */
+12 -1
net/socket.c
··· 652 652 } 653 653 EXPORT_SYMBOL(kernel_sendmsg); 654 654 655 + static bool skb_is_err_queue(const struct sk_buff *skb) 656 + { 657 + /* pkt_type of skbs enqueued on the error queue are set to 658 + * PACKET_OUTGOING in skb_set_err_queue(). This is only safe to do 659 + * in recvmsg, since skbs received on a local socket will never 660 + * have a pkt_type of PACKET_OUTGOING. 661 + */ 662 + return skb->pkt_type == PACKET_OUTGOING; 663 + } 664 + 655 665 /* 656 666 * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) 657 667 */ ··· 705 695 put_cmsg(msg, SOL_SOCKET, 706 696 SCM_TIMESTAMPING, sizeof(tss), &tss); 707 697 708 - if (skb->len && (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS)) 698 + if (skb_is_err_queue(skb) && skb->len && 699 + SKB_EXT_ERR(skb)->opt_stats) 709 700 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS, 710 701 skb->len, skb->data); 711 702 }
+2 -1
net/sunrpc/xprtrdma/verbs.c
··· 503 503 struct ib_cq *sendcq, *recvcq; 504 504 int rc; 505 505 506 - max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES); 506 + max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge, 507 + RPCRDMA_MAX_SEND_SGES); 507 508 if (max_sge < RPCRDMA_MIN_SEND_SGES) { 508 509 pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge); 509 510 return -ENOMEM;
+6 -1
net/tipc/subscr.c
··· 141 141 static void tipc_subscrp_timeout(unsigned long data) 142 142 { 143 143 struct tipc_subscription *sub = (struct tipc_subscription *)data; 144 + struct tipc_subscriber *subscriber = sub->subscriber; 145 + 146 + spin_lock_bh(&subscriber->lock); 147 + tipc_nametbl_unsubscribe(sub); 148 + spin_unlock_bh(&subscriber->lock); 144 149 145 150 /* Notify subscriber of timeout */ 146 151 tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, ··· 178 173 struct tipc_subscriber *subscriber = sub->subscriber; 179 174 180 175 spin_lock_bh(&subscriber->lock); 181 - tipc_nametbl_unsubscribe(sub); 182 176 list_del(&sub->subscrp_list); 183 177 atomic_dec(&tn->subscription_count); 184 178 spin_unlock_bh(&subscriber->lock); ··· 209 205 if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) 210 206 continue; 211 207 208 + tipc_nametbl_unsubscribe(sub); 212 209 tipc_subscrp_get(sub); 213 210 spin_unlock_bh(&subscriber->lock); 214 211 tipc_subscrp_delete(sub);
+9 -8
net/unix/garbage.c
··· 146 146 if (s) { 147 147 struct unix_sock *u = unix_sk(s); 148 148 149 + BUG_ON(!atomic_long_read(&u->inflight)); 149 150 BUG_ON(list_empty(&u->link)); 150 151 151 152 if (atomic_long_dec_and_test(&u->inflight)) ··· 342 341 } 343 342 list_del(&cursor); 344 343 344 + /* Now gc_candidates contains only garbage. Restore original 345 + * inflight counters for these as well, and remove the skbuffs 346 + * which are creating the cycle(s). 347 + */ 348 + skb_queue_head_init(&hitlist); 349 + list_for_each_entry(u, &gc_candidates, link) 350 + scan_children(&u->sk, inc_inflight, &hitlist); 351 + 345 352 /* not_cycle_list contains those sockets which do not make up a 346 353 * cycle. Restore these to the inflight list. 347 354 */ ··· 358 349 __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags); 359 350 list_move_tail(&u->link, &gc_inflight_list); 360 351 } 361 - 362 - /* Now gc_candidates contains only garbage. Restore original 363 - * inflight counters for these as well, and remove the skbuffs 364 - * which are creating the cycle(s). 365 - */ 366 - skb_queue_head_init(&hitlist); 367 - list_for_each_entry(u, &gc_candidates, link) 368 - scan_children(&u->sk, inc_inflight, &hitlist); 369 352 370 353 spin_unlock(&unix_gc_lock); 371 354
+14
net/vmw_vsock/af_vsock.c
··· 1102 1102 .sendpage = sock_no_sendpage, 1103 1103 }; 1104 1104 1105 + static int vsock_transport_cancel_pkt(struct vsock_sock *vsk) 1106 + { 1107 + if (!transport->cancel_pkt) 1108 + return -EOPNOTSUPP; 1109 + 1110 + return transport->cancel_pkt(vsk); 1111 + } 1112 + 1105 1113 static void vsock_connect_timeout(struct work_struct *work) 1106 1114 { 1107 1115 struct sock *sk; 1108 1116 struct vsock_sock *vsk; 1117 + int cancel = 0; 1109 1118 1110 1119 vsk = container_of(work, struct vsock_sock, dwork.work); 1111 1120 sk = sk_vsock(vsk); ··· 1125 1116 sk->sk_state = SS_UNCONNECTED; 1126 1117 sk->sk_err = ETIMEDOUT; 1127 1118 sk->sk_error_report(sk); 1119 + cancel = 1; 1128 1120 } 1129 1121 release_sock(sk); 1122 + if (cancel) 1123 + vsock_transport_cancel_pkt(vsk); 1130 1124 1131 1125 sock_put(sk); 1132 1126 } ··· 1236 1224 err = sock_intr_errno(timeout); 1237 1225 sk->sk_state = SS_UNCONNECTED; 1238 1226 sock->state = SS_UNCONNECTED; 1227 + vsock_transport_cancel_pkt(vsk); 1239 1228 goto out_wait; 1240 1229 } else if (timeout == 0) { 1241 1230 err = -ETIMEDOUT; 1242 1231 sk->sk_state = SS_UNCONNECTED; 1243 1232 sock->state = SS_UNCONNECTED; 1233 + vsock_transport_cancel_pkt(vsk); 1244 1234 goto out_wait; 1245 1235 } 1246 1236
+42
net/vmw_vsock/virtio_transport.c
··· 213 213 return len; 214 214 } 215 215 216 + static int 217 + virtio_transport_cancel_pkt(struct vsock_sock *vsk) 218 + { 219 + struct virtio_vsock *vsock; 220 + struct virtio_vsock_pkt *pkt, *n; 221 + int cnt = 0; 222 + LIST_HEAD(freeme); 223 + 224 + vsock = virtio_vsock_get(); 225 + if (!vsock) { 226 + return -ENODEV; 227 + } 228 + 229 + spin_lock_bh(&vsock->send_pkt_list_lock); 230 + list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) { 231 + if (pkt->vsk != vsk) 232 + continue; 233 + list_move(&pkt->list, &freeme); 234 + } 235 + spin_unlock_bh(&vsock->send_pkt_list_lock); 236 + 237 + list_for_each_entry_safe(pkt, n, &freeme, list) { 238 + if (pkt->reply) 239 + cnt++; 240 + list_del(&pkt->list); 241 + virtio_transport_free_pkt(pkt); 242 + } 243 + 244 + if (cnt) { 245 + struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX]; 246 + int new_cnt; 247 + 248 + new_cnt = atomic_sub_return(cnt, &vsock->queued_replies); 249 + if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) && 250 + new_cnt < virtqueue_get_vring_size(rx_vq)) 251 + queue_work(virtio_vsock_workqueue, &vsock->rx_work); 252 + } 253 + 254 + return 0; 255 + } 256 + 216 257 static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) 217 258 { 218 259 int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE; ··· 503 462 .release = virtio_transport_release, 504 463 .connect = virtio_transport_connect, 505 464 .shutdown = virtio_transport_shutdown, 465 + .cancel_pkt = virtio_transport_cancel_pkt, 506 466 507 467 .dgram_bind = virtio_transport_dgram_bind, 508 468 .dgram_dequeue = virtio_transport_dgram_dequeue,
+7
net/vmw_vsock/virtio_transport_common.c
··· 58 58 pkt->len = len; 59 59 pkt->hdr.len = cpu_to_le32(len); 60 60 pkt->reply = info->reply; 61 + pkt->vsk = info->vsk; 61 62 62 63 if (info->msg && len > 0) { 63 64 pkt->buf = kmalloc(len, GFP_KERNEL); ··· 181 180 struct virtio_vsock_pkt_info info = { 182 181 .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE, 183 182 .type = type, 183 + .vsk = vsk, 184 184 }; 185 185 186 186 return virtio_transport_send_pkt_info(vsk, &info); ··· 521 519 struct virtio_vsock_pkt_info info = { 522 520 .op = VIRTIO_VSOCK_OP_REQUEST, 523 521 .type = VIRTIO_VSOCK_TYPE_STREAM, 522 + .vsk = vsk, 524 523 }; 525 524 526 525 return virtio_transport_send_pkt_info(vsk, &info); ··· 537 534 VIRTIO_VSOCK_SHUTDOWN_RCV : 0) | 538 535 (mode & SEND_SHUTDOWN ? 539 536 VIRTIO_VSOCK_SHUTDOWN_SEND : 0), 537 + .vsk = vsk, 540 538 }; 541 539 542 540 return virtio_transport_send_pkt_info(vsk, &info); ··· 564 560 .type = VIRTIO_VSOCK_TYPE_STREAM, 565 561 .msg = msg, 566 562 .pkt_len = len, 563 + .vsk = vsk, 567 564 }; 568 565 569 566 return virtio_transport_send_pkt_info(vsk, &info); ··· 586 581 .op = VIRTIO_VSOCK_OP_RST, 587 582 .type = VIRTIO_VSOCK_TYPE_STREAM, 588 583 .reply = !!pkt, 584 + .vsk = vsk, 589 585 }; 590 586 591 587 /* Send RST only if the original pkt is not a RST pkt */ ··· 832 826 .remote_cid = le64_to_cpu(pkt->hdr.src_cid), 833 827 .remote_port = le32_to_cpu(pkt->hdr.src_port), 834 828 .reply = true, 829 + .vsk = vsk, 835 830 }; 836 831 837 832 return virtio_transport_send_pkt_info(vsk, &info);
+56 -71
net/wireless/nl80211.c
··· 545 545 { 546 546 int err; 547 547 548 - rtnl_lock(); 549 - 550 548 if (!cb->args[0]) { 551 549 err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, 552 550 genl_family_attrbuf(&nl80211_fam), 553 551 nl80211_fam.maxattr, nl80211_policy); 554 552 if (err) 555 - goto out_unlock; 553 + return err; 556 554 557 555 *wdev = __cfg80211_wdev_from_attrs( 558 556 sock_net(skb->sk), 559 557 genl_family_attrbuf(&nl80211_fam)); 560 - if (IS_ERR(*wdev)) { 561 - err = PTR_ERR(*wdev); 562 - goto out_unlock; 563 - } 558 + if (IS_ERR(*wdev)) 559 + return PTR_ERR(*wdev); 564 560 *rdev = wiphy_to_rdev((*wdev)->wiphy); 565 561 /* 0 is the first index - add 1 to parse only once */ 566 562 cb->args[0] = (*rdev)->wiphy_idx + 1; ··· 566 570 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); 567 571 struct wireless_dev *tmp; 568 572 569 - if (!wiphy) { 570 - err = -ENODEV; 571 - goto out_unlock; 572 - } 573 + if (!wiphy) 574 + return -ENODEV; 573 575 *rdev = wiphy_to_rdev(wiphy); 574 576 *wdev = NULL; 575 577 ··· 578 584 } 579 585 } 580 586 581 - if (!*wdev) { 582 - err = -ENODEV; 583 - goto out_unlock; 584 - } 587 + if (!*wdev) 588 + return -ENODEV; 585 589 } 586 590 587 591 return 0; 588 - out_unlock: 589 - rtnl_unlock(); 590 - return err; 591 - } 592 - 593 - static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev) 594 - { 595 - rtnl_unlock(); 596 592 } 597 593 598 594 /* IE validation */ ··· 2592 2608 int filter_wiphy = -1; 2593 2609 struct cfg80211_registered_device *rdev; 2594 2610 struct wireless_dev *wdev; 2611 + int ret; 2595 2612 2596 2613 rtnl_lock(); 2597 2614 if (!cb->args[2]) { 2598 2615 struct nl80211_dump_wiphy_state state = { 2599 2616 .filter_wiphy = -1, 2600 2617 }; 2601 - int ret; 2602 2618 2603 2619 ret = nl80211_dump_wiphy_parse(skb, cb, &state); 2604 2620 if (ret) 2605 - return ret; 2621 + goto out_unlock; 2606 2622 2607 2623 filter_wiphy = state.filter_wiphy; 2608 2624 ··· 2647 2663 wp_idx++; 2648 2664 } 2649 2665 out: 2650 - 
rtnl_unlock(); 2651 - 2652 2666 cb->args[0] = wp_idx; 2653 2667 cb->args[1] = if_idx; 2654 2668 2655 - return skb->len; 2669 + ret = skb->len; 2670 + out_unlock: 2671 + rtnl_unlock(); 2672 + 2673 + return ret; 2656 2674 } 2657 2675 2658 2676 static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info) ··· 4438 4452 int sta_idx = cb->args[2]; 4439 4453 int err; 4440 4454 4455 + rtnl_lock(); 4441 4456 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 4442 4457 if (err) 4443 - return err; 4458 + goto out_err; 4444 4459 4445 4460 if (!wdev->netdev) { 4446 4461 err = -EINVAL; ··· 4476 4489 cb->args[2] = sta_idx; 4477 4490 err = skb->len; 4478 4491 out_err: 4479 - nl80211_finish_wdev_dump(rdev); 4492 + rtnl_unlock(); 4480 4493 4481 4494 return err; 4482 4495 } ··· 5262 5275 int path_idx = cb->args[2]; 5263 5276 int err; 5264 5277 5278 + rtnl_lock(); 5265 5279 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 5266 5280 if (err) 5267 - return err; 5281 + goto out_err; 5268 5282 5269 5283 if (!rdev->ops->dump_mpath) { 5270 5284 err = -EOPNOTSUPP; ··· 5298 5310 cb->args[2] = path_idx; 5299 5311 err = skb->len; 5300 5312 out_err: 5301 - nl80211_finish_wdev_dump(rdev); 5313 + rtnl_unlock(); 5302 5314 return err; 5303 5315 } 5304 5316 ··· 5458 5470 int path_idx = cb->args[2]; 5459 5471 int err; 5460 5472 5473 + rtnl_lock(); 5461 5474 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 5462 5475 if (err) 5463 - return err; 5476 + goto out_err; 5464 5477 5465 5478 if (!rdev->ops->dump_mpp) { 5466 5479 err = -EOPNOTSUPP; ··· 5494 5505 cb->args[2] = path_idx; 5495 5506 err = skb->len; 5496 5507 out_err: 5497 - nl80211_finish_wdev_dump(rdev); 5508 + rtnl_unlock(); 5498 5509 return err; 5499 5510 } 5500 5511 ··· 7663 7674 int start = cb->args[2], idx = 0; 7664 7675 int err; 7665 7676 7677 + rtnl_lock(); 7666 7678 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 7667 - if (err) 7679 + if (err) { 7680 + rtnl_unlock(); 7668 7681 return err; 7682 + 
} 7669 7683 7670 7684 wdev_lock(wdev); 7671 7685 spin_lock_bh(&rdev->bss_lock); ··· 7691 7699 wdev_unlock(wdev); 7692 7700 7693 7701 cb->args[2] = idx; 7694 - nl80211_finish_wdev_dump(rdev); 7702 + rtnl_unlock(); 7695 7703 7696 7704 return skb->len; 7697 7705 } ··· 7776 7784 int res; 7777 7785 bool radio_stats; 7778 7786 7787 + rtnl_lock(); 7779 7788 res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 7780 7789 if (res) 7781 - return res; 7790 + goto out_err; 7782 7791 7783 7792 /* prepare_wdev_dump parsed the attributes */ 7784 7793 radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS]; ··· 7820 7827 cb->args[2] = survey_idx; 7821 7828 res = skb->len; 7822 7829 out_err: 7823 - nl80211_finish_wdev_dump(rdev); 7830 + rtnl_unlock(); 7824 7831 return res; 7825 7832 } 7826 7833 ··· 11501 11508 void *data = NULL; 11502 11509 unsigned int data_len = 0; 11503 11510 11504 - rtnl_lock(); 11505 - 11506 11511 if (cb->args[0]) { 11507 11512 /* subtract the 1 again here */ 11508 11513 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); 11509 11514 struct wireless_dev *tmp; 11510 11515 11511 - if (!wiphy) { 11512 - err = -ENODEV; 11513 - goto out_unlock; 11514 - } 11516 + if (!wiphy) 11517 + return -ENODEV; 11515 11518 *rdev = wiphy_to_rdev(wiphy); 11516 11519 *wdev = NULL; 11517 11520 ··· 11527 11538 err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, 11528 11539 attrbuf, nl80211_fam.maxattr, nl80211_policy); 11529 11540 if (err) 11530 - goto out_unlock; 11541 + return err; 11531 11542 11532 11543 if (!attrbuf[NL80211_ATTR_VENDOR_ID] || 11533 - !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) { 11534 - err = -EINVAL; 11535 - goto out_unlock; 11536 - } 11544 + !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) 11545 + return -EINVAL; 11537 11546 11538 11547 *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf); 11539 11548 if (IS_ERR(*wdev)) 11540 11549 *wdev = NULL; 11541 11550 11542 11551 *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf); 11543 - if 
(IS_ERR(*rdev)) { 11544 - err = PTR_ERR(*rdev); 11545 - goto out_unlock; 11546 - } 11552 + if (IS_ERR(*rdev)) 11553 + return PTR_ERR(*rdev); 11547 11554 11548 11555 vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]); 11549 11556 subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]); ··· 11552 11567 if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd) 11553 11568 continue; 11554 11569 11555 - if (!vcmd->dumpit) { 11556 - err = -EOPNOTSUPP; 11557 - goto out_unlock; 11558 - } 11570 + if (!vcmd->dumpit) 11571 + return -EOPNOTSUPP; 11559 11572 11560 11573 vcmd_idx = i; 11561 11574 break; 11562 11575 } 11563 11576 11564 - if (vcmd_idx < 0) { 11565 - err = -EOPNOTSUPP; 11566 - goto out_unlock; 11567 - } 11577 + if (vcmd_idx < 0) 11578 + return -EOPNOTSUPP; 11568 11579 11569 11580 if (attrbuf[NL80211_ATTR_VENDOR_DATA]) { 11570 11581 data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]); ··· 11577 11596 11578 11597 /* keep rtnl locked in successful case */ 11579 11598 return 0; 11580 - out_unlock: 11581 - rtnl_unlock(); 11582 - return err; 11583 11599 } 11584 11600 11585 11601 static int nl80211_vendor_cmd_dump(struct sk_buff *skb, ··· 11591 11613 int err; 11592 11614 struct nlattr *vendor_data; 11593 11615 11616 + rtnl_lock(); 11594 11617 err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev); 11595 11618 if (err) 11596 - return err; 11619 + goto out; 11597 11620 11598 11621 vcmd_idx = cb->args[2]; 11599 11622 data = (void *)cb->args[3]; ··· 11603 11624 11604 11625 if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV | 11605 11626 WIPHY_VENDOR_CMD_NEED_NETDEV)) { 11606 - if (!wdev) 11607 - return -EINVAL; 11627 + if (!wdev) { 11628 + err = -EINVAL; 11629 + goto out; 11630 + } 11608 11631 if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV && 11609 - !wdev->netdev) 11610 - return -EINVAL; 11632 + !wdev->netdev) { 11633 + err = -EINVAL; 11634 + goto out; 11635 + } 11611 11636 11612 11637 if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) { 11613 - if (!wdev_running(wdev)) 
11614 - return -ENETDOWN; 11638 + if (!wdev_running(wdev)) { 11639 + err = -ENETDOWN; 11640 + goto out; 11641 + } 11615 11642 } 11616 11643 } 11617 11644
+1
sound/core/seq/seq_clientmgr.c
··· 1832 1832 info->output_pool != client->pool->size)) { 1833 1833 if (snd_seq_write_pool_allocated(client)) { 1834 1834 /* remove all existing cells */ 1835 + snd_seq_pool_mark_closing(client->pool); 1835 1836 snd_seq_queue_client_leave_cells(client->number); 1836 1837 snd_seq_pool_done(client->pool); 1837 1838 }
+3
sound/core/seq/seq_fifo.c
··· 72 72 return; 73 73 *fifo = NULL; 74 74 75 + if (f->pool) 76 + snd_seq_pool_mark_closing(f->pool); 77 + 75 78 snd_seq_fifo_clear(f); 76 79 77 80 /* wake up clients if any */
+13 -4
sound/core/seq/seq_memory.c
··· 415 415 return 0; 416 416 } 417 417 418 + /* refuse the further insertion to the pool */ 419 + void snd_seq_pool_mark_closing(struct snd_seq_pool *pool) 420 + { 421 + unsigned long flags; 422 + 423 + if (snd_BUG_ON(!pool)) 424 + return; 425 + spin_lock_irqsave(&pool->lock, flags); 426 + pool->closing = 1; 427 + spin_unlock_irqrestore(&pool->lock, flags); 428 + } 429 + 418 430 /* remove events */ 419 431 int snd_seq_pool_done(struct snd_seq_pool *pool) 420 432 { ··· 437 425 return -EINVAL; 438 426 439 427 /* wait for closing all threads */ 440 - spin_lock_irqsave(&pool->lock, flags); 441 - pool->closing = 1; 442 - spin_unlock_irqrestore(&pool->lock, flags); 443 - 444 428 if (waitqueue_active(&pool->output_sleep)) 445 429 wake_up(&pool->output_sleep); 446 430 ··· 493 485 *ppool = NULL; 494 486 if (pool == NULL) 495 487 return 0; 488 + snd_seq_pool_mark_closing(pool); 496 489 snd_seq_pool_done(pool); 497 490 kfree(pool); 498 491 return 0;
+1
sound/core/seq/seq_memory.h
··· 84 84 int snd_seq_pool_init(struct snd_seq_pool *pool); 85 85 86 86 /* done pool - free events */ 87 + void snd_seq_pool_mark_closing(struct snd_seq_pool *pool); 87 88 int snd_seq_pool_done(struct snd_seq_pool *pool); 88 89 89 90 /* create pool */
+1 -1
sound/pci/ctxfi/cthw20k1.c
··· 1905 1905 return err; 1906 1906 1907 1907 /* Set DMA transfer mask */ 1908 - if (dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { 1908 + if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { 1909 1909 dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits)); 1910 1910 } else { 1911 1911 dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
+11
sound/pci/hda/patch_conexant.c
··· 261 261 CXT_FIXUP_HP_530, 262 262 CXT_FIXUP_CAP_MIX_AMP_5047, 263 263 CXT_FIXUP_MUTE_LED_EAPD, 264 + CXT_FIXUP_HP_DOCK, 264 265 CXT_FIXUP_HP_SPECTRE, 265 266 CXT_FIXUP_HP_GATE_MIC, 266 267 }; ··· 779 778 .type = HDA_FIXUP_FUNC, 780 779 .v.func = cxt_fixup_mute_led_eapd, 781 780 }, 781 + [CXT_FIXUP_HP_DOCK] = { 782 + .type = HDA_FIXUP_PINS, 783 + .v.pins = (const struct hda_pintbl[]) { 784 + { 0x16, 0x21011020 }, /* line-out */ 785 + { 0x18, 0x2181103f }, /* line-in */ 786 + { } 787 + } 788 + }, 782 789 [CXT_FIXUP_HP_SPECTRE] = { 783 790 .type = HDA_FIXUP_PINS, 784 791 .v.pins = (const struct hda_pintbl[]) { ··· 848 839 SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), 849 840 SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), 850 841 SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC), 842 + SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK), 851 843 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), 852 844 SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), 853 845 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), ··· 881 871 { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" }, 882 872 { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" }, 883 873 { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" }, 874 + { .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" }, 884 875 {} 885 876 }; 886 877
+15 -1
sound/pci/hda/patch_realtek.c
··· 4847 4847 ALC286_FIXUP_HP_GPIO_LED, 4848 4848 ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, 4849 4849 ALC280_FIXUP_HP_DOCK_PINS, 4850 + ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, 4850 4851 ALC280_FIXUP_HP_9480M, 4851 4852 ALC288_FIXUP_DELL_HEADSET_MODE, 4852 4853 ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, ··· 5389 5388 .chained = true, 5390 5389 .chain_id = ALC280_FIXUP_HP_GPIO4 5391 5390 }, 5391 + [ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED] = { 5392 + .type = HDA_FIXUP_PINS, 5393 + .v.pins = (const struct hda_pintbl[]) { 5394 + { 0x1b, 0x21011020 }, /* line-out */ 5395 + { 0x18, 0x2181103f }, /* line-in */ 5396 + { }, 5397 + }, 5398 + .chained = true, 5399 + .chain_id = ALC269_FIXUP_HP_GPIO_MIC1_LED 5400 + }, 5392 5401 [ALC280_FIXUP_HP_9480M] = { 5393 5402 .type = HDA_FIXUP_FUNC, 5394 5403 .v.func = alc280_fixup_hp_9480m, ··· 5658 5647 SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 5659 5648 SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 5660 5649 SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 5661 - SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 5650 + SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED), 5662 5651 SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5663 5652 SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5664 5653 SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), ··· 5827 5816 {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"}, 5828 5817 {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"}, 5829 5818 {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, 5819 + {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"}, 5830 5820 {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, 5831 5821 {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"}, 5832 5822 {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"}, ··· 6102 6090 
ALC295_STANDARD_PINS, 6103 6091 {0x17, 0x21014040}, 6104 6092 {0x18, 0x21a19050}), 6093 + SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, 6094 + ALC295_STANDARD_PINS), 6105 6095 SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 6106 6096 ALC298_STANDARD_PINS, 6107 6097 {0x17, 0x90170110}),
+2 -1
sound/x86/Kconfig
··· 1 1 menuconfig SND_X86 2 - tristate "X86 sound devices" 2 + bool "X86 sound devices" 3 3 depends on X86 4 + default y 4 5 ---help--- 5 6 X86 sound devices that don't fall under SoC or PCI categories 6 7
+1 -1
tools/perf/util/symbol.c
··· 202 202 203 203 /* Last entry */ 204 204 if (curr->end == curr->start) 205 - curr->end = roundup(curr->start, 4096); 205 + curr->end = roundup(curr->start, 4096) + 4096; 206 206 } 207 207 208 208 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
+10 -9
tools/testing/selftests/bpf/Makefile
··· 1 1 LIBDIR := ../../../lib 2 - BPFOBJ := $(LIBDIR)/bpf/bpf.o 2 + BPFDIR := $(LIBDIR)/bpf 3 3 4 - CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR) $(BPFOBJ) 4 + CFLAGS += -Wall -O2 -I../../../include/uapi -I$(LIBDIR) 5 + LDLIBS += -lcap 5 6 6 7 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map 7 8 8 9 TEST_PROGS := test_kmod.sh 9 10 10 - all: $(TEST_GEN_PROGS) 11 + include ../lib.mk 11 12 12 - .PHONY: all clean force 13 + BPFOBJ := $(OUTPUT)/bpf.o 14 + 15 + $(TEST_GEN_PROGS): $(BPFOBJ) 16 + 17 + .PHONY: force 13 18 14 19 # force a rebuild of BPFOBJ when its dependencies are updated 15 20 force: 16 21 17 22 $(BPFOBJ): force 18 - $(MAKE) -C $(dir $(BPFOBJ)) 19 - 20 - $(test_objs): $(BPFOBJ) 21 - 22 - include ../lib.mk 23 + $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
+26 -3
tools/testing/selftests/bpf/test_maps.c
··· 80 80 assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0); 81 81 key = 2; 82 82 assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); 83 - key = 1; 84 - assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); 83 + key = 3; 84 + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 && 85 + errno == E2BIG); 85 86 86 87 /* Check that key = 0 doesn't exist. */ 87 88 key = 0; ··· 109 108 errno == ENOENT); 110 109 111 110 close(fd); 111 + } 112 + 113 + static void test_hashmap_sizes(int task, void *data) 114 + { 115 + int fd, i, j; 116 + 117 + for (i = 1; i <= 512; i <<= 1) 118 + for (j = 1; j <= 1 << 18; j <<= 1) { 119 + fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j, 120 + 2, map_flags); 121 + if (fd < 0) { 122 + printf("Failed to create hashmap key=%d value=%d '%s'\n", 123 + i, j, strerror(errno)); 124 + exit(1); 125 + } 126 + close(fd); 127 + usleep(10); /* give kernel time to destroy */ 128 + } 112 129 } 113 130 114 131 static void test_hashmap_percpu(int task, void *data) ··· 336 317 static void test_arraymap_percpu_many_keys(void) 337 318 { 338 319 unsigned int nr_cpus = bpf_num_possible_cpus(); 339 - unsigned int nr_keys = 20000; 320 + /* nr_keys is not too large otherwise the test stresses percpu 321 + * allocator more than anything else 322 + */ 323 + unsigned int nr_keys = 2000; 340 324 long values[nr_cpus]; 341 325 int key, fd, i; 342 326 ··· 441 419 { 442 420 run_parallel(100, test_hashmap, NULL); 443 421 run_parallel(100, test_hashmap_percpu, NULL); 422 + run_parallel(100, test_hashmap_sizes, NULL); 444 423 445 424 run_parallel(100, test_arraymap, NULL); 446 425 run_parallel(100, test_arraymap_percpu, NULL);