Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'please-pull-mce-bitmap-comment' of git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras into x86/ras

Pull MCE updates from Tony Luck:

"Better comments so we understand our existing machine check
bank bitmaps - prelude to adding another bitmap soon."

Signed-off-by: Ingo Molnar <mingo@kernel.org>

+1011 -537
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 10 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc6 4 + EXTRAVERSION = -rc7 5 5 NAME = Unicycling Gorilla 6 6 7 7 # *DOCUMENTATION*
+11 -1
arch/arm/Kconfig
··· 1189 1189 is not correctly implemented in PL310 as clean lines are not 1190 1190 invalidated as a result of these operations. 1191 1191 1192 + config ARM_ERRATA_643719 1193 + bool "ARM errata: LoUIS bit field in CLIDR register is incorrect" 1194 + depends on CPU_V7 && SMP 1195 + help 1196 + This option enables the workaround for the 643719 Cortex-A9 (prior to 1197 + r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR 1198 + register returns zero when it should return one. The workaround 1199 + corrects this value, ensuring cache maintenance operations which use 1200 + it behave as intended and avoiding data corruption. 1201 + 1192 1202 config ARM_ERRATA_720789 1193 1203 bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID" 1194 1204 depends on CPU_V7 ··· 2016 2006 2017 2007 config KEXEC 2018 2008 bool "Kexec system call (EXPERIMENTAL)" 2019 - depends on (!SMP || HOTPLUG_CPU) 2009 + depends on (!SMP || PM_SLEEP_SMP) 2020 2010 help 2021 2011 kexec is a system call that implements the ability to shutdown your 2022 2012 current kernel, and to start another kernel. It is like a reboot
+2 -1
arch/arm/boot/compressed/Makefile
··· 116 116 117 117 # Make sure files are removed during clean 118 118 extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \ 119 - lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) 119 + lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \ 120 + hyp-stub.S 120 121 121 122 ifeq ($(CONFIG_FUNCTION_TRACER),y) 122 123 ORIG_CFLAGS := $(KBUILD_CFLAGS)
+1 -1
arch/arm/boot/dts/exynos5250-pinctrl.dtsi
··· 763 763 }; 764 764 }; 765 765 766 - pinctrl@03680000 { 766 + pinctrl@03860000 { 767 767 gpz: gpz { 768 768 gpio-controller; 769 769 #gpio-cells = <2>;
+2 -2
arch/arm/boot/dts/exynos5250.dtsi
··· 161 161 interrupts = <0 50 0>; 162 162 }; 163 163 164 - pinctrl_3: pinctrl@03680000 { 164 + pinctrl_3: pinctrl@03860000 { 165 165 compatible = "samsung,exynos5250-pinctrl"; 166 - reg = <0x0368000 0x1000>; 166 + reg = <0x03860000 0x1000>; 167 167 interrupts = <0 47 0>; 168 168 }; 169 169
+1 -3
arch/arm/include/asm/cacheflush.h
··· 320 320 } 321 321 322 322 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE 323 - static inline void flush_kernel_dcache_page(struct page *page) 324 - { 325 - } 323 + extern void flush_kernel_dcache_page(struct page *); 326 324 327 325 #define flush_dcache_mmap_lock(mapping) \ 328 326 spin_lock_irq(&(mapping)->tree_lock)
+4
arch/arm/kernel/machine_kexec.c
··· 134 134 unsigned long reboot_code_buffer_phys; 135 135 void *reboot_code_buffer; 136 136 137 + if (num_online_cpus() > 1) { 138 + pr_err("kexec: error: multiple CPUs still online\n"); 139 + return; 140 + } 137 141 138 142 page_list = image->head & PAGE_MASK; 139 143
+37 -6
arch/arm/kernel/process.c
··· 184 184 185 185 __setup("reboot=", reboot_setup); 186 186 187 + /* 188 + * Called by kexec, immediately prior to machine_kexec(). 189 + * 190 + * This must completely disable all secondary CPUs; simply causing those CPUs 191 + * to execute e.g. a RAM-based pin loop is not sufficient. This allows the 192 + * kexec'd kernel to use any and all RAM as it sees fit, without having to 193 + * avoid any code or data used by any SW CPU pin loop. The CPU hotplug 194 + * functionality embodied in disable_nonboot_cpus() to achieve this. 195 + */ 187 196 void machine_shutdown(void) 188 197 { 189 - #ifdef CONFIG_SMP 190 - smp_send_stop(); 191 - #endif 198 + disable_nonboot_cpus(); 192 199 } 193 200 201 + /* 202 + * Halting simply requires that the secondary CPUs stop performing any 203 + * activity (executing tasks, handling interrupts). smp_send_stop() 204 + * achieves this. 205 + */ 194 206 void machine_halt(void) 195 207 { 196 - machine_shutdown(); 208 + smp_send_stop(); 209 + 197 210 local_irq_disable(); 198 211 while (1); 199 212 } 200 213 214 + /* 215 + * Power-off simply requires that the secondary CPUs stop performing any 216 + * activity (executing tasks, handling interrupts). smp_send_stop() 217 + * achieves this. When the system power is turned off, it will take all CPUs 218 + * with it. 219 + */ 201 220 void machine_power_off(void) 202 221 { 203 - machine_shutdown(); 222 + smp_send_stop(); 223 + 204 224 if (pm_power_off) 205 225 pm_power_off(); 206 226 } 207 227 228 + /* 229 + * Restart requires that the secondary CPUs stop performing any activity 230 + * while the primary CPU resets the system. Systems with a single CPU can 231 + * use soft_restart() as their machine descriptor's .restart hook, since that 232 + * will cause the only available CPU to reset. Systems with multiple CPUs must 233 + * provide a HW restart implementation, to ensure that all CPUs reset at once. 
234 + * This is required so that any code running after reset on the primary CPU 235 + * doesn't have to co-ordinate with other CPUs to ensure they aren't still 236 + * executing pre-reset code, and using RAM that the primary CPU's code wishes 237 + * to use. Implementing such co-ordination would be essentially impossible. 238 + */ 208 239 void machine_restart(char *cmd) 209 240 { 210 - machine_shutdown(); 241 + smp_send_stop(); 211 242 212 243 arm_pm_restart(reboot_mode, cmd); 213 244
-13
arch/arm/kernel/smp.c
··· 651 651 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); 652 652 } 653 653 654 - #ifdef CONFIG_HOTPLUG_CPU 655 - static void smp_kill_cpus(cpumask_t *mask) 656 - { 657 - unsigned int cpu; 658 - for_each_cpu(cpu, mask) 659 - platform_cpu_kill(cpu); 660 - } 661 - #else 662 - static void smp_kill_cpus(cpumask_t *mask) { } 663 - #endif 664 - 665 654 void smp_send_stop(void) 666 655 { 667 656 unsigned long timeout; ··· 668 679 669 680 if (num_online_cpus() > 1) 670 681 pr_warning("SMP: failed to stop secondary CPUs\n"); 671 - 672 - smp_kill_cpus(&mask); 673 682 } 674 683 675 684 /*
+8
arch/arm/mm/cache-v7.S
··· 92 92 mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr 93 93 ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr 94 94 ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr 95 + #ifdef CONFIG_ARM_ERRATA_643719 96 + ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register 97 + ALT_UP(moveq pc, lr) @ LoUU is zero, so nothing to do 98 + ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p? 99 + biceq r2, r2, #0x0000000f @ clear minor revision number 100 + teqeq r2, r1 @ test for errata affected core and if so... 101 + orreqs r3, #(1 << 21) @ fix LoUIS value (and set flags state to 'ne') 102 + #endif 95 103 ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2 96 104 ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2 97 105 moveq pc, lr @ return if level == 0
+33
arch/arm/mm/flush.c
··· 301 301 EXPORT_SYMBOL(flush_dcache_page); 302 302 303 303 /* 304 + * Ensure cache coherency for the kernel mapping of this page. We can 305 + * assume that the page is pinned via kmap. 306 + * 307 + * If the page only exists in the page cache and there are no user 308 + * space mappings, this is a no-op since the page was already marked 309 + * dirty at creation. Otherwise, we need to flush the dirty kernel 310 + * cache lines directly. 311 + */ 312 + void flush_kernel_dcache_page(struct page *page) 313 + { 314 + if (cache_is_vivt() || cache_is_vipt_aliasing()) { 315 + struct address_space *mapping; 316 + 317 + mapping = page_mapping(page); 318 + 319 + if (!mapping || mapping_mapped(mapping)) { 320 + void *addr; 321 + 322 + addr = page_address(page); 323 + /* 324 + * kmap_atomic() doesn't set the page virtual 325 + * address for highmem pages, and 326 + * kunmap_atomic() takes care of cache 327 + * flushing already. 328 + */ 329 + if (!IS_ENABLED(CONFIG_HIGHMEM) || addr) 330 + __cpuc_flush_dcache_area(addr, PAGE_SIZE); 331 + } 332 + } 333 + } 334 + EXPORT_SYMBOL(flush_kernel_dcache_page); 335 + 336 + /* 304 337 * Flush an anonymous page so that users of get_user_pages() 305 338 * can safely access the data. The expected sequence is: 306 339 *
+5 -3
arch/arm/mm/mmu.c
··· 616 616 } while (pte++, addr += PAGE_SIZE, addr != end); 617 617 } 618 618 619 - static void __init map_init_section(pmd_t *pmd, unsigned long addr, 619 + static void __init __map_init_section(pmd_t *pmd, unsigned long addr, 620 620 unsigned long end, phys_addr_t phys, 621 621 const struct mem_type *type) 622 622 { 623 + pmd_t *p = pmd; 624 + 623 625 #ifndef CONFIG_ARM_LPAE 624 626 /* 625 627 * In classic MMU format, puds and pmds are folded in to ··· 640 638 phys += SECTION_SIZE; 641 639 } while (pmd++, addr += SECTION_SIZE, addr != end); 642 640 643 - flush_pmd_entry(pmd); 641 + flush_pmd_entry(p); 644 642 } 645 643 646 644 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, ··· 663 661 */ 664 662 if (type->prot_sect && 665 663 ((addr | next | phys) & ~SECTION_MASK) == 0) { 666 - map_init_section(pmd, addr, next, phys, type); 664 + __map_init_section(pmd, addr, next, phys, type); 667 665 } else { 668 666 alloc_init_pte(pmd, addr, next, 669 667 __phys_to_pfn(phys), type);
+2 -2
arch/arm/mm/proc-v7.S
··· 409 409 */ 410 410 .type __v7_pj4b_proc_info, #object 411 411 __v7_pj4b_proc_info: 412 - .long 0x562f5840 413 - .long 0xfffffff0 412 + .long 0x560f5800 413 + .long 0xff0fff00 414 414 __v7_proc __v7_pj4b_setup 415 415 .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info 416 416
+1
arch/arm64/kernel/perf_event.c
··· 1336 1336 return; 1337 1337 } 1338 1338 1339 + perf_callchain_store(entry, regs->pc); 1339 1340 tail = (struct frame_tail __user *)regs->regs[29]; 1340 1341 1341 1342 while (entry->nr < PERF_MAX_STACK_DEPTH &&
+1
arch/metag/include/asm/hugetlb.h
··· 2 2 #define _ASM_METAG_HUGETLB_H 3 3 4 4 #include <asm/page.h> 5 + #include <asm-generic/hugetlb.h> 5 6 6 7 7 8 static inline int is_hugepage_only_range(struct mm_struct *mm,
+2 -3
arch/mn10300/include/asm/irqflags.h
··· 13 13 #define _ASM_IRQFLAGS_H 14 14 15 15 #include <asm/cpu-regs.h> 16 - #ifndef __ASSEMBLY__ 17 - #include <linux/smp.h> 18 - #endif 16 + /* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */ 17 + #include <asm/smp.h> 19 18 20 19 /* 21 20 * interrupt control
+3 -1
arch/mn10300/include/asm/smp.h
··· 24 24 #ifndef __ASSEMBLY__ 25 25 #include <linux/threads.h> 26 26 #include <linux/cpumask.h> 27 + #include <linux/thread_info.h> 27 28 #endif 28 29 29 30 #ifdef CONFIG_SMP ··· 86 85 extern void smp_init_cpus(void); 87 86 extern void smp_cache_interrupt(void); 88 87 extern void send_IPI_allbutself(int irq); 89 - extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait); 88 + extern int smp_nmi_call_function(void (*func)(void *), void *info, int wait); 90 89 91 90 extern void arch_send_call_function_single_ipi(int cpu); 92 91 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); ··· 101 100 #ifndef __ASSEMBLY__ 102 101 103 102 static inline void smp_init_cpus(void) {} 103 + #define raw_smp_processor_id() 0 104 104 105 105 #endif /* __ASSEMBLY__ */ 106 106 #endif /* CONFIG_SMP */
+2 -2
arch/parisc/include/asm/mmzone.h
··· 27 27 28 28 #define PFNNID_SHIFT (30 - PAGE_SHIFT) 29 29 #define PFNNID_MAP_MAX 512 /* support 512GB */ 30 - extern unsigned char pfnnid_map[PFNNID_MAP_MAX]; 30 + extern signed char pfnnid_map[PFNNID_MAP_MAX]; 31 31 32 32 #ifndef CONFIG_64BIT 33 33 #define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT)) ··· 46 46 i = pfn >> PFNNID_SHIFT; 47 47 BUG_ON(i >= ARRAY_SIZE(pfnnid_map)); 48 48 49 - return (int)pfnnid_map[i]; 49 + return pfnnid_map[i]; 50 50 } 51 51 52 52 static inline int pfn_valid(int pfn)
+5
arch/parisc/include/asm/pci.h
··· 225 225 return channel ? 15 : 14; 226 226 } 227 227 228 + #define HAVE_PCI_MMAP 229 + 230 + extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, 231 + enum pci_mmap_state mmap_state, int write_combine); 232 + 228 233 #endif /* __ASM_PARISC_PCI_H */
+1
arch/parisc/kernel/hardware.c
··· 1205 1205 {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, 1206 1206 {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, 1207 1207 {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, 1208 + {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"}, 1208 1209 {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, 1209 1210 {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, 1210 1211 {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"},
+36 -36
arch/parisc/kernel/pacache.S
··· 860 860 #endif 861 861 862 862 ldil L%dcache_stride, %r1 863 - ldw R%dcache_stride(%r1), %r1 863 + ldw R%dcache_stride(%r1), r31 864 864 865 865 #ifdef CONFIG_64BIT 866 866 depdi,z 1, 63-PAGE_SHIFT,1, %r25 ··· 868 868 depwi,z 1, 31-PAGE_SHIFT,1, %r25 869 869 #endif 870 870 add %r28, %r25, %r25 871 - sub %r25, %r1, %r25 871 + sub %r25, r31, %r25 872 872 873 873 874 - 1: fdc,m %r1(%r28) 875 - fdc,m %r1(%r28) 876 - fdc,m %r1(%r28) 877 - fdc,m %r1(%r28) 878 - fdc,m %r1(%r28) 879 - fdc,m %r1(%r28) 880 - fdc,m %r1(%r28) 881 - fdc,m %r1(%r28) 882 - fdc,m %r1(%r28) 883 - fdc,m %r1(%r28) 884 - fdc,m %r1(%r28) 885 - fdc,m %r1(%r28) 886 - fdc,m %r1(%r28) 887 - fdc,m %r1(%r28) 888 - fdc,m %r1(%r28) 874 + 1: fdc,m r31(%r28) 875 + fdc,m r31(%r28) 876 + fdc,m r31(%r28) 877 + fdc,m r31(%r28) 878 + fdc,m r31(%r28) 879 + fdc,m r31(%r28) 880 + fdc,m r31(%r28) 881 + fdc,m r31(%r28) 882 + fdc,m r31(%r28) 883 + fdc,m r31(%r28) 884 + fdc,m r31(%r28) 885 + fdc,m r31(%r28) 886 + fdc,m r31(%r28) 887 + fdc,m r31(%r28) 888 + fdc,m r31(%r28) 889 889 cmpb,COND(<<) %r28, %r25,1b 890 - fdc,m %r1(%r28) 890 + fdc,m r31(%r28) 891 891 892 892 sync 893 893 ··· 936 936 #endif 937 937 938 938 ldil L%icache_stride, %r1 939 - ldw R%icache_stride(%r1), %r1 939 + ldw R%icache_stride(%r1), %r31 940 940 941 941 #ifdef CONFIG_64BIT 942 942 depdi,z 1, 63-PAGE_SHIFT,1, %r25 ··· 944 944 depwi,z 1, 31-PAGE_SHIFT,1, %r25 945 945 #endif 946 946 add %r28, %r25, %r25 947 - sub %r25, %r1, %r25 947 + sub %r25, %r31, %r25 948 948 949 949 950 950 /* fic only has the type 26 form on PA1.1, requiring an 951 951 * explicit space specification, so use %sr4 */ 952 - 1: fic,m %r1(%sr4,%r28) 953 - fic,m %r1(%sr4,%r28) 954 - fic,m %r1(%sr4,%r28) 955 - fic,m %r1(%sr4,%r28) 956 - fic,m %r1(%sr4,%r28) 957 - fic,m %r1(%sr4,%r28) 958 - fic,m %r1(%sr4,%r28) 959 - fic,m %r1(%sr4,%r28) 960 - fic,m %r1(%sr4,%r28) 961 - fic,m %r1(%sr4,%r28) 962 - fic,m %r1(%sr4,%r28) 963 - fic,m %r1(%sr4,%r28) 964 - fic,m %r1(%sr4,%r28) 965 - fic,m 
%r1(%sr4,%r28) 966 - fic,m %r1(%sr4,%r28) 952 + 1: fic,m %r31(%sr4,%r28) 953 + fic,m %r31(%sr4,%r28) 954 + fic,m %r31(%sr4,%r28) 955 + fic,m %r31(%sr4,%r28) 956 + fic,m %r31(%sr4,%r28) 957 + fic,m %r31(%sr4,%r28) 958 + fic,m %r31(%sr4,%r28) 959 + fic,m %r31(%sr4,%r28) 960 + fic,m %r31(%sr4,%r28) 961 + fic,m %r31(%sr4,%r28) 962 + fic,m %r31(%sr4,%r28) 963 + fic,m %r31(%sr4,%r28) 964 + fic,m %r31(%sr4,%r28) 965 + fic,m %r31(%sr4,%r28) 966 + fic,m %r31(%sr4,%r28) 967 967 cmpb,COND(<<) %r28, %r25,1b 968 - fic,m %r1(%sr4,%r28) 968 + fic,m %r31(%sr4,%r28) 969 969 970 970 sync 971 971
+27
arch/parisc/kernel/pci.c
··· 220 220 } 221 221 222 222 223 + int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, 224 + enum pci_mmap_state mmap_state, int write_combine) 225 + { 226 + unsigned long prot; 227 + 228 + /* 229 + * I/O space can be accessed via normal processor loads and stores on 230 + * this platform but for now we elect not to do this and portable 231 + * drivers should not do this anyway. 232 + */ 233 + if (mmap_state == pci_mmap_io) 234 + return -EINVAL; 235 + 236 + if (write_combine) 237 + return -EINVAL; 238 + 239 + /* 240 + * Ignore write-combine; for now only return uncached mappings. 241 + */ 242 + prot = pgprot_val(vma->vm_page_prot); 243 + prot |= _PAGE_NO_CACHE; 244 + vma->vm_page_prot = __pgprot(prot); 245 + 246 + return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 247 + vma->vm_end - vma->vm_start, vma->vm_page_prot); 248 + } 249 + 223 250 /* 224 251 * A driver is enabling the device. We make sure that all the appropriate 225 252 * bits are set to allow the device to operate as the driver is expecting.
+1 -1
arch/parisc/mm/init.c
··· 47 47 48 48 #ifdef CONFIG_DISCONTIGMEM 49 49 struct node_map_data node_data[MAX_NUMNODES] __read_mostly; 50 - unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; 50 + signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; 51 51 #endif 52 52 53 53 static struct resource data_resource = {
+2 -1
arch/powerpc/kvm/booke.c
··· 673 673 ret = s; 674 674 goto out; 675 675 } 676 - kvmppc_lazy_ee_enable(); 677 676 678 677 kvm_guest_enter(); 679 678 ··· 697 698 698 699 kvmppc_load_guest_fp(vcpu); 699 700 #endif 701 + 702 + kvmppc_lazy_ee_enable(); 700 703 701 704 ret = __kvmppc_vcpu_run(kvm_run, vcpu); 702 705
+7 -1
arch/powerpc/mm/hugetlbpage.c
··· 592 592 do { 593 593 pmd = pmd_offset(pud, addr); 594 594 next = pmd_addr_end(addr, end); 595 - if (pmd_none_or_clear_bad(pmd)) 595 + if (!is_hugepd(pmd)) { 596 + /* 597 + * if it is not hugepd pointer, we should already find 598 + * it cleared. 599 + */ 600 + WARN_ON(!pmd_none_or_clear_bad(pmd)); 596 601 continue; 602 + } 597 603 #ifdef CONFIG_PPC_FSL_BOOK3E 598 604 /* 599 605 * Increment next by the size of the huge mapping since
+1
arch/sparc/include/asm/Kbuild
··· 6 6 generic-y += div64.h 7 7 generic-y += emergency-restart.h 8 8 generic-y += exec.h 9 + generic-y += linkage.h 9 10 generic-y += local64.h 10 11 generic-y += mutex.h 11 12 generic-y += irq_regs.h
+1 -1
arch/sparc/include/asm/leon.h
··· 135 135 136 136 #ifdef CONFIG_SMP 137 137 # define LEON3_IRQ_IPI_DEFAULT 13 138 - # define LEON3_IRQ_TICKER (leon3_ticker_irq) 138 + # define LEON3_IRQ_TICKER (leon3_gptimer_irq) 139 139 # define LEON3_IRQ_CROSS_CALL 15 140 140 #endif 141 141
+1
arch/sparc/include/asm/leon_amba.h
··· 47 47 #define LEON3_GPTIMER_LD 4 48 48 #define LEON3_GPTIMER_IRQEN 8 49 49 #define LEON3_GPTIMER_SEPIRQ 8 50 + #define LEON3_GPTIMER_TIMERS 0x7 50 51 51 52 #define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */ 52 53 /* 0 = hold scalar and counter */
-6
arch/sparc/include/asm/linkage.h
··· 1 - #ifndef __ASM_LINKAGE_H 2 - #define __ASM_LINKAGE_H 3 - 4 - /* Nothing to see here... */ 5 - 6 - #endif
+2 -1
arch/sparc/kernel/ds.c
··· 843 843 unsigned long len; 844 844 845 845 strcpy(full_boot_str, "boot "); 846 - strcpy(full_boot_str + strlen("boot "), boot_command); 846 + strlcpy(full_boot_str + strlen("boot "), boot_command, 847 + sizeof(full_boot_str + strlen("boot "))); 847 848 len = strlen(full_boot_str); 848 849 849 850 if (reboot_data_supported) {
+24 -44
arch/sparc/kernel/leon_kernel.c
··· 38 38 39 39 unsigned long leon3_gptimer_irq; /* interrupt controller irq number */ 40 40 unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */ 41 - int leon3_ticker_irq; /* Timer ticker IRQ */ 42 41 unsigned int sparc_leon_eirq; 43 42 #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu]) 44 43 #define LEON_IACK (&leon3_irqctrl_regs->iclear) ··· 277 278 278 279 leon_clear_profile_irq(cpu); 279 280 281 + if (cpu == boot_cpu_id) 282 + timer_interrupt(irq, NULL); 283 + 280 284 ce = &per_cpu(sparc32_clockevent, cpu); 281 285 282 286 irq_enter(); ··· 301 299 int icsel; 302 300 int ampopts; 303 301 int err; 302 + u32 config; 304 303 305 304 sparc_config.get_cycles_offset = leon_cycles_offset; 306 305 sparc_config.cs_period = 1000000 / HZ; ··· 380 377 LEON3_BYPASS_STORE_PA( 381 378 &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0); 382 379 383 - #ifdef CONFIG_SMP 384 - leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx; 385 - 386 - if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) & 387 - (1<<LEON3_GPTIMER_SEPIRQ))) { 388 - printk(KERN_ERR "timer not configured with separate irqs\n"); 389 - BUG(); 390 - } 391 - 392 - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].val, 393 - 0); 394 - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld, 395 - (((1000000/HZ) - 1))); 396 - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl, 397 - 0); 398 - #endif 399 - 400 380 /* 401 381 * The IRQ controller may (if implemented) consist of multiple 402 382 * IRQ controllers, each mapped on a 4Kb boundary. 
··· 402 416 if (eirq != 0) 403 417 leon_eirq_setup(eirq); 404 418 405 - irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx); 406 - err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); 407 - if (err) { 408 - printk(KERN_ERR "unable to attach timer IRQ%d\n", irq); 409 - prom_halt(); 410 - } 411 - 412 419 #ifdef CONFIG_SMP 413 420 { 414 421 unsigned long flags; ··· 418 439 } 419 440 #endif 420 441 442 + config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config); 443 + if (config & (1 << LEON3_GPTIMER_SEPIRQ)) 444 + leon3_gptimer_irq += leon3_gptimer_idx; 445 + else if ((config & LEON3_GPTIMER_TIMERS) > 1) 446 + pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n"); 447 + 448 + #ifdef CONFIG_SMP 449 + /* Install per-cpu IRQ handler for broadcasted ticker */ 450 + irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq, 451 + "per-cpu", 0); 452 + err = request_irq(irq, leon_percpu_timer_ce_interrupt, 453 + IRQF_PERCPU | IRQF_TIMER, "timer", NULL); 454 + #else 455 + irq = _leon_build_device_irq(NULL, leon3_gptimer_irq); 456 + err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); 457 + #endif 458 + if (err) { 459 + pr_err("Unable to attach timer IRQ%d\n", irq); 460 + prom_halt(); 461 + } 421 462 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 422 463 LEON3_GPTIMER_EN | 423 464 LEON3_GPTIMER_RL | 424 465 LEON3_GPTIMER_LD | 425 466 LEON3_GPTIMER_IRQEN); 426 - 427 - #ifdef CONFIG_SMP 428 - /* Install per-cpu IRQ handler for broadcasted ticker */ 429 - irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq, 430 - "per-cpu", 0); 431 - err = request_irq(irq, leon_percpu_timer_ce_interrupt, 432 - IRQF_PERCPU | IRQF_TIMER, "ticker", 433 - NULL); 434 - if (err) { 435 - printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq); 436 - prom_halt(); 437 - } 438 - 439 - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl, 440 - LEON3_GPTIMER_EN | 441 
- LEON3_GPTIMER_RL | 442 - LEON3_GPTIMER_LD | 443 - LEON3_GPTIMER_IRQEN); 444 - #endif 445 467 return; 446 468 bad: 447 469 printk(KERN_ERR "No Timer/irqctrl found\n");
+3 -5
arch/sparc/kernel/leon_pci_grpci1.c
··· 536 536 537 537 /* find device register base address */ 538 538 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); 539 - regs = devm_request_and_ioremap(&ofdev->dev, res); 540 - if (!regs) { 541 - dev_err(&ofdev->dev, "io-regs mapping failed\n"); 542 - return -EADDRNOTAVAIL; 543 - } 539 + regs = devm_ioremap_resource(&ofdev->dev, res); 540 + if (IS_ERR(regs)) 541 + return PTR_ERR(regs); 544 542 545 543 /* 546 544 * check that we're in Host Slot and that we can act as a Host Bridge
+7
arch/sparc/kernel/leon_pmc.c
··· 47 47 * MMU does not get a TLB miss here by using the MMU BYPASS ASI. 48 48 */ 49 49 register unsigned int address = (unsigned int)leon3_irqctrl_regs; 50 + 51 + /* Interrupts need to be enabled to not hang the CPU */ 52 + local_irq_enable(); 53 + 50 54 __asm__ __volatile__ ( 51 55 "wr %%g0, %%asr19\n" 52 56 "lda [%0] %1, %%g0\n" ··· 64 60 */ 65 61 void pmc_leon_idle(void) 66 62 { 63 + /* Interrupts need to be enabled to not hang the CPU */ 64 + local_irq_enable(); 65 + 67 66 /* For systems without power-down, this will be no-op */ 68 67 __asm__ __volatile__ ("wr %g0, %asr19\n\t"); 69 68 }
+1 -1
arch/sparc/kernel/setup_32.c
··· 304 304 305 305 /* Initialize PROM console and command line. */ 306 306 *cmdline_p = prom_getbootargs(); 307 - strcpy(boot_command_line, *cmdline_p); 307 + strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); 308 308 parse_early_param(); 309 309 310 310 boot_flags_init(*cmdline_p);
+1 -1
arch/sparc/kernel/setup_64.c
··· 555 555 { 556 556 /* Initialize PROM console and command line. */ 557 557 *cmdline_p = prom_getbootargs(); 558 - strcpy(boot_command_line, *cmdline_p); 558 + strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); 559 559 parse_early_param(); 560 560 561 561 boot_flags_init(*cmdline_p);
+8 -1
arch/sparc/mm/init_64.c
··· 1098 1098 m->size = *val; 1099 1099 val = mdesc_get_property(md, node, 1100 1100 "address-congruence-offset", NULL); 1101 - m->offset = *val; 1101 + 1102 + /* The address-congruence-offset property is optional. 1103 + * Explicitly zero it to identify this. 1104 + */ 1105 + if (val) 1106 + m->offset = *val; 1107 + else 1108 + m->offset = 0UL; 1102 1109 1103 1110 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", 1104 1111 count - 1, m->base, m->size, m->offset);
+1 -1
arch/sparc/mm/tlb.c
··· 85 85 } 86 86 87 87 if (!tb->active) { 88 - global_flush_tlb_page(mm, vaddr); 89 88 flush_tsb_user_page(mm, vaddr); 89 + global_flush_tlb_page(mm, vaddr); 90 90 goto out; 91 91 } 92 92
+7 -5
arch/sparc/prom/bootstr_32.c
··· 23 23 return barg_buf; 24 24 } 25 25 26 - switch(prom_vers) { 26 + switch (prom_vers) { 27 27 case PROM_V0: 28 28 cp = barg_buf; 29 29 /* Start from 1 and go over fd(0,0,0)kernel */ 30 - for(iter = 1; iter < 8; iter++) { 30 + for (iter = 1; iter < 8; iter++) { 31 31 arg = (*(romvec->pv_v0bootargs))->argv[iter]; 32 32 if (arg == NULL) 33 33 break; 34 - while(*arg != 0) { 34 + while (*arg != 0) { 35 35 /* Leave place for space and null. */ 36 - if(cp >= barg_buf + BARG_LEN-2){ 36 + if (cp >= barg_buf + BARG_LEN - 2) 37 37 /* We might issue a warning here. */ 38 38 break; 39 - } 40 39 *cp++ = *arg++; 41 40 } 42 41 *cp++ = ' '; 42 + if (cp >= barg_buf + BARG_LEN - 1) 43 + /* We might issue a warning here. */ 44 + break; 43 45 } 44 46 *cp = 0; 45 47 break;
+8 -8
arch/sparc/prom/tree_64.c
··· 39 39 return prom_node_to_node("child", node); 40 40 } 41 41 42 - inline phandle prom_getchild(phandle node) 42 + phandle prom_getchild(phandle node) 43 43 { 44 44 phandle cnode; 45 45 ··· 72 72 return prom_node_to_node(prom_peer_name, node); 73 73 } 74 74 75 - inline phandle prom_getsibling(phandle node) 75 + phandle prom_getsibling(phandle node) 76 76 { 77 77 phandle sibnode; 78 78 ··· 89 89 /* Return the length in bytes of property 'prop' at node 'node'. 90 90 * Return -1 on error. 91 91 */ 92 - inline int prom_getproplen(phandle node, const char *prop) 92 + int prom_getproplen(phandle node, const char *prop) 93 93 { 94 94 unsigned long args[6]; 95 95 ··· 113 113 * 'buffer' which has a size of 'bufsize'. If the acquisition 114 114 * was successful the length will be returned, else -1 is returned. 115 115 */ 116 - inline int prom_getproperty(phandle node, const char *prop, 117 - char *buffer, int bufsize) 116 + int prom_getproperty(phandle node, const char *prop, 117 + char *buffer, int bufsize) 118 118 { 119 119 unsigned long args[8]; 120 120 int plen; ··· 141 141 /* Acquire an integer property and return its value. Returns -1 142 142 * on failure. 143 143 */ 144 - inline int prom_getint(phandle node, const char *prop) 144 + int prom_getint(phandle node, const char *prop) 145 145 { 146 146 int intprop; 147 147 ··· 235 235 /* Return the first property type for node 'node'. 236 236 * buffer should be at least 32B in length 237 237 */ 238 - inline char *prom_firstprop(phandle node, char *buffer) 238 + char *prom_firstprop(phandle node, char *buffer) 239 239 { 240 240 unsigned long args[7]; 241 241 ··· 261 261 * at node 'node' . Returns NULL string if no more 262 262 * property types for this node. 263 263 */ 264 - inline char *prom_nextprop(phandle node, const char *oprop, char *buffer) 264 + char *prom_nextprop(phandle node, const char *oprop, char *buffer) 265 265 { 266 266 unsigned long args[7]; 267 267 char buf[32];
+2
arch/tile/lib/exports.c
··· 84 84 EXPORT_SYMBOL(__ashrdi3); 85 85 uint64_t __ashldi3(uint64_t, unsigned int); 86 86 EXPORT_SYMBOL(__ashldi3); 87 + int __ffsdi2(uint64_t); 88 + EXPORT_SYMBOL(__ffsdi2); 87 89 #endif
+1 -1
arch/um/drivers/mconsole_kern.c
··· 147 147 } 148 148 149 149 do { 150 - loff_t pos; 150 + loff_t pos = file->f_pos; 151 151 mm_segment_t old_fs = get_fs(); 152 152 set_fs(KERNEL_DS); 153 153 len = vfs_read(file, buf, PAGE_SIZE - 1, &pos);
+1
arch/x86/Kconfig
··· 2265 2265 config IA32_EMULATION 2266 2266 bool "IA32 Emulation" 2267 2267 depends on X86_64 2268 + select BINFMT_ELF 2268 2269 select COMPAT_BINFMT_ELF 2269 2270 select HAVE_UID16 2270 2271 ---help---
+32 -16
arch/x86/crypto/aesni-intel_asm.S
··· 2681 2681 addq %rcx, KEYP 2682 2682 2683 2683 movdqa IV, STATE1 2684 - pxor 0x00(INP), STATE1 2684 + movdqu 0x00(INP), INC 2685 + pxor INC, STATE1 2685 2686 movdqu IV, 0x00(OUTP) 2686 2687 2687 2688 _aesni_gf128mul_x_ble() 2688 2689 movdqa IV, STATE2 2689 - pxor 0x10(INP), STATE2 2690 + movdqu 0x10(INP), INC 2691 + pxor INC, STATE2 2690 2692 movdqu IV, 0x10(OUTP) 2691 2693 2692 2694 _aesni_gf128mul_x_ble() 2693 2695 movdqa IV, STATE3 2694 - pxor 0x20(INP), STATE3 2696 + movdqu 0x20(INP), INC 2697 + pxor INC, STATE3 2695 2698 movdqu IV, 0x20(OUTP) 2696 2699 2697 2700 _aesni_gf128mul_x_ble() 2698 2701 movdqa IV, STATE4 2699 - pxor 0x30(INP), STATE4 2702 + movdqu 0x30(INP), INC 2703 + pxor INC, STATE4 2700 2704 movdqu IV, 0x30(OUTP) 2701 2705 2702 2706 call *%r11 2703 2707 2704 - pxor 0x00(OUTP), STATE1 2708 + movdqu 0x00(OUTP), INC 2709 + pxor INC, STATE1 2705 2710 movdqu STATE1, 0x00(OUTP) 2706 2711 2707 2712 _aesni_gf128mul_x_ble() 2708 2713 movdqa IV, STATE1 2709 - pxor 0x40(INP), STATE1 2714 + movdqu 0x40(INP), INC 2715 + pxor INC, STATE1 2710 2716 movdqu IV, 0x40(OUTP) 2711 2717 2712 - pxor 0x10(OUTP), STATE2 2718 + movdqu 0x10(OUTP), INC 2719 + pxor INC, STATE2 2713 2720 movdqu STATE2, 0x10(OUTP) 2714 2721 2715 2722 _aesni_gf128mul_x_ble() 2716 2723 movdqa IV, STATE2 2717 - pxor 0x50(INP), STATE2 2724 + movdqu 0x50(INP), INC 2725 + pxor INC, STATE2 2718 2726 movdqu IV, 0x50(OUTP) 2719 2727 2720 - pxor 0x20(OUTP), STATE3 2728 + movdqu 0x20(OUTP), INC 2729 + pxor INC, STATE3 2721 2730 movdqu STATE3, 0x20(OUTP) 2722 2731 2723 2732 _aesni_gf128mul_x_ble() 2724 2733 movdqa IV, STATE3 2725 - pxor 0x60(INP), STATE3 2734 + movdqu 0x60(INP), INC 2735 + pxor INC, STATE3 2726 2736 movdqu IV, 0x60(OUTP) 2727 2737 2728 - pxor 0x30(OUTP), STATE4 2738 + movdqu 0x30(OUTP), INC 2739 + pxor INC, STATE4 2729 2740 movdqu STATE4, 0x30(OUTP) 2730 2741 2731 2742 _aesni_gf128mul_x_ble() 2732 2743 movdqa IV, STATE4 2733 - pxor 0x70(INP), STATE4 2744 + movdqu 0x70(INP), INC 2745 + 
pxor INC, STATE4 2734 2746 movdqu IV, 0x70(OUTP) 2735 2747 2736 2748 _aesni_gf128mul_x_ble() ··· 2750 2738 2751 2739 call *%r11 2752 2740 2753 - pxor 0x40(OUTP), STATE1 2741 + movdqu 0x40(OUTP), INC 2742 + pxor INC, STATE1 2754 2743 movdqu STATE1, 0x40(OUTP) 2755 2744 2756 - pxor 0x50(OUTP), STATE2 2745 + movdqu 0x50(OUTP), INC 2746 + pxor INC, STATE2 2757 2747 movdqu STATE2, 0x50(OUTP) 2758 2748 2759 - pxor 0x60(OUTP), STATE3 2749 + movdqu 0x60(OUTP), INC 2750 + pxor INC, STATE3 2760 2751 movdqu STATE3, 0x60(OUTP) 2761 2752 2762 - pxor 0x70(OUTP), STATE4 2753 + movdqu 0x70(OUTP), INC 2754 + pxor INC, STATE4 2763 2755 movdqu STATE4, 0x70(OUTP) 2764 2756 2765 2757 ret
+1 -1
arch/x86/ia32/ia32_aout.c
··· 192 192 /* struct user */ 193 193 DUMP_WRITE(&dump, sizeof(dump)); 194 194 /* Now dump all of the user data. Include malloced stuff as well */ 195 - DUMP_SEEK(PAGE_SIZE); 195 + DUMP_SEEK(PAGE_SIZE - sizeof(dump)); 196 196 /* now we start writing out the user space info */ 197 197 set_fs(USER_DS); 198 198 /* Dump the data area */
+5
arch/x86/include/asm/irq.h
··· 41 41 42 42 extern void init_ISA_irqs(void); 43 43 44 + #ifdef CONFIG_X86_LOCAL_APIC 45 + void arch_trigger_all_cpu_backtrace(void); 46 + #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace 47 + #endif 48 + 44 49 #endif /* _ASM_X86_IRQ_H */
+2 -2
arch/x86/include/asm/microcode.h
··· 60 60 #ifdef CONFIG_MICROCODE_EARLY 61 61 #define MAX_UCODE_COUNT 128 62 62 extern void __init load_ucode_bsp(void); 63 - extern __init void load_ucode_ap(void); 63 + extern void __cpuinit load_ucode_ap(void); 64 64 extern int __init save_microcode_in_initrd(void); 65 65 #else 66 66 static inline void __init load_ucode_bsp(void) {} 67 - static inline __init void load_ucode_ap(void) {} 67 + static inline void __cpuinit load_ucode_ap(void) {} 68 68 static inline int __init save_microcode_in_initrd(void) 69 69 { 70 70 return 0;
+1 -3
arch/x86/include/asm/nmi.h
··· 18 18 void __user *, size_t *, loff_t *); 19 19 extern int unknown_nmi_panic; 20 20 21 - void arch_trigger_all_cpu_backtrace(void); 22 - #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace 23 - #endif 21 + #endif /* CONFIG_X86_LOCAL_APIC */ 24 22 25 23 #define NMI_FLAG_FIRST 1 26 24
+1
arch/x86/kernel/apic/hw_nmi.c
··· 9 9 * 10 10 */ 11 11 #include <asm/apic.h> 12 + #include <asm/nmi.h> 12 13 13 14 #include <linux/cpumask.h> 14 15 #include <linux/kdebug.h>
+4 -1
arch/x86/kernel/cpu/mcheck/mce.c
··· 89 89 static DEFINE_PER_CPU(struct mce, mces_seen); 90 90 static int cpu_missing; 91 91 92 - /* MCA banks polled by the period polling timer for corrected events */ 92 + /* 93 + * MCA banks polled by the period polling timer for corrected events. 94 + * With Intel CMCI, this only has MCA banks which do not support CMCI (if any). 95 + */ 93 96 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { 94 97 [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL 95 98 };
+12
arch/x86/kernel/cpu/mcheck/mce_intel.c
··· 24 24 * Also supports reliable discovery of shared banks. 25 25 */ 26 26 27 + /* 28 + * CMCI can be delivered to multiple cpus that share a machine check bank 29 + * so we need to designate a single cpu to process errors logged in each bank 30 + * in the interrupt handler (otherwise we would have many races and potential 31 + * double reporting of the same error). 32 + * Note that this can change when a cpu is offlined or brought online since 33 + * some MCA banks are shared across cpus. When a cpu is offlined, cmci_clear() 34 + * disables CMCI on all banks owned by the cpu and clears this bitfield. At 35 + * this point, cmci_rediscover() kicks in and a different cpu may end up 36 + * taking ownership of some of the shared MCA banks that were previously 37 + * owned by the offlined cpu. 38 + */ 27 39 static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned); 28 40 29 41 /*
+4 -4
arch/x86/kernel/cpu/mtrr/cleanup.c
··· 714 714 if (mtrr_tom2) 715 715 x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; 716 716 717 - nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size); 718 717 /* 719 718 * [0, 1M) should always be covered by var mtrr with WB 720 719 * and fixed mtrrs should take effect before var mtrr for it: 721 720 */ 722 - nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0, 721 + nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0, 723 722 1ULL<<(20 - PAGE_SHIFT)); 724 - /* Sort the ranges: */ 725 - sort_range(range, nr_range); 723 + /* add from var mtrr at last */ 724 + nr_range = x86_get_mtrr_mem_range(range, nr_range, 725 + x_remove_base, x_remove_size); 726 726 727 727 range_sums = sum_ranges(range, nr_range); 728 728 printk(KERN_INFO "total RAM covered: %ldM\n",
+1 -1
arch/x86/kernel/cpu/perf_event_intel.c
··· 165 165 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), 166 166 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), 167 167 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 168 - INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 169 168 EVENT_EXTRA_END 170 169 }; 171 170 172 171 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { 173 172 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), 174 173 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), 174 + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 175 175 EVENT_EXTRA_END 176 176 }; 177 177
+1
arch/x86/kernel/kvmclock.c
··· 242 242 if (!mem) 243 243 return; 244 244 hv_clock = __va(mem); 245 + memset(hv_clock, 0, size); 245 246 246 247 if (kvm_register_clock("boot clock")) { 247 248 hv_clock = NULL;
-12
arch/x86/kernel/process.c
··· 277 277 } 278 278 #endif 279 279 280 - void arch_cpu_idle_prepare(void) 281 - { 282 - /* 283 - * If we're the non-boot CPU, nothing set the stack canary up 284 - * for us. CPU0 already has it initialized but no harm in 285 - * doing it again. This is a good place for updating it, as 286 - * we wont ever return from this function (so the invalid 287 - * canaries already on the stack wont ever trigger). 288 - */ 289 - boot_init_stack_canary(); 290 - } 291 - 292 280 void arch_cpu_idle_enter(void) 293 281 { 294 282 local_touch_nmi();
+4 -4
arch/x86/kernel/smpboot.c
··· 372 372 373 373 void __cpuinit set_cpu_sibling_map(int cpu) 374 374 { 375 - bool has_mc = boot_cpu_data.x86_max_cores > 1; 376 375 bool has_smt = smp_num_siblings > 1; 376 + bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; 377 377 struct cpuinfo_x86 *c = &cpu_data(cpu); 378 378 struct cpuinfo_x86 *o; 379 379 int i; 380 380 381 381 cpumask_set_cpu(cpu, cpu_sibling_setup_mask); 382 382 383 - if (!has_smt && !has_mc) { 383 + if (!has_mp) { 384 384 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); 385 385 cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); 386 386 cpumask_set_cpu(cpu, cpu_core_mask(cpu)); ··· 394 394 if ((i == cpu) || (has_smt && match_smt(c, o))) 395 395 link_mask(sibling, cpu, i); 396 396 397 - if ((i == cpu) || (has_mc && match_llc(c, o))) 397 + if ((i == cpu) || (has_mp && match_llc(c, o))) 398 398 link_mask(llc_shared, cpu, i); 399 399 400 400 } ··· 406 406 for_each_cpu(i, cpu_sibling_setup_mask) { 407 407 o = &cpu_data(i); 408 408 409 - if ((i == cpu) || (has_mc && match_mc(c, o))) { 409 + if ((i == cpu) || (has_mp && match_mc(c, o))) { 410 410 link_mask(core, cpu, i); 411 411 412 412 /*
+2 -3
arch/x86/kvm/x86.c
··· 582 582 if (index != XCR_XFEATURE_ENABLED_MASK) 583 583 return 1; 584 584 xcr0 = xcr; 585 - if (kvm_x86_ops->get_cpl(vcpu) != 0) 586 - return 1; 587 585 if (!(xcr0 & XSTATE_FP)) 588 586 return 1; 589 587 if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) ··· 595 597 596 598 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) 597 599 { 598 - if (__kvm_set_xcr(vcpu, index, xcr)) { 600 + if (kvm_x86_ops->get_cpl(vcpu) != 0 || 601 + __kvm_set_xcr(vcpu, index, xcr)) { 599 602 kvm_inject_gp(vcpu, 0); 600 603 return 1; 601 604 }
+6 -1
arch/x86/platform/efi/efi.c
··· 1069 1069 * that by attempting to use more space than is available. 1070 1070 */ 1071 1071 unsigned long dummy_size = remaining_size + 1024; 1072 - void *dummy = kmalloc(dummy_size, GFP_ATOMIC); 1072 + void *dummy = kzalloc(dummy_size, GFP_ATOMIC); 1073 + 1074 + if (!dummy) 1075 + return EFI_OUT_OF_RESOURCES; 1073 1076 1074 1077 status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, 1075 1078 EFI_VARIABLE_NON_VOLATILE | ··· 1091 1088 EFI_VARIABLE_RUNTIME_ACCESS, 1092 1089 0, dummy); 1093 1090 } 1091 + 1092 + kfree(dummy); 1094 1093 1095 1094 /* 1096 1095 * The runtime code may now have triggered a garbage collection
+15 -6
drivers/acpi/acpi_lpss.c
··· 164 164 if (dev_desc->clk_required) { 165 165 ret = register_device_clock(adev, pdata); 166 166 if (ret) { 167 - /* 168 - * Skip the device, but don't terminate the namespace 169 - * scan. 170 - */ 171 - kfree(pdata); 172 - return 0; 167 + /* Skip the device, but continue the namespace scan. */ 168 + ret = 0; 169 + goto err_out; 173 170 } 171 + } 172 + 173 + /* 174 + * This works around a known issue in ACPI tables where LPSS devices 175 + * have _PS0 and _PS3 without _PSC (and no power resources), so 176 + * acpi_bus_init_power() will assume that the BIOS has put them into D0. 177 + */ 178 + ret = acpi_device_fix_up_power(adev); 179 + if (ret) { 180 + /* Skip the device, but continue the namespace scan. */ 181 + ret = 0; 182 + goto err_out; 174 183 } 175 184 176 185 adev->driver_data = pdata;
+20
drivers/acpi/device_pm.c
··· 290 290 return 0; 291 291 } 292 292 293 + /** 294 + * acpi_device_fix_up_power - Force device with missing _PSC into D0. 295 + * @device: Device object whose power state is to be fixed up. 296 + * 297 + * Devices without power resources and _PSC, but having _PS0 and _PS3 defined, 298 + * are assumed to be put into D0 by the BIOS. However, in some cases that may 299 + * not be the case and this function should be used then. 300 + */ 301 + int acpi_device_fix_up_power(struct acpi_device *device) 302 + { 303 + int ret = 0; 304 + 305 + if (!device->power.flags.power_resources 306 + && !device->power.flags.explicit_get 307 + && device->power.state == ACPI_STATE_D0) 308 + ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0); 309 + 310 + return ret; 311 + } 312 + 293 313 int acpi_bus_update_power(acpi_handle handle, int *state_p) 294 314 { 295 315 struct acpi_device *device;
+2
drivers/acpi/dock.c
··· 868 868 if (!count) 869 869 return -EINVAL; 870 870 871 + acpi_scan_lock_acquire(); 871 872 begin_undock(dock_station); 872 873 ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST); 874 + acpi_scan_lock_release(); 873 875 return ret ? ret: count; 874 876 } 875 877 static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
+1
drivers/acpi/power.c
··· 885 885 ACPI_STA_DEFAULT); 886 886 mutex_init(&resource->resource_lock); 887 887 INIT_LIST_HEAD(&resource->dependent); 888 + INIT_LIST_HEAD(&resource->list_node); 888 889 resource->name = device->pnp.bus_id; 889 890 strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); 890 891 strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
+11 -5
drivers/acpi/resource.c
··· 304 304 } 305 305 306 306 static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, 307 - u8 triggering, u8 polarity, u8 shareable) 307 + u8 triggering, u8 polarity, u8 shareable, 308 + bool legacy) 308 309 { 309 310 int irq, p, t; 310 311 ··· 318 317 * In IO-APIC mode, use overrided attribute. Two reasons: 319 318 * 1. BIOS bug in DSDT 320 319 * 2. BIOS uses IO-APIC mode Interrupt Source Override 320 + * 321 + * We do this only if we are dealing with IRQ() or IRQNoFlags() 322 + * resource (the legacy ISA resources). With modern ACPI 5 devices 323 + * using extended IRQ descriptors we take the IRQ configuration 324 + * from _CRS directly. 321 325 */ 322 - if (!acpi_get_override_irq(gsi, &t, &p)) { 326 + if (legacy && !acpi_get_override_irq(gsi, &t, &p)) { 323 327 u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; 324 328 u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; 325 329 326 330 if (triggering != trig || polarity != pol) { 327 331 pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi, 328 - t ? "edge" : "level", p ? "low" : "high"); 332 + t ? "level" : "edge", p ? "low" : "high"); 329 333 triggering = trig; 330 334 polarity = pol; 331 335 } ··· 379 373 } 380 374 acpi_dev_get_irqresource(res, irq->interrupts[index], 381 375 irq->triggering, irq->polarity, 382 - irq->sharable); 376 + irq->sharable, true); 383 377 break; 384 378 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: 385 379 ext_irq = &ares->data.extended_irq; ··· 389 383 } 390 384 acpi_dev_get_irqresource(res, ext_irq->interrupts[index], 391 385 ext_irq->triggering, ext_irq->polarity, 392 - ext_irq->sharable); 386 + ext_irq->sharable, false); 393 387 break; 394 388 default: 395 389 return false;
+18 -9
drivers/base/firmware_class.c
··· 450 450 { 451 451 struct firmware_buf *buf = fw_priv->buf; 452 452 453 + /* 454 + * There is a small window in which user can write to 'loading' 455 + * between loading done and disappearance of 'loading' 456 + */ 457 + if (test_bit(FW_STATUS_DONE, &buf->status)) 458 + return; 459 + 453 460 set_bit(FW_STATUS_ABORT, &buf->status); 454 461 complete_all(&buf->completion); 462 + 463 + /* avoid user action after loading abort */ 464 + fw_priv->buf = NULL; 455 465 } 456 466 457 467 #define is_fw_load_aborted(buf) \ ··· 538 528 struct device_attribute *attr, char *buf) 539 529 { 540 530 struct firmware_priv *fw_priv = to_firmware_priv(dev); 541 - int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status); 531 + int loading = 0; 532 + 533 + mutex_lock(&fw_lock); 534 + if (fw_priv->buf) 535 + loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status); 536 + mutex_unlock(&fw_lock); 542 537 543 538 return sprintf(buf, "%d\n", loading); 544 539 } ··· 585 570 const char *buf, size_t count) 586 571 { 587 572 struct firmware_priv *fw_priv = to_firmware_priv(dev); 588 - struct firmware_buf *fw_buf = fw_priv->buf; 573 + struct firmware_buf *fw_buf; 589 574 int loading = simple_strtol(buf, NULL, 10); 590 575 int i; 591 576 592 577 mutex_lock(&fw_lock); 593 - 578 + fw_buf = fw_priv->buf; 594 579 if (!fw_buf) 595 580 goto out; 596 581 ··· 792 777 struct firmware_priv, timeout_work.work); 793 778 794 779 mutex_lock(&fw_lock); 795 - if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) { 796 - mutex_unlock(&fw_lock); 797 - return; 798 - } 799 780 fw_load_abort(fw_priv); 800 781 mutex_unlock(&fw_lock); 801 782 } ··· 871 860 wait_for_completion(&buf->completion); 872 861 873 862 cancel_delayed_work_sync(&fw_priv->timeout_work); 874 - 875 - fw_priv->buf = NULL; 876 863 877 864 device_remove_file(f_dev, &dev_attr_loading); 878 865 err_del_bin_attr:
+5 -1
drivers/block/rbd.c
··· 1036 1036 char *name; 1037 1037 u64 segment; 1038 1038 int ret; 1039 + char *name_format; 1039 1040 1040 1041 name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO); 1041 1042 if (!name) 1042 1043 return NULL; 1043 1044 segment = offset >> rbd_dev->header.obj_order; 1044 - ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx", 1045 + name_format = "%s.%012llx"; 1046 + if (rbd_dev->image_format == 2) 1047 + name_format = "%s.%016llx"; 1048 + ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format, 1045 1049 rbd_dev->header.object_prefix, segment); 1046 1050 if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) { 1047 1051 pr_err("error formatting segment name for #%llu (%d)\n",
+1 -2
drivers/gpu/drm/drm_prime.c
··· 190 190 if (ret) 191 191 return ERR_PTR(ret); 192 192 } 193 - return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, 194 - 0600); 193 + return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags); 195 194 } 196 195 EXPORT_SYMBOL(drm_gem_prime_export); 197 196
+10 -3
drivers/gpu/drm/radeon/r600.c
··· 2687 2687 int r600_uvd_init(struct radeon_device *rdev) 2688 2688 { 2689 2689 int i, j, r; 2690 + /* disable byte swapping */ 2691 + u32 lmi_swap_cntl = 0; 2692 + u32 mp_swap_cntl = 0; 2690 2693 2691 2694 /* raise clocks while booting up the VCPU */ 2692 2695 radeon_set_uvd_clocks(rdev, 53300, 40000); ··· 2714 2711 WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | 2715 2712 (1 << 21) | (1 << 9) | (1 << 20)); 2716 2713 2717 - /* disable byte swapping */ 2718 - WREG32(UVD_LMI_SWAP_CNTL, 0); 2719 - WREG32(UVD_MP_SWAP_CNTL, 0); 2714 + #ifdef __BIG_ENDIAN 2715 + /* swap (8 in 32) RB and IB */ 2716 + lmi_swap_cntl = 0xa; 2717 + mp_swap_cntl = 0; 2718 + #endif 2719 + WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl); 2720 + WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl); 2720 2721 2721 2722 WREG32(UVD_MPC_SET_MUXA0, 0x40c2040); 2722 2723 WREG32(UVD_MPC_SET_MUXA1, 0x0);
+24 -29
drivers/gpu/drm/radeon/radeon_device.c
··· 244 244 */ 245 245 void radeon_wb_disable(struct radeon_device *rdev) 246 246 { 247 - int r; 248 - 249 - if (rdev->wb.wb_obj) { 250 - r = radeon_bo_reserve(rdev->wb.wb_obj, false); 251 - if (unlikely(r != 0)) 252 - return; 253 - radeon_bo_kunmap(rdev->wb.wb_obj); 254 - radeon_bo_unpin(rdev->wb.wb_obj); 255 - radeon_bo_unreserve(rdev->wb.wb_obj); 256 - } 257 247 rdev->wb.enabled = false; 258 248 } 259 249 ··· 259 269 { 260 270 radeon_wb_disable(rdev); 261 271 if (rdev->wb.wb_obj) { 272 + if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) { 273 + radeon_bo_kunmap(rdev->wb.wb_obj); 274 + radeon_bo_unpin(rdev->wb.wb_obj); 275 + radeon_bo_unreserve(rdev->wb.wb_obj); 276 + } 262 277 radeon_bo_unref(&rdev->wb.wb_obj); 263 278 rdev->wb.wb = NULL; 264 279 rdev->wb.wb_obj = NULL; ··· 290 295 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); 291 296 return r; 292 297 } 293 - } 294 - r = radeon_bo_reserve(rdev->wb.wb_obj, false); 295 - if (unlikely(r != 0)) { 296 - radeon_wb_fini(rdev); 297 - return r; 298 - } 299 - r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, 300 - &rdev->wb.gpu_addr); 301 - if (r) { 298 + r = radeon_bo_reserve(rdev->wb.wb_obj, false); 299 + if (unlikely(r != 0)) { 300 + radeon_wb_fini(rdev); 301 + return r; 302 + } 303 + r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, 304 + &rdev->wb.gpu_addr); 305 + if (r) { 306 + radeon_bo_unreserve(rdev->wb.wb_obj); 307 + dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); 308 + radeon_wb_fini(rdev); 309 + return r; 310 + } 311 + r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); 302 312 radeon_bo_unreserve(rdev->wb.wb_obj); 303 - dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); 304 - radeon_wb_fini(rdev); 305 - return r; 306 - } 307 - r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); 308 - radeon_bo_unreserve(rdev->wb.wb_obj); 309 - if (r) { 310 - dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); 311 - radeon_wb_fini(rdev); 312 - return r; 313 + if (r) { 314 + 
dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); 315 + radeon_wb_fini(rdev); 316 + return r; 317 + } 313 318 } 314 319 315 320 /* clear wb memory */
+8 -2
drivers/gpu/drm/radeon/radeon_fence.c
··· 63 63 { 64 64 struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; 65 65 if (likely(rdev->wb.enabled || !drv->scratch_reg)) { 66 - *drv->cpu_addr = cpu_to_le32(seq); 66 + if (drv->cpu_addr) { 67 + *drv->cpu_addr = cpu_to_le32(seq); 68 + } 67 69 } else { 68 70 WREG32(drv->scratch_reg, seq); 69 71 } ··· 86 84 u32 seq = 0; 87 85 88 86 if (likely(rdev->wb.enabled || !drv->scratch_reg)) { 89 - seq = le32_to_cpu(*drv->cpu_addr); 87 + if (drv->cpu_addr) { 88 + seq = le32_to_cpu(*drv->cpu_addr); 89 + } else { 90 + seq = lower_32_bits(atomic64_read(&drv->last_seq)); 91 + } 90 92 } else { 91 93 seq = RREG32(drv->scratch_reg); 92 94 }
+4 -2
drivers/gpu/drm/radeon/radeon_gart.c
··· 1197 1197 int radeon_vm_bo_rmv(struct radeon_device *rdev, 1198 1198 struct radeon_bo_va *bo_va) 1199 1199 { 1200 - int r; 1200 + int r = 0; 1201 1201 1202 1202 mutex_lock(&rdev->vm_manager.lock); 1203 1203 mutex_lock(&bo_va->vm->mutex); 1204 - r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); 1204 + if (bo_va->soffset) { 1205 + r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); 1206 + } 1205 1207 mutex_unlock(&rdev->vm_manager.lock); 1206 1208 list_del(&bo_va->vm_list); 1207 1209 mutex_unlock(&bo_va->vm->mutex);
+7
drivers/gpu/drm/radeon/radeon_ring.c
··· 402 402 return -ENOMEM; 403 403 /* Align requested size with padding so unlock_commit can 404 404 * pad safely */ 405 + radeon_ring_free_size(rdev, ring); 406 + if (ring->ring_free_dw == (ring->ring_size / 4)) { 407 + /* This is an empty ring update lockup info to avoid 408 + * false positive. 409 + */ 410 + radeon_ring_lockup_update(ring); 411 + } 405 412 ndw = (ndw + ring->align_mask) & ~ring->align_mask; 406 413 while (ndw > (ring->ring_free_dw - 1)) { 407 414 radeon_ring_free_size(rdev, ring);
+31 -17
drivers/gpu/drm/radeon/radeon_uvd.c
··· 159 159 if (!r) { 160 160 radeon_bo_kunmap(rdev->uvd.vcpu_bo); 161 161 radeon_bo_unpin(rdev->uvd.vcpu_bo); 162 + rdev->uvd.cpu_addr = NULL; 163 + if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) { 164 + radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); 165 + } 162 166 radeon_bo_unreserve(rdev->uvd.vcpu_bo); 167 + 168 + if (rdev->uvd.cpu_addr) { 169 + radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); 170 + } else { 171 + rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL; 172 + } 163 173 } 164 174 return r; 165 175 } ··· 187 177 dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r); 188 178 return r; 189 179 } 180 + 181 + /* Have been pin in cpu unmap unpin */ 182 + radeon_bo_kunmap(rdev->uvd.vcpu_bo); 183 + radeon_bo_unpin(rdev->uvd.vcpu_bo); 190 184 191 185 r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, 192 186 &rdev->uvd.gpu_addr); ··· 627 613 } 628 614 629 615 /* stitch together an UVD create msg */ 630 - msg[0] = 0x00000de4; 631 - msg[1] = 0x00000000; 632 - msg[2] = handle; 633 - msg[3] = 0x00000000; 634 - msg[4] = 0x00000000; 635 - msg[5] = 0x00000000; 636 - msg[6] = 0x00000000; 637 - msg[7] = 0x00000780; 638 - msg[8] = 0x00000440; 639 - msg[9] = 0x00000000; 640 - msg[10] = 0x01b37000; 616 + msg[0] = cpu_to_le32(0x00000de4); 617 + msg[1] = cpu_to_le32(0x00000000); 618 + msg[2] = cpu_to_le32(handle); 619 + msg[3] = cpu_to_le32(0x00000000); 620 + msg[4] = cpu_to_le32(0x00000000); 621 + msg[5] = cpu_to_le32(0x00000000); 622 + msg[6] = cpu_to_le32(0x00000000); 623 + msg[7] = cpu_to_le32(0x00000780); 624 + msg[8] = cpu_to_le32(0x00000440); 625 + msg[9] = cpu_to_le32(0x00000000); 626 + msg[10] = cpu_to_le32(0x01b37000); 641 627 for (i = 11; i < 1024; ++i) 642 - msg[i] = 0x0; 628 + msg[i] = cpu_to_le32(0x0); 643 629 644 630 radeon_bo_kunmap(bo); 645 631 radeon_bo_unreserve(bo); ··· 673 659 } 674 660 675 661 /* stitch together an UVD destroy msg */ 676 - msg[0] = 0x00000de4; 677 - msg[1] = 0x00000002; 
678 - msg[2] = handle; 679 - msg[3] = 0x00000000; 662 + msg[0] = cpu_to_le32(0x00000de4); 663 + msg[1] = cpu_to_le32(0x00000002); 664 + msg[2] = cpu_to_le32(handle); 665 + msg[3] = cpu_to_le32(0x00000000); 680 666 for (i = 4; i < 1024; ++i) 681 - msg[i] = 0x0; 667 + msg[i] = cpu_to_le32(0x0); 682 668 683 669 radeon_bo_kunmap(bo); 684 670 radeon_bo_unreserve(bo);
+1 -1
drivers/irqchip/irq-gic.c
··· 705 705 static int __cpuinit gic_secondary_init(struct notifier_block *nfb, 706 706 unsigned long action, void *hcpu) 707 707 { 708 - if (action == CPU_STARTING) 708 + if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) 709 709 gic_cpu_init(&gic_data[0]); 710 710 return NOTIFY_OK; 711 711 }
+9 -3
drivers/media/Kconfig
··· 136 136 137 137 # This Kconfig option is used by both PCI and USB drivers 138 138 config TTPCI_EEPROM 139 - tristate 140 - depends on I2C 141 - default n 139 + tristate 140 + depends on I2C 141 + default n 142 142 143 143 source "drivers/media/dvb-core/Kconfig" 144 144 ··· 188 188 the needed demodulators). 189 189 190 190 If unsure say Y. 191 + 192 + config MEDIA_ATTACH 193 + bool 194 + depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT 195 + depends on MODULES 196 + default MODULES 191 197 192 198 source "drivers/media/i2c/Kconfig" 193 199 source "drivers/media/tuners/Kconfig"
+8 -18
drivers/media/platform/exynos4-is/fimc-is.c
··· 834 834 goto err_clk; 835 835 } 836 836 pm_runtime_enable(dev); 837 - /* 838 - * Enable only the ISP power domain, keep FIMC-IS clocks off until 839 - * the whole clock tree is configured. The ISP power domain needs 840 - * be active in order to acces any CMU_ISP clock registers. 841 - */ 837 + 842 838 ret = pm_runtime_get_sync(dev); 843 839 if (ret < 0) 844 840 goto err_irq; 845 - 846 - ret = fimc_is_setup_clocks(is); 847 - pm_runtime_put_sync(dev); 848 - 849 - if (ret < 0) 850 - goto err_irq; 851 - 852 - is->clk_init = true; 853 841 854 842 is->alloc_ctx = vb2_dma_contig_init_ctx(dev); 855 843 if (IS_ERR(is->alloc_ctx)) { ··· 860 872 if (ret < 0) 861 873 goto err_dfs; 862 874 875 + pm_runtime_put_sync(dev); 876 + 863 877 dev_dbg(dev, "FIMC-IS registered successfully\n"); 864 878 return 0; 865 879 ··· 881 891 static int fimc_is_runtime_resume(struct device *dev) 882 892 { 883 893 struct fimc_is *is = dev_get_drvdata(dev); 894 + int ret; 884 895 885 - if (!is->clk_init) 886 - return 0; 896 + ret = fimc_is_setup_clocks(is); 897 + if (ret) 898 + return ret; 887 899 888 900 return fimc_is_enable_clocks(is); 889 901 } ··· 894 902 { 895 903 struct fimc_is *is = dev_get_drvdata(dev); 896 904 897 - if (is->clk_init) 898 - fimc_is_disable_clocks(is); 899 - 905 + fimc_is_disable_clocks(is); 900 906 return 0; 901 907 } 902 908
-1
drivers/media/platform/exynos4-is/fimc-is.h
··· 264 264 spinlock_t slock; 265 265 266 266 struct clk *clocks[ISS_CLKS_MAX]; 267 - bool clk_init; 268 267 void __iomem *regs; 269 268 void __iomem *pmu_regs; 270 269 int irq;
+1 -1
drivers/media/platform/s5p-jpeg/Makefile
··· 1 1 s5p-jpeg-objs := jpeg-core.o 2 - obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) := s5p-jpeg.o 2 + obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o
+1 -1
drivers/media/platform/s5p-mfc/Makefile
··· 1 - obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o 1 + obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc.o 2 2 s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o 3 3 s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o 4 4 s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o
-20
drivers/media/tuners/Kconfig
··· 1 - config MEDIA_ATTACH 2 - bool "Load and attach frontend and tuner driver modules as needed" 3 - depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT 4 - depends on MODULES 5 - default y if !EXPERT 6 - help 7 - Remove the static dependency of DVB card drivers on all 8 - frontend modules for all possible card variants. Instead, 9 - allow the card drivers to only load the frontend modules 10 - they require. 11 - 12 - Also, tuner module will automatically load a tuner driver 13 - when needed, for analog mode. 14 - 15 - This saves several KBytes of memory. 16 - 17 - Note: You will need module-init-tools v3.2 or later for this feature. 18 - 19 - If unsure say Y. 20 - 21 1 # Analog TV tuners, auto-loaded via tuner.ko 22 2 config MEDIA_TUNER 23 3 tristate
+3 -3
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
··· 376 376 struct rtl28xxu_req req_mxl5007t = {0xd9c0, CMD_I2C_RD, 1, buf}; 377 377 struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf}; 378 378 struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf}; 379 - struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 5, buf}; 379 + struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf}; 380 380 381 381 dev_dbg(&d->udev->dev, "%s:\n", __func__); 382 382 ··· 481 481 goto found; 482 482 } 483 483 484 - /* check R820T by reading tuner stats at I2C addr 0x1a */ 484 + /* check R820T ID register; reg=00 val=69 */ 485 485 ret = rtl28xxu_ctrl_msg(d, &req_r820t); 486 - if (ret == 0) { 486 + if (ret == 0 && buf[0] == 0x69) { 487 487 priv->tuner = TUNER_RTL2832_R820T; 488 488 priv->tuner_name = "R820T"; 489 489 goto found;
+66
drivers/parisc/iosapic.c
··· 811 811 return pcidev->irq; 812 812 } 813 813 814 + static struct iosapic_info *first_isi = NULL; 815 + 816 + #ifdef CONFIG_64BIT 817 + int iosapic_serial_irq(int num) 818 + { 819 + struct iosapic_info *isi = first_isi; 820 + struct irt_entry *irte = NULL; /* only used if PAT PDC */ 821 + struct vector_info *vi; 822 + int isi_line; /* line used by device */ 823 + 824 + /* lookup IRT entry for isi/slot/pin set */ 825 + irte = &irt_cell[num]; 826 + 827 + DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n", 828 + irte, 829 + irte->entry_type, 830 + irte->entry_length, 831 + irte->polarity_trigger, 832 + irte->src_bus_irq_devno, 833 + irte->src_bus_id, 834 + irte->src_seg_id, 835 + irte->dest_iosapic_intin, 836 + (u32) irte->dest_iosapic_addr); 837 + isi_line = irte->dest_iosapic_intin; 838 + 839 + /* get vector info for this input line */ 840 + vi = isi->isi_vector + isi_line; 841 + DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", isi_line, vi); 842 + 843 + /* If this IRQ line has already been setup, skip it */ 844 + if (vi->irte) 845 + goto out; 846 + 847 + vi->irte = irte; 848 + 849 + /* 850 + * Allocate processor IRQ 851 + * 852 + * XXX/FIXME The txn_alloc_irq() code and related code should be 853 + * moved to enable_irq(). That way we only allocate processor IRQ 854 + * bits for devices that actually have drivers claiming them. 855 + * Right now we assign an IRQ to every PCI device present, 856 + * regardless of whether it's used or not. 
857 + */ 858 + vi->txn_irq = txn_alloc_irq(8); 859 + 860 + if (vi->txn_irq < 0) 861 + panic("I/O sapic: couldn't get TXN IRQ\n"); 862 + 863 + /* enable_irq() will use txn_* to program IRdT */ 864 + vi->txn_addr = txn_alloc_addr(vi->txn_irq); 865 + vi->txn_data = txn_alloc_data(vi->txn_irq); 866 + 867 + vi->eoi_addr = isi->addr + IOSAPIC_REG_EOI; 868 + vi->eoi_data = cpu_to_le32(vi->txn_data); 869 + 870 + cpu_claim_irq(vi->txn_irq, &iosapic_interrupt_type, vi); 871 + 872 + out: 873 + 874 + return vi->txn_irq; 875 + } 876 + #endif 877 + 814 878 815 879 /* 816 880 ** squirrel away the I/O Sapic Version ··· 941 877 vip->irqline = (unsigned char) cnt; 942 878 vip->iosapic = isi; 943 879 } 880 + if (!first_isi) 881 + first_isi = isi; 944 882 return isi; 945 883 } 946 884
+5 -1
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 688 688 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen 689 689 * for qla_tgt_xmit_response LLD code 690 690 */ 691 + if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 692 + se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT; 693 + se_cmd->residual_count = 0; 694 + } 691 695 se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 692 - se_cmd->residual_count = se_cmd->data_length; 696 + se_cmd->residual_count += se_cmd->data_length; 693 697 694 698 cmd->bufflen = 0; 695 699 }
+14 -13
drivers/target/iscsi/iscsi_target_configfs.c
··· 155 155 struct iscsi_tpg_np *tpg_np_iser = NULL; 156 156 char *endptr; 157 157 u32 op; 158 - int rc; 158 + int rc = 0; 159 159 160 160 op = simple_strtoul(page, &endptr, 0); 161 161 if ((op != 1) && (op != 0)) { ··· 174 174 return -EINVAL; 175 175 176 176 if (op) { 177 - int rc = request_module("ib_isert"); 178 - if (rc != 0) 177 + rc = request_module("ib_isert"); 178 + if (rc != 0) { 179 179 pr_warn("Unable to request_module for ib_isert\n"); 180 + rc = 0; 181 + } 180 182 181 183 tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, 182 184 np->np_ip, tpg_np, ISCSI_INFINIBAND); 183 - if (!tpg_np_iser || IS_ERR(tpg_np_iser)) 185 + if (IS_ERR(tpg_np_iser)) { 186 + rc = PTR_ERR(tpg_np_iser); 184 187 goto out; 188 + } 185 189 } else { 186 190 tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND); 187 - if (!tpg_np_iser) 188 - goto out; 189 - 190 - rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); 191 - if (rc < 0) 192 - goto out; 191 + if (tpg_np_iser) { 192 + rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); 193 + if (rc < 0) 194 + goto out; 195 + } 193 196 } 194 - 195 - printk("lio_target_np_store_iser() done, op: %d\n", op); 196 197 197 198 iscsit_put_tpg(tpg); 198 199 return count; 199 200 out: 200 201 iscsit_put_tpg(tpg); 201 - return -EINVAL; 202 + return rc; 202 203 } 203 204 204 205 TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR);
+2 -2
drivers/target/iscsi/iscsi_target_erl0.c
··· 842 842 return 0; 843 843 844 844 sess->time2retain_timer_flags |= ISCSI_TF_STOP; 845 - spin_unlock_bh(&se_tpg->session_lock); 845 + spin_unlock(&se_tpg->session_lock); 846 846 847 847 del_timer_sync(&sess->time2retain_timer); 848 848 849 - spin_lock_bh(&se_tpg->session_lock); 849 + spin_lock(&se_tpg->session_lock); 850 850 sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING; 851 851 pr_debug("Stopped Time2Retain Timer for SID: %u\n", 852 852 sess->sid);
-3
drivers/target/iscsi/iscsi_target_login.c
··· 984 984 } 985 985 986 986 np->np_transport = t; 987 - printk("Set np->np_transport to %p -> %s\n", np->np_transport, 988 - np->np_transport->name); 989 987 return 0; 990 988 } 991 989 ··· 1000 1002 1001 1003 conn->sock = new_sock; 1002 1004 conn->login_family = np->np_sockaddr.ss_family; 1003 - printk("iSCSI/TCP: Setup conn->sock from new_sock: %p\n", new_sock); 1004 1005 1005 1006 if (np->np_sockaddr.ss_family == AF_INET6) { 1006 1007 memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
-3
drivers/target/iscsi/iscsi_target_nego.c
··· 721 721 722 722 start += strlen(key) + strlen(value) + 2; 723 723 } 724 - 725 - printk("i_buf: %s, s_buf: %s, t_buf: %s\n", i_buf, s_buf, t_buf); 726 - 727 724 /* 728 725 * See 5.3. Login Phase. 729 726 */
+5 -8
drivers/tty/pty.c
··· 244 244 245 245 static int pty_open(struct tty_struct *tty, struct file *filp) 246 246 { 247 - int retval = -ENODEV; 248 - 249 247 if (!tty || !tty->link) 250 - goto out; 248 + return -ENODEV; 251 249 252 - set_bit(TTY_IO_ERROR, &tty->flags); 253 - 254 - retval = -EIO; 255 250 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) 256 251 goto out; 257 252 if (test_bit(TTY_PTY_LOCK, &tty->link->flags)) ··· 257 262 clear_bit(TTY_IO_ERROR, &tty->flags); 258 263 clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); 259 264 set_bit(TTY_THROTTLED, &tty->flags); 260 - retval = 0; 265 + return 0; 266 + 261 267 out: 262 - return retval; 268 + set_bit(TTY_IO_ERROR, &tty->flags); 269 + return -EIO; 263 270 } 264 271 265 272 static void pty_set_termios(struct tty_struct *tty,
+9 -1
drivers/tty/serial/8250/8250_gsc.c
··· 30 30 unsigned long address; 31 31 int err; 32 32 33 + #ifdef CONFIG_64BIT 34 + extern int iosapic_serial_irq(int cellnum); 35 + if (!dev->irq && (dev->id.sversion == 0xad)) 36 + dev->irq = iosapic_serial_irq(dev->mod_index-1); 37 + #endif 38 + 33 39 if (!dev->irq) { 34 40 /* We find some unattached serial ports by walking native 35 41 * busses. These should be silently ignored. Otherwise, ··· 57 51 memset(&uart, 0, sizeof(uart)); 58 52 uart.port.iotype = UPIO_MEM; 59 53 /* 7.272727MHz on Lasi. Assumed the same for Dino, Wax and Timi. */ 60 - uart.port.uartclk = 7272727; 54 + uart.port.uartclk = (dev->id.sversion != 0xad) ? 55 + 7272727 : 1843200; 61 56 uart.port.mapbase = address; 62 57 uart.port.membase = ioremap_nocache(address, 16); 63 58 uart.port.irq = dev->irq; ··· 80 73 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 }, 81 74 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c }, 82 75 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d }, 76 + { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x000ad }, 83 77 { 0 } 84 78 }; 85 79
+1 -4
drivers/tty/vt/vt_ioctl.c
··· 289 289 struct vc_data *vc = NULL; 290 290 int ret = 0; 291 291 292 - if (!vc_num) 293 - return 0; 294 - 295 292 console_lock(); 296 293 if (VT_BUSY(vc_num)) 297 294 ret = -EBUSY; 298 - else 295 + else if (vc_num) 299 296 vc = vc_deallocate(vc_num); 300 297 console_unlock(); 301 298
+10 -4
drivers/usb/phy/Kconfig
··· 4 4 menuconfig USB_PHY 5 5 bool "USB Physical Layer drivers" 6 6 help 7 - USB controllers (those which are host, device or DRD) need a 8 - device to handle the physical layer signalling, commonly called 9 - a PHY. 7 + Most USB controllers have the physical layer signalling part 8 + (commonly called a PHY) built in. However, dual-role devices 9 + (a.k.a. USB on-the-go) which support being USB master or slave 10 + with the same connector often use an external PHY. 10 11 11 - The following drivers add support for such PHY devices. 12 + The drivers in this submenu add support for such PHY devices. 13 + They are not needed for standard master-only (or the vast 14 + majority of slave-only) USB interfaces. 15 + 16 + If you're not sure if this applies to you, it probably doesn't; 17 + say N here. 12 18 13 19 if USB_PHY 14 20
+2 -1
drivers/usb/serial/ti_usb_3410_5052.c
··· 172 172 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, 173 173 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, 174 174 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, 175 - { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, 175 + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, 176 + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, 176 177 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, 177 178 }; 178 179
+3 -1
drivers/usb/serial/ti_usb_3410_5052.h
··· 52 52 53 53 /* Abbott Diabetics vendor and product ids */ 54 54 #define ABBOTT_VENDOR_ID 0x1a61 55 - #define ABBOTT_PRODUCT_ID 0x3410 55 + #define ABBOTT_STEREO_PLUG_ID 0x3410 56 + #define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID 57 + #define ABBOTT_STRIP_PORT_ID 0x3420 56 58 57 59 /* Commands */ 58 60 #define TI_GET_VERSION 0x01
+6
fs/internal.h
··· 132 132 extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *); 133 133 134 134 /* 135 + * splice.c 136 + */ 137 + extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, 138 + loff_t *opos, size_t len, unsigned int flags); 139 + 140 + /* 135 141 * pipe.c 136 142 */ 137 143 extern const struct file_operations pipefifo_fops;
+16 -8
fs/read_write.c
··· 1064 1064 struct fd in, out; 1065 1065 struct inode *in_inode, *out_inode; 1066 1066 loff_t pos; 1067 + loff_t out_pos; 1067 1068 ssize_t retval; 1068 1069 int fl; 1069 1070 ··· 1078 1077 if (!(in.file->f_mode & FMODE_READ)) 1079 1078 goto fput_in; 1080 1079 retval = -ESPIPE; 1081 - if (!ppos) 1082 - ppos = &in.file->f_pos; 1083 - else 1080 + if (!ppos) { 1081 + pos = in.file->f_pos; 1082 + } else { 1083 + pos = *ppos; 1084 1084 if (!(in.file->f_mode & FMODE_PREAD)) 1085 1085 goto fput_in; 1086 - retval = rw_verify_area(READ, in.file, ppos, count); 1086 + } 1087 + retval = rw_verify_area(READ, in.file, &pos, count); 1087 1088 if (retval < 0) 1088 1089 goto fput_in; 1089 1090 count = retval; ··· 1102 1099 retval = -EINVAL; 1103 1100 in_inode = file_inode(in.file); 1104 1101 out_inode = file_inode(out.file); 1105 - retval = rw_verify_area(WRITE, out.file, &out.file->f_pos, count); 1102 + out_pos = out.file->f_pos; 1103 + retval = rw_verify_area(WRITE, out.file, &out_pos, count); 1106 1104 if (retval < 0) 1107 1105 goto fput_out; 1108 1106 count = retval; ··· 1111 1107 if (!max) 1112 1108 max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes); 1113 1109 1114 - pos = *ppos; 1115 1110 if (unlikely(pos + count > max)) { 1116 1111 retval = -EOVERFLOW; 1117 1112 if (pos >= max) ··· 1129 1126 if (in.file->f_flags & O_NONBLOCK) 1130 1127 fl = SPLICE_F_NONBLOCK; 1131 1128 #endif 1132 - retval = do_splice_direct(in.file, ppos, out.file, count, fl); 1129 + retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl); 1133 1130 1134 1131 if (retval > 0) { 1135 1132 add_rchar(current, retval); 1136 1133 add_wchar(current, retval); 1137 1134 fsnotify_access(in.file); 1138 1135 fsnotify_modify(out.file); 1136 + out.file->f_pos = out_pos; 1137 + if (ppos) 1138 + *ppos = pos; 1139 + else 1140 + in.file->f_pos = pos; 1139 1141 } 1140 1142 1141 1143 inc_syscr(current); 1142 1144 inc_syscw(current); 1143 - if (*ppos > max) 1145 + if (pos > max) 1144 1146 
retval = -EOVERFLOW; 1145 1147 1146 1148 fput_out:
+18 -13
fs/splice.c
··· 1274 1274 { 1275 1275 struct file *file = sd->u.file; 1276 1276 1277 - return do_splice_from(pipe, file, &file->f_pos, sd->total_len, 1277 + return do_splice_from(pipe, file, sd->opos, sd->total_len, 1278 1278 sd->flags); 1279 1279 } 1280 1280 ··· 1294 1294 * 1295 1295 */ 1296 1296 long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, 1297 - size_t len, unsigned int flags) 1297 + loff_t *opos, size_t len, unsigned int flags) 1298 1298 { 1299 1299 struct splice_desc sd = { 1300 1300 .len = len, ··· 1302 1302 .flags = flags, 1303 1303 .pos = *ppos, 1304 1304 .u.file = out, 1305 + .opos = opos, 1305 1306 }; 1306 1307 long ret; 1307 1308 ··· 1326 1325 { 1327 1326 struct pipe_inode_info *ipipe; 1328 1327 struct pipe_inode_info *opipe; 1329 - loff_t offset, *off; 1328 + loff_t offset; 1330 1329 long ret; 1331 1330 1332 1331 ipipe = get_pipe_info(in); ··· 1357 1356 return -EINVAL; 1358 1357 if (copy_from_user(&offset, off_out, sizeof(loff_t))) 1359 1358 return -EFAULT; 1360 - off = &offset; 1361 - } else 1362 - off = &out->f_pos; 1359 + } else { 1360 + offset = out->f_pos; 1361 + } 1363 1362 1364 - ret = do_splice_from(ipipe, out, off, len, flags); 1363 + ret = do_splice_from(ipipe, out, &offset, len, flags); 1365 1364 1366 - if (off_out && copy_to_user(off_out, off, sizeof(loff_t))) 1365 + if (!off_out) 1366 + out->f_pos = offset; 1367 + else if (copy_to_user(off_out, &offset, sizeof(loff_t))) 1367 1368 ret = -EFAULT; 1368 1369 1369 1370 return ret; ··· 1379 1376 return -EINVAL; 1380 1377 if (copy_from_user(&offset, off_in, sizeof(loff_t))) 1381 1378 return -EFAULT; 1382 - off = &offset; 1383 - } else 1384 - off = &in->f_pos; 1379 + } else { 1380 + offset = in->f_pos; 1381 + } 1385 1382 1386 - ret = do_splice_to(in, off, opipe, len, flags); 1383 + ret = do_splice_to(in, &offset, opipe, len, flags); 1387 1384 1388 - if (off_in && copy_to_user(off_in, off, sizeof(loff_t))) 1385 + if (!off_in) 1386 + in->f_pos = offset; 1387 + else if 
(copy_to_user(off_in, &offset, sizeof(loff_t))) 1389 1388 ret = -EFAULT; 1390 1389 1391 1390 return ret;
+1
include/acpi/acpi_bus.h
··· 382 382 int acpi_device_get_power(struct acpi_device *device, int *state); 383 383 int acpi_device_set_power(struct acpi_device *device, int state); 384 384 int acpi_bus_init_power(struct acpi_device *device); 385 + int acpi_device_fix_up_power(struct acpi_device *device); 385 386 int acpi_bus_update_power(acpi_handle handle, int *state_p); 386 387 bool acpi_bus_power_manageable(acpi_handle handle); 387 388
+35
include/linux/context_tracking.h
··· 3 3 4 4 #include <linux/sched.h> 5 5 #include <linux/percpu.h> 6 + #include <linux/vtime.h> 6 7 #include <asm/ptrace.h> 7 8 8 9 struct context_tracking { ··· 20 19 } state; 21 20 }; 22 21 22 + static inline void __guest_enter(void) 23 + { 24 + /* 25 + * This is running in ioctl context so we can avoid 26 + * the call to vtime_account() with its unnecessary idle check. 27 + */ 28 + vtime_account_system(current); 29 + current->flags |= PF_VCPU; 30 + } 31 + 32 + static inline void __guest_exit(void) 33 + { 34 + /* 35 + * This is running in ioctl context so we can avoid 36 + * the call to vtime_account() with its unnecessary idle check. 37 + */ 38 + vtime_account_system(current); 39 + current->flags &= ~PF_VCPU; 40 + } 41 + 23 42 #ifdef CONFIG_CONTEXT_TRACKING 24 43 DECLARE_PER_CPU(struct context_tracking, context_tracking); 25 44 ··· 55 34 56 35 extern void user_enter(void); 57 36 extern void user_exit(void); 37 + 38 + extern void guest_enter(void); 39 + extern void guest_exit(void); 58 40 59 41 static inline enum ctx_state exception_enter(void) 60 42 { ··· 81 57 static inline bool context_tracking_in_user(void) { return false; } 82 58 static inline void user_enter(void) { } 83 59 static inline void user_exit(void) { } 60 + 61 + static inline void guest_enter(void) 62 + { 63 + __guest_enter(); 64 + } 65 + 66 + static inline void guest_exit(void) 67 + { 68 + __guest_exit(); 69 + } 70 + 84 71 static inline enum ctx_state exception_enter(void) { return 0; } 85 72 static inline void exception_exit(enum ctx_state prev_ctx) { } 86 73 static inline void context_tracking_task_switch(struct task_struct *prev,
-2
include/linux/fs.h
··· 2414 2414 struct file *, loff_t *, size_t, unsigned int); 2415 2415 extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, 2416 2416 struct file *out, loff_t *, size_t len, unsigned int flags); 2417 - extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, 2418 - size_t len, unsigned int flags); 2419 2417 2420 2418 extern void 2421 2419 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
+1 -36
include/linux/kvm_host.h
··· 23 23 #include <linux/ratelimit.h> 24 24 #include <linux/err.h> 25 25 #include <linux/irqflags.h> 26 + #include <linux/context_tracking.h> 26 27 #include <asm/signal.h> 27 28 28 29 #include <linux/kvm.h> ··· 760 759 return 0; 761 760 } 762 761 #endif 763 - 764 - static inline void __guest_enter(void) 765 - { 766 - /* 767 - * This is running in ioctl context so we can avoid 768 - * the call to vtime_account() with its unnecessary idle check. 769 - */ 770 - vtime_account_system(current); 771 - current->flags |= PF_VCPU; 772 - } 773 - 774 - static inline void __guest_exit(void) 775 - { 776 - /* 777 - * This is running in ioctl context so we can avoid 778 - * the call to vtime_account() with its unnecessary idle check. 779 - */ 780 - vtime_account_system(current); 781 - current->flags &= ~PF_VCPU; 782 - } 783 - 784 - #ifdef CONFIG_CONTEXT_TRACKING 785 - extern void guest_enter(void); 786 - extern void guest_exit(void); 787 - 788 - #else /* !CONFIG_CONTEXT_TRACKING */ 789 - static inline void guest_enter(void) 790 - { 791 - __guest_enter(); 792 - } 793 - 794 - static inline void guest_exit(void) 795 - { 796 - __guest_exit(); 797 - } 798 - #endif /* !CONFIG_CONTEXT_TRACKING */ 799 762 800 763 static inline void kvm_guest_enter(void) 801 764 {
+1 -2
include/linux/perf_event.h
··· 389 389 /* mmap bits */ 390 390 struct mutex mmap_mutex; 391 391 atomic_t mmap_count; 392 - int mmap_locked; 393 - struct user_struct *mmap_user; 392 + 394 393 struct ring_buffer *rb; 395 394 struct list_head rb_entry; 396 395
+17 -1
include/linux/preempt.h
··· 33 33 preempt_schedule(); \ 34 34 } while (0) 35 35 36 + #ifdef CONFIG_CONTEXT_TRACKING 37 + 38 + void preempt_schedule_context(void); 39 + 40 + #define preempt_check_resched_context() \ 41 + do { \ 42 + if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ 43 + preempt_schedule_context(); \ 44 + } while (0) 45 + #else 46 + 47 + #define preempt_check_resched_context() preempt_check_resched() 48 + 49 + #endif /* CONFIG_CONTEXT_TRACKING */ 50 + 36 51 #else /* !CONFIG_PREEMPT */ 37 52 38 53 #define preempt_check_resched() do { } while (0) 54 + #define preempt_check_resched_context() do { } while (0) 39 55 40 56 #endif /* CONFIG_PREEMPT */ 41 57 ··· 104 88 do { \ 105 89 preempt_enable_no_resched_notrace(); \ 106 90 barrier(); \ 107 - preempt_check_resched(); \ 91 + preempt_check_resched_context(); \ 108 92 } while (0) 109 93 110 94 #else /* !CONFIG_PREEMPT_COUNT */
+1
include/linux/splice.h
··· 35 35 void *data; /* cookie */ 36 36 } u; 37 37 loff_t pos; /* file position */ 38 + loff_t *opos; /* sendfile: output position */ 38 39 size_t num_spliced; /* number of bytes already spliced */ 39 40 bool need_wakeup; /* need to wake up writer */ 40 41 };
+2 -2
include/linux/vtime.h
··· 34 34 } 35 35 extern void vtime_guest_enter(struct task_struct *tsk); 36 36 extern void vtime_guest_exit(struct task_struct *tsk); 37 - extern void vtime_init_idle(struct task_struct *tsk); 37 + extern void vtime_init_idle(struct task_struct *tsk, int cpu); 38 38 #else 39 39 static inline void vtime_account_irq_exit(struct task_struct *tsk) 40 40 { ··· 45 45 static inline void vtime_user_exit(struct task_struct *tsk) { } 46 46 static inline void vtime_guest_enter(struct task_struct *tsk) { } 47 47 static inline void vtime_guest_exit(struct task_struct *tsk) { } 48 - static inline void vtime_init_idle(struct task_struct *tsk) { } 48 + static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } 49 49 #endif 50 50 51 51 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+40 -1
kernel/context_tracking.c
··· 15 15 */ 16 16 17 17 #include <linux/context_tracking.h> 18 - #include <linux/kvm_host.h> 19 18 #include <linux/rcupdate.h> 20 19 #include <linux/sched.h> 21 20 #include <linux/hardirq.h> ··· 70 71 local_irq_restore(flags); 71 72 } 72 73 74 + #ifdef CONFIG_PREEMPT 75 + /** 76 + * preempt_schedule_context - preempt_schedule called by tracing 77 + * 78 + * The tracing infrastructure uses preempt_enable_notrace to prevent 79 + * recursion and tracing preempt enabling caused by the tracing 80 + * infrastructure itself. But as tracing can happen in areas coming 81 + * from userspace or just about to enter userspace, a preempt enable 82 + * can occur before user_exit() is called. This will cause the scheduler 83 + * to be called when the system is still in usermode. 84 + * 85 + * To prevent this, the preempt_enable_notrace will use this function 86 + * instead of preempt_schedule() to exit user context if needed before 87 + * calling the scheduler. 88 + */ 89 + void __sched notrace preempt_schedule_context(void) 90 + { 91 + struct thread_info *ti = current_thread_info(); 92 + enum ctx_state prev_ctx; 93 + 94 + if (likely(ti->preempt_count || irqs_disabled())) 95 + return; 96 + 97 + /* 98 + * Need to disable preemption in case user_exit() is traced 99 + * and the tracer calls preempt_enable_notrace() causing 100 + * an infinite recursion. 101 + */ 102 + preempt_disable_notrace(); 103 + prev_ctx = exception_enter(); 104 + preempt_enable_no_resched_notrace(); 105 + 106 + preempt_schedule(); 107 + 108 + preempt_disable_notrace(); 109 + exception_exit(prev_ctx); 110 + preempt_enable_notrace(); 111 + } 112 + EXPORT_SYMBOL_GPL(preempt_schedule_context); 113 + #endif /* CONFIG_PREEMPT */ 73 114 74 115 /** 75 116 * user_exit - Inform the context tracking that the CPU is
+17
kernel/cpu/idle.c
··· 5 5 #include <linux/cpu.h> 6 6 #include <linux/tick.h> 7 7 #include <linux/mm.h> 8 + #include <linux/stackprotector.h> 8 9 9 10 #include <asm/tlb.h> 10 11 ··· 59 58 void __weak arch_cpu_idle(void) 60 59 { 61 60 cpu_idle_force_poll = 1; 61 + local_irq_enable(); 62 62 } 63 63 64 64 /* ··· 114 112 115 113 void cpu_startup_entry(enum cpuhp_state state) 116 114 { 115 + /* 116 + * This #ifdef needs to die, but it's too late in the cycle to 117 + * make this generic (arm and sh have never invoked the canary 118 + * init for the non boot cpus!). Will be fixed in 3.11 119 + */ 120 + #ifdef CONFIG_X86 121 + /* 122 + * If we're the non-boot CPU, nothing set the stack canary up 123 + * for us. The boot CPU already has it initialized but no harm 124 + * in doing it again. This is a good place for updating it, as 125 + * we wont ever return from this function (so the invalid 126 + * canaries already on the stack wont ever trigger). 127 + */ 128 + boot_init_stack_canary(); 129 + #endif 117 130 current_set_polling(); 118 131 arch_cpu_idle_prepare(); 119 132 cpu_idle_loop();
+162 -73
kernel/events/core.c
··· 196 196 static void update_context_time(struct perf_event_context *ctx); 197 197 static u64 perf_event_time(struct perf_event *event); 198 198 199 - static void ring_buffer_attach(struct perf_event *event, 200 - struct ring_buffer *rb); 201 - 202 199 void __weak perf_event_print_debug(void) { } 203 200 204 201 extern __weak const char *perf_pmu_name(void) ··· 2915 2918 } 2916 2919 2917 2920 static void ring_buffer_put(struct ring_buffer *rb); 2921 + static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb); 2918 2922 2919 2923 static void free_event(struct perf_event *event) 2920 2924 { ··· 2940 2942 if (has_branch_stack(event)) { 2941 2943 static_key_slow_dec_deferred(&perf_sched_events); 2942 2944 /* is system-wide event */ 2943 - if (!(event->attach_state & PERF_ATTACH_TASK)) 2945 + if (!(event->attach_state & PERF_ATTACH_TASK)) { 2944 2946 atomic_dec(&per_cpu(perf_branch_stack_events, 2945 2947 event->cpu)); 2948 + } 2946 2949 } 2947 2950 } 2948 2951 2949 2952 if (event->rb) { 2950 - ring_buffer_put(event->rb); 2951 - event->rb = NULL; 2953 + struct ring_buffer *rb; 2954 + 2955 + /* 2956 + * Can happen when we close an event with re-directed output. 2957 + * 2958 + * Since we have a 0 refcount, perf_mmap_close() will skip 2959 + * over us; possibly making our ring_buffer_put() the last. 2960 + */ 2961 + mutex_lock(&event->mmap_mutex); 2962 + rb = event->rb; 2963 + if (rb) { 2964 + rcu_assign_pointer(event->rb, NULL); 2965 + ring_buffer_detach(event, rb); 2966 + ring_buffer_put(rb); /* could be last */ 2967 + } 2968 + mutex_unlock(&event->mmap_mutex); 2952 2969 } 2953 2970 2954 2971 if (is_cgroup_event(event)) ··· 3201 3188 unsigned int events = POLL_HUP; 3202 3189 3203 3190 /* 3204 - * Race between perf_event_set_output() and perf_poll(): perf_poll() 3205 - * grabs the rb reference but perf_event_set_output() overrides it. 
3206 - * Here is the timeline for two threads T1, T2: 3207 - * t0: T1, rb = rcu_dereference(event->rb) 3208 - * t1: T2, old_rb = event->rb 3209 - * t2: T2, event->rb = new rb 3210 - * t3: T2, ring_buffer_detach(old_rb) 3211 - * t4: T1, ring_buffer_attach(rb1) 3212 - * t5: T1, poll_wait(event->waitq) 3213 - * 3214 - * To avoid this problem, we grab mmap_mutex in perf_poll() 3215 - * thereby ensuring that the assignment of the new ring buffer 3216 - * and the detachment of the old buffer appear atomic to perf_poll() 3191 + * Pin the event->rb by taking event->mmap_mutex; otherwise 3192 + * perf_event_set_output() can swizzle our rb and make us miss wakeups. 3217 3193 */ 3218 3194 mutex_lock(&event->mmap_mutex); 3219 - 3220 - rcu_read_lock(); 3221 - rb = rcu_dereference(event->rb); 3222 - if (rb) { 3223 - ring_buffer_attach(event, rb); 3195 + rb = event->rb; 3196 + if (rb) 3224 3197 events = atomic_xchg(&rb->poll, 0); 3225 - } 3226 - rcu_read_unlock(); 3227 - 3228 3198 mutex_unlock(&event->mmap_mutex); 3229 3199 3230 3200 poll_wait(file, &event->waitq, wait); ··· 3517 3521 return; 3518 3522 3519 3523 spin_lock_irqsave(&rb->event_lock, flags); 3520 - if (!list_empty(&event->rb_entry)) 3521 - goto unlock; 3522 - 3523 - list_add(&event->rb_entry, &rb->event_list); 3524 - unlock: 3524 + if (list_empty(&event->rb_entry)) 3525 + list_add(&event->rb_entry, &rb->event_list); 3525 3526 spin_unlock_irqrestore(&rb->event_lock, flags); 3526 3527 } 3527 3528 3528 - static void ring_buffer_detach(struct perf_event *event, 3529 - struct ring_buffer *rb) 3529 + static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb) 3530 3530 { 3531 3531 unsigned long flags; 3532 3532 ··· 3541 3549 3542 3550 rcu_read_lock(); 3543 3551 rb = rcu_dereference(event->rb); 3544 - if (!rb) 3545 - goto unlock; 3546 - 3547 - list_for_each_entry_rcu(event, &rb->event_list, rb_entry) 3548 - wake_up_all(&event->waitq); 3549 - 3550 - unlock: 3552 + if (rb) { 3553 + 
list_for_each_entry_rcu(event, &rb->event_list, rb_entry) 3554 + wake_up_all(&event->waitq); 3555 + } 3551 3556 rcu_read_unlock(); 3552 3557 } 3553 3558 ··· 3573 3584 3574 3585 static void ring_buffer_put(struct ring_buffer *rb) 3575 3586 { 3576 - struct perf_event *event, *n; 3577 - unsigned long flags; 3578 - 3579 3587 if (!atomic_dec_and_test(&rb->refcount)) 3580 3588 return; 3581 3589 3582 - spin_lock_irqsave(&rb->event_lock, flags); 3583 - list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) { 3584 - list_del_init(&event->rb_entry); 3585 - wake_up_all(&event->waitq); 3586 - } 3587 - spin_unlock_irqrestore(&rb->event_lock, flags); 3590 + WARN_ON_ONCE(!list_empty(&rb->event_list)); 3588 3591 3589 3592 call_rcu(&rb->rcu_head, rb_free_rcu); 3590 3593 } ··· 3586 3605 struct perf_event *event = vma->vm_file->private_data; 3587 3606 3588 3607 atomic_inc(&event->mmap_count); 3608 + atomic_inc(&event->rb->mmap_count); 3589 3609 } 3590 3610 3611 + /* 3612 + * A buffer can be mmap()ed multiple times; either directly through the same 3613 + * event, or through other events by use of perf_event_set_output(). 3614 + * 3615 + * In order to undo the VM accounting done by perf_mmap() we need to destroy 3616 + * the buffer here, where we still have a VM context. This means we need 3617 + * to detach all events redirecting to us. 
3618 + */ 3591 3619 static void perf_mmap_close(struct vm_area_struct *vma) 3592 3620 { 3593 3621 struct perf_event *event = vma->vm_file->private_data; 3594 3622 3595 - if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { 3596 - unsigned long size = perf_data_size(event->rb); 3597 - struct user_struct *user = event->mmap_user; 3598 - struct ring_buffer *rb = event->rb; 3623 + struct ring_buffer *rb = event->rb; 3624 + struct user_struct *mmap_user = rb->mmap_user; 3625 + int mmap_locked = rb->mmap_locked; 3626 + unsigned long size = perf_data_size(rb); 3599 3627 3600 - atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); 3601 - vma->vm_mm->pinned_vm -= event->mmap_locked; 3602 - rcu_assign_pointer(event->rb, NULL); 3603 - ring_buffer_detach(event, rb); 3604 - mutex_unlock(&event->mmap_mutex); 3628 + atomic_dec(&rb->mmap_count); 3605 3629 3606 - ring_buffer_put(rb); 3607 - free_uid(user); 3630 + if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) 3631 + return; 3632 + 3633 + /* Detach current event from the buffer. */ 3634 + rcu_assign_pointer(event->rb, NULL); 3635 + ring_buffer_detach(event, rb); 3636 + mutex_unlock(&event->mmap_mutex); 3637 + 3638 + /* If there's still other mmap()s of this buffer, we're done. */ 3639 + if (atomic_read(&rb->mmap_count)) { 3640 + ring_buffer_put(rb); /* can't be last */ 3641 + return; 3608 3642 } 3643 + 3644 + /* 3645 + * No other mmap()s, detach from all other events that might redirect 3646 + * into the now unreachable buffer. Somewhat complicated by the 3647 + * fact that rb::event_lock otherwise nests inside mmap_mutex. 3648 + */ 3649 + again: 3650 + rcu_read_lock(); 3651 + list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { 3652 + if (!atomic_long_inc_not_zero(&event->refcount)) { 3653 + /* 3654 + * This event is en-route to free_event() which will 3655 + * detach it and remove it from the list. 
3656 + */ 3657 + continue; 3658 + } 3659 + rcu_read_unlock(); 3660 + 3661 + mutex_lock(&event->mmap_mutex); 3662 + /* 3663 + * Check we didn't race with perf_event_set_output() which can 3664 + * swizzle the rb from under us while we were waiting to 3665 + * acquire mmap_mutex. 3666 + * 3667 + * If we find a different rb; ignore this event, a next 3668 + * iteration will no longer find it on the list. We have to 3669 + * still restart the iteration to make sure we're not now 3670 + * iterating the wrong list. 3671 + */ 3672 + if (event->rb == rb) { 3673 + rcu_assign_pointer(event->rb, NULL); 3674 + ring_buffer_detach(event, rb); 3675 + ring_buffer_put(rb); /* can't be last, we still have one */ 3676 + } 3677 + mutex_unlock(&event->mmap_mutex); 3678 + put_event(event); 3679 + 3680 + /* 3681 + * Restart the iteration; either we're on the wrong list or 3682 + * destroyed its integrity by doing a deletion. 3683 + */ 3684 + goto again; 3685 + } 3686 + rcu_read_unlock(); 3687 + 3688 + /* 3689 + * It could be there's still a few 0-ref events on the list; they'll 3690 + * get cleaned up by free_event() -- they'll also still have their 3691 + * ref on the rb and will free it whenever they are done with it. 3692 + * 3693 + * Aside from that, this buffer is 'fully' detached and unmapped, 3694 + * undo the VM accounting. 
3695 + */ 3696 + 3697 + atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); 3698 + vma->vm_mm->pinned_vm -= mmap_locked; 3699 + free_uid(mmap_user); 3700 + 3701 + ring_buffer_put(rb); /* could be last */ 3609 3702 } 3610 3703 3611 3704 static const struct vm_operations_struct perf_mmap_vmops = { ··· 3729 3674 return -EINVAL; 3730 3675 3731 3676 WARN_ON_ONCE(event->ctx->parent_ctx); 3677 + again: 3732 3678 mutex_lock(&event->mmap_mutex); 3733 3679 if (event->rb) { 3734 - if (event->rb->nr_pages == nr_pages) 3735 - atomic_inc(&event->rb->refcount); 3736 - else 3680 + if (event->rb->nr_pages != nr_pages) { 3737 3681 ret = -EINVAL; 3682 + goto unlock; 3683 + } 3684 + 3685 + if (!atomic_inc_not_zero(&event->rb->mmap_count)) { 3686 + /* 3687 + * Raced against perf_mmap_close() through 3688 + * perf_event_set_output(). Try again, hope for better 3689 + * luck. 3690 + */ 3691 + mutex_unlock(&event->mmap_mutex); 3692 + goto again; 3693 + } 3694 + 3738 3695 goto unlock; 3739 3696 } 3740 3697 ··· 3787 3720 ret = -ENOMEM; 3788 3721 goto unlock; 3789 3722 } 3790 - rcu_assign_pointer(event->rb, rb); 3723 + 3724 + atomic_set(&rb->mmap_count, 1); 3725 + rb->mmap_locked = extra; 3726 + rb->mmap_user = get_current_user(); 3791 3727 3792 3728 atomic_long_add(user_extra, &user->locked_vm); 3793 - event->mmap_locked = extra; 3794 - event->mmap_user = get_current_user(); 3795 - vma->vm_mm->pinned_vm += event->mmap_locked; 3729 + vma->vm_mm->pinned_vm += extra; 3730 + 3731 + ring_buffer_attach(event, rb); 3732 + rcu_assign_pointer(event->rb, rb); 3796 3733 3797 3734 perf_event_update_userpage(event); 3798 3735 ··· 3805 3734 atomic_inc(&event->mmap_count); 3806 3735 mutex_unlock(&event->mmap_mutex); 3807 3736 3808 - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3737 + /* 3738 + * Since pinned accounting is per vm we cannot allow fork() to copy our 3739 + * vma. 
3740 + */ 3741 + vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; 3809 3742 vma->vm_ops = &perf_mmap_vmops; 3810 3743 3811 3744 return ret; ··· 6487 6412 if (atomic_read(&event->mmap_count)) 6488 6413 goto unlock; 6489 6414 6415 + old_rb = event->rb; 6416 + 6490 6417 if (output_event) { 6491 6418 /* get the rb we want to redirect to */ 6492 6419 rb = ring_buffer_get(output_event); ··· 6496 6419 goto unlock; 6497 6420 } 6498 6421 6499 - old_rb = event->rb; 6500 - rcu_assign_pointer(event->rb, rb); 6501 6422 if (old_rb) 6502 6423 ring_buffer_detach(event, old_rb); 6424 + 6425 + if (rb) 6426 + ring_buffer_attach(event, rb); 6427 + 6428 + rcu_assign_pointer(event->rb, rb); 6429 + 6430 + if (old_rb) { 6431 + ring_buffer_put(old_rb); 6432 + /* 6433 + * Since we detached before setting the new rb, so that we 6434 + * could attach the new rb, we could have missed a wakeup. 6435 + * Provide it now. 6436 + */ 6437 + wake_up_all(&event->waitq); 6438 + } 6439 + 6503 6440 ret = 0; 6504 6441 unlock: 6505 6442 mutex_unlock(&event->mmap_mutex); 6506 6443 6507 - if (old_rb) 6508 - ring_buffer_put(old_rb); 6509 6444 out: 6510 6445 return ret; 6511 6446 }
+4
kernel/events/internal.h
··· 31 31 spinlock_t event_lock; 32 32 struct list_head event_list; 33 33 34 + atomic_t mmap_count; 35 + unsigned long mmap_locked; 36 + struct user_struct *mmap_user; 37 + 34 38 struct perf_event_mmap_page *user_page; 35 39 void *data_pages[0]; 36 40 };
+20 -10
kernel/kprobes.c
··· 467 467 /* Optimization staging list, protected by kprobe_mutex */ 468 468 static LIST_HEAD(optimizing_list); 469 469 static LIST_HEAD(unoptimizing_list); 470 + static LIST_HEAD(freeing_list); 470 471 471 472 static void kprobe_optimizer(struct work_struct *work); 472 473 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); ··· 505 504 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint 506 505 * if need) kprobes listed on unoptimizing_list. 507 506 */ 508 - static __kprobes void do_unoptimize_kprobes(struct list_head *free_list) 507 + static __kprobes void do_unoptimize_kprobes(void) 509 508 { 510 509 struct optimized_kprobe *op, *tmp; 511 510 ··· 516 515 /* Ditto to do_optimize_kprobes */ 517 516 get_online_cpus(); 518 517 mutex_lock(&text_mutex); 519 - arch_unoptimize_kprobes(&unoptimizing_list, free_list); 518 + arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); 520 519 /* Loop free_list for disarming */ 521 - list_for_each_entry_safe(op, tmp, free_list, list) { 520 + list_for_each_entry_safe(op, tmp, &freeing_list, list) { 522 521 /* Disarm probes if marked disabled */ 523 522 if (kprobe_disabled(&op->kp)) 524 523 arch_disarm_kprobe(&op->kp); ··· 537 536 } 538 537 539 538 /* Reclaim all kprobes on the free_list */ 540 - static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list) 539 + static __kprobes void do_free_cleaned_kprobes(void) 541 540 { 542 541 struct optimized_kprobe *op, *tmp; 543 542 544 - list_for_each_entry_safe(op, tmp, free_list, list) { 543 + list_for_each_entry_safe(op, tmp, &freeing_list, list) { 545 544 BUG_ON(!kprobe_unused(&op->kp)); 546 545 list_del_init(&op->list); 547 546 free_aggr_kprobe(&op->kp); ··· 557 556 /* Kprobe jump optimizer */ 558 557 static __kprobes void kprobe_optimizer(struct work_struct *work) 559 558 { 560 - LIST_HEAD(free_list); 561 - 562 559 mutex_lock(&kprobe_mutex); 563 560 /* Lock modules while optimizing kprobes */ 564 561 mutex_lock(&module_mutex); ··· 
565 566 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed) 566 567 * kprobes before waiting for quiesence period. 567 568 */ 568 - do_unoptimize_kprobes(&free_list); 569 + do_unoptimize_kprobes(); 569 570 570 571 /* 571 572 * Step 2: Wait for quiesence period to ensure all running interrupts ··· 580 581 do_optimize_kprobes(); 581 582 582 583 /* Step 4: Free cleaned kprobes after quiesence period */ 583 - do_free_cleaned_kprobes(&free_list); 584 + do_free_cleaned_kprobes(); 584 585 585 586 mutex_unlock(&module_mutex); 586 587 mutex_unlock(&kprobe_mutex); ··· 722 723 if (!list_empty(&op->list)) 723 724 /* Dequeue from the (un)optimization queue */ 724 725 list_del_init(&op->list); 725 - 726 726 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 727 + 728 + if (kprobe_unused(p)) { 729 + /* Enqueue if it is unused */ 730 + list_add(&op->list, &freeing_list); 731 + /* 732 + * Remove unused probes from the hash list. After waiting 733 + * for synchronization, this probe is reclaimed. 734 + * (reclaiming is done by do_free_cleaned_kprobes().) 735 + */ 736 + hlist_del_rcu(&op->kp.hlist); 737 + } 738 + 727 739 /* Don't touch the code, because it is already freed. */ 728 740 arch_remove_optimized_kprobe(op); 729 741 }
+11 -10
kernel/range.c
··· 4 4 #include <linux/kernel.h> 5 5 #include <linux/init.h> 6 6 #include <linux/sort.h> 7 - 7 + #include <linux/string.h> 8 8 #include <linux/range.h> 9 9 10 10 int add_range(struct range *range, int az, int nr_range, u64 start, u64 end) ··· 32 32 if (start >= end) 33 33 return nr_range; 34 34 35 - /* Try to merge it with old one: */ 35 + /* get new start/end: */ 36 36 for (i = 0; i < nr_range; i++) { 37 - u64 final_start, final_end; 38 37 u64 common_start, common_end; 39 38 40 39 if (!range[i].end) ··· 44 45 if (common_start > common_end) 45 46 continue; 46 47 47 - final_start = min(range[i].start, start); 48 - final_end = max(range[i].end, end); 48 + /* new start/end, will add it back at last */ 49 + start = min(range[i].start, start); 50 + end = max(range[i].end, end); 49 51 50 - /* clear it and add it back for further merge */ 51 - range[i].start = 0; 52 - range[i].end = 0; 53 - return add_range_with_merge(range, az, nr_range, 54 - final_start, final_end); 52 + memmove(&range[i], &range[i + 1], 53 + (nr_range - (i + 1)) * sizeof(range[i])); 54 + range[nr_range - 1].start = 0; 55 + range[nr_range - 1].end = 0; 56 + nr_range--; 57 + i--; 55 58 } 56 59 57 60 /* Need to add it: */
+18 -5
kernel/sched/core.c
··· 633 633 static inline bool got_nohz_idle_kick(void) 634 634 { 635 635 int cpu = smp_processor_id(); 636 - return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)); 636 + 637 + if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu))) 638 + return false; 639 + 640 + if (idle_cpu(cpu) && !need_resched()) 641 + return true; 642 + 643 + /* 644 + * We can't run Idle Load Balance on this CPU for this time so we 645 + * cancel it and clear NOHZ_BALANCE_KICK 646 + */ 647 + clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)); 648 + return false; 637 649 } 638 650 639 651 #else /* CONFIG_NO_HZ_COMMON */ ··· 1405 1393 1406 1394 void scheduler_ipi(void) 1407 1395 { 1408 - if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick() 1409 - && !tick_nohz_full_cpu(smp_processor_id())) 1396 + if (llist_empty(&this_rq()->wake_list) 1397 + && !tick_nohz_full_cpu(smp_processor_id()) 1398 + && !got_nohz_idle_kick()) 1410 1399 return; 1411 1400 1412 1401 /* ··· 1430 1417 /* 1431 1418 * Check if someone kicked us for doing the nohz idle load balance. 1432 1419 */ 1433 - if (unlikely(got_nohz_idle_kick() && !need_resched())) { 1420 + if (unlikely(got_nohz_idle_kick())) { 1434 1421 this_rq()->idle_balance = 1; 1435 1422 raise_softirq_irqoff(SCHED_SOFTIRQ); 1436 1423 } ··· 4758 4745 */ 4759 4746 idle->sched_class = &idle_sched_class; 4760 4747 ftrace_graph_init_idle_task(idle, cpu); 4761 - vtime_init_idle(idle); 4748 + vtime_init_idle(idle, cpu); 4762 4749 #if defined(CONFIG_SMP) 4763 4750 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 4764 4751 #endif
+3 -3
kernel/sched/cputime.c
··· 747 747 748 748 write_seqlock(&current->vtime_seqlock); 749 749 current->vtime_snap_whence = VTIME_SYS; 750 - current->vtime_snap = sched_clock(); 750 + current->vtime_snap = sched_clock_cpu(smp_processor_id()); 751 751 write_sequnlock(&current->vtime_seqlock); 752 752 } 753 753 754 - void vtime_init_idle(struct task_struct *t) 754 + void vtime_init_idle(struct task_struct *t, int cpu) 755 755 { 756 756 unsigned long flags; 757 757 758 758 write_seqlock_irqsave(&t->vtime_seqlock, flags); 759 759 t->vtime_snap_whence = VTIME_SYS; 760 - t->vtime_snap = sched_clock(); 760 + t->vtime_snap = sched_clock_cpu(cpu); 761 761 write_sequnlock_irqrestore(&t->vtime_seqlock, flags); 762 762 } 763 763
-4
kernel/time/tick-broadcast.c
··· 698 698 699 699 bc->event_handler = tick_handle_oneshot_broadcast; 700 700 701 - /* Take the do_timer update */ 702 - if (!tick_nohz_full_cpu(cpu)) 703 - tick_do_timer_cpu = cpu; 704 - 705 701 /* 706 702 * We must be careful here. There might be other CPUs 707 703 * waiting for periodic broadcast. We need to set the
+1 -1
kernel/time/tick-sched.c
··· 306 306 * we can't safely shutdown that CPU. 307 307 */ 308 308 if (have_nohz_full_mask && tick_do_timer_cpu == cpu) 309 - return -EINVAL; 309 + return NOTIFY_BAD; 310 310 break; 311 311 } 312 312 return NOTIFY_OK;