Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86, mce: Clean up thermal init by introducing intel_thermal_supported()
x86, mce: Thermal monitoring depends on APIC being enabled
x86: Gart: fix breakage due to IOMMU initialization cleanup
x86: Move swiotlb initialization before dma32_free_bootmem
x86: Fix build warning in arch/x86/mm/mmio-mod.c
x86: Remove usedac in feature-removal-schedule.txt
x86: Fix duplicated UV BAU interrupt vector
nvram: Fix write beyond end condition; prove to gcc copy is safe
mm: Adjust do_pages_stat() so gcc can see copy_from_user() is safe
x86: Limit the number of processor bootup messages
x86: Remove enabling x2apic message for every CPU
doc: Add documentation for bootloader_{type,version}
x86, msr: Add support for non-contiguous cpumasks
x86: Use find_e820() instead of hard coded trampoline address
x86, AMD: Fix stale cpuid4_info shared_map data in shared_cpu_map cpumasks

Trivial percpu-naming-introduced conflicts in arch/x86/kernel/cpu/intel_cacheinfo.c

+192 -119
-7
Documentation/feature-removal-schedule.txt
··· 291 291 292 292 --------------------------- 293 293 294 - What: usedac i386 kernel parameter 295 - When: 2.6.27 296 - Why: replaced by allowdac and no dac combination 297 - Who: Glauber Costa <gcosta@redhat.com> 298 - 299 - --------------------------- 300 - 301 294 What: print_fn_descriptor_symbol() 302 295 When: October 2009 303 296 Why: The %pF vsprintf format provides the same functionality in a
+31
Documentation/sysctl/kernel.txt
··· 19 19 show up in /proc/sys/kernel: 20 20 - acpi_video_flags 21 21 - acct 22 + - bootloader_type [ X86 only ] 23 + - bootloader_version [ X86 only ] 22 24 - callhome [ S390 only ] 23 25 - auto_msgmni 24 26 - core_pattern ··· 92 90 That is, suspend accounting if there left <= 2% free; resume it 93 91 if we got >=4%; consider information about amount of free space 94 92 valid for 30 seconds. 93 + 94 + ============================================================== 95 + 96 + bootloader_type: 97 + 98 + x86 bootloader identification 99 + 100 + This gives the bootloader type number as indicated by the bootloader, 101 + shifted left by 4, and OR'd with the low four bits of the bootloader 102 + version. The reason for this encoding is that this used to match the 103 + type_of_loader field in the kernel header; the encoding is kept for 104 + backwards compatibility. That is, if the full bootloader type number 105 + is 0x15 and the full version number is 0x234, this file will contain 106 + the value 340 = 0x154. 107 + 108 + See the type_of_loader and ext_loader_type fields in 109 + Documentation/x86/boot.txt for additional information. 110 + 111 + ============================================================== 112 + 113 + bootloader_version: 114 + 115 + x86 bootloader version 116 + 117 + The complete bootloader version number. In the example above, this 118 + file will contain the value 564 = 0x234. 119 + 120 + See the type_of_loader and ext_loader_ver fields in 121 + Documentation/x86/boot.txt for additional information. 95 122 96 123 ============================================================== 97 124
+1 -1
arch/x86/include/asm/irq_vectors.h
··· 113 113 */ 114 114 #define LOCAL_PENDING_VECTOR 0xec 115 115 116 - #define UV_BAU_MESSAGE 0xec 116 + #define UV_BAU_MESSAGE 0xea 117 117 118 118 /* 119 119 * Self IPI vector for machine checks
+3
arch/x86/include/asm/msr.h
··· 244 244 245 245 #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0) 246 246 247 + struct msr *msrs_alloc(void); 248 + void msrs_free(struct msr *msrs); 249 + 247 250 #ifdef CONFIG_SMP 248 251 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 249 252 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-1
arch/x86/include/asm/trampoline.h
··· 16 16 extern unsigned long initial_gs; 17 17 18 18 #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) 19 - #define TRAMPOLINE_BASE 0x6000 20 19 21 20 extern unsigned long setup_trampoline(void); 22 21 extern void __init reserve_trampoline_memory(void);
+6 -5
arch/x86/kernel/aperture_64.c
··· 280 280 * or BIOS forget to put that in reserved. 281 281 * try to update e820 to make that region as reserved. 282 282 */ 283 - int i, fix, slot; 283 + u32 agp_aper_base = 0, agp_aper_order = 0; 284 + int i, fix, slot, valid_agp = 0; 284 285 u32 ctl; 285 286 u32 aper_size = 0, aper_order = 0, last_aper_order = 0; 286 287 u64 aper_base = 0, last_aper_base = 0; ··· 291 290 return; 292 291 293 292 /* This is mostly duplicate of iommu_hole_init */ 293 + agp_aper_base = search_agp_bridge(&agp_aper_order, &valid_agp); 294 + 294 295 fix = 0; 295 296 for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { 296 297 int bus; ··· 345 342 } 346 343 } 347 344 348 - if (!fix) 345 + if (valid_agp) 349 346 return; 350 347 351 - /* different nodes have different setting, disable them all at first*/ 348 + /* disable them all at first */ 352 349 for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { 353 350 int bus; 354 351 int dev_base, dev_limit; ··· 461 458 462 459 if (aper_alloc) { 463 460 /* Got the aperture from the AGP bridge */ 464 - } else if (!valid_agp) { 465 - /* Do nothing */ 466 461 } else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) || 467 462 force_iommu || 468 463 valid_agp ||
+1 -1
arch/x86/kernel/apic/apic.c
··· 1341 1341 1342 1342 rdmsr(MSR_IA32_APICBASE, msr, msr2); 1343 1343 if (!(msr & X2APIC_ENABLE)) { 1344 - pr_info("Enabling x2apic\n"); 1344 + printk_once(KERN_INFO "Enabling x2apic\n"); 1345 1345 wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0); 1346 1346 } 1347 1347 }
+9 -6
arch/x86/kernel/cpu/addon_cpuid_features.c
··· 74 74 unsigned int eax, ebx, ecx, edx, sub_index; 75 75 unsigned int ht_mask_width, core_plus_mask_width; 76 76 unsigned int core_select_mask, core_level_siblings; 77 + static bool printed; 77 78 78 79 if (c->cpuid_level < 0xb) 79 80 return; ··· 128 127 129 128 c->x86_max_cores = (core_level_siblings / smp_num_siblings); 130 129 131 - 132 - printk(KERN_INFO "CPU: Physical Processor ID: %d\n", 133 - c->phys_proc_id); 134 - if (c->x86_max_cores > 1) 135 - printk(KERN_INFO "CPU: Processor Core ID: %d\n", 136 - c->cpu_core_id); 130 + if (!printed) { 131 + printk(KERN_INFO "CPU: Physical Processor ID: %d\n", 132 + c->phys_proc_id); 133 + if (c->x86_max_cores > 1) 134 + printk(KERN_INFO "CPU: Processor Core ID: %d\n", 135 + c->cpu_core_id); 136 + printed = 1; 137 + } 137 138 return; 138 139 #endif 139 140 }
-2
arch/x86/kernel/cpu/amd.c
··· 375 375 node = nearby_node(apicid); 376 376 } 377 377 numa_set_node(cpu, node); 378 - 379 - printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node); 380 378 #endif 381 379 } 382 380
+5 -3
arch/x86/kernel/cpu/common.c
··· 427 427 #ifdef CONFIG_X86_HT 428 428 u32 eax, ebx, ecx, edx; 429 429 int index_msb, core_bits; 430 + static bool printed; 430 431 431 432 if (!cpu_has(c, X86_FEATURE_HT)) 432 433 return; ··· 443 442 smp_num_siblings = (ebx & 0xff0000) >> 16; 444 443 445 444 if (smp_num_siblings == 1) { 446 - printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); 445 + printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n"); 447 446 goto out; 448 447 } 449 448 ··· 470 469 ((1 << core_bits) - 1); 471 470 472 471 out: 473 - if ((c->x86_max_cores * smp_num_siblings) > 1) { 472 + if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) { 474 473 printk(KERN_INFO "CPU: Physical Processor ID: %d\n", 475 474 c->phys_proc_id); 476 475 printk(KERN_INFO "CPU: Processor Core ID: %d\n", 477 476 c->cpu_core_id); 477 + printed = 1; 478 478 } 479 479 #endif 480 480 } ··· 1117 1115 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) 1118 1116 panic("CPU#%d already initialized!\n", cpu); 1119 1117 1120 - printk(KERN_INFO "Initializing CPU#%d\n", cpu); 1118 + pr_debug("Initializing CPU#%d\n", cpu); 1121 1119 1122 1120 clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); 1123 1121
-2
arch/x86/kernel/cpu/intel.c
··· 270 270 node = cpu_to_node(cpu); 271 271 } 272 272 numa_set_node(cpu, node); 273 - 274 - printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node); 275 273 #endif 276 274 } 277 275
+7 -6
arch/x86/kernel/cpu/intel_cacheinfo.c
··· 507 507 { 508 508 struct _cpuid4_info *this_leaf, *sibling_leaf; 509 509 unsigned long num_threads_sharing; 510 - int index_msb, i; 510 + int index_msb, i, sibling; 511 511 struct cpuinfo_x86 *c = &cpu_data(cpu); 512 512 513 513 if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { 514 - struct cpuinfo_x86 *d; 515 - for_each_online_cpu(i) { 514 + for_each_cpu(i, c->llc_shared_map) { 516 515 if (!per_cpu(ici_cpuid4_info, i)) 517 516 continue; 518 - d = &cpu_data(i); 519 517 this_leaf = CPUID4_INFO_IDX(i, index); 520 - cpumask_copy(to_cpumask(this_leaf->shared_cpu_map), 521 - d->llc_shared_map); 518 + for_each_cpu(sibling, c->llc_shared_map) { 519 + if (!cpu_online(sibling)) 520 + continue; 521 + set_bit(sibling, this_leaf->shared_cpu_map); 522 + } 522 523 } 523 524 return; 524 525 }
+14 -6
arch/x86/kernel/cpu/mcheck/therm_throt.c
··· 256 256 ack_APIC_irq(); 257 257 } 258 258 259 + /* Thermal monitoring depends on APIC, ACPI and clock modulation */ 260 + static int intel_thermal_supported(struct cpuinfo_x86 *c) 261 + { 262 + if (!cpu_has_apic) 263 + return 0; 264 + if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC)) 265 + return 0; 266 + return 1; 267 + } 268 + 259 269 void __init mcheck_intel_therm_init(void) 260 270 { 261 271 /* ··· 273 263 * LVT value on BSP and use that value to restore APs' thermal LVT 274 264 * entry BIOS programmed later 275 265 */ 276 - if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) && 277 - cpu_has(&boot_cpu_data, X86_FEATURE_ACC)) 266 + if (intel_thermal_supported(&boot_cpu_data)) 278 267 lvtthmr_init = apic_read(APIC_LVTTHMR); 279 268 } 280 269 ··· 283 274 int tm2 = 0; 284 275 u32 l, h; 285 276 286 - /* Thermal monitoring depends on ACPI and clock modulation*/ 287 - if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC)) 277 + if (!intel_thermal_supported(c)) 288 278 return; 289 279 290 280 /* ··· 347 339 l = apic_read(APIC_LVTTHMR); 348 340 apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); 349 341 350 - printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n", 351 - cpu, tm2 ? "TM2" : "TM1"); 342 + printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n", 343 + tm2 ? "TM2" : "TM1"); 352 344 353 345 /* enable thermal throttle processing */ 354 346 atomic_set(&therm_throt_en, 1);
+10 -1
arch/x86/kernel/e820.c
··· 732 732 char overlap_ok; 733 733 }; 734 734 static struct early_res early_res[MAX_EARLY_RES] __initdata = { 735 - { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */ 735 + { 0, PAGE_SIZE, "BIOS data page", 1 }, /* BIOS data page */ 736 + #ifdef CONFIG_X86_32 737 + /* 738 + * But first pinch a few for the stack/trampoline stuff 739 + * FIXME: Don't need the extra page at 4K, but need to fix 740 + * trampoline before removing it. (see the GDT stuff) 741 + */ 742 + { PAGE_SIZE, PAGE_SIZE, "EX TRAMPOLINE", 1 }, 743 + #endif 744 + 736 745 {} 737 746 }; 738 747
-2
arch/x86/kernel/head32.c
··· 29 29 30 30 void __init i386_start_kernel(void) 31 31 { 32 - reserve_trampoline_memory(); 33 - 34 32 reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); 35 33 36 34 #ifdef CONFIG_BLK_DEV_INITRD
-2
arch/x86/kernel/head64.c
··· 98 98 { 99 99 copy_bootdata(__va(real_mode_data)); 100 100 101 - reserve_trampoline_memory(); 102 - 103 101 reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); 104 102 105 103 #ifdef CONFIG_BLK_DEV_INITRD
-3
arch/x86/kernel/mpparse.c
··· 945 945 { 946 946 if (enable_update_mptable && alloc_mptable) { 947 947 u64 startt = 0; 948 - #ifdef CONFIG_X86_TRAMPOLINE 949 - startt = TRAMPOLINE_BASE; 950 - #endif 951 948 mpc_new_phys = early_reserve_e820(startt, mpc_new_length, 4); 952 949 } 953 950 }
+4 -1
arch/x86/kernel/pci-dma.c
··· 120 120 121 121 void __init pci_iommu_alloc(void) 122 122 { 123 + int use_swiotlb; 124 + 125 + use_swiotlb = pci_swiotlb_init(); 123 126 #ifdef CONFIG_X86_64 124 127 /* free the range so iommu could get some range less than 4G */ 125 128 dma32_free_bootmem(); 126 129 #endif 127 - if (pci_swiotlb_init()) 130 + if (use_swiotlb) 128 131 return; 129 132 130 133 gart_iommu_hole_init();
+2 -1
arch/x86/kernel/pci-gart_64.c
··· 710 710 struct pci_dev *dev; 711 711 int i; 712 712 713 - if (no_agp) 713 + /* don't shutdown it if there is AGP installed */ 714 + if (!no_agp) 714 715 return; 715 716 716 717 for (i = 0; i < num_k8_northbridges; i++) {
+8 -5
arch/x86/kernel/setup.c
··· 73 73 74 74 #include <asm/mtrr.h> 75 75 #include <asm/apic.h> 76 + #include <asm/trampoline.h> 76 77 #include <asm/e820.h> 77 78 #include <asm/mpspec.h> 78 79 #include <asm/setup.h> ··· 876 875 877 876 reserve_brk(); 878 877 878 + /* 879 + * Find and reserve possible boot-time SMP configuration: 880 + */ 881 + find_smp_config(); 882 + 883 + reserve_trampoline_memory(); 884 + 879 885 #ifdef CONFIG_ACPI_SLEEP 880 886 /* 881 887 * Reserve low memory region for sleep support. ··· 928 920 acpi_boot_table_init(); 929 921 930 922 early_acpi_boot_init(); 931 - 932 - /* 933 - * Find and reserve possible boot-time SMP configuration: 934 - */ 935 - find_smp_config(); 936 923 937 924 #ifdef CONFIG_ACPI_NUMA 938 925 /*
+31 -14
arch/x86/kernel/smpboot.c
··· 671 671 complete(&c_idle->done); 672 672 } 673 673 674 + /* reduce the number of lines printed when booting a large cpu count system */ 675 + static void __cpuinit announce_cpu(int cpu, int apicid) 676 + { 677 + static int current_node = -1; 678 + int node = cpu_to_node(cpu); 679 + 680 + if (system_state == SYSTEM_BOOTING) { 681 + if (node != current_node) { 682 + if (current_node > (-1)) 683 + pr_cont(" Ok.\n"); 684 + current_node = node; 685 + pr_info("Booting Node %3d, Processors ", node); 686 + } 687 + pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : ""); 688 + return; 689 + } else 690 + pr_info("Booting Node %d Processor %d APIC 0x%x\n", 691 + node, cpu, apicid); 692 + } 693 + 674 694 /* 675 695 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad 676 696 * (ie clustered apic addressing mode), this is a LOGICAL apic ID. ··· 757 737 /* start_ip had better be page-aligned! */ 758 738 start_ip = setup_trampoline(); 759 739 760 - /* So we see what's up */ 761 - printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n", 762 - cpu, apicid, start_ip); 740 + /* So we see what's up */ 741 + announce_cpu(cpu, apicid); 763 742 764 743 /* 765 744 * This grunge runs the startup process for ··· 807 788 udelay(100); 808 789 } 809 790 810 - if (cpumask_test_cpu(cpu, cpu_callin_mask)) { 811 - /* number CPUs logically, starting from 1 (BSP is 0) */ 812 - pr_debug("OK.\n"); 813 - printk(KERN_INFO "CPU%d: ", cpu); 814 - print_cpu_info(&cpu_data(cpu)); 815 - pr_debug("CPU has booted.\n"); 816 - } else { 791 + if (cpumask_test_cpu(cpu, cpu_callin_mask)) 792 + pr_debug("CPU%d: has booted.\n", cpu); 793 + else { 817 794 boot_error = 1; 818 795 if (*((volatile unsigned char *)trampoline_base) 819 796 == 0xA5) 820 797 /* trampoline started but...? */ 821 - printk(KERN_ERR "Stuck ??\n"); 798 + pr_err("CPU%d: Stuck ??\n", cpu); 822 799 else 823 800 /* trampoline code not run */ 824 - printk(KERN_ERR "Not responding.\n"); 801 + pr_err("CPU%d: Not responding.\n", cpu); 825 802 if (apic->inquire_remote_apic) 826 803 apic->inquire_remote_apic(apicid); 827 804 } ··· 1308 1293 for (i = 0; i < 10; i++) { 1309 1294 /* They ack this in play_dead by setting CPU_DEAD */ 1310 1295 if (per_cpu(cpu_state, cpu) == CPU_DEAD) { 1311 - printk(KERN_INFO "CPU %d is now offline\n", cpu); 1296 + if (system_state == SYSTEM_RUNNING) 1297 + pr_info("CPU %u is now offline\n", cpu); 1298 + 1312 1299 if (1 == num_online_cpus()) 1313 1300 alternatives_smp_switch(0); 1314 1301 return; 1315 1302 } 1316 1303 msleep(100); 1317 1304 } 1318 - printk(KERN_ERR "CPU %u didn't die...\n", cpu); 1305 + pr_err("CPU %u didn't die...\n", cpu); 1319 1306 } 1320 1307 1321 1308 void play_dead_common(void)
+9 -11
arch/x86/kernel/trampoline.c
··· 12 12 #endif 13 13 14 14 /* ready for x86_64 and x86 */ 15 - unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE); 15 + unsigned char *__trampinitdata trampoline_base; 16 16 17 17 void __init reserve_trampoline_memory(void) 18 18 { 19 - #ifdef CONFIG_X86_32 20 - /* 21 - * But first pinch a few for the stack/trampoline stuff 22 - * FIXME: Don't need the extra page at 4K, but need to fix 23 - * trampoline before removing it. (see the GDT stuff) 24 - */ 25 - reserve_early(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE"); 26 - #endif 19 + unsigned long mem; 20 + 27 21 /* Has to be in very low memory so we can execute real-mode AP code. */ 28 - reserve_early(TRAMPOLINE_BASE, TRAMPOLINE_BASE + TRAMPOLINE_SIZE, 29 - "TRAMPOLINE"); 22 + mem = find_e820_area(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE); 23 + if (mem == -1L) 24 + panic("Cannot allocate trampoline\n"); 25 + 26 + trampoline_base = __va(mem); 27 + reserve_early(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE"); 30 28 } 31 29 32 30 /*
+22 -4
arch/x86/lib/msr.c
··· 7 7 u32 msr_no; 8 8 struct msr reg; 9 9 struct msr *msrs; 10 - int off; 11 10 int err; 12 11 }; 13 12 ··· 17 18 int this_cpu = raw_smp_processor_id(); 18 19 19 20 if (rv->msrs) 20 - reg = &rv->msrs[this_cpu - rv->off]; 21 + reg = per_cpu_ptr(rv->msrs, this_cpu); 21 22 else 22 23 reg = &rv->reg; 23 24 ··· 31 32 int this_cpu = raw_smp_processor_id(); 32 33 33 34 if (rv->msrs) 34 - reg = &rv->msrs[this_cpu - rv->off]; 35 + reg = per_cpu_ptr(rv->msrs, this_cpu); 35 36 else 36 37 reg = &rv->reg; 37 38 ··· 79 80 80 81 memset(&rv, 0, sizeof(rv)); 81 82 82 - rv.off = cpumask_first(mask); 83 83 rv.msrs = msrs; 84 84 rv.msr_no = msr_no; 85 85 ··· 117 119 __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu); 118 120 } 119 121 EXPORT_SYMBOL(wrmsr_on_cpus); 122 + 123 + struct msr *msrs_alloc(void) 124 + { 125 + struct msr *msrs = NULL; 126 + 127 + msrs = alloc_percpu(struct msr); 128 + if (!msrs) { 129 + pr_warning("%s: error allocating msrs\n", __func__); 130 + return NULL; 131 + } 132 + 133 + return msrs; 134 + } 135 + EXPORT_SYMBOL(msrs_alloc); 136 + 137 + void msrs_free(struct msr *msrs) 138 + { 139 + free_percpu(msrs); 140 + } 141 + EXPORT_SYMBOL(msrs_free); 120 142 121 143 /* These "safe" variants are slower and should be used when the target MSR 122 144 may not actually exist. */
+1 -1
arch/x86/mm/mmio-mod.c
··· 20 20 * Derived from the read-mod example from relay-examples by Tom Zanussi. 21 21 */ 22 22 23 - #define pr_fmt(fmt) "mmiotrace: " 23 + #define pr_fmt(fmt) "mmiotrace: " fmt 24 24 25 25 #define DEBUG 1 26 26
+10 -4
drivers/char/nvram.c
··· 264 264 unsigned char contents[NVRAM_BYTES]; 265 265 unsigned i = *ppos; 266 266 unsigned char *tmp; 267 - int len; 268 267 269 - len = (NVRAM_BYTES - i) < count ? (NVRAM_BYTES - i) : count; 270 - if (copy_from_user(contents, buf, len)) 268 + if (i >= NVRAM_BYTES) 269 + return 0; /* Past EOF */ 270 + 271 + if (count > NVRAM_BYTES - i) 272 + count = NVRAM_BYTES - i; 273 + if (count > NVRAM_BYTES) 274 + return -EFAULT; /* Can't happen, but prove it to gcc */ 275 + 276 + if (copy_from_user(contents, buf, count)) 271 277 return -EFAULT; 272 278 273 279 spin_lock_irq(&rtc_lock); ··· 281 275 if (!__nvram_check_checksum()) 282 276 goto checksum_err; 283 277 284 - for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp) 278 + for (tmp = contents; count--; ++i, ++tmp) 285 279 __nvram_write_byte(*tmp, i); 286 280 287 281 __nvram_set_checksum();
+17 -29
drivers/edac/amd64_edac.c
··· 13 13 static int ecc_enable_override; 14 14 module_param(ecc_enable_override, int, 0644); 15 15 16 + static struct msr *msrs; 17 + 16 18 /* Lookup table for all possible MC control instances */ 17 19 struct amd64_pvt; 18 20 static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES]; ··· 2497 2495 static bool amd64_nb_mce_bank_enabled_on_node(int nid) 2498 2496 { 2499 2497 cpumask_var_t mask; 2500 - struct msr *msrs; 2501 - int cpu, nbe, idx = 0; 2498 + int cpu, nbe; 2502 2499 bool ret = false; 2503 2500 2504 2501 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { ··· 2508 2507 2509 2508 get_cpus_on_this_dct_cpumask(mask, nid); 2510 2509 2511 - msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL); 2512 - if (!msrs) { 2513 - amd64_printk(KERN_WARNING, "%s: error allocating msrs\n", 2514 - __func__); 2515 - free_cpumask_var(mask); 2516 - return false; 2517 - } 2518 - 2519 2510 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs); 2520 2511 2521 2512 for_each_cpu(cpu, mask) { 2522 - nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE; 2513 + struct msr *reg = per_cpu_ptr(msrs, cpu); 2514 + nbe = reg->l & K8_MSR_MCGCTL_NBE; 2523 2515 2524 2516 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", 2525 - cpu, msrs[idx].q, 2517 + cpu, reg->q, 2526 2518 (nbe ? 
"enabled" : "disabled")); 2527 2519 2528 2520 if (!nbe) 2529 2521 goto out; 2530 - 2531 - idx++; 2532 2522 } 2533 2523 ret = true; 2534 2524 2535 2525 out: 2536 - kfree(msrs); 2537 2526 free_cpumask_var(mask); 2538 2527 return ret; 2539 2528 } ··· 2531 2540 static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) 2532 2541 { 2533 2542 cpumask_var_t cmask; 2534 - struct msr *msrs = NULL; 2535 - int cpu, idx = 0; 2543 + int cpu; 2536 2544 2537 2545 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) { 2538 2546 amd64_printk(KERN_WARNING, "%s: error allocating mask\n", ··· 2541 2551 2542 2552 get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id); 2543 2553 2544 - msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL); 2545 - if (!msrs) { 2546 - amd64_printk(KERN_WARNING, "%s: error allocating msrs\n", 2547 - __func__); 2548 - return -ENOMEM; 2549 - } 2550 - 2551 2554 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); 2552 2555 2553 2556 for_each_cpu(cpu, cmask) { 2554 2557 2558 + struct msr *reg = per_cpu_ptr(msrs, cpu); 2559 + 2555 2560 if (on) { 2556 - if (msrs[idx].l & K8_MSR_MCGCTL_NBE) 2561 + if (reg->l & K8_MSR_MCGCTL_NBE) 2557 2562 pvt->flags.ecc_report = 1; 2558 2563 2559 - msrs[idx].l |= K8_MSR_MCGCTL_NBE; 2564 + reg->l |= K8_MSR_MCGCTL_NBE; 2560 2565 } else { 2561 2566 /* 2562 2567 * Turn off ECC reporting only when it was off before 2563 2568 */ 2564 2569 if (!pvt->flags.ecc_report) 2565 - msrs[idx].l &= ~K8_MSR_MCGCTL_NBE; 2570 + reg->l &= ~K8_MSR_MCGCTL_NBE; 2566 2571 } 2567 - idx++; 2568 2572 } 2569 2573 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); 2570 2574 2571 - kfree(msrs); 2572 2575 free_cpumask_var(cmask); 2573 2576 2574 2577 return 0; ··· 3019 3036 if (cache_k8_northbridges() < 0) 3020 3037 return err; 3021 3038 3039 + msrs = msrs_alloc(); 3040 + 3022 3041 err = pci_register_driver(&amd64_pci_driver); 3023 3042 if (err) 3024 3043 return err; ··· 3056 3071 edac_pci_release_generic_ctl(amd64_ctl_pci); 3057 3072 3058 3073 
pci_unregister_driver(&amd64_pci_driver); 3074 + 3075 + msrs_free(msrs); 3076 + msrs = NULL; 3059 3077 } 3060 3078 3061 3079 module_init(amd64_edac_init);
+1 -1
mm/migrate.c
··· 1044 1044 int err; 1045 1045 1046 1046 for (i = 0; i < nr_pages; i += chunk_nr) { 1047 - if (chunk_nr + i > nr_pages) 1047 + if (chunk_nr > nr_pages - i) 1048 1048 chunk_nr = nr_pages - i; 1049 1049 1050 1050 err = copy_from_user(chunk_pages, &pages[i],