Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: Fix common misspellings

They were generated by 'codespell' and then manually reviewed.

Signed-off-by: Lucas De Marchi <lucas.demarchi@profusion.mobi>
Cc: trivial@kernel.org
LKML-Reference: <1300389856-1099-3-git-send-email-lucas.demarchi@profusion.mobi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Lucas De Marchi and committed by Ingo Molnar
0d2eb44f a6c3270b

+67 -67
+1 -1
arch/x86/Kconfig.cpu
··· 326 326 Old PentiumPro multiprocessor systems had errata that could cause 327 327 memory operations to violate the x86 ordering standard in rare cases. 328 328 Enabling this option will attempt to work around some (but not all) 329 - occurances of this problem, at the cost of much heavier spinlock and 329 + occurrences of this problem, at the cost of much heavier spinlock and 330 330 memory barrier operations. 331 331 332 332 If unsure, say n here. Even distro kernels should think twice before
+3 -3
arch/x86/crypto/aesni-intel_asm.S
··· 1346 1346 and $15, %r13 # %r13 = arg4 (mod 16) 1347 1347 je _multiple_of_16_bytes_decrypt 1348 1348 1349 - # Handle the last <16 byte block seperately 1349 + # Handle the last <16 byte block separately 1350 1350 1351 1351 paddd ONE(%rip), %xmm0 # increment CNT to get Yn 1352 1352 movdqa SHUF_MASK(%rip), %xmm10 ··· 1355 1355 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn) 1356 1356 sub $16, %r11 1357 1357 add %r13, %r11 1358 - movdqu (%arg3,%r11,1), %xmm1 # recieve the last <16 byte block 1358 + movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block 1359 1359 lea SHIFT_MASK+16(%rip), %r12 1360 1360 sub %r13, %r12 1361 1361 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes ··· 1607 1607 and $15, %r13 # %r13 = arg4 (mod 16) 1608 1608 je _multiple_of_16_bytes_encrypt 1609 1609 1610 - # Handle the last <16 Byte block seperately 1610 + # Handle the last <16 Byte block separately 1611 1611 paddd ONE(%rip), %xmm0 # INCR CNT to get Yn 1612 1612 movdqa SHUF_MASK(%rip), %xmm10 1613 1613 PSHUFB_XMM %xmm10, %xmm0
+1 -1
arch/x86/include/asm/cacheflush.h
··· 71 71 * Read/Write : ReadOnly, ReadWrite 72 72 * Presence : NotPresent 73 73 * 74 - * Within a catagory, the attributes are mutually exclusive. 74 + * Within a category, the attributes are mutually exclusive. 75 75 * 76 76 * The implementation of this API will take care of various aspects that 77 77 * are associated with changing such attributes, such as:
+2 -2
arch/x86/include/asm/nmi.h
··· 29 29 * external nmis, because the local ones are more frequent. 30 30 * 31 31 * Also setup some default high/normal/low settings for 32 - * subsystems to registers with. Using 4 bits to seperate 33 - * the priorities. This can go alot higher if needed be. 32 + * subsystems to registers with. Using 4 bits to separate 33 + * the priorities. This can go a lot higher if needed be. 34 34 */ 35 35 36 36 #define NMI_LOCAL_SHIFT 16 /* randomly picked */
+1 -1
arch/x86/include/asm/nops.h
··· 38 38 #define K8_NOP8 K8_NOP4 K8_NOP4 39 39 40 40 /* K7 nops 41 - uses eax dependencies (arbitary choice) 41 + uses eax dependencies (arbitrary choice) 42 42 1: nop 43 43 2: movl %eax,%eax 44 44 3: leal (,%eax,1),%eax
+1 -1
arch/x86/include/asm/olpc.h
··· 20 20 21 21 /* 22 22 * OLPC board IDs contain the major build number within the mask 0x0ff0, 23 - * and the minor build number withing 0x000f. Pre-builds have a minor 23 + * and the minor build number within 0x000f. Pre-builds have a minor 24 24 * number less than 8, and normal builds start at 8. For example, 0x0B10 25 25 * is a PreB1, and 0x0C18 is a C1. 26 26 */
+2 -2
arch/x86/include/asm/perf_event_p4.h
··· 1 1 /* 2 - * Netburst Perfomance Events (P4, old Xeon) 2 + * Netburst Performance Events (P4, old Xeon) 3 3 */ 4 4 5 5 #ifndef PERF_EVENT_P4_H ··· 9 9 #include <linux/bitops.h> 10 10 11 11 /* 12 - * NetBurst has perfomance MSRs shared between 12 + * NetBurst has performance MSRs shared between 13 13 * threads if HT is turned on, ie for both logical 14 14 * processors (mem: in turn in Atom with HT support 15 15 * perf-MSRs are not shared and every thread has its
+1 -1
arch/x86/include/asm/processor-flags.h
··· 7 7 */ 8 8 #define X86_EFLAGS_CF 0x00000001 /* Carry Flag */ 9 9 #define X86_EFLAGS_PF 0x00000004 /* Parity Flag */ 10 - #define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */ 10 + #define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */ 11 11 #define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */ 12 12 #define X86_EFLAGS_SF 0x00000080 /* Sign Flag */ 13 13 #define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
+1 -1
arch/x86/include/asm/ptrace-abi.h
··· 31 31 #define R12 24 32 32 #define RBP 32 33 33 #define RBX 40 34 - /* arguments: interrupts/non tracing syscalls only save upto here*/ 34 + /* arguments: interrupts/non tracing syscalls only save up to here*/ 35 35 #define R11 48 36 36 #define R10 56 37 37 #define R9 64
+2 -2
arch/x86/include/asm/ptrace.h
··· 73 73 unsigned long r12; 74 74 unsigned long rbp; 75 75 unsigned long rbx; 76 - /* arguments: non interrupts/non tracing syscalls only save upto here*/ 76 + /* arguments: non interrupts/non tracing syscalls only save up to here*/ 77 77 unsigned long r11; 78 78 unsigned long r10; 79 79 unsigned long r9; ··· 103 103 unsigned long r12; 104 104 unsigned long bp; 105 105 unsigned long bx; 106 - /* arguments: non interrupts/non tracing syscalls only save upto here*/ 106 + /* arguments: non interrupts/non tracing syscalls only save up to here*/ 107 107 unsigned long r11; 108 108 unsigned long r10; 109 109 unsigned long r9;
+1 -1
arch/x86/include/asm/tsc.h
··· 35 35 static __always_inline cycles_t vget_cycles(void) 36 36 { 37 37 /* 38 - * We only do VDSOs on TSC capable CPUs, so this shouldnt 38 + * We only do VDSOs on TSC capable CPUs, so this shouldn't 39 39 * access boot_cpu_data (which is not VDSO-safe): 40 40 */ 41 41 #ifndef CONFIG_X86_TSC
+1 -1
arch/x86/include/asm/xen/interface.h
··· 86 86 * The privilege level specifies which modes may enter a trap via a software 87 87 * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate 88 88 * privilege levels as follows: 89 - * Level == 0: Noone may enter 89 + * Level == 0: No one may enter 90 90 * Level == 1: Kernel may enter 91 91 * Level == 2: Kernel may enter 92 92 * Level == 3: Everyone may enter
+1 -1
arch/x86/kernel/alternative.c
··· 199 199 200 200 /* Replace instructions with better alternatives for this CPU type. 201 201 This runs before SMP is initialized to avoid SMP problems with 202 - self modifying code. This implies that assymetric systems where 202 + self modifying code. This implies that asymmetric systems where 203 203 APs have less capabilities than the boot processor are not handled. 204 204 Tough. Make sure you disable such features by hand. */ 205 205
+1 -1
arch/x86/kernel/aperture_64.c
··· 73 73 /* 74 74 * using 512M as goal, in case kexec will load kernel_big 75 75 * that will do the on position decompress, and could overlap with 76 - * that positon with gart that is used. 76 + * that position with gart that is used. 77 77 * sequende: 78 78 * kernel_small 79 79 * ==> kexec (with kdump trigger path or previous doesn't shutdown gart)
+2 -2
arch/x86/kernel/apic/io_apic.c
··· 1886 1886 * 1887 1887 * With interrupt-remapping, for now we will use virtual wire A mode, 1888 1888 * as virtual wire B is little complex (need to configure both 1889 - * IOAPIC RTE aswell as interrupt-remapping table entry). 1889 + * IOAPIC RTE as well as interrupt-remapping table entry). 1890 1890 * As this gets called during crash dump, keep this simple for now. 1891 1891 */ 1892 1892 if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) { ··· 2905 2905 } 2906 2906 2907 2907 /* 2908 - * Called after all the initialization is done. If we didnt find any 2908 + * Called after all the initialization is done. If we didn't find any 2909 2909 * APIC bugs then we can allow the modify fast path 2910 2910 */ 2911 2911
+1 -1
arch/x86/kernel/apm_32.c
··· 66 66 * 1.5: Fix segment register reloading (in case of bad segments saved 67 67 * across BIOS call). 68 68 * Stephen Rothwell 69 - * 1.6: Cope with complier/assembler differences. 69 + * 1.6: Cope with compiler/assembler differences. 70 70 * Only try to turn off the first display device. 71 71 * Fix OOPS at power off with no APM BIOS by Jan Echternach 72 72 * <echter@informatik.uni-rostock.de>
+2 -2
arch/x86/kernel/cpu/cpufreq/longhaul.c
··· 444 444 return -EINVAL; 445 445 } 446 446 /* Get max multiplier - as we always did. 447 - * Longhaul MSR is usefull only when voltage scaling is enabled. 447 + * Longhaul MSR is useful only when voltage scaling is enabled. 448 448 * C3 is booting at max anyway. */ 449 449 maxmult = mult; 450 450 /* Get min multiplier */ ··· 1011 1011 * trigger frequency transition in some cases. */ 1012 1012 module_param(disable_acpi_c3, int, 0644); 1013 1013 MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support"); 1014 - /* Change CPU voltage with frequency. Very usefull to save 1014 + /* Change CPU voltage with frequency. Very useful to save 1015 1015 * power, but most VIA C3 processors aren't supporting it. */ 1016 1016 module_param(scale_voltage, int, 0644); 1017 1017 MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
+1 -1
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
··· 1276 1276 1277 1277 if (powernow_k8_cpu_init_acpi(data)) { 1278 1278 /* 1279 - * Use the PSB BIOS structure. This is only availabe on 1279 + * Use the PSB BIOS structure. This is only available on 1280 1280 * an UP version, and is deprecated by AMD. 1281 1281 */ 1282 1282 if (num_online_cpus() != 1) {
+2 -2
arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
··· 292 292 293 293 result = speedstep_smi_ownership(); 294 294 if (result) { 295 - dprintk("fails in aquiring ownership of a SMI interface.\n"); 295 + dprintk("fails in acquiring ownership of a SMI interface.\n"); 296 296 return -EINVAL; 297 297 } 298 298 ··· 360 360 int result = speedstep_smi_ownership(); 361 361 362 362 if (result) 363 - dprintk("fails in re-aquiring ownership of a SMI interface.\n"); 363 + dprintk("fails in re-acquiring ownership of a SMI interface.\n"); 364 364 365 365 return result; 366 366 }
+1 -1
arch/x86/kernel/cpu/mcheck/mce-inject.c
··· 32 32 { 33 33 struct mce *i = &per_cpu(injectm, m->extcpu); 34 34 35 - /* Make sure noone reads partially written injectm */ 35 + /* Make sure no one reads partially written injectm */ 36 36 i->finished = 0; 37 37 mb(); 38 38 m->finished = 0;
+1 -1
arch/x86/kernel/cpu/mcheck/mce.c
··· 881 881 * Check if the address reported by the CPU is in a format we can parse. 882 882 * It would be possible to add code for most other cases, but all would 883 883 * be somewhat complicated (e.g. segment offset would require an instruction 884 - * parser). So only support physical addresses upto page granuality for now. 884 + * parser). So only support physical addresses up to page granuality for now. 885 885 */ 886 886 static int mce_usable_address(struct mce *m) 887 887 {
+1 -1
arch/x86/kernel/cpu/mtrr/generic.c
··· 1 1 /* 2 2 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong 3 - * because MTRRs can span upto 40 bits (36bits on most modern x86) 3 + * because MTRRs can span up to 40 bits (36bits on most modern x86) 4 4 */ 5 5 #define DEBUG 6 6
+1 -1
arch/x86/kernel/cpu/perf_event.c
··· 1111 1111 1112 1112 /* 1113 1113 * If group events scheduling transaction was started, 1114 - * skip the schedulability test here, it will be peformed 1114 + * skip the schedulability test here, it will be performed 1115 1115 * at commit time (->commit_txn) as a whole 1116 1116 */ 1117 1117 if (cpuc->group_flag & PERF_EVENT_TXN)
+4 -4
arch/x86/kernel/cpu/perf_event_p4.c
··· 1 1 /* 2 - * Netburst Perfomance Events (P4, old Xeon) 2 + * Netburst Performance Events (P4, old Xeon) 3 3 * 4 4 * Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org> 5 5 * Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com> ··· 679 679 */ 680 680 681 681 /* 682 - * if an event is shared accross the logical threads 682 + * if an event is shared across the logical threads 683 683 * the user needs special permissions to be able to use it 684 684 */ 685 685 if (p4_ht_active() && p4_event_bind_map[v].shared) { ··· 790 790 * 791 791 * It's still allowed that two threads setup same cache 792 792 * events so we can't simply clear metrics until we knew 793 - * noone is depending on us, so we need kind of counter 793 + * no one is depending on us, so we need kind of counter 794 794 * for "ReplayEvent" users. 795 795 * 796 796 * What is more complex -- RAW events, if user (for some 797 797 * reason) will pass some cache event metric with improper 798 798 * event opcode -- it's fine from hardware point of view 799 - * but completely nonsence from "meaning" of such action. 799 + * but completely nonsense from "meaning" of such action. 800 800 * 801 801 * So at moment let leave metrics turned on forever -- it's 802 802 * ok for now but need to be revisited!
+1 -1
arch/x86/kernel/cpu/vmware.c
··· 86 86 } 87 87 88 88 /* 89 - * While checking the dmi string infomation, just checking the product 89 + * While checking the dmi string information, just checking the product 90 90 * serial key should be enough, as this will always have a VMware 91 91 * specific string when running under VMware hypervisor. 92 92 */
+2 -2
arch/x86/kernel/entry_64.S
··· 18 18 * A note on terminology: 19 19 * - top of stack: Architecture defined interrupt frame from SS to RIP 20 20 * at the top of the kernel process stack. 21 - * - partial stack frame: partially saved registers upto R11. 21 + * - partial stack frame: partially saved registers up to R11. 22 22 * - full stack frame: Like partial stack frame, but all register saved. 23 23 * 24 24 * Some macro usage: ··· 422 422 END(ret_from_fork) 423 423 424 424 /* 425 - * System call entry. Upto 6 arguments in registers are supported. 425 + * System call entry. Up to 6 arguments in registers are supported. 426 426 * 427 427 * SYSCALL does not save anything on the stack and does not change the 428 428 * stack pointer.
+1 -1
arch/x86/kernel/i387.c
··· 145 145 * The _current_ task is using the FPU for the first time 146 146 * so initialize it and set the mxcsr to its default 147 147 * value at reset if we support XMM instructions and then 148 - * remeber the current task has used the FPU. 148 + * remember the current task has used the FPU. 149 149 */ 150 150 int init_fpu(struct task_struct *tsk) 151 151 {
+1 -1
arch/x86/kernel/irq_32.c
··· 172 172 173 173 call_on_stack(__do_softirq, isp); 174 174 /* 175 - * Shouldnt happen, we returned above if in_interrupt(): 175 + * Shouldn't happen, we returned above if in_interrupt(): 176 176 */ 177 177 WARN_ON_ONCE(softirq_count()); 178 178 }
+1 -1
arch/x86/kernel/kgdb.c
··· 278 278 pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); 279 279 if (dbg_release_bp_slot(*pevent)) 280 280 /* 281 - * The debugger is responisble for handing the retry on 281 + * The debugger is responsible for handing the retry on 282 282 * remove failure. 283 283 */ 284 284 return -1;
+1 -1
arch/x86/kernel/mca_32.c
··· 259 259 /* 260 260 * WARNING: Be careful when making changes here. Putting an adapter 261 261 * and the motherboard simultaneously into setup mode may result in 262 - * damage to chips (according to The Indispensible PC Hardware Book 262 + * damage to chips (according to The Indispensable PC Hardware Book 263 263 * by Hans-Peter Messmer). Also, we disable system interrupts (so 264 264 * that we are not disturbed in the middle of this). 265 265 */
+2 -2
arch/x86/kernel/mpparse.c
··· 883 883 884 884 if (!mpc_new_phys) { 885 885 unsigned char old, new; 886 - /* check if we can change the postion */ 886 + /* check if we can change the position */ 887 887 mpc->checksum = 0; 888 888 old = mpf_checksum((unsigned char *)mpc, mpc->length); 889 889 mpc->checksum = 0xff; ··· 892 892 printk(KERN_INFO "mpc is readonly, please try alloc_mptable instead\n"); 893 893 return 0; 894 894 } 895 - printk(KERN_INFO "use in-positon replacing\n"); 895 + printk(KERN_INFO "use in-position replacing\n"); 896 896 } else { 897 897 mpf->physptr = mpc_new_phys; 898 898 mpc_new = phys_to_virt(mpc_new_phys);
+2 -2
arch/x86/kernel/pci-calgary_64.c
··· 1279 1279 1280 1280 if (pci_dev == PCI_DEVICE_ID_IBM_CALIOC2) { 1281 1281 /* 1282 - * FIXME: properly scan for devices accross the 1282 + * FIXME: properly scan for devices across the 1283 1283 * PCI-to-PCI bridge on every CalIOC2 port. 1284 1284 */ 1285 1285 return 1; ··· 1295 1295 1296 1296 /* 1297 1297 * calgary_init_bitmap_from_tce_table(): 1298 - * Funtion for kdump case. In the second/kdump kernel initialize 1298 + * Function for kdump case. In the second/kdump kernel initialize 1299 1299 * the bitmap based on the tce table entries obtained from first kernel 1300 1300 */ 1301 1301 static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl)
+1 -1
arch/x86/kernel/step.c
··· 166 166 * Make sure block stepping (BTF) is not enabled unless it should be. 167 167 * Note that we don't try to worry about any is_setting_trap_flag() 168 168 * instructions after the first when using block stepping. 169 - * So noone should try to use debugger block stepping in a program 169 + * So no one should try to use debugger block stepping in a program 170 170 * that uses user-mode single stepping itself. 171 171 */ 172 172 if (enable_single_step(child) && block) {
+1 -1
arch/x86/kernel/topology.c
··· 39 39 /* 40 40 * CPU0 cannot be offlined due to several 41 41 * restrictions and assumptions in kernel. This basically 42 - * doesnt add a control file, one cannot attempt to offline 42 + * doesn't add a control file, one cannot attempt to offline 43 43 * BSP. 44 44 * 45 45 * Also certain PCI quirks require not to enable hotplug control
+2 -2
arch/x86/kernel/tsc.c
··· 427 427 * the delta to the previous read. We keep track of the min 428 428 * and max values of that delta. The delta is mostly defined 429 429 * by the IO time of the PIT access, so we can detect when a 430 - * SMI/SMM disturbance happend between the two reads. If the 430 + * SMI/SMM disturbance happened between the two reads. If the 431 431 * maximum time is significantly larger than the minimum time, 432 432 * then we discard the result and have another try. 433 433 * ··· 900 900 * timer based, instead of loop based, we don't block the boot 901 901 * process while this longer calibration is done. 902 902 * 903 - * If there are any calibration anomolies (too many SMIs, etc), 903 + * If there are any calibration anomalies (too many SMIs, etc), 904 904 * or the refined calibration is off by 1% of the fast early 905 905 * calibration, we throw out the new calibration and use the 906 906 * early calibration.
+1 -1
arch/x86/kernel/verify_cpu.S
··· 18 18 * This file is expected to run in 32bit code. Currently: 19 19 * 20 20 * arch/x86/boot/compressed/head_64.S: Boot cpu verification 21 - * arch/x86/kernel/trampoline_64.S: secondary processor verfication 21 + * arch/x86/kernel/trampoline_64.S: secondary processor verification 22 22 * arch/x86/kernel/head_32.S: processor startup 23 23 * 24 24 * verify_cpu, returns the status of longmode and SSE in register %eax.
+1 -1
arch/x86/kernel/xsave.c
··· 53 53 54 54 /* 55 55 * None of the feature bits are in init state. So nothing else 56 - * to do for us, as the memory layout is upto date. 56 + * to do for us, as the memory layout is up to date. 57 57 */ 58 58 if ((xstate_bv & pcntxt_mask) == pcntxt_mask) 59 59 return;
+1 -1
arch/x86/kvm/paging_tmpl.h
··· 348 348 return; 349 349 kvm_get_pfn(pfn); 350 350 /* 351 - * we call mmu_set_spte() with host_writable = true beacuse that 351 + * we call mmu_set_spte() with host_writable = true because that 352 352 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1). 353 353 */ 354 354 mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
+1 -1
arch/x86/kvm/timer.c
··· 25 25 26 26 /* 27 27 * There is a race window between reading and incrementing, but we do 28 - * not care about potentially loosing timer events in the !reinject 28 + * not care about potentially losing timer events in the !reinject 29 29 * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked 30 30 * in vcpu_enter_guest. 31 31 */
+1 -1
arch/x86/kvm/x86.c
··· 1028 1028 /* 1029 1029 * Special case: close write to TSC within 5 seconds of 1030 1030 * another CPU is interpreted as an attempt to synchronize 1031 - * The 5 seconds is to accomodate host load / swapping as 1031 + * The 5 seconds is to accommodate host load / swapping as 1032 1032 * well as any reset of TSC during the boot process. 1033 1033 * 1034 1034 * In that case, for a reliable TSC, we can match TSC offsets,
+1 -1
arch/x86/lguest/boot.c
··· 397 397 * instead we just use the real "cpuid" instruction. Then I pretty much turned 398 398 * off feature bits until the Guest booted. (Don't say that: you'll damage 399 399 * lguest sales!) Shut up, inner voice! (Hey, just pointing out that this is 400 - * hardly future proof.) Noone's listening! They don't like you anyway, 400 + * hardly future proof.) No one's listening! They don't like you anyway, 401 401 * parenthetic weirdo! 402 402 * 403 403 * Replacing the cpuid so we can turn features off is great for the kernel, but
+1 -1
arch/x86/lib/copy_user_64.S
··· 117 117 * rdx count 118 118 * 119 119 * Output: 120 - * eax uncopied bytes or 0 if successfull. 120 + * eax uncopied bytes or 0 if successful. 121 121 */ 122 122 ENTRY(copy_user_generic_unrolled) 123 123 CFI_STARTPROC
+2 -2
arch/x86/lib/csum-copy_64.S
··· 152 152 153 153 adcq %r9,%rax 154 154 155 - /* do last upto 56 bytes */ 155 + /* do last up to 56 bytes */ 156 156 .Lhandle_tail: 157 157 /* ecx: count */ 158 158 movl %ecx,%r10d ··· 180 180 addl %ebx,%eax 181 181 adcl %r9d,%eax 182 182 183 - /* do last upto 6 bytes */ 183 + /* do last up to 6 bytes */ 184 184 .Lhandle_7: 185 185 movl %r10d,%ecx 186 186 andl $7,%ecx
+1 -1
arch/x86/lib/csum-partial_64.c
··· 84 84 count64--; 85 85 } 86 86 87 - /* last upto 7 8byte blocks */ 87 + /* last up to 7 8byte blocks */ 88 88 count %= 8; 89 89 while (count) { 90 90 asm("addq %1,%0\n\t"
+1 -1
arch/x86/mm/hugetlbpage.c
··· 326 326 if (mm->free_area_cache < len) 327 327 goto fail; 328 328 329 - /* either no address requested or cant fit in requested address hole */ 329 + /* either no address requested or can't fit in requested address hole */ 330 330 addr = (mm->free_area_cache - len) & huge_page_mask(h); 331 331 do { 332 332 /*
+1 -1
arch/x86/mm/init_32.c
··· 917 917 { 918 918 /* 919 919 * When this called, init has already been executed and released, 920 - * so everything past _etext sould be NX. 920 + * so everything past _etext should be NX. 921 921 */ 922 922 unsigned long start = PFN_ALIGN(_etext); 923 923 /*
+1 -1
arch/x86/mm/numa_64.c
··· 446 446 * @distance: NUMA distance 447 447 * 448 448 * Set the distance from node @from to @to to @distance. If distance table 449 - * doesn't exist, one which is large enough to accomodate all the currently 449 + * doesn't exist, one which is large enough to accommodate all the currently 450 450 * known nodes will be created. 451 451 * 452 452 * If such table cannot be allocated, a warning is printed and further
+1 -1
arch/x86/mm/pageattr.c
··· 310 310 * these shared mappings are made of small page mappings. 311 311 * Thus this don't enforce !RW mapping for small page kernel 312 312 * text mapping logic will help Linux Xen parvirt guest boot 313 - * aswell. 313 + * as well. 314 314 */ 315 315 if (lookup_address(address, &level) && (level != PG_LEVEL_4K)) 316 316 pgprot_val(forbidden) |= _PAGE_RW;
+2 -2
arch/x86/pci/i386.c
··· 241 241 e820_reserve_resources_late(); 242 242 /* 243 243 * Insert the IO APIC resources after PCI initialization has 244 - * occured to handle IO APICS that are mapped in on a BAR in 244 + * occurred to handle IO APICS that are mapped in on a BAR in 245 245 * PCI space, but before trying to assign unassigned pci res. 246 246 */ 247 247 ioapic_insert_resources(); ··· 304 304 /* 305 305 * ioremap() and ioremap_nocache() defaults to UC MINUS for now. 306 306 * To avoid attribute conflicts, request UC MINUS here 307 - * aswell. 307 + * as well. 308 308 */ 309 309 prot |= _PAGE_CACHE_UC_MINUS; 310 310
+1 -1
arch/x86/xen/mmu.c
··· 1745 1745 } 1746 1746 1747 1747 /* 1748 - * Set up the inital kernel pagetable. 1748 + * Set up the initial kernel pagetable. 1749 1749 * 1750 1750 * We can construct this by grafting the Xen provided pagetable into 1751 1751 * head_64.S's preconstructed pagetables. We copy the Xen L2's into