Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

spelling fixes: arch/x86_64/

Spelling fixes in arch/x86_64/.

Signed-off-by: Simon Arlott <simon@fire.lp0.eu>
Signed-off-by: Adrian Bunk <bunk@kernel.org>

Authored by Simon Arlott and committed by Adrian Bunk
676b1855 5b20311e

+13 -13
+2 -2
arch/x86/boot/compressed/misc_64.c
··· 25 25 26 26 /* 27 27 * Getting to provable safe in place decompression is hard. 28 - * Worst case behaviours need to be analized. 28 + * Worst case behaviours need to be analyzed. 29 29 * Background information: 30 30 * 31 31 * The file layout is: ··· 94 94 * Adding 32768 instead of 32767 just makes for round numbers. 95 95 * Adding the decompressor_size is necessary as it musht live after all 96 96 * of the data as well. Last I measured the decompressor is about 14K. 97 - * 10K of actuall data and 4K of bss. 97 + * 10K of actual data and 4K of bss. 98 98 * 99 99 */ 100 100
+2 -2
arch/x86/kernel/io_apic_64.c
··· 1770 1770 1771 1771 /* 1772 1772 * 1773 - * IRQ's that are handled by the PIC in the MPS IOAPIC case. 1773 + * IRQs that are handled by the PIC in the MPS IOAPIC case. 1774 1774 * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ. 1775 1775 * Linux doesn't really care, as it's not actually used 1776 1776 * for any interrupt handling anyway. ··· 1921 1921 } 1922 1922 1923 1923 /* 1924 - * MSI mesage composition 1924 + * MSI message composition 1925 1925 */ 1926 1926 #ifdef CONFIG_PCI_MSI 1927 1927 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
+2 -2
arch/x86/kernel/mce_64.c
··· 320 320 #ifdef CONFIG_X86_MCE_INTEL 321 321 /*** 322 322 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog 323 - * @cpu: The CPU on which the event occured. 323 + * @cpu: The CPU on which the event occurred. 324 324 * @status: Event status information 325 325 * 326 326 * This function should be called by the thermal interrupt after the ··· 688 688 return 1; 689 689 } 690 690 691 - /* mce=off disables machine check. Note you can reenable it later 691 + /* mce=off disables machine check. Note you can re-enable it later 692 692 using sysfs. 693 693 mce=TOLERANCELEVEL (number, see above) 694 694 mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
+1 -1
arch/x86/kernel/signal_64.c
··· 410 410 411 411 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 412 412 if (signr > 0) { 413 - /* Reenable any watchpoints before delivering the 413 + /* Re-enable any watchpoints before delivering the 414 414 * signal to user space. The processor register will 415 415 * have been cleared if the watchpoint triggered 416 416 * inside the kernel.
+1 -1
arch/x86/kernel/smpboot_64.c
··· 350 350 /* 351 351 * We need to hold call_lock, so there is no inconsistency 352 352 * between the time smp_call_function() determines number of 353 - * IPI receipients, and the time when the determination is made 353 + * IPI recipients, and the time when the determination is made 354 354 * for which cpus receive the IPI in genapic_flat.c. Holding this 355 355 * lock helps us to not include this cpu in a currently in progress 356 356 * smp_call_function().
+1 -1
arch/x86/kernel/traps_64.c
··· 201 201 #define MSG(txt) ops->warning(data, txt) 202 202 203 203 /* 204 - * x86-64 can have upto three kernel stacks: 204 + * x86-64 can have up to three kernel stacks: 205 205 * process stack 206 206 * interrupt stack 207 207 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
+1 -1
arch/x86/kernel/vsyscall_64.c
··· 53 53 /* 54 54 * vsyscall_gtod_data contains data that is : 55 55 * - readonly from vsyscalls 56 - * - writen by timer interrupt or systcl (/proc/sys/kernel/vsyscall64) 56 + * - written by timer interrupt or systcl (/proc/sys/kernel/vsyscall64) 57 57 * Try to keep this structure as small as possible to avoid cache line ping pongs 58 58 */ 59 59 int __vgetcpu_mode __section_vgetcpu_mode;
+2 -2
arch/x86/mm/fault_64.c
··· 378 378 again: 379 379 /* When running in the kernel we expect faults to occur only to 380 380 * addresses in user space. All other faults represent errors in the 381 - * kernel and should generate an OOPS. Unfortunatly, in the case of an 381 + * kernel and should generate an OOPS. Unfortunately, in the case of an 382 382 * erroneous fault occurring in a code path which already holds mmap_sem 383 383 * we will deadlock attempting to validate the fault against the 384 384 * address space. Luckily the kernel only validly references user ··· 386 386 * exceptions table. 387 387 * 388 388 * As the vast majority of faults will be valid we will only perform 389 - * the source reference check when there is a possibilty of a deadlock. 389 + * the source reference check when there is a possibility of a deadlock. 390 390 * Attempt to lock the address space, if we cannot we then validate the 391 391 * source. If this is invalid we can skip the address space check, 392 392 * thus avoiding the deadlock.
+1 -1
arch/x86/mm/srat_64.c
··· 218 218 /* 219 219 * Update nodes_add and decide if to include add are in the zone. 220 220 * Both SPARSE and RESERVE need nodes_add infomation. 221 - * This code supports one contigious hot add area per node. 221 + * This code supports one contiguous hot add area per node. 222 222 */ 223 223 static int reserve_hotadd(int node, unsigned long start, unsigned long end) 224 224 {