Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: Fix various typos in comments

Fix ~144 single-word typos in arch/x86/ code comments.

Doing this in a single commit should reduce the churn.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: linux-kernel@vger.kernel.org

+144 -144
+3 -3
arch/x86/crypto/curve25519-x86_64.c
··· 114 114 ); 115 115 } 116 116 117 - /* Computes the field substraction of two field elements */ 117 + /* Computes the field subtraction of two field elements */ 118 118 static inline void fsub(u64 *out, const u64 *f1, const u64 *f2) 119 119 { 120 120 asm volatile( 121 - /* Compute the raw substraction of f1-f2 */ 121 + /* Compute the raw subtraction of f1-f2 */ 122 122 " movq 0(%1), %%r8;" 123 123 " subq 0(%2), %%r8;" 124 124 " movq 8(%1), %%r9;" ··· 135 135 " mov $38, %%rcx;" 136 136 " cmovc %%rcx, %%rax;" 137 137 138 - /* Step 2: Substract carry*38 from the original difference */ 138 + /* Step 2: Subtract carry*38 from the original difference */ 139 139 " sub %%rax, %%r8;" 140 140 " sbb $0, %%r9;" 141 141 " sbb $0, %%r10;"
+1 -1
arch/x86/crypto/twofish_glue_3way.c
··· 117 117 * storing blocks in 64bit registers to allow three blocks to 118 118 * be processed parallel. Parallel operation then allows gaining 119 119 * more performance than was trade off, on out-of-order CPUs. 120 - * However Atom does not benefit from this parallellism and 120 + * However Atom does not benefit from this parallelism and 121 121 * should be blacklisted. 122 122 */ 123 123 return true;
+1 -1
arch/x86/events/amd/core.c
··· 623 623 /* 624 624 * Check each counter for overflow and wait for it to be reset by the 625 625 * NMI if it has overflowed. This relies on the fact that all active 626 - * counters are always enabled when this function is caled and 626 + * counters are always enabled when this function is called and 627 627 * ARCH_PERFMON_EVENTSEL_INT is always set. 628 628 */ 629 629 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+1 -1
arch/x86/events/amd/iommu.h
··· 17 17 #define IOMMU_PC_DEVID_MATCH_REG 0x20 18 18 #define IOMMU_PC_COUNTER_REPORT_REG 0x28 19 19 20 - /* maximun specified bank/counters */ 20 + /* maximum specified bank/counters */ 21 21 #define PC_MAX_SPEC_BNKS 64 22 22 #define PC_MAX_SPEC_CNTRS 16 23 23
+1 -1
arch/x86/events/core.c
··· 765 765 }; 766 766 767 767 /* 768 - * Initialize interator that runs through all events and counters. 768 + * Initialize iterator that runs through all events and counters. 769 769 */ 770 770 static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints, 771 771 int num, int wmin, int wmax, int gpmax)
+6 -6
arch/x86/events/intel/core.c
··· 137 137 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 138 138 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 139 139 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */ 140 - INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMTPY */ 140 + INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */ 141 141 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */ 142 142 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */ 143 143 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ ··· 2186 2186 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either 2187 2187 * in sequence on the same PMC or on different PMCs. 2188 2188 * 2189 - * In practise it appears some of these events do in fact count, and 2189 + * In practice it appears some of these events do in fact count, and 2190 2190 * we need to program all 4 events. 2191 2191 */ 2192 2192 static void intel_pmu_nhm_workaround(void) ··· 2435 2435 2436 2436 /* 2437 2437 * The metric is reported as an 8bit integer fraction 2438 - * suming up to 0xff. 2438 + * summing up to 0xff. 2439 2439 * slots-in-metric = (Metric / 0xff) * slots 2440 2440 */ 2441 2441 val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff; ··· 2824 2824 } 2825 2825 2826 2826 /* 2827 - * Intel Perf mertrics 2827 + * Intel Perf metrics 2828 2828 */ 2829 2829 if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) { 2830 2830 handled++; ··· 4591 4591 4592 4592 /* 4593 4593 * Disable the check for real HW, so we don't 4594 - * mess with potentionaly enabled registers: 4594 + * mess with potentially enabled registers: 4595 4595 */ 4596 4596 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) 4597 4597 return true; ··· 4656 4656 { 4657 4657 int bit; 4658 4658 4659 - /* disable event that reported as not presend by cpuid */ 4659 + /* disable event that reported as not present by cpuid */ 4660 4660 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) { 4661 4661 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0; 4662 4662 pr_warn("CPUID marked event: \'%s\' unavailable\n",
+1 -1
arch/x86/events/intel/ds.c
··· 1805 1805 * 1806 1806 * [-period, 0] 1807 1807 * 1808 - * the difference between two consequtive reads is: 1808 + * the difference between two consecutive reads is: 1809 1809 * 1810 1810 * A) value2 - value1; 1811 1811 * when no overflows have happened in between,
+1 -1
arch/x86/events/intel/lbr.c
··· 1198 1198 /* 1199 1199 * The LBR logs any address in the IP, even if the IP just 1200 1200 * faulted. This means userspace can control the from address. 1201 - * Ensure we don't blindy read any address by validating it is 1201 + * Ensure we don't blindly read any address by validating it is 1202 1202 * a known text address. 1203 1203 */ 1204 1204 if (kernel_text_address(from)) {
+2 -2
arch/x86/events/intel/p4.c
··· 24 24 unsigned int escr_msr[2]; /* ESCR MSR for this event */ 25 25 unsigned int escr_emask; /* valid ESCR EventMask bits */ 26 26 unsigned int shared; /* event is shared across threads */ 27 - char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on abscence */ 27 + char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on absence */ 28 28 }; 29 29 30 30 struct p4_pebs_bind { ··· 45 45 * it's needed for mapping P4_PEBS_CONFIG_METRIC_MASK bits of 46 46 * event configuration to find out which values are to be 47 47 * written into MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT 48 - * resgisters 48 + * registers 49 49 */ 50 50 static struct p4_pebs_bind p4_pebs_bind_map[] = { 51 51 P4_GEN_PEBS_BIND(1stl_cache_load_miss_retired, 0x0000001, 0x0000001),
+1 -1
arch/x86/events/intel/pt.c
··· 362 362 363 363 /* 364 364 * Setting bit 0 (TraceEn in RTIT_CTL MSR) in the attr.config 365 - * clears the assomption that BranchEn must always be enabled, 365 + * clears the assumption that BranchEn must always be enabled, 366 366 * as was the case with the first implementation of PT. 367 367 * If this bit is not set, the legacy behavior is preserved 368 368 * for compatibility with the older userspace.
+1 -1
arch/x86/events/zhaoxin/core.c
··· 494 494 { 495 495 int bit; 496 496 497 - /* disable event that reported as not presend by cpuid */ 497 + /* disable event that reported as not present by cpuid */ 498 498 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(zx_arch_events_map)) { 499 499 zx_pmon_event_map[zx_arch_events_map[bit].id] = 0; 500 500 pr_warn("CPUID marked event: \'%s\' unavailable\n",
+2 -2
arch/x86/hyperv/hv_init.c
··· 162 162 static inline bool hv_reenlightenment_available(void) 163 163 { 164 164 /* 165 - * Check for required features and priviliges to make TSC frequency 165 + * Check for required features and privileges to make TSC frequency 166 166 * change notifications work. 167 167 */ 168 168 return ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS && ··· 292 292 293 293 /* 294 294 * Reset the hypercall page as it is going to be invalidated 295 - * accross hibernation. Setting hv_hypercall_pg to NULL ensures 295 + * across hibernation. Setting hv_hypercall_pg to NULL ensures 296 296 * that any subsequent hypercall operation fails safely instead of 297 297 * crashing due to an access of an invalid page. The hypercall page 298 298 * pointer is restored on resume.
+1 -1
arch/x86/include/asm/cmpxchg.h
··· 22 22 /* 23 23 * Constants for operation sizes. On 32-bit, the 64-bit size it set to 24 24 * -1 because sizeof will never return -1, thereby making those switch 25 - * case statements guaranteeed dead code which the compiler will 25 + * case statements guaranteed dead code which the compiler will 26 26 * eliminate, and allowing the "missing symbol in the default case" to 27 27 * indicate a usage error. 28 28 */
+1 -1
arch/x86/include/asm/idtentry.h
··· 547 547 /* 548 548 * Dummy trap number so the low level ASM macro vector number checks do not 549 549 * match which results in emitting plain IDTENTRY stubs without bells and 550 - * whistels. 550 + * whistles. 551 551 */ 552 552 #define X86_TRAP_OTHER 0xFFFF 553 553
+1 -1
arch/x86/include/asm/intel_pconfig.h
··· 38 38 #define MKTME_INVALID_ENC_ALG 4 39 39 #define MKTME_DEVICE_BUSY 5 40 40 41 - /* Hardware requires the structure to be 256 byte alinged. Otherwise #GP(0). */ 41 + /* Hardware requires the structure to be 256 byte aligned. Otherwise #GP(0). */ 42 42 struct mktme_key_program { 43 43 u16 keyid; 44 44 u32 keyid_ctrl;
+1 -1
arch/x86/include/asm/io.h
··· 159 159 /* 160 160 * ISA I/O bus memory addresses are 1:1 with the physical address. 161 161 * However, we truncate the address to unsigned int to avoid undesirable 162 - * promitions in legacy drivers. 162 + * promotions in legacy drivers. 163 163 */ 164 164 static inline unsigned int isa_virt_to_bus(volatile void *address) 165 165 {
+1 -1
arch/x86/include/asm/irq_stack.h
··· 190 190 191 191 /* 192 192 * Macro to invoke __do_softirq on the irq stack. This is only called from 193 - * task context when bottom halfs are about to be reenabled and soft 193 + * task context when bottom halves are about to be reenabled and soft 194 194 * interrupts are pending to be processed. The interrupt stack cannot be in 195 195 * use here. 196 196 */
+2 -2
arch/x86/include/asm/kvm_host.h
··· 1470 1470 /* 1471 1471 * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing 1472 1472 * userspace I/O) to indicate that the emulation context 1473 - * should be resued as is, i.e. skip initialization of 1473 + * should be reused as is, i.e. skip initialization of 1474 1474 * emulation context, instruction fetch and decode. 1475 1475 * 1476 1476 * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware. ··· 1495 1495 * 1496 1496 * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware 1497 1497 * backdoor emulation, which is opt in via module param. 1498 - * VMware backoor emulation handles select instructions 1498 + * VMware backdoor emulation handles select instructions 1499 1499 * and reinjects the #GP for all other cases. 1500 1500 * 1501 1501 * EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which
+1 -1
arch/x86/include/asm/paravirt_types.h
··· 371 371 * on the stack. All caller-save registers (eax,edx,ecx) are expected 372 372 * to be modified (either clobbered or used for return values). 373 373 * X86_64, on the other hand, already specifies a register-based calling 374 - * conventions, returning at %rax, with parameteres going on %rdi, %rsi, 374 + * conventions, returning at %rax, with parameters going on %rdi, %rsi, 375 375 * %rdx, and %rcx. Note that for this reason, x86_64 does not need any 376 376 * special handling for dealing with 4 arguments, unlike i386. 377 377 * However, x86_64 also have to clobber all caller saved registers, which
+1 -1
arch/x86/include/asm/pgtable.h
··· 1244 1244 /* 1245 1245 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); 1246 1246 * 1247 - * dst - pointer to pgd range anwhere on a pgd page 1247 + * dst - pointer to pgd range anywhere on a pgd page 1248 1248 * src - "" 1249 1249 * count - the number of pgds to copy. 1250 1250 *
+1 -1
arch/x86/include/asm/processor.h
··· 527 527 struct io_bitmap *io_bitmap; 528 528 529 529 /* 530 - * IOPL. Priviledge level dependent I/O permission which is 530 + * IOPL. Privilege level dependent I/O permission which is 531 531 * emulated via the I/O bitmap to prevent user space from disabling 532 532 * interrupts. 533 533 */
+1 -1
arch/x86/include/asm/set_memory.h
··· 9 9 * The set_memory_* API can be used to change various attributes of a virtual 10 10 * address range. The attributes include: 11 11 * Cachability : UnCached, WriteCombining, WriteThrough, WriteBack 12 - * Executability : eXeutable, NoteXecutable 12 + * Executability : eXecutable, NoteXecutable 13 13 * Read/Write : ReadOnly, ReadWrite 14 14 * Presence : NotPresent 15 15 * Encryption : Encrypted, Decrypted
+1 -1
arch/x86/include/asm/uv/uv_geo.h
··· 10 10 #ifndef _ASM_UV_GEO_H 11 11 #define _ASM_UV_GEO_H 12 12 13 - /* Type declaractions */ 13 + /* Type declarations */ 14 14 15 15 /* Size of a geoid_s structure (must be before decl. of geoid_u) */ 16 16 #define GEOID_SIZE 8
+1 -1
arch/x86/include/asm/uv/uv_hub.h
··· 353 353 * 354 354 * Note there are NO leds on a UV system. This register is only 355 355 * used by the system controller to monitor system-wide operation. 356 - * There are 64 regs per node. With Nahelem cpus (2 cores per node, 356 + * There are 64 regs per node. With Nehalem cpus (2 cores per node, 357 357 * 8 cpus per core, 2 threads per cpu) there are 32 cpu threads on 358 358 * a node. 359 359 *
+2 -2
arch/x86/include/uapi/asm/bootparam.h
··· 234 234 * handling of page tables. 235 235 * 236 236 * These enums should only ever be used by x86 code, and the code that uses 237 - * it should be well contained and compartamentalized. 237 + * it should be well contained and compartmentalized. 238 238 * 239 239 * KVM and Xen HVM do not have a subarch as these are expected to follow 240 240 * standard x86 boot entries. If there is a genuine need for "hypervisor" type ··· 252 252 * @X86_SUBARCH_XEN: Used for Xen guest types which follow the PV boot path, 253 253 * which start at asm startup_xen() entry point and later jump to the C 254 254 * xen_start_kernel() entry point. Both domU and dom0 type of guests are 255 - * currently supportd through this PV boot path. 255 + * currently supported through this PV boot path. 256 256 * @X86_SUBARCH_INTEL_MID: Used for Intel MID (Mobile Internet Device) platform 257 257 * systems which do not have the PCI legacy interfaces. 258 258 * @X86_SUBARCH_CE4100: Used for Intel CE media processor (CE4100) SoC
+1 -1
arch/x86/include/uapi/asm/msgbuf.h
··· 12 12 * The msqid64_ds structure for x86 architecture with x32 ABI. 13 13 * 14 14 * On x86-32 and x86-64 we can just use the generic definition, but 15 - * x32 uses the same binary layout as x86_64, which is differnet 15 + * x32 uses the same binary layout as x86_64, which is different 16 16 * from other 32-bit architectures. 17 17 */ 18 18
+1 -1
arch/x86/include/uapi/asm/sgx.h
··· 152 152 * Most exceptions reported on ENCLU, including those that occur within the 153 153 * enclave, are fixed up and reported synchronously instead of being delivered 154 154 * via a standard signal. Debug Exceptions (#DB) and Breakpoints (#BP) are 155 - * never fixed up and are always delivered via standard signals. On synchrously 155 + * never fixed up and are always delivered via standard signals. On synchronously 156 156 * reported exceptions, -EFAULT is returned and details about the exception are 157 157 * recorded in @run.exception, the optional sgx_enclave_exception struct. 158 158 *
+1 -1
arch/x86/include/uapi/asm/shmbuf.h
··· 9 9 * The shmid64_ds structure for x86 architecture with x32 ABI. 10 10 * 11 11 * On x86-32 and x86-64 we can just use the generic definition, but 12 - * x32 uses the same binary layout as x86_64, which is differnet 12 + * x32 uses the same binary layout as x86_64, which is different 13 13 * from other 32-bit architectures. 14 14 */ 15 15
+1 -1
arch/x86/include/uapi/asm/sigcontext.h
··· 139 139 * The 64-bit FPU frame. (FXSAVE format and later) 140 140 * 141 141 * Note1: If sw_reserved.magic1 == FP_XSTATE_MAGIC1 then the structure is 142 - * larger: 'struct _xstate'. Note that 'struct _xstate' embedds 142 + * larger: 'struct _xstate'. Note that 'struct _xstate' embeds 143 143 * 'struct _fpstate' so that you can always assume the _fpstate portion 144 144 * exists so that you can check the magic value. 145 145 *
+2 -2
arch/x86/kernel/acpi/boot.c
··· 830 830 EXPORT_SYMBOL(acpi_unregister_ioapic); 831 831 832 832 /** 833 - * acpi_ioapic_registered - Check whether IOAPIC assoicatied with @gsi_base 833 + * acpi_ioapic_registered - Check whether IOAPIC associated with @gsi_base 834 834 * has been registered 835 835 * @handle: ACPI handle of the IOAPIC device 836 836 * @gsi_base: GSI base associated with the IOAPIC ··· 1657 1657 else if (strcmp(arg, "noirq") == 0) { 1658 1658 acpi_noirq_set(); 1659 1659 } 1660 - /* "acpi=copy_dsdt" copys DSDT */ 1660 + /* "acpi=copy_dsdt" copies DSDT */ 1661 1661 else if (strcmp(arg, "copy_dsdt") == 0) { 1662 1662 acpi_gbl_copy_dsdt_locally = 1; 1663 1663 }
+1 -1
arch/x86/kernel/acpi/sleep.c
··· 41 41 * x86_acpi_enter_sleep_state - enter sleep state 42 42 * @state: Sleep state to enter. 43 43 * 44 - * Wrapper around acpi_enter_sleep_state() to be called by assmebly. 44 + * Wrapper around acpi_enter_sleep_state() to be called by assembly. 45 45 */ 46 46 asmlinkage acpi_status __visible x86_acpi_enter_sleep_state(u8 state) 47 47 {
+5 -5
arch/x86/kernel/apic/apic.c
··· 619 619 620 620 if (this_cpu_has(X86_FEATURE_ARAT)) { 621 621 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; 622 - /* Make LAPIC timer preferrable over percpu HPET */ 622 + /* Make LAPIC timer preferable over percpu HPET */ 623 623 lapic_clockevent.rating = 150; 624 624 } 625 625 ··· 666 666 * In this functions we calibrate APIC bus clocks to the external timer. 667 667 * 668 668 * We want to do the calibration only once since we want to have local timer 669 - * irqs syncron. CPUs connected by the same APIC bus have the very same bus 669 + * irqs synchronous. CPUs connected by the same APIC bus have the very same bus 670 670 * frequency. 671 671 * 672 672 * This was previously done by reading the PIT/HPET and waiting for a wrap ··· 1532 1532 * Most probably by now the CPU has serviced that pending interrupt and it 1533 1533 * might not have done the ack_APIC_irq() because it thought, interrupt 1534 1534 * came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear 1535 - * the ISR bit and cpu thinks it has already serivced the interrupt. Hence 1535 + * the ISR bit and cpu thinks it has already serviced the interrupt. Hence 1536 1536 * a vector might get locked. It was noticed for timer irq (vector 1537 1537 * 0x31). Issue an extra EOI to clear ISR. 1538 1538 * ··· 1657 1657 */ 1658 1658 /* 1659 1659 * Actually disabling the focus CPU check just makes the hang less 1660 - * frequent as it makes the interrupt distributon model be more 1660 + * frequent as it makes the interrupt distribution model be more 1661 1661 * like LRU than MRU (the short-term load is more even across CPUs). 1662 1662 */ 1663 1663 ··· 1875 1875 1876 1876 /* 1877 1877 * Without IR, all CPUs can be addressed by IOAPIC/MSI only 1878 - * in physical mode, and CPUs with an APIC ID that cannnot 1878 + * in physical mode, and CPUs with an APIC ID that cannot 1879 1879 * be addressed must not be brought online. 1880 1880 */ 1881 1881 x2apic_set_max_apicid(apic_limit);
+4 -4
arch/x86/kernel/apic/io_apic.c
··· 928 928 929 929 /* 930 930 * setup_IO_APIC_irqs() programs all legacy IRQs with default trigger 931 - * and polarity attirbutes. So allow the first user to reprogram the 931 + * and polarity attributes. So allow the first user to reprogram the 932 932 * pin with real trigger and polarity attributes. 933 933 */ 934 934 if (irq < nr_legacy_irqs() && data->count == 1) { ··· 994 994 995 995 /* 996 996 * Legacy ISA IRQ has already been allocated, just add pin to 997 - * the pin list assoicated with this IRQ and program the IOAPIC 997 + * the pin list associated with this IRQ and program the IOAPIC 998 998 * entry. The IOAPIC entry 999 999 */ 1000 1000 if (irq_data && irq_data->parent_data) { ··· 1742 1742 * with masking the ioapic entry and then polling until 1743 1743 * Remote IRR was clear before reprogramming the 1744 1744 * ioapic I don't trust the Remote IRR bit to be 1745 - * completey accurate. 1745 + * completely accurate. 1746 1746 * 1747 1747 * However there appears to be no other way to plug 1748 1748 * this race, so if the Remote IRR bit is not ··· 1820 1820 /* 1821 1821 * Tail end of clearing remote IRR bit (either by delivering the EOI 1822 1822 * message via io-apic EOI register write or simulating it using 1823 - * mask+edge followed by unnask+level logic) manually when the 1823 + * mask+edge followed by unmask+level logic) manually when the 1824 1824 * level triggered interrupt is seen as the edge triggered interrupt 1825 1825 * at the cpu. 1826 1826 */
+2 -2
arch/x86/kernel/apic/vector.c
··· 1045 1045 * 1046 1046 * But in case of cpu hotplug this should be a non issue 1047 1047 * because if the affinity update happens right before all 1048 - * cpus rendevouz in stop machine, there is no way that the 1048 + * cpus rendezvous in stop machine, there is no way that the 1049 1049 * interrupt can be blocked on the target cpu because all cpus 1050 1050 * loops first with interrupts enabled in stop machine, so the 1051 1051 * old vector is not yet cleaned up when the interrupt fires. ··· 1054 1054 * of the interrupt on the apic/system bus would be delayed 1055 1055 * beyond the point where the target cpu disables interrupts 1056 1056 * in stop machine. I doubt that it can happen, but at least 1057 - * there is a theroretical chance. Virtualization might be 1057 + * there is a theoretical chance. Virtualization might be 1058 1058 * able to expose this, but AFAICT the IOAPIC emulation is not 1059 1059 * as stupid as the real hardware. 1060 1060 *
+3 -3
arch/x86/kernel/apm_32.c
··· 94 94 * Remove APM dependencies in arch/i386/kernel/process.c 95 95 * Remove APM dependencies in drivers/char/sysrq.c 96 96 * Reset time across standby. 97 - * Allow more inititialisation on SMP. 97 + * Allow more initialisation on SMP. 98 98 * Remove CONFIG_APM_POWER_OFF and make it boot time 99 99 * configurable (default on). 100 100 * Make debug only a boot time parameter (remove APM_DEBUG). ··· 766 766 * not cleared until it is acknowledged. 767 767 * 768 768 * Additional information is returned in the info pointer, providing 769 - * that APM 1.2 is in use. If no messges are pending the value 0x80 769 + * that APM 1.2 is in use. If no messages are pending the value 0x80 770 770 * is returned (No power management events pending). 771 771 */ 772 772 static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info) ··· 1025 1025 * status which gives the rough battery status, and current power 1026 1026 * source. The bat value returned give an estimate as a percentage 1027 1027 * of life and a status value for the battery. The estimated life 1028 - * if reported is a lifetime in secodnds/minutes at current powwer 1028 + * if reported is a lifetime in secodnds/minutes at current power 1029 1029 * consumption. 1030 1030 */ 1031 1031
+2 -2
arch/x86/kernel/cpu/common.c
··· 482 482 if (pk) 483 483 pk->pkru = init_pkru_value; 484 484 /* 485 - * Seting X86_CR4_PKE will cause the X86_FEATURE_OSPKE 485 + * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE 486 486 * cpuid bit to be set. We need to ensure that we 487 487 * update that bit in this CPU's "cpu_info". 488 488 */ ··· 1404 1404 * where GS is unused by the prev and next threads. 1405 1405 * 1406 1406 * Since neither vendor documents this anywhere that I can see, 1407 - * detect it directly instead of hardcoding the choice by 1407 + * detect it directly instead of hard-coding the choice by 1408 1408 * vendor. 1409 1409 * 1410 1410 * I've designated AMD's behavior as the "bug" because it's
+1 -1
arch/x86/kernel/cpu/cyrix.c
··· 291 291 mark_tsc_unstable("cyrix 5510/5520 detected"); 292 292 } 293 293 #endif 294 - c->x86_cache_size = 16; /* Yep 16K integrated cache thats it */ 294 + c->x86_cache_size = 16; /* Yep 16K integrated cache that's it */ 295 295 296 296 /* GXm supports extended cpuid levels 'ala' AMD */ 297 297 if (c->cpuid_level == 2) {
+1 -1
arch/x86/kernel/cpu/mce/core.c
··· 529 529 * Check if the address reported by the CPU is in a format we can parse. 530 530 * It would be possible to add code for most other cases, but all would 531 531 * be somewhat complicated (e.g. segment offset would require an instruction 532 - * parser). So only support physical addresses up to page granuality for now. 532 + * parser). So only support physical addresses up to page granularity for now. 533 533 */ 534 534 int mce_usable_address(struct mce *m) 535 535 {
+2 -2
arch/x86/kernel/cpu/mshyperv.c
··· 197 197 #ifdef CONFIG_X86_LOCAL_APIC 198 198 /* 199 199 * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes 200 - * it dificult to process CHANNELMSG_UNLOAD in case of crash. Handle 200 + * it difficult to process CHANNELMSG_UNLOAD in case of crash. Handle 201 201 * unknown NMI on the first CPU which gets it. 202 202 */ 203 203 static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs) ··· 428 428 429 429 /* 430 430 * Hyper-V doesn't provide irq remapping for IO-APIC. To enable x2apic, 431 - * set x2apic destination mode to physcial mode when x2apic is available 431 + * set x2apic destination mode to physical mode when x2apic is available 432 432 * and Hyper-V IOMMU driver makes sure cpus assigned with IO-APIC irqs 433 433 * have 8-bit APIC id. 434 434 */
+1 -1
arch/x86/kernel/cpu/mtrr/cleanup.c
··· 434 434 state->range_sizek = sizek - second_sizek; 435 435 } 436 436 437 - /* Mininum size of mtrr block that can take hole: */ 437 + /* Minimum size of mtrr block that can take hole: */ 438 438 static u64 mtrr_chunk_size __initdata = (256ULL<<20); 439 439 440 440 static int __init parse_mtrr_chunk_size_opt(char *p)
+1 -1
arch/x86/kernel/cpu/resctrl/core.c
··· 192 192 * Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz 193 193 * Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz 194 194 * 195 - * Probe by trying to write the first of the L3 cach mask registers 195 + * Probe by trying to write the first of the L3 cache mask registers 196 196 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length 197 197 * is always 20 on hsw server parts. The minimum cache bitmask length 198 198 * allowed for HSW server is always 2 bits. Hardcode all of them.
+1 -1
arch/x86/kernel/cpu/resctrl/monitor.c
··· 387 387 * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so 388 388 * that: 389 389 * 390 - * current bandwdith(cur_bw) < user specified bandwidth(user_bw) 390 + * current bandwidth(cur_bw) < user specified bandwidth(user_bw) 391 391 * 392 392 * This uses the MBM counters to measure the bandwidth and MBA throttle 393 393 * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
+2 -2
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
··· 1307 1307 * If the thread does not get on the CPU for whatever 1308 1308 * reason and the process which sets up the region is 1309 1309 * interrupted then this will leave the thread in runnable 1310 - * state and once it gets on the CPU it will derefence 1310 + * state and once it gets on the CPU it will dereference 1311 1311 * the cleared, but not freed, plr struct resulting in an 1312 1312 * empty pseudo-locking loop. 1313 1313 */ ··· 1391 1391 * group is removed from user space via a "rmdir" from userspace or the 1392 1392 * unmount of the resctrl filesystem. On removal the resource group does 1393 1393 * not go back to pseudo-locksetup mode before it is removed, instead it is 1394 - * removed directly. There is thus assymmetry with the creation where the 1394 + * removed directly. There is thus asymmetry with the creation where the 1395 1395 * &struct pseudo_lock_region is removed here while it was not created in 1396 1396 * rdtgroup_pseudo_lock_create(). 1397 1397 *
+2 -2
arch/x86/kernel/cpu/resctrl/rdtgroup.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * User interface for Resource Alloction in Resource Director Technology(RDT) 3 + * User interface for Resource Allocation in Resource Director Technology(RDT) 4 4 * 5 5 * Copyright (C) 2016 Intel Corporation 6 6 * ··· 294 294 /* 295 295 * This is safe against resctrl_sched_in() called from __switch_to() 296 296 * because __switch_to() is executed with interrupts disabled. A local call 297 - * from update_closid_rmid() is proteced against __switch_to() because 297 + * from update_closid_rmid() is protected against __switch_to() because 298 298 * preemption is disabled. 299 299 */ 300 300 static void update_cpu_closid_rmid(void *info)
+1 -1
arch/x86/kernel/cpu/sgx/arch.h
··· 271 271 * @header1: constant byte string 272 272 * @vendor: must be either 0x0000 or 0x8086 273 273 * @date: YYYYMMDD in BCD 274 - * @header2: costant byte string 274 + * @header2: constant byte string 275 275 * @swdefined: software defined value 276 276 */ 277 277 struct sgx_sigstruct_header {
+2 -2
arch/x86/kernel/cpu/sgx/main.c
··· 195 195 196 196 /* 197 197 * Swap page to the regular memory transformed to the blocked state by using 198 - * EBLOCK, which means that it can no loger be referenced (no new TLB entries). 198 + * EBLOCK, which means that it can no longer be referenced (no new TLB entries). 199 199 * 200 200 * The first trial just tries to write the page assuming that some other thread 201 - * has reset the count for threads inside the enlave by using ETRACK, and 201 + * has reset the count for threads inside the enclave by using ETRACK, and 202 202 * previous thread count has been zeroed out. The second trial calls ETRACK 203 203 * before EWB. If that fails we kick all the HW threads out, and then do EWB, 204 204 * which should be guaranteed the succeed.
+2 -2
arch/x86/kernel/cpu/topology.c
··· 30 30 31 31 #ifdef CONFIG_SMP 32 32 /* 33 - * Check if given CPUID extended toplogy "leaf" is implemented 33 + * Check if given CPUID extended topology "leaf" is implemented 34 34 */ 35 35 static int check_extended_topology_leaf(int leaf) 36 36 { ··· 44 44 return 0; 45 45 } 46 46 /* 47 - * Return best CPUID Extended Toplogy Leaf supported 47 + * Return best CPUID Extended Topology Leaf supported 48 48 */ 49 49 static int detect_extended_topology_leaf(struct cpuinfo_x86 *c) 50 50 {
+1 -1
arch/x86/kernel/e820.c
··· 793 793 #endif 794 794 795 795 /* 796 - * Allocate the requested number of bytes with the requsted alignment 796 + * Allocate the requested number of bytes with the requested alignment 797 797 * and return (the physical address) to the caller. Also register this 798 798 * range in the 'kexec' E820 table as a reserved range. 799 799 *
+1 -1
arch/x86/kernel/fpu/xstate.c
··· 253 253 static void __init setup_xstate_features(void) 254 254 { 255 255 u32 eax, ebx, ecx, edx, i; 256 - /* start at the beginnning of the "extended state" */ 256 + /* start at the beginning of the "extended state" */ 257 257 unsigned int last_good_offset = offsetof(struct xregs_state, 258 258 extended_state_area); 259 259 /*
+1 -1
arch/x86/kernel/head64.c
··· 104 104 static bool __head check_la57_support(unsigned long physaddr) 105 105 { 106 106 /* 107 - * 5-level paging is detected and enabled at kernel decomression 107 + * 5-level paging is detected and enabled at kernel decompression 108 108 * stage. Only check if it has been enabled there. 109 109 */ 110 110 if (!(native_read_cr4() & X86_CR4_LA57))
+1 -1
arch/x86/kernel/idt.c
··· 245 245 * after that. 246 246 * 247 247 * Note, that X86_64 cannot install the real #PF handler in 248 - * idt_setup_early_traps() because the memory intialization needs the #PF 248 + * idt_setup_early_traps() because the memory initialization needs the #PF 249 249 * handler from the early_idt_handler_array to initialize the early page 250 250 * tables. 251 251 */
+1 -1
arch/x86/kernel/irq.c
··· 338 338 irq_migrate_all_off_this_cpu(); 339 339 340 340 /* 341 - * We can remove mdelay() and then send spuriuous interrupts to 341 + * We can remove mdelay() and then send spurious interrupts to 342 342 * new cpu targets for all the irqs that were handled previously by 343 343 * this cpu. While it works, I have seen spurious interrupt messages 344 344 * (nothing wrong but still...).
+2 -2
arch/x86/kernel/kgdb.c
··· 17 17 * Updated by: Tom Rini <trini@kernel.crashing.org> 18 18 * Updated by: Jason Wessel <jason.wessel@windriver.com> 19 19 * Modified for 386 by Jim Kingdon, Cygnus Support. 20 - * Origianl kgdb, compatibility with 2.1.xx kernel by 20 + * Original kgdb, compatibility with 2.1.xx kernel by 21 21 * David Grothe <dave@gcom.com> 22 22 * Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com> 23 23 * X86_64 changes from Andi Kleen's patch merged by Jim Houston ··· 642 642 struct perf_event **pevent; 643 643 644 644 /* 645 - * Pre-allocate the hw breakpoint structions in the non-atomic 645 + * Pre-allocate the hw breakpoint instructions in the non-atomic 646 646 * portion of kgdb because this operation requires mutexes to 647 647 * complete. 648 648 */
+1 -1
arch/x86/kernel/kprobes/ftrace.c
··· 12 12 13 13 #include "common.h" 14 14 15 - /* Ftrace callback handler for kprobes -- called under preepmt disabed */ 15 + /* Ftrace callback handler for kprobes -- called under preempt disabled */ 16 16 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, 17 17 struct ftrace_ops *ops, struct ftrace_regs *fregs) 18 18 {
+1 -1
arch/x86/kernel/machine_kexec_64.c
··· 260 260 { 261 261 struct desc_ptr curidt; 262 262 263 - /* x86-64 supports unaliged loads & stores */ 263 + /* x86-64 supports unaligned loads & stores */ 264 264 curidt.size = limit; 265 265 curidt.address = (unsigned long)newidt; 266 266
+1 -1
arch/x86/kernel/process.c
··· 451 451 * First HT sibling to come up on the core. Link shared state of 452 452 * the first HT sibling to itself. The siblings on the same core 453 453 * which come up later will see the shared state pointer and link 454 - * themself to the state of this CPU. 454 + * themselves to the state of this CPU. 455 455 */ 456 456 st->shared_state = st; 457 457 }
+1 -1
arch/x86/kernel/pvclock.c
··· 89 89 /* 90 90 * Assumption here is that last_value, a global accumulator, always goes 91 91 * forward. If we are less than that, we should not be much smaller. 92 - * We assume there is an error marging we're inside, and then the correction 92 + * We assume there is an error margin we're inside, and then the correction 93 93 * does not sacrifice accuracy. 94 94 * 95 95 * For reads: global may have changed between test and return,
+1 -1
arch/x86/kernel/signal.c
··· 492 492 * SS descriptor, but we do need SS to be valid. It's possible 493 493 * that the old SS is entirely bogus -- this can happen if the 494 494 * signal we're trying to deliver is #GP or #SS caused by a bad 495 - * SS value. We also have a compatbility issue here: DOSEMU 495 + * SS value. We also have a compatibility issue here: DOSEMU 496 496 * relies on the contents of the SS register indicating the 497 497 * SS value at the time of the signal, even though that code in 498 498 * DOSEMU predates sigreturn's ability to restore SS. (DOSEMU
+1 -1
arch/x86/kernel/smp.c
··· 67 67 * 5AP. symmetric IO mode (normal Linux operation) not affected. 68 68 * 'noapic' mode has vector 0xf filled out properly. 69 69 * 6AP. 'noapic' mode might be affected - fixed in later steppings 70 - * 7AP. We do not assume writes to the LVT deassering IRQs 70 + * 7AP. We do not assume writes to the LVT deasserting IRQs 71 71 * 8AP. We do not enable low power mode (deep sleep) during MP bootup 72 72 * 9AP. We do not use mixed mode 73 73 *
+1 -1
arch/x86/kernel/smpboot.c
··· 1407 1407 int ncpus; 1408 1408 1409 1409 /* 1410 - * Today neither Intel nor AMD support heterogenous systems so 1410 + * Today neither Intel nor AMD support heterogeneous systems so 1411 1411 * extrapolate the boot cpu's data to all packages. 1412 1412 */ 1413 1413 ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
+1 -1
arch/x86/kernel/sysfb_efi.c
··· 10 10 * EFI Quirks 11 11 * Several EFI systems do not correctly advertise their boot framebuffers. 12 12 * Hence, we use this static table of known broken machines and fix up the 13 - * information so framebuffer drivers can load corectly. 13 + * information so framebuffer drivers can load correctly. 14 14 */ 15 15 16 16 #include <linux/dmi.h>
+1 -1
arch/x86/kernel/topology.c
··· 113 113 * Two known BSP/CPU0 dependencies: Resume from suspend/hibernate 114 114 * depends on BSP. PIC interrupts depend on BSP. 115 115 * 116 - * If the BSP depencies are under control, one can tell kernel to 116 + * If the BSP dependencies are under control, one can tell kernel to 117 117 * enable BSP hotplug. This basically adds a control file and 118 118 * one can attempt to offline BSP. 119 119 */
+1 -1
arch/x86/kernel/traps.c
··· 395 395 /* 396 396 * Adjust our frame so that we return straight to the #GP 397 397 * vector with the expected RSP value. This is safe because 398 - * we won't enable interupts or schedule before we invoke 398 + * we won't enable interrupts or schedule before we invoke 399 399 * general_protection, so nothing will clobber the stack 400 400 * frame we just set up. 401 401 *
+3 -3
arch/x86/kernel/tsc.c
··· 739 739 * 2) Reference counter. If available we use the HPET or the 740 740 * PMTIMER as a reference to check the sanity of that value. 741 741 * We use separate TSC readouts and check inside of the 742 - * reference read for any possible disturbance. We dicard 742 + * reference read for any possible disturbance. We discard 743 743 * disturbed values here as well. We do that around the PIT 744 744 * calibration delay loop as we have to wait for a certain 745 745 * amount of time anyway. ··· 1079 1079 * very small window right after one CPU updated cycle_last under 1080 1080 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which 1081 1081 * is smaller than the cycle_last reference value due to a TSC which 1082 - * is slighty behind. This delta is nowhere else observable, but in 1082 + * is slightly behind. This delta is nowhere else observable, but in 1083 1083 * that case it results in a forward time jump in the range of hours 1084 1084 * due to the unsigned delta calculation of the time keeping core 1085 1085 * code, which is necessary to support wrapping clocksources like pm ··· 1264 1264 * corresponding clocksource 1265 1265 * @cycles: System counter value 1266 1266 * @cs: Clocksource corresponding to system counter value. Used 1267 - * by timekeeping code to verify comparibility of two cycle 1267 + * by timekeeping code to verify comparability of two cycle 1268 1268 * values. 1269 1269 */ 1270 1270
+1 -1
arch/x86/kvm/cpuid.c
··· 1033 1033 * - Centaur: 0xc0000000 - 0xcfffffff 1034 1034 * 1035 1035 * The Hypervisor class is further subdivided into sub-classes that each act as 1036 - * their own indepdent class associated with a 0x100 byte range. E.g. if Qemu 1036 + * their own independent class associated with a 0x100 byte range. E.g. if Qemu 1037 1037 * is advertising support for both HyperV and KVM, the resulting Hypervisor 1038 1038 * CPUID sub-classes are: 1039 1039 *
+1 -1
arch/x86/kvm/emulate.c
··· 3222 3222 } 3223 3223 3224 3224 /* 3225 - * Now load segment descriptors. If fault happenes at this stage 3225 + * Now load segment descriptors. If fault happens at this stage 3226 3226 * it is handled in a context of new task 3227 3227 */ 3228 3228 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
+1 -1
arch/x86/kvm/irq_comm.c
··· 269 269 const struct kvm_irq_routing_entry *ue) 270 270 { 271 271 /* We can't check irqchip_in_kernel() here as some callers are 272 - * currently inititalizing the irqchip. Other callers should therefore 272 + * currently initializing the irqchip. Other callers should therefore 273 273 * check kvm_arch_can_set_irq_routing() before calling this function. 274 274 */ 275 275 switch (ue->type) {
+1 -1
arch/x86/kvm/mmu/mmu.c
··· 4961 4961 4962 4962 /* 4963 4963 * No need to care whether allocation memory is successful 4964 - * or not since pte prefetch is skiped if it does not have 4964 + * or not since pte prefetch is skipped if it does not have 4965 4965 * enough objects in the cache. 4966 4966 */ 4967 4967 mmu_topup_memory_caches(vcpu, true);
+1 -1
arch/x86/kvm/mmu/mmu_internal.h
··· 59 59 #ifdef CONFIG_X86_64 60 60 bool tdp_mmu_page; 61 61 62 - /* Used for freeing the page asyncronously if it is a TDP MMU page. */ 62 + /* Used for freeing the page asynchronously if it is a TDP MMU page. */ 63 63 struct rcu_head rcu_head; 64 64 #endif 65 65 };
+3 -3
arch/x86/kvm/mmu/tdp_mmu.c
··· 404 404 * If this warning were to trigger it would indicate that there was a 405 405 * missing MMU notifier or a race with some notifier handler. 406 406 * A present, leaf SPTE should never be directly replaced with another 407 - * present leaf SPTE pointing to a differnt PFN. A notifier handler 407 + * present leaf SPTE pointing to a different PFN. A notifier handler 408 408 * should be zapping the SPTE before the main MM's page table is 409 409 * changed, or the SPTE should be zeroed, and the TLBs flushed by the 410 410 * thread before replacement. ··· 418 418 419 419 /* 420 420 * Crash the host to prevent error propagation and guest data 421 - * courruption. 421 + * corruption. 422 422 */ 423 423 BUG(); 424 424 } ··· 533 533 /* 534 534 * No other thread can overwrite the removed SPTE as they 535 535 * must either wait on the MMU lock or use 536 - * tdp_mmu_set_spte_atomic which will not overrite the 536 + * tdp_mmu_set_spte_atomic which will not overwrite the 537 537 * special removed SPTE value. No bookkeeping is needed 538 538 * here since the SPTE is going from non-present 539 539 * to non-present.
+1 -1
arch/x86/kvm/pmu.h
··· 103 103 104 104 /* returns general purpose PMC with the specified MSR. Note that it can be 105 105 * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a 106 - * paramenter to tell them apart. 106 + * parameter to tell them apart. 107 107 */ 108 108 static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr, 109 109 u32 base)
+1 -1
arch/x86/kvm/svm/avic.c
··· 838 838 * Here, we setup with legacy mode in the following cases: 839 839 * 1. When cannot target interrupt to a specific vcpu. 840 840 * 2. Unsetting posted interrupt. 841 - * 3. APIC virtialization is disabled for the vcpu. 841 + * 3. APIC virtualization is disabled for the vcpu. 842 842 * 4. IRQ has incompatible delivery mode (SMI, INIT, etc) 843 843 */ 844 844 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
+1 -1
arch/x86/kvm/svm/sev.c
··· 2082 2082 hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400); 2083 2083 hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); 2084 2084 2085 - /* PKRU is restored on VMEXIT, save the curent host value */ 2085 + /* PKRU is restored on VMEXIT, save the current host value */ 2086 2086 hostsa->pkru = read_pkru(); 2087 2087 2088 2088 /* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
+1 -1
arch/x86/kvm/svm/svm.c
··· 4400 4400 * 4401 4401 * This happens because CPU microcode reading instruction bytes 4402 4402 * uses a special opcode which attempts to read data using CPL=0 4403 - * priviledges. The microcode reads CS:RIP and if it hits a SMAP 4403 + * privileges. The microcode reads CS:RIP and if it hits a SMAP 4404 4404 * fault, it gives up and returns no instruction bytes. 4405 4405 * 4406 4406 * Detection:
+1 -1
arch/x86/kvm/vmx/posted_intr.c
··· 10 10 #include "vmx.h" 11 11 12 12 /* 13 - * We maintian a per-CPU linked-list of vCPU, so in wakeup_handler() we 13 + * We maintain a per-CPU linked-list of vCPU, so in wakeup_handler() we 14 14 * can find which vCPU should be woken up. 15 15 */ 16 16 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
+3 -3
arch/x86/kvm/vmx/vmx.c
··· 1529 1529 1530 1530 /* 1531 1531 * MTCFreq, CycThresh and PSBFreq encodings check, any MSR write that 1532 - * utilize encodings marked reserved will casue a #GP fault. 1532 + * utilize encodings marked reserved will cause a #GP fault. 1533 1533 */ 1534 1534 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods); 1535 1535 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) && ··· 2761 2761 struct vcpu_vmx *vmx = to_vmx(vcpu); 2762 2762 2763 2763 /* 2764 - * Update real mode segment cache. It may be not up-to-date if sement 2764 + * Update real mode segment cache. It may be not up-to-date if segment 2765 2765 * register was written while vcpu was in a guest mode. 2766 2766 */ 2767 2767 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); ··· 7252 7252 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output)) 7253 7253 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA; 7254 7254 7255 - /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabircEn can be set */ 7255 + /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */ 7256 7256 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys)) 7257 7257 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN; 7258 7258
+3 -3
arch/x86/kvm/x86.c
··· 156 156 157 157 /* 158 158 * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables 159 - * adaptive tuning starting from default advancment of 1000ns. '0' disables 159 + * adaptive tuning starting from default advancement of 1000ns. '0' disables 160 160 * advancement entirely. Any other value is used as-is and disables adaptive 161 - * tuning, i.e. allows priveleged userspace to set an exact advancement time. 161 + * tuning, i.e. allows privileged userspace to set an exact advancement time. 162 162 */ 163 163 static int __read_mostly lapic_timer_advance_ns = -1; 164 164 module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR); ··· 1373 1373 /* 1374 1374 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that 1375 1375 * the nested hypervisor runs with NX huge pages. If it is not, 1376 - * L1 is anyway vulnerable to ITLB_MULTIHIT explots from other 1376 + * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other 1377 1377 * L1 guests, so it need not worry about its own (L2) guests. 1378 1378 */ 1379 1379 data |= ARCH_CAP_PSCHANGE_MC_NO;
+3 -3
arch/x86/lib/insn-eval.c
··· 232 232 * resolve_seg_reg() - obtain segment register index 233 233 * @insn: Instruction with operands 234 234 * @regs: Register values as seen when entering kernel mode 235 - * @regoff: Operand offset, in pt_regs, used to deterimine segment register 235 + * @regoff: Operand offset, in pt_regs, used to determine segment register 236 236 * 237 237 * Determine the segment register associated with the operands and, if 238 238 * applicable, prefixes and the instruction pointed by @insn. ··· 517 517 * @insn: Instruction containing ModRM byte 518 518 * @regs: Register values as seen when entering kernel mode 519 519 * @offs1: Offset of the first operand register 520 - * @offs2: Offset of the second opeand register, if applicable 520 + * @offs2: Offset of the second operand register, if applicable 521 521 * 522 522 * Obtain the offset, in pt_regs, of the registers indicated by the ModRM byte 523 523 * in @insn. This function is to be used with 16-bit address encodings. The ··· 576 576 * If ModRM.mod is 0 and ModRM.rm is 110b, then we use displacement- 577 577 * only addressing. This means that no registers are involved in 578 578 * computing the effective address. Thus, ensure that the first 579 - * register offset is invalild. The second register offset is already 579 + * register offset is invalid. The second register offset is already 580 580 * invalid under the aforementioned conditions. 581 581 */ 582 582 if ((X86_MODRM_MOD(insn->modrm.value) == 0) &&
+1 -1
arch/x86/lib/mmx_32.c
··· 14 14 * tested so far for any MMX solution figured. 15 15 * 16 16 * 22/09/2000 - Arjan van de Ven 17 - * Improved for non-egineering-sample Athlons 17 + * Improved for non-engineering-sample Athlons 18 18 * 19 19 */ 20 20 #include <linux/hardirq.h>
+1 -1
arch/x86/mm/fault.c
··· 1523 1523 * 1524 1524 * In case the fault hit a RCU idle region the conditional entry 1525 1525 * code reenabled RCU to avoid subsequent wreckage which helps 1526 - * debugability. 1526 + * debuggability. 1527 1527 */ 1528 1528 state = irqentry_enter(regs); 1529 1529
+2 -2
arch/x86/mm/init.c
··· 29 29 30 30 /* 31 31 * We need to define the tracepoints somewhere, and tlb.c 32 - * is only compied when SMP=y. 32 + * is only compiled when SMP=y. 33 33 */ 34 34 #define CREATE_TRACE_POINTS 35 35 #include <trace/events/tlb.h> ··· 939 939 { 940 940 /* 941 941 * end could be not aligned, and We can not align that, 942 - * decompresser could be confused by aligned initrd_end 942 + * decompressor could be confused by aligned initrd_end 943 943 * We already reserve the end partial page before in 944 944 * - i386_start_kernel() 945 945 * - x86_64_start_kernel()
+3 -3
arch/x86/mm/init_64.c
··· 172 172 173 173 /* 174 174 * With folded p4d, pgd_none() is always false, we need to 175 - * handle synchonization on p4d level. 175 + * handle synchronization on p4d level. 176 176 */ 177 177 MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref)); 178 178 p4d_ref = p4d_offset(pgd_ref, addr); ··· 986 986 if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) { 987 987 /* 988 988 * Do not free direct mapping pages since they were 989 - * freed when offlining, or simplely not in use. 989 + * freed when offlining, or simply not in use. 990 990 */ 991 991 if (!direct) 992 992 free_pagetable(pte_page(*pte), 0); ··· 1004 1004 * 1005 1005 * If we are not removing the whole page, it means 1006 1006 * other page structs in this page are being used and 1007 - * we canot remove them. So fill the unused page_structs 1007 + * we cannot remove them. So fill the unused page_structs 1008 1008 * with 0xFD, and remove the page when it is wholly 1009 1009 * filled with 0xFD. 1010 1010 */
+1 -1
arch/x86/mm/kaslr.c
··· 96 96 memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) + 97 97 CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING; 98 98 99 - /* Adapt phyiscal memory region size based on available memory */ 99 + /* Adapt physical memory region size based on available memory */ 100 100 if (memory_tb < kaslr_regions[0].size_tb) 101 101 kaslr_regions[0].size_tb = memory_tb; 102 102
+1 -1
arch/x86/mm/kmmio.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* Support for MMIO probes. 3 - * Benfit many code from kprobes 3 + * Benefit many code from kprobes 4 4 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>. 5 5 * 2007 Alexander Eichner 6 6 * 2008 Pekka Paalanen <pq@iki.fi>
+1 -1
arch/x86/mm/mem_encrypt_boot.S
··· 27 27 * - stack page (PAGE_SIZE) 28 28 * - encryption routine page (PAGE_SIZE) 29 29 * - intermediate copy buffer (PMD_PAGE_SIZE) 30 - * R8 - physcial address of the pagetables to use for encryption 30 + * R8 - physical address of the pagetables to use for encryption 31 31 */ 32 32 33 33 push %rbp
+1 -1
arch/x86/mm/pat/memtype.c
··· 695 695 696 696 697 697 /** 698 - * lookup_memtype - Looksup the memory type for a physical address 698 + * lookup_memtype - Looks up the memory type for a physical address 699 699 * @paddr: physical address of which memory type needs to be looked up 700 700 * 701 701 * Only to be called when PAT is enabled
+1 -1
arch/x86/mm/pat/set_memory.c
··· 680 680 * end up in this kind of memory, for instance. 681 681 * 682 682 * This could be optimized, but it is only intended to be 683 - * used at inititalization time, and keeping it 683 + * used at initialization time, and keeping it 684 684 * unoptimized should increase the testing coverage for 685 685 * the more obscure platforms. 686 686 */
+2 -2
arch/x86/mm/pti.c
··· 361 361 * global, so set it as global in both copies. Note: 362 362 * the X86_FEATURE_PGE check is not _required_ because 363 363 * the CPU ignores _PAGE_GLOBAL when PGE is not 364 - * supported. The check keeps consistentency with 364 + * supported. The check keeps consistency with 365 365 * code that only set this bit when supported. 366 366 */ 367 367 if (boot_cpu_has(X86_FEATURE_PGE)) ··· 512 512 static inline bool pti_kernel_image_global_ok(void) 513 513 { 514 514 /* 515 - * Systems with PCIDs get litlle benefit from global 515 + * Systems with PCIDs get little benefit from global 516 516 * kernel text and are not worth the downsides. 517 517 */ 518 518 if (cpu_feature_enabled(X86_FEATURE_PCID))
+3 -3
arch/x86/mm/tlb.c
··· 106 106 107 107 #ifdef CONFIG_PAGE_TABLE_ISOLATION 108 108 /* 109 - * Make sure that the dynamic ASID space does not confict with the 109 + * Make sure that the dynamic ASID space does not conflict with the 110 110 * bit we are using to switch between user and kernel ASIDs. 111 111 */ 112 112 BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT)); ··· 736 736 * 3, we'd be break the invariant: we'd update local_tlb_gen above 737 737 * 1 without the full flush that's needed for tlb_gen 2. 738 738 * 739 - * 2. f->new_tlb_gen == mm_tlb_gen. This is purely an optimiation. 739 + * 2. f->new_tlb_gen == mm_tlb_gen. This is purely an optimization. 740 740 * Partial TLB flushes are not all that much cheaper than full TLB 741 741 * flushes, so it seems unlikely that it would be a performance win 742 742 * to do a partial flush if that won't bring our TLB fully up to ··· 876 876 static inline void put_flush_tlb_info(void) 877 877 { 878 878 #ifdef CONFIG_DEBUG_VM 879 - /* Complete reentrency prevention checks */ 879 + /* Complete reentrancy prevention checks */ 880 880 barrier(); 881 881 this_cpu_dec(flush_tlb_info_idx); 882 882 #endif
+2 -2
arch/x86/net/bpf_jit_comp.c
··· 1556 1556 if (is_imm8(jmp_offset)) { 1557 1557 if (jmp_padding) { 1558 1558 /* To keep the jmp_offset valid, the extra bytes are 1559 - * padded before the jump insn, so we substract the 1559 + * padded before the jump insn, so we subtract the 1560 1560 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF. 1561 1561 * 1562 1562 * If the previous pass already emits an imm8 ··· 1631 1631 if (jmp_padding) { 1632 1632 /* To avoid breaking jmp_offset, the extra bytes 1633 1633 * are padded before the actual jmp insn, so 1634 - * 2 bytes is substracted from INSN_SZ_DIFF. 1634 + * 2 bytes is subtracted from INSN_SZ_DIFF. 1635 1635 * 1636 1636 * If the previous pass already emits an imm8 1637 1637 * jmp, there is nothing to pad (0 byte).
+1 -1
arch/x86/pci/fixup.c
··· 375 375 * The BIOS only gives options "DISABLED" and "AUTO". This code sets 376 376 * the corresponding register-value to enable the soundcard. 377 377 * 378 - * The soundcard is only enabled, if the mainborad is identified 378 + * The soundcard is only enabled, if the mainboard is identified 379 379 * via DMI-tables and the soundcard is detected to be off. 380 380 */ 381 381 static void pci_fixup_msi_k8t_onboard_sound(struct pci_dev *dev)
+2 -2
arch/x86/platform/efi/efi_64.c
··· 195 195 } 196 196 197 197 /* 198 - * Certain firmware versions are way too sentimential and still believe 198 + * Certain firmware versions are way too sentimental and still believe 199 199 * they are exclusive and unquestionable owners of the first physical page, 200 200 * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY 201 201 * (but then write-access it later during SetVirtualAddressMap()). ··· 457 457 * in a kernel thread and user context. Preemption needs to remain disabled 458 458 * while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used because the mm 459 459 * can not change under us. 460 - * It should be ensured that there are no concurent calls to this function. 460 + * It should be ensured that there are no concurrent calls to this function. 461 461 */ 462 462 void efi_enter_mm(void) 463 463 {
+1 -1
arch/x86/platform/efi/quirks.c
··· 726 726 * Buggy efi_reset_system() is handled differently from other EFI 727 727 * Runtime Services as it doesn't use efi_rts_wq. Although, 728 728 * native_machine_emergency_restart() says that machine_real_restart() 729 - * could fail, it's better not to compilcate this fault handler 729 + * could fail, it's better not to complicate this fault handler 730 730 * because this case occurs *very* rarely and hence could be improved 731 731 * on a need by basis. 732 732 */
+1 -1
arch/x86/platform/intel-quark/imr.c
··· 551 551 552 552 /* 553 553 * Setup an unlocked IMR around the physical extent of the kernel 554 - * from the beginning of the .text secton to the end of the 554 + * from the beginning of the .text section to the end of the 555 555 * .rodata section as one physically contiguous block. 556 556 * 557 557 * We don't round up @size since it is already PAGE_SIZE aligned.
+2 -2
arch/x86/platform/intel/iosf_mbi.c
··· 187 187 EXPORT_SYMBOL(iosf_mbi_available); 188 188 189 189 /* 190 - **************** P-Unit/kernel shared I2C bus arbritration **************** 190 + **************** P-Unit/kernel shared I2C bus arbitration **************** 191 191 * 192 192 * Some Bay Trail and Cherry Trail devices have the P-Unit and us (the kernel) 193 193 * share a single I2C bus to the PMIC. Below are helpers to arbitrate the ··· 493 493 /* mcrx */ 494 494 debugfs_create_x32("mcrx", 0660, iosf_dbg, &dbg_mcrx); 495 495 496 - /* mcr - initiates mailbox tranaction */ 496 + /* mcr - initiates mailbox transaction */ 497 497 debugfs_create_file("mcr", 0660, iosf_dbg, &dbg_mcr, &iosf_mcr_fops); 498 498 } 499 499
+1 -1
arch/x86/platform/uv/uv_nmi.c
··· 889 889 * Call KGDB/KDB from NMI handler 890 890 * 891 891 * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or 892 - * 'kdb' has no affect on which is used. See the KGDB documention for further 892 + * 'kdb' has no effect on which is used. See the KGDB documentation for further 893 893 * information. 894 894 */ 895 895 static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)