Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: fix typos in comments

Various spelling mistakes in comments.
Detected with the help of Coccinelle.

Signed-off-by: Julia Lawall <Julia.Lawall@inria.fr>
Reviewed-by: Joel Stanley <joel@jms.id.au>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220430185654.5855-1-Julia.Lawall@inria.fr

Authored by Julia Lawall and committed by Michael Ellerman
1fd02f66 c14d31ba

+104 -104
+1 -1
arch/powerpc/boot/cuboot-hotfoot.c
··· 70 70 71 71 printf("Fixing devtree for 4M Flash\n"); 72 72 73 - /* First fix up the base addresse */ 73 + /* First fix up the base address */ 74 74 getprop(devp, "reg", regs, sizeof(regs)); 75 75 regs[0] = 0; 76 76 regs[1] = 0xffc00000;
+1 -1
arch/powerpc/crypto/aes-spe-glue.c
··· 404 404 405 405 /* 406 406 * Algorithm definitions. Disabling alignment (cra_alignmask=0) was chosen 407 - * because the e500 platform can handle unaligned reads/writes very efficently. 407 + * because the e500 platform can handle unaligned reads/writes very efficiently. 408 408 * This improves IPsec thoughput by another few percent. Additionally we assume 409 409 * that AES context is always aligned to at least 8 bytes because it is created 410 410 * with kmalloc() in the crypto infrastructure
+1 -1
arch/powerpc/kernel/cputable.c
··· 2025 2025 * oprofile_cpu_type already has a value, then we are 2026 2026 * possibly overriding a real PVR with a logical one, 2027 2027 * and, in that case, keep the current value for 2028 - * oprofile_cpu_type. Futhermore, let's ensure that the 2028 + * oprofile_cpu_type. Furthermore, let's ensure that the 2029 2029 * fix for the PMAO bug is enabled on compatibility mode. 2030 2030 */ 2031 2031 if (old.oprofile_cpu_type != NULL) {
+1 -1
arch/powerpc/kernel/dawr.c
··· 27 27 dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) >> 3; 28 28 /* 29 29 * DAWR length is stored in field MDR bits 48:53. Matches range in 30 - * doublewords (64 bits) baised by -1 eg. 0b000000=1DW and 30 + * doublewords (64 bits) biased by -1 eg. 0b000000=1DW and 31 31 * 0b111111=64DW. 32 32 * brk->hw_len is in bytes. 33 33 * This aligns up to double word size, shifts and does the bias.
+2 -2
arch/powerpc/kernel/eeh.c
··· 1329 1329 1330 1330 /* 1331 1331 * EEH functionality could possibly be disabled, just 1332 - * return error for the case. And the EEH functinality 1332 + * return error for the case. And the EEH functionality 1333 1333 * isn't expected to be disabled on one specific PE. 1334 1334 */ 1335 1335 switch (option) { ··· 1804 1804 * PE freeze. Using the in_8() accessor skips the eeh detection hook 1805 1805 * so the freeze hook so the EEH Detection machinery won't be 1806 1806 * triggered here. This is to match the usual behaviour of EEH 1807 - * where the HW will asyncronously freeze a PE and it's up to 1807 + * where the HW will asynchronously freeze a PE and it's up to 1808 1808 * the kernel to notice and deal with it. 1809 1809 * 1810 1810 * 3. Turn Memory space back on. This is more important for VFs
+1 -1
arch/powerpc/kernel/eeh_event.c
··· 143 143 int eeh_send_failure_event(struct eeh_pe *pe) 144 144 { 145 145 /* 146 - * If we've manually supressed recovery events via debugfs 146 + * If we've manually suppressed recovery events via debugfs 147 147 * then just drop it on the floor. 148 148 */ 149 149 if (eeh_debugfs_no_recover) {
+2 -2
arch/powerpc/kernel/fadump.c
··· 1671 1671 } 1672 1672 /* 1673 1673 * Use subsys_initcall_sync() here because there is dependency with 1674 - * crash_save_vmcoreinfo_init(), which mush run first to ensure vmcoreinfo initialization 1675 - * is done before regisering with f/w. 1674 + * crash_save_vmcoreinfo_init(), which must run first to ensure vmcoreinfo initialization 1675 + * is done before registering with f/w. 1676 1676 */ 1677 1677 subsys_initcall_sync(setup_fadump); 1678 1678 #else /* !CONFIG_PRESERVE_FA_DUMP */
+1 -1
arch/powerpc/kernel/module_32.c
··· 99 99 100 100 /* Sort the relocation information based on a symbol and 101 101 * addend key. This is a stable O(n*log n) complexity 102 - * alogrithm but it will reduce the complexity of 102 + * algorithm but it will reduce the complexity of 103 103 * count_relocs() to linear complexity O(n) 104 104 */ 105 105 sort((void *)hdr + sechdrs[i].sh_offset,
+2 -2
arch/powerpc/kernel/module_64.c
··· 194 194 195 195 /* Sort the relocation information based on a symbol and 196 196 * addend key. This is a stable O(n*log n) complexity 197 - * alogrithm but it will reduce the complexity of 197 + * algorithm but it will reduce the complexity of 198 198 * count_relocs() to linear complexity O(n) 199 199 */ 200 200 sort((void *)sechdrs[i].sh_addr, ··· 361 361 entry->jump[1] |= PPC_HA(reladdr); 362 362 entry->jump[2] |= PPC_LO(reladdr); 363 363 364 - /* Eventhough we don't use funcdata in the stub, it's needed elsewhere. */ 364 + /* Even though we don't use funcdata in the stub, it's needed elsewhere. */ 365 365 entry->funcdata = func_desc(addr); 366 366 entry->magic = STUB_MAGIC; 367 367
+1 -1
arch/powerpc/kernel/pci-common.c
··· 1688 1688 static void fixup_hide_host_resource_fsl(struct pci_dev *dev) 1689 1689 { 1690 1690 int i, class = dev->class >> 8; 1691 - /* When configured as agent, programing interface = 1 */ 1691 + /* When configured as agent, programming interface = 1 */ 1692 1692 int prog_if = dev->class & 0xf; 1693 1693 1694 1694 if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
+1 -1
arch/powerpc/kernel/pci_of_scan.c
··· 244 244 * @dev: pci_dev structure for the bridge 245 245 * 246 246 * of_scan_bus() calls this routine for each PCI bridge that it finds, and 247 - * this routine in turn call of_scan_bus() recusively to scan for more child 247 + * this routine in turn call of_scan_bus() recursively to scan for more child 248 248 * devices. 249 249 */ 250 250 void of_scan_pci_bridge(struct pci_dev *dev)
+2 -2
arch/powerpc/kernel/process.c
··· 305 305 unsigned long msr = tsk->thread.regs->msr; 306 306 307 307 /* 308 - * We should never be ssetting MSR_VSX without also setting 308 + * We should never be setting MSR_VSX without also setting 309 309 * MSR_FP and MSR_VEC 310 310 */ 311 311 WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC))); ··· 643 643 return; 644 644 } 645 645 646 - /* Otherwise findout which DAWR caused exception and disable it. */ 646 + /* Otherwise find out which DAWR caused exception and disable it. */ 647 647 wp_get_instr_detail(regs, &instr, &type, &size, &ea); 648 648 649 649 for (i = 0; i < nr_wp_slots(); i++) {
+1 -1
arch/powerpc/kernel/prom_init.c
··· 3416 3416 * 3417 3417 * PowerMacs use a different mechanism to spin CPUs 3418 3418 * 3419 - * (This must be done after instanciating RTAS) 3419 + * (This must be done after instantiating RTAS) 3420 3420 */ 3421 3421 if (of_platform != PLATFORM_POWERMAC) 3422 3422 prom_hold_cpus();
+1 -1
arch/powerpc/kernel/ptrace/ptrace-view.c
··· 174 174 175 175 /* 176 176 * softe copies paca->irq_soft_mask variable state. Since irq_soft_mask is 177 - * no more used as a flag, lets force usr to alway see the softe value as 1 177 + * no more used as a flag, lets force usr to always see the softe value as 1 178 178 * which means interrupts are not soft disabled. 179 179 */ 180 180 if (IS_ENABLED(CONFIG_PPC64) && regno == PT_SOFTE) {
+1 -1
arch/powerpc/kernel/rtas_flash.c
··· 120 120 /* 121 121 * Local copy of the flash block list. 122 122 * 123 - * The rtas_firmware_flash_list varable will be 123 + * The rtas_firmware_flash_list variable will be 124 124 * set once the data is fully read. 125 125 * 126 126 * For convenience as we build the list we use virtual addrs,
+1 -1
arch/powerpc/kernel/setup-common.c
··· 279 279 proc_freq / 1000000, proc_freq % 1000000); 280 280 281 281 /* If we are a Freescale core do a simple check so 282 - * we dont have to keep adding cases in the future */ 282 + * we don't have to keep adding cases in the future */ 283 283 if (PVR_VER(pvr) & 0x8000) { 284 284 switch (PVR_VER(pvr)) { 285 285 case 0x8000: /* 7441/7450/7451, Voyager */
+1 -1
arch/powerpc/kernel/signal_64.c
··· 123 123 #endif 124 124 struct pt_regs *regs = tsk->thread.regs; 125 125 unsigned long msr = regs->msr; 126 - /* Force usr to alway see softe as 1 (interrupts enabled) */ 126 + /* Force usr to always see softe as 1 (interrupts enabled) */ 127 127 unsigned long softe = 0x1; 128 128 129 129 BUG_ON(tsk != current);
+1 -1
arch/powerpc/kernel/smp.c
··· 1102 1102 DBG("smp_prepare_cpus\n"); 1103 1103 1104 1104 /* 1105 - * setup_cpu may need to be called on the boot cpu. We havent 1105 + * setup_cpu may need to be called on the boot cpu. We haven't 1106 1106 * spun any cpus up but lets be paranoid. 1107 1107 */ 1108 1108 BUG_ON(boot_cpuid != smp_processor_id());
+2 -2
arch/powerpc/kernel/time.c
··· 828 828 static int first = 1; 829 829 830 830 ts->tv_nsec = 0; 831 - /* XXX this is a litle fragile but will work okay in the short term */ 831 + /* XXX this is a little fragile but will work okay in the short term */ 832 832 if (first) { 833 833 first = 0; 834 834 if (ppc_md.time_init) ··· 973 973 */ 974 974 start_cpu_decrementer(); 975 975 976 - /* FIME: Should make unrelatred change to move snapshot_timebase 976 + /* FIME: Should make unrelated change to move snapshot_timebase 977 977 * call here ! */ 978 978 register_decrementer_clockevent(smp_processor_id()); 979 979 }
+1 -1
arch/powerpc/kernel/watchdog.c
··· 56 56 * solved by also having a SMP watchdog where all CPUs check all other 57 57 * CPUs heartbeat. 58 58 * 59 - * The SMP checker can detect lockups on other CPUs. A gobal "pending" 59 + * The SMP checker can detect lockups on other CPUs. A global "pending" 60 60 * cpumask is kept, containing all CPUs which enable the watchdog. Each 61 61 * CPU clears their pending bit in their heartbeat timer. When the bitmask 62 62 * becomes empty, the last CPU to clear its pending bit updates a global
+1 -1
arch/powerpc/kexec/core_64.c
··· 406 406 if (!node) 407 407 return -ENODEV; 408 408 409 - /* remove any stale propertys so ours can be found */ 409 + /* remove any stale properties so ours can be found */ 410 410 of_remove_property(node, of_find_property(node, htab_base_prop.name, NULL)); 411 411 of_remove_property(node, of_find_property(node, htab_size_prop.name, NULL)); 412 412
+1 -1
arch/powerpc/kvm/book3s_64_mmu_hv.c
··· 58 58 /* Possible values and their usage: 59 59 * <0 an error occurred during allocation, 60 60 * -EBUSY allocation is in the progress, 61 - * 0 allocation made successfuly. 61 + * 0 allocation made successfully. 62 62 */ 63 63 int error; 64 64
+1 -1
arch/powerpc/kvm/book3s_64_vio_hv.c
··· 453 453 * we are doing this on secondary cpus and current task there 454 454 * is not the hypervisor. Also this is safe against THP in the 455 455 * host, because an IPI to primary thread will wait for the secondary 456 - * to exit which will agains result in the below page table walk 456 + * to exit which will again result in the below page table walk 457 457 * to finish. 458 458 */ 459 459 /* an rmap lock won't make it safe. because that just ensure hash
+1 -1
arch/powerpc/kvm/book3s_emulate.c
··· 268 268 269 269 /* 270 270 * add rules to fit in ISA specification regarding TM 271 - * state transistion in TM disable/Suspended state, 271 + * state transition in TM disable/Suspended state, 272 272 * and target TM state is TM inactive(00) state. (the 273 273 * change should be suppressed). 274 274 */
+1 -1
arch/powerpc/kvm/book3s_hv_p9_entry.c
··· 379 379 { 380 380 /* 381 381 * current->thread.xxx registers must all be restored to host 382 - * values before a potential context switch, othrewise the context 382 + * values before a potential context switch, otherwise the context 383 383 * switch itself will overwrite current->thread.xxx with the values 384 384 * from the guest SPRs. 385 385 */
+1 -1
arch/powerpc/kvm/book3s_hv_uvmem.c
··· 120 120 * content is un-encrypted. 121 121 * 122 122 * (c) Normal - The GFN is a normal. The GFN is associated with 123 - * a normal VM. The contents of the GFN is accesible to 123 + * a normal VM. The contents of the GFN is accessible to 124 124 * the Hypervisor. Its content is never encrypted. 125 125 * 126 126 * States of a VM.
+1 -1
arch/powerpc/kvm/book3s_pr.c
··· 1287 1287 1288 1288 /* Get last sc for papr */ 1289 1289 if (vcpu->arch.papr_enabled) { 1290 - /* The sc instuction points SRR0 to the next inst */ 1290 + /* The sc instruction points SRR0 to the next inst */ 1291 1291 emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc); 1292 1292 if (emul != EMULATE_DONE) { 1293 1293 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
+1 -1
arch/powerpc/kvm/book3s_xics.c
··· 462 462 * new guy. We cannot assume that the rejected interrupt is less 463 463 * favored than the new one, and thus doesn't need to be delivered, 464 464 * because by the time we exit icp_try_to_deliver() the target 465 - * processor may well have alrady consumed & completed it, and thus 465 + * processor may well have already consumed & completed it, and thus 466 466 * the rejected interrupt might actually be already acceptable. 467 467 */ 468 468 if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
+3 -3
arch/powerpc/kvm/book3s_xive.c
··· 124 124 * interrupt might have fired and be on its way to the 125 125 * host queue while we mask it, and if we unmask it 126 126 * early enough (re-cede right away), there is a 127 - * theorical possibility that it fires again, thus 127 + * theoretical possibility that it fires again, thus 128 128 * landing in the target queue more than once which is 129 129 * a big no-no. 130 130 * ··· 622 622 623 623 /* 624 624 * Targetting rules: In order to avoid losing track of 625 - * pending interrupts accross mask and unmask, which would 625 + * pending interrupts across mask and unmask, which would 626 626 * allow queue overflows, we implement the following rules: 627 627 * 628 628 * - Unless it was never enabled (or we run out of capacity) ··· 1073 1073 /* 1074 1074 * If old_p is set, the interrupt is pending, we switch it to 1075 1075 * PQ=11. This will force a resend in the host so the interrupt 1076 - * isn't lost to whatver host driver may pick it up 1076 + * isn't lost to whatever host driver may pick it up 1077 1077 */ 1078 1078 if (state->old_p) 1079 1079 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
+1 -1
arch/powerpc/kvm/e500mc.c
··· 309 309 BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0); 310 310 vcpu_e500 = to_e500(vcpu); 311 311 312 - /* Invalid PIR value -- this LPID dosn't have valid state on any cpu */ 312 + /* Invalid PIR value -- this LPID doesn't have valid state on any cpu */ 313 313 vcpu->arch.oldpir = 0xffffffff; 314 314 315 315 err = kvmppc_e500_tlb_init(vcpu_e500);
+1 -1
arch/powerpc/mm/book3s64/hash_pgtable.c
··· 377 377 if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT) 378 378 return 0; 379 379 /* 380 - * We need to make sure that we support 16MB hugepage in a segement 380 + * We need to make sure that we support 16MB hugepage in a segment 381 381 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE 382 382 * of 64K. 383 383 */
+2 -2
arch/powerpc/mm/book3s64/hash_utils.c
··· 1338 1338 spp >>= 30 - 2 * ((ea >> 12) & 0xf); 1339 1339 1340 1340 /* 1341 - * 0 -> full premission 1341 + * 0 -> full permission 1342 1342 * 1 -> Read only 1343 1343 * 2 -> no access. 1344 1344 * We return the flag that need to be cleared. ··· 1659 1659 1660 1660 err = hash_page_mm(mm, ea, access, TRAP(regs), flags); 1661 1661 if (unlikely(err < 0)) { 1662 - // failed to instert a hash PTE due to an hypervisor error 1662 + // failed to insert a hash PTE due to an hypervisor error 1663 1663 if (user_mode(regs)) { 1664 1664 if (IS_ENABLED(CONFIG_PPC_SUBPAGE_PROT) && err == -2) 1665 1665 _exception(SIGSEGV, regs, SEGV_ACCERR, ea);
+1 -1
arch/powerpc/mm/book3s64/pgtable.c
··· 331 331 spin_lock(&mm->page_table_lock); 332 332 /* 333 333 * If we find pgtable_page set, we return 334 - * the allocated page with single fragement 334 + * the allocated page with single fragment 335 335 * count. 336 336 */ 337 337 if (likely(!mm->context.pmd_frag)) {
+1 -1
arch/powerpc/mm/book3s64/radix_pgtable.c
··· 359 359 if (!cpu_has_feature(CPU_FTR_HVMODE) && 360 360 cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) { 361 361 /* 362 - * Older versions of KVM on these machines perfer if the 362 + * Older versions of KVM on these machines prefer if the 363 363 * guest only uses the low 19 PID bits. 364 364 */ 365 365 mmu_pid_bits = 19;
+1 -1
arch/powerpc/mm/book3s64/radix_tlb.c
··· 397 397 398 398 /* 399 399 * Workaround the fact that the "ric" argument to __tlbie_pid 400 - * must be a compile-time contraint to match the "i" constraint 400 + * must be a compile-time constraint to match the "i" constraint 401 401 * in the asm statement. 402 402 */ 403 403 switch (ric) {
+2 -2
arch/powerpc/mm/book3s64/slb.c
··· 347 347 /* 348 348 * We have no good place to clear the slb preload cache on exec, 349 349 * flush_thread is about the earliest arch hook but that happens 350 - * after we switch to the mm and have aleady preloaded the SLBEs. 350 + * after we switch to the mm and have already preloaded the SLBEs. 351 351 * 352 352 * For the most part that's probably okay to use entries from the 353 353 * previous exec, they will age out if unused. It may turn out to ··· 615 615 } else { 616 616 /* 617 617 * Our cache is full and the current cache content strictly 618 - * doesn't indicate the active SLB conents. Bump the ptr 618 + * doesn't indicate the active SLB contents. Bump the ptr 619 619 * so that switch_slb() will ignore the cache. 620 620 */ 621 621 local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
+2 -2
arch/powerpc/mm/init_64.c
··· 111 111 } 112 112 113 113 /* 114 - * vmemmap virtual address space management does not have a traditonal page 114 + * vmemmap virtual address space management does not have a traditional page 115 115 * table to track which virtual struct pages are backed by physical mapping. 116 116 * The virtual to physical mappings are tracked in a simple linked list 117 117 * format. 'vmemmap_list' maintains the entire vmemmap physical mapping at ··· 128 128 129 129 /* 130 130 * The same pointer 'next' tracks individual chunks inside the allocated 131 - * full page during the boot time and again tracks the freeed nodes during 131 + * full page during the boot time and again tracks the freed nodes during 132 132 * runtime. It is racy but it does not happen as they are separated by the 133 133 * boot process. Will create problem if some how we have memory hotplug 134 134 * operation during boot !!
+1 -1
arch/powerpc/mm/nohash/book3e_hugetlbpage.c
··· 142 142 tsize = shift - 10; 143 143 /* 144 144 * We can't be interrupted while we're setting up the MAS 145 - * regusters or after we've confirmed that no tlb exists. 145 + * registers or after we've confirmed that no tlb exists. 146 146 */ 147 147 local_irq_save(flags); 148 148
+1 -1
arch/powerpc/mm/nohash/kaslr_booke.c
··· 315 315 ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true); 316 316 linear_sz = min_t(unsigned long, ram, SZ_512M); 317 317 318 - /* If the linear size is smaller than 64M, do not randmize */ 318 + /* If the linear size is smaller than 64M, do not randomize */ 319 319 if (linear_sz < SZ_64M) 320 320 return 0; 321 321
+1 -1
arch/powerpc/mm/pgtable-frag.c
··· 83 83 spin_lock(&mm->page_table_lock); 84 84 /* 85 85 * If we find pgtable_page set, we return 86 - * the allocated page with single fragement 86 + * the allocated page with single fragment 87 87 * count. 88 88 */ 89 89 if (likely(!pte_frag_get(&mm->context))) {
+1 -1
arch/powerpc/perf/8xx-pmu.c
··· 157 157 158 158 mpc8xx_pmu_read(event); 159 159 160 - /* If it was the last user, stop counting to avoid useles overhead */ 160 + /* If it was the last user, stop counting to avoid useless overhead */ 161 161 switch (event_type(event)) { 162 162 case PERF_8xx_ID_CPU_CYCLES: 163 163 break;
+3 -3
arch/powerpc/perf/core-book3s.c
··· 1142 1142 /* 1143 1143 * POWER7 can roll back counter values, if the new value is smaller 1144 1144 * than the previous value it will cause the delta and the counter to 1145 - * have bogus values unless we rolled a counter over. If a coutner is 1145 + * have bogus values unless we rolled a counter over. If a counter is 1146 1146 * rolled back, it will be smaller, but within 256, which is the maximum 1147 1147 * number of events to rollback at once. If we detect a rollback 1148 1148 * return 0. This can lead to a small lack of precision in the ··· 2057 2057 /* 2058 2058 * PMU config registers have fields that are 2059 2059 * reserved and some specific values for bit fields are reserved. 2060 - * For ex., MMCRA[61:62] is Randome Sampling Mode (SM) 2060 + * For ex., MMCRA[61:62] is Random Sampling Mode (SM) 2061 2061 * and value of 0b11 to this field is reserved. 2062 2062 * Check for invalid values in attr.config. 2063 2063 */ ··· 2447 2447 } 2448 2448 2449 2449 /* 2450 - * During system wide profling or while specific CPU is monitored for an 2450 + * During system wide profiling or while specific CPU is monitored for an 2451 2451 * event, some corner cases could cause PMC to overflow in idle path. This 2452 2452 * will trigger a PMI after waking up from idle. Since counter values are _not_ 2453 2453 * saved/restored in idle path, can lead to below "Can't find PMC" message.
+2 -2
arch/powerpc/perf/imc-pmu.c
··· 521 521 522 522 /* 523 523 * Nest HW counter memory resides in a per-chip reserve-memory (HOMER). 524 - * Get the base memory addresss for this cpu. 524 + * Get the base memory address for this cpu. 525 525 */ 526 526 chip_id = cpu_to_chip_id(event->cpu); 527 527 ··· 674 674 /* 675 675 * Check whether core_imc is registered. We could end up here 676 676 * if the cpuhotplug callback registration fails. i.e, callback 677 - * invokes the offline path for all sucessfully registered cpus. 677 + * invokes the offline path for all successfully registered cpus. 678 678 * At this stage, core_imc pmu will not be registered and we 679 679 * should return here. 680 680 *
+3 -3
arch/powerpc/perf/isa207-common.c
··· 82 82 static void mmcra_sdar_mode(u64 event, unsigned long *mmcra) 83 83 { 84 84 /* 85 - * MMCRA[SDAR_MODE] specifices how the SDAR should be updated in 86 - * continous sampling mode. 85 + * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in 86 + * continuous sampling mode. 87 87 * 88 88 * Incase of Power8: 89 - * MMCRA[SDAR_MODE] will be programmed as "0b01" for continous sampling 89 + * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous sampling 90 90 * mode and will be un-changed when setting MMCRA[63] (Marked events). 91 91 * 92 92 * Incase of Power9/power10:
+1 -1
arch/powerpc/platforms/512x/clock-commonclk.c
··· 663 663 * the PSC/MSCAN/SPDIF (serial drivers et al) need the MCLK 664 664 * for their bitrate 665 665 * - in the absence of "aliases" for clocks we need to create 666 - * individial 'struct clk' items for whatever might get 666 + * individual 'struct clk' items for whatever might get 667 667 * referenced or looked up, even if several of those items are 668 668 * identical from the logical POV (their rate value) 669 669 * - for easier future maintenance and for better reflection of
+1 -1
arch/powerpc/platforms/512x/mpc512x_shared.c
··· 289 289 290 290 /* 291 291 * We do not allocate and configure new area for bitmap buffer 292 - * because it would requere copying bitmap data (splash image) 292 + * because it would require copying bitmap data (splash image) 293 293 * and so negatively affect boot time. Instead we reserve the 294 294 * already configured frame buffer area so that it won't be 295 295 * destroyed. The starting address of the area to reserve and
+1 -1
arch/powerpc/platforms/52xx/mpc52xx_common.c
··· 308 308 309 309 spin_lock_irqsave(&gpio_lock, flags); 310 310 311 - /* Reconfiure pin-muxing to gpio */ 311 + /* Reconfigure pin-muxing to gpio */ 312 312 mux = in_be32(&simple_gpio->port_config); 313 313 out_be32(&simple_gpio->port_config, mux & (~gpio)); 314 314
+1 -1
arch/powerpc/platforms/52xx/mpc52xx_gpt.c
··· 398 398 set |= MPC52xx_GPT_MODE_CONTINUOUS; 399 399 400 400 /* Determine the number of clocks in the requested period. 64 bit 401 - * arithmatic is done here to preserve the precision until the value 401 + * arithmetic is done here to preserve the precision until the value 402 402 * is scaled back down into the u32 range. Period is in 'ns', bus 403 403 * frequency is in Hz. */ 404 404 clocks = period * (u64)gpt->ipb_freq;
+1 -1
arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
··· 104 104 * 105 105 * Configure the watermarks so DMA will always complete correctly. 106 106 * It may be worth experimenting with the ALARM value to see if 107 - * there is a performance impacit. However, if it is wrong there 107 + * there is a performance impact. However, if it is wrong there 108 108 * is a risk of DMA not transferring the last chunk of data 109 109 */ 110 110 if (write) {
+1 -1
arch/powerpc/platforms/85xx/mpc85xx_cds.c
··· 151 151 */ 152 152 case PCI_DEVICE_ID_VIA_82C586_2: 153 153 /* There are two USB controllers. 154 - * Identify them by functon number 154 + * Identify them by function number 155 155 */ 156 156 if (PCI_FUNC(dev->devfn) == 3) 157 157 dev->irq = 11;
+1 -1
arch/powerpc/platforms/86xx/gef_ppc9a.c
··· 180 180 * 181 181 * This function is called to determine whether the BSP is compatible with the 182 182 * supplied device-tree, which is assumed to be the correct one for the actual 183 - * board. It is expected thati, in the future, a kernel may support multiple 183 + * board. It is expected that, in the future, a kernel may support multiple 184 184 * boards. 185 185 */ 186 186 static int __init gef_ppc9a_probe(void)
+1 -1
arch/powerpc/platforms/86xx/gef_sbc310.c
··· 167 167 * 168 168 * This function is called to determine whether the BSP is compatible with the 169 169 * supplied device-tree, which is assumed to be the correct one for the actual 170 - * board. It is expected thati, in the future, a kernel may support multiple 170 + * board. It is expected that, in the future, a kernel may support multiple 171 171 * boards. 172 172 */ 173 173 static int __init gef_sbc310_probe(void)
+1 -1
arch/powerpc/platforms/86xx/gef_sbc610.c
··· 157 157 * 158 158 * This function is called to determine whether the BSP is compatible with the 159 159 * supplied device-tree, which is assumed to be the correct one for the actual 160 - * board. It is expected thati, in the future, a kernel may support multiple 160 + * board. It is expected that, in the future, a kernel may support multiple 161 161 * boards. 162 162 */ 163 163 static int __init gef_sbc610_probe(void)
+1 -1
arch/powerpc/platforms/book3s/vas-api.c
··· 30 30 * 31 31 * where "vas_copy" and "vas_paste" are defined in copy-paste.h. 32 32 * copy/paste returns to the user space directly. So refer NX hardware 33 - * documententation for exact copy/paste usage and completion / error 33 + * documentation for exact copy/paste usage and completion / error 34 34 * conditions. 35 35 */ 36 36
+1 -1
arch/powerpc/platforms/cell/cbe_regs.c
··· 23 23 * Current implementation uses "cpu" nodes. We build our own mapping 24 24 * array of cpu numbers to cpu nodes locally for now to allow interrupt 25 25 * time code to have a fast path rather than call of_get_cpu_node(). If 26 - * we implement cpu hotplug, we'll have to install an appropriate norifier 26 + * we implement cpu hotplug, we'll have to install an appropriate notifier 27 27 * in order to release references to the cpu going away 28 28 */ 29 29 static struct cbe_regs_map
+1 -1
arch/powerpc/platforms/cell/iommu.c
··· 582 582 { 583 583 struct device *dev = data; 584 584 585 - /* We are only intereted in device addition */ 585 + /* We are only interested in device addition */ 586 586 if (action != BUS_NOTIFY_ADD_DEVICE) 587 587 return 0; 588 588
+1 -1
arch/powerpc/platforms/cell/spider-pci.c
··· 81 81 /* 82 82 * On CellBlade, we can't know that which XDR memory is used by 83 83 * kmalloc() to allocate dummy_page_va. 84 - * In order to imporve the performance, the XDR which is used to 84 + * In order to improve the performance, the XDR which is used to 85 85 * allocate dummy_page_va is the nearest the spider-pci. 86 86 * We have to select the CBE which is the nearest the spider-pci 87 87 * to allocate memory from the best XDR, but I don't know that
+1 -1
arch/powerpc/platforms/cell/spu_manage.c
··· 457 457 458 458 /* 459 459 * Walk through each phandle in vicinity property of the spu 460 - * (tipically two vicinity phandles per spe node) 460 + * (typically two vicinity phandles per spe node) 461 461 */ 462 462 for (i = 0; i < (lenp / sizeof(phandle)); i++) { 463 463 if (vic_handles[i] == avoid_ph)
+1 -1
arch/powerpc/platforms/powermac/low_i2c.c
··· 1472 1472 smu_i2c_probe(); 1473 1473 #endif 1474 1474 1475 - /* Now add plaform functions for some known devices */ 1475 + /* Now add platform functions for some known devices */ 1476 1476 pmac_i2c_devscan(pmac_i2c_dev_create); 1477 1477 1478 1478 return 0;
+5 -5
arch/powerpc/platforms/powernv/eeh-powernv.c
··· 390 390 * should be blocked until PE reset. MMIO access is dropped 391 391 * by hardware certainly. In order to drop PCI config requests, 392 392 * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which 393 - * will be checked in the backend for PE state retrival. If 393 + * will be checked in the backend for PE state retrieval. If 394 394 * the PE becomes frozen for the first time and the flag has 395 395 * been set for the PE, we will set EEH_PE_CFG_BLOCKED for 396 396 * that PE to block its config space. ··· 981 981 case EEH_RESET_FUNDAMENTAL: 982 982 /* 983 983 * Wait for Transaction Pending bit to clear. A word-aligned 984 - * test is used, so we use the conrol offset rather than status 984 + * test is used, so we use the control offset rather than status 985 985 * and shift the test bit to match. 986 986 */ 987 987 pnv_eeh_wait_for_pending(pdn, "AF", ··· 1048 1048 * frozen state during PE reset. However, the good idea here from 1049 1049 * benh is to keep frozen state before we get PE reset done completely 1050 1050 * (until BAR restore). With the frozen state, HW drops illegal IO 1051 - * or MMIO access, which can incur recrusive frozen PE during PE 1051 + * or MMIO access, which can incur recursive frozen PE during PE 1052 1052 * reset. The side effect is that EEH core has to clear the frozen 1053 1053 * state explicitly after BAR restore. 1054 1054 */ ··· 1095 1095 * bus is behind a hotplug slot and it will use the slot provided 1096 1096 * reset methods to prevent spurious hotplug events during the reset. 1097 1097 * 1098 - * Fundemental resets need to be handled internally to EEH since the 1099 - * PCI core doesn't really have a concept of a fundemental reset, 1098 + * Fundamental resets need to be handled internally to EEH since the 1099 + * PCI core doesn't really have a concept of a fundamental reset, 1100 1100 * mainly because there's no standard way to generate one. Only a 1101 1101 * few devices require an FRESET so it should be fine. 1102 1102 */
+2 -2
arch/powerpc/platforms/powernv/idle.c
··· 112 112 if (rc != 0) 113 113 return rc; 114 114 115 - /* Only p8 needs to set extra HID regiters */ 115 + /* Only p8 needs to set extra HID registers */ 116 116 if (!cpu_has_feature(CPU_FTR_ARCH_300)) { 117 117 uint64_t hid1_val = mfspr(SPRN_HID1); 118 118 uint64_t hid4_val = mfspr(SPRN_HID4); ··· 1204 1204 * The idle code does not deal with TB loss occurring 1205 1205 * in a shallower state than SPR loss, so force it to 1206 1206 * behave like SPRs are lost if TB is lost. POWER9 would 1207 - * never encouter this, but a POWER8 core would if it 1207 + * never encounter this, but a POWER8 core would if it 1208 1208 * implemented the stop instruction. So this is for forward 1209 1209 * compatibility. 1210 1210 */
+1 -1
arch/powerpc/platforms/powernv/ocxl.c
··· 289 289 * be used by a function depends on how many functions exist 290 290 * on the device. The NPU needs to be configured to know how 291 291 * many bits are available to PASIDs and how many are to be 292 - * used by the function BDF indentifier. 292 + * used by the function BDF identifier. 293 293 * 294 294 * We only support one AFU-carrying function for now. 295 295 */
+1 -1
arch/powerpc/platforms/powernv/opal-fadump.c
··· 206 206 opal_fdm->region_cnt = cpu_to_be16(reg_cnt); 207 207 208 208 /* 209 - * Kernel metadata is passed to f/w and retrieved in capture kerenl. 209 + * Kernel metadata is passed to f/w and retrieved in capture kernel. 210 210 * So, use it to save fadump header address instead of calculating it. 211 211 */ 212 212 opal_fdm->fadumphdr_addr = cpu_to_be64(be64_to_cpu(opal_fdm->rgn[0].dest) +
+1 -1
arch/powerpc/platforms/powernv/opal-lpc.c
··· 197 197 198 198 /* 199 199 * Select access size based on count and alignment and 200 - * access type. IO and MEM only support byte acceses, 200 + * access type. IO and MEM only support byte accesses, 201 201 * FW supports all 3. 202 202 */ 203 203 len = 1;
+1 -1
arch/powerpc/platforms/powernv/opal-memory-errors.c
··· 82 82 83 83 /* 84 84 * opal_memory_err_event - notifier handler that queues up the opal message 85 - * to be preocessed later. 85 + * to be processed later. 86 86 */ 87 87 static int opal_memory_err_event(struct notifier_block *nb, 88 88 unsigned long msg_type, void *msg)
+1 -1
arch/powerpc/platforms/powernv/pci-sriov.c
··· 699 699 return -ENOSPC; 700 700 } 701 701 702 - /* allocate a contigious block of PEs for our VFs */ 702 + /* allocate a contiguous block of PEs for our VFs */ 703 703 base_pe = pnv_ioda_alloc_pe(phb, num_vfs); 704 704 if (!base_pe) { 705 705 pci_err(pdev, "Unable to allocate PEs for %d VFs\n", num_vfs);
+1 -1
arch/powerpc/platforms/ps3/mm.c
··· 364 364 * @bus_addr: Starting ioc bus address of the area to map. 365 365 * @len: Length in bytes of the area to map. 366 366 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the 367 - * list of all chuncks owned by the region. 367 + * list of all chunks owned by the region. 368 368 * 369 369 * This implementation uses a very simple dma page manager 370 370 * based on the dma_chunk structure. This scheme assumes
+1 -1
arch/powerpc/platforms/ps3/system-bus.c
··· 601 601 iopte_flag |= CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW; 602 602 break; 603 603 default: 604 - /* not happned */ 604 + /* not happened */ 605 605 BUG(); 606 606 } 607 607 result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
+1 -1
arch/powerpc/platforms/pseries/eeh_pseries.c
··· 512 512 int ret = 0; 513 513 514 514 /* 515 - * When we're enabling or disabling EEH functioality on 515 + * When we're enabling or disabling EEH functionality on 516 516 * the particular PE, the PE config address is possibly 517 517 * unavailable. Therefore, we have to figure it out from 518 518 * the FDT node.
+1 -1
arch/powerpc/platforms/pseries/iommu.c
··· 1430 1430 1431 1431 pci->table_group->tables[1] = newtbl; 1432 1432 1433 - /* Keep default DMA window stuct if removed */ 1433 + /* Keep default DMA window struct if removed */ 1434 1434 if (default_win_removed) { 1435 1435 tbl->it_size = 0; 1436 1436 vfree(tbl->it_map);
+2 -2
arch/powerpc/platforms/pseries/setup.c
··· 658 658 */ 659 659 num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1); 660 660 if (resno >= num_res) 661 - return 0; /* or an errror */ 661 + return 0; /* or an error */ 662 662 663 663 i = START_OF_ENTRIES + NEXT_ENTRY * resno; 664 664 switch (value) { ··· 762 762 763 763 if (!pdev->is_physfn) 764 764 return; 765 - /*Firmware must support open sriov otherwise dont configure*/ 765 + /*Firmware must support open sriov otherwise don't configure*/ 766 766 indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL); 767 767 if (indexes) 768 768 of_pci_parse_iov_addrs(pdev, indexes);
+1 -1
arch/powerpc/platforms/pseries/vas-sysfs.c
··· 67 67 * Create sysfs interface: 68 68 * /sys/devices/vas/vas0/gzip/default_capabilities 69 69 * This directory contains the following VAS GZIP capabilities 70 - * for the defaule credit type. 70 + * for the default credit type. 71 71 * /sys/devices/vas/vas0/gzip/default_capabilities/nr_total_credits 72 72 * Total number of default credits assigned to the LPAR which 73 73 * can be changed with DLPAR operation.
+1 -1
arch/powerpc/platforms/pseries/vas.c
··· 807 807 atomic_set(&caps->nr_total_credits, new_nr_creds); 808 808 /* 809 809 * The total number of available credits may be decreased or 810 - * inceased with DLPAR operation. Means some windows have to be 810 + * increased with DLPAR operation. Means some windows have to be 811 811 * closed / reopened. Hold the vas_pseries_mutex so that the 812 812 * the user space can not open new windows. 813 813 */
+1 -1
arch/powerpc/sysdev/fsl_lbc.c
··· 37 37 * 38 38 * This function converts a base address of lbc into the right format for the 39 39 * BR register. If the SOC has eLBC then it returns 32bit physical address 40 - * else it convers a 34bit local bus physical address to correct format of 40 + * else it converts a 34bit local bus physical address to correct format of 41 41 * 32bit address for BR register (Example: MPC8641). 42 42 */ 43 43 u32 fsl_lbc_addr(phys_addr_t addr_base)
+1 -1
arch/powerpc/sysdev/fsl_pci.c
··· 218 218 * windows have implemented the default target value as 0xf 219 219 * for CCSR space.In all Freescale legacy devices the target 220 220 * of 0xf is reserved for local memory space. 9132 Rev1.0 221 - * now has local mempry space mapped to target 0x0 instead of 221 + * now has local memory space mapped to target 0x0 instead of 222 222 * 0xf. Hence adding a workaround to remove the target 0xf 223 223 * defined for memory space from Inbound window attributes. 224 224 */
+1 -1
arch/powerpc/sysdev/ge/ge_pic.c
··· 150 150 }; 151 151 152 152 153 - /* When an interrupt is being configured, this call allows some flexibilty 153 + /* When an interrupt is being configured, this call allows some flexibility 154 154 * in deciding which irq_chip structure is used 155 155 */ 156 156 static int gef_pic_host_map(struct irq_domain *h, unsigned int virq,
+1 -1
arch/powerpc/sysdev/mpic_msgr.c
··· 99 99 EXPORT_SYMBOL_GPL(mpic_msgr_disable); 100 100 101 101 /* The following three functions are used to compute the order and number of 102 - * the message register blocks. They are clearly very inefficent. However, 102 + * the message register blocks. They are clearly very inefficient. However, 103 103 * they are called *only* a few times during device initialization. 104 104 */ 105 105 static unsigned int mpic_msgr_number_of_blocks(void)
+1 -1
arch/powerpc/sysdev/mpic_msi.c
··· 37 37 /* Reserve source numbers we know are reserved in the HW. 38 38 * 39 39 * This is a bit of a mix of U3 and U4 reserves but that's going 40 - * to work fine, we have plenty enugh numbers left so let's just 40 + * to work fine, we have plenty enough numbers left so let's just 41 41 * mark anything we don't like reserved. 42 42 */ 43 43 for (i = 0; i < 8; i++)
+1 -1
arch/powerpc/sysdev/mpic_timer.c
··· 255 255 256 256 /** 257 257 * mpic_stop_timer - stop hardware timer 258 - * @handle: the timer to be stoped 258 + * @handle: the timer to be stopped 259 259 * 260 260 * The timer periodically generates an interrupt. Unless user stops the timer. 261 261 */
+1 -1
arch/powerpc/sysdev/mpic_u3msi.c
··· 78 78 79 79 /* U4 PCIe MSIs need to write to the special register in 80 80 * the bridge that generates interrupts. There should be 81 - * theorically a register at 0xf8005000 where you just write 81 + * theoretically a register at 0xf8005000 where you just write 82 82 * the MSI number and that triggers the right interrupt, but 83 83 * unfortunately, this is busted in HW, the bridge endian swaps 84 84 * the value and hits the wrong nibble in the register.
+1 -1
arch/powerpc/sysdev/xive/native.c
··· 617 617 618 618 xive_tima_os = r.start; 619 619 620 - /* Grab size of provisionning pages */ 620 + /* Grab size of provisioning pages */ 621 621 xive_parse_provisioning(np); 622 622 623 623 /* Switch the XIVE to exploitation mode */
+1 -1
arch/powerpc/xmon/ppc-opc.c
··· 408 408 #define FXM4 FXM + 1 409 409 { 0xff, 12, insert_fxm, extract_fxm, 410 410 PPC_OPERAND_OPTIONAL | PPC_OPERAND_OPTIONAL_VALUE}, 411 - /* If the FXM4 operand is ommitted, use the sentinel value -1. */ 411 + /* If the FXM4 operand is omitted, use the sentinel value -1. */ 412 412 { -1, -1, NULL, NULL, 0}, 413 413 414 414 /* The IMM20 field in an LI instruction. */
+1 -1
arch/powerpc/xmon/xmon.c
··· 2024 2024 if (!cpu_has_feature(CPU_FTR_ARCH_206)) 2025 2025 return; 2026 2026 2027 - /* Actually some of these pre-date 2.06, but whatevs */ 2027 + /* Actually some of these pre-date 2.06, but whatever */ 2028 2028 2029 2029 printf("srr0 = %.16lx srr1 = %.16lx dsisr = %.8lx\n", 2030 2030 mfspr(SPRN_SRR0), mfspr(SPRN_SRR1), mfspr(SPRN_DSISR));