Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Various typo fixes

Signed-off-by: Andrea Gelmini <andrea.gelmini@gelma.net>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

+40 -40
+1 -1
Documentation/devicetree/bindings/rtc/rtc-opal.txt
··· 2 2 ------------------------ 3 3 4 4 Required properties: 5 - - comapatible: Should be "ibm,opal-rtc" 5 + - compatible: Should be "ibm,opal-rtc" 6 6 7 7 Optional properties: 8 8 - wakeup-source: Decides if the wakeup is supported or not
+1 -1
arch/powerpc/crypto/aes-spe-regs.h
··· 18 18 #define rLN r7 /* length of data to be processed */ 19 19 #define rIP r8 /* potiner to IV (CBC/CTR/XTS modes) */ 20 20 #define rKT r9 /* pointer to tweak key (XTS mode) */ 21 - #define rT0 r11 /* pointers to en-/decrpytion tables */ 21 + #define rT0 r11 /* pointers to en-/decryption tables */ 22 22 #define rT1 r10 23 23 #define rD0 r9 /* data */ 24 24 #define rD1 r14
+1 -1
arch/powerpc/include/asm/book3s/64/mmu-hash.h
··· 434 434 * function. Used in slb_allocate() and do_stab_bolted. The function 435 435 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS 436 436 * 437 - * rt = register continaing the proto-VSID and into which the 437 + * rt = register containing the proto-VSID and into which the 438 438 * VSID will be stored 439 439 * rx = scratch register (clobbered) 440 440 *
+1 -1
arch/powerpc/include/asm/eeh.h
··· 57 57 /* 58 58 * The struct is used to trace PE related EEH functionality. 59 59 * In theory, there will have one instance of the struct to 60 - * be created against particular PE. In nature, PEs corelate 60 + * be created against particular PE. In nature, PEs correlate 61 61 * to each other. the struct has to reflect that hierarchy in 62 62 * order to easily pick up those affected PEs when one particular 63 63 * PE has EEH errors.
+1 -1
arch/powerpc/include/asm/nohash/32/pte-44x.h
··· 32 32 * - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR 33 33 * 34 34 * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional 35 - * TLB2 storage attibute fields. Those are: 35 + * TLB2 storage attribute fields. Those are: 36 36 * 37 37 * TLB2: 38 38 * 0...10 11 12 13 14 15 16...31
+2 -2
arch/powerpc/include/asm/opal-api.h
··· 802 802 }; 803 803 804 804 /* 805 - * Candiate image SG list. 805 + * Candidate image SG list. 806 806 * 807 807 * length = VER | length 808 808 */ ··· 852 852 * with individual elements being 16 bits wide to fetch the system 853 853 * wide EPOW status. Each element in the buffer will contain the 854 854 * EPOW status in it's bit representation for a particular EPOW sub 855 - * class as defiend here. So multiple detailed EPOW status bits 855 + * class as defined here. So multiple detailed EPOW status bits 856 856 * specific for any sub class can be represented in a single buffer 857 857 * element as it's bit representation. 858 858 */
+1 -1
arch/powerpc/include/asm/pmac_feature.h
··· 210 210 211 211 /* PMAC_FTR_SOUND_CHIP_ENABLE (struct device_node* node, 0, int value) 212 212 * enable/disable the sound chip, whatever it is and provided it can 213 - * acually be controlled 213 + * actually be controlled 214 214 */ 215 215 #define PMAC_FTR_SOUND_CHIP_ENABLE PMAC_FTR_DEF(9) 216 216
+1 -1
arch/powerpc/include/asm/processor.h
··· 224 224 unsigned int align_ctl; /* alignment handling control */ 225 225 #ifdef CONFIG_PPC64 226 226 unsigned long start_tb; /* Start purr when proc switched in */ 227 - unsigned long accum_tb; /* Total accumilated purr for process */ 227 + unsigned long accum_tb; /* Total accumulated purr for process */ 228 228 #ifdef CONFIG_HAVE_HW_BREAKPOINT 229 229 struct perf_event *ptrace_bps[HBP_NUM]; 230 230 /*
+1 -1
arch/powerpc/include/asm/ps3av.h
··· 104 104 #define PS3AV_CMD_AV_INPUTLEN_16 0x02 105 105 #define PS3AV_CMD_AV_INPUTLEN_20 0x0a 106 106 #define PS3AV_CMD_AV_INPUTLEN_24 0x0b 107 - /* alayout */ 107 + /* av_layout */ 108 108 #define PS3AV_CMD_AV_LAYOUT_32 (1 << 0) 109 109 #define PS3AV_CMD_AV_LAYOUT_44 (1 << 1) 110 110 #define PS3AV_CMD_AV_LAYOUT_48 (1 << 2)
+1 -1
arch/powerpc/include/asm/pte-common.h
··· 96 96 #define PTE_RPN_SHIFT (PAGE_SHIFT) 97 97 #endif 98 98 99 - /* The mask convered by the RPN must be a ULL on 32-bit platforms with 99 + /* The mask covered by the RPN must be a ULL on 32-bit platforms with 100 100 * 64-bit PTEs 101 101 */ 102 102 #if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+1 -1
arch/powerpc/include/asm/smu.h
··· 185 185 * x = processor mask 186 186 * y = op. point index 187 187 * z = processor freq. step index 188 - * I haven't yet decyphered result codes 188 + * I haven't yet deciphered result codes 189 189 * 190 190 */ 191 191 #define SMU_CMD_POWER_COMMAND 0xaa
+1 -1
arch/powerpc/include/asm/tsi108.h
··· 77 77 * nodes if your board uses the Broadcom PHYs 78 78 */ 79 79 #define TSI108_PHY_MV88E 0 /* Marvel 88Exxxx PHY */ 80 - #define TSI108_PHY_BCM54XX 1 /* Broardcom BCM54xx PHY */ 80 + #define TSI108_PHY_BCM54XX 1 /* Broadcom BCM54xx PHY */ 81 81 82 82 /* Global variables */ 83 83
+1 -1
arch/powerpc/kernel/cpu_setup_6xx.S
··· 156 156 blr 157 157 158 158 /* 740/750/7400/7410 159 - * Enable Store Gathering (SGE), Address Brodcast (ABE), 159 + * Enable Store Gathering (SGE), Address Broadcast (ABE), 160 160 * Branch History Table (BHTE), Branch Target ICache (BTIC) 161 161 * Dynamic Power Management (DPM), Speculative (SPD) 162 162 * Clear Instruction cache throttling (ICTC)
+1 -1
arch/powerpc/kernel/eeh_driver.c
··· 139 139 * into it. 140 140 * 141 141 * That's just wrong.The warning in the core code is 142 - * there to tell people to fix their assymetries in 142 + * there to tell people to fix their asymmetries in 143 143 * their own code, not by abusing the core information 144 144 * to avoid it. 145 145 *
+1 -1
arch/powerpc/kernel/exceptions-64e.S
··· 453 453 sth r1,PACA_TRAP_SAVE(r13); /* store trap */ \ 454 454 b bad_stack_book3e; /* bad stack error */ 455 455 456 - /* WARNING: If you change the layout of this stub, make sure you chcek 456 + /* WARNING: If you change the layout of this stub, make sure you check 457 457 * the debug exception handler which handles single stepping 458 458 * into exceptions from userspace, and the MM code in 459 459 * arch/powerpc/mm/tlb_nohash.c which patches the branch here
+1 -1
arch/powerpc/kernel/pci_64.c
··· 82 82 83 83 /* If this is not a PHB, we only flush the hash table over 84 84 * the area mapped by this bridge. We don't play with the PTE 85 - * mappings since we might have to deal with sub-page alignemnts 85 + * mappings since we might have to deal with sub-page alignments 86 86 * so flushing the hash table is the only sane way to make sure 87 87 * that no hash entries are covering that removed bridge area 88 88 * while still allowing other busses overlapping those pages
+1 -1
arch/powerpc/kernel/process.c
··· 802 802 * this state. 803 803 * We do this using the current MSR, rather tracking it in 804 804 * some specific thread_struct bit, as it has the additional 805 - * benifit of checking for a potential TM bad thing exception. 805 + * benefit of checking for a potential TM bad thing exception. 806 806 */ 807 807 if (!MSR_TM_SUSPENDED(mfmsr())) 808 808 return;
+1 -1
arch/powerpc/kernel/rtas-proc.c
··· 698 698 /* 699 699 * Format: 700 700 * ${LETTER}${NUMBER}[[-/]${LETTER}${NUMBER} [ ... ] ] 701 - * the '.' may be an abbrevation 701 + * the '.' may be an abbreviation 702 702 */ 703 703 static void check_location_string(struct seq_file *m, const char *c) 704 704 {
+1 -1
arch/powerpc/lib/rheap.c
··· 325 325 } 326 326 EXPORT_SYMBOL_GPL(rh_init); 327 327 328 - /* Attach a free memory region, coalesces regions if adjuscent */ 328 + /* Attach a free memory region, coalesces regions if adjacent */ 329 329 int rh_attach_region(rh_info_t * info, unsigned long start, int size) 330 330 { 331 331 rh_block_t *blk;
+2 -2
arch/powerpc/mm/hash_native_64.c
··· 55 55 * We need 14 to 65 bits of va for a tlibe of 4K page 56 56 * With vpn we ignore the lower VPN_SHIFT bits already. 57 57 * And top two bits are already ignored because we can 58 - * only accomadate 76 bits in a 64 bit vpn with a VPN_SHIFT 58 + * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT 59 59 * of 12. 60 60 */ 61 61 va = vpn << VPN_SHIFT; ··· 605 605 * crashdump and all bets are off anyway. 606 606 * 607 607 * TODO: add batching support when enabled. remember, no dynamic memory here, 608 - * athough there is the control page available... 608 + * although there is the control page available... 609 609 */ 610 610 static void native_hpte_clear(void) 611 611 {
+1 -1
arch/powerpc/oprofile/cell/spu_task_sync.c
··· 51 51 * That way we can tell the difference between the 52 52 * buffer being full versus empty. 53 53 * 54 - * ASSUPTION: the buffer_lock is held when this function 54 + * ASSUMPTION: the buffer_lock is held when this function 55 55 * is called to lock the buffer, head and tail. 56 56 */ 57 57 int full = 1;
+1 -1
arch/powerpc/perf/core-book3s.c
··· 992 992 * than the previous value it will cause the delta and the counter to 993 993 * have bogus values unless we rolled a counter over. If a coutner is 994 994 * rolled back, it will be smaller, but within 256, which is the maximum 995 - * number of events to rollback at once. If we dectect a rollback 995 + * number of events to rollback at once. If we detect a rollback 996 996 * return 0. This can lead to a small lack of precision in the 997 997 * counters. 998 998 */
+1 -1
arch/powerpc/perf/hv-24x7.c
··· 1298 1298 __this_cpu_write(hv_24x7_txn_err, ret); 1299 1299 } else { 1300 1300 /* 1301 - * Assoicate the event with the HCALL request index, 1301 + * Associate the event with the HCALL request index, 1302 1302 * so ->commit_txn() can quickly find/update count. 1303 1303 */ 1304 1304 i = request_buffer->num_requests - 1;
+1 -1
arch/powerpc/perf/hv-24x7.h
··· 66 66 /* -1 if @performance_domain does not refer to a virtual processor */ 67 67 __be32 lpar_cfg_instance_id; 68 68 69 - /* size = @result_element_data_size of cointaining result. */ 69 + /* size = @result_element_data_size of containing result. */ 70 70 __u64 element_data[1]; 71 71 } __packed; 72 72
+1 -1
arch/powerpc/platforms/512x/clock-commonclk.c
··· 719 719 * most one of a mux, div, and gate each into one 'struct clk' 720 720 * item 721 721 * - PSC/MSCAN/SPDIF clock generation OTOH already is very 722 - * specific and cannot get mapped to componsites (at least not 722 + * specific and cannot get mapped to composites (at least not 723 723 * a single one, maybe two of them, but then some of these 724 724 * intermediate clock signals get referenced elsewhere (e.g. 725 725 * in the clock frequency measurement, CFM) and thus need
+2 -2
arch/powerpc/platforms/cell/iommu.c
··· 178 178 * default for now.*/ 179 179 #ifdef CELL_IOMMU_STRICT_PROTECTION 180 180 /* to avoid referencing a global, we use a trick here to setup the 181 - * protection bit. "prot" is setup to be 3 fields of 4 bits apprended 181 + * protection bit. "prot" is setup to be 3 fields of 4 bits appended 182 182 * together for each of the 3 supported direction values. It is then 183 183 * shifted left so that the fields matching the desired direction 184 184 * lands on the appropriate bits, and other bits are masked out. ··· 338 338 start_seg = base >> IO_SEGMENT_SHIFT; 339 339 segments = size >> IO_SEGMENT_SHIFT; 340 340 pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift); 341 - /* PTEs for each segment must start on a 4K bounday */ 341 + /* PTEs for each segment must start on a 4K boundary */ 342 342 pages_per_segment = max(pages_per_segment, 343 343 (1 << 12) / sizeof(unsigned long)); 344 344
+1 -1
arch/powerpc/platforms/cell/spider-pic.c
··· 217 217 chip->irq_eoi(&desc->irq_data); 218 218 } 219 219 220 - /* For hooking up the cascace we have a problem. Our device-tree is 220 + /* For hooking up the cascade we have a problem. Our device-tree is 221 221 * crap and we don't know on which BE iic interrupt we are hooked on at 222 222 * least not the "standard" way. We can reconstitute it based on two 223 223 * informations though: which BE node we are connected to and whether
+2 -2
arch/powerpc/platforms/cell/spu_base.c
··· 69 69 * spu_full_list_lock and spu_full_list_mutex held, while iterating 70 70 * through it requires either of these locks. 71 71 * 72 - * In addition spu_full_list_lock protects all assignmens to 72 + * In addition spu_full_list_lock protects all assignments to 73 73 * spu->mm. 74 74 */ 75 75 static LIST_HEAD(spu_full_list); ··· 253 253 * Setup the SPU kernel SLBs, in preparation for a context save/restore. We 254 254 * need to map both the context save area, and the save/restore code. 255 255 * 256 - * Because the lscsa and code may cross segment boundaires, we check to see 256 + * Because the lscsa and code may cross segment boundaries, we check to see 257 257 * if mappings are required for the start and end of each range. We currently 258 258 * assume that the mappings are smaller that one segment - if not, something 259 259 * is seriously wrong.
+1 -1
arch/powerpc/platforms/cell/spufs/file.c
··· 866 866 * - end of the mapped area 867 867 * 868 868 * If the file is opened without O_NONBLOCK, we wait here until 869 - * space is availabyl, but return when we have been able to 869 + * space is available, but return when we have been able to 870 870 * write something. 871 871 */ 872 872 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
+1 -1
arch/powerpc/platforms/cell/spufs/run.c
··· 435 435 436 436 /* Note: we don't need to force_sig SIGTRAP on single-step 437 437 * since we have TIF_SINGLESTEP set, thus the kernel will do 438 - * it upon return from the syscall anyawy 438 + * it upon return from the syscall anyway. 439 439 */ 440 440 if (unlikely(status & SPU_STATUS_SINGLE_STEP)) 441 441 ret = -ERESTARTSYS;
+1 -1
arch/powerpc/platforms/cell/spufs/sched.c
··· 622 622 623 623 /** 624 624 * find_victim - find a lower priority context to preempt 625 - * @ctx: canidate context for running 625 + * @ctx: candidate context for running 626 626 * 627 627 * Returns the freed physical spu to run the new context on. 628 628 */
+1 -1
arch/powerpc/platforms/powermac/low_i2c.c
··· 15 15 * This file thus provides a simple low level unified i2c interface for 16 16 * powermac that covers the various types of i2c busses used in Apple machines. 17 17 * For now, keywest, PMU and SMU, though we could add Cuda, or other bit 18 - * banging busses found on older chipstes in earlier machines if we ever need 18 + * banging busses found on older chipsets in earlier machines if we ever need 19 19 * one of them. 20 20 * 21 21 * The drivers in this file are synchronous/blocking. In addition, the
+1 -1
arch/powerpc/platforms/powernv/pci-ioda.c
··· 197 197 198 198 /* 199 199 * Strip off the segment used by the reserved PE, which is 200 - * expected to be 0 or last one of PE capabicity. 200 + * expected to be 0 or last one of PE capability. 201 201 */ 202 202 r = &phb->hose->mem_resources[1]; 203 203 if (phb->ioda.reserved_pe_idx == 0)
+1 -1
arch/powerpc/platforms/pseries/eeh_pseries.c
··· 2 2 * The file intends to implement the platform dependent EEH operations on pseries. 3 3 * Actually, the pseries platform is built based on RTAS heavily. That means the 4 4 * pseries platform dependent EEH operations will be built on RTAS calls. The functions 5 - * are devired from arch/powerpc/platforms/pseries/eeh.c and necessary cleanup has 5 + * are derived from arch/powerpc/platforms/pseries/eeh.c and necessary cleanup has 6 6 * been done. 7 7 * 8 8 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
+1 -1
arch/powerpc/platforms/pseries/io_event_irq.c
··· 113 113 * - The owner of an event is determined by combinations of scope, 114 114 * event type, and sub-type. There is no easy way to pre-sort clients 115 115 * by scope or event type alone. For example, Torrent ISR route change 116 - * event is reported with scope 0x00 (Not Applicatable) rather than 116 + * event is reported with scope 0x00 (Not Applicable) rather than 117 117 * 0x3B (Torrent-hub). It is better to let the clients to identify 118 118 * who owns the event. 119 119 */
+1 -1
arch/powerpc/platforms/pseries/setup.c
··· 367 367 { 368 368 /* 369 369 * Default handler to go into low thread priority and possibly 370 - * low power mode by cedeing processor to hypervisor 370 + * low power mode by ceding processor to hypervisor 371 371 */ 372 372 373 373 /* Indicate to hypervisor that we are idle. */