Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[IA64] spelling fixes: arch/ia64/

Spelling and apostrophe fixes in arch/ia64/.

Signed-off-by: Simon Arlott <simon@fire.lp0.eu>
Signed-off-by: Tony Luck <tony.luck@intel.com>

authored by

Simon Arlott and committed by
Tony Luck
72fdbdce 0a3fd051

+75 -75
+1 -1
arch/ia64/kernel/acpi.c
··· 791 791 early_param("additional_cpus", setup_additional_cpus); 792 792 793 793 /* 794 - * cpu_possible_map should be static, it cannot change as cpu's 794 + * cpu_possible_map should be static, it cannot change as CPUs 795 795 * are onlined, or offlined. The reason is per-cpu data-structures 796 796 * are allocated by some modules at init time, and dont expect to 797 797 * do this dynamically on cpu arrival/departure.
+1 -1
arch/ia64/kernel/crash.c
··· 163 163 return NOTIFY_DONE; 164 164 165 165 nd = (struct ia64_mca_notify_die *)args->err; 166 - /* Reason code 1 means machine check rendezous*/ 166 + /* Reason code 1 means machine check rendezvous*/ 167 167 if ((val == DIE_INIT_MONARCH_ENTER || val == DIE_INIT_SLAVE_ENTER) && 168 168 nd->sos->rv_rc == 1) 169 169 return NOTIFY_DONE;
+3 -3
arch/ia64/kernel/irq.c
··· 4 4 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar 5 5 * 6 6 * This file contains the code used by various IRQ handling routines: 7 - * asking for different IRQ's should be done through these routines 7 + * asking for different IRQs should be done through these routines 8 8 * instead of just grabbing them. Thus setups with different IRQ numbers 9 9 * shouldn't result in any weird surprises, and installing new handlers 10 10 * should be easier. ··· 12 12 * Copyright (C) Ashok Raj<ashok.raj@intel.com>, Intel Corporation 2004 13 13 * 14 14 * 4/14/2004: Added code to handle cpu migration and do safe irq 15 - * migration without lossing interrupts for iosapic 15 + * migration without losing interrupts for iosapic 16 16 * architecture. 17 17 */ 18 18 ··· 190 190 } 191 191 192 192 /* 193 - * Phase 1: Locate irq's bound to this cpu and 193 + * Phase 1: Locate IRQs bound to this cpu and 194 194 * relocate them for cpu removal. 195 195 */ 196 196 migrate_irqs();
+1 -1
arch/ia64/kernel/irq_lsapic.c
··· 23 23 static void 24 24 lsapic_noop (unsigned int irq) 25 25 { 26 - /* nuthing to do... */ 26 + /* nothing to do... */ 27 27 } 28 28 29 29 static int lsapic_retrigger(unsigned int irq)
+3 -3
arch/ia64/kernel/kprobes.c
··· 151 151 152 152 cmp_inst.l = kprobe_inst; 153 153 if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) { 154 - /* Integere compare - Register Register (A6 type)*/ 154 + /* Integer compare - Register Register (A6 type)*/ 155 155 if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0) 156 156 &&(cmp_inst.f.c == 1)) 157 157 ctype_unc = 1; 158 158 } else if ((cmp_inst.f.x2 == 2)||(cmp_inst.f.x2 == 3)) { 159 - /* Integere compare - Immediate Register (A8 type)*/ 159 + /* Integer compare - Immediate Register (A8 type)*/ 160 160 if ((cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1)) 161 161 ctype_unc = 1; 162 162 } ··· 954 954 /* 955 955 * Callee owns the argument space and could overwrite it, eg 956 956 * tail call optimization. So to be absolutely safe 957 - * we save the argument space before transfering the control 957 + * we save the argument space before transferring the control 958 958 * to instrumented jprobe function which runs in 959 959 * the process context 960 960 */
+2 -2
arch/ia64/kernel/mca_drv.c
··· 438 438 * @peidx: pointer of index of processor error section 439 439 * 440 440 * Return value: 441 - * target address on Success / 0 on Failue 441 + * target address on Success / 0 on Failure 442 442 */ 443 443 static u64 444 444 get_target_identifier(peidx_table_t *peidx) ··· 701 701 return fatal_mca("External bus check fatal status"); 702 702 703 703 /* 704 - * This is a local MCA and estimated as a recoverble error. 704 + * This is a local MCA and estimated as a recoverable error. 705 705 */ 706 706 if (platform) 707 707 return recover_from_platform_error(slidx, peidx, pbci, sos);
+1 -1
arch/ia64/kernel/module.c
··· 861 861 /* 862 862 * Modules contain a single unwind table which covers both the core and the init text 863 863 * sections but since the two are not contiguous, we need to split this table up such that 864 - * we can register (and unregister) each "segment" seperately. Fortunately, this sounds 864 + * we can register (and unregister) each "segment" separately. Fortunately, this sounds 865 865 * more complicated than it really is. 866 866 */ 867 867 static void
+9 -9
arch/ia64/kernel/perfmon.c
··· 1318 1318 { 1319 1319 unsigned long flags; 1320 1320 /* 1321 - * validy checks on cpu_mask have been done upstream 1321 + * validity checks on cpu_mask have been done upstream 1322 1322 */ 1323 1323 LOCK_PFS(flags); 1324 1324 ··· 1384 1384 { 1385 1385 unsigned long flags; 1386 1386 /* 1387 - * validy checks on cpu_mask have been done upstream 1387 + * validity checks on cpu_mask have been done upstream 1388 1388 */ 1389 1389 LOCK_PFS(flags); 1390 1390 ··· 1835 1835 /* 1836 1836 * remove our file from the async queue, if we use this mode. 1837 1837 * This can be done without the context being protected. We come 1838 - * here when the context has become unreacheable by other tasks. 1838 + * here when the context has become unreachable by other tasks. 1839 1839 * 1840 1840 * We may still have active monitoring at this point and we may 1841 1841 * end up in pfm_overflow_handler(). However, fasync_helper() ··· 2132 2132 filp->private_data = NULL; 2133 2133 2134 2134 /* 2135 - * if we free on the spot, the context is now completely unreacheable 2135 + * if we free on the spot, the context is now completely unreachable 2136 2136 * from the callers side. The monitored task side is also cut, so we 2137 2137 * can freely cut. 2138 2138 * ··· 2562 2562 ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1; 2563 2563 2564 2564 /* 2565 - * bitmask of all PMDs that are accesible to this context 2565 + * bitmask of all PMDs that are accessible to this context 2566 2566 */ 2567 2567 ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0]; 2568 2568 ··· 3395 3395 if (unlikely(!PMD_IS_IMPL(cnum))) goto error; 3396 3396 /* 3397 3397 * we can only read the register that we use. That includes 3398 - * the one we explicitely initialize AND the one we want included 3398 + * the one we explicitly initialize AND the one we want included 3399 3399 * in the sampling buffer (smpl_regs). 
3400 3400 * 3401 3401 * Having this restriction allows optimization in the ctxsw routine ··· 3715 3715 * if non-blocking, then we ensure that the task will go into 3716 3716 * pfm_handle_work() before returning to user mode. 3717 3717 * 3718 - * We cannot explicitely reset another task, it MUST always 3718 + * We cannot explicitly reset another task, it MUST always 3719 3719 * be done by the task itself. This works for system wide because 3720 3720 * the tool that is controlling the session is logically doing 3721 3721 * "self-monitoring". ··· 4644 4644 switch(state) { 4645 4645 case PFM_CTX_UNLOADED: 4646 4646 /* 4647 - * only comes to thios function if pfm_context is not NULL, i.e., cannot 4647 + * only comes to this function if pfm_context is not NULL, i.e., cannot 4648 4648 * be in unloaded state 4649 4649 */ 4650 4650 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid); ··· 5247 5247 5248 5248 /* 5249 5249 * main overflow processing routine. 5250 - * it can be called from the interrupt path or explicitely during the context switch code 5250 + * it can be called from the interrupt path or explicitly during the context switch code 5251 5251 */ 5252 5252 static void 5253 5253 pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
+1 -1
arch/ia64/kernel/perfmon_mckinley.h
··· 181 181 .pmc_desc = pfm_mck_pmc_desc, 182 182 .num_ibrs = 8, 183 183 .num_dbrs = 8, 184 - .use_rr_dbregs = 1 /* debug register are use for range retrictions */ 184 + .use_rr_dbregs = 1 /* debug register are use for range restrictions */ 185 185 }; 186 186 187 187
+1 -1
arch/ia64/kernel/sal.c
··· 134 134 * interrupt redirection. The reason is this would require that 135 135 * All interrupts be stopped and hard bind the irq to a cpu. 136 136 * Later when the interrupt is fired we need to set the redir hint 137 - * on again in the vector. This is combersome for something that the 137 + * on again in the vector. This is cumbersome for something that the 138 138 * user mode irq balancer will solve anyways. 139 139 */ 140 140 no_int_routing=1;
+1 -1
arch/ia64/kernel/salinfo.c
··· 162 162 /** salinfo_platform_oemdata - optional callback to decode oemdata from an error 163 163 * record. 164 164 * @sect_header: pointer to the start of the section to decode. 165 - * @oemdata: returns vmalloc area containing the decded output. 165 + * @oemdata: returns vmalloc area containing the decoded output. 166 166 * @oemdata_size: returns length of decoded output (strlen). 167 167 * 168 168 * Description: If user space asks for oem data to be decoded by the kernel
+3 -3
arch/ia64/kernel/setup.c
··· 576 576 } 577 577 578 578 /* 579 - * Display cpu info for all cpu's. 579 + * Display cpu info for all CPUs. 580 580 */ 581 581 static int 582 582 show_cpuinfo (struct seq_file *m, void *v) ··· 761 761 c->cpu = smp_processor_id(); 762 762 763 763 /* below default values will be overwritten by identify_siblings() 764 - * for Multi-Threading/Multi-Core capable cpu's 764 + * for Multi-Threading/Multi-Core capable CPUs 765 765 */ 766 766 c->threads_per_core = c->cores_per_socket = c->num_log = 1; 767 767 c->socket_id = -1; ··· 947 947 ia32_cpu_init(); 948 948 #endif 949 949 950 - /* Clear ITC to eliminiate sched_clock() overflows in human time. */ 950 + /* Clear ITC to eliminate sched_clock() overflows in human time. */ 951 951 ia64_set_itc(0); 952 952 953 953 /* disable all local interrupt sources: */
+6 -6
arch/ia64/kernel/smp.c
··· 186 186 } 187 187 188 188 /* 189 - * Called with preeemption disabled. 189 + * Called with preemption disabled. 190 190 */ 191 191 static inline void 192 192 send_IPI_single (int dest_cpu, int op) ··· 196 196 } 197 197 198 198 /* 199 - * Called with preeemption disabled. 199 + * Called with preemption disabled. 200 200 */ 201 201 static inline void 202 202 send_IPI_allbutself (int op) ··· 210 210 } 211 211 212 212 /* 213 - * Called with preeemption disabled. 213 + * Called with preemption disabled. 214 214 */ 215 215 static inline void 216 216 send_IPI_all (int op) ··· 223 223 } 224 224 225 225 /* 226 - * Called with preeemption disabled. 226 + * Called with preemption disabled. 227 227 */ 228 228 static inline void 229 229 send_IPI_self (int op) ··· 252 252 } 253 253 #endif 254 254 /* 255 - * Called with preeemption disabled. 255 + * Called with preemption disabled. 256 256 */ 257 257 void 258 258 smp_send_reschedule (int cpu) ··· 261 261 } 262 262 263 263 /* 264 - * Called with preeemption disabled. 264 + * Called with preemption disabled. 265 265 */ 266 266 static void 267 267 smp_send_local_flush_tlb (int cpu)
+3 -3
arch/ia64/kernel/smpboot.c
··· 694 694 set_cpei_target_cpu(new_cpei_cpu); 695 695 desc = irq_desc + ia64_cpe_irq; 696 696 /* 697 - * Switch for now, immediatly, we need to do fake intr 697 + * Switch for now, immediately, we need to do fake intr 698 698 * as other interrupts, but need to study CPEI behaviour with 699 699 * polling before making changes. 700 700 */ ··· 840 840 } 841 841 842 842 /* 843 - * Assume that CPU's have been discovered by some platform-dependent interface. For 843 + * Assume that CPUs have been discovered by some platform-dependent interface. For 844 844 * SoftSDV/Lion, that would be ACPI. 845 845 * 846 846 * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP(). ··· 854 854 } *ap_startup; 855 855 long sal_ret; 856 856 857 - /* Tell SAL where to drop the AP's. */ 857 + /* Tell SAL where to drop the APs. */ 858 858 ap_startup = (struct fptr *) start_ap; 859 859 sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ, 860 860 ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
+1 -1
arch/ia64/kernel/time.c
··· 216 216 #ifdef CONFIG_SMP 217 217 /* On IA64 in an SMP configuration ITCs are never accurately synchronized. 218 218 * Jitter compensation requires a cmpxchg which may limit 219 - * the scalibility of the syscalls for retrieving time. 219 + * the scalability of the syscalls for retrieving time. 220 220 * The ITC synchronization is usually successful to within a few 221 221 * ITC ticks but this is not a sure thing. If you need to improve 222 222 * timer performance in SMP situations then boot the kernel with the
+1 -1
arch/ia64/kernel/traps.c
··· 304 304 * Lower 4 bits are used as a count. Upper bits are a sequence 305 305 * number that is updated when count is reset. The cmpxchg will 306 306 * fail is seqno has changed. This minimizes mutiple cpus 307 - * reseting the count. 307 + * resetting the count. 308 308 */ 309 309 if (current_jiffies > last.time) 310 310 (void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));
+1 -1
arch/ia64/kernel/unwind.c
··· 2 2 * Copyright (C) 1999-2004 Hewlett-Packard Co 3 3 * David Mosberger-Tang <davidm@hpl.hp.com> 4 4 * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com> 5 - * - Change pt_regs_off() to make it less dependant on pt_regs structure. 5 + * - Change pt_regs_off() to make it less dependent on pt_regs structure. 6 6 */ 7 7 /* 8 8 * This file implements call frame unwind support for the Linux
+1 -1
arch/ia64/mm/discontig.c
··· 317 317 * node_online_map is not set for hot-added nodes at this time, 318 318 * because we are halfway through initialization of the new node's 319 319 * structures. If for_each_online_node() is used, a new node's 320 - * pg_data_ptrs will be not initialized. Insted of using it, 320 + * pg_data_ptrs will be not initialized. Instead of using it, 321 321 * pgdat_list[] is checked. 322 322 */ 323 323 for_each_node(node) {
+6 -6
arch/ia64/sn/kernel/bte.c
··· 63 63 * Use the block transfer engine to move kernel memory from src to dest 64 64 * using the assigned mode. 65 65 * 66 - * Paramaters: 66 + * Parameters: 67 67 * src - physical address of the transfer source. 68 68 * dest - physical address of the transfer destination. 69 69 * len - number of bytes to transfer from source to dest. ··· 247 247 * use the block transfer engine to move kernel 248 248 * memory from src to dest using the assigned mode. 249 249 * 250 - * Paramaters: 250 + * Parameters: 251 251 * src - physical address of the transfer source. 252 252 * dest - physical address of the transfer destination. 253 253 * len - number of bytes to transfer from source to dest. ··· 255 255 * for IBCT0/1 in the SGI documentation. 256 256 * 257 257 * NOTE: If the source, dest, and len are all cache line aligned, 258 - * then it would be _FAR_ preferrable to use bte_copy instead. 258 + * then it would be _FAR_ preferable to use bte_copy instead. 259 259 */ 260 260 bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode) 261 261 { ··· 300 300 * a standard bte copy. 301 301 * 302 302 * One nasty exception to the above rule is when the 303 - * source and destination are not symetrically 303 + * source and destination are not symmetrically 304 304 * mis-aligned. If the source offset from the first 305 305 * cache line is different from the destination offset, 306 306 * we make the first section be the entire transfer ··· 337 337 338 338 if (footBcopyDest == (headBcopyDest + headBcopyLen)) { 339 339 /* 340 - * We have two contigous bcopy 340 + * We have two contiguous bcopy 341 341 * blocks. Merge them. 342 342 */ 343 343 headBcopyLen += footBcopyLen; ··· 375 375 } else { 376 376 377 377 /* 378 - * The transfer is not symetric, we will 378 + * The transfer is not symmetric, we will 379 379 * allocate a buffer large enough for all the 380 380 * data, bte_copy into that buffer and then 381 381 * bcopy to the destination.
+2 -2
arch/ia64/sn/kernel/bte_error.c
··· 105 105 } 106 106 107 107 BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id())); 108 - /* Reenable both bte interfaces */ 108 + /* Re-enable both bte interfaces */ 109 109 imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM); 110 110 imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1; 111 111 REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval); ··· 243 243 244 244 /* 245 245 * The caller has already figured out the error type, we save that 246 - * in the bte handle structure for the thread excercising the 246 + * in the bte handle structure for the thread exercising the 247 247 * interface to consume. 248 248 */ 249 249 bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET;
+1 -1
arch/ia64/sn/kernel/io_common.c
··· 479 479 } 480 480 481 481 /* 482 - * prime sn_pci_provider[]. Individial provider init routines will 482 + * prime sn_pci_provider[]. Individual provider init routines will 483 483 * override their respective default entries. 484 484 */ 485 485
+1 -1
arch/ia64/sn/kernel/setup.c
··· 167 167 * IO on SN2 is done via SAL calls, early_printk won't work without this. 168 168 * 169 169 * This code duplicates some of the ACPI table parsing that is in efi.c & sal.c. 170 - * Any changes to those file may have to be made hereas well. 170 + * Any changes to those file may have to be made here as well. 171 171 */ 172 172 efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab); 173 173 config_tables = __va(efi_systab->tables);
+1 -1
arch/ia64/sn/kernel/sn2/sn2_smp.c
··· 104 104 * 105 105 * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order. 106 106 * Context switching user threads which have memory-mapped MMIO may cause 107 - * PIOs to issue from seperate CPUs, thus the PIO writes must be drained 107 + * PIOs to issue from separate CPUs, thus the PIO writes must be drained 108 108 * from the previous CPU's Shub before execution resumes on the new CPU. 109 109 */ 110 110 void sn_migrate(struct task_struct *task)
+4 -4
arch/ia64/sn/kernel/xpc_channel.c
··· 293 293 294 294 295 295 /* 296 - * Pull the remote per partititon specific variables from the specified 296 + * Pull the remote per partition specific variables from the specified 297 297 * partition. 298 298 */ 299 299 enum xpc_retval ··· 461 461 // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between 462 462 // >>> iterations of the for-loop, bail if set? 463 463 464 - // >>> should we impose a minumum #of entries? like 4 or 8? 464 + // >>> should we impose a minimum #of entries? like 4 or 8? 465 465 for (nentries = ch->local_nentries; nentries > 0; nentries--) { 466 466 467 467 nbytes = nentries * ch->msg_size; ··· 514 514 // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between 515 515 // >>> iterations of the for-loop, bail if set? 516 516 517 - // >>> should we impose a minumum #of entries? like 4 or 8? 517 + // >>> should we impose a minimum #of entries? like 4 or 8? 518 518 for (nentries = ch->remote_nentries; nentries > 0; nentries--) { 519 519 520 520 nbytes = nentries * ch->msg_size; ··· 1478 1478 1479 1479 1480 1480 /* 1481 - * Before proceding with the teardown we have to wait until all 1481 + * Before proceeding with the teardown we have to wait until all 1482 1482 * existing references cease. 1483 1483 */ 1484 1484 wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
+1 -1
arch/ia64/sn/kernel/xpnet.c
··· 531 531 dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp); 532 532 533 533 /* 534 - * If we wanted to allow promiscous mode to work like an 534 + * If we wanted to allow promiscuous mode to work like an 535 535 * unswitched network, this would be a good point to OR in a 536 536 * mask of partitions which should be receiving all packets. 537 537 */
+4 -4
arch/ia64/sn/pci/pci_dma.c
··· 333 333 /* 334 334 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work 335 335 * around hw issues at the pci bus level. SGI proms older than 336 - * 4.10 don't implment this. 336 + * 4.10 don't implement this. 337 337 */ 338 338 339 339 SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE, ··· 348 348 /* 349 349 * If the above failed, retry using the SAL_PROBE call which should 350 350 * be present in all proms (but which cannot work round PCI chipset 351 - * bugs). This code is retained for compatability with old 351 + * bugs). This code is retained for compatibility with old 352 352 * pre-4.10 proms, and should be removed at some point in the future. 353 353 */ 354 354 ··· 379 379 /* 380 380 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work 381 381 * around hw issues at the pci bus level. SGI proms older than 382 - * 4.10 don't implment this. 382 + * 4.10 don't implement this. 383 383 */ 384 384 385 385 SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE, ··· 394 394 /* 395 395 * If the above failed, retry using the SAL_PROBE call which should 396 396 * be present in all proms (but which cannot work round PCI chipset 397 - * bugs). This code is retained for compatability with old 397 + * bugs). This code is retained for compatibility with old 398 398 * pre-4.10 proms, and should be removed at some point in the future. 399 399 */ 400 400
+3 -3
arch/ia64/sn/pci/pcibr/pcibr_ate.c
··· 30 30 31 31 /* 32 32 * find_free_ate: Find the first free ate index starting from the given 33 - * index for the desired consequtive count. 33 + * index for the desired consecutive count. 34 34 */ 35 35 static int find_free_ate(struct ate_resource *ate_resource, int start, 36 36 int count) ··· 88 88 return -1; 89 89 90 90 /* 91 - * Find the required number of free consequtive ates. 91 + * Find the required number of free consecutive ates. 92 92 */ 93 93 start_index = 94 94 find_free_ate(ate_resource, ate_resource->lowest_free_index, ··· 105 105 /* 106 106 * Allocate "count" contiguous Bridge Address Translation Entries 107 107 * on the specified bridge to be used for PCI to XTALK mappings. 108 - * Indices in rm map range from 1..num_entries. Indicies returned 108 + * Indices in rm map range from 1..num_entries. Indices returned 109 109 * to caller range from 0..num_entries-1. 110 110 * 111 111 * Return the start index on success, -1 on failure.
+1 -1
arch/ia64/sn/pci/pcibr/pcibr_dma.c
··· 201 201 } 202 202 203 203 /* 204 - * Wrapper routine for free'ing DMA maps 204 + * Wrapper routine for freeing DMA maps 205 205 * DMA mappings for Direct 64 and 32 do not have any DMA maps. 206 206 */ 207 207 void
+3 -3
arch/ia64/sn/pci/tioca_provider.c
··· 223 223 224 224 /* 225 225 * Scan all vga controllers on this bus making sure they all 226 - * suport FW. If not, return. 226 + * support FW. If not, return. 227 227 */ 228 228 229 229 list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) { ··· 364 364 * @req_size: len (bytes) to map 365 365 * 366 366 * Map @paddr into CA address space using the GART mechanism. The mapped 367 - * dma_addr_t is guarenteed to be contiguous in CA bus space. 367 + * dma_addr_t is guaranteed to be contiguous in CA bus space. 368 368 */ 369 369 static dma_addr_t 370 370 tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size) ··· 526 526 return 0; 527 527 528 528 /* 529 - * If card is 64 or 48 bit addresable, use a direct mapping. 32 529 + * If card is 64 or 48 bit addressable, use a direct mapping. 32 530 530 * bit direct is so restrictive w.r.t. where the memory resides that 531 531 * we don't use it even though CA has some support. 532 532 */
+8 -8
arch/ia64/sn/pci/tioce_provider.c
··· 256 256 * @ct_addr: the coretalk address to map 257 257 * @len: number of bytes to map 258 258 * 259 - * Given the addressing type, set up various paramaters that define the 259 + * Given the addressing type, set up various parameters that define the 260 260 * ATE pool to use. Search for a contiguous block of entries to cover the 261 - * length, and if enough resources exist, fill in the ATE's and construct a 261 + * length, and if enough resources exist, fill in the ATEs and construct a 262 262 * tioce_dmamap struct to track the mapping. 263 263 */ 264 264 static u64 ··· 581 581 */ 582 582 if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) { 583 583 /* 584 - * We have two options for 40-bit mappings: 16GB "super" ATE's 585 - * and 64MB "regular" ATE's. We'll try both if needed for a 584 + * We have two options for 40-bit mappings: 16GB "super" ATEs 585 + * and 64MB "regular" ATEs. We'll try both if needed for a 586 586 * given mapping but which one we try first depends on the 587 587 * size. For requests >64MB, prefer to use a super page with 588 588 * regular as the fallback. Otherwise, try in the reverse order. ··· 687 687 } 688 688 689 689 /** 690 - * tioce_reserve_m32 - reserve M32 ate's for the indicated address range 691 - * @tioce_kernel: TIOCE context to reserve ate's for 690 + * tioce_reserve_m32 - reserve M32 ATEs for the indicated address range 691 + * @tioce_kernel: TIOCE context to reserve ATEs for 692 692 * @base: starting bus address to reserve 693 693 * @limit: last bus address to reserve 694 694 * ··· 763 763 764 764 /* 765 765 * Set PMU pagesize to the largest size available, and zero out 766 - * the ate's. 766 + * the ATEs. 767 767 */ 768 768 769 769 tioce_mmr = (struct tioce __iomem *)tioce_common->ce_pcibus.bs_base; ··· 784 784 } 785 785 786 786 /* 787 - * Reserve ATE's corresponding to reserved address ranges. These 787 + * Reserve ATEs corresponding to reserved address ranges. 
These 788 788 * include: 789 789 * 790 790 * Memory space covered by each PPB mem base/limit register