···791791early_param("additional_cpus", setup_additional_cpus);792792793793/*794794- * cpu_possible_map should be static, it cannot change as cpu's794794+ * cpu_possible_map should be static, it cannot change as CPUs795795 * are onlined, or offlined. The reason is per-cpu data-structures796796 * are allocated by some modules at init time, and dont expect to797797 * do this dynamically on cpu arrival/departure.
+1-1
arch/ia64/kernel/crash.c
···163163 return NOTIFY_DONE;164164165165 nd = (struct ia64_mca_notify_die *)args->err;166166- /* Reason code 1 means machine check rendezous*/166166+ /* Reason code 1 means machine check rendezvous*/167167 if ((val == DIE_INIT_MONARCH_ENTER || val == DIE_INIT_SLAVE_ENTER) &&168168 nd->sos->rv_rc == 1)169169 return NOTIFY_DONE;
+3-3
arch/ia64/kernel/irq.c
···44 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar55 *66 * This file contains the code used by various IRQ handling routines:77- * asking for different IRQ's should be done through these routines77+ * asking for different IRQs should be done through these routines88 * instead of just grabbing them. Thus setups with different IRQ numbers99 * shouldn't result in any weird surprises, and installing new handlers1010 * should be easier.···1212 * Copyright (C) Ashok Raj<ashok.raj@intel.com>, Intel Corporation 20041313 *1414 * 4/14/2004: Added code to handle cpu migration and do safe irq1515- * migration without lossing interrupts for iosapic1515+ * migration without losing interrupts for iosapic1616 * architecture.1717 */1818···190190 }191191192192 /*193193- * Phase 1: Locate irq's bound to this cpu and193193+ * Phase 1: Locate IRQs bound to this cpu and194194 * relocate them for cpu removal.195195 */196196 migrate_irqs();
+1-1
arch/ia64/kernel/irq_lsapic.c
···2323static void2424lsapic_noop (unsigned int irq)2525{2626- /* nuthing to do... */2626+ /* nothing to do... */2727}28282929static int lsapic_retrigger(unsigned int irq)
+3-3
arch/ia64/kernel/kprobes.c
···151151152152 cmp_inst.l = kprobe_inst;153153 if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {154154- /* Integere compare - Register Register (A6 type)*/154154+ /* Integer compare - Register Register (A6 type)*/155155 if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)156156 &&(cmp_inst.f.c == 1))157157 ctype_unc = 1;158158 } else if ((cmp_inst.f.x2 == 2)||(cmp_inst.f.x2 == 3)) {159159- /* Integere compare - Immediate Register (A8 type)*/159159+ /* Integer compare - Immediate Register (A8 type)*/160160 if ((cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1))161161 ctype_unc = 1;162162 }···954954 /*955955 * Callee owns the argument space and could overwrite it, eg956956 * tail call optimization. So to be absolutely safe957957- * we save the argument space before transfering the control957957+ * we save the argument space before transferring the control958958 * to instrumented jprobe function which runs in959959 * the process context960960 */
+2-2
arch/ia64/kernel/mca_drv.c
···438438 * @peidx: pointer of index of processor error section439439 *440440 * Return value:441441- * target address on Success / 0 on Failue441441+ * target address on Success / 0 on Failure442442 */443443static u64444444get_target_identifier(peidx_table_t *peidx)···701701 return fatal_mca("External bus check fatal status");702702703703 /*704704- * This is a local MCA and estimated as a recoverble error.704704+ * This is a local MCA and estimated as a recoverable error.705705 */706706 if (platform)707707 return recover_from_platform_error(slidx, peidx, pbci, sos);
+1-1
arch/ia64/kernel/module.c
···861861/*862862 * Modules contain a single unwind table which covers both the core and the init text863863 * sections but since the two are not contiguous, we need to split this table up such that864864- * we can register (and unregister) each "segment" seperately. Fortunately, this sounds864864+ * we can register (and unregister) each "segment" separately. Fortunately, this sounds865865 * more complicated than it really is.866866 */867867static void
+9-9
arch/ia64/kernel/perfmon.c
···13181318{13191319 unsigned long flags;13201320 /*13211321- * validy checks on cpu_mask have been done upstream13211321+ * validity checks on cpu_mask have been done upstream13221322 */13231323 LOCK_PFS(flags);13241324···13841384{13851385 unsigned long flags;13861386 /*13871387- * validy checks on cpu_mask have been done upstream13871387+ * validity checks on cpu_mask have been done upstream13881388 */13891389 LOCK_PFS(flags);13901390···18351835 /*18361836 * remove our file from the async queue, if we use this mode.18371837 * This can be done without the context being protected. We come18381838- * here when the context has become unreacheable by other tasks.18381838+ * here when the context has become unreachable by other tasks.18391839 *18401840 * We may still have active monitoring at this point and we may18411841 * end up in pfm_overflow_handler(). However, fasync_helper()···21322132 filp->private_data = NULL;2133213321342134 /*21352135- * if we free on the spot, the context is now completely unreacheable21352135+ * if we free on the spot, the context is now completely unreachable21362136 * from the callers side. The monitored task side is also cut, so we21372137 * can freely cut.21382138 *···25622562 ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;2563256325642564 /*25652565- * bitmask of all PMDs that are accesible to this context25652565+ * bitmask of all PMDs that are accessible to this context25662566 */25672567 ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];25682568···33953395 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;33963396 /*33973397 * we can only read the register that we use. 
That includes33983398- * the one we explicitely initialize AND the one we want included33983398+ * the one we explicitly initialize AND the one we want included33993399 * in the sampling buffer (smpl_regs).34003400 *34013401 * Having this restriction allows optimization in the ctxsw routine···37153715 * if non-blocking, then we ensure that the task will go into37163716 * pfm_handle_work() before returning to user mode.37173717 *37183718- * We cannot explicitely reset another task, it MUST always37183718+ * We cannot explicitly reset another task, it MUST always37193719 * be done by the task itself. This works for system wide because37203720 * the tool that is controlling the session is logically doing 37213721 * "self-monitoring".···46444644 switch(state) {46454645 case PFM_CTX_UNLOADED:46464646 /*46474647- * only comes to thios function if pfm_context is not NULL, i.e., cannot46474647+ * only comes to this function if pfm_context is not NULL, i.e., cannot46484648 * be in unloaded state46494649 */46504650 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);···5247524752485248/*52495249 * main overflow processing routine.52505250- * it can be called from the interrupt path or explicitely during the context switch code52505250+ * it can be called from the interrupt path or explicitly during the context switch code52515251 */52525252static void52535253pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
+1-1
arch/ia64/kernel/perfmon_mckinley.h
···181181 .pmc_desc = pfm_mck_pmc_desc,182182 .num_ibrs = 8,183183 .num_dbrs = 8,184184- .use_rr_dbregs = 1 /* debug register are use for range retrictions */184184+ .use_rr_dbregs = 1 /* debug register are use for range restrictions */185185};186186187187
+1-1
arch/ia64/kernel/sal.c
···134134 * interrupt redirection. The reason is this would require that135135 * All interrupts be stopped and hard bind the irq to a cpu.136136 * Later when the interrupt is fired we need to set the redir hint137137- * on again in the vector. This is combersome for something that the137137+ * on again in the vector. This is cumbersome for something that the138138 * user mode irq balancer will solve anyways.139139 */140140 no_int_routing=1;
+1-1
arch/ia64/kernel/salinfo.c
···162162/** salinfo_platform_oemdata - optional callback to decode oemdata from an error163163 * record.164164 * @sect_header: pointer to the start of the section to decode.165165- * @oemdata: returns vmalloc area containing the decded output.165165+ * @oemdata: returns vmalloc area containing the decoded output.166166 * @oemdata_size: returns length of decoded output (strlen).167167 *168168 * Description: If user space asks for oem data to be decoded by the kernel
+3-3
arch/ia64/kernel/setup.c
···576576}577577578578/*579579- * Display cpu info for all cpu's.579579+ * Display cpu info for all CPUs.580580 */581581static int582582show_cpuinfo (struct seq_file *m, void *v)···761761 c->cpu = smp_processor_id();762762763763 /* below default values will be overwritten by identify_siblings() 764764- * for Multi-Threading/Multi-Core capable cpu's764764+ * for Multi-Threading/Multi-Core capable CPUs765765 */766766 c->threads_per_core = c->cores_per_socket = c->num_log = 1;767767 c->socket_id = -1;···947947 ia32_cpu_init();948948#endif949949950950- /* Clear ITC to eliminiate sched_clock() overflows in human time. */950950+ /* Clear ITC to eliminate sched_clock() overflows in human time. */951951 ia64_set_itc(0);952952953953 /* disable all local interrupt sources: */
+6-6
arch/ia64/kernel/smp.c
···186186}187187188188/*189189- * Called with preeemption disabled.189189+ * Called with preemption disabled.190190 */191191static inline void192192send_IPI_single (int dest_cpu, int op)···196196}197197198198/*199199- * Called with preeemption disabled.199199+ * Called with preemption disabled.200200 */201201static inline void202202send_IPI_allbutself (int op)···210210}211211212212/*213213- * Called with preeemption disabled.213213+ * Called with preemption disabled.214214 */215215static inline void216216send_IPI_all (int op)···223223}224224225225/*226226- * Called with preeemption disabled.226226+ * Called with preemption disabled.227227 */228228static inline void229229send_IPI_self (int op)···252252}253253#endif254254/*255255- * Called with preeemption disabled.255255+ * Called with preemption disabled.256256 */257257void258258smp_send_reschedule (int cpu)···261261}262262263263/*264264- * Called with preeemption disabled.264264+ * Called with preemption disabled.265265 */266266static void267267smp_send_local_flush_tlb (int cpu)
+3-3
arch/ia64/kernel/smpboot.c
···694694 set_cpei_target_cpu(new_cpei_cpu);695695 desc = irq_desc + ia64_cpe_irq;696696 /*697697- * Switch for now, immediatly, we need to do fake intr697697+ * Switch for now, immediately, we need to do fake intr698698 * as other interrupts, but need to study CPEI behaviour with699699 * polling before making changes.700700 */···840840}841841842842/*843843- * Assume that CPU's have been discovered by some platform-dependent interface. For843843+ * Assume that CPUs have been discovered by some platform-dependent interface. For844844 * SoftSDV/Lion, that would be ACPI.845845 *846846 * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().···854854 } *ap_startup;855855 long sal_ret;856856857857- /* Tell SAL where to drop the AP's. */857857+ /* Tell SAL where to drop the APs. */858858 ap_startup = (struct fptr *) start_ap;859859 sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,860860 ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
+1-1
arch/ia64/kernel/time.c
···216216#ifdef CONFIG_SMP217217 /* On IA64 in an SMP configuration ITCs are never accurately synchronized.218218 * Jitter compensation requires a cmpxchg which may limit219219- * the scalibility of the syscalls for retrieving time.219219+ * the scalability of the syscalls for retrieving time.220220 * The ITC synchronization is usually successful to within a few221221 * ITC ticks but this is not a sure thing. If you need to improve222222 * timer performance in SMP situations then boot the kernel with the
+1-1
arch/ia64/kernel/traps.c
···304304 * Lower 4 bits are used as a count. Upper bits are a sequence305305 * number that is updated when count is reset. The cmpxchg will306306 * fail is seqno has changed. This minimizes mutiple cpus307307- * reseting the count.307307+ * resetting the count.308308 */309309 if (current_jiffies > last.time)310310 (void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));
+1-1
arch/ia64/kernel/unwind.c
···22 * Copyright (C) 1999-2004 Hewlett-Packard Co33 * David Mosberger-Tang <davidm@hpl.hp.com>44 * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>55- * - Change pt_regs_off() to make it less dependant on pt_regs structure.55+ * - Change pt_regs_off() to make it less dependent on pt_regs structure.66 */77/*88 * This file implements call frame unwind support for the Linux
+1-1
arch/ia64/mm/discontig.c
···317317 * node_online_map is not set for hot-added nodes at this time,318318 * because we are halfway through initialization of the new node's319319 * structures. If for_each_online_node() is used, a new node's320320- * pg_data_ptrs will be not initialized. Insted of using it,320320+ * pg_data_ptrs will be not initialized. Instead of using it,321321 * pgdat_list[] is checked.322322 */323323 for_each_node(node) {
+6-6
arch/ia64/sn/kernel/bte.c
···6363 * Use the block transfer engine to move kernel memory from src to dest6464 * using the assigned mode.6565 *6666- * Paramaters:6666+ * Parameters:6767 * src - physical address of the transfer source.6868 * dest - physical address of the transfer destination.6969 * len - number of bytes to transfer from source to dest.···247247 * use the block transfer engine to move kernel248248 * memory from src to dest using the assigned mode.249249 *250250- * Paramaters:250250+ * Parameters:251251 * src - physical address of the transfer source.252252 * dest - physical address of the transfer destination.253253 * len - number of bytes to transfer from source to dest.···255255 * for IBCT0/1 in the SGI documentation.256256 *257257 * NOTE: If the source, dest, and len are all cache line aligned,258258- * then it would be _FAR_ preferrable to use bte_copy instead.258258+ * then it would be _FAR_ preferable to use bte_copy instead.259259 */260260bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)261261{···300300 * a standard bte copy.301301 *302302 * One nasty exception to the above rule is when the303303- * source and destination are not symetrically303303+ * source and destination are not symmetrically304304 * mis-aligned. If the source offset from the first305305 * cache line is different from the destination offset,306306 * we make the first section be the entire transfer···337337338338 if (footBcopyDest == (headBcopyDest + headBcopyLen)) {339339 /*340340- * We have two contigous bcopy340340+ * We have two contiguous bcopy341341 * blocks. Merge them.342342 */343343 headBcopyLen += footBcopyLen;···375375 } else {376376377377 /*378378- * The transfer is not symetric, we will378378+ * The transfer is not symmetric, we will379379 * allocate a buffer large enough for all the380380 * data, bte_copy into that buffer and then381381 * bcopy to the destination.
+2-2
arch/ia64/sn/kernel/bte_error.c
···105105 }106106107107 BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id()));108108- /* Reenable both bte interfaces */108108+ /* Re-enable both bte interfaces */109109 imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);110110 imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;111111 REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);···243243244244 /*245245 * The caller has already figured out the error type, we save that246246- * in the bte handle structure for the thread excercising the246246+ * in the bte handle structure for the thread exercising the247247 * interface to consume.248248 */249249 bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET;
+1-1
arch/ia64/sn/kernel/io_common.c
···479479 }480480481481 /*482482- * prime sn_pci_provider[]. Individial provider init routines will482482+ * prime sn_pci_provider[]. Individual provider init routines will483483 * override their respective default entries.484484 */485485
+1-1
arch/ia64/sn/kernel/setup.c
···167167 * IO on SN2 is done via SAL calls, early_printk won't work without this.168168 *169169 * This code duplicates some of the ACPI table parsing that is in efi.c & sal.c.170170- * Any changes to those file may have to be made hereas well.170170+ * Any changes to those file may have to be made here as well.171171 */172172 efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);173173 config_tables = __va(efi_systab->tables);
+1-1
arch/ia64/sn/kernel/sn2/sn2_smp.c
···104104 *105105 * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order.106106 * Context switching user threads which have memory-mapped MMIO may cause107107- * PIOs to issue from seperate CPUs, thus the PIO writes must be drained107107+ * PIOs to issue from separate CPUs, thus the PIO writes must be drained108108 * from the previous CPU's Shub before execution resumes on the new CPU.109109 */110110void sn_migrate(struct task_struct *task)
+4-4
arch/ia64/sn/kernel/xpc_channel.c
···293293294294295295/*296296- * Pull the remote per partititon specific variables from the specified296296+ * Pull the remote per partition specific variables from the specified297297 * partition.298298 */299299enum xpc_retval···461461 // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between462462 // >>> iterations of the for-loop, bail if set?463463464464- // >>> should we impose a minumum #of entries? like 4 or 8?464464+ // >>> should we impose a minimum #of entries? like 4 or 8?465465 for (nentries = ch->local_nentries; nentries > 0; nentries--) {466466467467 nbytes = nentries * ch->msg_size;···514514 // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between515515 // >>> iterations of the for-loop, bail if set?516516517517- // >>> should we impose a minumum #of entries? like 4 or 8?517517+ // >>> should we impose a minimum #of entries? like 4 or 8?518518 for (nentries = ch->remote_nentries; nentries > 0; nentries--) {519519520520 nbytes = nentries * ch->msg_size;···147814781479147914801480 /*14811481- * Before proceding with the teardown we have to wait until all14811481+ * Before proceeding with the teardown we have to wait until all14821482 * existing references cease.14831483 */14841484 wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
+1-1
arch/ia64/sn/kernel/xpnet.c
···531531 dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp);532532533533 /*534534- * If we wanted to allow promiscous mode to work like an534534+ * If we wanted to allow promiscuous mode to work like an535535 * unswitched network, this would be a good point to OR in a536536 * mask of partitions which should be receiving all packets.537537 */
+4-4
arch/ia64/sn/pci/pci_dma.c
···333333 /*334334 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work335335 * around hw issues at the pci bus level. SGI proms older than336336- * 4.10 don't implment this.336336+ * 4.10 don't implement this.337337 */338338339339 SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,···348348 /*349349 * If the above failed, retry using the SAL_PROBE call which should350350 * be present in all proms (but which cannot work round PCI chipset351351- * bugs). This code is retained for compatability with old351351+ * bugs). This code is retained for compatibility with old352352 * pre-4.10 proms, and should be removed at some point in the future.353353 */354354···379379 /*380380 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work381381 * around hw issues at the pci bus level. SGI proms older than382382- * 4.10 don't implment this.382382+ * 4.10 don't implement this.383383 */384384385385 SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,···394394 /*395395 * If the above failed, retry using the SAL_PROBE call which should396396 * be present in all proms (but which cannot work round PCI chipset397397- * bugs). This code is retained for compatability with old397397+ * bugs). This code is retained for compatibility with old398398 * pre-4.10 proms, and should be removed at some point in the future.399399 */400400
+3-3
arch/ia64/sn/pci/pcibr/pcibr_ate.c
···30303131/*3232 * find_free_ate: Find the first free ate index starting from the given3333- * index for the desired consequtive count.3333+ * index for the desired consecutive count.3434 */3535static int find_free_ate(struct ate_resource *ate_resource, int start,3636 int count)···8888 return -1;89899090 /*9191- * Find the required number of free consequtive ates.9191+ * Find the required number of free consecutive ates.9292 */9393 start_index =9494 find_free_ate(ate_resource, ate_resource->lowest_free_index,···105105/*106106 * Allocate "count" contiguous Bridge Address Translation Entries107107 * on the specified bridge to be used for PCI to XTALK mappings.108108- * Indices in rm map range from 1..num_entries. Indicies returned108108+ * Indices in rm map range from 1..num_entries. Indices returned109109 * to caller range from 0..num_entries-1.110110 *111111 * Return the start index on success, -1 on failure.
+1-1
arch/ia64/sn/pci/pcibr/pcibr_dma.c
···201201}202202203203/*204204- * Wrapper routine for free'ing DMA maps204204+ * Wrapper routine for freeing DMA maps205205 * DMA mappings for Direct 64 and 32 do not have any DMA maps.206206 */207207void
+3-3
arch/ia64/sn/pci/tioca_provider.c
···223223224224 /*225225 * Scan all vga controllers on this bus making sure they all226226- * suport FW. If not, return.226226+ * support FW. If not, return.227227 */228228229229 list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {···364364 * @req_size: len (bytes) to map365365 *366366 * Map @paddr into CA address space using the GART mechanism. The mapped367367- * dma_addr_t is guarenteed to be contiguous in CA bus space.367367+ * dma_addr_t is guaranteed to be contiguous in CA bus space.368368 */369369static dma_addr_t370370tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size)···526526 return 0;527527528528 /*529529- * If card is 64 or 48 bit addresable, use a direct mapping. 32529529+ * If card is 64 or 48 bit addressable, use a direct mapping. 32530530 * bit direct is so restrictive w.r.t. where the memory resides that531531 * we don't use it even though CA has some support.532532 */
+8-8
arch/ia64/sn/pci/tioce_provider.c
···256256 * @ct_addr: the coretalk address to map257257 * @len: number of bytes to map258258 *259259- * Given the addressing type, set up various paramaters that define the259259+ * Given the addressing type, set up various parameters that define the260260 * ATE pool to use. Search for a contiguous block of entries to cover the261261- * length, and if enough resources exist, fill in the ATE's and construct a261261+ * length, and if enough resources exist, fill in the ATEs and construct a262262 * tioce_dmamap struct to track the mapping.263263 */264264static u64···581581 */582582 if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) {583583 /*584584- * We have two options for 40-bit mappings: 16GB "super" ATE's585585- * and 64MB "regular" ATE's. We'll try both if needed for a584584+ * We have two options for 40-bit mappings: 16GB "super" ATEs585585+ * and 64MB "regular" ATEs. We'll try both if needed for a586586 * given mapping but which one we try first depends on the587587 * size. For requests >64MB, prefer to use a super page with588588 * regular as the fallback. Otherwise, try in the reverse order.···687687}688688689689/**690690- * tioce_reserve_m32 - reserve M32 ate's for the indicated address range691691- * @tioce_kernel: TIOCE context to reserve ate's for690690+ * tioce_reserve_m32 - reserve M32 ATEs for the indicated address range691691+ * @tioce_kernel: TIOCE context to reserve ATEs for692692 * @base: starting bus address to reserve693693 * @limit: last bus address to reserve694694 *···763763764764 /*765765 * Set PMU pagesize to the largest size available, and zero out766766- * the ate's.766766+ * the ATEs.767767 */768768769769 tioce_mmr = (struct tioce __iomem *)tioce_common->ce_pcibus.bs_base;···784784 }785785786786 /*787787- * Reserve ATE's corresponding to reserved address ranges. These787787+ * Reserve ATEs corresponding to reserved address ranges. These788788 * include:789789 *790790 * Memory space covered by each PPB mem base/limit register