Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
[IA64] Adjust CMCI mask on CPU hotplug
[IA64] make flush_tlb_kernel_range() an inline function
[IA64] Guard elfcorehdr_addr with #if CONFIG_PROC_FS
[IA64] Fix Altix BTE error return status
[IA64] Remove assembler warnings on head.S
[IA64] Remove compiler warnings about uninitialized variable in irq_ia64.c
[IA64] set_thread_area fails in IA32 chroot
[IA64] print kernel release in OOPS to make kerneloops.org happy
[IA64] Two trivial spelling fixes
[IA64] Avoid unnecessary TLB flushes when allocating memory
[IA64] ia32 nopage
[IA64] signal: remove redundant code in setup_sigcontext()
IA64: Slim down __clear_bit_unlock

+171 -48
+2 -1
arch/ia64/hp/common/sba_iommu.c
@@ -2034,7 +2034,8 @@
 	if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
 		return 0;
 
-#if defined(CONFIG_IA64_GENERIC) && defined(CONFIG_CRASH_DUMP)
+#if defined(CONFIG_IA64_GENERIC) && defined(CONFIG_CRASH_DUMP) && \
+	defined(CONFIG_PROC_FS)
 	/* If we are booting a kdump kernel, the sba_iommu will
 	 * cause devices that were not shutdown properly to MCA
 	 * as soon as they are turned back on. Our only option for
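Note on the added CONFIG_PROC_FS guard: elfcorehdr_addr, which the guarded kdump
check reads, is only declared when CONFIG_PROC_FS is set in this tree, so the
block has to be compiled out without it. A minimal sketch of the shape (the body
is elided in the hunk above; the check shown here is an assumption):

	#if defined(CONFIG_IA64_GENERIC) && defined(CONFIG_CRASH_DUMP) && \
		defined(CONFIG_PROC_FS)
		/* elfcorehdr_addr exists only under CONFIG_PROC_FS here */
		if (elfcorehdr_addr < ELFCORE_ADDR_MAX) {
			machvec_init("dig");	/* assumed fallback to swiotlb */
			return 0;
		}
	#endif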
+12 -16
arch/ia64/ia32/binfmt_elf32.c
@@ -52,33 +52,29 @@
 extern unsigned long *ia32_gdt;
 extern struct page *ia32_gate_page;
 
-struct page *
-ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int *type)
+int
+ia32_install_shared_page (struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	struct page *pg = ia32_shared_page[smp_processor_id()];
-	get_page(pg);
-	if (type)
-		*type = VM_FAULT_MINOR;
-	return pg;
+	vmf->page = ia32_shared_page[smp_processor_id()];
+	get_page(vmf->page);
+	return 0;
 }
 
-struct page *
-ia32_install_gate_page (struct vm_area_struct *vma, unsigned long address, int *type)
+int
+ia32_install_gate_page (struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	struct page *pg = ia32_gate_page;
-	get_page(pg);
-	if (type)
-		*type = VM_FAULT_MINOR;
-	return pg;
+	vmf->page = ia32_gate_page;
+	get_page(vmf->page);
+	return 0;
 }
 
 
 static struct vm_operations_struct ia32_shared_page_vm_ops = {
-	.nopage = ia32_install_shared_page
+	.fault = ia32_install_shared_page
 };
 
 static struct vm_operations_struct ia32_gate_page_vm_ops = {
-	.nopage = ia32_install_gate_page
+	.fault = ia32_install_gate_page
 };
 
 void
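Context for the conversion above: this is part of the tree-wide switch from the
->nopage handler (return the page, report VM_FAULT_MINOR through *type) to the
->fault handler (store a referenced page in vmf->page, return a VM_FAULT_* code,
0 on success). A minimal sketch of the new convention, with hypothetical names:

	/* Hypothetical handler following the ->fault convention (sketch). */
	static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		struct page *page = example_lookup_page(vma, vmf->pgoff);

		if (!page)
			return VM_FAULT_SIGBUS;	/* no backing page */
		get_page(page);			/* the core expects a held reference */
		vmf->page = page;
		return 0;
	}

	static struct vm_operations_struct example_vm_ops = {
		.fault = example_fault,
	};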
+1
arch/ia64/kernel/head.S
@@ -1176,6 +1176,7 @@
 	RESTORE_REG(cr.dcr, r25, r17);;
 	RESTORE_REG(cr.iva, r25, r17);;
 	RESTORE_REG(cr.pta, r25, r17);;
+	srlz.d;;			// required not to violate RAW dependency
 	RESTORE_REG(cr.itv, r25, r17);;
 	RESTORE_REG(cr.pmv, r25, r17);;
 	RESTORE_REG(cr.cmcv, r25, r17);;
+2 -2
arch/ia64/kernel/irq_ia64.c
@@ -200,7 +200,7 @@
 {
 	unsigned long flags;
 	int vector, cpu;
-	cpumask_t domain;
+	cpumask_t domain = CPU_MASK_NONE;
 
 	vector = -ENOSPC;
 
@@ -340,7 +340,7 @@
 {
 	unsigned long flags;
 	int irq, vector, cpu;
-	cpumask_t domain;
+	cpumask_t domain = CPU_MASK_NONE;
 
 	irq = vector = -ENOSPC;
 	spin_lock_irqsave(&vector_lock, flags);
+33
arch/ia64/kernel/mca.c
@@ -75,6 +75,7 @@
 #include <linux/workqueue.h>
 #include <linux/cpumask.h>
 #include <linux/kdebug.h>
+#include <linux/cpu.h>
 
 #include <asm/delay.h>
 #include <asm/machvec.h>
@@ -1814,6 +1813,36 @@
 				PAGE_KERNEL));
 }
 
+static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (!cmc_polling_enabled)
+		ia64_mca_cmc_vector_enable(NULL);
+	local_irq_restore(flags);
+}
+
+static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
+				      unsigned long action,
+				      void *hcpu)
+{
+	int hotcpu = (unsigned long) hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
+					 NULL, 1, 0);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block mca_cpu_notifier __cpuinitdata = {
+	.notifier_call = mca_cpu_callback
+};
+
 /*
  * ia64_mca_init
  *
@@ -2026,6 +1995,8 @@
 {
 	if (!mca_init)
 		return 0;
+
+	register_hotcpu_notifier(&mca_cpu_notifier);
 
 	/* Setup the CMCI/P vector and handler */
 	init_timer(&cmc_poll_timer);
+5 -2
arch/ia64/kernel/process.c
@@ -27,6 +27,7 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/kdebug.h>
+#include <linux/utsname.h>
 
 #include <asm/cpu.h>
 #include <asm/delay.h>
@@ -108,8 +107,9 @@
 	print_modules();
 	printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current),
 			smp_processor_id(), current->comm);
-	printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n",
-	       regs->cr_ipsr, regs->cr_ifs, ip, print_tainted());
+	printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n",
+	       regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(),
+	       init_utsname()->release);
 	print_symbol("ip is at %s\n", ip);
 	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
 	       regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
@@ -739,6 +737,7 @@
 		ia32_drop_ia64_partial_page_list(current);
 		current->thread.task_size = IA32_PAGE_OFFSET;
 		set_fs(USER_DS);
+		memset(current->thread.tls_array, 0, sizeof(current->thread.tls_array));
 	}
 #endif
 }
+1 -9
arch/ia64/kernel/signal.c
@@ -280,15 +280,7 @@
 	err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8);	/* r15 */
 	err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip);
 
-	if (flags & IA64_SC_FLAG_IN_SYSCALL) {
-		/* Clear scratch registers if the signal interrupted a system call. */
-		err |= __put_user(0, &sc->sc_ar_ccv);		/* ar.ccv */
-		err |= __put_user(0, &sc->sc_br[7]);		/* b7 */
-		err |= __put_user(0, &sc->sc_gr[14]);		/* r14 */
-		err |= __clear_user(&sc->sc_ar25, 2*8);		/* ar.csd & ar.ssd */
-		err |= __clear_user(&sc->sc_gr[2], 2*8);	/* r2-r3 */
-		err |= __clear_user(&sc->sc_gr[16], 16*8);	/* r16-r31 */
-	} else {
+	if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) {
 		/* Copy scratch regs to sigcontext if the signal didn't interrupt a syscall. */
 		err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);	/* ar.ccv */
 		err |= __put_user(scr->pt.b7, &sc->sc_br[7]);		/* b7 */
+1 -1
arch/ia64/kernel/uncached.c
@@ -118,7 +118,7 @@
 	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
 		SetPageUncached(&page[i]);
 
-	flush_tlb_kernel_range(uc_addr, uc_adddr + IA64_GRANULE_SIZE);
+	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
 
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
 	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
+15 -3
arch/ia64/mm/tlb.c
@@ -10,6 +10,7 @@
  *		IPI based ptc implementation and A-step IPI implementation.
  * Rohit Seth <rohit.seth@intel.com>
  * Ken Chen <kenneth.w.chen@intel.com>
+ * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation
  */
 #include <linux/module.h>
 #include <linux/init.h>
@@ -90,9 +89,16 @@
 {
 	static DEFINE_SPINLOCK(ptcg_lock);
 
-	if (mm != current->active_mm || !current->mm) {
-		flush_tlb_all();
-		return;
+	struct mm_struct *active_mm = current->active_mm;
+
+	if (mm != active_mm) {
+		/* Restore region IDs for mm */
+		if (mm && active_mm) {
+			activate_context(mm);
+		} else {
+			flush_tlb_all();
+			return;
+		}
 	}
 
 	/* HW requires global serialization of ptc.ga. */
@@ -115,6 +107,10 @@
 		} while (start < end);
 	}
 	spin_unlock(&ptcg_lock);
+
+	if (mm != active_mm) {
+		activate_context(active_mm);
+	}
 }
 
 void
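Why this avoids the global flush: ptc.ga only matches translations under the
region IDs currently installed in the region registers, so purging a range on
behalf of another mm just needs that mm's RIDs live for the duration, instead
of falling back to ptc.e via flush_tlb_all(). A condensed restatement of the
resulting flow in ia64_global_tlb_purge() (sketch; the ptc.ga loop is elided):

	/* Condensed restatement of the patched ia64_global_tlb_purge(). */
	static void sketch_global_purge(struct mm_struct *mm, unsigned long start,
					unsigned long end, unsigned long nbits)
	{
		struct mm_struct *active_mm = current->active_mm;

		if (mm != active_mm) {
			if (!mm || !active_mm) {
				flush_tlb_all();	/* no region IDs to borrow */
				return;
			}
			activate_context(mm);		/* install mm's region IDs */
		}

		/* ... serialized ptc.ga loop over [start, end) ... */

		if (mm != active_mm)
			activate_context(active_mm);	/* restore the caller's RIDs */
	}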
+2 -2
arch/ia64/sn/kernel/bte.c
@@ -3,7 +3,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
  */
 
 #include <linux/module.h>
@@ -227,7 +227,7 @@
 			   BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
 
 	if (transfer_stat & IBLS_ERROR) {
-		bte_status = transfer_stat & ~IBLS_ERROR;
+		bte_status = BTE_GET_ERROR_STATUS(transfer_stat);
 	} else {
 		bte_status = BTE_SUCCESS;
 	}
+6 -2
arch/ia64/sn/kernel/bte_error.c
@@ -3,7 +3,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
  */
 
 #include <linux/types.h>
@@ -148,7 +148,11 @@
 	for (i = 0; i < BTES_PER_NODE; i++) {
 		bte = &err_nodepda->bte_if[i];
 		status = BTE_LNSTAT_LOAD(bte);
-		if ((status & IBLS_ERROR) || !(status & IBLS_BUSY))
+		if (status & IBLS_ERROR) {
+			bte->bh_error = BTE_SHUB2_ERROR(status);
+			continue;
+		}
+		if (!(status & IBLS_BUSY))
 			continue;
 		mod_timer(recovery_timer, jiffies + (HZ * 5));
 		BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
+1 -1
arch/ia64/sn/pci/tioce_provider.c
@@ -41,7 +41,7 @@
  *	} else
  *		do desired mmr access
  *
- * According to hw, we can use reads instead of writes to the above addres
+ * According to hw, we can use reads instead of writes to the above address
  *
  * Note this WAR can only to be used for accessing internal MMR's in the
  * TIOCE Coretalk Address Range 0x0 - 0x07ff_ffff. This includes the
+14 -3
include/asm-ia64/bitops.h
@@ -124,10 +124,21 @@
 /**
  * __clear_bit_unlock - Non-atomically clear a bit with release
  *
- * This is like clear_bit_unlock, but the implementation may use a non-atomic
- * store (this one uses an atomic, however).
+ * This is like clear_bit_unlock, but the implementation uses a store
+ * with release semantics. See also __raw_spin_unlock().
  */
-#define __clear_bit_unlock clear_bit_unlock
+static __inline__ void
+__clear_bit_unlock(int nr, volatile void *addr)
+{
+	__u32 mask, new;
+	volatile __u32 *m;
+
+	m = (volatile __u32 *)addr + (nr >> 5);
+	mask = ~(1 << (nr & 31));
+	new = *m & mask;
+	barrier();
+	ia64_st4_rel_nta(m, new);
+}
 
 /**
  * __clear_bit - Clears a bit in memory (non-atomic version)
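Usage sketch for the new inline (not from this merge): __clear_bit_unlock is
the unlock half of a bit lock taken with test_and_set_bit_lock(). The plain
load/mask plus st4.rel store is safe only because the lock holder excludes all
other writers to the word while the bit is held, so the non-atomic
read-modify-write cannot lose a concurrent update:

	/* Illustrative bit lock built on the lock/unlock bitops. */
	static inline void example_bit_lock(int nr, volatile unsigned long *word)
	{
		while (test_and_set_bit_lock(nr, word))	/* acquire semantics */
			cpu_relax();
	}

	static inline void example_bit_unlock(int nr, volatile unsigned long *word)
	{
		/* Release store; pairs with the acquire above. */
		__clear_bit_unlock(nr, word);
	}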
+5
include/asm-ia64/gcc_intrin.h
@@ -191,6 +191,11 @@
 	asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x));	\
 })
 
+#define ia64_st4_rel_nta(m, val)					\
+({									\
+	asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val));	\
+})
+
 #define ia64_stfs(x, regnum)						\
 ({									\
 	register double __f__ asm ("f"#regnum);				\
+1 -1
include/asm-ia64/hw_irq.h
@@ -63,7 +63,7 @@
 #define IA64_NUM_DEVICE_VECTORS	(IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1)
 
 #define IA64_MCA_RENDEZ_VECTOR		0xe8	/* MCA rendez interrupt */
-#define IA64_PERFMON_VECTOR		0xee	/* performanc monitor interrupt vector */
+#define IA64_PERFMON_VECTOR		0xee	/* performance monitor interrupt vector */
 #define IA64_TIMER_VECTOR		0xef	/* use highest-prio group 15 interrupt for timer */
 #define IA64_MCA_WAKEUP_VECTOR		0xf0	/* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */
 #define IA64_IPI_LOCAL_TLB_FLUSH	0xfc	/* SMP flush local TLB */
+3
include/asm-ia64/intel_intrin.h
@@ -110,6 +110,9 @@
 #define ia64_st4_rel		__st4_rel
 #define ia64_st8_rel		__st8_rel
 
+/* FIXME: need st4.rel.nta intrinsic */
+#define ia64_st4_rel_nta	__st4_rel
+
 #define ia64_ld1_acq		__ld1_acq
 #define ia64_ld2_acq		__ld2_acq
 #define ia64_ld4_acq		__ld4_acq
+30 -1
include/asm-ia64/sn/bte.h
@@ -3,7 +3,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
  */
 
 
@@ -150,6 +150,35 @@
 	BTEFAIL_NOTAVAIL,	/* BTE not available */
 } bte_result_t;
 
+#define BTEFAIL_SH2_RESP_SHORT	0x1	/* bit 000001 */
+#define BTEFAIL_SH2_RESP_LONG	0x2	/* bit 000010 */
+#define BTEFAIL_SH2_RESP_DSP	0x4	/* bit 000100 */
+#define BTEFAIL_SH2_RESP_ACCESS	0x8	/* bit 001000 */
+#define BTEFAIL_SH2_CRB_TO	0x10	/* bit 010000 */
+#define BTEFAIL_SH2_NACK_LIMIT	0x20	/* bit 100000 */
+#define BTEFAIL_SH2_ALL		0x3F	/* bit 111111 */
+
+#define BTE_ERR_BITS	0x3FUL
+#define BTE_ERR_SHIFT	36
+#define BTE_ERR_MASK	(BTE_ERR_BITS << BTE_ERR_SHIFT)
+
+#define BTE_ERROR_RETRY(value)					\
+	(is_shub2() ? (value != BTEFAIL_SH2_CRB_TO)		\
+		: (value != BTEFAIL_TOUT))
+
+/*
+ * On shub1 BTE_ERR_MASK will always be false, so no need for is_shub2()
+ */
+#define BTE_SHUB2_ERROR(_status)				\
+	((_status & BTE_ERR_MASK)				\
+		? (((_status >> BTE_ERR_SHIFT) & BTE_ERR_BITS) | IBLS_ERROR) \
+		: _status)
+
+#define BTE_GET_ERROR_STATUS(_status)				\
+	(BTE_SHUB2_ERROR(_status) & ~IBLS_ERROR)
+
+#define BTE_VALID_SH2_ERROR(value)				\
+	((value >= BTEFAIL_SH2_RESP_SHORT) && (value <= BTEFAIL_SH2_ALL))
 
 /*
  * Structure defining a bte. An instance of this
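A worked trace of the new macros (illustrative values; IBLS_ERROR itself comes
from the shub MMR headers): a shub2 status word carrying the CRB-timeout code
in the error field, bits 41..36, decodes as follows:

	status = 0x10UL << BTE_ERR_SHIFT;	/* BTEFAIL_SH2_CRB_TO in bits 41..36 */

	/* status & BTE_ERR_MASK is non-zero, so a shub2 error is present: */
	BTE_SHUB2_ERROR(status);		/* == 0x10 | IBLS_ERROR */
	BTE_GET_ERROR_STATUS(status);		/* == 0x10 == BTEFAIL_SH2_CRB_TO */
	BTE_ERROR_RETRY(BTEFAIL_SH2_CRB_TO);	/* false: CRB timeouts are not retried */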
+25 -2
include/asm-ia64/sn/xp.h
@@ -86,7 +86,7 @@
 	BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL);
 
 	ret = bte_copy(src, pdst, len, mode, notification);
-	if (ret != BTE_SUCCESS) {
+	if ((ret != BTE_SUCCESS) && BTE_ERROR_RETRY(ret)) {
 		if (!in_interrupt()) {
 			cond_resched();
 		}
@@ -244,7 +244,30 @@
 
 	xpcDisconnected,	/* 51: channel disconnected (closed) */
 
-	xpcUnknownReason	/* 52: unknown reason -- must be last in list */
+	xpcBteSh2Start,		/* 52: BTE CRB timeout */
+
+				/* 53: 0x1 BTE Error Response Short */
+	xpcBteSh2RspShort = xpcBteSh2Start + BTEFAIL_SH2_RESP_SHORT,
+
+				/* 54: 0x2 BTE Error Response Long */
+	xpcBteSh2RspLong = xpcBteSh2Start + BTEFAIL_SH2_RESP_LONG,
+
+				/* 56: 0x4 BTE Error Response DSB */
+	xpcBteSh2RspDSB = xpcBteSh2Start + BTEFAIL_SH2_RESP_DSP,
+
+				/* 60: 0x8 BTE Error Response Access */
+	xpcBteSh2RspAccess = xpcBteSh2Start + BTEFAIL_SH2_RESP_ACCESS,
+
+				/* 68: 0x10 BTE Error CRB timeout */
+	xpcBteSh2CRBTO = xpcBteSh2Start + BTEFAIL_SH2_CRB_TO,
+
+				/* 84: 0x20 BTE Error NACK limit */
+	xpcBteSh2NACKLimit = xpcBteSh2Start + BTEFAIL_SH2_NACK_LIMIT,
+
+				/* 115: BTE end */
+	xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL,
+
+	xpcUnknownReason	/* 116: unknown reason -- must be last in list */
 };
 
 
+7 -1
include/asm-ia64/sn/xpc.h
@@ -3,7 +3,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
 */
 
 
@@ -1211,6 +1211,12 @@
 static inline enum xpc_retval
 xpc_map_bte_errors(bte_result_t error)
 {
+	if (is_shub2()) {
+		if (BTE_VALID_SH2_ERROR(error))
+			return xpcBteSh2Start + error;
+		else
+			return xpcBteUnmappedError;
+	}
 	switch (error) {
 	case BTE_SUCCESS:	return xpcSuccess;
 	case BTEFAIL_DIR:	return xpcBteDirectoryError;
+5 -1
include/asm-ia64/tlbflush.h
@@ -92,6 +92,10 @@
 #define smp_local_flush_tlb()
 #endif
 
-#define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* XXX fix me */
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	flush_tlb_all();	/* XXX fix me */
+}
 
 #endif /* _ASM_IA64_TLBFLUSH_H */
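Side note connecting this to the uncached.c fix earlier in the merge: the old
macro discarded both arguments, which is exactly how the misspelled uc_adddr
compiled in the first place. A minimal sketch of the failure mode under the old
definition:

	#define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* old macro */

	void example(unsigned long uc_addr)
	{
		/* uc_adddr is undeclared, yet this compiled: the macro never
		 * evaluated its arguments. The inline function above evaluates
		 * and type-checks both, turning the typo into a build error. */
		flush_tlb_kernel_range(uc_addr, uc_adddr + IA64_GRANULE_SIZE);
	}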