Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
[IA64] ITC: Reduce rating for ITC clock if ITCs are drifty
[IA64] SN2: Fix up sn2_rtc clock
[IA64] Fix wrong access to irq_desc[] in iosapic_register_intr().
[IA64] Fix possible race in destroy_and_reserve_irq()
[IA64] Fix registered interrupt check
[IA64] Remove a few duplicate includes
[IA64] Allow smp_call_function_single() to current cpu
[IA64] fix a few section mismatch warnings

 arch/ia64/ia32/sys_ia32.c       |    1 -
 arch/ia64/kernel/iosapic.c      |   19 +++++-----
 arch/ia64/kernel/irq_ia64.c     |   17 ++----
 arch/ia64/kernel/mca.c          |   17 ++++---
 arch/ia64/kernel/setup.c        |    1 -
 arch/ia64/kernel/smp.c          |    8 +++--
 arch/ia64/kernel/time.c         |   16 ++++++++-
 arch/ia64/sn/kernel/io_common.c |    2 +-
 arch/ia64/sn/kernel/setup.c     |    1 -
 arch/ia64/sn/kernel/sn2/timer.c |    7 ++---
 10 files changed, 50 insertions(+), 39 deletions(-)

diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
@@ -34,7 +34,6 @@
 #include <linux/uio.h>
 #include <linux/nfs_fs.h>
 #include <linux/quota.h>
-#include <linux/syscalls.h>
 #include <linux/sunrpc/svc.h>
 #include <linux/nfsd/nfsd.h>
 #include <linux/nfsd/cache.h>
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
@@ -142,7 +142,7 @@
 static struct iosapic_intr_info {
         struct list_head rtes;          /* RTEs using this vector (empty =>
                                          * not an IOSAPIC interrupt) */
-        int count;                      /* # of RTEs that shares this vector */
+        int count;                      /* # of registered RTEs */
         u32 low32;                      /* current value of low word of
                                          * Redirection table entry */
         unsigned int dest;              /* destination CPU physical ID */
@@ -313,7 +313,7 @@
         int rte_index;
         struct iosapic_rte_info *rte;
 
-        if (list_empty(&iosapic_intr_info[irq].rtes))
+        if (!iosapic_intr_info[irq].count)
                 return;                 /* not an IOSAPIC interrupt! */
 
         /* set only the mask bit */
@@ -331,7 +331,7 @@
         int rte_index;
         struct iosapic_rte_info *rte;
 
-        if (list_empty(&iosapic_intr_info[irq].rtes))
+        if (!iosapic_intr_info[irq].count)
                 return;                 /* not an IOSAPIC interrupt! */
 
         low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
@@ -363,7 +363,7 @@
 
         dest = cpu_physical_id(first_cpu(mask));
 
-        if (list_empty(&iosapic_intr_info[irq].rtes))
+        if (!iosapic_intr_info[irq].count)
                 return;                 /* not an IOSAPIC interrupt */
 
         set_irq_affinity_info(irq, dest, redir);
@@ -542,7 +542,7 @@
 {
         int new_irq;
 
-        if (!list_empty(&iosapic_intr_info[irq].rtes)) {
+        if (iosapic_intr_info[irq].count) {
                 new_irq = create_irq();
                 if (new_irq < 0)
                         panic("%s: out of interrupt vectors!\n", __FUNCTION__);
@@ -560,7 +560,7 @@
         }
 }
 
-static struct iosapic_rte_info *iosapic_alloc_rte (void)
+static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
 {
         int i;
         struct iosapic_rte_info *rte;
@@ -677,7 +677,7 @@
          * In case of vector shared by multiple RTEs, all RTEs that
          * share the vector need to use the same destination CPU.
          */
-        if (!list_empty(&iosapic_intr_info[irq].rtes))
+        if (iosapic_intr_info[irq].count)
                 return iosapic_intr_info[irq].dest;
 
         /*
@@ -794,8 +794,9 @@
         err = register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY,
                             polarity, trigger);
         if (err < 0) {
+                spin_unlock(&irq_desc[irq].lock);
                 irq = err;
-                goto unlock_all;
+                goto unlock_iosapic_lock;
         }
 
         /*
@@ -812,7 +813,7 @@
                 gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
                 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
                 cpu_logical_id(dest), dest, irq_to_vector(irq));
-unlock_all:
+
         spin_unlock(&irq_desc[irq].lock);
 unlock_iosapic_lock:
         spin_unlock_irqrestore(&iosapic_lock, flags);
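The iosapic.c changes do two things: every "is this an IOSAPIC interrupt?" test now reads the explicit count of registered RTEs rather than calling list_empty() on the rtes list, and the error path in iosapic_register_intr() drops irq_desc[irq].lock before jumping past its normal unlock site, so no lock is left held on failure. A minimal user-space sketch of the count-shadows-list idea follows; all names are illustrative stand-ins, not the kernel's:

#include <assert.h>

/* A per-irq record keeps an explicit count of registered entries
 * alongside its list, so "is anything registered?" never depends on
 * walking or inspecting the list itself. */
struct node { struct node *next, *prev; };

struct intr_info {
        struct node rtes;   /* circular list; head points to itself when empty */
        int count;          /* # of registered entries, kept in sync by hand */
};

static void info_init(struct intr_info *info)
{
        info->rtes.next = info->rtes.prev = &info->rtes;
        info->count = 0;
}

static void info_add(struct intr_info *info, struct node *n)
{
        n->next = info->rtes.next;
        n->prev = &info->rtes;
        info->rtes.next->prev = n;
        info->rtes.next = n;
        info->count++;          /* the count shadows every insertion... */
}

static void info_del(struct intr_info *info, struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        info->count--;          /* ...and every removal */
}

int main(void)
{
        struct intr_info info;
        struct node rte;

        info_init(&info);
        assert(info.count == 0);        /* not registered */
        info_add(&info, &rte);
        assert(info.count == 1);        /* registered */
        info_del(&info, &rte);
        assert(info.count == 0);
        return 0;
}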
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
@@ -101,15 +101,6 @@
         return -1;
 }
 
-static void reserve_irq(unsigned int irq)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&vector_lock, flags);
-        irq_status[irq] = IRQ_RSVD;
-        spin_unlock_irqrestore(&vector_lock, flags);
-}
-
 static inline int find_unassigned_irq(void)
 {
         int irq;
@@ -293,10 +284,14 @@
 
 void destroy_and_reserve_irq(unsigned int irq)
 {
+        unsigned long flags;
+
         dynamic_irq_cleanup(irq);
 
-        clear_irq_vector(irq);
-        reserve_irq(irq);
+        spin_lock_irqsave(&vector_lock, flags);
+        __clear_irq_vector(irq);
+        irq_status[irq] = IRQ_RSVD;
+        spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 static int __reassign_irq_vector(int irq, int cpu)
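The race existed because clear_irq_vector() and reserve_irq() each took and dropped vector_lock separately, leaving a window in which another CPU could re-allocate the just-cleared irq before it was marked IRQ_RSVD. The fix holds vector_lock across both steps, using the lock-held variant __clear_irq_vector() and setting irq_status[irq] in the same critical section. A toy pthread sketch of the pattern, with hypothetical names and a single slot standing in for the vector table:

#include <pthread.h>
#include <assert.h>

enum { IRQ_UNUSED, IRQ_USED, IRQ_RSVD };

static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER;
static int irq_status = IRQ_USED;

/* Lock-held helper, in the spirit of __clear_irq_vector():
 * the caller must hold vector_lock. */
static void clear_slot_locked(void)
{
        irq_status = IRQ_UNUSED;
}

static void destroy_and_reserve(void)
{
        pthread_mutex_lock(&vector_lock);
        clear_slot_locked();        /* free the slot... */
        irq_status = IRQ_RSVD;      /* ...and reserve it, with no window between */
        pthread_mutex_unlock(&vector_lock);
}

int main(void)
{
        destroy_and_reserve();
        assert(irq_status == IRQ_RSVD);
        return 0;
}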
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
@@ -1750,8 +1750,17 @@
         strncpy(p->comm, type, sizeof(p->comm)-1);
 }
 
-/* Do per-CPU MCA-related initialization. */
+/* Caller prevents this from being called after init */
+static void * __init_refok mca_bootmem(void)
+{
+        void *p;
 
+        p = alloc_bootmem(sizeof(struct ia64_mca_cpu) * NR_CPUS +
+                          KERNEL_STACK_SIZE);
+        return (void *)ALIGN((unsigned long)p, KERNEL_STACK_SIZE);
+}
+
+/* Do per-CPU MCA-related initialization. */
 void __cpuinit
 ia64_mca_cpu_init(void *cpu_data)
 {
@@ -1772,11 +1781,7 @@
                 int cpu;
 
                 first_time = 0;
-                mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
-                                         * NR_CPUS + KERNEL_STACK_SIZE);
-                mca_data = (void *)(((unsigned long)mca_data +
-                                        KERNEL_STACK_SIZE - 1) &
-                                        (-KERNEL_STACK_SIZE));
+                mca_data = mca_bootmem();
                 for (cpu = 0; cpu < NR_CPUS; cpu++) {
                         format_mca_init_stack(mca_data,
                                         offsetof(struct ia64_mca_cpu, mca_stack),
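Factoring the bootmem call into an __init_refok helper addresses the section-mismatch warning, and the open-coded round-up is replaced with the kernel's ALIGN() macro; for a power-of-two KERNEL_STACK_SIZE the two expressions are identical, since -a == ~(a - 1). A quick user-space check of that equivalence (the ALIGN() definition below is the standard round-up form; the stack size value is illustrative):

#include <assert.h>

/* Round x up to a multiple of a; a must be a power of two. */
#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

#define KERNEL_STACK_SIZE       (1UL << 16)     /* illustrative value */

int main(void)
{
        unsigned long p;

        for (p = 0; p < 4 * KERNEL_STACK_SIZE; p += 4096) {
                /* the old open-coded expression from ia64_mca_cpu_init() */
                unsigned long old = (p + KERNEL_STACK_SIZE - 1) &
                                    (-KERNEL_STACK_SIZE);
                assert(ALIGN(p, KERNEL_STACK_SIZE) == old);
        }
        return 0;
}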
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
@@ -60,7 +60,6 @@
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/unistd.h>
-#include <asm/system.h>
 
 #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
 # error "struct cpuinfo_ia64 too big!"
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
@@ -346,7 +346,7 @@
 }
 
 /*
- * Run a function on another CPU
+ * Run a function on a specific CPU
  *  <func>      The function to run. This must be fast and non-blocking.
  *  <info>      An arbitrary pointer to pass to the function.
  *  <nonatomic> Currently unused.
@@ -366,9 +366,11 @@
         int me = get_cpu(); /* prevent preemption and reschedule on another processor */
 
         if (cpuid == me) {
-                printk(KERN_INFO "%s: trying to call self\n", __FUNCTION__);
+                local_irq_disable();
+                func(info);
+                local_irq_enable();
                 put_cpu();
-                return -EBUSY;
+                return 0;
         }
 
         data.func = func;
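A self-call is now serviced inline: the function runs on the current CPU with interrupts disabled, approximating the context it would get in the IPI handler on a remote CPU, and the call succeeds (returns 0) instead of failing with -EBUSY. A user-space mimic of the new dispatch logic; everything below is a hypothetical stand-in, not the kernel API:

#include <stdio.h>

static int current_cpu = 0;

/* Stand-ins for local_irq_disable()/local_irq_enable(). */
static void irq_off(void) { printf("irqs off\n"); }
static void irq_on(void)  { printf("irqs on\n"); }

static int call_function_single(int cpuid, void (*func)(void *), void *info)
{
        if (cpuid == current_cpu) {
                irq_off();      /* match the IPI handler's context */
                func(info);
                irq_on();
                return 0;       /* used to be: return -EBUSY */
        }
        /* remote case elided: would queue the call and send an IPI */
        return 0;
}

static void hello(void *info)
{
        printf("running on cpu %d: %s\n", current_cpu, (const char *)info);
}

int main(void)
{
        return call_function_single(0, hello, "self-call now allowed");
}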
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
@@ -240,7 +240,21 @@
                 if (!nojitter)
                         itc_jitter_data.itc_jitter = 1;
 #endif
-        }
+        } else
+                /*
+                 * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
+                 * ITC values may fluctuate significantly between processors.
+                 * Clock should not be used for hrtimers. Mark itc as only
+                 * useful for boot and testing.
+                 *
+                 * Note that jitter compensation is off! There is no point of
+                 * synchronizing ITCs since they may be large differentials
+                 * that change over time.
+                 *
+                 * The only way to fix this would be to repeatedly sync the
+                 * ITCs. Until that time we have to avoid ITC.
+                 */
+                clocksource_itc.rating = 50;
 
         /* Setup the CPU local timer tick */
         ia64_cpu_local_tick();
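A rating of 50 puts the drifty ITC in the "unfit for real use, only for bootup and testing" band (1-99) of the rating guide in include/linux/clocksource.h, while the sn2_rtc bump to 450 in the last diff below lands in the "perfect, must-use" band (400-499); the timekeeping core selects the registered clocksource with the highest rating. A toy version of that selection, with the struct pared down to the two relevant fields and the values mirroring this merge:

#include <stdio.h>

struct clocksource {
        const char *name;
        int rating;
};

static struct clocksource sources[] = {
        { .name = "itc",     .rating = 50 },   /* drifty: boot/testing only */
        { .name = "sn2_rtc", .rating = 450 },  /* perfect: must-use */
};

int main(void)
{
        struct clocksource *best = &sources[0];
        size_t i;

        /* Highest rating wins, as in the core's clocksource selection. */
        for (i = 1; i < sizeof(sources) / sizeof(sources[0]); i++)
                if (sources[i].rating > best->rating)
                        best = &sources[i];
        printf("selected clocksource: %s\n", best->name);
        return 0;
}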
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
@@ -391,7 +391,7 @@
  * hubdev_init_node() - Creates the HUB data structure and link them to it's
  * own NODE specific data area.
  */
-void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
+void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node)
 {
         struct hubdev_info *hubdev_info;
         int size;
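hubdev_init_node() only runs at boot, so tagging it __init moves it into the discardable init section, consistent with its callers, and addresses one of the modpost section-mismatch warnings (the same concern behind __init_refok on iosapic_alloc_rte() and mca_bootmem() above). A stand-alone illustration with assumed, simplified macro definitions; the real annotations live in include/linux/init.h:

#include <stdio.h>

/* Assumed stand-ins: __init places boot-only code in a separate linker
 * section (which the kernel frees after boot); __init_refok marks a
 * function whose reference into init code is known to be safe, which
 * silences modpost's section-mismatch warning. */
#define __init       __attribute__((__section__(".init.text")))
#define __init_refok __attribute__((__section__(".text.init.refok")))

static int __init boot_only_setup(void)
{
        return 42;
}

/* Caller guarantees this never runs after init, so referencing
 * .init.text here is fine even though this function is not __init. */
static int __init_refok careful_caller(void)
{
        return boot_only_setup();
}

int main(void)
{
        printf("%d\n", careful_caller());
        return 0;
}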
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
@@ -25,7 +25,6 @@
 #include <linux/interrupt.h>
 #include <linux/acpi.h>
 #include <linux/compiler.h>
-#include <linux/sched.h>
 #include <linux/root_dev.h>
 #include <linux/nodemask.h>
 #include <linux/pm.h>
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
@@ -23,16 +23,14 @@
 
 extern unsigned long sn_rtc_cycles_per_second;
 
-static void __iomem *sn2_mc;
-
 static cycle_t read_sn2(void)
 {
-        return (cycle_t)readq(sn2_mc);
+        return (cycle_t)readq(RTC_COUNTER_ADDR);
 }
 
 static struct clocksource clocksource_sn2 = {
         .name           = "sn2_rtc",
-        .rating         = 300,
+        .rating         = 450,
         .read           = read_sn2,
         .mask           = (1LL << 55) - 1,
         .mult           = 0,
@@ -56,7 +54,6 @@
 
 void __init sn_timer_init(void)
 {
-        sn2_mc = RTC_COUNTER_ADDR;
         clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR;
         clocksource_sn2.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
                                 clocksource_sn2.shift);