Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
[S390] fix s390x_newuname
[S390] dasd: log sense for fatal errors
[S390] cpu topology: fix locking
[S390] cio: Fix refcount after moving devices.
[S390] ftrace: fix kernel stack backchain walking
[S390] ftrace: disable tracing on idle psw
[S390] lockdep: fix compile bug
[S390] kvm_s390: Fix oops in virtio device detection with "mem="
[S390] sclp: emit error message if assign storage fails
[S390] Fix range for add_active_range() in setup_memory()

+47 -20
+13 -7
arch/s390/kernel/entry.S
··· 61 62 #ifdef CONFIG_TRACE_IRQFLAGS 63 .macro TRACE_IRQS_ON 64 - l %r1,BASED(.Ltrace_irq_on) 65 basr %r14,%r1 66 .endm 67 68 .macro TRACE_IRQS_OFF 69 - l %r1,BASED(.Ltrace_irq_off) 70 basr %r14,%r1 71 .endm 72 73 .macro TRACE_IRQS_CHECK 74 tm SP_PSW(%r15),0x03 # irqs enabled? 75 jz 0f 76 - l %r1,BASED(.Ltrace_irq_on) 77 basr %r14,%r1 78 j 1f 79 - 0: l %r1,BASED(.Ltrace_irq_off) 80 basr %r14,%r1 81 1: 82 .endm ··· 1116 .Lschedtail: .long schedule_tail 1117 .Lsysc_table: .long sys_call_table 1118 #ifdef CONFIG_TRACE_IRQFLAGS 1119 - .Ltrace_irq_on: .long trace_hardirqs_on 1120 - .Ltrace_irq_off: 1121 - .long trace_hardirqs_off 1122 .Llockdep_sys_exit: 1123 .long lockdep_sys_exit 1124 #endif
··· 61 62 #ifdef CONFIG_TRACE_IRQFLAGS 63 .macro TRACE_IRQS_ON 64 + basr %r2,%r0 65 + l %r1,BASED(.Ltrace_irq_on_caller) 66 basr %r14,%r1 67 .endm 68 69 .macro TRACE_IRQS_OFF 70 + basr %r2,%r0 71 + l %r1,BASED(.Ltrace_irq_off_caller) 72 basr %r14,%r1 73 .endm 74 75 .macro TRACE_IRQS_CHECK 76 + basr %r2,%r0 77 tm SP_PSW(%r15),0x03 # irqs enabled? 78 jz 0f 79 + l %r1,BASED(.Ltrace_irq_on_caller) 80 basr %r14,%r1 81 j 1f 82 + 0: l %r1,BASED(.Ltrace_irq_off_caller) 83 basr %r14,%r1 84 1: 85 .endm ··· 1113 .Lschedtail: .long schedule_tail 1114 .Lsysc_table: .long sys_call_table 1115 #ifdef CONFIG_TRACE_IRQFLAGS 1116 + .Ltrace_irq_on_caller: 1117 + .long trace_hardirqs_on_caller 1118 + .Ltrace_irq_off_caller: 1119 + .long trace_hardirqs_off_caller 1120 + #endif 1121 + #ifdef CONFIG_LOCKDEP 1122 .Llockdep_sys_exit: 1123 .long lockdep_sys_exit 1124 #endif
+7 -4
arch/s390/kernel/entry64.S
··· 61 62 #ifdef CONFIG_TRACE_IRQFLAGS 63 .macro TRACE_IRQS_ON 64 - brasl %r14,trace_hardirqs_on 65 .endm 66 67 .macro TRACE_IRQS_OFF 68 - brasl %r14,trace_hardirqs_off 69 .endm 70 71 .macro TRACE_IRQS_CHECK 72 tm SP_PSW(%r15),0x03 # irqs enabled? 73 jz 0f 74 - brasl %r14,trace_hardirqs_on 75 j 1f 76 - 0: brasl %r14,trace_hardirqs_off 77 1: 78 .endm 79 #else
··· 61 62 #ifdef CONFIG_TRACE_IRQFLAGS 63 .macro TRACE_IRQS_ON 64 + basr %r2,%r0 65 + brasl %r14,trace_hardirqs_on_caller 66 .endm 67 68 .macro TRACE_IRQS_OFF 69 + basr %r2,%r0 70 + brasl %r14,trace_hardirqs_off_caller 71 .endm 72 73 .macro TRACE_IRQS_CHECK 74 + basr %r2,%r0 75 tm SP_PSW(%r15),0x03 # irqs enabled? 76 jz 0f 77 + brasl %r14,trace_hardirqs_on_caller 78 j 1f 79 + 0: brasl %r14,trace_hardirqs_off_caller 80 1: 81 .endm 82 #else
+3
arch/s390/kernel/process.c
··· 136 return; 137 } 138 trace_hardirqs_on(); 139 /* Wait for external, I/O or machine check interrupt. */ 140 __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | 141 PSW_MASK_IO | PSW_MASK_EXT); 142 } 143 144 void cpu_idle(void)
··· 136 return; 137 } 138 trace_hardirqs_on(); 139 + /* Don't trace preempt off for idle. */ 140 + stop_critical_timings(); 141 /* Wait for external, I/O or machine check interrupt. */ 142 __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | 143 PSW_MASK_IO | PSW_MASK_EXT); 144 + start_critical_timings(); 145 } 146 147 void cpu_idle(void)
+2 -2
arch/s390/kernel/setup.c
··· 604 if (memory_chunk[i].type != CHUNK_READ_WRITE) 605 continue; 606 start_chunk = PFN_DOWN(memory_chunk[i].addr); 607 - end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1; 608 end_chunk = min(end_chunk, end_pfn); 609 if (start_chunk >= end_chunk) 610 continue; 611 add_active_range(0, start_chunk, end_chunk); 612 pfn = max(start_chunk, start_pfn); 613 - for (; pfn <= end_chunk; pfn++) 614 page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY); 615 } 616
··· 604 if (memory_chunk[i].type != CHUNK_READ_WRITE) 605 continue; 606 start_chunk = PFN_DOWN(memory_chunk[i].addr); 607 + end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); 608 end_chunk = min(end_chunk, end_pfn); 609 if (start_chunk >= end_chunk) 610 continue; 611 add_active_range(0, start_chunk, end_chunk); 612 pfn = max(start_chunk, start_pfn); 613 + for (; pfn < end_chunk; pfn++) 614 page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY); 615 } 616
+1 -1
arch/s390/kernel/sys_s390.c
··· 198 { 199 int ret = sys_newuname(name); 200 201 - if (current->personality == PER_LINUX32 && !ret) { 202 ret = copy_to_user(name->machine, "s390\0\0\0\0", 8); 203 if (ret) ret = -EFAULT; 204 }
··· 198 { 199 int ret = sys_newuname(name); 200 201 + if (personality(current->personality) == PER_LINUX32 && !ret) { 202 ret = copy_to_user(name->machine, "s390\0\0\0\0", 8); 203 if (ret) ret = -EFAULT; 204 }
+7 -4
arch/s390/kernel/topology.c
··· 65 static struct timer_list topology_timer; 66 static void set_topology_timer(void); 67 static DECLARE_WORK(topology_work, topology_work_fn); 68 69 cpumask_t cpu_core_map[NR_CPUS]; 70 71 cpumask_t cpu_coregroup_map(unsigned int cpu) 72 { 73 struct core_info *core = &core_info; 74 cpumask_t mask; 75 76 cpus_clear(mask); 77 if (!machine_has_topology) 78 return cpu_present_map; 79 - mutex_lock(&smp_cpu_state_mutex); 80 while (core) { 81 if (cpu_isset(cpu, core->mask)) { 82 mask = core->mask; ··· 87 } 88 core = core->next; 89 } 90 - mutex_unlock(&smp_cpu_state_mutex); 91 if (cpus_empty(mask)) 92 mask = cpumask_of_cpu(cpu); 93 return mask; ··· 136 union tl_entry *tle, *end; 137 struct core_info *core = &core_info; 138 139 - mutex_lock(&smp_cpu_state_mutex); 140 clear_cores(); 141 tle = info->tle; 142 end = (union tl_entry *)((unsigned long)info + info->length); ··· 160 } 161 tle = next_tle(tle); 162 } 163 - mutex_unlock(&smp_cpu_state_mutex); 164 } 165 166 static void topology_update_polarization_simple(void)
··· 65 static struct timer_list topology_timer; 66 static void set_topology_timer(void); 67 static DECLARE_WORK(topology_work, topology_work_fn); 68 + /* topology_lock protects the core linked list */ 69 + static DEFINE_SPINLOCK(topology_lock); 70 71 cpumask_t cpu_core_map[NR_CPUS]; 72 73 cpumask_t cpu_coregroup_map(unsigned int cpu) 74 { 75 struct core_info *core = &core_info; 76 + unsigned long flags; 77 cpumask_t mask; 78 79 cpus_clear(mask); 80 if (!machine_has_topology) 81 return cpu_present_map; 82 + spin_lock_irqsave(&topology_lock, flags); 83 while (core) { 84 if (cpu_isset(cpu, core->mask)) { 85 mask = core->mask; ··· 84 } 85 core = core->next; 86 } 87 + spin_unlock_irqrestore(&topology_lock, flags); 88 if (cpus_empty(mask)) 89 mask = cpumask_of_cpu(cpu); 90 return mask; ··· 133 union tl_entry *tle, *end; 134 struct core_info *core = &core_info; 135 136 + spin_lock_irq(&topology_lock); 137 clear_cores(); 138 tle = info->tle; 139 end = (union tl_entry *)((unsigned long)info + info->length); ··· 157 } 158 tle = next_tle(tle); 159 } 160 + spin_unlock_irq(&topology_lock); 161 } 162 163 static void topology_update_polarization_simple(void)
+5
drivers/s390/block/dasd.c
··· 1746 goto restart; 1747 } 1748 1749 /* First of all call extended error reporting. */ 1750 if (dasd_eer_enabled(base) && 1751 cqr->status == DASD_CQR_FAILED) {
··· 1746 goto restart; 1747 } 1748 1749 + /* log sense for fatal error */ 1750 + if (cqr->status == DASD_CQR_FAILED) { 1751 + dasd_log_sense(cqr, &cqr->irb); 1752 + } 1753 + 1754 /* First of all call extended error reporting. */ 1755 if (dasd_eer_enabled(base) && 1756 cqr->status == DASD_CQR_FAILED) {
+3
drivers/s390/char/sclp_cmd.c
··· 324 case 0x0120: 325 break; 326 default: 327 rc = -EIO; 328 break; 329 }
··· 324 case 0x0120: 325 break; 326 default: 327 + pr_warning("assign storage failed (cmd=0x%08x, " 328 + "response=0x%04x, rn=0x%04x)\n", cmd, 329 + sccb->header.response_code, rn); 330 rc = -EIO; 331 break; 332 }
+4
drivers/s390/cio/device.c
··· 874 replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev); 875 if (replacing_cdev) { 876 sch_attach_disconnected_device(sch, replacing_cdev); 877 return; 878 } 879 replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id); 880 if (replacing_cdev) { 881 sch_attach_orphaned_device(sch, replacing_cdev); 882 return; 883 } 884 sch_create_and_recog_new_device(sch);
··· 874 replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev); 875 if (replacing_cdev) { 876 sch_attach_disconnected_device(sch, replacing_cdev); 877 + /* Release reference from get_disc_ccwdev_by_dev_id() */ 878 + put_device(&replacing_cdev->dev); 879 return; 880 } 881 replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id); 882 if (replacing_cdev) { 883 sch_attach_orphaned_device(sch, replacing_cdev); 884 + /* Release reference from get_orphaned_ccwdev_by_dev_id() */ 885 + put_device(&replacing_cdev->dev); 886 return; 887 } 888 sch_create_and_recog_new_device(sch);
+2 -2
drivers/s390/kvm/kvm_virtio.c
··· 322 return rc; 323 } 324 325 - rc = vmem_add_mapping(PFN_PHYS(max_pfn), PAGE_SIZE); 326 if (rc) { 327 s390_root_dev_unregister(kvm_root); 328 return rc; 329 } 330 331 - kvm_devices = (void *) PFN_PHYS(max_pfn); 332 333 ctl_set_bit(0, 9); 334 register_external_interrupt(0x2603, kvm_extint_handler);
··· 322 return rc; 323 } 324 325 + rc = vmem_add_mapping(real_memory_size, PAGE_SIZE); 326 if (rc) { 327 s390_root_dev_unregister(kvm_root); 328 return rc; 329 } 330 331 + kvm_devices = (void *) real_memory_size; 332 333 ctl_set_bit(0, 9); 334 register_external_interrupt(0x2603, kvm_extint_handler);