Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
[S390] Use CONFIG_GENERIC_TIME and define TOD clock source.
[PATCH] sysrq: irq change build fix.
[S390] irq change build fixes.
[S390] cio: 0 is a valid chpid.
[S390] monwriter buffer limit.
[S390] ap bus poll thread priority.

+83 -107
+3
arch/s390/Kconfig
··· 30 30 bool 31 31 default y 32 32 33 + config GENERIC_TIME 34 + def_bool y 35 + 33 36 config GENERIC_BUST_SPINLOCK 34 37 bool 35 38
+1
arch/s390/defconfig
··· 9 9 CONFIG_RWSEM_XCHGADD_ALGORITHM=y 10 10 CONFIG_GENERIC_HWEIGHT=y 11 11 CONFIG_GENERIC_CALIBRATE_DELAY=y 12 + CONFIG_GENERIC_TIME=y 12 13 CONFIG_S390=y 13 14 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 14 15
+6 -3
arch/s390/kernel/s390_ext.c
··· 16 16 17 17 #include <asm/lowcore.h> 18 18 #include <asm/s390_ext.h> 19 + #include <asm/irq_regs.h> 19 20 #include <asm/irq.h> 20 21 21 22 /* ··· 115 114 { 116 115 ext_int_info_t *p; 117 116 int index; 117 + struct pt_regs *old_regs; 118 118 119 119 irq_enter(); 120 + old_regs = set_irq_regs(regs); 120 121 asm volatile ("mc 0,0"); 121 122 if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer) 122 123 /** 123 124 * Make sure that the i/o interrupt did not "overtake" 124 125 * the last HZ timer interrupt. 125 126 */ 126 - account_ticks(regs); 127 + account_ticks(); 127 128 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; 128 129 index = ext_hash(code); 129 130 for (p = ext_int_hash[index]; p; p = p->next) { 130 131 if (likely(p->code == code)) { 131 132 if (likely(p->handler)) 132 - p->handler(regs, code); 133 + p->handler(code); 133 134 } 134 135 } 136 + set_irq_regs(old_regs); 135 137 irq_exit(); 136 138 } 137 139 138 140 EXPORT_SYMBOL(register_external_interrupt); 139 141 EXPORT_SYMBOL(unregister_external_interrupt); 140 -
+1 -1
arch/s390/kernel/smp.c
··· 339 339 * cpus are handled. 340 340 */ 341 341 342 - void do_ext_call_interrupt(struct pt_regs *regs, __u16 code) 342 + void do_ext_call_interrupt(__u16 code) 343 343 { 344 344 unsigned long bits; 345 345
+29 -74
arch/s390/kernel/time.c
··· 28 28 #include <linux/profile.h> 29 29 #include <linux/timex.h> 30 30 #include <linux/notifier.h> 31 + #include <linux/clocksource.h> 31 32 32 33 #include <asm/uaccess.h> 33 34 #include <asm/delay.h> 34 35 #include <asm/s390_ext.h> 35 36 #include <asm/div64.h> 36 37 #include <asm/irq.h> 38 + #include <asm/irq_regs.h> 37 39 #include <asm/timer.h> 38 40 39 41 /* change this if you have some constant time drift */ ··· 83 81 xtime->tv_nsec = ((todval * 1000) >> 12); 84 82 } 85 83 86 - static inline unsigned long do_gettimeoffset(void) 87 - { 88 - __u64 now; 89 - 90 - now = (get_clock() - jiffies_timer_cc) >> 12; 91 - now -= (__u64) jiffies * USECS_PER_JIFFY; 92 - return (unsigned long) now; 93 - } 94 - 95 - /* 96 - * This version of gettimeofday has microsecond resolution. 97 - */ 98 - void do_gettimeofday(struct timeval *tv) 99 - { 100 - unsigned long flags; 101 - unsigned long seq; 102 - unsigned long usec, sec; 103 - 104 - do { 105 - seq = read_seqbegin_irqsave(&xtime_lock, flags); 106 - 107 - sec = xtime.tv_sec; 108 - usec = xtime.tv_nsec / 1000 + do_gettimeoffset(); 109 - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); 110 - 111 - while (usec >= 1000000) { 112 - usec -= 1000000; 113 - sec++; 114 - } 115 - 116 - tv->tv_sec = sec; 117 - tv->tv_usec = usec; 118 - } 119 - 120 - EXPORT_SYMBOL(do_gettimeofday); 121 - 122 - int do_settimeofday(struct timespec *tv) 123 - { 124 - time_t wtm_sec, sec = tv->tv_sec; 125 - long wtm_nsec, nsec = tv->tv_nsec; 126 - 127 - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) 128 - return -EINVAL; 129 - 130 - write_seqlock_irq(&xtime_lock); 131 - /* This is revolting. We need to set the xtime.tv_nsec 132 - * correctly. However, the value in this location is 133 - * is value at the last tick. 134 - * Discover what correction gettimeofday 135 - * would have done, and then undo it! 136 - */ 137 - nsec -= do_gettimeoffset() * 1000; 138 - 139 - wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); 140 - wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); 141 - 142 - set_normalized_timespec(&xtime, sec, nsec); 143 - set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 144 - 145 - ntp_clear(); 146 - write_sequnlock_irq(&xtime_lock); 147 - clock_was_set(); 148 - return 0; 149 - } 150 - 151 - EXPORT_SYMBOL(do_settimeofday); 152 - 153 - 154 84 #ifdef CONFIG_PROFILING 155 - #define s390_do_profile(regs) profile_tick(CPU_PROFILING, regs) 85 + #define s390_do_profile() profile_tick(CPU_PROFILING) 156 86 #else 157 - #define s390_do_profile(regs) do { ; } while(0) 87 + #define s390_do_profile() do { ; } while(0) 158 88 #endif /* CONFIG_PROFILING */ 159 89 160 90 ··· 94 160 * timer_interrupt() needs to keep up the real-time clock, 95 161 * as well as call the "do_timer()" routine every clocktick 96 162 */ 97 - void account_ticks(struct pt_regs *regs) 163 + void account_ticks(void) 98 164 { 99 165 __u64 tmp; 100 166 __u32 ticks; ··· 155 221 account_tick_vtime(current); 156 222 #else 157 223 while (ticks--) 158 - update_process_times(user_mode(regs)); 224 + update_process_times(user_mode(get_irq_regs())); 159 225 #endif 160 226 161 - s390_do_profile(regs); 227 + s390_do_profile(); 162 228 } 163 229 164 230 #ifdef CONFIG_NO_IDLE_HZ ··· 219 285 */ 220 286 static inline void start_hz_timer(void) 221 287 { 288 + BUG_ON(!in_interrupt()); 289 + 222 290 if (!cpu_isset(smp_processor_id(), nohz_cpu_mask)) 223 291 return; 224 - account_ticks(task_pt_regs(current)); 292 + account_ticks(); 225 293 cpu_clear(smp_processor_id(), nohz_cpu_mask); 226 294 } 227 295 ··· 273 337 274 338 extern void vtime_init(void); 275 339 340 + static cycle_t read_tod_clock(void) 341 + { 342 + return get_clock(); 343 + } 344 + 345 + static struct clocksource clocksource_tod = { 346 + .name = "tod", 347 + .rating = 100, 348 + .read = read_tod_clock, 349 + .mask = -1ULL, 350 + .mult = 1000, 351 + .shift = 12, 352 + .is_continuous = 1, 353 + }; 354 + 355 + 276 356 /* 277 357 * Initialize the TOD clock and the CPU timer of 278 358 * the boot cpu. ··· 332 380 if (register_early_external_interrupt(0x1004, NULL, 333 381 &ext_int_info_cc) != 0) 334 382 panic("Couldn't request external interrupt 0x1004"); 383 + 384 + if (clocksource_register(&clocksource_tod) != 0) 385 + panic("Could not register TOD clock source"); 335 386 336 387 init_cpu_timer(); 337 388
+1 -1
arch/s390/kernel/traps.c
··· 61 61 #ifdef CONFIG_PFAULT 62 62 extern int pfault_init(void); 63 63 extern void pfault_fini(void); 64 - extern void pfault_interrupt(struct pt_regs *regs, __u16 error_code); 64 + extern void pfault_interrupt(__u16 error_code); 65 65 static ext_int_info_t ext_int_pfault; 66 66 #endif 67 67 extern pgm_check_handler_t do_monitor_call;
+3 -2
arch/s390/kernel/vtime.c
··· 22 22 23 23 #include <asm/s390_ext.h> 24 24 #include <asm/timer.h> 25 + #include <asm/irq_regs.h> 25 26 26 27 static ext_int_info_t ext_int_info_timer; 27 28 DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); ··· 242 241 /* 243 242 * Handler for the virtual CPU timer. 244 243 */ 245 - static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code) 244 + static void do_cpu_timer_interrupt(__u16 error_code) 246 245 { 247 246 int cpu; 248 247 __u64 next, delta; ··· 275 274 list_move_tail(&event->entry, &cb_list); 276 275 } 277 276 spin_unlock(&vt_list->lock); 278 - do_callbacks(&cb_list, regs); 277 + do_callbacks(&cb_list, get_irq_regs()); 279 278 280 279 /* next event is first in list */ 281 280 spin_lock(&vt_list->lock);
+1 -1
arch/s390/mm/fault.c
··· 451 451 } 452 452 453 453 asmlinkage void 454 - pfault_interrupt(struct pt_regs *regs, __u16 error_code) 454 + pfault_interrupt(__u16 error_code) 455 455 { 456 456 struct task_struct *tsk; 457 457 __u16 subcode;
+1
drivers/char/sysrq.c
··· 38 38 #include <linux/irq.h> 39 39 40 40 #include <asm/ptrace.h> 41 + #include <asm/irq_regs.h> 41 42 42 43 /* Whether we react on sysrq keys or just ignore them */ 43 44 int sysrq_enabled = 1;
+1 -1
drivers/s390/block/dasd_diag.c
··· 218 218 219 219 /* Handle external interruption. */ 220 220 static void 221 - dasd_ext_handler(struct pt_regs *regs, __u16 code) 221 + dasd_ext_handler(__u16 code) 222 222 { 223 223 struct dasd_ccw_req *cqr, *next; 224 224 struct dasd_device *device;
+1 -1
drivers/s390/char/ctrlchar.c
··· 20 20 static void 21 21 ctrlchar_handle_sysrq(void *tty) 22 22 { 23 - handle_sysrq(ctrlchar_sysrq_key, NULL, (struct tty_struct *) tty); 23 + handle_sysrq(ctrlchar_sysrq_key, (struct tty_struct *) tty); 24 24 } 25 25 26 26 static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq, NULL);
+1 -1
drivers/s390/char/keyboard.c
··· 304 304 if (kbd->sysrq) { 305 305 if (kbd->sysrq == K(KT_LATIN, '-')) { 306 306 kbd->sysrq = 0; 307 - handle_sysrq(value, NULL, kbd->tty); 307 + handle_sysrq(value, kbd->tty); 308 308 return; 309 309 } 310 310 if (value == '-') {
+5 -5
drivers/s390/char/monwriter.c
··· 26 26 #define MONWRITE_MAX_DATALEN 4024 27 27 28 28 static int mon_max_bufs = 255; 29 + static int mon_buf_count; 29 30 30 31 struct mon_buf { 31 32 struct list_head list; ··· 41 40 size_t hdr_to_read; 42 41 size_t data_to_read; 43 42 struct mon_buf *current_buf; 44 - int mon_buf_count; 45 43 }; 46 44 47 45 /* ··· 99 99 rc = monwrite_diag(monhdr, monbuf->data, 100 100 APPLDATA_STOP_REC); 101 101 list_del(&monbuf->list); 102 - monpriv->mon_buf_count--; 102 + mon_buf_count--; 103 103 kfree(monbuf->data); 104 104 kfree(monbuf); 105 105 monbuf = NULL; 106 106 } 107 107 } else { 108 - if (monpriv->mon_buf_count >= mon_max_bufs) 108 + if (mon_buf_count >= mon_max_bufs) 109 109 return -ENOSPC; 110 110 monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL); 111 111 if (!monbuf) ··· 118 118 } 119 119 monbuf->hdr = *monhdr; 120 120 list_add_tail(&monbuf->list, &monpriv->list); 121 - monpriv->mon_buf_count++; 121 + mon_buf_count++; 122 122 } 123 123 monpriv->current_buf = monbuf; 124 124 return 0; ··· 186 186 if (entry->hdr.mon_function != MONWRITE_GEN_EVENT) 187 187 monwrite_diag(&entry->hdr, entry->data, 188 188 APPLDATA_STOP_REC); 189 - monpriv->mon_buf_count--; 189 + mon_buf_count--; 190 190 list_del(&entry->list); 191 191 kfree(entry->data); 192 192 kfree(entry);
+2 -2
drivers/s390/char/sclp.c
··· 324 324 * Prepare read event data request if necessary. Start processing of next 325 325 * request on queue. */ 326 326 static void 327 - sclp_interrupt_handler(struct pt_regs *regs, __u16 code) 327 + sclp_interrupt_handler(__u16 code) 328 328 { 329 329 struct sclp_req *req; 330 330 u32 finished_sccb; ··· 743 743 /* Handler for external interruption used during initialization. Modify 744 744 * request state to done. */ 745 745 static void 746 - sclp_check_handler(struct pt_regs *regs, __u16 code) 746 + sclp_check_handler(__u16 code) 747 747 { 748 748 u32 finished_sccb; 749 749
+16 -9
drivers/s390/cio/chsc.c
··· 200 200 spin_unlock_irq(&sch->lock); 201 201 free_page((unsigned long)page); 202 202 if (!ret) { 203 - int j, chpid; 203 + int j, chpid, mask; 204 204 /* Allocate channel path structures, if needed. */ 205 205 for (j = 0; j < 8; j++) { 206 + mask = 0x80 >> j; 206 207 chpid = sch->ssd_info.chpid[j]; 207 - if (chpid && (get_chp_status(chpid) < 0)) 208 + if ((sch->schib.pmcw.pim & mask) && 209 + (get_chp_status(chpid) < 0)) 208 210 new_channel_path(chpid); 209 211 } 210 212 } ··· 224 222 225 223 sch = to_subchannel(dev); 226 224 chpid = data; 227 - for (j = 0; j < 8; j++) 228 - if (sch->schib.pmcw.chpid[j] == chpid->id) 225 + for (j = 0; j < 8; j++) { 226 + mask = 0x80 >> j; 227 + if ((sch->schib.pmcw.pim & mask) && 228 + (sch->schib.pmcw.chpid[j] == chpid->id)) 229 229 break; 230 + } 230 231 if (j >= 8) 231 232 return 0; 232 233 233 - mask = 0x80 >> j; 234 234 spin_lock_irq(&sch->lock); 235 235 236 236 stsch(sch->schid, &schib); ··· 624 620 static int 625 621 __chp_add(struct subchannel_id schid, void *data) 626 622 { 627 - int i; 623 + int i, mask; 628 624 struct channel_path *chp; 629 625 struct subchannel *sch; 630 626 ··· 634 630 /* Check if the subchannel is now available. */ 635 631 return __chp_add_new_sch(schid); 636 632 spin_lock_irq(&sch->lock); 637 - for (i=0; i<8; i++) 638 - if (sch->schib.pmcw.chpid[i] == chp->id) { 633 + for (i=0; i<8; i++) { 634 + mask = 0x80 >> i; 635 + if ((sch->schib.pmcw.pim & mask) && 636 + (sch->schib.pmcw.chpid[i] == chp->id)) { 639 637 if (stsch(sch->schid, &sch->schib) != 0) { 640 638 /* Endgame. */ 641 639 spin_unlock_irq(&sch->lock); ··· 645 639 } 646 640 break; 647 641 } 642 + } 648 643 if (i==8) { 649 644 spin_unlock_irq(&sch->lock); 650 645 return 0; ··· 653 646 sch->lpm = ((sch->schib.pmcw.pim & 654 647 sch->schib.pmcw.pam & 655 648 sch->schib.pmcw.pom) 656 - | 0x80 >> i) & sch->opm; 649 + | mask) & sch->opm; 657 650 658 651 if (sch->driver && sch->driver->verify) 659 652 sch->driver->verify(&sch->dev);
+5 -1
drivers/s390/cio/cio.c
··· 19 19 #include <asm/cio.h> 20 20 #include <asm/delay.h> 21 21 #include <asm/irq.h> 22 + #include <asm/irq_regs.h> 22 23 #include <asm/setup.h> 23 24 #include "airq.h" 24 25 #include "cio.h" ··· 607 606 struct tpi_info *tpi_info; 608 607 struct subchannel *sch; 609 608 struct irb *irb; 609 + struct pt_regs *old_regs; 610 610 611 611 irq_enter (); 612 + old_regs = set_irq_regs(regs); 612 613 asm volatile ("mc 0,0"); 613 614 if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer) 614 615 /** 615 616 * Make sure that the i/o interrupt did not "overtake" 616 617 * the last HZ timer interrupt. 617 618 */ 618 - account_ticks(regs); 619 + account_ticks(); 619 620 /* 620 621 * Get interrupt information from lowcore 621 622 */ ··· 655 652 * out of the sie which costs more cycles than it saves. 656 653 */ 657 654 } while (!MACHINE_IS_VM && tpi (NULL) != 0); 655 + set_irq_regs(old_regs); 658 656 irq_exit (); 659 657 } 660 658
+1 -1
drivers/s390/crypto/ap_bus.c
··· 1062 1062 unsigned long flags; 1063 1063 int requests; 1064 1064 1065 - set_user_nice(current, -20); 1065 + set_user_nice(current, 19); 1066 1066 while (1) { 1067 1067 if (need_resched()) { 1068 1068 schedule();
+2 -2
drivers/s390/net/iucv.c
··· 116 116 *Internal function prototypes 117 117 */ 118 118 static void iucv_tasklet_handler(unsigned long); 119 - static void iucv_irq_handler(struct pt_regs *, __u16); 119 + static void iucv_irq_handler(__u16); 120 120 121 121 static DECLARE_TASKLET(iucv_tasklet,iucv_tasklet_handler,0); 122 122 ··· 2251 2251 * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler(). 2252 2252 */ 2253 2253 static void 2254 - iucv_irq_handler(struct pt_regs *regs, __u16 code) 2254 + iucv_irq_handler(__u16 code) 2255 2255 { 2256 2256 iucv_irqdata *irqdata; 2257 2257
+1 -1
include/asm-s390/hardirq.h
··· 32 32 33 33 #define HARDIRQ_BITS 8 34 34 35 - extern void account_ticks(struct pt_regs *); 35 + extern void account_ticks(void); 36 36 37 37 #endif /* __ASM_HARDIRQ_H */
+1
include/asm-s390/irq_regs.h
··· 1 + #include <asm-generic/irq_regs.h>
+1 -1
include/asm-s390/s390_ext.h
··· 10 10 * Martin Schwidefsky (schwidefsky@de.ibm.com) 11 11 */ 12 12 13 - typedef void (*ext_int_handler_t)(struct pt_regs *regs, __u16 code); 13 + typedef void (*ext_int_handler_t)(__u16 code); 14 14 15 15 /* 16 16 * Warning: if you change ext_int_info_t you have to change the