Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha

Pull alpha updates from Matt Turner:
"It contains a few fixes and some work from Richard to make alpha
emulation under QEMU much more usable"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha:
alpha: Prevent a NULL ptr dereference in csum_partial_copy.
alpha: perf: fix out-of-bounds array access triggered from raw event
alpha: Use qemu+cserve provided high-res clock and alarm.
alpha: Switch to GENERIC_CLOCKEVENTS
alpha: Enable the rpcc clocksource for single processor
alpha: Reorganize rtc handling
alpha: Primitive support for CPU power down.
alpha: Allow HZ to be configured
alpha: Notice if we're being run under QEMU
alpha: Eliminate compiler warning from memset macro

+785 -396
+73 -3
arch/alpha/Kconfig
··· 16 16 select ARCH_WANT_IPC_PARSE_VERSION 17 17 select ARCH_HAVE_NMI_SAFE_CMPXCHG 18 18 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 19 + select GENERIC_CLOCKEVENTS 19 20 select GENERIC_SMP_IDLE_THREAD 20 - select GENERIC_CMOS_UPDATE 21 21 select GENERIC_STRNCPY_FROM_USER 22 22 select GENERIC_STRNLEN_USER 23 23 select HAVE_MOD_ARCH_SPECIFIC ··· 488 488 which always have multiple hoses, and whose consoles support it. 489 489 490 490 491 + config ALPHA_QEMU 492 + bool "Run under QEMU emulation" 493 + depends on !ALPHA_GENERIC 494 + ---help--- 495 + Assume the presence of special features supported by QEMU PALcode 496 + that reduce the overhead of system emulation. 497 + 498 + Generic kernels will auto-detect QEMU. But when building a 499 + system-specific kernel, the assumption is that we want to 500 + eliminate as many runtime tests as possible. 501 + 502 + If unsure, say N. 503 + 504 + 491 505 config ALPHA_SRM 492 506 bool "Use SRM as bootloader" if ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_NAUTILUS || ALPHA_NONAME 493 507 depends on TTY ··· 586 572 Access). This option is for configuring high-end multiprocessor 587 573 server machines. If in doubt, say N. 588 574 575 + config ALPHA_WTINT 576 + bool "Use WTINT" if ALPHA_SRM || ALPHA_GENERIC 577 + default y if ALPHA_QEMU 578 + default n if ALPHA_EV5 || ALPHA_EV56 || (ALPHA_EV4 && !ALPHA_LCA) 579 + default n if !ALPHA_SRM && !ALPHA_GENERIC 580 + default y if SMP 581 + ---help--- 582 + The Wait for Interrupt (WTINT) PALcall attempts to place the CPU 583 + to sleep until the next interrupt. This may reduce the power 584 + consumed, and the heat produced by the computer. However, it has 585 + the side effect of making the cycle counter unreliable as a timing 586 + device across the sleep. 
587 + 588 + For emulation under QEMU, definitely say Y here, as we have other 589 + mechanisms for measuring time than the cycle counter. 590 + 591 + For EV4 (but not LCA), EV5 and EV56 systems, or for systems running 592 + MILO, sleep mode is not supported so you might as well say N here. 593 + 594 + For SMP systems we cannot use the cycle counter for timing anyway, 595 + so you might as well say Y here. 596 + 597 + If unsure, say N. 598 + 589 599 config NODES_SHIFT 590 600 int 591 601 default "7" ··· 651 613 652 614 Take the default (1) unless you want more control or more info. 653 615 616 + choice 617 + prompt "Timer interrupt frequency (HZ)?" 618 + default HZ_128 if ALPHA_QEMU 619 + default HZ_1200 if ALPHA_RAWHIDE 620 + default HZ_1024 621 + ---help--- 622 + The frequency at which timer interrupts occur. A high frequency 623 + minimizes latency, whereas a low frequency minimizes overhead of 624 + process accounting. The latter effect is especially significant 625 + when being run under QEMU. 626 + 627 + Note that some Alpha hardware cannot change the interrupt frequency 628 + of the timer. If unsure, say 1024 (or 1200 for Rawhide). 629 + 630 + config HZ_32 631 + bool "32 Hz" 632 + config HZ_64 633 + bool "64 Hz" 634 + config HZ_128 635 + bool "128 Hz" 636 + config HZ_256 637 + bool "256 Hz" 638 + config HZ_1024 639 + bool "1024 Hz" 640 + config HZ_1200 641 + bool "1200 Hz" 642 + endchoice 643 + 654 644 config HZ 655 - int 656 - default 1200 if ALPHA_RAWHIDE 645 + int 646 + default 32 if HZ_32 647 + default 64 if HZ_64 648 + default 128 if HZ_128 649 + default 256 if HZ_256 650 + default 1200 if HZ_1200 657 651 default 1024 658 652 659 653 source "drivers/pci/Kconfig"
+13 -9
arch/alpha/include/asm/machvec.h
··· 33 33 34 34 int nr_irqs; 35 35 int rtc_port; 36 + int rtc_boot_cpu_only; 36 37 unsigned int max_asn; 37 38 unsigned long max_isa_dma_address; 38 39 unsigned long irq_probe_mask; ··· 96 95 97 96 struct _alpha_agp_info *(*agp_info)(void); 98 97 99 - unsigned int (*rtc_get_time)(struct rtc_time *); 100 - int (*rtc_set_time)(struct rtc_time *); 101 - 102 98 const char *vector_name; 103 99 104 100 /* NUMA information */ ··· 124 126 125 127 #ifdef CONFIG_ALPHA_GENERIC 126 128 extern int alpha_using_srm; 129 + extern int alpha_using_qemu; 127 130 #else 128 - #ifdef CONFIG_ALPHA_SRM 129 - #define alpha_using_srm 1 130 - #else 131 - #define alpha_using_srm 0 132 - #endif 131 + # ifdef CONFIG_ALPHA_SRM 132 + # define alpha_using_srm 1 133 + # else 134 + # define alpha_using_srm 0 135 + # endif 136 + # ifdef CONFIG_ALPHA_QEMU 137 + # define alpha_using_qemu 1 138 + # else 139 + # define alpha_using_qemu 0 140 + # endif 133 141 #endif /* GENERIC */ 134 142 135 - #endif 143 + #endif /* __KERNEL__ */ 136 144 #endif /* __ALPHA_MACHVEC_H */
+71
arch/alpha/include/asm/pal.h
··· 89 89 __CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long); 90 90 __CALL_PAL_W1(wrusp, unsigned long); 91 91 __CALL_PAL_W1(wrvptptr, unsigned long); 92 + __CALL_PAL_RW1(wtint, unsigned long, unsigned long); 92 93 93 94 /* 94 95 * TB routines.. ··· 111 110 #define tbis(x) __tbi(3,__r17=(x),"1" (__r17)) 112 111 #define tbiap() __tbi(-1, /* no second argument */) 113 112 #define tbia() __tbi(-2, /* no second argument */) 113 + 114 + /* 115 + * QEMU Cserv routines.. 116 + */ 117 + 118 + static inline unsigned long 119 + qemu_get_walltime(void) 120 + { 121 + register unsigned long v0 __asm__("$0"); 122 + register unsigned long a0 __asm__("$16") = 3; 123 + 124 + asm("call_pal %2 # cserve get_time" 125 + : "=r"(v0), "+r"(a0) 126 + : "i"(PAL_cserve) 127 + : "$17", "$18", "$19", "$20", "$21"); 128 + 129 + return v0; 130 + } 131 + 132 + static inline unsigned long 133 + qemu_get_alarm(void) 134 + { 135 + register unsigned long v0 __asm__("$0"); 136 + register unsigned long a0 __asm__("$16") = 4; 137 + 138 + asm("call_pal %2 # cserve get_alarm" 139 + : "=r"(v0), "+r"(a0) 140 + : "i"(PAL_cserve) 141 + : "$17", "$18", "$19", "$20", "$21"); 142 + 143 + return v0; 144 + } 145 + 146 + static inline void 147 + qemu_set_alarm_rel(unsigned long expire) 148 + { 149 + register unsigned long a0 __asm__("$16") = 5; 150 + register unsigned long a1 __asm__("$17") = expire; 151 + 152 + asm volatile("call_pal %2 # cserve set_alarm_rel" 153 + : "+r"(a0), "+r"(a1) 154 + : "i"(PAL_cserve) 155 + : "$0", "$18", "$19", "$20", "$21"); 156 + } 157 + 158 + static inline void 159 + qemu_set_alarm_abs(unsigned long expire) 160 + { 161 + register unsigned long a0 __asm__("$16") = 6; 162 + register unsigned long a1 __asm__("$17") = expire; 163 + 164 + asm volatile("call_pal %2 # cserve set_alarm_abs" 165 + : "+r"(a0), "+r"(a1) 166 + : "i"(PAL_cserve) 167 + : "$0", "$18", "$19", "$20", "$21"); 168 + } 169 + 170 + static inline unsigned long 171 + qemu_get_vmtime(void) 172 + { 173 + 
register unsigned long v0 __asm__("$0"); 174 + register unsigned long a0 __asm__("$16") = 7; 175 + 176 + asm("call_pal %2 # cserve get_time" 177 + : "=r"(v0), "+r"(a0) 178 + : "i"(PAL_cserve) 179 + : "$17", "$18", "$19", "$20", "$21"); 180 + 181 + return v0; 182 + } 114 183 115 184 #endif /* !__ASSEMBLY__ */ 116 185 #endif /* __ALPHA_PAL_H */
-11
arch/alpha/include/asm/rtc.h
··· 1 - #ifndef _ALPHA_RTC_H 2 - #define _ALPHA_RTC_H 3 - 4 - #if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) \ 5 - || defined(CONFIG_ALPHA_GENERIC) 6 - # define get_rtc_time alpha_mv.rtc_get_time 7 - # define set_rtc_time alpha_mv.rtc_set_time 8 - #endif 9 - 10 1 #include <asm-generic/rtc.h> 11 - 12 - #endif
+18 -6
arch/alpha/include/asm/string.h
··· 22 22 23 23 #define __HAVE_ARCH_MEMSET 24 24 extern void * __constant_c_memset(void *, unsigned long, size_t); 25 + extern void * ___memset(void *, int, size_t); 25 26 extern void * __memset(void *, int, size_t); 26 27 extern void * memset(void *, int, size_t); 27 28 28 - #define memset(s, c, n) \ 29 - (__builtin_constant_p(c) \ 30 - ? (__builtin_constant_p(n) && (c) == 0 \ 31 - ? __builtin_memset((s),0,(n)) \ 32 - : __constant_c_memset((s),0x0101010101010101UL*(unsigned char)(c),(n))) \ 33 - : __memset((s),(c),(n))) 29 + /* For gcc 3.x, we cannot have the inline function named "memset" because 30 + the __builtin_memset will attempt to resolve to the inline as well, 31 + leading to a "sorry" about unimplemented recursive inlining. */ 32 + extern inline void *__memset(void *s, int c, size_t n) 33 + { 34 + if (__builtin_constant_p(c)) { 35 + if (__builtin_constant_p(n)) { 36 + return __builtin_memset(s, c, n); 37 + } else { 38 + unsigned long c8 = (c & 0xff) * 0x0101010101010101UL; 39 + return __constant_c_memset(s, c8, n); 40 + } 41 + } 42 + return ___memset(s, c, n); 43 + } 44 + 45 + #define memset __memset 34 46 35 47 #define __HAVE_ARCH_STRCPY 36 48 extern char * strcpy(char *,const char *);
+1
arch/alpha/include/uapi/asm/pal.h
··· 46 46 #define PAL_rdusp 58 47 47 #define PAL_whami 60 48 48 #define PAL_retsys 61 49 + #define PAL_wtint 62 49 50 #define PAL_rti 63 50 51 51 52
+1
arch/alpha/kernel/Makefile
··· 16 16 obj-$(CONFIG_SRM_ENV) += srm_env.o 17 17 obj-$(CONFIG_MODULES) += module.o 18 18 obj-$(CONFIG_PERF_EVENTS) += perf_event.o 19 + obj-$(CONFIG_RTC_DRV_ALPHA) += rtc.o 19 20 20 21 ifdef CONFIG_ALPHA_GENERIC 21 22
+1
arch/alpha/kernel/alpha_ksyms.c
··· 40 40 EXPORT_SYMBOL(memmove); 41 41 EXPORT_SYMBOL(__memcpy); 42 42 EXPORT_SYMBOL(__memset); 43 + EXPORT_SYMBOL(___memset); 43 44 EXPORT_SYMBOL(__memsetw); 44 45 EXPORT_SYMBOL(__constant_c_memset); 45 46 EXPORT_SYMBOL(copy_page);
+1 -15
arch/alpha/kernel/irq_alpha.c
··· 66 66 break; 67 67 case 1: 68 68 old_regs = set_irq_regs(regs); 69 - #ifdef CONFIG_SMP 70 - { 71 - long cpu; 72 - 73 - smp_percpu_timer_interrupt(regs); 74 - cpu = smp_processor_id(); 75 - if (cpu != boot_cpuid) { 76 - kstat_incr_irqs_this_cpu(RTC_IRQ, irq_to_desc(RTC_IRQ)); 77 - } else { 78 - handle_irq(RTC_IRQ); 79 - } 80 - } 81 - #else 82 69 handle_irq(RTC_IRQ); 83 - #endif 84 70 set_irq_regs(old_regs); 85 71 return; 86 72 case 2: ··· 214 228 */ 215 229 216 230 struct irqaction timer_irqaction = { 217 - .handler = timer_interrupt, 231 + .handler = rtc_timer_interrupt, 218 232 .name = "timer", 219 233 }; 220 234
+1 -4
arch/alpha/kernel/machvec_impl.h
··· 43 43 #define CAT1(x,y) x##y 44 44 #define CAT(x,y) CAT1(x,y) 45 45 46 - #define DO_DEFAULT_RTC \ 47 - .rtc_port = 0x70, \ 48 - .rtc_get_time = common_get_rtc_time, \ 49 - .rtc_set_time = common_set_rtc_time 46 + #define DO_DEFAULT_RTC .rtc_port = 0x70 50 47 51 48 #define DO_EV4_MMU \ 52 49 .max_asn = EV4_MAX_ASN, \
+13 -2
arch/alpha/kernel/perf_event.c
··· 83 83 long pmc_left[3]; 84 84 /* Subroutine for allocation of PMCs. Enforces constraints. */ 85 85 int (*check_constraints)(struct perf_event **, unsigned long *, int); 86 + /* Subroutine for checking validity of a raw event for this PMU. */ 87 + int (*raw_event_valid)(u64 config); 86 88 }; 87 89 88 90 /* ··· 205 203 } 206 204 207 205 206 + static int ev67_raw_event_valid(u64 config) 207 + { 208 + return config >= EV67_CYCLES && config < EV67_LAST_ET; 209 + }; 210 + 211 + 208 212 static const struct alpha_pmu_t ev67_pmu = { 209 213 .event_map = ev67_perfmon_event_map, 210 214 .max_events = ARRAY_SIZE(ev67_perfmon_event_map), ··· 219 211 .pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0}, 220 212 .pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0}, 221 213 .pmc_left = {16, 4, 0}, 222 - .check_constraints = ev67_check_constraints 214 + .check_constraints = ev67_check_constraints, 215 + .raw_event_valid = ev67_raw_event_valid, 223 216 }; 224 217 225 218 ··· 618 609 } else if (attr->type == PERF_TYPE_HW_CACHE) { 619 610 return -EOPNOTSUPP; 620 611 } else if (attr->type == PERF_TYPE_RAW) { 621 - ev = attr->config & 0xff; 612 + if (!alpha_pmu->raw_event_valid(attr->config)) 613 + return -EINVAL; 614 + ev = attr->config; 622 615 } else { 623 616 return -EOPNOTSUPP; 624 617 }
+17
arch/alpha/kernel/process.c
··· 46 46 void (*pm_power_off)(void) = machine_power_off; 47 47 EXPORT_SYMBOL(pm_power_off); 48 48 49 + #ifdef CONFIG_ALPHA_WTINT 50 + /* 51 + * Sleep the CPU. 52 + * EV6, LCA45 and QEMU know how to power down, skipping N timer interrupts. 53 + */ 54 + void arch_cpu_idle(void) 55 + { 56 + wtint(0); 57 + local_irq_enable(); 58 + } 59 + 60 + void arch_cpu_idle_dead(void) 61 + { 62 + wtint(INT_MAX); 63 + } 64 + #endif /* ALPHA_WTINT */ 65 + 49 66 struct halt_info { 50 67 int mode; 51 68 char *restart_cmd;
+2 -4
arch/alpha/kernel/proto.h
··· 135 135 /* smp.c */ 136 136 extern void setup_smp(void); 137 137 extern void handle_ipi(struct pt_regs *); 138 - extern void smp_percpu_timer_interrupt(struct pt_regs *); 139 138 140 139 /* bios32.c */ 141 140 /* extern void reset_for_srm(void); */ 142 141 143 142 /* time.c */ 144 - extern irqreturn_t timer_interrupt(int irq, void *dev); 143 + extern irqreturn_t rtc_timer_interrupt(int irq, void *dev); 144 + extern void init_clockevent(void); 145 145 extern void common_init_rtc(void); 146 146 extern unsigned long est_cycle_freq; 147 - extern unsigned int common_get_rtc_time(struct rtc_time *time); 148 - extern int common_set_rtc_time(struct rtc_time *time); 149 147 150 148 /* smc37c93x.c */ 151 149 extern void SMC93x_Init(void);
+323
arch/alpha/kernel/rtc.c
··· 1 + /* 2 + * linux/arch/alpha/kernel/rtc.c 3 + * 4 + * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds 5 + * 6 + * This file contains date handling. 7 + */ 8 + #include <linux/errno.h> 9 + #include <linux/init.h> 10 + #include <linux/kernel.h> 11 + #include <linux/param.h> 12 + #include <linux/string.h> 13 + #include <linux/mc146818rtc.h> 14 + #include <linux/bcd.h> 15 + #include <linux/rtc.h> 16 + #include <linux/platform_device.h> 17 + 18 + #include <asm/rtc.h> 19 + 20 + #include "proto.h" 21 + 22 + 23 + /* 24 + * Support for the RTC device. 25 + * 26 + * We don't want to use the rtc-cmos driver, because we don't want to support 27 + * alarms, as that would be indistinguishable from timer interrupts. 28 + * 29 + * Further, generic code is really, really tied to a 1900 epoch. This is 30 + * true in __get_rtc_time as well as the users of struct rtc_time e.g. 31 + * rtc_tm_to_time. Thankfully all of the other epochs in use are later 32 + * than 1900, and so it's easy to adjust. 33 + */ 34 + 35 + static unsigned long rtc_epoch; 36 + 37 + static int __init 38 + specifiy_epoch(char *str) 39 + { 40 + unsigned long epoch = simple_strtoul(str, NULL, 0); 41 + if (epoch < 1900) 42 + printk("Ignoring invalid user specified epoch %lu\n", epoch); 43 + else 44 + rtc_epoch = epoch; 45 + return 1; 46 + } 47 + __setup("epoch=", specifiy_epoch); 48 + 49 + static void __init 50 + init_rtc_epoch(void) 51 + { 52 + int epoch, year, ctrl; 53 + 54 + if (rtc_epoch != 0) { 55 + /* The epoch was specified on the command-line. */ 56 + return; 57 + } 58 + 59 + /* Detect the epoch in use on this computer. 
*/ 60 + ctrl = CMOS_READ(RTC_CONTROL); 61 + year = CMOS_READ(RTC_YEAR); 62 + if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 63 + year = bcd2bin(year); 64 + 65 + /* PC-like is standard; used for year >= 70 */ 66 + epoch = 1900; 67 + if (year < 20) { 68 + epoch = 2000; 69 + } else if (year >= 20 && year < 48) { 70 + /* NT epoch */ 71 + epoch = 1980; 72 + } else if (year >= 48 && year < 70) { 73 + /* Digital UNIX epoch */ 74 + epoch = 1952; 75 + } 76 + rtc_epoch = epoch; 77 + 78 + printk(KERN_INFO "Using epoch %d for rtc year %d\n", epoch, year); 79 + } 80 + 81 + static int 82 + alpha_rtc_read_time(struct device *dev, struct rtc_time *tm) 83 + { 84 + __get_rtc_time(tm); 85 + 86 + /* Adjust for non-default epochs. It's easier to depend on the 87 + generic __get_rtc_time and adjust the epoch here than create 88 + a copy of __get_rtc_time with the edits we need. */ 89 + if (rtc_epoch != 1900) { 90 + int year = tm->tm_year; 91 + /* Undo the century adjustment made in __get_rtc_time. */ 92 + if (year >= 100) 93 + year -= 100; 94 + year += rtc_epoch - 1900; 95 + /* Redo the century adjustment with the epoch in place. */ 96 + if (year <= 69) 97 + year += 100; 98 + tm->tm_year = year; 99 + } 100 + 101 + return rtc_valid_tm(tm); 102 + } 103 + 104 + static int 105 + alpha_rtc_set_time(struct device *dev, struct rtc_time *tm) 106 + { 107 + struct rtc_time xtm; 108 + 109 + if (rtc_epoch != 1900) { 110 + xtm = *tm; 111 + xtm.tm_year -= rtc_epoch - 1900; 112 + tm = &xtm; 113 + } 114 + 115 + return __set_rtc_time(tm); 116 + } 117 + 118 + static int 119 + alpha_rtc_set_mmss(struct device *dev, unsigned long nowtime) 120 + { 121 + int retval = 0; 122 + int real_seconds, real_minutes, cmos_minutes; 123 + unsigned char save_control, save_freq_select; 124 + 125 + /* Note: This code only updates minutes and seconds. Comments 126 + indicate this was to avoid messing with unknown time zones, 127 + and with the epoch nonsense described above. 
In order for 128 + this to work, the existing clock cannot be off by more than 129 + 15 minutes. 130 + 131 + ??? This choice may be out of date. The x86 port does 132 + not have problems with timezones, and the epoch processing has 133 + now been fixed in alpha_rtc_set_time. 134 + 135 + In either case, one can always force a full rtc update with 136 + the userland hwclock program, so surely 15 minute accuracy 137 + is no real burden. */ 138 + 139 + /* In order to set the CMOS clock precisely, we have to be called 140 + 500 ms after the second nowtime has started, because when 141 + nowtime is written into the registers of the CMOS clock, it will 142 + jump to the next second precisely 500 ms later. Check the Motorola 143 + MC146818A or Dallas DS12887 data sheet for details. */ 144 + 145 + /* irq are locally disabled here */ 146 + spin_lock(&rtc_lock); 147 + /* Tell the clock it's being set */ 148 + save_control = CMOS_READ(RTC_CONTROL); 149 + CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); 150 + 151 + /* Stop and reset prescaler */ 152 + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); 153 + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); 154 + 155 + cmos_minutes = CMOS_READ(RTC_MINUTES); 156 + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 157 + cmos_minutes = bcd2bin(cmos_minutes); 158 + 159 + real_seconds = nowtime % 60; 160 + real_minutes = nowtime / 60; 161 + if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1) { 162 + /* correct for half hour time zone */ 163 + real_minutes += 30; 164 + } 165 + real_minutes %= 60; 166 + 167 + if (abs(real_minutes - cmos_minutes) < 30) { 168 + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { 169 + real_seconds = bin2bcd(real_seconds); 170 + real_minutes = bin2bcd(real_minutes); 171 + } 172 + CMOS_WRITE(real_seconds,RTC_SECONDS); 173 + CMOS_WRITE(real_minutes,RTC_MINUTES); 174 + } else { 175 + printk_once(KERN_NOTICE 176 + "set_rtc_mmss: can't update from %d to %d\n", 177 + cmos_minutes, 
real_minutes); 178 + retval = -1; 179 + } 180 + 181 + /* The following flags have to be released exactly in this order, 182 + * otherwise the DS12887 (popular MC146818A clone with integrated 183 + * battery and quartz) will not reset the oscillator and will not 184 + * update precisely 500 ms later. You won't find this mentioned in 185 + * the Dallas Semiconductor data sheets, but who believes data 186 + * sheets anyway ... -- Markus Kuhn 187 + */ 188 + CMOS_WRITE(save_control, RTC_CONTROL); 189 + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 190 + spin_unlock(&rtc_lock); 191 + 192 + return retval; 193 + } 194 + 195 + static int 196 + alpha_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) 197 + { 198 + switch (cmd) { 199 + case RTC_EPOCH_READ: 200 + return put_user(rtc_epoch, (unsigned long __user *)arg); 201 + case RTC_EPOCH_SET: 202 + if (arg < 1900) 203 + return -EINVAL; 204 + rtc_epoch = arg; 205 + return 0; 206 + default: 207 + return -ENOIOCTLCMD; 208 + } 209 + } 210 + 211 + static const struct rtc_class_ops alpha_rtc_ops = { 212 + .read_time = alpha_rtc_read_time, 213 + .set_time = alpha_rtc_set_time, 214 + .set_mmss = alpha_rtc_set_mmss, 215 + .ioctl = alpha_rtc_ioctl, 216 + }; 217 + 218 + /* 219 + * Similarly, except do the actual CMOS access on the boot cpu only. 220 + * This requires marshalling the data across an interprocessor call. 
221 + */ 222 + 223 + #if defined(CONFIG_SMP) && \ 224 + (defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_MARVEL)) 225 + # define HAVE_REMOTE_RTC 1 226 + 227 + union remote_data { 228 + struct rtc_time *tm; 229 + unsigned long now; 230 + long retval; 231 + }; 232 + 233 + static void 234 + do_remote_read(void *data) 235 + { 236 + union remote_data *x = data; 237 + x->retval = alpha_rtc_read_time(NULL, x->tm); 238 + } 239 + 240 + static int 241 + remote_read_time(struct device *dev, struct rtc_time *tm) 242 + { 243 + union remote_data x; 244 + if (smp_processor_id() != boot_cpuid) { 245 + x.tm = tm; 246 + smp_call_function_single(boot_cpuid, do_remote_read, &x, 1); 247 + return x.retval; 248 + } 249 + return alpha_rtc_read_time(NULL, tm); 250 + } 251 + 252 + static void 253 + do_remote_set(void *data) 254 + { 255 + union remote_data *x = data; 256 + x->retval = alpha_rtc_set_time(NULL, x->tm); 257 + } 258 + 259 + static int 260 + remote_set_time(struct device *dev, struct rtc_time *tm) 261 + { 262 + union remote_data x; 263 + if (smp_processor_id() != boot_cpuid) { 264 + x.tm = tm; 265 + smp_call_function_single(boot_cpuid, do_remote_set, &x, 1); 266 + return x.retval; 267 + } 268 + return alpha_rtc_set_time(NULL, tm); 269 + } 270 + 271 + static void 272 + do_remote_mmss(void *data) 273 + { 274 + union remote_data *x = data; 275 + x->retval = alpha_rtc_set_mmss(NULL, x->now); 276 + } 277 + 278 + static int 279 + remote_set_mmss(struct device *dev, unsigned long now) 280 + { 281 + union remote_data x; 282 + if (smp_processor_id() != boot_cpuid) { 283 + x.now = now; 284 + smp_call_function_single(boot_cpuid, do_remote_mmss, &x, 1); 285 + return x.retval; 286 + } 287 + return alpha_rtc_set_mmss(NULL, now); 288 + } 289 + 290 + static const struct rtc_class_ops remote_rtc_ops = { 291 + .read_time = remote_read_time, 292 + .set_time = remote_set_time, 293 + .set_mmss = remote_set_mmss, 294 + .ioctl = alpha_rtc_ioctl, 295 + }; 296 + #endif 297 + 298 + static int __init 
299 + alpha_rtc_init(void) 300 + { 301 + const struct rtc_class_ops *ops; 302 + struct platform_device *pdev; 303 + struct rtc_device *rtc; 304 + const char *name; 305 + 306 + init_rtc_epoch(); 307 + name = "rtc-alpha"; 308 + ops = &alpha_rtc_ops; 309 + 310 + #ifdef HAVE_REMOTE_RTC 311 + if (alpha_mv.rtc_boot_cpu_only) 312 + ops = &remote_rtc_ops; 313 + #endif 314 + 315 + pdev = platform_device_register_simple(name, -1, NULL, 0); 316 + rtc = devm_rtc_device_register(&pdev->dev, name, ops, THIS_MODULE); 317 + if (IS_ERR(rtc)) 318 + return PTR_ERR(rtc); 319 + 320 + platform_set_drvdata(pdev, rtc); 321 + return 0; 322 + } 323 + device_initcall(alpha_rtc_init);
+20 -3
arch/alpha/kernel/setup.c
··· 115 115 116 116 #ifdef CONFIG_ALPHA_GENERIC 117 117 struct alpha_machine_vector alpha_mv; 118 + #endif 119 + 120 + #ifndef alpha_using_srm 118 121 int alpha_using_srm; 119 122 EXPORT_SYMBOL(alpha_using_srm); 123 + #endif 124 + 125 + #ifndef alpha_using_qemu 126 + int alpha_using_qemu; 120 127 #endif 121 128 122 129 static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long, ··· 536 529 atomic_notifier_chain_register(&panic_notifier_list, 537 530 &alpha_panic_block); 538 531 539 - #ifdef CONFIG_ALPHA_GENERIC 532 + #ifndef alpha_using_srm 540 533 /* Assume that we've booted from SRM if we haven't booted from MILO. 541 534 Detect the later by looking for "MILO" in the system serial nr. */ 542 535 alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0; 536 + #endif 537 + #ifndef alpha_using_qemu 538 + /* Similarly, look for QEMU. */ 539 + alpha_using_qemu = strstr((const char *)hwrpb->ssn, "QEMU") != 0; 543 540 #endif 544 541 545 542 /* If we are using SRM, we want to allow callbacks ··· 1218 1207 char *systype_name; 1219 1208 char *sysvariation_name; 1220 1209 int nr_processors; 1210 + unsigned long timer_freq; 1221 1211 1222 1212 cpu_index = (unsigned) (cpu->type - 1); 1223 1213 cpu_name = "Unknown"; ··· 1229 1217 cpu->type, &systype_name, &sysvariation_name); 1230 1218 1231 1219 nr_processors = get_nr_processors(cpu, hwrpb->nr_processors); 1220 + 1221 + #if CONFIG_HZ == 1024 || CONFIG_HZ == 1200 1222 + timer_freq = (100UL * hwrpb->intr_freq) / 4096; 1223 + #else 1224 + timer_freq = 100UL * CONFIG_HZ; 1225 + #endif 1232 1226 1233 1227 seq_printf(f, "cpu\t\t\t: Alpha\n" 1234 1228 "cpu model\t\t: %s\n" ··· 1261 1243 (char*)hwrpb->ssn, 1262 1244 est_cycle_freq ? : hwrpb->cycle_freq, 1263 1245 est_cycle_freq ? "est." : "", 1264 - hwrpb->intr_freq / 4096, 1265 - (100 * hwrpb->intr_freq / 4096) % 100, 1246 + timer_freq / 100, timer_freq % 100, 1266 1247 hwrpb->pagesize, 1267 1248 hwrpb->pa_bits, 1268 1249 hwrpb->max_asn,
+3 -30
arch/alpha/kernel/smp.c
··· 138 138 139 139 /* Get our local ticker going. */ 140 140 smp_setup_percpu_timer(cpuid); 141 + init_clockevent(); 141 142 142 143 /* Call platform-specific callin, if specified */ 143 - if (alpha_mv.smp_callin) alpha_mv.smp_callin(); 144 + if (alpha_mv.smp_callin) 145 + alpha_mv.smp_callin(); 144 146 145 147 /* All kernel threads share the same mm context. */ 146 148 atomic_inc(&init_mm.mm_count); ··· 498 496 num_online_cpus(), 499 497 (bogosum + 2500) / (500000/HZ), 500 498 ((bogosum + 2500) / (5000/HZ)) % 100); 501 - } 502 - 503 - 504 - void 505 - smp_percpu_timer_interrupt(struct pt_regs *regs) 506 - { 507 - struct pt_regs *old_regs; 508 - int cpu = smp_processor_id(); 509 - unsigned long user = user_mode(regs); 510 - struct cpuinfo_alpha *data = &cpu_data[cpu]; 511 - 512 - old_regs = set_irq_regs(regs); 513 - 514 - /* Record kernel PC. */ 515 - profile_tick(CPU_PROFILING); 516 - 517 - if (!--data->prof_counter) { 518 - /* We need to make like a normal interrupt -- otherwise 519 - timer interrupts ignore the global interrupt lock, 520 - which would be a Bad Thing. */ 521 - irq_enter(); 522 - 523 - update_process_times(user); 524 - 525 - data->prof_counter = data->prof_multiplier; 526 - 527 - irq_exit(); 528 - } 529 - set_irq_regs(old_regs); 530 499 } 531 500 532 501 int
-2
arch/alpha/kernel/sys_jensen.c
··· 224 224 .machine_check = jensen_machine_check, 225 225 .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, 226 226 .rtc_port = 0x170, 227 - .rtc_get_time = common_get_rtc_time, 228 - .rtc_set_time = common_set_rtc_time, 229 227 230 228 .nr_irqs = 16, 231 229 .device_interrupt = jensen_device_interrupt,
+1 -54
arch/alpha/kernel/sys_marvel.c
··· 22 22 #include <asm/hwrpb.h> 23 23 #include <asm/tlbflush.h> 24 24 #include <asm/vga.h> 25 - #include <asm/rtc.h> 26 25 27 26 #include "proto.h" 28 27 #include "err_impl.h" ··· 399 400 init_rtc_irq(); 400 401 } 401 402 402 - struct marvel_rtc_time { 403 - struct rtc_time *time; 404 - int retval; 405 - }; 406 - 407 - #ifdef CONFIG_SMP 408 - static void 409 - smp_get_rtc_time(void *data) 410 - { 411 - struct marvel_rtc_time *mrt = data; 412 - mrt->retval = __get_rtc_time(mrt->time); 413 - } 414 - 415 - static void 416 - smp_set_rtc_time(void *data) 417 - { 418 - struct marvel_rtc_time *mrt = data; 419 - mrt->retval = __set_rtc_time(mrt->time); 420 - } 421 - #endif 422 - 423 - static unsigned int 424 - marvel_get_rtc_time(struct rtc_time *time) 425 - { 426 - #ifdef CONFIG_SMP 427 - struct marvel_rtc_time mrt; 428 - 429 - if (smp_processor_id() != boot_cpuid) { 430 - mrt.time = time; 431 - smp_call_function_single(boot_cpuid, smp_get_rtc_time, &mrt, 1); 432 - return mrt.retval; 433 - } 434 - #endif 435 - return __get_rtc_time(time); 436 - } 437 - 438 - static int 439 - marvel_set_rtc_time(struct rtc_time *time) 440 - { 441 - #ifdef CONFIG_SMP 442 - struct marvel_rtc_time mrt; 443 - 444 - if (smp_processor_id() != boot_cpuid) { 445 - mrt.time = time; 446 - smp_call_function_single(boot_cpuid, smp_set_rtc_time, &mrt, 1); 447 - return mrt.retval; 448 - } 449 - #endif 450 - return __set_rtc_time(time); 451 - } 452 - 453 403 static void 454 404 marvel_smp_callin(void) 455 405 { ··· 440 492 .vector_name = "MARVEL/EV7", 441 493 DO_EV7_MMU, 442 494 .rtc_port = 0x70, 443 - .rtc_get_time = marvel_get_rtc_time, 444 - .rtc_set_time = marvel_set_rtc_time, 495 + .rtc_boot_cpu_only = 1, 445 496 DO_MARVEL_IO, 446 497 .machine_check = marvel_machine_check, 447 498 .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+183 -238
arch/alpha/kernel/time.c
··· 3 3 * 4 4 * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds 5 5 * 6 - * This file contains the PC-specific time handling details: 7 - * reading the RTC at bootup, etc.. 8 - * 1994-07-02 Alan Modra 9 - * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime 10 - * 1995-03-26 Markus Kuhn 11 - * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887 12 - * precision CMOS clock update 6 + * This file contains the clocksource time handling. 13 7 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 14 8 * "A Kernel Model for Precision Timekeeping" by Dave Mills 15 9 * 1997-01-09 Adrian Sun ··· 15 21 * 1999-04-16 Thorsten Kranzkowski (dl8bcu@gmx.net) 16 22 * fixed algorithm in do_gettimeofday() for calculating the precise time 17 23 * from processor cycle counter (now taking lost_ticks into account) 18 - * 2000-08-13 Jan-Benedict Glaw <jbglaw@lug-owl.de> 19 - * Fixed time_init to be aware of epoches != 1900. This prevents 20 - * booting up in 2048 for me;) Code is stolen from rtc.c. 21 24 * 2003-06-03 R. Scott Bailey <scott.bailey@eds.com> 22 25 * Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM 23 26 */ ··· 37 46 #include <asm/uaccess.h> 38 47 #include <asm/io.h> 39 48 #include <asm/hwrpb.h> 40 - #include <asm/rtc.h> 41 49 42 50 #include <linux/mc146818rtc.h> 43 51 #include <linux/time.h> 44 52 #include <linux/timex.h> 45 53 #include <linux/clocksource.h> 54 + #include <linux/clockchips.h> 46 55 47 56 #include "proto.h" 48 57 #include "irq_impl.h" 49 58 50 - static int set_rtc_mmss(unsigned long); 51 - 52 59 DEFINE_SPINLOCK(rtc_lock); 53 60 EXPORT_SYMBOL(rtc_lock); 54 - 55 - #define TICK_SIZE (tick_nsec / 1000) 56 - 57 - /* 58 - * Shift amount by which scaled_ticks_per_cycle is scaled. Shifting 59 - * by 48 gives us 16 bits for HZ while keeping the accuracy good even 60 - * for large CPU clock rates. 
61 - */ 62 - #define FIX_SHIFT 48 63 - 64 - /* lump static variables together for more efficient access: */ 65 - static struct { 66 - /* cycle counter last time it got invoked */ 67 - __u32 last_time; 68 - /* ticks/cycle * 2^48 */ 69 - unsigned long scaled_ticks_per_cycle; 70 - /* partial unused tick */ 71 - unsigned long partial_tick; 72 - } state; 73 61 74 62 unsigned long est_cycle_freq; 75 63 ··· 78 108 return __builtin_alpha_rpcc(); 79 109 } 80 110 81 - int update_persistent_clock(struct timespec now) 82 - { 83 - return set_rtc_mmss(now.tv_sec); 84 - } 85 111 86 - void read_persistent_clock(struct timespec *ts) 87 - { 88 - unsigned int year, mon, day, hour, min, sec, epoch; 89 - 90 - sec = CMOS_READ(RTC_SECONDS); 91 - min = CMOS_READ(RTC_MINUTES); 92 - hour = CMOS_READ(RTC_HOURS); 93 - day = CMOS_READ(RTC_DAY_OF_MONTH); 94 - mon = CMOS_READ(RTC_MONTH); 95 - year = CMOS_READ(RTC_YEAR); 96 - 97 - if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { 98 - sec = bcd2bin(sec); 99 - min = bcd2bin(min); 100 - hour = bcd2bin(hour); 101 - day = bcd2bin(day); 102 - mon = bcd2bin(mon); 103 - year = bcd2bin(year); 104 - } 105 - 106 - /* PC-like is standard; used for year >= 70 */ 107 - epoch = 1900; 108 - if (year < 20) 109 - epoch = 2000; 110 - else if (year >= 20 && year < 48) 111 - /* NT epoch */ 112 - epoch = 1980; 113 - else if (year >= 48 && year < 70) 114 - /* Digital UNIX epoch */ 115 - epoch = 1952; 116 - 117 - printk(KERN_INFO "Using epoch = %d\n", epoch); 118 - 119 - if ((year += epoch) < 1970) 120 - year += 100; 121 - 122 - ts->tv_sec = mktime(year, mon, day, hour, min, sec); 123 - ts->tv_nsec = 0; 124 - } 125 - 126 - 127 - 112 + 128 113 /* 129 - * timer_interrupt() needs to keep up the real-time clock, 130 - * as well as call the "xtime_update()" routine every clocktick 114 + * The RTC as a clock_event_device primitive. 
131 115 */ 132 - irqreturn_t timer_interrupt(int irq, void *dev) 116 + 117 + static DEFINE_PER_CPU(struct clock_event_device, cpu_ce); 118 + 119 + irqreturn_t 120 + rtc_timer_interrupt(int irq, void *dev) 133 121 { 134 - unsigned long delta; 135 - __u32 now; 136 - long nticks; 122 + int cpu = smp_processor_id(); 123 + struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); 137 124 138 - #ifndef CONFIG_SMP 139 - /* Not SMP, do kernel PC profiling here. */ 140 - profile_tick(CPU_PROFILING); 141 - #endif 142 - 143 - /* 144 - * Calculate how many ticks have passed since the last update, 145 - * including any previous partial leftover. Save any resulting 146 - * fraction for the next pass. 147 - */ 148 - now = rpcc(); 149 - delta = now - state.last_time; 150 - state.last_time = now; 151 - delta = delta * state.scaled_ticks_per_cycle + state.partial_tick; 152 - state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1); 153 - nticks = delta >> FIX_SHIFT; 154 - 155 - if (nticks) 156 - xtime_update(nticks); 125 + /* Don't run the hook for UNUSED or SHUTDOWN. */ 126 + if (likely(ce->mode == CLOCK_EVT_MODE_PERIODIC)) 127 + ce->event_handler(ce); 157 128 158 129 if (test_irq_work_pending()) { 159 130 clear_irq_work_pending(); 160 131 irq_work_run(); 161 132 } 162 133 163 - #ifndef CONFIG_SMP 164 - while (nticks--) 165 - update_process_times(user_mode(get_irq_regs())); 166 - #endif 167 - 168 134 return IRQ_HANDLED; 169 135 } 170 136 137 + static void 138 + rtc_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce) 139 + { 140 + /* The mode member of CE is updated in generic code. 141 + Since we only support periodic events, nothing to do. */ 142 + } 143 + 144 + static int 145 + rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce) 146 + { 147 + /* This hook is for oneshot mode, which we don't support. 
*/ 148 + return -EINVAL; 149 + } 150 + 151 + static void __init 152 + init_rtc_clockevent(void) 153 + { 154 + int cpu = smp_processor_id(); 155 + struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); 156 + 157 + *ce = (struct clock_event_device){ 158 + .name = "rtc", 159 + .features = CLOCK_EVT_FEAT_PERIODIC, 160 + .rating = 100, 161 + .cpumask = cpumask_of(cpu), 162 + .set_mode = rtc_ce_set_mode, 163 + .set_next_event = rtc_ce_set_next_event, 164 + }; 165 + 166 + clockevents_config_and_register(ce, CONFIG_HZ, 0, 0); 167 + } 168 + 169 + 170 + /* 171 + * The QEMU clock as a clocksource primitive. 172 + */ 173 + 174 + static cycle_t 175 + qemu_cs_read(struct clocksource *cs) 176 + { 177 + return qemu_get_vmtime(); 178 + } 179 + 180 + static struct clocksource qemu_cs = { 181 + .name = "qemu", 182 + .rating = 400, 183 + .read = qemu_cs_read, 184 + .mask = CLOCKSOURCE_MASK(64), 185 + .flags = CLOCK_SOURCE_IS_CONTINUOUS, 186 + .max_idle_ns = LONG_MAX 187 + }; 188 + 189 + 190 + /* 191 + * The QEMU alarm as a clock_event_device primitive. 192 + */ 193 + 194 + static void 195 + qemu_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce) 196 + { 197 + /* The mode member of CE is updated for us in generic code. 198 + Just make sure that the event is disabled. 
*/ 199 + qemu_set_alarm_abs(0); 200 + } 201 + 202 + static int 203 + qemu_ce_set_next_event(unsigned long evt, struct clock_event_device *ce) 204 + { 205 + qemu_set_alarm_rel(evt); 206 + return 0; 207 + } 208 + 209 + static irqreturn_t 210 + qemu_timer_interrupt(int irq, void *dev) 211 + { 212 + int cpu = smp_processor_id(); 213 + struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); 214 + 215 + ce->event_handler(ce); 216 + return IRQ_HANDLED; 217 + } 218 + 219 + static void __init 220 + init_qemu_clockevent(void) 221 + { 222 + int cpu = smp_processor_id(); 223 + struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); 224 + 225 + *ce = (struct clock_event_device){ 226 + .name = "qemu", 227 + .features = CLOCK_EVT_FEAT_ONESHOT, 228 + .rating = 400, 229 + .cpumask = cpumask_of(cpu), 230 + .set_mode = qemu_ce_set_mode, 231 + .set_next_event = qemu_ce_set_next_event, 232 + }; 233 + 234 + clockevents_config_and_register(ce, NSEC_PER_SEC, 1000, LONG_MAX); 235 + } 236 + 237 + 171 238 void __init 172 239 common_init_rtc(void) 173 240 { 174 - unsigned char x; 241 + unsigned char x, sel = 0; 175 242 176 243 /* Reset periodic interrupt frequency. */ 177 - x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f; 178 - /* Test includes known working values on various platforms 179 - where 0x26 is wrong; we refuse to change those. */ 180 - if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) { 181 - printk("Setting RTC_FREQ to 1024 Hz (%x)\n", x); 182 - CMOS_WRITE(0x26, RTC_FREQ_SELECT); 244 + #if CONFIG_HZ == 1024 || CONFIG_HZ == 1200 245 + x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f; 246 + /* Test includes known working values on various platforms 247 + where 0x26 is wrong; we refuse to change those. 
*/ 248 + if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) { 249 + sel = RTC_REF_CLCK_32KHZ + 6; 183 250 } 251 + #elif CONFIG_HZ == 256 || CONFIG_HZ == 128 || CONFIG_HZ == 64 || CONFIG_HZ == 32 252 + sel = RTC_REF_CLCK_32KHZ + __builtin_ffs(32768 / CONFIG_HZ); 253 + #else 254 + # error "Unknown HZ from arch/alpha/Kconfig" 255 + #endif 256 + if (sel) { 257 + printk(KERN_INFO "Setting RTC_FREQ to %d Hz (%x)\n", 258 + CONFIG_HZ, sel); 259 + CMOS_WRITE(sel, RTC_FREQ_SELECT); 260 + } 184 261 185 262 /* Turn on periodic interrupts. */ 186 263 x = CMOS_READ(RTC_CONTROL); ··· 250 233 init_rtc_irq(); 251 234 } 252 235 253 - unsigned int common_get_rtc_time(struct rtc_time *time) 236 + 237 + #ifndef CONFIG_ALPHA_WTINT 238 + /* 239 + * The RPCC as a clocksource primitive. 240 + * 241 + * While we have free-running timecounters running on all CPUs, and we make 242 + * a half-hearted attempt in init_rtc_rpcc_info to sync the timecounter 243 + * with the wall clock, that initialization isn't kept up-to-date across 244 + * different time counters in SMP mode. Therefore we can only use this 245 + * method when there's only one CPU enabled. 246 + * 247 + * When using the WTINT PALcall, the RPCC may shift to a lower frequency, 248 + * or stop altogether, while waiting for the interrupt. Therefore we cannot 249 + * use this method when WTINT is in use. 250 + */ 251 + 252 + static cycle_t read_rpcc(struct clocksource *cs) 254 253 { 255 - return __get_rtc_time(time); 254 + return rpcc(); 256 255 } 257 256 258 - int common_set_rtc_time(struct rtc_time *time) 259 - { 260 - return __set_rtc_time(time); 261 - } 257 + static struct clocksource clocksource_rpcc = { 258 + .name = "rpcc", 259 + .rating = 300, 260 + .read = read_rpcc, 261 + .mask = CLOCKSOURCE_MASK(32), 262 + .flags = CLOCK_SOURCE_IS_CONTINUOUS 263 + }; 264 + #endif /* ALPHA_WTINT */ 262 265 266 + 263 267 /* Validate a computed cycle counter result against the known bounds for 264 268 the given processor core. 
There's too much brokenness in the way of 265 269 timing hardware for any one method to work everywhere. :-( ··· 391 353 return rpcc(); 392 354 } 393 355 394 - #ifndef CONFIG_SMP 395 - /* Until and unless we figure out how to get cpu cycle counters 396 - in sync and keep them there, we can't use the rpcc. */ 397 - static cycle_t read_rpcc(struct clocksource *cs) 398 - { 399 - cycle_t ret = (cycle_t)rpcc(); 400 - return ret; 401 - } 402 - 403 - static struct clocksource clocksource_rpcc = { 404 - .name = "rpcc", 405 - .rating = 300, 406 - .read = read_rpcc, 407 - .mask = CLOCKSOURCE_MASK(32), 408 - .flags = CLOCK_SOURCE_IS_CONTINUOUS 409 - }; 410 - 411 - static inline void register_rpcc_clocksource(long cycle_freq) 412 - { 413 - clocksource_register_hz(&clocksource_rpcc, cycle_freq); 414 - } 415 - #else /* !CONFIG_SMP */ 416 - static inline void register_rpcc_clocksource(long cycle_freq) 417 - { 418 - } 419 - #endif /* !CONFIG_SMP */ 420 - 421 356 void __init 422 357 time_init(void) 423 358 { 424 359 unsigned int cc1, cc2; 425 360 unsigned long cycle_freq, tolerance; 426 361 long diff; 362 + 363 + if (alpha_using_qemu) { 364 + clocksource_register_hz(&qemu_cs, NSEC_PER_SEC); 365 + init_qemu_clockevent(); 366 + 367 + timer_irqaction.handler = qemu_timer_interrupt; 368 + init_rtc_irq(); 369 + return; 370 + } 427 371 428 372 /* Calibrate CPU clock -- attempt #1. */ 429 373 if (!est_cycle_freq) ··· 441 421 "and unable to estimate a proper value!\n"); 442 422 } 443 423 444 - /* From John Bowman <bowman@math.ualberta.ca>: allow the values 445 - to settle, as the Update-In-Progress bit going low isn't good 446 - enough on some hardware. 2ms is our guess; we haven't found 447 - bogomips yet, but this is close on a 500Mhz box. 
*/ 448 - __delay(1000000); 449 - 450 - 451 - if (HZ > (1<<16)) { 452 - extern void __you_loose (void); 453 - __you_loose(); 454 - } 455 - 456 - register_rpcc_clocksource(cycle_freq); 457 - 458 - state.last_time = cc1; 459 - state.scaled_ticks_per_cycle 460 - = ((unsigned long) HZ << FIX_SHIFT) / cycle_freq; 461 - state.partial_tick = 0L; 424 + /* See above for restrictions on using clocksource_rpcc. */ 425 + #ifndef CONFIG_ALPHA_WTINT 426 + if (hwrpb->nr_processors == 1) 427 + clocksource_register_hz(&clocksource_rpcc, cycle_freq); 428 + #endif 462 429 463 430 /* Startup the timer source. */ 464 431 alpha_mv.init_rtc(); 432 + init_rtc_clockevent(); 465 433 } 466 434 467 - /* 468 - * In order to set the CMOS clock precisely, set_rtc_mmss has to be 469 - * called 500 ms after the second nowtime has started, because when 470 - * nowtime is written into the registers of the CMOS clock, it will 471 - * jump to the next second precisely 500 ms later. Check the Motorola 472 - * MC146818A or Dallas DS12887 data sheet for details. 473 - * 474 - * BUG: This routine does not handle hour overflow properly; it just 475 - * sets the minutes. Usually you won't notice until after reboot! 476 - */ 477 - 478 - 479 - static int 480 - set_rtc_mmss(unsigned long nowtime) 435 + /* Initialize the clock_event_device for secondary cpus. 
*/ 436 + #ifdef CONFIG_SMP 437 + void __init 438 + init_clockevent(void) 481 439 { 482 - int retval = 0; 483 - int real_seconds, real_minutes, cmos_minutes; 484 - unsigned char save_control, save_freq_select; 485 - 486 - /* irq are locally disabled here */ 487 - spin_lock(&rtc_lock); 488 - /* Tell the clock it's being set */ 489 - save_control = CMOS_READ(RTC_CONTROL); 490 - CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); 491 - 492 - /* Stop and reset prescaler */ 493 - save_freq_select = CMOS_READ(RTC_FREQ_SELECT); 494 - CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); 495 - 496 - cmos_minutes = CMOS_READ(RTC_MINUTES); 497 - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 498 - cmos_minutes = bcd2bin(cmos_minutes); 499 - 500 - /* 501 - * since we're only adjusting minutes and seconds, 502 - * don't interfere with hour overflow. This avoids 503 - * messing with unknown time zones but requires your 504 - * RTC not to be off by more than 15 minutes 505 - */ 506 - real_seconds = nowtime % 60; 507 - real_minutes = nowtime / 60; 508 - if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) { 509 - /* correct for half hour time zone */ 510 - real_minutes += 30; 511 - } 512 - real_minutes %= 60; 513 - 514 - if (abs(real_minutes - cmos_minutes) < 30) { 515 - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { 516 - real_seconds = bin2bcd(real_seconds); 517 - real_minutes = bin2bcd(real_minutes); 518 - } 519 - CMOS_WRITE(real_seconds,RTC_SECONDS); 520 - CMOS_WRITE(real_minutes,RTC_MINUTES); 521 - } else { 522 - printk_once(KERN_NOTICE 523 - "set_rtc_mmss: can't update from %d to %d\n", 524 - cmos_minutes, real_minutes); 525 - retval = -1; 526 - } 527 - 528 - /* The following flags have to be released exactly in this order, 529 - * otherwise the DS12887 (popular MC146818A clone with integrated 530 - * battery and quartz) will not reset the oscillator and will not 531 - * update precisely 500 ms later. 
You won't find this mentioned in 532 - * the Dallas Semiconductor data sheets, but who believes data 533 - * sheets anyway ... -- Markus Kuhn 534 - */ 535 - CMOS_WRITE(save_control, RTC_CONTROL); 536 - CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 537 - spin_unlock(&rtc_lock); 538 - 539 - return retval; 440 + if (alpha_using_qemu) 441 + init_qemu_clockevent(); 442 + else 443 + init_rtc_clockevent(); 540 444 } 445 + #endif
+15
arch/alpha/kernel/traps.c
··· 241 241 (const char *)(data[1] | (long)data[2] << 32), 242 242 data[0]); 243 243 } 244 + #ifdef CONFIG_ALPHA_WTINT 245 + if (type == 4) { 246 + /* If CALL_PAL WTINT is totally unsupported by the 247 + PALcode, e.g. MILO, "emulate" it by overwriting 248 + the insn. */ 249 + unsigned int *pinsn 250 + = (unsigned int *) regs->pc - 1; 251 + if (*pinsn == PAL_wtint) { 252 + *pinsn = 0x47e01400; /* mov 0,$0 */ 253 + imb(); 254 + regs->r0 = 0; 255 + return; 256 + } 257 + } 258 + #endif /* ALPHA_WTINT */ 244 259 die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"), 245 260 regs, type, NULL); 246 261 }
+5 -5
arch/alpha/lib/csum_partial_copy.c
··· 130 130 *dst = word | tmp; 131 131 checksum += carry; 132 132 } 133 - if (err) *errp = err; 133 + if (err && errp) *errp = err; 134 134 return checksum; 135 135 } 136 136 ··· 185 185 *dst = word | tmp; 186 186 checksum += carry; 187 187 } 188 - if (err) *errp = err; 188 + if (err && errp) *errp = err; 189 189 return checksum; 190 190 } 191 191 ··· 242 242 stq_u(partial_dest | second_dest, dst); 243 243 out: 244 244 checksum += carry; 245 - if (err) *errp = err; 245 + if (err && errp) *errp = err; 246 246 return checksum; 247 247 } 248 248 ··· 325 325 stq_u(partial_dest | word | second_dest, dst); 326 326 checksum += carry; 327 327 } 328 - if (err) *errp = err; 328 + if (err && errp) *errp = err; 329 329 return checksum; 330 330 } 331 331 ··· 339 339 340 340 if (len) { 341 341 if (!access_ok(VERIFY_READ, src, len)) { 342 - *errp = -EFAULT; 342 + if (errp) *errp = -EFAULT; 343 343 memset(dst, 0, len); 344 344 return sum; 345 345 }
+7 -5
arch/alpha/lib/ev6-memset.S
··· 30 30 .set noat 31 31 .set noreorder 32 32 .text 33 + .globl memset 33 34 .globl __memset 35 + .globl ___memset 34 36 .globl __memsetw 35 37 .globl __constant_c_memset 36 - .globl memset 37 38 38 - .ent __memset 39 + .ent ___memset 39 40 .align 5 40 - __memset: 41 + ___memset: 41 42 .frame $30,0,$26,0 42 43 .prologue 0 43 44 ··· 228 227 nop 229 228 nop 230 229 ret $31,($26),1 # L0 : 231 - .end __memset 230 + .end ___memset 232 231 233 232 /* 234 233 * This is the original body of code, prior to replication and ··· 595 594 596 595 .end __memsetw 597 596 598 - memset = __memset 597 + memset = ___memset 598 + __memset = ___memset
+7 -4
arch/alpha/lib/memset.S
··· 19 19 .text 20 20 .globl memset 21 21 .globl __memset 22 + .globl ___memset 22 23 .globl __memsetw 23 24 .globl __constant_c_memset 24 - .ent __memset 25 + 26 + .ent ___memset 25 27 .align 5 26 - __memset: 28 + ___memset: 27 29 .frame $30,0,$26,0 28 30 .prologue 0 29 31 ··· 105 103 106 104 end: 107 105 ret $31,($26),1 /* E1 */ 108 - .end __memset 106 + .end ___memset 109 107 110 108 .align 5 111 109 .ent __memsetw ··· 123 121 124 122 .end __memsetw 125 123 126 - memset = __memset 124 + memset = ___memset 125 + __memset = ___memset
+9 -1
drivers/rtc/Kconfig
··· 626 626 627 627 config RTC_DRV_CMOS 628 628 tristate "PC-style 'CMOS'" 629 - depends on X86 || ALPHA || ARM || M32R || ATARI || PPC || MIPS || SPARC64 629 + depends on X86 || ARM || M32R || ATARI || PPC || MIPS || SPARC64 630 630 default y if X86 631 631 help 632 632 Say "yes" here to get direct support for the real time clock ··· 642 642 643 643 This driver can also be built as a module. If so, the module 644 644 will be called rtc-cmos. 645 + 646 + config RTC_DRV_ALPHA 647 + bool "Alpha PC-style CMOS" 648 + depends on ALPHA 649 + default y 650 + help 651 + Direct support for the real-time clock found on every Alpha 652 + system, specifically MC146818 compatibles. If in doubt, say Y. 645 653 646 654 config RTC_DRV_VRTC 647 655 tristate "Virtual RTC for Intel MID platforms"