Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/time,idle: get rid of unsigned long long

Get rid of unsigned long long and use unsigned long instead
everywhere. The use of unsigned long long is a leftover from
31-bit kernel support.

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>

+46 -46
+6 -6
arch/s390/include/asm/idle.h
··· 14 14 15 15 struct s390_idle_data { 16 16 seqcount_t seqcount; 17 - unsigned long long idle_count; 18 - unsigned long long idle_time; 19 - unsigned long long clock_idle_enter; 20 - unsigned long long clock_idle_exit; 21 - unsigned long long timer_idle_enter; 22 - unsigned long long timer_idle_exit; 17 + unsigned long idle_count; 18 + unsigned long idle_time; 19 + unsigned long clock_idle_enter; 20 + unsigned long clock_idle_exit; 21 + unsigned long timer_idle_enter; 22 + unsigned long timer_idle_exit; 23 23 unsigned long mt_cycles_enter[8]; 24 24 }; 25 25
+18 -18
arch/s390/include/asm/timex.h
··· 98 98 99 99 /* Query TOD offset result */ 100 100 struct ptff_qto { 101 - unsigned long long physical_clock; 102 - unsigned long long tod_offset; 103 - unsigned long long logical_tod_offset; 104 - unsigned long long tod_epoch_difference; 101 + unsigned long physical_clock; 102 + unsigned long tod_offset; 103 + unsigned long logical_tod_offset; 104 + unsigned long tod_epoch_difference; 105 105 } __packed; 106 106 107 107 static inline int ptff_query(unsigned int nr) ··· 151 151 rc; \ 152 152 }) 153 153 154 - static inline unsigned long long local_tick_disable(void) 154 + static inline unsigned long local_tick_disable(void) 155 155 { 156 - unsigned long long old; 156 + unsigned long old; 157 157 158 158 old = S390_lowcore.clock_comparator; 159 159 S390_lowcore.clock_comparator = clock_comparator_max; ··· 161 161 return old; 162 162 } 163 163 164 - static inline void local_tick_enable(unsigned long long comp) 164 + static inline void local_tick_enable(unsigned long comp) 165 165 { 166 166 S390_lowcore.clock_comparator = comp; 167 167 set_clock_comparator(S390_lowcore.clock_comparator); ··· 169 169 170 170 #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ 171 171 172 - typedef unsigned long long cycles_t; 172 + typedef unsigned long cycles_t; 173 173 174 - static inline unsigned long long get_tod_clock(void) 174 + static inline unsigned long get_tod_clock(void) 175 175 { 176 176 union tod_clock clk; 177 177 ··· 179 179 return clk.tod; 180 180 } 181 181 182 - static inline unsigned long long get_tod_clock_fast(void) 182 + static inline unsigned long get_tod_clock_fast(void) 183 183 { 184 184 #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES 185 - unsigned long long clk; 185 + unsigned long clk; 186 186 187 187 asm volatile("stckf %0" : "=Q" (clk) : : "cc"); 188 188 return clk; ··· 208 208 * Therefore preemption must be disabled, otherwise the returned 209 209 * value is not guaranteed to be monotonic. 
210 210 */ 211 - static inline unsigned long long get_tod_clock_monotonic(void) 211 + static inline unsigned long get_tod_clock_monotonic(void) 212 212 { 213 - unsigned long long tod; 213 + unsigned long tod; 214 214 215 215 preempt_disable_notrace(); 216 216 tod = get_tod_clock() - tod_clock_base.tod; ··· 237 237 * -> ns = (th * 125) + ((tl * 125) >> 9); 238 238 * 239 239 */ 240 - static inline unsigned long long tod_to_ns(unsigned long long todval) 240 + static inline unsigned long tod_to_ns(unsigned long todval) 241 241 { 242 242 return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9); 243 243 } ··· 249 249 * 250 250 * Returns: true if a is later than b 251 251 */ 252 - static inline int tod_after(unsigned long long a, unsigned long long b) 252 + static inline int tod_after(unsigned long a, unsigned long b) 253 253 { 254 254 if (MACHINE_HAS_SCC) 255 - return (long long) a > (long long) b; 255 + return (long) a > (long) b; 256 256 return a > b; 257 257 } 258 258 ··· 263 263 * 264 264 * Returns: true if a is later than b 265 265 */ 266 - static inline int tod_after_eq(unsigned long long a, unsigned long long b) 266 + static inline int tod_after_eq(unsigned long a, unsigned long b) 267 267 { 268 268 if (MACHINE_HAS_SCC) 269 - return (long long) a >= (long long) b; 269 + return (long) a >= (long) b; 270 270 return a >= b; 271 271 } 272 272
+6 -6
arch/s390/kernel/idle.c
··· 47 47 void arch_cpu_idle(void) 48 48 { 49 49 struct s390_idle_data *idle = this_cpu_ptr(&s390_idle); 50 - unsigned long long idle_time; 50 + unsigned long idle_time; 51 51 unsigned long psw_mask; 52 52 53 53 /* Wait for external, I/O or machine check interrupt. */ ··· 73 73 struct device_attribute *attr, char *buf) 74 74 { 75 75 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); 76 - unsigned long long idle_count; 76 + unsigned long idle_count; 77 77 unsigned int seq; 78 78 79 79 do { ··· 82 82 if (READ_ONCE(idle->clock_idle_enter)) 83 83 idle_count++; 84 84 } while (read_seqcount_retry(&idle->seqcount, seq)); 85 - return sprintf(buf, "%llu\n", idle_count); 85 + return sprintf(buf, "%lu\n", idle_count); 86 86 } 87 87 DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); 88 88 89 89 static ssize_t show_idle_time(struct device *dev, 90 90 struct device_attribute *attr, char *buf) 91 91 { 92 - unsigned long long now, idle_time, idle_enter, idle_exit, in_idle; 92 + unsigned long now, idle_time, idle_enter, idle_exit, in_idle; 93 93 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); 94 94 unsigned int seq; 95 95 ··· 109 109 } 110 110 } 111 111 idle_time += in_idle; 112 - return sprintf(buf, "%llu\n", idle_time >> 12); 112 + return sprintf(buf, "%lu\n", idle_time >> 12); 113 113 } 114 114 DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); 115 115 116 116 u64 arch_cpu_idle_time(int cpu) 117 117 { 118 118 struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); 119 - unsigned long long now, idle_enter, idle_exit, in_idle; 119 + unsigned long now, idle_enter, idle_exit, in_idle; 120 120 unsigned int seq; 121 121 122 122 do {
+14 -14
arch/s390/kernel/time.c
··· 68 68 69 69 unsigned char ptff_function_mask[16]; 70 70 71 - static unsigned long long lpar_offset; 72 - static unsigned long long initial_leap_seconds; 73 - static unsigned long long tod_steering_end; 74 - static long long tod_steering_delta; 71 + static unsigned long lpar_offset; 72 + static unsigned long initial_leap_seconds; 73 + static unsigned long tod_steering_end; 74 + static long tod_steering_delta; 75 75 76 76 /* 77 77 * Get time offsets with PTFF ··· 96 96 97 97 /* get initial leap seconds */ 98 98 if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0) 99 - initial_leap_seconds = (unsigned long long) 99 + initial_leap_seconds = (unsigned long) 100 100 ((long) qui.old_leap * 4096000000L); 101 101 } 102 102 ··· 222 222 223 223 static u64 read_tod_clock(struct clocksource *cs) 224 224 { 225 - unsigned long long now, adj; 225 + unsigned long now, adj; 226 226 227 227 preempt_disable(); /* protect from changes to steering parameters */ 228 228 now = get_tod_clock(); ··· 362 362 * Apply clock delta to the global data structures. 363 363 * This is called once on the CPU that performed the clock sync. 364 364 */ 365 - static void clock_sync_global(unsigned long long delta) 365 + static void clock_sync_global(unsigned long delta) 366 366 { 367 367 unsigned long now, adj; 368 368 struct ptff_qto qto; ··· 378 378 -(adj >> 15) : (adj >> 15); 379 379 tod_steering_delta += delta; 380 380 if ((abs(tod_steering_delta) >> 48) != 0) 381 - panic("TOD clock sync offset %lli is too large to drift\n", 381 + panic("TOD clock sync offset %li is too large to drift\n", 382 382 tod_steering_delta); 383 383 tod_steering_end = now + (abs(tod_steering_delta) << 15); 384 384 vdso_data->arch_data.tod_steering_end = tod_steering_end; ··· 394 394 * Apply clock delta to the per-CPU data structures of this CPU. 395 395 * This is called for each online CPU after the call to clock_sync_global. 
396 396 */ 397 - static void clock_sync_local(unsigned long long delta) 397 + static void clock_sync_local(unsigned long delta) 398 398 { 399 399 /* Add the delta to the clock comparator. */ 400 400 if (S390_lowcore.clock_comparator != clock_comparator_max) { ··· 418 418 struct clock_sync_data { 419 419 atomic_t cpus; 420 420 int in_sync; 421 - unsigned long long clock_delta; 421 + unsigned long clock_delta; 422 422 }; 423 423 424 424 /* ··· 538 538 static int stp_sync_clock(void *data) 539 539 { 540 540 struct clock_sync_data *sync = data; 541 - unsigned long long clock_delta, flags; 541 + u64 clock_delta, flags; 542 542 static int first; 543 543 int rc; 544 544 ··· 720 720 721 721 mutex_lock(&stp_mutex); 722 722 if (stpinfo_valid()) 723 - ret = sprintf(buf, "%016llx\n", 724 - *(unsigned long long *) stp_info.ctnid); 723 + ret = sprintf(buf, "%016lx\n", 724 + *(unsigned long *) stp_info.ctnid); 725 725 mutex_unlock(&stp_mutex); 726 726 return ret; 727 727 } ··· 794 794 if (!stzi.lsoib.p) 795 795 return sprintf(buf, "0,0\n"); 796 796 797 - return sprintf(buf, "%llu,%d\n", 797 + return sprintf(buf, "%lu,%d\n", 798 798 tod_to_ns(stzi.lsoib.nlsout - TOD_UNIX_EPOCH) / NSEC_PER_SEC, 799 799 stzi.lsoib.nlso - stzi.lsoib.also); 800 800 }
+1 -1
arch/s390/kvm/interrupt.c
··· 1287 1287 /* already expired? */ 1288 1288 if (cputm >> 63) 1289 1289 return 0; 1290 - return min(sltime, tod_to_ns(cputm)); 1290 + return min_t(u64, sltime, tod_to_ns(cputm)); 1291 1291 } 1292 1292 } else if (cpu_timer_interrupts_enabled(vcpu)) { 1293 1293 sltime = kvm_s390_get_cpu_timer(vcpu);
+1 -1
drivers/s390/cio/device_fsm.c
··· 47 47 orb = &private->orb; 48 48 cc = stsch(sch->schid, &schib); 49 49 50 - printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " 50 + printk(KERN_WARNING "cio: ccw device timeout occurred at %lx, " 51 51 "device information:\n", get_tod_clock()); 52 52 printk(KERN_WARNING "cio: orb:\n"); 53 53 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,