Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/time: rename tod clock access functions

Fix name clash with some common code device drivers and add "tod"
to all tod clock access function names.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Authored by Heiko Carstens; committed by Martin Schwidefsky.
1aae0560 58fece78

+106 -106
+1 -1
arch/s390/appldata/appldata_mem.c
··· 108 108 mem_data->totalswap = P2K(val.totalswap); 109 109 mem_data->freeswap = P2K(val.freeswap); 110 110 111 - mem_data->timestamp = get_clock(); 111 + mem_data->timestamp = get_tod_clock(); 112 112 mem_data->sync_count_2++; 113 113 } 114 114
+1 -1
arch/s390/appldata/appldata_net_sum.c
··· 111 111 net_data->tx_dropped = tx_dropped; 112 112 net_data->collisions = collisions; 113 113 114 - net_data->timestamp = get_clock(); 114 + net_data->timestamp = get_tod_clock(); 115 115 net_data->sync_count_2++; 116 116 } 117 117
+1 -1
arch/s390/appldata/appldata_os.c
··· 156 156 } 157 157 ops.size = new_size; 158 158 } 159 - os_data->timestamp = get_clock(); 159 + os_data->timestamp = get_tod_clock(); 160 160 os_data->sync_count_2++; 161 161 } 162 162
+1 -1
arch/s390/hypfs/hypfs_vm.c
··· 245 245 d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr)); 246 246 if (IS_ERR(d2fc)) 247 247 return PTR_ERR(d2fc); 248 - get_clock_ext(d2fc->hdr.tod_ext); 248 + get_tod_clock_ext(d2fc->hdr.tod_ext); 249 249 d2fc->hdr.len = count * sizeof(struct diag2fc_data); 250 250 d2fc->hdr.version = DBFS_D2FC_HDR_VERSION; 251 251 d2fc->hdr.count = count;
+9 -9
arch/s390/include/asm/timex.h
··· 15 15 #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL 16 16 17 17 /* Inline functions for clock register access. */ 18 - static inline int set_clock(__u64 time) 18 + static inline int set_tod_clock(__u64 time) 19 19 { 20 20 int cc; 21 21 ··· 27 27 return cc; 28 28 } 29 29 30 - static inline int store_clock(__u64 *time) 30 + static inline int store_tod_clock(__u64 *time) 31 31 { 32 32 int cc; 33 33 ··· 71 71 72 72 typedef unsigned long long cycles_t; 73 73 74 - static inline unsigned long long get_clock(void) 74 + static inline unsigned long long get_tod_clock(void) 75 75 { 76 76 unsigned long long clk; 77 77 ··· 83 83 return clk; 84 84 } 85 85 86 - static inline void get_clock_ext(char *clk) 86 + static inline void get_tod_clock_ext(char *clk) 87 87 { 88 88 asm volatile("stcke %0" : "=Q" (*clk) : : "cc"); 89 89 } 90 90 91 - static inline unsigned long long get_clock_xt(void) 91 + static inline unsigned long long get_tod_clock_xt(void) 92 92 { 93 93 unsigned char clk[16]; 94 - get_clock_ext(clk); 94 + get_tod_clock_ext(clk); 95 95 return *((unsigned long long *)&clk[1]); 96 96 } 97 97 98 98 static inline cycles_t get_cycles(void) 99 99 { 100 - return (cycles_t) get_clock() >> 2; 100 + return (cycles_t) get_tod_clock() >> 2; 101 101 } 102 102 103 103 int get_sync_clock(unsigned long long *clock); ··· 123 123 * function, otherwise the returned value is not guaranteed to 124 124 * be monotonic. 125 125 */ 126 - static inline unsigned long long get_clock_monotonic(void) 126 + static inline unsigned long long get_tod_clock_monotonic(void) 127 127 { 128 - return get_clock_xt() - sched_clock_base_cc; 128 + return get_tod_clock_xt() - sched_clock_base_cc; 129 129 } 130 130 131 131 /**
+1 -1
arch/s390/kernel/debug.c
··· 867 867 debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level, 868 868 int exception) 869 869 { 870 - active->id.stck = get_clock(); 870 + active->id.stck = get_tod_clock(); 871 871 active->id.fields.cpuid = smp_processor_id(); 872 872 active->caller = __builtin_return_address(0); 873 873 active->id.fields.exception = exception;
+3 -3
arch/s390/kernel/early.c
··· 47 47 { 48 48 u64 time; 49 49 50 - if (store_clock(&time) == 0) 50 + if (store_tod_clock(&time) == 0) 51 51 return; 52 52 /* TOD clock not running. Set the clock to Unix Epoch. */ 53 - if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0) 53 + if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0) 54 54 disabled_wait(0); 55 55 56 56 sched_clock_base_cc = TOD_UNIX_EPOCH; ··· 173 173 } 174 174 175 175 /* re-initialize cputime accounting. */ 176 - sched_clock_base_cc = get_clock(); 176 + sched_clock_base_cc = get_tod_clock(); 177 177 S390_lowcore.last_update_clock = sched_clock_base_cc; 178 178 S390_lowcore.last_update_timer = 0x7fffffffffffffffULL; 179 179 S390_lowcore.user_timer = 0;
+1 -1
arch/s390/kernel/nmi.c
··· 293 293 * retry this instruction. 294 294 */ 295 295 spin_lock(&ipd_lock); 296 - tmp = get_clock(); 296 + tmp = get_tod_clock(); 297 297 if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME) 298 298 ipd_count++; 299 299 else
+5 -5
arch/s390/kernel/smp.c
··· 365 365 u64 end; 366 366 int cpu; 367 367 368 - end = get_clock() + (1000000UL << 12); 368 + end = get_tod_clock() + (1000000UL << 12); 369 369 for_each_cpu(cpu, cpumask) { 370 370 struct pcpu *pcpu = pcpu_devices + cpu; 371 371 set_bit(ec_stop_cpu, &pcpu->ec_mask); 372 372 while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL, 373 373 0, NULL) == SIGP_CC_BUSY && 374 - get_clock() < end) 374 + get_tod_clock() < end) 375 375 cpu_relax(); 376 376 } 377 - while (get_clock() < end) { 377 + while (get_tod_clock() < end) { 378 378 for_each_cpu(cpu, cpumask) 379 379 if (pcpu_stopped(pcpu_devices + cpu)) 380 380 cpumask_clear_cpu(cpu, cpumask); ··· 694 694 */ 695 695 static void __cpuinit smp_start_secondary(void *cpuvoid) 696 696 { 697 - S390_lowcore.last_update_clock = get_clock(); 697 + S390_lowcore.last_update_clock = get_tod_clock(); 698 698 S390_lowcore.restart_stack = (unsigned long) restart_stack; 699 699 S390_lowcore.restart_fn = (unsigned long) do_restart; 700 700 S390_lowcore.restart_data = 0; ··· 947 947 unsigned int sequence; 948 948 949 949 do { 950 - now = get_clock(); 950 + now = get_tod_clock(); 951 951 sequence = ACCESS_ONCE(idle->sequence); 952 952 idle_time = ACCESS_ONCE(idle->idle_time); 953 953 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+13 -13
arch/s390/kernel/time.c
··· 63 63 */ 64 64 unsigned long long notrace __kprobes sched_clock(void) 65 65 { 66 - return tod_to_ns(get_clock_monotonic()); 66 + return tod_to_ns(get_tod_clock_monotonic()); 67 67 } 68 68 69 69 /* ··· 194 194 195 195 void read_persistent_clock(struct timespec *ts) 196 196 { 197 - tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts); 197 + tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts); 198 198 } 199 199 200 200 void read_boot_clock(struct timespec *ts) ··· 204 204 205 205 static cycle_t read_tod_clock(struct clocksource *cs) 206 206 { 207 - return get_clock(); 207 + return get_tod_clock(); 208 208 } 209 209 210 210 static struct clocksource clocksource_tod = { ··· 342 342 343 343 sw_ptr = &get_cpu_var(clock_sync_word); 344 344 sw0 = atomic_read(sw_ptr); 345 - *clock = get_clock(); 345 + *clock = get_tod_clock(); 346 346 sw1 = atomic_read(sw_ptr); 347 347 put_cpu_var(clock_sync_word); 348 348 if (sw0 == sw1 && (sw0 & 0x80000000U)) ··· 486 486 .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, 487 487 .es = 0, .sl = 0 }; 488 488 if (etr_setr(&etr_eacr) == 0) { 489 - etr_tolec = get_clock(); 489 + etr_tolec = get_tod_clock(); 490 490 set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); 491 491 if (etr_port0_online && etr_port1_online) 492 492 set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); ··· 768 768 __ctl_set_bit(14, 21); 769 769 __ctl_set_bit(0, 29); 770 770 clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32; 771 - old_clock = get_clock(); 772 - if (set_clock(clock) == 0) { 771 + old_clock = get_tod_clock(); 772 + if (set_tod_clock(clock) == 0) { 773 773 __udelay(1); /* Wait for the clock to start. */ 774 774 __ctl_clear_bit(0, 29); 775 775 __ctl_clear_bit(14, 21); ··· 845 845 * assume that this can have caused an stepping 846 846 * port switch. 
847 847 */ 848 - etr_tolec = get_clock(); 848 + etr_tolec = get_tod_clock(); 849 849 eacr.p0 = etr_port0_online; 850 850 if (!eacr.p0) 851 851 eacr.e0 = 0; ··· 858 858 * assume that this can have caused an stepping 859 859 * port switch. 860 860 */ 861 - etr_tolec = get_clock(); 861 + etr_tolec = get_tod_clock(); 862 862 eacr.p1 = etr_port1_online; 863 863 if (!eacr.p1) 864 864 eacr.e1 = 0; ··· 974 974 etr_eacr = eacr; 975 975 etr_setr(&etr_eacr); 976 976 if (dp_changed) 977 - etr_tolec = get_clock(); 977 + etr_tolec = get_tod_clock(); 978 978 } 979 979 980 980 /* ··· 1012 1012 /* Store aib to get the current ETR status word. */ 1013 1013 BUG_ON(etr_stetr(&aib) != 0); 1014 1014 etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */ 1015 - now = get_clock(); 1015 + now = get_tod_clock(); 1016 1016 1017 1017 /* 1018 1018 * Update the port information if the last stepping port change ··· 1537 1537 if (stp_info.todoff[0] || stp_info.todoff[1] || 1538 1538 stp_info.todoff[2] || stp_info.todoff[3] || 1539 1539 stp_info.tmd != 2) { 1540 - old_clock = get_clock(); 1540 + old_clock = get_tod_clock(); 1541 1541 rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0); 1542 1542 if (rc == 0) { 1543 - delta = adjust_time(old_clock, get_clock(), 0); 1543 + delta = adjust_time(old_clock, get_tod_clock(), 0); 1544 1544 fixup_clock_comparator(delta); 1545 1545 rc = chsc_sstpi(stp_page, &stp_info, 1546 1546 sizeof(struct stp_sstpi));
+1 -1
arch/s390/kernel/vtime.c
··· 191 191 unsigned int sequence; 192 192 193 193 do { 194 - now = get_clock(); 194 + now = get_tod_clock(); 195 195 sequence = ACCESS_ONCE(idle->sequence); 196 196 idle_enter = ACCESS_ONCE(idle->clock_idle_enter); 197 197 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
+3 -3
arch/s390/kvm/interrupt.c
··· 362 362 } 363 363 364 364 if ((!rc) && (vcpu->arch.sie_block->ckc < 365 - get_clock() + vcpu->arch.sie_block->epoch)) { 365 + get_tod_clock() + vcpu->arch.sie_block->epoch)) { 366 366 if ((!psw_extint_disabled(vcpu)) && 367 367 (vcpu->arch.sie_block->gcr[0] & 0x800ul)) 368 368 rc = 1; ··· 402 402 goto no_timer; 403 403 } 404 404 405 - now = get_clock() + vcpu->arch.sie_block->epoch; 405 + now = get_tod_clock() + vcpu->arch.sie_block->epoch; 406 406 if (vcpu->arch.sie_block->ckc < now) { 407 407 __unset_cpu_idle(vcpu); 408 408 return 0; ··· 492 492 } 493 493 494 494 if ((vcpu->arch.sie_block->ckc < 495 - get_clock() + vcpu->arch.sie_block->epoch)) 495 + get_tod_clock() + vcpu->arch.sie_block->epoch)) 496 496 __try_deliver_ckc_interrupt(vcpu); 497 497 498 498 if (atomic_read(&fi->active)) {
+8 -8
arch/s390/lib/delay.c
··· 32 32 unsigned long cr0, cr6, new; 33 33 u64 clock_saved, end; 34 34 35 - end = get_clock() + (usecs << 12); 35 + end = get_tod_clock() + (usecs << 12); 36 36 clock_saved = local_tick_disable(); 37 37 __ctl_store(cr0, 0, 0); 38 38 __ctl_store(cr6, 6, 6); ··· 45 45 set_clock_comparator(end); 46 46 vtime_stop_cpu(); 47 47 local_irq_disable(); 48 - } while (get_clock() < end); 48 + } while (get_tod_clock() < end); 49 49 lockdep_on(); 50 50 __ctl_load(cr0, 0, 0); 51 51 __ctl_load(cr6, 6, 6); ··· 56 56 { 57 57 u64 clock_saved, end; 58 58 59 - end = get_clock() + (usecs << 12); 59 + end = get_tod_clock() + (usecs << 12); 60 60 do { 61 61 clock_saved = 0; 62 62 if (end < S390_lowcore.clock_comparator) { ··· 67 67 local_irq_disable(); 68 68 if (clock_saved) 69 69 local_tick_enable(clock_saved); 70 - } while (get_clock() < end); 70 + } while (get_tod_clock() < end); 71 71 } 72 72 73 73 /* ··· 111 111 { 112 112 u64 end; 113 113 114 - end = get_clock() + (usecs << 12); 115 - while (get_clock() < end) 114 + end = get_tod_clock() + (usecs << 12); 115 + while (get_tod_clock() < end) 116 116 cpu_relax(); 117 117 } 118 118 ··· 122 122 123 123 nsecs <<= 9; 124 124 do_div(nsecs, 125); 125 - end = get_clock() + nsecs; 125 + end = get_tod_clock() + nsecs; 126 126 if (nsecs & ~0xfffUL) 127 127 __udelay(nsecs >> 12); 128 - while (get_clock() < end) 128 + while (get_tod_clock() < end) 129 129 barrier(); 130 130 } 131 131 EXPORT_SYMBOL(__ndelay);
+9 -9
drivers/s390/block/dasd.c
··· 1352 1352 switch (rc) { 1353 1353 case 0: /* termination successful */ 1354 1354 cqr->status = DASD_CQR_CLEAR_PENDING; 1355 - cqr->stopclk = get_clock(); 1355 + cqr->stopclk = get_tod_clock(); 1356 1356 cqr->starttime = 0; 1357 1357 DBF_DEV_EVENT(DBF_DEBUG, device, 1358 1358 "terminate cqr %p successful", ··· 1420 1420 cqr->status = DASD_CQR_ERROR; 1421 1421 return -EIO; 1422 1422 } 1423 - cqr->startclk = get_clock(); 1423 + cqr->startclk = get_tod_clock(); 1424 1424 cqr->starttime = jiffies; 1425 1425 cqr->retries--; 1426 1426 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { ··· 1623 1623 return; 1624 1624 } 1625 1625 1626 - now = get_clock(); 1626 + now = get_tod_clock(); 1627 1627 cqr = (struct dasd_ccw_req *) intparm; 1628 1628 /* check for conditions that should be handled immediately */ 1629 1629 if (!cqr || ··· 1963 1963 } 1964 1964 break; 1965 1965 case DASD_CQR_QUEUED: 1966 - cqr->stopclk = get_clock(); 1966 + cqr->stopclk = get_tod_clock(); 1967 1967 cqr->status = DASD_CQR_CLEARED; 1968 1968 break; 1969 1969 default: /* no need to modify the others */ ··· 2210 2210 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2211 2211 } 2212 2212 2213 - maincqr->endclk = get_clock(); 2213 + maincqr->endclk = get_tod_clock(); 2214 2214 if ((maincqr->status != DASD_CQR_DONE) && 2215 2215 (maincqr->intrc != -ERESTARTSYS)) 2216 2216 dasd_log_sense(maincqr, &maincqr->irb); ··· 2340 2340 "Cancelling request %p failed with rc=%d\n", 2341 2341 cqr, rc); 2342 2342 } else { 2343 - cqr->stopclk = get_clock(); 2343 + cqr->stopclk = get_tod_clock(); 2344 2344 } 2345 2345 break; 2346 2346 default: /* already finished or clear pending - do nothing */ ··· 2568 2568 } 2569 2569 2570 2570 /* Rechain finished requests to final queue */ 2571 - cqr->endclk = get_clock(); 2571 + cqr->endclk = get_tod_clock(); 2572 2572 list_move_tail(&cqr->blocklist, final_queue); 2573 2573 } 2574 2574 } ··· 2711 2711 } 2712 2712 /* call the callback function */ 2713 2713 
spin_lock_irq(&block->request_queue_lock); 2714 - cqr->endclk = get_clock(); 2714 + cqr->endclk = get_tod_clock(); 2715 2715 list_del_init(&cqr->blocklist); 2716 2716 __dasd_cleanup_cqr(cqr); 2717 2717 spin_unlock_irq(&block->request_queue_lock); ··· 3504 3504 cqr->memdev = device; 3505 3505 cqr->expires = 10*HZ; 3506 3506 cqr->retries = 256; 3507 - cqr->buildclk = get_clock(); 3507 + cqr->buildclk = get_tod_clock(); 3508 3508 cqr->status = DASD_CQR_FILLED; 3509 3509 return cqr; 3510 3510 }
+4 -4
drivers/s390/block/dasd_3990_erp.c
··· 229 229 dctl_cqr->expires = 5 * 60 * HZ; 230 230 dctl_cqr->retries = 2; 231 231 232 - dctl_cqr->buildclk = get_clock(); 232 + dctl_cqr->buildclk = get_tod_clock(); 233 233 234 234 dctl_cqr->status = DASD_CQR_FILLED; 235 235 ··· 1719 1719 erp->magic = default_erp->magic; 1720 1720 erp->expires = default_erp->expires; 1721 1721 erp->retries = 256; 1722 - erp->buildclk = get_clock(); 1722 + erp->buildclk = get_tod_clock(); 1723 1723 erp->status = DASD_CQR_FILLED; 1724 1724 1725 1725 /* remove the default erp */ ··· 2322 2322 DBF_DEV_EVENT(DBF_ERR, device, "%s", 2323 2323 "Unable to allocate ERP request"); 2324 2324 cqr->status = DASD_CQR_FAILED; 2325 - cqr->stopclk = get_clock (); 2325 + cqr->stopclk = get_tod_clock(); 2326 2326 } else { 2327 2327 DBF_DEV_EVENT(DBF_ERR, device, 2328 2328 "Unable to allocate ERP request " ··· 2364 2364 erp->magic = cqr->magic; 2365 2365 erp->expires = cqr->expires; 2366 2366 erp->retries = 256; 2367 - erp->buildclk = get_clock(); 2367 + erp->buildclk = get_tod_clock(); 2368 2368 erp->status = DASD_CQR_FILLED; 2369 2369 2370 2370 return erp;
+2 -2
drivers/s390/block/dasd_alias.c
··· 448 448 ccw->count = sizeof(*(lcu->uac)); 449 449 ccw->cda = (__u32)(addr_t) lcu->uac; 450 450 451 - cqr->buildclk = get_clock(); 451 + cqr->buildclk = get_tod_clock(); 452 452 cqr->status = DASD_CQR_FILLED; 453 453 454 454 /* need to unset flag here to detect race with summary unit check */ ··· 733 733 cqr->memdev = device; 734 734 cqr->block = NULL; 735 735 cqr->expires = 5 * HZ; 736 - cqr->buildclk = get_clock(); 736 + cqr->buildclk = get_tod_clock(); 737 737 cqr->status = DASD_CQR_FILLED; 738 738 739 739 rc = dasd_sleep_on_immediatly(cqr);
+5 -5
drivers/s390/block/dasd_diag.c
··· 184 184 private->iob.bio_list = dreq->bio; 185 185 private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; 186 186 187 - cqr->startclk = get_clock(); 187 + cqr->startclk = get_tod_clock(); 188 188 cqr->starttime = jiffies; 189 189 cqr->retries--; 190 190 191 191 rc = dia250(&private->iob, RW_BIO); 192 192 switch (rc) { 193 193 case 0: /* Synchronous I/O finished successfully */ 194 - cqr->stopclk = get_clock(); 194 + cqr->stopclk = get_tod_clock(); 195 195 cqr->status = DASD_CQR_SUCCESS; 196 196 /* Indicate to calling function that only a dasd_schedule_bh() 197 197 and no timer is needed */ ··· 222 222 mdsk_term_io(device); 223 223 mdsk_init_io(device, device->block->bp_block, 0, NULL); 224 224 cqr->status = DASD_CQR_CLEAR_PENDING; 225 - cqr->stopclk = get_clock(); 225 + cqr->stopclk = get_tod_clock(); 226 226 dasd_schedule_device_bh(device); 227 227 return 0; 228 228 } ··· 276 276 return; 277 277 } 278 278 279 - cqr->stopclk = get_clock(); 279 + cqr->stopclk = get_tod_clock(); 280 280 281 281 expires = 0; 282 282 if ((ext_code.subcode & 0xff) == 0) { ··· 556 556 } 557 557 } 558 558 cqr->retries = DIAG_MAX_RETRIES; 559 - cqr->buildclk = get_clock(); 559 + cqr->buildclk = get_tod_clock(); 560 560 if (blk_noretry_request(req) || 561 561 block->base->features & DASD_FEATURE_FAILFAST) 562 562 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+15 -15
drivers/s390/block/dasd_eckd.c
··· 862 862 cqr->expires = 10*HZ; 863 863 cqr->lpm = lpm; 864 864 cqr->retries = 256; 865 - cqr->buildclk = get_clock(); 865 + cqr->buildclk = get_tod_clock(); 866 866 cqr->status = DASD_CQR_FILLED; 867 867 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags); 868 868 } ··· 1449 1449 ccw->count = sizeof(struct dasd_rssd_features); 1450 1450 ccw->cda = (__u32)(addr_t) features; 1451 1451 1452 - cqr->buildclk = get_clock(); 1452 + cqr->buildclk = get_tod_clock(); 1453 1453 cqr->status = DASD_CQR_FILLED; 1454 1454 rc = dasd_sleep_on(cqr); 1455 1455 if (rc == 0) { ··· 1501 1501 cqr->block = NULL; 1502 1502 cqr->retries = 256; 1503 1503 cqr->expires = 10*HZ; 1504 - cqr->buildclk = get_clock(); 1504 + cqr->buildclk = get_tod_clock(); 1505 1505 cqr->status = DASD_CQR_FILLED; 1506 1506 return cqr; 1507 1507 } ··· 1841 1841 cqr->startdev = device; 1842 1842 cqr->memdev = device; 1843 1843 cqr->retries = 255; 1844 - cqr->buildclk = get_clock(); 1844 + cqr->buildclk = get_tod_clock(); 1845 1845 cqr->status = DASD_CQR_FILLED; 1846 1846 return cqr; 1847 1847 } ··· 2241 2241 fcp->startdev = device; 2242 2242 fcp->memdev = device; 2243 2243 fcp->retries = 256; 2244 - fcp->buildclk = get_clock(); 2244 + fcp->buildclk = get_tod_clock(); 2245 2245 fcp->status = DASD_CQR_FILLED; 2246 2246 return fcp; 2247 2247 } ··· 2530 2530 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2531 2531 cqr->lpm = startdev->path_data.ppm; 2532 2532 cqr->retries = 256; 2533 - cqr->buildclk = get_clock(); 2533 + cqr->buildclk = get_tod_clock(); 2534 2534 cqr->status = DASD_CQR_FILLED; 2535 2535 return cqr; 2536 2536 } ··· 2705 2705 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2706 2706 cqr->lpm = startdev->path_data.ppm; 2707 2707 cqr->retries = 256; 2708 - cqr->buildclk = get_clock(); 2708 + cqr->buildclk = get_tod_clock(); 2709 2709 cqr->status = DASD_CQR_FILLED; 2710 2710 return cqr; 2711 2711 } ··· 2998 2998 cqr->expires = startdev->default_expires * HZ; /* 
default 5 minutes */ 2999 2999 cqr->lpm = startdev->path_data.ppm; 3000 3000 cqr->retries = 256; 3001 - cqr->buildclk = get_clock(); 3001 + cqr->buildclk = get_tod_clock(); 3002 3002 cqr->status = DASD_CQR_FILLED; 3003 3003 return cqr; 3004 3004 out_error: ··· 3201 3201 cqr->expires = startdev->default_expires * HZ; 3202 3202 cqr->lpm = startdev->path_data.ppm; 3203 3203 cqr->retries = 256; 3204 - cqr->buildclk = get_clock(); 3204 + cqr->buildclk = get_tod_clock(); 3205 3205 cqr->status = DASD_CQR_FILLED; 3206 3206 3207 3207 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) ··· 3402 3402 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3403 3403 cqr->retries = 2; /* set retry counter to enable basic ERP */ 3404 3404 cqr->expires = 2 * HZ; 3405 - cqr->buildclk = get_clock(); 3405 + cqr->buildclk = get_tod_clock(); 3406 3406 cqr->status = DASD_CQR_FILLED; 3407 3407 3408 3408 rc = dasd_sleep_on_immediatly(cqr); ··· 3457 3457 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3458 3458 cqr->retries = 2; /* set retry counter to enable basic ERP */ 3459 3459 cqr->expires = 2 * HZ; 3460 - cqr->buildclk = get_clock(); 3460 + cqr->buildclk = get_tod_clock(); 3461 3461 cqr->status = DASD_CQR_FILLED; 3462 3462 3463 3463 rc = dasd_sleep_on_immediatly(cqr); ··· 3511 3511 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3512 3512 cqr->retries = 2; /* set retry counter to enable basic ERP */ 3513 3513 cqr->expires = 2 * HZ; 3514 - cqr->buildclk = get_clock(); 3514 + cqr->buildclk = get_tod_clock(); 3515 3515 cqr->status = DASD_CQR_FILLED; 3516 3516 3517 3517 rc = dasd_sleep_on_immediatly(cqr); ··· 3572 3572 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags); 3573 3573 cqr->retries = 5; 3574 3574 cqr->expires = 10 * HZ; 3575 - cqr->buildclk = get_clock(); 3575 + cqr->buildclk = get_tod_clock(); 3576 3576 cqr->status = DASD_CQR_FILLED; 3577 3577 cqr->lpm = usrparm.path_mask; 3578 3578 ··· 3642 3642 ccw->count = sizeof(struct dasd_rssd_perf_stats_t); 3643 3643 ccw->cda = (__u32)(addr_t) stats; 3644 3644 
3645 - cqr->buildclk = get_clock(); 3645 + cqr->buildclk = get_tod_clock(); 3646 3646 cqr->status = DASD_CQR_FILLED; 3647 3647 rc = dasd_sleep_on(cqr); 3648 3648 if (rc == 0) { ··· 3768 3768 cqr->memdev = device; 3769 3769 cqr->retries = 3; 3770 3770 cqr->expires = 10 * HZ; 3771 - cqr->buildclk = get_clock(); 3771 + cqr->buildclk = get_tod_clock(); 3772 3772 cqr->status = DASD_CQR_FILLED; 3773 3773 3774 3774 /* Build the ccws */
+1 -1
drivers/s390/block/dasd_eer.c
··· 481 481 ccw->flags = 0; 482 482 ccw->cda = (__u32)(addr_t) cqr->data; 483 483 484 - cqr->buildclk = get_clock(); 484 + cqr->buildclk = get_tod_clock(); 485 485 cqr->status = DASD_CQR_FILLED; 486 486 cqr->callback = dasd_eer_snss_cb; 487 487
+2 -2
drivers/s390/block/dasd_erp.c
··· 102 102 pr_err("%s: default ERP has run out of retries and failed\n", 103 103 dev_name(&device->cdev->dev)); 104 104 cqr->status = DASD_CQR_FAILED; 105 - cqr->stopclk = get_clock(); 105 + cqr->stopclk = get_tod_clock(); 106 106 } 107 107 return cqr; 108 108 } /* end dasd_default_erp_action */ ··· 146 146 cqr->status = DASD_CQR_DONE; 147 147 else { 148 148 cqr->status = DASD_CQR_FAILED; 149 - cqr->stopclk = get_clock(); 149 + cqr->stopclk = get_tod_clock(); 150 150 } 151 151 152 152 return cqr;
+1 -1
drivers/s390/block/dasd_fba.c
··· 370 370 cqr->block = block; 371 371 cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */ 372 372 cqr->retries = 32; 373 - cqr->buildclk = get_clock(); 373 + cqr->buildclk = get_tod_clock(); 374 374 cqr->status = DASD_CQR_FILLED; 375 375 return cqr; 376 376 }
+2 -2
drivers/s390/char/sclp.c
··· 450 450 timeout = 0; 451 451 if (timer_pending(&sclp_request_timer)) { 452 452 /* Get timeout TOD value */ 453 - timeout = get_clock() + 453 + timeout = get_tod_clock() + 454 454 sclp_tod_from_jiffies(sclp_request_timer.expires - 455 455 jiffies); 456 456 } ··· 472 472 while (sclp_running_state != sclp_running_state_idle) { 473 473 /* Check for expired request timer */ 474 474 if (timer_pending(&sclp_request_timer) && 475 - get_clock() > timeout && 475 + get_tod_clock() > timeout && 476 476 del_timer(&sclp_request_timer)) 477 477 sclp_request_timer.function(sclp_request_timer.data); 478 478 cpu_relax();
+1 -1
drivers/s390/char/zcore.c
··· 637 637 hdr->rmem_size = memory; 638 638 hdr->mem_end = sys_info.mem_size; 639 639 hdr->num_pages = memory / PAGE_SIZE; 640 - hdr->tod = get_clock(); 640 + hdr->tod = get_tod_clock(); 641 641 get_cpu_id(&hdr->cpu_id); 642 642 for (i = 0; zfcpdump_save_areas[i]; i++) { 643 643 prefix = zfcpdump_save_areas[i]->pref_reg;
+2 -2
drivers/s390/cio/cio.c
··· 962 962 atomic_inc(&chpid_reset_count); 963 963 } 964 964 /* Wait for machine check for all channel paths. */ 965 - timeout = get_clock() + (RCHP_TIMEOUT << 12); 965 + timeout = get_tod_clock() + (RCHP_TIMEOUT << 12); 966 966 while (atomic_read(&chpid_reset_count) != 0) { 967 - if (get_clock() > timeout) 967 + if (get_tod_clock() > timeout) 968 968 break; 969 969 cpu_relax(); 970 970 }
+3 -3
drivers/s390/cio/cmf.c
··· 33 33 #include <linux/module.h> 34 34 #include <linux/moduleparam.h> 35 35 #include <linux/slab.h> 36 - #include <linux/timex.h> /* get_clock() */ 36 + #include <linux/timex.h> /* get_tod_clock() */ 37 37 38 38 #include <asm/ccwdev.h> 39 39 #include <asm/cio.h> ··· 326 326 memcpy(cmb_data->last_block, hw_block, cmb_data->size); 327 327 memcpy(reference_buf, hw_block, cmb_data->size); 328 328 } while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size)); 329 - cmb_data->last_update = get_clock(); 329 + cmb_data->last_update = get_tod_clock(); 330 330 kfree(reference_buf); 331 331 return 0; 332 332 } ··· 428 428 memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size); 429 429 cmb_data->last_update = 0; 430 430 } 431 - cdev->private->cmb_start_time = get_clock(); 431 + cdev->private->cmb_start_time = get_tod_clock(); 432 432 spin_unlock_irq(cdev->ccwlock); 433 433 } 434 434
+1 -1
drivers/s390/cio/css.c
··· 780 780 css->cssid = nr; 781 781 dev_set_name(&css->device, "css%x", nr); 782 782 css->device.release = channel_subsystem_release; 783 - tod_high = (u32) (get_clock() >> 32); 783 + tod_high = (u32) (get_tod_clock() >> 32); 784 784 css_generate_pgid(css, tod_high); 785 785 return 0; 786 786 }
+1 -1
drivers/s390/cio/device_fsm.c
··· 47 47 cc = stsch_err(sch->schid, &schib); 48 48 49 49 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " 50 - "device information:\n", get_clock()); 50 + "device information:\n", get_tod_clock()); 51 51 printk(KERN_WARNING "cio: orb:\n"); 52 52 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 53 53 orb, sizeof(*orb), 0);
+6 -6
drivers/s390/cio/qdio_main.c
··· 338 338 retries++; 339 339 340 340 if (!start_time) { 341 - start_time = get_clock(); 341 + start_time = get_tod_clock(); 342 342 goto again; 343 343 } 344 - if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) 344 + if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) 345 345 goto again; 346 346 } 347 347 if (retries) { ··· 504 504 int count, stop; 505 505 unsigned char state = 0; 506 506 507 - q->timestamp = get_clock(); 507 + q->timestamp = get_tod_clock(); 508 508 509 509 /* 510 510 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved ··· 563 563 if (bufnr != q->last_move) { 564 564 q->last_move = bufnr; 565 565 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) 566 - q->u.in.timestamp = get_clock(); 566 + q->u.in.timestamp = get_tod_clock(); 567 567 return 1; 568 568 } else 569 569 return 0; ··· 595 595 * At this point we know, that inbound first_to_check 596 596 * has (probably) not moved (see qdio_inbound_processing). 597 597 */ 598 - if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { 598 + if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { 599 599 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", 600 600 q->first_to_check); 601 601 return 1; ··· 772 772 int count, stop; 773 773 unsigned char state = 0; 774 774 775 - q->timestamp = get_clock(); 775 + q->timestamp = get_tod_clock(); 776 776 777 777 if (need_siga_sync(q)) 778 778 if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
+1 -1
drivers/s390/net/qeth_core.h
··· 816 816 817 817 static inline int qeth_get_micros(void) 818 818 { 819 - return (int) (get_clock() >> 12); 819 + return (int) (get_tod_clock() >> 12); 820 820 } 821 821 822 822 static inline int qeth_get_ip_version(struct sk_buff *skb)
+1 -1
drivers/s390/scsi/zfcp_fsf.c
··· 727 727 zfcp_reqlist_add(adapter->req_list, req); 728 728 729 729 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); 730 - req->issued = get_clock(); 730 + req->issued = get_tod_clock(); 731 731 if (zfcp_qdio_send(qdio, &req->qdio_req)) { 732 732 del_timer(&req->timer); 733 733 /* lookup request again, list might have changed */
+1 -1
drivers/s390/scsi/zfcp_qdio.c
··· 68 68 unsigned long long now, span; 69 69 int used; 70 70 71 - now = get_clock_monotonic(); 71 + now = get_tod_clock_monotonic(); 72 72 span = (now - qdio->req_q_time) >> 12; 73 73 used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free); 74 74 qdio->req_q_util += used * span;