Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml

Pull UML updates from Richard Weinberger:

- A new timer mode, time travel, for testing with UML

- Many bugfixes/improvements for the serial line driver

- Various bugfixes

* tag 'for-linus-5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml:
um: fix build without CONFIG_UML_TIME_TRAVEL_SUPPORT
um: Fix kcov crash during startup
um: configs: Remove useless UEVENT_HELPER_PATH
um: Support time travel mode
um: Pass nsecs to os timer functions
um: Remove drivers/ssl.h
um: Don't garbage collect in deactivate_all_fds()
um: Silence lockdep complaint about mmap_sem
um: Remove locking in deactivate_all_fds()
um: Timer code cleanup
um: fix os_timer_one_shot()
um: Fix IRQ controller regression on console read

+321 -143
+12
arch/um/Kconfig
··· 184 184 185 185 If unsure, say Y. 186 186 187 + config UML_TIME_TRAVEL_SUPPORT 188 + bool 189 + prompt "Support time-travel mode (e.g. for test execution)" 190 + help 191 + Enable this option to support time travel inside the UML instance. 192 + 193 + After enabling this option, two modes are accessible at runtime 194 + (selected by the kernel command line), see the kernel's command- 195 + line help for more details. 196 + 197 + It is safe to say Y, but you probably don't need this. 198 + 187 199 endmenu 188 200 189 201 source "arch/um/drivers/Kconfig"
-1
arch/um/configs/i386_defconfig
··· 36 36 CONFIG_CON_CHAN="pts" 37 37 CONFIG_SSL_CHAN="pts" 38 38 CONFIG_UML_SOUND=m 39 - CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 40 39 CONFIG_DEVTMPFS=y 41 40 CONFIG_DEVTMPFS_MOUNT=y 42 41 CONFIG_BLK_DEV_UBD=y
-1
arch/um/configs/x86_64_defconfig
··· 34 34 CONFIG_CON_CHAN="pts" 35 35 CONFIG_SSL_CHAN="pts" 36 36 CONFIG_UML_SOUND=m 37 - CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 38 37 CONFIG_DEVTMPFS=y 39 38 CONFIG_DEVTMPFS_MOUNT=y 40 39 CONFIG_BLK_DEV_UBD=y
+44 -8
arch/um/drivers/chan_kern.c
··· 171 171 return err; 172 172 } 173 173 174 + /* Items are added in IRQ context, when free_irq can't be called, and 175 + * removed in process context, when it can. 176 + * This handles interrupt sources which disappear, and which need to 177 + * be permanently disabled. This is discovered in IRQ context, but 178 + * the freeing of the IRQ must be done later. 179 + */ 180 + static DEFINE_SPINLOCK(irqs_to_free_lock); 181 + static LIST_HEAD(irqs_to_free); 182 + 183 + void free_irqs(void) 184 + { 185 + struct chan *chan; 186 + LIST_HEAD(list); 187 + struct list_head *ele; 188 + unsigned long flags; 189 + 190 + spin_lock_irqsave(&irqs_to_free_lock, flags); 191 + list_splice_init(&irqs_to_free, &list); 192 + spin_unlock_irqrestore(&irqs_to_free_lock, flags); 193 + 194 + list_for_each(ele, &list) { 195 + chan = list_entry(ele, struct chan, free_list); 196 + 197 + if (chan->input && chan->enabled) 198 + um_free_irq(chan->line->driver->read_irq, chan); 199 + if (chan->output && chan->enabled) 200 + um_free_irq(chan->line->driver->write_irq, chan); 201 + chan->enabled = 0; 202 + } 203 + } 204 + 174 205 static void close_one_chan(struct chan *chan, int delay_free_irq) 175 206 { 207 + unsigned long flags; 208 + 176 209 if (!chan->opened) 177 210 return; 178 211 179 - /* we can safely call free now - it will be marked 180 - * as free and freed once the IRQ stopped processing 181 - */ 182 - if (chan->input && chan->enabled) 183 - um_free_irq(chan->line->driver->read_irq, chan); 184 - if (chan->output && chan->enabled) 185 - um_free_irq(chan->line->driver->write_irq, chan); 186 - chan->enabled = 0; 212 + if (delay_free_irq) { 213 + spin_lock_irqsave(&irqs_to_free_lock, flags); 214 + list_add(&chan->free_list, &irqs_to_free); 215 + spin_unlock_irqrestore(&irqs_to_free_lock, flags); 216 + } else { 217 + if (chan->input && chan->enabled) 218 + um_free_irq(chan->line->driver->read_irq, chan); 219 + if (chan->output && chan->enabled) 220 + um_free_irq(chan->line->driver->write_irq, 
chan); 221 + chan->enabled = 0; 222 + } 187 223 if (chan->ops->close != NULL) 188 224 (*chan->ops->close)(chan->fd, chan->data); 189 225
-1
arch/um/drivers/ssl.c
··· 12 12 #include <linux/console.h> 13 13 #include <asm/termbits.h> 14 14 #include <asm/irq.h> 15 - #include "ssl.h" 16 15 #include "chan.h" 17 16 #include <init.h> 18 17 #include <irq_user.h>
-13
arch/um/drivers/ssl.h
··· 1 - /* 2 - * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) 3 - * Licensed under the GPL 4 - */ 5 - 6 - #ifndef __SSL_H__ 7 - #define __SSL_H__ 8 - 9 - extern int ssl_read(int fd, int line); 10 - extern void ssl_receive_char(int line, char ch); 11 - 12 - #endif 13 -
+1 -1
arch/um/include/asm/mmu_context.h
··· 52 52 * when the new ->mm is used for the first time. 53 53 */ 54 54 __switch_mm(&new->context.id); 55 - down_write(&new->mmap_sem); 55 + down_write_nested(&new->mmap_sem, 1); 56 56 uml_setup_stubs(new); 57 57 up_write(&new->mmap_sem); 58 58 }
+4 -6
arch/um/include/shared/os.h
··· 250 250 251 251 /* time.c */ 252 252 extern void os_idle_sleep(unsigned long long nsecs); 253 - extern int os_timer_create(void* timer); 254 - extern int os_timer_set_interval(void* timer, void* its); 255 - extern int os_timer_one_shot(int ticks); 256 - extern long long os_timer_disable(void); 257 - extern long os_timer_remain(void* timer); 253 + extern int os_timer_create(void); 254 + extern int os_timer_set_interval(unsigned long long nsecs); 255 + extern int os_timer_one_shot(unsigned long long nsecs); 256 + extern void os_timer_disable(void); 258 257 extern void uml_idle_timer(void); 259 258 extern long long os_persistent_clock_emulation(void); 260 259 extern long long os_nsecs(void); 261 - extern long long os_vnsecs(void); 262 260 263 261 /* skas/mem.c */ 264 262 extern long run_syscall_stub(struct mm_id * mm_idp,
+48
arch/um/include/shared/timer-internal.h
··· 10 10 #define TIMER_MULTIPLIER 256 11 11 #define TIMER_MIN_DELTA 500 12 12 13 + enum time_travel_mode { 14 + TT_MODE_OFF, 15 + TT_MODE_BASIC, 16 + TT_MODE_INFCPU, 17 + }; 18 + 19 + enum time_travel_timer_mode { 20 + TT_TMR_DISABLED, 21 + TT_TMR_ONESHOT, 22 + TT_TMR_PERIODIC, 23 + }; 24 + 25 + #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT 26 + extern enum time_travel_mode time_travel_mode; 27 + extern unsigned long long time_travel_time; 28 + extern enum time_travel_timer_mode time_travel_timer_mode; 29 + extern unsigned long long time_travel_timer_expiry; 30 + extern unsigned long long time_travel_timer_interval; 31 + 32 + static inline void time_travel_set_time(unsigned long long ns) 33 + { 34 + time_travel_time = ns; 35 + } 36 + 37 + static inline void time_travel_set_timer(enum time_travel_timer_mode mode, 38 + unsigned long long expiry) 39 + { 40 + time_travel_timer_mode = mode; 41 + time_travel_timer_expiry = expiry; 42 + } 43 + #else 44 + #define time_travel_mode TT_MODE_OFF 45 + #define time_travel_time 0 46 + #define time_travel_timer_expiry 0 47 + #define time_travel_timer_interval 0 48 + 49 + static inline void time_travel_set_time(unsigned long long ns) 50 + { 51 + } 52 + 53 + static inline void time_travel_set_timer(enum time_travel_timer_mode mode, 54 + unsigned long long expiry) 55 + { 56 + } 57 + 58 + #define time_travel_timer_mode TT_TMR_DISABLED 59 + #endif 60 + 13 61 #endif
+5 -4
arch/um/kernel/irq.c
··· 21 21 #include <irq_user.h> 22 22 23 23 24 + extern void free_irqs(void); 25 + 24 26 /* When epoll triggers we do not know why it did so 25 27 * we can also have different IRQs for read and write. 26 28 * This is why we keep a small irq_fd array for each fd - ··· 102 100 } 103 101 } 104 102 } 103 + 104 + free_irqs(); 105 105 } 106 106 107 107 static int assign_epoll_events_to_irq(struct irq_entry *irq_entry) ··· 384 380 */ 385 381 int deactivate_all_fds(void) 386 382 { 387 - unsigned long flags; 388 383 struct irq_entry *to_free; 389 384 390 - spin_lock_irqsave(&irq_lock, flags); 391 385 /* Stop IO. The IRQ loop has no lock so this is our 392 386 * only way of making sure we are safe to dispose 393 387 * of all IRQ handlers ··· 401 399 ); 402 400 to_free = to_free->next; 403 401 } 404 - garbage_collect_irq_entries(); 405 - spin_unlock_irqrestore(&irq_lock, flags); 402 + /* don't garbage collect - we can no longer call kfree() here */ 406 403 os_close_epoll_fd(); 407 404 return 0; 408 405 }
+41 -1
arch/um/kernel/process.c
··· 203 203 kmalloc_ok = save_kmalloc_ok; 204 204 } 205 205 206 + static void time_travel_sleep(unsigned long long duration) 207 + { 208 + unsigned long long next = time_travel_time + duration; 209 + 210 + if (time_travel_mode != TT_MODE_INFCPU) 211 + os_timer_disable(); 212 + 213 + if (time_travel_timer_mode != TT_TMR_DISABLED || 214 + time_travel_timer_expiry < next) { 215 + if (time_travel_timer_mode == TT_TMR_ONESHOT) 216 + time_travel_set_timer(TT_TMR_DISABLED, 0); 217 + /* 218 + * time_travel_time will be adjusted in the timer 219 + * IRQ handler so it works even when the signal 220 + * comes from the OS timer 221 + */ 222 + deliver_alarm(); 223 + } else { 224 + time_travel_set_time(next); 225 + } 226 + 227 + if (time_travel_mode != TT_MODE_INFCPU) { 228 + if (time_travel_timer_mode == TT_TMR_PERIODIC) 229 + os_timer_set_interval(time_travel_timer_interval); 230 + else if (time_travel_timer_mode == TT_TMR_ONESHOT) 231 + os_timer_one_shot(time_travel_timer_expiry - next); 232 + } 233 + } 234 + 235 + static void um_idle_sleep(void) 236 + { 237 + unsigned long long duration = UM_NSEC_PER_SEC; 238 + 239 + if (time_travel_mode != TT_MODE_OFF) { 240 + time_travel_sleep(duration); 241 + } else { 242 + os_idle_sleep(duration); 243 + } 244 + } 245 + 206 246 void arch_cpu_idle(void) 207 247 { 208 248 cpu_tasks[current_thread_info()->cpu].pid = os_getpid(); 209 - os_idle_sleep(UM_NSEC_PER_SEC); 249 + um_idle_sleep(); 210 250 local_irq_enable(); 211 251 } 212 252
+2
arch/um/kernel/skas/Makefile
··· 12 12 CFLAGS_clone.o := $(CFLAGS_NO_HARDENING) 13 13 UNPROFILE_OBJS := clone.o 14 14 15 + KCOV_INSTRUMENT := n 16 + 15 17 include arch/um/scripts/Makefile.rules
+11
arch/um/kernel/skas/syscall.c
··· 10 10 #include <sysdep/ptrace.h> 11 11 #include <sysdep/ptrace_user.h> 12 12 #include <sysdep/syscalls.h> 13 + #include <shared/timer-internal.h> 13 14 14 15 void handle_syscall(struct uml_pt_regs *r) 15 16 { 16 17 struct pt_regs *regs = container_of(r, struct pt_regs, regs); 17 18 int syscall; 19 + 20 + /* 21 + * If we have infinite CPU resources, then make every syscall also a 22 + * preemption point, since we don't have any other preemption in this 23 + * case, and kernel threads would basically never run until userspace 24 + * went to sleep, even if said userspace interacts with the kernel in 25 + * various ways. 26 + */ 27 + if (time_travel_mode == TT_MODE_INFCPU) 28 + schedule(); 18 29 19 30 /* Initialize the syscall number and default return value. */ 20 31 UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
+124 -7
arch/um/kernel/time.c
··· 19 19 #include <kern_util.h> 20 20 #include <os.h> 21 21 #include <timer-internal.h> 22 + #include <shared/init.h> 23 + 24 + #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT 25 + enum time_travel_mode time_travel_mode; 26 + unsigned long long time_travel_time; 27 + enum time_travel_timer_mode time_travel_timer_mode; 28 + unsigned long long time_travel_timer_expiry; 29 + unsigned long long time_travel_timer_interval; 30 + 31 + static bool time_travel_start_set; 32 + static unsigned long long time_travel_start; 33 + #else 34 + #define time_travel_start_set 0 35 + #define time_travel_start 0 36 + #endif 22 37 23 38 void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) 24 39 { 25 40 unsigned long flags; 41 + 42 + if (time_travel_mode != TT_MODE_OFF) 43 + time_travel_set_time(time_travel_timer_expiry); 26 44 27 45 local_irq_save(flags); 28 46 do_IRQ(TIMER_IRQ, regs); ··· 49 31 50 32 static int itimer_shutdown(struct clock_event_device *evt) 51 33 { 52 - os_timer_disable(); 34 + if (time_travel_mode != TT_MODE_OFF) 35 + time_travel_set_timer(TT_TMR_DISABLED, 0); 36 + 37 + if (time_travel_mode != TT_MODE_INFCPU) 38 + os_timer_disable(); 39 + 53 40 return 0; 54 41 } 55 42 56 43 static int itimer_set_periodic(struct clock_event_device *evt) 57 44 { 58 - os_timer_set_interval(NULL, NULL); 45 + unsigned long long interval = NSEC_PER_SEC / HZ; 46 + 47 + if (time_travel_mode != TT_MODE_OFF) 48 + time_travel_set_timer(TT_TMR_PERIODIC, 49 + time_travel_time + interval); 50 + 51 + if (time_travel_mode != TT_MODE_INFCPU) 52 + os_timer_set_interval(interval); 53 + 59 54 return 0; 60 55 } 61 56 62 57 static int itimer_next_event(unsigned long delta, 63 58 struct clock_event_device *evt) 64 59 { 65 - return os_timer_one_shot(delta); 60 + delta += 1; 61 + 62 + if (time_travel_mode != TT_MODE_OFF) 63 + time_travel_set_timer(TT_TMR_ONESHOT, 64 + time_travel_time + delta); 65 + 66 + if (time_travel_mode != TT_MODE_INFCPU) 67 + return os_timer_one_shot(delta); 68 + 69 + 
return 0; 66 70 } 67 71 68 72 static int itimer_one_shot(struct clock_event_device *evt) 69 73 { 70 - os_timer_one_shot(1); 71 - return 0; 74 + return itimer_next_event(0, evt); 72 75 } 73 76 74 77 static struct clock_event_device timer_clockevent = { ··· 126 87 127 88 static u64 timer_read(struct clocksource *cs) 128 89 { 90 + if (time_travel_mode != TT_MODE_OFF) { 91 + /* 92 + * We make reading the timer cost a bit so that we don't get 93 + * stuck in loops that expect time to move more than the 94 + * exact requested sleep amount, e.g. python's socket server, 95 + * see https://bugs.python.org/issue37026. 96 + */ 97 + time_travel_set_time(time_travel_time + TIMER_MULTIPLIER); 98 + return time_travel_time / TIMER_MULTIPLIER; 99 + } 100 + 129 101 return os_nsecs() / TIMER_MULTIPLIER; 130 102 } 131 103 ··· 157 107 printk(KERN_ERR "register_timer : request_irq failed - " 158 108 "errno = %d\n", -err); 159 109 160 - err = os_timer_create(NULL); 110 + err = os_timer_create(); 161 111 if (err != 0) { 162 112 printk(KERN_ERR "creation of timer failed - errno = %d\n", -err); 163 113 return; ··· 173 123 174 124 void read_persistent_clock64(struct timespec64 *ts) 175 125 { 176 - long long nsecs = os_persistent_clock_emulation(); 126 + long long nsecs; 127 + 128 + if (time_travel_start_set) 129 + nsecs = time_travel_start + time_travel_time; 130 + else 131 + nsecs = os_persistent_clock_emulation(); 177 132 178 133 set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC, 179 134 nsecs % NSEC_PER_SEC); ··· 189 134 timer_set_signal_handler(); 190 135 late_time_init = um_timer_setup; 191 136 } 137 + 138 + #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT 139 + unsigned long calibrate_delay_is_known(void) 140 + { 141 + if (time_travel_mode == TT_MODE_INFCPU) 142 + return 1; 143 + return 0; 144 + } 145 + 146 + int setup_time_travel(char *str) 147 + { 148 + if (strcmp(str, "=inf-cpu") == 0) { 149 + time_travel_mode = TT_MODE_INFCPU; 150 + timer_clockevent.name = "time-travel-timer-infcpu"; 151 + 
timer_clocksource.name = "time-travel-clock"; 152 + return 1; 153 + } 154 + 155 + if (!*str) { 156 + time_travel_mode = TT_MODE_BASIC; 157 + timer_clockevent.name = "time-travel-timer"; 158 + timer_clocksource.name = "time-travel-clock"; 159 + return 1; 160 + } 161 + 162 + return -EINVAL; 163 + } 164 + 165 + __setup("time-travel", setup_time_travel); 166 + __uml_help(setup_time_travel, 167 + "time-travel\n" 168 + "This option just enables basic time travel mode, in which the clock/timers\n" 169 + "inside the UML instance skip forward when there's nothing to do, rather than\n" 170 + "waiting for real time to elapse. However, instance CPU speed is limited by\n" 171 + "the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n" 172 + "clock (but quicker when there's nothing to do).\n" 173 + "\n" 174 + "time-travel=inf-cpu\n" 175 + "This enables time travel mode with infinite processing power, in which there\n" 176 + "are no wall clock timers, and any CPU processing happens - as seen from the\n" 177 + "guest - instantly. This can be useful for accurate simulation regardless of\n" 178 + "debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n" 179 + "easily lead to getting stuck (e.g. if anything in the system busy loops).\n"); 180 + 181 + int setup_time_travel_start(char *str) 182 + { 183 + int err; 184 + 185 + err = kstrtoull(str, 0, &time_travel_start); 186 + if (err) 187 + return err; 188 + 189 + time_travel_start_set = 1; 190 + return 1; 191 + } 192 + 193 + __setup("time-travel-start", setup_time_travel_start); 194 + __uml_help(setup_time_travel_start, 195 + "time-travel-start=<seconds>\n" 196 + "Configure the UML instance's wall clock to start at this value rather than\n" 197 + "the host's wall clock at the time of UML boot.\n"); 198 + #endif
+29 -100
arch/um/os-Linux/time.c
··· 26 26 27 27 static inline long long timespec_to_ns(const struct timespec *ts) 28 28 { 29 - return ((long long) ts->tv_sec * UM_NSEC_PER_SEC) + 30 - ts->tv_nsec; 29 + return ((long long) ts->tv_sec * UM_NSEC_PER_SEC) + ts->tv_nsec; 31 30 } 32 31 33 - long long os_persistent_clock_emulation (void) { 32 + long long os_persistent_clock_emulation(void) 33 + { 34 34 struct timespec realtime_tp; 35 35 36 36 clock_gettime(CLOCK_REALTIME, &realtime_tp); ··· 40 40 /** 41 41 * os_timer_create() - create an new posix (interval) timer 42 42 */ 43 - int os_timer_create(void* timer) { 43 + int os_timer_create(void) 44 + { 45 + timer_t *t = &event_high_res_timer; 44 46 45 - timer_t* t = timer; 46 - 47 - if(t == NULL) { 48 - t = &event_high_res_timer; 49 - } 50 - 51 - if (timer_create( 52 - CLOCK_MONOTONIC, 53 - NULL, 54 - t) == -1) { 47 + if (timer_create(CLOCK_MONOTONIC, NULL, t) == -1) 55 48 return -1; 56 - } 57 - return 0; 58 - } 59 - 60 - int os_timer_set_interval(void* timer, void* i) 61 - { 62 - struct itimerspec its; 63 - unsigned long long nsec; 64 - timer_t* t = timer; 65 - struct itimerspec* its_in = i; 66 - 67 - if(t == NULL) { 68 - t = &event_high_res_timer; 69 - } 70 - 71 - nsec = UM_NSEC_PER_SEC / UM_HZ; 72 - 73 - if(its_in != NULL) { 74 - its.it_value.tv_sec = its_in->it_value.tv_sec; 75 - its.it_value.tv_nsec = its_in->it_value.tv_nsec; 76 - } else { 77 - its.it_value.tv_sec = 0; 78 - its.it_value.tv_nsec = nsec; 79 - } 80 - 81 - its.it_interval.tv_sec = 0; 82 - its.it_interval.tv_nsec = nsec; 83 - 84 - if(timer_settime(*t, 0, &its, NULL) == -1) { 85 - return -errno; 86 - } 87 49 88 50 return 0; 89 51 } 90 52 91 - /** 92 - * os_timer_remain() - returns the remaining nano seconds of the given interval 93 - * timer 94 - * Because this is the remaining time of an interval timer, which correspondends 95 - * to HZ, this value can never be bigger than one second. Just 96 - * the nanosecond part of the timer is returned. 
97 - * The returned time is relative to the start time of the interval timer. 98 - * Return an negative value in an error case. 99 - */ 100 - long os_timer_remain(void* timer) 53 + int os_timer_set_interval(unsigned long long nsecs) 101 54 { 102 55 struct itimerspec its; 103 - timer_t* t = timer; 104 56 105 - if(t == NULL) { 106 - t = &event_high_res_timer; 107 - } 57 + its.it_value.tv_sec = nsecs / UM_NSEC_PER_SEC; 58 + its.it_value.tv_nsec = nsecs % UM_NSEC_PER_SEC; 108 59 109 - if(timer_gettime(t, &its) == -1) { 60 + its.it_interval.tv_sec = nsecs / UM_NSEC_PER_SEC; 61 + its.it_interval.tv_nsec = nsecs % UM_NSEC_PER_SEC; 62 + 63 + if (timer_settime(event_high_res_timer, 0, &its, NULL) == -1) 110 64 return -errno; 111 - } 112 65 113 - return its.it_value.tv_nsec; 66 + return 0; 114 67 } 115 68 116 - int os_timer_one_shot(int ticks) 69 + int os_timer_one_shot(unsigned long long nsecs) 117 70 { 118 - struct itimerspec its; 119 - unsigned long long nsec; 120 - unsigned long sec; 71 + struct itimerspec its = { 72 + .it_value.tv_sec = nsecs / UM_NSEC_PER_SEC, 73 + .it_value.tv_nsec = nsecs % UM_NSEC_PER_SEC, 121 74 122 - nsec = (ticks + 1); 123 - sec = nsec / UM_NSEC_PER_SEC; 124 - nsec = nsec % UM_NSEC_PER_SEC; 125 - 126 - its.it_value.tv_sec = nsec / UM_NSEC_PER_SEC; 127 - its.it_value.tv_nsec = nsec; 128 - 129 - its.it_interval.tv_sec = 0; 130 - its.it_interval.tv_nsec = 0; // we cheat here 75 + .it_interval.tv_sec = 0, 76 + .it_interval.tv_nsec = 0, // we cheat here 77 + }; 131 78 132 79 timer_settime(event_high_res_timer, 0, &its, NULL); 133 80 return 0; ··· 82 135 83 136 /** 84 137 * os_timer_disable() - disable the posix (interval) timer 85 - * Returns the remaining interval timer time in nanoseconds 86 138 */ 87 - long long os_timer_disable(void) 139 + void os_timer_disable(void) 88 140 { 89 141 struct itimerspec its; 90 142 91 143 memset(&its, 0, sizeof(struct itimerspec)); 92 - timer_settime(event_high_res_timer, 0, &its, &its); 93 - 94 - return 
its.it_value.tv_sec * UM_NSEC_PER_SEC + its.it_value.tv_nsec; 95 - } 96 - 97 - long long os_vnsecs(void) 98 - { 99 - struct timespec ts; 100 - 101 - clock_gettime(CLOCK_PROCESS_CPUTIME_ID,&ts); 102 - return timespec_to_ns(&ts); 144 + timer_settime(event_high_res_timer, 0, &its, NULL); 103 145 } 104 146 105 147 long long os_nsecs(void) ··· 105 169 */ 106 170 void os_idle_sleep(unsigned long long nsecs) 107 171 { 108 - struct timespec ts; 109 - 110 - if (nsecs <= 0) { 111 - return; 112 - } 113 - 114 - ts = ((struct timespec) { 115 - .tv_sec = nsecs / UM_NSEC_PER_SEC, 116 - .tv_nsec = nsecs % UM_NSEC_PER_SEC 117 - }); 172 + struct timespec ts = { 173 + .tv_sec = nsecs / UM_NSEC_PER_SEC, 174 + .tv_nsec = nsecs % UM_NSEC_PER_SEC 175 + }; 118 176 119 177 /* 120 178 * Relay the signal if clock_nanosleep is interrupted. 121 179 */ 122 - if (clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, NULL)) { 180 + if (clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, NULL)) 123 181 deliver_alarm(); 124 - } 125 182 }