// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "clock.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/preempt.h>

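/*
 * The IO clock is a logical clock that advances with the amount of IO
 * done, measured in sectors (see __bch2_increment_clock()), rather than
 * with wall-clock time. Timers are kept in a min-heap ordered by expiry
 * and fire once clock->now passes them, letting background work pace
 * itself against foreground IO.
 */

/* Min-heap comparator: the timer with the earliest expiry sorts first. */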
static inline bool io_timer_cmp(const void *l, const void *r, void __always_unused *args)
{
	struct io_timer **_l = (struct io_timer **)l;
	struct io_timer **_r = (struct io_timer **)r;

	return (*_l)->expire < (*_r)->expire;
}

static const struct min_heap_callbacks callbacks = {
	.less = io_timer_cmp,
	.swp = NULL,
};

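/*
 * Queue a timer on the clock. If it has already expired, run its callback
 * synchronously instead; if it is already queued, this is a no-op.
 */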
void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
{
	spin_lock(&clock->timer_lock);

	if (time_after_eq64((u64) atomic64_read(&clock->now), timer->expire)) {
		spin_unlock(&clock->timer_lock);
		timer->fn(timer);
		return;
	}

	for (size_t i = 0; i < clock->timers.nr; i++)
		if (clock->timers.data[i] == timer)
			goto out;

	BUG_ON(!min_heap_push(&clock->timers, &timer, &callbacks, NULL));
out:
	spin_unlock(&clock->timer_lock);
}

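/* Remove a timer from the heap; a no-op if it is not queued (e.g. already fired). */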
void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
{
	spin_lock(&clock->timer_lock);

	for (size_t i = 0; i < clock->timers.nr; i++)
		if (clock->timers.data[i] == timer) {
			min_heap_del(&clock->timers, i, &callbacks, NULL);
			break;
		}

	spin_unlock(&clock->timer_lock);
}

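/*
 * State for a task sleeping on the IO clock: the timer callback flags the
 * expiry and wakes the sleeping task.
 */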
struct io_clock_wait {
	struct io_timer		io_timer;
	struct task_struct	*task;
	int			expired;
};

static void io_clock_wait_fn(struct io_timer *timer)
{
	struct io_clock_wait *wait = container_of(timer,
				struct io_clock_wait, io_timer);

	wait->expired = 1;
	wake_up_process(wait->task);
}

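/*
 * Sleep until the IO clock reaches @until. schedule() is called directly,
 * so the caller is responsible for having set a suitable task state;
 * spurious wakeups are possible, and the timer is removed either way.
 */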
void bch2_io_clock_schedule_timeout(struct io_clock *clock, u64 until)
{
	struct io_clock_wait wait = {
		.io_timer.expire	= until,
		.io_timer.fn		= io_clock_wait_fn,
		.io_timer.fn2		= (void *) _RET_IP_,
		.task			= current,
	};

	bch2_io_timer_add(clock, &wait.io_timer);
	schedule();
	bch2_io_timer_del(clock, &wait.io_timer);
}

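/*
 * Sleep once, until the IO clock reaches @io_until or @cpu_timeout jiffies
 * elapse, whichever comes first. Returns the unexpired remainder of
 * @cpu_timeout, as reported by schedule_timeout().
 */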
unsigned long bch2_kthread_io_clock_wait_once(struct io_clock *clock,
					      u64 io_until, unsigned long cpu_timeout)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct io_clock_wait wait = {
		.io_timer.expire	= io_until,
		.io_timer.fn		= io_clock_wait_fn,
		.io_timer.fn2		= (void *) _RET_IP_,
		.task			= current,
	};

	bch2_io_timer_add(clock, &wait.io_timer);

	set_current_state(TASK_INTERRUPTIBLE);
	if (!(kthread && kthread_should_stop())) {
		cpu_timeout = schedule_timeout(cpu_timeout);
		try_to_freeze();
	}

	__set_current_state(TASK_RUNNING);
	bch2_io_timer_del(clock, &wait.io_timer);
	return cpu_timeout;
}

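/*
 * Sleep until the IO clock reaches @io_until, @cpu_timeout runs out, or
 * (for kthreads) kthread_should_stop() becomes true, re-sleeping across
 * spurious wakeups.
 *
 * Hypothetical usage sketch (not from this file; BUCKET_SECTORS and
 * do_background_work() are illustrative placeholders): a kthread doing
 * one unit of work per BUCKET_SECTORS of foreground IO might loop as
 *
 *	while (!kthread_should_stop()) {
 *		u64 now = atomic64_read(&clock->now);
 *
 *		bch2_kthread_io_clock_wait(clock, now + BUCKET_SECTORS,
 *					   MAX_SCHEDULE_TIMEOUT);
 *		do_background_work();
 *	}
 */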
void bch2_kthread_io_clock_wait(struct io_clock *clock,
				u64 io_until, unsigned long cpu_timeout)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;

	while (!(kthread && kthread_should_stop()) &&
	       cpu_timeout &&
	       atomic64_read(&clock->now) < io_until)
		cpu_timeout = bch2_kthread_io_clock_wait_once(clock, io_until, cpu_timeout);
}

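/*
 * Pop the earliest-expiring timer if it has expired as of @now, or return
 * NULL. The caller must hold timer_lock.
 */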
static struct io_timer *get_expired_timer(struct io_clock *clock, u64 now)
{
	struct io_timer *ret = NULL;

	if (clock->timers.nr &&
	    time_after_eq64(now, clock->timers.data[0]->expire)) {
		ret = *min_heap_peek(&clock->timers);
		min_heap_pop(&clock->timers, &callbacks, NULL);
	}

	return ret;
}

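/*
 * Advance the clock by @sectors and run the callback of every timer the
 * new time has passed. Note that callbacks here run with timer_lock held,
 * unlike the already-expired path in bch2_io_timer_add().
 */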
void __bch2_increment_clock(struct io_clock *clock, u64 sectors)
{
	struct io_timer *timer;
	u64 now = atomic64_add_return(sectors, &clock->now);

	spin_lock(&clock->timer_lock);
	while ((timer = get_expired_timer(clock, now)))
		timer->fn(timer);
	spin_unlock(&clock->timer_lock);
}

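/* Dump the clock's current time and all queued timers into @out, for debug output. */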
void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
{
	out->atomic++;
	spin_lock(&clock->timer_lock);
	u64 now = atomic64_read(&clock->now);

	printbuf_tabstop_push(out, 40);
	prt_printf(out, "current time:\t%llu\n", now);

	for (unsigned i = 0; i < clock->timers.nr; i++)
		prt_printf(out, "%ps %ps:\t%llu\n",
			   clock->timers.data[i]->fn,
			   clock->timers.data[i]->fn2,
			   clock->timers.data[i]->expire);
	spin_unlock(&clock->timer_lock);
	--out->atomic;
}

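/* Free the timer heap and the percpu buffer used to batch clock updates. */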
void bch2_io_clock_exit(struct io_clock *clock)
{
	free_heap(&clock->timers);
	free_percpu(clock->pcpu_buf);
}

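/*
 * max_slop bounds how far the clock can lag reality: each CPU may buffer
 * up to IO_CLOCK_PCPU_SECTORS of IO before flushing it into ->now.
 */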
int bch2_io_clock_init(struct io_clock *clock)
{
	atomic64_set(&clock->now, 0);
	spin_lock_init(&clock->timer_lock);

	clock->max_slop = IO_CLOCK_PCPU_SECTORS * num_possible_cpus();

	clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
	if (!clock->pcpu_buf)
		return -BCH_ERR_ENOMEM_io_clock_init;

	if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
		return -BCH_ERR_ENOMEM_io_clock_init;

	return 0;
}