Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2015 Davidlohr Bueso.
4 */
5
6/* For the CLR_() macros */
7#include <string.h>
8#include <pthread.h>
9
10#include <signal.h>
11#include "../util/stat.h"
12#include <subcmd/parse-options.h>
13#include <linux/compiler.h>
14#include <linux/kernel.h>
15#include <linux/zalloc.h>
16#include <errno.h>
17#include <perf/cpumap.h>
18#include "bench.h"
19#include "futex.h"
20
21#include <err.h>
22#include <stdlib.h>
23#include <sys/time.h>
24#include <sys/mman.h>
25
/* Per-thread benchmark state. */
struct worker {
	int tid;		/* worker index, 0..params.nthreads-1 */
	u_int32_t *futex;	/* PI futex word: per-thread (--multi) or &global_futex */
	pthread_t thread;
	unsigned long ops;	/* completed lock/unlock pairs */
};
32
static u_int32_t global_futex = 0;	/* the single shared futex when !params.multi */
static struct worker *worker;		/* array of params.nthreads workers */
/* NOTE(review): plain bool written by the SIGINT handler and read by worker
 * threads without atomics; matches existing perf-bench style — confirm. */
static bool done = false;
static int futex_flag = 0;		/* FUTEX_PRIVATE_FLAG unless --shared */
static pthread_mutex_t thread_lock;	/* protects threads_starting + condvars */
static unsigned int threads_starting;	/* countdown for the startup rendezvous */
static struct stats throughput_stats;	/* per-thread ops/sec samples */
static pthread_cond_t thread_parent, thread_worker;

/* Benchmark knobs; only the runtime has a non-zero default (10s). */
static struct bench_futex_parameters params = {
	.runtime = 10,
};
45
46static const struct option options[] = {
47 OPT_UINTEGER('t', "threads", ¶ms.nthreads, "Specify amount of threads"),
48 OPT_UINTEGER('r', "runtime", ¶ms.runtime, "Specify runtime (in seconds)"),
49 OPT_BOOLEAN( 'M', "multi", ¶ms.multi, "Use multiple futexes"),
50 OPT_BOOLEAN( 's', "silent", ¶ms.silent, "Silent mode: do not display data/details"),
51 OPT_BOOLEAN( 'S', "shared", ¶ms.fshared, "Use shared futexes instead of private ones"),
52 OPT_BOOLEAN( 'm', "mlockall", ¶ms.mlockall, "Lock all current and future memory"),
53 OPT_END()
54};
55
/* Usage text passed to parse_options()/usage_with_options(). */
static const char * const bench_futex_lock_pi_usage[] = {
	"perf bench futex lock-pi <options>",
	NULL
};
60
61static void print_summary(void)
62{
63 unsigned long avg = avg_stats(&throughput_stats);
64 double stddev = stddev_stats(&throughput_stats);
65
66 printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
67 !params.silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
68 (int)bench__runtime.tv_sec);
69}
70
/*
 * SIGINT handler, also invoked directly from bench_futex_lock_pi() once
 * the requested runtime elapses: tells workers to stop and records the
 * benchmark end time and total elapsed runtime.
 */
static void toggle_done(int sig __maybe_unused,
			siginfo_t *info __maybe_unused,
			void *uc __maybe_unused)
{
	/* inform all threads that we're done for the day */
	done = true;
	gettimeofday(&bench__end, NULL);
	timersub(&bench__end, &bench__start, &bench__runtime);
}
80
/*
 * Worker thread body: rendezvous with the parent so all workers start
 * together, then repeatedly FUTEX_LOCK_PI / FUTEX_UNLOCK_PI the assigned
 * futex word until toggle_done() sets 'done', counting completed
 * lock/unlock pairs into w->ops.
 */
static void *workerfn(void *arg)
{
	struct worker *w = (struct worker *) arg;
	unsigned long ops = w->ops;

	/*
	 * Startup rendezvous: announce readiness (last one in signals the
	 * parent), then park on thread_worker until the parent broadcasts.
	 */
	pthread_mutex_lock(&thread_lock);
	threads_starting--;
	if (!threads_starting)
		pthread_cond_signal(&thread_parent);
	pthread_cond_wait(&thread_worker, &thread_lock);
	pthread_mutex_unlock(&thread_lock);

	do {
		int ret;
	again:
		ret = futex_lock_pi(w->futex, NULL, futex_flag);

		if (ret) { /* handle lock acquisition */
			if (!params.silent)
				warn("thread %d: Could not lock pi-lock for %p (%d)",
				     w->tid, w->futex, ret);
			if (done)
				break;

			/* retry until acquired or the run is over */
			goto again;
		}

		/* hold the lock briefly so other waiters actually contend */
		usleep(1);
		ret = futex_unlock_pi(w->futex, futex_flag);
		if (ret && !params.silent)
			warn("thread %d: Could not unlock pi-lock for %p (%d)",
			     w->tid, w->futex, ret);
		ops++; /* account for thread's share of work */
	} while (!done);

	w->ops = ops;
	return NULL;
}
119
120static void create_threads(struct worker *w, pthread_attr_t thread_attr,
121 struct perf_cpu_map *cpu)
122{
123 cpu_set_t *cpuset;
124 unsigned int i;
125 int nrcpus = perf_cpu_map__nr(cpu);
126 size_t size;
127
128 threads_starting = params.nthreads;
129
130 cpuset = CPU_ALLOC(nrcpus);
131 BUG_ON(!cpuset);
132 size = CPU_ALLOC_SIZE(nrcpus);
133
134 for (i = 0; i < params.nthreads; i++) {
135 worker[i].tid = i;
136
137 if (params.multi) {
138 worker[i].futex = calloc(1, sizeof(u_int32_t));
139 if (!worker[i].futex)
140 err(EXIT_FAILURE, "calloc");
141 } else
142 worker[i].futex = &global_futex;
143
144 CPU_ZERO_S(size, cpuset);
145 CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
146
147 if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
148 CPU_FREE(cpuset);
149 err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
150 }
151
152 if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i])) {
153 CPU_FREE(cpuset);
154 err(EXIT_FAILURE, "pthread_create");
155 }
156 }
157 CPU_FREE(cpuset);
158}
159
/*
 * Entry point for "perf bench futex lock-pi".
 *
 * Parses options, spawns one worker per requested thread (default: one
 * per online CPU), lets them hammer FUTEX_LOCK_PI/FUTEX_UNLOCK_PI for
 * params.runtime seconds, then joins them and reports per-thread and
 * averaged throughput.
 *
 * Returns 0 on success; exits the process on setup failures or bad args.
 */
int bench_futex_lock_pi(int argc, const char **argv)
{
	int ret = 0;
	unsigned int i;
	struct sigaction act;
	pthread_attr_t thread_attr;
	struct perf_cpu_map *cpu;

	argc = parse_options(argc, argv, options, bench_futex_lock_pi_usage, 0);
	if (argc)
		goto err;

	cpu = perf_cpu_map__new(NULL);
	if (!cpu)
		err(EXIT_FAILURE, "calloc");

	/* Ctrl-C ends the run early but still reports results */
	memset(&act, 0, sizeof(act));
	sigfillset(&act.sa_mask);
	act.sa_sigaction = toggle_done;
	sigaction(SIGINT, &act, NULL);

	if (params.mlockall) {
		if (mlockall(MCL_CURRENT | MCL_FUTURE))
			err(EXIT_FAILURE, "mlockall");
	}

	/* default: one worker per online CPU */
	if (!params.nthreads)
		params.nthreads = perf_cpu_map__nr(cpu);

	worker = calloc(params.nthreads, sizeof(*worker));
	if (!worker)
		err(EXIT_FAILURE, "calloc");

	if (!params.fshared)
		futex_flag = FUTEX_PRIVATE_FLAG;

	printf("Run summary [PID %d]: %d threads doing pi lock/unlock pairing for %d secs.\n\n",
	       getpid(), params.nthreads, params.runtime);

	init_stats(&throughput_stats);
	pthread_mutex_init(&thread_lock, NULL);
	pthread_cond_init(&thread_parent, NULL);
	pthread_cond_init(&thread_worker, NULL);

	threads_starting = params.nthreads;
	pthread_attr_init(&thread_attr);
	/* start the clock before releasing the workers */
	gettimeofday(&bench__start, NULL);

	create_threads(worker, thread_attr, cpu);
	pthread_attr_destroy(&thread_attr);

	/* wait until every worker is parked on thread_worker ... */
	pthread_mutex_lock(&thread_lock);
	while (threads_starting)
		pthread_cond_wait(&thread_parent, &thread_lock);
	/* ... then release them all at once */
	pthread_cond_broadcast(&thread_worker);
	pthread_mutex_unlock(&thread_lock);

	sleep(params.runtime);
	toggle_done(0, NULL, NULL);

	for (i = 0; i < params.nthreads; i++) {
		ret = pthread_join(worker[i].thread, NULL);
		if (ret)
			err(EXIT_FAILURE, "pthread_join");
	}

	/* cleanup & report results */
	pthread_cond_destroy(&thread_parent);
	pthread_cond_destroy(&thread_worker);
	pthread_mutex_destroy(&thread_lock);

	for (i = 0; i < params.nthreads; i++) {
		/* ops/sec for this thread; 0 if the run was cut short */
		unsigned long t = bench__runtime.tv_sec > 0 ?
			worker[i].ops / bench__runtime.tv_sec : 0;

		update_stats(&throughput_stats, t);
		if (!params.silent)
			/* NOTE(review): t is unsigned long; %lu would be the
			 * exact specifier — confirm before changing output */
			printf("[thread %3d] futex: %p [ %ld ops/sec ]\n",
			       worker[i].tid, worker[i].futex, t);

		if (params.multi)
			zfree(&worker[i].futex);
	}

	print_summary();

	free(worker);
	perf_cpu_map__put(cpu);
	return ret;
err:
	usage_with_options(bench_futex_lock_pi_usage, options);
	exit(EXIT_FAILURE);
}